code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package org.example.declaration.data
import org.example.declaration.data.{B => B_Renamed}
import scala.util._
// Fixture for the IntelliJ Scala "move class" refactoring tests: expected ("after")
// state when a moved class's name clashes with names imported via a wildcard from
// another package (B is disambiguated by the rename import above).
// NOTE(review): refactoring test data -- do not rename symbols or reorder imports.
object UsageSameTargetPackage4 {
def main(args: Array[String]): Unit = {
println(this.getClass)
println(classOf[Random])
println(Properties.versionString)
println()
val x: X = ???
val a: A = ???
val b: B_Renamed = ???
}
} | JetBrains/intellij-scala | scala/scala-impl/testdata/move/moveClass_NameClashesWithOtherNamesImportedFromOtherPackageWithWithWildcard/after/org/example/declaration/data/UsageSameTargetPackage4.scala | Scala | apache-2.0 | 367 |
/*******************************************************************************
* (C) Copyright 2015 ADP, LLC.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package unicorn.json
import org.specs2.mutable._
/**
* @author Haifeng Li
*/
class JsonSerializerSpec extends Specification {

  // Every example is a round trip: the serialized form must deserialize back to a
  // value equal to the input. A fresh serializer is constructed per example so the
  // specs2 examples remain independent of one another.

  "The BsonSerializer" should {
    "serialize JsNull" in {
      val s = new BsonSerializer
      s.deserialize(s.serialize(JsNull)) === JsNull
    }
    "serialize JsTrue" in {
      val s = new BsonSerializer
      s.deserialize(s.serialize(JsTrue)) === JsTrue
    }
    "serialize JsFalse" in {
      val s = new BsonSerializer
      s.deserialize(s.serialize(JsFalse)) === JsFalse
    }
    "serialize 0" in {
      val s = new BsonSerializer
      s.deserialize(s.serialize(JsInt.zero)) === JsInt.zero
    }
    "serialize '1.23'" in {
      val s = new BsonSerializer
      s.deserialize(s.serialize(JsDouble(1.23))) === JsDouble(1.23)
    }
    "serialize \\"xyz\\"" in {
      val s = new BsonSerializer
      s.deserialize(s.serialize(JsString("xyz"))) === JsString("xyz")
    }
    "serialize escapes in a JsString" in {
      val s = new BsonSerializer
      s.deserialize(s.serialize(JsString("\\"\\\\/\\b\\f\\n\\r\\t"))) === JsString("\\"\\\\/\\b\\f\\n\\r\\t")
      s.deserialize(s.serialize(JsString("Länder"))) === JsString("Länder")
    }
    "serialize '1302806349000'" in {
      val s = new BsonSerializer
      s.deserialize(s.serialize(JsLong(1302806349000L))) === JsLong(1302806349000L)
    }
    "serialize '2015-08-10T10:00:00.123Z'" in {
      val s = new BsonSerializer
      s.deserialize(s.serialize(JsDate("2015-08-10T10:00:00.123Z"))) === JsDate("2015-08-10T10:00:00.123Z")
    }
    "serialize 'CA761232-ED42-11CE-BACD-00AA0057B223'" in {
      val s = new BsonSerializer
      s.deserialize(s.serialize(JsUUID("CA761232-ED42-11CE-BACD-00AA0057B223"))) === JsUUID("CA761232-ED42-11CE-BACD-00AA0057B223")
    }
    "serialize test.json" in {
      val s = new BsonSerializer
      val raw = scala.io.Source.fromInputStream(getClass.getResourceAsStream("/test.json")).mkString
      val doc = JsonParser(raw)
      val encoded = s.serialize(doc)
      // The whole document is packed under a single root entry.
      encoded.size === 1
      // NOTE(review): `====` is specs2 typed equality; presumably intentional rather
      // than a typo for `===` -- confirm.
      encoded.isDefinedAt(s.root) ==== true
      s.deserialize(encoded) === doc
    }
  }

  "The ColumnarJsonSerializer" should {
    "serialize JsNull" in {
      val s = new ColumnarJsonSerializer
      s.deserialize(s.serialize(JsNull)) === JsNull
    }
    "serialize JsTrue" in {
      val s = new ColumnarJsonSerializer
      s.deserialize(s.serialize(JsTrue)) === JsTrue
    }
    "serialize JsFalse" in {
      val s = new ColumnarJsonSerializer
      s.deserialize(s.serialize(JsFalse)) === JsFalse
    }
    "serialize 0" in {
      val s = new ColumnarJsonSerializer
      s.deserialize(s.serialize(JsInt.zero)) === JsInt.zero
    }
    "serialize '1.23'" in {
      val s = new ColumnarJsonSerializer
      s.deserialize(s.serialize(JsDouble(1.23))) === JsDouble(1.23)
    }
    "serialize \\"xyz\\"" in {
      val s = new ColumnarJsonSerializer
      s.deserialize(s.serialize(JsString("xyz"))) === JsString("xyz")
    }
    "serialize escapes in a JsString" in {
      val s = new ColumnarJsonSerializer
      s.deserialize(s.serialize(JsString("\\"\\\\/\\b\\f\\n\\r\\t"))) === JsString("\\"\\\\/\\b\\f\\n\\r\\t")
      s.deserialize(s.serialize(JsString("Länder"))) === JsString("Länder")
    }
    "serialize '1302806349000'" in {
      val s = new ColumnarJsonSerializer
      s.deserialize(s.serialize(JsLong(1302806349000L))) === JsLong(1302806349000L)
    }
    "serialize '2015-08-10T10:00:00.123Z'" in {
      val s = new ColumnarJsonSerializer
      s.deserialize(s.serialize(JsDate("2015-08-10T10:00:00.123Z"))) === JsDate("2015-08-10T10:00:00.123Z")
    }
    "serialize 'CA761232-ED42-11CE-BACD-00AA0057B223'" in {
      val s = new ColumnarJsonSerializer
      s.deserialize(s.serialize(JsUUID("CA761232-ED42-11CE-BACD-00AA0057B223"))) === JsUUID("CA761232-ED42-11CE-BACD-00AA0057B223")
    }
    "serialize test.json" in {
      val s = new ColumnarJsonSerializer
      val raw = scala.io.Source.fromInputStream(getClass.getResourceAsStream("/test.json")).mkString
      val doc = JsonParser(raw)
      s.deserialize(s.serialize(doc)) === doc
    }
  }
}
| adplabs/unicorn | json/src/test/scala/unicorn/json/JsonSerializerSpec.scala | Scala | apache-2.0 | 5,509 |
package com.ubirch.avatar.backend.route
import com.ubirch.avatar.test.base.{ElasticsearchSpec, RouteSpec}
import com.ubirch.avatar.util.server.RouteConstants
/**
* author: cvandrei
* since: 2016-10-27
*/
// Scaffold for the device-state route tests: every scenario is a placeholder
// registered with ignore(...) so it is listed by the runner but not executed.
class DeviceStateRouteSpec extends RouteSpec
with ElasticsearchSpec {
// Read side: fetching the current state of a device.
feature(s"GET ${RouteConstants.pathDeviceState(":deviceId")}") {
ignore("deviceId does not exist") {
// TODO write test
}
ignore("deviceId has no state") {
// TODO write test
}
ignore("deviceId has a state") {
// TODO write test
}
}
// Write side: updating a device's state.
feature(s"POST ${RouteConstants.pathDeviceState(":deviceId")}") {
ignore("deviceId does not exist") {
// TODO write test
}
ignore("input json is invalid") {
// TODO write test
}
ignore("state update is successful") {
// TODO write test
}
}
}
| ubirch/ubirch-avatar-service | server/src/test/scala/com/ubirch/avatar/backend/route/DeviceStateRouteSpec.scala | Scala | apache-2.0 | 863 |
package esp.eventuate
import esp.PendingIfUnimplemented
import esp.model.ApiTest
// Runs the shared model ApiTest suite against the Eventuate-backed implementation.
// PendingIfUnimplemented presumably marks not-yet-implemented operations as pending
// instead of failing -- confirm against that trait's definition.
class EventuateApiTest extends ApiTest with EventuateApi with PendingIfUnimplemented
| lukasz-golebiewski/event-sourcing-playground | src/test/scala/esp/eventuate/EventuateApiTest.scala | Scala | gpl-2.0 | 168 |
// scalac: -Xfatal-warnings
//
class A { // neg-test fixture: compiled with -Xfatal-warnings; marked cases must trigger "unreachable code" (trailing comments only, to keep line numbers stable for the .check file)
def f1(x: Int) = x match {
case _ if false => x // unreachable
case _ => x
}
def f2(x: Int) = x match {
case _ if false => x // unreachable
case _ if true => x
}
def f3(x: Int) = x match {
case _ => x // catch-all first: every later case is dead
case _ if true => x // unreachable
}
def test1(x: Int) = x match {
case c if c < 0 => 0
case 1 => 1
case _ => 2 // all cases reachable here; no warning expected -- TODO confirm against .check
}
}
| lrytz/scala | test/files/neg/t6048.scala | Scala | apache-2.0 | 442 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.stream.table
import java.sql.Timestamp
import org.apache.flink.api.scala._
import org.apache.flink.table.api.scala._
import org.apache.flink.table.utils.TableTestBase
import org.apache.flink.table.utils.TableTestUtil._
import org.junit.Test
/**
* Currently only time-windowed inner joins can be processed in a streaming fashion.
*/
class JoinTest extends TableTestBase {
// Each test builds a Table API time-windowed join and asserts the exact string
// rendering of the optimized DataStream plan (DataStreamWindowJoin fed by
// DataStreamCalc projections). The expected strings must match byte-for-byte.
@Test
def testRowTimeWindowInnerJoin(): Unit = {
val util = streamTestUtil()
val left = util.addTable[(Long, Int, String)]('a, 'b, 'c, 'ltime.rowtime)
val right = util.addTable[(Long, Int, String)]('d, 'e, 'f, 'rtime.rowtime)
val resultTable = left.join(right)
.where('a === 'd && 'ltime >= 'rtime - 5.minutes && 'ltime < 'rtime + 3.seconds)
.select('a, 'e, 'ltime)
// Time bounds appear in the plan as millisecond arithmetic on the rowtime
// attributes (5 minutes -> 300000, 3 seconds -> 3000).
val expected =
unaryNode(
"DataStreamCalc",
binaryNode(
"DataStreamWindowJoin",
unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a", "ltime")
),
unaryNode(
"DataStreamCalc",
streamTableNode(1),
term("select", "d", "e", "rtime")
),
term("where", "AND(=(a, d), >=(ltime, -(rtime, 300000))," +
" <(ltime, DATETIME_PLUS(rtime, 3000)))"),
term("join", "a", "ltime", "d", "e", "rtime"),
term("joinType", "InnerJoin")
),
term("select", "a", "e", "ltime")
)
util.verifyTable(resultTable, expected)
}
@Test
def testProcTimeWindowInnerJoin(): Unit = {
val util = streamTestUtil()
val left = util.addTable[(Long, Int, String)]('a, 'b, 'c, 'ltime.proctime)
val right = util.addTable[(Long, Int, String)]('d, 'e, 'f, 'rtime.proctime)
val resultTable = left.join(right)
.where('a === 'd && 'ltime >= 'rtime - 1.second && 'ltime < 'rtime)
.select('a, 'e, 'ltime)
// Unlike rowtime, the projected proctime attribute is re-materialized in the
// final projection via PROCTIME(ltime).
val expected =
unaryNode(
"DataStreamCalc",
binaryNode(
"DataStreamWindowJoin",
unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a", "ltime")
),
unaryNode(
"DataStreamCalc",
streamTableNode(1),
term("select", "d", "e", "rtime")
),
term("where", "AND(=(a, d), >=(ltime, -(rtime, 1000)), <(ltime, rtime))"),
term("join", "a", "ltime", "d", "e", "rtime"),
term("joinType", "InnerJoin")
),
term("select", "a", "e", "PROCTIME(ltime) AS ltime")
)
util.verifyTable(resultTable, expected)
}
/**
* The time indicator can be accessed from non-time predicates now.
*/
@Test
def testInnerJoinWithTimeIndicatorAccessed(): Unit = {
val util = streamTestUtil()
val left = util.addTable[(Long, Int, Timestamp)]('a, 'b, 'c, 'ltime.rowtime)
val right = util.addTable[(Long, Int, Timestamp)]('d, 'e, 'f, 'rtime.rowtime)
val resultTable = left.join(right)
.where('a ==='d && 'ltime >= 'rtime - 5.minutes && 'ltime < 'rtime && 'ltime > 'f)
// No select(): all input fields are joined through, so no DataStreamCalc on top.
val expected =
binaryNode(
"DataStreamWindowJoin",
streamTableNode(0),
streamTableNode(1),
term("where", "AND(=(a, d), >=(ltime, -(rtime, 300000)), <(ltime, rtime), >(ltime, f))"),
term("join", "a", "b", "c", "ltime", "d", "e", "f", "rtime"),
term("joinType", "InnerJoin")
)
util.verifyTable(resultTable, expected)
}
}
| haohui/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/table/JoinTest.scala | Scala | apache-2.0 | 4,308 |
package org.jetbrains.plugins.scala.annotator.template
import org.jetbrains.plugins.scala.annotator.AnnotatorTestBase
import org.jetbrains.plugins.scala.annotator.Error
/**
* Pavel Fatin
*/
// Annotator tests for the "object creation impossible" inspection: instantiating
// or declaring an object from a type with undefined (abstract) members must be
// flagged, while fully concrete instantiations must produce no annotation.
// NOTE(review): methods use Scala 2 procedure syntax (`def f { }`), which is
// deprecated; `def f(): Unit = { }` is preferred.
class ObjectCreationImpossibleTest extends AnnotatorTestBase(ObjectCreationImpossible) {
// Concrete `new` expressions: no error expected.
def testFineNew {
assertNothing(messages("class C; new C"))
assertNothing(messages("class C; new C {}"))
assertNothing(messages("class C; trait T; new C with T"))
assertNothing(messages("class C; trait T; new C with T {}"))
}
// Concrete object declarations: no error expected.
def testFineObject {
assertNothing(messages("class C; object O extends C"))
assertNothing(messages("class C; object O extends C {}"))
assertNothing(messages("class C; trait T; object O extends C with T"))
assertNothing(messages("class C; trait T; object O extends C with T {}"))
}
def testTypeSkipDeclarations {
assertNothing(messages("class C { def f }"))
}
// Instantiating an abstract type directly is handled by a different inspection.
def testSkipAbstractInstantiations {
assertNothing(messages("trait T; new T"))
}
def testSkipConcrete {
assertNothing(messages("class C { def f }; new C"))
assertNothing(messages("class C { def f }; new C {}"))
assertNothing(messages("class C { def f }; new Object with C"))
assertNothing(messages("class C { def f }; new Object with C {}"))
}
def testSkipInvalidDirect {
assertNothing(messages("new { def f }"))
assertNothing(messages("new Object { def f }"))
assertNothing(messages("object O { def f }"))
}
// Undefined member in an anonymous subclass: one error on the type reference.
def testUndefinedMember {
val Message = ObjectCreationImpossible.message(("f: Unit", "Holder.T"))
assertMatches(messages("trait T { def f }; new T {}")) {
case Error("T", Message) :: Nil =>
}
}
def testUndefinedMemberObject {
val Message = ObjectCreationImpossible.message(("f: Unit", "Holder.T"))
assertMatches(messages("trait T { def f }; object O extends T {}")) {
case Error("O", Message) :: Nil =>
}
}
def testUndefinedAndWith{
val Message = ObjectCreationImpossible.message(("f: Unit", "Holder.T"))
assertMatches(messages("trait T { def f }; new Object with T {}")) {
case Error("Object", Message) :: Nil =>
}
}
// Two undefined members: the message may list them in either order, so both
// permutations are accepted.
def testNeedsToBeAbstractPlaceDiffer {
val Message = ObjectCreationImpossible.message(
("b: Unit", "Holder.B"), ("a: Unit", "Holder.A"))
val ReversedMessage = ObjectCreationImpossible.message(
("a: Unit", "Holder.A"), ("b: Unit", "Holder.B"))
assertMatches(messages("trait A { def a }; trait B { def b }; new A with B {}")) {
case Error("A", Message) :: Nil =>
case Error("A", ReversedMessage) :: Nil =>
}
}
// Regression test: abstract type members alone must not trigger the error (SCL-2887).
def testSkipTypeDeclarationSCL2887 {
assertMatches(messages("trait A { type a }; new A {}")) {
case Nil =>
}
}
} | consulo/consulo-scala | test/org/jetbrains/plugins/scala/annotator/template/ObjectCreationImpossibleTest.scala | Scala | apache-2.0 | 2,750 |
package scalariform.formatter
import scalariform.formatter.preferences._
import scalariform.parser._
trait AnnotationFormatter { self: HasFormattingPreferences with TypeFormatter with ExprFormatter ⇒

  /**
   * Formats an annotation: the annotation type is placed compactly after the
   * `@` token, each argument list is formatted in turn, and any trailing
   * newline token is collapsed.
   */
  def format(annotation: Annotation)(implicit formatterState: FormatterState): FormatResult = {
    val Annotation(_, tpe, argLists, newlineOpt) = annotation
    var result: FormatResult = NoFormatResult
    result = result.before(tpe.firstToken, Compact)
    result ++= format(tpe)
    argLists.foreach { args ⇒
      result ++= format(args)._1
    }
    newlineOpt.foreach { newline ⇒
      result = result.formatNewline(newline, Compact) // TODO: rethink
    }
    result
  }
}
| mdr/scalariform | scalariform/src/main/scala/scalariform/formatter/AnnotationFormatter.scala | Scala | mit | 789 |
package com.github.ldaniels528.trifecta.io.kafka
import com.github.ldaniels528.trifecta.io.kafka.KafkaMicroConsumer.ConsumerDetailsPM
import com.github.ldaniels528.trifecta.io.kafka.KafkaZkUtils._
import com.github.ldaniels528.trifecta.io.zookeeper.ZKProxy
import net.liftweb.json.parse
import scala.language.postfixOps
import scala.util.Try
/**
* Kafka-Zookeeper Utilities
* @author lawrence.daniels@gmail.com
*/
// Reads Kafka broker/topic/consumer metadata directly from the ZooKeeper tree.
// All paths are prefixed with rootKafkaPath to support chroot'ed Kafka installs.
class KafkaZkUtils(var rootKafkaPath: String = "/") {
// lift-json formats used by every extract[...] call below
private implicit val formats = net.liftweb.json.DefaultFormats
/**
* Returns the bootstrap servers as a comma-delimited string
* @return the bootstrap servers as a comma-delimited string
*/
def getBootstrapServers(implicit zk: ZKProxy): String = {
getBrokerList map (b => s"${b.host}:${b.port}") mkString ","
}
/**
* Retrieves the list of defined brokers from Zookeeper
*/
def getBrokerList(implicit zk: ZKProxy): Seq[BrokerDetails] = {
val basePath = getPrefixedPath("/brokers/ids")
for {
brokerId <- zk.getChildren(basePath)
brokerPath = s"$basePath/$brokerId"
json <- zk.readString(brokerPath) if zk.exists(brokerPath)
details = parse(json).extract[BrokerDetails]
} yield details
}
// Reads per-partition state (leader, ISR, epochs) for every topic under
// /brokers/topics/<topic>/partitions/<partition>/state.
def getBrokerTopics(implicit zk: ZKProxy): Seq[TopicState] = {
val basePath = getPrefixedPath("/brokers/topics")
for {
topic <- zk.getChildren(basePath)
partitionsPath = s"$basePath/$topic/partitions"
partition <- Try(zk.getChildren(partitionsPath)).getOrElse(Nil) if zk.exists(partitionsPath)
statePath = s"$partitionsPath/$partition/state"
json <- zk.readString(statePath) if zk.exists(statePath)
state = parse(json).extract[TopicStateRaw]
} yield TopicState(
topic = topic,
partition = partition.toInt,
controller_epoch = state.controller_epoch,
leader_epoch = state.leader_epoch,
leader = state.leader,
version = state.version,
isr = state.isr
)
}
// NOTE(review): debug helper -- writes directly to stdout and appears unused
// within this class; consider removing.
private def check(path: String)(implicit zk: ZKProxy) {
System.out.println(s"path: '$path' (exists? ${zk.exists(path)})")
}
// Lists the topic names registered under /brokers/topics.
def getBrokerTopicNames(implicit zk: ZKProxy): Seq[String] = {
zk.getChildren(path = getPrefixedPath("/brokers/topics"))
}
/**
* Returns the list of partitions for the given topic
*/
def getBrokerTopicPartitions(topic: String)(implicit zk: ZKProxy): Seq[Int] = {
val basePath = getPrefixedPath(s"/brokers/topics/$topic/partitions")
if (zk.exists(basePath)) zk.getChildren(basePath) map (_.toInt) else Nil
}
// Flattens every consumer group's offsets/owners/threads into one row per
// (group, topic, partition). Failed lookups degrade to empty collections.
// NOTE(review): performs several ZooKeeper reads per group -- O(groups * nodes).
def getConsumerDetails(implicit zk: ZKProxy): Seq[ConsumerDetails] = {
for {
groupId <- getConsumerGroupIds
consumerOffset <- Try(getConsumerOffsets(groupId)).getOrElse(Nil)
threads = Try(getConsumerThreads(groupId)).getOrElse(Nil)
consumerOwners = Try(getConsumerOwners(groupId)).getOrElse(Nil)
// lookup the owner object
consumerOwner = consumerOwners.find(o =>
o.topic == consumerOffset.topic &&
o.partition == consumerOffset.partition)
// lookup the thread object
thread = threads.find(t => t.topic == consumerOffset.topic)
} yield ConsumerDetails(
consumerId = groupId,
version = thread.map(_.version),
threadId = consumerOwner.map(_.threadId),
topic = consumerOffset.topic,
partition = consumerOffset.partition,
offset = consumerOffset.offset,
lastModified = consumerOffset.lastModifiedTime,
lastModifiedISO = consumerOffset.lastModifiedTime.flatMap(toISODateTime(_).toOption)
)
}
// Aggregates one group's offsets, owners and threads; each sub-lookup that
// fails is replaced with Nil rather than failing the whole call.
def getConsumerGroup(groupId: String)(implicit zk: ZKProxy): Option[ConsumerGroup] = {
Option(ConsumerGroup(
consumerId = groupId,
offsets = Try(getConsumerOffsets(groupId)).getOrElse(Nil),
owners = Try(getConsumerOwners(groupId)).getOrElse(Nil),
threads = Try(getConsumerThreads(groupId)).getOrElse(Nil)
))
}
// Lists the consumer group IDs registered under /consumers.
def getConsumerGroupIds(implicit zk: ZKProxy): Seq[String] = {
zk.getChildren(path = getPrefixedPath("/consumers"))
}
/**
* Retrieves consumer owner information from zk:/consumers/<groupId>/owners
* @param groupId the given consumer group ID
* @param zk the implicit [[ZKProxy]]
* @return a collection of [[ConsumerOwner]]s
*/
def getConsumerOwners(groupId: String)(implicit zk: ZKProxy): Seq[ConsumerOwner] = {
val ownersPath = getPrefixedPath(s"/consumers/$groupId/owners")
// retrieve the owners: /consumers/<groupId>/owners/<topic>/<partition>
for {
topic <- zk.getChildren(ownersPath)
consumerPath = s"$ownersPath/$topic"
partitionId <- zk.getChildren(consumerPath).map(_.trim)
consumerOffsetPath = s"$consumerPath/$partitionId" if partitionId.nonEmpty
consumerThreadId <- zk.readString(consumerOffsetPath) if zk.exists(consumerOffsetPath)
} yield ConsumerOwner(groupId, topic, consumerThreadId, partitionId.toInt)
}
/**
* Retrieves consumer offset information from zk:/consumers/<groupId>/offsets
* @param groupId the given consumer group ID
* @param zk the implicit [[ZKProxy]]
* @return a collection of [[ConsumerOffset]]s
*/
def getConsumerOffsets(groupId: String)(implicit zk: ZKProxy): Seq[ConsumerOffset] = {
val offsetsPath = getPrefixedPath(s"/consumers/$groupId/offsets")
// retrieve the owners: /consumers/<groupId>/offsets/<topic>/<partition>
for {
topic <- zk.getChildren(offsetsPath)
consumerTopicPath = s"$offsetsPath/$topic"
partitionId <- zk.getChildren(consumerTopicPath) if zk.exists(consumerTopicPath)
consumerPartitionPath = s"$consumerTopicPath/$partitionId"
consumerOffset <- zk.readString(consumerPartitionPath) if zk.exists(consumerPartitionPath)
lastModifiedTime = zk.getModificationTime(consumerPartitionPath)
} yield ConsumerOffset(groupId, topic, partitionId.toInt, consumerOffset.toLong, lastModifiedTime)
}
/**
* Retrieves consumer thread information from zk:/consumers/<groupId>/ids
* @param groupId the given consumer group ID
* @param zk the implicit [[ZKProxy]]
* @return a collection of [[ConsumerThread]]s
*/
def getConsumerThreads(groupId: String)(implicit zk: ZKProxy): Seq[ConsumerThread] = {
val idsPath = getPrefixedPath(s"/consumers/$groupId/ids")
// retrieve the owners: /consumers/<groupId>/ids/<threadId>
// {"version":1,"subscription":{"birf_json_qa_pibv":4},"pattern":"static","timestamp":"1483744242777"}
(for {
threadId <- zk.getChildren(idsPath)
threadInfoPath = s"$idsPath/$threadId"
json <- zk.readString(threadInfoPath).map(_.trim) if zk.exists(threadInfoPath)
jsObj = parse(json).extract[ConsumerThreadRaw] if json.nonEmpty
// one ConsumerThread per subscribed topic (the partition count is dropped)
threads = jsObj.subscription map { case (topic, _) =>
ConsumerThread(
version = jsObj.version,
groupId = groupId,
threadId = threadId,
topic = topic,
timestamp = jsObj.timestamp,
timestampISO = jsObj.timestampISO)
} toSeq
} yield threads).flatten
}
/**
* Retrieves the list of consumers from Zookeeper (Kafka-Storm Partition Manager Version)
*/
def getConsumersForStorm()(implicit zk: ZKProxy): Seq[ConsumerDetailsPM] = {
// scans the whole tree for nodes named ".../partition_<n>" and parses each
// node's JSON payload; unparsable payloads are silently skipped (Try.toOption)
zk.getFamily(path = getPrefixedPath("/")).distinct filter (_.matches( """\\S+[/]partition_\\d+""")) flatMap { path =>
zk.readString(path) flatMap { jsonString =>
val lastModified = zk.getModificationTime(path)
Try {
val json = parse(jsonString)
val id = (json \\ "topology" \\ "id").extract[String]
val name = (json \\ "topology" \\ "name").extract[String]
val topic = (json \\ "topic").extract[String]
val offset = (json \\ "offset").extract[Long]
val partition = (json \\ "partition").extract[Int]
val brokerHost = (json \\ "broker" \\ "host").extract[String]
val brokerPort = (json \\ "broker" \\ "port").extract[Int]
ConsumerDetailsPM(id, name, topic, partition, offset, lastModified, s"$brokerHost:$brokerPort")
} toOption
}
}
}
/**
* Prefixes the given path to support instances where the Zookeeper is either multi-tenant or uses
* a custom-directory structure.
* @param path the given Zookeeper/Kafka path
* @return the prefixed path
*/
def getPrefixedPath(path: String): String = s"$rootKafkaPath$path".replaceAllLiterally("//", "/")
}
/**
* Kafka-Zookeeper Utilities
* @author lawrence.daniels@gmail.com
*/
object KafkaZkUtils {
// Formats an epoch-millis string as "yyyy-MM-dd HH:mm:ss z"; yields Failure if
// ts is not a valid Long.
// NOTE(review): despite the name, the pattern is not ISO-8601 -- confirm the
// intended format before changing.
def toISODateTime(ts: String): Try[String] = {
val sdf = new java.text.SimpleDateFormat("yyyy-MM-dd HH:mm:ss z")
Try(sdf.format(new java.util.Date(ts.toLong)))
}
// Long overload of the above; a fresh SimpleDateFormat is created per call,
// which side-steps SimpleDateFormat's thread-unsafety.
def toISODateTime(ts: Long): Try[String] = {
val sdf = new java.text.SimpleDateFormat("yyyy-MM-dd HH:mm:ss z")
Try(sdf.format(new java.util.Date(ts)))
}
/**
* Object representation of JSON broker information
* @example {{{ {"jmx_port":9999,"timestamp":"1405818758964","host":"dev502","version":1,"port":9092} }}}
*/
case class BrokerDetails(jmx_port: Int, timestamp: String, host: String, version: Int, port: Int) {
lazy val timestampISO: Option[String] = toISODateTime(timestamp).toOption
}
/**
* Represents the consumer group details for a given topic partition
*/
case class ConsumerDetails(version: Option[Int], consumerId: String, threadId: Option[String], topic: String, partition: Int, offset: Long, lastModified: Option[Long], lastModifiedISO: Option[String])
// Aggregate of all per-group metadata read from ZooKeeper.
case class ConsumerGroup(consumerId: String, offsets: Seq[ConsumerOffset], owners: Seq[ConsumerOwner], threads: Seq[ConsumerThread])
// Ownership of one topic partition by a consumer thread.
case class ConsumerOwner(groupId: String, topic: String, threadId: String, partition: Int)
// Committed offset of one topic partition for a group.
case class ConsumerOffset(groupId: String, topic: String, partition: Int, offset: Long, lastModifiedTime: Option[Long])
/**
* Object representation of JSON consumer thread
* @param version the Kafka protocol version number
* @param subscription the topic/partition subscription mapping
* @param pattern the pattern (e.g. "static")
* @param timestamp the last updated time
* @example {{{ {"version":1,"subscription":{"birf_json_qa_pibv":4},"pattern":"static","timestamp":"1483744242777"} }}}
*/
case class ConsumerThreadRaw(version: Int, subscription: Map[String, Int], pattern: String, timestamp: String) {
lazy val timestampISO: Option[String] = toISODateTime(timestamp).toOption
}
// Flattened, per-topic view of ConsumerThreadRaw.
case class ConsumerThread(version: Int, groupId: String, threadId: String, topic: String, timestamp: String, timestampISO: Option[String])
// Parsed partition state joined with its topic/partition identity.
case class TopicState(topic: String, partition: Int, controller_epoch: Int, leader_epoch: Int, leader: Int, version: Int, isr: Seq[Int])
// Raw JSON shape of /brokers/topics/<t>/partitions/<p>/state.
case class TopicStateRaw(controller_epoch: Int, leader_epoch: Int, leader: Int, version: Int, isr: Seq[Int])
case class TopicWithPartition(topic: String, partitionCount: Int)
} | ldaniels528/trifecta | src/main/scala/com/github/ldaniels528/trifecta/io/kafka/KafkaZkUtils.scala | Scala | apache-2.0 | 10,997 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.asterix.connector
import org.apache.asterix.connector.QueryType.QueryType
import org.apache.asterix.connector.rdd.AsterixRDD
import org.apache.asterix.connector.result.AsterixClient
import org.apache.hyracks.api.dataset.DatasetDirectoryRecord.Status
import org.apache.spark.{Logging, SparkContext}
import scala.util.{Failure, Success, Try}
/**
* This class extends SparkContext (implicitly) to query AsterixDB.
* @param sc SparkContext
*/
/**
 * Adds AsterixDB query methods to SparkContext (used via implicit conversion).
 *
 * @param sc SparkContext
 */
class SparkContextFunctions(@transient sc: SparkContext) extends Serializable with Logging {

  /** Milliseconds to pause between successive job-status polls. */
  private val WaitTime = 100

  /** Connector configuration assembled from the SparkConf of the wrapped context. */
  private val configuration: Configuration = {
    val sparkConf = sc.getConf

    // Required settings: SparkConf.get fails fast if any of these is missing.
    val host: String = sparkConf.get(Configuration.AsterixDBHost)
    val port: String = sparkConf.get(Configuration.AsterixDBPort)
    val frameSize: String = sparkConf.get(Configuration.AsterixDBFrameSize)

    // Optional settings: fall back to the client defaults when the key is absent.
    // (A present-but-non-numeric value still fails fast via .toInt.)
    val nFrame: Int = Try(sparkConf.get(Configuration.AsterixDBFrameNumber)) match {
      case Success(n) => n.toInt
      case Failure(_) => AsterixClient.NUM_FRAMES
    }
    val nReader: Int = Try(sparkConf.get(Configuration.AsterixDBNumberOfReaders)) match {
      case Success(n) => n.toInt
      case Failure(_) => AsterixClient.NUM_READERS
    }
    val prefetchThreshold: Int = Try(sparkConf.get(Configuration.AsterixDBPrefetchThreshold)) match {
      case Success(n) => n.toInt
      case Failure(_) => AsterixClient.PREFETCH_THRESHOLD
    }

    logInfo(Configuration.AsterixDBHost + " " + host)
    logInfo(Configuration.AsterixDBPort + " " + port)
    logInfo(Configuration.AsterixDBFrameSize + " " + frameSize)
    logInfo(Configuration.AsterixDBFrameNumber + " " + nFrame)
    logInfo(Configuration.AsterixDBNumberOfReaders + " " + nReader)
    logInfo(Configuration.AsterixDBPrefetchThreshold + " " + prefetchThreshold)

    new Configuration(
      host,
      port,
      frameSize.toInt,
      nFrame,
      nReader,
      prefetchThreshold
    )
  }

  /** HTTP client for the AsterixDB REST API. */
  private val api = new AsterixHttpAPI(configuration)

  /** Executes an AQL query and returns its result set as an [[AsterixRDD]]. */
  def aql(aql: String): AsterixRDD = executeQuery(aql, QueryType.AQL)

  /** Executes a SQL++ query and returns its result set as an [[AsterixRDD]]. */
  def sqlpp(sqlpp: String): AsterixRDD = executeQuery(sqlpp, QueryType.SQLPP)

  /**
   * Submits the query, polls until the job finishes, then wraps the result
   * locations in an RDD.
   *
   * @throws AsterixConnectorException if the job reports FAILED
   */
  private def executeQuery(query: String, queryType: QueryType): AsterixRDD = {
    val handle = queryType match {
      case QueryType.AQL => api.executeAQL(query)
      case QueryType.SQLPP => api.executeSQLPP(query)
    }

    var isRunning = true
    while (isRunning) {
      api.getStatus(handle) match {
        case Status.SUCCESS => isRunning = false
        case Status.FAILED => throw new AsterixConnectorException("Job " + handle.jobId + " failed.")
        // Status.RUNNING: pause before polling again.
        // BUG FIX: the original called wait(WaitTime) -- Object.wait requires
        // owning the monitor and would throw IllegalMonitorStateException here.
        case _ => Thread.sleep(WaitTime)
      }
    }

    val resultLocations = api.getResultLocations(handle)
    new AsterixRDD(sc, query, api, resultLocations, handle, configuration)
  }
}
| Nullification/asterixdb-spark-connector | src/main/scala/org/apache/asterix/connector/SparkContextFunctions.scala | Scala | apache-2.0 | 3,753 |
package extruder.core
import extruder.data.PathElement
trait Settings {
  /** Key emitted for the synthetic `Type` path element. */
  val typeKey: String = "type"

  /** Renders a path of elements as a single string via [[pathToString]]. */
  final def pathElementListToString(path: List[PathElement]): String = {
    val parts = pathElementsAsStrings(path)
    pathToString(parts)
  }

  /** Converts path elements to strings; class-name elements are dropped when disabled. */
  final def pathElementsAsStrings(path: List[PathElement]): List[String] =
    path.collect {
      case PathElement.Standard(value) => value
      case PathElement.ClassName(name) if includeClassNameInPath => name
      case PathElement.Type => typeKey
    }

  /** Joins the string form of a path; implementation-specific separator. */
  def pathToString(path: List[String]): String

  /** Whether class-name path elements participate in the rendered path. */
  val includeClassNameInPath: Boolean = true
}
| janstenpickle/extruder | core/src/main/scala/extruder/core/Settings.scala | Scala | mit | 582 |
package org.mtrupkin.console
/**
* Created by mtrupkin on 12/14/2014.
*/
/** An RGB colour triple; channels are expected in the 0-255 range. */
case class RGB(r: Int, g: Int, b:Int)

object RGB {
  /** Renders a colour as an upper-case "#RRGGBB" hex string. */
  implicit def toString(rgb: RGB): String =
    f"#${rgb.r}%02X${rgb.g}%02X${rgb.b}%02X"

  /** Parses a "#RRGGBB" hex string back into an [[RGB]] value. */
  implicit def toRGB(s: String): RGB = {
    // Two hex digits per channel, starting after the leading '#'.
    def channel(offset: Int): Int = Integer.parseInt(s.substring(offset, offset + 2), 16)
    RGB(channel(1), channel(3), channel(5))
  }
}
// Named palette used by the console renderer.
object Colors {
val Black = RGB(0, 0, 0)
val White = RGB(255, 255, 255)
val LightGrey = RGB(126, 126, 126)
val Yellow = RGB(255, 255, 0)
val Blue = RGB(0, 0, 255)
val Red = RGB(255, 0, 0)
val Green = RGB(0, 255, 0)
// NOTE(review): the "Light*" values below are numerically *darker* than their
// full-intensity counterparts (e.g. LightRed = 126,0,0); the naming may be
// misleading, but the values are likely depended upon -- do not change silently.
val LightYellow = RGB(126, 126, 0)
val LightBlue = RGB(21, 105, 199)
val LightRed = RGB(126, 0, 0)
val LightGreen = RGB(0, 126, 0)
}
| mtrupkin/console-core | src/main/scala/console/RGB.scala | Scala | mit | 922 |
/*
* Copyright (C) 2011-2013 org.bayswater
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bayswater.musicrest.authentication
import com.typesafe.config.ConfigException

import scala.concurrent.{ ExecutionContext, Future, Promise }
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.control.NonFatal

import spray.routing.authentication._
import spray.routing.authentication.BasicUserContext

import org.bayswater.musicrest.model.UserModel
/** an authenticator guarding access to particular URLs that checks credentials with the backend
*
*/
/** Spray authenticators guarding URL access, validating credentials against the backend. */
object Backend {

  /** Authenticates the supplied user name / password against the user model. */
  val UserAuthenticator = UserPassAuthenticator[BasicUserContext] { userPassOption ⇒
    Future(userPassOption match {
      case Some(UserPass(user, pass)) =>
        try {
          if (UserModel().isValidUser(user, pass)) Some(BasicUserContext(user)) else None
        }
        catch {
          // Treat recoverable backend failures as authentication failure, but let
          // fatal JVM errors (OutOfMemoryError etc.) propagate instead of being
          // silently swallowed as the previous `case _: Throwable` did.
          case NonFatal(_) => None
        }
      case _ => None
    })
  }

  /** Accepts only the successfully authenticated "administrator" user. */
  val AdminAuthenticator = UserPassAuthenticator[BasicUserContext] { userPassOption =>
    val userFuture = UserAuthenticator(userPassOption)
    userFuture.map(someUser => someUser.filter(bu => bu.username == "administrator"))
  }
}
| newlandsvalley/musicrest | src/main/scala/org/bayswater/musicrest/authentication/BackendAuthenticator.scala | Scala | apache-2.0 | 1,823 |
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.tailhq.dynaml.optimization
import breeze.linalg.{DenseMatrix, DenseVector, cholesky, inv}
import breeze.numerics.sqrt
import io.github.tailhq.dynaml.DynaMLPipe._
import io.github.tailhq.dynaml.pipes.DataPipe
import io.github.tailhq.dynaml.probability.Likelihood
/**
* Created by mandar on 6/4/16.
*/
class LaplacePosteriorMode[I](l: Likelihood[DenseVector[Double],
  DenseVector[Double], DenseMatrix[Double],
  (DenseVector[Double], DenseVector[Double])]) extends
RegularizedOptimizer[DenseVector[Double], I,
  Double, (DenseMatrix[Double], DenseVector[Double])]{

  /** Likelihood whose posterior mode is to be found. */
  val likelihood = l

  /**
    * Solve the convex optimization problem, i.e. locate the posterior mode.
    * Delegates to [[LaplacePosteriorMode.run]], feeding the data through
    * unchanged (identity pipe).
    */
  override def optimize(nPoints: Long,
                        ParamOutEdges: (DenseMatrix[Double], DenseVector[Double]),
                        initialP: DenseVector[Double]): DenseVector[Double] = {
    val passThrough = identityPipe[(DenseMatrix[Double], DenseVector[Double])]
    LaplacePosteriorMode.run(nPoints, ParamOutEdges, likelihood, initialP, numIterations, passThrough)
  }
}
object LaplacePosteriorMode {

  /**
    * Newton-style iteration for the mode of the Laplace-approximated posterior
    * (cf. the mode-finding scheme for Gaussian process classification).
    *
    * @param nPoints number of data points (unused by the iteration itself)
    * @param data raw data, turned into (kernel matrix, targets) by `transform`
    * @param likelihood supplies gradient and Hessian of the log likelihood
    * @param initialP starting value of the latent mode
    * @param numIterations fixed number of update steps to perform
    * @param transform pipe extracting (K, y) from the data
    * @return the latent mode after `numIterations` updates
    *         (returns `initialP` unchanged when `numIterations` is 0)
    */
  def run[T](nPoints: Long, data: T,
             likelihood: Likelihood[
               DenseVector[Double], DenseVector[Double], DenseMatrix[Double],
               (DenseVector[Double], DenseVector[Double])],
             initialP: DenseVector[Double], numIterations: Int,
             transform: DataPipe[T, (DenseMatrix[Double], DenseVector[Double])]): DenseVector[Double] = {

    val (kMat, y) = transform(data)
    val id = DenseMatrix.eye[Double](y.length)

    (1 to numIterations).foldLeft(initialP) { (mode, _) =>
      // W = negative Hessian of the log likelihood at the current mode
      val wMat = likelihood.hessian(y, mode) * -1.0
      val wMatsq = sqrt(wMat)
      // Cholesky factor of (I + W^{1/2} K W^{1/2})
      val L = cholesky(id + wMatsq * kMat * wMatsq)
      val b = wMat * mode + likelihood.gradient(y, mode)
      val a = b - inv(wMatsq * L.t) * (inv(L) * (wMatsq * kMat * b))
      kMat * a
    }
  }
}
| mandar2812/DynaML | dynaml-core/src/main/scala/io/github/tailhq/dynaml/optimization/LaplacePosteriorMode.scala | Scala | apache-2.0 | 2,809 |
package ee.cone.c4actor.rdb_impl
import java.lang.Math.toIntExact
import java.sql.{CallableStatement, Connection, PreparedStatement, ResultSet}
import java.util.concurrent.{CompletableFuture, ExecutorService, Executors}
import com.typesafe.scalalogging.LazyLogging
import ee.cone.c4actor._
import ee.cone.c4actor.rdb._
import ee.cone.c4di.c4
import scala.annotation.tailrec
/**
 * Establishes the external RDB connection pool when the app runs in active
 * mode. The pool is published through a CompletableFuture so that
 * `getConnectionPool` can block until `run` has produced it.
 */
@c4("RDBSyncApp") final class ExternalDBSyncClient(
  dbFactory: ExternalDBFactory,
  externalIsActive: ExternalIsActive,
  db: CompletableFuture[RConnectionPool] = new CompletableFuture() //dataSource: javax.sql.DataSource
) extends Executable with ExternalDBClient {
  // def toInject: List[Injectable] = WithJDBCKey.set(f=>getConnectionPool.doWith(f))
  // Completes `db` exactly once; `complete` returns false if already completed,
  // so the assert trips if run() is somehow invoked twice.
  def run(): Unit = concurrent.blocking {
    if (externalIsActive.isActive)
      assert(db.complete(dbFactory.create(
        createConnection => new RConnectionPool {
          // Opens a fresh SQL connection per doWith call and closes it afterwards.
          def doWith[T](f: RConnection => T): T = {
            FinallyClose(createConnection()) { sqlConn =>
              // Single-thread executor required by JDBC setNetworkTimeout; shut down after use.
              FinallyClose[ExecutorService, T](_.shutdown())(Executors.newFixedThreadPool(1)) { pool =>
                sqlConn.setNetworkTimeout(pool, 1000 * 60 * 15) // 15-minute network timeout
                val conn = new RConnectionImpl(sqlConn)
                f(conn)
              }
            }
          }
        }
      )))
  }
  // Blocks until run() has completed the future; fails fast in nonactive mode.
  def getConnectionPool: RConnectionPool =
    if (externalIsActive.isActive) concurrent.blocking(db.get) else FailWith("Nonactive mode")
}
object FinallyFree {
  /** Applies `f` to `o`, guaranteeing that `close(o)` runs even when `f` throws. */
  def apply[A, T](o: A, close: A => Unit)(f: A => T): T =
    try {
      f(o)
    } finally {
      close(o)
    }
}
/**
 * Builder for a JDBC callable statement: each `in(...)` call wraps this bind
 * in another bind one parameter-index deeper, and `call()` renders the final
 * `{call name (?,?,...)}` string, prepares it and executes the chain.
 *
 * @tparam R result type produced when the statement is executed
 */
abstract class RDBBindImpl[R] extends RDBBind[R] with LazyLogging {
  // Connection the statement is prepared on.
  def connection: java.sql.Connection
  // 1-based JDBC parameter index this bind writes to (0 = no parameter).
  def index: Int
  // Builds the SQL call text; `wasCode` is the placeholder list built so far.
  def code(wasCode: String): String
  // Binds this level's value (if any) and delegates inward, finally running the statement.
  def execute(stmt: java.sql.CallableStatement): R
  //
  private def inObject(value: Object) = {
    //println(Thread.currentThread.getName,"bind",value)
    new InObjectRDBBind[R](this, value)
  }
  def in(value: Long): RDBBind[R] = inObject(value:java.lang.Long)
  def in(value: Boolean): RDBBind[R] = inObject(value:java.lang.Boolean)
  // Short strings are bound as plain objects; long ones go through a Clob bind.
  def in(value: String): RDBBind[R] =
    if(value.length < 1000) inObject(value) else new InTextRDBBind(this, value)
  // Prepares and executes the accumulated call; blocking-marked for the thread pool.
  def call(): R = concurrent.blocking {
    val theCode = code("")
    logger.debug(s"${Thread.currentThread.getName} code $theCode")
    FinallyClose(connection.prepareCall(theCode))(execute)
  }
  def justExecute(stmt: CallableStatement) = {
    ignoreIrrelevantExecutionResult(stmt.execute())
  }
  // execute() returns whether the first result is a ResultSet; callers here don't care.
  private def ignoreIrrelevantExecutionResult(value: Boolean): Unit = ()
}
/** Binds a single boxed value as an IN parameter at this bind's index. */
class InObjectRDBBind[R](val prev: RDBBindImpl[R], value: Object) extends ArgRDBBind[R] {
  def execute(stmt: CallableStatement): R = {
    // Set this parameter, then let the outer bind handle the remaining ones.
    stmt.setObject(index,value)
    prev.execute(stmt)
  }
}
/**
 * Binds a long string IN parameter via a temporary Clob, which is freed
 * after the statement has been executed (the Clob must stay alive for the
 * whole execution, hence the FinallyClose around `prev.execute`).
 */
class InTextRDBBind[R](val prev: RDBBindImpl[R], value: String) extends ArgRDBBind[R] {
  def execute(stmt: CallableStatement): R = {
    FinallyClose[java.sql.Clob,R](_.free())(connection.createClob()){ clob =>
      // setString returns the number of characters written; must cover the full value.
      assert(clob.setString(1,value)==value.length)
      stmt.setClob(index,clob)
      prev.execute(stmt)
    }
  }
}
/**
 * A bind level that adds one parameter: reuses the outer bind's connection,
 * claims the next parameter index, and prepends a `?` placeholder to the
 * accumulated call text.
 */
abstract class ArgRDBBind[R] extends RDBBindImpl[R] {
  def prev: RDBBindImpl[R]
  def connection: Connection = prev.connection
  def index: Int = prev.index + 1
  def code(wasCode: String): String =
    prev.code(if(wasCode.isEmpty) "?" else s"?,$wasCode")
}
/** Root bind for a procedure call returning nothing: `{call name (...)}`. */
class OutUnitRDBBind(
  val connection: java.sql.Connection, name: String
) extends RDBBindImpl[Unit] {
  // No OUT parameter, so parameter indices for IN binds start at 1 (prev.index + 1).
  def index = 0
  def code(wasCode: String): String = s"{call $name ($wasCode)}"
  def execute(stmt: CallableStatement): Unit = justExecute(stmt)
}
/**
 * Root bind for a function call returning an optional Long:
 * `{? = call name (...)}` with the OUT parameter registered as BIGINT
 * at position 1.
 */
class OutLongRDBBind(
  val connection: java.sql.Connection, name: String
) extends RDBBindImpl[Option[Long]] {
  def index = 1
  def code(wasCode: String): String = s"{? = call $name ($wasCode)}"
  def execute(stmt: CallableStatement): Option[Long] = {
    stmt.registerOutParameter(index,java.sql.Types.BIGINT)
    justExecute(stmt)
    // JDBC getLong returns the primitive 0 (never null) for SQL NULL, so the
    // former Option(stmt.getLong(index)) could never be None; wasNull() is the
    // correct way to detect a NULL OUT parameter.
    val value = stmt.getLong(index)
    if (stmt.wasNull()) None else Some(value)
  }
}
/**
 * Root bind for a function call returning text: the CLOB OUT parameter is
 * materialized into a String ("" when the result is SQL NULL) and freed.
 */
class OutTextRDBBind(
  val connection: java.sql.Connection, name: String
) extends RDBBindImpl[String] {
  def index = 1
  def code(wasCode: String): String = s"{? = call $name ($wasCode)}"
  def execute(stmt: CallableStatement): String = {
    stmt.registerOutParameter(index,java.sql.Types.CLOB)
    justExecute(stmt)
    // getClob may return null for SQL NULL; free the Clob (if any) after reading.
    FinallyClose[Option[java.sql.Clob],String](_.foreach(_.free()))(
      Option(stmt.getClob(index))
    ){ clob =>
      clob.map(c=>c.getSubString(1,toIntExact(c.length()))).getOrElse("")
    }
  }
}
/**
 * Thin RConnection facade over a raw JDBC connection: factory methods for
 * callable-statement binds plus direct statement/query execution helpers.
 */
class RConnectionImpl(conn: java.sql.Connection) extends RConnection with LazyLogging {
  // Binds positional arguments 1..n of a prepared statement.
  private def bindObjects(stmt: java.sql.PreparedStatement, bindList: List[Object]) =
    bindList.zipWithIndex.foreach{ case (v,i) => stmt.setObject(i+1,v) }
  def outUnit(name: String): RDBBind[Unit] = new OutUnitRDBBind(conn, name)
  def outLongOption(name: String): RDBBind[Option[Long]] = new OutLongRDBBind(conn, name)
  def outText(name: String): RDBBind[String] = new OutTextRDBBind(conn, name)
  // Executes a raw statement (DDL etc.); blocking-marked for the thread pool.
  def execute(code: String): Unit = concurrent.blocking {
    FinallyClose(conn.prepareStatement(code)) { stmt =>
      logger.debug(code)
      ignoreIrrelevantExecutionResult(stmt.execute())
      //println(stmt.getWarnings)
    }
  }
  // execute() returns whether the first result is a ResultSet; callers here don't care.
  private def ignoreIrrelevantExecutionResult(value: Boolean): Unit = ()
  /**
   * Runs a parameterized query and materializes the requested columns of
   * every row into a list of column-name -> value maps (in row order).
   */
  def executeQuery(
    code: String, cols: List[String], bindList: List[Object]
  ): List[Map[String,Object]] = concurrent.blocking {
    //println(s"code:: [$code]")
    //conn.prepareCall(code).re
    FinallyClose(conn.prepareStatement(code)) { stmt =>
      bindObjects(stmt, bindList)
      FinallyClose(stmt.executeQuery()) { rs: ResultSet =>
        type Res = List[Map[String, Object]]
        // Accumulates rows in reverse, then flips once at the end.
        @tailrec def iter(res: Res): Res =
          if(rs.next()) iter(cols.map(cn => cn -> rs.getObject(cn)).toMap :: res)
          else res.reverse
        iter(Nil)
      }
    }
  }
}
| conecenter/c4proto | extra_lib/src/main/scala/ee/cone/c4actor/rdb_impl/JDBCImpl.scala | Scala | apache-2.0 | 5,900 |
/*
* Sentilab SARE: a Sentiment Analysis Research Environment
* Copyright (C) 2013 Sabanci University Sentilab
* http://sentilab.sabanciuniv.edu
*
* This file is part of SARE.
*
* SARE is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* SARE is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with SARE. If not, see <http://www.gnu.org/licenses/>.
*/
package edu.sabanciuniv.sentilab.sare.models.base.documentStore
import edu.sabanciuniv.sentilab.core.models.ModelLike
import edu.sabanciuniv.sentilab.sare.models.base.document.DocumentLike
/**
 * The base interface for all document stores. A document store groups a set
 * of documents together with descriptive metadata (title, language,
 * description).
 * @author Mus'ab Husaini
 */
trait DocumentStoreLike extends ModelLike {

	/**
	 * Gets the title of this store.
	 * @return the title of this store.
	 */
	def getTitle: String

	/**
	 * Gets the language that this store's documents are in.
	 * @return the language of this store.
	 */
	def getLanguage: String

	/**
	 * Gets the description of this store.
	 * @return the description of this store.
	 */
	def getDescription: String

	/**
	 * Gets the documents in this store.
	 * @return the {@link Iterable} containing {@link DocumentLike} objects stored in this store.
	 */
	def getDocuments: java.lang.Iterable[_ <: DocumentLike]
}
| musabhusaini/sare | sare-lib/modules/sare-base/src/main/scala/edu/sabanciuniv/sentilab/sare/models/base/documentStore/DocumentStoreLike.scala | Scala | gpl-3.0 | 1,665 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs
import java.util.concurrent.ConcurrentHashMap
import com.github.benmanes.caffeine.cache.{CacheLoader, Caffeine}
import com.typesafe.scalalogging.LazyLogging
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileContext, Path}
import org.locationtech.geomesa.fs.storage.api.FileSystemStorage
import org.locationtech.geomesa.fs.storage.common.FileSystemStorageFactory
import org.locationtech.geomesa.fs.storage.common.utils.PathCache
import org.locationtech.geomesa.utils.stats.MethodProfiling
/**
  * Manages the storages and associated simple feature types underneath a given path
  *
  * @param fc file context
  * @param conf configuration
  * @param root root path for the data store
  */
class FileSystemStorageManager private (fc: FileContext, conf: Configuration, root: Path)
    extends MethodProfiling with LazyLogging {

  import scala.collection.JavaConverters._

  // type name -> (storage root path, storage instance)
  private val cache = new ConcurrentHashMap[String, (Path, FileSystemStorage)]().asScala

  /**
    * Gets the storage associated with the given simple feature type, if any
    *
    * @param typeName simple feature type name
    * @return
    */
  def storage(typeName: String): Option[FileSystemStorage] = {
    cache.get(typeName).map(_._2) // check cached values
        .orElse(Some(defaultPath(typeName)).filter(PathCache.exists(fc, _)).flatMap(loadPath)) // check expected (default) path
        .orElse(loadAll().find(_.getMetadata.getSchema.getTypeName == typeName)) // check other paths until we find it
  }

  /**
    * Gets the storage under a given path, if any
    *
    * @param path root path for the storage
    * @return
    */
  def storage(path: Path): Option[FileSystemStorage] =
    cache.collectFirst { case (_, (p, storage)) if p == path => storage }.orElse(loadPath(path))

  /**
    * Gets all storages under the root path
    *
    * @return
    */
  def storages(): Seq[FileSystemStorage] = {
    // force loading of everything - note: `_ => ()` (the unit value); the previous
    // `_ => Unit` evaluated the scala.Unit companion object and relied on value discarding
    loadAll().foreach(_ => ())
    cache.map { case (_, (_, storage)) => storage }.toSeq
  }

  /**
    * Caches a storage instance for future use. Avoids loading it a second time if referenced later.
    *
    * @param path path for the storage
    * @param storage storage instance
    */
  def register(path: Path, storage: FileSystemStorage): Unit =
    cache.put(storage.getMetadata.getSchema.getTypeName, (path, storage))

  /**
    * Default path for a given simple feature type name. Generally the simple feature type will go under
    * a folder with the type name, but this is not required
    *
    * @param typeName simple feature type name
    * @return
    */
  def defaultPath(typeName: String): Path = new Path(root, typeName)

  /**
    * Loads all storages under this root (if they aren't already loaded)
    *
    * @return
    */
  private def loadAll(): Iterator[FileSystemStorage] = {
    if (!PathCache.exists(fc, root)) { Iterator.empty } else {
      val dirs = PathCache.list(fc, root).filter(_.isDirectory).map(_.getPath)
      dirs.filterNot(path => cache.exists { case (_, (p, _)) => p == path }).flatMap(loadPath)
    }
  }

  /**
    * Attempt to load a storage under the given root path. Requires an appropriate storage implementation
    * to be available on the classpath.
    *
    * @param path storage root path
    * @return
    */
  private def loadPath(path: Path): Option[FileSystemStorage] = {
    import org.locationtech.geomesa.utils.conversions.JavaConverters._
    profile {
      val loaded = FileSystemStorageFactory.factories().flatMap(_.load(fc, conf, path).asScala.iterator)
      if (!loaded.hasNext) { None } else {
        // side-effecting iterator advance - keep the parentheses
        val storage = loaded.next()
        register(path, storage)
        Some(storage)
      }
    } { (s, time) => logger.debug(s"${ if (s.isDefined) "Loaded" else "No" } storage at path '$path' in ${time}ms") }
  }
}
object FileSystemStorageManager {

  // One manager per (file context, configuration, root path); built lazily by Caffeine.
  private val cache = Caffeine.newBuilder().build(
    new CacheLoader[(FileContext, Configuration, Path), FileSystemStorageManager]() {
      override def load(key: (FileContext, Configuration, Path)): FileSystemStorageManager =
        new FileSystemStorageManager(key._1, key._2, key._3)
    }
  )

  /**
    * Load a cached storage manager instance
    *
    * @param fc file context
    * @param conf configuration
    * @param root data store root path
    * @return
    */
  def apply(fc: FileContext, conf: Configuration, root: Path): FileSystemStorageManager = cache.get((fc, conf, root))
}
| ddseapy/geomesa | geomesa-fs/geomesa-fs-datastore/src/main/scala/org/locationtech/geomesa/fs/FileSystemStorageManager.scala | Scala | apache-2.0 | 4,981 |
package org.scalameter.picklers
import java.io.File
import java.util.Date
import org.scalatest.prop.PropertyChecks
import org.scalatest.{FunSuite, Matchers}
import org.scalameter.picklers.Implicits._
import org.scalameter.utils.ClassPath
/**
 * Round-trip property tests for the pickling framework: for every supported
 * type, pickling an arbitrary value and unpickling the bytes must reproduce
 * the original value.
 */
class PicklerSpecification extends FunSuite with PropertyChecks with Matchers {
  // Pickles `o` with the implicit pickler for T, unpickles the result, and
  // asserts the round trip preserves the value.
  def validatePickler[T: Pickler](o: T) = {
    val pickler = implicitly[Pickler[T]]
    val p = pickler.pickle(o)
    val uo = pickler.unpickle(p)
    uo should === (o)
  }

  test("Unit pickling") {
    forAll { o: Unit =>
      validatePickler(o)
    }
  }

  test("Byte pickling") {
    forAll { o: Byte =>
      validatePickler(o)
    }
  }

  test("Boolean pickling") {
    forAll { o: Boolean =>
      validatePickler(o)
    }
  }

  test("Char pickling") {
    forAll { o: Char =>
      validatePickler(o)
    }
  }

  test("Short pickling") {
    forAll { o: Short =>
      validatePickler(o)
    }
  }

  test("Int pickling") {
    forAll { o: Int =>
      validatePickler(o)
    }
  }

  test("Long pickling") {
    forAll { o: Long =>
      validatePickler(o)
    }
  }

  test("Float pickling") {
    forAll { o: Float =>
      validatePickler(o)
    }
  }

  test("Double pickling") {
    forAll { o: Double =>
      validatePickler(o)
    }
  }

  test("String pickling") {
    forAll { o: String =>
      validatePickler(o)
    }
  }

  test("Date pickling") {
    forAll { o: Date =>
      validatePickler(o)
    }
  }

  test("List[String] pickling") {
    forAll { o: List[String] =>
      validatePickler(o)
    }
  }

  test("Seq[Long] pickling") {
    forAll { o: Seq[Long] =>
      validatePickler(o)
    }
  }

  test("Option[Date] pickling") {
    forAll { o: Option[Date] =>
      validatePickler(o)
    }
  }

  test("ClassPath pickling") {
    forAll { o: List[String] =>
      // Strip quote and path-separator characters so each generated string
      // forms a usable class-path entry before wrapping it in a File.
      validatePickler(ClassPath(
        o.map(s => new File(s.replaceAll(s""""|${File.pathSeparatorChar}""", "")))))
    }
  }

  test("Enum pickling") {
    forAll { o: TestEnum1 =>
      validatePickler(o)
    }
    forAll { o: TestEnum2 =>
      validatePickler(o)
    }
  }
}
| storm-enroute/scalameter | scalameter-core/src/test/scala/org/scalameter/picklers/PicklerSpecification.scala | Scala | bsd-3-clause | 2,108 |
package com.wavesplatform.state.diffs
import scala.util.{Left, Right}
import cats._
import com.wavesplatform.account.{Address, AddressScheme}
import com.wavesplatform.features.{BlockchainFeature, BlockchainFeatures}
import com.wavesplatform.features.OverdraftValidationProvider._
import com.wavesplatform.lang.ValidationError
import com.wavesplatform.lang.directives.values._
import com.wavesplatform.lang.script.{ContractScript, Script}
import com.wavesplatform.lang.script.ContractScript.ContractScriptImpl
import com.wavesplatform.lang.script.v1.ExprScript
import com.wavesplatform.settings.FunctionalitySettings
import com.wavesplatform.state._
import com.wavesplatform.transaction._
import com.wavesplatform.transaction.Asset.{IssuedAsset, Waves}
import com.wavesplatform.transaction.TxValidationError._
import com.wavesplatform.transaction.assets._
import com.wavesplatform.transaction.assets.exchange._
import com.wavesplatform.transaction.lease._
import com.wavesplatform.transaction.smart.{InvokeScriptTransaction, SetScriptTransaction}
import com.wavesplatform.transaction.smart.InvokeScriptTransaction.Payment
import com.wavesplatform.transaction.transfer._
/**
 * Stateless validations applied to transactions before they are applied to
 * the blockchain state: balance checks, duplicate-id rejection, network-byte
 * checks, feature-activation gates and timestamp sanity checks. Each method
 * returns either the unchanged transaction (Right) or a ValidationError (Left).
 */
object CommonValidation {

  /**
   * Rejects a transaction whose transferred amounts plus fees would drive any
   * of the sender's balances (Waves or issued asset) negative. Only enforced
   * once the `allowTemporaryNegativeUntil` cutoff has passed.
   */
  def disallowSendingGreaterThanBalance[T <: Transaction](blockchain: Blockchain, blockTime: Long, tx: T): Either[ValidationError, T] =
    if (blockTime >= blockchain.settings.functionalitySettings.allowTemporaryNegativeUntil) {
      // Validates a single (amount, fee) spend against the sender's balances.
      // `allowFeeOverdraft` permits a fee overdraft for V4+ DApp invocations,
      // provided the amount alone keeps the Waves balance non-negative.
      def checkTransfer(
          sender: Address,
          assetId: Asset,
          amount: Long,
          feeAssetId: Asset,
          feeAmount: Long,
          allowFeeOverdraft: Boolean = false
      ) = {
        val amountDiff = assetId match {
          case aid @ IssuedAsset(_) => Portfolio(0, LeaseBalance.empty, Map(aid -> -amount))
          case Waves => Portfolio(-amount, LeaseBalance.empty, Map.empty)
        }
        val feeDiff = feeAssetId match {
          case aid @ IssuedAsset(_) => Portfolio(0, LeaseBalance.empty, Map(aid -> -feeAmount))
          case Waves => Portfolio(-feeAmount, LeaseBalance.empty, Map.empty)
        }
        val spendings = Monoid.combine(amountDiff, feeDiff)
        val oldWavesBalance = blockchain.balance(sender, Waves)
        val newWavesBalance = oldWavesBalance + spendings.balance
        val feeUncheckedBalance = oldWavesBalance + amountDiff.balance
        val overdraftFilter = allowFeeOverdraft && feeUncheckedBalance >= 0
        if (!overdraftFilter && newWavesBalance < 0) {
          Left(
            GenericError(
              "Attempt to transfer unavailable funds: Transaction application leads to " +
                s"negative waves balance to (at least) temporary negative state, current balance equals $oldWavesBalance, " +
                s"spends equals ${spendings.balance}, result is $newWavesBalance"
            )
          )
        } else {
          // Any issued-asset spend that would go below zero fails the check.
          val balanceError = spendings.assets.collectFirst {
            case (aid, delta) if delta < 0 && blockchain.balance(sender, aid) + delta < 0 =>
              val availableBalance = blockchain.balance(sender, aid)
              GenericError(
                "Attempt to transfer unavailable funds: Transaction application leads to negative asset " +
                  s"'$aid' balance to (at least) temporary negative state, current balance is $availableBalance, " +
                  s"spends equals $delta, result is ${availableBalance + delta}"
              )
          }
          balanceError.fold[Either[ValidationError, T]](Right(tx))(Left(_))
        }
      }

      tx match {
        case ptx: PaymentTransaction if blockchain.balance(ptx.sender.toAddress, Waves) < (ptx.amount + ptx.fee) =>
          Left(
            GenericError(
              "Attempt to pay unavailable funds: balance " +
                s"${blockchain.balance(ptx.sender.toAddress, Waves)} is less than ${ptx.amount + ptx.fee}"
            )
          )
        case ttx: TransferTransaction => checkTransfer(ttx.sender.toAddress, ttx.assetId, ttx.amount, ttx.feeAssetId, ttx.fee)
        case mtx: MassTransferTransaction => checkTransfer(mtx.sender.toAddress, mtx.assetId, mtx.transfers.map(_.amount).sum, Waves, mtx.fee)
        case citx: InvokeScriptTransaction =>
          // When the corrected payment check is active, payments in the same asset
          // are summed and validated together instead of one by one.
          val foldPayments: Iterable[Payment] => Iterable[Payment] =
            if (blockchain.useCorrectPaymentCheck)
              _.groupBy(_.assetId)
                .map { case (assetId, p) => Payment(p.map(_.amount).sum, assetId) } else
              identity
          for {
            address <- blockchain.resolveAlias(citx.dAppAddressOrAlias)
            // Fee overdraft only applies when the called dApp runs a V4+ contract
            // script and the corrected payment check is active.
            allowFeeOverdraft = blockchain.accountScript(address) match {
              case Some(AccountScriptInfo(_, ContractScriptImpl(version, _), _, _)) if version >= V4 && blockchain.useCorrectPaymentCheck => true
              case _ => false
            }
            check <- foldPayments(citx.payments)
              .map(p => checkTransfer(citx.sender.toAddress, p.assetId, p.amount, citx.feeAssetId, citx.fee, allowFeeOverdraft))
              .find(_.isLeft)
              .getOrElse(Right(tx))
          } yield check
        case _ => Right(tx)
      }
    } else Right(tx)

  /**
   * Rejects a transaction whose id is already present in the state
   * (payment transactions are exempt).
   */
  def disallowDuplicateIds[T <: Transaction](blockchain: Blockchain, tx: T): Either[ValidationError, T] = tx match {
    case _: PaymentTransaction => Right(tx)
    case _ =>
      val id = tx.id()
      Either.cond(!blockchain.containsTransaction(tx), tx, AlreadyInTheState(id, blockchain.transactionMeta(id).get.height))
  }

  /** Rejects a transaction signed for a different network (chain id mismatch). */
  def disallowFromAnotherNetwork[T <: Transaction](tx: T, currentChainId: Byte): Either[ValidationError, T] =
    Either.cond(
      tx.chainId == currentChainId,
      tx,
      GenericError(
        s"Data from other network: expected: ${AddressScheme.current.chainId}(${AddressScheme.current.chainId.toChar}), actual: ${tx.chainId}(${tx.chainId.toChar})"
      )
    )

  /**
   * Rejects transactions (and the scripts they carry) that require a
   * blockchain feature which has not been activated at the current height.
   */
  def disallowBeforeActivationTime[T <: Transaction](blockchain: Blockchain, tx: T): Either[ValidationError, T] = {

    // Right(tx) iff feature `b` is active at the current height.
    def activationBarrier(b: BlockchainFeature, msg: Option[String] = None): Either[ActivationError, T] =
      Either.cond(
        blockchain.isFeatureActivated(b, blockchain.height),
        tx,
        TxValidationError.ActivationError(msg.getOrElse(b.description + " feature has not been activated yet"))
      )

    // Gates a script on both its stdlib version and its kind (expression vs dApp).
    def scriptActivation(sc: Script): Either[ActivationError, T] = {

      val v3Activation = activationBarrier(BlockchainFeatures.Ride4DApps)
      val v4Activation = activationBarrier(BlockchainFeatures.BlockV5)
      val v5Activation = activationBarrier(BlockchainFeatures.SynchronousCalls)

      def scriptVersionActivation(sc: Script): Either[ActivationError, T] = sc.stdLibVersion match {
        case V1 | V2 | V3 if sc.containsArray => v4Activation
        case V1 | V2 if sc.containsBlockV2() => v3Activation
        case V1 | V2 => Right(tx)
        case V3 => v3Activation
        case V4 => v4Activation
        case V5 => v5Activation
      }

      def scriptTypeActivation(sc: Script): Either[ActivationError, T] = (sc: @unchecked) match {
        case _: ExprScript => Right(tx)
        case _: ContractScript.ContractScriptImpl => v3Activation
      }

      for {
        _ <- scriptVersionActivation(sc)
        _ <- scriptTypeActivation(sc)
      } yield tx
    }

    // Version 2 of the "classic" transaction types requires SmartAccounts.
    def generic1or2Barrier(t: VersionedTransaction): Either[ActivationError, T] = {
      if (t.version == 1.toByte) Right(tx)
      else if (t.version == 2.toByte) activationBarrier(BlockchainFeatures.SmartAccounts)
      else Right(tx)
    }

    // Protobuf-encoded versions require BlockV5; unknown versions are rejected outright.
    val versionsBarrier = tx match {
      case p: LegacyPBSwitch if p.isProtobufVersion =>
        activationBarrier(BlockchainFeatures.BlockV5)

      case v: VersionedTransaction if !v.builder.supportedVersions.contains(v.version) =>
        Left(GenericError(s"Invalid tx version: $v"))

      case _ =>
        Right(tx)
    }

    // Per-transaction-type feature gates.
    val typedBarrier = tx match {
      case _: PaymentTransaction => Right(tx)
      case _: GenesisTransaction => Right(tx)

      case e: ExchangeTransaction if e.version == TxVersion.V1 => Right(tx)
      case exv2: ExchangeTransaction if exv2.version >= TxVersion.V2 =>
        activationBarrier(BlockchainFeatures.SmartAccountTrading).flatMap { tx =>
          (exv2.buyOrder, exv2.sellOrder) match {
            case (o1, o2) if o1.version >= 3 || o2.version >= 3 => activationBarrier(BlockchainFeatures.OrderV3)
            case _ => Right(tx)
          }
        }

      case _: MassTransferTransaction => activationBarrier(BlockchainFeatures.MassTransfer)
      case _: DataTransaction => activationBarrier(BlockchainFeatures.DataTransaction)

      case sst: SetScriptTransaction =>
        sst.script match {
          case None => Right(tx)
          case Some(sc) => scriptActivation(sc)
        }

      case it: IssueTransaction =>
        it.script match {
          case None => Right(tx)
          case Some(sc) => scriptActivation(sc)
        }

      case sast: SetAssetScriptTransaction =>
        activationBarrier(BlockchainFeatures.SmartAssets).flatMap { _ =>
          sast.script match {
            case None => Right(tx)
            case Some(sc) => scriptActivation(sc)
          }
        }

      case t: TransferTransaction => generic1or2Barrier(t)
      case t: CreateAliasTransaction => generic1or2Barrier(t)
      case t: LeaseTransaction => generic1or2Barrier(t)
      case t: LeaseCancelTransaction => generic1or2Barrier(t)
      case t: ReissueTransaction => generic1or2Barrier(t)
      case t: BurnTransaction => generic1or2Barrier(t)

      case _: SponsorFeeTransaction => activationBarrier(BlockchainFeatures.FeeSponsorship)
      case _: InvokeScriptTransaction => activationBarrier(BlockchainFeatures.Ride4DApps)
      case _: UpdateAssetInfoTransaction => activationBarrier(BlockchainFeatures.BlockV5)

      case _ => Left(GenericError("Unknown transaction must be explicitly activated"))
    }

    // Re-validates the proofs container (e.g. proof count/size constraints).
    val proofsValidate = tx match {
      case s: ProvenTransaction =>
        Proofs
          .create(s.proofs.proofs)
          .map(_ => tx)
      case _ =>
        Right(tx)
    }

    for {
      _ <- versionsBarrier
      _ <- typedBarrier
      _ <- proofsValidate
    } yield tx
  }

  /**
   * Rejects transactions timestamped too far in the future relative to the
   * block time (unless the legacy allow-from-future cutoff still applies).
   */
  def disallowTxFromFuture[T <: Transaction](settings: FunctionalitySettings, time: Long, tx: T): Either[ValidationError, T] = {
    val allowTransactionsFromFutureByTimestamp = tx.timestamp < settings.allowTransactionsFromFutureUntil
    if (!allowTransactionsFromFutureByTimestamp && tx.timestamp - time > settings.maxTransactionTimeForwardOffset.toMillis)
      Left(
        Mistiming(
          s"""Transaction timestamp ${tx.timestamp}
             |is more than ${settings.maxTransactionTimeForwardOffset.toMillis}ms in the future
             |relative to block timestamp $time""".stripMargin
            .replaceAll("\n", " ")
            .replaceAll("\r", "")
        )
      )
    else Right(tx)
  }

  /**
   * Rejects transactions timestamped too far before the previous block
   * (no-op when there is no previous block time).
   */
  def disallowTxFromPast[T <: Transaction](settings: FunctionalitySettings, prevBlockTime: Option[Long], tx: T): Either[ValidationError, T] =
    prevBlockTime match {
      case Some(t) if (t - tx.timestamp) > settings.maxTransactionTimeBackOffset.toMillis =>
        Left(
          Mistiming(
            s"""Transaction timestamp ${tx.timestamp}
               |is more than ${settings.maxTransactionTimeBackOffset.toMillis}ms in the past
               |relative to previous block timestamp $prevBlockTime""".stripMargin
              .replaceAll("\n", " ")
              .replaceAll("\r", "")
          )
        )
      case _ => Right(tx)
    }
}
| wavesplatform/Waves | node/src/main/scala/com/wavesplatform/state/diffs/CommonValidation.scala | Scala | mit | 11,947 |
package org.jetbrains.plugins.scala
package findUsages
import java.util
import com.intellij.openapi.application.ReadActionProcessor
import com.intellij.openapi.project.IndexNotReadyException
import com.intellij.openapi.roots.FileIndexFacade
import com.intellij.openapi.util.Condition
import com.intellij.openapi.vfs.VirtualFile
import com.intellij.psi.impl.PsiManagerEx
import com.intellij.psi.impl.cache.impl.id.{IdIndex, IdIndexEntry}
import com.intellij.psi.impl.search.PsiSearchHelperImpl
import com.intellij.psi.search.searches.ReferencesSearch
import com.intellij.psi.search.{GlobalSearchScope, PsiSearchHelper, TextOccurenceProcessor, UsageSearchContext}
import com.intellij.psi.{PsiElement, PsiManager, PsiReference}
import com.intellij.util.containers.ContainerUtil
import com.intellij.util.indexing.FileBasedIndex
import com.intellij.util.{CommonProcessors, Processor, QueryExecutor}
import org.jetbrains.plugins.scala.extensions.inReadAction
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScNamedElement
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaNamesUtil
/**
 * Reference searcher covering Scala names the default (Java-identifier based)
 * word search misses: operator/symbolic names (e.g. `++`) and backticked
 * identifiers. For each candidate spelling it scans word occurrences in the
 * search scope and reports those that resolve back to the target element.
 *
 * Nikolay.Tropin
 * 9/10/13
 */
class OperatorAndBacktickedSearcher extends QueryExecutor[PsiReference, ReferencesSearch.SearchParameters] {
  def execute(queryParameters: ReferencesSearch.SearchParameters, consumer: Processor[PsiReference]): Boolean = {
    val scope = inReadAction(queryParameters.getEffectiveSearchScope)
    val element = queryParameters.getElementToSearch
    val manager = PsiManager.getInstance(queryParameters.getProject)
    // Pairs of (element to resolve against, text to search for). A backticked
    // name is searched both with and without its backticks.
    val toProcess: Seq[(PsiElement, String)] = inReadAction {
      element match {
        case e if !e.isValid => Nil
        case ScalaNamesUtil.isBackticked(name) => if (name != "") Seq((element, name), (element, s"`$name`")) else Seq((element, "``"))
        case named: ScNamedElement if named.name.exists(ScalaNamesUtil.isOpCharacter) => Seq((named, named.name))
        case _ => Nil
      }
    }
    toProcess.foreach { case (elem, name) =>
      val processor = new TextOccurenceProcessor {
        def execute(element: PsiElement, offsetInElement: Int): Boolean = {
          val references = inReadAction(element.getReferences)
          // Only references spanning the matched offset are candidates.
          for (ref <- references if ref.getRangeInElement.contains(offsetInElement)) {
            inReadAction {
              if (ref.isReferenceTo(elem) || ref.resolve() == elem) {
                // Consumer returning false aborts this occurrence pass.
                if (!consumer.process(ref)) return false
              }
            }
          }
          true
        }
      }
      val helper: PsiSearchHelper = new ScalaPsiSearchHelper(manager.asInstanceOf[PsiManagerEx])
      try {
        helper.processElementsWithWord(processor, scope, name, UsageSearchContext.IN_CODE, true)
      }
      catch {
        // Indices unavailable (dumb mode): silently skip this name.
        case ignore: IndexNotReadyException =>
      }
    }
    true
  }

  private class ScalaPsiSearchHelper(manager: PsiManagerEx) extends PsiSearchHelperImpl(manager) {
    override def processFilesWithText(scope: GlobalSearchScope,
                                      searchContext: Short,
                                      caseSensitively: Boolean,
                                      text: String,
                                      processor: Processor[VirtualFile]): Boolean = {
      val entries = getWordEntries(text, caseSensitively)
      if (entries.isEmpty) return true

      val collectProcessor: CommonProcessors.CollectProcessor[VirtualFile] = new CommonProcessors.CollectProcessor[VirtualFile]
      // Keep only occurrences whose usage-context mask overlaps the requested one.
      val checker = new Condition[Integer] {
        def value(integer: Integer): Boolean = (integer.intValue & searchContext) != 0
      }
      inReadAction {
        FileBasedIndex.getInstance.processFilesContainingAllKeys(IdIndex.NAME, entries, scope, checker, collectProcessor)
      }
      val index: FileIndexFacade = FileIndexFacade.getInstance(manager.getProject)
      ContainerUtil.process(collectProcessor.getResults, new ReadActionProcessor[VirtualFile] {
        def processInReadAction(virtualFile: VirtualFile): Boolean = {
          !index.shouldBeFound(scope, virtualFile) || processor.process(virtualFile)
        }
      })
    }

    /**
     * Only this method is actually differs from PsiSearchHelperImpl,
     * because it works only for java identifiers there.
     */
    private def getWordEntries(name: String, caseSensitively: Boolean): util.List[IdIndexEntry] = {
      val keys = new util.ArrayList[IdIndexEntry]
      if (ScalaNamesUtil.isIdentifier(name)) keys.add(new IdIndexEntry(name, caseSensitively))
      keys
    }
  }
}
| whorbowicz/intellij-scala | src/org/jetbrains/plugins/scala/findUsages/OperatorAndBacktickedSearcher.scala | Scala | apache-2.0 | 4,526 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Created by matan on 1/8/16.
*/
import java.nio.file.{Paths, Files}
object UtilCommands {
  /** os.name value reported by the JVM on OS X. */
  val OSX_NAME = "Mac OS X"

  // Prefer the bundled sshpass binary when present, otherwise rely on PATH.
  private val bundledSshpass = Paths.get("bin/utils/sshpass")
  val linuxSshpass = if (Files.exists(bundledSshpass)) "bin/utils/sshpass" else "sshpass"
  val osxSshpass = "/usr/local/bin/sshpass"

  def isOSX = System.getProperty("os.name") == OSX_NAME

  /** Platform-appropriate sshpass executable. */
  val sshpass = if (isOSX) osxSshpass else linuxSshpass
}
| nruppin/CM-Well | server/cmwell-cons/src/main/scala/UtilCommands.scala | Scala | apache-2.0 | 1,011 |
package org.github.sguzman.scala.game.scalebra.mvc.controller.schema
import org.github.sguzman.scala.game.scalebra.Scalebra
import org.github.sguzman.scala.game.scalebra.mvc.model.{Direction, Down, Left, Right, Up}
import org.github.sguzman.scala.game.scalebra.mvc.view.pause.TogglePause
import org.github.sguzman.scala.game.scalebra.util.log.L
import org.lwjgl.input.Keyboard
/**
* @author Salvador Guzman - sguzman
* @group Scalebra
* @version org.github.sguzman.scala.game.scalebra.mvc.controller.schema
* @note This class will take some action based on key events gotten from Input
* @since 5/8/16 1:00 AM
*/
class ControlS extends SchemaControl {
  /**
    * Handles the most recent keyboard event pulled by the input loop.
    *
    * Only key-press events are acted on; key releases are ignored. Movement
    * keys are mapped to a [[Direction]] that is forwarded to the view actor,
    * and P toggles pause directly.
    */
  override def action(): Unit = {
    L.i("Key event seen", "Input")
    // Ignore key-up events; this schema only reacts to presses.
    // (Guard clause instead of the non-idiomatic early `return`.)
    if (Keyboard.getEventKeyState) {
      val dir: Option[Direction] = Keyboard.getEventKey match {
        case Keyboard.KEY_W =>
          L.i("Up", "Input")
          Some(Up())
        case Keyboard.KEY_S =>
          L.i("Down", "Input")
          Some(Down())
        // NOTE(review): KEY_L/KEY_R look inconsistent with the W/S bindings
        // above — a WASD-style schema would normally use KEY_A/KEY_D. Kept
        // as-is; confirm the intended bindings before changing.
        case Keyboard.KEY_L =>
          L.i("Left", "Input")
          Some(Left())
        case Keyboard.KEY_R =>
          L.i("Right", "Input")
          Some(Right())
        case Keyboard.KEY_P =>
          L.i("P", "Input")
          Scalebra.viewAc ! TogglePause
          None
        case _ => None
      }
      // Idiomatic Option handling instead of isDefined/get.
      dir.foreach(Scalebra.viewAc ! _)
    }
  }
}
| sguzman/Scalebra | src/main/scala/org/github/sguzman/scala/game/scalebra/mvc/controller/schema/ControlS.scala | Scala | mit | 1,608 |
package jigg.nlp.ccg
/*
Copyright 2013-2015 Hiroshi Noji
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import tagger.{LF=>Feature, MaxEntMultiTagger, MaxEntMultiTaggerTrainer, FeatureExtractors}
import lexicon._
import jigg.ml._
import scala.collection.mutable.HashMap
/** Trained supertagger model: the dictionary, the feature-to-index map, the
  * learned weight vector and the feature extractors needed to build a
  * maximum-entropy multitagger (or a trainer that resumes from this state).
  */
case class SuperTaggerModel(
  dict: Dictionary,
  featureMap: HashMap[Feature, Int],
  weights: WeightVec,
  extractors: FeatureExtractors) { self =>
  /** Returns a copy of this model with all zero-weight features removed.
    *
    * Active (non-zero) weight indexes are compacted into a fresh
    * `FixedWeightVector`, and `featureMap` is remapped to the new indexes;
    * features whose weight was zero are dropped entirely.
    */
  def reduceFeatures(): SuperTaggerModel = {
    // Assumes `weights` is a GrowableWeightVector[Float] at this point
    // (the representation used during training) — the cast is unchecked.
    val buffer = weights.asInstanceOf[GrowableWeightVector[Float]].array // 0 1.0 2.0 0 0 1.0 ...
    val activeIdxs = buffer.zipWithIndex filter (_._1 != 0) map (_._2) // 1 2 5
    println(s"# features reduced from ${buffer.size} to ${activeIdxs.size}")
    val idxMap = activeIdxs.zipWithIndex.toMap // {1->0, 2->1, 5->2}
    val newFeatureMap = featureMap collect {
      case (f, oldIdx) if idxMap.isDefinedAt(oldIdx) => (f, idxMap(oldIdx))
    }
    val newWeights = new FixedWeightVector[Float](activeIdxs.map(buffer).toArray)
    this copy (featureMap = newFeatureMap, weights = newWeights)
  }
  /** Builds a trainer that shares this model's feature space and dictionary. */
  def mkMultiTaggerTrainer(classifierTrainer: OnlineLogLinearTrainer[Int]) =
    new MaxEntMultiTaggerTrainer(mkIndexer(), extractors, classifierTrainer, dict)
  /** Builds a tagger for inference, backed by this model's weights. */
  def mkMultiTagger() =
    new MaxEntMultiTagger(mkIndexer(), extractors, mkClassifier(), dict)
  // The classifier is a view over this model's weight vector (shared, not copied).
  def mkClassifier() = new LogLinearClassifier[Int] {
    override val weights = self.weights
  }
  private def mkIndexer() = new ExactFeatureIndexer(featureMap)
}
/** Binary (de)serialization of [[SuperTaggerModel]] via Java object streams. */
object SuperTaggerModel {

  /** Serializes `model` to `path`.
    *
    * The output stream is closed even when `writeObject` fails, fixing the
    * resource leak in the original unconditional close.
    */
  def saveTo(path: String, model: SuperTaggerModel) = {
    System.err.println("Saving tagger model to " + path)
    val os = jigg.util.IOUtil.openBinOut(path)
    try os.writeObject(model)
    finally os.close
  }

  /** Reads back a model previously written by [[saveTo]].
    *
    * The input stream is closed even when deserialization fails.
    */
  def loadFrom(path: String): SuperTaggerModel = {
    jigg.util.LogUtil.track("Loading supertagger model ...") {
      val in = jigg.util.IOUtil.openBinIn(path)
      try in.readObject.asInstanceOf[SuperTaggerModel]
      finally in.close
    }
  }
}
| mynlp/jigg | src/main/scala/jigg/nlp/ccg/SuperTaggerModel.scala | Scala | apache-2.0 | 2,488 |
/*
* Copyright (C) 2016 DANS - Data Archiving and Networked Services (info@dans.knaw.nl)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy
import java.io.IOException
import java.nio.charset.{ Charset, StandardCharsets }
import better.files.File
import cats.data.EitherNec
import org.joda.time.format.{ DateTimeFormatter, ISODateTimeFormat }
import org.joda.time.{ DateTime, DateTimeZone }
import scala.xml.{ Elem, PrettyPrinter, Utility, XML }
package object multideposit {
  /** Result of a step that aborts on the first conversion error. */
  type FailFast[T] = Either[ConversionFailed, T]
  /** Result whose error channel is a non-empty chain of conversion errors. */
  type FailFastNec[T] = EitherNec[ConversionFailed, T]
  // ISO-8601 formatter shared by all timestamp rendering in this module.
  val dateTimeFormatter: DateTimeFormatter = ISODateTimeFormat.dateTime()
  /** The current UTC time rendered as an ISO-8601 string. */
  def now: String = DateTime.now(DateTimeZone.UTC).toString(dateTimeFormatter)
  // Character set used for all text/XML written by this module.
  val encoding: Charset = StandardCharsets.UTF_8
  /** Filesystem permissions and group ownership to apply to a deposit. */
  case class DepositPermissions(permissions: String, group: String)
  /** Root of this module's error hierarchy; carries a message and an optional cause. */
  sealed trait SmdError {
    val msg: String
    val cause: Option[Throwable] = None
  }
  /** The input could not be parsed. */
  case class ParseFailed(override val msg: String) extends SmdError
  /** Errors raised while converting parsed input into deposits. */
  sealed trait ConversionFailed extends SmdError
  /** A conversion action failed, optionally wrapping the underlying exception. */
  case class ActionError(override val msg: String, override val cause: Option[Throwable] = None) extends ConversionFailed
  object ActionError {
    // Convenience overload: a (possibly null) Throwable becomes an Option.
    def apply(msg: String, cause: Throwable): ActionError = new ActionError(msg, Option(cause))
  }
  /** Error indicating an invalid datamanager. */
  case class InvalidDatamanager(override val msg: String) extends ConversionFailed
  /** Invalid input at a specific row; the row number is prefixed to the message. */
  case class InvalidInput(row: Int, localMsg: String) extends ConversionFailed {
    override val msg = s"row $row: $localMsg"
  }
  implicit class BetterFileExtensions(val file: File) extends AnyVal {
    /**
     * Writes the xml to `file` and prepends a simple xml header: `<?xml version="1.0" encoding="UTF-8"?>`
     *
     * @param elem the xml to be written
     * @param encoding the encoding applied to this xml
     */
    @throws[IOException]("in case of an I/O error")
    def writeXml(elem: Elem, encoding: Charset = encoding): Unit = {
      // Ensure the parent directory exists before XML.save opens the file.
      file.parent.createDirectories()
      XML.save(file.toString, Utility.trim(elem), encoding.toString, xmlDecl = true)
    }
  }
}
| DANS-KNAW/easy-process-sip | src/main/scala/nl.knaw.dans.easy.multideposit/package.scala | Scala | apache-2.0 | 2,622 |
package phenan.parsers.reader
/** A [[Position]] expressed as a character offset into a [[CharSeqSource]].
  *
  * Line, column and line-text information are derived from the source on
  * demand. Comparisons against another [[OffsetPosition]] are performed
  * directly on offsets; comparisons against any other [[Position]] fall back
  * to the base-class implementations.
  */
class OffsetPosition (val offset: Int, val source: CharSeqSource) extends Position {
  def lineString: String = source.lineString(offset)

  def column: Int = source.column(offset)

  def line: Int = source.line(offset)

  override def < (pos: Position): Boolean = pos match {
    case other: OffsetPosition => offset < other.offset
    case _                     => super.< (pos)
  }

  override def <= (pos: Position): Boolean = pos match {
    case other: OffsetPosition => offset <= other.offset
    case _                     => super.<= (pos)
  }

  // Two offset positions are equal only when both offset and source match.
  override def equals (pos: Position): Boolean = pos match {
    case other: OffsetPosition => offset == other.offset && source == other.source
    case _                     => super.equals(pos)
  }
}
| phenan/parsers | src/main/scala/phenan/parsers/reader/OffsetPosition.scala | Scala | mit | 736 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution
import monix.execution.atomic.PaddingStrategy._
import monix.execution.internal.atomic.BoxPaddingStrategy
/** A small toolkit of classes that support compare-and-swap semantics
* for safe mutation of variables.
*
* On top of the JVM, this means dealing with lock-free thread-safe
* programming. Also works on top of Javascript, with ''Scala.js'',
* for API compatibility purposes and because it's a useful way to
* box a value.
*
* The backbone of Atomic references is this method:
* {{{
* def compareAndSet(expect: T, update: T): Boolean
* }}}
*
* This method atomically sets a variable to the `update` value if it
* currently holds the `expect` value, reporting `true` on success or
* `false` on failure. The classes in this package also contain
* methods to get and unconditionally set values.
*
* Building a reference is easy with the provided constructor, which
* will automatically return the most specific type needed (in the
* following sample, that's an `AtomicDouble`, inheriting from
* `AtomicNumber[A]`):
*
* {{{
* val atomicNumber = Atomic(12.2)
*
* atomicNumber.incrementAndGet()
* // => 13.2
* }}}
*
* These also provide useful helpers for atomically mutating of
* values (i.e. `transform`, `transformAndGet`, `getAndTransform`,
* etc...) or of numbers of any kind (`incrementAndGet`, `getAndAdd`,
* etc...).
*/
package object atomic {
  /** Internal utility for converting between padding strategy representations.
    *
    * Maps each case of the public [[PaddingStrategy]] ADT onto the
    * corresponding `BoxPaddingStrategy` constant consumed by the JVM-side
    * boxed atomic implementations — one case per strategy.
    */
  private[execution] def boxStrategyToPaddingStrategy(s: PaddingStrategy): BoxPaddingStrategy =
    s match {
      case NoPadding =>
        BoxPaddingStrategy.NO_PADDING
      case Left64 =>
        BoxPaddingStrategy.LEFT_64
      case Right64 =>
        BoxPaddingStrategy.RIGHT_64
      case LeftRight128 =>
        BoxPaddingStrategy.LEFT_RIGHT_128
      case Left128 =>
        BoxPaddingStrategy.LEFT_128
      case Right128 =>
        BoxPaddingStrategy.RIGHT_128
      case LeftRight256 =>
        BoxPaddingStrategy.LEFT_RIGHT_256
    }
}
| Wogan/monix | monix-execution/jvm/src/main/scala/monix/execution/atomic/package.scala | Scala | apache-2.0 | 2,761 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package java.lang
import scala.scalajs.js
/* This is a hijacked class. Its instances are primitive numbers.
* Constructors are not emitted.
*/
final class Integer private () extends Number with Comparable[Integer] {
  def this(value: scala.Int) = this()
  def this(s: String) = this()
  // The instance *is* the primitive int at run time (hijacked class), so
  // conversion is just a cast.
  @inline def intValue(): scala.Int =
    this.asInstanceOf[scala.Int]
  @inline override def byteValue(): scala.Byte = intValue.toByte
  @inline override def shortValue(): scala.Short = intValue.toShort
  @inline def longValue(): scala.Long = intValue.toLong
  @inline def floatValue(): scala.Float = intValue.toFloat
  @inline def doubleValue(): scala.Double = intValue.toDouble
  // Reference equality suffices: equal primitive values are the same instance.
  @inline override def equals(that: Any): scala.Boolean =
    this eq that.asInstanceOf[AnyRef]
  @inline override def hashCode(): Int =
    intValue
  @inline override def compareTo(that: Integer): Int =
    Integer.compare(intValue, that.intValue)
  @inline override def toString(): String =
    Integer.toString(intValue)
}
object Integer {
  final val TYPE = classOf[scala.Int]
  final val MIN_VALUE = -2147483648
  final val MAX_VALUE = 2147483647
  final val SIZE = 32
  final val BYTES = 4
  // Used in place of the constructors, which are not emitted for this
  // hijacked class (see the class comment above).
  @inline def `new`(value: scala.Int): Integer = valueOf(value)
  @inline def `new`(s: String): Integer = valueOf(s)
  // Boxing is the identity: an Integer *is* the primitive int.
  @inline def valueOf(i: scala.Int): Integer = i.asInstanceOf[Integer]
  @inline def valueOf(s: String): Integer = valueOf(parseInt(s))
  @inline def valueOf(s: String, radix: Int): Integer =
    valueOf(parseInt(s, radix))
  @inline def parseInt(s: String): scala.Int = parseInt(s, 10)
  @noinline def parseInt(s: String, radix: scala.Int): scala.Int =
    parseIntImpl(s, radix, signed = true)
  @inline def parseUnsignedInt(s: String): scala.Int = parseUnsignedInt(s, 10)
  @noinline def parseUnsignedInt(s: String, radix: scala.Int): scala.Int =
    parseIntImpl(s, radix, signed = false)
  /* Shared implementation of parseInt/parseUnsignedInt. Digits are
   * accumulated in a Double (exact for all values in range here), so
   * overflow is detected by comparing against `maxAbsValue` before the
   * final truncation to Int via `asInt`.
   */
  @inline
  private def parseIntImpl(s: String, radix: scala.Int,
      signed: scala.Boolean): scala.Int = {
    def fail(): Nothing =
      throw new NumberFormatException(s"""For input string: "$s"""")
    val len = if (s == null) 0 else s.length
    if (len == 0 || radix < Character.MIN_RADIX || radix > Character.MAX_RADIX)
      fail()
    val firstChar = s.charAt(0)
    val negative = signed && firstChar == '-'
    // Largest allowed magnitude: 2^32-1 unsigned, 2^31 for "-...", 2^31-1 otherwise.
    val maxAbsValue: scala.Double = {
      if (!signed) 0xffffffffL.toDouble
      else if (negative) 0x80000000L.toDouble
      else 0x7fffffffL.toDouble
    }
    var i = if (negative || firstChar == '+') 1 else 0
    // We need at least one digit
    if (i >= s.length)
      fail()
    var result: scala.Double = 0.0
    while (i != len) {
      val digit = Character.digitWithValidRadix(s.charAt(i), radix)
      result = result * radix + digit
      if (digit == -1 || result > maxAbsValue)
        fail()
      i += 1
    }
    if (negative)
      asInt(-result)
    else
      asInt(result)
  }
  @inline def toString(i: scala.Int): String = "" + i
  @inline def toUnsignedString(i: Int, radix: Int): String =
    toStringBase(i, radix)
  @inline def compare(x: scala.Int, y: scala.Int): scala.Int =
    if (x == y) 0 else if (x < y) -1 else 1
  @inline def compareUnsigned(x: scala.Int, y: scala.Int): scala.Int = {
    import js.JSNumberOps._
    if (x == y) 0
    else if (x.toUint > y.toUint) 1
    else -1
  }
  // Zero-extends: the low 32 bits of the Long are the unsigned value of x.
  @inline def toUnsignedLong(x: Int): scala.Long =
    x.toLong & 0xffffffffL
  def bitCount(i: scala.Int): scala.Int = {
    /* See http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
     *
     * The original algorithm uses *logical* shift rights. Here we use
     * *arithmetic* shift rights instead. >> is shorter than >>>, especially
     * since the latter needs (a >>> b) | 0 in JS. It might also be the case
     * that >>> is a bit slower for that reason on some VMs.
     *
     * Using >> is valid because:
     * * For the 2 first >>, the possible sign bit extension is &'ed away
     * * For (t2 >> 4), t2 cannot be negative because it is at most the result
     *   of 2 * 0x33333333, which does not overflow and is positive.
     * * For the last >> 24, the left operand cannot be negative either.
     *   Assume it was, that means the result of a >>> would be >= 128, but
     *   the correct result must be <= 32. So by contradiction, it is positive.
     */
    val t1 = i - ((i >> 1) & 0x55555555)
    val t2 = (t1 & 0x33333333) + ((t1 >> 2) & 0x33333333)
    (((t2 + (t2 >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24
  }
  // `0 / 0` delegates to integer division so that a zero divisor fails the
  // same way as ordinary Int division.
  @inline def divideUnsigned(dividend: Int, divisor: Int): Int =
    if (divisor == 0) 0 / 0
    else asInt(asUint(dividend) / asUint(divisor))
  @inline def remainderUnsigned(dividend: Int, divisor: Int): Int =
    if (divisor == 0) 0 % 0
    else asInt(asUint(dividend) % asUint(divisor))
  @inline def highestOneBit(i: Int): Int = {
    /* The natural way of implementing this is:
     *   if (i == 0) 0
     *   else (1 << 31) >>> numberOfLeadingZeros(i)
     *
     * We can deal with the 0 case in a branchless fashion by adding `& i` to
     * the else branch:
     *   ((1 << 31) >>> numberOfLeadingZeros(i)) & i
     * Indeed, when i == 0, the `& i` collapses everything to 0. And otherwise,
     * we know that ((1 << 31) >>> numberOfLeadingZeros(i)) is the highest 1
     * bit of i, so &'ing with i is a no-op.
     *
     * Finally, since we're &'ing with i anyway, we can replace the >>> by a
     * >>, which is shorter in JS and does not require the additional `| 0`.
     */
    ((1 << 31) >> numberOfLeadingZeros(i)) & i
  }
  // Two's complement trick: i & -i isolates the lowest set bit.
  @inline def lowestOneBit(i: Int): Int =
    i & -i
  def reverseBytes(i: scala.Int): scala.Int = {
    val byte3 = i >>> 24
    val byte2 = (i >>> 8) & 0xFF00
    val byte1 = (i << 8) & 0xFF0000
    val byte0 = i << 24
    byte0 | byte1 | byte2 | byte3
  }
  def reverse(i: scala.Int): scala.Int = {
    // From Hacker's Delight, 7-1, Figure 7-1
    val j = (i & 0x55555555) << 1 | (i >> 1) & 0x55555555
    val k = (j & 0x33333333) << 2 | (j >> 2) & 0x33333333
    reverseBytes((k & 0x0F0F0F0F) << 4 | (k >> 4) & 0x0F0F0F0F)
  }
  @inline def rotateLeft(i: scala.Int, distance: scala.Int): scala.Int =
    (i << distance) | (i >>> -distance)
  @inline def rotateRight(i: scala.Int, distance: scala.Int): scala.Int =
    (i >>> distance) | (i << -distance)
  @inline def signum(i: scala.Int): scala.Int =
    if (i == 0) 0 else if (i < 0) -1 else 1
  // Intrinsic
  def numberOfLeadingZeros(i: scala.Int): scala.Int = {
    // See Hacker's Delight, Section 5-3
    var x = i
    if (x == 0) {
      32
    } else {
      var r = 1
      if ((x & 0xffff0000) == 0) { x <<= 16; r += 16 }
      if ((x & 0xff000000) == 0) { x <<= 8; r += 8 }
      if ((x & 0xf0000000) == 0) { x <<= 4; r += 4 }
      if ((x & 0xc0000000) == 0) { x <<= 2; r += 2 }
      r + (x >> 31)
    }
  }
  // i & -i isolates the lowest set bit; its leading-zero count gives the answer.
  @inline def numberOfTrailingZeros(i: scala.Int): scala.Int =
    if (i == 0) 32
    else 31 - numberOfLeadingZeros(i & -i)
  def toBinaryString(i: scala.Int): String = toStringBase(i, 2)
  def toHexString(i: scala.Int): String = toStringBase(i, 16)
  def toOctalString(i: scala.Int): String = toStringBase(i, 8)
  @inline // because radix is almost certainly constant at call site
  def toString(i: Int, radix: Int): String = {
    if (radix == 10 || radix < Character.MIN_RADIX || radix > Character.MAX_RADIX) {
      Integer.toString(i)
    } else {
      import js.JSNumberOps.enableJSNumberOps
      i.toString(radix)
    }
  }
  @inline def toUnsignedString(i: scala.Int): String = toUnsignedString(i, 10)
  @inline def hashCode(value: Int): Int = value.hashCode
  @inline def sum(a: Int, b: Int): Int = a + b
  @inline def max(a: Int, b: Int): Int = Math.max(a, b)
  @inline def min(a: Int, b: Int): Int = Math.min(a, b)
  // Renders the value as unsigned in the given base via JS Number.toString.
  @inline private[this] def toStringBase(i: scala.Int, base: scala.Int): String = {
    asUint(i).asInstanceOf[js.Dynamic]
      .applyDynamic("toString")(base.asInstanceOf[js.Dynamic])
      .asInstanceOf[String]
  }
  // JS `| 0` truncates the Double to a signed Int32.
  @inline private def asInt(n: scala.Double): scala.Int =
    (n.asInstanceOf[js.Dynamic] | 0.asInstanceOf[js.Dynamic]).asInstanceOf[Int]
  // JS `>>> 0` reinterprets the Int's bits as an unsigned value (as a Double).
  @inline private def asUint(n: scala.Int): scala.Double =
    (n.asInstanceOf[js.Dynamic] >>> 0.asInstanceOf[js.Dynamic]).asInstanceOf[scala.Double]
}
| nicolasstucki/scala-js | javalanglib/src/main/scala/java/lang/Integer.scala | Scala | apache-2.0 | 8,562 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.jdbc
import org.apache.spark.sql.types.{DataType, MetadataBuilder}
/**
* AggregatedDialect can unify multiple dialects into one virtual Dialect.
* Dialects are tried in order, and the first dialect that does not return a
* neutral element will will.
*
* @param dialects List of dialects.
*/
private class AggregatedDialect(dialects: List[JdbcDialect]) extends JdbcDialect {
require(dialects.nonEmpty)
override def canHandle(url : String): Boolean =
dialects.map(_.canHandle(url)).reduce(_ && _)
override def getCatalystType(
sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] = {
dialects.flatMap(_.getCatalystType(sqlType, typeName, size, md)).headOption
}
override def getJDBCType(dt: DataType): Option[JdbcType] = {
dialects.flatMap(_.getJDBCType(dt)).headOption
}
}
| aokolnychyi/spark | sql/core/src/main/scala/org/apache/spark/sql/jdbc/AggregatedDialect.scala | Scala | apache-2.0 | 1,674 |
package spinoco.protocol.kafka.codec
import scodec.Codec
import shapeless.{::, HNil, tag}
import spinoco.protocol.kafka.Response.MetadataResponse
import spinoco.protocol.kafka._
import scodec.codecs._
import spinoco.protocol.common.util._
import shapeless.tag.@@
import spinoco.protocol.kafka.Request.MetadataRequest
object MetadataCodec {
val requestCodec:Codec[MetadataRequest] = {
val tagger = tagF[Vector,String]
kafkaArray(kafkaRequiredString)
.xmap(topics => MetadataRequest(tagger(topics)),rq => tagger.unwrap(rq.topics))
}
val metadataResponseCodec:Codec[MetadataResponse] = {
(
("Brokers" | kafkaArray(impl.brokerCodec)) ~
( "Topics" | kafkaArray(impl.topicMetadataCodec))
)
.xmap(MetadataResponse.apply _ tupled, mr => (mr.brokers, mr.topics))
}
object impl {
val brokerCodec:Codec[Broker] = {
"Broker" | (
("Node Id" | int32) ::
("Host" | kafkaRequiredString) ::
("Port" | int32 )
).xmap(
{ case nodeId :: host :: port :: HNil => Broker(tag[Broker](nodeId),host,port)}
, b => (b.nodeId:Int) :: b.host :: b.port :: HNil
)
}
val partitionMetadataCodec:Codec[PartitionMetadata] = {
"Partition" | (
("Error Code" | kafkaError ) ::
("Partition Id" | int32) ::
("Leader" | int32) ::
("Replicas" | kafkaArray(int32)) ::
("Isr" | kafkaArray(int32))
).xmap(
{ case error :: pid :: leader :: replicas :: isrs :: HNil =>
val leaderOption = if (leader == -1) None else Some(tag[Broker](leader))
PartitionMetadata(
error = error
, id = tag[PartitionId](pid)
, leader = leaderOption
, replicas = replicas.asInstanceOf[Vector[Int @@ Broker]] // unsafe but saves vector traversal
, isr = isrs.asInstanceOf[Vector[Int @@ Broker]] // unsafe but saves vector traversal
)
}
, pm => pm.error :: (pm.id:Int) :: pm.leader.getOrElse(-1) :: (pm.replicas:Vector[Int]) :: (pm.isr:Vector[Int]) :: HNil
)
}
val topicMetadataCodec:Codec[TopicMetadata] = {
"Topic" | (
("Error Code" | kafkaError) ::
("TopicName" | kafkaRequiredString ) ::
("Partitions" | kafkaArray(partitionMetadataCodec))
).xmap(
{ case error :: name :: partitions :: HNil =>
TopicMetadata(error,tag[TopicName](name),partitions)
}
, tm => tm.error :: (tm.name:String) :: tm.partitions :: HNil
)
}
}
}
| Spinoco/protocol | kafka/src/main/scala/spinoco/protocol/kafka/codec/MetadataCodec.scala | Scala | mit | 2,611 |
/*
* Copyright 2017 Sumo Logic
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ws.epigraph.java.service.assemblers
import java.nio.file.Path
import ws.epigraph.compiler.CDatumType
import ws.epigraph.java._
import ws.epigraph.java.NewlineStringInterpolator.NewlineHelper
import ws.epigraph.java.JavaGenNames.{ln, lqn2}
import ws.epigraph.java.service.projections.req.output.ReqOutputRecordModelProjectionGen
import ws.epigraph.lang.Qn
import ws.epigraph.util.JavaNames
/**
* @author <a href="mailto:konstantin.sobolev@gmail.com">Konstantin Sobolev</a>
*/
class FieldAssemblersGen(rag: RecordAsmGen, val ctx: GenContext) extends JavaGen with Fragments {
private val cType: CDatumType = JavaGenUtils.toCType(rag.g.op.`type`())
val namespace: Qn = rag.g.namespace
val shortClassName: String = ln(cType) + "FieldAssemblers"
val fullClassName: String = namespace.append(shortClassName).toString
override def relativeFilePath: Path = JavaGenUtils.fqnToPath(namespace).resolve(shortClassName + ".java")
private lazy val parentOpt: Option[FieldAssemblersGen] = rag.g.parentClassGenOpt.map(
pg => pg.asInstanceOf[ReqOutputRecordModelProjectionGen].assemblerGen.fieldAssemblersGen
)
def methodName(fieldName: String): String = JavaNames.javaName(fieldName)
case class AsmSupplier(
fieldName: String,
overloaded: Boolean,
pg: ReqOutputRecordModelProjectionGen) {
private val fieldPart = rag.fieldPart(fieldName).get
val projectionType: Fragment = Fragment.imp(fieldPart.fieldGen.fullClassName)
val assemblerResultType: Fragment = Fragment.imp(lqn2(fieldPart.fieldType, namespace.toString))
val resultTypeSuffix: String = if (fieldPart.isEntity) "" else ".Value"
def gen: Fragment = Fragment(/*@formatter:off*/sn"""\\
/**
* Builds {@code $fieldName} field value
*
* @param dto data transfer object
* @param projection request projection
* @param ctx assembly context
*
* @return field value
*/
public ${frag.notNull}$assemblerResultType$resultTypeSuffix ${methodName(fieldName)}(${frag.notNull}D dto, ${frag.notNull}$projectionType projection, ${frag.notNull}${frag.assemblerContext} ctx);
"""/*@formatter:on*/
)
}
val asmSuppliers: Seq[AsmSupplier] = rag.g.fieldProjections.toSeq.map { case (fieldName, (parentGenOpt, _)) =>
AsmSupplier(
fieldName,
parentGenOpt.isDefined,
parentGenOpt.getOrElse(rag.g).asInstanceOf[ReqOutputRecordModelProjectionGen]
)
}
override protected def generate: String = {
if (rag.g.invalidParentClassGenerator) {
throw new TryLaterException(s"Can't generate $fullClassName because parent projection wasn't created yet")
}
val parentImp: Option[Fragment] = parentOpt.map(p => Fragment.imp(p.fullClassName))
val extendsClause: Fragment = parentImp.map(ip => Fragment(s"extends $ip<D> ")).getOrElse(Fragment.empty)
interpolate(
namespace, Fragment(
/*@formatter:off*/sn"""\\
${JavaGenUtils.topLevelComment}
package $namespace;\\
${Fragment.emptyLine}\\
${Fragment.imports}\\
${Fragment.emptyLine}\\
/**
* Field assemblers for {@code ${ln(cType)}} type
*/
${JavaGenUtils.generatedAnnotation(this)}
public interface $shortClassName<D> $extendsClause{
${Fragment.join(asmSuppliers.map(_.gen), Fragment.emptyLine)}\\
}"""/*@formatter:on*/
)
)
}
}
| SumoLogic/epigraph | java/codegen/src/main/scala/ws/epigraph/java/service/assemblers/FieldAssemblersGen.scala | Scala | apache-2.0 | 3,868 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.matchers.dsl
import org.scalatest._
import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.matchers.should.Matchers._
/**
 * Verifies the <code>startWith</code> matcher word: its <code>toString</code>
 * rendering and the <code>MatchResult</code> (and its negation) produced by
 * each overload — plain substring, regex given as a <code>String</code>,
 * regex given as a <code>Regex</code>, and regexes with
 * <code>withGroup</code> / <code>withGroups</code> group expectations.
 */
class StartWithWordSpec extends AnyFreeSpec with FileMocks {
  "StartWithWord " - {
    "should have pretty toString" in {
      startWith.toString should be ("startWith")
    }
    // startWith ("Pr") — plain substring matching
    "apply(String) method returns Matcher" - {
      val mt = startWith ("Pr")
      "should have pretty toString" in {
        mt.toString should be ("startWith (\\"Pr\\")")
      }
      val mr = mt("Programmer")
      "should have correct MatcherResult" in {
        mr.matches shouldBe true
        mr.failureMessage shouldBe "\\"Programmer\\" did not start with substring \\"Pr\\""
        mr.negatedFailureMessage shouldBe "\\"Programmer\\" started with substring \\"Pr\\""
        mr.midSentenceFailureMessage shouldBe "\\"Programmer\\" did not start with substring \\"Pr\\""
        mr.midSentenceNegatedFailureMessage shouldBe "\\"Programmer\\" started with substring \\"Pr\\""
        mr.rawFailureMessage shouldBe "{0} did not start with substring {1}"
        mr.rawNegatedFailureMessage shouldBe "{0} started with substring {1}"
        mr.rawMidSentenceFailureMessage shouldBe "{0} did not start with substring {1}"
        mr.rawMidSentenceNegatedFailureMessage shouldBe "{0} started with substring {1}"
        mr.failureMessageArgs shouldBe Vector("Programmer", "Pr")
        mr.negatedFailureMessageArgs shouldBe Vector("Programmer", "Pr")
        mr.midSentenceFailureMessageArgs shouldBe Vector("Programmer", "Pr")
        mr.midSentenceNegatedFailureMessageArgs shouldBe Vector("Programmer", "Pr")
      }
      val nmr = mr.negated
      "should have correct negated MatcherResult" in {
        nmr.matches shouldBe false
        nmr.failureMessage shouldBe "\\"Programmer\\" started with substring \\"Pr\\""
        nmr.negatedFailureMessage shouldBe "\\"Programmer\\" did not start with substring \\"Pr\\""
        nmr.midSentenceFailureMessage shouldBe "\\"Programmer\\" started with substring \\"Pr\\""
        nmr.midSentenceNegatedFailureMessage shouldBe "\\"Programmer\\" did not start with substring \\"Pr\\""
        nmr.rawFailureMessage shouldBe "{0} started with substring {1}"
        nmr.rawNegatedFailureMessage shouldBe "{0} did not start with substring {1}"
        nmr.rawMidSentenceFailureMessage shouldBe "{0} started with substring {1}"
        nmr.rawMidSentenceNegatedFailureMessage shouldBe "{0} did not start with substring {1}"
        nmr.failureMessageArgs shouldBe Vector("Programmer", "Pr")
        nmr.negatedFailureMessageArgs shouldBe Vector("Programmer", "Pr")
        nmr.midSentenceFailureMessageArgs shouldBe Vector("Programmer", "Pr")
        nmr.midSentenceNegatedFailureMessageArgs shouldBe Vector("Programmer", "Pr")
      }
    }
    // startWith regex "..." — regular expression given as a String
    "regex(String) method returns Matcher" - {
      val decimal = """(-)?(\\d+)(\\.\\d*)?"""
      val mt = startWith regex decimal
      "should have pretty toString" in {
        mt.toString should be ("startWith regex " + decimal)
      }
      val mr = mt("2.7b")
      "should have correct MatcherResult" in {
        mr.matches shouldBe true
        mr.failureMessage shouldBe "\\"2.7b\\" did not start with a substring that matched the regular expression " + decimal
        mr.negatedFailureMessage shouldBe "\\"2.7b\\" started with a substring that matched the regular expression " + decimal
        mr.midSentenceFailureMessage shouldBe "\\"2.7b\\" did not start with a substring that matched the regular expression " + decimal
        mr.midSentenceNegatedFailureMessage shouldBe "\\"2.7b\\" started with a substring that matched the regular expression " + decimal
        mr.rawFailureMessage shouldBe "{0} did not start with a substring that matched the regular expression {1}"
        mr.rawNegatedFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}"
        mr.rawMidSentenceFailureMessage shouldBe "{0} did not start with a substring that matched the regular expression {1}"
        mr.rawMidSentenceNegatedFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}"
        mr.failureMessageArgs shouldBe Vector("2.7b", UnquotedString(decimal))
        mr.negatedFailureMessageArgs shouldBe Vector("2.7b", UnquotedString(decimal))
        mr.midSentenceFailureMessageArgs shouldBe Vector("2.7b", UnquotedString(decimal))
        mr.midSentenceNegatedFailureMessageArgs shouldBe Vector("2.7b", UnquotedString(decimal))
      }
      val nmr = mr.negated
      "should have correct negated MatcherResult" in {
        nmr.matches shouldBe false
        nmr.failureMessage shouldBe "\\"2.7b\\" started with a substring that matched the regular expression " + decimal
        nmr.negatedFailureMessage shouldBe "\\"2.7b\\" did not start with a substring that matched the regular expression " + decimal
        nmr.midSentenceFailureMessage shouldBe "\\"2.7b\\" started with a substring that matched the regular expression " + decimal
        nmr.midSentenceNegatedFailureMessage shouldBe "\\"2.7b\\" did not start with a substring that matched the regular expression " + decimal
        nmr.rawFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}"
        nmr.rawNegatedFailureMessage shouldBe "{0} did not start with a substring that matched the regular expression {1}"
        nmr.rawMidSentenceFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}"
        nmr.rawMidSentenceNegatedFailureMessage shouldBe "{0} did not start with a substring that matched the regular expression {1}"
        nmr.failureMessageArgs shouldBe Vector("2.7b", UnquotedString(decimal))
        nmr.negatedFailureMessageArgs shouldBe Vector("2.7b", UnquotedString(decimal))
        nmr.midSentenceFailureMessageArgs shouldBe Vector("2.7b", UnquotedString(decimal))
        nmr.midSentenceNegatedFailureMessageArgs shouldBe Vector("2.7b", UnquotedString(decimal))
      }
    }
    // startWith regex "...".r — regular expression given as a scala.util.matching.Regex
    "regex(Regex) method returns Matcher" - {
      val decimal = """(-)?(\\d+)(\\.\\d*)?"""
      val mt = startWith regex decimal.r
      "should have pretty toString" in {
        mt.toString should be ("startWith regex " + decimal)
      }
      val mr = mt("2.7b")
      "should have correct MatcherResult" in {
        mr.matches shouldBe true
        mr.failureMessage shouldBe "\\"2.7b\\" did not start with a substring that matched the regular expression " + decimal
        mr.negatedFailureMessage shouldBe "\\"2.7b\\" started with a substring that matched the regular expression " + decimal
        mr.midSentenceFailureMessage shouldBe "\\"2.7b\\" did not start with a substring that matched the regular expression " + decimal
        mr.midSentenceNegatedFailureMessage shouldBe "\\"2.7b\\" started with a substring that matched the regular expression " + decimal
        mr.rawFailureMessage shouldBe "{0} did not start with a substring that matched the regular expression {1}"
        mr.rawNegatedFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}"
        mr.rawMidSentenceFailureMessage shouldBe "{0} did not start with a substring that matched the regular expression {1}"
        mr.rawMidSentenceNegatedFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}"
        mr.failureMessageArgs shouldBe Vector("2.7b", UnquotedString(decimal))
        mr.negatedFailureMessageArgs shouldBe Vector("2.7b", UnquotedString(decimal))
        mr.midSentenceFailureMessageArgs shouldBe Vector("2.7b", UnquotedString(decimal))
        mr.midSentenceNegatedFailureMessageArgs shouldBe Vector("2.7b", UnquotedString(decimal))
      }
      val nmr = mr.negated
      "should have correct negated MatcherResult" in {
        nmr.matches shouldBe false
        nmr.failureMessage shouldBe "\\"2.7b\\" started with a substring that matched the regular expression " + decimal
        nmr.negatedFailureMessage shouldBe "\\"2.7b\\" did not start with a substring that matched the regular expression " + decimal
        nmr.midSentenceFailureMessage shouldBe "\\"2.7b\\" started with a substring that matched the regular expression " + decimal
        nmr.midSentenceNegatedFailureMessage shouldBe "\\"2.7b\\" did not start with a substring that matched the regular expression " + decimal
        nmr.rawFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}"
        nmr.rawNegatedFailureMessage shouldBe "{0} did not start with a substring that matched the regular expression {1}"
        nmr.rawMidSentenceFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}"
        nmr.rawMidSentenceNegatedFailureMessage shouldBe "{0} did not start with a substring that matched the regular expression {1}"
        nmr.failureMessageArgs shouldBe Vector("2.7b", UnquotedString(decimal))
        nmr.negatedFailureMessageArgs shouldBe Vector("2.7b", UnquotedString(decimal))
        nmr.midSentenceFailureMessageArgs shouldBe Vector("2.7b", UnquotedString(decimal))
        nmr.midSentenceNegatedFailureMessageArgs shouldBe Vector("2.7b", UnquotedString(decimal))
      }
    }
    // startWith regex ("a(b*)c" withGroup "bb") — regex plus an expected capture-group value
    "regex(a(b*)c withGroup bb) method returns Matcher" - {
      val bb = "bb"
      val mt = startWith regex ("""a(b*)c""" withGroup bb)
      "should have pretty toString" in {
        mt.toString should be ("startWith regex \\"a(b*)c\\" withGroup (\\"" + bb + "\\")")
      }
      val mr1 = mt("abbc")
      "when apply with \\"abbc\\"" - {
        "should have correct MatcherResult" in {
          mr1.matches shouldBe true
          mr1.failureMessage shouldBe "\\"abbc\\" started with a substring that matched the regular expression a(b*)c, but \\"bb\\" did not match group bb"
          mr1.negatedFailureMessage shouldBe "\\"abbc\\" started with a substring that matched the regular expression a(b*)c and group bb"
          mr1.midSentenceFailureMessage shouldBe "\\"abbc\\" started with a substring that matched the regular expression a(b*)c, but \\"bb\\" did not match group bb"
          mr1.midSentenceNegatedFailureMessage shouldBe "\\"abbc\\" started with a substring that matched the regular expression a(b*)c and group bb"
          mr1.rawFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}, but {2} did not match group {3}"
          mr1.rawNegatedFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1} and group {2}"
          mr1.rawMidSentenceFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}, but {2} did not match group {3}"
          mr1.rawMidSentenceNegatedFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1} and group {2}"
          mr1.failureMessageArgs shouldBe Vector("abbc", UnquotedString("a(b*)c"), "bb", UnquotedString("bb"))
          mr1.negatedFailureMessageArgs shouldBe Vector("abbc", UnquotedString("a(b*)c"), UnquotedString("bb"))
          mr1.midSentenceFailureMessageArgs shouldBe Vector("abbc", UnquotedString("a(b*)c"), "bb", UnquotedString("bb"))
          mr1.midSentenceNegatedFailureMessageArgs shouldBe Vector("abbc", UnquotedString("a(b*)c"), UnquotedString("bb"))
        }
        val nmr = mr1.negated
        "should have correct negated MatcherResult" in {
          nmr.matches shouldBe false
          nmr.failureMessage shouldBe "\\"abbc\\" started with a substring that matched the regular expression a(b*)c and group bb"
          nmr.negatedFailureMessage shouldBe "\\"abbc\\" started with a substring that matched the regular expression a(b*)c, but \\"bb\\" did not match group bb"
          nmr.midSentenceFailureMessage shouldBe "\\"abbc\\" started with a substring that matched the regular expression a(b*)c and group bb"
          nmr.midSentenceNegatedFailureMessage shouldBe "\\"abbc\\" started with a substring that matched the regular expression a(b*)c, but \\"bb\\" did not match group bb"
          nmr.rawFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1} and group {2}"
          nmr.rawNegatedFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}, but {2} did not match group {3}"
          nmr.rawMidSentenceFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1} and group {2}"
          nmr.rawMidSentenceNegatedFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}, but {2} did not match group {3}"
          nmr.failureMessageArgs shouldBe Vector("abbc", UnquotedString("a(b*)c"), UnquotedString("bb"))
          nmr.negatedFailureMessageArgs shouldBe Vector("abbc", UnquotedString("a(b*)c"), "bb", UnquotedString("bb"))
          nmr.midSentenceFailureMessageArgs shouldBe Vector("abbc", UnquotedString("a(b*)c"), UnquotedString("bb"))
          nmr.midSentenceNegatedFailureMessageArgs shouldBe Vector("abbc", UnquotedString("a(b*)c"), "bb", UnquotedString("bb"))
        }
      }
      val mr2 = mt("abbbc")
      "when apply with \\"abbbc\\"" - {
        "should have correct MatcherResult" in {
          mr2.matches shouldBe false
          mr2.failureMessage shouldBe "\\"abbbc\\" started with a substring that matched the regular expression a(b*)c, but \\"bbb\\" did not match group bb"
          mr2.negatedFailureMessage shouldBe "\\"abbbc\\" started with a substring that matched the regular expression a(b*)c and group bb"
          mr2.midSentenceFailureMessage shouldBe "\\"abbbc\\" started with a substring that matched the regular expression a(b*)c, but \\"bbb\\" did not match group bb"
          mr2.midSentenceNegatedFailureMessage shouldBe "\\"abbbc\\" started with a substring that matched the regular expression a(b*)c and group bb"
          mr2.rawFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}, but {2} did not match group {3}"
          mr2.rawNegatedFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1} and group {2}"
          mr2.rawMidSentenceFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}, but {2} did not match group {3}"
          mr2.rawMidSentenceNegatedFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1} and group {2}"
          mr2.failureMessageArgs shouldBe Vector("abbbc", UnquotedString("a(b*)c"), "bbb", UnquotedString("bb"))
          mr2.negatedFailureMessageArgs shouldBe Vector("abbbc", UnquotedString("a(b*)c"), UnquotedString("bb"))
          mr2.midSentenceFailureMessageArgs shouldBe Vector("abbbc", UnquotedString("a(b*)c"), "bbb", UnquotedString("bb"))
          mr2.midSentenceNegatedFailureMessageArgs shouldBe Vector("abbbc", UnquotedString("a(b*)c"), UnquotedString("bb"))
        }
        val nmr = mr2.negated
        "should have correct negated MatcherResult" in {
          nmr.matches shouldBe true
          nmr.failureMessage shouldBe "\\"abbbc\\" started with a substring that matched the regular expression a(b*)c and group bb"
          nmr.negatedFailureMessage shouldBe "\\"abbbc\\" started with a substring that matched the regular expression a(b*)c, but \\"bbb\\" did not match group bb"
          nmr.midSentenceFailureMessage shouldBe "\\"abbbc\\" started with a substring that matched the regular expression a(b*)c and group bb"
          nmr.midSentenceNegatedFailureMessage shouldBe "\\"abbbc\\" started with a substring that matched the regular expression a(b*)c, but \\"bbb\\" did not match group bb"
          nmr.rawFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1} and group {2}"
          nmr.rawNegatedFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}, but {2} did not match group {3}"
          nmr.rawMidSentenceFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1} and group {2}"
          nmr.rawMidSentenceNegatedFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}, but {2} did not match group {3}"
          nmr.failureMessageArgs shouldBe Vector("abbbc", UnquotedString("a(b*)c"), UnquotedString("bb"))
          nmr.negatedFailureMessageArgs shouldBe Vector("abbbc", UnquotedString("a(b*)c"), "bbb", UnquotedString("bb"))
          nmr.midSentenceFailureMessageArgs shouldBe Vector("abbbc", UnquotedString("a(b*)c"), UnquotedString("bb"))
          nmr.midSentenceNegatedFailureMessageArgs shouldBe Vector("abbbc", UnquotedString("a(b*)c"), "bbb", UnquotedString("bb"))
        }
      }
      val mr3 = mt("ABBC")
      "when apply with \\"ABBC\\"" - {
        "should have correct MatcherResult" in {
          mr3.matches shouldBe false
          mr3.failureMessage shouldBe "\\"ABBC\\" did not start with a substring that matched the regular expression a(b*)c"
          mr3.negatedFailureMessage shouldBe "\\"ABBC\\" started with a substring that matched the regular expression a(b*)c"
          mr3.midSentenceFailureMessage shouldBe "\\"ABBC\\" did not start with a substring that matched the regular expression a(b*)c"
          mr3.midSentenceNegatedFailureMessage shouldBe "\\"ABBC\\" started with a substring that matched the regular expression a(b*)c"
          mr3.rawFailureMessage shouldBe "{0} did not start with a substring that matched the regular expression {1}"
          mr3.rawNegatedFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}"
          mr3.rawMidSentenceFailureMessage shouldBe "{0} did not start with a substring that matched the regular expression {1}"
          mr3.rawMidSentenceNegatedFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}"
          mr3.failureMessageArgs shouldBe Vector("ABBC", UnquotedString("a(b*)c"))
          mr3.negatedFailureMessageArgs shouldBe Vector("ABBC", UnquotedString("a(b*)c"))
          mr3.midSentenceFailureMessageArgs shouldBe Vector("ABBC", UnquotedString("a(b*)c"))
          mr3.midSentenceNegatedFailureMessageArgs shouldBe Vector("ABBC", UnquotedString("a(b*)c"))
        }
        val nmr = mr3.negated
        "should have correct negated MatcherResult" in {
          nmr.matches shouldBe true
          nmr.failureMessage shouldBe "\\"ABBC\\" started with a substring that matched the regular expression a(b*)c"
          nmr.negatedFailureMessage shouldBe "\\"ABBC\\" did not start with a substring that matched the regular expression a(b*)c"
          nmr.midSentenceFailureMessage shouldBe "\\"ABBC\\" started with a substring that matched the regular expression a(b*)c"
          nmr.midSentenceNegatedFailureMessage shouldBe "\\"ABBC\\" did not start with a substring that matched the regular expression a(b*)c"
          nmr.rawFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}"
          nmr.rawNegatedFailureMessage shouldBe "{0} did not start with a substring that matched the regular expression {1}"
          nmr.rawMidSentenceFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}"
          nmr.rawMidSentenceNegatedFailureMessage shouldBe "{0} did not start with a substring that matched the regular expression {1}"
          nmr.failureMessageArgs shouldBe Vector("ABBC", UnquotedString("a(b*)c"))
          nmr.negatedFailureMessageArgs shouldBe Vector("ABBC", UnquotedString("a(b*)c"))
          nmr.midSentenceFailureMessageArgs shouldBe Vector("ABBC", UnquotedString("a(b*)c"))
          nmr.midSentenceNegatedFailureMessageArgs shouldBe Vector("ABBC", UnquotedString("a(b*)c"))
        }
      }
    }
    // startWith regex ("a(b*)(c*)" withGroups ("bb", "cc")) — regex plus expected
    // values for multiple capture groups. (Description fixed: it previously said
    // "withGroup bb", which did not match the withGroups (bb, cc) code under test.)
    "regex(a(b*)(c*) withGroups (bb, cc)) method returns Matcher" - {
      val bb = "bb"
      val cc = "cc"
      val mt = startWith regex ("""a(b*)(c*)""" withGroups (bb, cc))
      "should have pretty toString" in {
        mt.toString should be ("startWith regex \\"a(b*)(c*)\\" withGroups (\\"" + bb + "\\", \\"" + cc + "\\")")
      }
      val mr = mt("abbccc")
      "should have correct MatcherResult" in {
        mr.matches shouldBe false
        mr.failureMessage shouldBe "\\"abbccc\\" started with a substring that matched the regular expression a(b*)(c*), but \\"ccc\\" did not match group cc at index 1"
        mr.negatedFailureMessage shouldBe "\\"abbccc\\" started with a substring that matched the regular expression a(b*)(c*) and group bb, cc"
        mr.midSentenceFailureMessage shouldBe "\\"abbccc\\" started with a substring that matched the regular expression a(b*)(c*), but \\"ccc\\" did not match group cc at index 1"
        mr.midSentenceNegatedFailureMessage shouldBe "\\"abbccc\\" started with a substring that matched the regular expression a(b*)(c*) and group bb, cc"
        mr.rawFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}, but {2} did not match group {3} at index {4}"
        mr.rawNegatedFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1} and group {2}"
        mr.rawMidSentenceFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}, but {2} did not match group {3} at index {4}"
        mr.rawMidSentenceNegatedFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1} and group {2}"
        mr.failureMessageArgs shouldBe Vector("abbccc", UnquotedString("a(b*)(c*)"), "ccc", UnquotedString("cc"), 1)
        mr.negatedFailureMessageArgs shouldBe Vector("abbccc", UnquotedString("a(b*)(c*)"), UnquotedString("bb, cc"))
        mr.midSentenceFailureMessageArgs shouldBe Vector("abbccc", UnquotedString("a(b*)(c*)"), "ccc", UnquotedString("cc"), 1)
        mr.midSentenceNegatedFailureMessageArgs shouldBe Vector("abbccc", UnquotedString("a(b*)(c*)"), UnquotedString("bb, cc"))
      }
      val nmr = mr.negated
      "should have correct negated MatcherResult" in {
        nmr.matches shouldBe true
        nmr.failureMessage shouldBe "\\"abbccc\\" started with a substring that matched the regular expression a(b*)(c*) and group bb, cc"
        nmr.negatedFailureMessage shouldBe "\\"abbccc\\" started with a substring that matched the regular expression a(b*)(c*), but \\"ccc\\" did not match group cc at index 1"
        nmr.midSentenceFailureMessage shouldBe "\\"abbccc\\" started with a substring that matched the regular expression a(b*)(c*) and group bb, cc"
        nmr.midSentenceNegatedFailureMessage shouldBe "\\"abbccc\\" started with a substring that matched the regular expression a(b*)(c*), but \\"ccc\\" did not match group cc at index 1"
        nmr.rawFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1} and group {2}"
        nmr.rawNegatedFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}, but {2} did not match group {3} at index {4}"
        nmr.rawMidSentenceFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1} and group {2}"
        nmr.rawMidSentenceNegatedFailureMessage shouldBe "{0} started with a substring that matched the regular expression {1}, but {2} did not match group {3} at index {4}"
        nmr.failureMessageArgs shouldBe Vector("abbccc", UnquotedString("a(b*)(c*)"), UnquotedString("bb, cc"))
        nmr.negatedFailureMessageArgs shouldBe Vector("abbccc", UnquotedString("a(b*)(c*)"), "ccc", UnquotedString("cc"), 1)
        nmr.midSentenceFailureMessageArgs shouldBe Vector("abbccc", UnquotedString("a(b*)(c*)"), UnquotedString("bb, cc"))
        nmr.midSentenceNegatedFailureMessageArgs shouldBe Vector("abbccc", UnquotedString("a(b*)(c*)"), "ccc", UnquotedString("cc"), 1)
      }
    }
  }
}
| scalatest/scalatest | jvm/scalatest-test/src/test/scala/org/scalatest/matchers/dsl/StartWithWordSpec.scala | Scala | apache-2.0 | 24,962 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.physical.stream
import org.apache.flink.streaming.api.transformations.OneInputTransformation
import org.apache.flink.table.api.window.{CountWindow, TimeWindow}
import org.apache.flink.table.api.{StreamTableEnvironment, TableConfig, TableException}
import org.apache.flink.table.calcite.FlinkRelBuilder.PlannerNamedWindowProperty
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.codegen.agg.AggsHandlerCodeGenerator
import org.apache.flink.table.codegen.{CodeGeneratorContext, EqualiserCodeGenerator}
import org.apache.flink.table.dataformat.BaseRow
import org.apache.flink.table.generated.{GeneratedNamespaceAggsHandleFunction, GeneratedRecordEqualiser}
import org.apache.flink.table.plan.logical._
import org.apache.flink.table.plan.nodes.exec.{ExecNode, StreamExecNode}
import org.apache.flink.table.plan.rules.physical.stream.StreamExecRetractionRules
import org.apache.flink.table.plan.util.AggregateUtil.{hasRowIntervalType, hasTimeIntervalType, isProctimeAttribute, isRowtimeAttribute, toDuration, toLong, transformToStreamAggregateInfoList}
import org.apache.flink.table.plan.util.{AggregateInfoList, KeySelectorUtil, RelExplainUtil, WindowEmitStrategy}
import org.apache.flink.table.runtime.window.{WindowOperator, WindowOperatorBuilder}
import org.apache.flink.table.types.LogicalTypeDataTypeConverter.fromDataTypeToLogicalType
import org.apache.flink.table.types.logical.LogicalType
import org.apache.flink.table.typeutils.BaseRowTypeInfo
import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.AggregateCall
import org.apache.calcite.rel.{RelNode, RelWriter, SingleRel}
import org.apache.calcite.tools.RelBuilder
import java.time.Duration
import java.util
import java.util.Calendar
import org.apache.flink.api.dag.Transformation
import scala.collection.JavaConversions._
/**
* Streaming group window aggregate physical node which will be translate to window operator.
*/
class StreamExecGroupWindowAggregate(
cluster: RelOptCluster,
traitSet: RelTraitSet,
inputRel: RelNode,
outputRowType: RelDataType,
inputRowType: RelDataType,
grouping: Array[Int],
val aggCalls: Seq[AggregateCall],
val window: LogicalWindow,
namedProperties: Seq[PlannerNamedWindowProperty],
inputTimeFieldIndex: Int,
val emitStrategy: WindowEmitStrategy)
extends SingleRel(cluster, traitSet, inputRel)
with StreamPhysicalRel
with StreamExecNode[BaseRow] {
// Updates are produced only when the emit strategy fires early/late results.
override def producesUpdates: Boolean = emitStrategy.produceUpdates
// Windowed aggregation can consume retraction messages from its input.
override def consumesRetractions = true
// Upstream updates must be delivered as retractions for correct accumulation.
override def needsUpdatesAsRetraction(input: RelNode) = true
// This node itself never emits retraction messages downstream.
override def producesRetractions: Boolean = false
/**
 * Watermarks are required exactly when the window is defined on an
 * event-time (rowtime) attribute: tumbling and sliding windows over
 * time intervals, and session windows. Processing-time windows and
 * count (row-interval) windows need no watermarks.
 */
override def requireWatermark: Boolean = window match {
  case TumblingGroupWindow(_, timeAttr, size) =>
    isRowtimeAttribute(timeAttr) && hasTimeIntervalType(size)
  case SlidingGroupWindow(_, timeAttr, size, _) =>
    isRowtimeAttribute(timeAttr) && hasTimeIntervalType(size)
  case SessionGroupWindow(_, timeAttr, _) =>
    isRowtimeAttribute(timeAttr)
  case _ =>
    false
}
/** Indices of the grouping key fields within the input row type. */
def getGrouping: Array[Int] = grouping
/** Named window properties (e.g. window start/end) exposed in the output. */
def getWindowProperties: Seq[PlannerNamedWindowProperty] = namedProperties
// The output row type is fixed at construction time.
override def deriveRowType(): RelDataType = outputRowType
// Calcite copy contract: new node with the given traits and inputs, all
// aggregate/window metadata carried over unchanged (single input expected).
override def copy(traitSet: RelTraitSet, inputs: java.util.List[RelNode]): RelNode = {
new StreamExecGroupWindowAggregate(
cluster,
traitSet,
inputs.get(0),
outputRowType,
inputRowType,
grouping,
aggCalls,
window,
namedProperties,
inputTimeFieldIndex,
emitStrategy)
}
// Adds this node's attributes to the plan explanation; conditional items
// (groupBy, properties, emit) are only printed when non-empty.
override def explainTerms(pw: RelWriter): RelWriter = {
super.explainTerms(pw)
.itemIf("groupBy", RelExplainUtil.fieldToString(grouping, inputRowType), grouping.nonEmpty)
.item("window", window)
.itemIf("properties", namedProperties.map(_.name).mkString(", "), namedProperties.nonEmpty)
.item("select", RelExplainUtil.streamWindowAggregationToString(
inputRowType,
grouping,
outputRowType,
aggCalls,
namedProperties))
.itemIf("emit", emitStrategy, !emitStrategy.toString.isEmpty)
}
//~ ExecNode methods -----------------------------------------------------------
/** The input [[RelNode]]s of this node, viewed as exec nodes. */
override def getInputNodes: util.List[ExecNode[StreamTableEnvironment, _]] = {
  val relInputs = getInputs
  relInputs.map(input => input.asInstanceOf[ExecNode[StreamTableEnvironment, _]])
}
/** Replaces the input at the given ordinal with the given exec node. */
override def replaceInputNode(
    ordinalInParent: Int,
    newInputNode: ExecNode[StreamTableEnvironment, _]): Unit = {
  val newInputRel = newInputNode.asInstanceOf[RelNode]
  replaceInput(ordinalInParent, newInputRel)
}
// Translates this plan node into a keyed OneInputTransformation that runs a
// code-generated WindowOperator over the input stream.
override protected def translateToPlanInternal(
tableEnv: StreamTableEnvironment): Transformation[BaseRow] = {
val config = tableEnv.getConfig
// Translate the single input node first.
val inputTransform = getInputNodes.get(0).translateToPlan(tableEnv)
.asInstanceOf[Transformation[BaseRow]]
val inputRowTypeInfo = inputTransform.getOutputType.asInstanceOf[BaseRowTypeInfo]
val outRowType = BaseRowTypeInfo.of(FlinkTypeFactory.toLogicalRowType(outputRowType))
// Retracting (ACC/RETRACT) input is not supported by windowed group-by; fail fast.
val inputIsAccRetract = StreamExecRetractionRules.isAccRetract(input)
if (inputIsAccRetract) {
throw new TableException(
"Group Window Agg: Retraction on windowed GroupBy aggregation is not supported yet. \\n" +
"please re-check sql grammar. \\n" +
"Note: Windowed GroupBy aggregation should not follow a" +
"non-windowed GroupBy aggregation.")
}
// Count windows keep per-key state indefinitely, so warn when no state
// retention interval is configured for a grouped query.
val isCountWindow = window match {
case TumblingGroupWindow(_, _, size) if hasRowIntervalType(size) => true
case SlidingGroupWindow(_, _, size, _) if hasRowIntervalType(size) => true
case _ => false
}
if (isCountWindow && grouping.length > 0 && config.getMinIdleStateRetentionTime < 0) {
LOG.warn(
"No state retention interval configured for a query which accumulates state. " +
"Please provide a query configuration with valid retention interval to prevent " +
"excessive state size. You may specify a retention time of 0 to not clean up the state.")
}
// validation
emitStrategy.checkValidation()
val aggString = RelExplainUtil.streamWindowAggregationToString(
inputRowType,
grouping,
outputRowType,
aggCalls,
namedProperties)
// For rowtime windows the index of the time attribute field must be known;
// processing-time windows use -1.
val timeIdx = if (isRowtimeAttribute(window.timeAttribute)) {
if (inputTimeFieldIndex < 0) {
throw new TableException(
"Group window aggregate must defined on a time attribute, " +
"but the time attribute can't be found.\\n" +
"This should never happen. Please file an issue."
)
}
inputTimeFieldIndex
} else {
-1
}
// NOTE(review): always false here, since ACC/RETRACT input was rejected above.
val needRetraction = StreamExecRetractionRules.isAccRetract(getInput)
// Derive aggregate metadata; accumulator state uses state-backend data views.
val aggInfoList = transformToStreamAggregateInfoList(
aggCalls,
inputRowType,
Array.fill(aggCalls.size)(needRetraction),
needInputCount = needRetraction,
isStateBackendDataViews = true)
// Code-generate the per-namespace aggregations handler.
val aggsHandler = createAggsHandler(
aggInfoList,
config,
tableEnv.getRelBuilder,
inputRowTypeInfo.getLogicalTypes,
needRetraction)
val aggResultTypes = aggInfoList.getActualValueTypes.map(fromDataTypeToLogicalType)
val windowPropertyTypes = namedProperties.map(_.property.resultType).toArray
// Equaliser compares output rows (agg values + window properties).
val generator = new EqualiserCodeGenerator(aggResultTypes ++ windowPropertyTypes)
val equaliser = generator.generateRecordEqualiser("WindowValueEqualiser")
val aggValueTypes = aggInfoList.getActualValueTypes.map(fromDataTypeToLogicalType)
val accTypes = aggInfoList.getAccTypes.map(fromDataTypeToLogicalType)
// Build the runtime window operator from the generated pieces.
val operator = createWindowOperator(
config,
aggsHandler,
equaliser,
accTypes,
windowPropertyTypes,
aggValueTypes,
inputRowTypeInfo.getLogicalTypes,
timeIdx)
val operatorName = if (grouping.nonEmpty) {
s"window: ($window), " +
s"groupBy: (${RelExplainUtil.fieldToString(grouping, inputRowType)}), " +
s"select: ($aggString)"
} else {
s"window: ($window), select: ($aggString)"
}
val transformation = new OneInputTransformation(
inputTransform,
operatorName,
operator,
outRowType,
getResource.getParallelism)
if (getResource.getMaxParallelism > 0) {
transformation.setMaxParallelism(getResource.getMaxParallelism)
}
// Key the stream by the grouping fields so window state is partitioned per key.
val selector = KeySelectorUtil.getBaseRowSelector(grouping, inputRowTypeInfo)
// set KeyType and Selector for state
transformation.setStateKeySelector(selector)
transformation.setStateKeyType(selector.getProducedType)
transformation
}
// Code-generates the namespace-scoped aggregations handler used by the window
// operator: accumulate always; merge for merging windows (time-sliding,
// session); retract when the input delivers retractions.
private def createAggsHandler(
aggInfoList: AggregateInfoList,
config: TableConfig,
relBuilder: RelBuilder,
fieldTypeInfos: Seq[LogicalType],
needRetraction: Boolean): GeneratedNamespaceAggsHandleFunction[_] = {
// Merging is needed for time-interval sliding windows (pane merging) and
// session windows (window merging).
val needMerge = window match {
case SlidingGroupWindow(_, _, size, _) if hasTimeIntervalType(size) => true
case SessionGroupWindow(_, _, _) => true
case _ => false
}
// Row-interval (count) windows use CountWindow as namespace, all others TimeWindow.
val windowClass = window match {
case TumblingGroupWindow(_, _, size) if hasRowIntervalType(size) =>
classOf[CountWindow]
case SlidingGroupWindow(_, _, size, _) if hasRowIntervalType(size) =>
classOf[CountWindow]
case _ => classOf[TimeWindow]
}
val generator = new AggsHandlerCodeGenerator(
CodeGeneratorContext(config),
relBuilder,
fieldTypeInfos,
copyInputField = false)
generator.needAccumulate()
if (needMerge) {
generator.needMerge(mergedAccOffset = 0, mergedAccOnHeap = false)
}
if (needRetraction) {
generator.needRetract()
}
generator.generateNamespaceAggsHandler(
"GroupingWindowAggsHandler",
aggInfoList,
namedProperties.map(_.property),
windowClass)
}
  /**
   * Builds the runtime [[WindowOperator]] for the configured group window.
   *
   * The window assigner (tumble/slide/session, processing- vs event-time,
   * time- vs row-interval) is selected by pattern matching on `window`.
   * Event-time windows over row intervals are rejected with an
   * UnsupportedOperationException (see TODOs below).
   *
   * @param config              table configuration (supplies the time zone)
   * @param aggsHandler         generated per-namespace aggregation handler
   * @param recordEqualiser     generated equaliser used when emitting updates
   * @param accTypes            logical types of the accumulator fields
   * @param windowPropertyTypes logical types of the named window properties
   * @param aggValueTypes       logical types of the aggregate result fields
   * @param inputFields         logical types of the physical input fields
   * @param timeIdx             index of the event-time attribute in the input row
   *                            (only consulted for event-time windows)
   */
  private def createWindowOperator(
      config: TableConfig,
      aggsHandler: GeneratedNamespaceAggsHandleFunction[_],
      recordEqualiser: GeneratedRecordEqualiser,
      accTypes: Array[LogicalType],
      windowPropertyTypes: Array[LogicalType],
      aggValueTypes: Array[LogicalType],
      inputFields: Seq[LogicalType],
      timeIdx: Int): WindowOperator[_, _] = {
    val builder = WindowOperatorBuilder
      .builder()
      .withInputFields(inputFields.toArray)
    // NOTE(review): the configured zone offset is negated before being handed
    // to the assigners — presumably to shift window boundaries from local time
    // back to epoch time; confirm against WindowOperatorBuilder's contract.
    val timeZoneOffset = -config.getTimeZone.getOffset(Calendar.ZONE_OFFSET)
    val newBuilder = window match {
      case TumblingGroupWindow(_, timeField, size)
          if isProctimeAttribute(timeField) && hasTimeIntervalType(size) =>
        builder.tumble(toDuration(size), timeZoneOffset).withProcessingTime()

      case TumblingGroupWindow(_, timeField, size)
          if isRowtimeAttribute(timeField) && hasTimeIntervalType(size) =>
        builder.tumble(toDuration(size), timeZoneOffset).withEventTime(timeIdx)

      case TumblingGroupWindow(_, timeField, size)
          if isProctimeAttribute(timeField) && hasRowIntervalType(size) =>
        builder.countWindow(toLong(size))

      case TumblingGroupWindow(_, _, _) =>
        // TODO: EventTimeTumblingGroupWindow should sort the stream on event time
        // before applying the windowing logic. Otherwise, this would be the same as a
        // ProcessingTimeTumblingGroupWindow
        throw new UnsupportedOperationException(
          "Event-time grouping windows on row intervals are currently not supported.")

      case SlidingGroupWindow(_, timeField, size, slide)
          if isProctimeAttribute(timeField) && hasTimeIntervalType(size) =>
        builder.sliding(toDuration(size), toDuration(slide), timeZoneOffset)
          .withProcessingTime()

      case SlidingGroupWindow(_, timeField, size, slide)
          if isRowtimeAttribute(timeField) && hasTimeIntervalType(size) =>
        builder.sliding(toDuration(size), toDuration(slide), timeZoneOffset)
          .withEventTime(timeIdx)

      case SlidingGroupWindow(_, timeField, size, slide)
          if isProctimeAttribute(timeField) && hasRowIntervalType(size) =>
        builder.countWindow(toLong(size), toLong(slide))

      case SlidingGroupWindow(_, _, _, _) =>
        // TODO: EventTimeTumblingGroupWindow should sort the stream on event time
        // before applying the windowing logic. Otherwise, this would be the same as a
        // ProcessingTimeTumblingGroupWindow
        throw new UnsupportedOperationException(
          "Event-time grouping windows on row intervals are currently not supported.")

      case SessionGroupWindow(_, timeField, gap)
          if isProctimeAttribute(timeField) =>
        builder.session(toDuration(gap)).withProcessingTime()

      case SessionGroupWindow(_, timeField, gap)
          if isRowtimeAttribute(timeField) =>
        builder.session(toDuration(gap)).withEventTime(timeIdx)
    }

    if (emitStrategy.produceUpdates) {
      // mark this operator will send retraction and set new trigger
      // NOTE(review): the chained result is discarded here, which is only
      // correct if these builder methods mutate `newBuilder` in place —
      // confirm against WindowOperatorBuilder.
      newBuilder
        .withSendRetraction()
        .triggering(emitStrategy.getTrigger)
    }

    newBuilder
      .aggregate(aggsHandler, recordEqualiser, accTypes, aggValueTypes, windowPropertyTypes)
      .withAllowedLateness(Duration.ofMillis(emitStrategy.getAllowLateness))
      .build()
  }
}
| shaoxuan-wang/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/nodes/physical/stream/StreamExecGroupWindowAggregate.scala | Scala | apache-2.0 | 14,251 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.action.builder
import io.gatling.core.action.{ Action, ExitHereIfFailed }
import io.gatling.core.structure.ScenarioContext
object ExitHereIfFailedBuilder extends ActionBuilder {

  /**
   * Wires up an [[ExitHereIfFailed]] action with the scenario's core
   * components, chaining it in front of the given `next` action.
   */
  override def build(ctx: ScenarioContext, next: Action): Action = {
    val core = ctx.coreComponents
    new ExitHereIfFailed(core.exit, core.statsEngine, next)
  }
}
| wiacekm/gatling | gatling-core/src/main/scala/io/gatling/core/action/builder/ExitHereIfFailedBuilder.scala | Scala | apache-2.0 | 980 |
package org.vertx.scala.core
import org.vertx.java.core.buffer.{ Buffer => JBuffer }
import org.vertx.java.core.eventbus.{ EventBus => JEventBus }
import org.vertx.java.core.eventbus.{ Message => JMessage }
import org.vertx.scala.core.json.JsonArray
import org.vertx.scala.core.json.JsonObject
import org.vertx.scala.core.buffer.Buffer
/**
* @author <a href="http://www.campudus.com/">Joern Bernhardt</a>
*/
package object eventbus {

  /**
   * Type class adapting a payload value to the Java event bus API. Each
   * implementation wraps one payload type in `data` and knows how to send,
   * publish and reply with it on a [[JEventBus]] / [[JMessage]].
   */
  sealed trait MessageData {
    // Concrete Scala/Java type of the wrapped payload.
    type InternalType
    val data: InternalType

    // Fire-and-forget point-to-point send.
    def send(eb: JEventBus, address: String)
    // Point-to-point send expecting a reply of type X.
    def send[X](eb: JEventBus, address: String, resultHandler: Handler[JMessage[X]])
    // Point-to-point send that fails the result handler after `timeout` ms.
    def sendWithTimeout[X](eb: JEventBus, address: String, resultHandler: Handler[AsyncResult[JMessage[X]]], timeout: Long)
    // Broadcast to all registered handlers at `address`.
    def publish(eb: JEventBus, address: String)
    // Reply to a received message, optionally expecting a further reply.
    def reply[A](msg: JMessage[A])
    def reply[A, B](msg: JMessage[A], resultHandler: Handler[JMessage[B]])
    def replyWithTimeout[A, B](msg: JMessage[A], timeout: Long, resultHandler: Handler[AsyncResult[JMessage[B]]])
  }

  /**
   * A [[MessageData]] whose payload is a Java-side type and that can be
   * converted to its Scala-facing counterpart via `asScala`.
   */
  sealed trait JMessageData extends MessageData {
    def asScala: MessageData
  }

  import scala.language.implicitConversions

  // Dispatches an arbitrary payload to the matching MessageData wrapper.
  // Anything outside the supported set is rejected at runtime.
  implicit def anyToMessageData(any: Any): MessageData = any match {
    case sth: String => StringData(sth)
    case sth: JsonArray => JsonArrayData(sth)
    case sth: JsonObject => JsonObjectData(sth)
    case sth: Buffer => BufferData(sth)
    // BufferData passes down the java buffer version
    // The opposite happens here, convert the java version to the scala Buffer
    // avoiding exposure of Java Buffer version to the client
    case sth: JBuffer => BufferData(Buffer(sth))
    case sth: Array[Byte] => ByteArrayData(sth)
    case sth: Boolean => BooleanData(sth)
    case sth: Integer => IntegerData(sth)
    case sth: Long => LongData(sth)
    case sth: Short => ShortData(sth)
    case sth: Float => FloatData(sth)
    case sth: Double => DoubleData(sth)
    case sth: Character => CharacterData(sth)
    case x => throw new IllegalArgumentException("Cannot convert type of " + x + " to MessageData!")
  }

  /** Adapts String payloads to the event bus. */
  implicit class StringData(val data: String) extends MessageData {
    type InternalType = String
    def send(eb: JEventBus, address: String) = eb.send(address, data)
    def send[T](eb: JEventBus, address: String, handler: Handler[JMessage[T]]) =
      eb.send(address, data, handler)
    def sendWithTimeout[T](eb: JEventBus, address: String, handler: Handler[AsyncResult[JMessage[T]]], timeout: Long) =
      eb.sendWithTimeout(address, data, timeout, handler)
    def publish(eb: JEventBus, address: String) = eb.publish(address, data)
    def reply[A](msg: JMessage[A]) = msg.reply(data)
    def reply[A, B](msg: JMessage[A], handler: Handler[JMessage[B]]) = msg.reply(data, handler)
    def replyWithTimeout[A, B](msg: JMessage[A], timeout: Long, handler: Handler[AsyncResult[JMessage[B]]]) =
      msg.replyWithTimeout(data, timeout, handler)
  }

  /** Adapts JsonObject payloads to the event bus. */
  implicit class JsonObjectData(val data: JsonObject) extends MessageData {
    type InternalType = JsonObject
    def send(eb: JEventBus, address: String) = eb.send(address, data)
    def send[T](eb: JEventBus, address: String, handler: Handler[JMessage[T]]) =
      eb.send(address, data, handler)
    def sendWithTimeout[T](eb: JEventBus, address: String, handler: Handler[AsyncResult[JMessage[T]]], timeout: Long) =
      eb.sendWithTimeout(address, data, timeout, handler)
    def publish(eb: JEventBus, address: String) = eb.publish(address, data)
    def reply[A](msg: JMessage[A]) = msg.reply(data)
    def reply[A, B](msg: JMessage[A], handler: Handler[JMessage[B]]) = msg.reply(data, handler)
    def replyWithTimeout[A, B](msg: JMessage[A], timeout: Long, handler: Handler[AsyncResult[JMessage[B]]]) =
      msg.replyWithTimeout(data, timeout, handler)
  }

  /** Adapts JsonArray payloads to the event bus. */
  implicit class JsonArrayData(val data: JsonArray) extends MessageData {
    type InternalType = JsonArray
    def send(eb: JEventBus, address: String) = eb.send(address, data)
    def send[T](eb: JEventBus, address: String, handler: Handler[JMessage[T]]) =
      eb.send(address, data, handler)
    def sendWithTimeout[T](eb: JEventBus, address: String, handler: Handler[AsyncResult[JMessage[T]]], timeout: Long) =
      eb.sendWithTimeout(address, data, timeout, handler)
    def publish(eb: JEventBus, address: String) = eb.publish(address, data)
    def reply[A](msg: JMessage[A]) = msg.reply(data)
    def reply[A, B](msg: JMessage[A], handler: Handler[JMessage[B]]) = msg.reply(data, handler)
    def replyWithTimeout[A, B](msg: JMessage[A], timeout: Long, handler: Handler[AsyncResult[JMessage[B]]]) =
      msg.replyWithTimeout(data, timeout, handler)
  }

  /**
   * Adapts the Scala Buffer wrapper to the event bus; the underlying Java
   * buffer (`data.asJava`) is what actually travels on the bus.
   */
  implicit class BufferData(val data: Buffer) extends MessageData {
    type InternalType = Buffer
    def send(eb: JEventBus, address: String) = eb.send(address, data.asJava)
    def send[T](eb: JEventBus, address: String, handler: Handler[JMessage[T]]) =
      eb.send(address, data.asJava, handler)
    def sendWithTimeout[T](eb: JEventBus, address: String, handler: Handler[AsyncResult[JMessage[T]]], timeout: Long) =
      eb.sendWithTimeout(address, data.asJava, timeout, handler)
    def publish(eb: JEventBus, address: String) = eb.publish(address, data.asJava)
    def reply[A](msg: JMessage[A]) = msg.reply(data.asJava)
    def reply[A, B](msg: JMessage[A], handler: Handler[JMessage[B]]) = msg.reply(data.asJava, handler)
    def replyWithTimeout[A, B](msg: JMessage[A], timeout: Long, handler: Handler[AsyncResult[JMessage[B]]]) =
      msg.replyWithTimeout(data.asJava, timeout, handler)
  }

  /**
   * Adapts a raw Java Buffer; `asScala` re-wraps it in the Scala Buffer
   * facade so clients never see the Java type.
   * NOTE(review): `asScala()` is declared with empty parens while the trait
   * declares it paren-less — Scala 2 permits this override, but the mismatch
   * is worth confirming against external callers before normalizing.
   */
  implicit class JBufferData(val data: JBuffer) extends JMessageData {
    type InternalType = JBuffer
    def send(eb: JEventBus, address: String) = eb.send(address, data)
    def send[T](eb: JEventBus, address: String, handler: Handler[JMessage[T]]) =
      eb.send(address, data, handler)
    def sendWithTimeout[T](eb: JEventBus, address: String, handler: Handler[AsyncResult[JMessage[T]]], timeout: Long) =
      eb.sendWithTimeout(address, data, timeout, handler)
    def publish(eb: JEventBus, address: String) = eb.publish(address, data)
    def reply[A](msg: JMessage[A]) = msg.reply(data)
    def reply[A, B](msg: JMessage[A], handler: Handler[JMessage[B]]) = msg.reply(data, handler)
    def asScala(): BufferData = BufferData(Buffer(data))
    def replyWithTimeout[A, B](msg: JMessage[A], timeout: Long, handler: Handler[AsyncResult[JMessage[B]]]) =
      msg.replyWithTimeout(data, timeout, handler)
  }

  /** Adapts byte-array payloads to the event bus. */
  implicit class ByteArrayData(val data: Array[Byte]) extends MessageData {
    type InternalType = Array[Byte]
    def send(eb: JEventBus, address: String) = eb.send(address, data)
    def send[T](eb: JEventBus, address: String, handler: Handler[JMessage[T]]) =
      eb.send(address, data, handler)
    def sendWithTimeout[T](eb: JEventBus, address: String, handler: Handler[AsyncResult[JMessage[T]]], timeout: Long) =
      eb.sendWithTimeout(address, data, timeout, handler)
    def publish(eb: JEventBus, address: String) = eb.publish(address, data)
    def reply[A](msg: JMessage[A]) = msg.reply(data)
    def reply[A, B](msg: JMessage[A], handler: Handler[JMessage[B]]) = msg.reply(data, handler)
    def replyWithTimeout[A, B](msg: JMessage[A], timeout: Long, handler: Handler[AsyncResult[JMessage[B]]]) =
      msg.replyWithTimeout(data, timeout, handler)
  }

  /**
   * Adapts Int payloads. The explicit Int.box in the handler variant forces
   * the java.lang.Integer overload of the Java API; the other calls rely on
   * implicit boxing.
   */
  implicit class IntegerData(val data: Int) extends MessageData {
    type InternalType = Int
    def send(eb: JEventBus, address: String) = eb.send(address, data)
    def send[T](eb: JEventBus, address: String, handler: Handler[JMessage[T]]) =
      eb.send(address, Int.box(data), handler)
    def sendWithTimeout[T](eb: JEventBus, address: String, handler: Handler[AsyncResult[JMessage[T]]], timeout: Long) =
      eb.sendWithTimeout(address, data, timeout, handler)
    def publish(eb: JEventBus, address: String) = eb.publish(address, data)
    def reply[A](msg: JMessage[A]) = msg.reply(data)
    def reply[A, B](msg: JMessage[A], handler: Handler[JMessage[B]]) = msg.reply(data, handler)
    def replyWithTimeout[A, B](msg: JMessage[A], timeout: Long, handler: Handler[AsyncResult[JMessage[B]]]) =
      msg.replyWithTimeout(data, timeout, handler)
  }

  /** Adapts Long payloads (explicit Long.box in the handler variant). */
  implicit class LongData(val data: Long) extends MessageData {
    type InternalType = Long
    def send(eb: JEventBus, address: String) = eb.send(address, data)
    def send[T](eb: JEventBus, address: String, handler: Handler[JMessage[T]]) =
      eb.send(address, Long.box(data), handler)
    def sendWithTimeout[T](eb: JEventBus, address: String, handler: Handler[AsyncResult[JMessage[T]]], timeout: Long) =
      eb.sendWithTimeout(address, data, timeout, handler)
    def publish(eb: JEventBus, address: String) = eb.publish(address, data)
    def reply[A](msg: JMessage[A]) = msg.reply(data)
    def reply[A, B](msg: JMessage[A], handler: Handler[JMessage[B]]) = msg.reply(data, handler)
    def replyWithTimeout[A, B](msg: JMessage[A], timeout: Long, handler: Handler[AsyncResult[JMessage[B]]]) =
      msg.replyWithTimeout(data, timeout, handler)
  }

  /** Adapts Short payloads (explicit Short.box in the handler variant). */
  implicit class ShortData(val data: Short) extends MessageData {
    type InternalType = Short
    def send(eb: JEventBus, address: String) = eb.send(address, data)
    def send[T](eb: JEventBus, address: String, handler: Handler[JMessage[T]]) =
      eb.send(address, Short.box(data), handler)
    def sendWithTimeout[T](eb: JEventBus, address: String, handler: Handler[AsyncResult[JMessage[T]]], timeout: Long) =
      eb.sendWithTimeout(address, data, timeout, handler)
    def publish(eb: JEventBus, address: String) = eb.publish(address, data)
    def reply[A](msg: JMessage[A]) = msg.reply(data)
    def reply[A, B](msg: JMessage[A], handler: Handler[JMessage[B]]) = msg.reply(data, handler)
    def replyWithTimeout[A, B](msg: JMessage[A], timeout: Long, handler: Handler[AsyncResult[JMessage[B]]]) =
      msg.replyWithTimeout(data, timeout, handler)
  }

  /** Adapts java.lang.Character payloads to the event bus. */
  implicit class CharacterData(val data: Character) extends MessageData {
    type InternalType = Character
    def send(eb: JEventBus, address: String) = eb.send(address, data)
    def send[T](eb: JEventBus, address: String, handler: Handler[JMessage[T]]) =
      eb.send(address, data, handler)
    def sendWithTimeout[T](eb: JEventBus, address: String, handler: Handler[AsyncResult[JMessage[T]]], timeout: Long) =
      eb.sendWithTimeout(address, data, timeout, handler)
    def publish(eb: JEventBus, address: String) = eb.publish(address, data)
    def reply[A](msg: JMessage[A]) = msg.reply(data)
    def reply[A, B](msg: JMessage[A], handler: Handler[JMessage[B]]) = msg.reply(data, handler)
    def replyWithTimeout[A, B](msg: JMessage[A], timeout: Long, handler: Handler[AsyncResult[JMessage[B]]]) =
      msg.replyWithTimeout(data, timeout, handler)
  }

  /** Adapts Boolean payloads to the event bus. */
  implicit class BooleanData(val data: Boolean) extends MessageData {
    type InternalType = Boolean
    def send(eb: JEventBus, address: String) = eb.send(address, data)
    def send[T](eb: JEventBus, address: String, handler: Handler[JMessage[T]]) =
      eb.send(address, data, handler)
    def sendWithTimeout[T](eb: JEventBus, address: String, handler: Handler[AsyncResult[JMessage[T]]], timeout: Long) =
      eb.sendWithTimeout(address, data, timeout, handler)
    def publish(eb: JEventBus, address: String) = eb.publish(address, data)
    def reply[A](msg: JMessage[A]) = msg.reply(data)
    def reply[A, B](msg: JMessage[A], handler: Handler[JMessage[B]]) = msg.reply(data, handler)
    def replyWithTimeout[A, B](msg: JMessage[A], timeout: Long, handler: Handler[AsyncResult[JMessage[B]]]) =
      msg.replyWithTimeout(data, timeout, handler)
  }

  /** Adapts Float payloads (explicit Float.box in the handler variant). */
  implicit class FloatData(val data: Float) extends MessageData {
    type InternalType = Float
    def send(eb: JEventBus, address: String) = eb.send(address, data)
    def send[T](eb: JEventBus, address: String, handler: Handler[JMessage[T]]) =
      eb.send(address, Float.box(data), handler)
    def sendWithTimeout[T](eb: JEventBus, address: String, handler: Handler[AsyncResult[JMessage[T]]], timeout: Long) =
      eb.sendWithTimeout(address, data, timeout, handler)
    def publish(eb: JEventBus, address: String) = eb.publish(address, data)
    def reply[A](msg: JMessage[A]) = msg.reply(data)
    def reply[A, B](msg: JMessage[A], handler: Handler[JMessage[B]]) = msg.reply(data, handler)
    def replyWithTimeout[A, B](msg: JMessage[A], timeout: Long, handler: Handler[AsyncResult[JMessage[B]]]) =
      msg.replyWithTimeout(data, timeout, handler)
  }

  /** Adapts Double payloads to the event bus. */
  implicit class DoubleData(val data: Double) extends MessageData {
    type InternalType = Double
    def send(eb: JEventBus, address: String) = eb.send(address, data)
    def send[T](eb: JEventBus, address: String, handler: Handler[JMessage[T]]) =
      eb.send(address, data, handler)
    def sendWithTimeout[T](eb: JEventBus, address: String, handler: Handler[AsyncResult[JMessage[T]]], timeout: Long) =
      eb.sendWithTimeout(address, data, timeout, handler)
    def publish(eb: JEventBus, address: String) = eb.publish(address, data)
    def reply[A](msg: JMessage[A]) = msg.reply(data)
    def reply[A, B](msg: JMessage[A], handler: Handler[JMessage[B]]) = msg.reply(data, handler)
    def replyWithTimeout[A, B](msg: JMessage[A], timeout: Long, handler: Handler[AsyncResult[JMessage[B]]]) =
      msg.replyWithTimeout(data, timeout, handler)
  }
} | vert-x/mod-lang-scala | src/main/scala/org/vertx/scala/core/eventbus/package.scala | Scala | apache-2.0 | 13,419 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.util
import java.net._
import java.io._
import java.lang.management.ManagementFactory
import java.util.zip.CRC32
import org.apache.samza.{SamzaException, Partition}
import org.apache.samza.system.{SystemFactory, SystemStreamPartition, SystemStream}
import java.util.Random
import org.apache.samza.config.Config
import org.apache.samza.config.SystemConfig
import org.apache.samza.config.JobConfig.Config2Job
import org.apache.samza.config.SystemConfig.Config2System
import org.apache.samza.config.ConfigException
import org.apache.samza.config.MapConfig
import scala.collection.JavaConversions._
import org.apache.samza.config.JobConfig
import java.io.InputStreamReader
import scala.collection.immutable.Map
import org.apache.samza.serializers._
object Util extends Logging {
  val random = new Random

  /** Current wall-clock time in milliseconds. */
  def clock: Long = System.currentTimeMillis

  /**
   * Make an environment variable string safe to pass.
   */
  def envVarEscape(str: String) = str.replace("\\"", "\\\\\\"").replace("'", "\\\\'")

  /**
   * Get a random number >= startInclusive, and < endExclusive.
   */
  def randomBetween(startInclusive: Int, endExclusive: Int) =
    startInclusive + random.nextInt(endExclusive - startInclusive)

  /**
   * Recursively remove a directory (or file), and all sub-directories. Equivalent
   * to rm -rf. A null argument is a no-op.
   */
  def rm(file: File) {
    if (file == null) {
      return
    } else if (file.isDirectory) {
      // listFiles returns null on I/O error or if the path is not a directory.
      val files = file.listFiles()
      if (files != null) {
        for (f <- files)
          rm(f)
      }
      file.delete()
    } else {
      file.delete()
    }
  }

  /**
   * Instantiate a class instance from a given className via its no-arg
   * constructor, cast to the expected type T.
   */
  def getObj[T](className: String) = {
    Class
      .forName(className)
      .newInstance
      .asInstanceOf[T]
  }

  /**
   * Returns a SystemStream object based on the system stream name given. For
   * example, kafka.topic would return new SystemStream("kafka", "topic").
   * The stream part may itself contain dots; only the first '.' splits.
   */
  def getSystemStreamFromNames(systemStreamNames: String): SystemStream = {
    val idx = systemStreamNames.indexOf('.')
    if (idx < 0) {
      throw new IllegalArgumentException("No '.' in stream name '" + systemStreamNames + "'. Stream names should be in the form 'system.stream'")
    }
    new SystemStream(systemStreamNames.substring(0, idx), systemStreamNames.substring(idx + 1, systemStreamNames.length))
  }

  /**
   * Returns the "system.stream" name for the given SystemStream — the inverse
   * of getSystemStreamFromNames.
   */
  def getNameFromSystemStream(systemStream: SystemStream) = {
    systemStream.getSystem + "." + systemStream.getStream
  }

  /**
   * Makes sure that an object is not null, and throws a NullPointerException
   * if it is.
   */
  def notNull[T](obj: T, msg: String) = if (obj == null) {
    throw new NullPointerException(msg)
  }

  /**
   * Returns the name representing the JVM. It usually contains the PID of the process plus some additional information
   * @return String that contains the name representing this JVM
   */
  def getContainerPID(): String = {
    ManagementFactory.getRuntimeMXBean().getName()
  }

  /**
   * Overloaded read defined so that it can be called from Java, which cannot
   * use Scala default arguments.
   */
  def read(url: URL, timeout: Int): String = {
    read(url, timeout, new ExponentialSleepStrategy)
  }

  /**
   * Reads a URL and returns its body as a string. Does no error handling.
   *
   * @param url HTTP URL to read from.
   * @param timeout How long to wait before timing out when connecting to or reading from the HTTP server.
   * @param retryBackoff Instance of exponentialSleepStrategy that encapsulates info on how long to sleep and retry operation
   * @return String payload of the body of the HTTP response.
   */
  def read(url: URL, timeout: Int = 60000, retryBackoff: ExponentialSleepStrategy = new ExponentialSleepStrategy): String = {
    var httpConn = getHttpConnection(url, timeout)
    retryBackoff.run(loop => {
      if(httpConn.getResponseCode != 200)
      {
        warn("Error: " + httpConn.getResponseCode)
        val errorContent = readStream(httpConn.getErrorStream)
        warn("Error reading stream, failed with response %s" format errorContent)
        // Open a fresh connection for the next retry attempt.
        httpConn = getHttpConnection(url, timeout)
      }
      else
      {
        loop.done
      }
    },
    (exception, loop) => {
      exception match {
        case ioe: IOException => {
          // Transient I/O failures are retried with a fresh connection.
          warn("Error getting response from Job coordinator server. received IOException: %s. Retrying..." format ioe.getClass)
          httpConn = getHttpConnection(url, timeout)
        }
        case e: Exception =>
          // Any other exception aborts the retry loop and propagates.
          loop.done
          error("Unable to connect to Job coordinator server, received exception", e)
          throw e
      }
    })

    if(httpConn.getResponseCode != 200) {
      throw new SamzaException("Unable to read JobModel from Jobcoordinator HTTP server")
    }
    readStream(httpConn.getInputStream)
  }

  /** Opens an HTTP connection with identical connect and read timeouts. */
  private def getHttpConnection(url: URL, timeout: Int): HttpURLConnection = {
    val conn = url.openConnection()
    conn.setConnectTimeout(timeout)
    conn.setReadTimeout(timeout)
    conn.asInstanceOf[HttpURLConnection]
  }

  /**
   * Drains a stream into a string and closes it. Note that line terminators
   * are dropped by readLine and not re-inserted.
   */
  private def readStream(stream: InputStream): String = {
    val br = new BufferedReader(new InputStreamReader(stream));
    val body = Iterator.continually(br.readLine()).takeWhile(_ != null).mkString
    br.close
    stream.close
    body
  }

  /**
   * Generates a coordinator stream name based off of the job name and job id
   * for the jobd. The format is of the stream name will be
   * __samza_coordinator_<JOBNAME>_<JOBID>. Underscores in the name/id are
   * replaced with dashes so '_' remains an unambiguous separator.
   */
  def getCoordinatorStreamName(jobName: String, jobId: String) = {
    "__samza_coordinator_%s_%s" format (jobName.replaceAll("_", "-"), jobId.replaceAll("_", "-"))
  }

  /**
   * Get a job's name and ID given a config. Job ID is defaulted to 1 if not
   * defined in the config, and job name must be defined in config.
   *
   * @return A tuple of (jobName, jobId)
   */
  def getJobNameAndId(config: Config) = {
    (config.getName.getOrElse(throw new ConfigException("Missing required config: job.name")), config.getJobId.getOrElse("1"))
  }

  /**
   * Given a job's full config object, build a subset config which includes
   * only the job name, job id, and system config for the coordinator stream.
   */
  def buildCoordinatorStreamConfig(config: Config) = {
    val (jobName, jobId) = getJobNameAndId(config)
    // Build a map with just the system config and job.name/job.id. This is what's required to start the JobCoordinator.
    new MapConfig(
      config.subset(SystemConfig.SYSTEM_PREFIX format config.getCoordinatorSystemName, false) ++
      Map[String, String](
        JobConfig.JOB_NAME -> jobName,
        JobConfig.JOB_ID -> jobId,
        JobConfig.JOB_COORDINATOR_SYSTEM -> config.getCoordinatorSystemName,
        JobConfig.MONITOR_PARTITION_CHANGE -> String.valueOf(config.getMonitorPartitionChange),
        JobConfig.MONITOR_PARTITION_CHANGE_FREQUENCY_MS -> String.valueOf(config.getMonitorPartitionChangeFrequency)))
  }

  /**
   * Get the coordinator SystemStream and its SystemFactory from the configuration.
   *
   * @throws SamzaException if no factory is configured for the coordinator system
   */
  def getCoordinatorSystemStreamAndFactory(config: Config) = {
    val systemName = config.getCoordinatorSystemName
    val (jobName, jobId) = Util.getJobNameAndId(config)
    val streamName = Util.getCoordinatorStreamName(jobName, jobId)
    val coordinatorSystemStream = new SystemStream(systemName, streamName)
    val systemFactoryClassName = config
      .getSystemFactory(systemName)
      .getOrElse(throw new SamzaException("Missing configuration: " + SystemConfig.SYSTEM_FACTORY format systemName))
    val systemFactory = Util.getObj[SystemFactory](systemFactoryClassName)
    (coordinatorSystemStream, systemFactory)
  }

  /**
   * The helper function converts a SSP to a string
   * @param ssp System stream partition
   * @return The string representation of the SSP
   */
  def sspToString(ssp: SystemStreamPartition): String = {
    ssp.getSystem() + "." + ssp.getStream() + "." + String.valueOf(ssp.getPartition().getPartitionId())
  }

  /**
   * The method converts the string SSP back to a SSP
   * @param ssp The string form of the SSP
   * @return An SSP typed object
   */
  def stringToSsp(ssp: String): SystemStreamPartition = {
    // The stream name may contain dots, so split on the first and last '.'.
    val idx = ssp.indexOf('.');
    val lastIdx = ssp.lastIndexOf('.')
    if (idx < 0 || lastIdx < 0) {
      throw new IllegalArgumentException("System stream partition expected in format 'system.stream.partition'")
    }
    new SystemStreamPartition(new SystemStream(ssp.substring(0, idx), ssp.substring(idx + 1, lastIdx)),
                              new Partition(Integer.parseInt(ssp.substring(lastIdx + 1))))
  }

  /**
   * Method to generate the CRC32 checksum code for any given data
   * @param data The string for which checksum has to be generated
   * @return long type value representing the checksum
   * */
  def getChecksumValue(data: String) = {
    val crc = new CRC32
    crc.update(data.getBytes)
    crc.getValue
  }

  /**
   * Method that always writes checksum & data to a file
   * Checksum is pre-fixed to the data and is a 32-bit long type data.
   * @param file The file handle to write to
   * @param data The data to be written to the file
   * */
  def writeDataToFile(file: File, data: String) = {
    val checksum = getChecksumValue(data)
    val fos = new FileOutputStream(file)
    try {
      // Nested try/finally: if the ObjectOutputStream constructor throws, the
      // underlying FileOutputStream is still closed (the old flat finally
      // would NPE on the never-assigned wrapper and leak the descriptor).
      val oos = new ObjectOutputStream(fos)
      try {
        oos.writeLong(checksum)
        oos.writeUTF(data)
      } finally {
        oos.close()
      }
    } finally {
      // oos.close() already closes fos; a second close is a harmless no-op,
      // but this guarantees release on every path.
      fos.close()
    }
  }

  /**
   * Method to read from a file that has a checksum prepended to the data.
   * Returns the data if the stored checksum matches, otherwise logs and
   * returns null.
   * @param file The file handle to read from
   * */
  def readDataFromFile(file: File) = {
    val fis = new FileInputStream(file)
    try {
      // Same nested pattern as writeDataToFile: never close a stream that was
      // never constructed, and always close the underlying stream.
      val ois = new ObjectInputStream(fis)
      try {
        val checksumFromFile = ois.readLong()
        val data = ois.readUTF()
        if (checksumFromFile == getChecksumValue(data)) {
          data
        } else {
          info("Checksum match failed. Data in file is corrupted. Skipping content.")
          null
        }
      } finally {
        ois.close()
      }
    } finally {
      fis.close()
    }
  }

  /**
   * Convert a java map to a Scala map
   * */
  def javaMapAsScalaMap[T, K](javaMap: java.util.Map[T, K]): Map[T, K] = {
    javaMap.toMap
  }

  /**
   * Returns the the first host address which is not the loopback address, or {@link java.net.InetAddress#getLocalHost InetAddress.getLocalhost()} as a fallback
   *
   * @return the {@link java.net.InetAddress InetAddress} which represents the localhost
   */
  def getLocalHost: InetAddress = {
    val localHost = InetAddress.getLocalHost
    if (localHost.isLoopbackAddress) {
      warn("Hostname %s resolves to a loopback address, trying to resolve an external IP address.".format(localHost.getHostName))
      // NOTE(review): interface order is reversed on non-Windows systems,
      // presumably to prefer physical NICs over virtual ones — confirm.
      val networkInterfaces = if (System.getProperty("os.name").startsWith("Windows")) NetworkInterface.getNetworkInterfaces.toList else NetworkInterface.getNetworkInterfaces.toList.reverse
      for (networkInterface <- networkInterfaces) {
        val addresses = networkInterface.getInetAddresses.toList.filterNot(address => address.isLinkLocalAddress || address.isLoopbackAddress)
        if (addresses.nonEmpty) {
          // Prefer an IPv4 address when one exists on the interface.
          val address = addresses.find(_.isInstanceOf[Inet4Address]).getOrElse(addresses.head)
          debug("Found an external IP address %s which represents the localhost.".format(address.getHostAddress))
          return InetAddress.getByAddress(address.getAddress)
        }
      }
    }
    localHost
  }

  /**
   * A helper function which returns system's default serde factory class according to the
   * serde name. If not found, throw exception.
   */
  def defaultSerdeFactoryFromSerdeName(serdeName: String) = {
    info("looking for default serdes")

    val serde = serdeName match {
      case "byte" => classOf[ByteSerdeFactory].getCanonicalName
      case "bytebuffer" => classOf[ByteBufferSerdeFactory].getCanonicalName
      case "integer" => classOf[IntegerSerdeFactory].getCanonicalName
      case "json" => classOf[JsonSerdeFactory].getCanonicalName
      case "long" => classOf[LongSerdeFactory].getCanonicalName
      case "serializable" => classOf[SerializableSerdeFactory[java.io.Serializable]].getCanonicalName
      case "string" => classOf[StringSerdeFactory].getCanonicalName
      case _ => throw new SamzaException("No class defined for serde %s" format serdeName)
    }

    info("use default serde %s for %s" format (serde, serdeName))
    serde
  }
}
| vjagadish/samza-clone | samza-core/src/main/scala/org/apache/samza/util/Util.scala | Scala | apache-2.0 | 13,665 |
package fpinscala.exercises.parsing
import fpinscala.answers.testing.exhaustive.*
import fpinscala.answers.testing.exhaustive.Gen.`**`
import fpinscala.answers.testing.exhaustive.Prop.*
import fpinscala.exercises.common.Common.*
import fpinscala.exercises.common.PropSuite
import fpinscala.exercises.parsing.JSON.*
import fpinscala.exercises.parsing.{JSON, Parsers}
// Exercise 9.9
/** Property-based checks for the exercise-9.9 JSON parser: null/number/string/
  * boolean literals, (nested) arrays and objects, and rejection of malformed
  * documents.
  * NOTE(review): `**` both pairs generators and destructures their results; it
  * comes from the project's test DSL (PropSuite/Gen), which is not visible here.
  */
class JSONSuite extends PropSuite:
  // Parser under test; UnitTestParser supplies the underlying parser
  // implementation (defined elsewhere in the project).
  private val parser = JSON.jsonParser(UnitTestParser)
  // `null` must parse to JNull both as an object value and as array elements.
  test("JSON.JNull")(Gen.unit(())) { _ =>
    assertEquals(parser.run("""{ "key": null }"""), Right(JObject(Map("key" -> JNull))))
    assertEquals(parser.run("""[ null ]"""), Right(JArray(IndexedSeq(JNull))))
    assertEquals(
      parser.run("""[ null, null, null ]"""),
      Right(JArray(IndexedSeq(JNull, JNull, JNull)))
    )
  }
  // Arbitrary doubles round-trip through their default string rendering.
  test("JSON.JNumber")(Gen.double ** Gen.double ** Gen.double) { case d1 ** d2 ** d3 =>
    assertEquals(parser.run(s"""{ "key": $d1 }"""), Right(JObject(Map("key" -> JNumber(d1)))))
    assertEquals(parser.run(s"""[ $d1 ]"""), Right(JArray(IndexedSeq(JNumber(d1)))))
    assertEquals(
      parser.run(s"""[ $d1, $d2, $d3 ]"""),
      Right(JArray(IndexedSeq(JNumber(d1), JNumber(d2), JNumber(d3))))
    )
  }
  // genString is a project-provided generator; presumably it excludes quotes
  // and escapes so the interpolated documents stay valid JSON — TODO confirm.
  test("JSON.JString")(genString ** genString ** genString) { case s1 ** s2 ** s3 =>
    assertEquals(parser.run(s"""{ "key": "$s1" }"""), Right(JObject(Map("key" -> JString(s1)))))
    assertEquals(parser.run(s"""[ "$s1" ]"""), Right(JArray(IndexedSeq(JString(s1)))))
    assertEquals(
      parser.run(s"""[ "$s1", "$s2", "$s3" ]"""),
      Right(JArray(IndexedSeq(JString(s1), JString(s2), JString(s3))))
    )
  }
  // Booleans render as "true"/"false" and must parse back to JBool.
  test("JSON.JBool")(Gen.boolean ** Gen.boolean ** Gen.boolean) { case b1 ** b2 ** b3 =>
    assertEquals(parser.run(s"""{ "key": $b1 }"""), Right(JObject(Map("key" -> JBool(b1)))))
    assertEquals(parser.run(s"""[ $b1 ]"""), Right(JArray(IndexedSeq(JBool(b1)))))
    assertEquals(
      parser.run(s"""[ $b1, $b2, $b3 ]"""),
      Right(JArray(IndexedSeq(JBool(b1), JBool(b2), JBool(b3))))
    )
  }
  // Arrays: empty, heterogeneous, and nested-within-nested.
  test("JSON.JArray")(Gen.double ** genString ** Gen.boolean) { case d ** s ** b =>
    assertEquals(parser.run("[ ]"), Right(JArray(IndexedSeq.empty[JSON])))
    assertEquals(
      parser.run(s"""[ null, $d, "$s", $b ]"""),
      Right(JArray(IndexedSeq(JNull, JNumber(d), JString(s), JBool(b))))
    )
    assertEquals(
      parser.run(s"""[ null, [ $d, [ "$s", $b ] ] ]"""),
      Right(
        JArray(
          IndexedSeq(
            JNull,
            JArray(IndexedSeq(JNumber(d), JArray(IndexedSeq(JString(s), JBool(b)))))
          )
        )
      )
    )
  }
  // Flat-object fixture covering strings, booleans, numbers (incl. exponent
  // notation) and an array value.
  private val jObjectJson1 = """
  {
    "Company name" : "Microsoft Corporation",
    "Ticker" : "MSFT",
    "Active" : true,
    "Price" : 30.66,
    "Shares outstanding" : 8.38e9,
    "Related companies" : [ "HPQ", "IBM", "YHOO", "DELL", "GOOG" ]
  }
  """
  test("JSON.JObject, 1")(Gen.unit(())) { _ =>
    assertEquals(
      parser.run(jObjectJson1),
      Right(
        JObject(
          Map(
            "Company name" -> JString("Microsoft Corporation"),
            "Ticker" -> JString("MSFT"),
            "Active" -> JBool(true),
            "Price" -> JNumber(30.66),
            "Shares outstanding" -> JNumber(8.38e9),
            "Related companies" -> JArray(
              IndexedSeq(JString("HPQ"), JString("IBM"), JString("YHOO"), JString("DELL"), JString("GOOG"))
            )
          )
        )
      )
    )
  }
  // Deeply nested fixture: objects containing arrays containing objects.
  private val jObjectJson2 = """
  {
    "Book" : "Functional Programming in Scala, Second Edition",
    "Active" : true,
    "Pages" : 322,
    "Parts" : {
      "Part 1" : {
        "Title" : "Introduction to functional programming",
        "Content" : [
          {
            "Chapter 1": {
              "Title" : "What is functional programming?",
              "Content" : [
                "1.1 The benefits of FP: a simple example",
                [
                  "1.1.1 A program with side effects",
                  "1.1.2 A functional solution: removing the side effects"
                ]
              ]
            }
          }
        ]
      }
    }
  }
  """
  test("JSON.JObject, 2")(Gen.unit(())) { _ =>
    assertEquals(
      parser.run(jObjectJson2),
      Right(
        JObject(
          Map(
            "Book" -> JString("Functional Programming in Scala, Second Edition"),
            "Active" -> JBool(true),
            "Pages" -> JNumber(322),
            "Parts" -> JObject(
              Map(
                "Part 1" -> JObject(
                  Map(
                    "Title" -> JString("Introduction to functional programming"),
                    "Content" -> JArray(
                      IndexedSeq(
                        JObject(
                          Map(
                            "Chapter 1" -> JObject(
                              Map(
                                "Title" -> JString("What is functional programming?"),
                                "Content" -> JArray(
                                  IndexedSeq(
                                    JString("1.1 The benefits of FP: a simple example"),
                                    JArray(
                                      IndexedSeq(
                                        JString("1.1.1 A program with side effects"),
                                        JString("1.1.2 A functional solution: removing the side effects")
                                      )
                                    )
                                  )
                                )
                              )
                            )
                          )
                        )
                      )
                    )
                  )
                )
              )
            )
          )
        )
      )
    )
  }
  // Each fixture below is invalid JSON (unbalanced braces, missing separators,
  // stray tokens). Gen.unit lifts each string into a constant generator and
  // Gen.union presumably picks among them — confirm in the project's Gen.
  private val genMalformedJSONs: Gen[String] =
    IndexedSeq(
      """
      "Company name"
      """,
      """
      "Company name" : "Microsoft Corporation"
      """,
      """
      {
        "Company name" : "Microsoft Corporation"
      """,
      """
      "Company name" : "Microsoft Corporation"
      }
      """,
      """
      {
        "Company name" ; "Microsoft Corporation"
      }
      """,
      """
      [ "HPQ" "IBM" ]
      """,
      """
      [
        [ "HPQ", "IBM",
          "YHOO", "DELL" ++
          "GOOG"
        ]
      ]
      """
    ).map(Gen.unit).reduce(Gen.union)
  // Every malformed document must produce a parse error (Left).
  test("malformed JSONs")(genMalformedJSONs) { json =>
    assert(parser.run(json).isLeft)
  }
| fpinscala/fpinscala | src/test/scala/fpinscala/exercises/parsing/JSONSuite.scala | Scala | mit | 6,582 |
package com.twitter.finagle.netty4.ssl.client
import com.twitter.finagle.client.Transporter
import com.twitter.finagle.netty4.ssl.Alpn
import com.twitter.finagle.ssl.{ApplicationProtocols, Engine}
import com.twitter.finagle.ssl.client.{
SslClientConfiguration, SslClientEngineFactory, SslClientSessionVerifier}
import com.twitter.finagle.transport.Transport
import com.twitter.finagle.{Address, Stack}
import io.netty.channel.{Channel, ChannelInitializer, ChannelPipeline}
import io.netty.handler.ssl.SslHandler
import io.netty.util.concurrent.{Future => NettyFuture, GenericFutureListener}
/**
 * A [[ChannelInitializer]] that, driven by [[Stack.Params]], upgrades a client
 * channel's pipeline with the SSL/TLS handlers needed for transport encryption.
 *
 * Whether or not the pipeline ends up being modified (and even if an exception
 * is thrown), this handler removes itself from the pipeline once it has run.
 */
private[netty4] class Netty4ClientSslHandler(
  params: Stack.Params)
  extends ChannelInitializer[Channel] {
  // We must not close the channel synchronously on close_notify: it can arrive
  // while we are still decoding an inbound message spread over several TLS
  // records, and that message has to be fully decoded and propagated up the
  // pipeline first. Queueing the close on the event loop guarantees that.
  //
  // See CSL-1610 (internal ticket) for more details.
  private[this] val closeChannelOnCloseNotify =
    new GenericFutureListener[NettyFuture[Channel]] {
      def operationComplete(f: NettyFuture[Channel]): Unit = {
        if (f.isSuccess) {
          val channel = f.getNow
          if (channel != null) {
            channel.eventLoop().execute(new Runnable {
              def run(): Unit = channel.close()
            })
          }
        }
      }
    }
  /**
   * Picks the engine factory for this channel. When the stack still carries the
   * default client factory (`JdkClientEngineFactory`), it is replaced with a
   * [[Netty4ClientEngineFactory]] backed by the channel's allocator; a
   * user-configured factory is honored as-is.
   */
  private[this] def selectEngineFactory(ch: Channel): SslClientEngineFactory = {
    val SslClientEngineFactory.Param(stackDefault) =
      SslClientEngineFactory.Param.param.default
    val SslClientEngineFactory.Param(configured) =
      params[SslClientEngineFactory.Param]
    if (configured != stackDefault) configured
    else Netty4ClientEngineFactory(ch.alloc())
  }
  /**
   * Merges application protocols implied by the protocol in use (e.g. HTTP/2,
   * carried in the [[Alpn]] param) with any the user configured directly.
   */
  private[this] def combineApplicationProtocols(
    config: SslClientConfiguration
  ): SslClientConfiguration = {
    val Alpn(fromParams) = params[Alpn]
    val merged = ApplicationProtocols.combine(fromParams, config.applicationProtocols)
    config.copy(applicationProtocols = merged)
  }
  /**
   * Wraps the `SSLEngine` carried by the given [[Engine]] in an [[SslHandler]]
   * and arranges for the channel to be closed once the remote peer sends
   * close_notify.
   */
  private[this] def createSslHandler(engine: Engine): SslHandler = {
    val handler = new SslHandler(engine.self)
    handler.sslCloseFuture().addListener(closeChannelOnCloseNotify)
    handler
  }
  /** Builds the connect handler that applies session verification for `address`. */
  private[this] def createSslConnectHandler(
    sslHandler: SslHandler,
    address: Address,
    config: SslClientConfiguration
  ): SslClientConnectHandler = {
    val SslClientSessionVerifier.Param(sessionVerifier) =
      params[SslClientSessionVerifier.Param]
    new SslClientConnectHandler(sslHandler, address, config, sessionVerifier)
  }
  /** Installs both handlers at the head of the pipeline. */
  private[this] def addHandlersToPipeline(
    pipeline: ChannelPipeline,
    sslHandler: SslHandler,
    sslConnectHandler: SslClientConnectHandler
  ): Unit = {
    // Call order matters: "sslConnect" goes in first so that the second
    // addFirst leaves "ssl" ahead of it in the pipeline.
    pipeline.addFirst("sslConnect", sslConnectHandler)
    pipeline.addFirst("ssl", sslHandler)
  }
  /**
   * When client-side TLS is configured, builds an [[Engine]] from the endpoint
   * address and the combined [[SslClientConfiguration]], derives the Netty
   * handlers from it, and installs them into the channel's pipeline.
   */
  def initChannel(ch: Channel): Unit = {
    val Transporter.EndpointAddr(endpoint) = params[Transporter.EndpointAddr]
    val Transport.ClientSsl(maybeConfig) = params[Transport.ClientSsl]
    maybeConfig.foreach { clientConfig =>
      val engineFactory = selectEngineFactory(ch)
      val fullConfig = combineApplicationProtocols(clientConfig)
      val engine = engineFactory(endpoint, fullConfig)
      val handler = createSslHandler(engine)
      val connectHandler = createSslConnectHandler(handler, endpoint, fullConfig)
      addHandlersToPipeline(ch.pipeline(), handler, connectHandler)
    }
  }
}
| koshelev/finagle | finagle-netty4/src/main/scala/com/twitter/finagle/netty4/ssl/client/Netty4ClientSslHandler.scala | Scala | apache-2.0 | 4,870 |
package net.liftweb.http
import net.liftweb.http.provider.HTTPResponse
import net.liftweb.util.Helpers
/** Strategy for optionally timing a single HTTP service call.
  *
  * `doService` is the continuation that actually services the request;
  * implementations must invoke it exactly once and return its Boolean result,
  * optionally measuring/logging around the call.
  */
trait ServiceRequestTimer {
  def logTime(req: Req, resp: HTTPResponse)(doService: (Req, HTTPResponse) => Boolean): Boolean
}
/** A [[ServiceRequestTimer]] that performs no timing at all: it simply invokes
  * the wrapped service function and passes its result straight through.
  */
object NoOpServiceTimer extends ServiceRequestTimer {
  override def logTime(req: Req, resp: HTTPResponse)(doService: (Req, HTTPResponse) => Boolean): Boolean =
    doService(req, resp)
}
/** A [[ServiceRequestTimer]] that wraps each service call in `Helpers.logTime`,
  * producing a log line with the HTTP method, request URI and response status.
  * The message ends with a comma — presumably `Helpers.logTime` appends the
  * elapsed time after it (TODO confirm in Helpers).
  */
object StandardServiceTimer extends ServiceRequestTimer {
  override def logTime(req: Req, resp: HTTPResponse)(doService: (Req, HTTPResponse) => Boolean): Boolean =
    Helpers.logTime {
      // Service the request first so the logged status reflects the response
      // that was actually produced.
      val result = doService(req, resp)
      val message = "Service request (" + req.request.method + ") " + req.request.uri + " returned " + resp.getStatus + ","
      (message, result)
    }
}
| lift/framework | web/webkit/src/main/scala/net/liftweb/http/ServiceRequestTimer.scala | Scala | apache-2.0 | 802 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.libs.openid
import org.specs2.mutable.Specification
import org.specs2.mock._
import java.net.URL
import play.api.http.HeaderNames
import play.api.http.Status._
import scala.concurrent.duration.Duration
import scala.concurrent.Await
import java.util.concurrent.TimeUnit
import play.api.libs.ws._
import scala.concurrent.ExecutionContext.Implicits.global
/** Tests for the OpenID discovery client: identifier normalization (OpenID 2.0
  * spec §7.2), XRDS service resolution, and construction of the checkid_setup
  * redirect URL via Yadis- and HTML-based discovery. All HTTP traffic is
  * stubbed through `WSMock` (a project test helper, not visible in this file).
  */
class DiscoveryClientSpec extends Specification with Mockito {
  // Upper bound for Await.result on the asynchronous discovery calls.
  val dur = Duration(10, TimeUnit.SECONDS)
  // Runs a raw identifier through WsDiscovery.normalizeIdentifier using a
  // fresh mocked transport (the transport is never actually hit here).
  private def normalize(s: String) = {
    val ws = new WSMock
    val discovery = new WsDiscovery(ws)
    discovery.normalizeIdentifier(s)
  }
  "Discovery normalization" should {
    // Adapted from org.openid4java.discovery.NormalizationTest
    // Original authors: Marius Scurtescu, Johnny Bufu
    "normalize uppercase URL identifiers" in {
      normalize("HTTP://EXAMPLE.COM/") must be equalTo "http://example.com/"
    }
    "normalize percent signs" in {
      normalize("HTTP://EXAMPLE.COM/%63") must be equalTo "http://example.com/c"
    }
    // Default ports (80/443) are dropped; empty fragments are stripped.
    "normalize port" in {
      normalize("HTTP://EXAMPLE.COM:80/A/B?Q=Z#") must be equalTo "http://example.com/A/B?Q=Z"
      normalize("https://example.com:443") must be equalTo "https://example.com/"
    }
    // Dot-segments are resolved; an empty path with a query gains a slash.
    "normalize paths" in {
      normalize("http://example.com//a/./b/../b/c/") must be equalTo "http://example.com/a/b/c/"
      normalize("http://example.com?bla") must be equalTo "http://example.com/?bla"
    }
  }
  "Discovery normalization" should {
    // http://openid.net/specs/openid-authentication-2_0.html#normalization_example
    "normalize URLs according to he OpenID example in the spec" in {
      "A URI with a missing scheme is normalized to a http URI" in {
        normalize("example.com") must be equalTo "http://example.com/"
      }
      "An empty path component is normalized to a slash" in {
        normalize("http://example.com") must be equalTo "http://example.com/"
      }
      "https URIs remain https URIs" in {
        normalize("https://example.com/") must be equalTo "https://example.com/"
      }
      "No trailing slash is added to non-empty path components" in {
        normalize("http://example.com/user") must be equalTo "http://example.com/user"
      }
      "Trailing slashes are preserved on non-empty path components" in {
        normalize("http://example.com/user/") must be equalTo "http://example.com/user/"
      }
      "Trailing slashes are preserved when the path is empty" in {
        normalize("http://example.com/") must be equalTo "http://example.com/"
      }
    }
    // Spec 7.2 - Normalization
    "normalize URLs according to he OpenID 2.0 spec" in {
      // XRIs are currently not supported
      // 1. If the user's input starts with the "xri://" prefix, it MUST be stripped off, so that XRIs are used in the canonical form.
      // 2. If the first character of the resulting string is an XRI Global Context Symbol ("=", "@", "+", "$", "!") or "(", as defined in Section 2.2.1 of [XRI_Syntax_2.0], then the input SHOULD be treated as an XRI.
      // XRI is currently not supported
      "The input SHOULD be treated as an http URL; if it does not include a \\"http\\" or \\"https\\" scheme, the Identifier MUST be prefixed with the string \\"http://\\"." in {
        normalize("example.com") must be equalTo "http://example.com/"
      }
      "If the URL contains a fragment part, it MUST be stripped off together with the fragment delimiter character \\"#\\"." in {
        normalize("example.com#thefragment") must be equalTo "http://example.com/"
        normalize("example.com/#thefragment") must be equalTo "http://example.com/"
        normalize("http://example.com#thefragment") must be equalTo "http://example.com/"
        normalize("https://example.com/#thefragment") must be equalTo "https://example.com/"
      }
    }
  }
  // Each example below feeds a canned XRDS fixture through XrdsResolver and
  // checks which OpenID endpoint URL is extracted from it.
  "The XRDS resolver" should {
    import Discovery._
    "parse a Google account response" in {
      val response = mock[WSResponse]
      response.header(HeaderNames.CONTENT_TYPE) returns Some("application/xrds+xml")
      response.xml returns scala.xml.XML.loadString(readFixture("discovery/xrds/google-account-response.xml"))
      val maybeOpenIdServer = new XrdsResolver().resolve(response)
      maybeOpenIdServer.map(_.url) must beSome("https://www.google.com/accounts/o8/ud")
    }
    "parse an XRDS response with a single Service element" in {
      val response = mock[WSResponse]
      response.header(HeaderNames.CONTENT_TYPE) returns Some("application/xrds+xml")
      response.xml returns scala.xml.XML.loadString(readFixture("discovery/xrds/simple-op.xml"))
      val maybeOpenIdServer = new XrdsResolver().resolve(response)
      maybeOpenIdServer.map(_.url) must beSome("https://www.google.com/a/example.com/o8/ud?be=o8")
    }
    "parse an XRDS response with multiple Service elements" in {
      val response = mock[WSResponse]
      response.header(HeaderNames.CONTENT_TYPE) returns Some("application/xrds+xml")
      response.xml returns scala.xml.XML.loadString(readFixture("discovery/xrds/multi-service.xml"))
      val maybeOpenIdServer = new XrdsResolver().resolve(response)
      maybeOpenIdServer.map(_.url) must beSome("http://www.myopenid.com/server")
    }
    // See 7.3.2.2. Extracting Authentication Data
    "return the OP Identifier over the Claimed Identifier if both are present" in {
      val response = mock[WSResponse]
      response.header(HeaderNames.CONTENT_TYPE) returns Some("application/xrds+xml")
      response.xml returns scala.xml.XML.loadString(readFixture("discovery/xrds/multi-service-with-op-and-claimed-id-service.xml"))
      val maybeOpenIdServer = new XrdsResolver().resolve(response)
      maybeOpenIdServer.map(_.url) must beSome("http://openidprovider-opid.example.com")
    }
    "extract and use OpenID Authentication 1.0 service elements from XRDS documents, if Yadis succeeds on an URL Identifier." in {
      val response = mock[WSResponse]
      response.header(HeaderNames.CONTENT_TYPE) returns Some("application/xrds+xml")
      response.xml returns scala.xml.XML.loadString(readFixture("discovery/xrds/simple-openid-1-op.xml"))
      val maybeOpenIdServer = new XrdsResolver().resolve(response)
      maybeOpenIdServer.map(_.url) must beSome("http://openidprovider-server-1.example.com")
    }
    "extract and use OpenID Authentication 1.1 service elements from XRDS documents, if Yadis succeeds on an URL Identifier." in {
      val response = mock[WSResponse]
      response.header(HeaderNames.CONTENT_TYPE) returns Some("application/xrds+xml")
      response.xml returns scala.xml.XML.loadString(readFixture("discovery/xrds/simple-openid-1.1-op.xml"))
      val maybeOpenIdServer = new XrdsResolver().resolve(response)
      maybeOpenIdServer.map(_.url) must beSome("http://openidprovider-server-1.1.example.com")
    }
  }
  "OpenID.redirectURL" should {
    "resolve an OpenID server via Yadis" in {
      "with a single service element" in {
        val ws = new WSMock
        ws.response.xml returns scala.xml.XML.loadString(readFixture("discovery/xrds/simple-op.xml"))
        ws.response.header(HeaderNames.CONTENT_TYPE) returns Some("application/xrds+xml")
        val returnTo = "http://foo.bar.com/openid"
        val openId = "http://abc.example.com/foo"
        val redirectUrl = Await.result(new WsOpenIdClient(ws, new WsDiscovery(ws)).redirectURL(openId, returnTo), dur)
        there was one(ws.request).get()
        // hostAndPath is a project extension on java.net.URL — TODO confirm.
        new URL(redirectUrl).hostAndPath must be equalTo "https://www.google.com/a/example.com/o8/ud"
        verifyValidOpenIDRequest(parseQueryString(redirectUrl), openId, returnTo)
      }
      "should redirect to identifier selection" in {
        val ws = new WSMock
        ws.response.xml returns scala.xml.XML.loadString(readFixture("discovery/xrds/simple-op-non-unique.xml"))
        ws.response.header(HeaderNames.CONTENT_TYPE) returns Some("application/xrds+xml")
        val returnTo = "http://foo.bar.com/openid"
        val openId = "http://abc.example.com/foo"
        val identifierSelection = "http://specs.openid.net/auth/2.0/identifier_select"
        val redirectUrl = Await.result(new WsOpenIdClient(ws, new WsDiscovery(ws)).redirectURL(openId, returnTo), dur)
        there was one(ws.request).get()
        new URL(redirectUrl).hostAndPath must be equalTo "https://www.google.com/a/example.com/o8/ud"
        verifyValidOpenIDRequest(parseQueryString(redirectUrl), identifierSelection, returnTo)
      }
      // The `thenReturns` chains below stub two sequential HTTP exchanges:
      // first the HTML document, then the XRDS document.
      "should fall back to HTML based discovery if OP Identifier cannot be found in the XRDS" in {
        val ws = new WSMock
        ws.response.status returns OK thenReturns OK
        ws.response.body returns readFixture("discovery/html/openIDProvider.html")
        ws.response.xml returns scala.xml.XML.loadString(readFixture("discovery/xrds/invalid-op-identifier.xml"))
        ws.response.header(HeaderNames.CONTENT_TYPE) returns Some("text/html") thenReturns Some("application/xrds+xml")
        val returnTo = "http://foo.bar.com/openid"
        val openId = "http://abc.example.com/foo"
        val redirectUrl = Await.result(new WsOpenIdClient(ws, new WsDiscovery(ws)).redirectURL(openId, returnTo), dur)
        there was one(ws.request).get()
        new URL(redirectUrl).hostAndPath must be equalTo "https://www.example.com/openidserver/openid.server"
        verifyValidOpenIDRequest(parseQueryString(redirectUrl), openId, returnTo)
      }
      // OpenID 1.1 compatibility - http://openid.net/specs/openid-authentication-2_0.html#anchor38
      "should fall back to HTML based discovery (with an OpenID 1.1 document) if OP Identifier cannot be found in the XRDS" in {
        val ws = new WSMock
        ws.response.status returns OK thenReturns OK
        ws.response.body returns readFixture("discovery/html/openIDProvider-OpenID-1.1.html")
        ws.response.xml returns scala.xml.XML.loadString(readFixture("discovery/xrds/invalid-op-identifier.xml"))
        ws.response.header(HeaderNames.CONTENT_TYPE) returns Some("text/html") thenReturns Some("application/xrds+xml")
        val returnTo = "http://foo.bar.com/openid"
        val openId = "http://abc.example.com/foo"
        val redirectUrl = Await.result(new WsOpenIdClient(ws, new WsDiscovery(ws)).redirectURL(openId, returnTo), dur)
        there was one(ws.request).get()
        new URL(redirectUrl).hostAndPath must be equalTo "https://www.example.com/openidserver/openid.server-1"
        verifyValidOpenIDRequest(parseQueryString(redirectUrl), openId, returnTo)
      }
    }
    "resolve an OpenID server via HTML" in {
      "when given a response that includes openid meta information" in {
        val ws = new WSMock
        ws.response.body returns readFixture("discovery/html/openIDProvider.html")
        val returnTo = "http://foo.bar.com/openid"
        val openId = "http://abc.example.com/foo"
        val redirectUrl = Await.result(new WsOpenIdClient(ws, new WsDiscovery(ws)).redirectURL(openId, returnTo), dur)
        there was one(ws.request).get()
        new URL(redirectUrl).hostAndPath must be equalTo "https://www.example.com/openidserver/openid.server"
        verifyValidOpenIDRequest(parseQueryString(redirectUrl), openId, returnTo)
      }
      "when given a response that includes a local identifier (using openid2.local_id openid.delegate)" in {
        val ws = new WSMock
        ws.response.body returns readFixture("discovery/html/opLocalIdentityPage.html")
        val returnTo = "http://foo.bar.com/openid"
        val redirectUrl = Await.result(new WsOpenIdClient(ws, new WsDiscovery(ws)).redirectURL("http://example.com/", returnTo), dur)
        there was one(ws.request).get()
        new URL(redirectUrl).hostAndPath must be equalTo "http://www.example.com:8080/openidserver/openid.server"
        verifyValidOpenIDRequest(parseQueryString(redirectUrl), "http://example.com/", returnTo,
          opLocalIdentifier = Some("http://exampleuser.example.com/"))
      }
    }
  }
  // See 9.1 http://openid.net/specs/openid-authentication-2_0.html#anchor27
  // Asserts that the parsed query parameters of a redirect URL form a valid
  // stateless checkid_setup authentication request.
  private def verifyValidOpenIDRequest(
    params: Map[String, Seq[String]],
    claimedId: String,
    returnTo: String,
    opLocalIdentifier: Option[String] = None,
    realm: Option[String] = None) = {
    "valid request parameters need to be present" in {
      params.get("openid.ns") must_== Some(Seq("http://specs.openid.net/auth/2.0"))
      params.get("openid.mode") must_== Some(Seq("checkid_setup"))
      params.get("openid.claimed_id") must_== Some(Seq(claimedId))
      params.get("openid.return_to") must_== Some(Seq(returnTo))
    }
    "realm must be handled correctly (absent if not defined)" in {
      verifyOptionalParam(params, "openid.realm", realm)
    }
    "OP-Local Identifiers must be handled correctly (if a different OP-Local Identifier is not specified, the claimed identifier MUST be used as the value for openid.identity." in {
      val value = params.get("openid.identity")
      opLocalIdentifier match {
        case Some(id) => value must_== Some(Seq(id))
        case _ => value must be equalTo params.get("openid.claimed_id")
      }
    }
    "request parameters need to be absent in stateless mode" in {
      params.get("openid.assoc_handle") must beNone
    }
  }
  // Define matchers based on the expected value. Param must be absent if the expected value is None, it must match otherwise
  private def verifyOptionalParam(params: Params, key: String, expected: Option[String] = None) = expected match {
    case Some(value) => params.get(key) must_== Some(Seq(value))
    case _ => params.get(key) must beNone
  }
}
| wsargent/playframework | framework/src/play-openid/src/test/scala/play/api/libs/openid/DiscoveryClientSpec.scala | Scala | apache-2.0 | 13,748 |
package org.qirx.cms.metadata.properties
import org.qirx.cms.i18n.Messages
import org.qirx.cms.metadata.dsl.Property
import play.api.libs.json.JsObject
import play.api.libs.json.JsString
import play.api.libs.json.JsValue
/** A simple string-valued property: its value must be a non-empty `JsString`.
  * Contributes no extra metadata to the property's JSON form (`extraJson` is None).
  */
class Label(id: String) extends Property(id) {
  lazy val extraJson = None
  /** Validates that `value` is a JSON string, then applies the string checks.
    * Returns `Some(error)` on failure, `None` on success. */
  protected def validate(messages: Messages, value: JsValue): Option[JsObject] =
    toType[JsString](value) match {
      // Wrong JSON type: surface the conversion error directly.
      case Left(typeError) => Option(typeError)
      // Correct type: run the string-specific validation.
      case Right(str) => validateString(messages, str)
    }
  /** String-level rule: the value must be non-empty. */
  protected def validateString(messages: Messages, value: JsString): Option[JsObject] =
    nonEmpty(messages, value) match {
      case Left(error) => Some(error)
      case Right(_) => None
    }
}
object Label extends Label("label") | EECOLOR/play-cms | cms/src/main/scala/org/qirx/cms/metadata/properties/Label.scala | Scala | mit | 669 |
package picasso.frontend.compilerPlugin.domains
import picasso.utils.{LogCritical, LogError, LogWarning, LogNotice, LogInfo, LogDebug, Logger}
import picasso.frontend.compilerPlugin._
import picasso.ast.{Ident => PIdent, Process => PProcess, Block => PBlock, Value => PValue, _}
import picasso.math.hol.{Type => HType, ClassType => HClassType, Application => HApplication, Bool => HBool, Wildcard => HWildcard, Binding => HBinding, _}
import picasso.graph.{GT, DiGraph, Automaton, Labeled}
import picasso.utils.Namer
import scala.tools.nsc._
/** Registry of pluggable, domain-specific translation operations.
  *
  * Mixed into [[AnalysisUniverse]]; every concrete [[Operations]] subclass
  * registers itself in [[operations]] as a side effect of its construction.
  */
trait DomainSpecificOperations {
  self: AnalysisUniverse =>
  import global._
  import global.definitions._
  /** All registered operations, most recently constructed first. */
  var operations: List[Operations] = Nil
  /** Human-readable, comma-separated summary of the registered operation names. */
  def allOperations = "Operations are: " + operations.map(op => op.name).mkString(", ")
  /** One bundle of domain-specific conversions; provides the partial functions
    * used for process and expression translation. Instantiating a concrete
    * subclass automatically records it in [[operations]].
    */
  abstract class Operations {
    /** Display name used by [[allOperations]]. */
    def name: String
    // Self-registration: this statement runs as part of every subclass
    // constructor, prepending the new instance to the shared registry.
    operations = this :: operations
    /** Tree-to-process translation handled by this domain. */
    def process: PartialFunction[Tree, PProcess]
    /** Tree-to-expression translation handled by this domain. */
    def expression: PartialFunction[Tree, Expression]
    /** Optionally strips this domain's markings from an agent definition. */
    def removeMarking(a: AgentDefinition[PC]): Option[AgentDefinition[PC]]
    /** Edge translation producing `PartialDBT` values (semantics defined elsewhere). */
    def edgeToGraph: PartialFunction[(PClass, PC, PProcess, PC, Map[PC, Set[ID]]), Seq[PartialDBT]]
  }
}
| dzufferey/picasso | frontend/compilerPlugin/src/main/scala/picasso/frontend/compilerPlugin/domains/DomainSpecificOperations.scala | Scala | bsd-2-clause | 1,355 |
package org.morpheus
/**
* Created by zslajchrt on 29/04/15.
*/
// Marker ADT describing how a fragment instance is obtained. The concrete
// semantics of each provider are implemented by the consumers of
// FragmentProvider elsewhere in the project (not visible in this file).
sealed trait FragmentProvider
// Presumably: a fresh fragment instance per request — TODO confirm at use site.
case object FactoryProvider extends FragmentProvider
// Presumably: one shared fragment instance — TODO confirm at use site.
case object SingletonProvider extends FragmentProvider
//case object ConfiguratorProvider extends FragmentProvider
// Presumably: the fragment is supplied as an existing instance — TODO confirm at use site.
case object InstanceProvider extends FragmentProvider
/**
 * This class is used as a carrier of objects needed for two operations: dereference and completion.
 * In both cases a new composite instance refers to another one carried in the `src` field.
 * See Morpheus.complete, Morpheus.deref macros.
 * @param src the referred-to composite; its runtime type depends on `delegation`
 * @param placeholderFactMap
 * @param conformanceLevel
 * @param conformanceLevelTpe
 * @param delegation if true the `src` field carries an instance of `MorphKernelBase`, otherwise it carries a `MorphKernelRef`.
 *
 */
case class CopyProvider(src: Any, placeholderFactMap: Any, conformanceLevel: Morpheus.ConformanceLevel,
                        conformanceLevelTpe: Any, delegation: Boolean, noHiddenFragments: Boolean) extends FragmentProvider
//case class ForkProvider(src1: Any, src2: Any) extends FragmentProvider
| zslajchrt/morpheus | src/main/scala/org/morpheus/FragmentProvider.scala | Scala | apache-2.0 | 1,107 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.integration.spark.testsuite.complexType
import java.sql.Timestamp
import scala.collection.mutable
import org.apache.spark.sql.Row
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
class TestCompactionComplexType extends QueryTest with BeforeAndAfterAll {
  // Compaction threshold in effect before this suite ran, captured so that
  // afterAll can restore it after beforeAll overrides it with "2,3".
  private val compactionThreshold = CarbonProperties.getInstance()
    .getProperty(CarbonCommonConstants.COMPACTION_SEGMENT_LEVEL_THRESHOLD,
      CarbonCommonConstants.DEFAULT_SEGMENT_LEVEL_THRESHOLD)
override protected def beforeAll(): Unit = {
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.COMPACTION_SEGMENT_LEVEL_THRESHOLD, "2,3")
}
override protected def afterAll(): Unit = {
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.COMPACTION_SEGMENT_LEVEL_THRESHOLD, compactionThreshold)
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
sql("DROP TABLE IF EXISTS compactComplex")
}
  // Adaptive encoding of INT inside struct/array when values fit in a BYTE:
  // first verifies a CSV load round-trips, then inserts four single-row
  // segments and checks the data after a major compaction.
  test("test INT with struct and array, Encoding INT-->BYTE") {
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:int,name:string,marks:array<int>>) " +
      "STORED AS carbondata")
    sql(
      s"load data inpath '$resourcesPath/adap.csv' into table adaptive options('delimiter'=','," +
      "'quotechar'='\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
      "'complex_delimiter_level_2'=':')")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(20, 30, 40)))),
        Row(2, Row(600, "abc", mutable.WrappedArray.make(Array(20, 30, 40)))),
        Row(3, Row(600, "abc", mutable.WrappedArray.make(Array(20, 30, 40))))))
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:int,name:string,marks:array<int>>) " +
      "STORED AS carbondata")
    // Four inserts -> four segments, which exceeds the "2,3" threshold set in beforeAll.
    sql("insert into adaptive values(1,named_struct('id', 500, 'name', 'abc', 'marks', array(20,30,40)))")
    sql("insert into adaptive values(2,named_struct('id', 600, 'name', 'abc', 'marks', array(30,30,40)))")
    sql("insert into adaptive values(3,named_struct('id', 700, 'name', 'abc', 'marks', array(40,30,40)))")
    sql("insert into adaptive values(4,named_struct('id', 800, 'name', 'abc', 'marks', array(50,30,40)))")
    sql("alter table adaptive compact 'major'").show(200,false)
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(20, 30, 40)))),
        Row(2, Row(600, "abc", mutable.WrappedArray.make(Array(30, 30, 40)))),
        Row(3, Row(700, "abc", mutable.WrappedArray.make(Array(40, 30, 40)))),
        Row(4, Row(800, "abc", mutable.WrappedArray.make(Array(50, 30, 40))))))
  }
test("test INT with struct and array, Encoding INT-->SHORT") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:int,name:string,marks:array<int>>) " +
"STORED AS carbondata")
sql(
s"load data inpath '$resourcesPath/adap_int1.csv' into table adaptive options('delimiter'=','," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400))))))
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:int,name:string,marks:array<int>>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 500, 'name', 'abc', 'marks', array(200,300,400)))")
sql("insert into adaptive values(2,named_struct('id', 600, 'name', 'abc', 'marks', array(300,300,400)))")
sql("insert into adaptive values(3,named_struct('id', 700, 'name', 'abc', 'marks', array(400,300,400)))")
sql("insert into adaptive values(4,named_struct('id', 800, 'name', 'abc', 'marks', array(500,300,400)))")
sql("alter table adaptive compact 'major'").show(200,false)
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(600, "abc", mutable.WrappedArray.make(Array(300, 300, 400)))),
Row(3, Row(700, "abc", mutable.WrappedArray.make(Array(400, 300, 400)))),
Row(4, Row(800, "abc", mutable.WrappedArray.make(Array(500, 300, 400))))))
}
  // Adaptive encoding of INT inside struct/array when values need SHORT_INT
  // (3-byte) storage: CSV load round-trip, then inserts + major compaction.
  test("test INT with struct and array, Encoding INT-->SHORT INT") {
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:int,name:string,marks:array<int>>) " +
      "STORED AS carbondata")
    sql(
      s"load data inpath '$resourcesPath/adap_int2.csv' into table adaptive options('delimiter'=','," +
      "'quotechar'='\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
      "'complex_delimiter_level_2'=':')")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000))))))
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:int,name:string,marks:array<int>>) " +
      "STORED AS carbondata")
    // Four inserts -> four segments, then verify the major-compacted result.
    sql("insert into adaptive values(1,named_struct('id', 50000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
    sql("insert into adaptive values(2,named_struct('id', 70000, 'name', 'abc', 'marks', array(2000000,4000000,4000000)))")
    sql("insert into adaptive values(3,named_struct('id', 100000, 'name', 'abc', 'marks', array(2000000,5000000,4000000)))")
    sql("insert into adaptive values(4,named_struct('id', 200000, 'name', 'abc', 'marks', array(2000000,6000000,4000000)))")
    sql("alter table adaptive compact 'major'").show(200,false)
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 4000000, 4000000)))),
        Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 5000000, 4000000)))),
        Row(4, Row(200000, "abc", mutable.WrappedArray.make(Array(2000000, 6000000, 4000000))))))
  }
  // Adaptive encoding of INT inside struct/array when values need full INT
  // storage: CSV load round-trip, then inserts + major compaction.
  test("test INT with struct and array, Encoding INT-->INT") {
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:int,name:string,marks:array<int>>) " +
      "STORED AS carbondata")
    sql(
      s"load data inpath '$resourcesPath/adap_int3.csv' into table adaptive options('delimiter'=','," +
      "'quotechar'='\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
      "'complex_delimiter_level_2'=':')")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(2, Row(7000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000))))))
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:int,name:string,marks:array<int>>) " +
      "STORED AS carbondata")
    // Four inserts -> four segments, then verify the major-compacted result.
    sql("insert into adaptive values(1,named_struct('id', 500000, 'name', 'abc', 'marks', array(200,300,52000000)))")
    sql("insert into adaptive values(2,named_struct('id', 700000, 'name', 'abc', 'marks', array(210,350,52000000)))")
    sql("insert into adaptive values(3,named_struct('id', 10000000, 'name', 'abc', 'marks', array(200,300,52000000)))")
    sql("insert into adaptive values(4,named_struct('id', 10000001, 'name', 'abd', 'marks', array(250,450,62000000)))")
    sql("alter table adaptive compact 'major'").show(200,false)
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(2, Row(700000, "abc", mutable.WrappedArray.make(Array(210, 350, 52000000)))),
        Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(4, Row(10000001, "abd", mutable.WrappedArray.make(Array(250, 450, 62000000))))))
  }
test("test SMALLINT with struct and array SMALLINT --> BYTE") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:smallint,name:string," +
"marks:array<smallint>>) STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 100, 'name', 'abc', 'marks', array(20,30,40)))")
sql("insert into adaptive values(2,named_struct('id', 200, 'name', 'abc', 'marks', array(30,40,50)))")
sql("insert into adaptive values(3,named_struct('id', 300, 'name', 'abd', 'marks', array(30,41,55)))")
sql("insert into adaptive values(4,named_struct('id', 400, 'name', 'abe', 'marks', array(30,42,56)))")
sql("alter table adaptive compact 'major'").show(200,false)
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(100, "abc", mutable.WrappedArray.make(Array(20, 30, 40)))),
Row(2, Row(200, "abc", mutable.WrappedArray.make(Array(30, 40, 50)))),
Row(3, Row(300, "abd", mutable.WrappedArray.make(Array(30, 41, 55)))),
Row(4, Row(400, "abe", mutable.WrappedArray.make(Array(30, 42, 56))))))
}
  // Adaptive encoding of SMALLINT inside struct/array when values need SHORT
  // storage; also exercises a second compaction round followed by CLEAN FILES.
  test("test SMALLINT with struct and array SMALLINT --> SHORT") {
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:smallint,name:string," +
      "marks:array<smallint>>) STORED AS carbondata")
    sql("insert into adaptive values(1,named_struct('id', 500, 'name', 'abc', 'marks', array(200,300,400)))")
    sql("insert into adaptive values(2,named_struct('id', 8000, 'name', 'abc', 'marks', array(300,410,500)))")
    sql("insert into adaptive values(3,named_struct('id', 9000, 'name', 'abee', 'marks', array(310,420,400)))")
    sql("insert into adaptive values(4,named_struct('id', 9900, 'name', 'abfffffffffffffff', 'marks', array(320,430,500)))")
    sql("alter table adaptive compact 'major'").show(200,false)
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
        Row(2, Row(8000, "abc", mutable.WrappedArray.make(Array(300, 410, 500)))),
        Row(3, Row(9000, "abee", mutable.WrappedArray.make(Array(310, 420, 400)))),
        Row(4, Row(9900, "abfffffffffffffff", mutable.WrappedArray.make(Array(320, 430, 500))))))
    // Add four more segments and compact again; CLEAN FILES removes the
    // compacted-away segments, after which all eight rows must still be readable.
    sql("insert into adaptive values(5,named_struct('id', 500, 'name', 'abc', 'marks', array(200,310,400)))")
    sql("insert into adaptive values(6,named_struct('id', 8000, 'name', 'abc', 'marks', array(300,310,500)))")
    sql("insert into adaptive values(7,named_struct('id', 9000, 'name', 'abee', 'marks', array(310,320,400)))")
    sql("insert into adaptive values(8,named_struct('id', 9900, 'name', 'abfffffffffffffffeeee', 'marks', array(320,330,500)))")
    sql("alter table adaptive compact 'major'").show(200,false)
    sql("SHOW SEGMENTS FOR TABLE adaptive").show(200,false)
    sql("clean files for table adaptive").show(200,false)
    sql("SHOW SEGMENTS FOR TABLE adaptive").show(200,false)
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
        Row(2, Row(8000, "abc", mutable.WrappedArray.make(Array(300, 410, 500)))),
        Row(3, Row(9000, "abee", mutable.WrappedArray.make(Array(310, 420, 400)))),
        Row(4, Row(9900, "abfffffffffffffff", mutable.WrappedArray.make(Array(320, 430, 500)))),
        Row(5, Row(500, "abc", mutable.WrappedArray.make(Array(200, 310, 400)))),
        Row(6, Row(8000, "abc", mutable.WrappedArray.make(Array(300, 310, 500)))),
        Row(7, Row(9000, "abee", mutable.WrappedArray.make(Array(310, 320, 400)))),
        Row(8, Row(9900, "abfffffffffffffffeeee", mutable.WrappedArray.make(Array(320, 330, 500))))))
  }
test("test BigInt with struct and array BIGINT --> BYTE") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:bigint,name:string," +
"marks:array<bigint>>) STORED AS carbondata")
sql("insert into adaptive values(11,named_struct('id', 1, 'name', 'abc', 'marks', array(21,30,40)))")
sql("insert into adaptive values(12,named_struct('id', 1, 'name', 'ab1', 'marks', array(22,30,40)))")
sql("insert into adaptive values(13,named_struct('id', 1, 'name', 'ab2', 'marks', array(23,30,40)))")
sql("insert into adaptive values(14,named_struct('id', 1, 'name', 'ab3', 'marks', array(24,30,40)))")
sql("insert into adaptive values(15,named_struct('id', 1, 'name', 'ab4', 'marks', array(25,30,40)))")
sql("insert into adaptive values(16,named_struct('id', 1, 'name', 'ab5', 'marks', array(26,30,40)))")
sql("insert into adaptive values(17,named_struct('id', 1, 'name', 'ab6', 'marks', array(27,30,40)))")
sql("insert into adaptive values(18,named_struct('id', 1, 'name', 'ab7', 'marks', array(28,30,40)))")
sql("insert into adaptive values(19,named_struct('id', 1, 'name', 'ab8', 'marks', array(29,30,40)))")
sql("insert into adaptive values(20,named_struct('id', 1, 'name', 'ab9', 'marks', array(30,30,40)))")
sql("insert into adaptive values(21,named_struct('id', 1, 'name', 'ab10', 'marks', array(31,30,40)))")
sql("insert into adaptive values(22,named_struct('id', 1, 'name', 'ab11', 'marks', array(32,30,40)))")
sql("alter table adaptive compact 'major'").show(200,false)
sql("SHOW SEGMENTS FOR TABLE adaptive").show(200,false)
sql("clean files for table adaptive").show(200,false)
sql("SHOW SEGMENTS FOR TABLE adaptive").show(200,false)
checkAnswer(sql("select * from adaptive"),
Seq(Row(11, Row(1, "abc", mutable.WrappedArray.make(Array(21, 30, 40)))),
Row(12, Row(1, "ab1", mutable.WrappedArray.make(Array(22, 30, 40)))),
Row(13, Row(1, "ab2", mutable.WrappedArray.make(Array(23, 30, 40)))),
Row(14, Row(1, "ab3", mutable.WrappedArray.make(Array(24, 30, 40)))),
Row(15, Row(1, "ab4", mutable.WrappedArray.make(Array(25, 30, 40)))),
Row(16, Row(1, "ab5", mutable.WrappedArray.make(Array(26, 30, 40)))),
Row(17, Row(1, "ab6", mutable.WrappedArray.make(Array(27, 30, 40)))),
Row(18, Row(1, "ab7", mutable.WrappedArray.make(Array(28, 30, 40)))),
Row(19, Row(1, "ab8", mutable.WrappedArray.make(Array(29, 30, 40)))),
Row(20, Row(1, "ab9", mutable.WrappedArray.make(Array(30, 30, 40)))),
Row(21, Row(1, "ab10", mutable.WrappedArray.make(Array(31, 30, 40)))),
Row(22, Row(1, "ab11", mutable.WrappedArray.make(Array(32, 30, 40))))
))
}
test("test BigInt with struct and array BIGINT --> SHORT") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:bigint,name:string," +
"marks:array<bigint>>) STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 500, 'name', 'abc', 'marks', array(200,300,400)))")
sql("insert into adaptive values(2,named_struct('id', 8000, 'name', 'abc', 'marks', array(300,400,500)))")
sql("insert into adaptive values(3,named_struct('id', 9000, 'name', 'abc', 'marks', array(300,400,500)))")
sql("insert into adaptive values(4,named_struct('id', 10000, 'name', 'abc', 'marks', array(300,400,500)))")
sql("alter table adaptive compact'major'")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(8000, "abc", mutable.WrappedArray.make(Array(300, 400, 500)))),
Row(3, Row(9000, "abc", mutable.WrappedArray.make(Array(300, 400, 500)))),
Row(4, Row(10000, "abc", mutable.WrappedArray.make(Array(300, 400, 500))))))
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:BIGINT,name:string,marks:array<BIGINT>>)" +
" " +
"STORED AS carbondata")
sql(
s"load data inpath '$resourcesPath/adap_int1.csv' into table adaptive options('delimiter'=','," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql(
s"load data inpath '$resourcesPath/adap_int1.csv' into table adaptive options('delimiter'=','," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql(
s"load data inpath '$resourcesPath/adap_int1.csv' into table adaptive options('delimiter'=','," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql(
s"load data inpath '$resourcesPath/adap_int1.csv' into table adaptive options('delimiter'=','," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql("alter table adaptive compact'major'")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400))))
))
}
  // Adaptive encoding of BIGINT inside struct/array when values need SHORT_INT
  // (3-byte) storage: twelve inserts + compaction, then four CSV loads + compaction.
  test("test BigInt with struct and array BIGINT --> SHORT INT") {
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:bigint,name:string," +
      "marks:array<bigint>>) STORED AS carbondata")
    // The same three rows are inserted four times (twelve segments in total).
    sql("insert into adaptive values(1,named_struct('id', 50000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
    sql("insert into adaptive values(2,named_struct('id', 70000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
    sql("insert into adaptive values(3,named_struct('id', 100000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
    sql("insert into adaptive values(1,named_struct('id', 50000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
    sql("insert into adaptive values(2,named_struct('id', 70000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
    sql("insert into adaptive values(3,named_struct('id', 100000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
    sql("insert into adaptive values(1,named_struct('id', 50000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
    sql("insert into adaptive values(2,named_struct('id', 70000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
    sql("insert into adaptive values(3,named_struct('id', 100000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
    sql("insert into adaptive values(1,named_struct('id', 50000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
    sql("insert into adaptive values(2,named_struct('id', 70000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
    sql("insert into adaptive values(3,named_struct('id', 100000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
    sql("alter table adaptive compact'major'")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000))))
      ))
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:BIGINT,name:string,marks:array<BIGINT>>)" +
      " " +
      "STORED AS carbondata")
    // Load the same CSV four times so compaction has multiple segments to merge.
    sql(
      s"load data inpath '$resourcesPath/adap_int2.csv' into table adaptive options('delimiter'=','," +
      "'quotechar'='\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
      "'complex_delimiter_level_2'=':')")
    sql(
      s"load data inpath '$resourcesPath/adap_int2.csv' into table adaptive options('delimiter'=','," +
      "'quotechar'='\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
      "'complex_delimiter_level_2'=':')")
    sql(
      s"load data inpath '$resourcesPath/adap_int2.csv' into table adaptive options('delimiter'=','," +
      "'quotechar'='\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
      "'complex_delimiter_level_2'=':')")
    sql(
      s"load data inpath '$resourcesPath/adap_int2.csv' into table adaptive options('delimiter'=','," +
      "'quotechar'='\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
      "'complex_delimiter_level_2'=':')")
    sql("alter table adaptive compact'major'")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000))))
      ))
  }
  // Adaptive encoding of BIGINT inside struct/array when values need full INT
  // storage: four CSV loads + compaction, then twelve inserts + compaction.
  test("test BIGINT with struct and array, Encoding INT-->INT") {
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:BIGINT,name:string,marks:array<BIGINT>>)" +
      " " +
      "STORED AS carbondata")
    // Load the same CSV four times so compaction has multiple segments to merge.
    sql(
      s"load data inpath '$resourcesPath/adap_int3.csv' into table adaptive options('delimiter'=','," +
      "'quotechar'='\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
      "'complex_delimiter_level_2'=':')")
    sql(
      s"load data inpath '$resourcesPath/adap_int3.csv' into table adaptive options('delimiter'=','," +
      "'quotechar'='\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
      "'complex_delimiter_level_2'=':')")
    sql(
      s"load data inpath '$resourcesPath/adap_int3.csv' into table adaptive options('delimiter'=','," +
      "'quotechar'='\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
      "'complex_delimiter_level_2'=':')")
    sql(
      s"load data inpath '$resourcesPath/adap_int3.csv' into table adaptive options('delimiter'=','," +
      "'quotechar'='\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
      "'complex_delimiter_level_2'=':')")
    sql("alter table adaptive compact'major'")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(2, Row(7000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(2, Row(7000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(2, Row(7000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(2, Row(7000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000))))
      ))
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:BIGINT,name:string,marks:array<BIGINT>>)" +
      " " +
      "STORED AS carbondata")
    // The same three rows are inserted four times (twelve segments in total).
    sql("insert into adaptive values(1,named_struct('id', 500000, 'name', 'abc', 'marks', array(200,300,52000000)))")
    sql("insert into adaptive values(2,named_struct('id', 700000, 'name', 'abc', 'marks', array(200,300,52000000)))")
    sql("insert into adaptive values(3,named_struct('id', 10000000, 'name','abc', 'marks', array(200,300,52000000)))")
    sql("insert into adaptive values(1,named_struct('id', 500000, 'name', 'abc', 'marks', array(200,300,52000000)))")
    sql("insert into adaptive values(2,named_struct('id', 700000, 'name', 'abc', 'marks', array(200,300,52000000)))")
    sql("insert into adaptive values(3,named_struct('id', 10000000, 'name','abc', 'marks', array(200,300,52000000)))")
    sql("insert into adaptive values(1,named_struct('id', 500000, 'name', 'abc', 'marks', array(200,300,52000000)))")
    sql("insert into adaptive values(2,named_struct('id', 700000, 'name', 'abc', 'marks', array(200,300,52000000)))")
    sql("insert into adaptive values(3,named_struct('id', 10000000, 'name','abc', 'marks', array(200,300,52000000)))")
    sql("insert into adaptive values(1,named_struct('id', 500000, 'name', 'abc', 'marks', array(200,300,52000000)))")
    sql("insert into adaptive values(2,named_struct('id', 700000, 'name', 'abc', 'marks', array(200,300,52000000)))")
    sql("insert into adaptive values(3,named_struct('id', 10000000, 'name','abc', 'marks', array(200,300,52000000)))")
    sql("alter table adaptive compact 'major' ")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(2, Row(700000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(2, Row(700000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(2, Row(700000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(2, Row(700000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000))))
      ))
  }
  // Adaptive encoding of DOUBLE inside struct/array when scaled values fit in
  // a BYTE: inserts + compaction, then four CSV loads + compaction.
  test("test Double with Struct and Array DOUBLE --> BYTE") {
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:double,name:string," +
      "marks:array<double>>) STORED AS carbondata")
    sql("insert into adaptive values(1,named_struct('id', 1.323, 'name', 'abc', 'marks', array(2.2,3.3,4.4)))")
    sql("insert into adaptive values(2,named_struct('id', 1.324, 'name', 'abc', 'marks', array(2.2,3.3,4.4)))")
    sql("insert into adaptive values(3,named_struct('id', 1.325, 'name', 'abc', 'marks', array(2.2,3.3,4.4)))")
    sql("insert into adaptive values(4,named_struct('id', 1.326, 'name', 'abc', 'marks', array(2.2,3.3,4.4)))")
    sql("alter table adaptive compact 'major' ")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
        Row(2, Row(1.324, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
        Row(3, Row(1.325, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
        Row(4, Row(1.326, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4))))))
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:double,name:string,marks:array<double>>)" +
      " " +
      "STORED AS carbondata")
    // Load the same CSV four times so compaction has multiple segments to merge.
    sql(
      s"load data inpath '$resourcesPath/adap_double1.csv' into table adaptive options('delimiter'='," +
      "'," +
      "'quotechar'='\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
      "'complex_delimiter_level_2'=':')")
    sql(
      s"load data inpath '$resourcesPath/adap_double1.csv' into table adaptive options('delimiter'='," +
      "'," +
      "'quotechar'='\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
      "'complex_delimiter_level_2'=':')")
    sql(
      s"load data inpath '$resourcesPath/adap_double1.csv' into table adaptive options('delimiter'='," +
      "'," +
      "'quotechar'='\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
      "'complex_delimiter_level_2'=':')")
    sql(
      s"load data inpath '$resourcesPath/adap_double1.csv' into table adaptive options('delimiter'='," +
      "'," +
      "'quotechar'='\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
      "'complex_delimiter_level_2'=':')")
    sql("alter table adaptive compact 'major' ")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
        Row(2, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
        Row(3, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
        Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
        Row(2, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
        Row(3, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
        Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
        Row(2, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
        Row(3, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
        Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
        Row(2, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
        Row(3, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4))))
      ))
  }
  // Adaptive encoding of DOUBLE inside struct/array when scaled values need
  // SHORT storage: inserts + compaction, then four CSV loads + compaction.
  test("test Double with Struct and Array DOUBLE --> SHORT") {
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:double,name:string," +
      "marks:array<double>>) STORED AS carbondata")
    sql("insert into adaptive values(1,named_struct('id', 1.323, 'name', 'abc', 'marks', array(20.2,30.3,40.4)))")
    sql("insert into adaptive values(2,named_struct('id', 1.324, 'name', 'abc', 'marks', array(20.2,30.3,40.5)))")
    sql("insert into adaptive values(3,named_struct('id', 1.325, 'name', 'abc', 'marks', array(20.2,30.3,40.6)))")
    sql("insert into adaptive values(4,named_struct('id', 1.326, 'name', 'abc', 'marks', array(20.2,30.3,40.7)))")
    sql("alter table adaptive compact 'major' ")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
        Row(2, Row(1.324, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.5)))),
        Row(3, Row(1.325, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.6)))),
        Row(4, Row(1.326, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.7))))
      ))
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:double,name:string,marks:array<double>>)" +
      " " +
      "STORED AS carbondata")
    // Load the same CSV four times so compaction has multiple segments to merge.
    sql(
      s"load data inpath '$resourcesPath/adap_double2.csv' into table adaptive options('delimiter'='," +
      "'," +
      "'quotechar'='\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
      "'complex_delimiter_level_2'=':')")
    sql(
      s"load data inpath '$resourcesPath/adap_double2.csv' into table adaptive options('delimiter'='," +
      "'," +
      "'quotechar'='\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
      "'complex_delimiter_level_2'=':')")
    sql(
      s"load data inpath '$resourcesPath/adap_double2.csv' into table adaptive options('delimiter'='," +
      "'," +
      "'quotechar'='\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
      "'complex_delimiter_level_2'=':')")
    sql(
      s"load data inpath '$resourcesPath/adap_double2.csv' into table adaptive options('delimiter'='," +
      "'," +
      "'quotechar'='\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
      "'complex_delimiter_level_2'=':')")
    sql("alter table adaptive compact 'major' ")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
        Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
        Row(3, Row(4.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
        Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
        Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
        Row(3, Row(4.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
        Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
        Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
        Row(3, Row(4.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
        Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
        Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
        Row(3, Row(4.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4))))
      ))
  }
test("test Double with Struct and Array DOUBLE --> SHORT INT") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:double,name:string," +
"marks:array<double>>) STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 10.323, 'name', 'abc', 'marks', array(20.2,30.3,501.423)))")
sql("insert into adaptive values(2,named_struct('id', 10.323, 'name', 'abc', 'marks', array(20.2,30.3,502.421)))")
sql("insert into adaptive values(3,named_struct('id', 10.323, 'name', 'abc', 'marks', array(20.2,30.3,503.422)))")
sql("insert into adaptive values(4,named_struct('id', 10.323, 'name', 'abc', 'marks', array(20.2,30.3,504.424)))")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(10.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 501.423)))),
Row(2, Row(10.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 502.421)))),
Row(3, Row(10.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 503.422)))),
Row(4, Row(10.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 504.424))))
))
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:double,name:string,marks:array<double>>)" +
" " +
"STORED AS carbondata")
sql(
s"load data inpath '$resourcesPath/adap_double3.csv' into table adaptive options('delimiter'='," +
"'," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql(
s"load data inpath '$resourcesPath/adap_double3.csv' into table adaptive options('delimiter'='," +
"'," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql(
s"load data inpath '$resourcesPath/adap_double3.csv' into table adaptive options('delimiter'='," +
"'," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql(
s"load data inpath '$resourcesPath/adap_double3.csv' into table adaptive options('delimiter'='," +
"'," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(3, Row(50.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(3, Row(50.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(3, Row(50.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(3, Row(50.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423))))
))
}
test("test Double with Struct and Array DOUBLE --> INT") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:double,name:string," +
"marks:array<double>>) STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 1000.323, 'name', 'abc', 'marks', array(20.2,30.3,60000.423)))")
sql("insert into adaptive values(2,named_struct('id', 1000.324, 'name', 'abc', 'marks', array(20.2,30.3,70000.424)))")
sql("insert into adaptive values(3,named_struct('id', 1000.325, 'name', 'abc', 'marks', array(20.2,30.3,80000.425)))")
sql("insert into adaptive values(4,named_struct('id', 1000.326, 'name', 'abc', 'marks', array(20.2,30.3,90000.426)))")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(1000.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 60000.423)))),
Row(2, Row(1000.324, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 70000.424)))),
Row(3, Row(1000.325, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 80000.425)))),
Row(4, Row(1000.326, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 90000.426))))
))
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:double,name:string,marks:array<double>>)" +
" " +
"STORED AS carbondata")
sql(
s"load data inpath '$resourcesPath/adap_double4.csv' into table adaptive options('delimiter'='," +
"'," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql(
s"load data inpath '$resourcesPath/adap_double4.csv' into table adaptive options('delimiter'='," +
"'," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql(
s"load data inpath '$resourcesPath/adap_double4.csv' into table adaptive options('delimiter'='," +
"'," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql(
s"load data inpath '$resourcesPath/adap_double4.csv' into table adaptive options('delimiter'='," +
"'," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
Row(3, Row(50000.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
Row(3, Row(50000.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
Row(3, Row(50000.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
Row(3, Row(50000.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423))))
))
}
test("test Double with Struct and Array DOUBLE --> DOUBLE") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:double,name:string," +
"marks:array<double>>) STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 1.797693134862315, 'name', 'abc', 'marks', array(2.2,30.3,1.797693134862315)))")
sql("insert into adaptive values(2,named_struct('id', 1.797693134862316, 'name', 'abc', 'marks', array(2.2,30.3,1.797693134862316)))")
sql("insert into adaptive values(3,named_struct('id', 1.797693134862317, 'name', 'abc', 'marks', array(2.2,30.3,1.797693134862317)))")
sql("insert into adaptive values(4,named_struct('id', 1.797693134862318, 'name', 'abc', 'marks', array(2.2,30.3,1.797693134862318)))")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1,
Row(1.797693134862315,
"abc",
mutable.WrappedArray.make(Array(2.2, 30.3, 1.797693134862315)))),
Row(2,
Row(1.797693134862316,
"abc",
mutable.WrappedArray.make(Array(2.2, 30.3, 1.797693134862316)))),
Row(3,
Row(1.797693134862317,
"abc",
mutable.WrappedArray.make(Array(2.2, 30.3, 1.797693134862317)))),
Row(4,
Row(1.797693134862318,
"abc",
mutable.WrappedArray.make(Array(2.2, 30.3, 1.797693134862318))))
))
}
test("test Decimal with Struct") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:decimal(3,2),name:string>)" +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 3.2, 'name', 'abc'))")
sql("select * from adaptive").show(false)
}
test("test Decimal with Array") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<name:string," +
"marks:array<decimal>>) STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('name', 'abc', 'marks', array(20.2,30.3,40.4)))")
sql("select * from adaptive").show(false)
}
test("test Timestamp with Struct") {
sql("Drop table if exists adaptive")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
sql(
"create table adaptive(roll int, student struct<id:timestamp,name:string>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', '2017-01-01 00:00:00', 'name', 'abc'))")
sql("insert into adaptive values(2,named_struct('id', '2017-01-02 00:00:00', 'name', 'abc'))")
sql("insert into adaptive values(3,named_struct('id', '2017-01-03 00:00:00', 'name', 'abc'))")
sql("insert into adaptive values(4,named_struct('id', '2017-01-04 00:00:00', 'name', 'abc'))")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(Timestamp.valueOf("2017-01-01 00:00:00.0"), "abc")),
Row(2, Row(Timestamp.valueOf("2017-01-02 00:00:00.0"), "abc")),
Row(3, Row(Timestamp.valueOf("2017-01-03 00:00:00.0"), "abc")),
Row(4, Row(Timestamp.valueOf("2017-01-04 00:00:00.0"), "abc"))
))
}
test("test Timestamp with Array") {
sql("Drop table if exists adaptive")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
sql(
"create table adaptive(roll int, student struct<name:string," +
"marks:array<timestamp>>) STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('name', 'abc1', 'marks', array('2017-01-01 00:00:00.0','2018-01-01 00:00:00.0')))")
sql("insert into adaptive values(2,named_struct('name', 'abc2', 'marks', array('2017-01-02 00:00:00.0','2018-01-03 00:00:00.0')))")
sql("insert into adaptive values(3,named_struct('name', 'abc3', 'marks', array('2017-01-04 00:00:00.0','2018-01-05 00:00:00.0')))")
sql("insert into adaptive values(4,named_struct('name', 'abc4', 'marks', array('2017-01-06 00:00:00.0','2018-01-07 00:00:00.0')))")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1,
Row("abc1",
mutable.WrappedArray
.make(Array(Timestamp.valueOf("2017-01-01 00:00:00.0"),
Timestamp.valueOf("2018-01-01 00:00:00.0"))))),
Row(2,
Row("abc2",
mutable.WrappedArray
.make(Array(Timestamp.valueOf("2017-01-02 00:00:00.0"),
Timestamp.valueOf("2018-01-03 00:00:00.0"))))),
Row(3,
Row("abc3",
mutable.WrappedArray
.make(Array(Timestamp.valueOf("2017-01-04 00:00:00.0"),
Timestamp.valueOf("2018-01-05 00:00:00.0"))))),
Row(4,
Row("abc4",
mutable.WrappedArray
.make(Array(Timestamp.valueOf("2017-01-06 00:00:00.0"),
Timestamp.valueOf("2018-01-07 00:00:00.0")))))
))
}
test("test DATE with Array") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<name:string," +
"marks:array<date>>) STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('name', 'abc', 'marks', array('2017-01-01')))")
sql("select * from adaptive").show(false)
}
test("test LONG with Array and Struct Encoding LONG --> BYTE") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:long,name:string,marks:array<long>>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(20,30,40)))")
sql("insert into adaptive values(2,named_struct('id', 11111, 'name', 'abc', 'marks', array(55,65,75)))")
sql("insert into adaptive values(3,named_struct('id', 11111, 'name', 'abc', 'marks', array(88,98,8)))")
sql("insert into adaptive values(4,named_struct('id', 11111, 'name', 'abc', 'marks', array(99,9,19)))")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(20, 30, 40)))),
Row(2, Row(11111, "abc", mutable.WrappedArray.make(Array(55, 65, 75)))),
Row(3, Row(11111, "abc", mutable.WrappedArray.make(Array(88, 98, 8)))),
Row(4, Row(11111, "abc", mutable.WrappedArray.make(Array(99, 9, 19))))
))
}
test("test LONG with Array and Struct Encoding LONG --> SHORT") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:long,name:string,marks:array<long>>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(200,300,400)))")
sql("insert into adaptive values(2,named_struct('id', 11111, 'name', 'abc', 'marks', array(201,301,401)))")
sql("insert into adaptive values(3,named_struct('id', 11111, 'name', 'abc', 'marks', array(202,302,402)))")
sql("insert into adaptive values(4,named_struct('id', 11111, 'name', 'abc', 'marks', array(203,303,403)))")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(11111, "abc", mutable.WrappedArray.make(Array(201, 301, 401)))),
Row(3, Row(11111, "abc", mutable.WrappedArray.make(Array(202, 302, 402)))),
Row(4, Row(11111, "abc", mutable.WrappedArray.make(Array(203, 303, 403))))
))
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:LONG,name:string,marks:array<LONG>>) " +
"STORED AS carbondata")
sql(
s"load data inpath '$resourcesPath/adap_int1.csv' into table adaptive options('delimiter'=','," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql(
s"load data inpath '$resourcesPath/adap_int1.csv' into table adaptive options('delimiter'=','," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql(
s"load data inpath '$resourcesPath/adap_int1.csv' into table adaptive options('delimiter'=','," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql(
s"load data inpath '$resourcesPath/adap_int1.csv' into table adaptive options('delimiter'=','," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400))))
))
}
test("test LONG with struct and array, Encoding LONG-->SHORT INT") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:LONG,name:string,marks:array<LONG>>) " +
"STORED AS carbondata")
sql(
s"load data inpath '$resourcesPath/adap_int2.csv' into table adaptive options('delimiter'=','," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000))))))
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:LONG,name:string,marks:array<LONG>>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 50000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
sql("insert into adaptive values(2,named_struct('id', 70000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
sql("insert into adaptive values(3,named_struct('id', 100000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000))))))
}
test("test LONG with struct and array, Encoding LONG-->INT") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:LONG,name:string,marks:array<LONG>>) " +
"STORED AS carbondata")
sql(
s"load data inpath '$resourcesPath/adap_int3.csv' into table adaptive options('delimiter'=','," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(2, Row(7000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000))))))
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:LONG,name:string,marks:array<LONG>>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 500000, 'name', 'abc', 'marks', array(200,300,52000000)))")
sql("insert into adaptive values(2,named_struct('id', 700000, 'name', 'abc', 'marks', array(200,300,52000000)))")
sql("insert into adaptive values(3,named_struct('id', 10000000, 'name', 'abc', 'marks', array(200,300,52000000)))")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(2, Row(700000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000))))))
}
test("test LONG with struct and array, Encoding LONG-->LONG") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:LONG,name:string,marks:array<LONG>>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 500000, 'name', 'abc', 'marks', array(200,300,52000000000)))")
sql("insert into adaptive values(2,named_struct('id', 700000, 'name', 'abc', 'marks', array(200,300,52000000000)))")
sql("insert into adaptive values(3,named_struct('id', 10000000, 'name', 'abc', 'marks', array(200,300,52000000000)))")
sql("select * from adaptive").show(false)
}
test("test SHORT with Array and Struct Encoding SHORT -->BYTE") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:short,name:string,marks:array<short>>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 11, 'name', 'abc', 'marks', array(20,30,40)))")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(11, "abc", mutable.WrappedArray.make(Array(20, 30, 40))))))
}
test("test SHORT with Array and Struct Encoding SHORT --> SHORT") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:SHORT,name:string,marks:array<SHORT>>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(200,300,400)))")
sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(200,300,401)))")
sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(200,300,402)))")
sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(200,300,403)))")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(200, 300, 401)))),
Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(200, 300, 402)))),
Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(200, 300, 403))))
))
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:SHORT,name:string,marks:array<SHORT>>) " +
"STORED AS carbondata")
sql(
s"load data inpath '$resourcesPath/adap_int1.csv' into table adaptive options('delimiter'=','," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql(
s"load data inpath '$resourcesPath/adap_int1.csv' into table adaptive options('delimiter'=','," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql(
s"load data inpath '$resourcesPath/adap_int1.csv' into table adaptive options('delimiter'=','," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql(
s"load data inpath '$resourcesPath/adap_int1.csv' into table adaptive options('delimiter'=','," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400))))
))
}
test("test Boolean with Struct and Array") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:boolean,name:string," +
"marks:array<boolean>>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', true, 'name', 'abc', 'marks', array(false,true,false)))")
sql("insert into adaptive values(1,named_struct('id', true, 'name', 'abc', 'marks', array(false,true,true)))")
sql("insert into adaptive values(1,named_struct('id', true, 'name', 'abc', 'marks', array(false,true,true)))")
sql("insert into adaptive values(1,named_struct('id', true, 'name', 'abc', 'marks', array(false,true,false)))")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(true, "abc", mutable.WrappedArray.make(Array(false, true, false)))),
Row(1, Row(true, "abc", mutable.WrappedArray.make(Array(false, true, true)))),
Row(1, Row(true, "abc", mutable.WrappedArray.make(Array(false, true, true)))),
Row(1, Row(true, "abc", mutable.WrappedArray.make(Array(false, true, false))))
))
}
test("complex type compaction") {
sql("drop table if exists complexcarbontable")
sql("create table complexcarbontable(deviceInformationId int, channelsId string," +
"ROMSize string, purchasedate string, mobile struct<imei:string, imsi:string>," +
"MAC array<string>, locationinfo array<struct<ActiveAreaId:int, ActiveCountry:string, " +
"ActiveProvince:string, Activecity:string, ActiveDistrict:string, ActiveStreet:string>>," +
"proddate struct<productionDate:string,activeDeactivedate:array<string>>, gamePointId " +
"double,contractNumber double) " +
"STORED AS carbondata"
)
sql(
s"LOAD DATA local inpath '$resourcesPath/complexdata.csv' INTO table " +
"complexcarbontable " +
"OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\\"', 'FILEHEADER'='deviceInformationId,channelsId," +
"ROMSize,purchasedate,mobile,MAC,locationinfo,proddate,gamePointId,contractNumber'," +
"'COMPLEX_DELIMITER_LEVEL_1'='$', 'COMPLEX_DELIMITER_LEVEL_2'=':')"
)
sql(
s"LOAD DATA local inpath '$resourcesPath/complexdata.csv' INTO table " +
"complexcarbontable " +
"OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\\"', 'FILEHEADER'='deviceInformationId,channelsId," +
"ROMSize,purchasedate,mobile,MAC,locationinfo,proddate,gamePointId,contractNumber'," +
"'COMPLEX_DELIMITER_LEVEL_1'='$', 'COMPLEX_DELIMITER_LEVEL_2'=':')"
)
sql("alter table complexcarbontable compact 'minor'")
sql(
"select locationinfo,proddate from complexcarbontable where deviceInformationId=1 limit 1")
.show(false)
checkAnswer(sql(
"select locationinfo,proddate from complexcarbontable where deviceInformationId=1 limit 1"),
Seq(Row(mutable
.WrappedArray
.make(Array(Row(7, "Chinese", "Hubei Province", "yichang", "yichang", "yichang"),
Row(7, "India", "New Delhi", "delhi", "delhi", "delhi"))),
Row("29-11-2015", mutable
.WrappedArray.make(Array("29-11-2015", "29-11-2015"))))))
sql("drop table if exists complexcarbontable")
}
test("test minor compaction with all complex types") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:SHORT,name:string,marks:array<SHORT>>, " +
"mapField map<int, string>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(200,300,400)),map(1, 'Nalla', 2, 'Singh', 3, 'Gupta', 4, 'Kumar'))")
sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(200,300,401)),map(11, 'Nalla', 12, 'Singh', 13, 'Gupta', 14, 'Kumar'))")
sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(200,300,402)),map(21, 'Nalla', 22, 'Singh', 23, 'Gupta', 24, 'Kumar'))")
sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(200,300,403)),map(31, 'Nalla', 32, 'Singh', 33, 'Gupta', 34, 'Kumar'))")
sql("alter table adaptive compact 'minor' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(200, 300, 400))), Map(1 -> "Nalla", 2 -> "Singh", 3 -> "Gupta", 4 -> "Kumar")),
Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(200, 300, 401))), Map(11 -> "Nalla", 12 -> "Singh", 13 -> "Gupta", 14 -> "Kumar")),
Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(200, 300, 402))), Map(21 -> "Nalla", 22 -> "Singh", 23 -> "Gupta", 24 -> "Kumar")),
Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(200, 300, 403))), Map(31 -> "Nalla", 32 -> "Singh", 33 -> "Gupta", 34 -> "Kumar"))
))
sql("Drop table if exists adaptive")
}
test("Test major compaction with dictionary include for struct of array type") {
sql("DROP TABLE IF EXISTS compactComplex")
sql(
"CREATE TABLE compactComplex(CUST_ID string,YEAR int, MONTH int, AGE int, GENDER string,EDUCATED " +
"string,IS_MARRIED " +
"string," +
"STRUCT_OF_ARRAY struct<ID:int,CHECK_DATE:string,SNo:array<int>,sal1:array<double>," +
"state:array<string>," +
"date1:array<string>>,CARD_COUNT int,DEBIT_COUNT int,CREDIT_COUNT int, DEPOSIT double, " +
"HQ_DEPOSIT double) STORED AS carbondata")
sql(
s"LOAD DATA LOCAL INPATH '$resourcesPath/structofarray.csv' INTO TABLE compactComplex OPTIONS" +
s"('DELIMITER'=',','QUOTECHAR'='\\'," +
"'FILEHEADER'='CUST_ID,YEAR,MONTH,AGE, GENDER,EDUCATED,IS_MARRIED,STRUCT_OF_ARRAY," +
"CARD_COUNT," +
"DEBIT_COUNT,CREDIT_COUNT, DEPOSIT,HQ_DEPOSIT','COMPLEX_DELIMITER_LEVEL_1'='$', " +
"'COMPLEX_DELIMITER_LEVEL_2'='&')")
sql(
s"LOAD DATA LOCAL INPATH '$resourcesPath/structofarray.csv' INTO TABLE compactComplex OPTIONS" +
s"('DELIMITER'=',','QUOTECHAR'='\\'," +
"'FILEHEADER'='CUST_ID,YEAR,MONTH,AGE, GENDER,EDUCATED,IS_MARRIED,STRUCT_OF_ARRAY," +
"CARD_COUNT," +
"DEBIT_COUNT,CREDIT_COUNT, DEPOSIT,HQ_DEPOSIT','COMPLEX_DELIMITER_LEVEL_1'='$', " +
"'COMPLEX_DELIMITER_LEVEL_2'='&')")
sql(
s"LOAD DATA LOCAL INPATH '$resourcesPath/structofarray.csv' INTO TABLE compactComplex OPTIONS" +
s"('DELIMITER'=',','QUOTECHAR'='\\'," +
"'FILEHEADER'='CUST_ID,YEAR,MONTH,AGE,GENDER,EDUCATED,IS_MARRIED,STRUCT_OF_ARRAY," +
"CARD_COUNT," +
"DEBIT_COUNT,CREDIT_COUNT, DEPOSIT,HQ_DEPOSIT','COMPLEX_DELIMITER_LEVEL_1'='$', " +
"'COMPLEX_DELIMITER_LEVEL_2'='&')")
sql("ALTER TABLE compactComplex COMPACT 'major'")
checkAnswer(sql("Select count(*) from compactComplex"), Row(63))
}
test("Test Compaction for complex types with table restructured") {
sql("drop table if exists compactComplex")
sql(
"""
| create table compactComplex (
| name string,
| age int,
| number string,
| structfield struct<a:array<int> ,b:int>
| )
| stored as carbondata
""".stripMargin)
sql("INSERT into compactComplex values('man',25,'222',named_struct('a', array(1000,2000), 'b', 1))")
sql("INSERT into compactComplex values('can',24,'333',named_struct('a', array(1000,2000), 'b', 2))")
sql("INSERT into compactComplex values('dan',25,'222',named_struct('a', array(1000,2000), 'b', 3))")
sql("ALTER TABLE compactComplex drop columns(age)")
sql("ALTER TABLE compactComplex COMPACT 'major'")
checkAnswer(sql("SELECT * FROM compactComplex"),
Seq(Row("man", "222", Row(mutable.WrappedArray.make(Array(1000, 2000)), 1)),
Row("can", "333", Row(mutable.WrappedArray.make(Array(1000, 2000)), 2)),
Row("dan", "222", Row(mutable.WrappedArray.make(Array(1000, 2000)), 3))
))
}
}
| jackylk/incubator-carbondata | integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestCompactionComplexType.scala | Scala | apache-2.0 | 70,220 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.io.IOException
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
import org.apache.spark._
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeSet}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.SQLExecution
import org.apache.spark.sql.execution.command.RunnableCommand
import org.apache.spark.sql.internal.SQLConf
/**
* A command for writing data to a [[HadoopFsRelation]]. Supports both overwriting and appending.
* Writing to dynamic partitions is also supported. Each [[InsertIntoHadoopFsRelationCommand]]
* issues a single write job, and owns a UUID that identifies this job. Each concrete
* implementation of [[HadoopFsRelation]] should use this UUID together with task id to generate
* unique file path for each task output file. This UUID is passed to executor side via a
* property named `spark.sql.sources.writeJobUUID`.
*
* Different writer containers, [[DefaultWriterContainer]] and [[DynamicPartitionWriterContainer]]
* are used to write to normal tables and tables with dynamic partitions.
*
* Basic work flow of this command is:
*
* 1. Driver side setup, including output committer initialization and data source specific
* preparation work for the write job to be issued.
* 2. Issues a write job consists of one or more executor side tasks, each of which writes all
* rows within an RDD partition.
* 3. If no exception is thrown in a task, commits that task, otherwise aborts that task; If any
* exception is thrown during task commitment, also aborts that task.
* 4. If all tasks are committed, commit the job, otherwise aborts the job; If any exception is
* thrown during job commitment, also aborts the job.
*/
case class InsertIntoHadoopFsRelationCommand(
    outputPath: Path,
    partitionColumns: Seq[Attribute],
    bucketSpec: Option[BucketSpec],
    fileFormat: FileFormat,
    refreshFunction: () => Unit,
    options: Map[String, String],
    @transient query: LogicalPlan,
    mode: SaveMode)
  extends RunnableCommand {

  // Surface the query plan as a nested child so it shows up in EXPLAIN output.
  override protected def innerChildren: Seq[LogicalPlan] = query :: Nil

  override def run(sparkSession: SparkSession): Seq[Row] = {
    // Most formats don't do well with duplicate columns, so lets not allow that
    if (query.schema.fieldNames.length != query.schema.fieldNames.distinct.length) {
      val duplicateColumns = query.schema.fieldNames.groupBy(identity).collect {
        case (x, ys) if ys.length > 1 => "\"" + x + "\""
      }.mkString(", ")
      throw new AnalysisException(s"Duplicate column(s) : $duplicateColumns found, " +
        s"cannot save to file.")
    }

    val hadoopConf = sparkSession.sessionState.newHadoopConfWithOptions(options)
    val fs = outputPath.getFileSystem(hadoopConf)
    // Qualify the path up front so the existence check, the delete below, and the job's
    // output path all refer to the same filesystem location.
    val qualifiedOutputPath = outputPath.makeQualified(fs.getUri, fs.getWorkingDirectory)

    val pathExists = fs.exists(qualifiedOutputPath)
    // Translate (save mode, path exists) into a go/no-go decision. Note that Overwrite
    // clears the destination eagerly, before any job setup happens.
    val doInsertion = (mode, pathExists) match {
      case (SaveMode.ErrorIfExists, true) =>
        throw new AnalysisException(s"path $qualifiedOutputPath already exists.")
      case (SaveMode.Overwrite, true) =>
        if (!fs.delete(qualifiedOutputPath, true /* recursively */)) {
          throw new IOException(s"Unable to clear output " +
            s"directory $qualifiedOutputPath prior to writing to it")
        }
        true
      case (SaveMode.Append, _) | (SaveMode.Overwrite, _) | (SaveMode.ErrorIfExists, false) =>
        true
      case (SaveMode.Ignore, exists) =>
        !exists
      case (s, exists) =>
        throw new IllegalStateException(s"unsupported save mode $s ($exists)")
    }
    // If we are appending data to an existing dir.
    val isAppend = pathExists && (mode == SaveMode.Append)

    if (doInsertion) {
      val job = Job.getInstance(hadoopConf)
      job.setOutputKeyClass(classOf[Void])
      job.setOutputValueClass(classOf[InternalRow])
      FileOutputFormat.setOutputPath(job, qualifiedOutputPath)

      // Partition columns are encoded into directory names, not into the data files,
      // so the schema handed to the writer excludes them.
      val partitionSet = AttributeSet(partitionColumns)
      val dataColumns = query.output.filterNot(partitionSet.contains)

      val queryExecution = Dataset.ofRows(sparkSession, query).queryExecution
      SQLExecution.withNewExecutionId(sparkSession, queryExecution) {
        val relation =
          WriteRelation(
            sparkSession,
            dataColumns.toStructType,
            qualifiedOutputPath.toString,
            fileFormat.prepareWrite(sparkSession, _, options, dataColumns.toStructType),
            bucketSpec)

        // Plain tables use the simple writer; partitioned or bucketed tables need the
        // dynamic writer that routes each row to its per-partition/bucket output file.
        val writerContainer = if (partitionColumns.isEmpty && bucketSpec.isEmpty) {
          new DefaultWriterContainer(relation, job, isAppend)
        } else {
          new DynamicPartitionWriterContainer(
            relation,
            job,
            partitionColumns = partitionColumns,
            dataColumns = dataColumns,
            inputSchema = query.output,
            PartitioningUtils.DEFAULT_PARTITION_NAME,
            sparkSession.conf.get(SQLConf.PARTITION_MAX_FILES),
            isAppend)
        }

        // This call shouldn't be put into the `try` block below because it only initializes and
        // prepares the job, any exception thrown from here shouldn't cause abortJob() to be called.
        writerContainer.driverSideSetup()

        try {
          sparkSession.sparkContext.runJob(queryExecution.toRdd, writerContainer.writeRows _)
          writerContainer.commitJob()
          // Let the caller invalidate any cached file listings now that files have changed.
          refreshFunction()
        } catch { case cause: Throwable =>
          logError("Aborting job.", cause)
          writerContainer.abortJob()
          throw new SparkException("Job aborted.", cause)
        }
      }
    } else {
      logInfo("Skipping insertion into a relation that already exists.")
    }

    Seq.empty[Row]
  }
}
| gioenn/xSpark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/InsertIntoHadoopFsRelationCommand.scala | Scala | apache-2.0 | 6,875 |
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic.anyvals
import scala.collection.immutable.NumericRange
//
// Numbers greater than or equal to zero.
//
// (Pronounced like "posey".)
//
/**
* TODO
*
* @param value The <code>Double</code> value underlying this <code>PosZDouble</code>.
*/
final class PosZDouble private (val value: Double) extends AnyVal {

  /**
   * A string representation of this <code>PosZDouble</code>.
   */
  override def toString: String = s"PosZDouble($value)"

  /**
   * Converts this <code>PosZDouble</code> to a <code>Byte</code>.
   */
  def toByte: Byte = value.toByte

  /**
   * Converts this <code>PosZDouble</code> to a <code>Short</code>.
   */
  def toShort: Short = value.toShort

  /**
   * Converts this <code>PosZDouble</code> to a <code>Char</code>.
   */
  def toChar: Char = value.toChar

  /**
   * Converts this <code>PosZDouble</code> to an <code>Int</code>.
   */
  def toInt: Int = value.toInt

  /**
   * Converts this <code>PosZDouble</code> to a <code>Long</code>.
   */
  def toLong: Long = value.toLong

  /**
   * Converts this <code>PosZDouble</code> to a <code>Float</code>.
   */
  def toFloat: Float = value.toFloat

  /**
   * Converts this <code>PosZDouble</code> to a <code>Double</code>.
   */
  def toDouble: Double = value.toDouble

  /** Returns this value, unmodified. */
  def unary_+ : PosZDouble = this

  /** Returns the negation of this value. */
  def unary_- : Double = -value

  /**
   * Converts this <code>PosZDouble</code>'s value to a string then concatenates the given string.
   */
  def +(x: String): String = value + x

  /** Returns `true` if this value is less than x, `false` otherwise. */
  def <(x: Byte): Boolean = value < x

  /** Returns `true` if this value is less than x, `false` otherwise. */
  def <(x: Short): Boolean = value < x

  /** Returns `true` if this value is less than x, `false` otherwise. */
  def <(x: Char): Boolean = value < x

  /** Returns `true` if this value is less than x, `false` otherwise. */
  def <(x: Int): Boolean = value < x

  /** Returns `true` if this value is less than x, `false` otherwise. */
  def <(x: Long): Boolean = value < x

  /** Returns `true` if this value is less than x, `false` otherwise. */
  def <(x: Float): Boolean = value < x

  /** Returns `true` if this value is less than x, `false` otherwise. */
  def <(x: Double): Boolean = value < x

  /** Returns `true` if this value is less than or equal to x, `false` otherwise. */
  def <=(x: Byte): Boolean = value <= x

  /** Returns `true` if this value is less than or equal to x, `false` otherwise. */
  def <=(x: Short): Boolean = value <= x

  /** Returns `true` if this value is less than or equal to x, `false` otherwise. */
  def <=(x: Char): Boolean = value <= x

  /** Returns `true` if this value is less than or equal to x, `false` otherwise. */
  def <=(x: Int): Boolean = value <= x

  /** Returns `true` if this value is less than or equal to x, `false` otherwise. */
  def <=(x: Long): Boolean = value <= x

  /** Returns `true` if this value is less than or equal to x, `false` otherwise. */
  def <=(x: Float): Boolean = value <= x

  /** Returns `true` if this value is less than or equal to x, `false` otherwise. */
  def <=(x: Double): Boolean = value <= x

  /** Returns `true` if this value is greater than x, `false` otherwise. */
  def >(x: Byte): Boolean = value > x

  /** Returns `true` if this value is greater than x, `false` otherwise. */
  def >(x: Short): Boolean = value > x

  /** Returns `true` if this value is greater than x, `false` otherwise. */
  def >(x: Char): Boolean = value > x

  /** Returns `true` if this value is greater than x, `false` otherwise. */
  def >(x: Int): Boolean = value > x

  /** Returns `true` if this value is greater than x, `false` otherwise. */
  def >(x: Long): Boolean = value > x

  /** Returns `true` if this value is greater than x, `false` otherwise. */
  def >(x: Float): Boolean = value > x

  /** Returns `true` if this value is greater than x, `false` otherwise. */
  def >(x: Double): Boolean = value > x

  /** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
  def >=(x: Byte): Boolean = value >= x

  /** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
  def >=(x: Short): Boolean = value >= x

  /** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
  def >=(x: Char): Boolean = value >= x

  /** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
  def >=(x: Int): Boolean = value >= x

  /** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
  def >=(x: Long): Boolean = value >= x

  /** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
  def >=(x: Float): Boolean = value >= x

  /** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
  def >=(x: Double): Boolean = value >= x

  /** Returns the sum of this value and `x`. */
  def +(x: Byte): Double = value + x

  /** Returns the sum of this value and `x`. */
  def +(x: Short): Double = value + x

  /** Returns the sum of this value and `x`. */
  def +(x: Char): Double = value + x

  /** Returns the sum of this value and `x`. */
  def +(x: Int): Double = value + x

  /** Returns the sum of this value and `x`. */
  def +(x: Long): Double = value + x

  /** Returns the sum of this value and `x`. */
  def +(x: Float): Double = value + x

  /** Returns the sum of this value and `x`. */
  def +(x: Double): Double = value + x

  /** Returns the difference of this value and `x`. */
  def -(x: Byte): Double = value - x

  /** Returns the difference of this value and `x`. */
  def -(x: Short): Double = value - x

  /** Returns the difference of this value and `x`. */
  def -(x: Char): Double = value - x

  /** Returns the difference of this value and `x`. */
  def -(x: Int): Double = value - x

  /** Returns the difference of this value and `x`. */
  def -(x: Long): Double = value - x

  /** Returns the difference of this value and `x`. */
  def -(x: Float): Double = value - x

  /** Returns the difference of this value and `x`. */
  def -(x: Double): Double = value - x

  /** Returns the product of this value and `x`. */
  def *(x: Byte): Double = value * x

  /** Returns the product of this value and `x`. */
  def *(x: Short): Double = value * x

  /** Returns the product of this value and `x`. */
  def *(x: Char): Double = value * x

  /** Returns the product of this value and `x`. */
  def *(x: Int): Double = value * x

  /** Returns the product of this value and `x`. */
  def *(x: Long): Double = value * x

  /** Returns the product of this value and `x`. */
  def *(x: Float): Double = value * x

  /** Returns the product of this value and `x`. */
  def *(x: Double): Double = value * x

  /** Returns the quotient of this value and `x`. */
  def /(x: Byte): Double = value / x

  /** Returns the quotient of this value and `x`. */
  def /(x: Short): Double = value / x

  /** Returns the quotient of this value and `x`. */
  def /(x: Char): Double = value / x

  /** Returns the quotient of this value and `x`. */
  def /(x: Int): Double = value / x

  /** Returns the quotient of this value and `x`. */
  def /(x: Long): Double = value / x

  /** Returns the quotient of this value and `x`. */
  def /(x: Float): Double = value / x

  /** Returns the quotient of this value and `x`. */
  def /(x: Double): Double = value / x

  /** Returns the remainder of the division of this value by `x`. */
  def %(x: Byte): Double = value % x

  /** Returns the remainder of the division of this value by `x`. */
  def %(x: Short): Double = value % x

  /** Returns the remainder of the division of this value by `x`. */
  def %(x: Char): Double = value % x

  /** Returns the remainder of the division of this value by `x`. */
  def %(x: Int): Double = value % x

  /** Returns the remainder of the division of this value by `x`. */
  def %(x: Long): Double = value % x

  /** Returns the remainder of the division of this value by `x`. */
  def %(x: Float): Double = value % x

  /** Returns the remainder of the division of this value by `x`. */
  def %(x: Double): Double = value % x

  // Stuff from RichDouble

  /** Returns `true` if this value is positive infinity, `false` otherwise. */
  def isPosInfinity: Boolean = Double.PositiveInfinity == value

  /**
   * Returns the greater of this value and `that`.
   *
   * NOTE(review): if either operand is `NaN`, `math.max` yields `NaN`, so the
   * `== value` test is false and `that` is returned — confirm this is the
   * intended NaN behavior.
   */
  def max(that: PosZDouble): PosZDouble = if (math.max(value, that.value) == value) this else that

  /**
   * Returns the lesser of this value and `that`.
   *
   * NOTE(review): same NaN caveat as `max` — with a `NaN` operand `that` is
   * always returned.
   */
  def min(that: PosZDouble): PosZDouble = if (math.min(value, that.value) == value) this else that

  /**
   * Returns `true` if this value has no fractional part. The extra clauses keep
   * values beyond `Long` range (which truncate to `Long.MaxValue`/`Long.MinValue`)
   * from being misreported, while still treating the infinities as non-whole.
   */
  def isWhole = {
    val longValue = value.toLong
    longValue.toDouble == value || longValue == Long.MaxValue && value < Double.PositiveInfinity || longValue == Long.MinValue && value > Double.NegativeInfinity
  }

  /**
   * Rounds this value to the nearest `PosZLong`. The `.get` relies on the value
   * being non-negative (enforced by `PosZDouble.from`; presumably also by the
   * companion's literal macro — TODO confirm), so rounding cannot go negative.
   */
  def round: PosZLong = PosZLong.from(math.round(value)).get

  /** Returns the smallest `PosZDouble` whole value that is not less than this value. */
  def ceil: PosZDouble = PosZDouble.from(math.ceil(value)).get

  /** Returns the largest `PosZDouble` whole value that is not greater than this value. */
  def floor: PosZDouble = PosZDouble.from(math.floor(value)).get

  /** Converts an angle measured in degrees to an approximately equivalent
   * angle measured in radians.
   *
   * @return the measurement of the angle x in radians.
   */
  def toRadians: Double = math.toRadians(value)

  /** Converts an angle measured in radians to an approximately equivalent
   * angle measured in degrees.
   * @return the measurement of the angle x in degrees.
   */
  def toDegrees: Double = math.toDegrees(value)

  // adapted from RichInt:
  /**
   * @param end The final bound of the range to make.
   * @return A [[scala.collection.immutable.Range.Partial[Double, NumericRange[Double]]]] from `this` up to but
   * not including `end`.
   */
  def until(end: Double): Range.Partial[Double, NumericRange[Double]] =
    value.until(end)

  /**
   * @param end The final bound of the range to make.
   * @param step The number to increase by for each step of the range.
   * @return A [[scala.collection.immutable.NumericRange.Exclusive[Double]]] from `this` up to but
   * not including `end`.
   */
  def until(end: Double, step: Double): NumericRange.Exclusive[Double] =
    value.until(end, step)

  /**
   * @param end The final bound of the range to make.
   * @return A [[scala.collection.immutable.Range.Partial[Double, NumericRange[Double]]]] from `'''this'''` up to
   * and including `end`.
   */
  def to(end: Double): Range.Partial[Double, NumericRange[Double]] =
    value.to(end)

  /**
   * @param end The final bound of the range to make.
   * @param step The number to increase by for each step of the range.
   * @return A [[scala.collection.immutable.NumericRange.Inclusive[Double]]] from `'''this'''` up to
   * and including `end`.
   */
  def to(end: Double, step: Double): NumericRange.Inclusive[Double] =
    value.to(end, step)
}
object PosZDouble {

  /**
   * Returns `Some[PosZDouble]` wrapping the given value if it is non-negative,
   * else `None`. Note that `NaN >= 0.0` is false, so `NaN` also yields `None`.
   */
  def from(value: Double): Option[PosZDouble] =
    if (value >= 0.0) Some(new PosZDouble(value)) else None

  import language.experimental.macros
  import scala.language.implicitConversions

  // Implicit macro: validates Double literals at compile time, so only literal
  // values can be converted directly; runtime values must go through `from`.
  implicit def apply(value: Double): PosZDouble = macro PosZDoubleMacro.apply

  /** Implicitly widens a `PosZDouble` to its underlying `Double` value. */
  implicit def widenToDouble(poz: PosZDouble): Double = poz.value
}
| cheeseng/scalatest | scalactic-macro/src/main/scala/org/scalactic/anyvals/PosZDouble.scala | Scala | apache-2.0 | 11,616 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package support
import config.PbikAppConfig
import connectors.HmrcTierConnector
import javax.inject.Inject
import models.{AuthenticatedRequest, EiLPerson}
import services.EiLListService
import uk.gov.hmrc.http.HeaderCarrier
import utils.URIInformation
import scala.concurrent.Future
/**
 * Test double for [[EiLListService]]: skips the HMRC tier connector entirely and
 * always resolves the current-year exclusion list with a fixed set of six people.
 */
class StubEiLListService @Inject()(
  pbikAppConfig: PbikAppConfig,
  tierConnector: HmrcTierConnector,
  uRIInformation: URIInformation)
    extends EiLListService(pbikAppConfig, tierConnector, uRIInformation) {

  // Canned fixture data. The six entries visibly vary which optional fields are
  // populated (middle name, worksPayrollNumber, DOB, gender, NINO suffix).
  private lazy val ListOfPeople: List[EiLPerson] = List(
    EiLPerson("AA111111", "John", Some("Stones"), "Smith", Some("123"), Some("01/01/1980"), Some("male"), Some(10), 0),
    EiLPerson("AB111111", "Adam", None, "Smith", None, Some("01/01/1980"), Some("male"), None, 0),
    EiLPerson(
      "AC111111",
      "Humpty",
      Some("Alexander"),
      "Dumpty",
      Some("123"),
      Some("01/01/1980"),
      Some("male"),
      Some(10),
      0),
    EiLPerson("AD111111", "Peter", Some("James"), "Johnson", None, None, None, None, 0),
    EiLPerson(
      "AE111111",
      "Alice",
      Some("In"),
      "Wonderland",
      Some("123"),
      Some("03/02/1978"),
      Some("female"),
      Some(10),
      0),
    EiLPerson(
      "AF111111",
      "Humpty",
      Some("Alexander"),
      "Dumpty",
      Some("123"),
      Some("01/01/1980"),
      Some("male"),
      Some(10),
      0)
  )

  // Ignores iabdType and year: every lookup yields the full fixture list.
  override def currentYearEiL(iabdType: String, year: Int)(
    implicit hc: HeaderCarrier,
    request: AuthenticatedRequest[_]): Future[List[EiLPerson]] =
    Future.successful(ListOfPeople)
}
/**
 * Variant of [[StubEiLListService]] whose exclusion list contains exactly one person.
 *
 * The original implementation duplicated the parent's entire six-person fixture
 * list only to return its head; this version keeps just the single person that is
 * actually returned, which is value-identical to the head of the parent's list.
 */
class StubEiLListServiceOneExclusion @Inject()(
  pbikAppConfig: PbikAppConfig,
  tierConnector: HmrcTierConnector,
  uRIInformation: URIInformation)
    extends StubEiLListService(pbikAppConfig, tierConnector, uRIInformation) {

  // Single fixture person, byte-for-byte the head of the parent's fixture list.
  private lazy val singleExclusion: EiLPerson =
    EiLPerson("AA111111", "John", Some("Stones"), "Smith", Some("123"), Some("01/01/1980"), Some("male"), Some(10), 0)

  // Ignores iabdType and year: every lookup yields exactly one excluded person.
  override def currentYearEiL(iabdType: String, year: Int)(
    implicit hc: HeaderCarrier,
    request: AuthenticatedRequest[_]): Future[List[EiLPerson]] =
    Future.successful(List(singleExclusion))
}
| hmrc/pbik-frontend | test/support/StubEiLListService.scala | Scala | apache-2.0 | 3,557 |
/*
* Copyright 2013 - 2020 Outworkers Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.outworkers.phantom.thrift.tests.tjson.suites
import com.outworkers.phantom.finagle._
import com.outworkers.phantom.thrift.tests.ThriftRecord
import com.outworkers.phantom.thrift.tests.tjson.TJsonSuite
import com.outworkers.util.samplers._
/**
 * Exercises phantom's list-modification operators (prepend, append, discard,
 * setIdx) against a Cassandra list column of Thrift structs. Each test stores a
 * random [[ThriftRecord]], mutates its `thriftList` column, then reads the column
 * back and checks the resulting sequence. The "with Twitter Futures" variants
 * mirror the plain tests using the Twitter-future API surface.
 */
class ThriftListOperations extends TJsonSuite {
  it should "prepend an item to a thrift list column" in {
    val sample = gen[ThriftRecord]
    val sample2 = gen[ThriftTest]

    val operation = for {
      insertDone <- thriftDb.thriftColumnTable.store(sample).future()
      update <- thriftDb.thriftColumnTable
        .update.where(_.id eqs sample.id)
        .modify(_.thriftList prepend sample2)
        .future()
      select <- thriftDb.thriftColumnTable.select(_.thriftList).where(_.id eqs sample.id).one
    } yield select

    whenReady(operation) { items =>
      items shouldBe defined
      items.value shouldEqual (sample2 :: sample.thriftList)
    }
  }

  it should "prepend an item to a thrift list column with Twitter Futures" in {
    val sample = gen[ThriftRecord]
    val sample2 = gen[ThriftTest]

    val operation = for {
      insertDone <- thriftDb.thriftColumnTable.store(sample).future()
      update <- thriftDb.thriftColumnTable
        .update.where(_.id eqs sample.id)
        .modify(_.thriftList prepend sample2)
        .future()
      select <- thriftDb.thriftColumnTable.select(_.thriftList).where(_.id eqs sample.id).one()
    } yield select

    whenReady(operation) { items =>
      items shouldBe defined
      items.value shouldEqual (sample2 :: sample.thriftList)
    }
  }

  // Cassandra servers older than 2.0.13 applied multi-element prepends in reverse
  // order, so the expected prefix depends on the server version under test.
  it should "prepend several items to a thrift list column" in {
    val sample = gen[ThriftRecord]
    val appendable = genList[ThriftTest]()

    val prependedValues = if (cassandraVersion.value < Version.`2.0.13`) appendable.reverse else appendable

    val operation = for {
      insertDone <- thriftDb.thriftColumnTable.store(sample).future()
      update <- thriftDb.thriftColumnTable.update
        .where(_.id eqs sample.id)
        .modify(_.thriftList prepend appendable)
        .future()
      select <- thriftDb.thriftColumnTable.select(_.thriftList).where(_.id eqs sample.id).one
    } yield select

    whenReady(operation) { items =>
      items shouldBe defined
      items.value shouldEqual prependedValues ::: sample.thriftList
    }
  }

  it should "prepend several items to a thrift list column with Twitter Futures" in {
    val sample = gen[ThriftRecord]
    val appendable = genList[ThriftTest]()

    val prependedValues = if (cassandraVersion.value < Version.`2.0.13`) appendable.reverse else appendable

    val operation = for {
      insertDone <- thriftDb.thriftColumnTable.store(sample).future()
      update <- thriftDb.thriftColumnTable.update
        .where(_.id eqs sample.id)
        .modify(_.thriftList prepend appendable)
        .future()
      select <- thriftDb.thriftColumnTable.select(_.thriftList).where(_.id eqs sample.id).one()
    } yield select

    whenReady(operation) { items =>
      items shouldBe defined
      items.value shouldEqual prependedValues ::: sample.thriftList
    }
  }

  it should "append an item to a thrift list column" in {
    val sample = gen[ThriftRecord]
    val sample2 = gen[ThriftTest]

    val operation = for {
      insertDone <- thriftDb.thriftColumnTable.store(sample).future()
      update <- thriftDb.thriftColumnTable.update
        .where(_.id eqs sample.id)
        .modify(_.thriftList append sample2)
        .future()
      select <- thriftDb.thriftColumnTable.select(_.thriftList).where(_.id eqs sample.id).one
    } yield select

    whenReady(operation) { items =>
      items shouldBe defined
      items.value shouldEqual sample.thriftList :+ sample2
    }
  }

  it should "append an item to a thrift list column with Twitter Futures" in {
    val sample = gen[ThriftRecord]
    val sample2 = gen[ThriftTest]

    val operation = for {
      insertDone <- thriftDb.thriftColumnTable.store(sample).future()
      update <- thriftDb.thriftColumnTable.update
        .where(_.id eqs sample.id)
        .modify(_.thriftList append sample2)
        .future()
      select <- thriftDb.thriftColumnTable.select(_.thriftList).where(_.id eqs sample.id).one()
    } yield select

    whenReady(operation) { items =>
      items shouldBe defined
      items.value shouldEqual sample.thriftList :+ sample2
    }
  }

  it should "append several items to a thrift list column" in {
    val sample = gen[ThriftRecord]
    val sample2 = genList[ThriftTest]()

    val operation = for {
      insertDone <- thriftDb.thriftColumnTable.store(sample).future()
      update <- thriftDb.thriftColumnTable.update
        .where(_.id eqs sample.id)
        .modify(_.thriftList append sample2)
        .future()
      select <- thriftDb.thriftColumnTable.select(_.thriftList).where(_.id eqs sample.id).one
    } yield select

    whenReady(operation) { items =>
      items shouldBe defined
      items.value shouldEqual (sample.thriftList ::: sample2)
    }
  }

  it should "append several items to a thrift list column with Twitter Futures" in {
    val sample = gen[ThriftRecord]
    val sample2 = genList[ThriftTest]()

    val operation = for {
      insertDone <- thriftDb.thriftColumnTable.store(sample).future()
      update <- thriftDb.thriftColumnTable.update
        .where(_.id eqs sample.id)
        .modify(_.thriftList append sample2)
        .future()
      select <- thriftDb.thriftColumnTable.select(_.thriftList).where(_.id eqs sample.id).one()
    } yield select

    whenReady(operation) { items =>
      items shouldBe defined
      items.value shouldEqual (sample.thriftList ::: sample2)
    }
  }

  // `discard` removes all occurrences of the element; expectations use `diff`.
  it should "remove an item from a thrift list column" in {
    val sample = gen[ThriftRecord]
    val sample2 = gen[ThriftTest]

    val operation = for {
      _ <- thriftDb.thriftColumnTable.store(sample).future
      update <- thriftDb.thriftColumnTable
        .update.where(_.id eqs sample.id)
        .modify(_.thriftList discard sample2)
        .future()
      select <- thriftDb.thriftColumnTable
        .select(_.thriftList)
        .where(_.id eqs sample.id)
        .one
    } yield select

    whenReady(operation) { items =>
      items shouldBe defined
      items.value shouldEqual (sample.thriftList diff List(sample2))
    }
  }

  it should "remove an item from a thrift list column with Twitter Futures" in {
    val sample = gen[ThriftRecord]
    val sample2 = gen[ThriftTest]

    val operation = for {
      _ <- thriftDb.thriftColumnTable.store(sample).future()
      update <- thriftDb.thriftColumnTable
        .update.where(_.id eqs sample.id)
        .modify(_.thriftList discard sample2)
        .future()
      select <- thriftDb.thriftColumnTable
        .select(_.thriftList)
        .where(_.id eqs sample.id)
        .one()
    } yield select

    whenReady(operation) { items =>
      items shouldBe defined
      items.value shouldEqual (sample.thriftList diff List(sample2))
    }
  }

  it should "remove several items from a thrift list column" in {
    val sample = gen[ThriftRecord]
    val removables = genList[ThriftTest]()

    val operation = for {
      insertDone <- thriftDb.thriftColumnTable.store(sample).future()
      update <- thriftDb.thriftColumnTable.update.where(_.id eqs sample.id)
        .modify(_.thriftList discard removables)
        .future()
      select <- thriftDb.thriftColumnTable.select(_.thriftList).where(_.id eqs sample.id).one
    } yield select

    whenReady(operation) { items =>
      items shouldBe defined
      items.value shouldEqual (sample.thriftList diff removables)
    }
  }

  it should "remove several items from a thrift list column with Twitter Futures" in {
    val sample = gen[ThriftRecord]
    val removables = genList[ThriftTest]()

    val operation = for {
      insertDone <- thriftDb.thriftColumnTable.store(sample).future()
      update <- thriftDb.thriftColumnTable.update.where(_.id eqs sample.id)
        .modify(_.thriftList discard removables)
        .future()
      select <- thriftDb.thriftColumnTable.select(_.thriftList).where(_.id eqs sample.id).one()
    } yield select

    whenReady(operation) { items =>
      items shouldBe defined
      items.value shouldEqual (sample.thriftList diff removables)
    }
  }

  it should "set an index to a given value" in {
    val sample = gen[ThriftRecord]
    val sample2 = gen[ThriftTest]

    val operation = for {
      insertDone <- thriftDb.thriftColumnTable.store(sample).future
      update <- thriftDb.thriftColumnTable.update
        .where(_.id eqs sample.id)
        .modify(_.thriftList setIdx(0, sample2))
        .future()
      select <- thriftDb.thriftColumnTable.select(_.thriftList).where(_.id eqs sample.id).one
    } yield select

    whenReady(operation) { items =>
      items shouldBe defined
      items.value.isDefinedAt(2) shouldEqual true
      items.value should contain (sample2)
    }
  }

  it should "set an index to a given value with Twitter Futures" in {
    val sample = gen[ThriftRecord]
    val sample2 = gen[ThriftTest]

    val operation = for {
      insertDone <- thriftDb.thriftColumnTable.store(sample).future()
      update <- thriftDb.thriftColumnTable.update
        .where(_.id eqs sample.id)
        .modify(_.thriftList setIdx(0, sample2))
        .future()
      select <- thriftDb.thriftColumnTable.select(_.thriftList).where(_.id eqs sample.id).one()
    } yield select

    whenReady(operation) { items =>
      items shouldBe defined
      items.value should contain (sample2)
    }
  }

  it should "set a non-zero index to a given value" in {
    val sample = gen[ThriftRecord]
    val sample2 = gen[ThriftTest]

    val operation = for {
      insertDone <- thriftDb.thriftColumnTable.store(sample).future()
      update <- thriftDb.thriftColumnTable.update.where(_.id eqs sample.id).modify(_.thriftList setIdx(2, sample2)).future()
      select <- thriftDb.thriftColumnTable.select(_.thriftList).where(_.id eqs sample.id).one
    } yield select

    whenReady(operation) { items =>
      items shouldBe defined
      items.value should contain (sample2)
    }
  }

  it should "set a non-zero index to a given value with Twitter Futures" in {
    val sample = gen[ThriftRecord]
    val sample2 = gen[ThriftTest]

    val operation = for {
      insertDone <- thriftDb.thriftColumnTable.store(sample).future()
      update <- thriftDb.thriftColumnTable.update.where(_.id eqs sample.id).modify(_.thriftList setIdx(2, sample2)).future()
      select <- thriftDb.thriftColumnTable.select(_.thriftList).where(_.id eqs sample.id).one()
    } yield select

    whenReady(operation) { items =>
      items shouldBe defined
      items.value should contain (sample2)
    }
  }
}
| outworkers/phantom | phantom-thrift/src/test/scala/com/outworkers/phantom/thrift/tests/tjson/suites/ThriftListOperations.scala | Scala | apache-2.0 | 11,377 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.io.File
import kafka.utils._
import junit.framework.Assert._
import java.util.{Random, Properties}
import kafka.consumer.SimpleConsumer
import org.junit.{After, Before, Test}
import kafka.message.{NoCompressionCodec, ByteBufferMessageSet, Message}
import kafka.zk.ZooKeeperTestHarness
import org.scalatest.junit.JUnit3Suite
import kafka.admin.AdminUtils
import kafka.api.{PartitionOffsetRequestInfo, FetchRequestBuilder, OffsetRequest}
import kafka.utils.TestUtils._
import kafka.common.{ErrorMapping, TopicAndPartition}
import kafka.utils.nonthreadsafe
import kafka.utils.threadsafe
import org.junit.After
import org.junit.Before
import org.junit.Test
class LogOffsetTest extends JUnit3Suite with ZooKeeperTestHarness {
  // Source of randomness for tests that pick an arbitrary partition suffix.
  val random = new Random()
  // Broker log directory; derived from the generated broker config in setUp and
  // deleted again in tearDown.
  var logDir: File = null
  // NOTE(review): not assigned or read in the tests visible here — possibly unused.
  var topicLogDir: File = null
  // Single-broker fixture, (re)created per test by setUp and shut down by tearDown.
  var server: KafkaServer = null
  var logSize: Int = 100
  val brokerPort: Int = 9099
  // Consumer pointed at the local broker, created in setUp.
  var simpleConsumer: SimpleConsumer = null
  // Mock clock passed to the broker so tests control time-dependent behavior.
  var time: Time = new MockTime()
@Before
override def setUp() {
super.setUp()
val config: Properties = createBrokerConfig(1, brokerPort)
val logDirPath = config.getProperty("log.dir")
logDir = new File(logDirPath)
time = new MockTime()
server = TestUtils.createServer(new KafkaConfig(config), time)
simpleConsumer = new SimpleConsumer("localhost", brokerPort, 1000000, 64*1024, "")
}
@After
override def tearDown() {
simpleConsumer.close
server.shutdown
Utils.rm(logDir)
super.tearDown()
}
@Test
def testGetOffsetsForUnknownTopic() {
val topicAndPartition = TopicAndPartition("foo", 0)
val request = OffsetRequest(
Map(topicAndPartition -> PartitionOffsetRequestInfo(OffsetRequest.LatestTime, 10)))
val offsetResponse = simpleConsumer.getOffsetsBefore(request)
assertEquals(ErrorMapping.UnknownTopicOrPartitionCode,
offsetResponse.partitionErrorAndOffsets(topicAndPartition).error)
}
@Test
def testGetOffsetsBeforeLatestTime() {
val topicPartition = "kafka-" + 0
val topic = topicPartition.split("-").head
val part = Integer.valueOf(topicPartition.split("-").last).intValue
// setup brokers in zookeeper as owners of partitions for this test
AdminUtils.createTopic(zkClient, topic, 1, 1)
val logManager = server.getLogManager
val log = logManager.createLog(TopicAndPartition(topic, part), logManager.defaultConfig)
val message = new Message(Integer.toString(42).getBytes())
for(i <- 0 until 20)
log.append(new ByteBufferMessageSet(NoCompressionCodec, message))
log.flush()
val offsets = server.apis.fetchOffsets(logManager, TopicAndPartition(topic, part), OffsetRequest.LatestTime, 10)
assertEquals(Seq(20L, 16L, 12L, 8L, 4L, 0L), offsets)
waitUntilTrue(() => isLeaderLocalOnBroker(topic, part, server), 1000)
val topicAndPartition = TopicAndPartition(topic, part)
val offsetRequest = OffsetRequest(
Map(topicAndPartition -> PartitionOffsetRequestInfo(OffsetRequest.LatestTime, 10)),
replicaId = 0)
val consumerOffsets =
simpleConsumer.getOffsetsBefore(offsetRequest).partitionErrorAndOffsets(topicAndPartition).offsets
assertEquals(Seq(20L, 16L, 12L, 8L, 4L, 0L), consumerOffsets)
// try to fetch using latest offset
val fetchResponse = simpleConsumer.fetch(
new FetchRequestBuilder().addFetch(topic, 0, consumerOffsets.head, 300 * 1024).build())
assertFalse(fetchResponse.messageSet(topic, 0).iterator.hasNext)
}
@Test
def testEmptyLogsGetOffsets() {
val topicPartition = "kafka-" + random.nextInt(10)
val topicPartitionPath = getLogDir.getAbsolutePath + "/" + topicPartition
topicLogDir = new File(topicPartitionPath)
topicLogDir.mkdir
val topic = topicPartition.split("-").head
// setup brokers in zookeeper as owners of partitions for this test
AdminUtils.createTopic(zkClient, topic, 1, 1)
TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, 0, 500)
var offsetChanged = false
for(i <- 1 to 14) {
val topicAndPartition = TopicAndPartition(topic, 0)
val offsetRequest =
OffsetRequest(Map(topicAndPartition -> PartitionOffsetRequestInfo(OffsetRequest.EarliestTime, 1)))
val consumerOffsets =
simpleConsumer.getOffsetsBefore(offsetRequest).partitionErrorAndOffsets(topicAndPartition).offsets
if(consumerOffsets(0) == 1) {
offsetChanged = true
}
}
assertFalse(offsetChanged)
}
@Test
def testGetOffsetsBeforeNow() {
val topicPartition = "kafka-" + random.nextInt(3)
val topic = topicPartition.split("-").head
val part = Integer.valueOf(topicPartition.split("-").last).intValue
// setup brokers in zookeeper as owners of partitions for this test
AdminUtils.createTopic(zkClient, topic, 3, 1)
val logManager = server.getLogManager
val log = logManager.createLog(TopicAndPartition(topic, part), logManager.defaultConfig)
val message = new Message(Integer.toString(42).getBytes())
for(i <- 0 until 20)
log.append(new ByteBufferMessageSet(NoCompressionCodec, message))
log.flush()
val now = time.milliseconds + 30000 // pretend it is the future to avoid race conditions with the fs
val offsets = server.apis.fetchOffsets(logManager, TopicAndPartition(topic, part), now, 10)
assertEquals(Seq(20L, 16L, 12L, 8L, 4L, 0L), offsets)
waitUntilTrue(() => isLeaderLocalOnBroker(topic, part, server), 1000)
val topicAndPartition = TopicAndPartition(topic, part)
val offsetRequest = OffsetRequest(Map(topicAndPartition -> PartitionOffsetRequestInfo(now, 10)), replicaId = 0)
val consumerOffsets =
simpleConsumer.getOffsetsBefore(offsetRequest).partitionErrorAndOffsets(topicAndPartition).offsets
assertEquals(Seq(20L, 16L, 12L, 8L, 4L, 0L), consumerOffsets)
}
@Test
def testGetOffsetsBeforeEarliestTime() {
val topicPartition = "kafka-" + random.nextInt(3)
val topic = topicPartition.split("-").head
val part = Integer.valueOf(topicPartition.split("-").last).intValue
// setup brokers in zookeeper as owners of partitions for this test
AdminUtils.createTopic(zkClient, topic, 3, 1)
val logManager = server.getLogManager
val log = logManager.createLog(TopicAndPartition(topic, part), logManager.defaultConfig)
val message = new Message(Integer.toString(42).getBytes())
for(i <- 0 until 20)
log.append(new ByteBufferMessageSet(NoCompressionCodec, message))
log.flush()
val offsets = server.apis.fetchOffsets(logManager, TopicAndPartition(topic, part), OffsetRequest.EarliestTime, 10)
assertEquals(Seq(0L), offsets)
waitUntilTrue(() => isLeaderLocalOnBroker(topic, part, server), 1000)
val topicAndPartition = TopicAndPartition(topic, part)
val offsetRequest =
OffsetRequest(Map(topicAndPartition -> PartitionOffsetRequestInfo(OffsetRequest.EarliestTime, 10)))
val consumerOffsets =
simpleConsumer.getOffsetsBefore(offsetRequest).partitionErrorAndOffsets(topicAndPartition).offsets
assertEquals(Seq(0L), consumerOffsets)
}
private def createBrokerConfig(nodeId: Int, port: Int): Properties = {
val props = new Properties
props.put("broker.id", nodeId.toString)
props.put("port", port.toString)
props.put("log.dir", getLogDir.getAbsolutePath)
props.put("log.flush.interval.messages", "1")
props.put("enable.zookeeper", "false")
props.put("num.partitions", "20")
props.put("log.retention.hours", "10")
props.put("log.retention.check.interval.ms", (5*1000*60).toString)
props.put("log.segment.bytes", logSize.toString)
props.put("zookeeper.connect", zkConnect.toString)
props
}
private def getLogDir(): File = {
val dir = TestUtils.tempDir()
dir
}
}
| fintler/kafka | core/src/test/scala/unit/kafka/server/LogOffsetTest.scala | Scala | apache-2.0 | 8,617 |
/**
* *************************************************************************
* This file is part of GTuring. *
* *
* GTuring is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the License, or *
* (at your option) any later version. *
* *
* GTuring is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with GTuring. If not, see <http://www.gnu.org/licenses/>. *
* *
* *************************************************************************
*/
package gnieh.turing.bytecode
package v2
/**
* A TBC instruction.
*
* @author Lucas Satabin
*
*/
sealed trait Instruction

// Each case below is one TBC opcode. The comments are inferred from the
// names/fields only — confirm against the bytecode VM specification.
// Discard `n` entries (presumably from a stack — TODO confirm).
case class Pop(n: Short) extends Instruction
// Tape reads in three variants; (offset, register) appears to address the destination.
case class Read(tape: Byte, compare: Short, offset: Int, register: Byte) extends Instruction
case class ARead(tape: Byte, offset: Int, register: Byte) extends Instruction
case class SRead(tape: Byte, offset: Int, register: Byte) extends Instruction
// Tape writes: literal character vs. value addressed via (offset, register).
case class Write(tape: Byte, char: Short) extends Instruction
case class SWrite(tape: Byte, offset: Int, register: Byte) extends Instruction
// Head movement by `of` cells; p/m suffix suggests plus/minus direction — TODO confirm.
case class Movep(tape: Byte, of: Short) extends Instruction
case class Movem(tape: Byte, of: Short) extends Instruction
case class Jump(offset: Int, register: Byte) extends Instruction
// Loads: literal value, current tape character, and two addressed variants.
case class Loadl(value: Short) extends Instruction
case class Loadc(tape: Byte) extends Instruction
case class Load(offset: Int, register: Byte) extends Instruction
case class SLoad(offset: Int, register: Byte) extends Instruction
// Invoke a named machine with the given parameter types on `tape`.
case class Call(tape: Byte, name: String, paramTypes: List[Type]) extends Instruction
case class Return(offset: Int, register: Byte) extends Instruction
// Terminates execution.
case object End extends Instruction
case class TAlloc(size: Int) extends Instruction | satabin/gniehturing | bytecode/src/main/scala/gnieh/turing/bytecode/v2/Instruction.scala | Scala | gpl-3.0 | 2,534 |
/**
* Licensed to Gravity.com under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Gravity.com licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gander.images
import org.jsoup.nodes.Document
import gander.utils.{CanLog, Logging}
import org.slf4j.Logger
/**
* Created by Jim Plush
* User: jim
* Date: 8/18/11
*/
/**
 * Represents an image file we've downloaded and stored on local disk.
 *
 * @param imgSrc        original source URL of the image
 * @param localFileName path of the file on disk
 * @param linkhash      hash identifier associating the image with its page/link
 * @param bytes         file size in bytes
 * @param fileExtension extension of the stored file ("" when unknown)
 * @param height        image height in pixels (0 when not measured)
 * @param width         image width in pixels (0 when not measured)
 */
case class LocallyStoredImage(imgSrc: String,
                              localFileName: String,
                              linkhash: String,
                              bytes: Long,
                              fileExtension: String = "",
                              height: Int = 0,
                              width: Int = 0)
/**
 * Contract for image-extraction strategies over a parsed Jsoup [[Document]].
 *
 * All of the logging members below simply delegate to the companion
 * [[ImageExtractor]] object so every implementation shares one logger and the
 * common "images: " log prefix.
 */
trait ImageExtractor extends CanLog {

  /** The parsed HTML document to extract images from. */
  protected def doc: Document

  /** Returns the best candidate image for the document, if any was found. */
  def getBestImage(): Option[Image]

  // ---- logging delegation to the companion object ----

  protected def logPrefix: String = ImageExtractor.loggingPrefix

  protected def critical(msg: String, refs: Any*): Unit = {
    ImageExtractor.critical(msg, refs: _*)
  }

  protected def critical(t: Throwable, msg: String, refs: Any*): Unit = {
    ImageExtractor.critical(t, msg, refs: _*)
  }

  protected def debug(msg: String, refs: Any*): Unit = {
    ImageExtractor.debug(msg, refs: _*)
  }

  protected def debug(t: Throwable, msg: String, refs: Any*): Unit = {
    ImageExtractor.debug(t, msg, refs: _*)
  }

  protected def info(msg: String, refs: Any*): Unit = {
    ImageExtractor.info(msg, refs: _*)
  }

  protected def info(t: Throwable, msg: String, refs: Any*): Unit = {
    ImageExtractor.info(t, msg, refs: _*)
  }

  protected def logger: Logger = ImageExtractor.logger

  protected def trace(msg: String, refs: Any*): Unit = {
    ImageExtractor.trace(msg, refs: _*)
  }

  protected def trace(t: Throwable, msg: String, refs: Any*): Unit = {
    ImageExtractor.trace(t, msg, refs: _*)
  }

  protected def warn(msg: String, refs: Any*): Unit = {
    ImageExtractor.warn(msg, refs: _*)
  }

  protected def warn(t: Throwable, msg: String, refs: Any*): Unit = {
    ImageExtractor.warn(t, msg, refs: _*)
  }
}
/** Shared [[Logging]] instance and log-message prefix for all extractors. */
object ImageExtractor extends Logging {
  val loggingPrefix = "images: "
}
| lloydmeta/gander | src/main/scala/gander/images/ImageExtractor.scala | Scala | apache-2.0 | 2,801 |
package fp
import fp.MonadSpecification.monadLaws
import fp.SeedSpecification.arbSeed
import org.scalacheck.Arbitrary.arbitrary
import org.scalacheck.Gen.Parameters.default
import org.scalacheck.Gen.{const, listOfN}
import org.scalacheck.rng.Seed
import org.scalacheck.{Arbitrary, Cogen, Gen, Properties}
/**
 * Checks that ScalaCheck's [[Gen]] satisfies the monad laws, using the
 * generic `monadLaws` property from MonadSpecification.
 */
object GenSpecification extends Properties("Gen") {

  // Monad instance for Gen: `unit` is `const`, `flatMap` delegates to Gen's own flatMap.
  implicit val genMonad = new Monad[Gen] {
    def unit[A](a: => A) = const(a)
    override def flatMap[A, B](ma: Gen[A])(f: A => Gen[B]) = ma flatMap f
  }

  // Equality of generators is approximated by sampling: two generators are
  // considered equal when they produce equal values for 100 arbitrary seeds.
  implicit def genEqual[A: Equal : Arbitrary]: Equal[Gen[A]] =
    (g1, g2) =>
      listOfN(100, arbitrary[Seed]).sample.get.forall { seed =>
        g1.pureApply(default, seed) equal g2.pureApply(default, seed)
      }

  // Arbitrary generators are built by mapping an arbitrary function A => A
  // over a constant arbitrary value.
  implicit def arbGen[A: Arbitrary : Cogen]: Arbitrary[Gen[A]] = Arbitrary {
    for {
      g <- arbitrary[A]
      f <- arbitrary[A => A]
    } yield g map f
  }

  include(monadLaws[Gen])
}
| adamgfraser/fp | src/test/scala/fp/GenSpecification.scala | Scala | apache-2.0 | 933 |
package org.scalatra
package scalate
import org.fusesource.scalate.layout.DefaultLayoutStrategy
import org.scalatra.test.specs2.ScalatraSpec
/**
 * End-to-end spec for ScalateSupport: mounts a servlet that renders templates
 * via every supported mechanism (layoutTemplate plus the jade/scaml/ssp/mustache
 * helpers), then asserts on the rendered HTTP responses.
 */
class ScalateSupportSpec extends ScalatraSpec {
  def is =
    "ScalateSupport should" ^
      "render uncaught errors with 500.scaml" ! e1 ^ br ^
      "not throw a NullPointerException for trivial requests" ! e2 ^ br ^
      "render a simple template" ! e3 ^ br ^
      "render a simple template with params" ! e4 ^ br ^
      "looks for layouts in /WEB-INF/layouts" ! e5 ^ br ^
      "generate a url from a template" ! e6 ^ br ^
      "generate a url with params from a template" ! e7 ^ br ^
      "render a simple template via jade method" ! e8 ^ br ^
      "render a simple template with params via jade method" ! e9 ^ br ^
      "render a simple template via scaml method" ! e10 ^ br ^
      "render a simple template with params via scaml method" ! e11 ^ br ^
      "render a simple template via ssp method" ! e12 ^ br ^
      "render a simple template with params via ssp method" ! e13 ^ br ^
      "render a simple template via mustache method" ! e14 ^ br ^
      "render a simple template with params via mustache method" ! e15 ^ br ^
      "looks for templates in legacy /WEB-INF/scalate/templates" ! e16 ^ br ^
      "looks for index page if no template found" ! e17 ^ br ^
      "implicitly bind flash" ! e18 ^ br ^
      "implicitly bind session" ! e19 ^ br ^
      "implicitly bind params" ! e20 ^ br ^
      "implicitly bind multiParams" ! e21 ^ br ^
      "set templateAttributes when creating a render context" ! e22 ^ br ^
      "render to a string instead of response" ! e23 ^ br ^
      "set status to 500 when rendering 500.scaml" ! e24 ^ br ^
      end

  // Fixture servlet: one route per rendering scenario exercised above.
  addServlet(new ScalatraServlet with ScalateSupport with ScalateUrlGeneratorSupport with FlashMapSupport {
    get("/barf") {
      throw new RuntimeException
    }
    get("/happy-happy") {
      "puppy dogs"
    }
    get("/simple-template") {
      layoutTemplate("/simple.jade")
    }
    get("/params") {
      layoutTemplate("/params.jade", "foo" -> "Configurable")
    }
    get("/jade-template") {
      jade("simple")
    }
    get("/jade-params") {
      jade("params", "foo" -> "Configurable")
    }
    get("/scaml-template") {
      scaml("simple")
    }
    get("/scaml-params") {
      scaml("params", "foo" -> "Configurable")
    }
    get("/ssp-template") {
      ssp("simple")
    }
    get("/ssp-params") {
      ssp("params", "foo" -> "Configurable")
    }
    get("/mustache-template") {
      mustache("simple")
    }
    get("/mustache-params") {
      mustache("params", "foo" -> "Configurable")
    }
    // Exposes the engine's configured default-layout search paths for e5.
    get("/layout-strategy") {
      templateEngine.layoutStrategy.asInstanceOf[DefaultLayoutStrategy].defaultLayouts mkString ";"
    }
    // Named routes so templates can reverse-generate URLs (e6/e7).
    val urlGeneration = get("/url-generation") {
      layoutTemplate("/urlGeneration.jade")
    }
    val urlGenerationWithParams = get("/url-generation-with-params/:a/vs/:b") {
      layoutTemplate("/urlGenerationWithParams.jade", ("a" -> params("a")), ("b" -> params("b")))
    }
    get("/legacy-view-path") {
      jade("legacy")
    }
    get("/directory") {
      jade("directory/index")
    }
    // Splat route: renders the template named by the request path, with flash
    // and session values set so the template can read them implicitly.
    get("/bindings/*") {
      flash.now("message") = "flash works"
      session("message") = "session works"
      jade(requestPath)
    }
    get("/bindings/params/:foo") {
      jade("/bindings/params")
    }
    get("/bindings/multiParams/*/*") {
      jade("/bindings/multiParams")
    }
    get("/template-attributes") {
      templateAttributes("foo") = "from attributes"
      scaml("params")
    }
    // Renders to a string and smuggles it out via a response header (e23).
    get("/render-to-string") {
      response.setHeader("X-Template-Output", layoutTemplate("simple"))
    }
  }, "/*")

  def e1 = get("/barf") {
    body must contain("id=\\"scalate-error\\"")
  }

  def e2 = get("/happy-happy") {
    body must_== "puppy dogs"
  }

  def e3 = get("/simple-template") {
    body must_== "<div>Jade template</div>\\n"
  }

  def e4 = get("/params") {
    body must_== "<div>Configurable template</div>\\n"
  }

  // Testing the default layouts is going to be hard, but we can at least
  // verify that it's looking in the right place.
  def e5 = get("/layout-strategy") {
    body must_== (List(
      "/WEB-INF/templates/layouts/default.mustache",
      "/WEB-INF/templates/layouts/default.ssp",
      "/WEB-INF/templates/layouts/default.scaml",
      "/WEB-INF/templates/layouts/default.jade",
      "/WEB-INF/layouts/default.mustache",
      "/WEB-INF/layouts/default.ssp",
      "/WEB-INF/layouts/default.scaml",
      "/WEB-INF/layouts/default.jade",
      "/WEB-INF/scalate/layouts/default.mustache",
      "/WEB-INF/scalate/layouts/default.ssp",
      "/WEB-INF/scalate/layouts/default.scaml",
      "/WEB-INF/scalate/layouts/default.jade"
    ) mkString ";")
  }

  def e6 = get("/url-generation") {
    body must_== "/url-generation\\n"
  }

  def e7 = get("/url-generation-with-params/jedi/vs/sith") {
    body must_== "/url-generation-with-params/jedi/vs/sith\\n"
  }

  def e8 = get("/jade-template") {
    body must_== "<div>Jade template</div>\\n"
  }

  def e9 = get("/jade-params") {
    body must_== "<div>Configurable template</div>\\n"
  }

  def e10 = get("/scaml-template") {
    body must_== "<div>Scaml template</div>\\n"
  }

  def e11 = get("/scaml-params") {
    body must_== "<div>Configurable template</div>\\n"
  }

  def e12 = get("/ssp-template") {
    body must_== "<div>SSP template</div>"
  }

  def e13 = get("/ssp-params") {
    body must_== "<div>Configurable template</div>\\n"
  }

  def e14 = get("/mustache-template") {
    body must_== "<div>Mustache template</div>\\n"
  }

  def e15 = get("/mustache-params") {
    body must_== "<div>Configurable template</div>\\n"
  }

  def e16 = get("/legacy-view-path") {
    body must_== "<p>legacy</p>\\n"
  }

  def e17 = get("/directory") {
    body must_== "<p>index</p>\\n"
  }

  def e18 = get("/bindings/flash") {
    body must_== "<div>flash works</div>\\n"
  }

  def e19 = get("/bindings/session") {
    body must_== "<div>session works</div>\\n"
  }

  def e20 = get("/bindings/params/bar") {
    body must_== "<div>bar</div>\\n"
  }

  def e21 = get("/bindings/multiParams/bar/baz") {
    body must_== "<div>bar;baz</div>\\n"
  }

  def e22 = get("/template-attributes") {
    body must_== "<div>from attributes template</div>\\n"
  }

  def e23 = get("/render-to-string") {
    val hdr = header("X-Template-Output")
    hdr must_== "<div>SSP template</div>"
  }

  def e24 = get("/barf") {
    status must_== 500
  }
}
| lightvector/scalatra | scalate/src/test/scala/org/scalatra/scalate/ScalateSupportSpec.scala | Scala | bsd-2-clause | 6,543 |
package models.tenant
/** A type of shift, identified by a mnemonic with an accompanying description. */
case class ShiftType(mnemonic: String, description: String)
| thomastoye/speelsysteem | app/models/tenant/ShiftType.scala | Scala | gpl-2.0 | 83 |
package com.alanjz.meerkat.evaluation
trait ThreefoldRepetitionFilter {
} | spacenut/meerkat-chess | src/com/alanjz/meerkat/evaluation/ThreefoldRepetitionFilter.scala | Scala | gpl-2.0 | 75 |
package com.twitter.finatra.http
import com.twitter.conversions.DurationOps._
import com.twitter.inject.server.EmbeddedTwitterServer
import com.twitter.inject.server.PortUtils
import com.twitter.inject.server.Ports
import com.twitter.inject.server.info
import com.twitter.util.jackson.ScalaObjectMapper
import com.twitter.util.Await
import com.twitter.util.Closable
import com.twitter.util.Promise
import net.codingwell.scalaguice.typeLiteral
import scala.collection.JavaConverters._
/** Internal utility which represents an http client to external interfaces of an [[EmbeddedTwitterServer]] */
/** Internal utility which represents an http client to external interfaces of an [[EmbeddedTwitterServer]] */
private[twitter] trait ExternalHttpClient { self: EmbeddedTwitterServer =>

  /**
   * Underlying Embedded TwitterServer exposed as a [[com.twitter.inject.server.Ports]]
   * @return the underlying TwitterServer as a [[com.twitter.inject.server.Ports]].
   */
  def twitterServer: Ports

  /**
   * The expected flag that sets the external port for serving the underlying Http service.
   * @return a String representing the Http port flag.
   * @see [[com.twitter.app.Flag]]
   */
  def httpPortFlag: String = "http.port"

  /**
   * The expected flag that sets the external port for serving the underlying Http service.
   * The default "" means "unset". To enable the default Finatra HTTPS port, set the value to `https.port`.
   * @return a String representing the Https port flag.
   * @see [[com.twitter.app.Flag]]
   */
  def httpsPortFlag: String = ""

  /** Provide an override to the underlying server's mapper */
  def mapperOverride: Option[ScalaObjectMapper]

  /** Provide an override to the external HTTPS client */
  private[twitter] def httpsClientOverride: Option[JsonAwareEmbeddedHttpClient] = None

  /* Overrides */

  /** Logs the external http and/or https host and port of the underlying EmbeddedHttpServer */
  // NOTE(review): `self.logStartup()` is a call through the self-type alias (i.e. `this`),
  // not a `super` call — confirm the intended dispatch with the trait linearization.
  override protected[twitter] def logStartup(): Unit = {
    self.logStartup()
    if (twitterServer.httpExternalPort.isDefined) {
      info(s"ExternalHttp -> http://$externalHttpHostAndPort", disableLogging)
    }
    if (twitterServer.httpsExternalPort.isDefined) {
      info(s"ExternalHttps -> https://$externalHttpsHostAndPort", disableLogging)
    }
  }

  /**
   * Adds the [[httpPortFlag]] with a value pointing to the ephemeral loopback address to
   * the list of flags to be passed to the underlying server.
   * @see [[PortUtils.ephemeralLoopback]].
   */
  override protected[twitter] def combineArgs(): Array[String] = {
    configurePortFlag(Option(httpPortFlag)) ++
      configurePortFlag(Option(httpsPortFlag)) ++
      self.combineArgs
  }

  /* Public */

  /** A `host:post` String of the loopback and external "http" port for the underlying embedded HttpServer */
  lazy val externalHttpHostAndPort: String = {
    PortUtils.loopbackAddressForPort(httpExternalPort())
  }

  /** A `host:post` String of the loopback and external "https" port for the underlying embedded HttpServer */
  lazy val externalHttpsHostAndPort: String = {
    PortUtils.loopbackAddressForPort(httpsExternalPort())
  }

  /** Supplements an absolute path URI with the http scheme and authority */
  def fullHttpURI(path: String): String = {
    s"http://$externalHttpHostAndPort$path"
  }

  /** Supplements an absolute path URI with the https scheme and authority */
  def fullHttpsURI(path: String): String = {
    s"https://$externalHttpsHostAndPort$path"
  }

  /* Promise that signals that the underlying twitterServer's httpPort has been bound */
  private[this] val httpPortReady: Promise[Unit] = EmbeddedTwitterServer.isPortReady(
    twitterServer,
    twitterServer.httpExternalPort.isDefined && twitterServer.httpExternalPort.get != 0
  )

  /* Promise that signals that the underlying twitterServer's httpsPort has been bound */
  private[this] val httpsPortReady: Promise[Unit] = EmbeddedTwitterServer.isPortReady(
    twitterServer,
    twitterServer.httpsExternalPort.isDefined && twitterServer.httpsExternalPort.get != 0
  )

  /** The assigned external "http" port for the underlying embedded HttpServer */
  // Starts the server if needed, then blocks (up to 5s) until the port is bound.
  def httpExternalPort(): Int = {
    start()
    Await.ready(httpPortReady, 5.seconds)
    twitterServer.httpExternalPort.get
  }

  /** The assigned external "https" port for the underlying embedded HttpServer */
  // Starts the server if needed, then blocks (up to 5s) until the port is bound.
  def httpsExternalPort(): Int = {
    start()
    Await.ready(httpsPortReady, 5.seconds)
    twitterServer.httpsExternalPort.get
  }

  /**
   * The embedded [[ScalaObjectMapper]]. When the underlying embedded HttpServer is an injectable
   * TwitterServer and has configured an object mapper, this will represent the server's configured
   * object mapper, otherwise it is a default instantiation of the [[ScalaObjectMapper]].
   *
   * @see [[ScalaObjectMapper(injector: Injector)]]
   */
  final lazy val mapper: ScalaObjectMapper = {
    if (isInjectable) {
      // if there is an object mapper bound, use it as the default otherwise create a new one
      val default =
        if (injector.underlying.findBindingsByType(typeLiteral[ScalaObjectMapper]).asScala.nonEmpty)
          injector.instance[ScalaObjectMapper]
        else ScalaObjectMapper()
      mapperOverride.getOrElse(default)
    } else {
      ScalaObjectMapper()
    }
  }

  /** Lazily-built plaintext client against the external http port; closed on exit. */
  final lazy val httpClient: JsonAwareEmbeddedHttpClient = {
    val client = new JsonAwareEmbeddedHttpClient(
      "httpClient",
      httpExternalPort(),
      tls = false,
      sessionAcquisitionTimeout = 1.second,
      streamResponses = streamResponse,
      defaultHeaders = () => defaultRequestHeaders,
      mapper,
      disableLogging = self.disableLogging
    )
    closeOnExit(client)
    client
  }

  /** Lazily-built TLS client against the external https port (unless overridden); closed on exit. */
  final lazy val httpsClient: JsonAwareEmbeddedHttpClient = httpsClientOverride.getOrElse {
    val client = new JsonAwareEmbeddedHttpClient(
      "httpsClient",
      httpsExternalPort(),
      tls = true,
      sessionAcquisitionTimeout = 1.second,
      streamResponses = streamResponse,
      defaultHeaders = () => defaultRequestHeaders,
      mapper,
      disableLogging = self.disableLogging
    )
    closeOnExit(client)
    client
  }

  /** Registers the client for closing on exit; a no-op Closable is used when the server never started. */
  final def closeOnExit(client: JsonAwareEmbeddedHttpClient): Unit = closeOnExit {
    if (isStarted) {
      Closable.make { deadline =>
        info(s"Closing embedded http client: ${client.label}", disableLogging)
        client.close(deadline)
      }
    } else Closable.nop
  }

  // Translates a non-empty port flag name into a `-flag=ephemeralLoopback` argument;
  // empty/absent flags contribute nothing.
  private[this] def configurePortFlag(flagOpt: Option[String]): Array[String] = flagOpt match {
    case Some(flg) if flg.nonEmpty =>
      Array(s"-$flg=${PortUtils.ephemeralLoopback}")
    case _ => Array.empty[String]
  }
}
| twitter/finatra | http-server/src/test/scala/com/twitter/finatra/http/ExternalHttpClient.scala | Scala | apache-2.0 | 6,593 |
package colossus.protocols.redis
import org.scalatest._
import akka.util.ByteString
import UnifiedProtocol._
import colossus.core.DataBuffer
import colossus.util.DataSize._
import colossus.util.ParseException
/**
 * Unit tests for the Redis command parser: unified-protocol and inline
 * commands, fragmented input, malformed frames, and the size limit.
 */
class FastCommandSuite extends FlatSpec with Matchers {
  //def commandParser = new SuperFastCommandParser
  def commandParser = RedisCommandParser.command

  "SuperFastCommandParser" should "parse a command" in {
    val command = ByteString("*3\\r\\n$3\\r\\nGET\\r\\n$4\\r\\nakey\\r\\n$3\\r\\nfoo\\r\\n")
    val expected = Some(Command(CMD_GET, Seq(ByteString("akey"), ByteString("foo"))))
    val parser = commandParser
    val actual = parser.parse(DataBuffer.fromByteString(command))
    actual should equal(expected)
  }

  // Split the frame at every possible byte boundary and verify the parser
  // resumes correctly from partial input.
  it should "parse a fragmented command" in {
    val command = ByteString("*3\\r\\n$3\\r\\nGET\\r\\n$4\\r\\nakey\\r\\n$3\\r\\nfoo\\r\\n")
    val expected = Some(Command(CMD_GET, Seq(ByteString("akey"), ByteString("foo"))))
    (1 until command.size).foreach { i =>
      val (p1, p2) = command.splitAt(i)
      val parser = commandParser
      parser.parse(DataBuffer.fromByteString(p1))
      val actual = parser.parse(DataBuffer.fromByteString(p2))
      actual should equal(expected)
    }
  }

  // Bulk-string lengths are explicit, so CRLF inside an argument is legal.
  it should "parse newline in argument" in {
    val command = ByteString("*1\\r\\n$4\\r\\n\\r\\n\\r\\n\\r\\n")
    val expected = Some(Command("\\r\\n\\r\\n"))
    val parser = commandParser
    parser.parse(DataBuffer(command)) should equal(expected)
  }

  it should "parse inline command" in {
    val command = ByteString("SET FOO bar\\r\\n")
    val expected = Some(Command(CMD_SET, Seq(ByteString("FOO"), ByteString("bar"))))
    val parser = commandParser
    val actual = parser.parse(DataBuffer.fromByteString(command))
    actual should equal(expected)
  }

  // Two frames in one buffer must yield two successive parse results.
  it should "parse two commands separately" in {
    val command1 = ByteString("*3\\r\\n$3\\r\\nGET\\r\\n$4\\r\\nakey\\r\\n$3\\r\\nfoo\\r\\n")
    val command2 = ByteString("*3\\r\\n$3\\r\\nGET\\r\\n$4\\r\\nbkey\\r\\n$3\\r\\nbar\\r\\n")
    val commands = DataBuffer.fromByteString(command1 ++ command2)
    val parser = commandParser
    parser.parse(commands) should equal(Some(Command("GET", "akey", "foo")))
    parser.parse(commands) should equal(Some(Command("GET", "bkey", "bar")))
  }

  // command1 declares 4 arguments but supplies 3, so the parser keeps waiting
  // (None); feeding the next frame then surfaces a ParseException.
  it should "reject command with too few arguments" in {
    val command1 = ByteString("*4\\r\\n$3\\r\\nGET\\r\\n$4\\r\\nakey\\r\\n$3\\r\\nfoo\\r\\n")
    val command2 = ByteString("*3\\r\\n$3\\r\\nGET\\r\\n$4\\r\\nakey\\r\\n$3\\r\\nfoo\\r\\n")
    val parser = commandParser
    parser.parse(DataBuffer(command1)) should equal(None)
    intercept[ParseException] {
      parser.parse(DataBuffer(command2))
    }
  }

  it should "reject command with too-short argument" in {
    val command1 = ByteString("*3\\r\\n$3\\r\\nGET\\r\\n$4\\r\\nak\\r\\n$3\\r\\nfoo\\r\\n")
    val parser = commandParser
    intercept[ParseException] {
      parser.parse(DataBuffer(command1))
    }
  }

  it should "reject command with too-long argument" in {
    val command1 = ByteString("*3\\r\\n$3\\r\\nGET\\r\\n$4\\r\\nakOMGWTF\\r\\n$3\\r\\nfoo\\r\\n")
    val parser = commandParser
    intercept[ParseException] {
      parser.parse(DataBuffer(command1))
    }
  }

  // The size limit is inclusive: exactly at the limit parses ...
  it should "allow command under size limit" in {
    val command = ByteString("*1\\r\\n$3\\r\\nGET\\r\\n")
    val expected = Some(Command(CMD_GET))
    val parser = RedisCommandParser(command.size.bytes)
    val actual = parser.parse(DataBuffer.fromByteString(command))
    actual should equal(expected)
  }

  // ... while one byte over the limit throws.
  it should "reject command over size limit" in {
    val command = ByteString("*1\\r\\n$3\\r\\nGET\\r\\n")
    val expected = Some(Command(CMD_GET))
    val parser = RedisCommandParser((command.size - 1).bytes)
    intercept[ParseException] {
      parser.parse(DataBuffer(command))
    }
  }
}
/**
 * Unit tests for the Redis reply parser: status, bulk, nil, and multi-bulk
 * replies, including fragmented input and large integers.
 */
class FastReplySuite extends FlatSpec with Matchers {
  def replyParser = RedisReplyParser()

  "FastReplyParser" should "parse status reply" in {
    val reply = ByteString("+OK\\r\\n")
    val parser = replyParser
    val actual = parser.parse(DataBuffer.fromByteString(reply))
    actual should equal(Some(StatusReply("OK")))
  }

  it should "parse bulk reply" in {
    val reply = ByteString("$5\\r\\nabcde\\r\\n")
    val parser = replyParser
    parser.parse(DataBuffer.fromByteString(reply)) should equal(Some(BulkReply(ByteString("abcde"))))
  }

  // "$-1" is the nil bulk reply.
  it should "parse nil reply" in {
    val reply = ByteString("$-1\\r\\n")
    val parser = replyParser
    parser.parse(DataBuffer.fromByteString(reply)) should equal(Some(NilReply))
  }

  // "*0" is an empty array, distinct from the nil array "*-1" below.
  it should "parse empty list reply" in {
    val reply = ByteString("*0\\r\\n")
    val parser = replyParser
    parser.parse(DataBuffer.fromByteString(reply)) should equal(Some(EmptyMBulkReply))
  }

  it should "parse nil mbulk reply" in {
    val reply = ByteString("*-1\\r\\n")
    val parser = replyParser
    parser.parse(DataBuffer.fromByteString(reply)) should equal(Some(NilMBulkReply))
  }

  // Split at every interior boundary; the first half must yield None and the
  // second half must complete the reply.
  it should "parse bulk reply in fragments" in {
    val reply = ByteString("$5\\r\\nabcde\\r\\n")
    (1 until reply.size - 1).foreach { i =>
      val parser = replyParser
      val (p1, p2) = reply.splitAt(i)
      parser.parse(DataBuffer.fromByteString(p1)) should equal(None)
      parser.parse(DataBuffer.fromByteString(p2)) should equal(Some(BulkReply(ByteString("abcde"))))
    }
  }

  it should "parse mbulk reply" in {
    val reply = ByteString("*2\\r\\n$5\\r\\nabcde\\r\\n$3\\r\\n123\\r\\n")
    val parser = replyParser
    val expected = Some(MBulkReply(Seq(BulkReply(ByteString("abcde")), BulkReply(ByteString("123")))))
    parser.parse(DataBuffer.fromByteString(reply)) should equal(expected)
  }

  it should "parse mbulk in fragments" in {
    val reply = ByteString("*2\\r\\n$5\\r\\nabcde\\r\\n$3\\r\\n123\\r\\n")
    (1 until reply.size - 1).foreach { i =>
      val parser = replyParser
      val expected = Some(MBulkReply(Seq(BulkReply(ByteString("abcde")), BulkReply(ByteString("123")))))
      val (p1, p2) = reply.splitAt(i)
      parser.parse(DataBuffer.fromByteString(p1)) should equal(None)
      parser.parse(DataBuffer.fromByteString(p2)) should equal(expected)
    }
  }

  // Round-trips an IntegerReply larger than Int.MaxValue through its own raw encoding.
  it should "parse large integer" in {
    val reply = IntegerReply(89840626838L)
    val parser = replyParser
    parser.parse(reply.raw) should equal(Some(reply))
  }
}
| tumblr/colossus | colossus-tests/src/test/scala/colossus/protocols/redis/RedisParserSpec.scala | Scala | apache-2.0 | 6,265 |
package org.bitcoins.commons.util
import com.typesafe.config.{Config, ConfigFactory}
import org.bitcoins.commons.config.AppConfig
import java.nio.file.{Path, Paths}
/** Parses the correct datadir given the possible input sources for datadir config
* 1. The --datadir command line flag
* 2. Inferring the datadir based on the bitcoin network configured
* 3. ??? Anything else i'm forgetting ????
*/
/** Parses the correct datadir given the possible input sources for datadir config
  * 1. The --datadir command line flag
  * 2. Inferring the datadir based on the bitcoin network configured
  * 3. ??? Anything else i'm forgetting ????
  */
case class DatadirParser(
    serverArgs: ServerArgParser,
    customFinalDirOpt: Option[String]) {

  /** Sets the default data dir, overridden by the --datadir option */
  private lazy val datadirPath: Path = serverArgs.datadirOpt match {
    case None          => AppConfig.DEFAULT_BITCOIN_S_DATADIR
    case Some(datadir) => datadir
  }

  // Config fragment pinning bitcoin-s.datadir to the resolved path.
  lazy val datadirConfig: Config =
    ConfigFactory.parseString(
      s"bitcoin-s.datadir = ${AppConfig.safePathToString(datadirPath)}")

  // Config fragment for the network chosen via the --network CLI flag, if any.
  lazy val networkConfig: Config = serverArgs.networkOpt match {
    case Some(network) =>
      val networkStr = DatadirUtil.networkStrToDirName(network.name)
      ConfigFactory.parseString(s"bitcoin-s.network = $networkStr")
    case None => ConfigFactory.empty()
  }

  // Merged configuration. Note the precedence difference: without --conf,
  // the network fragment is layered inside getBaseConfig; with --conf, the
  // explicit file is parsed first and the network fragment takes precedence
  // via withFallback.
  lazy val baseConfig: Config = {
    serverArgs.configOpt match {
      case None =>
        AppConfig
          .getBaseConfig(datadirPath, Vector(networkConfig))
          .withFallback(datadirConfig)
          .resolve()
      case Some(config) =>
        val conf = ConfigFactory
          .parseFile(config.toFile)
          .withFallback(datadirConfig)
          .resolve()
        networkConfig.withFallback(conf)
    }
  }

  /** Base directory for all bitcoin-s data. This is the resulting datadir from
    * the --datadir option and all configuration files.
    */
  lazy val datadir: Path =
    Paths.get(baseConfig.getString("bitcoin-s.datadir"))

  /** Directory specific for current network or custom dir
    * Examples are
    * HOME/.bitcoin-s/mainnet
    * HOME/.bitcoin-s/testnet3
    * HOME/.bitcoin-s/oracle
    */
  def networkDir: Path =
    DatadirUtil.getFinalDatadir(datadir, baseConfig, customFinalDirOpt)
}
| bitcoin-s/bitcoin-s | app-commons/src/main/scala/org/bitcoins/commons/util/DatadirParser.scala | Scala | mit | 2,098 |
package com.rasterfoundry.common.cache
import java.util.concurrent.Executors
import cats.data._
import com.rasterfoundry.common.{Config, RfStackTrace, RollbarNotifier}
import com.github.blemale.scaffeine.{Cache, Scaffeine}
import com.google.common.util.concurrent.ThreadFactoryBuilder
import com.typesafe.scalalogging.LazyLogging
import net.spy.memcached._
import scala.annotation.tailrec
import scala.concurrent._
import scala.util.{Failure, Success}
object CacheClientThreadPool extends RollbarNotifier {

  // Dedicated fixed-size pool for cache-client work, sized from
  // Config.memcached.threads. Threads are named "cache-client-N" so they are
  // easy to identify in thread dumps.
  implicit lazy val ec: ExecutionContext =
    ExecutionContext.fromExecutor(
      Executors.newFixedThreadPool(
        Config.memcached.threads,
        new ThreadFactoryBuilder().setNameFormat("cache-client-%d").build()
      )
    )
}
class CacheClient(client: => MemcachedClient)
    extends LazyLogging
    with RollbarNotifier {

  import CacheClientThreadPool._

  // Feature switches. Both memcached and the in-process cache are currently
  // hard-disabled (the Config lookups are commented out), so getOrElseUpdate
  // always runs the expensive operation directly.
  val cacheEnabled: Boolean = false // Config.memcached.enabled
  val localCacheEnabled: Boolean = false // Config.memcached.localCacheEnabled
  val localCacheSize: Int = Config.memcached.localCacheSize
  val keySize: Int = Config.memcached.keySize

  // Retry budget for waiting on in-flight local-cache computations ("AWAIT"):
  // up to 30s total, spinning in 25ms sleeps.
  val retryMaxMillis = 30000
  val retrySleepMillis = 25
  val maxRetryDepth: Int = retryMaxMillis / retrySleepMillis

  // In-process Caffeine cache (via Scaffeine) that fronts memcached.
  val localCache: Cache[String, Option[Any]] =
    Scaffeine()
      .maximumSize(localCacheSize)
      .build[String, Option[Any]]()

  // Memcached keys have a maximum length; truncate anything longer.
  // NOTE(review): truncation makes distinct long keys collide — presumably
  // acceptable for this workload; confirm keySize matches the server limit.
  private def abbreviateKey(key: String): String =
    if (key.length <= Config.memcached.keySize) key
    else key.substring(0, keySize - 1)

  // Runs `body` against the abbreviated form of `key`.
  private def withAbbreviatedKey[T](key: String)(body: String => T): T =
    body(abbreviateKey(key))

  /** Deletes the key from memcached; no-op while caching is disabled. */
  def delete(key: String): Unit =
    if (cacheEnabled) {
      withAbbreviatedKey(key)(client.delete)
      ()
    } else { () }

  /** Asynchronously stores `value` under `key` with the given TTL
   * (0 = no expiry). Failures are logged and reported, never propagated.
   */
  def setValue[T](key: String, value: T, ttlSeconds: Int = 0): Unit =
    withAbbreviatedKey(key) { key =>
      logger.debug(s"Setting Key: $key with TTL $ttlSeconds")
      val f = Future {
        client.set(key, ttlSeconds, value)
      }
      f.onComplete {
        case Failure(e) =>
          logger.error(s"Error ${e.getMessage}")
          sendError(e)
        case Success(_) => ()
      }
    }

  // Suppress asInstanceOf warning because we can't pattern match on the returned type since it's
  // eliminated by type erasure
  @SuppressWarnings(Array("AsInstanceOf"))
  @tailrec
  final def localGetOrElse[CachedType](
      cacheKey: String,
      expensiveOperation: => Future[Option[CachedType]],
      doCache: Boolean = true,
      depth: Int = 0)(
      fallbackFunction: (String,
                         => Future[Option[CachedType]],
                         Boolean) => Future[Option[CachedType]]
  ): Future[Option[CachedType]] = {
    // in order not to break tailrec
    val key = abbreviateKey(cacheKey)

    // Computes the value via `fallbackFunction` (typically the memcached path),
    // publishing an "AWAIT" sentinel first so concurrent readers spin instead
    // of duplicating the expensive operation.
    def fallback: Future[Option[CachedType]] = {
      // Signal to other cache reads that the operation in already in progress
      localCache.put(key, Some("AWAIT"))
      // Use the fallback function to retrieve the value and cache it
      val fallbackFuture: Future[Option[CachedType]] =
        fallbackFunction(key, expensiveOperation, doCache)
      fallbackFuture.onComplete {
        case Success(cachedValueO) =>
          localCache.put(key, cachedValueO)
        case Failure(e) =>
          sendError(RfStackTrace(e))
          logger.error(s"Cache set error at local cache: ${RfStackTrace(e)}")
      }
      fallbackFuture
    }
    localCache.getIfPresent(key) match {
      // The requested key is in the local cache
      case Some(cachedValueO) =>
        cachedValueO match {
          // The requested key is already being computed, try again
          case Some("AWAIT") if depth < maxRetryDepth =>
            // Blocking sleep on the calling thread between retries.
            Thread.sleep(retrySleepMillis)
            localGetOrElse(key, expensiveOperation, doCache, depth + 1)(
              fallbackFunction)
          case Some(cachedValue) if depth < maxRetryDepth =>
            logger.debug(s"Local Cache Hit: $key")
            Future.successful(Some(cachedValue.asInstanceOf[CachedType]))
          case _ =>
            // Either the retry budget is exhausted or the cached value was
            // None; recompute via the fallback path.
            fallback
        }
      // The requested key is not present in the local cache, so do the else function
      case _ =>
        // Load the local cache with the result of the else function
        fallback
    }
  }

  // Suppress asInstanceOf warning because we can't pattern match on the returned type since it's
  // eliminated by type erasure
  @SuppressWarnings(Array("AsInstanceOf"))
  def getOrElseUpdateMemcached[CachedType](
      cacheKey: String,
      expensiveOperation: => Future[Option[CachedType]],
      doCache: Boolean = true
  ): Future[Option[CachedType]] = withAbbreviatedKey(cacheKey) { cacheKey =>
    // asyncGet(...).get() blocks a pool thread until memcached responds.
    val futureCached = Future { client.asyncGet(cacheKey).get() }
    futureCached.flatMap(
      {
        case null =>
          logger.debug(s"Cache Miss: $cacheKey")
          val futureCached: Future[Option[CachedType]] = expensiveOperation
          futureCached.onComplete {
            case Success(cachedValue) =>
              cachedValue match {
                case Some(_) =>
                  if (doCache) {
                    setValue(cacheKey, cachedValue)
                  }
                case None =>
                  // Cache negative results with a short (300s) TTL so missing
                  // values are re-checked sooner than present ones.
                  if (doCache) {
                    setValue(cacheKey, cachedValue, ttlSeconds = 300)
                  }
              }
            case Failure(e) =>
              sendError(RfStackTrace(e))
              logger.error(s"Cache Set Error: ${RfStackTrace(e)}")
          }
          futureCached
        case o =>
          logger.debug(s"Cache Hit: $cacheKey")
          Future.successful(o.asInstanceOf[Option[CachedType]])
      }
    )
  }

  /** Main read-through entry point. Routes to the local cache, memcached, or
   * straight to `expensiveOperation` depending on the enabled flags.
   */
  def getOrElseUpdate[CachedType](
      cacheKey: String,
      expensiveOperation: => Future[Option[CachedType]],
      doCache: Boolean = true
  ): Future[Option[CachedType]] = withAbbreviatedKey(cacheKey) { cacheKey =>
    (doCache, cacheEnabled, localCacheEnabled) match {
      case (true, true, true) =>
        localGetOrElse[CachedType](cacheKey, expensiveOperation, doCache)(
          getOrElseUpdateMemcached[CachedType])
      case (true, true, false) =>
        getOrElseUpdateMemcached[CachedType](cacheKey,
                                             expensiveOperation,
                                             doCache)
      case _ => expensiveOperation
    }
  }

  /** OptionT adapter over [[getOrElseUpdate]] for cats-based call sites. */
  def cachingOptionT[T](cacheKey: String, doCache: Boolean = true)(
      mappingFunction: => OptionT[Future, T]
  ): OptionT[Future, T] = {
    val futureOption =
      getOrElseUpdate[T](cacheKey, mappingFunction.value, doCache)
    OptionT(futureOption)
  }
}
| azavea/raster-foundry | app-backend/common/src/main/scala/cache/CacheClient.scala | Scala | apache-2.0 | 6,648 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views.pages.amends
import forms.AmendmentTypeForm
import org.jsoup.Jsoup
import testHelpers.ViewSpecHelpers.CommonViewSpecHelper
import testHelpers.ViewSpecHelpers.ip2016.RemovePsoDetailsViewMessages
import uk.gov.hmrc.play.views.html.helpers.{ErrorSummary, FormWithCSRF}
import views.html.pages.amends.removePsoDebits
class RemovePsoDebitsViewSpec extends CommonViewSpecHelper with RemovePsoDetailsViewMessages{

  // Play form helpers required implicitly by the removePsoDebits template.
  implicit val errorSummary: ErrorSummary = app.injector.instanceOf[ErrorSummary]
  implicit val formWithCSRF: FormWithCSRF = app.injector.instanceOf[FormWithCSRF]

  "the RemovePsoDetailsView" should{

    // A validly bound amendment form for an open IP2016 protection.
    val amendmentForm = AmendmentTypeForm.amendmentTypeForm.bind(Map(
      "protectionType" -> "ip2016",
      "status" -> "open"))

    // Render the view once (lazily) and parse the HTML with Jsoup.
    lazy val view = application.injector.instanceOf[removePsoDebits]
    lazy val doc = Jsoup.parse(view.apply(amendmentForm).body)
    lazy val form = doc.select("form")

    "have the correct title" in{
      doc.title() shouldBe plaPsoDetailsTitle
    }

    "have the right explanatory paragraph" in{
      doc.select("p").eq(0).text shouldBe plaPsoDetailsRemovePso
    }

    "have the correct and properly formatted header"in{
      doc.select("h1").text shouldBe plaPsoDetailsTitle
    }

    // The form must POST to the AmendsController removal endpoint.
    "have a valid form" in{
      form.attr("method") shouldBe "POST"
      form.attr("action") shouldBe controllers.routes.AmendsController.submitRemovePso().url
    }

    "have a functional cancellation link" in{
      doc.select("a").text shouldBe plaPsoDetailsCancelRemove
      doc.select("a").attr("href") shouldBe plaAmendsPsoDetailsCancellationLink
    }

    "have a remove button" in{
      doc.select("button").text shouldBe plaBaseRemove
      doc.select("button").attr("type") shouldBe "submit"
    }
  }
}
| hmrc/pensions-lifetime-allowance-frontend | test/views/pages/amends/RemovePsoDebitsViewSpec.scala | Scala | apache-2.0 | 2,390 |
/*******************************************************************************
* Copyright (c) 2019. Carl Minden
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package com.anathema_roguelike
package entities.items
import com.anathema_roguelike.main.utilities.HasWeightedProbability
import com.anathema_roguelike.stats.effects.{Effect, HasEffect}
import com.anathema_roguelike.stats.itemstats.ItemStat
/** A named property that can be attached to an [[Item]], carrying an item-stat
 * effect (via HasEffect) and a weight used for random selection
 * (via HasWeightedProbability).
 */
abstract class ItemProperty[T <: Item](name: String, weight: Double) extends HasEffect[Effect[Item, ItemStat]] with HasWeightedProbability {

  /** Display name of this property. */
  def getName: String = name

  /** Relative probability weight used when rolling random properties. */
  protected def getWeight: Double = weight
}
| carlminden/anathema-roguelike | src/com/anathema_roguelike/entities/items/ItemProperty.scala | Scala | gpl-3.0 | 1,307 |
package com.dominikgruber.fpinscala.chapter05
import org.scalatest._
// Tests for exercise 5.7: map, filter, append and flatMap on the chapter's
// Stream type. NOTE(review): `Stream` here presumably resolves to the custom
// lazy Stream defined in this package, not the standard-library one — confirm.
class Exercise07Spec extends FlatSpec with Matchers {

  "map" should "transform an int Stream to strings" in {
    Stream(1, 2, 3).map(_.toString).toList should be (List("1", "2", "3"))
  }

  "filter" should "filter out odd numbers" in {
    Stream(1, 2, 3, 4, 5).filter(_ % 2 == 0).toList should be (List(2, 4))
  }

  "append" should "append a Stream" in {
    Stream(1, 2).append(Stream(3, 4)).toList should be (List(1, 2, 3, 4))
  }

  // flatMap with a two-element Stream per input duplicates each element.
  "flatMap" should "duplicate the elements" in {
    Stream(1, 2, 3).flatMap((x) => Stream(x, x)).toList should be (List(1, 1, 2, 2, 3, 3))
  }
} | TheDom/functional-programming-in-scala | src/test/scala/com/dominikgruber/fpinscala/chapter05/Exercise07Spec.scala | Scala | mit | 656 |
package TAPLcomp.rcdsubbot
import scala.util.parsing.combinator.{ImplicitConversions, PackratParsers}
import scala.util.parsing.combinator.syntactical.StandardTokenParsers
// Abstract syntax of types for the rcdsubbot calculus (records + subtyping
// with Top and Bot), per TAPL.
sealed trait Ty
case object TyTop extends Ty // maximal type: supertype of every type
case object TyBot extends Ty // minimal type: subtype of every type
case class TyArr(t1: Ty, t2: Ty) extends Ty // function type t1 -> t2
case class TyRecord(els: List[(String, Ty)]) extends Ty // record {l1: T1, ...}

// Abstract syntax of terms.
sealed trait Term
case class TmVar(i: String) extends Term // variable reference
case class TmAbs(v: String, ty: Ty, t: Term) extends Term // lambda \v:ty.t
case class TmApp(t1: Term, t2: Term) extends Term // application t1 t2
case class TmRecord(fields: List[(String, Term)]) extends Term // record literal
case class TmProj(t: Term, proj: String) extends Term // field projection t.l
// Packrat parser for the rcdsubbot calculus, built on the standard
// scala-parser-combinators token parsers.
object RcdSubBotParsers extends StandardTokenParsers with PackratParsers with ImplicitConversions {

  // Reserved words/delimiters for the lexer. The set includes keywords from
  // the wider TAPL language family even though only a few are used here.
  lexical.reserved += ("Bool", "true", "false", "if", "then", "else",
    "Nat", "String", "Unit", "Float", "unit", "case", "let", "in", "succ", "pred",
    "as", "of", "fix", "iszero", "Top", "Bot")
  lexical.delimiters += ("\\\\", "(", ")", ";", "/", ".", ":", "->", "=", "<", ">", "{", "}", "=>", "==>", ",", "|")

  // lower-case identifier
  lazy val lcid: PackratParser[String] = ident ^? { case id if id.charAt(0).isLower => id }
  // upper-case identifier
  lazy val ucid: PackratParser[String] = ident ^? { case id if id.charAt(0).isUpper => id }

  // TYPES

  lazy val `type`: PackratParser[Ty] = arrowType

  // Atomic types: parenthesized type, Bot, Top, or a record type.
  lazy val aType: PackratParser[Ty] =
    "(" ~> `type` <~ ")" |
      "Bot" ^^ { _ => TyBot } |
      "Top" ^^ { _ => TyTop } |
      "{" ~> fieldTypes <~ "}" ^^ { ft => TyRecord(ft) }

  lazy val fieldTypes: PackratParser[List[(String, Ty)]] =
    repsep(fieldType, ",")

  lazy val fieldType: PackratParser[(String, Ty)] =
    lcid ~ (":" ~> `type`) ^^ { case id ~ ty => (id, ty) }

  // Right-associative arrow types: recursion on the right operand.
  lazy val arrowType: PackratParser[Ty] =
    (aType <~ "->") ~ arrowType ^^ { case t1 ~ t2 => TyArr(t1, t2) } |
      aType

  // TERMS

  // A term is an application chain or a lambda abstraction "\x:T.t".
  lazy val term: PackratParser[Term] =
    appTerm |
      ("\\\\" ~> lcid) ~ (":" ~> `type`) ~ ("." ~> term) ^^ { case v ~ ty ~ t => TmAbs(v, ty, t) }

  // Left-associative application (left recursion is fine with PackratParsers).
  lazy val appTerm: PackratParser[Term] =
    appTerm ~ pathTerm ^^ { case t1 ~ t2 => TmApp(t1, t2) } |
      pathTerm

  // Projection paths: t.label or t.1 (numeric labels for tuple-like records).
  lazy val pathTerm: PackratParser[Term] =
    pathTerm ~ ("." ~> lcid) ^^ { case t1 ~ l => TmProj(t1, l) } |
      pathTerm ~ ("." ~> numericLit) ^^ { case t1 ~ l => TmProj(t1, l) } |
      aTerm

  lazy val aTerm: PackratParser[Term] =
    "(" ~> term <~ ")" |
      lcid ^^ { i => TmVar(i) } |
      "{" ~> fields <~ "}" ^^ { fs => TmRecord(fs) }

  lazy val fields: PackratParser[List[(String, Term)]] =
    repsep(field, ",")

  lazy val field: PackratParser[(String, Term)] =
    lcid ~ ("=" ~> term) ^^ { case id ~ t => (id, t) }

  /** Parses a complete term from `s`; throws via sys.error on parse failure. */
  def input(s: String) = phrase(term)(new lexical.Scanner(s)) match {
    case t if t.successful => t.get
    case t => sys.error(t.toString)
  }
} | hy-zhang/parser | Scala/Parser/src/TAPLcomp/rcdsubbot/parser.scala | Scala | bsd-3-clause | 2,821 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io.NotSerializableException
import java.nio.ByteBuffer
import java.util.concurrent.ConcurrentLinkedQueue
import scala.collection.immutable.Map
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
import scala.math.max
import scala.util.control.NonFatal
import org.apache.spark._
import org.apache.spark.TaskState.TaskState
import org.apache.spark.internal.{config, Logging}
import org.apache.spark.internal.config._
import org.apache.spark.resource.ResourceInformation
import org.apache.spark.scheduler.SchedulingMode._
import org.apache.spark.util.{AccumulatorV2, Clock, LongAccumulator, SystemClock, Utils}
import org.apache.spark.util.collection.MedianHeap
/**
* Schedules the tasks within a single TaskSet in the TaskSchedulerImpl. This class keeps track of
* each task, retries tasks if they fail (up to a limited number of times), and
* handles locality-aware scheduling for this TaskSet via delay scheduling. The main interfaces
* to it are resourceOffer, which asks the TaskSet whether it wants to run a task on one node,
* and handleSuccessfulTask/handleFailedTask, which tells it that one of its tasks changed state
* (e.g. finished/failed).
*
* THREADING: This class is designed to only be called from code with a lock on the
* TaskScheduler (e.g. its event handlers). It should not be called from other threads.
*
* @param sched the TaskSchedulerImpl associated with the TaskSetManager
* @param taskSet the TaskSet to manage scheduling for
* @param maxTaskFailures if any particular task fails this number of times, the entire
* task set will be aborted
*/
private[spark] class TaskSetManager(
    sched: TaskSchedulerImpl,
    val taskSet: TaskSet,
    val maxTaskFailures: Int,
    blacklistTracker: Option[BlacklistTracker] = None,
    clock: Clock = new SystemClock()) extends Schedulable with Logging {

  private val conf = sched.sc.conf

  // SPARK-21563 make a copy of the jars/files so they are consistent across the TaskSet
  private val addedJars = HashMap[String, Long](sched.sc.addedJars.toSeq: _*)
  private val addedFiles = HashMap[String, Long](sched.sc.addedFiles.toSeq: _*)

  val maxResultSize = conf.get(config.MAX_RESULT_SIZE)

  // Serializer for closures and tasks.
  val env = SparkEnv.get
  val ser = env.closureSerializer.newInstance()

  val tasks = taskSet.tasks
  // Maps a partition id to the index of its task within `tasks`.
  private[scheduler] val partitionToIndex = tasks.zipWithIndex
    .map { case (t, idx) => t.partitionId -> idx }.toMap
  val numTasks = tasks.length
  // Number of currently-running copies of each task (can exceed 1 with speculation).
  val copiesRunning = new Array[Int](numTasks)

  val speculationEnabled = conf.get(SPECULATION_ENABLED)
  // Quantile of tasks at which to start speculation
  val speculationQuantile = conf.get(SPECULATION_QUANTILE)
  val speculationMultiplier = conf.get(SPECULATION_MULTIPLIER)
  // At least one task must have finished before speculation can kick in.
  val minFinishedForSpeculation = math.max((speculationQuantile * numTasks).floor.toInt, 1)

  // For each task, tracks whether a copy of the task has succeeded. A task will also be
  // marked as "succeeded" if it failed with a fetch failure, in which case it should not
  // be re-run because the missing map data needs to be regenerated first.
  val successful = new Array[Boolean](numTasks)
  private val numFailures = new Array[Int](numTasks)

  // Add the tid of task into this HashSet when the task is killed by other attempt tasks.
  // This happened while we set the `spark.speculation` to true. The task killed by others
  // should not resubmit while executor lost.
  private val killedByOtherAttempt = new HashSet[Long]

  // All attempts (TaskInfo) per task index, most recent first.
  val taskAttempts = Array.fill[List[TaskInfo]](numTasks)(Nil)
  private[scheduler] var tasksSuccessful = 0

  // Schedulable-tree parameters; a TaskSetManager is always a leaf.
  val weight = 1
  val minShare = 0
  var priority = taskSet.priority
  var stageId = taskSet.stageId
  val name = "TaskSet_" + taskSet.id
  var parent: Pool = null
  private var totalResultSize = 0L
  private var calculatedTasks = 0

  // Per-TaskSet blacklist helper; defined iff application-level blacklisting is on.
  private[scheduler] val taskSetBlacklistHelperOpt: Option[TaskSetBlacklist] = {
    blacklistTracker.map { _ =>
      new TaskSetBlacklist(sched.sc.listenerBus, conf, stageId, taskSet.stageAttemptId, clock)
    }
  }

  private[scheduler] val runningTasksSet = new HashSet[Long]

  override def runningTasks: Int = runningTasksSet.size

  /** Whether any attempt of the task that ran with the given task id succeeded. */
  def someAttemptSucceeded(tid: Long): Boolean = {
    successful(taskInfos(tid).index)
  }

  // True once no more tasks should be launched for this task set manager. TaskSetManagers enter
  // the zombie state once at least one attempt of each task has completed successfully, or if the
  // task set is aborted (for example, because it was killed). TaskSetManagers remain in the zombie
  // state until all tasks have finished running; we keep TaskSetManagers that are in the zombie
  // state in order to continue to track and account for the running tasks.
  // TODO: We should kill any running task attempts when the task set manager becomes a zombie.
  private[scheduler] var isZombie = false

  // Whether the taskSet run tasks from a barrier stage. Spark must launch all the tasks at the
  // same time for a barrier stage.
  private[scheduler] def isBarrier = taskSet.tasks.nonEmpty && taskSet.tasks(0).isBarrier

  // Store tasks waiting to be scheduled by locality preferences
  private[scheduler] val pendingTasks = new PendingTasksByLocality()

  // Tasks that can be speculated. Since these will be a small fraction of total
  // tasks, we'll just hold them in a HashSet. The HashSet here ensures that we do not add
  // duplicate speculatable tasks.
  private[scheduler] val speculatableTasks = new HashSet[Int]

  // Store speculatable tasks by locality preferences
  private[scheduler] val pendingSpeculatableTasks = new PendingTasksByLocality()

  // Task index, start and finish time for each task attempt (indexed by task ID)
  private[scheduler] val taskInfos = new HashMap[Long, TaskInfo]

  // Use a MedianHeap to record durations of successful tasks so we know when to launch
  // speculative tasks. This is only used when speculation is enabled, to avoid the overhead
  // of inserting into the heap when the heap won't be used.
  val successfulTaskDurations = new MedianHeap()

  // How frequently to reprint duplicate exceptions in full, in milliseconds
  val EXCEPTION_PRINT_INTERVAL =
    conf.getLong("spark.logging.exceptionPrintInterval", 10000)

  // Map of recent exceptions (identified by string representation and top stack frame) to
  // duplicate count (how many times the same exception has appeared) and time the full exception
  // was printed. This should ideally be an LRU map that can drop old exceptions automatically.
  private val recentExceptions = HashMap[String, (Int, Long)]()

  // Figure out the current map output tracker epoch and set it on all tasks
  val epoch = sched.mapOutputTracker.getEpoch
  logDebug("Epoch for " + taskSet + ": " + epoch)
  for (t <- tasks) {
    t.epoch = epoch
  }

  // Add all our tasks to the pending lists. We do this in reverse order
  // of task index so that tasks with low indices get launched first.
  addPendingTasks()
  /** Registers every task of the TaskSet in the pending-task lists.
   * Rack resolution is deferred and done once for the de-duplicated host list,
   * since looking up racks host-by-host can be slow.
   */
  private def addPendingTasks(): Unit = {
    val (_, duration) = Utils.timeTakenMs {
      for (i <- (0 until numTasks).reverse) {
        addPendingTask(i, resolveRacks = false)
      }
      // Resolve the rack for each host. This can be slow, so de-dupe the list of hosts,
      // and assign the rack to all relevant task indices.
      val (hosts, indicesForHosts) = pendingTasks.forHost.toSeq.unzip
      val racks = sched.getRacksForHosts(hosts)
      racks.zip(indicesForHosts).foreach {
        case (Some(rack), indices) =>
          pendingTasks.forRack.getOrElseUpdate(rack, new ArrayBuffer) ++= indices
        case (None, _) => // no rack, nothing to do
      }
    }
    logDebug(s"Adding pending tasks took $duration ms")
  }
  /**
   * Track the set of locality levels which are valid given the tasks locality preferences and
   * the set of currently available executors. This is updated as executors are added and removed.
   * This allows a performance optimization, of skipping levels that aren't relevant (eg., skip
   * PROCESS_LOCAL if no tasks could be run PROCESS_LOCAL for the current set of executors).
   */
  private[scheduler] var myLocalityLevels = computeValidLocalityLevels()

  // Time to wait at each level
  private[scheduler] var localityWaits = myLocalityLevels.map(getLocalityWait)

  // Delay scheduling variables: we keep track of our current locality level and the time we
  // last launched a task at that level, and move up a level when localityWaits[curLevel] expires.
  // We then move down if we manage to launch a "more local" task.
  private var currentLocalityIndex = 0 // Index of our current locality level in validLocalityLevels
  private var lastLaunchTime = clock.getTimeMillis() // Time we last launched a task at this level

  // A TaskSetManager is a leaf of the scheduling tree: no child queue, no mode.
  override def schedulableQueue: ConcurrentLinkedQueue[Schedulable] = null

  override def schedulingMode: SchedulingMode = SchedulingMode.NONE

  // Ensures the "very large task" warning below is only emitted once per TaskSet.
  private[scheduler] var emittedTaskSizeWarning = false
  /** Add a task to all the pending-task lists that it should be on.
   *
   * @param index task index within this TaskSet
   * @param resolveRacks whether to resolve the rack per host here (the bulk
   *                     path in addPendingTasks resolves racks in one batch)
   * @param speculatable if true, register into the speculatable pending lists
   */
  private[spark] def addPendingTask(
      index: Int,
      resolveRacks: Boolean = true,
      speculatable: Boolean = false): Unit = {
    // A speculatable task uses the separate speculative pending structure.
    val pendingTaskSetToAddTo = if (speculatable) pendingSpeculatableTasks else pendingTasks
    for (loc <- tasks(index).preferredLocations) {
      loc match {
        case e: ExecutorCacheTaskLocation =>
          pendingTaskSetToAddTo.forExecutor.getOrElseUpdate(e.executorId, new ArrayBuffer) += index
        case e: HDFSCacheTaskLocation =>
          // HDFS-cached data: register against every live executor on the host.
          val exe = sched.getExecutorsAliveOnHost(loc.host)
          exe match {
            case Some(set) =>
              for (e <- set) {
                pendingTaskSetToAddTo.forExecutor.getOrElseUpdate(e, new ArrayBuffer) += index
              }
              logInfo(s"Pending task $index has a cached location at ${e.host} " +
                ", where there are executors " + set.mkString(","))
            case None => logDebug(s"Pending task $index has a cached location at ${e.host} " +
              ", but there are no executors alive there.")
          }
        case _ =>
      }
      pendingTaskSetToAddTo.forHost.getOrElseUpdate(loc.host, new ArrayBuffer) += index

      if (resolveRacks) {
        sched.getRackForHost(loc.host).foreach { rack =>
          pendingTaskSetToAddTo.forRack.getOrElseUpdate(rack, new ArrayBuffer) += index
        }
      }
    }

    // Tasks with no locality preference go to the dedicated noPrefs list.
    if (tasks(index).preferredLocations == Nil) {
      pendingTaskSetToAddTo.noPrefs += index
    }

    pendingTaskSetToAddTo.all += index
  }
  /**
   * Dequeue a pending task from the given list and return its index.
   * Return None if the list is empty.
   * This method also cleans up any tasks in the list that have already
   * been launched, since we want that to happen lazily.
   *
   * Scans from the tail so lazy removal of already-launched entries is cheap.
   */
  private def dequeueTaskFromList(
      execId: String,
      host: String,
      list: ArrayBuffer[Int],
      speculative: Boolean = false): Option[Int] = {
    var indexOffset = list.size
    while (indexOffset > 0) {
      indexOffset -= 1
      val index = list(indexOffset)
      // Skip tasks blacklisted on this executor/host, and (for speculative
      // dequeues) tasks that already have an attempt on this host.
      if (!isTaskBlacklistedOnExecOrNode(index, execId, host) &&
          !(speculative && hasAttemptOnHost(index, host))) {
        // This should almost always be list.trimEnd(1) to remove tail
        list.remove(indexOffset)
        // Speculatable task should only be launched when at most one copy of the
        // original task is running
        if (!successful(index)) {
          if (copiesRunning(index) == 0) {
            return Some(index)
          } else if (speculative && copiesRunning(index) == 1) {
            return Some(index)
          }
        }
      }
    }
    None
  }
/** Check whether a task once ran an attempt on a given host */
private def hasAttemptOnHost(taskIndex: Int, host: String): Boolean = {
taskAttempts(taskIndex).exists(_.host == host)
}
  /** Whether this task index is blacklisted on the given executor or its node.
   * Always false when blacklisting is disabled (helper is None).
   */
  private def isTaskBlacklistedOnExecOrNode(index: Int, execId: String, host: String): Boolean = {
    taskSetBlacklistHelperOpt.exists { blacklist =>
      blacklist.isNodeBlacklistedForTask(host, index) ||
        blacklist.isExecutorBlacklistedForTask(execId, index)
    }
  }
  /**
   * Dequeue a pending task for a given node and return its index and locality level.
   * Only search for tasks matching the given locality constraint.
   *
   * @return An option containing (task index within the task set, locality, is speculative?)
   */
  private def dequeueTask(
      execId: String,
      host: String,
      maxLocality: TaskLocality.Value): Option[(Int, TaskLocality.Value, Boolean)] = {
    // Tries to schedule a regular task first; if it returns None, then schedules
    // a speculative task
    dequeueTaskHelper(execId, host, maxLocality, false).orElse(
      dequeueTaskHelper(execId, host, maxLocality, true))
  }
  /** Searches the pending lists for a runnable task, walking locality levels
   * from most local (PROCESS_LOCAL) to least local (ANY), stopping at
   * `maxLocality`. Returns (task index, locality it was found at, speculative?).
   */
  protected def dequeueTaskHelper(
      execId: String,
      host: String,
      maxLocality: TaskLocality.Value,
      speculative: Boolean): Option[(Int, TaskLocality.Value, Boolean)] = {
    if (speculative && speculatableTasks.isEmpty) {
      return None
    }
    val pendingTaskSetToUse = if (speculative) pendingSpeculatableTasks else pendingTasks
    // Dequeues from one list; for speculative dequeues also removes the index
    // from the speculatableTasks set so it is not launched twice.
    def dequeue(list: ArrayBuffer[Int]): Option[Int] = {
      val task = dequeueTaskFromList(execId, host, list, speculative)
      if (speculative && task.isDefined) {
        speculatableTasks -= task.get
      }
      task
    }

    dequeue(pendingTaskSetToUse.forExecutor.getOrElse(execId, ArrayBuffer())).foreach { index =>
      return Some((index, TaskLocality.PROCESS_LOCAL, speculative))
    }

    if (TaskLocality.isAllowed(maxLocality, TaskLocality.NODE_LOCAL)) {
      dequeue(pendingTaskSetToUse.forHost.getOrElse(host, ArrayBuffer())).foreach { index =>
        return Some((index, TaskLocality.NODE_LOCAL, speculative))
      }
    }

    // Look for noPref tasks after NODE_LOCAL for minimize cross-rack traffic
    if (TaskLocality.isAllowed(maxLocality, TaskLocality.NO_PREF)) {
      dequeue(pendingTaskSetToUse.noPrefs).foreach { index =>
        // noPref tasks are reported as PROCESS_LOCAL for delay-scheduling purposes.
        return Some((index, TaskLocality.PROCESS_LOCAL, speculative))
      }
    }

    if (TaskLocality.isAllowed(maxLocality, TaskLocality.RACK_LOCAL)) {
      for {
        rack <- sched.getRackForHost(host)
        index <- dequeue(pendingTaskSetToUse.forRack.getOrElse(rack, ArrayBuffer()))
      } {
        return Some((index, TaskLocality.RACK_LOCAL, speculative))
      }
    }

    if (TaskLocality.isAllowed(maxLocality, TaskLocality.ANY)) {
      dequeue(pendingTaskSetToUse.all).foreach { index =>
        return Some((index, TaskLocality.ANY, speculative))
      }
    }
    None
  }
  /**
   * Respond to an offer of a single executor from the scheduler by finding a task
   *
   * NOTE: this function is either called with a maxLocality which
   * would be adjusted by delay scheduling algorithm or it will be with a special
   * NO_PREF locality which will be not modified
   *
   * @param execId the executor Id of the offered resource
   * @param host the host Id of the offered resource
   * @param maxLocality the maximum locality we want to schedule the tasks at
   * @param availableResources extra resources (e.g. GPUs) available on the offer,
   *                           keyed by resource name
   * @return a TaskDescription to launch, or None if nothing can run on this offer
   */
  @throws[TaskNotSerializableException]
  def resourceOffer(
      execId: String,
      host: String,
      maxLocality: TaskLocality.TaskLocality,
      availableResources: Map[String, Seq[String]] = Map.empty)
    : Option[TaskDescription] =
  {
    // Reject the offer outright if the executor or node is blacklisted for
    // this whole TaskSet.
    val offerBlacklisted = taskSetBlacklistHelperOpt.exists { blacklist =>
      blacklist.isNodeBlacklistedForTaskSet(host) ||
        blacklist.isExecutorBlacklistedForTaskSet(execId)
    }
    if (!isZombie && !offerBlacklisted) {
      val curTime = clock.getTimeMillis()

      var allowedLocality = maxLocality

      if (maxLocality != TaskLocality.NO_PREF) {
        allowedLocality = getAllowedLocalityLevel(curTime)
        if (allowedLocality > maxLocality) {
          // We're not allowed to search for farther-away tasks
          allowedLocality = maxLocality
        }
      }

      dequeueTask(execId, host, allowedLocality).map { case ((index, taskLocality, speculative)) =>
        // Found a task; do some bookkeeping and return a task description
        val task = tasks(index)
        val taskId = sched.newTaskId()
        // Do various bookkeeping
        copiesRunning(index) += 1
        val attemptNum = taskAttempts(index).size
        val info = new TaskInfo(taskId, index, attemptNum, curTime,
          execId, host, taskLocality, speculative)
        taskInfos(taskId) = info
        // Prepend: taskAttempts keeps the most recent attempt first.
        taskAttempts(index) = info :: taskAttempts(index)

        // Update our locality level for delay scheduling
        // NO_PREF will not affect the variables related to delay scheduling
        if (maxLocality != TaskLocality.NO_PREF) {
          currentLocalityIndex = getLocalityIndex(taskLocality)
          lastLaunchTime = curTime
        }
        // Serialize and return the task
        val serializedTask: ByteBuffer = try {
          ser.serialize(task)
        } catch {
          // If the task cannot be serialized, then there's no point to re-attempt the task,
          // as it will always fail. So just abort the whole task-set.
          case NonFatal(e) =>
            val msg = s"Failed to serialize task $taskId, not attempting to retry it."
            logError(msg, e)
            abort(s"$msg Exception during serialization: $e")
            throw new TaskNotSerializableException(e)
        }
        if (serializedTask.limit() > TaskSetManager.TASK_SIZE_TO_WARN_KIB * 1024 &&
          !emittedTaskSizeWarning) {
          emittedTaskSizeWarning = true
          logWarning(s"Stage ${task.stageId} contains a task of very large size " +
            s"(${serializedTask.limit() / 1024} KiB). The maximum recommended task size is " +
            s"${TaskSetManager.TASK_SIZE_TO_WARN_KIB} KiB.")
        }
        addRunningTask(taskId)

        // We used to log the time it takes to serialize the task, but task size is already
        // a good proxy to task serialization time.
        // val timeTaken = clock.getTime() - startTime
        val taskName = s"task ${info.id} in stage ${taskSet.id}"
        logInfo(s"Starting $taskName (TID $taskId, $host, executor ${info.executorId}, " +
          s"partition ${task.partitionId}, $taskLocality, ${serializedTask.limit()} bytes)")

        // Allocate the per-task resource addresses (e.g. GPU ids) from the offer.
        val extraResources = sched.resourcesReqsPerTask.map { taskReq =>
          val rName = taskReq.resourceName
          val count = taskReq.amount
          val rAddresses = availableResources.getOrElse(rName, Seq.empty)
          assert(rAddresses.size >= count, s"Required $count $rName addresses, but only " +
            s"${rAddresses.size} available.")
          // We'll drop the allocated addresses later inside TaskSchedulerImpl.
          val allocatedAddresses = rAddresses.take(count)
          (rName, new ResourceInformation(rName, allocatedAddresses.toArray))
        }.toMap

        sched.dagScheduler.taskStarted(task, info)
        new TaskDescription(
          taskId,
          attemptNum,
          execId,
          taskName,
          index,
          task.partitionId,
          addedFiles,
          addedJars,
          task.localProperties,
          extraResources,
          serializedTask)
      }
    } else {
      None
    }
  }
private def maybeFinishTaskSet() {
if (isZombie && runningTasks == 0) {
sched.taskSetFinished(this)
if (tasksSuccessful == numTasks) {
blacklistTracker.foreach(_.updateBlacklistForSuccessfulTaskSet(
taskSet.stageId,
taskSet.stageAttemptId,
taskSetBlacklistHelperOpt.get.execToFailures))
}
}
}
  /**
   * Get the level we can launch tasks according to delay scheduling, based on current wait time.
   * Advances `currentLocalityIndex` past levels that either have no remaining
   * runnable tasks or whose wait timer has expired.
   */
  private def getAllowedLocalityLevel(curTime: Long): TaskLocality.TaskLocality = {
    // Remove the scheduled or finished tasks lazily
    def tasksNeedToBeScheduledFrom(pendingTaskIds: ArrayBuffer[Int]): Boolean = {
      var indexOffset = pendingTaskIds.size
      while (indexOffset > 0) {
        indexOffset -= 1
        val index = pendingTaskIds(indexOffset)
        if (copiesRunning(index) == 0 && !successful(index)) {
          return true
        } else {
          // Already running or done: drop it from the pending list.
          pendingTaskIds.remove(indexOffset)
        }
      }
      false
    }
    // Walk through the list of tasks that can be scheduled at each location and returns true
    // if there are any tasks that still need to be scheduled. Lazily cleans up tasks that have
    // already been scheduled.
    def moreTasksToRunIn(pendingTasks: HashMap[String, ArrayBuffer[Int]]): Boolean = {
      val emptyKeys = new ArrayBuffer[String]
      val hasTasks = pendingTasks.exists {
        case (id: String, tasks: ArrayBuffer[Int]) =>
          if (tasksNeedToBeScheduledFrom(tasks)) {
            true
          } else {
            emptyKeys += id
            false
          }
      }
      // The key could be executorId, host or rackId
      emptyKeys.foreach(id => pendingTasks.remove(id))
      hasTasks
    }

    while (currentLocalityIndex < myLocalityLevels.length - 1) {
      // Note: ANY is never in this match; it is always the last level and is
      // returned after the loop exits.
      val moreTasks = myLocalityLevels(currentLocalityIndex) match {
        case TaskLocality.PROCESS_LOCAL => moreTasksToRunIn(pendingTasks.forExecutor)
        case TaskLocality.NODE_LOCAL => moreTasksToRunIn(pendingTasks.forHost)
        case TaskLocality.NO_PREF => pendingTasks.noPrefs.nonEmpty
        case TaskLocality.RACK_LOCAL => moreTasksToRunIn(pendingTasks.forRack)
      }
      if (!moreTasks) {
        // This is a performance optimization: if there are no more tasks that can
        // be scheduled at a particular locality level, there is no point in waiting
        // for the locality wait timeout (SPARK-4939).
        lastLaunchTime = curTime
        logDebug(s"No tasks for locality level ${myLocalityLevels(currentLocalityIndex)}, " +
          s"so moving to locality level ${myLocalityLevels(currentLocalityIndex + 1)}")
        currentLocalityIndex += 1
      } else if (curTime - lastLaunchTime >= localityWaits(currentLocalityIndex)) {
        // Jump to the next locality level, and reset lastLaunchTime so that the next locality
        // wait timer doesn't immediately expire
        lastLaunchTime += localityWaits(currentLocalityIndex)
        logDebug(s"Moving to ${myLocalityLevels(currentLocalityIndex + 1)} after waiting for " +
          s"${localityWaits(currentLocalityIndex)}ms")
        currentLocalityIndex += 1
      } else {
        return myLocalityLevels(currentLocalityIndex)
      }
    }
    myLocalityLevels(currentLocalityIndex)
  }
/**
* Find the index in myLocalityLevels for a given locality. This is also designed to work with
* localities that are not in myLocalityLevels (in case we somehow get those) by returning the
* next-biggest level we have. Uses the fact that the last value in myLocalityLevels is ANY.
*/
def getLocalityIndex(locality: TaskLocality.TaskLocality): Int = {
var index = 0
while (locality > myLocalityLevels(index)) {
index += 1
}
index
}
  /**
   * Check whether the given task set has been blacklisted to the point that it can't run anywhere.
   *
   * It is possible that this taskset has become impossible to schedule *anywhere* due to the
   * blacklist. The most common scenario would be if there are fewer executors than
   * spark.task.maxFailures. We need to detect this so we can avoid the job from being hung.
   * We try to acquire new executor/s by killing an existing idle blacklisted executor.
   *
   * There's a tradeoff here: we could make sure all tasks in the task set are schedulable, but that
   * would add extra time to each iteration of the scheduling loop. Here, we take the approach of
   * making sure at least one of the unscheduled tasks is schedulable. This means we may not detect
   * the hang as quickly as we could have, but we'll always detect the hang eventually, and the
   * method is faster in the typical case. In the worst case, this method can take
   * O(maxTaskFailures + numTasks) time, but it will be faster when there haven't been any task
   * failures (this is because the method picks one unscheduled task, and then iterates through each
   * executor until it finds one that the task isn't blacklisted on).
   *
   * @return the index (within this task set) of one completely blacklisted pending task, if any
   */
  private[scheduler] def getCompletelyBlacklistedTaskIfAny(
      hostToExecutors: HashMap[String, HashSet[String]]): Option[Int] = {
    taskSetBlacklistHelperOpt.flatMap { taskSetBlacklist =>
      val appBlacklist = blacklistTracker.get
      // Only look for unschedulable tasks when at least one executor has registered. Otherwise,
      // task sets will be (unnecessarily) aborted in cases when no executors have registered yet.
      if (hostToExecutors.nonEmpty) {
        // find any task that needs to be scheduled
        val pendingTask: Option[Int] = {
          // usually this will just take the last pending task, but because of the lazy removal
          // from each list, we may need to go deeper in the list.  We poll from the end because
          // failed tasks are put back at the end of allPendingTasks, so we're more likely to find
          // an unschedulable task this way.
          val indexOffset = pendingTasks.all.lastIndexWhere { indexInTaskSet =>
            copiesRunning(indexInTaskSet) == 0 && !successful(indexInTaskSet)
          }
          if (indexOffset == -1) {
            None
          } else {
            Some(pendingTasks.all(indexOffset))
          }
        }
        pendingTask.find { indexInTaskSet =>
          // try to find some executor this task can run on.  Its possible that some *other*
          // task isn't schedulable anywhere, but we will discover that in some later call,
          // when that unschedulable task is the last task remaining.
          hostToExecutors.forall { case (host, execsOnHost) =>
            // Check if the task can run on the node
            val nodeBlacklisted =
              appBlacklist.isNodeBlacklisted(host) ||
                taskSetBlacklist.isNodeBlacklistedForTaskSet(host) ||
                taskSetBlacklist.isNodeBlacklistedForTask(host, indexInTaskSet)
            if (nodeBlacklisted) {
              true
            } else {
              // Check if the task can run on any of the executors
              execsOnHost.forall { exec =>
                appBlacklist.isExecutorBlacklisted(exec) ||
                  taskSetBlacklist.isExecutorBlacklistedForTaskSet(exec) ||
                  taskSetBlacklist.isExecutorBlacklistedForTask(exec, indexInTaskSet)
              }
            }
          }
        }
      } else {
        None
      }
    }
  }
private[scheduler] def abortSinceCompletelyBlacklisted(indexInTaskSet: Int): Unit = {
taskSetBlacklistHelperOpt.foreach { taskSetBlacklist =>
val partition = tasks(indexInTaskSet).partitionId
abort(s"""
|Aborting $taskSet because task $indexInTaskSet (partition $partition)
|cannot run anywhere due to node and executor blacklist.
|Most recent failure:
|${taskSetBlacklist.getLatestFailureReason}
|
|Blacklisting behavior can be configured via spark.blacklist.*.
|""".stripMargin)
}
}
/**
* Marks the task as getting result and notifies the DAG Scheduler
*/
def handleTaskGettingResult(tid: Long): Unit = {
val info = taskInfos(tid)
info.markGettingResult(clock.getTimeMillis())
sched.dagScheduler.taskGettingResult(info)
}
/**
* Check whether has enough quota to fetch the result with `size` bytes
*/
def canFetchMoreResults(size: Long): Boolean = sched.synchronized {
totalResultSize += size
calculatedTasks += 1
if (maxResultSize > 0 && totalResultSize > maxResultSize) {
val msg = s"Total size of serialized results of ${calculatedTasks} tasks " +
s"(${Utils.bytesToString(totalResultSize)}) is bigger than ${config.MAX_RESULT_SIZE.key} " +
s"(${Utils.bytesToString(maxResultSize)})"
logError(msg)
abort(msg)
false
} else {
true
}
}
  /**
   * Marks a task as successful and notifies the DAGScheduler that the task has ended.
   *
   * Also kills any still-running speculative attempts of the same task, and flips this task set
   * into zombie state once every task has succeeded. If another attempt of the same task already
   * succeeded, this attempt is re-routed to the failure path as a killed task instead.
   */
  def handleSuccessfulTask(tid: Long, result: DirectTaskResult[_]): Unit = {
    val info = taskInfos(tid)
    val index = info.index
    // Check if any other attempt succeeded before this and this attempt has not been handled
    if (successful(index) && killedByOtherAttempt.contains(tid)) {
      // Undo the effect on calculatedTasks and totalResultSize made earlier when
      // checking if can fetch more results
      calculatedTasks -= 1
      val resultSizeAcc = result.accumUpdates.find(a =>
        a.name == Some(InternalAccumulator.RESULT_SIZE))
      if (resultSizeAcc.isDefined) {
        totalResultSize -= resultSizeAcc.get.asInstanceOf[LongAccumulator].value
      }
      // Handle this task as a killed task
      handleFailedTask(tid, TaskState.KILLED,
        TaskKilled("Finish but did not commit due to another attempt succeeded"))
      return
    }
    info.markFinished(TaskState.FINISHED, clock.getTimeMillis())
    if (speculationEnabled) {
      // Record the duration so the speculation check can compute the median task time.
      successfulTaskDurations.insert(info.duration)
    }
    removeRunningTask(tid)
    // Kill any other attempts for the same task (since those are unnecessary now that one
    // attempt completed successfully).
    for (attemptInfo <- taskAttempts(index) if attemptInfo.running) {
      logInfo(s"Killing attempt ${attemptInfo.attemptNumber} for task ${attemptInfo.id} " +
        s"in stage ${taskSet.id} (TID ${attemptInfo.taskId}) on ${attemptInfo.host} " +
        s"as the attempt ${info.attemptNumber} succeeded on ${info.host}")
      killedByOtherAttempt += attemptInfo.taskId
      sched.backend.killTask(
        attemptInfo.taskId,
        attemptInfo.executorId,
        interruptThread = true,
        reason = "another attempt succeeded")
    }
    if (!successful(index)) {
      tasksSuccessful += 1
      logInfo(s"Finished task ${info.id} in stage ${taskSet.id} (TID ${info.taskId}) in" +
        s" ${info.duration} ms on ${info.host} (executor ${info.executorId})" +
        s" ($tasksSuccessful/$numTasks)")
      // Mark successful and stop if all the tasks have succeeded.
      successful(index) = true
      if (tasksSuccessful == numTasks) {
        isZombie = true
      }
    } else {
      logInfo("Ignoring task-finished event for " + info.id + " in stage " + taskSet.id +
        " because task " + index + " has already completed successfully")
    }
    // This method is called by "TaskSchedulerImpl.handleSuccessfulTask" which holds the
    // "TaskSchedulerImpl" lock until exiting. To avoid the SPARK-7655 issue, we should not
    // "deserialize" the value when holding a lock to avoid blocking other threads. So we call
    // "result.value()" in "TaskResultGetter.enqueueSuccessfulTask" before reaching here.
    // Note: "result.value()" only deserializes the value when it's called at the first time, so
    // here "result.value()" just returns the value and won't block other threads.
    sched.dagScheduler.taskEnded(tasks(index), Success, result.value(), result.accumUpdates,
      result.metricPeaks, info)
    maybeFinishTaskSet()
  }
private[scheduler] def markPartitionCompleted(partitionId: Int): Unit = {
partitionToIndex.get(partitionId).foreach { index =>
if (!successful(index)) {
tasksSuccessful += 1
successful(index) = true
if (tasksSuccessful == numTasks) {
isZombie = true
}
maybeFinishTaskSet()
}
}
}
  /**
   * Marks the task as failed, re-adds it to the list of pending tasks, and notifies the
   * DAG Scheduler.
   *
   * Depending on the failure reason this may: mark the stage a zombie (fetch failures, barrier
   * tasks), update blacklists, rate-limit duplicate exception logging, count the failure towards
   * the max-failure limit, and abort the whole task set once a task exceeds that limit.
   */
  def handleFailedTask(tid: Long, state: TaskState, reason: TaskFailedReason) {
    val info = taskInfos(tid)
    // Ignore duplicate notifications for a task that was already marked failed or killed.
    if (info.failed || info.killed) {
      return
    }
    removeRunningTask(tid)
    info.markFinished(state, clock.getTimeMillis())
    val index = info.index
    copiesRunning(index) -= 1
    var accumUpdates: Seq[AccumulatorV2[_, _]] = Seq.empty
    var metricPeaks: Array[Long] = Array.empty
    val failureReason = s"Lost task ${info.id} in stage ${taskSet.id} (TID $tid, ${info.host}," +
      s" executor ${info.executorId}): ${reason.toErrorString}"
    val failureException: Option[Throwable] = reason match {
      case fetchFailed: FetchFailed =>
        logWarning(failureReason)
        // A fetch failure means the previous stage must be re-run, so treat this task as
        // "successful" for bookkeeping and make the whole set a zombie.
        if (!successful(index)) {
          successful(index) = true
          tasksSuccessful += 1
        }
        isZombie = true
        if (fetchFailed.bmAddress != null) {
          blacklistTracker.foreach(_.updateBlacklistForFetchFailure(
            fetchFailed.bmAddress.host, fetchFailed.bmAddress.executorId))
        }
        None
      case ef: ExceptionFailure =>
        // ExceptionFailure's might have accumulator updates
        accumUpdates = ef.accums
        metricPeaks = ef.metricPeaks.toArray
        if (ef.className == classOf[NotSerializableException].getName) {
          // If the task result wasn't serializable, there's no point in trying to re-execute it.
          logError("Task %s in stage %s (TID %d) had a not serializable result: %s; not retrying"
            .format(info.id, taskSet.id, tid, ef.description))
          abort("Task %s in stage %s (TID %d) had a not serializable result: %s".format(
            info.id, taskSet.id, tid, ef.description))
          return
        }
        // Rate-limit full stack trace logging: only print the full reason again after
        // EXCEPTION_PRINT_INTERVAL; otherwise log a short duplicate notice with a counter.
        val key = ef.description
        val now = clock.getTimeMillis()
        val (printFull, dupCount) = {
          if (recentExceptions.contains(key)) {
            val (dupCount, printTime) = recentExceptions(key)
            if (now - printTime > EXCEPTION_PRINT_INTERVAL) {
              recentExceptions(key) = (0, now)
              (true, 0)
            } else {
              recentExceptions(key) = (dupCount + 1, printTime)
              (false, dupCount + 1)
            }
          } else {
            recentExceptions(key) = (0, now)
            (true, 0)
          }
        }
        if (printFull) {
          logWarning(failureReason)
        } else {
          logInfo(
            s"Lost task ${info.id} in stage ${taskSet.id} (TID $tid) on ${info.host}, executor" +
              s" ${info.executorId}: ${ef.className} (${ef.description}) [duplicate $dupCount]")
        }
        ef.exception
      case tk: TaskKilled =>
        // TaskKilled might have accumulator updates
        accumUpdates = tk.accums
        metricPeaks = tk.metricPeaks.toArray
        logWarning(failureReason)
        None
      case e: ExecutorLostFailure if !e.exitCausedByApp =>
        logInfo(s"Task $tid failed because while it was being computed, its executor " +
          "exited for a reason unrelated to the task. Not counting this failure towards the " +
          "maximum number of failures for the task.")
        None
      case e: TaskFailedReason =>  // TaskResultLost and others
        logWarning(failureReason)
        None
    }
    // Any task failure in a barrier stage invalidates the whole stage attempt.
    if (tasks(index).isBarrier) {
      isZombie = true
    }
    sched.dagScheduler.taskEnded(tasks(index), reason, null, accumUpdates, metricPeaks, info)
    if (!isZombie && reason.countTowardsTaskFailures) {
      assert (null != failureReason)
      taskSetBlacklistHelperOpt.foreach(_.updateBlacklistForFailedTask(
        info.host, info.executorId, index, failureReason))
      numFailures(index) += 1
      if (numFailures(index) >= maxTaskFailures) {
        logError("Task %d in stage %s failed %d times; aborting job".format(
          index, taskSet.id, maxTaskFailures))
        abort("Task %d in stage %s failed %d times, most recent failure: %s\\nDriver stacktrace:"
          .format(index, taskSet.id, maxTaskFailures, failureReason), failureException)
        return
      }
    }
    if (successful(index)) {
      logInfo(s"Task ${info.id} in stage ${taskSet.id} (TID $tid) failed, but the task will not" +
        s" be re-executed (either because the task failed with a shuffle data fetch failure," +
        s" so the previous stage needs to be re-run, or because a different copy of the task" +
        s" has already succeeded).")
    } else {
      addPendingTask(index)
    }
    maybeFinishTaskSet()
  }
  /**
   * Aborts the whole task set: notifies the DAGScheduler of the failure, marks this manager as
   * a zombie so no new tasks are launched, and finishes the set if nothing is still running.
   */
  def abort(message: String, exception: Option[Throwable] = None): Unit = sched.synchronized {
    // TODO: Kill running tasks if we were not terminated due to a Mesos error
    sched.dagScheduler.taskSetFailed(taskSet, message, exception)
    isZombie = true
    maybeFinishTaskSet()
  }
/** If the given task ID is not in the set of running tasks, adds it.
*
* Used to keep track of the number of running tasks, for enforcing scheduling policies.
*/
def addRunningTask(tid: Long) {
if (runningTasksSet.add(tid) && parent != null) {
parent.increaseRunningTasks(1)
}
}
/** If the given task ID is in the set of running tasks, removes it. */
def removeRunningTask(tid: Long) {
if (runningTasksSet.remove(tid) && parent != null) {
parent.decreaseRunningTasks(1)
}
}
override def getSchedulableByName(name: String): Schedulable = {
null
}
  // Child schedulables are not supported by a TaskSetManager; both operations are no-ops.
  override def addSchedulable(schedulable: Schedulable) {}
  override def removeSchedulable(schedulable: Schedulable) {}
override def getSortedTaskSetQueue(): ArrayBuffer[TaskSetManager] = {
val sortedTaskSetQueue = new ArrayBuffer[TaskSetManager]()
sortedTaskSetQueue += this
sortedTaskSetQueue
}
  /** Called by TaskScheduler when an executor is lost so we can re-enqueue our tasks.
   *
   * Shuffle-map output that lived only on the lost executor is invalidated (tasks resubmitted),
   * and any task still running on that executor is failed with ExecutorLostFailure.
   */
  override def executorLost(execId: String, host: String, reason: ExecutorLossReason) {
    // Re-enqueue any tasks that ran on the failed executor if this is a shuffle map stage,
    // and we are not using an external shuffle server which could serve the shuffle outputs.
    // The reason is the next stage wouldn't be able to fetch the data from this dead executor
    // so we would need to rerun these tasks on other executors.
    if (tasks(0).isInstanceOf[ShuffleMapTask] && !env.blockManager.externalShuffleServiceEnabled
        && !isZombie) {
      for ((tid, info) <- taskInfos if info.executorId == execId) {
        val index = taskInfos(tid).index
        if (successful(index) && !killedByOtherAttempt.contains(tid)) {
          // Roll back the success bookkeeping so the task is scheduled again.
          successful(index) = false
          copiesRunning(index) -= 1
          tasksSuccessful -= 1
          addPendingTask(index)
          // Tell the DAGScheduler that this task was resubmitted so that it doesn't think our
          // stage finishes when a total of tasks.size tasks finish.
          sched.dagScheduler.taskEnded(
            tasks(index), Resubmitted, null, Seq.empty, Array.empty, info)
        }
      }
    }
    for ((tid, info) <- taskInfos if info.running && info.executorId == execId) {
      // Whether the failure counts against the task's max-failures depends on why the
      // executor exited.
      val exitCausedByApp: Boolean = reason match {
        case exited: ExecutorExited => exited.exitCausedByApp
        case ExecutorKilled => false
        case _ => true
      }
      handleFailedTask(tid, TaskState.FAILED, ExecutorLostFailure(info.executorId, exitCausedByApp,
        Some(reason.toString)))
    }
    // recalculate valid locality levels and waits when executor is lost
    recomputeLocality()
  }
  /**
   * Check for tasks to be speculated and return true if there are any. This is called periodically
   * by the TaskScheduler.
   *
   * A running task becomes speculatable once enough tasks have finished and its running time
   * exceeds max(speculationMultiplier * median successful duration, minTimeToSpeculation).
   */
  override def checkSpeculatableTasks(minTimeToSpeculation: Int): Boolean = {
    // Can't speculate if we only have one task, and no need to speculate if the task set is a
    // zombie or is from a barrier stage.
    if (isZombie || isBarrier || numTasks == 1) {
      return false
    }
    var foundTasks = false
    logDebug("Checking for speculative tasks: minFinished = " + minFinishedForSpeculation)
    // It's possible that a task is marked as completed by the scheduler, then the size of
    // `successfulTaskDurations` may not equal to `tasksSuccessful`. Here we should only count the
    // tasks that are submitted by this `TaskSetManager` and are completed successfully.
    val numSuccessfulTasks = successfulTaskDurations.size()
    if (numSuccessfulTasks >= minFinishedForSpeculation) {
      val time = clock.getTimeMillis()
      val medianDuration = successfulTaskDurations.median
      val threshold = max(speculationMultiplier * medianDuration, minTimeToSpeculation)
      // TODO: Threshold should also look at standard deviation of task durations and have a lower
      // bound based on that.
      logDebug("Task length threshold for speculation: " + threshold)
      for (tid <- runningTasksSet) {
        val info = taskInfos(tid)
        val index = info.index
        // Only speculate tasks with exactly one running copy that are not already marked.
        if (!successful(index) && copiesRunning(index) == 1 && info.timeRunning(time) > threshold &&
            !speculatableTasks.contains(index)) {
          addPendingTask(index, speculatable = true)
          logInfo(
            ("Marking task %d in stage %s (on %s) as speculatable because it ran more" +
              " than %.0f ms(%d speculatable tasks in this taskset now)")
              .format(index, taskSet.id, info.host, threshold, speculatableTasks.size + 1))
          speculatableTasks += index
          sched.dagScheduler.speculativeTaskSubmitted(tasks(index))
          foundTasks = true
        }
      }
    }
    foundTasks
  }
private def getLocalityWait(level: TaskLocality.TaskLocality): Long = {
val localityWait = level match {
case TaskLocality.PROCESS_LOCAL => config.LOCALITY_WAIT_PROCESS
case TaskLocality.NODE_LOCAL => config.LOCALITY_WAIT_NODE
case TaskLocality.RACK_LOCAL => config.LOCALITY_WAIT_RACK
case _ => null
}
if (localityWait != null) {
conf.get(localityWait)
} else {
0L
}
}
/**
* Compute the locality levels used in this TaskSet. Assumes that all tasks have already been
* added to queues using addPendingTask.
*
*/
private def computeValidLocalityLevels(): Array[TaskLocality.TaskLocality] = {
import TaskLocality.{PROCESS_LOCAL, NODE_LOCAL, NO_PREF, RACK_LOCAL, ANY}
val levels = new ArrayBuffer[TaskLocality.TaskLocality]
if (!pendingTasks.forExecutor.isEmpty &&
pendingTasks.forExecutor.keySet.exists(sched.isExecutorAlive(_))) {
levels += PROCESS_LOCAL
}
if (!pendingTasks.forHost.isEmpty &&
pendingTasks.forHost.keySet.exists(sched.hasExecutorsAliveOnHost(_))) {
levels += NODE_LOCAL
}
if (!pendingTasks.noPrefs.isEmpty) {
levels += NO_PREF
}
if (!pendingTasks.forRack.isEmpty &&
pendingTasks.forRack.keySet.exists(sched.hasHostAliveOnRack(_))) {
levels += RACK_LOCAL
}
levels += ANY
logDebug("Valid locality levels for " + taskSet + ": " + levels.mkString(", "))
levels.toArray
}
def recomputeLocality() {
val previousLocalityLevel = myLocalityLevels(currentLocalityIndex)
myLocalityLevels = computeValidLocalityLevels()
localityWaits = myLocalityLevels.map(getLocalityWait)
currentLocalityIndex = getLocalityIndex(previousLocalityLevel)
}
  // A newly added executor may make stricter locality levels valid again, so recompute them.
  def executorAdded() {
    recomputeLocality()
  }
}
private[spark] object TaskSetManager {
  // The user will be warned if any stages contain a task that has a serialized size greater than
  // this threshold (in KiB).
  val TASK_SIZE_TO_WARN_KIB = 1000
}
/**
 * Set of pending tasks for various levels of locality: executor, host, rack,
 * noPrefs and anyPrefs. These collections are actually
 * treated as stacks, in which new tasks are added to the end of the
 * ArrayBuffer and removed from the end. This makes it faster to detect
 * tasks that repeatedly fail because whenever a task failed, it is put
 * back at the head of the stack. These collections may contain duplicates
 * for two reasons:
 * (1): Tasks are only removed lazily; when a task is launched, it remains
 * in all the pending lists except the one that it was launched from.
 * (2): Tasks may be re-added to these lists multiple times as a result
 * of failures.
 * Duplicates are handled in dequeueTaskFromList, which ensures that a
 * task hasn't already started running before launching it.
 *
 * All values are task indexes within the owning TaskSetManager's task set.
 */
private[scheduler] class PendingTasksByLocality {
  // Set of pending tasks for each executor.
  val forExecutor = new HashMap[String, ArrayBuffer[Int]]
  // Set of pending tasks for each host. Similar to pendingTasksForExecutor, but at host level.
  val forHost = new HashMap[String, ArrayBuffer[Int]]
  // Set containing pending tasks with no locality preferences.
  val noPrefs = new ArrayBuffer[Int]
  // Set of pending tasks for each rack -- similar to the above.
  val forRack = new HashMap[String, ArrayBuffer[Int]]
  // Set containing all pending tasks (also used as a stack, as above).
  val all = new ArrayBuffer[Int]
}
| pgandhi999/spark | core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala | Scala | apache-2.0 | 45,668 |
/*
* Copyright 2017 Abcelo, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.abcelo.util.aws.lambda
import scala.collection.mutable.{ListBuffer, MutableList}

import com.amazonaws.services.lambda.runtime.LambdaLogger
/**
 * In-memory [[LambdaLogger]] for tests: captures every logged line so that assertions can be
 * made on the output after the code under test has run.
 */
class TestLambdaLogger extends LambdaLogger {
  // Messages in the order they were logged. ListBuffer replaces the deprecated MutableList
  // (removed in Scala 2.13).
  private val logs = new ListBuffer[String]()
  /** Records a single log message. */
  def log(string: String): Unit = {
    logs += string
  }
  /** Returns all messages logged so far, oldest first. */
  def getLogs: List[String] = logs.toList
}
| abcelo/abcelo-util-aws-lambda | src/test/scala/com/abcelo/util/aws/lambda/TestLambdaLogger.scala | Scala | apache-2.0 | 930 |
package mot.monitoring
/**
 * Convenience [[CommandHandler]] for commands that produce their whole response at once:
 * `handle` ignores the `partWriter` callback and simply returns the value produced by
 * [[simpleHandle]].
 */
trait SimpleCommandHandler extends CommandHandler {
  def handle(processedCommands: Seq[String], commands: Seq[String], partWriter: String => Unit) =
    simpleHandle(processedCommands, commands)
  /** Handles the command and returns the complete response as a single string. */
  def simpleHandle(processedCommands: Seq[String], commands: Seq[String]): String
}
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt
package plugins
import sbt.Def.Setting
import sbt.Keys._
/**
 * AutoPlugin for sbt plugin projects: pulls in [[ScriptedPlugin]] as a requirement and
 * sets `sbtPlugin := true` on the project.
 */
object SbtPlugin extends AutoPlugin {
  override def requires = ScriptedPlugin
  override lazy val projectSettings: Seq[Setting[_]] = Seq(
    sbtPlugin := true
  )
}
| xuwei-k/xsbt | main/src/main/scala/sbt/plugins/SbtPlugin.scala | Scala | apache-2.0 | 385 |
package filodb.prometheus.ast
import scala.util.Try
import filodb.core.{query, GlobalConfig}
import filodb.core.query.{ColumnFilter, RangeParams}
import filodb.query._
object Vectors {
  // Reserved Prometheus label that carries the metric name.
  val PromMetricLabel = "__name__"
  // Special "_type_" label (not referenced in this file; presumably filters by series type).
  val TypeLabel = "_type_"
  // Special label used to select a single histogram bucket (see labelMatchesToFilters).
  val BucketFilterLabel = "_bucket_"
}
object WindowConstants {
  val conf = GlobalConfig.systemConfig
  // How far back (in millis) to look for the most recent sample before treating data as stale;
  // read from the `filodb.query.stale-sample-after` setting.
  val staleDataLookbackMillis = conf.getConfig("filodb.query").
    getDuration("stale-sample-after").toMillis
}
trait Vectors extends Scalars with TimeUnits with Base {
  import Vectors._
  // Labels named by an "on"/"ignoring" modifier of a binary operation.
  sealed trait JoinMatching {
    def labels: Seq[String]
  }
  case class Ignoring(labels: Seq[String]) extends JoinMatching
  case class On(labels: Seq[String]) extends JoinMatching
  // Labels named by a "group_left"/"group_right" modifier of a binary operation.
  sealed trait JoinGrouping {
    def labels: Seq[String]
  }
  case class GroupLeft(labels: Seq[String]) extends JoinGrouping
  case class GroupRight(labels: Seq[String]) extends JoinGrouping
  // Cardinality of a vector-to-vector join, mapped onto the query engine's Cardinality enum.
  sealed trait Cardinal {
    def cardinality: Cardinality
  }
  case object OneToOne extends Cardinal {
    def cardinality: Cardinality = Cardinality.OneToOne
  }
  case object OneToMany extends Cardinal {
    def cardinality: Cardinality = Cardinality.OneToMany
  }
  case object ManyToOne extends Cardinal {
    def cardinality: Cardinality = Cardinality.ManyToOne
  }
  case object ManyToMany extends Cardinal {
    def cardinality: Cardinality = Cardinality.ManyToMany
  }
  /** Matching/grouping modifiers attached to a binary operation between vectors. */
  case class VectorMatch(matching: Option[JoinMatching],
                         grouping: Option[JoinGrouping]) {
    // group_left => many-to-one, group_right => one-to-many, no grouping => one-to-one.
    lazy val cardinality: Cardinal = grouping match {
      case Some(GroupLeft(_)) => ManyToOne
      case Some(GroupRight(_)) => OneToMany
      case None => OneToOne
    }
    def notEmpty: Boolean = matching.isDefined || grouping.isDefined
    /** Rejects matching/grouping clauses that are invalid for the operator/operand combination. */
    def validate(operator: Operator, lhs: Expression, rhs: Expression): Unit = {
      if (notEmpty && (lhs.isInstanceOf[Scalar] || rhs.isInstanceOf[Scalar])) {
        throw new IllegalArgumentException("vector matching only allowed between instant vectors")
      }
      if (grouping.isDefined && operator.isInstanceOf[SetOp]) {
        throw new IllegalArgumentException("no grouping allowed for and, or, unless operations")
      }
      validateGroupAndMatch()
    }
    // A label may not appear both in an ON clause and in a GROUP clause.
    private def validateGroupAndMatch(): Unit = if (grouping.isDefined && matching.isDefined) {
      val group = grouping.get
      val matcher = matching.get
      val matchLabels = matcher.labels
      val groupLabels = group.labels
      groupLabels.foreach { label =>
        if (matchLabels.contains(label) && matcher.isInstanceOf[On]) {
          throw new IllegalArgumentException("Labels must not occur in ON and GROUP clause at once")
        }
      }
    }
  }
  /** Common base of instant and range vector selectors. */
  sealed trait Vector extends Expression {
    def metricName: Option[String]
    def labelSelection: Seq[LabelMatch]
    val regexColumnName: String = "::(?=[^::]+$)" //regex pattern to extract ::columnName at the end
    // Convert metricName{labels} -> {labels, __name__="metricName"} so it's uniform
    lazy val mergeNameToLabels: Seq[LabelMatch] = {
      val nameLabel = labelSelection.find(_.label == PromMetricLabel)
      if (nameLabel.isEmpty && metricName.isEmpty)
        throw new IllegalArgumentException("Metric name is not present")
      if (metricName.nonEmpty) {
        if (nameLabel.nonEmpty) throw new IllegalArgumentException("Metric name should not be set twice")
        // metric name specified but no __name__ label. Add it
        labelSelection :+ LabelMatch(PromMetricLabel, EqualMatch, metricName.get)
      } else {
        labelSelection
      }
    }
    def realMetricName: String = mergeNameToLabels.find(_.label == PromMetricLabel).get.value
    // Returns (trimmedMetricName, column) after stripping ::columnName
    private def extractStripColumn(metricName: String): (String, Option[String]) = {
      val parts = metricName.split(regexColumnName)
      if (parts.size > 1) {
        require(parts(1).nonEmpty, "cannot use empty column name")
        (parts(0), Some(parts(1)))
      } else (metricName, None)
    }
    // Parses a _bucket_ filter value; "+inf" (case-insensitive) maps to positive infinity,
    // any unparseable value yields None.
    private def parseBucketValue(value: String): Option[Double] =
      if (value.toLowerCase == "+inf") Some(Double.PositiveInfinity) else Try(value.toDouble).toOption
    /**
     * Converts LabelMatches to ColumnFilters. Along the way:
     * - extracts ::col column name expressions in metric names to columns
     * - removes ::col in __name__ label matches as needed
     * Also extracts special _bucket_ histogram bucket filter
     *
     * @return (column filters, optional ::column name, optional histogram bucket value)
     */
    protected def labelMatchesToFilters(labels: Seq[LabelMatch]) = {
      var column: Option[String] = None
      var bucketOpt: Option[Double] = None
      val filters = labels.filter { labelMatch =>
        if (labelMatch.label == BucketFilterLabel) {
          // _bucket_ is consumed here and must not become a regular column filter.
          bucketOpt = parseBucketValue(labelMatch.value)
          false
        } else true
      }.map { labelMatch =>
        val labelVal = labelMatch.value
        val labelValue = if (labelMatch.label == PromMetricLabel) {
          val (newValue, colNameOpt) = extractStripColumn(labelVal)
          colNameOpt.foreach { col => column = Some(col) }
          newValue
        } else { labelVal }
        labelMatch.labelMatchOp match {
          case EqualMatch => ColumnFilter(labelMatch.label, query.Filter.Equals(labelValue))
          case NotRegexMatch => ColumnFilter(labelMatch.label, query.Filter.NotEqualsRegex(labelValue))
          case RegexMatch => ColumnFilter(labelMatch.label, query.Filter.EqualsRegex(labelValue))
          case NotEqual(false) => ColumnFilter(labelMatch.label, query.Filter.NotEquals(labelValue))
          case other: Any => throw new IllegalArgumentException(s"Unknown match operator $other")
        }
      }
      (filters, column, bucketOpt)
    }
  }
  /**
   * Instant vector selectors allow the selection of a set of time series
   * and a single sample value for each at a given timestamp (instant):
   * in the simplest form, only a metric name is specified.
   * This results in an instant vector containing elements
   * for all time series that have this metric name.
   * It is possible to filter these time series further by
   * appending a set of labels to match in curly braces ({}).
   */
  case class InstantExpression(metricName: Option[String],
                               labelSelection: Seq[LabelMatch],
                               offset: Option[Duration]) extends Vector with PeriodicSeries {
    import WindowConstants._
    private[prometheus] val (columnFilters, column, bucketOpt) = labelMatchesToFilters(mergeNameToLabels)
    def toSeriesPlan(timeParams: TimeRangeParams): PeriodicSeriesPlan = {
      // we start from 5 minutes earlier that provided start time in order to include last sample for the
      // start timestamp. Prometheus goes back up to 5 minutes to get sample before declaring as stale
      val ps = PeriodicSeries(
        RawSeries(timeParamToSelector(timeParams), columnFilters, column.toSeq, Some(staleDataLookbackMillis),
          offset.map(_.millis(timeParams.step * 1000))),
        timeParams.start * 1000, timeParams.step * 1000, timeParams.end * 1000,
        offset.map(_.millis(timeParams.step * 1000))
      )
      // A _bucket_ filter wraps the plan in a histogram_bucket function application.
      bucketOpt.map { bOpt =>
        // It's a fixed value, the range params don't matter at all
        val param = ScalarFixedDoublePlan(bOpt, RangeParams(0, Long.MaxValue, 60000L))
        ApplyInstantFunction(ps, InstantFunctionId.HistogramBucket, Seq(param))
      }.getOrElse(ps)
    }
    /** Builds a metadata query plan that returns the series keys matching this selector. */
    def toMetadataPlan(timeParams: TimeRangeParams, fetchFirstLastSampleTimes: Boolean): SeriesKeysByFilters = {
      SeriesKeysByFilters(columnFilters, fetchFirstLastSampleTimes, timeParams.start * 1000, timeParams.end * 1000)
    }
    /** Builds a raw-series plan for this selector with the stale-data lookback applied. */
    def toRawSeriesPlan(timeParams: TimeRangeParams, offsetMs: Option[Long] = None): RawSeries = {
      RawSeries(timeParamToSelector(timeParams), columnFilters, column.toSeq, Some(staleDataLookbackMillis),
        offsetMs)
    }
  }
  /**
   * Range vector literals work like instant vector literals,
   * except that they select a range of samples back from the current instant.
   * Syntactically, a range duration is appended in square brackets ([])
   * at the end of a vector selector to specify how far back in time values
   * should be fetched for each resulting range vector element.
   */
  case class RangeExpression(metricName: Option[String],
                             labelSelection: Seq[LabelMatch],
                             window: Duration,
                             offset: Option[Duration]) extends Vector with SimpleSeries {
    private[prometheus] val (columnFilters, column, bucketOpt) = labelMatchesToFilters(mergeNameToLabels)
    def toSeriesPlan(timeParams: TimeRangeParams, isRoot: Boolean): RawSeriesLikePlan = {
      // A bare range expression is only legal as an instant query (start == end).
      if (isRoot && timeParams.start != timeParams.end) {
        throw new UnsupportedOperationException("Range expression is not allowed in query_range")
      }
      // multiply by 1000 to convert unix timestamp in seconds to millis
      val rs = RawSeries(timeParamToSelector(timeParams), columnFilters, column.toSeq,
        Some(window.millis(timeParams.step * 1000)),
        offset.map(_.millis(timeParams.step * 1000)))
      bucketOpt.map { bOpt =>
        // It's a fixed value, the range params don't matter at all
        val param = ScalarFixedDoublePlan(bOpt, RangeParams(0, Long.MaxValue, 60000L))
        ApplyInstantFunctionRaw(rs, InstantFunctionId.HistogramBucket, Seq(param))
      }.getOrElse(rs)
    }
  }
}
| tuplejump/FiloDB | prometheus/src/main/scala/filodb/prometheus/ast/Vectors.scala | Scala | apache-2.0 | 9,604 |
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package index
import _root_.org.jetbrains.plugins.scala.lang.psi.impl.search.ScSourceFilterScope
import com.intellij.openapi.project.Project
import com.intellij.psi.PsiClass
import com.intellij.psi.search.GlobalSearchScope
import com.intellij.psi.stubs.IntStubIndexExtension
/**
 * Int-keyed stub index for looking up classes by fully-qualified name (the integer key is
 * presumably the FQN's hash — see ScalaIndexKeys.FQN_KEY). Lookups are restricted to Scala
 * sources by wrapping the search scope in [[ScSourceFilterScope]].
 *
 * @author ilyas
 */
class ScFullClassNameIndex extends IntStubIndexExtension[PsiClass] {
  override def get(int: java.lang.Integer, project: Project, scope: GlobalSearchScope): java.util.Collection[PsiClass] =
    super.get(int, project, new ScSourceFilterScope(scope, project))
  def getKey = ScFullClassNameIndex.KEY
}
object ScFullClassNameIndex {
  /** Index key for the fully-qualified-class-name (hash) stub index. */
  val KEY = ScalaIndexKeys.FQN_KEY
}
package utils
import Spark.SparkManager.sparkContext
import schemas.Cassandra._
import com.datastax.spark.connector._
import org.apache.spark.rdd.RDD
import scala.io.Source
/**
* Created by raphael on 22/06/16.
*/
object CassandraLoader {
  //val basePath = "/home/ubuntu/DataRio/"
  val basePath = "/home/raphael/Documents/Projetos/GitProj/DataRio/"
  val encoding = "ISO-8859-1"

  /**
   * Reads a CSV file located under `basePath` and parallelizes its lines into an RDD.
   * The file handle is explicitly closed once the lines have been materialized
   * (the original inline reads leaked the handle).
   */
  private def readLines(fileName: String): RDD[String] = {
    val source = Source.fromFile(basePath + fileName, encoding)
    // toList forces the lazy line iterator before the underlying stream is closed
    try sparkContext.parallelize(source.getLines().toList)
    finally source.close()
  }

  /** True when `s` parses as an Int; used to keep only data rows keyed by a numeric id. */
  private def isIntColumn(s: String): Boolean = scala.util.Try(s.toInt).isSuccess

  /** Left-pads the school registration code (column 1) with a zero when it has six digits. */
  private def padRegistration(fields: Array[String]): Array[String] = {
    if (fields(1).length == 6)
      fields(1) = "0" + fields(1)
    fields
  }

  /** (CSV column index, materia id) pairs used to unpivot the per-subject teacher counts. */
  private val materiaColumns: Seq[(Int, Int)] = Seq(
    3 -> 15, 4 -> 16, 5 -> 18, 6 -> 1, 7 -> 10, 8 -> 8, 9 -> 17, 10 -> 11, 11 -> 12, 12 -> 2,
    13 -> 3, 14 -> 13, 15 -> 5, 16 -> 4, 17 -> 14, 18 -> 19, 19 -> 20, 20 -> 23, 21 -> 24, 22 -> 25)

  /** Loads the schools CSV into the `spark.escolas` table. */
  def escolas(): Unit = {
    val escolasRdd = readLines("escolas__.csv")
      .map(line => line.split(","))
      .filter(x => x.length > 18)
      .filter(x => isIntColumn(x.head))
      .map(x => x :+ "") // pad to 20 columns: the last Escola field is not present in the file
      .map(x => new Escola(x(0), x(1), x(2), x(3), x(4), x(5), x(6), x(7), x(8),
        x(9), x(10), x(11), x(12), x(13), x(14), x(15), x(16), x(17), x(18), x(19)))
    escolasRdd.saveToCassandra("spark", "escolas")
  }

  /** Loads the per-school teacher counts (one row per school) into `spark.professores2`. */
  def professores2(): Unit = {
    val rows = readLines("ProfessoresEscola.csv")
      .map(line => line.split(","))
      .filter(x => x.length > 1)
      .filter(x => x(0) != "Nome") // skip the header row
      .map(padRegistration)
      .map(x => new Professores2(x(1), x(3).toInt, x(4).toInt, x(5).toInt, x(6).toInt, x(7).toInt,
        x(8).toInt, x(9).toInt, x(10).toInt, x(11).toInt, x(12).toInt, x(13).toInt, x(14).toInt,
        x(15).toInt, x(16).toInt, x(17).toInt, x(18).toInt, x(19).toInt, x(20).toInt, x(21).toInt,
        x(22).toInt))
    rows.saveToCassandra("spark", "professores2")
  }

  /** Loads the teacher counts unpivoted to one row per (school, materia) into `spark.professores`. */
  def professores(): Unit = {
    val rows: RDD[Professores] = readLines("ProfessoresEscola.csv")
      .map(line => line.split(","))
      .filter(x => x.length > 1)
      .filter(x => x(0) != "Nome") // skip the header row
      .map(padRegistration)
      .flatMap(x => materiaColumns.map { case (col, materia) =>
        new Professores(x(1), materia, materiasMap(materia), x(col).toInt)
      })
    rows.saveToCassandra("spark", "professores")
  }

  /** Loads attendance/approval figures for every school into `spark.frequencia_e_aprovacao`. */
  def frequencia_e_aprovacao(): Unit = {
    val schoolIds = sparkContext.cassandraTable[Escola]("spark", "escolas").map(x => x.designacao)
    // collect() first: the per-school load below uses sparkContext, which exists only on the
    // driver; referencing it inside RDD.foreach would fail when the closure is shipped to executors.
    schoolIds.collect().foreach(id => {
      val rows = readLines("Frequencia/" + id + ".csv")
        .map(x => x.split(","))
        .filter(x => x.length > 1)
        .filter(x => x(0) != "Ano Letivo") // skip the header row
        .filter(x => x(3) != "Total")      // skip aggregate rows
        .map(x => {
          // empty numeric cells default to zero
          for (i <- Seq(2, 4, 5, 6) if x(i) == "") x(i) = "0"
          x
        })
        .map(x => new Frequencia_e_Aprovacao(id, x(0), x(2).toInt, x(3), x(4).toInt, x(5).toInt,
          x(6).toInt))
      rows.saveToCassandra("spark", "frequencia_e_aprovacao")
    })
  }

  /** Loads grade averages for every school into `spark.media`. */
  def media(): Unit = {
    val schoolIds = sparkContext.cassandraTable[Escola]("spark", "escolas").map(x => x.designacao)
    // See frequencia_e_aprovacao: iterate on the driver, not inside an RDD action.
    schoolIds.collect().foreach(id => {
      val rows = readLines("Media/" + id + ".csv")
        .map(x => x.split(","))
        .filter(x => x.length == 11)
        .filter(x => x(0) != "Ano Letivo") // skip the header row
        .map(x => new Media(id, x(0), x(3).toInt, x(4).toInt, x(5).toInt, x(7).toInt, x(6), x(8),
          x(9), x(10).toDouble))
      rows.saveToCassandra("spark", "media")
    })
  }
}
| raphael-bos/Ufrj-Spark | Spark/src/main/scala-2.10/utils/CassandraLoader.scala | Scala | gpl-3.0 | 5,104 |
/*
* Copyright (C) 2020 MapRoulette contributors (see CONTRIBUTORS.md).
* Licensed under the Apache License, Version 2.0 (see LICENSE).
*/
package org.maproulette.framework.model
import org.apache.commons.lang3.StringUtils
import org.joda.time.DateTime
import org.maproulette.data.{ItemType, TaskType}
import org.maproulette.framework.model.{Challenge, Identifiable, MapillaryImage}
import org.maproulette.framework.psql.CommonField
import org.maproulette.utils.Utils
import play.api.data.format.Formats
import play.api.libs.json._
import play.api.libs.json.JodaWrites._
import play.api.libs.json.JodaReads._
import org.maproulette.models.BaseObject
/**
 * Review workflow metadata for a task. All fields are optional because a task
 * may never enter (or complete) the review or meta-review workflow. These
 * fields are flattened onto the top level of the task JSON by Task.TaskFormat.
 */
case class TaskReviewFields(
    reviewStatus: Option[Int] = None,
    reviewRequestedBy: Option[Long] = None,
    reviewedBy: Option[Long] = None,
    reviewedAt: Option[DateTime] = None,
    metaReviewedBy: Option[Long] = None,
    metaReviewStatus: Option[Int] = None,
    metaReviewedAt: Option[DateTime] = None,
    reviewStartedAt: Option[DateTime] = None,
    reviewClaimedBy: Option[Long] = None,
    reviewClaimedAt: Option[DateTime] = None,
    additionalReviewers: Option[List[Long]] = None
) extends DefaultWrites
/**
 * The primary object in MapRoulette is the task, this is the object that defines the actual problem
 * in the OSM data that needs to be fixed. It is a child of a Challenge and has a special one to
 * many relationship with tags.
 *
 * Field notes (grounded in this file; others come from the persistence layer):
 *  - parent: the id of the owning challenge
 *  - instruction: a detailed instruction on how to fix this particular task
 *  - location / geometries / cooperativeWork: raw JSON stored as strings, parsed
 *    on serialization (see Task.TaskFormat)
 *  - status: one of the Task.STATUS_* constants
 *  - priority: Challenge.PRIORITY_HIGH/MEDIUM/LOW (HIGH = 0, MEDIUM = 1, LOW = 2)
 *
 * TODO: Because the geometries is contained in a separate table, if requesting a large number of
 * tasks all at once it could cause performance issues.
 *
 * @author cuthbertm
 */
case class Task(
    override val id: Long,
    override val name: String,
    override val created: DateTime,
    override val modified: DateTime,
    parent: Long,
    instruction: Option[String] = None,
    location: Option[String] = None,
    geometries: String,
    cooperativeWork: Option[String] = None,
    status: Option[Int] = None,
    mappedOn: Option[DateTime] = None,
    completedTimeSpent: Option[Long] = None,
    completedBy: Option[Long] = None,
    review: TaskReviewFields = TaskReviewFields(),
    priority: Int = Challenge.PRIORITY_HIGH,
    changesetId: Option[Long] = None,
    completionResponses: Option[String] = None,
    bundleId: Option[Long] = None,
    isBundlePrimary: Option[Boolean] = None,
    mapillaryImages: Option[List[MapillaryImage]] = None
) extends BaseObject[Long]
    with DefaultReads
    with LowPriorityDefaultReads
    with Identifiable {

  override val itemType: ItemType = TaskType()

  /**
   * Gets the task priority
   *
   * @param parent The parent Challenge
   * @return Priority HIGH = 0, MEDIUM = 1, LOW = 2
   */
  def getTaskPriority(parent: Challenge): Int = {
    // For each geometry feature, record the highest-precedence priority rule
    // (high, then medium, then low) that its properties match, if any.
    val matchingList = getGeometryProperties().flatMap { props =>
      if (parent.isHighPriority(props, this)) {
        Some(Challenge.PRIORITY_HIGH)
      } else if (parent.isMediumPriority(props, this)) {
        Some(Challenge.PRIORITY_MEDIUM)
      } else if (parent.isLowRulePriority(props, this)) {
        Some(Challenge.PRIORITY_LOW)
      } else {
        None
      }
    }
    // No feature matched any rule -> challenge default; otherwise the strongest
    // matched priority across all features wins (HIGH > MEDIUM > LOW).
    if (matchingList.isEmpty) {
      parent.priority.defaultPriority
    } else if (matchingList.contains(Challenge.PRIORITY_HIGH)) {
      Challenge.PRIORITY_HIGH
    } else if (matchingList.contains(Challenge.PRIORITY_MEDIUM)) {
      Challenge.PRIORITY_MEDIUM
    } else {
      Challenge.PRIORITY_LOW
    }
  }

  /**
   * Extracts the `properties` object of every GeoJSON feature in `geometries`
   * as a string-to-string map; empty when there is no geometry JSON.
   */
  def getGeometryProperties(): List[Map[String, String]] = {
    if (StringUtils.isNotEmpty(this.geometries)) {
      val geojson = Json.parse(this.geometries)
      (geojson \\ "features")
        .as[List[JsValue]]
        .map(json => Utils.getProperties(json, "properties").as[Map[String, String]])
    } else {
      List.empty
    }
  }
}
object Task extends CommonField {
  // TASK FIELDS
  // Database table and column names used when building queries over tasks.
  val TABLE = "tasks"
  val FIELD_LOCATION = "location"
  val FIELD_STATUS = "status"
  val FIELD_PRIORITY = "priority"
  val FIELD_BUNDLE_ID = "bundle_id"
  val FIELD_BUNDLE_PRIMARY = "is_bundle_primary"
  /**
   * Play JSON serializer/deserializer for Task.
   *
   * Writing flattens the nested TaskReviewFields onto the top level of the task
   * object and embeds the `location`, `geometries` and `cooperativeWork` strings
   * (raw JSON text in the model) as parsed JSON values. Reading injects a
   * `review` key first so `Json.reads[Task]` can always build the nested
   * TaskReviewFields.
   */
  implicit object TaskFormat extends Format[Task] {
    override def writes(o: Task): JsValue = {
      implicit val mapillaryWrites: Writes[MapillaryImage] = Json.writes[MapillaryImage]
      implicit val reviewWrites: Writes[TaskReviewFields] = Json.writes[TaskReviewFields]
      implicit val taskWrites: Writes[Task] = Json.writes[Task]
      var original = Json.toJson(o)(Json.writes[Task])
      // Replace the raw location string (when present) with its parsed JSON form
      var updatedLocation = o.location match {
        case Some(l) => Utils.insertIntoJson(original, "location", Json.parse(l), true)
        case None => original
      }
      original = Utils.insertIntoJson(updatedLocation, "geometries", Json.parse(o.geometries), true)
      var updated = o.cooperativeWork match {
        case Some(cw) => Utils.insertIntoJson(original, "cooperativeWork", Json.parse(cw), true)
        case None => original
      }
      // Move review fields up to top level
      // NOTE(review): reviewClaimedAt is the only review field not copied up — confirm intentional.
      updated = o.review.reviewStatus match {
        case Some(r) => {
          Utils.insertIntoJson(updated, "reviewStatus", r, true)
        }
        case None => updated
      }
      updated = o.review.reviewRequestedBy match {
        case Some(r) => Utils.insertIntoJson(updated, "reviewRequestedBy", r, true)
        case None => updated
      }
      updated = o.review.reviewedBy match {
        case Some(r) => Utils.insertIntoJson(updated, "reviewedBy", r, true)
        case None => updated
      }
      updated = o.review.reviewedAt match {
        case Some(r) => Utils.insertIntoJson(updated, "reviewedAt", r, true)
        case None => updated
      }
      updated = o.review.metaReviewStatus match {
        case Some(r) => {
          Utils.insertIntoJson(updated, "metaReviewStatus", r, true)
        }
        case None => updated
      }
      updated = o.review.metaReviewedBy match {
        case Some(r) => Utils.insertIntoJson(updated, "metaReviewedBy", r, true)
        case None => updated
      }
      updated = o.review.metaReviewedAt match {
        case Some(r) => Utils.insertIntoJson(updated, "metaReviewedAt", r, true)
        case None => updated
      }
      updated = o.review.reviewStartedAt match {
        case Some(r) => Utils.insertIntoJson(updated, "reviewStartedAt", r, true)
        case None => updated
      }
      updated = o.review.reviewClaimedBy match {
        case Some(r) => Utils.insertIntoJson(updated, "reviewClaimedBy", r, true)
        case None => updated
      }
      updated = o.review.additionalReviewers match {
        case Some(r) => Utils.insertIntoJson(updated, "additionalReviewers", r, true)
        case None => updated
      }
      // Repeats the geometries insertion above with identical arguments;
      // redundant but harmless, kept as-is.
      Utils.insertIntoJson(updated, "geometries", Json.parse(o.geometries), true)
    }

    override def reads(json: JsValue): JsResult[Task] = {
      implicit val mapillaryReads: Reads[MapillaryImage] = Json.reads[MapillaryImage]
      implicit val reviewReads: Reads[TaskReviewFields] = Json.reads[TaskReviewFields]
      // The `false` flag presumably keeps an existing "review" object intact —
      // confirm against Utils.insertIntoJson.
      val jsonWithReview = Utils.insertIntoJson(json, "review", Map[String, String](), false)
      Json.fromJson[Task](jsonWithReview)(Json.reads[Task])
    }
  }
  implicit val doubleFormatter = Formats.doubleFormat

  // Task status ids with their display names; see isValidStatusProgression for
  // the transitions allowed between them.
  val STATUS_CREATED = 0
  val STATUS_CREATED_NAME = "Created"
  val STATUS_FIXED = 1
  val STATUS_FIXED_NAME = "Fixed"
  val STATUS_FALSE_POSITIVE = 2
  val STATUS_FALSE_POSITIVE_NAME = "Not_An_Issue"
  val STATUS_SKIPPED = 3
  val STATUS_SKIPPED_NAME = "Skipped"
  val STATUS_DELETED = 4
  val STATUS_DELETED_NAME = "Deleted"
  val STATUS_ALREADY_FIXED = 5
  val STATUS_ALREADY_FIXED_NAME = "Already_Fixed"
  val STATUS_TOO_HARD = 6
  val STATUS_TOO_HARD_NAME = "Too_Hard"
  val STATUS_ANSWERED = 7
  val STATUS_ANSWERED_NAME = "Answered"
  val STATUS_VALIDATED = 8
  val STATUS_VALIDATED_NAME = "Validated"
  val STATUS_DISABLED = 9
  val STATUS_DISABLED_NAME = "Disabled"
  // Lookup of status id -> display name; also defines the set of valid ids
  // (see isValidStatus).
  val statusMap = Map(
    STATUS_CREATED -> STATUS_CREATED_NAME,
    STATUS_FIXED -> STATUS_FIXED_NAME,
    STATUS_SKIPPED -> STATUS_SKIPPED_NAME,
    STATUS_FALSE_POSITIVE -> STATUS_FALSE_POSITIVE_NAME,
    STATUS_DELETED -> STATUS_DELETED_NAME,
    STATUS_ALREADY_FIXED -> STATUS_ALREADY_FIXED_NAME,
    STATUS_TOO_HARD -> STATUS_TOO_HARD_NAME,
    STATUS_ANSWERED -> STATUS_ANSWERED_NAME,
    STATUS_VALIDATED -> STATUS_VALIDATED_NAME,
    STATUS_DISABLED -> STATUS_DISABLED_NAME
  )

  // Review workflow status ids with their display names.
  val REVIEW_STATUS_REQUESTED = 0
  val REVIEW_STATUS_REQUESTED_NAME = "Requested"
  val REVIEW_STATUS_APPROVED = 1
  val REVIEW_STATUS_APPROVED_NAME = "Approved"
  val REVIEW_STATUS_REJECTED = 2
  val REVIEW_STATUS_REJECTED_NAME = "Rejected"
  val REVIEW_STATUS_ASSISTED = 3
  val REVIEW_STATUS_ASSISTED_NAME = "Assisted"
  val REVIEW_STATUS_DISPUTED = 4
  val REVIEW_STATUS_DISPUTED_NAME = "Disputed"
  val REVIEW_STATUS_UNNECESSARY = 5
  val REVIEW_STATUS_UNNECESSARY_NAME = "Unnecessary"

  // For display purposes
  val REVIEW_STATUS_NOT_REQUESTED = -1
  val REVIEW_STATUS_NOT_REQUESTED_NAME = ""

  // For Meta Reviews
  val META_REVIEW_STATUS_NOT_SET = -2 //Review status is set but meta-review is not

  // Lookup of review status id -> display name; defines the valid review ids
  // (see isValidReviewStatus). META_REVIEW_STATUS_NOT_SET is intentionally absent.
  val reviewStatusMap = Map(
    REVIEW_STATUS_NOT_REQUESTED -> REVIEW_STATUS_NOT_REQUESTED_NAME,
    REVIEW_STATUS_REQUESTED -> REVIEW_STATUS_REQUESTED_NAME,
    REVIEW_STATUS_APPROVED -> REVIEW_STATUS_APPROVED_NAME,
    REVIEW_STATUS_REJECTED -> REVIEW_STATUS_REJECTED_NAME,
    REVIEW_STATUS_ASSISTED -> REVIEW_STATUS_ASSISTED_NAME,
    REVIEW_STATUS_DISPUTED -> REVIEW_STATUS_DISPUTED_NAME,
    REVIEW_STATUS_UNNECESSARY -> REVIEW_STATUS_UNNECESSARY_NAME
  )
/**
* Based on the status id, will return a boolean stating whether it is a valid id or not
*
* @param status The id to check for validity
* @return true if status id is valid
*/
def isValidStatus(status: Int): Boolean = statusMap.contains(status)
  /**
   * A Task must have a valid progression between statuses. The following rules apply:
   * - If the current status is created, then it can be set to any of the other statuses.
   * - If the current status is fixed, then the status cannot be changed.
   * - If the current status is false_positive, then it can only be changed to fixed
   *   (this is the case where it was accidentally set to false positive).
   * - If the current status is skipped or too_hard, then it can be set to fixed,
   *   false_positive, already_fixed, skipped, too_hard or answered.
   * - If the current status is deleted or disabled, then it can be set to created,
   *   essentially resetting the task.
   * - Setting the status to deleted or disabled (or to the current value) is always allowed.
   *
   * The exception is if we are allowing the task status to be changed by the mapper in case
   * of review revisions; then the completed statuses fixed, false_positive and already_fixed
   * can also change to another completed status.
   *
   * @param current The current status of the task
   * @param toSet The status that the task will be set too
   * @param allowChange Allow an already completed status to be changed to another completed status.
   * @return True if the status can be set without violating any of the above rules
   */
  def isValidStatusProgression(current: Int, toSet: Int, allowChange: Boolean = false): Boolean = {
    if (current == toSet || toSet == STATUS_DELETED || toSet == STATUS_DISABLED) {
      true
    } else {
      // NOTE(review): an unknown `current` id throws a MatchError here; callers
      // presumably validate with isValidStatus first — confirm.
      current match {
        case STATUS_CREATED => true
        case STATUS_FIXED =>
          if (allowChange)
            toSet == STATUS_FALSE_POSITIVE || toSet == STATUS_ALREADY_FIXED || toSet == STATUS_TOO_HARD
          else false
        case STATUS_FALSE_POSITIVE =>
          if (allowChange)
            toSet == STATUS_FIXED || toSet == STATUS_ALREADY_FIXED || toSet == STATUS_TOO_HARD
          else toSet == STATUS_FIXED
        case STATUS_SKIPPED | STATUS_TOO_HARD =>
          toSet == STATUS_FIXED || toSet == STATUS_FALSE_POSITIVE || toSet == STATUS_ALREADY_FIXED ||
            toSet == STATUS_SKIPPED || toSet == STATUS_TOO_HARD || toSet == STATUS_ANSWERED
        case STATUS_DELETED => toSet == STATUS_CREATED
        case STATUS_ALREADY_FIXED =>
          if (allowChange)
            toSet == STATUS_FIXED || toSet == STATUS_FALSE_POSITIVE || toSet == STATUS_TOO_HARD
          else false
        case STATUS_ANSWERED => false
        case STATUS_VALIDATED => false
        case STATUS_DISABLED => toSet == STATUS_CREATED
      }
    }
  }
  /**
   * Gets the string name of the status based on a status id
   *
   * @param status The status id
   * @return None if status id is invalid, otherwise the name of the status
   */
  def getStatusName(status: Int): Option[String] = statusMap.get(status)
/**
* Gets the status id based on the status name
*
* @param status The status name
* @return None if status name is invalid, otherwise the id of the status
*/
def getStatusID(status: String): Option[Int] =
statusMap.find(_._2.equalsIgnoreCase(status)) match {
case Some(a) => Some(a._1)
case None => None
}
/**
* Based on the review status id, will return a boolean stating whether it is a valid id or not
*
* @param reviewStatus The id to check for validity
* @return true if review status id is valid
*/
def isValidReviewStatus(reviewStatus: Int): Boolean = reviewStatusMap.contains(reviewStatus)
  /**
   * Gets the string name of the review status based on a status id
   *
   * @param reviewStatus The review status id
   * @return None if review status id is invalid, otherwise the name of the status
   */
  def getReviewStatusName(reviewStatus: Int): Option[String] = reviewStatusMap.get(reviewStatus)
/**
* Gets the review status id based on the review status name
*
* @param reviewStatus The review status name
* @return None if review status name is invalid, otherwise the id of the review status
*/
def getReviewStatusID(reviewStatus: String): Option[Int] =
reviewStatusMap.find(_._2.equalsIgnoreCase(reviewStatus)) match {
case Some(a) => Some(a._1)
case None => None
}
  /** A placeholder task (id -1, empty name/geometries) attached to the given parent challenge. */
  def emptyTask(parentId: Long): Task =
    Task(-1, "", DateTime.now(), DateTime.now(), parentId, Some(""), None, "")
}
| mgcuthbert/maproulette2 | app/org/maproulette/framework/model/Task.scala | Scala | apache-2.0 | 14,944 |
/** Copyright 2015 TappingStone, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Deprecated */
package io.prediction.data.view
import io.prediction.data.storage.Event
import io.prediction.data.storage.EventValidation
import io.prediction.data.storage.DataMap
import io.prediction.data.storage.Storage
import org.joda.time.DateTime
import scala.language.implicitConversions
import scala.concurrent.ExecutionContext.Implicits.global // TODO
object ViewPredicates {
  /** Lifts an optional start time into an event predicate; absent accepts all events. */
  def getStartTimePredicate(startTimeOpt: Option[DateTime]): (Event => Boolean) =
    startTimeOpt match {
      case Some(startTime) => getStartTimePredicate(startTime)
      case None => (_: Event) => true
    }

  /**
   * Accepts events whose time is strictly after `startTime`.
   * NOTE(review): an event exactly at `startTime` is rejected, although the
   * LBatchView scaladoc describes the start bound as inclusive — confirm the
   * intended semantics.
   */
  def getStartTimePredicate(startTime: DateTime): (Event => Boolean) =
    e => e.eventTime.isAfter(startTime)

  /** Lifts an optional until time into an event predicate; absent accepts all events. */
  def getUntilTimePredicate(untilTimeOpt: Option[DateTime]): (Event => Boolean) =
    untilTimeOpt match {
      case Some(untilTime) => getUntilTimePredicate(untilTime)
      case None => (_: Event) => true
    }

  /** Accepts events strictly before `untilTime` (exclusive upper bound). */
  def getUntilTimePredicate(untilTime: DateTime): (Event => Boolean) =
    e => e.eventTime.isBefore(untilTime)

  /** Lifts an optional entity type into an event predicate; absent accepts all events. */
  def getEntityTypePredicate(entityTypeOpt: Option[String]): (Event => Boolean) =
    entityTypeOpt match {
      case Some(entityType) => getEntityTypePredicate(entityType)
      case None => (_: Event) => true
    }

  /** Accepts events whose entity type equals `entityType`. */
  def getEntityTypePredicate(entityType: String): (Event => Boolean) =
    e => e.entityType == entityType

  /** Lifts an optional event name into an event predicate; absent accepts all events. */
  def getEventPredicate(eventOpt: Option[String]): (Event => Boolean) =
    eventOpt match {
      case Some(event) => getEventPredicate(event)
      case None => (_: Event) => true
    }

  /** Accepts events whose name equals `event`. */
  def getEventPredicate(event: String): (Event => Boolean) =
    e => e.event == event
}
object ViewAggregators {
  /**
   * Folding function that replays the special property events into the current
   * (optional) property map of an entity:
   *  - "$set" merges the event's properties over the existing map, or starts a
   *    new map when there is none yet;
   *  - "$unset" removes the event's keys (an absent map stays absent);
   *  - "$delete" drops the map entirely;
   *  - any other event leaves the state untouched.
   */
  def getDataMapAggregator(): ((Option[DataMap], Event) => Option[DataMap]) = {
    (p, e) => {
      e.event match {
        // Option combinators replace the previous explicit `p == None` branching
        case "$set" => p.map(_ ++ e.properties).orElse(Some(e.properties))
        case "$unset" => p.map(_ -- e.properties.keySet)
        case "$delete" => None
        case _ => p // do nothing for others
      }
    }
  }
}
object EventSeq {
  // Implicit views between EventSeq and List[Event] so the two can be used
  // interchangeably at call sites.
  // Need to
  // >>> import scala.language.implicitConversions
  // to enable implicit conversion. Only import in the code where this is
  // necessary to avoid confusion.
  implicit def eventSeqToList(es: EventSeq): List[Event] = es.events
  implicit def listToEventSeq(l: List[Event]): EventSeq = new EventSeq(l)
}
/** A thin wrapper over a list of events offering filtering and per-entity folding. */
class EventSeq(val events: List[Event]) {

  /**
   * Filters events by optional event name, time window and entity type;
   * criteria that are not supplied match everything. Time bounds follow
   * [[ViewPredicates]]: strictly after the start time, strictly before the
   * until time.
   */
  def filter(
    eventOpt: Option[String] = None,
    entityTypeOpt: Option[String] = None,
    startTimeOpt: Option[DateTime] = None,
    untilTimeOpt: Option[DateTime] = None): EventSeq = {
    events
    .filter(ViewPredicates.getEventPredicate(eventOpt))
    .filter(ViewPredicates.getStartTimePredicate(startTimeOpt))
    .filter(ViewPredicates.getUntilTimePredicate(untilTimeOpt))
    .filter(ViewPredicates.getEntityTypePredicate(entityTypeOpt))
  }

  /** Filters events with an arbitrary predicate (result implicitly re-wrapped). */
  def filter(p: (Event => Boolean)): EventSeq = events.filter(p)

  /**
   * Groups events by entity id and folds each group in eventTime order,
   * starting from `init`; yields the final accumulator per entity.
   */
  def aggregateByEntityOrdered[T](init: T, op: (T, Event) => T)
  : Map[String, T] = {
    events
    .groupBy( _.entityId )
    .mapValues( _.sortBy(_.eventTime.getMillis).foldLeft[T](init)(op))
    .toMap
  }
}
/**
 * Local (non-distributed) batch view over the events of one app within an
 * optional time window. Deprecated, like the rest of this package.
 */
class LBatchView(
  val appId: Int,
  val startTime: Option[DateTime],
  val untilTime: Option[DateTime]) {

  @transient lazy val eventsDb = Storage.getLEvents()

  // NOTE(review): .right.get throws if the storage layer returns a Left —
  // consider surfacing that error instead of a NoSuchElementException.
  @transient lazy val _events = eventsDb.getByAppIdAndTime(appId,
    startTime, untilTime).right.get.toList

  @transient lazy val events: EventSeq = new EventSeq(_events)

  /* Aggregate event data
   *
   * @param entityType only aggregate event with entityType
   * @param startTimeOpt if specified, only aggregate event after startTimeOpt
   *   (the bound is exclusive of equal timestamps — see ViewPredicates)
   * @param untilTimeOpt if specified, only aggregate event until (exclusive)
   *   untilTimeOpt
   */
  def aggregateProperties(
    entityType: String,
    startTimeOpt: Option[DateTime] = None,
    untilTimeOpt: Option[DateTime] = None
  ): Map[String, DataMap] = {
    // Fold the special $set/$unset/$delete events per entity, then keep only
    // entities whose property map survived (i.e. was not deleted or never set).
    events
    .filter(entityTypeOpt = Some(entityType))
    .filter(e => EventValidation.isSpecialEvents(e.event))
    .aggregateByEntityOrdered(
      init = None,
      op = ViewAggregators.getDataMapAggregator())
    .filter{ case (k, v) => (v != None) }
    .mapValues(_.get)
  }

  /*
  def aggregateByEntityOrdered[T](
    predicate: Event => Boolean,
    init: T,
    op: (T, Event) => T): Map[String, T] = {

    _events
      .filter( predicate(_) )
      .groupBy( _.entityId )
      .mapValues( _.sortBy(_.eventTime.getMillis).foldLeft[T](init)(op))
      .toMap
  }
  */

  /*
  def groupByEntityOrdered[T](
    predicate: Event => Boolean,
    map: Event => T): Map[String, Seq[T]] = {

    _events
      .filter( predicate(_) )
      .groupBy( _.entityId )
      .mapValues( _.sortBy(_.eventTime.getMillis).map(map(_)) )
      .toMap
  }
  */
}
| nvoron23/PredictionIO | data/src/main/scala/io/prediction/data/view/LBatchView.scala | Scala | apache-2.0 | 5,434 |
package org.programmiersportgruppe.redis.client
import scala.collection.mutable
import akka.testkit.TestKit
import org.scalatest.time.{ Second, Span }
import org.programmiersportgruppe.redis._
import org.programmiersportgruppe.redis.client.RedisSubscriptionActor.PubSubMessage
import org.programmiersportgruppe.redis.commands.{ PUBLISH, SHUTDOWN }
import org.programmiersportgruppe.redis.test.{ ActorSystemAcceptanceTest, ProxyingParent }
class RedisSubscriptionActorTest extends ActorSystemAcceptanceTest {

  behavior of "a pubsub subscription connection to the Redis server"

  it should "send the onConnected message and stop when connection closes" in {
    withActorSystem { implicit system =>
      val testKit = new TestKit(system)

      // The server is stopped when withRedisServer returns, so the subscriber's
      // connection closes and the actor is expected to terminate.
      val subscriber =
        withRedisServer { address =>
          val subscriber = ProxyingParent(RedisSubscriptionActor.props(address, messageToParentOnSubscribed = Some("ready"), Seq("chan A"), _ => ()), testKit.testActor, "redis-actor")
          testKit.expectMsg("ready")
          testKit.watch(subscriber)
          subscriber
        }

      testKit.expectTerminated(subscriber)
    }
  }

  it should "receive own messages for subscribed channels" in {
    withRedisServer { address =>
      withActorSystem { implicit system =>
        // Messages delivered to the subscription callback are recorded here
        val messages = mutable.Queue[PubSubMessage]()
        val onMessage = (m: PubSubMessage) => messages.enqueue(m)
        val testKit = new TestKit(system)
        val subscriber = ProxyingParent(RedisSubscriptionActor.props(address, messageToParentOnSubscribed = Some("sub ready"), Seq("chan A", "chan C"), onMessage), testKit.testActor, "subscriber")
        val publisher = ProxyingParent(RedisCommandReplyActor.props(address, messageToParentOnConnected = Some("pub ready")), testKit.testActor, "publisher")
        testKit.expectMsgAllOf("pub ready", "sub ready")
        // "chan B" is not subscribed, so its message must not be delivered
        publisher ! PUBLISH("chan A", "a message")
        publisher ! PUBLISH("chan B", "b message")
        publisher ! PUBLISH("chan C", "c message")
        // NOTE(review): copy(Span(1, Second)) sets the config's first parameter
        // positionally (presumably the timeout) — confirm that is the intent.
        implicit def patienceConfig = super.patienceConfig.copy(Span(1, Second))
        eventually {
          val formattedMessages =
            messages
              .map { case PubSubMessage(channel, message) => s"[${channel.utf8String}]: ${message.utf8String}" }
              .mkString("; ")
          assert(formattedMessages == "[chan A]: a message; [chan C]: c message")
          // NOTE(review): the watch/SHUTDOWN side effects below re-run if
          // `eventually` retries after the assertion fails — confirm this is safe.
          testKit.watch(subscriber)
          testKit.watch(publisher)
          publisher ! SHUTDOWN()
          testKit.expectTerminated(subscriber)
          testKit.expectTerminated(publisher)
        }
      }
    }
  }

}
| programmiersportgruppe/akre | client/src/test/scala/org/programmiersportgruppe/redis/client/RedisSubscriptionActorTest.scala | Scala | mit | 2,621 |
package pl.touk.nussknacker.engine.avro.schemaregistry
import pl.touk.nussknacker.engine.util.convert.IntValue
/** Selector for which registered version of a schema should be resolved. */
sealed trait SchemaVersionOption
object SchemaVersionOption {

  val LatestOptionName = "latest"

  /**
   * Parses a schema-version option from its textual form: "latest" selects
   * [[LatestSchemaVersion]], an integer string selects that
   * [[ExistingSchemaVersion]].
   *
   * @throws IllegalArgumentException for any other input
   */
  def byName(name: String): SchemaVersionOption = name match {
    case `LatestOptionName` => LatestSchemaVersion
    case IntValue(version)  => ExistingSchemaVersion(version)
    case _                  => throw new IllegalArgumentException(s"Unexpected schema version option: $name")
  }
}
/** Pins resolution to a specific, already-registered schema version. */
case class ExistingSchemaVersion(version: Int) extends SchemaVersionOption
/** Always resolves to the newest schema version in the registry. */
case object LatestSchemaVersion extends SchemaVersionOption
| TouK/nussknacker | utils/avro-components-utils/src/main/scala/pl/touk/nussknacker/engine/avro/schemaregistry/SchemaVersionOption.scala | Scala | apache-2.0 | 643 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.joins
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.catalyst.expressions.{And, Expression, LessThan}
import org.apache.spark.sql.catalyst.optimizer.{BuildLeft, BuildRight}
import org.apache.spark.sql.catalyst.planning.ExtractEquiJoinKeys
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical.{Join, JoinHint}
import org.apache.spark.sql.execution.{SparkPlan, SparkPlanTest}
import org.apache.spark.sql.execution.exchange.EnsureRequirements
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.{DoubleType, IntegerType, StructType}
class OuterJoinSuite extends SparkPlanTest with SharedSparkSession {
private val EnsureRequirements = new EnsureRequirements()
private lazy val left = spark.createDataFrame(
sparkContext.parallelize(Seq(
Row(1, 2.0),
Row(2, 100.0),
Row(2, 1.0), // This row is duplicated to ensure that we will have multiple buffered matches
Row(2, 1.0),
Row(3, 3.0),
Row(5, 1.0),
Row(6, 6.0),
Row(null, null)
)), new StructType().add("a", IntegerType).add("b", DoubleType))
private lazy val right = spark.createDataFrame(
sparkContext.parallelize(Seq(
Row(0, 0.0),
Row(2, 3.0), // This row is duplicated to ensure that we will have multiple buffered matches
Row(2, -1.0), // This row is duplicated to ensure that we will have multiple buffered matches
Row(2, -1.0),
Row(2, 3.0),
Row(3, 2.0),
Row(4, 1.0),
Row(5, 3.0),
Row(7, 7.0),
Row(null, null)
)), new StructType().add("c", IntegerType).add("d", DoubleType))
private lazy val condition = {
And((left.col("a") === right.col("c")).expr,
LessThan(left.col("b").expr, right.col("d").expr))
}
private lazy val uniqueLeft = spark.createDataFrame(
sparkContext.parallelize(Seq(
Row(1, 2.0),
Row(2, 1.0),
Row(3, 3.0),
Row(5, 1.0),
Row(6, 6.0),
Row(null, null)
)), new StructType().add("a", IntegerType).add("b", DoubleType))
private lazy val uniqueRight = spark.createDataFrame(
sparkContext.parallelize(Seq(
Row(0, 0.0),
Row(2, 3.0),
Row(3, 2.0),
Row(4, 1.0),
Row(5, 3.0),
Row(7, 7.0),
Row(null, null)
)), new StructType().add("c", IntegerType).add("d", DoubleType))
private lazy val uniqueCondition = {
And((uniqueLeft.col("a") === uniqueRight.col("c")).expr,
LessThan(uniqueLeft.col("b").expr, uniqueRight.col("d").expr))
}
  // Note: the input dataframes and expression must be evaluated lazily because
  // the SQLContext should be used only within a test to keep SQL tests stable
  /**
   * Registers tests that run the same outer-join scenario through every join
   * strategy: ShuffledHashJoin, BroadcastHashJoin (left/right outer only),
   * SortMergeJoin, and BroadcastNestedLoopJoin with both build sides — each
   * executed with whole-stage codegen on and off.
   */
  private def testOuterJoin(
      testName: String,
      leftRows: => DataFrame,
      rightRows: => DataFrame,
      joinType: JoinType,
      condition: => Expression,
      expectedAnswer: Seq[Product]): Unit = {

    // Extracts equi-join keys and the bound condition from an equivalent
    // logical inner join over the same children.
    def extractJoinParts(): Option[ExtractEquiJoinKeys.ReturnType] = {
      val join = Join(leftRows.logicalPlan, rightRows.logicalPlan,
        Inner, Some(condition), JoinHint.NONE)
      ExtractEquiJoinKeys.unapply(join)
    }

    testWithWholeStageCodegenOnAndOff(s"$testName using ShuffledHashJoin") { _ =>
      extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _, _, _) =>
        withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
          // Build the hash table on the non-preserved side of the outer join
          val buildSide = if (joinType == LeftOuter) BuildRight else BuildLeft
          checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
            EnsureRequirements.apply(
              ShuffledHashJoinExec(
                leftKeys, rightKeys, joinType, buildSide, boundCondition, left, right)),
            expectedAnswer.map(Row.fromTuple),
            sortAnswers = true)
        }
      }
    }

    // BroadcastHashJoin is only exercised for left/right outer joins: only those
    // have a valid build side in the match below.
    if (joinType != FullOuter) {
      testWithWholeStageCodegenOnAndOff(s"$testName using BroadcastHashJoin") { _ =>
        val buildSide = joinType match {
          case LeftOuter => BuildRight
          case RightOuter => BuildLeft
          case _ => fail(s"Unsupported join type $joinType")
        }
        extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _, _, _) =>
          withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
            checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
              BroadcastHashJoinExec(
                leftKeys, rightKeys, joinType, buildSide, boundCondition, left, right),
              expectedAnswer.map(Row.fromTuple),
              sortAnswers = true)
          }
        }
      }
    }

    testWithWholeStageCodegenOnAndOff(s"$testName using SortMergeJoin") { _ =>
      extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _, _, _) =>
        withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
          checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
            EnsureRequirements.apply(
              SortMergeJoinExec(leftKeys, rightKeys, joinType, boundCondition, left, right)),
            expectedAnswer.map(Row.fromTuple),
            sortAnswers = true)
        }
      }
    }

    // Nested-loop joins take the full (non-equi) condition and support any join type
    testWithWholeStageCodegenOnAndOff(s"$testName using BroadcastNestedLoopJoin build left") { _ =>
      withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
        checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
          BroadcastNestedLoopJoinExec(left, right, BuildLeft, joinType, Some(condition)),
          expectedAnswer.map(Row.fromTuple),
          sortAnswers = true)
      }
    }

    testWithWholeStageCodegenOnAndOff(s"$testName using BroadcastNestedLoopJoin build right") { _ =>
      withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
        checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
          BroadcastNestedLoopJoinExec(left, right, BuildRight, joinType, Some(condition)),
          expectedAnswer.map(Row.fromTuple),
          sortAnswers = true)
      }
    }
  }
// --- Basic outer joins ------------------------------------------------------------------------
testOuterJoin(
"basic left outer join",
left,
right,
LeftOuter,
condition,
Seq(
(null, null, null, null),
(1, 2.0, null, null),
(2, 100.0, null, null),
(2, 1.0, 2, 3.0),
(2, 1.0, 2, 3.0),
(2, 1.0, 2, 3.0),
(2, 1.0, 2, 3.0),
(3, 3.0, null, null),
(5, 1.0, 5, 3.0),
(6, 6.0, null, null)
)
)
testOuterJoin(
"basic right outer join",
left,
right,
RightOuter,
condition,
Seq(
(null, null, null, null),
(null, null, 0, 0.0),
(2, 1.0, 2, 3.0),
(2, 1.0, 2, 3.0),
(null, null, 2, -1.0),
(null, null, 2, -1.0),
(2, 1.0, 2, 3.0),
(2, 1.0, 2, 3.0),
(null, null, 3, 2.0),
(null, null, 4, 1.0),
(5, 1.0, 5, 3.0),
(null, null, 7, 7.0)
)
)
testOuterJoin(
"basic full outer join",
left,
right,
FullOuter,
condition,
Seq(
(1, 2.0, null, null),
(null, null, 2, -1.0),
(null, null, 2, -1.0),
(2, 100.0, null, null),
(2, 1.0, 2, 3.0),
(2, 1.0, 2, 3.0),
(2, 1.0, 2, 3.0),
(2, 1.0, 2, 3.0),
(3, 3.0, null, null),
(5, 1.0, 5, 3.0),
(6, 6.0, null, null),
(null, null, 0, 0.0),
(null, null, 3, 2.0),
(null, null, 4, 1.0),
(null, null, 7, 7.0),
(null, null, null, null),
(null, null, null, null)
)
)
// --- Both inputs empty ------------------------------------------------------------------------
testOuterJoin(
"left outer join with both inputs empty",
left.filter("false"),
right.filter("false"),
LeftOuter,
condition,
Seq.empty
)
testOuterJoin(
"right outer join with both inputs empty",
left.filter("false"),
right.filter("false"),
RightOuter,
condition,
Seq.empty
)
testOuterJoin(
"full outer join with both inputs empty",
left.filter("false"),
right.filter("false"),
FullOuter,
condition,
Seq.empty
)
// --- Join keys are unique ---------------------------------------------------------------------
testOuterJoin(
"left outer join with unique keys",
uniqueLeft,
uniqueRight,
LeftOuter,
uniqueCondition,
Seq(
(null, null, null, null),
(1, 2.0, null, null),
(2, 1.0, 2, 3.0),
(3, 3.0, null, null),
(5, 1.0, 5, 3.0),
(6, 6.0, null, null)
)
)
testOuterJoin(
"right outer join with unique keys",
uniqueLeft,
uniqueRight,
RightOuter,
uniqueCondition,
Seq(
(null, null, null, null),
(null, null, 0, 0.0),
(2, 1.0, 2, 3.0),
(null, null, 3, 2.0),
(null, null, 4, 1.0),
(5, 1.0, 5, 3.0),
(null, null, 7, 7.0)
)
)
}
| chuckchen/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/joins/OuterJoinSuite.scala | Scala | apache-2.0 | 9,774 |
/*
* MultiSetTest.scala
*
*
* Created By: Avi Pfeffer (apfeffer@cra.com)
* Creation Date: Jan 1, 2009
*
* Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email figaro@cra.com for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.test.util
import org.scalatest.Matchers
import org.scalatest.WordSpec
import com.cra.figaro.util._
/** Unit tests for `HashMultiSet`: insertion, removal, iteration, map and union. */
class MultiSetTest extends WordSpec with Matchers {
  "A hashing multiset" should {
    "contain an element the correct number of times after multiple addOnes" in {
      val ms = HashMultiSet[Int]()
      ms.addOne(5)
      ms.addOne(5)
      ms.addOne(6)
      ms(5) should equal(2)
      ms(6) should equal(1)
      ms(7) should equal(0)
      ms.counts.size should equal(2)
    }
    "contain an element the correct number of times after addMany" in {
      val ms = HashMultiSet[Int]()
      ms.addMany(5, 2)
      ms.addMany(6, 1)
      ms(5) should equal(2)
      ms(6) should equal(1)
      ms(7) should equal(0)
      ms.counts.size should equal(2)
    }
    "contain an element the correct number of times after multiple removeOnes" in {
      val ms = HashMultiSet[Int]()
      ms.addMany(5, 4)
      ms.addMany(6, 1)
      ms.removeOne(5)
      ms.removeOne(5)
      ms.removeOne(6)
      ms(5) should equal(2)
      ms(6) should equal(0)
      ms(7) should equal(0)
      // 6 was fully removed, so only the entry for 5 remains in the count map.
      ms.counts.size should equal(1)
    }
    "contain an element the correct number of times after removeMany" in {
      val ms = HashMultiSet[Int]()
      ms.addMany(5, 4)
      ms.addMany(6, 1)
      ms.removeAll(5)
      ms(5) should equal(0)
      ms(6) should equal(1)
      ms(7) should equal(0)
      ms.counts.size should equal(1)
    }
    "produce the right list of elements" in {
      val ms = HashMultiSet[Int]()
      ms.addMany(5, 2)
      ms.addMany(6, 1)
      val l = ms.elements
      l.length should equal(3)
      l.count(_ == 5) should equal(2)
      l.count(_ == 6) should equal(1)
    }
    "produce the right behavior under foreach" in {
      val ms = HashMultiSet[Int]()
      ms.addMany(5, 2)
      ms.addMany(6, 1)
      var x = 0
      def f(i: Int) { x += i }
      ms.foreach(f)
      // 5 is visited twice and 6 once: 5 + 5 + 6 = 16.
      x should equal(16)
    }
    "produce the right result under map" in {
      val ms = HashMultiSet[Int]()
      ms.addMany(5, 2)
      ms.addMany(6, 1)
      val ms2 = ms.map((i: Int) => i.toString)
      ms2("5") should equal(2)
      ms2("6") should equal(1)
      ms2("7") should equal(0)
      ms2.counts.size should equal(2)
    }
    "produce the correct union" in {
      val ms1 = HashMultiSet[Int]()
      ms1.addMany(5, 2)
      ms1.addMany(6, 1)
      val ms2 = HashMultiSet[Int]()
      // Bug fix: these two inserts previously targeted ms1, leaving ms2 empty,
      // so `union` was only ever exercised against an empty right-hand side.
      // Inserting into ms2 keeps the expected totals identical (5 -> 2+3,
      // 6 -> 1, 7 -> 4) while genuinely testing a union of two non-empty sets.
      ms2.addMany(5, 3)
      ms2.addMany(7, 4)
      val ms3 = ms1 union ms2
      ms3(5) should equal(5)
      ms3(6) should equal(1)
      ms3(7) should equal(4)
      ms3.counts.size should equal(3)
    }
  }
}
| wkretschmer/figaro | Figaro/src/test/scala/com/cra/figaro/test/util/MultiSetTest.scala | Scala | bsd-3-clause | 2,983 |
// Wei Chen - Clustering Trait Test
// 2019-07-19
import com.scalaml.algorithm.Clustering
import org.scalatest.funsuite.AnyFunSuite
/** Exercises the `Clustering` trait contract through a minimal concrete implementation. */
class ClusteringSuite extends AnyFunSuite {
    test("Clustering Test : Create Sample Algo") {
        // Stub algorithm: every operation succeeds and every point lands in cluster 0.
        class SampleAlgo() extends Clustering {
            val algoname: String = "TestAlgo"
            val version: String = "TestVersion"
            override def clear(): Boolean = true
            override def config(paras: Map[String, Any]): Boolean = true
            override def cluster(data: Array[Array[Double]]): Array[Int] =
                data.map(_ => 0)
        }
        val algo = new SampleAlgo
        // The trait should report its fixed type plus the identity supplied above.
        assert(algo.algotype == "Clustering")
        assert(algo.algoname == "TestAlgo")
        assert(algo.version == "TestVersion")
        // All stubbed operations succeed, and clustering maps each row to cluster 0.
        assert(algo.clear)
        assert(algo.config(Map()))
        assert(algo.cluster(Array()).size == 0)
        assert(algo.cluster(Array(Array(1))).head == 0)
    }
}
| Wei-1/Scala-Machine-Learning | src/test/scala/algorithm/clustering/ClusteringTest.scala | Scala | mit | 928 |
package fpinscala.errorhandling
// hide std library `Option`, `Some` and `Either`, since we are writing our own in this chapter
import scala.{Option => _, Some => _, Either => _, _}
// exercise 1
// Exercise 1: a hand-rolled Option mirroring the standard library's (which is
// hidden by the import at the top of this file).
sealed trait Option[+A] {
  // Apply f to the contained value, if any.
  def map[B](f: A => B): Option[B] =
    this match {
      case Some(a) => Some(f(a))
      case None    => None
    }
  // `default` is by-name, so it is only evaluated when this is None.
  // A method is contravariant in its result type, hence the lower bound B >: A.
  def getOrElse[B >: A](default: => B): B =
    this match {
      case Some(a) => a
      case None    => default
    }
  // Expressed through map/getOrElse instead of direct pattern matching.
  def flatMap[B](f: A => Option[B]): Option[B] =
    this.map(f).getOrElse(None)
  // Wrap one level deeper, then unwrap with the alternative as the default.
  def orElse[B >: A](ob: => Option[B]): Option[B] =
    this.map(Some(_)).getOrElse(ob)
  // Variant matching scala.Option's implementation: returns `this` for Some
  // rather than rebuilding a value.
  def orElse1[B >: A](ob: => Option[B]): Option[B] =
    this match {
      case Some(_) => this
      case None    => ob
    }
  // Keep the value only when it satisfies the predicate.
  def filter(f: A => Boolean): Option[A] =
    this.flatMap(a => if (f(a)) this else None)
}
case class Some[+A](get: A) extends Option[A]
case object None extends Option[Nothing]
object Option {
  /** Demonstrates that exceptions break referential transparency: `y` throws
    * as soon as the `val` is initialized, BEFORE the `try` is entered, so the
    * handler below never runs and this method never returns 43.
    */
  def failingFn(i: Int): Int = {
    val y: Int = throw new Exception("fail!") // `val y: Int = ...` declares `y` as having type `Int`, and sets it equal to the right hand side of the `=`.
    try {
      val x = 42 + 5
      x + y
    }
    catch { case e: Exception => 43 } // A `catch` block is just a pattern matching block like the ones we've seen. `case e: Exception` is a pattern that matches any `Exception`, and it binds this value to the identifier `e`. The match returns the value 43.
  }
  /** Variant of failingFn where the throw happens inside the `try`, so the
    * handler does run and 43 is returned.
    */
  def failingFn2(i: Int): Int = {
    try {
      val x = 42 + 5
      x + ((throw new Exception("fail!")): Int) // A thrown Exception can be given any type; here we're annotating it with the type `Int`
    }
    catch { case e: Exception => 43 }
  }
  // Arithmetic mean; None for an empty sequence instead of dividing by zero.
  def mean(xs: Seq[Double]): Option[Double] =
    if (xs.isEmpty) None
    else Some(xs.sum / xs.length)
  // exercise 2: population variance = mean of squared deviations from the mean.
  def variance(xs: Seq[Double]): Option[Double] =
    mean(xs) flatMap { m => mean(xs.map(x => Math.pow(x - m, 2))) }
  // exercise 3: combine two Options with a binary function; None if either side is None.
  def map2[A,B,C](a: Option[A], b: Option[B])(f: (A, B) => C): Option[C] =
    (a, b) match {
      case (Some(aGet), Some(bGet)) => Some(f(aGet, bGet))
      case _ => None
    }
  // Same semantics, written with flatMap/map ...
  def map2ViaFlatMapAndMap[A, B, C](a: Option[A], b: Option[B])(f: (A, B) => C): Option[C] =
    a flatMap { aa => b map { bb => f(aa, bb) } }
  // ... and with a for-comprehension (which desugars to the version above).
  def map2ViaForComprehension[A, B, C](a: Option[A], b: Option[B])(f: (A, B) => C): Option[C] =
    for {
      aa <- a
      bb <- b
    } yield f(aa, bb)
  // exercise 4
  import java.util.regex._
  // Compile a regex, turning an invalid pattern into None instead of an exception.
  def pattern(s: String): Option[Pattern] =
    try {
      Some(Pattern.compile(s))
    } catch {
      case e: PatternSyntaxException => None
    }
  // Lift a pattern string into a whole-string matcher function, if the pattern is valid.
  def mkMatcher(pat: String): Option[String => Boolean] =
    pattern(pat) map { p => (s: String) => p.matcher(s).matches() }
  // True when s matches both patterns; None when either pattern fails to compile.
  def bothMatch(pat1: String, pat2: String, s: String): Option[Boolean] =
    map2(mkMatcher(pat1), mkMatcher(pat2)) { (m1, m2) => m1(s) && m2(s) }
  // exercise 5
  // sequenceViaTraverse (this is not so obvious as it looks like)
  def sequence[A](a: List[Option[A]]): Option[List[A]] =
    traverse(a)(x => x)
  // exercise 6: map every element through f, collecting the results; a single
  // None makes the whole result None (short-circuits via map2's None case).
  def traverse[A, B](a: List[A])(f: A => Option[B]): Option[List[B]] =
    a.foldRight(Some(Nil): Option[List[B]]) { (a, b) => map2(f(a), b) { (x: B, xs: List[B]) => x :: xs } }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.storage.kv
import org.apache.samza.util.Logging
import org.apache.samza.serializers._
/**
* A key-value store wrapper that handles serialization
*/
class SerializedKeyValueStore[K, V](
  store: KeyValueStore[Array[Byte], Array[Byte]],
  keySerde: Serde[K],
  msgSerde: Serde[V],
  metrics: SerializedKeyValueStoreMetrics = new SerializedKeyValueStoreMetrics) extends KeyValueStore[K, V] with Logging {
  // Single-key read: serialize the key, look it up, deserialize the hit (or null).
  def get(key: K): V = {
    val keyBytes = toBytesOrNull(key, keySerde)
    val found = store.get(keyBytes)
    metrics.gets.inc
    fromBytesOrNull(found, msgSerde)
  }
  // Batch read. Uses explicit Java iterators/while loops rather than Scala
  // collection conversions, presumably to avoid per-entry allocation overhead
  // on this hot path — keep that style when modifying.
  override def getAll(keys: java.util.List[K]): java.util.Map[K, V] = {
    metrics.gets.inc(keys.size)
    val mapBytes = store.getAll(serializeKeys(keys))
    if (mapBytes != null) {
      val map = new java.util.HashMap[K, V](mapBytes.size)
      val entryIterator = mapBytes.entrySet.iterator
      while (entryIterator.hasNext) {
        val entry = entryIterator.next
        map.put(fromBytesOrNull(entry.getKey, keySerde), fromBytesOrNull(entry.getValue, msgSerde))
      }
      map
    } else {
      // Propagate the underlying store's null result unchanged.
      null.asInstanceOf[java.util.Map[K, V]]
    }
  }
  // Single-key write; key and value are serialized before delegation.
  def put(key: K, value: V) {
    metrics.puts.inc
    val keyBytes = toBytesOrNull(key, keySerde)
    val valBytes = toBytesOrNull(value, msgSerde)
    store.put(keyBytes, valBytes)
  }
  // Batch write: serialize every entry first, then hand the whole batch to the
  // wrapped store so it can apply it atomically/efficiently.
  def putAll(entries: java.util.List[Entry[K, V]]) {
    val list = new java.util.ArrayList[Entry[Array[Byte], Array[Byte]]](entries.size())
    val iter = entries.iterator
    while (iter.hasNext) {
      val curr = iter.next
      val keyBytes = toBytesOrNull(curr.getKey, keySerde)
      val valBytes = toBytesOrNull(curr.getValue, msgSerde)
      list.add(new Entry(keyBytes, valBytes))
    }
    store.putAll(list)
    metrics.puts.inc(list.size)
  }
  def delete(key: K) {
    metrics.deletes.inc
    val keyBytes = toBytesOrNull(key, keySerde)
    store.delete(keyBytes)
  }
  override def deleteAll(keys: java.util.List[K]) = {
    metrics.deletes.inc(keys.size)
    store.deleteAll(serializeKeys(keys))
  }
  // Range scan between two serialized keys; ordering semantics are those of
  // the underlying byte store.
  def range(from: K, to: K): KeyValueIterator[K, V] = {
    metrics.ranges.inc
    val fromBytes = toBytesOrNull(from, keySerde)
    val toBytes = toBytesOrNull(to, keySerde)
    new DeserializingIterator(store.range(fromBytes, toBytes))
  }
  def all(): KeyValueIterator[K, V] = {
    metrics.alls.inc
    new DeserializingIterator(store.all)
  }
  // Lazily deserializes entries as the caller advances the wrapped byte iterator.
  private class DeserializingIterator(iter: KeyValueIterator[Array[Byte], Array[Byte]]) extends KeyValueIterator[K, V] {
    override def hasNext() = iter.hasNext()
    override def remove() = iter.remove()
    override def close() = iter.close()
    override def next(): Entry[K, V] = {
      val nxt = iter.next()
      val key = fromBytesOrNull(nxt.getKey, keySerde)
      val value = fromBytesOrNull(nxt.getValue, msgSerde)
      new Entry(key, value)
    }
  }
  def flush {
    trace("Flushing store.")
    metrics.flushes.inc
    store.flush
    trace("Flushed store.")
  }
  // NOTE(review): close does not flush first — callers are expected to flush
  // explicitly if they need durability before closing. Confirm against the
  // KeyValueStore contract.
  def close {
    trace("Closing.")
    store.close
  }
  // null passes through untouched in both directions; only non-null payloads
  // are counted in the byte metrics.
  private def toBytesOrNull[T](t: T, serde: Serde[T]): Array[Byte] = if (t == null) {
    null
  } else {
    val bytes = serde.toBytes(t)
    metrics.bytesSerialized.inc(bytes.size)
    bytes
  }
  private def fromBytesOrNull[T](bytes: Array[Byte], serde: Serde[T]): T = if (bytes == null) {
    null.asInstanceOf[T]
  } else {
    val obj = serde.fromBytes(bytes)
    metrics.bytesDeserialized.inc(bytes.size)
    obj
  }
  // Serialize a list of keys, preserving order.
  private def serializeKeys(keys: java.util.List[K]): java.util.List[Array[Byte]] = {
    val bytes = new java.util.ArrayList[Array[Byte]](keys.size)
    val keysIterator = keys.iterator
    while (keysIterator.hasNext) {
      bytes.add(toBytesOrNull(keysIterator.next, keySerde))
    }
    bytes
  }
}
| TiVo/samza | samza-kv/src/main/scala/org/apache/samza/storage/kv/SerializedKeyValueStore.scala | Scala | apache-2.0 | 4,604 |
package io.youi.video
import scala.concurrent.{Future, Promise}
/** Drawable wrapper around an HTML5 `<video>` element, exposing its state as
  * reactive `Var`s kept in two-way sync with the DOM element.
  */
class Video(private[youi] val element: html.Video) extends Drawable {
  def isEmpty: Boolean = false
  def nonEmpty: Boolean = !isEmpty
  // Captured once at construction; `init` is only called after "loadedmetadata",
  // so these DOM properties are populated by then.
  val width: Int = element.videoWidth
  val height: Int = element.videoHeight
  val duration: Double = element.duration
  val autoPlay: Var[Boolean] = Var(false)
  val loop: Var[Boolean] = Var(false)
  val muted: Var[Boolean] = Var(false)
  val position: Var[Double] = Var(0.0)
  val volume: Var[Double] = Var(1.0)
  // Re-entrancy guard: set while we copy element.currentTime into `position`
  // so the position listener doesn't write the same value back into the element.
  private var updatingTime = false
  private def init(autoPlay: Boolean,
                   loop: Boolean,
                   muted: Boolean): Unit = if (!isEmpty) {
    this.autoPlay @= autoPlay
    this.loop @= loop
    this.muted @= muted
    // Var -> DOM: keep the element's flags in sync with the reactive state.
    this.autoPlay.attach(element.autoplay = _)
    this.loop.attach(element.loop = _)
    this.muted.attach(element.muted = _)
    position @= element.currentTime
    // Var -> DOM for playback position, unless the change originated from the DOM.
    position.attach { p =>
      if (!updatingTime) element.currentTime = p
    }
    // DOM -> Var: playback progress and completed seeks update `position`.
    element.addEventListener("timeupdate", (_: Event) => {
      modified @= System.currentTimeMillis()
      updatingTime = true
      try {
        position @= element.currentTime
      } finally {
        updatingTime = false
      }
    })
    element.addEventListener("seeked", (_: Event) => {
      updatingTime = true
      try {
        position @= element.currentTime
      } finally {
        updatingTime = false
      }
    })
    // Same two-way binding pattern for volume, with its own guard flag.
    var updatingVolume = false
    volume @= element.volume
    volume.attach { v =>
      if (!updatingVolume) element.volume = v
    }
    element.addEventListener("volumechange", (_: Event) => {
      updatingVolume = true
      try {
        volume @= element.volume
      } finally {
        updatingVolume = false
      }
    })
    // `modified` presumably signals Drawable consumers to re-render — confirm
    // against the Drawable definition.
    modified @= System.currentTimeMillis()
  }
  def play(): Unit = element.play()
  def pause(): Unit = element.pause()
  def isPaused: Boolean = element.paused
  def isEnded: Boolean = element.ended
  /** Seeks to `position` (seconds), completing the future once the observed
    * position lands within 1 second of the target; completes immediately if
    * already there.
    */
  def seek(position: Double): Future[Unit] = if (this.position() != position) {
    val promise = Promise[Unit]
    this.position.once(_ => {
      promise.success(())
    }, d => math.abs(position - d) <= 1.0)
    this.position @= position
    promise.future
  } else {
    Future.successful(())
  }
  /** Captures the current frame into an image via a pooled canvas. */
  def createImage(): Image = {
    val canvas = CanvasPool(width, height)
    val context = canvas.context
    context.drawImage(element, 0.0, 0.0)
    CanvasImage(canvas, ImageResizer.Pica)
  }
  override def draw(context: Context, x: Double, y: Double): Unit = if (!isEmpty) {
    context.drawVideo(element)(x, y, width, height)
    // While actively playing, refresh `position` and mark modified so the next
    // frame gets drawn too.
    if (position() > 0.0 && !isPaused && !isEnded) {
      updatingTime = true
      try {
        position @= element.currentTime
      } finally {
        updatingTime = false
      }
      modified @= System.currentTimeMillis()
    }
  }
  // Stops playback; the DOM element itself is left for the GC.
  def dispose(): Unit = pause()
}
object Video {
  /** Sentinel used where "no video" is needed; draws nothing because
    * `draw`/`init` are guarded by `isEmpty`.
    */
  object empty extends Video(dom.create[html.Video]("video")) {
    override def isEmpty: Boolean = true
  }
  // Detection is purely MIME-type based (e.g. "video/mp4").
  def isVideo(file: File): Boolean = file.`type`.startsWith("video/")
  /** Loads a video from a browser File via an object URL.
    * NOTE(review): the object URL is never revoked (URL.revokeObjectURL),
    * which leaks until the document is unloaded — confirm whether callers
    * manage that.
    */
  def apply(file: File, autoPlay: Boolean, loop: Boolean, muted: Boolean): Future[Video] = {
    val url = org.scalajs.dom.URL.createObjectURL(file)
    apply(url, autoPlay, loop, muted)
  }
  def apply(url: URL, autoPlay: Boolean, loop: Boolean, muted: Boolean): Future[Video] = {
    apply(url.toString, autoPlay, loop, muted)
  }
  /** Creates a detached `<video>` element, waits for "loadedmetadata" (so the
    * Video's width/height/duration are valid), then completes with the wrapper.
    * NOTE(review): a load error never completes the future — callers may want
    * a timeout or an "error" listener.
    */
  def apply(url: String,
            autoPlay: Boolean,
            loop: Boolean,
            muted: Boolean): Future[Video] = {
    val element: html.Video = dom.create[html.Video]("video")
    element.autoplay = autoPlay
    element.loop = loop
    element.muted = muted
    val promise = Promise[Video]
    element.addEventListener("loadedmetadata", (_: Event) => {
      val v = new Video(element)
      v.init(autoPlay, loop, muted)
      promise.success(v)
    })
    // Setting src last triggers the load now that the listener is attached.
    element.src = url
    promise.future
  }
}
import scala.util.parsing.combinator.Parsers
/** Parses the header of a JVM class file and reports its version.
  *
  * Per the JVM specification, a class file begins with the 4-byte magic
  * 0xCAFEBABE, followed by minor_version (u2) and THEN major_version (u2).
  */
object ClassFileVersion extends Parsers {
  override type Elem = Byte
  def main(args: Array[String]) =
    println(classfile(ClassFileReader(args(0))) match {
      case Success(result, _) => result
      case NoSuccess(msg, _) => "Not a valid classfile: "+msg })
  // Bug fix: the file stores minor_version BEFORE major_version; the previous
  // parser read `major ~ minor` and therefore reported the two values swapped
  // (e.g. a Java 8 file — minor 0, major 52 — printed as major 0, minor 52).
  def classfile = magic ~> minor ~ major ^^
    {case mn~mj => "Classfile with major number "+ mj + " and minor number " + mn}
  // Allows writing an Int literal where a single-byte parser is expected.
  implicit def int2literal(n: Int) = elem(n.toByte)
  def magic = accept(List(0xCA,0xFE,0xBA,0xBE).map(_.toByte))
  def major = twoBytes
  def minor = twoBytes
  // Bug fix: Scala bytes are signed, so any byte >= 0x80 contributed a
  // negative value; mask both bytes to unsigned before combining big-endian.
  def twoBytes = byte ~ byte ^^ {case a~b => 256*(a & 0xFF) + (b & 0xFF)}
  def byte = elem("a byte",_ => true)
}
import scala.util.parsing.input.{Reader, NoPosition}
object ClassFileReader {
  /** Reads the whole named file into memory and wraps it in a byte reader. */
  def apply(file: String) = {
    import java.nio.file.{Files, Paths}
    // Bug fix: the previous implementation sized its buffer with `file.length`
    // — the length of the file NAME string, not of the file — so only that
    // many bytes were ever read, and the stream's short-read/close handling
    // was unguarded. Files.readAllBytes reads the full contents and closes
    // the underlying stream even on error.
    val data = Files.readAllBytes(Paths.get(file))
    new ClassFileReader(data.toList)
  }
}
/** Parser-combinator input backed by an in-memory list of bytes.
  * Positions are not tracked, so parse errors cannot point at an offset.
  */
class ClassFileReader(bytes: List[Byte]) extends Reader[Byte] {
  def atEnd: Boolean = bytes.isEmpty
  def first: Byte = bytes.head
  def rest: ClassFileReader = new ClassFileReader(bytes.tail)
  def pos = NoPosition
}
| grzegorzbalcerek/scala-book-examples | examples/ClassFileVersion.scala | Scala | mit | 1,199 |
package net.ceedubs.scrutinator
package readers
import scalaz._
/** Implicit readers that unwrap [[FieldWithDefault]]: the underlying [[Field]]
  * reader is run, and an absent value (None) is replaced by the field's
  * configured default.
  */
trait FieldWithDefaultReaders extends FieldWithDefaultReaders0
// Higher-priority instances for source-tagged params. Because this trait
// extends FieldWithDefaultReaders1, implicit resolution prefers the
// ParamFromSource-aware reader over the untagged one below.
trait FieldWithDefaultReaders0 extends FieldWithDefaultReaders1 {
  implicit def paramWithDefaultFromSourceReader[I, A, S <: ValueSource](implicit reader: ParamReader[Validated, (NamedParam[ParamFromSource[Field[A], S]], I), Option[A]]): ParamReader[Validated, (NamedParam[ParamFromSource[FieldWithDefault[A], S]], I), A] = {
    ParamReader.paramReader[Validated, (NamedParam[ParamFromSource[FieldWithDefault[A], S]], I), A] { case (cursorHistory, (paramWithDefault, input)) =>
      // Re-wrap the inner Field under the same name/source tag, delegate, and
      // fall back to the declared default when the field is absent.
      val nestedNamedParam: NamedParam[ParamFromSource[Field[A], S]] = NamedParam(paramWithDefault.name, ParamFromSource[Field[A], S](paramWithDefault.param.param))
      reader.reader.run((cursorHistory, (nestedNamedParam, input)))
        .map(_.getOrElse(paramWithDefault.param.default))
    }
  }
}
// Lower-priority fallback for params without a ValueSource tag; same
// delegate-then-default strategy as above.
trait FieldWithDefaultReaders1 {
  implicit def paramWithDefaultReader[I, A](implicit reader: ParamReader[Validated, (NamedParam[Field[A]], I), Option[A]]): ParamReader[Validated, (NamedParam[FieldWithDefault[A]], I), A] = {
    ParamReader.paramReader[Validated, (NamedParam[FieldWithDefault[A]], I), A] { case (cursorHistory, (paramWithDefault, input)) =>
      val nestedNamedParam = NamedParam(paramWithDefault.name, paramWithDefault.param.param)
      reader.reader.run((cursorHistory, (nestedNamedParam, input)))
        .map(_.getOrElse(paramWithDefault.param.default))
    }
  }
}
| ceedubs/scrutinator | core/src/main/scala/net/ceedubs/scrutinator/readers/FieldWithDefaultReaders.scala | Scala | mit | 1,498 |
package aecor.example.account
import cats.Functor
import cats.implicits._
/** [[AccountService]] implementation that delegates to the `Accounts` algebra. */
final class DefaultAccountService[F[_]: Functor](accounts: Accounts[F]) extends AccountService[F] {
  /** Opens the account identified by `accountId`, optionally with a balance
    * check; any rejection returned by the algebra is rendered via `toString`.
    */
  override def openAccount(
    accountId: AccountId,
    checkBalance: Boolean
  ): F[Either[String, Unit]] = {
    val outcome = accounts(accountId).open(checkBalance)
    outcome.map(result => result.leftMap(_.toString))
  }
}
object DefaultAccountService {
  /** Builds the service, widening it to the [[AccountService]] interface. */
  def apply[F[_]: Functor](accounts: Accounts[F]): AccountService[F] =
    new DefaultAccountService(accounts)
}
package maven2sbt.core
import hedgehog._
import cats.syntax.all._
import maven2sbt.core.Repository.{RepoId, RepoName, RepoUrl}
import maven2sbt.core.{Prop => M2sProp}
import scala.util.Random
/**
* @author Kevin Lee
* @since 2019-04-22
*/
object Gens {
  // Char generator drawing from the given inclusive code-point ranges,
  // weighted by each range's width so the distribution stays uniform overall.
  def genCharByRange(range: List[(Int, Int)]): Gen[Char] =
    Gen.frequencyUnsafe(
      range.map { case (from, to) =>
        (to + 1 - from) -> Gen.char(from.toChar, to.toChar)
      }
    )
  // Simple alphanumeric Maven coordinates.
  def genGroupId: Gen[GroupId] = Gen.string(Gen.alphaNum, Range.linear(1, 10)).map(GroupId.apply)
  def genArtifactId: Gen[ArtifactId] = Gen.string(Gen.alphaNum, Range.linear(1, 10)).map(ArtifactId.apply)
  def genVersion: Gen[Version] = Gen.string(Gen.alphaNum, Range.linear(1, 10)).map(Version.apply)
  def genProjectInfo: Gen[ProjectInfo] = for {
    groupId <- genGroupId
    artifactId <- genArtifactId
    version <- genVersion
  } yield ProjectInfo(groupId, artifactId, version)
  // Repository id / name / url: 1-5 words joined with "-", " " and "." respectively.
  def genRepositoryId: Gen[RepoId] =
    Gen.string(Gen.alphaNum, Range.linear(1, 10))
      .list(Range.linear(1, 5))
      .map(id => RepoId(id.stringsMkString("-")))
  def genRepositoryName: Gen[RepoName] =
    Gen.string(Gen.alphaNum, Range.linear(1, 10))
      .list(Range.linear(1, 5))
      .map(name => RepoName(name.stringsMkString(" ")))
  def genRepositoryUrl: Gen[RepoUrl] =
    Gen.string(Gen.alphaNum, Range.linear(1, 10))
      .list(Range.linear(1, 5))
      .map(url => RepoUrl(url.stringsMkString("https://", ".", "")))
  def genRepository: Gen[Repository] = for {
    id <- genRepositoryId
    name <- genRepositoryName
    url <- genRepositoryUrl
  } yield Repository(id.some, name.some, url)
  // Variants covering the empty / missing id and name cases.
  def genRepositoryWithEmptyName: Gen[Repository] = for {
    id <- genRepositoryId
    name = Repository.RepoName("")
    url <- genRepositoryUrl
  } yield Repository(id.some, name.some, url)
  def genRepositoryWithNoName: Gen[Repository] = for {
    id <- genRepositoryId
    url <- genRepositoryUrl
  } yield Repository(id.some, none[RepoName], url)
  def genRepositoryWithEmptyIdEmptyName: Gen[Repository] = for {
    url <- genRepositoryUrl
    id = Repository.RepoId("")
    name = Repository.RepoName("")
  } yield Repository(id.some, name.some, url)
  def genRepositoryWithNoIdNoName: Gen[Repository] =
    genRepositoryUrl.map(url => Repository(none[RepoId], none[RepoName], url))
  // Generates a delimited Maven property name (e.g. "foo.bar-baz") together
  // with the camelCased sbt val name expected for it: the first word keeps its
  // head char (prefixed with '_' if not a letter or underscore), and every
  // following word is capitalized.
  def genMavenPropertyNameWithPropNamePair: Gen[(MavenProperty.Name, Prop.PropName)] = for {
    nameList <- Gen.string(Gens.genCharByRange(TestUtils.ExpectedLetters), Range.linear(1, 10)).list(Range.linear(1, 10))
    delimiterList <- Gen.string(
        Gen.frequency1(70 -> Gen.element1('.', '-'), 30 -> Gens.genCharByRange(TestUtils.ExpectedNonLetters)),
        Range.singleton(1)
      )
      .list(Range.singleton(nameList.length - 1))
      .map(_.toVector)
    mavenPropName = nameList.zip(delimiterList :+ "").map { case (word, delimiter) => word + delimiter }.mkString
    propName = (
        nameList.headOption.map { first =>
          first.headOption.map { c =>
            if (c.isUpper || c.isLower || c === '_')
              c.toString
            else
              s"_${c.toString}"
          }.getOrElse("") + first.drop(1)
        }.toList ++ nameList.drop(1).map(_.capitalize)).mkString
  } yield (MavenProperty.Name(mavenPropName), Prop.PropName(propName))
  // Same name construction as above, paired with an arbitrary unicode value.
  def genMavenProperty: Gen[MavenProperty] = for {
    nameList <- Gen.string(Gens.genCharByRange(TestUtils.ExpectedLetters), Range.linear(1, 10)).list(Range.linear(1, 10))
    delimiterList <- Gen.string(
        Gen.frequency1(70 -> Gen.element1('.', '-'), 30 -> Gens.genCharByRange(TestUtils.ExpectedNonLetters)),
        Range.singleton(1)
      )
      .list(Range.singleton(nameList.length - 1))
      .map(_.toVector)
    name = nameList.zip(delimiterList :+ "").map { case (word, delimiter) => word + delimiter }.mkString
    value <- Gen.string(Gen.unicode, Range.linear(1, 50))
  } yield MavenProperty(MavenProperty.Name(name), MavenProperty.Value(value))
  // Maven property plus the Prop (camelCased key, same value) it should map to.
  def genMavenPropertyAndPropPair: Gen[(MavenProperty, Prop)] = for {
    nameList <- Gen.string(Gens.genCharByRange(TestUtils.ExpectedLetters), Range.linear(1, 10)).list(Range.linear(1, 10))
    delimiterList <- Gen.string(
        Gen.frequency1(70 -> Gen.element1('.', '-'), 30 -> Gens.genCharByRange(TestUtils.ExpectedNonLetters)),
        Range.singleton(1)
      )
      .list(Range.singleton(nameList.length - 1))
      .map(_.toVector)
    key = nameList.zip(delimiterList :+ "").map { case (word, delimiter) => word + delimiter }.mkString
    expectedKey = (
        nameList.headOption.map { first =>
          first.headOption.map { c =>
            if (c.isUpper || c.isLower || c === '_')
              c.toString
            else
              s"_${c.toString}"
          }.getOrElse("") + first.drop(1)
        }.toList ++ nameList.drop(1).map(_.capitalize)).mkString
    value <- Gen.string(Gens.genCharByRange(TestUtils.NonWhitespaceCharRange), Range.linear(1, 50))
  } yield (
    MavenProperty(MavenProperty.Name(key), MavenProperty.Value(value)),
    Prop(Prop.PropName(expectedKey), Prop.PropValue(value))
  )
  // A value embedding ${prop} references, paired with its expected quoted
  // s-interpolated rendering.
  // NOTE(review): scala.util.Random.shuffle is not driven by the hedgehog
  // seed, so this generator is not reproducible from a failing seed —
  // consider deriving the permutation from a Gen instead.
  def genRenderedStringFromValueWithPropsAndQuoted: Gen[(RenderedString, String)] = for {
    names <- Gens.genMavenPropertyNameWithPropNamePair
      .list(Range.linear(1, 5))
    values <- Gen
      .string(Gens.genCharByRange(TestUtils.NonWhitespaceCharRange), Range.linear(1, 10))
      .list(Range.singleton(names.length))
    nameValuePairs = names.zip(values)
    (_, valueWithExpectedProp) = Random
      .shuffle(nameValuePairs)
      .foldLeft(
        List.empty[((MavenProperty.Name, String), (M2sProp.PropName, String))]
      ) {
        case (acc, ((mavenPropName, propName), value)) =>
          ((mavenPropName, value), (propName, value)) :: acc
      }
      .unzip
    valueWithProps = valueWithExpectedProp
      .foldLeft(List.empty[String]) {
        case (acc, (prop, value)) => s"$${${prop.propName}}$value" :: acc
      }
      .reverse
      .mkString
  } yield (
    RenderedString.withProps(
      valueWithProps
    ),
    s"""s"$valueWithProps""""
  )
  // A rendered string that is exactly one ${prop} reference; expected form is
  // the bare prop name.
  def genRenderedStringWithOnlyPropNameAndQuoted: Gen[(RenderedString, String)] = for {
    (_, propName) <- Gens.genMavenPropertyNameWithPropNamePair
  } yield (
    RenderedString.withProps(
      s"$${${propName.propName}}"
    ),
    propName.propName
  )
  // A plain value with no prop references; expected form is a double-quoted literal.
  def genRenderedStringWithNoPropNameAndQuoted: Gen[(RenderedString, String)] =
    Gen.string(Gens.genCharByRange(TestUtils.NonWhitespaceCharRange), Range.linear(1, 10))
      .map(value => (RenderedString.withoutProps(value), s""""$value""""))
  // Equal-weight mix of the three rendered-string shapes above.
  def genRenderedStringWithQuotedString: Gen[(RenderedString, String)] =
    Gen.frequency1(
      30 -> genRenderedStringFromValueWithPropsAndQuoted,
      30 -> genRenderedStringWithOnlyPropNameAndQuoted,
      30 -> genRenderedStringWithNoPropNameAndQuoted
    )
  def genScope: Gen[Scope] =
    Gen.element1(Scope.compile, Scope.test, Scope.provided, Scope.runtime, Scope.system, Scope.default)
  def genExclusion: Gen[Exclusion] = for {
    groupId <- genGroupId
    artifactId <- genArtifactId
  } yield Exclusion(groupId, artifactId)
  // Dependency with 0-5 exclusions, randomly a Scala or Java artifact.
  def genDependency: Gen[Dependency] = for {
    groupId <- genGroupId
    artifactId <- genArtifactId
    version <- genVersion
    scope <- genScope
    exclusions <- genExclusion.list(Range.linear(0, 5))
    scalaLib <- Gen.boolean
  } yield if (scalaLib) {
    Dependency.scala(groupId, artifactId, version, scope, exclusions)
  } else {
    Dependency.java(groupId, artifactId, version, scope, exclusions)
  }
  // Same as genDependency but guarantees at least one exclusion.
  def genDependencyWithNonEmptyExclusions: Gen[Dependency] = for {
    groupId <- genGroupId
    artifactId <- genArtifactId
    version <- genVersion
    scope <- genScope
    exclusions <- genExclusion.list(Range.linear(1, 5))
    scalaLib <- Gen.boolean
  } yield if (scalaLib) {
    Dependency.scala(groupId, artifactId, version, scope, exclusions)
  } else {
    Dependency.java(groupId, artifactId, version, scope, exclusions)
  }
  // A valid identifier: leading letter followed by alphanumerics.
  def genLibsName: Gen[Libs.LibsName] = for {
    first <- Gen.alpha
    rest <- Gen.string(Gen.alphaNum, Range.linear(0, 10))
    libsName = first.toString + rest
  } yield Libs.LibsName(libsName)
  // Dependency paired with the val name derived from its artifactId.
  def genLibValNameAndDependency: Gen[(Libs.LibValName, Dependency)] =
    for {
      dependency <- genDependency
    } yield
      (
        Libs.LibValName(
          StringUtils
            .capitalizeAfterIgnoringNonAlphaNumUnderscore(dependency.artifactId.value)
        ),
        dependency
      )
}
| Kevin-Lee/maven2sbt | core/src/test/scala/maven2sbt/core/Gens.scala | Scala | mit | 8,537 |
/** Word-count example for the interactive Spark shell (`sc` must be in scope).
  *
  * Download the input and push it to HDFS first:
  *   cd /home/cloudera/spark/dataset
  *   wget http://www.apache.org/licenses/LICENSE-2.0.txt -O apache.v2.0.txt
  *   hdfs dfs -put apache.v2.0.txt dataset
  * Inspect the output in HDFS afterwards:
  *   hdfs dfs -ls dataset/apache.v2.0_wordcount
  *   hdfs dfs -cat dataset/apache.v2.0_wordcount/*
  */
val wcRDD = sc.textFile("dataset/apache.v2.0.txt")
// Peek at the raw lines, then at each intermediate transformation stage.
wcRDD.take(5).foreach(println)
// Tokenize on single spaces (note: consecutive spaces produce empty tokens).
wcRDD.flatMap(line => line.trim.split(" ")).take(5).foreach(println)
// Pair every token with a count of 1 ...
wcRDD.flatMap(line => line.trim.split(" ")).map(token => (token,1)).take(5).foreach(println)
// ... then sum the counts per token.
wcRDD.flatMap(line => line.trim.split(" ")).map(token => (token,1)).reduceByKey((x,y) => (x+y)).take(5).foreach(println)
// Full pipeline, sorted by descending count.
val wcntRDD = wcRDD.flatMap(line => line.trim.split(" ")).map(token => (token,1)).reduceByKey((x,y) => (x+y)).sortBy(_._2,false)
wcntRDD.take(5).foreach(println)
wcntRDD.count() // res28: Long = 594 (number of distinct tokens)
wcntRDD.saveAsTextFile("dataset/apache.v2.0_wordcount")
| nixphix/bigdata | spark/spark-shell/word_count.scala | Scala | apache-2.0 | 921 |
package org.http4s
import cats.data.{Kleisli, OptionT}
import cats.{Applicative, Defer}
import cats.implicits._
object ContextRoutes {
  /** Lifts a function into an [[ContextRoutes]]. The application of `run`
    * is suspended in `F` to permit more efficient combination of
    * routes via `SemigroupK`.
    *
    * @tparam F the effect of the [[ContextRoutes]]
    * @tparam T the type of the auth info in the [[ContextRequest]] accepted by the [[ContextRoutes]]
    * @param run the function to lift
    * @return an [[ContextRoutes]] that wraps `run`
    */
  def apply[T, F[_]](run: ContextRequest[F, T] => OptionT[F, Response[F]])(
      implicit F: Defer[F]): ContextRoutes[T, F] =
    Kleisli(req => OptionT(F.defer(run(req).value)))
  /** Lifts a partial function into an [[ContextRoutes]]. The application of the
    * partial function is suspended in `F` to permit more efficient combination
    * of authed services via `SemigroupK`.
    *
    * @tparam T the type of the auth info in the [[ContextRequest]] accepted by the [[ContextRoutes]]
    * @tparam F the base effect of the [[ContextRoutes]]
    * @param pf the partial function to lift
    * @return An [[ContextRoutes]] that returns some [[Response]] in an `OptionT[F, ?]`
    * wherever `pf` is defined, an `OptionT.none` wherever it is not
    */
  def of[T, F[_]](pf: PartialFunction[ContextRequest[F, T], F[Response[F]]])(
      implicit F: Defer[F],
      FA: Applicative[F]): ContextRoutes[T, F] =
    Kleisli(req => OptionT(F.defer(pf.lift(req).sequence)))
  /**
    * The empty service: declines every request (`OptionT.none`), so anything
    * composed after it via `SemigroupK` gets a chance to respond.
    *
    * @tparam T the context type; present only so the route's shape lines up
    * @return a [[ContextRoutes]] that matches no request
    */
  def empty[T, F[_]: Applicative]: ContextRoutes[T, F] =
    Kleisli.liftF(OptionT.none)
}
| ChristopherDavenport/http4s | core/src/main/scala/org/http4s/ContextRoutes.scala | Scala | apache-2.0 | 1,646 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio
import com.spotify.scio.spanner.syntax.{SCollectionSyntax, ScioContextSyntax}
package object spanner extends ScioContextSyntax with SCollectionSyntax
| spotify/scio | scio-google-cloud-platform/src/main/scala/com/spotify/scio/spanner/package.scala | Scala | apache-2.0 | 777 |
package lila.evaluation
import Math.{ pow, abs, sqrt, E, exp }
import scalaz.NonEmptyList
object Statistics {
  import Erf._
  import scala.annotation._
  // Population variance (denominator is the element count, not n-1).
  def variance[T](a: NonEmptyList[T])(implicit n: Numeric[T]): Double = {
    val mean = average(a)
    a.map(i => pow(n.toDouble(i) - mean, 2)).list.sum / a.size
  }
  // Population standard deviation.
  def deviation[T](a: NonEmptyList[T])(implicit n: Numeric[T]): Double =
    sqrt(variance(a))
  // Arithmetic mean, computed with a tail-recursive accumulator so arbitrarily
  // long lists cannot overflow the stack.
  def average[T](a: NonEmptyList[T])(implicit n: Numeric[T]): Double = {
    @tailrec def average(a: List[T], sum: T, depth: Int): Double = {
      a match {
        case Nil => n.toDouble(sum) / depth
        case x :: xs => average(xs, n.plus(sum, x), depth + 1)
      }
    }
    average(a.tail, a.head, 1)
  }
  // Coefficient of Variance
  def coefVariation(a: NonEmptyList[Int]): Double = sqrt(variance(a)) / average(a)
  // ups all values by 5 (0.5s)
  // as to avoid very high variation on bullet games
  // where all move times are low (https://en.lichess.org/@/AlisaP?mod)
  def moveTimeCoefVariation(a: NonEmptyList[Int]): Double = coefVariation(a.map(5+))
  def moveTimeCoefVariation(pov: lila.game.Pov): Option[Double] =
    pov.game.moveTimes(pov.color).toNel.map(moveTimeCoefVariation)
  // `??` is a None-to-false helper, so a game with no move times is treated
  // as NOT consistent — confirm against lila's scalaz extensions.
  def consistentMoveTimes(pov: lila.game.Pov): Boolean =
    moveTimeCoefVariation(pov) ?? (_ < 0.4)
  // Tolerates at most two moves played in under 2 time units.
  def noFastMoves(pov: lila.game.Pov): Boolean = pov.game.moveTimes(pov.color).count(2>) <= 2
  def intervalToVariance4(interval: Double): Double = pow(interval / 3, 8) // roughly speaking
  // Accumulative probability function for normal distributions
  // (normal CDF via the error function from Erf).
  def cdf[T](x: T, avg: T, sd: T)(implicit n: Numeric[T]): Double =
    0.5 * (1 + erf(n.toDouble(n.minus(x, avg)) / (n.toDouble(sd) * sqrt(2))))
  // The probability that you are outside of abs(x-n) from the mean on both sides
  def confInterval[T](x: T, avg: T, sd: T)(implicit n: Numeric[T]): Double =
    1 - cdf(n.abs(x), avg, sd) + cdf(n.times(n.fromInt(-1), n.abs(x)), avg, sd)
  // List front-ends for the NonEmptyList operations above; empty input yields 0.
  def listAverage[T](x: List[T])(implicit n: Numeric[T]): Double = x match {
    case Nil => 0
    case a :: Nil => n.toDouble(a)
    case a :: b => average(NonEmptyList.nel(a, b))
  }
  // Deviation of zero or one element is 0 by definition.
  def listDeviation[T](x: List[T])(implicit n: Numeric[T]): Double = x match {
    case Nil => 0
    case _ :: Nil => 0
    case a :: b => deviation(NonEmptyList.nel(a, b))
  }
}
object Erf {

  // Rational-approximation coefficients from Abramowitz & Stegun, formula 7.1.26.
  val a1: Double = 0.254829592
  val a2: Double = -0.284496736
  val a3: Double = 1.421413741
  val a4: Double = -1.453152027
  val a5: Double = 1.061405429
  val p: Double = 0.3275911

  /** Error function erf(x), via the A&S 7.1.26 rational approximation. */
  def erf(x: Double): Double = {
    // erf is odd: evaluate on |x| and restore the sign at the end.
    val sign = if (x < 0) -1 else 1
    val t = 1.0 / (1.0 + p * math.abs(x))
    // Horner evaluation of the degree-5 polynomial in t.
    val poly = ((((a5 * t + a4) * t + a3) * t + a2) * t + a1) * t
    sign * (1.0 - poly * math.exp(-x * x))
  }
}
| clarkerubber/lila | modules/evaluation/src/main/Statistics.scala | Scala | agpl-3.0 | 2,894 |
/**
* Copyright (C) 2011 - 101loops.com <dev@101loops.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.crashnote.test.logger.unit.jul
import com.crashnote.core.model.types.LogLevel
import com.crashnote.jul.impl.JulEvt
import java.util.logging._
import com.crashnote.test.base.defs.BaseMockSpec
/** Unit spec for JulEvt, the wrapper around a java.util.logging LogRecord. */
class JulEvtSpec
  extends BaseMockSpec[LogRecord] {

  "JUL Event" should {

    "instantiate" >> {
      val args = Array("Bob")
      val err = new RuntimeException("oops")

      // mock: stub every LogRecord getter the wrapper reads
      val m_evt = getMock(Level.SEVERE)
      m_evt.getThreadID returns 1
      m_evt.getMessage returns "oops"
      m_evt.getThrown returns err
      m_evt.getLoggerName returns "com.example"
      m_evt.getMillis returns 123456789L
      m_evt.getParameters returns args.asInstanceOf[Array[AnyRef]]

      // execute
      val r = new JulEvt(m_evt, null)

      // verify: fields pass through; the numeric thread id becomes the thread name
      r.getLoggerName === "com.example"
      r.getLevel === LogLevel.ERROR
      r.getThreadName === "1"
      r.getArgs === Array("Bob")
      r.getThrowable === err
      r.getMessage === "oops"
      r.getTimeStamp === 123456789L
    }

    "convert log level" >> {
      // the tested JUL levels collapse onto the four internal LogLevels
      "error" >> {
        new JulEvt(getMock(Level.SEVERE)).getLevel === LogLevel.ERROR
      }
      "warn" >> {
        new JulEvt(getMock(Level.WARNING)).getLevel === LogLevel.WARN
      }
      "info" >> {
        new JulEvt(getMock(Level.CONFIG)).getLevel === LogLevel.INFO
      }
      "debug" >> {
        new JulEvt(getMock(Level.FINE)).getLevel === LogLevel.DEBUG
        new JulEvt(getMock(Level.FINER)).getLevel === LogLevel.DEBUG
        new JulEvt(getMock(Level.FINEST)).getLevel === LogLevel.DEBUG
      }
    }
  }

  // SETUP ======================================================================================

  /** Creates a LogRecord mock reporting the given JUL level. */
  def getMock(l: Level) = {
    val m_evt = mock[LogRecord]
    m_evt.getLevel returns l
    m_evt
  }
}
package beam.agentsim.agents.choice.mode
import beam.agentsim.agents.choice.logit
import beam.agentsim.agents.choice.logit._
import beam.agentsim.agents.choice.mode.ModeChoiceMultinomialLogit.ModeCostTimeTransfer
import beam.agentsim.agents.modalbehaviors.ModeChoiceCalculator
import beam.router.Modes.BeamMode
import beam.router.Modes.BeamMode._
import beam.router.model.{EmbodiedBeamLeg, EmbodiedBeamTrip}
import beam.sim.BeamServices
import beam.sim.config.BeamConfig.Beam.Agentsim.Agents
import beam.sim.config.BeamConfig.Beam.Agentsim.Agents.ModalBehaviors
import beam.sim.population.AttributesOfIndividual
import beam.utils.logging.ExponentialLazyLogging
import org.matsim.api.core.v01.Id
import org.matsim.api.core.v01.population.{Activity, Person}
import org.matsim.vehicles.Vehicle
import beam.agentsim.agents.modalbehaviors.ModeChoiceCalculator._
import beam.sim.config.BeamConfig
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.util.Random
/**
 * BEAM mode choice based on a multinomial logit (MNL) model.
 *
 * For each candidate trip the generalized cost (incentivized monetary cost plus
 * value-of-time-scaled travel time, plus a transfer count for transit modes) is
 * fed into `model`, and a mode is sampled from the resulting choice probabilities.
 */
class ModeChoiceMultinomialLogit(val beamServices: BeamServices, val model: MultinomialLogit[String, String])
    extends ModeChoiceCalculator
    with ExponentialLazyLogging {

  override lazy val beamConfig: BeamConfig = beamServices.beamConfig

  // Expected maximum utility (logsum) of the most recent choice set; refreshed on every apply().
  var expectedMaximumUtility: Double = 0.0
  val modalBehaviors: ModalBehaviors = beamServices.beamConfig.beam.agentsim.agents.modalBehaviors

  // Flip to true to log the full decision context of every choice (very verbose).
  private val shouldLogDetails: Boolean = false

  /**
   * Samples one of the given trip alternatives from the MNL model, or None when
   * no alternative is available / the sampled mode cannot be matched back.
   */
  override def apply(
    alternatives: IndexedSeq[EmbodiedBeamTrip],
    attributesOfIndividual: AttributesOfIndividual,
    destinationActivity: Option[Activity],
    person: Option[Person] = None
  ): Option[EmbodiedBeamTrip] = {
    if (alternatives.isEmpty) {
      None
    } else {
      val modeCostTimeTransfers = altsToModeCostTimeTransfers(alternatives, attributesOfIndividual, destinationActivity)

      // Keep only the cheapest (cost + scaled time) alternative per mode.
      val bestInGroup =
        modeCostTimeTransfers groupBy (_.mode) map {
          case (_, group) => group minBy timeAndCost
        }

      // MNL input: generalized cost for every mode, plus a transfer count for transit.
      val inputData = bestInGroup.map { mct =>
        val theParams: Map[String, Double] =
          Map("cost" -> (mct.cost + mct.scaledTime))
        val transferParam: Map[String, Double] = if (mct.mode.isTransit) {
          Map("transfer" -> mct.numTransfers)
        } else {
          Map()
        }
        (mct.mode.value, theParams ++ transferParam)
      }.toMap

      val chosenModeOpt = model.sampleAlternative(inputData, new Random())
      expectedMaximumUtility = model.getExpectedMaximumUtility(inputData).getOrElse(0)

      if (shouldLogDetails) {
        val personId = person.map(_.getId)
        // NOTE: the expectedMaximumUtility line previously interpolated
        // ${chosenModeOpt} by mistake; it now logs the actual logsum value.
        val msgToLog =
          s"""|@@@[$personId]-----------------------------------------
              |@@@[$personId]Alternatives:${alternatives}
              |@@@[$personId]AttributesOfIndividual:${attributesOfIndividual}
              |@@@[$personId]DestinationActivity:${destinationActivity}
              |@@@[$personId]modeCostTimeTransfers:$modeCostTimeTransfers
              |@@@[$personId]bestInGroup:$bestInGroup
              |@@@[$personId]inputData:$inputData
              |@@@[$personId]chosenModeOpt:${chosenModeOpt}
              |@@@[$personId]expectedMaximumUtility:${expectedMaximumUtility}
              |@@@[$personId]-----------------------------------------
              |""".stripMargin
        logger.debug(msgToLog)
      }

      // Map the sampled mode string back to the concrete trip alternative.
      chosenModeOpt match {
        case Some(chosenMode) =>
          val chosenModeCostTime =
            bestInGroup.filter(_.mode.value.equalsIgnoreCase(chosenMode.alternativeType))
          if (chosenModeCostTime.isEmpty || chosenModeCostTime.head.index < 0) {
            None
          } else {
            Some(alternatives(chosenModeCostTime.head.index))
          }
        case None =>
          None
      }
    }
  }

  /** Generalized cost used to rank alternatives within one mode. */
  def timeAndCost(mct: ModeCostTimeTransfer): Double = {
    mct.scaledTime + mct.cost
  }

  // Generalized Time is always in hours!
  override def getGeneralizedTimeOfTrip(
    embodiedBeamTrip: EmbodiedBeamTrip,
    attributesOfIndividual: Option[AttributesOfIndividual],
    destinationActivity: Option[Activity]
  ): Double = {
    // Waiting time = total trip duration minus the in-leg travel time.
    val waitingTime = embodiedBeamTrip.totalTravelTimeInSecs - embodiedBeamTrip.legs.map(_.beamLeg.duration).sum
    embodiedBeamTrip.legs
      .map(x => getGeneralizedTimeOfLeg(x, attributesOfIndividual, destinationActivity))
      .sum + getGeneralizedTime(waitingTime, None, None)
  }

  /** Generalized time (hours) of a single leg, person-specific when attributes are given. */
  override def getGeneralizedTimeOfLeg(
    embodiedBeamLeg: EmbodiedBeamLeg,
    attributesOfIndividual: Option[AttributesOfIndividual],
    destinationActivity: Option[Activity]
  ): Double = {
    attributesOfIndividual match {
      case Some(attributes) =>
        attributes.getGeneralizedTimeOfLegForMNL(
          embodiedBeamLeg,
          this,
          beamServices,
          destinationActivity
        )
      case None =>
        // Fall back to the plain mode multiplier (duration is in seconds).
        embodiedBeamLeg.beamLeg.duration * modeMultipliers.getOrElse(Some(embodiedBeamLeg.beamLeg.mode), 1.0) / 3600
    }
  }

  /** Converts seconds to mode-weighted hours. */
  override def getGeneralizedTime(
    time: Double,
    beamMode: Option[BeamMode] = None,
    beamLeg: Option[EmbodiedBeamLeg] = None
  ): Double = {
    time / 3600 * modeMultipliers.getOrElse(beamMode, 1.0)
  }

  /**
   * Builds one ModeCostTimeTransfer per alternative: incentive-adjusted cost,
   * value-of-time-scaled generalized time, and the transit transfer count.
   */
  def altsToModeCostTimeTransfers(
    alternatives: IndexedSeq[EmbodiedBeamTrip],
    attributesOfIndividual: AttributesOfIndividual,
    destinationActivity: Option[Activity]
  ): IndexedSeq[ModeCostTimeTransfer] = {
    alternatives.zipWithIndex.map { altAndIdx =>
      val mode = altAndIdx._1.tripClassifier
      val totalCost = getNonTimeCost(altAndIdx._1)
      val incentive: Double = beamServices.beamScenario.modeIncentives.computeIncentive(attributesOfIndividual, mode)

      // Incentives reduce the cost, but never below zero.
      val incentivizedCost =
        Math.max(0, totalCost.toDouble - incentive)

      if (totalCost < incentive)
        logger.warn(
          "Mode incentive is even higher then the cost, setting cost to zero. Mode: {}, Cost: {}, Incentive: {}",
          mode,
          totalCost,
          incentive
        )

      // Transfers = number of distinct consecutive transit vehicles minus one.
      val numTransfers = mode match {
        case TRANSIT | WALK_TRANSIT | DRIVE_TRANSIT | RIDE_HAIL_TRANSIT =>
          var nVeh = -1
          var vehId = Id.create("dummy", classOf[Vehicle])
          altAndIdx._1.legs.foreach { leg =>
            if (leg.beamLeg.mode.isTransit && leg.beamVehicleId != vehId) {
              vehId = leg.beamVehicleId
              nVeh = nVeh + 1
            }
          }
          nVeh
        case _ =>
          0
      }
      assert(numTransfers >= 0)
      val scaledTime = attributesOfIndividual.getVOT(
        getGeneralizedTimeOfTrip(altAndIdx._1, Some(attributesOfIndividual), destinationActivity)
      )
      ModeCostTimeTransfer(
        mode,
        incentivizedCost,
        scaledTime,
        numTransfers,
        altAndIdx._2
      )
    }
  }

  // Per-mode value-of-time multipliers from config; None keys the waiting multiplier.
  lazy val modeMultipliers: mutable.Map[Option[BeamMode], Double] =
    mutable.Map[Option[BeamMode], Double](
      Some(TRANSIT)           -> modalBehaviors.modeVotMultiplier.transit,
      Some(RIDE_HAIL)         -> modalBehaviors.modeVotMultiplier.rideHail,
      Some(RIDE_HAIL_POOLED)  -> modalBehaviors.modeVotMultiplier.rideHailPooled,
      Some(RIDE_HAIL_TRANSIT) -> modalBehaviors.modeVotMultiplier.rideHailTransit,
      Some(CAV)               -> modalBehaviors.modeVotMultiplier.CAV,
      // Some(WAITING) -> modalBehaviors.modeVotMultiplier.waiting, TODO think of alternative for waiting. For now assume "NONE" is waiting
      Some(BIKE) -> modalBehaviors.modeVotMultiplier.bike,
      Some(WALK) -> modalBehaviors.modeVotMultiplier.walk,
      Some(CAR)  -> modalBehaviors.modeVotMultiplier.drive,
      None       -> modalBehaviors.modeVotMultiplier.waiting
    )

  // Pooled-ride multipliers per vehicle automation level.
  lazy val poolingMultipliers: mutable.Map[automationLevel, Double] =
    mutable.Map[automationLevel, Double](
      levelLE2 -> modalBehaviors.poolingMultiplier.LevelLE2,
      level3   -> modalBehaviors.poolingMultiplier.Level3,
      level4   -> modalBehaviors.poolingMultiplier.Level4,
      level5   -> modalBehaviors.poolingMultiplier.Level5
    )

  // Situation multipliers keyed by (time sensitivity, congestion, roadway type, automation level).
  lazy val situationMultipliers: mutable.Map[(timeSensitivity, congestionLevel, roadwayType, automationLevel), Double] =
    mutable.Map[(timeSensitivity, congestionLevel, roadwayType, automationLevel), Double](
      (highSensitivity, highCongestion, highway, levelLE2)    -> modalBehaviors.highTimeSensitivity.highCongestion.highwayFactor.LevelLE2,
      (highSensitivity, highCongestion, nonHighway, levelLE2) -> modalBehaviors.highTimeSensitivity.highCongestion.nonHighwayFactor.LevelLE2,
      (highSensitivity, lowCongestion, highway, levelLE2)     -> modalBehaviors.highTimeSensitivity.lowCongestion.highwayFactor.LevelLE2,
      (highSensitivity, lowCongestion, nonHighway, levelLE2)  -> modalBehaviors.highTimeSensitivity.lowCongestion.nonHighwayFactor.LevelLE2,
      (lowSensitivity, highCongestion, highway, levelLE2)     -> modalBehaviors.lowTimeSensitivity.highCongestion.highwayFactor.LevelLE2,
      (lowSensitivity, highCongestion, nonHighway, levelLE2)  -> modalBehaviors.lowTimeSensitivity.highCongestion.nonHighwayFactor.LevelLE2,
      (lowSensitivity, lowCongestion, highway, levelLE2)      -> modalBehaviors.lowTimeSensitivity.lowCongestion.highwayFactor.LevelLE2,
      (lowSensitivity, lowCongestion, nonHighway, levelLE2)   -> modalBehaviors.lowTimeSensitivity.lowCongestion.nonHighwayFactor.LevelLE2,
      (highSensitivity, highCongestion, highway, level3)      -> modalBehaviors.highTimeSensitivity.highCongestion.highwayFactor.Level3,
      (highSensitivity, highCongestion, nonHighway, level3)   -> modalBehaviors.highTimeSensitivity.highCongestion.nonHighwayFactor.Level3,
      (highSensitivity, lowCongestion, highway, level3)       -> modalBehaviors.highTimeSensitivity.lowCongestion.highwayFactor.Level3,
      (highSensitivity, lowCongestion, nonHighway, level3)    -> modalBehaviors.highTimeSensitivity.lowCongestion.nonHighwayFactor.Level3,
      (lowSensitivity, highCongestion, highway, level3)       -> modalBehaviors.lowTimeSensitivity.highCongestion.highwayFactor.Level3,
      (lowSensitivity, highCongestion, nonHighway, level3)    -> modalBehaviors.lowTimeSensitivity.highCongestion.nonHighwayFactor.Level3,
      (lowSensitivity, lowCongestion, highway, level3)        -> modalBehaviors.lowTimeSensitivity.lowCongestion.highwayFactor.Level3,
      (lowSensitivity, lowCongestion, nonHighway, level3)     -> modalBehaviors.lowTimeSensitivity.lowCongestion.nonHighwayFactor.Level3,
      (highSensitivity, highCongestion, highway, level4)      -> modalBehaviors.highTimeSensitivity.highCongestion.highwayFactor.Level4,
      (highSensitivity, highCongestion, nonHighway, level4)   -> modalBehaviors.highTimeSensitivity.highCongestion.nonHighwayFactor.Level4,
      (highSensitivity, lowCongestion, highway, level4)       -> modalBehaviors.highTimeSensitivity.lowCongestion.highwayFactor.Level4,
      (highSensitivity, lowCongestion, nonHighway, level4)    -> modalBehaviors.highTimeSensitivity.lowCongestion.nonHighwayFactor.Level4,
      (lowSensitivity, highCongestion, highway, level4)       -> modalBehaviors.lowTimeSensitivity.highCongestion.highwayFactor.Level4,
      (lowSensitivity, highCongestion, nonHighway, level4)    -> modalBehaviors.lowTimeSensitivity.highCongestion.nonHighwayFactor.Level4,
      (lowSensitivity, lowCongestion, highway, level4)        -> modalBehaviors.lowTimeSensitivity.lowCongestion.highwayFactor.Level4,
      (lowSensitivity, lowCongestion, nonHighway, level4)     -> modalBehaviors.lowTimeSensitivity.lowCongestion.nonHighwayFactor.Level4,
      (highSensitivity, highCongestion, highway, level5)      -> modalBehaviors.highTimeSensitivity.highCongestion.highwayFactor.Level5,
      (highSensitivity, highCongestion, nonHighway, level5)   -> modalBehaviors.highTimeSensitivity.highCongestion.nonHighwayFactor.Level5,
      (highSensitivity, lowCongestion, highway, level5)       -> modalBehaviors.highTimeSensitivity.lowCongestion.highwayFactor.Level5,
      (highSensitivity, lowCongestion, nonHighway, level5)    -> modalBehaviors.highTimeSensitivity.lowCongestion.nonHighwayFactor.Level5,
      (lowSensitivity, highCongestion, highway, level5)       -> modalBehaviors.lowTimeSensitivity.highCongestion.highwayFactor.Level5,
      (lowSensitivity, highCongestion, nonHighway, level5)    -> modalBehaviors.lowTimeSensitivity.highCongestion.nonHighwayFactor.Level5,
      (lowSensitivity, lowCongestion, highway, level5)        -> modalBehaviors.lowTimeSensitivity.lowCongestion.highwayFactor.Level5,
      (lowSensitivity, lowCongestion, nonHighway, level5)     -> modalBehaviors.lowTimeSensitivity.lowCongestion.nonHighwayFactor.Level5
    )

  /** Utility of a single trip alternative under the MNL model. */
  override def utilityOf(
    alternative: EmbodiedBeamTrip,
    attributesOfIndividual: AttributesOfIndividual,
    destinationActivity: Option[Activity]
  ): Double = {
    val modeCostTimeTransfer =
      altsToModeCostTimeTransfers(IndexedSeq(alternative), attributesOfIndividual, destinationActivity).head
    utilityOf(
      modeCostTimeTransfer.mode,
      modeCostTimeTransfer.cost + modeCostTimeTransfer.scaledTime,
      modeCostTimeTransfer.scaledTime,
      modeCostTimeTransfer.numTransfers
    )
  }

  // NOTE: `time` is not used here — callers fold the scaled time into `cost` already.
  def utilityOf(mode: BeamMode, cost: Double, time: Double, numTransfers: Int = 0): Double = {
    val variables =
      Map(
        "transfer" -> numTransfers.toDouble,
        "cost" -> cost
      )
    model.getUtilityOfAlternative(mode.value, variables).getOrElse(0)
  }

  override def computeAllDayUtility(
    trips: ListBuffer[EmbodiedBeamTrip],
    person: Person,
    attributesOfIndividual: AttributesOfIndividual
  ): Double = trips.map(utilityOf(_, attributesOfIndividual, None)).sum // TODO: Update with destination activity
}
object ModeChoiceMultinomialLogit {

  /**
   * Builds the MNL model from configuration: every alternative shares the
   * "cost" multiplier of -1 (higher generalized cost lowers utility); each
   * mode gets a configured intercept, and transit modes additionally get a
   * transfer multiplier.
   */
  def buildModelFromConfig(mnlConfig: Agents.ModalBehaviors.MulitnomialLogit): MultinomialLogit[String, String] = {

    val commonUtility: Map[String, UtilityFunctionOperation] = Map(
      "cost" -> UtilityFunctionOperation("multiplier", -1)
    )
    val mnlUtilityFunctions: Map[String, Map[String, UtilityFunctionOperation]] = Map(
      "car" -> Map(
        "intercept" ->
        UtilityFunctionOperation("intercept", mnlConfig.params.car_intercept)
      ),
      "cav"       -> Map("intercept" -> UtilityFunctionOperation("intercept", mnlConfig.params.cav_intercept)),
      "walk"      -> Map("intercept" -> UtilityFunctionOperation("intercept", mnlConfig.params.walk_intercept)),
      "ride_hail" -> Map("intercept" -> UtilityFunctionOperation("intercept", mnlConfig.params.ride_hail_intercept)),
      "ride_hail_pooled" -> Map(
        "intercept" -> UtilityFunctionOperation("intercept", mnlConfig.params.ride_hail_pooled_intercept)
      ),
      "ride_hail_transit" -> Map(
        "intercept" -> UtilityFunctionOperation("intercept", mnlConfig.params.ride_hail_transit_intercept),
        "transfer"  -> UtilityFunctionOperation("multiplier", mnlConfig.params.transfer)
      ),
      "bike" -> Map("intercept" -> UtilityFunctionOperation("intercept", mnlConfig.params.bike_intercept)),
      "walk_transit" -> Map(
        "intercept" -> UtilityFunctionOperation("intercept", mnlConfig.params.walk_transit_intercept),
        "transfer"  -> UtilityFunctionOperation("multiplier", mnlConfig.params.transfer)
      ),
      "drive_transit" -> Map(
        "intercept" -> UtilityFunctionOperation("intercept", mnlConfig.params.drive_transit_intercept),
        "transfer"  -> UtilityFunctionOperation("multiplier", mnlConfig.params.transfer)
      )
    )

    logit.MultinomialLogit(
      mnlUtilityFunctions,
      commonUtility
    )
  }

  /**
   * Per-alternative summary used by the choice model.
   *
   * @param mode         trip mode of the alternative
   * @param cost         incentive-adjusted monetary cost
   * @param scaledTime   value-of-time-scaled generalized travel time
   * @param numTransfers transit transfer count (0 for non-transit modes)
   * @param index        position in the original alternatives sequence; -1 = unset
   */
  case class ModeCostTimeTransfer(
    mode: BeamMode,
    cost: Double,
    scaledTime: Double,
    numTransfers: Int,
    index: Int = -1
  )
}
| colinsheppard/beam | src/main/scala/beam/agentsim/agents/choice/mode/ModeChoiceMultinomialLogit.scala | Scala | gpl-3.0 | 15,798 |
package scala.slick.relational
import scala.language.existentials
import scala.slick.ast._
import scala.slick.SlickException
import scala.slick.util.SlickLogger
import org.slf4j.LoggerFactory
/** Create a ResultConverter for parameters and result sets. Subclasses have
* to provide profile-specific createColumnConverter implementations. */
trait ResultConverterCompiler[Domain <: ResultConverterDomain] {

  /** Recursively translates a mapping AST node into a ResultConverter tree. */
  def compile(n: Node): ResultConverter[Domain, _] = n match {
    // An insert column may span several physical columns (one per path);
    // multiple paths are combined into a CompoundResultConverter.
    case InsertColumn(paths, fs, _) =>
      val pathConvs = paths.map { case Select(_, ElementSymbol(idx)) => createColumnConverter(n, idx, Some(fs)) }
      if(pathConvs.length == 1) pathConvs.head else CompoundResultConverter(1, pathConvs: _*)
    case OptionApply(InsertColumn(paths, fs, _)) =>
      val pathConvs = paths.map { case Select(_, ElementSymbol(idx)) => createColumnConverter(n, idx, Some(fs)) }
      if(pathConvs.length == 1) pathConvs.head else CompoundResultConverter(1, pathConvs: _*)
    case Select(_, ElementSymbol(idx)) => createColumnConverter(n, idx, None)
    // A silent cast keeps the column position but carries the cast's node type.
    case cast @ Library.SilentCast(sel @ Select(_, ElementSymbol(idx))) =>
      createColumnConverter(sel.nodeTypedOrCopy(cast.nodeType), idx, None)
    case OptionApply(Select(_, ElementSymbol(idx))) => createColumnConverter(n, idx, None)
    case ProductNode(ch) =>
      if(ch.isEmpty) new UnitResultConverter
      else new ProductResultConverter(ch.map(n => compile(n))(collection.breakOut): _*)
    case GetOrElse(ch, default) =>
      createGetOrElseResultConverter(compile(ch).asInstanceOf[ResultConverter[Domain, Option[Any]]], default)
    case TypeMapping(ch, mapper, _) =>
      createTypeMappingResultConverter(compile(ch).asInstanceOf[ResultConverter[Domain, Any]], mapper)
    // Options are rebuilt from a (discriminator, data) pair; a missing
    // discriminator value defaults to 0.
    case RebuildOption(disc, data) =>
      val discConv = createGetOrElseResultConverter(compile(disc).asInstanceOf[ResultConverter[Domain, Option[Int]]], () => 0)
      val dataConv = compile(data).asInstanceOf[ResultConverter[Domain, Any]]
      createOptionRebuildingConverter(discConv, dataConv)
    case n =>
      throw new SlickException("Unexpected node in ResultSetMapping: "+n)
  }

  def createGetOrElseResultConverter[T](rc: ResultConverter[Domain, Option[T]], default: () => T): ResultConverter[Domain, T] =
    new GetOrElseResultConverter[Domain, T](rc, default)

  def createTypeMappingResultConverter(rc: ResultConverter[Domain, Any], mapper: MappedScalaType.Mapper): ResultConverter[Domain, Any] =
    new TypeMappingResultConverter(rc, mapper.toBase, mapper.toMapped)

  def createOptionRebuildingConverter(discriminator: ResultConverter[Domain, Int], data: ResultConverter[Domain, Any]): ResultConverter[Domain, Option[Any]] =
    new OptionRebuildingResultConverter(discriminator, data)

  /** Profile-specific: builds the converter for a single column access. */
  def createColumnConverter(n: Node, idx: Int, column: Option[FieldSymbol]): ResultConverter[Domain, _]

  /** Compiles `n` and wraps the resulting converter in a CompiledMapping node. */
  def compileMapping(n: Node): CompiledMapping = {
    val rc = compile(n)
    ResultConverterCompiler.logger.debug("Compiled ResultConverter", rc)
    CompiledMapping(rc, n.nodeType)
  }
}
object ResultConverterCompiler {
  // Shared logger used to dump compiled converter trees at debug level.
  protected lazy val logger = new SlickLogger(LoggerFactory.getLogger(classOf[ResultConverterCompiler[_]]))
}
/** A node that wraps a ResultConverter */
final case class CompiledMapping(converter: ResultConverter[_ <: ResultConverterDomain, _], tpe: Type) extends NullaryNode with TypedNode {
  type Self = CompiledMapping
  def nodeRebuild = copy()
  // Suppress the default mainInfo and show the converter tree as an extra
  // child in AST dumps instead.
  override def getDumpInfo = {
    val di = super.getDumpInfo
    di.copy(mainInfo = "", children = di.children ++ Vector(("converter", converter)))
  }
}
| nuodb/slick | src/main/scala/scala/slick/relational/ResultConverterCompiler.scala | Scala | bsd-2-clause | 3,593 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ly.stealth.f2k
import kafka.utils.Logging
import java.util.Properties
import kafka.consumer.{Consumer, ConsumerConfig}
import kafka.serializer.{DefaultDecoder, StringDecoder}
import org.apache.avro.generic.{GenericData, GenericDatumReader, GenericRecord}
import org.apache.avro.Schema.Parser
import java.io._
import java.nio.file.{Path, StandardOpenOption, Paths, Files}
import org.apache.avro.io.DecoderFactory
import kafka.consumer.Whitelist
import java.util.concurrent.TimeUnit
import org.apache.avro.util.Utf8
import java.nio.ByteBuffer
import org.apache.avro.reflect.ReflectDatumReader
import org.apache.avro.generic.GenericData.Record
import ly.stealth.f2k.serialization.FileTypeDecoder
/**
 * Consumes file chunks from a Kafka topic (one message per chunk, keyed by the
 * target file path) and reassembles them on disk under `pathToDownload`.
 * The download stops automatically once no message arrives for `maxWaitTimeout` ms.
 */
class KafkaDownloader(topic: String,
                      groupId: String,
                      zookeeperConnect: String,
                      decoderType: String,
                      zkSessionTimeoutMs: Int = 30000,
                      readFromStartOfStream: Boolean = true) extends Logging {
  val props = new Properties()
  props.put("group.id", groupId)
  props.put("zookeeper.connect", zookeeperConnect)
  props.put("auto.offset.reset", if (readFromStartOfStream) "smallest" else "largest")
  props.put("zookeeper.session.timeout.ms", zkSessionTimeoutMs.toString)

  val config = new ConsumerConfig(props)
  val connector = Consumer.create(config)

  val filterSpec = new Whitelist(topic)

  // The consumer is considered stalled (and is shut down) after this many
  // milliseconds without a successfully written chunk.
  val maxWaitTimeout = 15000
  // Timestamp of the last written chunk; watched by ConsumerWatcher.
  var lastUpdate = 0L

  var decoder = FileTypeDecoder.decoder(decoderType)

  info("Trying to start consumer: topic=%s for zk=%s and groupId=%s".format(topic, zookeeperConnect, groupId))
  val stream = connector.createMessageStreamsByFilter(filterSpec, 1, new StringDecoder(), new DefaultDecoder()).head
  info("Started consumer: topic=%s for zk=%s and groupId=%s".format(topic, zookeeperConnect, groupId))

  /**
   * Reads chunks until the consumer is shut down, appending each decoded chunk
   * to the file named by the message key (only keys under `pathToDownload` are
   * processed). Missing parent directories are created on demand.
   */
  def download(pathToDownload: Path) = {
    val it = stream.iterator()
    // Stream for the file currently being written. Declared outside the try so
    // it can be closed in `finally` — previously the last file's stream leaked.
    var out: BufferedOutputStream = null
    try {
      var current = Paths.get("")
      lastUpdate = System.currentTimeMillis()
      val watcher = new Thread(new ConsumerWatcher)
      watcher.setDaemon(true)
      watcher.start()
      while (it.hasNext()) {
        debug("Trying to download file bit")
        val messageAndTopic = it.next()
        debug("Downloaded file bit")
        val path = Paths.get(messageAndTopic.key())
        if (path.startsWith(pathToDownload)) {
          if (!messageAndTopic.message().isEmpty) {
            debug("Trying to decode file bit")
            val record = decoder.decode(messageAndTopic.message())
            debug("File bit has been decoded")
            val parent = path.getParent
            if (!Files.exists(parent)) {
              trace("Directory %s does not exist, trying to create".format(parent.toString))
              Files.createDirectories(parent)
              // Fixed log message: previously started with a Cyrillic "С".
              trace("Created directory %s".format(parent.toString))
            }
            // The key changed: finish the previous file and open the next one.
            if (path != current) {
              debug("File %s has been successfully downloaded".format(current.toString))
              trace("Trying to close out for file %s".format(current.toString))
              if (out != null) out.close()
              // Fixed log message: previously started with a Cyrillic "С".
              trace("Closed out for file %s".format(current.toString))
              current = path
              trace("Trying to create new out for file %s".format(current.toString))
              out = new BufferedOutputStream(Files.newOutputStream(path, StandardOpenOption.APPEND, StandardOpenOption.CREATE))
              trace("Created new out for file %s".format(current.toString))
            }
            debug("Trying to write data for file %s".format(path.toString))
            out.write(record.data)
            out.flush()
            debug("Wrote data for file %s".format(path.toString))
            lastUpdate = System.currentTimeMillis()
          }
        }
      }
    } catch {
      case e: Exception => {
        warn("Consumer has been stopped", e)
      }
    } finally {
      // Close the stream of the last file written (was never closed before).
      if (out != null) out.close()
    }
    info("Files has been successfully downloaded")
  }

  /** Shuts down the underlying Kafka consumer connector. */
  def close() = {
    info("Shutting down consumer: topic=%s for zk=%s and groupId=%s".format(topic, zookeeperConnect, groupId))
    connector.shutdown()
    info("Shut down consumer: topic=%s for zk=%s and groupId=%s".format(topic, zookeeperConnect, groupId))
  }

  /** Background task that closes the consumer when no chunk arrives within maxWaitTimeout. */
  class ConsumerWatcher extends Runnable {
    override def run() {
      while (!Thread.currentThread().isInterrupted) {
        if (System.currentTimeMillis() - lastUpdate > maxWaitTimeout) {
          close()
          return
        } else {
          TimeUnit.MILLISECONDS.sleep(maxWaitTimeout)
        }
      }
    }
  }
}
| stealthly/f2k | src/main/scala/ly/stealth/f2k/KafkaDownloader.scala | Scala | apache-2.0 | 5,418 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
import java.io.{File, IOException, ObjectInputStream}
import java.util.concurrent.ConcurrentLinkedQueue
import scala.collection.JavaConverters._
import scala.language.implicitConversions
import scala.reflect.ClassTag
import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.Eventually.timeout
import org.scalatest.concurrent.PatienceConfiguration
import org.scalatest.time.{Seconds => ScalaTestSeconds, Span}
import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, ForEachDStream, InputDStream}
import org.apache.spark.streaming.scheduler._
import org.apache.spark.util.{ManualClock, Utils}
/**
* A dummy stream that does absolutely nothing.
*/
/** A no-op DStream: no parents, one-second slide, and an empty RDD every batch. */
private[streaming] class DummyDStream(ssc: StreamingContext) extends DStream[Int](ssc) {
  override def dependencies: List[DStream[Int]] = Nil
  override def slideDuration: Duration = Seconds(1)
  override def compute(time: Time): Option[RDD[Int]] = {
    val empty: RDD[Int] = ssc.sc.emptyRDD[Int]
    Some(empty)
  }
}
/**
* A dummy input stream that does absolutely nothing.
*/
/** An input DStream that does nothing: start/stop are no-ops, each batch yields an empty RDD. */
private[streaming] class DummyInputDStream(ssc: StreamingContext) extends InputDStream[Int](ssc) {
  override def start(): Unit = ()
  override def stop(): Unit = ()
  override def compute(time: Time): Option[RDD[Int]] = Option(ssc.sc.emptyRDD[Int])
}
/**
* This is a input stream just for the testsuites. This is equivalent to a checkpointable,
* replayable, reliable message queue like Kafka. It requires a sequence as input, and
* returns the i_th element at the i_th batch under manual clock.
*/
/**
 * Test-only input stream backed by an in-memory sequence of batches: under a
 * manual clock the i-th element of `input` becomes the RDD for the i-th batch.
 * Acts as a stand-in for a checkpointable, replayable source such as Kafka.
 */
class TestInputStream[T: ClassTag](_ssc: StreamingContext, input: Seq[Seq[T]], numPartitions: Int)
  extends InputDStream[T](_ssc) {

  def start() {}

  def stop() {}

  def compute(validTime: Time): Option[RDD[T]] = {
    logInfo("Computing RDD for time " + validTime)
    val batchIndex = ((validTime - zeroTime) / slideDuration - 1).toInt
    val batch = if (batchIndex < input.size) input(batchIndex) else Seq[T]()
    if (batch == null) {
      // A null batch lets tests exercise intervals where no RDD is created.
      None
    } else {
      // Record the batch size with the InputInfoTracker so tests can assert on it.
      ssc.scheduler.inputInfoTracker.reportInfo(validTime, StreamInputInfo(id, batch.length.toLong))
      val rdd = ssc.sc.makeRDD(batch, numPartitions)
      logInfo("Created RDD " + rdd.id + " with " + batch)
      Some(rdd)
    }
  }
}
/**
* This is a output stream just for the testsuites. All the output is collected into a
* ConcurrentLinkedQueue. This queue is wiped clean on being restored from checkpoint.
*
* The buffer contains a sequence of RDD's, each containing a sequence of items.
*/
/**
 * Test-only sink that gathers each batch's collected output into a
 * ConcurrentLinkedQueue of item sequences. The queue is emptied again whenever
 * the stream is restored from a checkpoint.
 */
class TestOutputStream[T: ClassTag](
    parent: DStream[T],
    val output: ConcurrentLinkedQueue[Seq[T]] = new ConcurrentLinkedQueue[Seq[T]]()
  ) extends ForEachDStream[T](parent, (rdd: RDD[T], t: Time) => {
    output.add(rdd.collect())
  }, false) {

  // Drop any previously captured output when deserialized from a checkpoint.
  @throws(classOf[IOException])
  private def readObject(ois: ObjectInputStream): Unit = Utils.tryOrIOException {
    ois.defaultReadObject()
    output.clear()
  }
}
/**
* This is a output stream just for the testsuites. All the output is collected into a
* ConcurrentLinkedQueue. This queue is wiped clean on being restored from checkpoint.
*
* The queue contains a sequence of RDD's, each containing a sequence of partitions, each
* containing a sequence of items.
*/
/**
 * Test-only sink like TestOutputStream, but preserving partition boundaries:
 * each queue entry is a sequence of partitions, each a sequence of items.
 * The queue is emptied again when restored from a checkpoint.
 */
class TestOutputStreamWithPartitions[T: ClassTag](
    parent: DStream[T],
    val output: ConcurrentLinkedQueue[Seq[Seq[T]]] = new ConcurrentLinkedQueue[Seq[Seq[T]]]())
  extends ForEachDStream[T](parent, (rdd: RDD[T], t: Time) => {
    output.add(rdd.glom().collect().map(_.toSeq))
  }, false) {

  // Drop any previously captured output when deserialized from a checkpoint.
  @throws(classOf[IOException])
  private def readObject(ois: ObjectInputStream): Unit = Utils.tryOrIOException {
    ois.defaultReadObject()
    output.clear()
  }
}
/**
 * An object that counts the number of started / completed batches. This is implemented using a
 * StreamingListener. Constructing a new instance automatically registers a StreamingListener on
 * the given StreamingContext.
 */
class BatchCounter(ssc: StreamingContext) {

  // All access to this state should be guarded by `BatchCounter.this.synchronized`
  private var numCompletedBatches = 0
  private var numStartedBatches = 0
  private var lastCompletedBatchTime: Time = null

  private val listener = new StreamingListener {
    override def onBatchStarted(batchStarted: StreamingListenerBatchStarted): Unit =
      BatchCounter.this.synchronized {
        numStartedBatches += 1
        // Wake up any thread blocked in waitUntilConditionBecomeTrue.
        BatchCounter.this.notifyAll()
      }

    override def onBatchCompleted(batchCompleted: StreamingListenerBatchCompleted): Unit =
      BatchCounter.this.synchronized {
        numCompletedBatches += 1
        lastCompletedBatchTime = batchCompleted.batchInfo.batchTime
        BatchCounter.this.notifyAll()
      }
  }
  ssc.addStreamingListener(listener)

  def getNumCompletedBatches: Int = this.synchronized {
    numCompletedBatches
  }

  def getNumStartedBatches: Int = this.synchronized {
    numStartedBatches
  }

  def getLastCompletedBatchTime: Time = this.synchronized {
    lastCompletedBatchTime
  }

  /**
   * Wait until `expectedNumCompletedBatches` batches are completed, or timeout. Return true if
   * `expectedNumCompletedBatches` batches are completed. Otherwise, return false to indicate it's
   * timeout.
   *
   * @param expectedNumCompletedBatches the `expectedNumCompletedBatches` batches to wait
   * @param timeout the maximum time to wait in milliseconds.
   */
  def waitUntilBatchesCompleted(expectedNumCompletedBatches: Int, timeout: Long): Boolean =
    waitUntilConditionBecomeTrue(numCompletedBatches >= expectedNumCompletedBatches, timeout)

  /**
   * Wait until `expectedNumStartedBatches` batches are started, or timeout. Return true if
   * `expectedNumStartedBatches` batches are started. Otherwise, return false to indicate it's
   * timeout.
   *
   * @param expectedNumStartedBatches the `expectedNumStartedBatches` batches to wait
   * @param timeout the maximum time to wait in milliseconds.
   */
  def waitUntilBatchesStarted(expectedNumStartedBatches: Int, timeout: Long): Boolean =
    waitUntilConditionBecomeTrue(numStartedBatches >= expectedNumStartedBatches, timeout)

  // `condition` is by-name, so it is re-evaluated under the lock after every notifyAll.
  private def waitUntilConditionBecomeTrue(condition: => Boolean, timeout: Long): Boolean = {
    synchronized {
      var now = System.currentTimeMillis()
      val timeoutTick = now + timeout
      while (!condition && timeoutTick > now) {
        wait(timeoutTick - now)
        now = System.currentTimeMillis()
      }
      condition
    }
  }
}
/**
* This is the base trait for Spark Streaming testsuites. This provides basic functionality
* to run user-defined set of input on user-defined stream operations, and verify the output.
*/
trait TestSuiteBase extends SparkFunSuite with BeforeAndAfter with Logging {
  // Name of the framework for Spark context
  def framework: String = this.getClass.getSimpleName

  // Master for Spark context
  def master: String = "local[2]"

  // Batch duration
  def batchDuration: Duration = Seconds(1)

  // Directory where the checkpoint data will be saved.
  // Lazily created; a fresh temp dir per suite instance.
  lazy val checkpointDir: String = {
    val dir = Utils.createTempDir()
    logDebug(s"checkpointDir: $dir")
    dir.toString
  }

  // Number of partitions of the input parallel collections created for testing
  def numInputPartitions: Int = 2

  // Maximum time to wait before the test times out
  def maxWaitTimeMillis: Int = 10000

  // Whether to use manual clock or not
  def useManualClock: Boolean = true

  // Whether to actually wait in real time before changing manual clock
  def actuallyWait: Boolean = false

  // A SparkConf to use in tests. Can be modified before calling setupStreams to configure things.
  val conf = new SparkConf()
    .setMaster(master)
    .setAppName(framework)

  // Timeout for use in ScalaTest `eventually` blocks
  val eventuallyTimeout: PatienceConfiguration.Timeout = timeout(Span(10, ScalaTestSeconds))
// Default before function for any streaming test suite. Override this
// if you want to add your stuff to "before" (i.e., don't call before { } )
def beforeFunction() {
if (useManualClock) {
logInfo("Using manual clock")
conf.set("spark.streaming.clock", "org.apache.spark.util.ManualClock")
} else {
logInfo("Using real clock")
conf.set("spark.streaming.clock", "org.apache.spark.util.SystemClock")
}
}
  // Default after function for any streaming test suite. Override this
  // if you want to add your stuff to "after" (i.e., don't call after { } )
  def afterFunction() {
    // Remove the JVM-wide clock override so later suites get the default clock.
    System.clearProperty("spark.streaming.clock")
  }

  before(beforeFunction)
  after(afterFunction)
/**
* Run a block of code with the given StreamingContext and automatically
* stop the context when the block completes or when an exception is thrown.
*/
def withStreamingContext[R](ssc: StreamingContext)(block: StreamingContext => R): R = {
try {
block(ssc)
} finally {
try {
ssc.stop(stopSparkContext = true)
} catch {
case e: Exception =>
logError("Error stopping StreamingContext", e)
}
}
}
/**
* Run a block of code with the given TestServer and automatically
* stop the server when the block completes or when an exception is thrown.
*/
def withTestServer[R](testServer: TestServer)(block: TestServer => R): R = {
try {
block(testServer)
} finally {
try {
testServer.stop()
} catch {
case e: Exception =>
logError("Error stopping TestServer", e)
}
}
}
  /**
   * Set up required DStreams to test a unary DStream operation using the given
   * sequence of input collections (one inner Seq per batch).
   *
   * @param input         one collection of input items per batch
   * @param operation     the unary DStream operation under test
   * @param numPartitions number of partitions for each input RDD
   * @return a StreamingContext with input, operation and a collecting output stream wired up
   */
  def setupStreams[U: ClassTag, V: ClassTag](
      input: Seq[Seq[U]],
      operation: DStream[U] => DStream[V],
      numPartitions: Int = numInputPartitions
    ): StreamingContext = {
    // Create StreamingContext
    val ssc = new StreamingContext(conf, batchDuration)
    // checkpointDir is a lazy val that should never be null; kept as a
    // defensive guard for subclasses that override it — TODO confirm.
    if (checkpointDir != null) {
      ssc.checkpoint(checkpointDir)
    }

    // Setup the stream computation
    val inputStream = new TestInputStream(ssc, input, numPartitions)
    val operatedStream = operation(inputStream)
    val outputStream = new TestOutputStreamWithPartitions(operatedStream,
      new ConcurrentLinkedQueue[Seq[Seq[V]]])
    outputStream.register()
    ssc
  }
  /**
   * Set up required DStreams to test a binary DStream operation using the two
   * sequences of input collections (one inner Seq per batch in each).
   *
   * @param input1    per-batch input for the first stream
   * @param input2    per-batch input for the second stream
   * @param operation the binary DStream operation under test
   * @return a StreamingContext with inputs, operation and a collecting output stream wired up
   */
  def setupStreams[U: ClassTag, V: ClassTag, W: ClassTag](
      input1: Seq[Seq[U]],
      input2: Seq[Seq[V]],
      operation: (DStream[U], DStream[V]) => DStream[W]
    ): StreamingContext = {
    // Create StreamingContext
    val ssc = new StreamingContext(conf, batchDuration)
    // See setupStreams above: defensive guard, checkpointDir is normally non-null.
    if (checkpointDir != null) {
      ssc.checkpoint(checkpointDir)
    }

    // Setup the stream computation
    val inputStream1 = new TestInputStream(ssc, input1, numInputPartitions)
    val inputStream2 = new TestInputStream(ssc, input2, numInputPartitions)
    val operatedStream = operation(inputStream1, inputStream2)
    val outputStream = new TestOutputStreamWithPartitions(operatedStream,
      new ConcurrentLinkedQueue[Seq[Seq[W]]])
    outputStream.register()
    ssc
  }
/**
* Runs the streams set up in `ssc` on manual clock for `numBatches` batches and
* returns the collected output. It will wait until `numExpectedOutput` number of
* output data has been collected or timeout (set by `maxWaitTimeMillis`) is reached.
*
* Returns a sequence of items for each RDD.
*
* @param ssc The StreamingContext
* @param numBatches The number of batches should be run
* @param numExpectedOutput The number of expected output
* @param preStop The function to run before stopping StreamingContext
*/
def runStreams[V: ClassTag](
ssc: StreamingContext,
numBatches: Int,
numExpectedOutput: Int,
preStop: () => Unit = () => {}
): Seq[Seq[V]] = {
// Flatten each RDD into a single Seq
runStreamsWithPartitions(ssc, numBatches, numExpectedOutput, preStop).map(_.flatten.toSeq)
}
  /**
   * Runs the streams set up in `ssc` on manual clock for `numBatches` batches and
   * returns the collected output. It will wait until `numExpectedOutput` number of
   * output data has been collected or timeout (set by `maxWaitTimeMillis`) is reached.
   *
   * Returns a sequence of RDD's. Each RDD is represented as several sequences of items, each
   * representing one partition.
   *
   * @param ssc The StreamingContext
   * @param numBatches The number of batches should be run
   * @param numExpectedOutput The number of expected output
   * @param preStop The function to run before stopping StreamingContext
   */
  def runStreamsWithPartitions[V: ClassTag](
      ssc: StreamingContext,
      numBatches: Int,
      numExpectedOutput: Int,
      preStop: () => Unit = () => {}
    ): Seq[Seq[Seq[V]]] = {
    assert(numBatches > 0, "Number of batches to run stream computation is zero")
    assert(numExpectedOutput > 0, "Number of expected outputs after " + numBatches + " is zero")
    logInfo("numBatches = " + numBatches + ", numExpectedOutput = " + numExpectedOutput)

    // Get the output buffer: the first TestOutputStreamWithPartitions registered on the graph.
    val outputStream = ssc.graph.getOutputStreams.
      filter(_.isInstanceOf[TestOutputStreamWithPartitions[_]]).
      head.asInstanceOf[TestOutputStreamWithPartitions[V]]
    val output = outputStream.output

    try {
      // Start computation
      ssc.start()

      // Advance manual clock
      val clock = ssc.scheduler.clock.asInstanceOf[ManualClock]
      logInfo("Manual clock before advancing = " + clock.getTimeMillis())
      if (actuallyWait) {
        // Advance one batch at a time, sleeping a real batch interval in between.
        for (i <- 1 to numBatches) {
          logInfo("Actually waiting for " + batchDuration)
          clock.advance(batchDuration.milliseconds)
          Thread.sleep(batchDuration.milliseconds)
        }
      } else {
        // Jump the clock over all batches at once.
        clock.advance(numBatches * batchDuration.milliseconds)
      }
      logInfo("Manual clock after advancing = " + clock.getTimeMillis())

      // Wait until expected number of output items have been generated
      val startTime = System.currentTimeMillis()
      while (output.size < numExpectedOutput &&
        System.currentTimeMillis() - startTime < maxWaitTimeMillis) {
        logInfo("output.size = " + output.size + ", numExpectedOutput = " + numExpectedOutput)
        ssc.awaitTerminationOrTimeout(50)
      }
      val timeTaken = System.currentTimeMillis() - startTime
      logInfo("Output generated in " + timeTaken + " milliseconds")
      output.asScala.foreach(x => logInfo("[" + x.mkString(",") + "]"))
      assert(timeTaken < maxWaitTimeMillis, "Operation timed out after " + timeTaken + " ms")
      assert(output.size === numExpectedOutput, "Unexpected number of outputs generated")

      Thread.sleep(100) // Give some time for the forgetting old RDDs to complete
      preStop()
    } finally {
      ssc.stop(stopSparkContext = true)
    }
    output.asScala.toSeq
  }
/**
* Verify whether the output values after running a DStream operation
* is same as the expected output values, by comparing the output
* collections either as lists (order matters) or sets (order does not matter)
*/
def verifyOutput[V: ClassTag](
output: Seq[Seq[V]],
expectedOutput: Seq[Seq[V]],
useSet: Boolean
) {
logInfo("--------------------------------")
logInfo("output.size = " + output.size)
logInfo("output")
output.foreach(x => logInfo("[" + x.mkString(",") + "]"))
logInfo("expected output.size = " + expectedOutput.size)
logInfo("expected output")
expectedOutput.foreach(x => logInfo("[" + x.mkString(",") + "]"))
logInfo("--------------------------------")
// Match the output with the expected output
for (i <- 0 until output.size) {
if (useSet) {
assert(
output(i).toSet === expectedOutput(i).toSet,
s"Set comparison failed\n" +
s"Expected output (${expectedOutput.size} items):\n${expectedOutput.mkString("\n")}\n" +
s"Generated output (${output.size} items): ${output.mkString("\n")}"
)
} else {
assert(
output(i).toList === expectedOutput(i).toList,
s"Ordered list comparison failed\n" +
s"Expected output (${expectedOutput.size} items):\n${expectedOutput.mkString("\n")}\n" +
s"Generated output (${output.size} items): ${output.mkString("\n")}"
)
}
}
logInfo("Output verified successfully")
}
/**
* Test unary DStream operation with a list of inputs, with number of
* batches to run same as the number of expected output values
*/
def testOperation[U: ClassTag, V: ClassTag](
input: Seq[Seq[U]],
operation: DStream[U] => DStream[V],
expectedOutput: Seq[Seq[V]],
useSet: Boolean = false
) {
testOperation[U, V](input, operation, expectedOutput, -1, useSet)
}
/**
* Test unary DStream operation with a list of inputs
* @param input Sequence of input collections
* @param operation Binary DStream operation to be applied to the 2 inputs
* @param expectedOutput Sequence of expected output collections
* @param numBatches Number of batches to run the operation for
* @param useSet Compare the output values with the expected output values
* as sets (order matters) or as lists (order does not matter)
*/
def testOperation[U: ClassTag, V: ClassTag](
input: Seq[Seq[U]],
operation: DStream[U] => DStream[V],
expectedOutput: Seq[Seq[V]],
numBatches: Int,
useSet: Boolean
) {
val numBatches_ = if (numBatches > 0) numBatches else expectedOutput.size
withStreamingContext(setupStreams[U, V](input, operation)) { ssc =>
val output = runStreams[V](ssc, numBatches_, expectedOutput.size)
verifyOutput[V](output.toSeq, expectedOutput, useSet)
}
}
/**
* Test binary DStream operation with two lists of inputs, with number of
* batches to run same as the number of expected output values
*/
def testOperation[U: ClassTag, V: ClassTag, W: ClassTag](
input1: Seq[Seq[U]],
input2: Seq[Seq[V]],
operation: (DStream[U], DStream[V]) => DStream[W],
expectedOutput: Seq[Seq[W]],
useSet: Boolean
) {
testOperation[U, V, W](input1, input2, operation, expectedOutput, -1, useSet)
}
/**
* Test binary DStream operation with two lists of inputs
* @param input1 First sequence of input collections
* @param input2 Second sequence of input collections
* @param operation Binary DStream operation to be applied to the 2 inputs
* @param expectedOutput Sequence of expected output collections
* @param numBatches Number of batches to run the operation for
* @param useSet Compare the output values with the expected output values
* as sets (order matters) or as lists (order does not matter)
*/
def testOperation[U: ClassTag, V: ClassTag, W: ClassTag](
input1: Seq[Seq[U]],
input2: Seq[Seq[V]],
operation: (DStream[U], DStream[V]) => DStream[W],
expectedOutput: Seq[Seq[W]],
numBatches: Int,
useSet: Boolean
) {
val numBatches_ = if (numBatches > 0) numBatches else expectedOutput.size
withStreamingContext(setupStreams[U, V, W](input1, input2, operation)) { ssc =>
val output = runStreams[W](ssc, numBatches_, expectedOutput.size)
verifyOutput[W](output.toSeq, expectedOutput, useSet)
}
}
/**
* Creates a temporary directory, which is then passed to `f` and will be deleted after `f`
* returns.
* (originally from `SqlTestUtils`.)
* @todo Probably this method should be moved to a more general place
*/
protected def withTempDir(f: File => Unit): Unit = {
val dir = Utils.createTempDir().getCanonicalFile
try f(dir) finally Utils.deleteRecursively(dir)
}
}
| michalsenkyr/spark | streaming/src/test/scala/org/apache/spark/streaming/TestSuiteBase.scala | Scala | apache-2.0 | 21,287 |
package com.amazon.datagen.api.rof
import com.amazon.mqa.datagen.rof.{DefaultObjectFactory => jObjectFactory}
import com.google.common.base.Supplier
import scala.collection.JavaConversions
import scala.reflect._
import scala.reflect.runtime.universe._
/**
 * Scala Api for object factory: creates instances of a requested type, and can be
 * chained with `orElse` to build a fallback pipeline of factories.
 */
trait ObjectFactory { self =>

  /** Creates an instance of `T`; behavior is undefined when `isDefinedAt[T]` is false. */
  def create[T: TypeTag: ClassTag]: T

  /** Whether this factory can create instances of `T`. */
  def isDefinedAt[T: TypeTag: ClassTag]: Boolean

  /** A factory that tries `self` first and falls back to `factory` for unsupported types. */
  def orElse(factory: ObjectFactory): ObjectFactory = new ObjectFactory {
    override def isDefinedAt[T: TypeTag: ClassTag]: Boolean =
      self.isDefinedAt[T] || factory.isDefinedAt[T]

    override def create[T: TypeTag: ClassTag]: T =
      if (self.isDefinedAt[T]) {
        self.create[T]
      } else if (factory.isDefinedAt[T]) {
        factory.create[T]
      } else {
        throw new RuntimeException(s"unsupported type: ${typeOf[T]}")
      }
  }
}
/**
 * Mixin that builds a config-driven chain of factories behind an [[ObjectFactory]]:
 * the mixed-in factory is tried first, then the configured basic suppliers, then
 * the reflection-based Java factory as a last resort.
 */
trait ReflectionObjectFactory extends ObjectFactory {

  def apply(): ObjectFactory = apply(Config())

  // Resolution order: this factory -> configured suppliers -> Java reflection fallback.
  def apply(config: Config): ObjectFactory =
    orElse(new BasicObjectFactory(config.suppliers))
      .orElse(new JavaObjectFactory(
        new jObjectFactory(JavaConversions.mapAsJavaMap(config.suppliers),
          new Supplier[Integer] {
            override def get(): Integer = config.arraySizeSupplier.get()
          })
      ))

  // Adapts a Guava Supplier producing S into a factory defined only for exactly type S.
  def fromSupplier[S: TypeTag](supplier: Supplier[S]): ObjectFactory = new ObjectFactory {
    override def create[T: TypeTag: ClassTag]: T = supplier.get().asInstanceOf[T]
    override def isDefinedAt[T: TypeTag: ClassTag]: Boolean = typeOf[T] =:= typeOf[S]
  }

//  def fromSupplier[S: TypeTag](supplier: Supplier[_]): ObjectFactory = new ObjectFactory {
//    override def create[T: TypeTag: ClassTag]: T = supplier.get().asInstanceOf[T]
//    override def isDefinedAt[T: TypeTag: ClassTag]: Boolean = typeOf[T] =:= typeOf[S]
//  }

  // By-name variant: `f` is re-evaluated on every create call.
  def fromSupplier[S: TypeTag](f: => S): ObjectFactory = new ObjectFactory {
    override def create[T: TypeTag: ClassTag]: T = f.asInstanceOf[T]
    override def isDefinedAt[T: TypeTag: ClassTag]: Boolean = typeOf[T] =:= typeOf[S]
  }

  // The trait itself creates nothing; concrete behavior comes from the orElse chain above.
  override def isDefinedAt[T: TypeTag: ClassTag]: Boolean = false

  override def create[T: TypeTag: ClassTag]: T = throw new RuntimeException("unsupported")
}
object ReflectionObjectFactory extends ReflectionObjectFactory {

  // Chains the given factories left-to-right; note that `reduce` throws on an
  // empty argument list, matching the previous behavior.
  def apply(factories: ObjectFactory*): ObjectFactory =
    factories.reduce(_ orElse _)
}
/** Serves exactly the types for which a supplier was registered. */
class BasicObjectFactory(suppliers: Map[Class[_], Supplier[_]]) extends ObjectFactory {

  override def isDefinedAt[T: TypeTag: ClassTag]: Boolean =
    suppliers.contains(classTag[T].runtimeClass)

  override def create[T: TypeTag: ClassTag]: T = {
    val clazz = classTag[T].runtimeClass
    suppliers.get(clazz)
      .map(_.get().asInstanceOf[T])
      .getOrElse(throw new RuntimeException(s"unsupported basic class: $clazz"))
  }
}
// Sketch of a factory for parameterized container types (e.g. List[A]); both
// members are still unimplemented and throw NotImplementedError when called.
class TypedObjectFactory(objectFactory: ObjectFactory) extends ObjectFactory {

  override def create[T: TypeTag: ClassTag]: T = {
//    val TypeRef(_, _, args) = typeOf[T]
//
//    if (args.length == 1) {
//      (1 to 10) map { _=>
//        objectFactory.create[args.type]
//      }
//
//    } else {
//      throw new RuntimeException
//    }
    ???
  }

  override def isDefinedAt[T: TypeTag : ClassTag]: Boolean = ??? //typeOf[T] <:< typeOf[List]
}
// Bridges to the Java DefaultObjectFactory: handles any type whose symbol is a
// Java class, delegating instantiation to runtime reflection.
class JavaObjectFactory(factory: jObjectFactory) extends ObjectFactory {

  override def create[T: TypeTag: ClassTag]: T = factory.create(classTag[T].runtimeClass).asInstanceOf[T]

  override def isDefinedAt[T: TypeTag: ClassTag]: Boolean = {
    val TypeRef(_, symbol, _) = typeOf[T]
    symbol.isJava
  }
} | leakingtapan/rof-scala | src/main/scala/com/amazon/datagen/api/rof/ObjectFactory.scala | Scala | apache-2.0 | 3663
package com.partup
import akka.actor.{Actor, ActorLogging, ActorRef, ActorRefFactory, Props}
import com.partup.RawEventJsonProtocol._
import iot.jcypher.database.IDBAccess
import iot.jcypher.query.api.IClause
import iot.jcypher.query.factories.clause._
import iot.jcypher.query.values.{JcNode, JcNumber, JcRelation}
import iot.jcypher.query.{JcQuery, JcQueryResult}
import spray.http.{MediaTypes, StatusCodes}
import spray.routing._
import spray.routing.directives.LoggingMagnet
import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext
// we don't implement our route structure directly in the service actor because
// we want to be able to test it independently, without having to spin up an actor
class EventsApiActor(eventReceivers: Seq[ActorRef], var conn: Option[IDBAccess])
  extends Actor with EventsApiService with ActorLogging {

  // this actor only runs our route, but you could add
  // other things here, like request stream processing
  // or timeout handling
  override def receive: Receive = runRoute(logRequest(LoggingMagnet {
    request =>
      log.info(request.uri.path.toString())
  }) {
    myRoute
  })

  // Fan every incoming event out to all registered receivers (fire-and-forget).
  def persistEvent(e: RawEvent): Unit = {
    eventReceivers.foreach(_ ! e)
  }

  def actorRefFactory: ActorRefFactory = context
}
object EventsApiActor {
  // Props factory so callers never construct the actor with `new` directly.
  def props(eventReceivers: Seq[ActorRef], conn: Option[IDBAccess]): Props =
    Props(new EventsApiActor(eventReceivers, conn))
}
// this trait defines our service behavior independently from the service actor
trait EventsApiService extends HttpService {
  import StatusReport._
  import spray.json._

  implicit val ec: ExecutionContext = actorRefFactory.dispatcher

  // Route DSL is evaluated top-down; the first matching path wins.
  val myRoute: Route =
    pathEndOrSingleSlash {
      get {
        complete(
          """
            |Welcome to Part-up REST API.
            |Endpoint to posts events: /events""".stripMargin)
      }
    } ~
      path("events") {
        get {
          //TODO provide a better URL
          complete("Post events as described in http://doc.part-up.com")
        } ~
          post {
            // Deserialize the JSON body into a RawEvent and hand it to the persistor.
            entity(as[RawEvent]) { e =>
              persistEvent(e)
              complete("OK")
            }
          }
      } ~
      pathPrefix("partups") {
        path("recommended" / "for" / "user" / Segment) { userId =>
          get {
            getRecommendedPartups(userId) match {
              case Some(ids) => respondWithMediaType(MediaTypes.`application/json`) {
                complete(JsObject(Map("partUpIds" -> ids.toJson)).prettyPrint)
              }
              case None =>
                respondWithStatus(StatusCodes.NotFound) {
                  complete(s"User '$userId' not found.")
                }
            }
          }
        }
      }
  // The (optional) graph database connection; None means no DB is available.
  def conn: Option[IDBAccess]

  // Hands an incoming raw event to whatever persistence mechanism the implementor uses.
  def persistEvent(e: RawEvent): Unit
def getRecommendedPartups(userId: String): Option[List[String]] = {
if (!userExists(userId).get) {
return None
}
val user = new JcNode("u")
val team = new JcNode("t")
val recommend = new JcRelation("r")
val result = sendToNeo4j(Array(
MATCH.node(user).label("User").property("_id").value(userId)
.relation(recommend).out.`type`("RECOMMEND")
.node(team).label("Team"),
WITH.value(team),
RETURN.value(team.property("_id"))
)).get
Some(
if (result.hasErrors) List()
else result.resultOf(team.property("_id")).asScala
.filter(_ != null)
.map(_.toString)
.toList
)
}
def userExists(userId: String): Option[Boolean] = {
val res = sendToNeo4j(Array(
NATIVE.cypher(s"MATCH (n:User) WHERE n._id = '$userId' RETURN count(*) LIMIT 1")
))
res.map({ result =>
if (result.hasErrors) {
println("Failed to query Neo4J..")
if (result.getGeneralErrors.asScala.nonEmpty) {
println("General errors:")
result.getGeneralErrors.asScala.foreach(x => println(x.toString))
}
if (result.getDBErrors.asScala.nonEmpty) {
println("Database errors:")
result.getDBErrors.asScala.foreach(x => println(x.toString))
}
false
} else result.resultOf(new JcNumber("count(*)")).asScala.map(_.intValue()).head >= 1
})
}
def sendToNeo4j(clauses: Array[IClause]): Option[JcQueryResult] = conn.map({ c =>
val query = new JcQuery()
query.setClauses(clauses)
query.setExtractParams(false)
c.execute(query)
})
}
// Currently unused by the routes above; kept for future error signalling.
class UserNotFoundException(message: String = null, cause: Throwable = null) extends RuntimeException(message, cause) | part-up/api | src/main/scala/com/partup/EventsApiService.scala | Scala | agpl-3.0 | 4583
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.keras.layers
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.Shape
import com.intel.analytics.bigdl.dllib.keras.Sequential
import com.intel.analytics.bigdl.dllib.keras.serializer.ModuleSerializationTest
class EmbeddingSpec extends KerasBaseSpec {

  // Compared results with Keras on Python side
  "Embedding with weights" should "work properly" in {
    val weights = Tensor[Float](10, 32).rand()
    val seq = Sequential[Float]()
    val layer = Embedding[Float](10, 32, weights = weights, inputLength = 4)
    seq.add(layer)
    // The layer must expose exactly the preset weight table.
    require(seq.getWeightsBias().sameElements(Array(weights)))
    seq.getOutputShape().toSingle().toArray should be (Array(-1, 4, 32))
    val input = Tensor[Float](2, 4)
    input(Array(1, 1)) = 1
    input(Array(1, 2)) = 2
    input(Array(1, 3)) = 4
    input(Array(1, 4)) = 5
    input(Array(2, 1)) = 4
    input(Array(2, 2)) = 3
    input(Array(2, 3)) = 2
    input(Array(2, 4)) = 6
    val output = seq.forward(input).toTensor[Float]
    // Every output row must equal the weight row selected by the input index
    // (+1 converts the 0-based index values to 1-based tensor addressing).
    for (i <- 0 to 1) {
      val nonBatchOutput = output.split(1)(i)
      for (j <- 0 to 3) {
        val actual = nonBatchOutput.split(1)(j)
        val expected = weights.select(1, input.valueAt(i + 1, j + 1).toInt + 1)
        require(actual == expected)
      }
    }
    // Smoke-test the backward pass; the gradient values are not checked here.
    val gradInput = seq.backward(input, output)
  }
}
class EmbeddingSerialTest extends ModuleSerializationTest {
  override def test(): Unit = {
    val layer = Embedding[Float](1000, 32, inputLength = 4)
    layer.build(Shape(2, 4))
    // Same 2x4 index batch as before, written as (row, column, value) triples.
    val input = Tensor[Float](2, 4)
    Seq(
      (1, 1, 1f), (1, 2, 2f), (1, 3, 4f), (1, 4, 5f),
      (2, 1, 4f), (2, 2, 3f), (2, 3, 2f), (2, 4, 6f)
    ).foreach { case (row, col, value) =>
      input(Array(row, col)) = value
    }
    runSerializationTest(layer, input)
  }
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/keras/layers/EmbeddingSpec.scala | Scala | apache-2.0 | 2,471 |
package org.juanitodread.conwaygameoflife.model
import org.juanitodread.conwaygameoflife.UnitSpec
import org.juanitodread.conwaygameoflife.model.cell.{
Cell,
State
}
class BoardSpec extends UnitSpec with BoardSpecConstants {

  // --- construction: board size must lie in [30, 100] ---

  "A Board" should "be created with a default size of 30" in {
    val board = Board()
    assert(board.size === 30)
  }

  "A Board" should "be created with a size of 100" in {
    val board = Board(100)
    assert(board.size === 100)
  }

  "A Board" should "throw IllegalArgumentException if the provided size is less than 30" in {
    assertThrows[IllegalArgumentException] {
      Board(29)
    }
  }

  "A Board" should "throw IllegalArgumentException if the provided size is greather than 100" in {
    assertThrows[IllegalArgumentException] {
      Board(101)
    }
  }

  // --- cell access and state transitions ---

  "A Board" should "return a Dead Cell according the position [0,0]" in {
    val board = Board()
    val cell = board.cellAt(0, 0)
    assert(cell.isInstanceOf[Cell])
  }

  "A Board" should "be initialized with Dead Cells" in {
    val size = 50
    val board = Board(size)
    for (
      row <- 0 until size;
      col <- 0 until size
    ) {
      assert(board.cellAt(row, col).isDead())
    }
  }

  "A Board" should "create a new Dead Cell at the position [0,0]" in {
    val board = Board()
    val oldCell = board.cellAt(0, 0)
    board.deadCell(0, 0)
    val newCell = board.cellAt(0, 0)
    // Cells appear to be immutable: changing state replaces the instance.
    assert(oldCell !== newCell)
    assert(newCell.isDead())
  }

  "A Board" should "create a new Alive Cell at the position [0,0]" in {
    val board = Board()
    val oldCell = board.cellAt(0, 0)
    board.aliveCell(0, 0)
    val newCell = board.cellAt(0, 0)
    assert(oldCell !== newCell)
    assert(newCell.isAlive())
  }

  "A Default Board" should "return a String grid of size 30 with o's" in {
    val board = Board()
    assert(board.toString === DefaultBoardToString)
  }

  // --- toroidal ("circular") neighbor arithmetic: indices wrap at the edges ---

  "A Board" should "calculate a valid position for edges in the board" in {
    val board = Board()
    val minEdgeCase = -1
    val maxEdgeCase = board.size
    // -1 wraps to the last index; size wraps back to 0; in-range indices are unchanged.
    assert(Board.calculateCircularPosition(minEdgeCase, board.size) === board.size - 1)
    assert(Board.calculateCircularPosition(maxEdgeCase, board.size) === 0)
    for (i <- 0 until board.size) {
      assert(Board.calculateCircularPosition(i, board.size) === i)
    }
  }

  // Incrementally revive the 8 wrap-around neighbors of (0,0) and check the count each time.
  "A Board" should "calculate how many alive neighbors a cell has" in {
    val board = Board()
    var neighbors = board.countAliveNeighborsForCell(0, 0)
    assert(neighbors === 0)
    board.aliveCell(29, 29)
    neighbors = board.countAliveNeighborsForCell(0, 0)
    assert(neighbors === 1)
    board.aliveCell(29, 0)
    neighbors = board.countAliveNeighborsForCell(0, 0)
    assert(neighbors === 2)
    board.aliveCell(29, 1)
    neighbors = board.countAliveNeighborsForCell(0, 0)
    assert(neighbors === 3)
    board.aliveCell(0, 29)
    neighbors = board.countAliveNeighborsForCell(0, 0)
    assert(neighbors === 4)
    board.aliveCell(0, 1)
    neighbors = board.countAliveNeighborsForCell(0, 0)
    assert(neighbors === 5)
    board.aliveCell(1, 29)
    neighbors = board.countAliveNeighborsForCell(0, 0)
    assert(neighbors === 6)
    board.aliveCell(1, 0)
    neighbors = board.countAliveNeighborsForCell(0, 0)
    assert(neighbors === 7)
    board.aliveCell(1, 1)
    neighbors = board.countAliveNeighborsForCell(0, 0)
    assert(neighbors === 8)
    // (2,2) is not adjacent to (0,0), so the count must stay at 8.
    board.aliveCell(2, 2)
    neighbors = board.countAliveNeighborsForCell(0, 0)
    assert(neighbors === 8)
  }

  // Mirror of the previous test: dead-neighbor count falls from 8 to 0.
  "A Board" should "calculate how many dead neighbors a cell has" in {
    val board = Board()
    var neighbors = board.countDeadNeighborsForCell(0, 0)
    assert(neighbors === 8)
    board.aliveCell(29, 29)
    neighbors = board.countDeadNeighborsForCell(0, 0)
    assert(neighbors === 7)
    board.aliveCell(29, 0)
    neighbors = board.countDeadNeighborsForCell(0, 0)
    assert(neighbors === 6)
    board.aliveCell(29, 1)
    neighbors = board.countDeadNeighborsForCell(0, 0)
    assert(neighbors === 5)
    board.aliveCell(0, 29)
    neighbors = board.countDeadNeighborsForCell(0, 0)
    assert(neighbors === 4)
    board.aliveCell(0, 1)
    neighbors = board.countDeadNeighborsForCell(0, 0)
    assert(neighbors === 3)
    board.aliveCell(1, 29)
    neighbors = board.countDeadNeighborsForCell(0, 0)
    assert(neighbors === 2)
    board.aliveCell(1, 0)
    neighbors = board.countDeadNeighborsForCell(0, 0)
    assert(neighbors === 1)
    board.aliveCell(1, 1)
    neighbors = board.countDeadNeighborsForCell(0, 0)
    assert(neighbors === 0)
    board.aliveCell(2, 2)
    neighbors = board.countDeadNeighborsForCell(0, 0)
    assert(neighbors === 0)
  }

  // Conway rules: a live cell dies with <2 or >3 live neighbors, lives with 2 or 3.
  "A Board" should "calculate the new state of a Cell" in {
    val board = Board()
    board.aliveCell(0, 0)
    assert(board.calculateCellState(0, 0) === State.Dead)
    board.aliveCell(0, 1)
    board.aliveCell(1, 0)
    assert(board.calculateCellState(0, 0) === State.Alive)
    board.aliveCell(1, 1)
    assert(board.calculateCellState(0, 0) === State.Alive)
    board.aliveCell(29, 29)
    assert(board.calculateCellState(0, 0) === State.Dead)
  }
} | juanitodread/conway-game-of-life | src/test/scala/org/juanitodread/conwaygameoflife/model/BoardSpec.scala | Scala | apache-2.0 | 5150
// Copyright (c) 2016 PSForever.net to present
package net.psforever.packet.game
import net.psforever.newcodecs.newcodecs
import net.psforever.packet.{GamePacketOpcode, Marshallable, PlanetSideGamePacket}
import scodec.Codec
import scodec.codecs._
import shapeless.{::, HNil}
/**
 * The position of a waypoint in the game world.
 * Only two coordinates are required as the beam travels from a specific height to ground level.
 * The codec quantizes each coordinate into the 0.0-8192.0 map range.
 * @param x the x-coordinate of the waypoint
 * @param y the y-coordinate of the waypoint
 */
final case class Waypoint(x : Float,
                          y : Float)
/**
 * Dispatched by the server to tell the client to display an orbital strike waypoint somewhere in the game world.<br>
 * <br>
 * Waypoints are kept unique by the `guid` that is passed with them.
 * To clear a waypoint is to pass the another packet to the client with the same GUID but with no coordinates.
 * Passing new coordinates with that GUID will update the position of the indicated waypoint.
 * If the GUID sent with the packet belongs to the client's avatar that player will be given text overlay instructions:<br>
 * "Press the fire key or button to launch an orbital strike at the waypoint."<br>
 * The text will fade shortly after the waypoint has been cleared.<br>
 * <br>
 * All `OrbitalStrikeWaypointMessage` packets sent to a client will create a waypoint that will be seen by that client.
 * All rendered waypoints, regardless of the users who summoned them, will be seen in the faction color of the client's avatar.
 * (Black OPs orbital strike waypoints are green, as expected.)
 * The server should not notify the wrong clients about another faction's prepared orbital strikes;
 * however, even if it did, those beams would be seen as a same-faction's marker.
 * @param guid coordinates used to identify the waypoint;
 *             ostensibly, the GUID of the player who placed the waypoint
 * @param coords the coordinates of the waypoint;
 *               `None` if clearing a waypoint (use the same `guid` as to create it)
 */
final case class OrbitalStrikeWaypointMessage(guid : PlanetSideGUID,
                                              coords : Option[Waypoint] = None)
  extends PlanetSideGamePacket {
  type Packet = OrbitalStrikeWaypointMessage
  def opcode = GamePacketOpcode.OrbitalStrikeWaypointMessage
  def encode = OrbitalStrikeWaypointMessage.encode(this)
}
object OrbitalStrikeWaypointMessage extends Marshallable[OrbitalStrikeWaypointMessage] {
  /**
   * An abbreviated constructor for creating `OrbitalStrikeWaypointMessage`, assuming mandatory coordinates.
   * @param guid na
   * @param x the x-coordinate of the waypoint
   * @param y the y-coordinate of the waypoint
   * @return an `OrbitalStrikeWaypointMessage` object
   */
  def apply(guid : PlanetSideGUID, x : Float, y : Float) : OrbitalStrikeWaypointMessage =
    new OrbitalStrikeWaypointMessage(guid, Option(Waypoint(x, y)))

  /**
   * A `Codec` for recording the two coordinates of the waypoint map position, if they are present.
   * Each coordinate is quantized into 20 bits over the 0.0-8192.0 map range.
   */
  private val coords_value : Codec[Waypoint] = (
    ("x" | newcodecs.q_float(0.0, 8192.0, 20)) ::
      ("y" | newcodecs.q_float(0.0, 8192.0, 20))
    ).xmap[Waypoint] (
    {
      case x :: y :: HNil =>
        Waypoint(x, y)
    },
    {
      case Waypoint(x, y) =>
        x :: y :: HNil
    }
  )

  // A leading Boolean flags whether coordinates follow
  // (present = create/update the waypoint; absent = clear it).
  implicit val codec : Codec[OrbitalStrikeWaypointMessage] = (
    ("guid" | PlanetSideGUID.codec) ::
      optional(bool, coords_value)
    ).xmap[OrbitalStrikeWaypointMessage] (
    {
      case u :: coords :: HNil =>
        OrbitalStrikeWaypointMessage(u, coords)
    },
    {
      case OrbitalStrikeWaypointMessage(u, coords) =>
        u :: coords :: HNil
    }
  )
}
| Fate-JH/PSF-Server | common/src/main/scala/net/psforever/packet/game/OrbitalStrikeWaypointMessage.scala | Scala | gpl-3.0 | 3,760 |
package lila.common
import org.specs2.mutable.Specification
class MultiKeyMapTest extends Specification {

  // Test value type keyed two ways: `a` serves as key1, `b` as key2.
  case class V(a: Int, b: Int)

  // The tests cover values/updated/removed/key1s, so the describe label
  // names the whole type rather than the single `removed` method.
  "MultiKeyMap" should {

    // Map under test, built from one entry; `_.a` extracts key1, `_.b` key2.
    val m = MultiKeyMap(Set(V(1, 100)))(_.a, _.b)

    "have entries" in {
      // must_== yields a proper specs2 Result with a diff on failure; the
      // previous bare `==` only produced a Boolean with no diagnostics.
      m.values.toSet must_== Set(V(1, 100))
    }
    "add a new entry" in {
      m.updated(V(2, 200)).values.toSet must_== Set(V(1, 100), V(2, 200))
    }
    "replace an entry" in {
      // V(1, 200) shares key1 = 1 with the existing entry, so it replaces it.
      m.updated(V(1, 200)).values.toSet must_== Set(V(1, 200))
    }
    "remove empty entries" in {
      // Removing nothing leaves the map unchanged.
      m.removed(Set.empty[V]).values.toSet must_== m.values.toSet
    }
    "remove entries" in {
      m.removed(Set(V(1, 100))).values.toSet must_== Set.empty
    }
    "expose keys" in {
      m.key1s.toSet must_== Set(1)
    }
  }
}
| luanlv/lila | modules/common/src/test/MultiKeyMapTest.scala | Scala | mit | 766 |
/*
* Copyright 2015 - 2016 Red Bull Media House GmbH <http://www.redbullmediahouse.com> - all rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rbmhtechnology.eventuate
import com.typesafe.config._
/**
 * Shared Cassandra event-log configuration for multi-node tests.
 */
object MultiNodeConfigCassandra {
  // Settings merged into each node's config:
  //  - default-port 9142: the embedded/test Cassandra instance's CQL port
  //  - index-update-limit 3: presumably forces frequent index updates during
  //    tests — confirm against the Cassandra event log settings
  //  - table-prefix "mnt": isolates multi-node-test tables by name
  // Note: the former `s` interpolator was dropped — the string contains no
  // interpolations, so it was needless.
  val providerConfig: Config = ConfigFactory.parseString(
    """
      |eventuate.log.cassandra.default-port = 9142
      |eventuate.log.cassandra.index-update-limit = 3
      |eventuate.log.cassandra.table-prefix = mnt
    """.stripMargin)
}
| RBMHTechnology/eventuate | eventuate-log-cassandra/src/multi-jvm/scala/com/rbmhtechnology/eventuate/MultiNodeConfigCassandra.scala | Scala | apache-2.0 | 1,026 |
package nasa.nccs.streaming
//
//import nasa.nccs.cdapi.tensors.CDFloatArray
//import nasa.nccs.utilities.Loggable
//import org.apache.spark.SparkConf
//import org.apache.spark.storage.StorageLevel
//import org.apache.spark.streaming.receiver.Receiver
//import ucar.nc2.Variable
//import ucar.nc2.dataset.NetcdfDataset
//import ucar.ma2
//import org.apache.spark.streaming._
//import org.apache.spark.streaming.dstream.ReceiverInputDStream
//class SectionFeeder( section: ma2.Section, nRecords: Int, recordSize: Int = 1, storageLevel: StorageLevel = StorageLevel.MEMORY_ONLY )
// extends Receiver[String](storageLevel) {
// def onStart() {
// new Thread("Feeder Thread") {
// override def run() { feedSections() }
// }.start()
// }
//
// def onStop() { }
//
// private def feedSections() = {
// var startIndex = section.getOrigin(0)
// val endIndex = startIndex + section.getShape(0)
// while( ( startIndex < endIndex ) ) {
// val sections = for( iRecord <- (0 until nRecords); recStart = startIndex + iRecord * recordSize; if recStart < endIndex ) yield {
// val recLast = Math.min( recStart + recordSize - 1, endIndex -1 )
// new ma2.Section(section).replaceRange( 0, new ma2.Range( recStart, recLast ) ).toString
// }
// store( sections.toIterator )
// startIndex = startIndex + nRecords * recordSize
// }
// }
//}
//
//class SectionReader( val ncmlFile: String, val varName: String ) extends Serializable with Loggable {
// def read( sectionSpec: String ): CDFloatArray = {
// try {
//      val dataset = NetcdfDataset.openDataset( ncmlFile, true, -1, null, null)
//      Option(dataset.findVariable(varName)) match {
// case None => throw new IllegalStateException("Variable '%s' was not loaded".format(varName))
// case Some(ncVar) => CDFloatArray.factory( ncVar.read( new ma2.Section(sectionSpec) ), Float.NaN )
// }
// } catch {
// case e: java.io.IOException =>
// logger.error("Couldn't open dataset %s".format(ncmlFile))
// throw e
// case ex: Exception =>
// logger.error("Something went wrong while reading %s".format(ncmlFile))
// throw ex
// }
// }
//}
//class streamingTest extends Loggable {
//
// def main(args: Array[String]): Unit = {
// val ncmlFile = "/att/gpfsfs/ffs2004/ppl/tpmaxwel/cdas/cache/collections/NCML/ncml.xml"
// val varName = "T"
// val nRecords = 8
// val recordSize = 1
// val conf = new SparkConf().setMaster(s"local[$nRecords]").setAppName("StreamingTest")
// val ssc = new StreamingContext( conf, Milliseconds(1000) )
// val section = new ma2.Section( Array(0,10,0,0), Array(53668,1,361,576) )
// val sectionsStream: ReceiverInputDStream[String] = ssc.receiverStream(new SectionFeeder( section, nRecords, recordSize ) )
// val sectionReader = new SectionReader( ncmlFile, varName )
// val inputStream = sectionsStream.map( sectionSpec => sectionReader.read(sectionSpec) )
// val maxStream = inputStream.map( data => data.max() )
// maxStream.print(nRecords)
// }
//}
//
//
| nasa-nccs-cds/CDAS2 | src/main/scala/nasa/nccs/streaming/receiver.scala | Scala | gpl-2.0 | 3,202 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets matching specific criteria, giving a basic overview of the dataset's content without deeper analysis.