| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.parser
import java.sql.{Date, Timestamp}
import org.apache.spark.sql.catalyst.FunctionIdentifier
import org.apache.spark.sql.catalyst.analysis.{UnresolvedAttribute, _}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.CalendarInterval
/**
* Test basic expression parsing. If a type of expression is supported it should be tested here.
*
 * Please note that some of the tested expressions don't have to be sound expressions; only their
* structure needs to be valid. Unsound expressions should be caught by the Analyzer or
* CheckAnalysis classes.
*/
class ExpressionParserSuite extends PlanTest {
import CatalystSqlParser._
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
def assertEqual(sqlCommand: String, e: Expression): Unit = {
compareExpressions(parseExpression(sqlCommand), e)
}
def intercept(sqlCommand: String, messages: String*): Unit = {
val e = intercept[ParseException](parseExpression(sqlCommand))
messages.foreach { message =>
assert(e.message.contains(message))
}
}
test("star expressions") {
// Global Star
assertEqual("*", UnresolvedStar(None))
// Targeted Star
assertEqual("a.b.*", UnresolvedStar(Option(Seq("a", "b"))))
}
// NamedExpression (Alias/Multialias)
test("named expressions") {
// No Alias
val r0 = 'a
assertEqual("a", r0)
// Single Alias.
val r1 = 'a as "b"
assertEqual("a as b", r1)
assertEqual("a b", r1)
// Multi-Alias
assertEqual("a as (b, c)", MultiAlias('a, Seq("b", "c")))
assertEqual("a() (b, c)", MultiAlias('a.function(), Seq("b", "c")))
// Numeric literals without a space between the literal qualifier and the alias should not be
// interpreted as such; an unresolved reference should be returned instead.
// TODO add the JIRA-ticket number.
assertEqual("1SL", Symbol("1SL"))
// Aliased star is allowed.
assertEqual("a.* b", UnresolvedStar(Option(Seq("a"))) as 'b)
}
test("binary logical expressions") {
// And
assertEqual("a and b", 'a && 'b)
// Or
assertEqual("a or b", 'a || 'b)
// Combination And/Or check precedence
assertEqual("a and b or c and d", ('a && 'b) || ('c && 'd))
assertEqual("a or b or c and d", 'a || 'b || ('c && 'd))
// Multiple AND/OR get converted into a balanced tree
assertEqual("a or b or c or d or e or f", (('a || 'b) || 'c) || (('d || 'e) || 'f))
assertEqual("a and b and c and d and e and f", (('a && 'b) && 'c) && (('d && 'e) && 'f))
}
test("long binary logical expressions") {
def testVeryBinaryExpression(op: String, clazz: Class[_]): Unit = {
val sql = (1 to 1000).map(x => s"$x == $x").mkString(op)
val e = parseExpression(sql)
assert(e.collect { case _: EqualTo => true }.size === 1000)
assert(e.collect { case x if clazz.isInstance(x) => true }.size === 999)
}
testVeryBinaryExpression(" AND ", classOf[And])
testVeryBinaryExpression(" OR ", classOf[Or])
}
test("not expressions") {
assertEqual("not a", !'a)
assertEqual("!a", !'a)
assertEqual("not true > true", Not(GreaterThan(true, true)))
}
test("exists expression") {
assertEqual(
"exists (select 1 from b where b.x = a.x)",
Exists(table("b").where(Symbol("b.x") === Symbol("a.x")).select(1)))
}
test("comparison expressions") {
assertEqual("a = b", 'a === 'b)
assertEqual("a == b", 'a === 'b)
assertEqual("a <=> b", 'a <=> 'b)
assertEqual("a <> b", 'a =!= 'b)
assertEqual("a != b", 'a =!= 'b)
assertEqual("a < b", 'a < 'b)
assertEqual("a <= b", 'a <= 'b)
assertEqual("a !> b", 'a <= 'b)
assertEqual("a > b", 'a > 'b)
assertEqual("a >= b", 'a >= 'b)
assertEqual("a !< b", 'a >= 'b)
}
test("between expressions") {
assertEqual("a between b and c", 'a >= 'b && 'a <= 'c)
assertEqual("a not between b and c", !('a >= 'b && 'a <= 'c))
}
test("in expressions") {
assertEqual("a in (b, c, d)", 'a in ('b, 'c, 'd))
assertEqual("a not in (b, c, d)", !('a in ('b, 'c, 'd)))
}
test("in sub-query") {
assertEqual(
"a in (select b from c)",
In('a, Seq(ListQuery(table("c").select('b)))))
}
test("like expressions") {
assertEqual("a like 'pattern%'", 'a like "pattern%")
assertEqual("a not like 'pattern%'", !('a like "pattern%"))
assertEqual("a rlike 'pattern%'", 'a rlike "pattern%")
assertEqual("a not rlike 'pattern%'", !('a rlike "pattern%"))
assertEqual("a regexp 'pattern%'", 'a rlike "pattern%")
assertEqual("a not regexp 'pattern%'", !('a rlike "pattern%"))
}
test("is null expressions") {
assertEqual("a is null", 'a.isNull)
assertEqual("a is not null", 'a.isNotNull)
assertEqual("a = b is null", ('a === 'b).isNull)
assertEqual("a = b is not null", ('a === 'b).isNotNull)
}
test("binary arithmetic expressions") {
// Simple operations
assertEqual("a * b", 'a * 'b)
assertEqual("a / b", 'a / 'b)
assertEqual("a DIV b", ('a / 'b).cast(LongType))
assertEqual("a % b", 'a % 'b)
assertEqual("a + b", 'a + 'b)
assertEqual("a - b", 'a - 'b)
assertEqual("a & b", 'a & 'b)
assertEqual("a ^ b", 'a ^ 'b)
assertEqual("a | b", 'a | 'b)
// Check precedences
assertEqual(
"a * t | b ^ c & d - e + f % g DIV h / i * k",
'a * 't | ('b ^ ('c & ('d - 'e + (('f % 'g / 'h).cast(LongType) / 'i * 'k)))))
}
test("unary arithmetic expressions") {
assertEqual("+a", 'a)
assertEqual("-a", -'a)
assertEqual("~a", ~'a)
assertEqual("-+~~a", -(~(~'a)))
}
test("cast expressions") {
// Note that DataType parsing is tested elsewhere.
assertEqual("cast(a as int)", 'a.cast(IntegerType))
assertEqual("cast(a as timestamp)", 'a.cast(TimestampType))
assertEqual("cast(a as array<int>)", 'a.cast(ArrayType(IntegerType)))
assertEqual("cast(cast(a as int) as long)", 'a.cast(IntegerType).cast(LongType))
}
test("function expressions") {
assertEqual("foo()", 'foo.function())
assertEqual("foo.bar()",
UnresolvedFunction(FunctionIdentifier("bar", Some("foo")), Seq.empty, isDistinct = false))
assertEqual("foo(*)", 'foo.function(star()))
assertEqual("count(*)", 'count.function(1))
assertEqual("foo(a, b)", 'foo.function('a, 'b))
assertEqual("foo(all a, b)", 'foo.function('a, 'b))
assertEqual("foo(distinct a, b)", 'foo.distinctFunction('a, 'b))
assertEqual("grouping(distinct a, b)", 'grouping.distinctFunction('a, 'b))
assertEqual("`select`(all a, b)", 'select.function('a, 'b))
}
test("window function expressions") {
val func = 'foo.function(star())
def windowed(
partitioning: Seq[Expression] = Seq.empty,
ordering: Seq[SortOrder] = Seq.empty,
frame: WindowFrame = UnspecifiedFrame): Expression = {
WindowExpression(func, WindowSpecDefinition(partitioning, ordering, frame))
}
// Basic window testing.
assertEqual("foo(*) over w1", UnresolvedWindowExpression(func, WindowSpecReference("w1")))
assertEqual("foo(*) over ()", windowed())
assertEqual("foo(*) over (partition by a, b)", windowed(Seq('a, 'b)))
assertEqual("foo(*) over (distribute by a, b)", windowed(Seq('a, 'b)))
assertEqual("foo(*) over (cluster by a, b)", windowed(Seq('a, 'b)))
assertEqual("foo(*) over (order by a desc, b asc)", windowed(Seq.empty, Seq('a.desc, 'b.asc )))
assertEqual("foo(*) over (sort by a desc, b asc)", windowed(Seq.empty, Seq('a.desc, 'b.asc )))
assertEqual("foo(*) over (partition by a, b order by c)", windowed(Seq('a, 'b), Seq('c.asc)))
assertEqual("foo(*) over (distribute by a, b sort by c)", windowed(Seq('a, 'b), Seq('c.asc)))
// Test use of expressions in window functions.
assertEqual(
"sum(product + 1) over (partition by ((product) + (1)) order by 2)",
WindowExpression('sum.function('product + 1),
WindowSpecDefinition(Seq('product + 1), Seq(Literal(2).asc), UnspecifiedFrame)))
assertEqual(
"sum(product + 1) over (partition by ((product / 2) + 1) order by 2)",
WindowExpression('sum.function('product + 1),
WindowSpecDefinition(Seq('product / 2 + 1), Seq(Literal(2).asc), UnspecifiedFrame)))
// Range/Row
val frameTypes = Seq(("rows", RowFrame), ("range", RangeFrame))
val boundaries = Seq(
("10 preceding", ValuePreceding(10), CurrentRow),
("3 + 1 following", ValueFollowing(4), CurrentRow), // Will fail during analysis
("unbounded preceding", UnboundedPreceding, CurrentRow),
("unbounded following", UnboundedFollowing, CurrentRow), // Will fail during analysis
("between unbounded preceding and current row", UnboundedPreceding, CurrentRow),
("between unbounded preceding and unbounded following",
UnboundedPreceding, UnboundedFollowing),
("between 10 preceding and current row", ValuePreceding(10), CurrentRow),
("between current row and 5 following", CurrentRow, ValueFollowing(5)),
("between 10 preceding and 5 following", ValuePreceding(10), ValueFollowing(5))
)
frameTypes.foreach {
case (frameTypeSql, frameType) =>
boundaries.foreach {
case (boundarySql, begin, end) =>
val query = s"foo(*) over (partition by a order by b $frameTypeSql $boundarySql)"
val expr = windowed(Seq('a), Seq('b.asc), SpecifiedWindowFrame(frameType, begin, end))
assertEqual(query, expr)
}
}
// We cannot use non integer constants.
intercept("foo(*) over (partition by a order by b rows 10.0 preceding)",
"Frame bound value must be a constant integer.")
// We cannot use an arbitrary expression.
intercept("foo(*) over (partition by a order by b rows exp(b) preceding)",
"Frame bound value must be a constant integer.")
}
test("row constructor") {
// Note that '(a)' will be interpreted as a nested expression.
assertEqual("(a, b)", CreateStruct(Seq('a, 'b)))
assertEqual("(a, b, c)", CreateStruct(Seq('a, 'b, 'c)))
}
test("scalar sub-query") {
assertEqual(
"(select max(val) from tbl) > current",
ScalarSubquery(table("tbl").select('max.function('val))) > 'current)
assertEqual(
"a = (select b from s)",
'a === ScalarSubquery(table("s").select('b)))
}
test("case when") {
assertEqual("case a when 1 then b when 2 then c else d end",
CaseKeyWhen('a, Seq(1, 'b, 2, 'c, 'd)))
assertEqual("case (a or b) when true then c when false then d else e end",
CaseKeyWhen('a || 'b, Seq(true, 'c, false, 'd, 'e)))
assertEqual("case 'a'='a' when true then 1 end",
CaseKeyWhen("a" === "a", Seq(true, 1)))
assertEqual("case when a = 1 then b when a = 2 then c else d end",
CaseWhen(Seq(('a === 1, 'b.expr), ('a === 2, 'c.expr)), 'd))
assertEqual("case when (1) + case when a > b then c else d end then f else g end",
CaseWhen(Seq((Literal(1) + CaseWhen(Seq(('a > 'b, 'c.expr)), 'd.expr), 'f.expr)), 'g))
}
test("dereference") {
assertEqual("a.b", UnresolvedAttribute("a.b"))
assertEqual("`select`.b", UnresolvedAttribute("select.b"))
assertEqual("(a + b).b", ('a + 'b).getField("b")) // This will fail analysis.
assertEqual("struct(a, b).b", 'struct.function('a, 'b).getField("b"))
}
test("reference") {
// Regular
assertEqual("a", 'a)
// Starting with a digit.
assertEqual("1a", Symbol("1a"))
// Quoted using a keyword.
assertEqual("`select`", 'select)
// Unquoted using an unreserved keyword.
assertEqual("columns", 'columns)
}
test("subscript") {
assertEqual("a[b]", 'a.getItem('b))
assertEqual("a[1 + 1]", 'a.getItem(Literal(1) + 1))
assertEqual("`c`.a[b]", UnresolvedAttribute("c.a").getItem('b))
}
test("parenthesis") {
assertEqual("(a)", 'a)
assertEqual("r * (a + b)", 'r * ('a + 'b))
}
test("type constructors") {
// Dates.
assertEqual("dAte '2016-03-11'", Literal(Date.valueOf("2016-03-11")))
intercept("DAtE 'mar 11 2016'")
// Timestamps.
assertEqual("tImEstAmp '2016-03-11 20:54:00.000'",
Literal(Timestamp.valueOf("2016-03-11 20:54:00.000")))
intercept("timestamP '2016-33-11 20:54:00.000'")
// Binary.
assertEqual("X'A'", Literal(Array(0x0a).map(_.toByte)))
assertEqual("x'A10C'", Literal(Array(0xa1, 0x0c).map(_.toByte)))
intercept("x'A1OC'")
// Unsupported datatype.
intercept("GEO '(10,-6)'", "Literals of type 'GEO' are currently not supported.")
}
test("literals") {
def testDecimal(value: String): Unit = {
assertEqual(value, Literal(BigDecimal(value).underlying))
}
// NULL
assertEqual("null", Literal(null))
// Boolean
assertEqual("trUe", Literal(true))
assertEqual("False", Literal(false))
// Integral should have the narrowest possible type
assertEqual("787324", Literal(787324))
assertEqual("7873247234798249234", Literal(7873247234798249234L))
testDecimal("78732472347982492793712334")
// Decimal
testDecimal("7873247234798249279371.2334")
// Scientific Decimal
testDecimal("9.0e1")
testDecimal(".9e+2")
testDecimal("0.9e+2")
testDecimal("900e-1")
testDecimal("900.0E-1")
testDecimal("9.e+1")
intercept(".e3")
// Tiny Int Literal
assertEqual("10Y", Literal(10.toByte))
intercept("-1000Y", s"does not fit in range [${Byte.MinValue}, ${Byte.MaxValue}]")
// Small Int Literal
assertEqual("10S", Literal(10.toShort))
intercept("40000S", s"does not fit in range [${Short.MinValue}, ${Short.MaxValue}]")
// Long Int Literal
assertEqual("10L", Literal(10L))
intercept("78732472347982492793712334L",
s"does not fit in range [${Long.MinValue}, ${Long.MaxValue}]")
// Double Literal
assertEqual("10.0D", Literal(10.0D))
intercept("-1.8E308D", s"does not fit in range")
intercept("1.8E308D", s"does not fit in range")
// BigDecimal Literal
assertEqual("90912830918230182310293801923652346786BD",
Literal(BigDecimal("90912830918230182310293801923652346786").underlying()))
assertEqual("123.0E-28BD", Literal(BigDecimal("123.0E-28").underlying()))
assertEqual("123.08BD", Literal(BigDecimal("123.08").underlying()))
intercept("1.20E-38BD", "DecimalType can only support precision up to 38")
}
test("strings") {
// Single Strings.
assertEqual("\\"hello\\"", "hello")
assertEqual("'hello'", "hello")
// Multi-Strings.
assertEqual("\\"hello\\" 'world'", "helloworld")
assertEqual("'hello' \\" \\" 'world'", "hello world")
// 'LIKE' string literals. Notice that an escaped '%' is the same as an escaped '\\' and a
// regular '%'; to get the correct result you need to add another escaped '\\'.
// TODO figure out if we shouldn't change the ParseUtils.unescapeSQLString method?
assertEqual("'pattern%'", "pattern%")
assertEqual("'no-pattern\\\\%'", "no-pattern\\\\%")
assertEqual("'pattern\\\\\\\\%'", "pattern\\\\%")
assertEqual("'pattern\\\\\\\\\\\\%'", "pattern\\\\\\\\%")
// Escaped characters.
// See: http://dev.mysql.com/doc/refman/5.7/en/string-literals.html
assertEqual("'\\\\0'", "\\u0000") // ASCII NUL (X'00')
assertEqual("'\\\\''", "\\'") // Single quote
assertEqual("'\\\\\\"'", "\\"") // Double quote
assertEqual("'\\\\b'", "\\b") // Backspace
assertEqual("'\\\\n'", "\\n") // Newline
assertEqual("'\\\\r'", "\\r") // Carriage return
assertEqual("'\\\\t'", "\\t") // Tab character
assertEqual("'\\\\Z'", "\\u001A") // ASCII 26 - CTRL + Z (EOF on windows)
// Octals
assertEqual("'\\\\110\\\\145\\\\154\\\\154\\\\157\\\\041'", "Hello!")
// Unicode
assertEqual("'\\\\u0057\\\\u006F\\\\u0072\\\\u006C\\\\u0064\\\\u0020\\\\u003A\\\\u0029'", "World :)")
}
test("intervals") {
def intervalLiteral(u: String, s: String): Literal = {
Literal(CalendarInterval.fromSingleUnitString(u, s))
}
// Empty interval statement
intercept("interval", "at least one time unit should be given for interval literal")
// Single Intervals.
val units = Seq(
"year",
"month",
"week",
"day",
"hour",
"minute",
"second",
"millisecond",
"microsecond")
val forms = Seq("", "s")
val values = Seq("0", "10", "-7", "21")
units.foreach { unit =>
forms.foreach { form =>
values.foreach { value =>
val expected = intervalLiteral(unit, value)
assertEqual(s"interval $value $unit$form", expected)
assertEqual(s"interval '$value' $unit$form", expected)
}
}
}
// Hive nanosecond notation.
assertEqual("interval 13.123456789 seconds", intervalLiteral("second", "13.123456789"))
assertEqual("interval -13.123456789 second", intervalLiteral("second", "-13.123456789"))
// Non Existing unit
intercept("interval 10 nanoseconds", "No interval can be constructed")
// Year-Month intervals.
val yearMonthValues = Seq("123-10", "496-0", "-2-3", "-123-0")
yearMonthValues.foreach { value =>
val result = Literal(CalendarInterval.fromYearMonthString(value))
assertEqual(s"interval '$value' year to month", result)
}
// Day-Time intervals.
val dayTimeValues = Seq(
"99 11:22:33.123456789",
"-99 11:22:33.123456789",
"10 9:8:7.123456789",
"1 0:0:0",
"-1 0:0:0",
"1 0:0:1")
dayTimeValues.foreach { value =>
val result = Literal(CalendarInterval.fromDayTimeString(value))
assertEqual(s"interval '$value' day to second", result)
}
// Unknown FROM TO intervals
intercept("interval 10 month to second", "Intervals FROM month TO second are not supported.")
// Composed intervals.
assertEqual(
"interval 3 months 22 seconds 1 millisecond",
Literal(new CalendarInterval(3, 22001000L)))
assertEqual(
"interval 3 years '-1-10' year to month 3 weeks '1 0:0:2' day to second",
Literal(new CalendarInterval(14,
22 * CalendarInterval.MICROS_PER_DAY + 2 * CalendarInterval.MICROS_PER_SECOND)))
}
test("composed expressions") {
assertEqual("1 + r.r As q", (Literal(1) + UnresolvedAttribute("r.r")).as("q"))
assertEqual("1 - f('o', o(bar))", Literal(1) - 'f.function("o", 'o.function('bar)))
intercept("1 - f('o', o(bar)) hello * world", "mismatched input '*'")
}
test("current date/timestamp braceless expressions") {
assertEqual("current_date", CurrentDate())
assertEqual("current_timestamp", CurrentTimestamp())
}
test("SPARK-17364, fully qualified column name which starts with number") {
assertEqual("123_", UnresolvedAttribute("123_"))
assertEqual("1a.123_", UnresolvedAttribute("1a.123_"))
// ".123" should not be treated as token of type DECIMAL_VALUE
assertEqual("a.123A", UnresolvedAttribute("a.123A"))
// ".123E3" should not be treated as token of type SCIENTIFIC_DECIMAL_VALUE
assertEqual("a.123E3_column", UnresolvedAttribute("a.123E3_column"))
// ".123D" should not be treated as token of type DOUBLE_LITERAL
assertEqual("a.123D_column", UnresolvedAttribute("a.123D_column"))
// ".123BD" should not be treated as token of type BIGDECIMAL_LITERAL
assertEqual("a.123BD_column", UnresolvedAttribute("a.123BD_column"))
}
test("SPARK-17832 function identifier contains backtick") {
val complexName = FunctionIdentifier("`ba`r", Some("`fo`o"))
assertEqual(complexName.quotedString, UnresolvedAttribute("`fo`o.`ba`r"))
intercept(complexName.unquotedString, "mismatched input")
// Function identifiers that contain consecutive backticks should be handled correctly.
val complexName2 = FunctionIdentifier("ba``r", Some("fo``o"))
assertEqual(complexName2.quotedString, UnresolvedAttribute("fo``o.ba``r"))
}
}
| u2009cf/spark-radar | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala | Scala | apache-2.0 | 20,815 |
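A minimal sketch of the pattern the suite above exercises, assuming only the Spark Catalyst module shown there on the classpath: parse a SQL snippet into an unresolved expression tree and build the expected tree through the Catalyst DSL (the column name and literal are illustrative).

import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.catalyst.dsl.expressions._

object ParseExpressionSketch {
  def main(args: Array[String]): Unit = {
    // Parsing only builds an unresolved tree; resolution happens later in the Analyzer.
    val parsed = CatalystSqlParser.parseExpression("a + 1")
    val viaDsl = 'a + 1 // the Symbol-based DSL used throughout the suite above
    println(parsed)     // e.g. ('a + 1)
    println(viaDsl)
  }
}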
class Test private[doesnotexist]()
| yusuke2255/dotty | tests/untried/neg/t7388.scala | Scala | bsd-3-clause | 35 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver.server
import java.util.{Map => JMap}
import scala.collection.mutable.Map
import org.apache.hive.service.cli._
import org.apache.hive.service.cli.operation.{ExecuteStatementOperation, Operation, OperationManager}
import org.apache.hive.service.cli.session.HiveSession
import org.apache.spark.Logging
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.hive.thriftserver.{SparkExecuteStatementOperation, ReflectionUtils}
/**
* Executes queries using Spark SQL, and maintains a list of handles to active queries.
*/
private[thriftserver] class SparkSQLOperationManager()
extends OperationManager with Logging {
val handleToOperation = ReflectionUtils
.getSuperField[JMap[OperationHandle, Operation]](this, "handleToOperation")
val sessionToActivePool = Map[SessionHandle, String]()
val sessionToContexts = Map[SessionHandle, HiveContext]()
override def newExecuteStatementOperation(
parentSession: HiveSession,
statement: String,
confOverlay: JMap[String, String],
async: Boolean): ExecuteStatementOperation = synchronized {
val hiveContext = sessionToContexts(parentSession.getSessionHandle)
val runInBackground = async && hiveContext.hiveThriftServerAsync
val operation = new SparkExecuteStatementOperation(parentSession, statement, confOverlay,
runInBackground)(hiveContext, sessionToActivePool)
handleToOperation.put(operation.getHandle, operation)
logDebug(s"Created Operation for $statement with session=$parentSession, " +
s"runInBackground=$runInBackground")
operation
}
}
| chenc10/Spark-PAF | sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/server/SparkSQLOperationManager.scala | Scala | apache-2.0 | 2,433 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio
import com.spotify.scio.ContextAndArgs.{ArgsParser, PipelineOptionsParser, UsageOrHelpException}
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import scala.util.{Failure, Success, Try}
import org.apache.beam.sdk.options.PipelineOptions
import org.apache.beam.sdk.options.Validation.Required
class ArgsTest extends AnyFlatSpec with Matchers {
"Args" should "support String" in {
Args("--str=value".split(" "))("str") shouldBe "value"
}
it should "support getOrElse" in {
Args("--key1=value1".split(" "))
.getOrElse("key2", "value2") shouldBe "value2"
}
it should "support list" in {
Args("--key=value1 --key=value2".split(" "))
.list("key") shouldBe List("value1", "value2")
}
it should "support optional" in {
val args = Args("--key1=value1".split(" "))
args.optional("key1") shouldBe Some("value1")
args.optional("key2") shouldBe None
}
it should "support required" in {
Args("--key=value".split(" ")).required("key") shouldBe "value"
}
it should "fail required with missing value" in {
the[IllegalArgumentException] thrownBy {
Args(Array.empty).required("key")
} should have message "Missing value for property 'key'"
}
it should "fail required with multiple values" in {
the[IllegalArgumentException] thrownBy {
Args("--key=value1 --key=value2".split(" ")).required("key")
} should have message "Multiple values for property 'key'"
}
it should "support int" in {
val args = Args("--key1=10".split(" "))
args.int("key1") shouldBe 10
args.int("key2", 20) shouldBe 20
}
it should "support long" in {
val args = Args("--key1=10".split(" "))
args.long("key1") shouldBe 10L
args.long("key2", 20L) shouldBe 20L
}
it should "support float" in {
val args = Args("--key1=1.5".split(" "))
args.float("key1") shouldBe 1.5f
args.float("key2", 2.5f) shouldBe 2.5f
}
it should "support double" in {
val args = Args("--key1=1.5".split(" "))
args.double("key1") shouldBe 1.5
args.double("key2", 2.5) shouldBe 2.5
}
it should "support boolean" in {
val args = Args("--key1=true --key2=false --key3".split(" "))
args.boolean("key1") shouldBe true
args.boolean("key2") shouldBe false
args.boolean("key3") shouldBe true
args.boolean("key4", true) shouldBe true
args.boolean("key5", false) shouldBe false
}
it should "support quotes" in {
def list(s: String): List[String] = Args(Array(s"--list=$s")).list("list")
list("a,b,c") shouldBe List("a", "b", "c")
list(",a,b") shouldBe List("", "a", "b")
list("a,,b") shouldBe List("a", "", "b")
list("a,b,") shouldBe List("a", "b", "")
list("\\"a1,a2\\",b,c") shouldBe List("\\"a1,a2\\"", "b", "c")
list("a,\\"b1,b2\\",c") shouldBe List("a", "\\"b1,b2\\"", "c")
list("a,b,\\"c1,c2\\"") shouldBe List("a", "b", "\\"c1,c2\\"")
list("a,\\"b1, b2\\",c") shouldBe List("a", "\\"b1, b2\\"", "c")
list("a,b0 \\"b1, b2\\" b3,c") shouldBe List("a", "b0 \\"b1, b2\\" b3", "c")
}
it should "support toString" in {
val args =
Args(Array("--key1=value1", "--key2=value2", "--key2=value3", "--key3"))
args.toString shouldBe "Args(--key1=value1, --key2=[value2, value3], --key3=true)"
}
trait Options extends PipelineOptions {
@Required
def getInput: String
def setInput(input: String): Unit
@Required
def getOutput: String
def setOutput(output: String): Unit
}
"PipelineOptionsParser" should "parse" in {
val rawArgs = Array("--input=value1", "--output=value2")
val result = PipelineOptionsParser[Options]().parse(rawArgs)
result should be a Symbol("success")
}
it should "fail on missing args" in {
val rawArgs = Array("--input=value1")
val result = PipelineOptionsParser[Options]().parse(rawArgs)
result should be a Symbol("failure")
}
it should "fail on unused args" in {
val rawArgs = Array("--input=value1", "--output=value2", "--unused")
val result = PipelineOptionsParser[Options]().parse(rawArgs)
result should be a Symbol("failure")
}
"ContextAndArgs" should "rethrow parser exception" in {
class FailingParserException extends Exception
class FailingParser extends ArgsParser[Try] {
override type ArgsType = Unit
override def parse(args: Array[String]): Try[Result] = Failure(new FailingParserException)
}
assertThrows[FailingParserException] {
ContextAndArgs.withParser(new FailingParser)(Array())
}
}
it should "throw UsageOrHelpException on usage or help request" in {
class UsageOrHelpParser extends ArgsParser[Try] {
override type ArgsType = Unit
override def parse(args: Array[String]): Try[Result] = Success(Left("This is usage message"))
}
assertThrows[UsageOrHelpException] {
ContextAndArgs.withParser(new UsageOrHelpParser)(Array())
}
}
}
| spotify/scio | scio-test/src/test/scala/com/spotify/scio/ArgsTest.scala | Scala | apache-2.0 | 5,548 |
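The suite above exercises Args in isolation; in a pipeline the same parsing is usually reached through ContextAndArgs. A hedged sketch follows: the job name, option names, and default values are illustrative, and sc.run() assumes a scio version where that is the launch call.

import com.spotify.scio.ContextAndArgs

object ArgsUsageSketch {
  def main(cmdlineArgs: Array[String]): Unit = {
    // Splits Beam PipelineOptions from application Args.
    val (sc, args) = ContextAndArgs(cmdlineArgs)
    val greeting = args.getOrElse("greeting", "hello") // optional value with a default
    val times    = args.int("times", 1)                // typed accessor, as tested above
    sc.parallelize(Seq.fill(times)(greeting))
      .saveAsTextFile(args("output"))                  // args("output") is required
    sc.run()
  }
}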
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.blueeyes.json.serialization
import quasar.blueeyes._, json._
import json._
import java.util.{ Date => JDate }
import java.math.MathContext
import java.time.LocalDateTime
/** Decomposers for all basic types.
*/
trait DefaultDecomposers {
implicit val JValueDecomposer: Decomposer[JValue] = new Decomposer[JValue] {
def decompose(tvalue: JValue): JValue = tvalue
}
implicit val StringDecomposer: Decomposer[String] = new Decomposer[String] {
def decompose(tvalue: String): JValue = JString(tvalue)
}
implicit val BooleanDecomposer: Decomposer[Boolean] = new Decomposer[Boolean] {
def decompose(tvalue: Boolean): JValue = JBool(tvalue)
}
implicit val IntDecomposer: Decomposer[Int] = new Decomposer[Int] {
def decompose(tvalue: Int): JValue = JNum(BigDecimal(tvalue, MathContext.UNLIMITED))
}
implicit val LongDecomposer: Decomposer[Long] = new Decomposer[Long] {
def decompose(tvalue: Long): JValue = JNum(BigDecimal(tvalue, MathContext.UNLIMITED))
}
implicit val FloatDecomposer: Decomposer[Float] = new Decomposer[Float] {
def decompose(tvalue: Float): JValue = JNum(BigDecimal(tvalue, MathContext.UNLIMITED))
}
implicit val DoubleDecomposer: Decomposer[Double] = new Decomposer[Double] {
def decompose(tvalue: Double): JValue = JNum(BigDecimal(tvalue, MathContext.UNLIMITED))
}
implicit val BigDecimalDecomposer: Decomposer[BigDecimal] = new Decomposer[BigDecimal] {
def decompose(tvalue: BigDecimal): JValue = JNum(tvalue)
}
implicit val DateDecomposer: Decomposer[JDate] = new Decomposer[JDate] {
def decompose(date: JDate): JValue = JNum(BigDecimal(date.getTime, MathContext.UNLIMITED))
}
implicit def OptionDecomposer[T](implicit decomposer: Decomposer[T]): Decomposer[Option[T]] = new Decomposer[Option[T]] {
def decompose(tvalue: Option[T]): JValue = tvalue match {
case None => JNull
case Some(v) => decomposer.decompose(v)
}
}
implicit def Tuple2Decomposer[T1, T2](implicit decomposer1: Decomposer[T1], decomposer2: Decomposer[T2]): Decomposer[(T1, T2)] = new Decomposer[(T1, T2)] {
def decompose(tvalue: (T1, T2)) = JArray(decomposer1(tvalue._1) :: decomposer2(tvalue._2) :: Nil)
}
implicit def Tuple3Decomposer[T1, T2, T3](implicit decomposer1: Decomposer[T1],
decomposer2: Decomposer[T2],
decomposer3: Decomposer[T3]): Decomposer[(T1, T2, T3)] = new Decomposer[(T1, T2, T3)] {
def decompose(tvalue: (T1, T2, T3)) = JArray(decomposer1(tvalue._1) :: decomposer2(tvalue._2) :: decomposer3(tvalue._3) :: Nil)
}
implicit def Tuple4Decomposer[T1, T2, T3, T4](implicit decomposer1: Decomposer[T1],
decomposer2: Decomposer[T2],
decomposer3: Decomposer[T3],
decomposer4: Decomposer[T4]): Decomposer[(T1, T2, T3, T4)] = new Decomposer[(T1, T2, T3, T4)] {
def decompose(tvalue: (T1, T2, T3, T4)) =
JArray(decomposer1(tvalue._1) :: decomposer2(tvalue._2) :: decomposer3(tvalue._3) :: decomposer4(tvalue._4) :: Nil)
}
implicit def Tuple5Decomposer[T1, T2, T3, T4, T5](implicit decomposer1: Decomposer[T1],
decomposer2: Decomposer[T2],
decomposer3: Decomposer[T3],
decomposer4: Decomposer[T4],
decomposer5: Decomposer[T5]): Decomposer[(T1, T2, T3, T4, T5)] = new Decomposer[(T1, T2, T3, T4, T5)] {
def decompose(tvalue: (T1, T2, T3, T4, T5)) =
JArray(decomposer1(tvalue._1) :: decomposer2(tvalue._2) :: decomposer3(tvalue._3) :: decomposer4(tvalue._4) :: decomposer5(tvalue._5) :: Nil)
}
implicit def ArrayDecomposer[T](implicit elementDecomposer: Decomposer[T]): Decomposer[Array[T]] = new Decomposer[Array[T]] {
def decompose(tvalue: Array[T]): JValue = JArray(tvalue.toList.map(elementDecomposer.decompose _))
}
implicit def SetDecomposer[T](implicit elementDecomposer: Decomposer[T]): Decomposer[Set[T]] = new Decomposer[Set[T]] {
def decompose(tvalue: Set[T]): JValue = JArray(tvalue.toList.map(elementDecomposer.decompose _))
}
implicit def SeqDecomposer[T](implicit elementDecomposer: Decomposer[T]): Decomposer[Seq[T]] = new Decomposer[Seq[T]] {
def decompose(tvalue: Seq[T]): JValue = JArray(tvalue.toList.map(elementDecomposer.decompose _))
}
implicit def ListDecomposer[T: Decomposer]: Decomposer[List[T]] = SeqDecomposer[T].contramap((_: List[T]).toSeq)
implicit def VectorDecomposer[T: Decomposer]: Decomposer[Vector[T]] = SeqDecomposer[T].contramap((_: Vector[T]).toSeq)
implicit def MapDecomposer[K, V](implicit keyDecomposer: Decomposer[K], valueDecomposer: Decomposer[V]): Decomposer[Map[K, V]] = new Decomposer[Map[K, V]] {
def decompose(tvalue: Map[K, V]): JValue = SeqDecomposer(Tuple2Decomposer(keyDecomposer, valueDecomposer)).decompose(tvalue.toList)
}
implicit def StringMapDecomposer[V](implicit valueDecomposer: Decomposer[V]): Decomposer[Map[String, V]] = new Decomposer[Map[String, V]] {
def decompose(tvalue: Map[String, V]): JValue =
JObject(tvalue.keys.toList.map { key =>
JField(key, valueDecomposer(tvalue.apply(key)))
})
}
implicit val LocalDateTimeDecomposer = new Decomposer[LocalDateTime] {
def decompose(dateTime: LocalDateTime): JValue = JNum(dateTime.getMillis)
}
}
object DefaultDecomposers extends DefaultDecomposers
| jedesah/Quasar | blueeyes/src/main/scala/quasar/blueeyes/json/serialization/DefaultDecomposers.scala | Scala | apache-2.0 | 6,259 |
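A hedged sketch of using the Decomposer typeclass above; the object name is hypothetical and the imports mirror the file's own package layout.

package quasar.blueeyes.json.serialization

import quasar.blueeyes.json._
import DefaultDecomposers._

object DecomposerSketch {
  def main(args: Array[String]): Unit = {
    // Summon decomposers derived from the implicits defined above.
    val single: JValue = implicitly[Decomposer[Option[Int]]].decompose(Some(42))           // JNum(42)
    val pair: JValue   = implicitly[Decomposer[(String, Boolean)]].decompose(("ok", true)) // JArray of JString and JBool
    println(single)
    println(pair)
  }
}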
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.cassandra
import java.net.InetSocketAddress
import com.datastax.driver.core.Cluster
import com.datastax.driver.core.policies.LatencyAwarePolicy
import com.datastax.driver.core.policies.RoundRobinPolicy
import com.datastax.driver.core.policies.TokenAwarePolicy
import com.google.common.net.HostAndPort
import com.twitter.app.App
import com.twitter.finagle.stats.{DefaultStatsReceiver, StatsReceiver}
import com.twitter.util.Duration
import com.twitter.zipkin.storage.cassandra._
import com.twitter.zipkin.storage.cassandra.CassandraSpanStoreDefaults._
import org.twitter.zipkin.storage.cassandra.Repository
import org.twitter.zipkin.storage.cassandra.ZipkinRetryPolicy
import scala.collection.JavaConversions
import scala.collection.JavaConverters._
trait CassandraSpanStoreFactory {self: App =>
val keyspace = flag[String] ("zipkin.store.cassandra.keyspace", KeyspaceName, "name of the keyspace to use")
val cassandraDest = flag[String] ("zipkin.store.cassandra.dest", "localhost:9042", "dest of the cassandra cluster; comma-separated list of host:port pairs")
val cassandraSpanTtl = flag[Duration] ("zipkin.store.cassandra.spanTTL", SpanTtl, "length of time cassandra should store spans")
val cassandraIndexTtl = flag[Duration] ("zipkin.store.cassandra.indexTTL", IndexTtl, "length of time cassandra should store span indexes")
val cassandraMaxTraceCols = flag[Int] ("zipkin.store.cassandra.maxTraceCols", MaxTraceCols, "max number of spans to return from a query")
val cassandraUsername = flag[String] ("zipkin.store.cassandra.username", "cassandra authentication user name")
val cassandraPassword = flag[String] ("zipkin.store.cassandra.password", "cassandra authentication password")
// eagerly makes network connections, so lazy
private[this] lazy val lazyRepository = new Repository(keyspace(), createClusterBuilder().build())
def newCassandraStore(stats: StatsReceiver = DefaultStatsReceiver.scope("CassandraSpanStore")) = {
new CassandraSpanStore(stats.scope(keyspace()), cassandraSpanTtl(), cassandraIndexTtl(), cassandraMaxTraceCols()) {
override lazy val repository = lazyRepository
}
}
def newCassandraDependencies(stats: StatsReceiver = DefaultStatsReceiver.scope("CassandraDependencyStore")) = {
new CassandraDependencyStore() {
override lazy val repository = lazyRepository
}
}
def createClusterBuilder(): Cluster.Builder = {
val builder = Cluster.builder()
val contactPoints = parseContactPoints()
val defaultPort = findConnectPort(contactPoints)
builder.addContactPointsWithPorts(contactPoints)
builder.withPort(defaultPort) // This ends up config.protocolOptions.port
if (cassandraUsername.isDefined && cassandraPassword.isDefined)
builder.withCredentials(cassandraUsername(), cassandraPassword())
builder.withRetryPolicy(ZipkinRetryPolicy.INSTANCE)
builder.withLoadBalancingPolicy(new TokenAwarePolicy(new LatencyAwarePolicy.Builder(new RoundRobinPolicy()).build()))
}
def parseContactPoints() = {
JavaConversions.seqAsJavaList(cassandraDest().split(",")
.map(HostAndPort.fromString)
.map(cp => new java.net.InetSocketAddress(cp.getHostText, cp.getPortOrDefault(9042))))
}
/** Returns the consistent port across all contact points or 9042 */
def findConnectPort(contactPoints: java.util.List[InetSocketAddress]) = {
val ports = contactPoints.asScala.map(_.getPort).toSet
if (ports.size == 1) {
ports.head
} else {
9042
}
}
}
| jfeltesse-mdsol/zipkin | zipkin-cassandra/src/main/scala/com/twitter/zipkin/cassandra/CassandraSpanStoreFactory.scala | Scala | apache-2.0 | 4,180 |
package sclack.domain
/**
* Store basic application information about Sclack here (authors, versions
* etc).
*
* @author Simon Symeonidis
*/
object Sclack {
}
| psyomn/sclack | src/main/scala/domain/Sclack.scala | Scala | gpl-3.0 | 167 |
import com.eklavya.scqla._
import org.omg.PortableInterceptor.SUCCESSFUL
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{ Matchers, BeforeAndAfterAll, FlatSpec }
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import Scqla._
/**
* Created by eklavya on 18/2/14.
*/
case class Emp(empId: Int, deptId: Int, alive: Boolean, id: java.util.UUID, first: String, last: String, salary: Double, age: Long)
class QuerySpec extends FlatSpec with BeforeAndAfterAll with Matchers with ScalaFutures {
var receivedEvent = false
override def beforeAll {
Scqla.connect
Events.registerDBEvent(CreatedEvent, (a, b) => {
receivedEvent = true
println(s"Hear hear, $a $b have come to be.")
})
}
"Driver" should "be able to create a new keyspace" in {
val res = Await.result(query("CREATE KEYSPACE demodb WITH REPLICATION = {'class' : 'SimpleStrategy','replication_factor': 1}"), 5 seconds)
res.isInstanceOf[SchemaChange.type] should be(true)
}
"Driver" should "report events and execute callbacks" in {
receivedEvent should be(true)
}
"Driver" should "be able to set global keyspace" in {
val res = Await.result(query("use demodb"), 5 seconds)
res.isInstanceOf[SetKeyspace.type] should be(true)
}
"Driver" should "be able to create a new table" in {
val res = Await.result(query(
"""CREATE TABLE demodb.emp (
empID int,
deptID int,
alive boolean,
id uuid,
first_name varchar,
last_name varchar,
salary double,
age bigint,
PRIMARY KEY (empID, deptID))"""), 5 seconds)
res.isInstanceOf[SchemaChange.type] should be(true)
}
"Driver" should "be able to execute prepared queries" in {
val res = Await.result(prepare("INSERT INTO demodb.emp (empID, deptID, alive, id, first_name, last_name, salary, age) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"), 5 seconds)
val res1 = Await.result(res.execute(104, 15, true, new java.util.UUID(0, 0), "Hot", "Shot", 10000000.0, 98763L), 5 seconds)
res1.isInstanceOf[Successful.type] should be(true)
}
"Driver" should "return proper case class list" in {
val res = Await.result(queryAs[Emp]("select empID, deptID, alive, id, first_name, last_name, salary, age from demodb.emp"), 5 seconds)
res.head should equal(Emp(104, 15, true, new java.util.UUID(0, 0), "Hot", "Shot", 10000000.0, 98763L))
}
"Driver" should "return proper primitives" in {
val res = Await.result(queryAs[Int]("select empid from demodb.emp"), 5 seconds)
res.head should equal(104)
}
"Driver" should "return proper strings" in {
val res = Await.result(queryAs[String]("select first_name from demodb.emp"), 5 seconds)
res.head should equal("Hot")
}
"Driver" should "be able to execute prepared queries and get results" in {
val res = Await.result(prepare("select empID, deptID, alive, id, first_name, last_name, salary, age from demodb.emp where empid = ? and deptid = ?"), 5 seconds)
val res1 = Await.result(res.executeGet[Emp](104, 15), 5 seconds)
res1.head should equal(Emp(104, 15, true, new java.util.UUID(0, 0), "Hot", "Shot", 10000000.0, 98763L))
}
"Driver" should "report error for a bad query" in {
val res = prepare("select emID, deptID, alive, id, first_name, last_name, salary, age from demodb.emp where empid = ? and deptid = ?")
whenReady(res.failed) { ex =>
ex shouldBe an [ErrorException]
}
}
"Driver" should "be able to drop keyspace" in {
val res = Await.result(query("drop KEYSPACE demodb"), 5 seconds)
res.isInstanceOf[SchemaChange.type] should be(true)
}
}
| EchoSYSU/Scqla | src/test/scala/QuerySpec.scala | Scala | apache-2.0 | 3,699 |
package com.github.karlhigley.spark.neighbors.lsh
import java.util.Random
import org.apache.spark.mllib.linalg.SparseVector
/**
*
* References:
* - Broder, A. "On the resemblance and containment of documents."
* Compression and Complexity of Sequences: Proceedings, 1997.
*
* @see [[https://en.wikipedia.org/wiki/MinHash MinHash (Wikipedia)]]
*/
private[neighbors] class MinhashFunction(
private[this] val permutations: Array[PermutationFunction]
) extends LSHFunction[IntSignature] with Serializable {
/**
* Compute minhash signature for a vector.
*
* Since Spark doesn't support binary vectors, this uses
* SparseVectors and treats any active element of the vector
* as a member of the set. Note that "active" includes explicit
* zeros, which should not (but still might) be present in SparseVectors.
*/
def signature(vector: SparseVector): IntSignature = {
val sig = permutations.map(p => {
vector.indices.map(p.apply).min
})
new IntSignature(sig)
}
/**
* Build a hash table entry for the supplied vector
*/
def hashTableEntry(id: Long, table: Int, v: SparseVector): IntHashTableEntry = {
IntHashTableEntry(id, table, signature(v), v)
}
}
private[neighbors] object MinhashFunction {
/**
* Randomly generate a new minhash function
*/
def generate(
dimensions: Int,
signatureLength: Int,
prime: Int,
random: Random = new Random
): MinhashFunction = {
val perms = new Array[PermutationFunction](signatureLength)
var i = 0
while (i < signatureLength) {
perms(i) = PermutationFunction.random(dimensions, prime, random)
i += 1
}
new MinhashFunction(perms)
}
}
| L2V/like2vec | src/prediction/src/main/scala/com/github/karlhigley/spark/neighbors/lsh/MinhashFunction.scala | Scala | apache-2.0 | 1,705 |
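A hedged usage sketch for the class above. Because MinhashFunction is package-private, the sketch sits in the same package; the dimensionality, signature length, and the prime (chosen above the dimensionality for the permutation functions) are illustrative values, not values from the original project.

package com.github.karlhigley.spark.neighbors.lsh

import java.util.Random
import org.apache.spark.mllib.linalg.{SparseVector, Vectors}

object MinhashSketch {
  def main(args: Array[String]): Unit = {
    // The active indices {3, 17, 42} are treated as the members of the set.
    val vector = Vectors.sparse(100, Seq((3, 1.0), (17, 1.0), (42, 1.0))).asInstanceOf[SparseVector]
    val minhash = MinhashFunction.generate(dimensions = 100, signatureLength = 16, prime = 101, new Random(42L))
    val signature = minhash.signature(vector) // one minimum per random permutation
    println(signature)
  }
}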
package dotty.tools.scaladoc
import org.scalajs.dom._
import org.scalajs.dom.ext._
import utils.HTML._
class SocialLinks:
def addIcon(elem: html.Element) =
elem.appendChild(
img(src := s"${Globals.pathToRoot}images/${elem.getAttribute("data-icon-path")}")()
)
document.querySelectorAll(".social-icon").collect { case e: html.Element => e }.foreach(addIcon)
| lampepfl/dotty | scaladoc-js/main/src/social-links/SocialLinks.scala | Scala | apache-2.0 | 380 |
package org.tagsharp.util
import scala.util.matching.Regex
import org.scalatest.matchers.{MatchResult, Matcher}
import org.tagsharp.test.{Pass, Result}
trait CustomMatchers {
/**
* @see http://www.scalatest.org/user_guide/using_matchers#usingCustomMatchers
*/
class PassMatcher extends Matcher[Result] {
def apply(left: Result) = {
val msg = "Result was not a Pass >> " + left
MatchResult(
left match {
case Pass() => true
case _ => false
},
msg, msg, msg, msg)
}
}
def pass = new PassMatcher
class RegexMatcher(pair: (CharSequence, String)) extends Matcher[Regex] {
def apply(left: Regex) = {
val term = pair._1
val result = pair._2
val msg = s"Error matching [$left] against [$term]"
MatchResult(left findFirstIn term match {
case Some(value) => result == value
case None => false
}, msg, msg, msg, msg)
}
}
class Insider(result: String) {
def inside(input: CharSequence) = new RegexMatcher(input, result)
}
def find(result: String) = new Insider(result)
}
| reggoodwin/tagsharp | src/test/scala/org/tagsharp/util/CustomMatchers.scala | Scala | mit | 1,118 |
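A short hedged sketch of how the matchers above might read in a spec; the spec class and the regex/term values are illustrative and assume the ScalaTest flavor the trait targets.

package org.tagsharp.util

import scala.util.matching.Regex
import org.scalatest.{FlatSpec, Matchers}
import org.tagsharp.test.{Pass, Result}

class CustomMatchersSketch extends FlatSpec with Matchers with CustomMatchers {
  "pass" should "match a Pass result" in {
    val result: Result = Pass()
    result should pass
  }
  "find(...).inside(...)" should "match when the first regex hit equals the expected text" in {
    val digits: Regex = "[0-9]+".r
    digits should find("123").inside("abc 123 xyz")
  }
}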
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package forms
import java.time.LocalDate
import play.api.data.{FormError, Forms, Mapping}
import play.api.data.format.Formatter
import utils.Constants
import common.Dates._
import common.Exceptions
import common.Validation._
import scala.util.{Failure, Success, Try}
trait CommonBinders {
private val EMPTY_STRING = ""
private val DAY_LIMIT = 31
private val MONTH_LIMIT = 12
//############General Formatters###################
private def stringToOptionalIntFormatter(errorLabel: String) = new Formatter[Option[Int]] {
override def bind(key: String, data: Map[String, String]): Either[Seq[FormError], Option[Int]] = {
data.getOrElse(key, EMPTY_STRING) match {
case EMPTY_STRING => Left(Seq(FormError(key, s"pla.base.errors.$errorLabel")))
case str => Try(str.toInt) match {
case Success(result) => Right(Some(result))
case Failure(_) => Left(Seq(FormError(key, "error.real")))
}
}
}
override def unbind(key: String, value: Option[Int]): Map[String, String] = Map(key -> value.map(_.toString).getOrElse(""))
}
def psoPartialDateBinder(errorLabel: String): Mapping[Option[Int]] = Forms.of[Option[Int]](stringToOptionalIntFormatter(errorLabel))
private def stringToIntFormatter(errorLabel: String) = new Formatter[Int] {
override def bind(key: String, data: Map[String, String]): Either[Seq[FormError], Int] = {
data.getOrElse(key, EMPTY_STRING) match {
case EMPTY_STRING => Left(Seq(FormError(key, s"pla.base.errors.$errorLabel")))
case str => Try(str.toInt) match {
case Success(result) => Right(result)
case Failure(_) => Left(Seq(FormError(key, "error.real")))
}
}
}
override def unbind(key: String, value: Int): Map[String, String] = Map(key -> value.toString)
}
def intWithCustomError(errorLabel: String): Mapping[Int] = Forms.of[Int](stringToIntFormatter(errorLabel))
private def protectionFormatter = new Formatter[String] {
override def bind(key: String, data: Map[String, String]): Either[Seq[FormError], String] = {
data.getOrElse(key, "") match {
case success if success.equals("ip2016") || success.equals("ip2014") => Right(success)
case other => throw new Exceptions.RequiredNotFoundProtectionTypeException("[protectionFormatter]")
}
}
override def unbind(key: String, value: String): Map[String, String] = Map(key -> value)
}
def protectionTypeFormatter: Mapping[String] = Forms.of[String](protectionFormatter)
implicit val optionalBigDecimalFormatter = new Formatter[Option[BigDecimal]] {
def bind(key: String, data: Map[String, String]): Either[Seq[FormError], Option[BigDecimal]] =
if(ifAmtIsRequired(key, data)){
data.get(key).map(validateNumber(key, _)).getOrElse(Left(Seq(FormError(key, "pla.base.errors.errorQuestion", Nil))))
} else Right(None)
def unbind(key: String, value: Option[BigDecimal]): Map[String, String] = {
value match {
case Some(data) => Map(key -> data.toString())
case None => Map()
}
}
}
val yesNoOptionalBigDecimal: Mapping[Option[BigDecimal]] = Forms.of[Option[BigDecimal]](optionalBigDecimalFormatter)
//############Withdrawal Date###################
private[forms] def withdrawDateValidationFormatter(errorLabel: String) = new Formatter[Option[Int]] {
override def bind(key: String, data: Map[String, String]): Either[Seq[FormError], Option[Int]] = {
data.getOrElse(key, EMPTY_STRING) match {
case EMPTY_STRING => Left(Seq(FormError(key,s"pla.withdraw.date-input.form.$errorLabel-empty", Nil )))
case str => Try(str.toInt) match {
case Success(result) =>
if (key.equals("withdrawDay")) dateBoundaryValidation(key,result,errorLabel, DAY_LIMIT)
else if(key.equals("withdrawMonth")) dateBoundaryValidation(key,result,errorLabel, MONTH_LIMIT)
else Right(Some(result))
case Failure(_) => Left(Seq(FormError(key, "error.real")))
}
}
}
override def unbind(key: String, value: Option[Int]): Map[String, String] = Map(key -> value.map(_.toString).getOrElse(""))
}
def withdrawDatePartialFormatter(errorLabel: String): Mapping[Option[Int]] = Forms.of[Option[Int]](withdrawDateValidationFormatter(errorLabel))
private[forms] def withdrawDateStringToIntFormatter = new Formatter[Option[Int]] {
override def bind(key: String, data: Map[String, String]) = {
val groupedIntsWithCustomErrors: Either[Seq[FormError], (Int, Int, Int)] = for {
day <- withdrawDateValidationFormatter("day").bind("withdrawDay", data).right
month <- withdrawDateValidationFormatter("month").bind("withdrawMonth", data).right
year <- withdrawDateValidationFormatter("year").bind("withdrawYear", data).right
} yield {
(day.get, month.get, year.get)
}
val returnValue: Either[Seq[FormError], Option[Int]] = groupedIntsWithCustomErrors fold(
errs => withdrawDateValidationFormatter("day").bind("withdrawDay", data),
success => {
val (day, month, year) = (success._1, success._2, success._3)
validateWithdrawDate(key, day, month, year)
}
)
returnValue
}
override def unbind(key: String, value: Option[Int]) = Map(key -> value.map(_.toString).getOrElse(""))
}
def withdrawDateFormatter: Mapping[Option[Int]] = Forms.of[Option[Int]](withdrawDateStringToIntFormatter)
//############PSO Date###################
private def psoDateStringToIntFormatter = new Formatter[Int] {
override def bind(key: String, data: Map[String, String]) = {
val groupedIntsWithCustomErrors: Either[Seq[FormError], (Int, Int, Int)] = for {
day <- stringToIntFormatter("dayEmpty").bind("psoDay", data).right
month <- stringToIntFormatter("monthEmpty").bind("psoMonth", data).right
year <- stringToIntFormatter("yearEmpty").bind("psoYear", data).right
} yield {
(day, month, year)
}
val returnValue = groupedIntsWithCustomErrors fold (
errs => stringToIntFormatter("dayEmpty").bind("psoDay", data),
success => {
val (day, month, year) = (success._1, success._2, success._3)
validateCompleteDate(key,Constants.minIP16PSODate,"IP16PsoDetails",day,month,year)
}
)
returnValue
}
override def unbind(key: String, value: Int) = Map(key -> value.toString)
}
def psoDateFormatterFromString: Mapping[Int] = Forms.of[Int](psoDateStringToIntFormatter)
private def dateStringToOptionalIntFormatter = new Formatter[Option[Int]] {
override def bind(key: String, data: Map[String, String]) = {
val groupedIntsWithCustomErrors: Either[Seq[FormError], (Int, Int, Int, String)] = for {
day <- stringToOptionalIntFormatter("dayEmpty").bind("psoDay", data).right
month <- stringToOptionalIntFormatter("monthEmpty").bind("psoMonth", data).right
year <- stringToOptionalIntFormatter("yearEmpty").bind("psoYear", data).right
protectionType <- protectionFormatter.bind("protectionType", data).right
} yield {
(day.get, month.get, year.get, protectionType)
}
val returnValue: Either[Seq[FormError], Option[Int]] = groupedIntsWithCustomErrors.fold(
errs => stringToOptionalIntFormatter("dayEmpty").bind("psoDay", data),
success => {
val (day, month, year, protectionType) = (success._1, success._2, success._3, success._4)
val protectionTypeErrorMsg = if (protectionType.toLowerCase.equals("ip2016")) "IP16PsoDetails" else "IP14PsoDetails"
val dateComparator = if (protectionType.toLowerCase.equals("ip2016")) Constants.minIP16PSODate else Constants.minIP14PSODate
validateCompleteDate(key,dateComparator,protectionTypeErrorMsg,day,month,year).fold[Either[Seq[FormError], Option[Int]]] (
errors => Left(errors),
value => Right(Some(value))
)
}
)
returnValue
}
override def unbind(key: String, value: Option[Int]) = Map(key -> value.map(_.toString).getOrElse(""))
}
def dateFormatterFromInt: Mapping[Option[Int]] = Forms.of[Option[Int]](dateStringToOptionalIntFormatter)
//#################HELPER METHODS#################################//
private def dateBoundaryValidation(key: String, value: Int, errorMsg: String, upperLimit: Int): Either[Seq[FormError], Option[Int]] ={
value match {
case lowValue if lowValue <= 0 => Left(Seq(FormError(key,s"pla.withdraw.date-input.form.$errorMsg-too-low", Nil )))
case highValue if highValue > upperLimit => Left(Seq(FormError(key,s"pla.withdraw.date-input.form.$errorMsg-too-high", Nil )))
case _ => Right(Some(value))
}
}
private def validateWithdrawDate(key: String, day: Int, month: Int, year: Int): Either[Seq[FormError], Option[Int]] = {
if (!isValidDate(day, month, year)) Left(Seq(FormError(key, "pla.base.errors.invalidDate", Nil)))
else if (futureDate(day, month, year)) Left(Seq(FormError(key, "pla.withdraw.date-input.form.date-in-future", Nil)))
else Right(Some(day))
}
private def validateCompleteDate(key: String, dateComparator: LocalDate, errorMsg: String, day: Int, month: Int, year: Int): Either[Seq[FormError], Int] ={
if (!isValidDate(day, month, year)) Left(Seq(FormError(key, "pla.base.errors.invalidDate", Nil)))
else if (dateBefore(day, month, year, dateComparator)) Left(Seq(FormError(key, s"pla.$errorMsg.errorDateOutOfRange", Nil)))
else if (futureDate(day, month, year)) Left(Seq(FormError(key, s"pla.$errorMsg.errorDateOutOfRange", Nil)))
else Right(day)
}
private def validateNumber(key: String, number: String): Either[Seq[FormError], Some[BigDecimal]] = {
number match {
case "" => Left(Seq(FormError(key, "pla.base.errors.errorQuestion", Nil)))
case value if !checkIfValidBigDecimal(value) => Left(Seq(FormError(key, "error.real")))
case value if !isPositive(BigDecimal(value)) => Left(Seq(FormError(key, "pla.base.errors.errorNegative", Nil)))
case value if !isLessThanDouble(value.toDouble, Constants.npsMaxCurrency) => Left(Seq(FormError(key, "pla.base.errors.errorMaximum", Nil)))
case value if !isMaxTwoDecimalPlaces(BigDecimal(value)) => Left(Seq(FormError(key, "pla.base.errors.errorDecimalPlaces", Nil)))
case value => Right(Some(BigDecimal(value)))
}
}
private def checkIfValidBigDecimal(value: String): Boolean = {
val output = Try(BigDecimal(value))
output match {
case Success(result) => true
case Failure(_) => false
}
}
private def ifAmtIsRequired(key: String, data: Map[String, String]): Boolean ={
data.getOrElse(key.take(key.length - 3), "no").equals("yes")
}
}
| hmrc/pensions-lifetime-allowance-frontend | app/forms/CommonBinders.scala | Scala | apache-2.0 | 11,435 |
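A hedged sketch of wiring one of the binders above into a Play form; the object and field names are illustrative. The optional-BigDecimal formatter only demands a value when the sibling yes/no field (the key minus its last three characters) is "yes", as ifAmtIsRequired shows above.

package forms

import play.api.data.Form
import play.api.data.Forms.single

object PsoAmountFormSketch extends CommonBinders {
  // Binding data such as Map("pso" -> "yes", "psoAmt" -> "1000.50") yields Some(1000.50);
  // with "pso" -> "no" the amount is not required and the result is None.
  val form: Form[Option[BigDecimal]] = Form(single("psoAmt" -> yesNoOptionalBigDecimal))
}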
/*
* -╥⌐⌐⌐⌐ -⌐⌐⌐⌐-
* ≡╢░░░░⌐\\░░░φ ╓╝░░░░⌐░░░░╪╕
* ╣╬░░` `░░░╢┘ φ▒╣╬╝╜ ░░╢╣Q
* ║╣╬░⌐ ` ╤▒▒▒Å` ║╢╬╣
* ╚╣╬░⌐ ╔▒▒▒▒`«╕ ╢╢╣▒
* ╫╬░░╖ .░ ╙╨╨ ╣╣╬░φ ╓φ░╢╢Å
* ╙╢░░░░⌐"░░░╜ ╙Å░░░░⌐░░░░╝`
* ``˚¬ ⌐ ˚˚⌐´
*
* Copyright © 2016 Flipkart.com
*/
package com.flipkart.connekt.busybees.streams.flows.eventcreators
import com.flipkart.connekt.busybees.streams.errors.ConnektPNStageException
import com.flipkart.connekt.busybees.streams.flows.MapAsyncFlowStage
import com.flipkart.connekt.commons.factories.{ConnektLogger, LogFile, ServiceFactory}
import com.flipkart.connekt.commons.helpers.ConnektRequestHelper._
import com.flipkart.connekt.commons.iomodels.MessageStatus.InternalStatus
import com.flipkart.connekt.commons.iomodels.{ConnektRequest, PNRequestInfo}
import com.flipkart.connekt.commons.metrics.Instrumented
import com.flipkart.connekt.commons.services.ConnektConfig
import com.flipkart.connekt.commons.utils.StringUtils._
import scala.concurrent.{ExecutionContext, Future, Promise}
class NotificationQueueRecorder(parallelism: Int)(implicit ec: ExecutionContext) extends MapAsyncFlowStage[ConnektRequest, ConnektRequest](parallelism) with Instrumented {
private lazy val shouldAwait = ConnektConfig.getBoolean("topology.push.queue.await").getOrElse(false)
override val map: (ConnektRequest) => Future[List[ConnektRequest]] = message => {
val profiler = timer("map").time()
try {
ConnektLogger(LogFile.PROCESSORS).trace(s"NotificationQueueRecorder received message: ${message.id}")
val pnInfo = message.channelInfo.asInstanceOf[PNRequestInfo]
val promise = Promise[List[ConnektRequest]]()
if (message.isTestRequest)
promise.success(List(message))
else {
val enqueueFutures = pnInfo.deviceIds.map(
ServiceFactory.getMessageQueueService.enqueueMessage(message.appName, _, message.id)
)
if (shouldAwait) //TODO: Remove this when cross-dc calls are taken care of.
Future.sequence(enqueueFutures).onComplete(_ => promise.success(List(message)))
else
promise.success(List(message))
}
promise.future.onComplete(_ => profiler.stop())
promise.future
} catch {
case e: Exception =>
ConnektLogger(LogFile.PROCESSORS).error(s"NotificationQueueRecorder error for ${message.id}", e)
throw ConnektPNStageException(message.id, message.clientId, message.destinations, InternalStatus.StageError, message.appName, message.platform, message.contextId.orEmpty, message.meta, "NotificationQueueRecorder::".concat(e.getMessage), e)
}
}
}
| Flipkart/connekt | busybees/src/main/scala/com/flipkart/connekt/busybees/streams/flows/eventcreators/NotificationQueueRecorder.scala | Scala | mit | 2,959 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.sources.v2
import java.io.File
import test.org.apache.spark.sql.sources.v2._
import org.apache.spark.SparkException
import org.apache.spark.sql.{DataFrame, QueryTest, Row}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.datasources.v2.{BatchScanExec, DataSourceV2Relation}
import org.apache.spark.sql.execution.exchange.{Exchange, ShuffleExchangeExec}
import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector
import org.apache.spark.sql.functions._
import org.apache.spark.sql.sources.{Filter, GreaterThan}
import org.apache.spark.sql.sources.v2.reader._
import org.apache.spark.sql.sources.v2.reader.partitioning.{ClusteredDistribution, Distribution, Partitioning}
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types.{IntegerType, StructType}
import org.apache.spark.sql.vectorized.ColumnarBatch
class DataSourceV2Suite extends QueryTest with SharedSQLContext {
import testImplicits._
private def getBatch(query: DataFrame): AdvancedBatch = {
query.queryExecution.executedPlan.collect {
case d: BatchScanExec =>
d.batch.asInstanceOf[AdvancedBatch]
}.head
}
private def getJavaBatch(query: DataFrame): JavaAdvancedDataSourceV2.AdvancedBatch = {
query.queryExecution.executedPlan.collect {
case d: BatchScanExec =>
d.batch.asInstanceOf[JavaAdvancedDataSourceV2.AdvancedBatch]
}.head
}
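// The two helpers above dig the (Java)AdvancedBatch out of the executed physical plan so the
// tests below can assert on which filters and columns were actually pushed down.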
test("simplest implementation") {
Seq(classOf[SimpleDataSourceV2], classOf[JavaSimpleDataSourceV2]).foreach { cls =>
withClue(cls.getName) {
val df = spark.read.format(cls.getName).load()
checkAnswer(df, (0 until 10).map(i => Row(i, -i)))
checkAnswer(df.select('j), (0 until 10).map(i => Row(-i)))
checkAnswer(df.filter('i > 5), (6 until 10).map(i => Row(i, -i)))
}
}
}
test("advanced implementation") {
Seq(classOf[AdvancedDataSourceV2], classOf[JavaAdvancedDataSourceV2]).foreach { cls =>
withClue(cls.getName) {
val df = spark.read.format(cls.getName).load()
checkAnswer(df, (0 until 10).map(i => Row(i, -i)))
val q1 = df.select('j)
checkAnswer(q1, (0 until 10).map(i => Row(-i)))
if (cls == classOf[AdvancedDataSourceV2]) {
val batch = getBatch(q1)
assert(batch.filters.isEmpty)
assert(batch.requiredSchema.fieldNames === Seq("j"))
} else {
val batch = getJavaBatch(q1)
assert(batch.filters.isEmpty)
assert(batch.requiredSchema.fieldNames === Seq("j"))
}
val q2 = df.filter('i > 3)
checkAnswer(q2, (4 until 10).map(i => Row(i, -i)))
if (cls == classOf[AdvancedDataSourceV2]) {
val batch = getBatch(q2)
assert(batch.filters.flatMap(_.references).toSet == Set("i"))
assert(batch.requiredSchema.fieldNames === Seq("i", "j"))
} else {
val batch = getJavaBatch(q2)
assert(batch.filters.flatMap(_.references).toSet == Set("i"))
assert(batch.requiredSchema.fieldNames === Seq("i", "j"))
}
val q3 = df.select('i).filter('i > 6)
checkAnswer(q3, (7 until 10).map(i => Row(i)))
if (cls == classOf[AdvancedDataSourceV2]) {
val batch = getBatch(q3)
assert(batch.filters.flatMap(_.references).toSet == Set("i"))
assert(batch.requiredSchema.fieldNames === Seq("i"))
} else {
val batch = getJavaBatch(q3)
assert(batch.filters.flatMap(_.references).toSet == Set("i"))
assert(batch.requiredSchema.fieldNames === Seq("i"))
}
val q4 = df.select('j).filter('j < -10)
checkAnswer(q4, Nil)
if (cls == classOf[AdvancedDataSourceV2]) {
val batch = getBatch(q4)
// 'j < -10 is not supported by the testing data source.
assert(batch.filters.isEmpty)
assert(batch.requiredSchema.fieldNames === Seq("j"))
} else {
val batch = getJavaBatch(q4)
// 'j < -10 is not supported by the testing data source.
assert(batch.filters.isEmpty)
assert(batch.requiredSchema.fieldNames === Seq("j"))
}
}
}
}
test("columnar batch scan implementation") {
Seq(classOf[ColumnarDataSourceV2], classOf[JavaColumnarDataSourceV2]).foreach { cls =>
withClue(cls.getName) {
val df = spark.read.format(cls.getName).load()
checkAnswer(df, (0 until 90).map(i => Row(i, -i)))
checkAnswer(df.select('j), (0 until 90).map(i => Row(-i)))
checkAnswer(df.filter('i > 50), (51 until 90).map(i => Row(i, -i)))
}
}
}
test("schema required data source") {
Seq(classOf[SchemaRequiredDataSource], classOf[JavaSchemaRequiredDataSource]).foreach { cls =>
withClue(cls.getName) {
val e = intercept[IllegalArgumentException](spark.read.format(cls.getName).load())
assert(e.getMessage.contains("requires a user-supplied schema"))
val schema = new StructType().add("i", "int").add("s", "string")
val df = spark.read.format(cls.getName).schema(schema).load()
assert(df.schema == schema)
assert(df.collect().isEmpty)
}
}
}
test("partitioning reporting") {
import org.apache.spark.sql.functions.{count, sum}
Seq(classOf[PartitionAwareDataSource], classOf[JavaPartitionAwareDataSource]).foreach { cls =>
withClue(cls.getName) {
val df = spark.read.format(cls.getName).load()
checkAnswer(df, Seq(Row(1, 4), Row(1, 4), Row(3, 6), Row(2, 6), Row(4, 2), Row(4, 2)))
val groupByColA = df.groupBy('i).agg(sum('j))
checkAnswer(groupByColA, Seq(Row(1, 8), Row(2, 6), Row(3, 6), Row(4, 4)))
assert(groupByColA.queryExecution.executedPlan.collectFirst {
case e: ShuffleExchangeExec => e
}.isEmpty)
val groupByColAB = df.groupBy('i, 'j).agg(count("*"))
checkAnswer(groupByColAB, Seq(Row(1, 4, 2), Row(2, 6, 1), Row(3, 6, 1), Row(4, 2, 2)))
assert(groupByColAB.queryExecution.executedPlan.collectFirst {
case e: ShuffleExchangeExec => e
}.isEmpty)
val groupByColB = df.groupBy('j).agg(sum('i))
checkAnswer(groupByColB, Seq(Row(2, 8), Row(4, 2), Row(6, 5)))
assert(groupByColB.queryExecution.executedPlan.collectFirst {
case e: ShuffleExchangeExec => e
}.isDefined)
val groupByAPlusB = df.groupBy('i + 'j).agg(count("*"))
checkAnswer(groupByAPlusB, Seq(Row(5, 2), Row(6, 2), Row(8, 1), Row(9, 1)))
assert(groupByAPlusB.queryExecution.executedPlan.collectFirst {
case e: ShuffleExchangeExec => e
}.isDefined)
}
}
}
test("SPARK-23574: no shuffle exchange with single partition") {
val df = spark.read.format(classOf[SimpleSinglePartitionSource].getName).load().agg(count("*"))
assert(df.queryExecution.executedPlan.collect { case e: Exchange => e }.isEmpty)
}
test("simple writable data source") {
// TODO: java implementation.
Seq(classOf[SimpleWritableDataSource]).foreach { cls =>
withTempPath { file =>
val path = file.getCanonicalPath
assert(spark.read.format(cls.getName).option("path", path).load().collect().isEmpty)
spark.range(10).select('id as 'i, -'id as 'j).write.format(cls.getName)
.option("path", path).save()
checkAnswer(
spark.read.format(cls.getName).option("path", path).load(),
spark.range(10).select('id, -'id))
// test with different save modes
spark.range(10).select('id as 'i, -'id as 'j).write.format(cls.getName)
.option("path", path).mode("append").save()
checkAnswer(
spark.read.format(cls.getName).option("path", path).load(),
spark.range(10).union(spark.range(10)).select('id, -'id))
spark.range(5).select('id as 'i, -'id as 'j).write.format(cls.getName)
.option("path", path).mode("overwrite").save()
checkAnswer(
spark.read.format(cls.getName).option("path", path).load(),
spark.range(5).select('id, -'id))
spark.range(5).select('id as 'i, -'id as 'j).write.format(cls.getName)
.option("path", path).mode("ignore").save()
checkAnswer(
spark.read.format(cls.getName).option("path", path).load(),
spark.range(5).select('id, -'id))
val e = intercept[Exception] {
spark.range(5).select('id as 'i, -'id as 'j).write.format(cls.getName)
.option("path", path).mode("error").save()
}
assert(e.getMessage.contains("data already exists"))
// test transaction
val failingUdf = org.apache.spark.sql.functions.udf {
var count = 0
(id: Long) => {
if (count > 5) {
throw new RuntimeException("testing error")
}
count += 1
id
}
}
// this input data will fail to read midway through.
val input = spark.range(10).select(failingUdf('id).as('i)).select('i, -'i as 'j)
val e2 = intercept[SparkException] {
input.write.format(cls.getName).option("path", path).mode("overwrite").save()
}
assert(e2.getMessage.contains("Writing job aborted"))
// make sure we don't have partial data.
assert(spark.read.format(cls.getName).option("path", path).load().collect().isEmpty)
}
}
}
test("simple counter in writer with onDataWriterCommit") {
Seq(classOf[SimpleWritableDataSource]).foreach { cls =>
withTempPath { file =>
val path = file.getCanonicalPath
assert(spark.read.format(cls.getName).option("path", path).load().collect().isEmpty)
val numPartition = 6
spark.range(0, 10, 1, numPartition).select('id as 'i, -'id as 'j).write.format(cls.getName)
.option("path", path).save()
checkAnswer(
spark.read.format(cls.getName).option("path", path).load(),
spark.range(10).select('id, -'id))
assert(SimpleCounter.getCounter == numPartition,
"method onDataWriterCommit should be called as many as the number of partitions")
}
}
}
test("SPARK-23293: data source v2 self join") {
val df = spark.read.format(classOf[SimpleDataSourceV2].getName).load()
val df2 = df.select(($"i" + 1).as("k"), $"j")
checkAnswer(df.join(df2, "j"), (0 until 10).map(i => Row(-i, i, i + 1)))
}
test("SPARK-23301: column pruning with arbitrary expressions") {
val df = spark.read.format(classOf[AdvancedDataSourceV2].getName).load()
val q1 = df.select('i + 1)
checkAnswer(q1, (1 until 11).map(i => Row(i)))
val batch1 = getBatch(q1)
assert(batch1.requiredSchema.fieldNames === Seq("i"))
val q2 = df.select(lit(1))
checkAnswer(q2, (0 until 10).map(i => Row(1)))
val batch2 = getBatch(q2)
assert(batch2.requiredSchema.isEmpty)
// 'j === -1 can't be pushed down, but we should still be able to do column pruning
val q3 = df.filter('j === -1).select('j * 2)
checkAnswer(q3, Row(-2))
val batch3 = getBatch(q3)
assert(batch3.filters.isEmpty)
assert(batch3.requiredSchema.fieldNames === Seq("j"))
// column pruning should work with other operators.
val q4 = df.sort('i).limit(1).select('i + 1)
checkAnswer(q4, Row(1))
val batch4 = getBatch(q4)
assert(batch4.requiredSchema.fieldNames === Seq("i"))
}
test("SPARK-23315: get output from canonicalized data source v2 related plans") {
def checkCanonicalizedOutput(
df: DataFrame, logicalNumOutput: Int, physicalNumOutput: Int): Unit = {
val logical = df.queryExecution.optimizedPlan.collect {
case d: DataSourceV2Relation => d
}.head
assert(logical.canonicalized.output.length == logicalNumOutput)
val physical = df.queryExecution.executedPlan.collect {
case d: BatchScanExec => d
}.head
assert(physical.canonicalized.output.length == physicalNumOutput)
}
val df = spark.read.format(classOf[AdvancedDataSourceV2].getName).load()
checkCanonicalizedOutput(df, 2, 2)
checkCanonicalizedOutput(df.select('i), 2, 1)
}
test("SPARK-25425: extra options should override sessions options during reading") {
val prefix = "spark.datasource.userDefinedDataSource."
val optionName = "optionA"
withSQLConf(prefix + optionName -> "true") {
val df = spark
.read
.option(optionName, false)
.format(classOf[DataSourceV2WithSessionConfig].getName).load()
val options = df.queryExecution.optimizedPlan.collectFirst {
case d: DataSourceV2Relation => d.options
}.get
assert(options.get(optionName).get == "false")
}
}
test("SPARK-25425: extra options should override sessions options during writing") {
withTempPath { path =>
val sessionPath = path.getCanonicalPath
withSQLConf("spark.datasource.simpleWritableDataSource.path" -> sessionPath) {
withTempPath { file =>
val optionPath = file.getCanonicalPath
val format = classOf[SimpleWritableDataSource].getName
val df = Seq((1L, 2L)).toDF("i", "j")
df.write.format(format).option("path", optionPath).save()
assert(!new File(sessionPath).exists)
checkAnswer(spark.read.format(format).option("path", optionPath).load(), df)
}
}
}
}
test("SPARK-25700: do not read schema when writing in other modes except append and overwrite") {
withTempPath { file =>
val cls = classOf[SimpleWriteOnlyDataSource]
val path = file.getCanonicalPath
val df = spark.range(5).select('id as 'i, -'id as 'j)
// error and ignore modes should not throw an exception, as they don't access the schema.
df.write.format(cls.getName).option("path", path).mode("error").save()
df.write.format(cls.getName).option("path", path).mode("ignore").save()
// append and overwrite modes will access the schema and should throw an exception.
intercept[SchemaReadAttemptException] {
df.write.format(cls.getName).option("path", path).mode("append").save()
}
intercept[SchemaReadAttemptException] {
df.write.format(cls.getName).option("path", path).mode("overwrite").save()
}
}
}
}
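// Test-only data sources used by the suite above. Each TableProvider below exercises one
// capability: plain scans, filter/column pushdown, columnar reads, partitioning reporting,
// user-supplied schemas and write-only tables.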
case class RangeInputPartition(start: Int, end: Int) extends InputPartition
object SimpleReaderFactory extends PartitionReaderFactory {
override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
val RangeInputPartition(start, end) = partition
new PartitionReader[InternalRow] {
private var current = start - 1
override def next(): Boolean = {
current += 1
current < end
}
override def get(): InternalRow = InternalRow(current, -current)
override def close(): Unit = {}
}
}
}
abstract class SimpleBatchTable extends Table with SupportsBatchRead {
override def schema(): StructType = new StructType().add("i", "int").add("j", "int")
override def name(): String = this.getClass.toString
}
abstract class SimpleScanBuilder extends ScanBuilder
with Batch with Scan {
override def build(): Scan = this
override def toBatch: Batch = this
override def readSchema(): StructType = new StructType().add("i", "int").add("j", "int")
override def createReaderFactory(): PartitionReaderFactory = SimpleReaderFactory
}
class SimpleSinglePartitionSource extends TableProvider {
class MyScanBuilder extends SimpleScanBuilder {
override def planInputPartitions(): Array[InputPartition] = {
Array(RangeInputPartition(0, 5))
}
}
override def getTable(options: DataSourceOptions): Table = new SimpleBatchTable {
override def newScanBuilder(options: DataSourceOptions): ScanBuilder = {
new MyScanBuilder()
}
}
}
// This class is used by pyspark tests. If this class is modified/moved, make sure pyspark
// tests still pass.
class SimpleDataSourceV2 extends TableProvider {
class MyScanBuilder extends SimpleScanBuilder {
override def planInputPartitions(): Array[InputPartition] = {
Array(RangeInputPartition(0, 5), RangeInputPartition(5, 10))
}
}
override def getTable(options: DataSourceOptions): Table = new SimpleBatchTable {
override def newScanBuilder(options: DataSourceOptions): ScanBuilder = {
new MyScanBuilder()
}
}
}
class AdvancedDataSourceV2 extends TableProvider {
override def getTable(options: DataSourceOptions): Table = new SimpleBatchTable {
override def newScanBuilder(options: DataSourceOptions): ScanBuilder = {
new AdvancedScanBuilder()
}
}
}
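// Supports column pruning and filter pushdown, but only GreaterThan("i", _) filters are
// accepted; everything else is handed back to Spark as unsupported.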
class AdvancedScanBuilder extends ScanBuilder
with Scan with SupportsPushDownFilters with SupportsPushDownRequiredColumns {
var requiredSchema = new StructType().add("i", "int").add("j", "int")
var filters = Array.empty[Filter]
override def pruneColumns(requiredSchema: StructType): Unit = {
this.requiredSchema = requiredSchema
}
override def readSchema(): StructType = requiredSchema
override def pushFilters(filters: Array[Filter]): Array[Filter] = {
val (supported, unsupported) = filters.partition {
case GreaterThan("i", _: Int) => true
case _ => false
}
this.filters = supported
unsupported
}
override def pushedFilters(): Array[Filter] = filters
override def build(): Scan = this
override def toBatch: Batch = new AdvancedBatch(filters, requiredSchema)
}
class AdvancedBatch(val filters: Array[Filter], val requiredSchema: StructType) extends Batch {
override def planInputPartitions(): Array[InputPartition] = {
val lowerBound = filters.collectFirst {
case GreaterThan("i", v: Int) => v
}
val res = scala.collection.mutable.ArrayBuffer.empty[InputPartition]
if (lowerBound.isEmpty) {
res.append(RangeInputPartition(0, 5))
res.append(RangeInputPartition(5, 10))
} else if (lowerBound.get < 4) {
res.append(RangeInputPartition(lowerBound.get + 1, 5))
res.append(RangeInputPartition(5, 10))
} else if (lowerBound.get < 9) {
res.append(RangeInputPartition(lowerBound.get + 1, 10))
}
res.toArray
}
override def createReaderFactory(): PartitionReaderFactory = {
new AdvancedReaderFactory(requiredSchema)
}
}
class AdvancedReaderFactory(requiredSchema: StructType) extends PartitionReaderFactory {
override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
val RangeInputPartition(start, end) = partition
new PartitionReader[InternalRow] {
private var current = start - 1
override def next(): Boolean = {
current += 1
current < end
}
override def get(): InternalRow = {
val values = requiredSchema.map(_.name).map {
case "i" => current
case "j" => -current
}
InternalRow.fromSeq(values)
}
override def close(): Unit = {}
}
}
}
class SchemaRequiredDataSource extends TableProvider {
class MyScanBuilder(schema: StructType) extends SimpleScanBuilder {
override def planInputPartitions(): Array[InputPartition] = Array.empty
override def readSchema(): StructType = schema
}
override def getTable(options: DataSourceOptions): Table = {
throw new IllegalArgumentException("requires a user-supplied schema")
}
override def getTable(options: DataSourceOptions, schema: StructType): Table = {
val userGivenSchema = schema
new SimpleBatchTable {
override def schema(): StructType = userGivenSchema
override def newScanBuilder(options: DataSourceOptions): ScanBuilder = {
new MyScanBuilder(userGivenSchema)
}
}
}
}
class ColumnarDataSourceV2 extends TableProvider {
class MyScanBuilder extends SimpleScanBuilder {
override def planInputPartitions(): Array[InputPartition] = {
Array(RangeInputPartition(0, 50), RangeInputPartition(50, 90))
}
override def createReaderFactory(): PartitionReaderFactory = {
ColumnarReaderFactory
}
}
override def getTable(options: DataSourceOptions): Table = new SimpleBatchTable {
override def newScanBuilder(options: DataSourceOptions): ScanBuilder = {
new MyScanBuilder()
}
}
}
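// Returns data as ColumnarBatches of up to BATCH_SIZE rows backed by on-heap column vectors
// instead of row-by-row InternalRows.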
object ColumnarReaderFactory extends PartitionReaderFactory {
private final val BATCH_SIZE = 20
override def supportColumnarReads(partition: InputPartition): Boolean = true
override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
throw new UnsupportedOperationException
}
override def createColumnarReader(partition: InputPartition): PartitionReader[ColumnarBatch] = {
val RangeInputPartition(start, end) = partition
new PartitionReader[ColumnarBatch] {
private lazy val i = new OnHeapColumnVector(BATCH_SIZE, IntegerType)
private lazy val j = new OnHeapColumnVector(BATCH_SIZE, IntegerType)
private lazy val batch = new ColumnarBatch(Array(i, j))
private var current = start
override def next(): Boolean = {
i.reset()
j.reset()
var count = 0
while (current < end && count < BATCH_SIZE) {
i.putInt(count, current)
j.putInt(count, -current)
current += 1
count += 1
}
if (count == 0) {
false
} else {
batch.setNumRows(count)
true
}
}
override def get(): ColumnarBatch = batch
override def close(): Unit = batch.close()
}
}
}
class PartitionAwareDataSource extends TableProvider {
class MyScanBuilder extends SimpleScanBuilder
with SupportsReportPartitioning {
override def planInputPartitions(): Array[InputPartition] = {
// Note that we don't have the same value of column `i` across partitions.
Array(
SpecificInputPartition(Array(1, 1, 3), Array(4, 4, 6)),
SpecificInputPartition(Array(2, 4, 4), Array(6, 2, 2)))
}
override def createReaderFactory(): PartitionReaderFactory = {
SpecificReaderFactory
}
override def outputPartitioning(): Partitioning = new MyPartitioning
}
override def getTable(options: DataSourceOptions): Table = new SimpleBatchTable {
override def newScanBuilder(options: DataSourceOptions): ScanBuilder = {
new MyScanBuilder()
}
}
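// Reports that the two partitions are clustered by column `i`, so a group-by on `i` (or on
// both `i` and `j`) needs no shuffle in the "partitioning reporting" test.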
class MyPartitioning extends Partitioning {
override def numPartitions(): Int = 2
override def satisfy(distribution: Distribution): Boolean = distribution match {
case c: ClusteredDistribution => c.clusteredColumns.contains("i")
case _ => false
}
}
}
case class SpecificInputPartition(i: Array[Int], j: Array[Int]) extends InputPartition
object SpecificReaderFactory extends PartitionReaderFactory {
override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
val p = partition.asInstanceOf[SpecificInputPartition]
new PartitionReader[InternalRow] {
private var current = -1
override def next(): Boolean = {
current += 1
current < p.i.length
}
override def get(): InternalRow = InternalRow(p.i(current), p.j(current))
override def close(): Unit = {}
}
}
}
class SchemaReadAttemptException(m: String) extends RuntimeException(m)
class SimpleWriteOnlyDataSource extends SimpleWritableDataSource {
override def getTable(options: DataSourceOptions): Table = {
new MyTable(options) {
override def schema(): StructType = {
throw new SchemaReadAttemptException("schema should not be read.")
}
}
}
}
|
WindCanDie/spark
|
sql/core/src/test/scala/org/apache/spark/sql/sources/v2/DataSourceV2Suite.scala
|
Scala
|
apache-2.0
| 24,475
|
package de.hpi.asg.breezetestgen.domain.components.brzcomponents
import de.hpi.asg.breezetestgen.domain.components.{BrzComponent, BrzComponentBehaviour, HandshakeComponent}
import BrzComponent._
class DecisionWait(id: HandshakeComponent.Id,
activate: SyncSpec,
inps: Seq[SyncSpec],
outs: Seq[SyncSpec]) extends BrzComponent(id) {
require(inps.size == outs.size, s"$id DecisionWait: sizes of inps and outs must match")
type Behaviour = DecisionWaitBehaviour
type C = DecisionWaitBehaviour.ControlState
type D = Option[Int] // index of requesting input
def behaviour(state: Option[HandshakeComponent.State[C, D]]): Behaviour =
new DecisionWaitBehaviour(state getOrElse DecisionWaitBehaviour.freshState)
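// State machine: Idle --activate req--> Activated --input req--> Executing --output ack--> Idle.
// An input request that arrives before activation is remembered as the Option[Int] index in
// the state's data and acted on once the activate request comes in.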
object DecisionWaitBehaviour {
sealed trait ControlState
case object Idle extends ControlState
case object Activated extends ControlState
case object Executing extends ControlState
val freshState: HandshakeComponent.State[C, D] = HandshakeComponent.State(Idle, None)
}
class DecisionWaitBehaviour(initState: HandshakeComponent.State[C, D]) extends BrzComponentBehaviour[C, D](initState) {
import DecisionWaitBehaviour._
info(s"$id DecisionWaitBehaviour created in state: $initState")
when(Idle) {
case Req(`activate`, None) =>
info(s"$id Requested!")
goto(Activated)
case Req(`activate`, Some(index)) =>
info(s"$id Requested and already have index")
request(outs(index))
goto(Executing)
case Req(inp, None) if inps contains inp =>
val index = inps.indexOf(inp)
info(s"$id was requested on input number $index, but aren't activated yet")
stay using Some(index)
}
when(Activated) {
case Req(inp, _) if inps contains inp =>
val index = inps.indexOf(inp)
info(s"$id was requested on input number $index")
request(outs(index))
goto(Executing)
}
when(Executing) {
case Ack(out, _) if outs contains out =>
val index = outs.indexOf(out)
info(s"$id got acknowledge on output number $index")
acknowledge(inps(index))
acknowledge(activate)
goto(Idle) using None
}
initialize()
}
}
|
0x203/BreezeTestGen
|
src/main/scala/de/hpi/asg/breezetestgen/domain/components/brzcomponents/DecisionWait.scala
|
Scala
|
mit
| 2,287
|
package org.jetbrains.plugins.scala
package project.converter
import java.io.File
import java.nio.charset.Charset
import java.nio.file.Path
import com.google.common.io.Files
import com.intellij.conversion.{ConversionContext, ModuleSettings}
import com.intellij.openapi.components.StorageScheme
import com.intellij.openapi.vfs.VfsUtil
import org.jdom.Element
import org.jdom.xpath.XPath
import org.jetbrains.plugins.scala.extensions._
import scala.annotation.nowarn
import scala.jdk.CollectionConverters._
/**
* @author Pavel Fatin
*/
private case class LibraryReference(level: Level, name: String) {
def resolveIn(context: ConversionContext): Option[LibraryData] =
level.librariesIn(context).find(_.name == name)
def addTo(module: ModuleSettings): Unit = {
rootManagerElementIn(module).addContent(createOrderEntry())
}
private def createOrderEntry(): Element = {
val entry = new Element("orderEntry")
entry.setAttribute("type", "library")
entry.setAttribute("name", name)
entry.setAttribute("level", level.title)
entry
}
def removeFrom(module: ModuleSettings): Unit = {
val element = findOrderEntryIn(module).getOrElse(throw new IllegalArgumentException(
s"Cannot remove library (${level.title}/$name}) dependency in module ${module.getModuleName}"))
element.detach()
}
private def findOrderEntryIn(module: ModuleSettings): Option[Element] = {
@nowarn("cat=deprecation")
val node = XPath.selectSingleNode(rootManagerElementIn(module),
s"orderEntry[@type='library' and @name='$name' and @level='${level.title}']")
Option(node.asInstanceOf[Element])
}
private def rootManagerElementIn(module: ModuleSettings): Element =
module.getComponentElement("NewModuleRootManager")
def libraryStorageFileIn(context: ConversionContext): Option[Path] = {
context.getStorageScheme match {
case StorageScheme.DIRECTORY_BASED => directoryBasedLibraryFileIn(context)
case StorageScheme.DEFAULT => Some(context.getProjectFile)
}
}
private def directoryBasedLibraryFileIn(context: ConversionContext): Option[Path] = {
val libraryFiles = {
val librariesDirectory = context.getSettingsBaseDir.resolve("libraries")
val files = Option(librariesDirectory.toFile.listFiles).map(_.toSeq).getOrElse(Seq.empty)
files.filter(_.getName.endsWith(".xml"))
}
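// Assumes the library name appears on the second line of each .idea/libraries/*.xml file.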
libraryFiles.find { file =>
val lines = Files.readLines(file, Charset.defaultCharset())
lines.get(1).contains("name=\"%s\"".format(name))
}
.map(_.toPath)
}
def deleteIn(context: ConversionContext): Unit = {
context.getStorageScheme match {
case StorageScheme.DIRECTORY_BASED => deleteDirectoryBasedLibrary(context)
case StorageScheme.DEFAULT => deleteProjectBasedLibrary(context)
}
}
private def deleteDirectoryBasedLibrary(context: ConversionContext): Path = {
val libraryFile = directoryBasedLibraryFileIn(context).getOrElse(
throw new IllegalArgumentException(s"Cannot delete project library: $name"))
// We have to resort to this workaround because IDEA's converter "restores" the file otherwise
invokeLater {
inWriteAction {
VfsUtil.findFile(libraryFile, true).delete(this)
}
}
libraryFile
}
private def deleteProjectBasedLibrary(context: ConversionContext): Unit = {
val libraryElement = {
val rootElement = context.getProjectSettings.getRootElement
XPath.selectSingleNode(rootElement,
s"component[@name='libraryTable']/library[@name='$name']").asInstanceOf[Element]: @nowarn("cat=deprecation")
}
if (libraryElement == null) {
throw new IllegalArgumentException(s"Cannot delete project library: $name")
}
libraryElement.detach()
}
}
private object LibraryReference {
def findAllIn(module: ModuleSettings): Seq[LibraryReference] = {
val libraryEntries = module.getOrderEntries.asScala.iterator
.filter(_.getAttributeValue("type") == "library")
libraryEntries
.map(LibraryReference(_))
.toSeq
}
def apply(element: Element): LibraryReference = {
LibraryReference(Level.fromTitle(element.getAttributeValue("level")), element.getAttributeValue("name"))
}
}
|
JetBrains/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/project/converter/LibraryReference.scala
|
Scala
|
apache-2.0
| 4,232
|
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package epic.features
import org.scalatest._
import org.scalatest.junit._
import org.scalatest.prop._
import org.junit.runner.RunWith
import scala.io.Source
@RunWith(classOf[JUnitRunner])
class PorterStemmerTest extends FunSuite with Checkers {
test("massive vocabulary test") {
val wStream = this.getClass.getClassLoader.getResourceAsStream("lang/eng/stem/vocabulary.txt")
val words = Source.fromInputStream(wStream).getLines()
val sStream = this.getClass.getClassLoader.getResourceAsStream("lang/eng/stem/stemmed_vocabulary.txt")
val stems = Source.fromInputStream(sStream).getLines()
try {
for ((w, s) <- words zip stems) {
expectResult(s, w)(PorterStemmer(w))
}
} finally {
wStream.close()
sStream.close()
}
}
}
|
maxim-rabinovich/epic
|
src/test/scala/epic/features/PorterStemmerTest.scala
|
Scala
|
apache-2.0
| 1,362
|
package org.jetbrains.bsp.project
import com.intellij.ide.util.projectWizard.EmptyModuleBuilder
import com.intellij.openapi.module.{Module, ModuleConfigurationEditor, ModuleType}
import com.intellij.openapi.roots.ui.configuration._
import javax.swing.Icon
import org.jetbrains.bsp.project.BspSyntheticModuleType._
import org.jetbrains.sbt.Sbt
class BspSyntheticModuleType extends ModuleType[EmptyModuleBuilder](Id) {
override def createModuleBuilder(): EmptyModuleBuilder = new EmptyModuleBuilder
override def getName: String = Name
override def getDescription: String = Description
override def getNodeIcon(isOpened: Boolean): Icon = Sbt.FolderIcon
}
object BspSyntheticModuleType {
def instance: BspSyntheticModuleType = new BspSyntheticModuleType
val Id = "BSP_SYNTHETIC_MODULE"
val Name = "bsp synthetic module"
val Description = "bsp synthetic modules map the project structure to IntelliJ and do not correspond to targets"
def unapply(m: Module): Option[Module] =
if (ModuleType.get(m).isInstanceOf[BspSyntheticModuleType]) Some(m)
else None
}
class BspSyntheticModuleEditorProvider extends ModuleConfigurationEditorProvider {
def createEditors(state: ModuleConfigurationState): Array[ModuleConfigurationEditor] = {
val module = state.getRootModel.getModule
ModuleType.get(module) match {
case _: BspSyntheticModuleType => Array(
new ContentEntriesEditor(module.getName, state),
new DefaultModuleConfigurationEditorFactoryImpl().createOutputEditor(state),
new ClasspathEditor(state)
)
case _ =>
ModuleConfigurationEditor.EMPTY
}
}
}
|
jastice/intellij-scala
|
bsp/src/org/jetbrains/bsp/project/bspModuleType.scala
|
Scala
|
apache-2.0
| 1,639
|
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.internal
import minitest.SimpleTestSuite
object PlatformSuite extends SimpleTestSuite {
test("isJS") {
assert(!Platform.isJVM, "!isJVM")
assert(Platform.isJS, "isJS")
}
test("recommendedBatchSize default ") {
assertEquals(Platform.recommendedBatchSize, 512)
}
test("autoCancelableRunLoops") {
assert(Platform.autoCancelableRunLoops)
}
test("localContextPropagation") {
assert(!Platform.localContextPropagation)
}
test("fusionMaxStackDepth") {
assertEquals(Platform.fusionMaxStackDepth, 31)
}
}
|
alexandru/monifu
|
monix-execution/js/src/test/scala/monix/execution/internal/PlatformSuite.scala
|
Scala
|
apache-2.0
| 1,243
|
import java.io.Reader
package object mg {
implicit def ReaderOps(value: Reader) = new ReaderOps(value)
}
class ReaderOps(val reader: Reader) extends AnyVal {
/** Reads the reader into a string, unless the size limit is exceeded,
 * in which case an IllegalArgumentException is thrown.
 *
 * Closes the reader when finished. */
def readAll(limit: Int = 1000000): String = {
try {
val builder = new java.lang.StringBuilder()
val cbuf = new Array[Char](1000)
var numChars = 0
var totalChars = 0
while ({numChars = reader.read(cbuf); numChars} >= 0) {
builder.append(cbuf, 0, numChars)
totalChars += numChars
if (totalChars > limit)
throw new IllegalArgumentException(s"Limit of $limit chars exceeded while reading: $reader")
}
builder.toString()
} finally {reader.close()}
}
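// Illustrative usage (with the implicit conversion from the `mg` package object in scope):
//   new java.io.StringReader("hello").readAll()          // "hello"
//   new java.io.StringReader("hello").readAll(limit = 3) // throws IllegalArgumentException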
}
|
benhutchison/mg-web
|
src/main/scala/mg/package.scala
|
Scala
|
apache-2.0
| 864
|
package com.twitter.scalding.reducer_estimation
import cascading.flow.FlowException
import com.twitter.scalding._
import com.twitter.scalding.platform.{ HadoopPlatformJobTest, HadoopSharedPlatformTest }
import java.io.FileNotFoundException
import org.scalatest.{ Matchers, WordSpec }
import scala.collection.JavaConverters._
object HipJob {
val InSrcFileSize = 2496L
val inPath = getClass.getResource("/hipster.txt") // file size is 2496 bytes
val inSrc = TextLine(inPath.toString)
val InScoresFileSize = 174L
val inScores = TypedTsv[(String, Double)](getClass.getResource("/scores.tsv").toString) // file size is 174 bytes
val out = TypedTsv[Double]("output")
val counts = TypedTsv[(String, Int)]("counts.tsv")
val size = TypedTsv[Long]("size.tsv")
val correct = Map("hello" -> 1, "goodbye" -> 1, "world" -> 2)
}
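// HipJob builds a three-step word-count/join/sum pipeline so the estimator is exercised across
// multiple M/R steps; the Simple*/GroupAll/MapOnly jobs below each isolate one estimation
// scenario (plain input, glob input, in-memory source, missing file, group-all, map-only).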
class HipJob(args: Args, customConfig: Config) extends Job(args) {
import HipJob._
override def config = super.config ++ customConfig.toMap.toMap
def tokenize(text: String): TraversableOnce[String] =
text.toLowerCase
.replaceAll("[^a-zA-Z0-9\\s]", "")
.split("\\s+")
val wordCounts = TypedPipe.from(inSrc)
.flatMap(tokenize)
.map(_ -> 1)
.group
.sum
val scores = TypedPipe.from(inScores).group
wordCounts.leftJoin(scores)
.mapValues{ case (count, score) => (count, score.getOrElse(0.0)) }
// force another M/R step - should use reducer estimation
.toTypedPipe
.map{ case (word, (count, score)) => (count, score) }
.group.sum
// force another M/R step - this should force 1 reducer because it is essentially a groupAll
.toTypedPipe.values.sum
.write(out)
}
class SimpleJob(args: Args, customConfig: Config) extends Job(args) {
import HipJob._
override def config = super.config ++ customConfig.toMap.toMap
TypedPipe.from(inSrc)
.flatMap(_.split("[^\\w]+"))
.map(_.toLowerCase -> 1)
.group
// force the number of reducers to two, to test with/without estimation
.withReducers(2)
.sum
.write(counts)
}
class SimpleGlobJob(args: Args, customConfig: Config) extends Job(args) {
import HipJob._
val inSrcGlob = inPath.toString.replace("hipster", "*")
val inSrc = TextLine(inSrcGlob)
override def config = super.config ++ customConfig.toMap.toMap
TypedPipe.from(inSrc)
.flatMap(_.split("[^\\w]+"))
.map(_.toLowerCase -> 1)
.group
// force the number of reducers to two, to test with/without estimation
.withReducers(2)
.sum
.write(counts)
}
class SimpleMemoryJob(args: Args, customConfig: Config) extends Job(args) {
import HipJob._
val inSrc = IterableSource(List(
"Direct trade American Apparel squid umami tote bag. Lo-fi XOXO gluten-free meh literally, typewriter readymade wolf salvia whatever drinking vinegar organic. Four loko literally bicycle rights drinking vinegar Cosby sweater hella stumptown. Dreamcatcher iPhone 90's organic chambray cardigan, wolf fixie gluten-free Brooklyn four loko. Mumblecore ennui twee, 8-bit food truck sustainable tote bag Williamsburg mixtape biodiesel. Semiotics Helvetica put a bird on it, roof party fashion axe organic post-ironic readymade Wes Anderson Pinterest keffiyeh. Craft beer meggings sartorial, butcher Marfa kitsch art party mustache Brooklyn vinyl.",
"Wolf flannel before they sold out vinyl, selfies four loko Bushwick Banksy Odd Future. Chillwave banh mi iPhone, Truffaut shabby chic craft beer keytar DIY. Scenester selvage deep v YOLO paleo blog photo booth fap. Sustainable wolf mixtape small batch skateboard, pop-up brunch asymmetrical seitan butcher Thundercats disrupt twee Etsy. You probably haven't heard of them freegan skateboard before they sold out, mlkshk pour-over Echo Park keytar retro farm-to-table. Tattooed sustainable beard, Helvetica Wes Anderson pickled vinyl yr pop-up Vice. Wolf bespoke lomo photo booth ethnic cliche."))
override def config = super.config ++ customConfig.toMap.toMap
TypedPipe.from(inSrc)
.flatMap(_.split("[^\\w]+"))
.map(_.toLowerCase -> 1)
.group
// force the number of reducers to two, to test with/without estimation
.withReducers(2)
.sum
.write(counts)
}
class SimpleFileNotFoundJob(args: Args, customConfig: Config) extends Job(args) {
import HipJob._
val inSrc = TextLine("file.txt")
override def config = super.config ++ customConfig.toMap.toMap
TypedPipe.from(inSrc)
.flatMap(_.split("[^\\w]+"))
.map(_.toLowerCase -> 1)
.group
// force the number of reducers to two, to test with/without estimation
.withReducers(2)
.sum
.write(counts)
}
class GroupAllJob(args: Args, customConfig: Config) extends Job(args) {
import HipJob._
override def config = super.config ++ customConfig.toMap.toMap
TypedPipe.from(inSrc)
.flatMap(_.split("[^\\w]+"))
.groupAll
.size
.values
.write(size)
}
class SimpleMapOnlyJob(args: Args, customConfig: Config) extends Job(args) {
import HipJob._
override def config = super.config ++ customConfig.toMap.toMap
// simple job with no reduce phase
TypedPipe.from(inSrc)
.flatMap(_.split("[^\\w]+"))
.write(TypedTsv[String]("mapped_output"))
}
class ReducerEstimatorTest extends WordSpec with Matchers with HadoopSharedPlatformTest {
import HipJob._
"Single-step job with reducer estimator" should {
"run with correct number of reducers" in {
val customConfig = Config.empty.addReducerEstimator(classOf[InputSizeReducerEstimator]) +
(InputSizeReducerEstimator.BytesPerReducer -> (1L << 10).toString)
HadoopPlatformJobTest(new SimpleJob(_, customConfig), cluster)
.inspectCompletedFlow { flow =>
val steps = flow.getFlowSteps.asScala
steps should have size 1
val conf = Config.fromHadoop(steps.head.getConfig)
conf.getNumReducers should contain (2)
conf.get(ReducerEstimatorConfig.originalNumReducers) should be (None)
}
.run()
}
"run with correct number of reducers when we have a glob pattern in path" in {
val customConfig = Config.empty.addReducerEstimator(classOf[InputSizeReducerEstimator]) +
(InputSizeReducerEstimator.BytesPerReducer -> (1L << 10).toString) +
(Config.ReducerEstimatorOverride -> "true")
HadoopPlatformJobTest(new SimpleGlobJob(_, customConfig), cluster)
.inspectCompletedFlow { flow =>
val steps = flow.getFlowSteps.asScala
steps should have size 1
val conf = Config.fromHadoop(steps.head.getConfig)
conf.getNumReducers should contain (3)
conf.get(ReducerEstimatorConfig.originalNumReducers) should contain ("2")
}
.run()
}
"run with correct number of reducers when overriding set values" in {
val customConfig = Config.empty.addReducerEstimator(classOf[InputSizeReducerEstimator]) +
(InputSizeReducerEstimator.BytesPerReducer -> (1L << 10).toString) +
(Config.ReducerEstimatorOverride -> "true")
HadoopPlatformJobTest(new SimpleJob(_, customConfig), cluster)
.inspectCompletedFlow { flow =>
val steps = flow.getFlowSteps.asScala
steps should have size 1
val conf = Config.fromHadoop(steps.head.getConfig)
conf.getNumReducers should contain (3)
conf.get(ReducerEstimatorConfig.originalNumReducers) should contain ("2")
}
.run()
}
"respect cap when estimated reducers is above the configured max" in {
val customConfig = Config.empty.addReducerEstimator(classOf[InputSizeReducerEstimator]) +
(Config.ReducerEstimatorOverride -> "true") +
// 1 reducer per byte, should give us a large number
(InputSizeReducerEstimator.BytesPerReducer -> 1.toString) +
(ReducerEstimatorConfig.maxEstimatedReducersKey -> 10.toString)
HadoopPlatformJobTest(new SimpleJob(_, customConfig), cluster)
.inspectCompletedFlow { flow =>
val steps = flow.getFlowSteps.asScala
steps should have size 1
val conf = Config.fromHadoop(steps.head.getConfig)
conf.get(ReducerEstimatorConfig.estimatedNumReducers) should contain ("2496")
conf.get(ReducerEstimatorConfig.cappedEstimatedNumReducersKey) should contain ("10")
conf.getNumReducers should contain (10)
}
.run()
}
"ignore memory source in input size estimation" in {
val customConfig = Config.empty.addReducerEstimator(classOf[InputSizeReducerEstimator]) +
(InputSizeReducerEstimator.BytesPerReducer -> (1L << 10).toString) +
(Config.ReducerEstimatorOverride -> "true")
HadoopPlatformJobTest(new SimpleMemoryJob(_, customConfig), cluster)
.inspectCompletedFlow { flow =>
val steps = flow.getFlowSteps.asScala
steps should have size 1
val conf = Config.fromHadoop(steps.head.getConfig)
conf.getNumReducers should contain (2)
conf.get(ReducerEstimatorConfig.originalNumReducers) should contain ("2")
}
.run()
}
"throw FileNotFoundException during estimation" in {
val customConfig = Config.empty.addReducerEstimator(classOf[InputSizeReducerEstimator]) +
(InputSizeReducerEstimator.BytesPerReducer -> (1L << 10).toString) +
(Config.ReducerEstimatorOverride -> "true")
HadoopPlatformJobTest(new SimpleFileNotFoundJob(_, customConfig), cluster)
.runExpectFailure {
case error: FlowException =>
error.getCause.getClass should be(classOf[FileNotFoundException])
}
}
}
"Group-all job with reducer estimator" should {
"run with correct number of reducers (i.e. 1)" in {
val customConfig = Config.empty.addReducerEstimator(classOf[InputSizeReducerEstimator]) +
(InputSizeReducerEstimator.BytesPerReducer -> (1L << 10).toString)
HadoopPlatformJobTest(new GroupAllJob(_, customConfig), cluster)
.inspectCompletedFlow { flow =>
val steps = flow.getFlowSteps.asScala
steps should have size 1
val conf = Config.fromHadoop(steps.head.getConfig)
conf.getNumReducers should contain (1)
}
.run()
}
}
"Multi-step job with reducer estimator" should {
"run with correct number of reducers in each step" in {
val customConfig = Config.empty.addReducerEstimator(classOf[InputSizeReducerEstimator]) +
(InputSizeReducerEstimator.BytesPerReducer -> (1L << 10).toString)
HadoopPlatformJobTest(new HipJob(_, customConfig), cluster)
.sink[Double](out)(_.head shouldBe 2.86 +- 0.0001)
.inspectCompletedFlow { flow =>
val steps = flow.getFlowSteps.asScala
val reducers = steps.map(_.getConfig.getInt(Config.HadoopNumReducers, 0)).toList
reducers shouldBe List(3, 1, 1)
}
.run()
}
}
"Map-only job with reducer estimator" should {
"not set num reducers" in {
val customConfig = Config.empty.addReducerEstimator(classOf[InputSizeReducerEstimator]) +
(InputSizeReducerEstimator.BytesPerReducer -> (1L << 10).toString)
HadoopPlatformJobTest(new SimpleMapOnlyJob(_, customConfig), cluster)
.inspectCompletedFlow { flow =>
val steps = flow.getFlowSteps.asScala
steps should have size 1
val conf = Config.fromHadoop(steps.head.getConfig)
val numReducers = conf.getNumReducers
assert(!numReducers.isDefined || numReducers.get == 0, "Reducers should be 0")
}
.run()
}
}
}
|
tdyas/scalding
|
scalding-estimators-test/src/test/scala/com/twitter/scalding/reducer_estimation/ReducerEstimatorTest.scala
|
Scala
|
apache-2.0
| 11,607
|
package com.datastax.spark.connector.writer
import java.io.IOException
import com.datastax.spark.connector.mapper.DefaultColumnMapper
import scala.collection.JavaConversions._
import com.datastax.spark.connector._
import com.datastax.spark.connector.cql._
import com.datastax.spark.connector.SomeColumns
import com.datastax.spark.connector.types.{BigIntType, TextType, IntType, TypeConverter}
import com.datastax.spark.connector.embedded._
case class KeyValue(key: Int, group: Long, value: String)
case class KeyValueWithTransient(key: Int, group: Long, value: String, @transient transientField: String)
case class KeyValueWithTTL(key: Int, group: Long, value: String, ttl: Int)
case class KeyValueWithTimestamp(key: Int, group: Long, value: String, timestamp: Long)
case class KeyValueWithConversion(key: String, group: Int, value: String)
case class ClassWithWeirdProps(devil: String, cat: Int, value: String)
class SuperKeyValue(val key: Int, val value: String) extends Serializable
class SubKeyValue(k: Int, v: String, val group: Long) extends SuperKeyValue(k, v)
case class CustomerId(id: String)
object CustomerIdConverter extends TypeConverter[String] {
def targetTypeTag = scala.reflect.runtime.universe.typeTag[String]
def convertPF = { case CustomerId(id) => id }
}
class TableWriterSpec extends SparkCassandraITFlatSpecBase {
useCassandraConfig(Seq("cassandra-default.yaml.template"))
useSparkConf(defaultSparkConf)
val conn = CassandraConnector(Set(EmbeddedCassandra.getHost(0)))
val ks = "TableWriterSpec"
conn.withSessionDo { session =>
session.execute(s"""DROP KEYSPACE IF EXISTS "$ks"""")
session.execute(s"""CREATE KEYSPACE IF NOT EXISTS "$ks" WITH REPLICATION = { 'class': 'SimpleStrategy', 'replication_factor': 1 }""")
for (x <- 1 to 19) {
session.execute(s"""CREATE TABLE IF NOT EXISTS "$ks".key_value_$x (key INT, group BIGINT, value TEXT, PRIMARY KEY (key, group))""")
}
session.execute(s"""CREATE TABLE IF NOT EXISTS "$ks".nulls (key INT PRIMARY KEY, text_value TEXT, int_value INT)""")
session.execute(s"""CREATE TABLE IF NOT EXISTS "$ks".collections (key INT PRIMARY KEY, l list<text>, s set<text>, m map<text, text>)""")
session.execute(s"""CREATE TABLE IF NOT EXISTS "$ks".blobs (key INT PRIMARY KEY, b blob)""")
session.execute(s"""CREATE TABLE IF NOT EXISTS "$ks".counters (pkey INT, ckey INT, c1 counter, c2 counter, PRIMARY KEY (pkey, ckey))""")
session.execute(s"""CREATE TABLE IF NOT EXISTS "$ks".counters2 (pkey INT PRIMARY KEY, c counter)""")
session.execute(s"""CREATE TABLE IF NOT EXISTS "$ks".\\"camelCase\\" (\\"primaryKey\\" INT PRIMARY KEY, \\"textValue\\" text)""")
session.execute(s"""CREATE TABLE IF NOT EXISTS "$ks".single_column (pk INT PRIMARY KEY)""")
session.execute(s"""CREATE TYPE "$ks".address (street text, city text, zip int)""")
session.execute(s"""CREATE TABLE IF NOT EXISTS "$ks".udts(key INT PRIMARY KEY, name text, addr frozen<address>)""")
}
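// Checks that the given key_value_N table contains exactly the three rows written by each test.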
private def verifyKeyValueTable(tableName: String): Unit = {
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM "$ks".""" + tableName).all()
result should have size 3
for (row <- result) {
Some(row.getInt(0)) should contain oneOf(1, 2, 3)
Some(row.getLong(1)) should contain oneOf(1, 2, 3)
Some(row.getString(2)) should contain oneOf("value1", "value2", "value3")
}
}
}
"A TableWriter" should "write RDD of tuples to an existing table" in {
val col = Seq((1, 1L, "value1"), (2, 2L, "value2"), (3, 3L, "value3"))
sc.parallelize(col).saveToCassandra(ks, "key_value_1", SomeColumns("key", "group", "value"))
verifyKeyValueTable("key_value_1")
}
it should "write RDD of tuples to a new table" in {
val pkey = ColumnDef("key", PartitionKeyColumn, IntType)
val group = ColumnDef("group", ClusteringColumn(0), BigIntType)
val value = ColumnDef("value", RegularColumn, TextType)
val table = TableDef(ks, "new_kv_table", Seq(pkey), Seq(group), Seq(value))
val rows = Seq((1, 1L, "value1"), (2, 2L, "value2"), (3, 3L, "value3"))
sc.parallelize(rows).saveAsCassandraTableEx(table, SomeColumns("key", "group", "value"))
verifyKeyValueTable("new_kv_table")
}
it should "write RDD of tuples applying proper data type conversions" in {
val col = Seq(("1", "1", "value1"), ("2", "2", "value2"), ("3", "3", "value3"))
sc.parallelize(col).saveToCassandra(ks, "key_value_2")
verifyKeyValueTable("key_value_2")
}
it should "write RDD of case class objects" in {
val col = Seq(KeyValue(1, 1L, "value1"), KeyValue(2, 2L, "value2"), KeyValue(3, 3L, "value3"))
sc.parallelize(col).saveToCassandra(ks, "key_value_3")
verifyKeyValueTable("key_value_3")
}
it should "write RDD of case class objects to a new table using auto mapping" in {
val col = Seq(KeyValue(1, 1L, "value1"), KeyValue(2, 2L, "value2"), KeyValue(3, 3L, "value3"))
sc.parallelize(col).saveAsCassandraTable(ks, "new_kv_table_from_case_class")
verifyKeyValueTable("new_kv_table_from_case_class")
}
it should "write RDD of case class objects applying proper data type conversions" in {
val col = Seq(
KeyValueWithConversion("1", 1, "value1"),
KeyValueWithConversion("2", 2, "value2"),
KeyValueWithConversion("3", 3, "value3")
)
sc.parallelize(col).saveToCassandra(ks, "key_value_4")
verifyKeyValueTable("key_value_4")
}
it should "write RDD of CassandraRow objects" in {
val col = Seq(
CassandraRow.fromMap(Map("key" -> 1, "group" -> 1L, "value" -> "value1")),
CassandraRow.fromMap(Map("key" -> 2, "group" -> 2L, "value" -> "value2")),
CassandraRow.fromMap(Map("key" -> 3, "group" -> 3L, "value" -> "value3"))
)
sc.parallelize(col).saveToCassandra(ks, "key_value_5")
verifyKeyValueTable("key_value_5")
}
it should "write RDD of CassandraRow objects applying proper data type conversions" in {
val col = Seq(
CassandraRow.fromMap(Map("key" -> "1", "group" -> BigInt(1), "value" -> "value1")),
CassandraRow.fromMap(Map("key" -> "2", "group" -> BigInt(2), "value" -> "value2")),
CassandraRow.fromMap(Map("key" -> "3", "group" -> BigInt(3), "value" -> "value3"))
)
sc.parallelize(col).saveToCassandra(ks, "key_value_6")
verifyKeyValueTable("key_value_6")
}
it should "write RDD of tuples to a table with camel case column names" in {
val col = Seq((1, "value1"), (2, "value2"), (3, "value3"))
sc.parallelize(col).saveToCassandra(ks, "camelCase", SomeColumns("primaryKey", "textValue"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM "$ks"."camelCase"""").all()
result should have size 3
for (row <- result) {
Some(row.getInt(0)) should contain oneOf(1, 2, 3)
Some(row.getString(1)) should contain oneOf("value1", "value2", "value3")
}
}
}
it should "write empty values" in {
val col = Seq((1, 1L, None))
sc.parallelize(col).saveToCassandra(ks, "key_value_7", SomeColumns("key", "group", "value"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM "$ks".key_value_7""").all()
result should have size 1
for (row <- result) {
row.getString(2) should be (null)
}
}
}
it should "write null values" in {
val key = 1.asInstanceOf[AnyRef]
val row = new CassandraRow(IndexedSeq("key", "text_value", "int_value"), IndexedSeq(key, null, null))
sc.parallelize(Seq(row)).saveToCassandra(ks, "nulls")
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM "$ks".nulls""").all()
result should have size 1
for (r <- result) {
r.getInt(0) shouldBe key
r.isNull(1) shouldBe true
r.isNull(2) shouldBe true
}
}
}
it should "write only specific column data if ColumnNames is passed as 'columnNames'" in {
val col = Seq((1, 1L, None))
sc.parallelize(col).saveToCassandra(ks, "key_value_8", SomeColumns("key", "group"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM "$ks".key_value_8""").all()
result should have size 1
for (row <- result) {
row.getInt(0) should be (1)
row.getString(2) should be (null)
}
}
}
it should "distinguish (deprecated) implicit `seqToSomeColumns`" in {
val col = Seq((2, 1L, None))
sc.parallelize(col).saveToCassandra(ks, "key_value_9", SomeColumns("key", "group"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM "$ks".key_value_9""").all()
result should have size 1
for (row <- result) {
row.getInt(0) should be (2)
row.getString(2) should be (null)
}
}
}
it should "write collections" in {
val col = Seq(
(1, Vector("item1", "item2"), Set("item1", "item2"), Map("key1" -> "value1", "key2" -> "value2")),
(2, Vector.empty[String], Set.empty[String], Map.empty[String, String]))
sc.parallelize(col).saveToCassandra(ks, "collections", SomeColumns("key", "l", "s", "m"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM "$ks".collections""").all()
result should have size 2
val rows = result.groupBy(_.getInt(0)).mapValues(_.head)
val row0 = rows(1)
val row1 = rows(2)
row0.getList("l", classOf[String]).toSeq shouldEqual Seq("item1", "item2")
row0.getSet("s", classOf[String]).toSeq shouldEqual Seq("item1", "item2")
row0.getMap("m", classOf[String], classOf[String]).toMap shouldEqual Map("key1" -> "value1", "key2" -> "value2")
row1.isNull("l") shouldEqual true
row1.isNull("m") shouldEqual true
row1.isNull("s") shouldEqual true
}
}
it should "write blobs" in {
val col = Seq((1, Some(Array[Byte](0, 1, 2, 3))), (2, None))
sc.parallelize(col).saveToCassandra(ks, "blobs", SomeColumns("key", "b"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM "$ks".blobs""").all()
result should have size 2
val rows = result.groupBy(_.getInt(0)).mapValues(_.head)
val row0 = rows(1)
val row1 = rows(2)
row0.getBytes("b").remaining shouldEqual 4
row1.isNull("b") shouldEqual true
}
}
it should "increment and decrement counters" in {
val col1 = Seq((0, 0, 1, 1))
sc.parallelize(col1).saveToCassandra(ks, "counters", SomeColumns("pkey", "ckey", "c1", "c2"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM "$ks".counters""").one()
result.getLong("c1") shouldEqual 1L
result.getLong("c2") shouldEqual 1L
}
val col2 = Seq((0, 0, 1))
sc.parallelize(col1).saveToCassandra(ks, "counters", SomeColumns("pkey", "ckey", "c2"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM "$ks".counters""").one()
result.getLong("c1") shouldEqual 1L
result.getLong("c2") shouldEqual 2L
}
}
it should "increment and decrement counters in batches" in {
val rowCount = 10000
val col = for (i <- 1 to rowCount) yield (i, 1)
sc.parallelize(col).saveToCassandra(ks, "counters2", SomeColumns("pkey", "c"))
sc.cassandraTable(ks, "counters2").count should be(rowCount)
}
it should "write values of user-defined classes" in {
TypeConverter.registerConverter(CustomerIdConverter)
val col = Seq((1, 1L, CustomerId("foo")))
sc.parallelize(col).saveToCassandra(ks, "key_value_10", SomeColumns("key", "group", "value"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM "$ks".key_value_10""").all()
result should have size 1
for (row <- result)
row.getString(2) shouldEqual "foo"
}
}
it should "write values of user-defined-types in Cassandra" in {
val address = UDTValue.fromMap(Map("city" -> "Warsaw", "zip" -> 10000, "street" -> "Marszałkowska"))
val col = Seq((1, "Joe", address))
sc.parallelize(col).saveToCassandra(ks, "udts", SomeColumns("key", "name", "addr"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT key, name, addr FROM "$ks".udts""").all()
result should have size 1
for (row <- result) {
row.getInt(0) shouldEqual 1
row.getString(1) shouldEqual "Joe"
row.getUDTValue(2).getString("city") shouldEqual "Warsaw"
row.getUDTValue(2).getInt("zip") shouldEqual 10000
}
}
}
it should "write to single-column tables" in {
val col = Seq(1, 2, 3, 4, 5).map(Tuple1.apply)
sc.parallelize(col).saveToCassandra(ks, "single_column", SomeColumns("pk"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM "$ks".single_column""").all()
result should have size 5
result.map(_.getInt(0)).toSet should be (Set(1, 2, 3, 4, 5))
}
}
it should "throw IOException if table is not found" in {
val col = Seq(("1", "1", "value1"), ("2", "2", "value2"), ("3", "3", "value3"))
intercept[IOException] {
sc.parallelize(col).saveToCassandra(ks, "unknown_table")
}
}
it should "write RDD of case class objects with default TTL" in {
val col = Seq(KeyValue(1, 1L, "value1"), KeyValue(2, 2L, "value2"), KeyValue(3, 3L, "value3"))
sc.parallelize(col).saveToCassandra(ks, "key_value_11", writeConf = WriteConf(ttl = TTLOption.constant(100)))
verifyKeyValueTable("key_value_11")
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT TTL(value) FROM "$ks".key_value_11""").all()
result should have size 3
result.foreach(_.getInt(0) should be > 50)
result.foreach(_.getInt(0) should be <= 100)
}
}
it should "write RDD of case class objects with default timestamp" in {
val col = Seq(KeyValue(1, 1L, "value1"), KeyValue(2, 2L, "value2"), KeyValue(3, 3L, "value3"))
val ts = System.currentTimeMillis() - 1000L
sc.parallelize(col).saveToCassandra(ks, "key_value_12", writeConf = WriteConf(timestamp = TimestampOption.constant(ts * 1000L)))
verifyKeyValueTable("key_value_12")
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT WRITETIME(value) FROM "$ks".key_value_12""").all()
result should have size 3
result.foreach(_.getLong(0) should be (ts * 1000L))
}
}
it should "write RDD of case class objects with per-row TTL" in {
val col = Seq(KeyValueWithTTL(1, 1L, "value1", 100), KeyValueWithTTL(2, 2L, "value2", 200), KeyValueWithTTL(3, 3L, "value3", 300))
sc.parallelize(col).saveToCassandra(ks, "key_value_13", writeConf = WriteConf(ttl = TTLOption.perRow("ttl")))
verifyKeyValueTable("key_value_13")
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT key, TTL(value) FROM "$ks".key_value_13""").all()
result should have size 3
result.foreach(row => {
row.getInt(1) should be > (100 * row.getInt(0) - 50)
row.getInt(1) should be <= (100 * row.getInt(0))
})
}
}
it should "write RDD of case class objects with per-row timestamp" in {
val ts = System.currentTimeMillis() - 1000L
val col = Seq(KeyValueWithTimestamp(1, 1L, "value1", ts * 1000L + 100L), KeyValueWithTimestamp(2, 2L, "value2", ts * 1000L + 200L), KeyValueWithTimestamp(3, 3L, "value3", ts * 1000L + 300L))
sc.parallelize(col).saveToCassandra(ks, "key_value_14", writeConf = WriteConf(timestamp = TimestampOption.perRow("timestamp")))
verifyKeyValueTable("key_value_14")
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT key, WRITETIME(value) FROM "$ks".key_value_14""").all()
result should have size 3
result.foreach(row => {
row.getLong(1) should be (ts * 1000L + row.getInt(0) * 100L)
})
}
}
it should "write RDD of case class objects with per-row TTL with custom mapping" in {
val col = Seq(KeyValueWithTTL(1, 1L, "value1", 100), KeyValueWithTTL(2, 2L, "value2", 200), KeyValueWithTTL(3, 3L, "value3", 300))
sc.parallelize(col).saveToCassandra(ks, "key_value_15", writeConf = WriteConf(ttl = TTLOption.perRow("ttl_placeholder")))(
conn, DefaultRowWriter.factory(new DefaultColumnMapper(Map("ttl" -> "ttl_placeholder"))))
verifyKeyValueTable("key_value_15")
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT key, TTL(value) FROM "$ks".key_value_15""").all()
result should have size 3
result.foreach(row => {
row.getInt(1) should be > (100 * row.getInt(0) - 50)
row.getInt(1) should be <= (100 * row.getInt(0))
})
}
}
it should "write RDD of case class objects with per-row timestamp with custom mapping" in {
val ts = System.currentTimeMillis() - 1000L
val col = Seq(KeyValueWithTimestamp(1, 1L, "value1", ts * 1000L + 100L), KeyValueWithTimestamp(2, 2L, "value2", ts * 1000L + 200L), KeyValueWithTimestamp(3, 3L, "value3", ts * 1000L + 300L))
sc.parallelize(col).saveToCassandra(ks, "key_value_16",
writeConf = WriteConf(timestamp = TimestampOption.perRow("timestamp_placeholder")))(
conn, DefaultRowWriter.factory(new DefaultColumnMapper(Map("timestamp" -> "timestamp_placeholder"))))
verifyKeyValueTable("key_value_16")
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT key, WRITETIME(value) FROM "$ks".key_value_16""").all()
result should have size 3
result.foreach(row => {
row.getLong(1) should be (ts * 1000L + row.getInt(0) * 100L)
})
}
}
it should "write RDD of case class objects applying proper data type conversions and aliases" in {
val col = Seq(
ClassWithWeirdProps("1", 1, "value1"),
ClassWithWeirdProps("2", 2, "value2"),
ClassWithWeirdProps("3", 3, "value3")
)
sc.parallelize(col).saveToCassandra(ks, "key_value_17", columns = SomeColumns(
"key" as "devil", "group" as "cat", "value"
))
verifyKeyValueTable("key_value_17")
}
it should "write RDD of objects with inherited fields" in {
val col = Seq(
new SubKeyValue(1, "value1", 1L),
new SubKeyValue(2, "value2", 2L),
new SubKeyValue(3, "value3", 3L)
)
sc.parallelize(col).saveToCassandra(ks, "key_value_18")
verifyKeyValueTable("key_value_18")
}
it should "write RDD of case class objects with transient fields" in {
val col = Seq(KeyValueWithTransient(1, 1L, "value1", "a"), KeyValueWithTransient(2, 2L, "value2", "b"), KeyValueWithTransient(3, 3L, "value3", "c"))
sc.parallelize(col).saveToCassandra(ks, "key_value_19")
verifyKeyValueTable("key_value_19")
}
}
|
willgalen/REVEL
|
spark-cassandra-connector/src/it/scala/com/datastax/spark/connector/writer/TableWriterSpec.scala
|
Scala
|
apache-2.0
| 18,780
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.iota.fey
import java.io.File
import akka.actor.{ActorRef, PoisonPill, Props}
import akka.testkit.{EventFilter, TestActorRef, TestProbe}
import ch.qos.logback.classic.Level
import play.api.libs.json._
import java.nio.file.{Files, Paths}
import scala.collection.mutable
import scala.io.Source
import scala.concurrent.duration._
class UtilsSpec extends BaseAkkaSpec{
"Global variable loadedJars" should{
"be empty when starting" in {
Utils.loadedJars.remove("fey-test-actor.jar")
Utils.loadedJars shouldBe empty
}
}
"Executing getFilesInDirectory" should {
"return a list of all Files in the directory" in {
val files = Utils.getFilesInDirectory(CONFIG.JAR_REPOSITORY)
files should not be empty
files should have size(2)
val filepath = files.map(_.getAbsolutePath)
filepath should contain(s"${CONFIG.JAR_REPOSITORY}/fey-test-actor.jar")
filepath should contain(s"${CONFIG.JAR_REPOSITORY}/dynamic")
}
}
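  // The assertions below exercise Utils.loadedJars, which maps a jar file name to a tuple of
  // (class loader, map of fully-qualified class name -> loaded class).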
"Executing loadActorClassFromJar with not yet loaded jar" should {
"result in new entry to global variable loadedJars" in {
Utils.loadActorClassFromJar(s"${CONFIG.JAR_REPOSITORY}/fey-test-actor.jar", "org.apache.iota.fey.TestActor","fey-test-actor.jar")
Utils.loadedJars should have size(1)
Utils.loadedJars should contain key("fey-test-actor.jar")
Utils.loadedJars.get("fey-test-actor.jar").get._2 should have size(1)
Utils.loadedJars.get("fey-test-actor.jar").get._2 should contain key("org.apache.iota.fey.TestActor")
}
}
"Executing loadActorClassFromJar with loaded jar but a different class" should {
"not add new entry to loadedJars" in {
val loader = Utils.loadedJars.get("fey-test-actor.jar").get._1
Utils.loadActorClassFromJar(s"${CONFIG.JAR_REPOSITORY}/fey-test-actor.jar", "org.apache.iota.fey.TestActor_2","fey-test-actor.jar")
Utils.loadedJars should have size(1)
Utils.loadedJars should contain key("fey-test-actor.jar")
Utils.loadedJars.get("fey-test-actor.jar").get._1 should equal(loader)
}
"add a new classpath to the loadedJars value map" in{
Utils.loadedJars.get("fey-test-actor.jar").get._2 should have size(2)
Utils.loadedJars.get("fey-test-actor.jar").get._2 should contain key("org.apache.iota.fey.TestActor")
Utils.loadedJars.get("fey-test-actor.jar").get._2 should contain key("org.apache.iota.fey.TestActor_2")
}
}
"Executing loadActorClassFromJar with loaded jar and class" should {
"not reload the jar" in {
val loader = Utils.loadedJars.get("fey-test-actor.jar").get._1
Utils.loadActorClassFromJar(s"${CONFIG.JAR_REPOSITORY}/fey-test-actor.jar", "org.apache.iota.fey.TestActor","fey-test-actor.jar")
Utils.loadedJars.get("fey-test-actor.jar").get._1 should equal(loader)
}
}
var actorRef: ActorRef = _
"Initializing an actor from a clazz returned by loadActorClassFromJar" should {
"result in creation of a GenericActor" in {
val clazz = Utils.loadActorClassFromJar(s"${CONFIG.JAR_REPOSITORY}/fey-test-actor.jar", "org.apache.iota.fey.TestActor_2","fey-test-actor.jar")
val props = Props(clazz, Map("TEST" -> "TESTED"), 0.seconds, Map.empty, 0.seconds, "MY-ORCH", "ORCH", false)
val parent = TestProbe("UTILS-PARENT")
actorRef = TestActorRef[FeyGenericActor](props, parent.ref, "TESTING-UTILS")
}
"running GenericActor actor" in{
val respTB = TestProbe()
TestProbe().expectActor(actorRef.path.toString)
actorRef ! (respTB.ref)
actorRef ! "TEST_ACTOR"
respTB.expectMsg(Some("TESTED"))
}
"respond normally to stop message" in {
actorRef ! PoisonPill
TestProbe().verifyActorTermination(actorRef)
TestProbe().notExpectActor(actorRef.path.toString)
}
}
"Executing loadJsonFromFile with a valid JSON" should {
"return JsValue" in {
val json = Utils.loadJsonFromFile(new File(s"${CONFIG.JSON_REPOSITORY}/valid-json.json.not"))
json shouldBe defined
}
}
"Executing loadJsonFromFile with a invalid JSON" should {
"return None" in {
val json = Utils.loadJsonFromFile(new File(s"${CONFIG.JSON_REPOSITORY}/invalid-json.json.not"))
json should not be defined
}
"Log message at Error Level" in {
"Could not parse JSON" should beLoggedAt(Level.ERROR)
}
}
"Executing renameProcessedFile when CHECKPOINT is disabled" should {
"not concatenated extension to the file" in {
Utils.renameProcessedFile(new File(s"${CONFIG.JSON_REPOSITORY}/invalid-json.json.not"), "processed")
Utils.getFilesInDirectory(CONFIG.JSON_REPOSITORY).map(_.getAbsolutePath) should contain(s"${CONFIG.JSON_REPOSITORY}/invalid-json.json.not")
Utils.getFilesInDirectory(CONFIG.JSON_REPOSITORY).map(_.getAbsolutePath) should not contain(s"${CONFIG.JSON_REPOSITORY}/invalid-json.json.not.processed")
}
}
"Executing renameProcessedFile when CHECKPOINT is enabled" should {
"concatenated extension to the file" in {
CONFIG.CHEKPOINT_ENABLED = true
Utils.renameProcessedFile(new File(s"${CONFIG.JSON_REPOSITORY}/invalid-json.json.not"), "processed")
Utils.getFilesInDirectory(CONFIG.JSON_REPOSITORY).map(_.getAbsolutePath) should not contain(s"${CONFIG.JSON_REPOSITORY}/invalid-json.json.not")
Utils.getFilesInDirectory(CONFIG.JSON_REPOSITORY).map(_.getAbsolutePath) should contain(s"${CONFIG.JSON_REPOSITORY}/invalid-json.json.not.processed")
new File(s"${CONFIG.JSON_REPOSITORY}/invalid-json.json.not.processed").renameTo(new File(s"${CONFIG.JSON_REPOSITORY}/invalid-json.json.not"))
CONFIG.CHEKPOINT_ENABLED = false
}
}
val jsonObj = getJSValueFromString(Utils_JSONTest.orchestration_update2_test_json).as[JsObject]
"Executing updateOrchestrationState" should {
"result in log message at Debug when Checkpoint is disables" in {
Utils.updateOrchestrationState("TEST-15")
"Checkpoint not enabled" should beLoggedAt(Level.DEBUG)
}
"result in log message at warn when Orchestration to be updated is not cached" in {
CONFIG.CHEKPOINT_ENABLED = true
Utils.updateOrchestrationState("MY-TEST-UPDATE")
"Could not save state for Orchestration MY-TEST-UPDATE. It is not active on Fey." should beLoggedAt(Level.WARN)
CONFIG.CHEKPOINT_ENABLED = false
}
"result in creating a new file at Checkpoint dir" in {
CONFIG.CHEKPOINT_ENABLED = true
FEY_CACHE.activeOrchestrations.put("TEST_ORCHESTRATION_FOR_UTILS", ("", null))
ORCHESTRATION_CACHE.orchestration_metadata.put("TEST_ORCHESTRATION_FOR_UTILS",
Map("ENSEMBLE-UTILS" -> jsonObj))
Utils.updateOrchestrationState("TEST_ORCHESTRATION_FOR_UTILS")
Files.exists(Paths.get(s"${CONFIG.CHECKPOINT_DIR}/TEST_ORCHESTRATION_FOR_UTILS.json")) should be(true)
CONFIG.CHEKPOINT_ENABLED = false
}
"result in correct file created" in {
val file = Source.fromFile(s"${CONFIG.CHECKPOINT_DIR}/TEST_ORCHESTRATION_FOR_UTILS.json").getLines.mkString("")
val jsonFile = getJSValueFromString(file)
      val ensembles = (jsonFile \ JSON_PATH.ENSEMBLES).as[List[JsObject]]
ensembles should have size(1)
Json.stringify(ensembles(0).as[JsValue]) should equal(Json.stringify(jsonObj))
new File(s"${CONFIG.CHECKPOINT_DIR}/TEST_ORCHESTRATION_FOR_UTILS.json").delete()
FEY_CACHE.activeOrchestrations.remove("TEST_ORCHESTRATION_FOR_UTILS")
}
}
}
|
barbaragomes/incubator-iota
|
fey-core/src/test/scala/org/apache/iota/fey/UtilsSpec.scala
|
Scala
|
apache-2.0
| 8,280
|
package commands
import model.UserId
sealed trait Command {
val caller: UserId
}
sealed trait CommandDescription {
def name: String
def description: String
}
sealed trait NoArgCommand extends CommandDescription {
def apply(caller: UserId): Command
}
sealed trait OneArgCommand extends CommandDescription {
def apply(caller: UserId, arg: String): Command
def argName: String
}
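// Each concrete command below is a case class holding its caller; its companion object extends
// NoArgCommand or OneArgCommand, supplying the name/description used for help output, while the
// compiler-generated case-class apply methods satisfy the factory signatures declared above.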
case class Create(caller: UserId, place: String) extends Command
case class Close(caller: UserId) extends Command
case class Open(caller: UserId) extends Command
case class Finish(caller: UserId) extends Command
case class Summary(caller: UserId) extends Command
case class Poke(caller: UserId) extends Command
case class Kick(caller: UserId, kicked: UserId) extends Command
case class Join(caller: UserId) extends Command
case class Leave(caller: UserId) extends Command
case class Choose(caller: UserId, food: String) extends Command
case class Pay(caller: UserId) extends Command
case class Help(caller: UserId) extends Command
case class Stats(caller: UserId) extends Command
case class Unhandled(caller: UserId) extends Command
object Create extends OneArgCommand {
override def name: String = "create"
override def description: String = s"creates a new lunch at `<$argName>`"
override def argName: String = "name or URL of the place"
}
object Close extends NoArgCommand {
override def name: String = "close"
override def description: String = "closes current lunch for order changes"
}
object Open extends NoArgCommand {
override def name: String = "open"
override def description: String = "opens current lunch for further order changes"
}
object Finish extends NoArgCommand {
override def name: String = "finish"
override def description: String = "finishes current lunch"
}
object Summary extends NoArgCommand {
override def name: String = "summary"
override def description: String = "returns lunch summary"
}
object Poke extends NoArgCommand {
override def name: String = "poke"
override def description: String = "pokes all eaters that are lazy with their order"
}
object Kick extends OneArgCommand {
override def name: String = "kick"
override def description: String = s"kicks `<$argName>` from the current lunch"
override def argName: String = "eater to kick"
}
object Join extends NoArgCommand {
override def name: String = "join"
override def description: String = "joins the current lunch"
}
object Leave extends NoArgCommand {
override def name: String = "leave"
override def description: String = "leaves the current lunch"
}
object Choose extends OneArgCommand {
override def name: String = "choose"
override def description: String = s"chooses food with `<$argName>`"
override def argName: String = "food name"
}
object Pay extends NoArgCommand {
override def name: String = "pay"
override def description: String = "notifies about payment being made"
}
object Help extends NoArgCommand {
override def name: String = "help"
override def description: String = "prints this usage text"
}
object Stats extends NoArgCommand {
override def name: String = "stats"
override def description: String = "prints historic orders statistics"
}
object Commands {
val allCommands = Seq(
Create,
Close,
Open,
Finish,
Summary,
Poke,
Kick,
Join,
Leave,
Choose,
Pay,
Help,
Stats
)
}
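// Usage sketch (hypothetical, not part of the original file): the descriptions above could be
// rendered into a help text along these lines:
//   Commands.allCommands.map {
//     case c: OneArgCommand => s"${c.name} <${c.argName}> - ${c.description}"
//     case c: NoArgCommand  => s"${c.name} - ${c.description}"
//   }.mkString("\n")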
|
mturlo/lunchbot
|
src/main/scala/commands/Commands.scala
|
Scala
|
mit
| 3,500
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.statsEstimation
import java.sql.Date
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.logical.{ColumnStat, Filter, Statistics}
import org.apache.spark.sql.catalyst.plans.logical.statsEstimation.EstimationUtils._
import org.apache.spark.sql.types._
/**
* In this test suite, we test predicates containing the following operators:
* =, <, <=, >, >=, AND, OR, IS NULL, IS NOT NULL, IN, NOT IN
*/
class FilterEstimationSuite extends StatsEstimationTestBase {
// Suppose our test table has 10 rows and 6 columns.
// First column cint has values: 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
// Hence, distinctCount:10, min:1, max:10, nullCount:0, avgLen:4, maxLen:4
val attrInt = AttributeReference("cint", IntegerType)()
val colStatInt = ColumnStat(distinctCount = 10, min = Some(1), max = Some(10),
nullCount = 0, avgLen = 4, maxLen = 4)
  // Second column cbool has only 2 distinct values: false and true.
val attrBool = AttributeReference("cbool", BooleanType)()
val colStatBool = ColumnStat(distinctCount = 2, min = Some(false), max = Some(true),
nullCount = 0, avgLen = 1, maxLen = 1)
  // Third column cdate has 10 values from 2017-01-01 through 2017-01-10.
val dMin = Date.valueOf("2017-01-01")
val dMax = Date.valueOf("2017-01-10")
val attrDate = AttributeReference("cdate", DateType)()
val colStatDate = ColumnStat(distinctCount = 10, min = Some(dMin), max = Some(dMax),
nullCount = 0, avgLen = 4, maxLen = 4)
// Fourth column cdecimal has 4 values from 0.20 through 0.80 at increment of 0.20.
val decMin = new java.math.BigDecimal("0.200000000000000000")
val decMax = new java.math.BigDecimal("0.800000000000000000")
val attrDecimal = AttributeReference("cdecimal", DecimalType(18, 18))()
val colStatDecimal = ColumnStat(distinctCount = 4, min = Some(decMin), max = Some(decMax),
nullCount = 0, avgLen = 8, maxLen = 8)
// Fifth column cdouble has 10 double values: 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0
val attrDouble = AttributeReference("cdouble", DoubleType)()
val colStatDouble = ColumnStat(distinctCount = 10, min = Some(1.0), max = Some(10.0),
nullCount = 0, avgLen = 8, maxLen = 8)
// Sixth column cstring has 10 String values:
// "A0", "A1", "A2", "A3", "A4", "A5", "A6", "A7", "A8", "A9"
val attrString = AttributeReference("cstring", StringType)()
val colStatString = ColumnStat(distinctCount = 10, min = None, max = None,
nullCount = 0, avgLen = 2, maxLen = 2)
val attributeMap = AttributeMap(Seq(
attrInt -> colStatInt,
attrBool -> colStatBool,
attrDate -> colStatDate,
attrDecimal -> colStatDecimal,
attrDouble -> colStatDouble,
attrString -> colStatString))
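  // Note on the expected values below (informal orientation only, not the exact implementation):
  // the estimator assumes a roughly uniform value distribution, so an equality predicate keeps
  // about 1 / distinctCount of the rows (cint = 2 on 10 rows with 10 distinct values -> 1 row),
  // and a range predicate keeps the covered fraction of [min, max] (cint < 3 on [1, 10] keeps
  // about (3 - 1) / (10 - 1) of 10 rows, i.e. ~2.2, rounded up to 3). The authoritative formulas
  // live in the FilterEstimation implementation.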
test("cint = 2") {
validateEstimatedStats(
Filter(EqualTo(attrInt, Literal(2)), childStatsTestPlan(Seq(attrInt), 10L)),
Seq(attrInt -> ColumnStat(distinctCount = 1, min = Some(2), max = Some(2),
nullCount = 0, avgLen = 4, maxLen = 4)),
expectedRowCount = 1)
}
test("cint <=> 2") {
validateEstimatedStats(
Filter(EqualNullSafe(attrInt, Literal(2)), childStatsTestPlan(Seq(attrInt), 10L)),
Seq(attrInt -> ColumnStat(distinctCount = 1, min = Some(2), max = Some(2),
nullCount = 0, avgLen = 4, maxLen = 4)),
expectedRowCount = 1)
}
test("cint = 0") {
// This is an out-of-range case since 0 is outside the range [min, max]
validateEstimatedStats(
Filter(EqualTo(attrInt, Literal(0)), childStatsTestPlan(Seq(attrInt), 10L)),
Nil,
expectedRowCount = 0)
}
test("cint < 3") {
validateEstimatedStats(
Filter(LessThan(attrInt, Literal(3)), childStatsTestPlan(Seq(attrInt), 10L)),
Seq(attrInt -> ColumnStat(distinctCount = 2, min = Some(1), max = Some(3),
nullCount = 0, avgLen = 4, maxLen = 4)),
expectedRowCount = 3)
}
test("cint < 0") {
// This is a corner case since literal 0 is smaller than min.
validateEstimatedStats(
Filter(LessThan(attrInt, Literal(0)), childStatsTestPlan(Seq(attrInt), 10L)),
Nil,
expectedRowCount = 0)
}
test("cint <= 3") {
validateEstimatedStats(
Filter(LessThanOrEqual(attrInt, Literal(3)), childStatsTestPlan(Seq(attrInt), 10L)),
Seq(attrInt -> ColumnStat(distinctCount = 2, min = Some(1), max = Some(3),
nullCount = 0, avgLen = 4, maxLen = 4)),
expectedRowCount = 3)
}
test("cint > 6") {
validateEstimatedStats(
Filter(GreaterThan(attrInt, Literal(6)), childStatsTestPlan(Seq(attrInt), 10L)),
Seq(attrInt -> ColumnStat(distinctCount = 4, min = Some(6), max = Some(10),
nullCount = 0, avgLen = 4, maxLen = 4)),
expectedRowCount = 5)
}
test("cint > 10") {
// This is a corner case since max value is 10.
validateEstimatedStats(
Filter(GreaterThan(attrInt, Literal(10)), childStatsTestPlan(Seq(attrInt), 10L)),
Nil,
expectedRowCount = 0)
}
test("cint >= 6") {
validateEstimatedStats(
Filter(GreaterThanOrEqual(attrInt, Literal(6)), childStatsTestPlan(Seq(attrInt), 10L)),
Seq(attrInt -> ColumnStat(distinctCount = 4, min = Some(6), max = Some(10),
nullCount = 0, avgLen = 4, maxLen = 4)),
expectedRowCount = 5)
}
test("cint IS NULL") {
validateEstimatedStats(
Filter(IsNull(attrInt), childStatsTestPlan(Seq(attrInt), 10L)),
Nil,
expectedRowCount = 0)
}
test("cint IS NOT NULL") {
validateEstimatedStats(
Filter(IsNotNull(attrInt), childStatsTestPlan(Seq(attrInt), 10L)),
Seq(attrInt -> ColumnStat(distinctCount = 10, min = Some(1), max = Some(10),
nullCount = 0, avgLen = 4, maxLen = 4)),
expectedRowCount = 10)
}
test("cint > 3 AND cint <= 6") {
val condition = And(GreaterThan(attrInt, Literal(3)), LessThanOrEqual(attrInt, Literal(6)))
validateEstimatedStats(
Filter(condition, childStatsTestPlan(Seq(attrInt), 10L)),
Seq(attrInt -> ColumnStat(distinctCount = 3, min = Some(3), max = Some(6),
nullCount = 0, avgLen = 4, maxLen = 4)),
expectedRowCount = 4)
}
test("cint = 3 OR cint = 6") {
val condition = Or(EqualTo(attrInt, Literal(3)), EqualTo(attrInt, Literal(6)))
validateEstimatedStats(
Filter(condition, childStatsTestPlan(Seq(attrInt), 10L)),
Seq(attrInt -> ColumnStat(distinctCount = 10, min = Some(1), max = Some(10),
nullCount = 0, avgLen = 4, maxLen = 4)),
expectedRowCount = 2)
}
test("Not(cint > 3 AND cint <= 6)") {
val condition = Not(And(GreaterThan(attrInt, Literal(3)), LessThanOrEqual(attrInt, Literal(6))))
validateEstimatedStats(
Filter(condition, childStatsTestPlan(Seq(attrInt), 10L)),
Seq(attrInt -> colStatInt),
expectedRowCount = 6)
}
test("Not(cint <= 3 OR cint > 6)") {
val condition = Not(Or(LessThanOrEqual(attrInt, Literal(3)), GreaterThan(attrInt, Literal(6))))
validateEstimatedStats(
Filter(condition, childStatsTestPlan(Seq(attrInt), 10L)),
Seq(attrInt -> colStatInt),
expectedRowCount = 5)
}
test("Not(cint = 3 AND cstring < 'A8')") {
val condition = Not(And(EqualTo(attrInt, Literal(3)), LessThan(attrString, Literal("A8"))))
validateEstimatedStats(
Filter(condition, childStatsTestPlan(Seq(attrInt, attrString), 10L)),
Seq(attrInt -> colStatInt, attrString -> colStatString),
expectedRowCount = 10)
}
test("Not(cint = 3 OR cstring < 'A8')") {
val condition = Not(Or(EqualTo(attrInt, Literal(3)), LessThan(attrString, Literal("A8"))))
validateEstimatedStats(
Filter(condition, childStatsTestPlan(Seq(attrInt, attrString), 10L)),
Seq(attrInt -> colStatInt, attrString -> colStatString),
expectedRowCount = 9)
}
test("cint IN (3, 4, 5)") {
validateEstimatedStats(
Filter(InSet(attrInt, Set(3, 4, 5)), childStatsTestPlan(Seq(attrInt), 10L)),
Seq(attrInt -> ColumnStat(distinctCount = 3, min = Some(3), max = Some(5),
nullCount = 0, avgLen = 4, maxLen = 4)),
expectedRowCount = 3)
}
test("cint NOT IN (3, 4, 5)") {
validateEstimatedStats(
Filter(Not(InSet(attrInt, Set(3, 4, 5))), childStatsTestPlan(Seq(attrInt), 10L)),
Seq(attrInt -> ColumnStat(distinctCount = 10, min = Some(1), max = Some(10),
nullCount = 0, avgLen = 4, maxLen = 4)),
expectedRowCount = 7)
}
test("cbool = true") {
validateEstimatedStats(
Filter(EqualTo(attrBool, Literal(true)), childStatsTestPlan(Seq(attrBool), 10L)),
Seq(attrBool -> ColumnStat(distinctCount = 1, min = Some(true), max = Some(true),
nullCount = 0, avgLen = 1, maxLen = 1)),
expectedRowCount = 5)
}
test("cbool > false") {
validateEstimatedStats(
Filter(GreaterThan(attrBool, Literal(false)), childStatsTestPlan(Seq(attrBool), 10L)),
Seq(attrBool -> ColumnStat(distinctCount = 1, min = Some(true), max = Some(true),
nullCount = 0, avgLen = 1, maxLen = 1)),
expectedRowCount = 5)
}
test("cdate = cast('2017-01-02' AS DATE)") {
val d20170102 = Date.valueOf("2017-01-02")
validateEstimatedStats(
Filter(EqualTo(attrDate, Literal(d20170102)),
childStatsTestPlan(Seq(attrDate), 10L)),
Seq(attrDate -> ColumnStat(distinctCount = 1, min = Some(d20170102), max = Some(d20170102),
nullCount = 0, avgLen = 4, maxLen = 4)),
expectedRowCount = 1)
}
test("cdate < cast('2017-01-03' AS DATE)") {
val d20170103 = Date.valueOf("2017-01-03")
validateEstimatedStats(
Filter(LessThan(attrDate, Literal(d20170103)),
childStatsTestPlan(Seq(attrDate), 10L)),
Seq(attrDate -> ColumnStat(distinctCount = 2, min = Some(dMin), max = Some(d20170103),
nullCount = 0, avgLen = 4, maxLen = 4)),
expectedRowCount = 3)
}
test("""cdate IN ( cast('2017-01-03' AS DATE),
cast('2017-01-04' AS DATE), cast('2017-01-05' AS DATE) )""") {
val d20170103 = Date.valueOf("2017-01-03")
val d20170104 = Date.valueOf("2017-01-04")
val d20170105 = Date.valueOf("2017-01-05")
validateEstimatedStats(
Filter(In(attrDate, Seq(Literal(d20170103), Literal(d20170104), Literal(d20170105))),
childStatsTestPlan(Seq(attrDate), 10L)),
Seq(attrDate -> ColumnStat(distinctCount = 3, min = Some(d20170103), max = Some(d20170105),
nullCount = 0, avgLen = 4, maxLen = 4)),
expectedRowCount = 3)
}
test("cdecimal = 0.400000000000000000") {
val dec_0_40 = new java.math.BigDecimal("0.400000000000000000")
validateEstimatedStats(
Filter(EqualTo(attrDecimal, Literal(dec_0_40)),
childStatsTestPlan(Seq(attrDecimal), 4L)),
Seq(attrDecimal -> ColumnStat(distinctCount = 1, min = Some(dec_0_40), max = Some(dec_0_40),
nullCount = 0, avgLen = 8, maxLen = 8)),
expectedRowCount = 1)
}
test("cdecimal < 0.60 ") {
val dec_0_60 = new java.math.BigDecimal("0.600000000000000000")
validateEstimatedStats(
Filter(LessThan(attrDecimal, Literal(dec_0_60)),
childStatsTestPlan(Seq(attrDecimal), 4L)),
Seq(attrDecimal -> ColumnStat(distinctCount = 3, min = Some(decMin), max = Some(dec_0_60),
nullCount = 0, avgLen = 8, maxLen = 8)),
expectedRowCount = 3)
}
test("cdouble < 3.0") {
validateEstimatedStats(
Filter(LessThan(attrDouble, Literal(3.0)), childStatsTestPlan(Seq(attrDouble), 10L)),
Seq(attrDouble -> ColumnStat(distinctCount = 2, min = Some(1.0), max = Some(3.0),
nullCount = 0, avgLen = 8, maxLen = 8)),
expectedRowCount = 3)
}
test("cstring = 'A2'") {
validateEstimatedStats(
Filter(EqualTo(attrString, Literal("A2")), childStatsTestPlan(Seq(attrString), 10L)),
Seq(attrString -> ColumnStat(distinctCount = 1, min = None, max = None,
nullCount = 0, avgLen = 2, maxLen = 2)),
expectedRowCount = 1)
}
test("cstring < 'A2' - unsupported condition") {
validateEstimatedStats(
Filter(LessThan(attrString, Literal("A2")), childStatsTestPlan(Seq(attrString), 10L)),
Seq(attrString -> ColumnStat(distinctCount = 10, min = None, max = None,
nullCount = 0, avgLen = 2, maxLen = 2)),
expectedRowCount = 10)
}
test("cint IN (1, 2, 3, 4, 5)") {
// This is a corner test case. We want to test if we can handle the case when the number of
// valid values in IN clause is greater than the number of distinct values for a given column.
// For example, column has only 2 distinct values 1 and 6.
// The predicate is: column IN (1, 2, 3, 4, 5).
val cornerChildColStatInt = ColumnStat(distinctCount = 2, min = Some(1), max = Some(6),
nullCount = 0, avgLen = 4, maxLen = 4)
val cornerChildStatsTestplan = StatsTestPlan(
outputList = Seq(attrInt),
rowCount = 2L,
attributeStats = AttributeMap(Seq(attrInt -> cornerChildColStatInt))
)
validateEstimatedStats(
Filter(InSet(attrInt, Set(1, 2, 3, 4, 5)), cornerChildStatsTestplan),
Seq(attrInt -> ColumnStat(distinctCount = 2, min = Some(1), max = Some(5),
nullCount = 0, avgLen = 4, maxLen = 4)),
expectedRowCount = 2)
}
private def childStatsTestPlan(outList: Seq[Attribute], tableRowCount: BigInt): StatsTestPlan = {
StatsTestPlan(
outputList = outList,
rowCount = tableRowCount,
attributeStats = AttributeMap(outList.map(a => a -> attributeMap(a))))
}
private def validateEstimatedStats(
filterNode: Filter,
expectedColStats: Seq[(Attribute, ColumnStat)],
expectedRowCount: Int): Unit = {
// If the filter has a binary operator (including those nested inside AND/OR/NOT), swap the
// sides of the attribute and the literal, reverse the operator, and then check again.
val swappedFilter = filterNode transformExpressionsDown {
case EqualTo(attr: Attribute, l: Literal) =>
EqualTo(l, attr)
case LessThan(attr: Attribute, l: Literal) =>
GreaterThan(l, attr)
case LessThanOrEqual(attr: Attribute, l: Literal) =>
GreaterThanOrEqual(l, attr)
case GreaterThan(attr: Attribute, l: Literal) =>
LessThan(l, attr)
case GreaterThanOrEqual(attr: Attribute, l: Literal) =>
LessThanOrEqual(l, attr)
}
val testFilters = if (swappedFilter != filterNode) {
Seq(swappedFilter, filterNode)
} else {
Seq(filterNode)
}
testFilters.foreach { filter =>
val expectedAttributeMap = AttributeMap(expectedColStats)
val expectedStats = Statistics(
sizeInBytes = getOutputSize(filter.output, expectedRowCount, expectedAttributeMap),
rowCount = Some(expectedRowCount),
attributeStats = expectedAttributeMap)
assert(filter.stats(conf) == expectedStats)
}
}
}
|
jianran/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/statsEstimation/FilterEstimationSuite.scala
|
Scala
|
apache-2.0
| 15,566
|
. // error: expected class or object definition
\u890u3084eu // error: error in unicode escape // error: illegal character '\uffff'
|
densh/dotty
|
tests/neg/firstError.scala
|
Scala
|
bsd-3-clause
| 134
|
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.action.async
import io.gatling.core.action.{ Action, RequestAction }
import io.gatling.core.session._
import io.gatling.core.stats.StatsEngine
abstract class ReconciliateAction(
val requestName: Expression[String],
wsName: String,
val statsEngine: StatsEngine,
val next: Action
) extends RequestAction with AsyncProtocolAction {
override def sendRequest(requestName: String, session: Session) = {
for (wsActor <- fetchActor(wsName, session)) yield wsActor ! Reconciliate(requestName, next, session)
}
}
|
MykolaB/gatling
|
gatling-http/src/main/scala/io/gatling/http/action/async/ReconciliateAction.scala
|
Scala
|
apache-2.0
| 1,188
|
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs.storage.common.metadata
import java.util.concurrent.ConcurrentHashMap
import com.typesafe.scalalogging.LazyLogging
import org.apache.hadoop.fs.Options.CreateOpts
import org.apache.hadoop.fs.{CreateFlag, FileContext, Path}
import org.locationtech.geomesa.fs.storage.api._
import org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.Config
import org.locationtech.geomesa.fs.storage.common.utils.PathCache
import org.locationtech.geomesa.utils.io.WithClose
import org.locationtech.geomesa.utils.stats.MethodProfiling
class FileBasedMetadataFactory extends StorageMetadataFactory {
override def name: String = FileBasedMetadata.MetadataType
/**
* Loads a metadata instance from an existing root. The metadata info is persisted in a `metadata.json`
* file under the root path.
*
* Will return a cached instance, if available. If a previous check was made to load a file from this root,
* and the file did not exist, will not re-attempt to load it until after a configurable timeout
*
* @see `org.locationtech.geomesa.fs.storage.common.utils.PathCache#CacheDurationProperty()`
   * @param context file context
   * @return the metadata instance, if it exists under the given root
   */
override def load(context: FileSystemContext): Option[FileBasedMetadata] = {
val json = MetadataJson.readMetadata(context)
// note: do this after loading the json to allow for old metadata transition
val cached = FileBasedMetadataFactory.cached(context, json.getOrElse(FileBasedMetadata.LegacyOptions).options)
json match {
case Some(m) if m.name.equalsIgnoreCase(name) =>
cached.orElse(throw new IllegalArgumentException(s"Could not load metadata at root '${context.root.toUri}'"))
case None if cached.isDefined =>
// a file-based metadata impl exists, but was created with an older version
// create a config file pointing to it, and register that
MetadataJson.writeMetadata(context, FileBasedMetadata.LegacyOptions)
cached
case _ => None
}
}
override def create(context: FileSystemContext, config: Map[String, String], meta: Metadata): FileBasedMetadata = {
val sft = namespaced(meta.sft, context.namespace)
// load the partition scheme first in case it fails
PartitionSchemeFactory.load(sft, meta.scheme)
val renderer = config.get(Config.RenderKey).map(MetadataConverter.apply).getOrElse(RenderCompact)
MetadataJson.writeMetadata(context, NamedOptions(name, config + (Config.RenderKey -> renderer.name)))
FileBasedMetadataFactory.write(context.fc, context.root, meta)
val directory = new Path(context.root, FileBasedMetadataFactory.MetadataDirectory)
val metadata = new FileBasedMetadata(context.fc, directory, sft, meta, renderer)
FileBasedMetadataFactory.cache.put(FileBasedMetadataFactory.key(context), metadata)
metadata
}
}
object FileBasedMetadataFactory extends MethodProfiling with LazyLogging {
val MetadataDirectory = "metadata"
val StoragePath = s"$MetadataDirectory/storage.json"
private val cache = new ConcurrentHashMap[String, FileBasedMetadata]()
private def key(context: FileSystemContext): String =
context.namespace.map(ns => s"$ns:${context.root.toUri}").getOrElse(context.root.toUri.toString)
private def cached(context: FileSystemContext, config: Map[String, String]): Option[FileBasedMetadata] = {
val loader = new java.util.function.Function[String, FileBasedMetadata]() {
override def apply(ignored: String): FileBasedMetadata = {
val file = new Path(context.root, StoragePath)
if (!PathCache.exists(context.fc, file)) { null } else {
val directory = new Path(context.root, MetadataDirectory)
val meta = WithClose(context.fc.open(file))(MetadataSerialization.deserialize)
val sft = namespaced(meta.sft, context.namespace)
val renderer = config.get(Config.RenderKey).map(MetadataConverter.apply).getOrElse(RenderPretty)
new FileBasedMetadata(context.fc, directory, sft, meta, renderer)
}
}
}
Option(cache.computeIfAbsent(key(context), loader))
}
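  // Note: when storage.json does not exist the loader returns null, so computeIfAbsent records no
  // mapping and a later call will check the path again; throttling of repeated negative lookups,
  // as described in `load`, is provided by PathCache.exists.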
/**
* Write basic metadata to disk. This should be done once, when the storage is created
*
* @param fc file context
* @param root root path
* @param metadata simple feature type, file encoding, partition scheme, etc
*/
private [metadata] def write(fc: FileContext, root: Path, metadata: Metadata): Unit = {
val file = new Path(root, StoragePath)
val flags = java.util.EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
WithClose(fc.create(file, flags, CreateOpts.createParent)) { out =>
MetadataSerialization.serialize(out, metadata)
out.hflush()
out.hsync()
}
PathCache.register(fc, file)
}
}
|
locationtech/geomesa
|
geomesa-fs/geomesa-fs-storage/geomesa-fs-storage-common/src/main/scala/org/locationtech/geomesa/fs/storage/common/metadata/FileBasedMetadataFactory.scala
|
Scala
|
apache-2.0
| 5,270
|
package com.nulabinc.backlog.migration.common.services
import java.nio.file.Path
import cats.Foldable.ops._
import cats.Monad
import cats.Monad.ops._
import cats.data.Validated.{Invalid, Valid}
import com.nulabinc.backlog.migration.common.codec.{UserMappingDecoder, UserMappingEncoder}
import com.nulabinc.backlog.migration.common.conf.BacklogApiConfiguration
import com.nulabinc.backlog.migration.common.domain.BacklogUser
import com.nulabinc.backlog.migration.common.domain.mappings._
import com.nulabinc.backlog.migration.common.dsl.{ConsoleDSL, StorageDSL}
import com.nulabinc.backlog.migration.common.errors.{
MappingFileError,
MappingFileNotFound,
MappingValidationError,
ValidationError
}
import com.nulabinc.backlog.migration.common.formatters.Formatter
import com.nulabinc.backlog.migration.common.validators.MappingValidatorNec
private case class MergedUserMapping[A](
mergeList: Seq[UserMapping[A]],
addedList: Seq[UserMapping[A]]
)
private object MergedUserMapping {
def empty[A]: MergedUserMapping[A] =
MergedUserMapping[A](mergeList = Seq(), addedList = Seq())
}
object UserMappingFileService {
import com.nulabinc.backlog.migration.common.messages.ConsoleMessages.{
Mappings => MappingMessages
}
import com.nulabinc.backlog.migration.common.shared.syntax._
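  /**
   * Creates or updates the user mapping file: existing mappings are merged with newly discovered
   * source users (newly added entries are reported on the console), and the full list of
   * destination users is written to `mappingListPath`.
   */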
def init[A, F[_]: Monad: StorageDSL: ConsoleDSL](
mappingFilePath: Path,
mappingListPath: Path,
srcItems: Seq[A],
dstItems: Seq[BacklogUser],
dstApiConfiguration: BacklogApiConfiguration
)(implicit
formatter: Formatter[UserMapping[A]],
encoder: UserMappingEncoder[A],
decoder: UserMappingDecoder[A],
header: MappingHeader[UserMapping[_]]
): F[Unit] =
for {
mappingFileExists <- StorageDSL[F].exists(mappingFilePath)
_ <-
if (mappingFileExists) {
for {
records <- StorageDSL[F].read(mappingFilePath, MappingFileService.readLine)
mappings = MappingDecoder.user(records)
result = merge(mappings, srcItems)
_ <-
if (result.addedList.nonEmpty)
for {
_ <- StorageDSL[F].writeNewFile(
mappingFilePath,
MappingEncoder.user(result.mergeList)
)
_ <- ConsoleDSL[F].println(
MappingMessages.userMappingMerged(mappingFilePath, result.addedList)
)
} yield ()
else
ConsoleDSL[F].println(MappingMessages.userMappingNoChanges)
} yield ()
} else {
val result = merge(Seq(), srcItems)
for {
_ <- StorageDSL[F].writeNewFile(
mappingFilePath,
MappingEncoder.user(result.mergeList)
)
_ <- ConsoleDSL[F].println(
MappingMessages.userMappingCreated(mappingFilePath)
)
} yield ()
}
_ <- StorageDSL[F].writeNewFile(
mappingListPath,
MappingEncoder.userList(dstItems)
)
} yield ()
  /**
   * Reads user mappings from a mapping file (e.g. users.csv) and validates them against the
   * destination users.
   *
   * @param path path of the user mapping file
   * @param dstItems destination Backlog users to validate the mappings against
   * @param decoder decoder used to read user mappings from the file
   * @tparam A source user type
   * @tparam F effect type
   * @return the validated mappings, or a mapping file error
   */
def execute[A, F[_]: Monad: StorageDSL: ConsoleDSL](
path: Path,
dstItems: Seq[BacklogUser]
)(implicit
decoder: UserMappingDecoder[A]
): F[Either[MappingFileError, Seq[ValidatedUserMapping[A]]]] = {
val result = for {
_ <- StorageDSL[F].exists(path).orError(MappingFileNotFound("users", path)).handleError
unvalidated <- getMappings(path).handleError
validated <- validateMappings(unvalidated, dstItems).lift.handleError
} yield validated
result.value
}
  /**
   * Deserialize user mappings from a mapping file.
   *
   * @param path path of the user mapping file
   * @param decoder decoder used to read user mappings from the file
   * @tparam A source user type
   * @tparam F effect type
   * @return the decoded mappings, or a mapping file error
   */
def getMappings[A, F[_]: Monad: StorageDSL](path: Path)(implicit
decoder: UserMappingDecoder[A]
): F[Either[MappingFileError, Seq[UserMapping[A]]]] =
for {
records <- StorageDSL[F].read(path, MappingFileService.readLine)
mappings = MappingDecoder.user(records)
} yield Right(mappings)
  /**
   * Validate mappings against the destination users.
   *
   * @param mappings user mappings to validate
   * @param dstItems destination Backlog users
   * @tparam A source user type
   * @return the validated mappings, or a validation error aggregating all failures
   */
def validateMappings[A](
mappings: Seq[UserMapping[A]],
dstItems: Seq[BacklogUser]
): Either[MappingFileError, Seq[ValidatedUserMapping[A]]] = {
val results = mappings
.map(MappingValidatorNec.validateUserMapping(_, dstItems))
.foldLeft(ValidationResults.empty[A]) { (acc, item) =>
item match {
case Valid(value) => acc.copy(values = acc.values :+ value)
case Invalid(error) => acc.copy(errors = acc.errors ++ error.toList)
}
}
results.toResult
}
  /**
   * Merge old mappings and new items.
   *
   * @param mappings previously saved user mappings
   * @param srcItems newly discovered source users
   * @tparam A source user type
   * @return the merged mappings plus the list of newly added entries
   */
private def merge[A](
mappings: Seq[UserMapping[A]],
srcItems: Seq[A]
): MergedUserMapping[A] =
srcItems.foldLeft(MergedUserMapping.empty[A]) { (acc, item) =>
mappings.find(_.src == item) match {
case Some(value) =>
acc.copy(mergeList = acc.mergeList :+ value)
case None =>
val mapping = UserMapping.create(item)
acc.copy(
mergeList = acc.mergeList :+ mapping,
addedList = acc.addedList :+ mapping
)
}
}
private case class ValidationResults[A](
values: Seq[ValidatedUserMapping[A]] = Seq(),
errors: List[ValidationError] = List()
) {
def toResult: Either[MappingFileError, Seq[ValidatedUserMapping[A]]] =
if (errors.nonEmpty) Left(MappingValidationError(MappingType.User, values, errors))
else Right(values)
}
private object ValidationResults {
def empty[A]: ValidationResults[A] = ValidationResults[A]()
}
}
|
nulab/backlog-migration-common
|
core/src/main/scala/com/nulabinc/backlog/migration/common/services/UserMappingFileService.scala
|
Scala
|
mit
| 5,988
|
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc
import java.util.Date
import org.scalatest.{Matchers, WordSpecLike}
class EmisReportGeneratorSpec extends WordSpecLike with Matchers {
"generator" should {
def generateReport(includeHeader: Boolean) = {
val transactions: Iterable[Transaction] = Iterable(
Transaction("1234567890K-3456", "3141", "A"),
Transaction("0987654321K-3456", "3141", "A")
)
EmisReportGenerator.generateReport(Merchant("HMRCSADC", "30178943"), transactions, includeHeader)
}
"include file header if include header is true" in {
val report = generateReport(true)
report.head should startWith("00#####################0000002###")
report.drop(1).head should startWith("05###############")
}
"does not include file header if include header is false" in {
val report = generateReport(false)
report.head should not startWith "00###################"
}
"include merchant header" in {
val report = generateReport(true)
report.drop(2).head should startWith(s"10########0000030178943${EmisReportGenerator.simpleDateFormat.format(new Date)}#######################################00000006282###########################################0000002####")
}
"include transactions" in {
val report = generateReport(true)
report.drop(3).head should startWith(s"15##############******#### ####00000003141${EmisReportGenerator.simpleDateFormat.format(new Date)}######0#######A ######")
report.drop(4).head should startWith(s"16############################")
}
"include transaction type" in {
val report = generateReport(true)
val date = EmisReportGenerator.simpleDateFormat.format(new Date)
val purchased = "0"
report.drop(3).head should startWith(s"15##############******#### ####00000003141$date######$purchased#######A ######")
}
}
}
|
hmrc/worldpay-report-generator
|
src/test/scala/uk/gov/hmrc/EmisReportGeneratorSpec.scala
|
Scala
|
apache-2.0
| 2,550
|
/*
* Copyright (C) 2016 University of Basel, Graphics and Vision Research Group
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package scalismo.ui.model
import java.io.File
import scalismo.geometry._3D
import scalismo.image.DiscreteImage
import scalismo.io.ImageIO
import scalismo.ui.model.capabilities._
import scalismo.ui.model.properties._
import scalismo.ui.util.{FileIoMetadata, FileUtil}
import scala.util.{Failure, Success, Try}
class ImagesNode(override val parent: GroupNode) extends SceneNodeCollection[ImageNode] with Loadable {
override val name: String = "Images"
def add(image: DiscreteImage[_3D, Float], name: String): ImageNode = {
val node = new ImageNode(this, image, name)
add(node)
node
}
override def loadMetadata: FileIoMetadata = FileIoMetadata.Image
override def load(file: File): Try[Unit] = {
ImageIO.read3DScalarImageAsType[Float](file) match {
case Success(image) =>
add(image, FileUtil.basename(file))
Success(())
case Failure(ex) => Failure(ex)
}
}
}
class ImageNode(override val parent: ImagesNode, val source: DiscreteImage[_3D, Float], initialName: String)
extends RenderableSceneNode
with Grouped
with Renameable
with Saveable
with Removeable
with HasWindowLevel
with HasOpacity {
name = initialName
val (minimumValue, maximumValue) = {
// we manually do this instead of using the min or max methods of the iterator
// so that we only have to run through the list once.
var min: Float = Float.MaxValue
var max: Float = Float.MinValue
source.values.foreach { value =>
min = Math.min(min, value)
max = Math.max(max, value)
}
(min, max)
}
override def saveMetadata: FileIoMetadata = FileIoMetadata.Image
override def save(file: File): Try[Unit] = {
val ext = FileUtil.extension(file)
ext match {
case "vtk" => ImageIO.writeVTK(source, file)
case "nii" => ImageIO.writeNifti(source, file)
case _ => Failure(new Exception(s"File $file: unsupported file extension"))
}
}
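  // Default contrast settings: the window spans 75% of the image's intensity range and the level
  // sits at the midpoint of that range.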
override val windowLevel: WindowLevelProperty = {
val range = maximumValue - minimumValue
val window = Math.round(range * 0.75f)
val level = Math.round(minimumValue + range / 2.0f)
new WindowLevelProperty(WindowLevel(window, level))
}
override val opacity: OpacityProperty = new OpacityProperty()
override def group: GroupNode = parent.parent
override def remove(): Unit = parent.remove(this)
}
|
unibas-gravis/scalismo-ui
|
src/main/scala/scalismo/ui/model/ImageNode.scala
|
Scala
|
gpl-3.0
| 3,119
|
package com.github.gdefacci.di.macrodef
import scala.reflect.macros.blackbox.Context
class DagGraph[C <: Context](val context: C) {
import context.universe._
val typeDag = new TypeDag[context.type](context)
import com.github.gdefacci.di.graph
def graphModel(typ: Type,
mappings: typeDag.Providers[typeDag.DagNodeOrRef]): Tree = {
val membersSelect = new MembersSelect[context.type](context)
val typeResolver = new typeDag.TypeResolver(membersSelect, mappings, collection.mutable.Buffer.empty, mappings.topLevelRefs)
val dag = typeResolver.resolveRef(typeDag.Ref(DefaultScope, typ, typ.typeSymbol.pos))
val graphNodes = dag.visit.map(toDependencyTree)
context.typecheck(q"List(..$graphNodes)")
}
private def containers(s: Symbol): List[Symbol] = {
if (s == NoSymbol) Nil
else if (s == context.mirror.RootClass) Nil
else s :: containers(s.owner)
}
private def toTypeOwner(syms: Seq[Symbol]): Tree = {
syms match {
case Seq() => q"com.github.gdefacci.di.graph.Package(Nil)"
case init :+ last =>
if (last.isPackage) {
val segs = syms.map(_.name.toTermName.toString)
q"com.github.gdefacci.di.graph.Package(List(..$segs))"
} else {
val typ = last.asType.toType
toGraphType(typ)
}
}
}
private def toGraphType(typ: Type): Tree = {
val owner = toTypeOwner(containers(typ.typeSymbol.owner).reverse)
if (typ.typeSymbol.isModuleClass) {
q"com.github.gdefacci.di.graph.SingletonTypeValue($owner, ${typ.typeSymbol.name.toTermName.toString})"
} else typ.typeArgs match {
case Seq() =>
q"com.github.gdefacci.di.graph.TypeValue($owner, ${typ.typeSymbol.name.toTermName.toString})"
case args =>
val typeArgs = args.map(toGraphType)
q"com.github.gdefacci.di.graph.PolymorphicType($owner, ${typ.erasure.typeSymbol.name.toTermName.toString}, List(..$typeArgs))"
}
}
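  /**
   * Renders a resolved DAG node as a `graph.Dependency` literal: its id, provider source
   * (method / decorator / constructor / value), scope, type, source position and the ids of its
   * input dependencies.
   */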
private def toDependencyTree(dag: Dag[typeDag.DagNode]) = {
val node = dag.value
val graphScope = node.scope match {
case ApplicationScope => q"com.github.gdefacci.di.graph.DependencyScope.Singleton"
case DefaultScope => q"com.github.gdefacci.di.graph.DependencyScope.Factory"
}
val typ = toGraphType(node.typ)
val providerSrc = dag.value.providerSource match {
case typeDag.ProviderSource.MethodSource(m) => q"com.github.gdefacci.di.graph.MethodSource(${m.owner.fullName}, ${m.name.decodedName.toString})"
case typeDag.ProviderSource.DecoratorSource(m) => q"com.github.gdefacci.di.graph.DecoratorSource(${m.owner.fullName}, ${m.name.decodedName.toString})"
case typeDag.ProviderSource.ConstructorSource(c) => q"com.github.gdefacci.di.graph.ConstructorSource(${c.owner.fullName})"
case _ => q"com.github.gdefacci.di.graph.ValueSource"
}
q"""
com.github.gdefacci.di.graph.Dependency(
com.github.gdefacci.di.graph.DependencyId(${node.id}),
$providerSrc,
$graphScope,
$typ,
com.github.gdefacci.di.graph.FilePosition(${node.sourcePos.source.file.path}, ${node.sourcePos.line}),
List(..${dag.inputs.map(i => q"com.github.gdefacci.di.graph.DependencyId(${i.value.id})")}) )
"""
}
}
|
gdefacci/di
|
macros/src/main/scala/com/github/gdefacci/di/macrodef/DagGraph.scala
|
Scala
|
mit
| 3,275
|
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.api.tools
import com.lightbend.lagom.internal.spi.ServiceAcl
import com.lightbend.lagom.internal.spi.ServiceDescription
import com.lightbend.lagom.internal.spi.ServiceDiscovery
import com.typesafe.config.ConfigFactory
import play.api._
import play.api.libs.functional.syntax._
import play.api.libs.json._
import scala.collection.JavaConverters._
import scala.collection.immutable
import scala.compat.java8.OptionConverters._
/**
* A service detector locates the services of a Lagom project.
*/
object ServiceDetector {
private val ServiceDiscoveryKey = "lagom.tools.service-discovery"
private val ApplicationLoaderKey = "play.application.loader"
val log = Logger(this.getClass)
  implicit val serviceAclsWrites: Writes[ServiceAcl] =
    (__ \ "method")
      .writeNullable[String]
      .and((__ \ "pathPattern").writeNullable[String])
      .apply(sa => (sa.method().asScala, sa.pathPattern().asScala))
  implicit val serviceDescriptionWrites: Writes[ServiceDescription] =
    (__ \ "name")
      .write[String]
      .and((__ \ "acls").write[immutable.Seq[ServiceAcl]])
      .apply(sd => (sd.name, sd.acls.asScala.toIndexedSeq))
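  // Together these Writes render JSON of this shape (hypothetical values, for illustration only):
  //   [ { "name": "hello-service", "acls": [ { "method": "GET", "pathPattern": "/hello/.*" } ] } ]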
/**
* Retrieves the service names and acls for the current Lagom project
* of all services.
*
* @param classLoader The class loader should contain a sbt project in the classpath
* for which the services should be resolved.
* @return a JSON array of [[com.lightbend.lagom.internal.spi.ServiceDescription]] objects.
*/
def services(classLoader: ClassLoader): String = {
val config = ConfigFactory.load(classLoader)
val serviceDiscoveryClassName = if (config.hasPath(ServiceDiscoveryKey)) {
config.getString(ServiceDiscoveryKey)
} else {
config.getString(ApplicationLoaderKey)
}
services(classLoader, serviceDiscoveryClassName)
}
private[tools] def services(classLoader: ClassLoader, serviceDiscoveryClassName: String): String = {
log.debug("Loading service discovery class: " + serviceDiscoveryClassName)
val serviceDiscoverClass = classLoader.loadClass(serviceDiscoveryClassName)
val castServiceDiscoveryClass = serviceDiscoverClass.asSubclass(classOf[ServiceDiscovery])
val serviceDiscovery = castServiceDiscoveryClass.newInstance()
val services = serviceDiscovery.discoverServices(classLoader).asScala.toIndexedSeq
Json.stringify(Json.toJson(services))
}
}
|
ignasi35/lagom
|
api-tools/src/main/scala/com/lightbend/lagom/internal/api/tools/ServiceDetector.scala
|
Scala
|
apache-2.0
| 2,561
|
package com.markfeeney.circlet.middleware
import com.markfeeney.circlet._
import org.scalatest.FunSuite
class ParamsTest extends FunSuite {
// helper to rip Params out of Request that the app ultimately sees
private def params(req: Request): Params = {
var ps: Params = null
val h = Circlet.handler { req =>
ps = Params.get(req)
Response()
}
val app: Handler = Params.mw()(h)
app(req) { _ => Sent }
ps
}
private def params(url: String): Params = {
params(TestUtils.request(HttpMethod.Get, url))
}
test("simple query string") {
val ps = params("/test?foo=bar")
assert(ps.all == Map[String, Param]("foo" -> "bar"))
assert(ps.queryParams == ps.all)
assert(ps.formParams == Map.empty)
}
test("Seq and multi valued query string") {
val ps = params("/test?x=hi+there&a=1&a=2&foo=bar&a=3")
val expected = Map[String, Param](
"a" -> Vector("1", "2", "3"),
"foo" -> "bar",
"x" -> "hi there"
)
assert(ps.queryParams == expected)
assert(ps.queryParams == ps.all)
assert(ps.formParams == Map.empty)
}
private def formPost(path: String, body: String): Request = {
TestUtils.request(HttpMethod.Post, path)
.addHeader("content-type", "application/x-www-form-urlencoded")
.copy(body = Some(Util.stringInputStream(body)))
}
test("form params") {
val ps = params(formPost("/whatev", "foo=bar&a=1&a=2+3"))
val expected = Map[String, Param](
"foo" -> "bar",
"a" -> Vector("1", "2 3")
)
assert(ps.formParams == expected)
assert(ps.formParams == ps.all)
assert(ps.queryParams == Map.empty)
}
test("query string params override form params") {
val ps = params(formPost("/whatev?x=y&a=99", "foo=bar&a=1&a=2+3"))
val expectedForm = Map[String, Param](
"foo" -> "bar",
"a" -> Vector("1", "2 3")
)
val expectedQuery = Map[String, Param]("x" -> "y", "a" -> "99")
assert(ps.formParams == expectedForm)
assert(ps.queryParams == expectedQuery)
assert(ps.all == expectedForm ++ expectedQuery)
}
test("param values parsed from same area merge; from different area overwrite") {
val ps = params(formPost("/whatev?x=1&x=2&y=1", "x=5&x=6&z=42"))
val expectedAll = Map[String, Param](
"x" -> Vector("1", "2"),
"y" -> "1",
"z" -> "42"
)
assert(ps.all == expectedAll)
}
test("query string param with no value") {
val ps = params("/test?foo=bar&quux")
assert(ps.all == Map[String, Param]("foo" -> "bar", "quux" -> Vector.empty))
assert(ps.queryParams == ps.all)
assert(ps.formParams == Map.empty)
}
test("weird query strings") {
def t(ps: Params): Unit = {
assert(ps.all == Map.empty)
assert(ps.queryParams == ps.all)
assert(ps.formParams == Map.empty)
}
withClue("no param name or value") {
t(params("/test?&"))
}
withClue("empty string key, no value") {
t(params("/test?&="))
t(params("/test?=foo")) // key can't be empty string; ignored
}
withClue("mix of good and bizarre params") {
val ps = params("/test?&=&foo&a=b")
assert(ps.all == Map[String, Param]("foo" -> Vector.empty, "a" -> "b"))
assert(ps.queryParams == ps.all)
assert(ps.formParams == Map.empty)
}
}
}
|
overthink/circlet
|
src/test/scala/com/markfeeney/circlet/middleware/ParamsTest.scala
|
Scala
|
mit
| 3,317
|
package example
import org.scalatra.test.scalatest.ScalatraFlatSpec
import skinny.micro._
import scala.concurrent.Future
object TypedHelloServlet extends TypedSingleApp {
def message(implicit ctx: Context) = {
s"Hello, ${params(ctx).getOrElse("name", "Anonymous")}"
}
// synchronous action
get("/hello")(Ok(message))
post("/hello")(Ok(message))
// asynchronous action
asyncGet("/hello/async") {
implicit val ctx = context
Future { Ok(message(ctx)) }
}
}
class TypedHelloServletSpec extends ScalatraFlatSpec {
addServlet(TypedHelloServlet, "/*")
it should "work fine with GET Requests" in {
get("/hello") {
status should equal(200)
body should equal("Hello, Anonymous")
}
get("/hello?name=Martin") {
status should equal(200)
body should equal("Hello, Martin")
}
}
it should "work fine with POST Requests" in {
post("/hello", Map()) {
status should equal(200)
body should equal("Hello, Anonymous")
}
post("/hello", Map("name" -> "Martin")) {
status should equal(200)
body should equal("Hello, Martin")
}
}
it should "work fine with AsyncResult" in {
get("/hello/async") {
status should equal(200)
body should equal("Hello, Anonymous")
}
get("/hello/async?name=Martin") {
status should equal(200)
body should equal("Hello, Martin")
}
}
}
|
xerial/skinny-micro
|
micro/src/test/scala/example/TypedHelloServletSpec.scala
|
Scala
|
bsd-2-clause
| 1,407
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.client
import org.apache.hadoop.conf.Configuration
import org.scalactic.source.Position
import org.scalatest.Tag
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.hive.HiveUtils
private[client] abstract class HiveVersionSuite(version: String) extends SparkFunSuite {
override protected val enableAutoThreadAudit = false
protected var client: HiveClient = null
protected def buildClient(
hadoopConf: Configuration,
sharesHadoopClasses: Boolean = true): HiveClient = {
// Hive changed the default of datanucleus.schema.autoCreateAll from true to false and
// hive.metastore.schema.verification from false to true since 2.0
// For details, see the JIRA HIVE-6113 and HIVE-12463
if (version == "2.0" || version == "2.1" || version == "2.2" || version == "2.3" ||
version == "3.1") {
hadoopConf.set("datanucleus.schema.autoCreateAll", "true")
hadoopConf.set("hive.metastore.schema.verification", "false")
}
// Since Hive 3.0, HIVE-19310 skipped `ensureDbInit` if `hive.in.test=false`.
if (version == "3.1") {
hadoopConf.set("hive.in.test", "true")
}
HiveClientBuilder.buildClient(
version,
hadoopConf,
HiveUtils.formatTimeVarsForHiveClient(hadoopConf),
sharesHadoopClasses = sharesHadoopClasses)
}
override def suiteName: String = s"${super.suiteName}($version)"
override protected def test(testName: String, testTags: Tag*)(testFun: => Any)
(implicit pos: Position): Unit = {
super.test(s"$version: $testName", testTags: _*)(testFun)
}
}
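// Hedged usage sketch, not part of the original file: a concrete suite fixes the
// Hive version in the constructor, obtains a client through buildClient above, and
// the overridden test() prefixes every test name with that version. The suite name
// and the listDatabases("*") call are illustrative assumptions about HiveClient.
private[client] class ExampleHive23ClientSuite extends HiveVersionSuite("2.3") {
  test("build client and list databases") {
    client = buildClient(new Configuration())
    assert(client.listDatabases("*").contains("default"))
  }
}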
|
yanboliang/spark
|
sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveVersionSuite.scala
|
Scala
|
apache-2.0
| 2,414
|
/*
* Licensed to STRATIO (C) under one or more contributor license agreements.
* See the NOTICE file distributed with this work for additional information
* regarding copyright ownership. The STRATIO (C) licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.stratio.connector.sparksql.core.connection
import com.stratio.crossdata.common.connector.ConnectorClusterConfig
import com.stratio.crossdata.common.security.ICredentials
class Connection(
val config: ConnectorClusterConfig,
val credentials: Option[ICredentials] = None,
val busy: Boolean = false,
val lastUseDate: Long = System.currentTimeMillis()) {
def setBusy(state: Boolean): Connection =
new Connection(config,credentials,state)
}
|
Stratio/stratio-connector-sparkSQL
|
connector-sparksql/src/main/scala/com/stratio/connector/sparksql/core/connection/Connection.scala
|
Scala
|
apache-2.0
| 1,253
|
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.connector.datasource
import slamdata.Predef.{Array, Boolean, Option, Some, SuppressWarnings}
import quasar.api.datasource.DatasourceType
import quasar.api.resource._
import quasar.connector.Offset
import cats.Applicative
import cats.data.{NonEmptyList, OptionT}
import cats.instances.option._
import cats.syntax.traverse._
import monocle.{PTraversal, Traversal}
import scalaz.syntax.functor._
import shims.applicativeToCats
/** @tparam F effects
* @tparam G multiple results
* @tparam Q query
*/
trait Datasource[F[_], G[_], Q, R, P <: ResourcePathType] {
/** The type of this datasource. */
def kind: DatasourceType
/** The set of `Loader`s provided by this datasource. */
def loaders: NonEmptyList[Loader[F, Q, R]]
/** Returns whether or not the specified path refers to a resource in the
* specified datasource.
*/
def pathIsResource(path: ResourcePath): F[Boolean]
/** Returns the name and type of the `ResourcePath`s implied by concatenating
* each name to `prefixPath` or `None` if `prefixPath` does not exist.
*/
def prefixedChildPaths(prefixPath: ResourcePath)
: F[Option[G[(ResourceName, P)]]]
/** Attempts a 'full' load, returning `None` if unsupported by this datasource. */
def loadFull(q: Q)(implicit F: Applicative[F]): OptionT[F, R] =
OptionT {
loaders
.toList
.collectFirst { case Loader.Batch(b) => b }
.traverse(_.loadFull(q))
}
/** Attempts to seek and load from the supplied offset, returning `None`
* if unsupported by this datasource.
*/
def loadFrom(q: Q, offset: Offset)(implicit F: Applicative[F]): OptionT[F, R] =
OptionT {
loaders
.toList
.collectFirst { case Loader.Batch(BatchLoader.Seek(f)) => f }
.traverse(_(q, Some(offset)))
}
}
object Datasource {
@SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
def widenPathType[F[_], G[_], Q, R, PI <: ResourcePathType, PO >: PI <: ResourcePathType](
ds: Datasource[F, G, Q, R, PI])
: Datasource[F, G, Q, R, PO] =
ds.asInstanceOf[Datasource[F, G, Q, R, PO]]
def loaders[F[_], G[_], Q, R, P <: ResourcePathType]
: Traversal[Datasource[F, G, Q, R ,P], Loader[F, Q, R]] =
ploaders[F, G, Q, R, Q, R, P]
def ploaders[F[_], G[_], Q1, R1, Q2, R2, P <: ResourcePathType]
: PTraversal[Datasource[F, G, Q1, R1, P], Datasource[F, G, Q2, R2, P], Loader[F, Q1, R1], Loader[F, Q2, R2]] =
new PTraversal[Datasource[F, G, Q1, R1, P], Datasource[F, G, Q2, R2, P], Loader[F, Q1, R1], Loader[F, Q2, R2]] {
def modifyF[H[_]: scalaz.Applicative](f: Loader[F, Q1, R1] => H[Loader[F, Q2, R2]])(s: Datasource[F, G, Q1, R1, P]) =
s.loaders.traverse(f) map { ls =>
new Datasource[F, G, Q2, R2, P] {
val kind = s.kind
val loaders = ls
def pathIsResource(p: ResourcePath) = s.pathIsResource(p)
def prefixedChildPaths(pfx: ResourcePath) = s.prefixedChildPaths(pfx)
}
}
}
}
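// Hedged sketch, not part of the original file: composing the trait methods above to
// prefer a full load and fall back to loading from an offset. The cats.Monad import
// and OptionT#orElse are assumptions beyond this file's own imports.
object DatasourceLoadSketch {
  import cats.Monad

  def loadPreferFull[F[_]: Monad, G[_], Q, R, P <: ResourcePathType](
      ds: Datasource[F, G, Q, R, P],
      q: Q,
      offset: Offset)
      : OptionT[F, R] =
    ds.loadFull(q).orElse(ds.loadFrom(q, offset))
}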
|
slamdata/quasar
|
connector/src/main/scala/quasar/connector/datasource/Datasource.scala
|
Scala
|
apache-2.0
| 3,638
|
package org.jetbrains.plugins.scala.projectHighlighting
import com.intellij.openapi.util.TextRange
import com.intellij.pom.java.LanguageLevel
import org.jetbrains.plugins.scala.FlakyTests
import org.junit.experimental.categories.Category
@Category(Array(classOf[FlakyTests]))
class ScalaFiddleEditorProjectHighlightingTest extends GithubSbtAllProjectHighlightingTest {
override def jdkLanguageLevel: LanguageLevel = LanguageLevel.JDK_1_8
override def githubUsername = "scalafiddle"
override def githubRepoName = "scalafiddle-editor"
override def revision = "e9bbda4d4190d262a93405365f38c93c8e7988b5"
override def filesWithProblems: Map[String, Set[TextRange]] = Map(
"server/src/main/scala/controllers/SocialAuthController.scala" -> Set(),
"server/src/main/scala/controllers/Application.scala" -> Set()
)
}
|
JetBrains/intellij-scala
|
scala/scala-impl/test/org/jetbrains/plugins/scala/projectHighlighting/ScalaFiddleEditorProjectHighlightingTest.scala
|
Scala
|
apache-2.0
| 834
|
/*
* Copyright 2013 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
import cats.Contravariant
import cats.Show
import cats.data.NonEmptyList
import cats.effect.Sync
import cats.syntax.all._
import fs2.Chunk
import fs2.Stream
import fs2.io.file.Files
import fs2.io.readInputStream
import org.http4s.Charset.`UTF-8`
import org.http4s.headers._
import org.http4s.multipart.Multipart
import org.http4s.multipart.MultipartEncoder
import scodec.bits.ByteVector
import java.io._
import java.nio.CharBuffer
import scala.annotation.implicitNotFound
@implicitNotFound(
"Cannot convert from ${A} to an Entity, because no EntityEncoder[${F}, ${A}] instance could be found."
)
trait EntityEncoder[+F[_], A] { self =>
/** Convert the type `A` to an [[Entity]] in the effect type `F` */
def toEntity(a: A): Entity[F]
/** Headers that may be added to a [[Message]]
*
* Examples of such headers would be Content-Type.
* __NOTE:__ The Content-Length header will be generated from the resulting Entity and thus should not be added.
*/
def headers: Headers
/** Make a new [[EntityEncoder]] using this type as a foundation */
def contramap[B](f: B => A): EntityEncoder[F, B] =
new EntityEncoder[F, B] {
override def toEntity(a: B): Entity[F] = self.toEntity(f(a))
override def headers: Headers = self.headers
}
/** Get the [[org.http4s.headers.`Content-Type`]] of the body encoded by this [[EntityEncoder]],
* if defined in the headers
*/
def contentType: Option[`Content-Type`] = headers.get[`Content-Type`]
/** Get the [[Charset]] of the body encoded by this [[EntityEncoder]], if defined in the headers */
def charset: Option[Charset] = headers.get[`Content-Type`].flatMap(_.charset)
/** Generate a new EntityEncoder that will contain the `Content-Type` header */
def withContentType(tpe: `Content-Type`): EntityEncoder[F, A] =
new EntityEncoder[F, A] {
override def toEntity(a: A): Entity[F] = self.toEntity(a)
override val headers: Headers = self.headers.put(tpe)
}
}
object EntityEncoder {
type Pure[A] = EntityEncoder[fs2.Pure, A]
object Pure {
def apply[A](implicit ev: EntityEncoder.Pure[A]): EntityEncoder.Pure[A] = ev
}
private val DefaultChunkSize = 4096
/** summon an implicit [[EntityEncoder]] */
def apply[F[_], A](implicit ev: EntityEncoder[F, A]): EntityEncoder[F, A] = ev
/** Create a new [[EntityEncoder]] */
def encodeBy[F[_], A](hs: Headers)(f: A => Entity[F]): EntityEncoder[F, A] =
new EntityEncoder[F, A] {
override def toEntity(a: A): Entity[F] = f(a)
override def headers: Headers = hs
}
/** Create a new [[EntityEncoder]] */
def encodeBy[F[_], A](hs: Header.ToRaw*)(f: A => Entity[F]): EntityEncoder[F, A] = {
val hdrs = if (hs.nonEmpty) Headers(hs: _*) else Headers.empty
encodeBy(hdrs)(f)
}
/** Create a new [[EntityEncoder]]
*
* This constructor is a helper for types that can be serialized synchronously, for example a String.
*/
def simple[A](hs: Header.ToRaw*)(toChunk: A => Chunk[Byte]): EntityEncoder.Pure[A] =
encodeBy(hs: _*)(a => Entity.strict(toChunk(a)))
/** Encodes a value from its Show instance. Too broad to be implicit, too useful to not exist. */
def showEncoder[A](implicit charset: Charset = `UTF-8`, show: Show[A]): EntityEncoder.Pure[A] = {
val hdr = `Content-Type`(MediaType.text.plain).withCharset(charset)
simple[A](hdr)(a => Chunk.array(show.show(a).getBytes(charset.nioCharset)))
}
def emptyEncoder[A]: EntityEncoder.Pure[A] =
new EntityEncoder[fs2.Pure, A] {
def toEntity(a: A): Entity[fs2.Pure] = Entity.empty
def headers: Headers = Headers.empty
}
/** A stream encoder is intended for streaming, and does not calculate its
* bodies in advance. As such, it does not calculate the Content-Length in
* advance. This is for use with chunked transfer encoding.
*/
implicit def streamEncoder[F[_], A](implicit
W: EntityEncoder[F, A]
): EntityEncoder[F, Stream[F, A]] =
new EntityEncoder[F, Stream[F, A]] {
override def toEntity(a: Stream[F, A]): Entity[F] =
Entity(a.flatMap(W.toEntity(_).body))
override def headers: Headers =
W.headers.get[`Transfer-Encoding`] match {
case Some(transferCoding) if transferCoding.hasChunked =>
W.headers
case _ =>
W.headers.add(`Transfer-Encoding`(TransferCoding.chunked.pure[NonEmptyList]))
}
}
implicit val unitEncoder: EntityEncoder.Pure[Unit] =
emptyEncoder[Unit]
implicit def stringEncoder(implicit charset: Charset = `UTF-8`): EntityEncoder.Pure[String] = {
val hdr = `Content-Type`(MediaType.text.plain).withCharset(charset)
simple(hdr)(s => Chunk.array(s.getBytes(charset.nioCharset)))
}
implicit def charArrayEncoder(implicit
charset: Charset = `UTF-8`
): EntityEncoder.Pure[Array[Char]] =
stringEncoder.contramap(new String(_))
implicit val chunkEncoder: EntityEncoder.Pure[Chunk[Byte]] =
simple(`Content-Type`(MediaType.application.`octet-stream`))(identity)
implicit val byteArrayEncoder: EntityEncoder.Pure[Array[Byte]] =
chunkEncoder.contramap(Chunk.array[Byte])
implicit def byteVectorEncoder[F[_]]: EntityEncoder[F, ByteVector] =
chunkEncoder.contramap(Chunk.byteVector)
/** Encodes an entity body. Chunking of the stream is preserved. A
* `Transfer-Encoding: chunked` header is set, as we cannot know
* the content length without running the stream.
*/
implicit def entityBodyEncoder[F[_]]: EntityEncoder[F, EntityBody[F]] =
encodeBy(`Transfer-Encoding`(TransferCoding.chunked.pure[NonEmptyList])) { body =>
Entity(body, None)
}
// TODO if Header moves to Entity, can add a Content-Disposition with the filename
implicit def pathEncoder[F[_]: Files]: EntityEncoder[F, fs2.io.file.Path] =
encodeBy[F, fs2.io.file.Path](`Transfer-Encoding`(TransferCoding.chunked)) { p =>
Entity(Files[F].readAll(p))
}
implicit def inputStreamEncoder[F[_]: Sync, IS <: InputStream]: EntityEncoder[F, F[IS]] =
entityBodyEncoder[F].contramap { (in: F[IS]) =>
readInputStream[F](in.widen[InputStream], DefaultChunkSize)
}
// TODO parameterize chunk size
implicit def readerEncoder[F[_], R <: Reader](implicit
F: Sync[F],
charset: Charset = `UTF-8`,
): EntityEncoder[F, F[R]] =
entityBodyEncoder[F].contramap { (fr: F[R]) =>
// Shared buffer
val charBuffer = CharBuffer.allocate(DefaultChunkSize)
def readToBytes(r: Reader): F[Option[Chunk[Byte]]] =
for {
// Read into the buffer
readChars <- F.blocking(r.read(charBuffer))
} yield {
// Flip to read
charBuffer.flip()
if (readChars < 0) None
else if (readChars == 0) Some(Chunk.empty)
else {
// Encode to bytes according to the charset
val bb = charset.nioCharset.encode(charBuffer)
// Read into a Chunk
val b = new Array[Byte](bb.remaining())
bb.get(b)
Some(Chunk.array(b))
}
}
def useReader(r: Reader) =
Stream
.eval(readToBytes(r))
.repeat
.unNoneTerminate
.flatMap(Stream.chunk[F, Byte])
// The reader is closed at the end like InputStream
Stream.bracket(fr)(r => F.delay(r.close())).flatMap(useReader)
}
implicit def multipartEncoder[F[_]]: EntityEncoder[F, Multipart[F]] =
new MultipartEncoder[F]
implicit def entityEncoderContravariant[F[_]]: Contravariant[EntityEncoder[F, *]] =
new Contravariant[EntityEncoder[F, *]] {
override def contramap[A, B](r: EntityEncoder[F, A])(f: (B) => A): EntityEncoder[F, B] =
r.contramap(f)
}
implicit def serverSentEventEncoder[F[_]]: EntityEncoder[F, EventStream[F]] =
entityBodyEncoder[F]
.contramap[EventStream[F]](_.through(ServerSentEvent.encoder))
.withContentType(`Content-Type`(MediaType.`text/event-stream`))
}
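// Hedged usage sketch, not part of the original file: deriving an encoder for a small
// custom type by contramapping the string encoder defined above, inheriting its
// text/plain Content-Type header. The Greeting type is an illustrative assumption.
object EntityEncoderContramapSketch {
  final case class Greeting(name: String)

  // Pure because it reuses the pure string encoder; no effect is needed to encode.
  val greetingEncoder: EntityEncoder.Pure[Greeting] =
    EntityEncoder.stringEncoder.contramap(g => s"Hello, ${g.name}!")
}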
|
http4s/http4s
|
core/shared/src/main/scala/org/http4s/EntityEncoder.scala
|
Scala
|
apache-2.0
| 8,612
|
package name.abhijitsarkar.user.repository
import com.mongodb.casbah.MongoClient
import com.mongodb.casbah.commons.MongoDBObject
import name.abhijitsarkar.user.domain.UserAttributes._
import org.slf4j.LoggerFactory
object MongoDBCollectionFactory {
private val logger = LoggerFactory.getLogger(getClass)
def newCollection(name: String, hostname: String, port: Integer) = {
val collection = MongoClient(hostname, port)("akka")(name)
collection.createIndex(MongoDBObject(PHONE_NUM.toString -> 1), MongoDBObject("unique" -> true))
collection.createIndex(MongoDBObject(EMAIL.toString -> 1), MongoDBObject("unique" -> true, "sparse" -> true))
collection.indexInfo.foreach { index => logger.debug(s"Index: ${index.toMap}") }
collection
}
}
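// Hedged usage sketch, not part of the original file: obtaining the "users" collection
// from a MongoDB instance on localhost; host, port and collection name are
// illustrative assumptions.
object MongoDBCollectionFactoryUsageSketch {
  def localUsersCollection() =
    MongoDBCollectionFactory.newCollection("users", "localhost", 27017)
}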
|
asarkar/akka
|
user-service/src/main/scala/name/abhijitsarkar/user/repository/MongoDBCollectionFactory.scala
|
Scala
|
gpl-3.0
| 771
|
/**
* The MIT License (MIT)
* Copyright (c) 2016 Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.microsoft.azure.documentdb
import com.microsoft.azure.documentdb.internal.routing.RoutingMapProvider
import com.microsoft.azure.documentdb.internal.{DocumentServiceRequest, DocumentServiceResponse}
/**
* This is meant to be used only internally, as a bridge to access
* classes in com.microsoft.azure.documentdb
**/
object BridgeInternal {
def setFeedOptionPartitionKeyRangeId(options: FeedOptions, partitionKeyRangeId: String): Unit = {
options.setPartitionKeyRangeIdInternal(partitionKeyRangeId)
}
def getDocumentClientDoQuery(client: DocumentClient): DocumentServiceRequest => DocumentServiceResponse = {
client.doQuery
}
def getDocumentClientPartitionKeyRangeCache(client: DocumentClient): RoutingMapProvider = {
client.getPartitionKeyRangeCache
}
}
|
khdang/azure-documentdb-spark
|
src/main/scala/com/microsoft/azure/documentdb/BridgeInternal.scala
|
Scala
|
mit
| 1,974
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.zeppelin.cassandra
import java.io.{ByteArrayOutputStream, PrintStream}
import java.net.InetAddress
import java.nio.ByteBuffer
import java.text.SimpleDateFormat
import java.util
import java.util.Date
import java.util.concurrent.ConcurrentHashMap
import com.datastax.driver.core.DataType.Name._
import com.datastax.driver.core._
import com.datastax.driver.core.exceptions.DriverException
import com.datastax.driver.core.policies.{LoggingRetryPolicy, FallthroughRetryPolicy, DowngradingConsistencyRetryPolicy, Policies}
import org.apache.zeppelin.cassandra.TextBlockHierarchy._
import org.apache.zeppelin.display.AngularObjectRegistry
import org.apache.zeppelin.display.ui.OptionInput.ParamOption
import org.apache.zeppelin.interpreter.InterpreterResult.Code
import org.apache.zeppelin.interpreter.{InterpreterException, InterpreterResult, InterpreterContext}
import org.slf4j.LoggerFactory
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
/**
* Value object to store runtime query parameters
*
* @param consistency consistency level
* @param serialConsistency serial consistency level
* @param timestamp timestamp
* @param retryPolicy retry policy
* @param fetchSize query fetch size
* @param requestTimeOut request time out in millisecs
*/
case class CassandraQueryOptions(consistency: Option[ConsistencyLevel],
serialConsistency:Option[ConsistencyLevel],
timestamp: Option[Long],
retryPolicy: Option[RetryPolicy],
fetchSize: Option[Int],
requestTimeOut: Option[Int])
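// Hedged sketch, not part of the original file: a fully specified options value. Any
// field left as None falls back to the session defaults when applyQueryOptions
// (defined further below) is applied to a statement; the chosen levels and sizes are
// illustrative assumptions.
object CassandraQueryOptionsSketch {
  val example: CassandraQueryOptions = CassandraQueryOptions(
    consistency = Some(ConsistencyLevel.QUORUM),
    serialConsistency = Some(ConsistencyLevel.SERIAL),
    timestamp = None,
    retryPolicy = None,
    fetchSize = Some(500),
    requestTimeOut = Some(10000))
}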
/**
* Singleton object to store constants
*/
object InterpreterLogic {
val CHOICES_SEPARATOR : String = """\|"""
val VARIABLE_PATTERN = """\{\{[^}]+\}\}""".r
val SIMPLE_VARIABLE_DEFINITION_PATTERN = """\{\{([^=]+)=([^=]+)\}\}""".r
val MULTIPLE_CHOICES_VARIABLE_DEFINITION_PATTERN = """\{\{([^=]+)=((?:[^=]+\|)+[^|]+)\}\}""".r
val STANDARD_DATE_FORMAT = "yyyy-MM-dd HH:mm:ss"
val ACCURATE_DATE_FORMAT = "yyyy-MM-dd HH:mm:ss.SSS"
val defaultRetryPolicy = Policies.defaultRetryPolicy()
val downgradingConsistencyRetryPolicy = DowngradingConsistencyRetryPolicy.INSTANCE
val fallThroughRetryPolicy = FallthroughRetryPolicy.INSTANCE
val loggingDefaultRetryPolicy = new LoggingRetryPolicy(defaultRetryPolicy)
val loggingDownGradingRetryPolicy = new LoggingRetryPolicy(downgradingConsistencyRetryPolicy)
val loggingFallThroughRetryPolicy = new LoggingRetryPolicy(fallThroughRetryPolicy)
val preparedStatements : mutable.Map[String,PreparedStatement] = new ConcurrentHashMap[String,PreparedStatement]().asScala
val logger = LoggerFactory.getLogger(classOf[InterpreterLogic])
val paragraphParser = new ParagraphParser
val boundValuesParser = new BoundValuesParser
}
/**
* Implements the interpreting logic
* for CQL statements
* and parameter blocks
*
* @param session java driver session
*/
class InterpreterLogic(val session: Session) {
val enhancedSession: EnhancedSession = new EnhancedSession(session)
import InterpreterLogic._
def interpret(session:Session, stringStatements : String, context: InterpreterContext): InterpreterResult = {
logger.info(s"Executing CQL statements : \\n\\n$stringStatements\\n")
try {
val protocolVersion = session.getCluster.getConfiguration.getProtocolOptions.getProtocolVersion
val queries:List[AnyBlock] = parseInput(stringStatements)
val queryOptions = extractQueryOptions(queries
.filter(_.blockType == ParameterBlock)
.map(_.get[QueryParameters]))
logger.info(s"Current Cassandra query options = $queryOptions")
val queryStatements = queries.filter(_.blockType == StatementBlock).map(_.get[QueryStatement])
//Remove prepared statements
queryStatements
.filter(_.statementType == RemovePrepareStatementType)
.map(_.getStatement[RemovePrepareStm])
.foreach(remove => {
logger.debug(s"Removing prepared statement '${remove.name}'")
preparedStatements.remove(remove.name)
})
//Update prepared statement maps
queryStatements
.filter(_.statementType == PrepareStatementType)
.map(_.getStatement[PrepareStm])
.foreach(statement => {
logger.debug(s"Get or prepare statement '${statement.name}' : ${statement.query}")
preparedStatements.getOrElseUpdate(statement.name,session.prepare(statement.query))
})
val statements: List[Any] = queryStatements
.filter(st => (st.statementType != PrepareStatementType) && (st.statementType != RemovePrepareStatementType))
.map{
case x:SimpleStm => generateSimpleStatement(x, queryOptions, context)
case x:BatchStm => {
val builtStatements: List[Statement] = x.statements.map {
case st:SimpleStm => generateSimpleStatement(st, queryOptions, context)
case st:BoundStm => generateBoundStatement(session, st, queryOptions, context)
case _ => throw new InterpreterException(s"Unknown statement type")
}
generateBatchStatement(x.batchType, queryOptions, builtStatements)
}
case x:BoundStm => generateBoundStatement(session, x, queryOptions, context)
case x:DescribeCommandStatement => x
case x:HelpCmd => x
case x => throw new InterpreterException(s"Unknown statement type : ${x}")
}
val results: List[(Any,Any)] = for (statement <- statements) yield (enhancedSession.execute(statement),statement)
if (results.nonEmpty) {
results.last match {
case(res: ResultSet, st: Statement) => buildResponseMessage((res, st), protocolVersion)
case(output: String, _) => new InterpreterResult(Code.SUCCESS, output)
case _ => throw new InterpreterException(s"Cannot parse result type : ${results.last}")
}
} else {
new InterpreterResult(Code.SUCCESS, enhancedSession.displayNoResult)
}
} catch {
case dex: DriverException => {
logger.error(dex.getMessage, dex)
new InterpreterResult(Code.ERROR, parseException(dex))
}
case pex:ParsingException => {
logger.error(pex.getMessage, pex)
new InterpreterResult(Code.ERROR, pex.getMessage)
}
case iex: InterpreterException => {
logger.error(iex.getMessage, iex)
new InterpreterResult(Code.ERROR, iex.getMessage)
}
case ex: java.lang.Exception => {
logger.error(ex.getMessage, ex)
new InterpreterResult(Code.ERROR, parseException(ex))
}
}
}
def buildResponseMessage(lastResultSet: (ResultSet,Statement), protocolVersion: ProtocolVersion): InterpreterResult = {
val output = new StringBuilder()
val rows: collection.mutable.ArrayBuffer[Row] = ArrayBuffer()
val iterator: util.Iterator[Row] = lastResultSet._1.iterator()
while (iterator.hasNext) {
rows.append(iterator.next())
}
val columnsDefinitions: List[(String, DataType)] = lastResultSet._1
.getColumnDefinitions
.asList
.toList // Java list -> Scala list
.map(definition => (definition.getName, definition.getType))
if (rows.nonEmpty) {
// Create table headers
output
.append("%table ")
.append(columnsDefinitions.map { case (columnName, _) => columnName }.mkString("\t")).append("\n")
// Deserialize Data
rows.foreach {
row => {
val data = columnsDefinitions.map {
case (name, dataType) => {
if (row.isNull(name)) null else row.getObject(name)
}
}
output.append(data.mkString("\t")).append("\n")
}
}
} else {
val lastQuery: String = lastResultSet._2.toString
val executionInfo: ExecutionInfo = lastResultSet._1.getExecutionInfo
output.append(enhancedSession.displayExecutionStatistics(lastQuery, executionInfo))
}
val result: String = output.toString()
logger.debug(s"CQL result : \\n\\n$result\\n")
new InterpreterResult(Code.SUCCESS, result)
}
def parseInput(input:String): List[AnyBlock] = {
val parsingResult: ParagraphParser#ParseResult[List[AnyBlock]] = paragraphParser.parseAll(paragraphParser.queries, input)
parsingResult match {
case paragraphParser.Success(blocks,_) => blocks
case paragraphParser.Failure(msg,next) => {
throw new InterpreterException(s"Error parsing input:\\n\\t'$input'\\nDid you forget to add ; (semi-colon) at the end of each CQL statement ?")
}
case paragraphParser.Error(msg,next) => {
throw new InterpreterException(s"Error parsing input:\\n\\t'$input'\\nDid you forget to add ; (semi-colon) at the end of each CQL statement ?")
}
case _ => throw new InterpreterException(s"Error parsing input: $input")
}
}
def extractQueryOptions(parameters: List[QueryParameters]): CassandraQueryOptions = {
logger.debug(s"Extracting query options from $parameters")
val consistency: Option[ConsistencyLevel] = parameters
.filter(_.paramType == ConsistencyParam)
.map(_.getParam[Consistency])
.flatMap(x => Option(x.value))
.headOption
val serialConsistency: Option[ConsistencyLevel] = parameters
.filter(_.paramType == SerialConsistencyParam)
.map(_.getParam[SerialConsistency])
.flatMap(x => Option(x.value))
.headOption
val timestamp: Option[Long] = parameters
.filter(_.paramType == TimestampParam)
.map(_.getParam[Timestamp])
.flatMap(x => Option(x.value))
.headOption
val retryPolicy: Option[RetryPolicy] = parameters
.filter(_.paramType == RetryPolicyParam)
.map(_.getParam[RetryPolicy])
.headOption
val fetchSize: Option[Int] = parameters
.filter(_.paramType == FetchSizeParam)
.map(_.getParam[FetchSize])
.flatMap(x => Option(x.value))
.headOption
val requestTimeOut: Option[Int] = parameters
.filter(_.paramType == RequestTimeOutParam)
.map(_.getParam[RequestTimeOut])
.flatMap(x => Option(x.value))
.headOption
CassandraQueryOptions(consistency,serialConsistency, timestamp, retryPolicy, fetchSize, requestTimeOut)
}
def generateSimpleStatement(st: SimpleStm, options: CassandraQueryOptions,context: InterpreterContext): SimpleStatement = {
logger.debug(s"Generating simple statement : '${st.text}'")
val statement = new SimpleStatement(maybeExtractVariables(st.text, context))
applyQueryOptions(options, statement)
statement
}
def generateBoundStatement(session: Session, st: BoundStm, options: CassandraQueryOptions,context: InterpreterContext): BoundStatement = {
logger.debug(s"Generating bound statement with name : '${st.name}' and bound values : ${st.values}")
preparedStatements.get(st.name) match {
case Some(ps) => {
val boundValues = maybeExtractVariables(st.values, context)
createBoundStatement(session.getCluster.getConfiguration.getCodecRegistry, st.name, ps, boundValues)
}
case None => throw new InterpreterException(s"The statement '${st.name}' can not be bound to values. " +
s"Are you sure you did prepare it with @prepare[${st.name}] ?")
}
}
def generateBatchStatement(batchType: BatchStatement.Type, options: CassandraQueryOptions, statements: List[Statement]): BatchStatement = {
logger.debug(s"""Generating batch statement of type '${batchType} for ${statements.mkString(",")}'""")
val batch = new BatchStatement(batchType)
statements.foreach(batch.add(_))
applyQueryOptions(options, batch)
batch
}
def maybeExtractVariables(statement: String, context: InterpreterContext): String = {
def findInAngularRepository(variable: String): Option[AnyRef] = {
val registry = context.getAngularObjectRegistry
val noteId = context.getNoteId
val paragraphId = context.getParagraphId
val paragraphScoped: Option[AnyRef] = Option(registry.get(variable, noteId, paragraphId)).map[AnyRef](_.get())
paragraphScoped
}
def extractVariableAndDefaultValue(statement: String, exp: String):String = {
exp match {
case MULTIPLE_CHOICES_VARIABLE_DEFINITION_PATTERN(variable, choices) => {
val escapedExp: String = exp.replaceAll("""\{""", """\\{""").replaceAll("""\}""", """\\}""").replaceAll("""\|""", """\\|""")
findInAngularRepository(variable) match {
case Some(value) => statement.replaceAll(escapedExp,value.toString)
case None => {
val listChoices:List[String] = choices.trim.split(CHOICES_SEPARATOR).toList
val paramOptions= listChoices.map(choice => new ParamOption(choice, choice))
val selected = context.getGui.select(variable, listChoices.head, paramOptions.toArray)
statement.replaceAll(escapedExp,selected.toString)
}
}
}
case SIMPLE_VARIABLE_DEFINITION_PATTERN(variable,defaultVal) => {
val escapedExp: String = exp.replaceAll("""\{""", """\\{""").replaceAll("""\}""", """\\}""")
findInAngularRepository(variable) match {
case Some(value) => statement.replaceAll(escapedExp,value.toString)
case None => {
val value = context.getGui.input(variable,defaultVal)
statement.replaceAll(escapedExp,value.toString)
}
}
}
case _ => throw new ParsingException(s"Invalid bound variable definition for '$exp' in '$statement'. It should be of form 'variable=defaultValue' or 'variable=value1|value2|...|valueN'")
}
}
VARIABLE_PATTERN.findAllIn(statement).foldLeft(statement)(extractVariableAndDefaultValue _)
}
def applyQueryOptions(options: CassandraQueryOptions, statement: Statement): Unit = {
options.consistency.foreach(statement.setConsistencyLevel(_))
options.serialConsistency.foreach(statement.setSerialConsistencyLevel(_))
options.timestamp.foreach(statement.setDefaultTimestamp(_))
options.retryPolicy.foreach {
case DefaultRetryPolicy => statement.setRetryPolicy(defaultRetryPolicy)
case DowngradingRetryPolicy => statement.setRetryPolicy(downgradingConsistencyRetryPolicy)
case FallThroughRetryPolicy => statement.setRetryPolicy(fallThroughRetryPolicy)
case LoggingDefaultRetryPolicy => statement.setRetryPolicy(loggingDefaultRetryPolicy)
case LoggingDowngradingRetryPolicy => statement.setRetryPolicy(loggingDownGradingRetryPolicy)
case LoggingFallThroughRetryPolicy => statement.setRetryPolicy(loggingFallThroughRetryPolicy)
case _ => throw new InterpreterException(s"""Unknown retry policy ${options.retryPolicy.getOrElse("???")}""")
}
options.fetchSize.foreach(statement.setFetchSize(_))
options.requestTimeOut.foreach(statement.setReadTimeoutMillis(_))
}
private def createBoundStatement(codecRegistry: CodecRegistry, name: String, ps: PreparedStatement, rawBoundValues: String): BoundStatement = {
val dataTypes = ps.getVariables.toList
.map(cfDef => cfDef.getType)
val boundValuesAsText = parseBoundValues(name,rawBoundValues)
if(dataTypes.size != boundValuesAsText.size) throw new InterpreterException(s"Invalid @bind values for prepared statement '$name'. " +
s"Prepared parameters has ${dataTypes.size} variables whereas bound values have ${boundValuesAsText.size} parameters ...")
val convertedValues: List[AnyRef] = boundValuesAsText
.zip(dataTypes).map {
case (value, dataType) => {
if(value.trim == "null") {
null
} else {
val codec: TypeCodec[AnyRef] = codecRegistry.codecFor[AnyRef](dataType)
dataType.getName match {
case (ASCII | TEXT | VARCHAR) => value.trim.replaceAll("(?<!')'","")
case (INT | VARINT) => value.trim.toInt
case (BIGINT | COUNTER) => value.trim.toLong
case BLOB => ByteBuffer.wrap(value.trim.getBytes)
case BOOLEAN => value.trim.toBoolean
case DECIMAL => BigDecimal(value.trim)
case DOUBLE => value.trim.toDouble
case FLOAT => value.trim.toFloat
case INET => InetAddress.getByName(value.trim)
case TIMESTAMP => parseDate(value.trim)
case (UUID | TIMEUUID) => java.util.UUID.fromString(value.trim)
case LIST => codec.parse(boundValuesParser.parse(boundValuesParser.list, value).get)
case SET => codec.parse(boundValuesParser.parse(boundValuesParser.set, value).get)
case MAP => codec.parse(boundValuesParser.parse(boundValuesParser.map, value).get)
case UDT => codec.parse(boundValuesParser.parse(boundValuesParser.udt, value).get)
case TUPLE => codec.parse(boundValuesParser.parse(boundValuesParser.tuple, value).get)
case _ => throw new InterpreterException(s"Cannot parse data of type : ${dataType.toString}")
}
}
}
}.asInstanceOf[List[AnyRef]]
ps.bind(convertedValues.toArray: _*)
}
protected def parseBoundValues(psName: String, boundValues: String): List[String] = {
val result: BoundValuesParser#ParseResult[List[String]] = boundValuesParser.parseAll(boundValuesParser.values, boundValues)
result match {
case boundValuesParser.Success(list,_) => list
case _ => throw new InterpreterException(s"Cannot parse bound values for prepared statement '$psName' : $boundValues. Did you forget to wrap text with ' (single quote)?")
}
}
def parseDate(dateString: String): Date = {
dateString match {
case boundValuesParser.STANDARD_DATE_PATTERN(datePattern) => new SimpleDateFormat(STANDARD_DATE_FORMAT).parse(datePattern)
case boundValuesParser.ACCURATE_DATE_PATTERN(datePattern) => new SimpleDateFormat(ACCURATE_DATE_FORMAT).parse(datePattern)
case _ => throw new InterpreterException(s"Cannot parse date '$dateString'. " +
s"Accepted formats : $STANDARD_DATE_FORMAT OR $ACCURATE_DATE_FORMAT");
}
}
def parseException(ex: Exception): String = {
val os = new ByteArrayOutputStream()
val ps = new PrintStream(os)
ex.printStackTrace(ps)
os.toString("UTF-8")
}
}
|
SarunasG/zeppelin-oidc
|
cassandra/src/main/scala/org/apache/zeppelin/cassandra/InterpreterLogic.scala
|
Scala
|
apache-2.0
| 19,326
|
package edu.gemini.util.security.permission
import scalaz._
import Scalaz._
import java.security.{BasicPermission, Permission}
import edu.gemini.spModel.core.SPProgramID
trait ProgramPermission { this : Permission => }
object ProgramPermission {
/** Does the user have permission to read the specified program? */
case class Read(id:SPProgramID) extends BasicPermission(id.toString, "read") with ProgramPermission
}
|
arturog8m/ocs
|
bundle/edu.gemini.util.security/src/main/scala/edu/gemini/util/security/permission/ProgramPermission.scala
|
Scala
|
bsd-3-clause
| 424
|
package com.timushev.sbt.updates
import com.timushev.sbt.updates.versions.Version
import sbt._
import scala.collection.immutable.SortedSet
import com.timushev.sbt.updates.Compat._
trait UpdatesKeys {
lazy val dependencyUpdatesReportFile = settingKey[File]("Dependency updates report file")
@deprecated(
"dependencyUpdatesExclusions is deprecated in favor of dependencyUpdatesFilter, which defaults" + " to a truthy check. Migrate exclusions by setting dependencyUpdatesFilter -= yourExclusions",
"0.4.0"
)
lazy val dependencyUpdatesExclusions =
settingKey[ModuleFilter]("Dependencies that are excluded from update reporting")
lazy val dependencyUpdatesFilter = settingKey[ModuleFilter]("Dependencies that are included to update reporting")
lazy val dependencyUpdatesFailBuild = settingKey[Boolean]("Fail a build if updates found")
lazy val dependencyAllowPreRelease = settingKey[Boolean]("If true, also take pre-release versions into consideration")
lazy val dependencyUpdatesData = taskKey[Map[ModuleID, SortedSet[Version]]]("")
lazy val dependencyUpdates = taskKey[Unit]("Shows a list of project dependencies that can be updated.")
lazy val dependencyUpdatesReport =
taskKey[File]("Writes a list of project dependencies that can be updated to a file.")
}
object UpdatesKeys extends UpdatesKeys
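// Hedged usage sketch, not part of the original file: how build code might set these
// keys; the chosen values are illustrative assumptions and real projects would
// normally configure them in build.sbt.
object UpdatesKeysUsageSketch {
  import UpdatesKeys._

  val exampleSettings: Seq[Def.Setting[_]] = Seq(
    dependencyUpdatesFailBuild := true,
    dependencyAllowPreRelease := false
  )
}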
|
rtimush/sbt-updates
|
src/main/scala/com/timushev/sbt/updates/UpdatesKeys.scala
|
Scala
|
bsd-3-clause
| 1,351
|
package ch6
import scala.annotation.tailrec
import RNG._
object Exercise10 {
import State._
case class State[S, +A] (run: S => (A, S)) {
def map[B](f: A => B): State[S, B] = flatMap(a => unit(f(a)))
def map2[B, C](sb: State[S, B])(f: (A, B) => C): State[S, C] =
flatMap(a => sb.map(b => f(a, b)))
def flatMap[B](f: A => State[S, B]): State[S, B] = State(s => {
val (a, s1) = run(s)
f(a).run(s1)
})
}
object State {
def unit[S, A](a: A): State[S, A] = State(s => (a, s))
def sequence[S, A](list: List[State[S, A]]): State[S, List[A]] =
list.foldRight(unit[S, List[A]](List()))((f, acc) => f.map2(acc)(_ :: _))
}
type Rand[A] = State[RNG, A]
val int: Rand[Int] = State[RNG, Int](_.nextInt)
def ints(count: Int): Rand[List[Int]] = State.sequence[RNG, Int](List.fill(count)(int))
}
import Exercise10._
/*
from the REPL you can test by typing:
:load src/main/scala/fpinscala/ch6/RNG.scala
:load src/main/scala/fpinscala/ch6/Exercise10.scala
*/
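// Hedged sketch, not part of the original file: running the composed state action
// against a concrete generator; any RNG implementation from RNG.scala (loaded above)
// can be passed in.
object Exercise10Sketch {
  // Threads the supplied RNG through three draws, returning the ints and the final state.
  def threeInts(rng: RNG): (List[Int], RNG) = ints(3).run(rng)
}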
|
rucka/fpinscala
|
src/main/scala/fpinscala/ch6/Exercise10.scala
|
Scala
|
gpl-2.0
| 1,001
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn.ops
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import scala.reflect.ClassTag
class Expm1[T: ClassTag, D: ClassTag]()(implicit ev: TensorNumeric[T], ev2: TensorNumeric[D])
extends Operation[Tensor[D], Tensor[D], T]{
output = Tensor[D]()
override def updateOutput(input: Tensor[D]): Tensor[D] = {
output.resizeAs(input)
output.map(input, (a, b) => ev2.minus(ev2.exp(b), ev2.one))
}
override def getClassTagNumerics() : (Array[ClassTag[_]], Array[TensorNumeric[_]]) = {
(Array[ClassTag[_]](scala.reflect.classTag[T], scala.reflect.classTag[D]),
Array[TensorNumeric[_]](ev, ev2))
}
}
object Expm1 {
def apply[T: ClassTag, D: ClassTag]()(implicit ev: TensorNumeric[T], ev2: TensorNumeric[D])
: Expm1[T, D] = new Expm1()
}
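// Hedged usage sketch, not part of the original file: applying the operation to a small
// float tensor so that each element x becomes exp(x) - 1. Tensor[Float](3).fill(1.0f)
// and forward(...) are assumptions about the BigDL tensor/module API.
object Expm1Sketch {
  def example(): Tensor[Float] = {
    val op = Expm1[Float, Float]()
    val input = Tensor[Float](3).fill(1.0f) // a 1-D tensor of ones
    op.forward(input)                       // each element becomes exp(1) - 1, about 1.718
  }
}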
|
intel-analytics/BigDL
|
scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/nn/ops/Expm1.scala
|
Scala
|
apache-2.0
| 1,491
|
package com.ecfront.ez.framework.cluster.nats
import com.ecfront.ez.framework.core.cluster._
import scala.beans.BeanProperty
object NatsCluster extends Cluster {
override val rpc: ClusterRPC = NatsClusterRPC
override val mq: ClusterMQ = NatsClusterMQ
override val dist: ClusterDist = null
override val cache: ClusterCache = null
override val manage: ClusterManage = NatsClusterManage
}
class MessageWrap {
@BeanProperty var message: String = _
@BeanProperty var args: Map[String, String] = _
}
|
gudaoxuri/ez-framework
|
services/cluster-nats/src/main/scala/com/ecfront/ez/framework/cluster/nats/NatsCluster.scala
|
Scala
|
apache-2.0
| 517
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.worker
import java.io.File
import scala.collection.JavaConversions._
import org.scalatest.FunSuite
import org.apache.spark.deploy.{ApplicationDescription, Command, ExecutorState}
import org.apache.spark.SparkConf
class ExecutorRunnerTest extends FunSuite {
test("command includes appId") {
val appId = "12345-worker321-9876"
val sparkHome = sys.props.getOrElse("spark.test.home", fail("spark.test.home is not set!"))
val appDesc = new ApplicationDescription("app name", Some(8), 500,
Command("foo", Seq(appId), Map(), Seq(), Seq(), Seq()), "appUiUrl")
val er = new ExecutorRunner(appId, 1, appDesc, 8, 500, null, "blah", "worker321", 123,
"publicAddr", new File(sparkHome), new File("ooga"), "blah", "", new SparkConf, Seq("localDir"),
ExecutorState.RUNNING)
val builder = CommandUtils.buildProcessBuilder(appDesc.command, 512, sparkHome, er.substituteVariables)
assert(builder.command().last === appId)
}
}
|
trueyao/spark-lever
|
core/src/test/scala/org/apache/spark/deploy/worker/ExecutorRunnerTest.scala
|
Scala
|
apache-2.0
| 1,793
|
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.execution.Ack.Stop
import scala.util.control.NonFatal
import monix.execution.{Ack, Cancelable, Scheduler}
import monix.reactive.Observable
import monix.reactive.observers.Subscriber
import scala.concurrent.Future
private[reactive] final class MapAccumulateObservable[A, S, R](
source: Observable[A],
initial: () => S,
f: (S, A) => (S, R))
extends Observable[R] {
def unsafeSubscribeFn(out: Subscriber[R]): Cancelable = {
var streamErrors = true
try {
val initialState = initial()
streamErrors = false
// Initial state was evaluated, subscribing to source
source.unsafeSubscribeFn(new Subscriber[A] {
implicit val scheduler: Scheduler = out.scheduler
private[this] var isDone = false
private[this] var state = initialState
def onNext(elem: A): Future[Ack] = {
// Protects calls to user code from within the operator and
// streams the error downstream if it happens, but if the
// error happens because of calls to `onNext` or other
// protocol calls, then the behavior should be undefined.
var streamError = true
try {
val (newState, result) = f(state, elem)
streamError = false
state = newState
out.onNext(result)
} catch {
case NonFatal(ex) if streamError =>
onError(ex)
Stop
}
}
def onError(ex: Throwable): Unit =
if (!isDone) {
isDone = true
out.onError(ex)
}
def onComplete(): Unit =
if (!isDone) {
isDone = true
out.onComplete()
}
})
} catch {
case NonFatal(ex) if streamErrors =>
// The initial state triggered an error
out.onError(ex)
Cancelable.empty
}
}
}
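// Hedged usage sketch, not part of the original file: this operator is assumed to back
// Observable#mapAccumulate, which threads a state value through the stream; here each
// element is paired with the running sum seen so far.
private[reactive] object MapAccumulateSketch {
  def runningSum(source: Observable[Int]): Observable[(Int, Int)] =
    source.mapAccumulate(0)((sum, x) => (sum + x, (x, sum + x)))
}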
|
monixio/monix
|
monix-reactive/shared/src/main/scala/monix/reactive/internal/operators/MapAccumulateObservable.scala
|
Scala
|
apache-2.0
| 2,603
|
package eu.timepit.refined.types
import eu.timepit.refined.types.all._
import org.scalacheck.Prop._
import org.scalacheck.Properties
class CharTypesSpec extends Properties("CharTypes") {
property("LowerCaseChar.from('a')") = secure {
LowerCaseChar.from('a').isRight
}
property("LowerCaseChar.from('A')") = secure {
LowerCaseChar.from('A') ?= Left("Predicate failed: isLower('A').")
}
property("UpperCaseChar.from('A')") = secure {
UpperCaseChar.from('A').isRight
}
property("UpperCaseChar.from('a')") = secure {
UpperCaseChar.from('a') ?= Left("Predicate failed: isUpper('a').")
}
}
|
fthomas/refined
|
modules/core/shared/src/test/scala/eu/timepit/refined/types/CharTypesSpec.scala
|
Scala
|
mit
| 621
|
/*
* Copyright 2016 Nikolay Tatarinov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.rockjam.iqnotes
import akka.http.scaladsl.model.StatusCodes
import com.github.rockjam.iqnotes.http.AuthRoutes
import com.github.rockjam.iqnotes.models._
import de.heikoseeberger.akkahttpjson4s.Json4sSupport
import org.json4s.{ native, DefaultFormats }
class AuthSpec extends SpecBase with Json4sSupport {
behavior of "authorization"
it should "grant access token to existing user with correct password" in grantToken()
it should "not grant access to user with wrong password" in dontGrantWrongPass()
it should "not grant access to not registered user" in dontGrantNotRegistered()
it should "not allow to register users with existing username" in dontRegisterSameUsername()
implicit val serialization = native.Serialization
implicit val formats = DefaultFormats
val authRoutes = AuthRoutes()
def grantToken(): Unit = {
val regRequest = UserRegisterRequest("rockjam", "hellofapassword")
registerUser(regRequest)
val authRequest = AuthorizeRequest(regRequest.username, regRequest.password)
Post("/login", authRequest) ~> authRoutes ~> check {
response.status shouldEqual StatusCodes.OK
responseAs[AccessToken].accessToken should not be empty
}
}
def dontGrantWrongPass(): Unit = {
val user = UserRegisterRequest("rockjam", "hellofapassword")
registerUser(user)
val authRequest = AuthorizeRequest(user.username, "some wrong password")
Post("/login", authRequest) ~> authRoutes ~> check {
response.status shouldEqual StatusCodes.Forbidden
responseAs[HttpError] shouldEqual HttpErrors.AuthError
}
}
def dontGrantNotRegistered(): Unit = {
val authRequest = AuthorizeRequest("burglar", "secret")
Post("/login", authRequest) ~> authRoutes ~> check {
response.status shouldEqual StatusCodes.Forbidden
responseAs[HttpError] shouldEqual HttpErrors.AuthError
}
}
def dontRegisterSameUsername(): Unit = {
val firstReq = UserRegisterRequest("rockjam", "hellofapassword")
registerUser(firstReq)
val secondReq = UserRegisterRequest("rockjam", "sercretpassword")
Post("/register", secondReq) ~> authRoutes ~> check {
response.status shouldEqual StatusCodes.BadRequest
responseAs[HttpError] shouldEqual HttpErrors.SameUsername
}
}
private def registerUser(req: UserRegisterRequest): Unit =
Post("/register", req) ~> authRoutes ~> check {
response.status shouldEqual StatusCodes.Created
response.entity.isKnownEmpty() shouldEqual true
}
}
|
rockjam/iq-notes
|
src/test/scala/com/github/rockjam/iqnotes/AuthSpec.scala
|
Scala
|
apache-2.0
| 3,137
|
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600a.v3
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600.v3.retriever.CT600BoxRetriever
case class LP04(value: Option[Int]) extends CtBoxIdentifier(name = "Amount of other loans.") with CtOptionalInteger with Input with ValidatableBox[CT600BoxRetriever] {
def validate(boxRetriever: CT600BoxRetriever) = if (boxRetriever.retrieveLPQ10().value.getOrElse(false)) validateAsMandatory(this) ++ validatePositiveInteger(this) else Set()
}
|
ahudspith-equalexperts/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/ct600a/v3/LP04.scala
|
Scala
|
apache-2.0
| 1,076
|
package mysql2hbase
import java.io.Serializable
import java.util
import java.util.BitSet
import java.util.concurrent.atomic.AtomicLong
import com.github.shyiko.mysql.binlog.event.{DeleteRowsEventData, UpdateRowsEventData, WriteRowsEventData}
import org.apache.commons.lang3.time.StopWatch
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.client._
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.security.UserGroupInformation
import org.apache.spark.sql.Row
import org.apache.spark.sql.hbase.util.{DataTypeUtils, HBaseKVHelper}
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
trait HbaseApplierMBean {
def getCount(): util.HashMap[String, AtomicLong]
def getDelay(): util.HashMap[String, ArrayBuffer[Long]]
def getBinlog(): String
def getBinlogPosition(): Long
final val INSERT="insert"
final val DELETE="delete"
final val UPDATE_INSERT="update.insert"
final val UPDATE_DELETE="update.delete"
def initCount = {
val count = new util.HashMap[String, AtomicLong]()
count.put(INSERT, new AtomicLong)
count.put(DELETE, new AtomicLong)
count.put(UPDATE_INSERT, new AtomicLong)
count.put(UPDATE_DELETE, new AtomicLong)
count
}
def initDelay = {
val delay = new util.HashMap[String, ArrayBuffer[Long]]()
delay.put(INSERT, ArrayBuffer[Long](0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
delay.put(DELETE, ArrayBuffer[Long](0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
delay.put(UPDATE_INSERT, ArrayBuffer[Long](0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
delay.put(UPDATE_DELETE, ArrayBuffer[Long](0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
delay
}
}
object HbaseApplier {
final val BINLOG_TABLE = "BinlogTable"
final val BINLOG__COLUMN_FAMILY = "cf"
final val BINLOG__FILENAME_COLUMN = "filename"
final val BINLOG__POSITION_COLUMN = "position"
val hbaseConf = HBaseConfiguration.create()
var hbaseAdmin = new HBaseAdmin(hbaseConf)
lazy val tables = collection.mutable.Map[String, HTable]()
HbaseApplier.login(hbaseConf)
def addConfPath(hbaseConfPath: Seq[String]) = {
hbaseConfPath.foreach { f => hbaseConf.addResource(new Path(f)) }
hbaseAdmin = new HBaseAdmin(hbaseConf)
}
def getTableForDatabase(dbTableName:String)={
HBaseTableUtils.getRelation(dbTableName) match {
case None =>
throw new Exception("meta table mapping not defined: " + dbTableName)
case Some(relation) => {
HbaseApplier.getTable(relation.hbaseTableName) match{
case None =>
throw new Exception("hbase data table not defined: " + dbTableName)
case Some(dataTable) => dataTable
}
}
}
}
def getTable(tableName: String):Option[HTable] = {
//do not create the table automatically; return None if it does not already exist
tables.get(tableName) match{
case Some(table) => Some(table)
case None =>
if (!hbaseAdmin.tableExists(tableName)) {
None
} else {
val table = new HTable(hbaseConf, Bytes.toBytes(tableName))
table.setAutoFlushTo(false)
tables.put(tableName, table)
Some(table)
}
}
}
def login(hbaseConf: Configuration) = {
val krb = Config.getKrbLogin()
UserGroupInformation.setConfiguration(hbaseConf)
UserGroupInformation.loginUserFromKeytab(krb._1, krb._2)
}
}
class HbaseApplier(hbaseConfPath: Seq[String], hbaseBinlogKey: String) extends HbaseApplierMBean {
var count = initCount
var delay = initDelay
var binlog: String = ""
var binlogPosition: Long = 0L
HbaseApplier.addConfPath(hbaseConfPath)
val binlogTable = HbaseApplier.getTable(HbaseApplier.BINLOG_TABLE) match{
case None =>
throw new Exception("hbase meta table not defined: " + HbaseApplier.BINLOG_TABLE)
case Some(binlogTable) => binlogTable
}
override def getCount = count
override def getDelay = delay
override def getBinlog = binlog
override def getBinlogPosition = binlogPosition
def timedHbaseDataAction[A](actionType: String)(action: => Unit) = {
val sw = new StopWatch
sw.reset()
sw.start()
action
sw.stop()
val d = delay.get(actionType)
d.remove(0)
d.append(sw.getTime)
count.get(actionType).incrementAndGet()
}
def binlogGetPosition: Option[(String, Long)] = {
val get = new Get(Bytes.toBytes(hbaseBinlogKey))
val result = binlogTable.get(get)
if (!result.isEmpty) {
val fn = result.getValue(Bytes.toBytes(HbaseApplier.BINLOG__COLUMN_FAMILY),
Bytes.toBytes(HbaseApplier.BINLOG__FILENAME_COLUMN))
val pst = result.getValue(Bytes.toBytes(HbaseApplier.BINLOG__COLUMN_FAMILY),
Bytes.toBytes(HbaseApplier.BINLOG__POSITION_COLUMN))
binlog = Bytes.toString(fn)
binlogPosition = Bytes.toLong(pst)
Log.info("read binglog {} {}", Bytes.toString(fn), Bytes.toLong(pst))
Option(Bytes.toString(fn), Bytes.toLong(pst))
} else {
None
}
}
def binlogRotate(filename: String, position: Long) {
val put = new Put(Bytes.toBytes(hbaseBinlogKey))
put.add(Bytes.toBytes(HbaseApplier.BINLOG__COLUMN_FAMILY),
Bytes.toBytes(HbaseApplier.BINLOG__FILENAME_COLUMN), Bytes.toBytes(filename))
put.add(Bytes.toBytes(HbaseApplier.BINLOG__COLUMN_FAMILY),
Bytes.toBytes(HbaseApplier.BINLOG__POSITION_COLUMN), Bytes.toBytes(position))
binlogTable.put(put)
binlog = filename
binlogPosition = position
binlogTable.flushCommits()
Log.info("rotate to binglog {} {}", filename, position)
}
def binlogNextPosition(position: Long) {
val put = new Put(Bytes.toBytes(hbaseBinlogKey))
put.add(Bytes.toBytes(HbaseApplier.BINLOG__COLUMN_FAMILY),
Bytes.toBytes(HbaseApplier.BINLOG__POSITION_COLUMN), Bytes.toBytes(position))
binlogTable.put(put)
binlogPosition = position
binlogTable.flushCommits()
Log.info("next binglog position {}", position)
}
def toScalaBitSet(s: BitSet): scala.collection.mutable.BitSet = {
new scala.collection.mutable.BitSet(s.toLongArray)
}
def getPutForSpark(rowKey: Array[Byte], includedColumns: java.util.BitSet,
tableInfo: TableInfo, row: Array[Serializable]): Put = {
val put = new Put(rowKey)
val relation = HBaseTableUtils.getRelation(tableInfo.getDBTableName()).get
val theSparkRow = Row.fromSeq(row)
relation.getNonKeyColumns().foreach(
nkc => {
val rowVal = DataTypeUtils.getRowColumnInHBaseRawType(
theSparkRow, nkc.ordinal, nkc.dataType, relation.getBytesUtils())
put.add(nkc.familyRaw, nkc.qualifierRaw, rowVal)
}
)
put
}
def getRowKeyForSpark(includedColumns: BitSet,
tableInfo: TableInfo,
row: Array[Serializable]): Array[Byte] = {
val cols = tableInfo.cols
val pk = toScalaBitSet(tableInfo.primaryKey)
val included = toScalaBitSet(includedColumns)
HBaseTableUtils.getRelation(tableInfo.getDBTableName()) match {
case None => throw new Exception("can't get table for " + tableInfo.getDBTableName())
case Some(relation) => {
if ((pk & included) != pk) {
throw new Exception("sql statement does not contain all primary keys")
}
val theSparkRow = Row.fromSeq(row)
val rawKeyCol = relation.getKeyColumns().map(
kc => {
val rowColumn = DataTypeUtils.getRowColumnInHBaseRawType(
theSparkRow, kc.ordinal, kc.dataType)
(rowColumn, kc.dataType)
}
)
HBaseKVHelper.encodingRawKeyColumns(rawKeyCol)
}
}
}
def getDelete(rowKey: Array[Byte]): Delete = {
new Delete(rowKey)
}
def insert(nextPosition: Long, tableInfo: TableInfo, data: WriteRowsEventData) {
val table =
HbaseApplier.getTableForDatabase(tableInfo.getDBTableName())
for (mySQLValues <- data.getRows.asScala) {
val rowKey = getRowKeyForSpark(data.getIncludedColumns, tableInfo, mySQLValues)
if (rowKey.length == 0) {
Log.error("row key is null {}")
}
val put = getPutForSpark(rowKey, data.getIncludedColumns, tableInfo, mySQLValues)
timedHbaseDataAction(INSERT)(table.put(put))
}
table.flushCommits()
Log.info("insert to values {} log position {}", data.toString, nextPosition)
binlogNextPosition(nextPosition)
}
def isSameRowKey(left: Array[Byte], right: Array[Byte]): Boolean = {
left.toList == right.toList
}
def update(nextPosition: Long, tableInfo: TableInfo, data: UpdateRowsEventData) {
val table =
HbaseApplier.getTableForDatabase(tableInfo.getDBTableName())
for (entry <- data.getRows.asScala) {
val rowKeyBefore = getRowKeyForSpark(data.getIncludedColumnsBeforeUpdate, tableInfo, entry.getKey)
val rowKeyAfter = getRowKeyForSpark(data.getIncludedColumns, tableInfo, entry.getValue)
      if (!isSameRowKey(rowKeyBefore, rowKeyAfter)) {
        // The primary key changed, so the row stored under the old key must be deleted,
        // otherwise it would be left orphaned after this update.
        val del = getDelete(rowKeyBefore)
        timedHbaseDataAction(UPDATE_DELETE)(table.delete(del))
      }
val put = getPutForSpark(rowKeyAfter, data.getIncludedColumns, tableInfo, entry.getValue)
timedHbaseDataAction(UPDATE_INSERT)(table.put(put))
}
table.flushCommits()
Log.info("update {} log position {}", data.toString, nextPosition)
binlogNextPosition(nextPosition)
}
def remove(nextPosition: Long, tableInfo: TableInfo, data: DeleteRowsEventData) {
val table =
HbaseApplier.getTableForDatabase(tableInfo.getDBTableName())
for (mySQLValues <- data.getRows.asScala) {
val rowKey = getRowKeyForSpark(data.getIncludedColumns, tableInfo, mySQLValues)
val del = getDelete(rowKey)
timedHbaseDataAction(DELETE)(table.delete(del))
}
table.flushCommits()
Log.info("remove {} log position {}", data.toString, nextPosition)
binlogNextPosition(nextPosition)
}
}
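// --- Illustrative sketch, not part of the original file ------------------------------------
// Shows the primary-key completeness check that getRowKeyForSpark above performs with
// java.util.BitSet masks: a row image must carry every primary-key column before a row key
// can be encoded. The object and method names here are hypothetical.
object PrimaryKeyCheckSketch {
  def includesAllPrimaryKeys(primaryKey: java.util.BitSet, included: java.util.BitSet): Boolean = {
    val pk = scala.collection.mutable.BitSet.fromBitMask(primaryKey.toLongArray)
    val in = scala.collection.mutable.BitSet.fromBitMask(included.toLongArray)
    (pk & in) == pk // true only when every primary-key ordinal is present in the row image
  }
  def main(args: Array[String]): Unit = {
    val pk = new java.util.BitSet(); pk.set(0)                  // column 0 is the primary key
    val cols = new java.util.BitSet(); cols.set(0); cols.set(2) // statement carries columns 0 and 2
    println(includesAllPrimaryKeys(pk, cols))                   // true
  }
}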
|
chenm11/mysql-hbase-replicator
|
src/main/scala/mysql2hbase/HbaseApplier.scala
|
Scala
|
mit
| 9,952
|
/*
* Copyright 2020 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package mongodb
package record
package testmodels
import net.liftweb.common._
import net.liftweb.mongodb.record.field._
import com.mongodb._
class UUIDTest private () extends MongoRecord[UUIDTest] with ObjectIdPk[UUIDTest] {
def meta = UUIDTest
object uuidfield extends UUIDField(this)
}
object UUIDTest extends UUIDTest with MongoMetaRecord[UUIDTest]
|
lift/framework
|
persistence/mongodb-record/src/test/scala/net/liftweb/mongodb/record/testmodels/UUIDTest.scala
|
Scala
|
apache-2.0
| 994
|
package cromwell.engine.workflow.lifecycle.execution
import akka.actor.{Actor, Props}
import akka.testkit.{EventFilter, TestActorRef, TestDuration, TestProbe}
import com.typesafe.config.ConfigFactory
import cromwell._
import cromwell.backend.AllBackendInitializationData
import cromwell.core.{SimpleIoActor, WorkflowId}
import cromwell.engine.backend.{BackendConfigurationEntry, BackendSingletonCollection, CromwellBackends}
import cromwell.engine.workflow.WorkflowDescriptorBuilder
import cromwell.engine.workflow.lifecycle.execution.WorkflowExecutionActor.{ExecuteWorkflowCommand, WorkflowExecutionFailedResponse}
import cromwell.engine.workflow.tokens.JobExecutionTokenDispenserActor
import cromwell.engine.workflow.workflowstore.Submitted
import cromwell.services.ServiceRegistryActor
import cromwell.services.metadata.MetadataService
import cromwell.util.SampleWdl
import org.scalatest.{BeforeAndAfter, FlatSpecLike, Matchers}
import scala.concurrent.duration._
import scala.concurrent.{Await, Promise}
class WorkflowExecutionActorSpec extends CromwellTestKitSpec with FlatSpecLike with Matchers with BeforeAndAfter with WorkflowDescriptorBuilder {
override implicit val actorSystem = system
implicit val DefaultDuration = 20.seconds.dilated
def mockServiceRegistryActor = TestActorRef(new Actor {
override def receive = {
case _ => // No action
}
})
val MockBackendName = "Mock"
val MockBackendSingletonCollection = BackendSingletonCollection(Map(MockBackendName -> None))
val stubbedConfig = ConfigFactory.load().getConfig("backend.providers.Mock").getConfig("config")
val serviceRegistry = TestProbe().ref
val runtimeSection =
"""
|runtime {
| backend: "Mock"
|}
""".stripMargin
behavior of "WorkflowExecutionActor"
it should "allow a backend to tell it to retry... up to a point" in {
import MetadataWatchActor.metadataKeyAttemptChecker
val metadataSuccessPromise = Promise[Unit]()
val requiredMetadataMatchers: Seq[MetadataWatchActor.Matcher] = List(
MetadataWatchActor.JobKeyMetadataKeyAndValueContainStringMatcher(metadataKeyAttemptChecker(1), "executionStatus", "RetryableFailure"),
MetadataWatchActor.JobKeyMetadataKeyAndValueContainStringMatcher(metadataKeyAttemptChecker(2), "executionStatus", "RetryableFailure"),
MetadataWatchActor.JobKeyMetadataKeyAndValueContainStringMatcher(metadataKeyAttemptChecker(3), "executionStatus", "Failed")
)
val metadataWatcherActor = TestActorRef[MetadataWatchActor](Props(MetadataWatchActor(metadataSuccessPromise, requiredMetadataMatchers: _*)))
val serviceRegistryActor = system.actorOf(ServiceRegistryActor.props(ConfigFactory.load(), overrides = Map(MetadataService.MetadataServiceName -> metadataWatcherActor.props)))
val jobStoreActor = system.actorOf(AlwaysHappyJobStoreActor.props)
val ioActor = system.actorOf(SimpleIoActor.props)
val subWorkflowStoreActor = system.actorOf(AlwaysHappySubWorkflowStoreActor.props)
val jobTokenDispenserActor = system.actorOf(JobExecutionTokenDispenserActor.props(serviceRegistry))
val MockBackendConfigEntry = BackendConfigurationEntry(
name = "Mock",
lifecycleActorFactoryClass = "cromwell.engine.backend.mock.RetryableBackendLifecycleActorFactory",
stubbedConfig
)
CromwellBackends.initBackends(List(MockBackendConfigEntry))
val workflowId = WorkflowId.randomId()
val engineWorkflowDescriptor = createMaterializedEngineWorkflowDescriptor(workflowId, SampleWdl.HelloWorld.asWorkflowSources(runtime = runtimeSection))
val callCacheReadActor = TestProbe()
val callCacheWriteActor = TestProbe()
val dockerHashActor = TestProbe()
val weaSupervisor = TestProbe()
val workflowExecutionActor = TestActorRef(
props = WorkflowExecutionActor.props(engineWorkflowDescriptor, ioActor, serviceRegistryActor, jobStoreActor, subWorkflowStoreActor,
callCacheReadActor.ref, callCacheWriteActor.ref, dockerHashActor.ref, jobTokenDispenserActor, MockBackendSingletonCollection, AllBackendInitializationData.empty, startState = Submitted),
name = "WorkflowExecutionActor",
supervisor = weaSupervisor.ref)
EventFilter.info(pattern = "Starting calls: wf_hello.hello", occurrences = 3).intercept {
workflowExecutionActor ! ExecuteWorkflowCommand
}
weaSupervisor.expectMsgClass(classOf[WorkflowExecutionFailedResponse])
// Super-helpful debug in case the metadata watcher is still unhappy:
if(metadataWatcherActor.underlyingActor.unsatisfiedMatchers.nonEmpty) {
requiredMetadataMatchers foreach { matcher =>
matcher.nearMissInformation.foreach { info =>
System.out.println("A matcher had a near miss (it might still get a matching value later!): " + info.replace("\\n", "..."))
}
}
}
// TODO: Yes, this might be slow... I'd advocate for refactoring away from the run-a-wdl style, but (shrug)
// (but in fact, this never really takes 2 minutes. That's just for safety)
Await.result(awaitable = metadataSuccessPromise.future, atMost = 2.minutes.dilated)
system.stop(serviceRegistryActor)
}
it should "execute a workflow with scatters" in {
val serviceRegistry = mockServiceRegistryActor
val jobStore = system.actorOf(AlwaysHappyJobStoreActor.props)
val subWorkflowStoreActor = system.actorOf(AlwaysHappySubWorkflowStoreActor.props)
val callCacheReadActor = system.actorOf(EmptyCallCacheReadActor.props)
val callCacheWriteActor = system.actorOf(EmptyCallCacheWriteActor.props)
val dockerHashActor = system.actorOf(EmptyDockerHashActor.props)
val ioActor = system.actorOf(SimpleIoActor.props)
val jobTokenDispenserActor = system.actorOf(JobExecutionTokenDispenserActor.props(serviceRegistry))
val MockBackendConfigEntry = BackendConfigurationEntry(
name = MockBackendName,
lifecycleActorFactoryClass = "cromwell.engine.backend.mock.DefaultBackendLifecycleActorFactory",
stubbedConfig
)
CromwellBackends.initBackends(List(MockBackendConfigEntry))
val workflowId = WorkflowId.randomId()
val engineWorkflowDescriptor = createMaterializedEngineWorkflowDescriptor(workflowId, SampleWdl.SimpleScatterWdl.asWorkflowSources(runtime = runtimeSection))
val workflowExecutionActor = system.actorOf(
WorkflowExecutionActor.props(engineWorkflowDescriptor, ioActor, serviceRegistry, jobStore, subWorkflowStoreActor,
callCacheReadActor, callCacheWriteActor, dockerHashActor, jobTokenDispenserActor, MockBackendSingletonCollection, AllBackendInitializationData.empty, startState = Submitted),
"WorkflowExecutionActor")
val scatterLog = "Starting calls: scatter0.inside_scatter:0:1, scatter0.inside_scatter:1:1, scatter0.inside_scatter:2:1, scatter0.inside_scatter:3:1, scatter0.inside_scatter:4:1"
EventFilter.info(pattern = ".*Final Outputs", occurrences = 1).intercept {
EventFilter.info(pattern = scatterLog, occurrences = 1).intercept {
EventFilter.info(pattern = "Starting calls: scatter0.outside_scatter:NA:1", occurrences = 1).intercept {
workflowExecutionActor ! ExecuteWorkflowCommand
}
}
}
system.stop(serviceRegistry)
}
}
|
ohsu-comp-bio/cromwell
|
engine/src/test/scala/cromwell/engine/workflow/lifecycle/execution/WorkflowExecutionActorSpec.scala
|
Scala
|
bsd-3-clause
| 7,247
|
/** MACHINE-GENERATED FROM AVRO SCHEMA. DO NOT EDIT DIRECTLY */
package example.idl.string
import other.ns.string.ExternalDependency
sealed trait ImportProtocol extends Product with Serializable
final case class DependentRecord(dependency: ExternalDependency, number: Int) extends ImportProtocol
final case class DependentRecord2(dependency: String, name: String) extends ImportProtocol
final case class DependentRecord3(dependency: Embedded, value: Boolean) extends ImportProtocol
|
julianpeeters/avrohugger
|
avrohugger-core/src/test/expected/standard/example/idl/string/ImportProtocol.scala
|
Scala
|
apache-2.0
| 486
|
package latis.time
class TimeUnit(val seconds: Double, val name: String) {
override def toString: String = name
}
object TimeUnit {
//TODO: look at definitions from other time APIs that we could reuse
//TODO: implement as case class or other form of enumeration?
  // Keep a map of units so we can look them up by name as well as by identifier.
private val units = new scala.collection.mutable.LinkedHashMap[String, TimeUnit]()
val ATTOSECOND = new TimeUnit(1e-18, "attoseconds")
units += ((ATTOSECOND.name, ATTOSECOND))
val FEMTOSECOND = new TimeUnit(1e-15, "femtoseconds")
units += ((FEMTOSECOND.name, FEMTOSECOND))
val PICOSECOND = new TimeUnit(1e-12, "picoseconds")
units += ((PICOSECOND.name, PICOSECOND))
val NANOSECOND = new TimeUnit(1e-9, "nanoseconds")
units += ((NANOSECOND.name, NANOSECOND))
val MICROSECOND = new TimeUnit(1e-6, "microseconds")
units += ((MICROSECOND.name, MICROSECOND))
val MILLISECOND = new TimeUnit(0.001, "milliseconds")
units += ((MILLISECOND.name, MILLISECOND))
val SECOND = new TimeUnit(1, "seconds")
units += ((SECOND.name, SECOND))
val MINUTE = new TimeUnit(60, "minutes")
units += ((MINUTE.name, MINUTE))
val HOUR = new TimeUnit(3600, "hours")
units += ((HOUR.name, HOUR))
// NOTE: Day is defined as exactly 86400 SI seconds.
val DAY = new TimeUnit(86400, "days")
units += ((DAY.name, DAY))
val WEEK = new TimeUnit(DAY.seconds * 7, "weeks")
units += ((WEEK.name, WEEK))
//TODO: include MONTH (as 30 days?) even though it isn't consistent?
val YEAR = new TimeUnit(DAY.seconds * 365, "years")
//TODO: should this be actual year? obs vs model
units += ((YEAR.name, YEAR))
val DECADE = new TimeUnit(YEAR.seconds * 10, "decades")
units += ((DECADE.name, DECADE))
val CENTURY = new TimeUnit(YEAR.seconds * 100, "centuries")
units += ((CENTURY.name, CENTURY))
val MILLENNIUM = new TimeUnit(YEAR.seconds * 1000, "millennia")
units += ((MILLENNIUM.name, MILLENNIUM))
val FORTNIGHT = new TimeUnit(DAY.seconds * 14, "fortnights")
units += ((FORTNIGHT.name, FORTNIGHT))
/**
* Get the TimeUnit instance by name.
*/
def withName(name: String): TimeUnit = units(name.toLowerCase)
}
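// Illustrative sketch, not part of the original file: unit conversion falls out of the
// SI-seconds scale defined above. The object and method names are hypothetical.
object TimeUnitConversionSketch {
  /** e.g. convert(90, minutes, hours) == 1.5 */
  def convert(value: Double, from: TimeUnit, to: TimeUnit): Double =
    value * from.seconds / to.seconds
  def main(args: Array[String]): Unit =
    println(convert(90, TimeUnit.withName("minutes"), TimeUnit.withName("hours"))) // 1.5
}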
|
dlindhol/LaTiS
|
src/main/scala/latis/time/TimeUnit.scala
|
Scala
|
epl-1.0
| 2,330
|
package com.twitter.algebird.macros
import scala.language.experimental.{ macros => sMacros }
import scala.reflect.macros.Context
import scala.reflect.runtime.universe._
/**
* "Cubes" a case class or tuple, i.e. for a tuple of type
* (T1, T2, ... , TN) generates all 2^N possible combinations of type
* (Option[T1], Option[T2], ... , Option[TN]).
*
* This is useful for comparing some metric across all possible subsets.
* For example, suppose we have a set of people represented as
* case class Person(gender: String, age: Int, height: Double)
* and we want to know the average height of
* - people, grouped by gender and age
* - people, grouped by only gender
* - people, grouped by only age
* - all people
*
* Then we could do
* > import com.twitter.algebird.macros.Cuber.cuber
 * > val people: List[Person]
* > val averageHeights: Map[(Option[String], Option[Int]), Double] =
* > people.flatMap { p => cuber((p.gender, p.age)).map((_,p)) }
* > .groupBy(_._1)
 * > .mapValues { xs => val heights = xs.map(_._2.height); heights.sum / heights.length }
*/
trait Cuber[I] {
type K
def apply(in: I): TraversableOnce[K]
}
/**
* Given a TupleN, produces a sequence of (N + 1) tuples each of arity N
* such that, for all k from 0 to N, there is a tuple with k Somes
* followed by (N - k) Nones.
*
* This is useful for comparing some metric across multiple layers of
* some hierarchy.
* For example, suppose we have some climate data represented as
* case class Data(continent: String, country: String, city: String, temperature: Double)
* and we want to know the average temperatures of
* - each continent
* - each (continent, country) pair
* - each (continent, country, city) triple
*
* Here we desire the (continent, country) and (continent, country, city)
* pair because, for example, if we grouped by city instead of by
* (continent, country, city), we would accidentally combine the results for
* Paris, Texas and Paris, France.
*
* Then we could do
* > import com.twitter.algebird.macros.Roller.roller
* > val data: List[Data]
* > val averageTemps: Map[(Option[String], Option[String], Option[String]), Double] =
* > data.flatMap { d => roller((d.continent, d.country, d.city)).map((_, d)) }
* > .groupBy(_._1)
 * > .mapValues { xs => val temps = xs.map(_._2.temperature); temps.sum / temps.length }
*/
trait Roller[I] {
type K
def apply(in: I): TraversableOnce[K]
}
object Cuber {
implicit def cuber[T]: Cuber[T] = macro cuberImpl[T]
def cuberImpl[T](c: Context)(implicit T: c.WeakTypeTag[T]): c.Expr[Cuber[T]] = {
import c.universe._
ensureCaseClass(c)
val params = getParams(c)
val arity = params.length
if (arity > 22)
c.abort(c.enclosingPosition, s"Cannot create Cuber for $T because it has more than 22 parameters.")
if (arity == 0)
c.abort(c.enclosingPosition, s"Cannot create Cuber for $T because it has no parameters.")
val tupleName = {
val types = getParamTypes(c)
val optionTypes = types.map { t => tq"_root_.scala.Option[$t]" }
val tupleType = newTypeName(s"Tuple${arity}")
tq"_root_.scala.$tupleType[..$optionTypes]"
}
val somes = params.zip(Stream.from(1)).map {
case (param, index) =>
val name = newTermName(s"some$index")
q"val $name = _root_.scala.Some(in.$param)"
}
val options = (1 to arity).map { index =>
val some = newTermName(s"some$index")
q"if (((1 << ${index - 1}) & i) == 0) _root_.scala.None else $some"
}
val cuber = q"""
new _root_.com.twitter.algebird.macros.Cuber[${T}] {
type K = $tupleName
def apply(in: ${T}): _root_.scala.Seq[K] = {
..$somes
(0 until (1 << $arity)).map { i =>
new K(..$options)
}
}
}
"""
c.Expr[Cuber[T]](cuber)
}
}
object Roller {
implicit def roller[T]: Roller[T] = macro rollerImpl[T]
def rollerImpl[T](c: Context)(implicit T: c.WeakTypeTag[T]): c.Expr[Roller[T]] = {
import c.universe._
ensureCaseClass(c)
val params = getParams(c)
val arity = params.length
if (arity > 22)
c.abort(c.enclosingPosition, s"Cannot create Roller for $T because it has more than 22 parameters.")
if (arity == 0)
c.abort(c.enclosingPosition, s"Cannot create Roller for $T because it has no parameters.")
val tupleName = {
val types = getParamTypes(c)
val optionTypes = types.map { t => tq"_root_.scala.Option[$t]" }
val tupleType = newTypeName(s"Tuple${arity}")
tq"_root_.scala.$tupleType[..$optionTypes]"
}
val somes = params.zip(Stream.from(1)).map {
case (param, index) =>
val name = newTermName(s"some$index")
q"val $name = _root_.scala.Some(in.$param)"
}
val items = (0 to arity).map { i =>
val args = (1 to arity).map { index =>
val some = newTermName(s"some$index")
if (index <= i) q"$some" else q"_root_.scala.None"
}
q"new K(..$args)"
}
val roller = q"""
new _root_.com.twitter.algebird.macros.Roller[${T}] {
type K = $tupleName
def apply(in: ${T}): _root_.scala.Seq[K] = {
..$somes
Seq(..$items)
}
}
"""
c.Expr[Roller[T]](roller)
}
}
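// Illustrative sketch, not part of the original file: the 2^N expansion the Cuber macro
// generates, written out by hand for a pair so the shape of the result is easy to see.
object CuberConceptSketch {
  def cubePair[A, B](a: A, b: B): Seq[(Option[A], Option[B])] =
    for {
      oa <- Seq(None, Some(a))
      ob <- Seq(None, Some(b))
    } yield (oa, ob)
  def main(args: Array[String]): Unit =
    // 2^2 = 4 combinations, from (None, None) up to (Some("f"), Some(3))
    cubePair("f", 3).foreach(println)
}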
|
erikerlandson/algebird
|
algebird-core/src/main/scala/com/twitter/algebird/macros/Cuber.scala
|
Scala
|
apache-2.0
| 5,265
|
package edu.rit.csh.linter.parser
import edu.rit.csh.linter.language.Patterns._
import fastparse.WhitespaceApi
import fastparse.all._
import fastparse.core.Parser
object Patterns {
import Types._
import Literals._
val whitespace = WhitespaceApi.Wrapper{
import fastparse.all._
NoTrace(" ".rep)
}
import whitespace._
// 8.2 Type Patterns
val typPat = typ
val patterns: Parser[Seq[Pattern]] = P(pattern.rep(min = 1, sep = ","))
val literalPattern = P(literal.map { case lit => LiteralPattern(lit) })
val variablePattern = P(varId | wildCard).map { VariablePattern }
val stableIdPattern = P(stableId.map { case sid =>
val first = sid.toString.charAt(1)
if (first >= 'a' && first <= 'z') {
if (sid.toString.contains(".")) StableIdPattern(sid)
else VariablePattern(sid)
} else {
StableIdPattern(sid)
}
})
val constructorPattern = P((stableId ~ "(" ~ patterns.? ~ ")")
.map { case (id, patts) => ConstructorPattern(id, patts.getOrElse(Seq.empty):_*) })
val tuplePattern: Parser[TuplePattern] = P("(" ~ patterns ~ ")").map(pts => TuplePattern(pts:_*))
val sequencePattern = P(stableId ~ "(" ~ (variablePattern ~ ",").rep ~ (varId ~ "@").? ~ "_" ~ "*" ~ ")")
.map { case (sid, pts, varid) => SequencePattern(sid, pts, varid) }
val simplePattern: Parser[Pattern] =
P( tuplePattern
| literalPattern
| sequencePattern
| constructorPattern
| stableIdPattern
| variablePattern )
val pattern3: Parser[Pattern] = P(simplePattern ~ (id ~ nl.? ~ simplePattern).rep)
.map { case (sp, lst) => toConstructor(sp, lst.toList) }
private def toConstructor(start: Pattern, input: List[(Symbol, Pattern)]): Pattern = input match {
case Nil => start
case (op, sp) :: Nil => ConstructorPattern(op, start, sp)
case (op, sp) :: pts => ConstructorPattern(op, start, toConstructor(sp, pts))
}
// 8.1.3 Pattern Binders
val pattern2 = P((varId ~ "@" ~ pattern3).map(BindingPattern.tupled) | pattern3)
// 8.1.2 Typed Patterns
val pattern1 = P(pattern2 | ((varId | wildCard) ~ ":" ~ typPat).map(TypedPattern.tupled))
val pattern: Parser[Pattern] = P(pattern1 ~ ("|" ~ pattern1).rep).map {
case (pt, Nil) => pt
case (p1, pts) => AlternativePattern(pts.+:(p1):_*)
}
}
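// Illustrative sketch, not part of the original file: the combinators above are ordinary
// fastparse parsers, so a pattern string can be fed to them directly; the printed Parsed
// value depends on the fastparse 0.x API this project assumes. Names are hypothetical.
object PatternsUsageSketch {
  def main(args: Array[String]): Unit =
    println(Patterns.pattern.parse("Foo(x, _)"))
}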
|
JDrit/ScalaLint
|
src/main/scala/edu/rit/csh/linter/parser/Patterns.scala
|
Scala
|
apache-2.0
| 2,309
|
package models
import play.api.db.slick.Config.driver.simple._
case class Sample(id: Option[Int], name: String, created: java.sql.Timestamp)
class SampleTable(tag: Tag) extends Table[Sample](tag, "sample") {
val tsvFiles = TableQuery[TSVFileTable]
def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
def name = column[String]("name", O.NotNull)
def created = column[java.sql.Timestamp]("created_tstmp", O.NotNull)
def * = (id.?, name, created) <> (Sample.tupled, Sample.unapply)
}
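// Illustrative sketch, not part of the original file: typical lifted-embedding queries
// against the mapping above. Database/session wiring is assumed to live elsewhere
// (play-slick), and the object name is hypothetical.
object SampleQueries {
  val samples = TableQuery[SampleTable]
  def byName(name: String) = samples.filter(_.name === name).sortBy(_.created.desc)
}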
|
seqprodbio/restoule
|
app/models/Sample.scala
|
Scala
|
gpl-3.0
| 502
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.mkldnn
import com.intel.analytics.bigdl.mkl.Memory
import com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.nn.mkldnn.Phase.{InferencePhase, TrainingPhase}
import com.intel.analytics.bigdl.numeric.NumericFloat
import com.intel.analytics.bigdl.tensor.Tensor
import org.apache.commons.lang3.SerializationUtils
import org.scalatest.{FlatSpec, Matchers}
class SoftMaxSpec extends FlatSpec with Matchers {
"SoftMax forward 1-D" should "work correctly" in {
    // make sure to cover the case where the size is 1
val tests = List(2, 1)
for (x <- tests) {
val sm = SoftMax()
sm.evaluate()
sm.setRuntime(new MklDnnRuntime)
sm.initFwdPrimitives(Array(HeapData(Array(x), Memory.Format.x)), InferencePhase)
val input = Tensor(x).rand()
val output = sm.forward(input)
val nnSm = nn.SoftMax()
val nnOutput = nnSm.forward(input)
Tools.dense(output) should be (nnOutput)
}
}
"SoftMax forward 2-D" should "work correctly" in {
val tests = List(
(2, 3),
(1, 3),
(1, 1),
(2, 1))
for ((batchSize, channel) <- tests) {
val sm = SoftMax()
sm.setRuntime(new MklDnnRuntime)
sm.initFwdPrimitives(Array(HeapData(Array(batchSize, channel), Memory.Format.nc)),
InferencePhase)
sm.evaluate()
val input = Tensor(batchSize, channel).rand()
val output = sm.forward(input)
val nnSm = nn.SoftMax()
val nnOutput = nnSm.forward(input)
Tools.dense(output) shouldEqual nnOutput
}
}
"SoftMax forward 4-D" should "work correctly" in {
    // make sure to cover cases where some dimensions are 1
val tests = List(
(2, 3, 4, 4),
(1, 3, 4, 4),
(1, 3, 1, 1),
(1, 1, 1, 1),
(1, 1, 3, 3),
(2, 1, 3, 3),
(2, 2, 1, 1))
for ((batchSize, channel, height, width) <- tests) {
val sm = SoftMax()
sm.setRuntime(new MklDnnRuntime)
sm.initFwdPrimitives(Array(HeapData(Array(batchSize, channel, height, width),
Memory.Format.nchw)), InferencePhase)
sm.evaluate()
val input = Tensor(batchSize, channel, height, width).rand()
val output = sm.forward(input)
val nnSm = nn.SoftMax()
val nnOutput = nnSm.forward(input)
Tools.dense(output) should be (nnOutput)
}
}
"SoftMax forward 3-D" should "work correctly" in {
    // make sure to cover cases where some dimensions are 1
val tests = List(
(3, 4, 4),
(3, 4, 4),
(3, 1, 1),
(1, 1, 1),
(1, 3, 3),
(1, 3, 3),
(2, 1, 1))
for ((i, j, k) <- tests) {
val sm = SoftMax()
sm.setRuntime(new MklDnnRuntime)
sm.initFwdPrimitives(Array(HeapData(Array(i, j, k),
Memory.Format.ncw)), InferencePhase)
sm.evaluate()
val input = Tensor(i, j, k).rand()
val output = sm.forward(input)
val nnSm = nn.SoftMax()
val nnOutput = nnSm.forward(input)
Tools.dense(output) should be (nnOutput)
}
}
"SoftMax backward" should "work correctly" in {
val (batchSize, channel, height, width) = (2, 3, 4, 4)
val sm = SoftMax()
sm.setRuntime(new MklDnnRuntime)
sm.initFwdPrimitives(Array(HeapData(Array(batchSize, channel, height, width),
Memory.Format.nchw)), InferencePhase)
val nnSm = nn.SoftMax()
val input = Tensor(batchSize, channel, height, width).rand()
val gradOutput = Tensor().resizeAs(input).rand(-10, 10)
sm.forward(input)
nnSm.forward(input)
sm.backward(input, gradOutput)
nnSm.backward(input, gradOutput)
sm.output should be (nnSm.output)
sm.gradInput should be (nnSm.gradInput)
}
"SoftMax multi times forward" should "work correctly" in {
val (batchSize, channel, height, width) = (2, 3, 4, 4)
val sm = SoftMax()
sm.setRuntime(new MklDnnRuntime)
sm.initFwdPrimitives(Array(HeapData(Array(batchSize, channel, height, width),
Memory.Format.nchw)), InferencePhase)
sm.evaluate()
val nnSm = nn.SoftMax()
(0 until 5).foreach { _ =>
val input = Tensor(batchSize, channel, height, width).rand(-1, 1)
sm.forward(input)
nnSm.forward(input)
Tools.dense(sm.output) should be (nnSm.output)
}
}
"axis" should "work correctly" in {
val input = Tensor[Float](2, 24564, 21).rand(-1, 1)
val sm1 = SoftMax(axis = 2)
val seq1 = Sequential()
.add(Input(Array(2, 24564, 21), Memory.Format.ntc))
.add(sm1)
.add(Output(Memory.Format.ntc))
seq1.asInstanceOf[MklDnnContainer].compile(InferencePhase)
seq1.evaluate()
seq1.forward(input)
input.resize(Array(2 * 24564, 21))
val sm2 = SoftMax()
val seq2 = Sequential().add(Input(Array(2 * 24564, 21), Memory.Format.nc))
.add(sm2)
.add(Output())
seq2.asInstanceOf[MklDnnContainer].compile(InferencePhase)
sm2.evaluate()
seq2.forward(input)
seq1.output.toTensor.view(Array(2 * 24564, 21)) should be (seq2.output)
}
"softmax with java serialization" should "work correctly" in {
val inputShape = Array(2, 3, 4, 4)
val sm = SoftMax()
sm.setRuntime(new MklDnnRuntime)
sm.initFwdPrimitives(Array(HeapData(inputShape, Memory.Format.nchw)), TrainingPhase)
sm.initBwdPrimitives(Array(HeapData(inputShape, Memory.Format.nchw)), TrainingPhase)
val cloned = SerializationUtils.clone(sm)
cloned.setRuntime(new MklDnnRuntime)
cloned.initFwdPrimitives(Array(HeapData(inputShape, Memory.Format.nchw)), TrainingPhase)
cloned.initBwdPrimitives(Array(HeapData(inputShape, Memory.Format.nchw)), TrainingPhase)
val input = Tensor(inputShape).rand(-1, 1)
val gradOutput = Tensor(inputShape).rand(-1, 1)
sm.forward(input)
cloned.forward(input)
Tools.dense(sm.output) should be (Tools.dense(cloned.output))
sm.backward(input, gradOutput)
cloned.backward(input, gradOutput)
Tools.dense(sm.gradInput) should be (Tools.dense(cloned.gradInput))
}
}
|
i8run/BigDL-1
|
spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/mkldnn/SoftMaxSpec.scala
|
Scala
|
apache-2.0
| 6,587
|
package rpi
import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import model.FishAccess
import org.mockito.Matchers
import org.scalatest.{BeforeAndAfterAll, WordSpecLike}
class FishTest(_system: ActorSystem) extends TestKit(_system) with ImplicitSender with WordSpecLike with BeforeAndAfterAll {
def this() = this(ActorSystem("FishLEDAccessTest"))
override def afterAll {
TestKit.shutdownActorSystem(system)
}
"LED Fish" must {
"all on" in {
val fish = system.actorOf(Props(new FishAccess("192.168.0.57")))
Thread.sleep(1000)
fish ! FishAccess.WriteSchema(FishAccess.Gradient(80,90,200,0,55,25,2))
Thread.sleep(2000)
fish ! FishAccess.WriteSchema(FishAccess.Off)
Thread.sleep(2000)
}
}
}
|
Jorik5702/nasw4
|
test/rpi/FishTest.scala
|
Scala
|
mit
| 784
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import scala.collection.JavaConverters._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.{PartitionsAlreadyExistException, ResolvedPartitionSpec}
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.connector.catalog.{SupportsAtomicPartitionManagement, SupportsPartitionManagement}
import org.apache.spark.sql.errors.QueryExecutionErrors
/**
* Physical plan node for adding partitions of table.
*/
case class AddPartitionExec(
table: SupportsPartitionManagement,
partSpecs: Seq[ResolvedPartitionSpec],
ignoreIfExists: Boolean,
refreshCache: () => Unit) extends LeafV2CommandExec {
import DataSourceV2Implicits._
override def output: Seq[Attribute] = Seq.empty
override protected def run(): Seq[InternalRow] = {
val (existsParts, notExistsParts) =
partSpecs.partition(p => table.partitionExists(p.ident))
if (existsParts.nonEmpty && !ignoreIfExists) {
throw new PartitionsAlreadyExistException(
table.name(), existsParts.map(_.ident), table.partitionSchema())
}
val isTableAltered = notExistsParts match {
case Seq() => false // Nothing will be done
case Seq(partitionSpec) =>
val partProp = partitionSpec.location.map(loc => "location" -> loc).toMap
table.createPartition(partitionSpec.ident, partProp.asJava)
true
case _ if table.isInstanceOf[SupportsAtomicPartitionManagement] =>
val partIdents = notExistsParts.map(_.ident)
val partProps = notExistsParts.map(_.location.map(loc => "location" -> loc).toMap)
table.asAtomicPartitionable
.createPartitions(
partIdents.toArray,
partProps.map(_.asJava).toArray)
true
case _ =>
throw QueryExecutionErrors.cannotAddMultiPartitionsOnNonatomicPartitionTableError(
table.name())
}
if (isTableAltered) refreshCache()
Seq.empty
}
}
|
ueshin/apache-spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/AddPartitionExec.scala
|
Scala
|
apache-2.0
| 2,823
|
package com.trainologic.samples.petclinic.repository
import com.trainologic.samples.petclinic._
import Stacks._
import model.Owner
import model.Pet
import model.PetType
import org.atnos.eff.Fx
import org.atnos.eff.Eff
abstract class OwnerRepository[M[_]] {
type S = Fx.prepend[M, BasicStack]
def findByLastName(lastName: String): Eff[S, Seq[Owner]]
def findById(id: Int): Eff[S, Owner]
def save(owner: Owner): Eff[S, Owner]
}
abstract class PetRepository[M[_]] {
type S = Fx.prepend[M, BasicStack]
def findPetTypes: Eff[S, Seq[PetType]]
def findById(id: Int): Eff[S, Pet]
def save(owner: Pet): Eff[S, Pet]
}
|
Trainologic/petclinic_eff
|
src/main/scala/com/trainologic/samples/petclinic/repository/Repositorie.scala
|
Scala
|
apache-2.0
| 626
|
package pulsar.actor
import akka.actor.ActorSystem
import akka.testkit._
import akka.util.ByteString
import org.scalatest.{BeforeAndAfter, Matchers, WordSpecLike}
import pulsar.action.{Dispatch, Kill, Reschedule, Schedule}
import pulsar.model.Task
import scala.concurrent.duration._
/**
* @author Alexander De Leon <me@alexdeleon.name>
*/
class SchedulerTest extends TestKit(ActorSystem.create("SchedulerTest"))
with ImplicitSender
with WordSpecLike
with Matchers
with BeforeAndAfter {
var dispatcher: TestProbe = null
var scheduler: TestActorRef[Scheduler] = null
before {
dispatcher = TestProbe()
scheduler = TestActorRef(new Scheduler(dispatcher.ref))
}
"scheduler" should {
"schedule new task" in {
val testTask = Task("id", "test", ByteString("payload"))
scheduler ! Schedule(testTask, 1 second)
within(1 second, 1.2 second) {
dispatcher.expectMsg(Dispatch(testTask))
}
}
"delaying existing task" in {
val testTask = Task("id", "test", ByteString("payload"))
scheduler ! Schedule(testTask, 0.5 second)
scheduler ! Reschedule("id", 1 second)
within(1 second, 1.2 second) {
dispatcher.expectMsg(Dispatch(testTask))
}
}
"anticipate existing task" in {
val testTask = Task("id", "test", ByteString("payload"))
scheduler ! Schedule(testTask, 1 second)
scheduler ! Reschedule("id", 0.5 second)
within(0.5 second, 0.7 second) {
dispatcher.expectMsg(Dispatch(testTask))
}
}
"cancel existing task" in {
val testTask = Task("id", "test", ByteString("payload"))
scheduler ! Schedule(testTask, 1 second)
scheduler ! Kill("id")
dispatcher.expectNoMsg(1.2 seconds)
}
}
}
|
pulsar-project/pulsar
|
src/test/scala/pulsar/actor/SchedulerTest.scala
|
Scala
|
mit
| 1,762
|
/*******************************************************************************
* Copyright 2017 Capital One Services, LLC and Bitwise, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package hydrograph.engine.spark.components
import hydrograph.engine.core.component.entity.InputFileXMLEntity
import hydrograph.engine.core.component.entity.elements.SchemaField
import hydrograph.engine.spark.components.base.InputComponentBase
import hydrograph.engine.spark.components.platform.BaseComponentParams
import hydrograph.engine.spark.components.utils.SchemaCreator
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{Column, DataFrame}
import org.slf4j.{Logger, LoggerFactory}
import scala.collection.JavaConverters._
import scala.collection.mutable.HashMap
/**
* The Class InputFileXMLComponent.
*
* @author Bitwise
*
*/
class InputFileXMLComponent (iFileXMLEntity: InputFileXMLEntity, iComponentsParams: BaseComponentParams)
extends InputComponentBase with Serializable {
def flattenSchema(schema: StructType, prefix: String = null) : Array[Column] = {
schema.fields.flatMap(f => {
val colName = if (prefix == null) f.name else prefix + "." + f.name
f.dataType match {
case st: StructType => flattenSchema(st, colName)
case _ => Array(new Column(colName))
}
})
}
  def getNamesOfFields(): scala.collection.mutable.HashMap[String, String] = {
    var fieldNamesAndXpathMap: HashMap[String, String] = HashMap[String, String]()
for (i <- 0 until iFileXMLEntity.getFieldsList.size()) {
val schemaField: SchemaField = iFileXMLEntity.getFieldsList.get(i)
fieldNamesAndXpathMap += (schemaField.getAbsoluteOrRelativeXPath.replaceAll("\\\\/", "\\\\.") -> schemaField.getFieldName)
}
fieldNamesAndXpathMap
}
private val LOG:Logger = LoggerFactory.getLogger(classOf[InputFileXMLComponent])
override def createComponent(): Map[String, DataFrame] = {
LOG.trace("In method createComponent()")
val schemaCreator = SchemaCreator(iFileXMLEntity)
    val readMode: String = if (iFileXMLEntity.isStrict) "FAILFAST" else "PERMISSIVE"
try {
val df = iComponentsParams.getSparkSession().read
.option("charset", iFileXMLEntity.getCharset)
.option("rowTag", iFileXMLEntity.getRowTag)
.option("rootTag", iFileXMLEntity.getRootTag)
.option("componentId",iFileXMLEntity.getComponentId)
.option("mode", readMode)
.option("safe", iFileXMLEntity.isSafe)
.option("dateFormats", schemaCreator.getDateFormats)
.schema(schemaCreator.makeSchema)
.format("hydrograph.engine.spark.datasource.xml")
.load(iFileXMLEntity.getPath)
val key = iFileXMLEntity.getOutSocketList.get(0).getSocketId
LOG.info("Created Input File XML Component "+ iFileXMLEntity.getComponentId
+ " in Batch "+ iFileXMLEntity.getBatch +" with output socket " + key
+ " and path " + iFileXMLEntity.getPath)
LOG.debug("Component Id: '"+ iFileXMLEntity.getComponentId
+"' in Batch: " + iFileXMLEntity.getBatch
+ " having schema: [ " + iFileXMLEntity.getFieldsList.asScala.mkString(",")
+ " ] with strict as " + iFileXMLEntity.isStrict
+ " safe as " + iFileXMLEntity.isSafe
+ " rowTag as " + iFileXMLEntity.getRowTag
+ " rootTag as " + iFileXMLEntity.getRootTag
+ " absoluteXPath as " + iFileXMLEntity.getAbsoluteXPath
+ " at Path: " + iFileXMLEntity.getPath)
val xpathAndFieldNamesMap=getNamesOfFields()
val flattenedSchema = flattenSchema(df.schema)
val renamedCols = flattenedSchema.map(name => new Column(name.toString()).as(xpathAndFieldNamesMap.get(name.toString()).get))
val df_new: DataFrame = df.select(renamedCols:_*)
Map(key -> df_new)
} catch {
case e : Exception =>
LOG.error("Error in Input File XML Component "+ iFileXMLEntity.getComponentId, e)
throw new RuntimeException("Error in Input File XML Component "+ iFileXMLEntity.getComponentId, e)
}
}
}
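// Illustrative sketch, not part of the original file: what flattenSchema above produces for
// a nested XML-derived schema -- every leaf becomes a dotted column path, which is then
// renamed to the configured field name via the XPath map. Names here are hypothetical.
object FlattenSchemaSketch {
  import org.apache.spark.sql.types._
  def leafPaths(schema: StructType, prefix: String = null): Seq[String] =
    schema.fields.toSeq.flatMap { f =>
      val path = if (prefix == null) f.name else s"$prefix.${f.name}"
      f.dataType match {
        case st: StructType => leafPaths(st, path)
        case _              => Seq(path)
      }
    }
  def main(args: Array[String]): Unit = {
    val schema = StructType(Seq(
      StructField("id", IntegerType),
      StructField("address", StructType(Seq(StructField("city", StringType))))))
    println(leafPaths(schema)) // List(id, address.city)
  }
}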
|
capitalone/Hydrograph
|
hydrograph.engine/hydrograph.engine.spark/src/main/scala/hydrograph/engine/spark/components/InputFileXMLComponent.scala
|
Scala
|
apache-2.0
| 4,726
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.sources
import org.apache.spark.sql.test.SharedSQLContext
class DataSourceStrategySuite extends PlanTest with SharedSQLContext {
test("translate simple expression") {
val attrInt = 'cint.int
val attrStr = 'cstr.string
testTranslateFilter(EqualTo(attrInt, 1), Some(sources.EqualTo("cint", 1)))
testTranslateFilter(EqualTo(1, attrInt), Some(sources.EqualTo("cint", 1)))
testTranslateFilter(EqualNullSafe(attrStr, Literal(null)),
Some(sources.EqualNullSafe("cstr", null)))
testTranslateFilter(EqualNullSafe(Literal(null), attrStr),
Some(sources.EqualNullSafe("cstr", null)))
testTranslateFilter(GreaterThan(attrInt, 1), Some(sources.GreaterThan("cint", 1)))
testTranslateFilter(GreaterThan(1, attrInt), Some(sources.LessThan("cint", 1)))
testTranslateFilter(LessThan(attrInt, 1), Some(sources.LessThan("cint", 1)))
testTranslateFilter(LessThan(1, attrInt), Some(sources.GreaterThan("cint", 1)))
testTranslateFilter(GreaterThanOrEqual(attrInt, 1), Some(sources.GreaterThanOrEqual("cint", 1)))
testTranslateFilter(GreaterThanOrEqual(1, attrInt), Some(sources.LessThanOrEqual("cint", 1)))
testTranslateFilter(LessThanOrEqual(attrInt, 1), Some(sources.LessThanOrEqual("cint", 1)))
testTranslateFilter(LessThanOrEqual(1, attrInt), Some(sources.GreaterThanOrEqual("cint", 1)))
testTranslateFilter(InSet(attrInt, Set(1, 2, 3)), Some(sources.In("cint", Array(1, 2, 3))))
testTranslateFilter(In(attrInt, Seq(1, 2, 3)), Some(sources.In("cint", Array(1, 2, 3))))
testTranslateFilter(IsNull(attrInt), Some(sources.IsNull("cint")))
testTranslateFilter(IsNotNull(attrInt), Some(sources.IsNotNull("cint")))
// cint > 1 AND cint < 10
testTranslateFilter(And(
GreaterThan(attrInt, 1),
LessThan(attrInt, 10)),
Some(sources.And(
sources.GreaterThan("cint", 1),
sources.LessThan("cint", 10))))
// cint >= 8 OR cint <= 2
testTranslateFilter(Or(
GreaterThanOrEqual(attrInt, 8),
LessThanOrEqual(attrInt, 2)),
Some(sources.Or(
sources.GreaterThanOrEqual("cint", 8),
sources.LessThanOrEqual("cint", 2))))
testTranslateFilter(Not(GreaterThanOrEqual(attrInt, 8)),
Some(sources.Not(sources.GreaterThanOrEqual("cint", 8))))
testTranslateFilter(StartsWith(attrStr, "a"), Some(sources.StringStartsWith("cstr", "a")))
testTranslateFilter(EndsWith(attrStr, "a"), Some(sources.StringEndsWith("cstr", "a")))
testTranslateFilter(Contains(attrStr, "a"), Some(sources.StringContains("cstr", "a")))
}
test("translate complex expression") {
val attrInt = 'cint.int
// ABS(cint) - 2 <= 1
testTranslateFilter(LessThanOrEqual(
// Expressions are not supported
// Functions such as 'Abs' are not supported
Subtract(Abs(attrInt), 2), 1), None)
    // (cint > 1 AND cint < 10) OR (cint > 50 AND cint < 100)
testTranslateFilter(Or(
And(
GreaterThan(attrInt, 1),
LessThan(attrInt, 10)
),
And(
GreaterThan(attrInt, 50),
LessThan(attrInt, 100))),
Some(sources.Or(
sources.And(
sources.GreaterThan("cint", 1),
sources.LessThan("cint", 10)),
sources.And(
sources.GreaterThan("cint", 50),
sources.LessThan("cint", 100)))))
// SPARK-22548 Incorrect nested AND expression pushed down to JDBC data source
    // (cint > 1 AND ABS(cint) < 10) OR (cint > 50 AND cint < 100)
testTranslateFilter(Or(
And(
GreaterThan(attrInt, 1),
// Functions such as 'Abs' are not supported
LessThan(Abs(attrInt), 10)
),
And(
GreaterThan(attrInt, 50),
LessThan(attrInt, 100))), None)
// NOT ((cint <= 1 OR ABS(cint) >= 10) AND (cint <= 50 OR cint >= 100))
testTranslateFilter(Not(And(
Or(
LessThanOrEqual(attrInt, 1),
// Functions such as 'Abs' are not supported
GreaterThanOrEqual(Abs(attrInt), 10)
),
Or(
LessThanOrEqual(attrInt, 50),
GreaterThanOrEqual(attrInt, 100)))), None)
// (cint = 1 OR cint = 10) OR (cint > 0 OR cint < -10)
testTranslateFilter(Or(
Or(
EqualTo(attrInt, 1),
EqualTo(attrInt, 10)
),
Or(
GreaterThan(attrInt, 0),
LessThan(attrInt, -10))),
Some(sources.Or(
sources.Or(
sources.EqualTo("cint", 1),
sources.EqualTo("cint", 10)),
sources.Or(
sources.GreaterThan("cint", 0),
sources.LessThan("cint", -10)))))
// (cint = 1 OR ABS(cint) = 10) OR (cint > 0 OR cint < -10)
testTranslateFilter(Or(
Or(
EqualTo(attrInt, 1),
// Functions such as 'Abs' are not supported
EqualTo(Abs(attrInt), 10)
),
Or(
GreaterThan(attrInt, 0),
LessThan(attrInt, -10))), None)
    // In end-to-end testing, a conjunctive predicate should have been split
    // before reaching DataSourceStrategy.translateFilter.
    // The cases below exist purely for unit-test purposes, to exercise each [[case]].
// (cint > 1 AND cint < 10) AND (cint = 6 AND cint IS NOT NULL)
testTranslateFilter(And(
And(
GreaterThan(attrInt, 1),
LessThan(attrInt, 10)
),
And(
EqualTo(attrInt, 6),
IsNotNull(attrInt))),
Some(sources.And(
sources.And(
sources.GreaterThan("cint", 1),
sources.LessThan("cint", 10)),
sources.And(
sources.EqualTo("cint", 6),
sources.IsNotNull("cint")))))
// (cint > 1 AND cint < 10) AND (ABS(cint) = 6 AND cint IS NOT NULL)
testTranslateFilter(And(
And(
GreaterThan(attrInt, 1),
LessThan(attrInt, 10)
),
And(
// Functions such as 'Abs' are not supported
EqualTo(Abs(attrInt), 6),
IsNotNull(attrInt))), None)
// (cint > 1 OR cint < 10) AND (cint = 6 OR cint IS NOT NULL)
testTranslateFilter(And(
Or(
GreaterThan(attrInt, 1),
LessThan(attrInt, 10)
),
Or(
EqualTo(attrInt, 6),
IsNotNull(attrInt))),
Some(sources.And(
sources.Or(
sources.GreaterThan("cint", 1),
sources.LessThan("cint", 10)),
sources.Or(
sources.EqualTo("cint", 6),
sources.IsNotNull("cint")))))
    // (cint > 1 OR cint < 10) AND (ABS(cint) = 6 OR cint IS NOT NULL)
testTranslateFilter(And(
Or(
GreaterThan(attrInt, 1),
LessThan(attrInt, 10)
),
Or(
// Functions such as 'Abs' are not supported
EqualTo(Abs(attrInt), 6),
IsNotNull(attrInt))), None)
}
/**
* Translate the given Catalyst [[Expression]] into data source [[sources.Filter]]
* then verify against the given [[sources.Filter]].
*/
def testTranslateFilter(catalystFilter: Expression, result: Option[sources.Filter]): Unit = {
assertResult(result) {
DataSourceStrategy.translateFilter(catalystFilter)
}
}
}
|
brad-kaiser/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategySuite.scala
|
Scala
|
apache-2.0
| 8,033
|
package com.seanshubin.todo.persistence.domain
import org.scalatest.FunSuite
class EntryPointRunnerTest extends FunSuite {
test("valid configuration") {
//given
val serverPortString = "12345"
val dataFileDirectoryString = "data/file/directory"
val helper = new Helper(serverPortString, dataFileDirectoryString)
//when
helper.runner.run()
//then
assert(helper.stubRunnable.invocations === 1)
}
test("missing port") {
//given
val helper = new Helper()
//when
val exception = intercept[RuntimeException] {
helper.runner.run()
}
//then
assert(exception.getMessage === "In command line arguments at position 0, expected 'server port', was missing")
}
test("non numeric port") {
//given
val serverPortString = "foo"
val dataFileDirectoryString = "data/file/directory"
val helper = new Helper(serverPortString, dataFileDirectoryString)
//when
val exception = intercept[RuntimeException] {
helper.runner.run()
}
//then
assert(exception.getMessage === "In command line arguments at position 0, unable to convert value for 'server port' to an integer, got 'foo'")
}
test("missing data directory") {
//given
val serverPortString = "12345"
val helper = new Helper(serverPortString)
//when
val exception = intercept[RuntimeException] {
helper.runner.run()
}
//then
assert(exception.getMessage === "In command line arguments at position 1, expected 'data file directory', was missing")
}
class StubRunnable(configuration: Configuration) extends Runnable {
var invocations = 0
override def run(): Unit = invocations += 1
}
class Helper(commandLineArguments: String*) {
val configurationValidator = new CommandLineArgumentsConfigurationValidator(commandLineArguments)
var stubRunnable: StubRunnable = null
val createRunner: Configuration => Runnable = configuration => {
stubRunnable = new StubRunnable(configuration)
stubRunnable
}
val runner = new EntryPointRunner(configurationValidator, createRunner)
}
}
|
SeanShubin/todo-persistence
|
domain/src/test/scala/com/seanshubin/todo/persistence/domain/EntryPointRunnerTest.scala
|
Scala
|
unlicense
| 2,120
|
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600e.v2
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalInteger, Input}
case class E8(value: Option[Int]) extends CtBoxIdentifier("Deed of covenant") with CtOptionalInteger with Input
|
ahudspith-equalexperts/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/ct600e/v2/E8.scala
|
Scala
|
apache-2.0
| 822
|
package com.wordnik.client.api
import com.wordnik.client.model.User
import java.io.File
import org.scalatra.{ TypedParamSupport, ScalatraServlet }
import org.scalatra.swagger._
import org.json4s._
import org.json4s.JsonDSL._
import org.scalatra.json.{ JValueResult, JacksonJsonSupport }
import org.scalatra.servlet.{FileUploadSupport, MultipartConfig, SizeConstraintExceededException}
import scala.collection.JavaConverters._
class UsercompletepostcompleteApi (implicit val swagger: Swagger) extends ScalatraServlet
with FileUploadSupport
with JacksonJsonSupport
with SwaggerSupport {
protected implicit val jsonFormats: Formats = DefaultFormats
protected val applicationDescription: String = "UsercompletepostcompleteApi"
override protected val applicationName: Option[String] = Some("Usercompletepostcomplete")
before() {
contentType = formats("json")
response.headers += ("Access-Control-Allow-Origin" -> "*")
}
val saveUserCompleteOperation = (apiOperation[User]("saveUserComplete")
summary "create or update a user name by id"
parameters(
bodyParam[User]("body").description("").optional
)
)
post("/user_complete_post_complete",operation(saveUserCompleteOperation)) {
val body = parsedBody.extract[User]
println("body: " + body)
}
}
|
jfiala/swagger-spring-demo
|
user-rest-service-1.0.2/generated-code/scalatra/src/main/scala/com/wordnik/client/api/UsercompletepostcompleteApi.scala
|
Scala
|
apache-2.0
| 1,443
|
/*
* Copyright (C) 2010 Lalit Pant <pant.lalit@gmail.com>
*
* The contents of this file are subject to the GNU General Public License
* Version 3 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.gnu.org/copyleft/gpl.html
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
*/
package net.kogics.kojo.mathworld
import geogebra.kernel.GeoSegment
import geogebra.kernel.GeoNumeric
import geogebra.kernel.GeoPoint
import geogebra.plugin.GgbAPI
import net.kogics.kojo.util.Utils
import net.kogics.kojo.core._
object MwLineSegment {
val lGen = new LabelGenerator("Ls")
def apply(ggbApi: GgbAPI, p1: MwPoint, p2: MwPoint) = {
net.kogics.kojo.util.Throttler.throttle()
val lineSegment = Utils.runInSwingThreadAndWait {
val gLineSegment = ggbApi.getKernel.Segment(lGen.next(), p1.gPoint, p2.gPoint)
new MwLineSegment(ggbApi, gLineSegment, p1, p2)
}
lineSegment
}
def apply(ggbApi: GgbAPI, p: MwPoint, len: Double) = {
net.kogics.kojo.util.Throttler.throttle()
val lineSegment = Utils.runInSwingThreadAndWait {
val segP = ggbApi.getKernel.Segment(Array(lGen.next(), MwPoint.lGen.next()),
p.gPoint, new GeoNumeric(ggbApi.getConstruction, len))
val p2 = new MwPoint(ggbApi, segP(1).asInstanceOf[GeoPoint])
new MwLineSegment(ggbApi, segP(0).asInstanceOf[GeoSegment], p, p2)
}
lineSegment
}
}
class MwLineSegment(ggbApi: GgbAPI, val gLineSegment: GeoSegment, p1: MwPoint, p2: MwPoint)
extends MwLine(ggbApi, gLineSegment, p1, p2) with MwShape {
ctorDone()
override protected def geogebraElement = gLineSegment
}
|
vnkmr7620/kojo
|
KojoEnv/src/net/kogics/kojo/mathworld/MwLineSegment.scala
|
Scala
|
gpl-3.0
| 1,921
|
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.index.legacy.z3
import java.nio.charset.StandardCharsets
import java.util.Date
import com.google.common.collect.{ImmutableSet, ImmutableSortedSet}
import com.google.common.primitives.{Bytes, Longs, Shorts}
import com.vividsolutions.jts.geom._
import org.apache.accumulo.core.client.mock.MockConnector
import org.apache.accumulo.core.conf.Property
import org.apache.hadoop.io.Text
import org.locationtech.geomesa.accumulo.AccumuloVersion
import org.locationtech.geomesa.accumulo.data.{AccumuloDataStore, AccumuloFeature}
import org.locationtech.geomesa.accumulo.index.{AccumuloColumnGroups, AccumuloFeatureIndex}
import org.locationtech.geomesa.curve.BinnedTime.TimeToBinnedTime
import org.locationtech.geomesa.curve.{BinnedTime, LegacyZ3SFC}
import org.locationtech.geomesa.index.api.GeoMesaFeatureIndex
import org.locationtech.geomesa.index.utils.SplitArrays
import org.locationtech.geomesa.utils.geotools.Conversions._
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import scala.collection.JavaConversions._
trait Z3WritableIndex extends AccumuloFeatureIndex {
import AccumuloColumnGroups.BinColumnFamily
import Z3IndexV2._
def hasSplits: Boolean
override def delete(sft: SimpleFeatureType, ds: AccumuloDataStore, shared: Boolean): Unit = {
val table = getTableName(sft.getTypeName, ds)
if (ds.tableOps.exists(table)) {
// we need to synchronize deleting of tables in mock accumulo as it's not thread safe
if (ds.connector.isInstanceOf[MockConnector]) {
ds.connector.synchronized(ds.tableOps.delete(table))
} else {
ds.tableOps.delete(table)
}
}
}
override def getIdFromRow(sft: SimpleFeatureType): (Array[Byte], Int, Int, SimpleFeature) => String = {
val start = getIdRowOffset(sft)
(row, offset, length, feature) => new String(row, offset + start, length - start, StandardCharsets.UTF_8)
}
// split(1 byte), week(2 bytes), z value (8 bytes), id (n bytes)
protected def getPointRowKey(timeToIndex: TimeToBinnedTime, sfc: LegacyZ3SFC, splitArray: Seq[Array[Byte]], lenient: Boolean)
(wf: AccumuloFeature, dtgIndex: Int): Seq[Array[Byte]] = {
val numSplits = splitArray.length
val split = splitArray(wf.idHash % numSplits)
val (timeBin, z) = {
val dtg = wf.feature.getAttribute(dtgIndex).asInstanceOf[Date]
val time = if (dtg == null) 0 else dtg.getTime
val BinnedTime(b, t) = timeToIndex(time)
val geom = wf.feature.point
if (geom == null) {
throw new IllegalArgumentException(s"Null geometry in feature ${wf.feature.getID}")
}
(b, sfc.index(geom.getX, geom.getY, t, lenient).z)
}
val id = wf.feature.getID.getBytes(StandardCharsets.UTF_8)
Seq(Bytes.concat(split, Shorts.toByteArray(timeBin), Longs.toByteArray(z), id))
}
// split(1 byte), week (2 bytes), z value (3 bytes), id (n bytes)
protected def getGeomRowKeys(timeToIndex: TimeToBinnedTime, sfc: LegacyZ3SFC, splitArray: Seq[Array[Byte]])
(wf: AccumuloFeature, dtgIndex: Int): Seq[Array[Byte]] = {
val numSplits = splitArray.length
val split = splitArray(wf.idHash % numSplits)
val (timeBin, zs) = {
val dtg = wf.feature.getAttribute(dtgIndex).asInstanceOf[Date]
val time = if (dtg == null) 0 else dtg.getTime
val BinnedTime(b, t) = timeToIndex(time)
val geom = wf.feature.getDefaultGeometry.asInstanceOf[Geometry]
if (geom == null) {
throw new IllegalArgumentException(s"Null geometry in feature ${wf.feature.getID}")
}
(Shorts.toByteArray(b), zBox(sfc, geom, t).toSeq)
}
val id = wf.feature.getID.getBytes(StandardCharsets.UTF_8)
zs.map(z => Bytes.concat(split, timeBin, Longs.toByteArray(z).take(GEOM_Z_NUM_BYTES), id))
}
// gets a sequence of (week, z) values that cover the geometry
private def zBox(sfc: LegacyZ3SFC, geom: Geometry, t: Long): Set[Long] = geom match {
case g: Point => Set(sfc.index(g.getX, g.getY, t).z)
case g: LineString =>
// we flatMap bounds for each line segment so we cover a smaller area
(0 until g.getNumPoints).map(g.getPointN).sliding(2).flatMap { case Seq(one, two) =>
val (xmin, xmax) = minMax(one.getX, two.getX)
val (ymin, ymax) = minMax(one.getY, two.getY)
getZPrefixes(sfc, xmin, ymin, xmax, ymax, t)
}.toSet
case g: GeometryCollection => (0 until g.getNumGeometries).toSet.map(g.getGeometryN).flatMap(zBox(sfc, _, t))
case g: Geometry =>
val env = g.getEnvelopeInternal
getZPrefixes(sfc, env.getMinX, env.getMinY, env.getMaxX, env.getMaxY, t)
}
private def minMax(a: Double, b: Double): (Double, Double) = if (a < b) (a, b) else (b, a)
// gets z values that cover the bounding box
private def getZPrefixes(sfc: LegacyZ3SFC, xmin: Double, ymin: Double, xmax: Double, ymax: Double, t: Long): Set[Long] = {
sfc.ranges((xmin, xmax), (ymin, ymax), (t, t), 8 * GEOM_Z_NUM_BYTES).flatMap { range =>
val lower = range.lower & GEOM_Z_MASK
val upper = range.upper & GEOM_Z_MASK
if (lower == upper) {
Seq(lower)
} else {
val count = ((upper - lower) / GEOM_Z_STEP).toInt
Seq.tabulate(count)(i => lower + i * GEOM_Z_STEP) :+ upper
}
}.toSet
}
// gets the offset into the row for the id bytes
def getIdRowOffset(sft: SimpleFeatureType): Int = {
val length = if (sft.isPoints) 10 else 2 + GEOM_Z_NUM_BYTES // week + z bytes
val prefix = if (hasSplits) 1 else 0 // shard
prefix + length
}
override def configure(sft: SimpleFeatureType, ds: AccumuloDataStore): Unit = {
    // z3 always has its own table
// note: we don't call super as it will write the table name we're overriding
val suffix = GeoMesaFeatureIndex.tableSuffix(this)
val table = GeoMesaFeatureIndex.formatSoloTableName(ds.config.catalog, suffix, sft.getTypeName)
ds.metadata.insert(sft.getTypeName, tableNameKey, table)
AccumuloVersion.ensureTableExists(ds.connector, table)
val localityGroups = Seq(BinColumnFamily, AccumuloColumnGroups.default).map(cf => (cf.toString, ImmutableSet.of(cf))).toMap
ds.tableOps.setLocalityGroups(table, localityGroups)
// drop first split, otherwise we get an empty tablet
val splits = SplitArrays.apply(sft.getZShards).drop(1).map(new Text(_)).toSet
val splitsToAdd = splits -- ds.tableOps.listSplits(table).toSet
if (splitsToAdd.nonEmpty) {
// noinspection RedundantCollectionConversion
ds.tableOps.addSplits(table, ImmutableSortedSet.copyOf(splitsToAdd.toIterable))
}
ds.tableOps.setProperty(table, Property.TABLE_BLOCKCACHE_ENABLED.getKey, "true")
}
}
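// Illustrative sketch, not part of the original file: the point row-key layout described in
// the comments above -- a 1-byte shard split, a 2-byte time bin, an 8-byte z value, then the
// feature id bytes. Names are hypothetical; the real keys are built with Guava's Bytes/Shorts/Longs.
object Z3RowKeyLayoutSketch {
  import java.nio.charset.StandardCharsets
  def pointRowKey(split: Byte, timeBin: Short, z: Long, id: String): Array[Byte] = {
    val idBytes = id.getBytes(StandardCharsets.UTF_8)
    val buf = java.nio.ByteBuffer.allocate(1 + 2 + 8 + idBytes.length)
    buf.put(split).putShort(timeBin).putLong(z).put(idBytes)
    buf.array()
  }
  def main(args: Array[String]): Unit =
    println(pointRowKey(0, 42, 123456789L, "feature-1").length) // 11 header bytes + 9 id bytes = 20
}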
|
ddseapy/geomesa
|
geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/index/legacy/z3/Z3WritableIndex.scala
|
Scala
|
apache-2.0
| 7,305
|
// scalac: -Xasync
object Test extends scala.tools.partest.JUnitTest(classOf[scala.async.run.exceptions.ExceptionsSpec])
package scala.async.run.exceptions {
import scala.tools.testkit.async.Async.{async, await}
import scala.concurrent.{Future, ExecutionContext, Await}
import ExecutionContext.Implicits._
import scala.concurrent.duration._
import scala.reflect.ClassTag
import scala.tools.testkit.AssertUtil.assertThrows
import org.junit.Test
class ExceptionsSpec {
@Test
def `uncaught exception within async`(): Unit = {
val fut = async { throw new Exception("problem") }
assertThrows[Exception] { Await.result(fut, 2.seconds) }
}
@Test
def `uncaught exception within async after await`(): Unit = {
val base = Future { "five!".length }
val fut = async {
val len = await(base)
throw new Exception(s"illegal length: $len")
}
assertThrows[Exception] { Await.result(fut, 2.seconds) }
}
@Test
def `await failing future within async`(): Unit = {
val base = Future[Int] { throw new Exception("problem") }
val fut = async {
val x = await(base)
x * 2
}
assertThrows[Exception] { Await.result(fut, 2.seconds) }
}
@Test
def `await failing future within async after await`(): Unit = {
val base = Future[Any] { "five!".length }
val fut = async {
val a = await(base.mapTo[Int]) // result: 5
val b = await((Future { (a * 2).toString }).mapTo[Int]) // result: ClassCastException
val c = await(Future { (7 * 2).toString }) // result: "14"
s"$b-$c"
}
assertThrows[ClassCastException] { Await.result(fut, 2.seconds) }
}
}
}
|
scala/scala
|
test/async/jvm/exceptions.scala
|
Scala
|
apache-2.0
| 1,771
|
package x7c1.chaff.publish
import sbt.State
object CommandRunner {
/**
* Convert the given command string to a release step action,
* preserving and invoking remaining commands
* Note: This was copied from https://github.com/sbt/sbt-release/blob/663cfd426361484228a21a1244b2e6b0f7656bdf/src/main/scala/ReleasePlugin.scala#L99-L115
*/
def runCommand(command: String): State => State = { st: State =>
import sbt.complete.Parser
@annotation.tailrec
def runCommand(command: String, state: State): State = {
val nextState = Parser.parse(command, state.combinedParser) match {
case Right(cmd) => cmd()
        case Left(msg) => throw sys.error(s"Invalid programmatic input:\n$msg")
}
nextState.remainingCommands.toList match {
case Nil => nextState
case head :: tail => runCommand(head, nextState.copy(remainingCommands = tail))
}
}
runCommand(command, st.copy(remainingCommands = Nil)).
copy(remainingCommands = st.remainingCommands)
}
}
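Because runCommand above returns a plain State => State, individual command invocations compose with andThen and can back a custom sbt command. A hedged usage sketch; the object name, command name, and command strings below are illustrative, not part of this project:
import sbt.{Command, State}
import x7c1.chaff.publish.CommandRunner.runCommand
object ReleaseLikeSteps {
  // Compose two command invocations into one state transformation.
  val cleanThenPublish: State => State =
    runCommand("clean") andThen runCommand("publishLocal")
  // A custom command that applies the composed steps; install it via `commands += ReleaseLikeSteps.releaseLocal`.
  val releaseLocal: Command = Command.command("releaseLocal")(cleanThenPublish)
}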
|
x7c1/Chaff
|
chaff-publish/src/main/scala/x7c1/chaff/publish/CommandRunner.scala
|
Scala
|
mit
| 1,030
|
package beyond.route
import akka.actor.Actor
import akka.actor.ActorLogging
import beyond.Authenticated
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.recipes.cache.NodeCache
import org.apache.curator.framework.recipes.cache.NodeCacheListener
import play.api.libs.json.JsArray
import play.api.libs.json.Json
object RoutingTableWatcher {
val Name: String = "routingTableWatcher"
}
class RoutingTableWatcher(curatorFramework: CuratorFramework) extends NodeCacheListener with Actor with ActorLogging {
import beyond.route.RoutingTableConfig._
private val routingTableWatcher = {
val nodeCache = new NodeCache(curatorFramework, RoutingTablePath)
nodeCache.getListenable.addListener(this)
nodeCache
}
override def nodeChanged() {
val changedData = routingTableWatcher.getCurrentData.getData
Authenticated.syncRoutingTable(Json.parse(changedData).as[JsArray])
}
override def preStart() {
curatorFramework.create().inBackground().forPath(RoutingTablePath, "[]".getBytes("UTF-8"))
routingTableWatcher.start()
log.info("RoutingTableUpdateActor started")
}
override def postStop() {
routingTableWatcher.close()
log.info("RoutingTableUpdateActor stopped")
}
override def receive: Receive = Actor.emptyBehavior
}
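A minimal wiring sketch for the watcher above, assuming a local ZooKeeper; the connection string, retry policy, and actor system name are placeholders and not taken from this project:
import akka.actor.{ActorSystem, Props}
import beyond.route.RoutingTableWatcher
import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.ExponentialBackoffRetry
object RoutingTableWatcherExample extends App {
  val curator = CuratorFrameworkFactory.newClient(
    "localhost:2181", new ExponentialBackoffRetry(1000, 3))
  curator.start()
  // preStart() creates the routing table node if missing and starts the NodeCache,
  // so routing table updates begin flowing to Authenticated.syncRoutingTable.
  val system = ActorSystem("beyond-example")
  system.actorOf(Props(new RoutingTableWatcher(curator)), RoutingTableWatcher.Name)
}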
|
SollmoStudio/beyond
|
core/app/beyond/route/RoutingTableWatcher.scala
|
Scala
|
apache-2.0
| 1,314
|
package domala.tests.dao
import domala._
import domala.jdbc.Config
import domala.tests._
import domala.tests.models._
import org.scalatest.{BeforeAndAfter, FunSuite}
class ProtectedDefTestSuite extends FunSuite with BeforeAndAfter {
private implicit val config: Config = new H2TestConfigTemplate("protected_def_test"){}
val envDao: PersonDao = PersonDao.impl
val dao: ProtectedDao = ProtectedDao.impl
test("use internal dao") {
Required {
envDao.create()
envDao.registerInitialDepartment()
val inserted = envDao.batchInsert((1 to 20)
.map(i => Person(ID(i), Some(Name("name" + i)), Some(i * 3), Address("city" + i, "street" + i), Some(1), None)))
val selected = dao.findByIds((10 to 20).map(ID[Person]))
assert(selected == inserted.entities.filter(_.id >= ID(10)))
}
}
}
@Dao
trait ProtectedDao {
@Select("select /*%expand*/* from person where id in /* ids */() order by id", strategy = SelectType.ITERATOR)
protected def findByIdsInternally[R](ids: Seq[ID[Person]])(f: Iterator[Person] => R): R
def findByIds(ids: Seq[ID[Person]]): Seq[Person] = {
ids.grouped(3).flatMap(groupedIds => findByIdsInternally(groupedIds)(_.toList)).toSeq
}
}
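The findByIds helper above keeps each generated IN clause small by querying in groups of three ids. The same batching shape, stripped of the DAO machinery (the findBatch stand-in below is hypothetical):
object GroupedBatchSketch extends App {
  // Stand-in for a query that accepts only a bounded number of ids per call.
  def findBatch(ids: Seq[Int]): Seq[Int] = ids
  val allIds = (1 to 10).toList
  // grouped(3) yields batches of at most three ids; flatMap stitches the results back together in order.
  val result = allIds.grouped(3).flatMap(findBatch).toList
  println(result.mkString(", ")) // 1, 2, ..., 10
}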
|
bakenezumi/domala
|
paradise/src/test/scala/domala/tests/dao/ProtectedDefTestSuite.scala
|
Scala
|
apache-2.0
| 1,215
|
package com.github.j5ik2o.dddbase.example.repository.util
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{ Seconds, Span }
trait ScalaFuturesSupportSpec { this: ScalaFutures =>
override implicit def patienceConfig: PatienceConfig =
PatienceConfig(timeout = scaled(Span(60, Seconds)), interval = scaled(Span(1, Seconds)))
}
|
j5ik2o/scala-ddd-base-functional
|
example/src/test/scala/com/github/j5ik2o/dddbase/example/repository/util/ScalaFuturesSupportSpec.scala
|
Scala
|
mit
| 354
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.network
import java.nio._
import java.nio.channels._
import kafka.utils._
import kafka.api.RequestOrResponse
import org.apache.kafka.common.requests.{AbstractRequestResponse, ResponseHeader}
@nonthreadsafe
private[kafka] class BoundedByteBufferSend(val buffer: ByteBuffer) extends Send {
private val sizeBuffer = ByteBuffer.allocate(4)
  // Avoid possibility of overflow: the payload plus the 4-byte size header must fit in Int.MaxValue bytes
if(buffer.remaining > Int.MaxValue - sizeBuffer.limit)
throw new IllegalStateException("Attempt to create a bounded buffer of " + buffer.remaining + " bytes, but the maximum " +
"allowable size for a bounded buffer is " + (Int.MaxValue - sizeBuffer.limit) + ".")
sizeBuffer.putInt(buffer.limit)
sizeBuffer.rewind()
var complete: Boolean = false
def this(size: Int) = this(ByteBuffer.allocate(size))
def this(request: RequestOrResponse) = {
this(request.sizeInBytes + (if(request.requestId != None) 2 else 0))
request.requestId match {
case Some(requestId) =>
buffer.putShort(requestId)
case None =>
}
request.writeTo(buffer)
buffer.rewind()
}
def this(header: ResponseHeader, body: AbstractRequestResponse) = {
this(header.sizeOf + body.sizeOf)
header.writeTo(buffer)
body.writeTo(buffer)
buffer.rewind
}
def writeTo(channel: GatheringByteChannel): Int = {
expectIncomplete()
val written = channel.write(Array(sizeBuffer, buffer))
// if we are done, mark it off
if(!buffer.hasRemaining)
complete = true
written.asInstanceOf[Int]
}
}
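BoundedByteBufferSend frames a payload by writing a 4-byte size header followed by the payload itself. A sketch of that framing with plain java.nio and no Kafka types (the payload string is arbitrary):
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
object SizePrefixFramingSketch extends App {
  val payload = ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8))
  // The header carries the payload length so the receiver knows how many bytes follow.
  val sizeHeader = ByteBuffer.allocate(4)
  sizeHeader.putInt(payload.limit())
  sizeHeader.rewind()
  // A GatheringByteChannel would write Array(sizeHeader, payload) in a single call; here we only report the framed size.
  println(s"frame = ${sizeHeader.limit() + payload.limit()} bytes (4-byte header + ${payload.limit()}-byte payload)")
}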
|
crashlytics/kafka
|
core/src/main/scala/kafka/network/BoundedByteBufferSend.scala
|
Scala
|
apache-2.0
| 2,423
|
package scalty.tests.types
import scalty.tests.context.TestScaltyExecutionContext
import scalty.tests.suites.ScaltySuiteWithTestScaltyExecutionContext
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.language.postfixOps
class CustomExecutionScaltyExecutionContextTest extends ScaltySuiteWithTestScaltyExecutionContext {
test("check custom ExecutionContext") {
val or: Or[String] = Future {
Thread.currentThread().getName
}.toOr
whenReady(or) { result =>
assert(result.contains(TestScaltyExecutionContext.FACTORY_NAME))
}
}
}
|
awesome-it-ternopil/scalty
|
src/test/scala/scalty/tests/types/CustomExecutionScaltyExecutionContextTest.scala
|
Scala
|
mit
| 601
|
package com.dividezero.stubby.core.service.model
import org.scalatest.FunSuite
import com.dividezero.stubby.core.model.StubRequest
import com.dividezero.stubby.core.model.StubParam
import com.dividezero.stubby.core.service.model.FieldType._
import com.dividezero.stubby.core.util.OptionUtils
import org.scalatest.Matchers
class RequestPatternTest extends FunSuite with Matchers {
import OptionUtils._
val stubbedRequest = new StubRequest(
method = "PO.*",
path = "/request/.*",
params = List(StubParam("foo", "b.r")),
headers = List(StubParam("Content-Type", "text/plain; .+")),
body = "body .*")
val incomingRequest = new StubRequest(
method = "POST",
path = "/request/path",
params = List(StubParam("foo", "bar")),
headers = List(StubParam("Content-Type", "text/plain; charset=UTF-8")),
body = "body pattern")
def assertNotFound(fieldType: FieldType, name: String, expected: String)(implicit result: MatchResult) = {
val expectedField = new PartialMatchField(fieldType, name, expected).asNotFound
assert(!result.matches)
result.fields should contain(expectedField)
}
def assertMatchFailure(fieldType: FieldType, name: String, expected: String, actual: String)(implicit result: MatchResult) = {
val expectedField = new PartialMatchField(fieldType, name, expected).asMatchFailure(actual)
assert(!result.matches)
result.fields should contain(expectedField)
}
def assertMatchSuccess(fieldType: FieldType, name: String, expected: String, actual: String)(implicit result: MatchResult) = {
val expectedField = new PartialMatchField(fieldType, name, expected).asMatch(actual)
assert(result.matches)
result.fields should contain(expectedField)
}
test("equality") {
assert(new RequestPattern(stubbedRequest) === new RequestPattern(stubbedRequest))
}
test("hash code") {
assert(new RequestPattern(stubbedRequest).hashCode === new RequestPattern(stubbedRequest).hashCode)
}
test("construct from stubbed request") {
val pattern = new RequestPattern(stubbedRequest)
assert(pattern.method.get === "PO.*")
assert(pattern.path.get === "/request/.*")
assert(pattern.params.size === 1)
assert(pattern.params.head.name === "foo")
assert(pattern.params.head.pattern === "b.r")
assert(pattern.headers.size === 1)
assert(pattern.headers.head.name === "Content-Type")
assert(pattern.headers.head.pattern === "text/plain; .+")
assert(pattern.body.get === new RegexBodyPattern("body .*"))
}
test("construct from empty pattern") {
val pattern = new RequestPattern(new StubRequest())
assert(pattern.method.isEmpty)
assert(pattern.path.isEmpty)
assert(pattern.body.isEmpty)
assert(pattern.params.isEmpty)
assert(pattern.headers.isEmpty)
}
test("JSON body pattern map") {
val body = Map("foo" -> "bar")
val request = new StubRequest(body = body)
val pattern = new RequestPattern(request)
assert(pattern.body.get === new JsonBodyPattern(body))
}
test("JSON body pattern mutable map") {
val body = collection.mutable.Map("foo" -> "bar")
val request = new StubRequest(body = body)
val pattern = new RequestPattern(request)
assert(pattern.body.get === new JsonBodyPattern(body))
}
test("JSON body pattern list") {
val body = List("foo", "bar")
val request = new StubRequest(body = body)
val pattern = new RequestPattern(request)
assert(pattern.body.get === new JsonBodyPattern(body))
}
test("JSON body pattern mutable list") {
val body = collection.mutable.ListBuffer("foo", "bar")
val request = new StubRequest(body = body)
val pattern = new RequestPattern(request)
assert(pattern.body.get === new JsonBodyPattern(body))
}
test("successful match") {
implicit val result = new RequestPattern(stubbedRequest).matches(incomingRequest)
assert(result.matches)
assertMatchSuccess(METHOD, "method", "PO.*", "POST")
assertMatchSuccess(PATH, "path", "/request/.*", "/request/path")
assertMatchSuccess(QUERY_PARAM, "foo", "b.r", "bar")
assertMatchSuccess(HEADER, "Content-Type", "text/plain; .+", "text/plain; charset=UTF-8")
assertMatchSuccess(BODY, "body", "body .*", "body pattern")
}
test("matches with extra params") {
val incoming = incomingRequest.copy(
params = incomingRequest.params :+ StubParam("what", "eva"))
val result = new RequestPattern(stubbedRequest).matches(incoming)
assert(result.matches)
}
test("matches parameter with groups in pattern") {
val pattern = stubbedRequest.copy(params = List(StubParam("foo", "(b)(a)r")))
val result = new RequestPattern(pattern).matches(incomingRequest)
assert(result.matches)
}
test("doesn't match partial parameter value") {
val pattern = stubbedRequest.copy(params = List(StubParam("foo", "b")))
val result = new RequestPattern(pattern).matches(incomingRequest)
assert(!result.matches)
}
test("doesn't match incorrect params") {
val incoming = incomingRequest.copy(params = List(StubParam("foo", "invalid")))
val result = new RequestPattern(stubbedRequest).matches(incoming)
assertMatchFailure(QUERY_PARAM, "foo", "b.r", "invalid")(result)
}
test("doesn't match when no parameters") {
val incoming = incomingRequest.copy(params = Nil)
val result = new RequestPattern(stubbedRequest).matches(incoming)
assertNotFound(QUERY_PARAM, "foo", "b.r")(result)
}
test("matches with extra headers") {
val incoming = incomingRequest.copy(
headers = incomingRequest.headers :+ StubParam("Content-Type", "application/json"))
val result = new RequestPattern(stubbedRequest).matches(incoming)
assert(result.matches)
}
test("matches header value with groups in pattern") {
val pattern = stubbedRequest.copy(headers = List(StubParam("Content-Type", "(text/plain); (.+)")))
val result = new RequestPattern(stubbedRequest).matches(incomingRequest)
assert(result.matches)
}
test("doesn't match partial header value") {
val pattern = stubbedRequest.copy(headers = List(StubParam("Content-Type", "text")))
val result = new RequestPattern(pattern).matches(incomingRequest)
assertMatchFailure(HEADER, "Content-Type", "text", "text/plain; charset=UTF-8")(result)
}
test("doesn't match incorrect headers") {
val incoming = incomingRequest.copy(
headers = List(StubParam("Content-Type", "image/gif")))
val result = new RequestPattern(stubbedRequest).matches(incoming)
assertMatchFailure(HEADER, "Content-Type", "text/plain; .+", "image/gif")(result)
}
test("doesn't match when no headers") {
val incoming = incomingRequest.copy(headers = Nil)
val result = new RequestPattern(stubbedRequest).matches(incoming)
assertNotFound(HEADER, "Content-Type", "text/plain; .+")(result)
}
test("doesn't match wrong body") {
val incoming = incomingRequest.copy(body = "wrong body")
val result = new RequestPattern(stubbedRequest).matches(incoming)
assertMatchFailure(BODY, "body", "body .*", "wrong body")(result)
}
test("doesn't match body when empty") {
val incoming = incomingRequest.copy(body = None)
val result = new RequestPattern(stubbedRequest).matches(incoming)
assertNotFound(BODY, "body", "<pattern>")(result)
}
test("matches method pattern with groups") {
val pattern = stubbedRequest.copy(method = "(PO)(.*)")
val result = new RequestPattern(pattern).matches(incomingRequest)
assert(result.matches)
}
test("doesn't match when wrong method") {
val incoming = incomingRequest.copy(method = "HEAD")
val result = new RequestPattern(stubbedRequest).matches(incoming)
assertMatchFailure(METHOD, "method", "PO.*", "HEAD")(result)
}
test("doesn't match partial method string") {
val incoming = incomingRequest.copy(method = "XPOST")
val result = new RequestPattern(stubbedRequest).matches(incoming)
assertMatchFailure(METHOD, "method", "PO.*", "XPOST")(result)
}
test("matches path with groups") {
val pattern = stubbedRequest.copy(path = "/(request)/(.*)")
val result = new RequestPattern(pattern).matches(incomingRequest)
assert(result.matches)
}
test("doesn't match incorrect path") {
val incoming = incomingRequest.copy(path = "/invalid")
val result = new RequestPattern(stubbedRequest).matches(incoming)
assertMatchFailure(PATH, "path", "/request/.*", "/invalid")(result)
}
test("doesn't match partial path string") {
val incoming = incomingRequest.copy(path = "/invalid/request/test")
val result = new RequestPattern(stubbedRequest).matches(incoming)
assertMatchFailure(PATH, "path", "/request/.*", "/invalid/request/test")(result)
}
}
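The partial-value tests above hinge on full-string matching semantics: a field matches only when the pattern covers the entire value. A dependency-free sketch of that distinction, mirroring the values used in the tests:
object FullMatchSketch extends App {
  import java.util.regex.Pattern
  def fullMatch(pattern: String, value: String): Boolean =
    Pattern.compile(pattern).matcher(value).matches()
  println(fullMatch("PO.*", "POST")) // true: the pattern covers the whole value
  println(fullMatch("b", "bar"))     // false: a partial match is not enough
  println(fullMatch("text", "text/plain; charset=UTF-8")) // false, as in the partial header test
}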
|
themillhousegroup/http-stub-server-scala
|
core/src/test/scala/com/dividezero/stubby/core/service/model/RequestPatternTest.scala
|
Scala
|
apache-2.0
| 8,822
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.api.java
import java.util
import java.lang.{Long => JLong}
import java.util.{List => JList}
import scala.collection.JavaConversions._
import scala.language.implicitConversions
import scala.reflect.ClassTag
import org.apache.spark.api.java.{JavaPairRDD, JavaRDD, JavaRDDLike}
import org.apache.spark.api.java.JavaPairRDD._
import org.apache.spark.api.java.JavaSparkContext.fakeClassTag
import org.apache.spark.api.java.function.{Function => JFunction, Function2 => JFunction2, Function3 => JFunction3, _}
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming._
import org.apache.spark.streaming.api.java.JavaDStream._
import org.apache.spark.streaming.dstream.DStream
trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This, R], R <: JavaRDDLike[T, R]]
extends Serializable {
implicit val classTag: ClassTag[T]
def dstream: DStream[T]
def wrapRDD(in: RDD[T]): R
implicit def scalaIntToJavaLong(in: DStream[Long]): JavaDStream[JLong] = {
in.map(new JLong(_))
}
/**
* Print the first ten elements of each RDD generated in this DStream. This is an output
   * operator, so this DStream will be registered as an output stream and therefore materialized.
*/
def print(): Unit = {
print(10)
}
/**
* Print the first num elements of each RDD generated in this DStream. This is an output
   * operator, so this DStream will be registered as an output stream and therefore materialized.
*/
def print(num: Int): Unit = {
dstream.print(num)
}
/**
* Return a new DStream in which each RDD has a single element generated by counting each RDD
* of this DStream.
*/
def count(): JavaDStream[JLong] = dstream.count()
/**
* Return a new DStream in which each RDD contains the counts of each distinct value in
* each RDD of this DStream. Hash partitioning is used to generate the RDDs with
* Spark's default number of partitions.
*/
def countByValue(): JavaPairDStream[T, JLong] = {
JavaPairDStream.scalaToJavaLong(dstream.countByValue())
}
/**
* Return a new DStream in which each RDD contains the counts of each distinct value in
* each RDD of this DStream. Hash partitioning is used to generate the RDDs with `numPartitions`
* partitions.
* @param numPartitions number of partitions of each RDD in the new DStream.
*/
def countByValue(numPartitions: Int): JavaPairDStream[T, JLong] = {
JavaPairDStream.scalaToJavaLong(dstream.countByValue(numPartitions))
}
/**
* Return a new DStream in which each RDD has a single element generated by counting the number
* of elements in a window over this DStream. windowDuration and slideDuration are as defined in
* the window() operation. This is equivalent to window(windowDuration, slideDuration).count()
*/
def countByWindow(windowDuration: Duration, slideDuration: Duration) : JavaDStream[JLong] = {
dstream.countByWindow(windowDuration, slideDuration)
}
/**
* Return a new DStream in which each RDD contains the count of distinct elements in
* RDDs in a sliding window over this DStream. Hash partitioning is used to generate the RDDs
* with Spark's default number of partitions.
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
*/
def countByValueAndWindow(windowDuration: Duration, slideDuration: Duration)
: JavaPairDStream[T, JLong] = {
JavaPairDStream.scalaToJavaLong(
dstream.countByValueAndWindow(windowDuration, slideDuration))
}
/**
* Return a new DStream in which each RDD contains the count of distinct elements in
* RDDs in a sliding window over this DStream. Hash partitioning is used to generate the RDDs
* with `numPartitions` partitions.
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param numPartitions number of partitions of each RDD in the new DStream.
*/
def countByValueAndWindow(windowDuration: Duration, slideDuration: Duration, numPartitions: Int)
: JavaPairDStream[T, JLong] = {
JavaPairDStream.scalaToJavaLong(
dstream.countByValueAndWindow(windowDuration, slideDuration, numPartitions))
}
/**
* Return a new DStream in which each RDD is generated by applying glom() to each RDD of
* this DStream. Applying glom() to an RDD coalesces all elements within each partition into
* an array.
*/
def glom(): JavaDStream[JList[T]] =
new JavaDStream(dstream.glom().map(x => new java.util.ArrayList[T](x.toSeq)))
/** Return the [[org.apache.spark.streaming.StreamingContext]] associated with this DStream */
def context(): StreamingContext = dstream.context
/** Return a new DStream by applying a function to all elements of this DStream. */
def map[R](f: JFunction[T, R]): JavaDStream[R] = {
new JavaDStream(dstream.map(f)(fakeClassTag))(fakeClassTag)
}
/** Return a new DStream by applying a function to all elements of this DStream. */
def mapToPair[K2, V2](f: PairFunction[T, K2, V2]): JavaPairDStream[K2, V2] = {
def cm: ClassTag[(K2, V2)] = fakeClassTag
new JavaPairDStream(dstream.map[(K2, V2)](f)(cm))(fakeClassTag[K2], fakeClassTag[V2])
}
/**
* Return a new DStream by applying a function to all elements of this DStream,
* and then flattening the results
*/
def flatMap[U](f: FlatMapFunction[T, U]): JavaDStream[U] = {
import scala.collection.JavaConverters._
def fn = (x: T) => f.call(x).asScala
new JavaDStream(dstream.flatMap(fn)(fakeClassTag[U]))(fakeClassTag[U])
}
/**
* Return a new DStream by applying a function to all elements of this DStream,
* and then flattening the results
*/
def flatMapToPair[K2, V2](f: PairFlatMapFunction[T, K2, V2]): JavaPairDStream[K2, V2] = {
import scala.collection.JavaConverters._
def fn = (x: T) => f.call(x).asScala
def cm: ClassTag[(K2, V2)] = fakeClassTag
new JavaPairDStream(dstream.flatMap(fn)(cm))(fakeClassTag[K2], fakeClassTag[V2])
}
/**
* Return a new DStream in which each RDD is generated by applying mapPartitions() to each RDDs
* of this DStream. Applying mapPartitions() to an RDD applies a function to each partition
* of the RDD.
*/
def mapPartitions[U](f: FlatMapFunction[java.util.Iterator[T], U]): JavaDStream[U] = {
def fn = (x: Iterator[T]) => asScalaIterator(f.call(asJavaIterator(x)).iterator())
new JavaDStream(dstream.mapPartitions(fn)(fakeClassTag[U]))(fakeClassTag[U])
}
/**
* Return a new DStream in which each RDD is generated by applying mapPartitions() to each RDDs
* of this DStream. Applying mapPartitions() to an RDD applies a function to each partition
* of the RDD.
*/
def mapPartitionsToPair[K2, V2](f: PairFlatMapFunction[java.util.Iterator[T], K2, V2])
: JavaPairDStream[K2, V2] = {
def fn = (x: Iterator[T]) => asScalaIterator(f.call(asJavaIterator(x)).iterator())
new JavaPairDStream(dstream.mapPartitions(fn))(fakeClassTag[K2], fakeClassTag[V2])
}
/**
* Return a new DStream in which each RDD has a single element generated by reducing each RDD
* of this DStream.
*/
def reduce(f: JFunction2[T, T, T]): JavaDStream[T] = dstream.reduce(f)
/**
* Return a new DStream in which each RDD has a single element generated by reducing all
* elements in a sliding window over this DStream.
* @param reduceFunc associative reduce function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @deprecated As this API is not Java compatible.
*/
@deprecated("Use Java-compatible version of reduceByWindow", "1.3.0")
def reduceByWindow(
reduceFunc: (T, T) => T,
windowDuration: Duration,
slideDuration: Duration
): DStream[T] = {
dstream.reduceByWindow(reduceFunc, windowDuration, slideDuration)
}
/**
* Return a new DStream in which each RDD has a single element generated by reducing all
* elements in a sliding window over this DStream.
* @param reduceFunc associative reduce function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
*/
def reduceByWindow(
reduceFunc: JFunction2[T, T, T],
windowDuration: Duration,
slideDuration: Duration
): JavaDStream[T] = {
dstream.reduceByWindow(reduceFunc, windowDuration, slideDuration)
}
/**
* Return a new DStream in which each RDD has a single element generated by reducing all
* elements in a sliding window over this DStream. However, the reduction is done incrementally
   * using the old window's reduced value:
* 1. reduce the new values that entered the window (e.g., adding new counts)
* 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
* This is more efficient than reduceByWindow without "inverse reduce" function.
* However, it is applicable to only "invertible reduce functions".
* @param reduceFunc associative reduce function
* @param invReduceFunc inverse reduce function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
*/
def reduceByWindow(
reduceFunc: JFunction2[T, T, T],
invReduceFunc: JFunction2[T, T, T],
windowDuration: Duration,
slideDuration: Duration
): JavaDStream[T] = {
dstream.reduceByWindow(reduceFunc, invReduceFunc, windowDuration, slideDuration)
}
/**
   * Return all the RDDs between 'fromTime' and 'toTime' (both included)
*/
def slice(fromTime: Time, toTime: Time): JList[R] = {
new util.ArrayList(dstream.slice(fromTime, toTime).map(wrapRDD(_)).toSeq)
}
/**
* Apply a function to each RDD in this DStream. This is an output operator, so
* 'this' DStream will be registered as an output stream and therefore materialized.
*
* @deprecated As of release 0.9.0, replaced by foreachRDD
*/
@Deprecated
def foreach(foreachFunc: JFunction[R, Void]) {
foreachRDD(foreachFunc)
}
/**
* Apply a function to each RDD in this DStream. This is an output operator, so
* 'this' DStream will be registered as an output stream and therefore materialized.
*
* @deprecated As of release 0.9.0, replaced by foreachRDD
*/
@Deprecated
def foreach(foreachFunc: JFunction2[R, Time, Void]) {
foreachRDD(foreachFunc)
}
/**
* Apply a function to each RDD in this DStream. This is an output operator, so
* 'this' DStream will be registered as an output stream and therefore materialized.
*/
def foreachRDD(foreachFunc: JFunction[R, Void]) {
dstream.foreachRDD(rdd => foreachFunc.call(wrapRDD(rdd)))
}
/**
* Apply a function to each RDD in this DStream. This is an output operator, so
* 'this' DStream will be registered as an output stream and therefore materialized.
*/
def foreachRDD(foreachFunc: JFunction2[R, Time, Void]) {
dstream.foreachRDD((rdd, time) => foreachFunc.call(wrapRDD(rdd), time))
}
/**
* Return a new DStream in which each RDD is generated by applying a function
* on each RDD of 'this' DStream.
*/
def transform[U](transformFunc: JFunction[R, JavaRDD[U]]): JavaDStream[U] = {
implicit val cm: ClassTag[U] = fakeClassTag
def scalaTransform (in: RDD[T]): RDD[U] =
transformFunc.call(wrapRDD(in)).rdd
dstream.transform(scalaTransform(_))
}
/**
* Return a new DStream in which each RDD is generated by applying a function
* on each RDD of 'this' DStream.
*/
def transform[U](transformFunc: JFunction2[R, Time, JavaRDD[U]]): JavaDStream[U] = {
implicit val cm: ClassTag[U] = fakeClassTag
def scalaTransform (in: RDD[T], time: Time): RDD[U] =
transformFunc.call(wrapRDD(in), time).rdd
dstream.transform(scalaTransform(_, _))
}
/**
* Return a new DStream in which each RDD is generated by applying a function
* on each RDD of 'this' DStream.
*/
def transformToPair[K2, V2](transformFunc: JFunction[R, JavaPairRDD[K2, V2]]):
JavaPairDStream[K2, V2] = {
implicit val cmk: ClassTag[K2] = fakeClassTag
implicit val cmv: ClassTag[V2] = fakeClassTag
def scalaTransform (in: RDD[T]): RDD[(K2, V2)] =
transformFunc.call(wrapRDD(in)).rdd
dstream.transform(scalaTransform(_))
}
/**
* Return a new DStream in which each RDD is generated by applying a function
* on each RDD of 'this' DStream.
*/
def transformToPair[K2, V2](transformFunc: JFunction2[R, Time, JavaPairRDD[K2, V2]]):
JavaPairDStream[K2, V2] = {
implicit val cmk: ClassTag[K2] = fakeClassTag
implicit val cmv: ClassTag[V2] = fakeClassTag
def scalaTransform (in: RDD[T], time: Time): RDD[(K2, V2)] =
transformFunc.call(wrapRDD(in), time).rdd
dstream.transform(scalaTransform(_, _))
}
/**
* Return a new DStream in which each RDD is generated by applying a function
* on each RDD of 'this' DStream and 'other' DStream.
*/
def transformWith[U, W](
other: JavaDStream[U],
transformFunc: JFunction3[R, JavaRDD[U], Time, JavaRDD[W]]
): JavaDStream[W] = {
implicit val cmu: ClassTag[U] = fakeClassTag
implicit val cmv: ClassTag[W] = fakeClassTag
def scalaTransform (inThis: RDD[T], inThat: RDD[U], time: Time): RDD[W] =
transformFunc.call(wrapRDD(inThis), other.wrapRDD(inThat), time).rdd
dstream.transformWith[U, W](other.dstream, scalaTransform(_, _, _))
}
/**
* Return a new DStream in which each RDD is generated by applying a function
* on each RDD of 'this' DStream and 'other' DStream.
*/
def transformWithToPair[U, K2, V2](
other: JavaDStream[U],
transformFunc: JFunction3[R, JavaRDD[U], Time, JavaPairRDD[K2, V2]]
): JavaPairDStream[K2, V2] = {
implicit val cmu: ClassTag[U] = fakeClassTag
implicit val cmk2: ClassTag[K2] = fakeClassTag
implicit val cmv2: ClassTag[V2] = fakeClassTag
def scalaTransform (inThis: RDD[T], inThat: RDD[U], time: Time): RDD[(K2, V2)] =
transformFunc.call(wrapRDD(inThis), other.wrapRDD(inThat), time).rdd
dstream.transformWith[U, (K2, V2)](other.dstream, scalaTransform(_, _, _))
}
/**
* Return a new DStream in which each RDD is generated by applying a function
* on each RDD of 'this' DStream and 'other' DStream.
*/
def transformWith[K2, V2, W](
other: JavaPairDStream[K2, V2],
transformFunc: JFunction3[R, JavaPairRDD[K2, V2], Time, JavaRDD[W]]
): JavaDStream[W] = {
implicit val cmk2: ClassTag[K2] = fakeClassTag
implicit val cmv2: ClassTag[V2] = fakeClassTag
implicit val cmw: ClassTag[W] = fakeClassTag
def scalaTransform (inThis: RDD[T], inThat: RDD[(K2, V2)], time: Time): RDD[W] =
transformFunc.call(wrapRDD(inThis), other.wrapRDD(inThat), time).rdd
dstream.transformWith[(K2, V2), W](other.dstream, scalaTransform(_, _, _))
}
/**
* Return a new DStream in which each RDD is generated by applying a function
* on each RDD of 'this' DStream and 'other' DStream.
*/
def transformWithToPair[K2, V2, K3, V3](
other: JavaPairDStream[K2, V2],
transformFunc: JFunction3[R, JavaPairRDD[K2, V2], Time, JavaPairRDD[K3, V3]]
): JavaPairDStream[K3, V3] = {
implicit val cmk2: ClassTag[K2] = fakeClassTag
implicit val cmv2: ClassTag[V2] = fakeClassTag
implicit val cmk3: ClassTag[K3] = fakeClassTag
implicit val cmv3: ClassTag[V3] = fakeClassTag
def scalaTransform (inThis: RDD[T], inThat: RDD[(K2, V2)], time: Time): RDD[(K3, V3)] =
transformFunc.call(wrapRDD(inThis), other.wrapRDD(inThat), time).rdd
dstream.transformWith[(K2, V2), (K3, V3)](other.dstream, scalaTransform(_, _, _))
}
/**
* Enable periodic checkpointing of RDDs of this DStream.
* @param interval Time interval after which generated RDD will be checkpointed
*/
def checkpoint(interval: Duration): DStream[T] = {
dstream.checkpoint(interval)
}
}
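The invertible reduceByWindow contract documented above (reduce the values entering the window, "inverse reduce" the values leaving it) is easiest to see with integer sums, where subtraction is the inverse of addition. The sketch below is plain Scala, independent of Spark, and only illustrates the arithmetic:
object IncrementalWindowSumSketch extends App {
  val batches = Seq(1, 2, 3, 4, 5, 6) // one value per batch interval
  val window  = 3                     // window length in batches (slide = 1)
  // Naive: recompute every window sum from scratch.
  val naive = batches.sliding(window).map(_.sum).toList
  // Incremental: reuse the previous sum, add the value entering the window,
  // and "inverse reduce" (subtract) the value leaving it.
  val first = batches.take(window).sum
  val incremental = (1 to batches.length - window).scanLeft(first) { (prev, i) =>
    prev + batches(i + window - 1) - batches(i - 1)
  }.toList
  println(naive)       // List(6, 9, 12, 15)
  println(incremental) // List(6, 9, 12, 15)
}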
|
trueyao/spark-lever
|
streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaDStreamLike.scala
|
Scala
|
apache-2.0
| 18,049
|
package com.twitter.algebird.caliper
import com.twitter.algebird._
import scala.util.Random
import com.twitter.bijection._
import java.util.concurrent.Executors
import com.twitter.algebird.util._
import com.google.caliper.{ Param, SimpleBenchmark }
import java.nio.ByteBuffer
import scala.math._
class OldQTreeSemigroup[A: Monoid](k: Int) extends QTreeSemigroup[A](k) {
override def sumOption(items: TraversableOnce[QTree[A]]) =
if (items.isEmpty) None
else Some(items.reduce(plus))
}
class QTreeBenchmark extends SimpleBenchmark {
var qtree: QTreeSemigroup[Long] = _
var oldqtree: QTreeSemigroup[Long] = _
@Param(Array("5", "10", "12"))
val depthK: Int = 0
@Param(Array("100", "10000"))
val numElements: Int = 0
var inputData: Seq[QTree[Long]] = _
override def setUp {
qtree = new QTreeSemigroup[Long](depthK)
oldqtree = new OldQTreeSemigroup(depthK)
val rng = new Random("qtree".hashCode)
inputData = (0L until numElements).map { _ =>
QTree(rng.nextInt(1000).toLong)
}
}
def timeSumOption(reps: Int): Int = {
var dummy = 0
while (dummy < reps) {
qtree.sumOption(inputData)
dummy += 1
}
dummy
}
/*
def timeOldSumOption(reps: Int): Int = {
var dummy = 0
while (dummy < reps) {
oldqtree.sumOption(inputData)
dummy += 1
}
dummy
} */
}
|
vidma/algebird
|
algebird-caliper/src/test/scala/com/twitter/algebird/caliper/QTreeBenchmark.scala
|
Scala
|
apache-2.0
| 1,366
|
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.qsu.mra
import slamdata.Predef.Int
import quasar.Qspec
import scala.Predef.implicitly
import cats.Eq
import cats.instances.int._
import cats.instances.option._
import cats.kernel.laws.discipline.{BoundedSemilatticeTests, CommutativeMonoidTests, OrderTests}
import org.scalacheck.Arbitrary
import org.specs2.mutable.SpecificationLike
import org.specs2.scalacheck.Parameters
import org.typelevel.discipline.specs2.mutable.Discipline
import scalaz.@@
import scalaz.Tags.{Conjunction, Disjunction}
object UopSpec extends Qspec
with SpecificationLike
with UopGenerator
with Discipline {
import Uop._
implicit val params = Parameters(maxSize = 10)
implicit def IntConjArb: Arbitrary[Uop[Int] @@ Conjunction] =
Conjunction.subst(implicitly[Arbitrary[Uop[Int]]])
implicit def IntConjEq: Eq[Uop[Int] @@ Conjunction] =
Conjunction.subst(Eq[Uop[Int]])
implicit def DisjArb: Arbitrary[Uop[Int] @@ Disjunction] =
Disjunction.subst(implicitly[Arbitrary[Uop[Int]]])
implicit def DisjEq: Eq[Uop[Int] @@ Disjunction] =
Disjunction.subst(Eq[Uop[Int]])
checkAll("Order[Uop[Int]]", OrderTests[Uop[Int]].order)
checkAll("CommutativeMonoid[Uop[Int] @@ Conjunction]", CommutativeMonoidTests[Uop[Int] @@ Conjunction].commutativeMonoid)
checkAll("BoundedSemilattice[Uop[Int] @@ Disjunction]", BoundedSemilatticeTests[Uop[Int] @@ Disjunction].boundedSemilattice)
}
|
slamdata/quasar
|
qsu/src/test/scala/quasar/qsu/mra/UopSpec.scala
|
Scala
|
apache-2.0
| 2,021
|
package es.weso.typing
import cats._, data._
import cats.implicits._
abstract class Typing[Key,Value,Error,Evidence] {
type Evidences = List[Evidence]
def hasType(key: Key, value: Value): Boolean =
getOkValues(key) contains value
def getValues(key: Key): Map[Value,TypingResult[Error,Evidence]]
def getOkValues(key: Key): Set[Value]
def getEvidences(key: Key, value: Value): Option[List[Evidence]]
def getFailedValues(key: Key): Set[Value]
def addEvidences(
key: Key,
value: Value,
es: Evidences): Typing[Key,Value,Error,Evidence]
def addEvidence(key: Key, value: Value,
es: Evidence): Typing[Key,Value,Error,Evidence]
def addNotEvidence(key: Key, value: Value, e: Error): Typing[Key,Value,Error,Evidence]
def addType(key:Key, value:Value,
evidences: List[Evidence] = List()): Typing[Key,Value,Error,Evidence] =
addEvidences(key,value,evidences)
def combineTyping(t: Typing[Key,Value,Error,Evidence]): Typing[Key,Value,Error,Evidence]
}
object Typing {
def empty[Key,Value,Error,Evidence]: Typing[Key,Value,Error, Evidence] = {
// val m: Map[Key, Map[Value,TypingResult[Error,Evidence]]] = Map()
TypingMap[Key,Value,Error,Evidence](Map())
}
def combineTypings[Key, Value, Error, Evidence](
ts: Seq[Typing[Key,Value,Error,Evidence]]): Typing[Key,Value,Error,Evidence] = {
val zero : Typing[Key,Value,Error,Evidence] = Typing.empty
ts.foldLeft(zero)(_.combineTyping(_))
}
}
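combineTypings above is the usual "fold a sequence from an empty element" pattern. The same shape with a Map-valued accumulator as a stand-in for TypingMap (the combine function below is illustrative, not this library's implementation):
object CombineFoldSketch extends App {
  type T = Map[String, Set[Int]]
  // Merge two accumulators, unioning the values recorded per key.
  def combine(a: T, b: T): T =
    b.foldLeft(a) { case (acc, (k, vs)) => acc.updated(k, acc.getOrElse(k, Set.empty[Int]) ++ vs) }
  val zero: T = Map.empty
  val parts = Seq(Map("x" -> Set(1)), Map("x" -> Set(2), "y" -> Set(3)))
  println(parts.foldLeft(zero)(combine)) // Map(x -> Set(1, 2), y -> Set(3))
}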
|
labra/rbe
|
src/main/scala/es/weso/typing/Typing.scala
|
Scala
|
mit
| 1,504
|
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.serving.api.service.http
import akka.actor.ActorRef
import akka.testkit.TestProbe
import com.stratio.sparta.serving.api.actor.PluginActor.{PluginResponse, UploadPlugins}
import com.stratio.sparta.serving.api.constants.HttpConstant
import com.stratio.sparta.serving.core.config.{SpartaConfig, SpartaConfigFactory}
import com.stratio.sparta.serving.core.models.dto.LoggedUserConstant
import com.stratio.sparta.serving.core.models.policy.files.{JarFile, JarFilesResponse}
import org.junit.runner.RunWith
import org.scalatest.WordSpec
import org.scalatest.junit.JUnitRunner
import spray.http._
import scala.util.{Failure, Success}
@RunWith(classOf[JUnitRunner])
class PluginsHttpServiceTest extends WordSpec
with PluginsHttpService
with HttpServiceBaseTest {
override val supervisor: ActorRef = testProbe.ref
val pluginTestProbe = TestProbe()
val dummyUser = Some(LoggedUserConstant.AnonymousUser)
override implicit val actors: Map[String, ActorRef] = Map.empty
override def beforeEach(): Unit = {
SpartaConfig.initMainConfig(Option(localConfig), SpartaConfigFactory(localConfig))
}
"PluginsHttpService.upload" should {
"Upload a file" in {
val response = JarFilesResponse(Success(Seq(JarFile("", "", "", ""))))
startAutopilot(response)
Put(s"/${HttpConstant.PluginsPath}") ~> routes(dummyUser) ~> check {
testProbe.expectMsgType[UploadPlugins]
status should be(StatusCodes.OK)
}
}
"Fail when service is not available" in {
val response = JarFilesResponse(Failure(new IllegalArgumentException("Error")))
startAutopilot(response)
Put(s"/${HttpConstant.PluginsPath}") ~> routes(dummyUser) ~> check {
testProbe.expectMsgType[UploadPlugins]
status should be(StatusCodes.InternalServerError)
}
}
}
}
|
fjsc/sparta
|
serving-api/src/test/scala/com/stratio/sparta/serving/api/service/http/PluginsHttpServiceTest.scala
|
Scala
|
apache-2.0
| 2,468
|
package sbt
import complete.{ Completion, Completions, DefaultParsers, HistoryCommands, Parser, TokenCompletions }
import classpath.ClasspathUtilities.toLoader
import DefaultParsers._
import Types.{ const, idFun }
import Function.tupled
import Command.applyEffect
import HistoryCommands.{ Start => HistoryPrefix }
import BasicCommandStrings._
import CommandUtil._
import BasicKeys._
import java.io.File
import scala.util.control.NonFatal
object BasicCommands {
lazy val allBasicCommands = Seq(nop, ignore, help, completionsCommand, multi, ifLast, append, setOnFailure, clearOnFailure, stashOnFailure, popOnFailure, reboot, call, early, exit, continuous, history, shell, read, alias) ++ compatCommands
def nop = Command.custom(s => success(() => s))
def ignore = Command.command(FailureWall)(idFun)
def early = Command.arb(earlyParser, earlyHelp) { (s, other) => other :: s }
private[this] def earlyParser = (s: State) => token(EarlyCommand).flatMap(_ => otherCommandParser(s))
private[this] def earlyHelp = Help(EarlyCommand, EarlyCommandBrief, EarlyCommandDetailed)
def help = Command.make(HelpCommand, helpBrief, helpDetailed)(helpParser)
def helpParser(s: State) =
{
val h = (Help.empty /: s.definedCommands) { (a, b) =>
a ++
(try b.help(s) catch { case NonFatal(ex) => Help.empty })
}
val helpCommands = h.detail.keySet
val spacedArg = singleArgument(helpCommands).?
applyEffect(spacedArg)(runHelp(s, h))
}
def runHelp(s: State, h: Help)(arg: Option[String]): State =
{
val message = try
Help.message(h, arg)
catch {
case NonFatal(ex) =>
ex.toString
}
System.out.println(message)
s
}
@deprecated("Use Help.moreMessage", "0.13.0")
def moreHelp(more: Seq[String]): String = Help.moreMessage(more)
def completionsCommand = Command.make(CompletionsCommand, CompletionsBrief, CompletionsDetailed)(completionsParser)
def completionsParser(state: State) =
{
val notQuoted = (NotQuoted ~ any.*) map { case (nq, s) => (nq +: s).mkString }
val quotedOrUnquotedSingleArgument = Space ~> (StringVerbatim | StringEscapable | notQuoted)
applyEffect(token(quotedOrUnquotedSingleArgument ?? "" examples ("", " ")))(runCompletions(state))
}
def runCompletions(state: State)(input: String): State = {
Parser.completions(state.combinedParser, input, 9).get map {
c => if (c.isEmpty) input else input + c.append
} foreach { c =>
System.out.println("[completions] " + c.replaceAll("\n", " "))
}
state
}
def multiParser(s: State): Parser[Seq[String]] =
{
val nonSemi = token(charClass(_ != ';').+, hide = const(true))
(token(';' ~> OptSpace) flatMap { _ => matched((s.combinedParser & nonSemi) | nonSemi) <~ token(OptSpace) } map (_.trim)).+
}
def multiApplied(s: State) =
Command.applyEffect(multiParser(s))(_ ::: s)
def multi = Command.custom(multiApplied, Help(Multi, MultiBrief, MultiDetailed))
lazy val otherCommandParser = (s: State) => token(OptSpace ~> combinedLax(s, NotSpaceClass ~ any.*))
def combinedLax(s: State, any: Parser[_]): Parser[String] =
matched(s.combinedParser | token(any, hide = const(true)))
def ifLast = Command(IfLast, Help.more(IfLast, IfLastDetailed))(otherCommandParser) { (s, arg) =>
if (s.remainingCommands.isEmpty) arg :: s else s
}
def append = Command(AppendCommand, Help.more(AppendCommand, AppendLastDetailed))(otherCommandParser) { (s, arg) =>
s.copy(remainingCommands = s.remainingCommands :+ arg)
}
def setOnFailure = Command(OnFailure, Help.more(OnFailure, OnFailureDetailed))(otherCommandParser) { (s, arg) =>
s.copy(onFailure = Some(arg))
}
private[sbt] def compatCommands = Seq(
Command.command(Compat.ClearOnFailure) { s =>
s.log.warn(Compat.ClearOnFailureDeprecated)
s.copy(onFailure = None)
},
Command.arb(s => token(Compat.OnFailure, hide = const(true)).flatMap(x => otherCommandParser(s))) { (s, arg) =>
s.log.warn(Compat.OnFailureDeprecated)
s.copy(onFailure = Some(arg))
},
Command.command(Compat.FailureWall) { s =>
s.log.warn(Compat.FailureWallDeprecated)
s
}
)
def clearOnFailure = Command.command(ClearOnFailure)(s => s.copy(onFailure = None))
def stashOnFailure = Command.command(StashOnFailure)(s => s.copy(onFailure = None).update(OnFailureStack)(s.onFailure :: _.toList.flatten))
def popOnFailure = Command.command(PopOnFailure) { s =>
val stack = s.get(OnFailureStack).getOrElse(Nil)
val updated = if (stack.isEmpty) s.remove(OnFailureStack) else s.put(OnFailureStack, stack.tail)
updated.copy(onFailure = stack.headOption.flatten)
}
def reboot = Command(RebootCommand, Help.more(RebootCommand, RebootDetailed))(rebootParser) { (s, full) =>
s.reboot(full)
}
def rebootParser(s: State) = token(Space ~> "full" ^^^ true) ?? false
def call = Command(ApplyCommand, Help.more(ApplyCommand, ApplyDetailed))(_ => callParser) {
case (state, (cp, args)) =>
val parentLoader = getClass.getClassLoader
state.log.info("Applying State transformations " + args.mkString(", ") + (if (cp.isEmpty) "" else " from " + cp.mkString(File.pathSeparator)))
val loader = if (cp.isEmpty) parentLoader else toLoader(cp.map(f => new File(f)), parentLoader)
val loaded = args.map(arg => ModuleUtilities.getObject(arg, loader).asInstanceOf[State => State])
(state /: loaded)((s, obj) => obj(s))
}
def callParser: Parser[(Seq[String], Seq[String])] = token(Space) ~> ((classpathOptionParser ?? Nil) ~ rep1sep(className, token(Space)))
private[this] def className: Parser[String] =
{
val base = StringBasic & not('-' ~> any.*, "Class name cannot start with '-'.")
def single(s: String) = Completions.single(Completion.displayStrict(s))
val compl = TokenCompletions.fixed((seen, level) => if (seen.startsWith("-")) Completions.nil else single("<class name>"))
token(base, compl)
}
private[this] def classpathOptionParser: Parser[Seq[String]] =
token(("-cp" | "-classpath") ~> Space) ~> classpathStrings <~ token(Space)
private[this] def classpathStrings: Parser[Seq[String]] =
token(StringBasic.map(s => IO.pathSplit(s).toSeq), "<classpath>")
def exit = Command.command(TerminateAction, exitBrief, exitBrief)(_ exit true)
def continuous =
Command(ContinuousExecutePrefix, continuousBriefHelp, continuousDetail)(otherCommandParser) { (s, arg) =>
withAttribute(s, Watched.Configuration, "Continuous execution not configured.") { w =>
val repeat = ContinuousExecutePrefix + (if (arg.startsWith(" ")) arg else " " + arg)
Watched.executeContinuously(w, s, arg, repeat)
}
}
def history = Command.custom(historyParser, BasicCommandStrings.historyHelp)
def historyParser(s: State): Parser[() => State] =
Command.applyEffect(HistoryCommands.actionParser) { histFun =>
val logError = (msg: String) => s.log.error(msg)
val hp = s get historyPath getOrElse None
val lines = hp.toList.flatMap(p => IO.readLines(p)).toIndexedSeq
histFun(complete.History(lines, hp, logError)) match {
case Some(commands) =>
commands foreach println //printing is more appropriate than logging
(commands ::: s).continue
case None => s.fail
}
}
def shell = Command.command(Shell, Help.more(Shell, ShellDetailed)) { s =>
val history = (s get historyPath) getOrElse Some(new File(s.baseDir, ".history"))
val prompt = (s get shellPrompt) match { case Some(pf) => pf(s); case None => "> " }
val reader = new FullReader(history, s.combinedParser)
val line = reader.readLine(prompt)
line match {
case Some(line) =>
val newState = s.copy(onFailure = Some(Shell), remainingCommands = line +: Shell +: s.remainingCommands).setInteractive(true)
if (line.trim.isEmpty) newState else newState.clearGlobalLog
case None => s.setInteractive(false)
}
}
def read = Command.make(ReadCommand, Help.more(ReadCommand, ReadDetailed))(s => applyEffect(readParser(s))(doRead(s)))
def readParser(s: State) =
{
val files = (token(Space) ~> fileParser(s.baseDir)).+
val portAndSuccess = token(OptSpace) ~> Port
portAndSuccess || files
}
def doRead(s: State)(arg: Either[Int, Seq[File]]): State =
arg match {
case Left(portAndSuccess) =>
val port = math.abs(portAndSuccess)
val previousSuccess = portAndSuccess >= 0
readMessage(port, previousSuccess) match {
case Some(message) => (message :: (ReadCommand + " " + port) :: s).copy(onFailure = Some(ReadCommand + " " + (-port)))
case None =>
System.err.println("Connection closed.")
s.fail
}
case Right(from) =>
val notFound = notReadable(from)
if (notFound.isEmpty)
readLines(from) ::: s // this means that all commands from all files are loaded, parsed, and inserted before any are executed
else {
s.log.error("Command file(s) not readable: \n\t" + notFound.mkString("\n\t"))
s
}
}
private def readMessage(port: Int, previousSuccess: Boolean): Option[String] =
{
// split into two connections because this first connection ends the previous communication
xsbt.IPC.client(port) { _.send(previousSuccess.toString) }
// and this second connection starts the next communication
xsbt.IPC.client(port) { ipc =>
val message = ipc.receive
if (message eq null) None else Some(message)
}
}
def alias = Command.make(AliasCommand, Help.more(AliasCommand, AliasDetailed)) { s =>
val name = token(OpOrID.examples(aliasNames(s): _*))
val assign = token(OptSpace ~ '=' ~ OptSpace)
val sfree = removeAliases(s)
val to = matched(sfree.combinedParser, partial = true).failOnException | any.+.string
val base = (OptSpace ~> (name ~ (assign ~> to.?).?).?)
applyEffect(base)(t => runAlias(s, t))
}
def runAlias(s: State, args: Option[(String, Option[Option[String]])]): State =
args match {
case None =>
printAliases(s); s
case Some(x ~ None) if !x.isEmpty =>
printAlias(s, x.trim); s
case Some(name ~ Some(None)) => removeAlias(s, name.trim)
case Some(name ~ Some(Some(value))) => addAlias(s, name.trim, value.trim)
}
def addAlias(s: State, name: String, value: String): State =
if (Command validID name) {
val removed = removeAlias(s, name)
if (value.isEmpty) removed else addAlias0(removed, name, value)
} else {
System.err.println("Invalid alias name '" + name + "'.")
s.fail
}
private[this] def addAlias0(s: State, name: String, value: String): State =
s.copy(definedCommands = newAlias(name, value) +: s.definedCommands)
def removeAliases(s: State): State = removeTagged(s, CommandAliasKey)
def removeAlias(s: State, name: String): State = s.copy(definedCommands = s.definedCommands.filter(c => !isAliasNamed(name, c)))
def removeTagged(s: State, tag: AttributeKey[_]): State = s.copy(definedCommands = removeTagged(s.definedCommands, tag))
def removeTagged(as: Seq[Command], tag: AttributeKey[_]): Seq[Command] = as.filter(c => !(c.tags contains tag))
def isAliasNamed(name: String, c: Command): Boolean = isNamed(name, getAlias(c))
def isNamed(name: String, alias: Option[(String, String)]): Boolean = alias match { case None => false; case Some((n, _)) => name == n }
def getAlias(c: Command): Option[(String, String)] = c.tags get CommandAliasKey
def printAlias(s: State, name: String): Unit = printAliases(aliases(s, (n, v) => n == name))
def printAliases(s: State): Unit = printAliases(allAliases(s))
def printAliases(as: Seq[(String, String)]): Unit =
for ((name, value) <- as)
println("\t" + name + " = " + value)
def aliasNames(s: State): Seq[String] = allAliases(s).map(_._1)
def allAliases(s: State): Seq[(String, String)] = aliases(s, (n, v) => true)
def aliases(s: State, pred: (String, String) => Boolean): Seq[(String, String)] =
s.definedCommands.flatMap(c => getAlias(c).filter(tupled(pred)))
def newAlias(name: String, value: String): Command =
Command.make(name, (name, "'" + value + "'"), "Alias of '" + value + "'")(aliasBody(name, value)).tag(CommandAliasKey, (name, value))
def aliasBody(name: String, value: String)(state: State): Parser[() => State] = {
val aliasRemoved = removeAlias(state, name)
// apply the alias value to the commands of `state` except for the alias to avoid recursion (#933)
val partiallyApplied = Parser(Command.combine(aliasRemoved.definedCommands)(aliasRemoved))(value)
val arg = matched(partiallyApplied & (success() | (SpaceClass ~ any.*)))
// by scheduling the expanded alias instead of directly executing, we get errors on the expanded string (#598)
arg.map(str => () => (value + str) :: state)
}
def delegateToAlias(name: String, orElse: Parser[() => State])(state: State): Parser[() => State] =
aliases(state, (nme, _) => nme == name).headOption match {
case None => orElse
case Some((n, v)) => aliasBody(n, v)(state)
}
val CommandAliasKey = AttributeKey[(String, String)]("is-command-alias", "Internal: marker for Commands created as aliases for another command.")
}
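Most of the definitions above follow the same pattern: a Command pairs a name (and optional Help) with a parser-driven effect on State. A hedged sketch of a custom command in that style; the command name, parser, and message are made up for illustration:
import sbt.{Command, State}
import sbt.complete.DefaultParsers._
object GreetCommandExample {
  // `greet <name>` logs a greeting and returns the State unchanged.
  val greet: Command = Command("greet")(_ => Space ~> StringBasic) { (state, name) =>
    state.log.info(s"Hello, $name")
    state
  }
  // Installed by adding it to the defined commands, e.g. `commands += GreetCommandExample.greet`.
}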
|
twitter-forks/sbt
|
main/command/src/main/scala/sbt/BasicCommands.scala
|
Scala
|
bsd-3-clause
| 13,448
|
package de.fosd.typechef.crewrite
import de.fosd.typechef.featureexpr._
import java.io.StringWriter
import de.fosd.typechef.parser.c._
import de.fosd.typechef.typesystem._
import de.fosd.typechef.error.{Severity, TypeChefError}
import de.fosd.typechef.parser.c.FunctionDef
import de.fosd.typechef.parser.c.TranslationUnit
import de.fosd.typechef.conditional.Opt
sealed abstract class CAnalysisFrontend(tunit: TranslationUnit) extends CFGHelper {
protected val env = CASTEnv.createASTEnv(tunit)
protected val fdefs = filterAllASTElems[FunctionDef](tunit)
}
class CInterAnalysisFrontend(tunit: TranslationUnit, fm: FeatureModel = FeatureExprFactory.empty) extends CAnalysisFrontend(tunit) with InterCFG {
def getTranslationUnit(): TranslationUnit = tunit
def writeCFG(title: String, writer: CFGWriter) {
val env = CASTEnv.createASTEnv(tunit)
writer.writeHeader(title)
def lookupFExpr(e: AST): FeatureExpr = e match {
case o if env.isKnown(o) => env.featureExpr(o)
case e: ExternalDef => externalDefFExprs.getOrElse(e, FeatureExprFactory.True)
case _ => FeatureExprFactory.True
}
for (f <- fdefs) {
writer.writeMethodGraph(getAllSucc(f, env).map {
x => (x._1, x._2.distinct.filter { y => y.condition.isSatisfiable(fm)}) // filter duplicates and wrong succs
}, lookupFExpr, f.declarator.getName)
}
writer.writeFooter()
writer.close()
if (writer.isInstanceOf[StringWriter])
println(writer.toString)
}
}
// TODO: refactoring different dataflow analyses into a composite will reduce code: handling of invalid paths, error printing ...
class CIntraAnalysisFrontend(tunit: TranslationUnit, ts: CTypeSystemFrontend with CDeclUse, fm: FeatureModel = FeatureExprFactory.empty) extends CAnalysisFrontend(tunit) with IntraCFG {
private lazy val udm = ts.getUseDeclMap
private lazy val dum = ts.getDeclUseMap
private def getDecls(key: Id): List[Id] = {
if (! udm.containsKey(key)) List(key)
else udm.get(key).filter { d => env.featureExpr(d) and env.featureExpr(key) isSatisfiable fm }
}
private val fanalyze = fdefs.map {
x => (x, getAllSucc(x, env).filterNot { x => x._1.isInstanceOf[FunctionDef] } )
}
var errors: List[TypeChefError] = List()
def deadStore(): Boolean = {
val err = fanalyze.flatMap(deadStore)
if (err.isEmpty) {
println("No dead stores found!")
} else {
println(err.map(_.toString + "\n").reduce(_ + _))
}
errors ++= err
err.isEmpty
}
private def deadStore(fa: (FunctionDef, List[(AST, List[Opt[AST]])])): List[TypeChefError] = {
var err: List[TypeChefError] = List()
val df = new Liveness(env, udm, FeatureExprFactory.empty)
val nss = fa._2.map(_._1)
for (s <- nss) {
for ((i, fi) <- df.kill(s)) {
val out = df.out(s)
        // code such as "int a;" occurs frequently and would otherwise be reported as an error;
        // we filter such cases out by checking the declaration-use map for usages
if (dum.containsKey(i) && dum.get(i).nonEmpty) {}
else out.find { case (t, _) => t == i } match {
case None =>
val idecls = getDecls(i)
if (idecls.exists(isPartOf(_, fa._1)) && fi.isSatisfiable(fm))
err ::= new TypeChefError(Severity.Warning, fi,
"warning: Variable " + i.name + " is a dead store!", i, "")
case Some((x, z)) =>
val xdecls = getDecls(x)
val idecls = getDecls(i)
for (ei <- idecls) {
// with isPartOf we reduce the number of false positives, since we only check local variables and function parameters.
// an assignment to a global variable might be used in another function
if (isPartOf(ei, fa._1) && xdecls.exists(_.eq(ei)) && fi.and(z.not()).isSatisfiable(fm))
err ::= new TypeChefError(Severity.Warning, z.not(),
"warning: Variable " + i.name + " is a dead store!", i, "")
}
}
}
}
err
}
def doubleFree(): Boolean = {
val casestudy = {
tunit.getFile match {
case None => ""
case Some(x) =>
if (x.contains("linux")) "linux"
else if (x.contains("openssl")) "openssl"
else ""
}
}
val err = fanalyze.flatMap(doubleFree(_, casestudy))
if (err.isEmpty) {
println("No double frees found!")
} else {
println(err.map(_.toString + "\n").reduce(_ + _))
}
errors ++= err
err.isEmpty
}
private def doubleFree(fa: (FunctionDef, List[(AST, List[Opt[AST]])]), casestudy: String): List[TypeChefError] = {
var err: List[TypeChefError] = List()
val df = new DoubleFree(env, dum, udm, FeatureExprFactory.empty, casestudy)
val nss = fa._2.map(_._1).filterNot(x => x.isInstanceOf[FunctionDef])
for (s <- nss) {
val g = df.gen(s)
if (g.nonEmpty) {
val in = df.in(s)
for (((i, _), h) <- in)
g.find { case ((t, _), _) => t == i } match {
case None =>
case Some(((x, _), _)) =>
val xdecls = getDecls(x)
val idecls = getDecls(i)
for (ei <- idecls)
if (xdecls.exists(_.eq(ei)) && h.isSatisfiable(fm))
err ::= new TypeChefError(Severity.Warning, h,
"warning: Variable " + x.name + " is freed multiple times!", x, "")
}
}
}
err
}
def uninitializedMemory(): Boolean = {
val err = fanalyze.flatMap(uninitializedMemory)
if (err.isEmpty) {
println("No usages of uninitialized memory found!")
} else {
println(err.map(_.toString + "\n").reduce(_ + _))
}
errors ++= err
err.isEmpty
}
private def uninitializedMemory(fa: (FunctionDef, List[(AST, List[Opt[AST]])])): List[TypeChefError] = {
var err: List[TypeChefError] = List()
val um = new UninitializedMemory(env, dum, udm, FeatureExprFactory.empty)
val nss = fa._2.map(_._1).filterNot(x => x.isInstanceOf[FunctionDef])
for (s <- nss) {
val g = um.getRelevantIdUsages(s)
if (g.nonEmpty) {
val in = um.in(s)
for (((i, _), h) <- in)
for (((x, _), _) <- g if x == i) {
val xdecls = getDecls(x)
val idecls = getDecls(i)
for (ei <- idecls)
if (xdecls.exists(_.eq(ei)) && h.isSatisfiable(fm)) {
err ::= new TypeChefError(Severity.Warning, h,
"warning: Variable " + x.name + " is used uninitialized!", x, "")
}
}
}
}
err
}
def xfree(): Boolean = {
val err = fanalyze.flatMap(xfree)
if (err.isEmpty) {
println("No static allocated memory is freed!")
} else {
println(err.map(_.toString + "\n").reduce(_ + _))
}
errors ++= err
err.isEmpty
}
private def xfree(fa: (FunctionDef, List[(AST, List[Opt[AST]])])): List[TypeChefError] = {
var err: List[TypeChefError] = List()
val xf = new XFree(env, dum, udm, FeatureExprFactory.empty, "")
val nss = fa._2.map(_._1).filterNot(x => x.isInstanceOf[FunctionDef])
for (s <- nss) {
val g = xf.freedVariables(s)
if (g.nonEmpty) {
val in = xf.in(s)
for (((i,_), h) <- in)
g.find(_ == i) match {
case None =>
case Some(x) =>
val xdecls = getDecls(x)
val idecls = getDecls(i)
for (ei <- idecls)
if (xdecls.exists(_.eq(ei)) && h.isSatisfiable(fm))
err ::= new TypeChefError(Severity.Warning, h,
"warning: Variable " + x.name + " is freed although not dynamically allocated!", x, "")
}
}
}
err
}
def danglingSwitchCode(): Boolean = {
val err = fanalyze.flatMap { x => danglingSwitchCode(x._1) }
if (err.isEmpty) {
println("No dangling code in switch statements found!")
} else {
println(err.map(_.toString + "\n").reduce(_ + _))
}
errors ++= err
err.isEmpty
}
private def danglingSwitchCode(f: FunctionDef): List[TypeChefError] = {
val ss = filterAllASTElems[SwitchStatement](f)
val ds = new DanglingSwitchCode(env)
ss.flatMap(s => {
// collect skips entries whose condition is unsatisfiable instead of raising a MatchError
ds.danglingSwitchCode(s).collect {
case x if x.condition.isSatisfiable(fm) =>
new TypeChefError(Severity.Warning, x.condition,
"warning: switch statement has dangling code ", x.entry, "")
}
})
}
def cfgInNonVoidFunc(): Boolean = {
val err = fanalyze.flatMap(cfgInNonVoidFunc)
if (err.isEmpty) {
println("Control flow in non-void functions always ends in return statements!")
} else {
println(err.map(_.toString + "\n").reduce(_ + _))
}
errors ++= err
err.isEmpty
}
private def cfgInNonVoidFunc(fa: (FunctionDef, List[(AST, List[Opt[AST]])])): List[TypeChefError] = {
val cf = new CFGInNonVoidFunc(env, ts)
// collect skips entries whose condition is unsatisfiable instead of raising a MatchError
cf.cfgInNonVoidFunc(fa._1).collect {
case x if x.condition.isSatisfiable(fm) =>
new TypeChefError(Severity.Warning, x.condition,
"Control flow of non-void function ends here!", x.entry, "")
}
}
def caseTermination(): Boolean = {
val err = fanalyze.flatMap(caseTermination)
if (err.isEmpty) {
println("Case statements with code are properly terminated with break statements!")
} else {
println(err.map(_.toString + "\n").reduce(_ + _))
}
errors ++= err
err.isEmpty
}
private def caseTermination(fa: (FunctionDef, List[(AST, List[Opt[AST]])])): List[TypeChefError] = {
val casestmts = filterAllASTElems[CaseStatement](fa._1)
val ct = new CaseTermination(env)
// collect skips case statements whose presence condition is unsatisfiable instead of raising a MatchError
casestmts.filterNot(ct.isTerminating).collect {
case x if env.featureExpr(x).isSatisfiable(fm) =>
new TypeChefError(Severity.Warning, env.featureExpr(x),
"Case statement is not terminated by a break!", x, "")
}
}
def stdLibFuncReturn(): Boolean = {
val err = fanalyze.flatMap(stdLibFuncReturn)
if (err.isEmpty) {
println("Return values of stdlib functions are properly checked for errors!")
} else {
println(err.map(_.toString + "\n").reduce(_ + _))
}
errors ++= err
err.isEmpty
}
private def stdLibFuncReturn(fa: (FunctionDef, List[(AST, List[Opt[AST]])])): List[TypeChefError] = {
var err: List[TypeChefError] = List()
val cl: List[StdLibFuncReturn] = List(
new StdLibFuncReturn_EOF(env, dum, udm, fm),
new StdLibFuncReturn_Null(env, dum, udm, FeatureExprFactory.empty)
)
for ((s, _) <- fa._2) {
for (cle <- cl) {
lazy val errorvalues = cle.errorreturn.map(PrettyPrinter.print).mkString(" 'or' ")
// check CFG element directly; without dataflow analysis
for (e <- cle.checkForPotentialCalls(s)) {
err ::= new TypeChefError(Severity.SecurityWarning, env.featureExpr(e), "Return value of " +
PrettyPrinter.print(e) + " is not properly checked for (" + errorvalues + ")!", e)
}
// stdlib call is assigned to a variable that we track with our dataflow analysis
// we check whether used variables that hold the value of a stdlib function are killed in s,
// if not we report an error
val g = cle.getUsedVariables(s)
val in = cle.in(s)
for (((e, _), fi) <- in) {
g.find { case ((t, _), _) => t == e } match {
case None =>
case Some((k@(x, _), _)) =>
val xdecls = getDecls(x)
val edecls = getDecls(e)
for (ee <- edecls) {
val kills = cle.kill(s)
if (xdecls.exists(_.eq(ee)) && !kills.contains(k) && fi.isSatisfiable(fm)) {
err ::= new TypeChefError(Severity.SecurityWarning, fi, "The value of " +
PrettyPrinter.print(e) + " is not properly checked for (" + errorvalues + ")!", e)
}
}
}
}
}
}
err
}
}
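// Illustrative usage sketch (not part of the original file): it assumes a parsed
// TranslationUnit and an initialized CTypeSystemFrontend with CDeclUse are already
// available from the TypeChef frontend; obtaining them is outside the scope of this file.
object CIntraAnalysisFrontendSketch {
  def runAllChecks(tunit: TranslationUnit,
                   ts: CTypeSystemFrontend with CDeclUse,
                   fm: FeatureModel): List[TypeChefError] = {
    val frontend = new CIntraAnalysisFrontend(tunit, ts, fm)
    // each check prints a summary and appends its findings to frontend.errors
    frontend.deadStore()
    frontend.doubleFree()
    frontend.uninitializedMemory()
    frontend.xfree()
    frontend.danglingSwitchCode()
    frontend.cfgInNonVoidFunc()
    frontend.caseTermination()
    frontend.stdLibFuncReturn()
    frontend.errors
  }
}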
|
mbeddr/TypeChef
|
CRewrite/src/main/scala/de/fosd/typechef/crewrite/CAnalysisFrontend.scala
|
Scala
|
lgpl-3.0
| 13,856
|
package com.github.chengpohi.repl
import java.io.IOException
import java.util
import jline.console.ConsoleReader
import jline.console.completer.{
CandidateListCompletionHandler,
CompletionHandler
}
import scala.collection.JavaConverters._
/**
 * EQLCompletionHandler
* Created by chengpohi on 3/25/16.
*/
class EQLCompletionHandler extends CompletionHandler {
@throws[IOException]
override def complete(reader: ConsoleReader,
candidates: util.List[CharSequence],
position: Int): Boolean = {
candidates.size() match {
case 1 =>
val value = candidates.asScala.head
setBuffer(reader, value, position)
true
case i if i > 1 => {
val value = candidates.asScala.head
setBuffer(reader, value, position)
CandidateListCompletionHandler.printCandidates(reader, candidates)
reader.drawLine()
true
}
case _ =>
true
}
}
@throws[IOException]
def setBuffer(reader: ConsoleReader, value: CharSequence, offset: Int) {
while ((reader.getCursorBuffer.cursor > offset) && reader.backspace) {}
reader.putString(value)
reader.setCursorPosition(offset + value.length)
}
}
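// Illustrative wiring sketch (not in the original file): the handler is installed on a
// jline ConsoleReader, so a single candidate is written straight into the buffer and
// multiple candidates are printed below the prompt before the line is redrawn.
object EQLCompletionHandlerSketch {
  def newReader(): ConsoleReader = {
    val reader = new ConsoleReader()
    reader.setCompletionHandler(new EQLCompletionHandler)
    reader
  }
}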
|
chengpohi/elasticshell
|
modules/repl/src/main/scala/com/github/chengpohi/repl/EQLCompletionHandler.scala
|
Scala
|
apache-2.0
| 1,284
|
package com.blogspot.nurkiewicz.spring
import org.apache.commons.dbcp.BasicDataSource
import org.springframework.transaction.annotation.EnableTransactionManagement
import org.springframework.stereotype.Controller
import org.springframework.web.servlet.config.annotation.EnableWebMvc
import org.springframework.orm.jpa.vendor.HibernateJpaVendorAdapter
import org.springframework.orm.jpa.JpaTransactionManager
import org.hibernate.dialect.H2Dialect
import scalaj.collection.Implicits._
import org.springframework.dao.annotation.PersistenceExceptionTranslationPostProcessor
import org.springframework.orm.jpa.support.PersistenceAnnotationBeanPostProcessor
import org.springframework.context.annotation._
import org.hibernate.cfg.ImprovedNamingStrategy
import org.h2.tools.Server
import org.springframework.core.io.ClassPathResource
import org.springframework.scheduling.quartz.SchedulerFactoryBean
import org.springframework.cache.ehcache.{EhCacheManagerFactoryBean, EhCacheCacheManager}
import management.ManagementFactory
import net.sf.ehcache.management.ManagementService
import org.springframework.jmx.export.annotation.AnnotationMBeanExporter
/**
* @author Tomasz Nurkiewicz
* @since 09.10.11, 23:01
*/
@Configuration
@EnableTransactionManagement(proxyTargetClass = true)
@ComponentScan(basePackages = Array("com.blogspot.nurkiewicz"),
scopedProxy = ScopedProxyMode.TARGET_CLASS,
excludeFilters = Array(
new ComponentScan.Filter(value = Array[Class[_]](classOf[Controller], classOf[ComponentScan], classOf[EnableWebMvc]))
))
@ImportResource(Array("classpath:/applicationContext.xml"))
class SpringConfiguration {
@Bean(destroyMethod = "close")
def dataSource() = {
val ds = new ManagedBasicDataSource()
ds.setDriverClassName("org.h2.Driver")
ds.setUrl("jdbc:h2:mem:jmx-dashboard")
ds.setUsername("sa")
ds
}
@Bean
def transactionManager() = new JpaTransactionManager(entityManagerFactory())
def entityManagerFactory() = entityManagerFactoryBean().getObject
@Bean
def entityManagerFactoryBean() = {
val entityManagerFactoryBean = new JmxLocalContainerEntityManagerFactoryBean()
entityManagerFactoryBean.setDataSource(dataSource())
entityManagerFactoryBean.setJpaVendorAdapter(jpaVendorAdapter())
entityManagerFactoryBean.setPackagesToScan("com.blogspot.nurkiewicz")
entityManagerFactoryBean.setJpaPropertyMap(
Map(
"hibernate.hbm2ddl.auto" -> "create",
"hibernate.format_sql" -> "true",
"hibernate.ejb.naming_strategy" -> classOf[ImprovedNamingStrategy].getName,
"hibernate.generate_statistics" -> true.toString
).asJava
)
entityManagerFactoryBean
}
@Bean
def jpaVendorAdapter() = {
val vendorAdapter = new HibernateJpaVendorAdapter()
vendorAdapter.setDatabasePlatform(classOf[H2Dialect].getName)
vendorAdapter
}
@Bean
def persistenceExceptionTranslationPostProcessor() = new PersistenceExceptionTranslationPostProcessor()
@Bean
def persistenceAnnotationBeanPostProcessor() = new PersistenceAnnotationBeanPostProcessor()
@Bean(initMethod = "start", destroyMethod = "stop")
def h2WebServer() = Server.createWebServer("-webDaemon", "-webAllowOthers")
@Bean
def schedulerFactory() = {
val schedulerFactoryBean = new SchedulerFactoryBean()
schedulerFactoryBean.setConfigLocation(new ClassPathResource("quartz.properties"))
schedulerFactoryBean.setWaitForJobsToCompleteOnShutdown(true)
schedulerFactoryBean
}
@Bean def cacheManager = {
val ehCacheCacheManager = new EhCacheCacheManager
ehCacheCacheManager.setCacheManager(ehCacheManager())
ehCacheCacheManager
}
@Bean def ehCacheManagerFactoryBean = {
val ehCacheManagerFactoryBean = new EhCacheManagerFactoryBean
ehCacheManagerFactoryBean.setShared(true)
ehCacheManagerFactoryBean.setCacheManagerName("jmx-dashboard")
ehCacheManagerFactoryBean
}
def ehCacheManager() = ehCacheManagerFactoryBean.getObject
@Bean def platformMBeanServer() = ManagementFactory.getPlatformMBeanServer
@Bean(initMethod = "init", destroyMethod = "dispose")
def managementService = new ManagementService(ehCacheManager(), platformMBeanServer(), true, true, true, true, true)
@Bean def annotationMBeanExporter() = new AnnotationMBeanExporter()
}
|
nurkiewicz/jmx-dashboard
|
src/main/scala/com/blogspot/nurkiewicz/spring/SpringConfiguration.scala
|
Scala
|
apache-2.0
| 4,208
|
package com.eclipsesource.schema.internal
import java.text.MessageFormat
import com.osinka.i18n.{Lang, Messages}
import scala.util.Try
object ValidatorMessages {
val DefaultMessages: Map[String, String] = Map(
"obj.missing.prop.dep" -> "Missing property dependency {0}.",
"obj.max.props" -> "Too many properties. {0} properties found, but only a maximum of {1} is allowed.",
"obj.min.props" -> "Found {0} properties, but a minimum of {1} is required.",
"obj.additional.props" -> "Additional properties are not allowed, but found properties {0}.",
"obj.required.prop" -> "Property {0} missing.",
"arr.max" -> "Too many items. {0} items found, but only a maximum of {1} is allowed.",
"arr.min" -> "Found {0} items, but a minimum of {1} is required.",
"arr.dups" -> "Found duplicates.",
"arr.out.of.bounds" -> "Array index {0} out of bounds.",
"arr.invalid.index" -> "Invalid array index {0}.",
"str.pattern" -> "''{0}'' does not match pattern ''{1}''.",
"str.invalid.pattern" -> "Invalid pattern ''{0}''.",
"str.min.length" -> "''{0}'' does not match minimum length of {1}.",
"str.max.length" -> "''{0}'' exceeds maximum length of {1}.",
"str.format" -> "''{0}'' does not match format {1}.",
"num.multiple.of" -> "{0} is not a multiple of {1}.",
"num.max" -> "{0} exceeds maximum value of {1}.",
"num.max.exclusive" -> "{0} exceeds exclusive maximum value of {1}.",
"num.min" -> "{0} is smaller than required minimum value of {1}.",
"num.min.exclusive" -> "{0} is smaller than required exclusive minimum value of {1}.",
"any.not" -> "Instance matches schema although it must not.",
"any.all" -> "Instance does not match all schemas.",
"any.any" -> "Instance does not match any of the schemas.",
"any.one.of.none" -> "Instance does not match any schema.",
"any.one.of.many" -> "Instance matches more than one schema.",
"any.enum" -> "Instance is invalid enum value.",
"any.const" -> "Instance does not match const value.",
"comp.no.schema" -> "No schema applicable.",
"err.expected.type" -> "Wrong type. Expected {0}, was {1}.",
"err.unresolved.ref" -> "Could not resolve ref {0}.",
"err.prop.not.found" -> "Could not find property {0}.",
"err.ref.expected" -> "Expected to find ref at {0}.",
"err.res.scope.id.empty" -> "Resolution scope ID must not be empty.",
"err.parse.json" -> "Could not parse JSON.",
"err.max.depth" -> "Maximum recursion depth reached.",
"err.dependencies.not.found" -> "Dependency not found.",
"err.definitions.not.found" -> "Definition not found.",
"err.patternProperties.not.found" -> "Pattern Properties not found.",
"err.false.schema" -> "Boolean false schema encountered.",
"err.contains" -> "Array does not contain valid item.",
"err.if.then.else" -> "Conditional validation failed."
)
def apply(msg: String, args: Any*)(implicit lang: Lang): String = {
Try(Messages(msg, args:_*))
.getOrElse(
new MessageFormat(DefaultMessages(msg)).format(args.map(_.asInstanceOf[Object]).toArray)
)
}
}
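// Usage sketch (illustrative, not part of the original file): when no i18n bundle supplies
// the key, the call falls back to DefaultMessages and java.text.MessageFormat, so
// "obj.required.prop" with argument "name" renders as "Property name missing.".
// Lang("en") is assumed to be the com.osinka.i18n factory for an English locale.
object ValidatorMessagesSketch {
  implicit val lang: Lang = Lang("en")
  val missingProp: String = ValidatorMessages("obj.required.prop", "name")
}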
|
eclipsesource/play-json-schema-validator
|
src/main/scala/com/eclipsesource/schema/internal/ValidatorMessages.scala
|
Scala
|
apache-2.0
| 3,121
|
package com.bloomberg.sparkflow.serialization
import org.apache.spark.hax.MyClosureCleaner
import org.scalatest.FunSuite
import com.bloomberg.sparkflow._
import com.bloomberg.sparkflow.serialization.Hashing._
import org.apache.spark.hax.SerializeUtil.clean
/**
* Created by ngoehausen on 3/23/16.
*/
class HashingTest extends FunSuite {
test("functionHashing"){
var param = 7
val input = 5
val another = (x: Int) => x * 2
val nested = (x: Int) => x * 4 + param + another(x)
val g = (x: Int) => nested(x) + param
val initialOutput = g(input)
val initialGHash = hashClass(g)
assert(initialGHash != hashClass(nested))
assert(initialGHash != hashClass(another))
assert(initialGHash == hashClass(g))
param = 10
assert(initialGHash != hashClass(g))
assert(initialOutput != g(input))
}
test("dcHashing"){
val numbers = parallelize(1 to 10)
val filtered = numbers.filter(_ < 6)
val doubled = filtered.map(_ * 2)
val after = doubled.map(SomeFunctions.func4)
println(numbers.getSignature)
println(filtered.getSignature)
println(doubled.getSignature)
println(after.getSignature)
}
}
|
nimbusgo/spark-flow
|
src/test/scala/com/bloomberg/sparkflow/serialization/HashingTest.scala
|
Scala
|
apache-2.0
| 1,179
|
package com.chobostudy.audtjddld.datastructure
import org.scalatest.FunSuite
import com.chobostudy.datastructure._
/**
* @author audtjddld
* @since 08/24/2017
*/
class SolutionP55Test extends FunSuite {
test("this test case that check of hasSubsequence. ex) List(1,2,3,4) hasSubsequence List(1,2)") {
val expect = true
val result = SolutionP55.hasSubsequence(List(1, 2, 3, 4), List(1, 2))
assert(result == expect)
}
test("this test case that check of hasSubsequence. ex) List(1,2,3,4) hasSubsequence List(2,3)") {
val expect = true
val result = SolutionP55.hasSubsequence(List(1, 2, 3, 4), List(2, 3))
assert(result == expect)
}
test("this test case that check of hasSubsequence. ex) List(1,2,3,4) hasSubsequence List(4)") {
val expect = true
val result = SolutionP55.hasSubsequence(List(1, 2, 3, 4), List(4))
assert(result == expect)
}
}
|
codechobostudy/FPIS
|
src/test/scala/com/chobostudy/audtjddld/datastructure/SolutionP55Test.scala
|
Scala
|
apache-2.0
| 899
|
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.google.cloud.genomics.spark.examples.rdd
import java.lang.{Double => JDouble}
import java.util.{List => JList}
import com.google.cloud.genomics.Client
import com.google.cloud.genomics.utils.OfflineAuth
import com.google.cloud.genomics.utils.ShardBoundary
import com.google.cloud.genomics.utils.ShardUtils
import com.google.cloud.genomics.utils.ShardUtils.SexChromosomeFilter
import com.google.cloud.genomics.utils.grpc.VariantStreamIterator
import com.google.genomics.v1.StreamVariantsRequest
import com.google.genomics.v1.{Variant => VariantModel}
import com.google.genomics.v1.{VariantCall => CallModel}
import com.google.protobuf.ByteString
import com.google.protobuf.ListValue
import com.google.protobuf.Value
import org.apache.spark.Accumulator
import org.apache.spark.Partition
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.TaskContext
import org.apache.spark.rdd.RDD
import scala.collection.JavaConversions._
/**
* A serializable version of the Variant.
* https://github.com/googlegenomics/spark-examples/issues/84
*/
case class Call(callsetId: String, callsetName: String, genotype: List[Integer],
genotypeLikelihood: Option[List[JDouble]], phaseset: String,
info: Map[String, List[String]]) extends Serializable
case class Variant(contig: String, id: String, names: Option[List[String]],
start: Long, end: Long, referenceBases: String,
alternateBases: Option[List[String]], info: Map[String, List[String]],
created: Long, variantSetId: String, calls: Option[Seq[Call]]) extends Serializable {
def toListValue(values: List[String]) = {
val listValue = ListValue.newBuilder()
listValue.addAllValues(
values.map(Value.newBuilder().setStringValue(_).build))
listValue.build
}
def toJavaVariant() = {
val variant = VariantModel.newBuilder()
.setReferenceName(this.contig)
.setCreated(this.created)
.setVariantSetId(this.variantSetId)
.setId(this.id)
.setStart(this.start)
.setEnd(this.end)
.setReferenceBases(this.referenceBases)
variant.putAllInfo(this.info.mapValues(toListValue))
if (this.alternateBases isDefined)
variant.addAllAlternateBases(this.alternateBases.get)
if (this.names isDefined)
variant.addAllNames(this.names.get)
if (this.calls isDefined) {
val calls = this.calls.get.map
{ c =>
val call = CallModel.newBuilder()
.setCallSetId(c.callsetId)
.setCallSetName(c.callsetName)
call.addAllGenotype(c.genotype)
call.setPhaseset(c.phaseset)
call.putAllInfo(c.info.mapValues(toListValue))
if (c.genotypeLikelihood isDefined)
call.addAllGenotypeLikelihood(c.genotypeLikelihood.get)
call.build
}
variant.addAllCalls(calls)
}
variant.build
}
}
object VariantsBuilder {
val refNameRegex = """([a-z]*)?([0-9]*)""".r
def normalize(referenceName: String) = {
referenceName match {
case refNameRegex(ref, id) => Some(id)
case _ => None
}
}
def toStringList(values: ListValue) =
values.getValuesList.map(_.getStringValue()).toList
def build(r: VariantModel) = {
val variantKey = VariantKey(r.getReferenceName, r.getStart)
val calls = if (r.getCallsCount > 0)
Some(r.getCallsList.map(
c => Call(
c.getCallSetId,
c.getCallSetName,
c.getGenotypeList.toList,
if (c.getGenotypeLikelihoodCount > 0)
Some(c.getGenotypeLikelihoodList.toList)
else
None,
c.getPhaseset,
c.getInfo.mapValues(toStringList).toMap)))
else
None
val referenceName = normalize(r.getReferenceName)
if (referenceName.isEmpty) {
None;
} else {
val variant = Variant(
referenceName.get,
r.getId,
if (r.getNamesCount() > 0)
Some(r.getNamesList.toList)
else
None,
r.getStart,
r.getEnd,
r.getReferenceBases,
if (r.getAlternateBasesCount() > 0)
Some(r.getAlternateBasesList.toList)
else
None,
r.getInfo.mapValues(toStringList).toMap,
r.getCreated,
r.getVariantSetId,
calls)
Some((variantKey, variant))
}
}
}
class VariantsRddStats(sc: SparkContext) extends Serializable {
val partitionsAccum = sc.accumulator(0, "Partitions count")
val referenceBasesAccum = sc.accumulator(0L, "Reference bases count")
val requestsAccum = sc.accumulator(0, "Request count")
val unsuccessfulResponsesAccum = sc.accumulator(0, "Unsuccessful count")
val ioExceptionsAccum = sc.accumulator(0, "IO exceptions count")
val variantsAccum = sc.accumulator(0, "Variant count")
override def toString = {
val buf = new StringBuilder
buf ++= "Variants API stats:\n"
buf ++= "-------------------------------\n"
buf ++= s"# of partitions: ${this.partitionsAccum}\n"
buf ++= s"# of bases requested: ${this.referenceBasesAccum}\n"
buf ++= s"# of variants read: ${this.variantsAccum}\n"
buf ++= s"# of API requests: ${this.requestsAccum}\n"
buf ++= s"# of unsuccessful responses: ${this.unsuccessfulResponsesAccum}\n"
buf ++= s"# of IO exceptions: ${this.ioExceptionsAccum}\n"
buf.toString
}
}
/**
* A simple Spark RDD backed by Google Genomics VariantStore and
* populated via the StreamVariants API call
* (https://cloud.google.com/genomics/reference/rpc/google.genomics.v1#streamingvariantservice).
*/
class VariantsRDD(sc: SparkContext,
applicationName: String,
auth: OfflineAuth,
variantSetId: String,
variantsPartitioner: VariantsPartitioner,
stats:Option[VariantsRddStats] = None)
extends RDD[(VariantKey, Variant)](sc, Nil) {
override def getPartitions: Array[Partition] = {
variantsPartitioner.getPartitions(variantSetId)
}
def reportStats(client: Client) = stats map { stat =>
stat.requestsAccum += client.initializedRequestsCount
stat.unsuccessfulResponsesAccum += client.unsuccessfulResponsesCount
stat.ioExceptionsAccum += client.ioExceptionsCount
}
override def compute(part: Partition, ctx: TaskContext):
Iterator[(VariantKey, Variant)] = {
val client = Client(auth)
val partition = part.asInstanceOf[VariantsPartition]
val request = partition.getVariantsRequest
val responses = VariantStreamIterator.enforceShardBoundary(
auth, request, ShardBoundary.Requirement.STRICT, null);
val iterator = responses.flatMap(variantResponse => {
variantResponse.getVariantsList().map(variant => {
stats map { _.variantsAccum += 1 }
VariantsBuilder.build(variant)
})
}).filter(_.isDefined).map(_.get)
stats map { stat =>
stat.partitionsAccum += 1
stat.referenceBasesAccum += (partition.range)
}
// Wrap the iterator to read the number of initialized requests once
// it is fully traversed.
new Iterator[(VariantKey, Variant)]() {
def hasNext = {
val hasNext = iterator.hasNext
if (!hasNext) {
reportStats(client)
}
hasNext
}
def next = iterator.next
}
}
}
/**
* Defines a search range over a contig.
*/
case class VariantsPartition(
override val index: Int, serializedRequest: ByteString)
extends Partition {
def getVariantsRequest = StreamVariantsRequest.parseFrom(serializedRequest)
def range = {
val request = getVariantsRequest
request.getEnd() - request.getStart()
}
}
/**
* Indexes a variant to its partition.
*/
case class VariantKey(contig: String, position: Long)
trait VariantsPartitioner extends Serializable {
def getPartitions(variantSetId: String): Array[Partition]
}
/**
* Describes partitions for a set of contigs and their ranges.
*/
class AllReferencesVariantsPartitioner(numberOfBasesPerShard: Long,
auth: OfflineAuth) extends VariantsPartitioner {
// Generates all partitions for all mapped variants in the contig space.
def getPartitions(variantSetId: String): Array[Partition] = {
println(s"Variantset: ${variantSetId}; All refs, exclude XY")
ShardUtils.getVariantRequests(
variantSetId, SexChromosomeFilter.EXCLUDE_XY,
numberOfBasesPerShard, auth).zipWithIndex.map {
case(request, index) => VariantsPartition(index, request.toByteString())
}.toArray
}
}
class ReferencesVariantsPartitioner(references: String,
numberOfBasesPerShard: Long) extends VariantsPartitioner {
// Generates all partitions for all mapped variants in the contig space.
def getPartitions(variantSetId: String): Array[Partition] = {
println(s"Variantset: ${variantSetId}; Refs: ${references}")
ShardUtils.getVariantRequests(
variantSetId, references, numberOfBasesPerShard).zipWithIndex.map {
case(request, index) => VariantsPartition(index, request.toByteString)
}.toArray
}
}
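// Construction sketch (illustrative, not in the original file): the caller supplies the
// SparkContext, OfflineAuth and variant set id; 16,000,000 bases per shard is an
// arbitrary placeholder value.
object VariantsRDDSketch {
  def build(sc: SparkContext, auth: OfflineAuth, variantSetId: String): RDD[(VariantKey, Variant)] = {
    val stats = new VariantsRddStats(sc)
    val partitioner = new AllReferencesVariantsPartitioner(16000000L, auth)
    new VariantsRDD(sc, "spark-examples-sketch", auth, variantSetId, partitioner, Some(stats))
  }
}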
|
googlegenomics/spark-examples
|
src/main/scala/com/google/cloud/genomics/spark/examples/rdd/VariantsRDD.scala
|
Scala
|
apache-2.0
| 9,668
|
package com.wordtrellis.projecteuler
/**
*
* Problem 4
* A palindromic number reads the same both ways.
* The largest palindrome made from the product of two 2-digit numbers is
* 9009 = 91 * 99.
* Find the largest palindrome made from the product of two 3-digit numbers.
*
* @author : Todd Cook
*
*/
object problem_4 {
def main(args: Array[String]): Unit = {
println(findLargestPalindromicNumber(3))
}
def findLargestPalindromicNumber(digits: Int): (Int, Int, Int) = {
createPalindromicNumberList(digits - 1, digits).sortWith(_._3 > _._3).head
}
def createPalindromicNumberList(digitPlacesStart: Int,
digitPlacesEnd: Int): List[(Int, Int, Int)] = {
require(digitPlacesStart < digitPlacesEnd)
val palindromes =
for (a <- (createCeilingNumber(digitPlacesStart) + 1 to
createCeilingNumber(digitPlacesEnd)).toList;
b <- a to createCeilingNumber(digitPlacesEnd);
p = a * b
if isPalindrome(p.toString)) yield (a, b, p)
palindromes
}
def isPalindrome(s: String): Boolean = s.reverse.mkString == s
def createCeilingNumber(digits: Int): Int = ("9" * digits).toInt
}
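// Worked check (illustrative, not in the original file): for two 2-digit factors the
// comment above gives 9009 = 91 * 99, which findLargestPalindromicNumber(2) reproduces.
object problem_4_check {
  def main(args: Array[String]): Unit = {
    assert(problem_4.findLargestPalindromicNumber(2) == (91, 99, 9009))
  }
}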
|
todd-cook/Effective-Scala-with-Project-Euler
|
src/main/scala/com/wordtrellis/projecteuler/problem_4.scala
|
Scala
|
mit
| 1,210
|
package sample.blog.author
import java.util.concurrent.atomic.AtomicInteger
import akka.actor.ActorSystem
import scala.collection.immutable
import scala.concurrent.duration._
import scala.concurrent.Await
import scala.util.Success
import akka.testkit.{ TestActorRef, TestProbe }
import akka.util.Timeout
import demesne._
import demesne.testkit._
import org.scalatest.Tag
import omnibus.akka.envelope._
import omnibus.akka.publish.ReliablePublisher.ReliableMessage
import omnibus.identifier.ShortUUID
import sample.blog.author.AuthorListingModule.{ GetPosts, Posts }
import sample.blog.post.Post
import sample.blog.post.PostPrototol.PostPublished
object AuthorListingModuleSpec {
val sysId = new AtomicInteger()
}
/**
* Created by damonrolfs on 9/18/14.
*/
class AuthorListingModuleSpec extends ParallelAkkaSpec {
override def createAkkaFixture(
test: OneArgTest,
system: ActorSystem,
slug: String
): Fixture = {
new AuthorListingFixture( system, slug )
}
override type Fixture = AuthorListingFixture
class AuthorListingFixture( _system: ActorSystem, _slug: String )
extends AkkaFixture( _slug, _system ) {
import scala.concurrent.ExecutionContext.Implicits.global
override def before( test: OneArgTest ): Unit = {
import demesne.repository.StartProtocol
import akka.pattern.AskableActorSelection
val supervisorSel = new AskableActorSelection(
system actorSelection s"/user/${boundedContext.name}-repositories"
)
implicit val timeout = Timeout( 5.seconds )
Await.ready( (supervisorSel ? StartProtocol.WaitForStart), 5.seconds )
}
override def after( test: OneArgTest ): Unit = {}
override val rootTypes: Set[AggregateRootType] = Set.empty[AggregateRootType]
val authorProbe = TestProbe()
override lazy val boundedContext: BoundedContext = {
val result = {
for {
made <- BoundedContext.make(
Symbol( slug ),
config,
userResources = AuthorListingModule.resources( system )
)
ready = made.withStartTask( AuthorListingModule.startTask )
started <- ready.start()( global, Timeout( 5.seconds ) )
} yield started
}
Await.result( result, 5.seconds )
}
}
object WIP extends Tag( "wip" )
val header = EnvelopeHeader(
fromComponentType = ComponentType( "component-type" ),
fromComponentPath = ComponentPath( "akka://Test/user/post" ),
toComponentPath = ComponentPath( "akka://Test/user/author" ),
messageType = MessageType( "posting" ),
workId = WorkId( ShortUUID() ),
messageNumber = MessageNumber( 13 ),
version = EnvelopeVersion( 7 ),
properties = EnvelopeProperties()
)
def nextPostId: Post#TID = Post.identifying.next
"Author listing Module should" should {
"extract cluster id from message" in { fixture: Fixture =>
// implicit val system = fixture.system
val extractor = AuthorListingModule.AuthorListing.idExtractor
val pp = PostPublished( sourceId = nextPostId, author = "Damon", title = "Extraction" )
extractor( pp ) shouldBe ( ( pp.author, pp ) )
val gp = GetPosts( author = "Damon" )
extractor( gp ) shouldBe ( ( gp.author, gp ) )
val epp = Envelope( payload = pp, header = header )
extractor( epp ) shouldBe ( ( pp.author, epp ) )
val egp = Envelope( payload = gp, header = header )
extractor( egp ) shouldBe ( ( gp.author, egp ) )
val rpp = ReliableMessage( 3L, epp )
extractor( rpp ) shouldBe ( ( pp.author, rpp ) )
val rgp = ReliableMessage( 7L, egp )
extractor( rgp ) shouldBe ( ( gp.author, rgp ) )
}
"extract shard from message" in { fixture: Fixture =>
// implicit val system = fixture.system
val shard = AuthorListingModule.AuthorListing.shardResolver
val author = "Damon"
val authorHash = (math.abs( author.hashCode ) % 100).toString
val pp = PostPublished( sourceId = nextPostId, author = author, title = "Extraction" )
shard( pp ) shouldBe authorHash
val gp = GetPosts( author = "Damon" )
shard( gp ) shouldBe authorHash
val epp = Envelope( payload = pp, header = header )
shard( epp ) shouldBe authorHash
val egp = Envelope( payload = gp, header = header )
shard( egp ) shouldBe authorHash
val rpp = ReliableMessage( 3L, epp )
shard( rpp ) shouldBe authorHash
val rgp = ReliableMessage( 7L, egp )
shard( rgp ) shouldBe authorHash
}
"handle PostPublished event" in { fixture: Fixture =>
import fixture._
val pp = PostPublished( sourceId = nextPostId, author = "Damon", title = "Handle Publishing" )
val real = TestActorRef[AuthorListingModule.AuthorListing].underlyingActor
real.posts shouldBe Vector.empty
real.receive( pp )
real.posts shouldBe IndexedSeq( pp )
}
"respond to GetPosts requests" in { fixture: Fixture =>
import akka.pattern.ask
import fixture._
implicit val timeout = Timeout( 5.seconds )
val pp = PostPublished( sourceId = nextPostId, author = "Damon", title = "Handle Publishing" )
val ref = TestActorRef[AuthorListingModule.AuthorListing]
val real = ref.underlyingActor
// val expected: immutable.IndexedSeq[PostPublished] = immutable.IndexedSeq( pp )
real.posts shouldBe Vector.empty
val r1 = ref ? GetPosts( "Damon" )
val Success( Posts( a1 ) ) = r1.value.get
a1 shouldBe immutable.IndexedSeq.empty
real.receive( pp )
val r2 = ref ? GetPosts( "Damon" )
val Success( Posts( a2 ) ) = r2.value.get
a2 shouldBe immutable.IndexedSeq( pp )
}
}
}
|
dmrolfs/demesne
|
examples/src/test/scala/blog/AuthorListingModuleSpec.scala
|
Scala
|
apache-2.0
| 5,715
|
package chrome
import scala.scalajs.js
package object alarms {
def create(name: String, alarmInfo: AlarmInfo): Unit = Impl.create(name, alarmInfo)
def create(alarmInfo: AlarmInfo): Unit = Impl.create(alarmInfo)
def get(name: String, callback: js.Function1[js.UndefOr[Alarm], _]): Unit = Impl.get(name, callback)
def get(callback: js.Function1[js.UndefOr[Alarm], _]): Unit = Impl.get(callback)
def clear(name: String, callback: js.Function1[Boolean, _]): Unit = Impl.clear(name, callback)
def clear(name: String): Unit = Impl.clear(name)
def clear(callback: js.Function1[Boolean, _]): Unit = Impl.clear(callback)
def clear(): Unit = Impl.clear()
}
|
erdavila/auto-steamgifts
|
src/main/scala/chrome/alarms/package.scala
|
Scala
|
mit
| 667
|
/*
* Copyright 2011-2021 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.compiler
import java.io.File
import scala.collection.JavaConversions._
import com.asakusafw.lang.compiler.api.CompilerOptions
import com.asakusafw.lang.compiler.api.JobflowProcessor.{ Context => JPContext }
import com.asakusafw.lang.compiler.api.testing.MockJobflowProcessorContext
trait UsingCompilerContext {
def newCompilerContext(flowId: String): MockCompilerContext = {
new MockCompilerContext(flowId)
}
def newJobCompilerContext(flowId: String, outputDir: File): MockCompilerContext.JobCompiler = {
newJobCompilerContext(
flowId,
new MockJobflowProcessorContext(
new CompilerOptions("buildid", "", Map.empty[String, String]),
Thread.currentThread.getContextClassLoader,
outputDir))
}
def newJobCompilerContext(
flowId: String,
jpContext: JPContext): MockCompilerContext.JobCompiler = {
new MockCompilerContext.JobCompiler(flowId)(jpContext)
}
def newNodeCompilerContext(flowId: String, outputDir: File): MockCompilerContext.NodeCompiler = {
newNodeCompilerContext(
flowId,
new MockJobflowProcessorContext(
new CompilerOptions("buildid", "", Map.empty[String, String]),
Thread.currentThread.getContextClassLoader,
outputDir))
}
def newNodeCompilerContext(
flowId: String,
jpContext: JPContext): MockCompilerContext.NodeCompiler = {
new MockCompilerContext.NodeCompiler(flowId)(jpContext)
}
def newOperatorCompilerContext(flowId: String): MockCompilerContext.OperatorCompiler = {
new MockCompilerContext.OperatorCompiler(flowId)
}
def newAggregationCompilerContext(flowId: String): MockCompilerContext.AggregationCompiler = {
new MockCompilerContext.AggregationCompiler(flowId)
}
}
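// Usage sketch (illustrative, not in the original file): a compiler test mixes the trait in
// and derives whichever context flavour it needs for a given flow id.
object UsingCompilerContextSketch extends UsingCompilerContext {
  val operatorContext: MockCompilerContext.OperatorCompiler =
    newOperatorCompilerContext("flowId")
  val aggregationContext: MockCompilerContext.AggregationCompiler =
    newAggregationCompilerContext("flowId")
}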
|
asakusafw/asakusafw-spark
|
compiler/src/test/scala/com/asakusafw/spark/compiler/UsingCompilerContext.scala
|
Scala
|
apache-2.0
| 2,381
|
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.check.jsonpath
import io.gatling.core.check._
import com.fasterxml.jackson.databind.JsonNode
object JsonPathExtractors {
def find[X: JsonFilter](name: String, path: String, occurrence: Int, jsonPaths: JsonPaths): FindCriterionExtractor[JsonNode, String, X] =
new FindCriterionExtractor[JsonNode, String, X](
name,
path,
occurrence,
jsonPaths.extractAll(_, path).map(_.slice(occurrence, occurrence + 1).nextOption())
)
def findAll[X: JsonFilter](name: String, path: String, jsonPaths: JsonPaths): FindAllCriterionExtractor[JsonNode, String, X] =
new FindAllCriterionExtractor[JsonNode, String, X](
name,
path,
jsonPaths.extractAll(_, path).map(_.toVector.liftSeqOption)
)
def count(name: String, path: String, jsonPaths: JsonPaths): CountCriterionExtractor[JsonNode, String] =
new CountCriterionExtractor[JsonNode, String](
name,
path,
jsonPaths.extractAll[Any](_, path).map(i => Some(i.size))
)
}
|
gatling/gatling
|
gatling-core/src/main/scala/io/gatling/core/check/jsonpath/JsonPathExtractors.scala
|
Scala
|
apache-2.0
| 1,636
|
/*
* Copyright 2015 The kdtree authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.asoem.kdtree
trait PointValueTuple[+A] extends Product2[HyperPoint, A] {
def point: HyperPoint
def value : A
def dim = point.dim
override def _1 = point
override def _2 = value
override def canEqual(that: Any) = that.isInstanceOf[PointValueTuple[_]]
override def toString = point.toString
}
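// Minimal concrete implementation (illustrative, not in the original file): pairs a
// HyperPoint with an arbitrary payload value, as a kd-tree node entry would.
final class NodePayload[A](val point: HyperPoint, val value: A) extends PointValueTuple[A]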
|
hoesler/kdtree
|
src/main/scala/org/asoem/kdtree/PointValueTuple.scala
|
Scala
|
apache-2.0
| 922
|