| code (string, 5-1M chars) | repo_name (string, 5-109 chars) | path (string, 6-208 chars) | language (1 class) | license (15 classes) | size (int64, 5-1M) |
|---|---|---|---|---|---|
import Filter.Fragments
import scalaz.concurrent.Task
import scalaz._, Scalaz._
import scalaz.stream.Process
import scalaz.stream.Process._
object Fold {
trait Fold[T] {
type S
def sink: Sink[Task, T]
def fold: (S, T) => S
def init: S
def last(s: S): Task[Unit]
}
object Fold {
def fromSink[T](aSink: Sink[Task, T]) = new Fold[T] {
type S = Unit
lazy val sink: Sink[Task, T] = aSink
def fold = (u: Unit, fragment: T) => ()
def init = ()
def last(u: Unit) = Task(())
}
def unitSink[T] = Process((t: T) => Task(())).toSource.repeat
def unit[T] = fromSink(unitSink[T])
}
implicit class zipFolds[T](val fold1: Fold[T]) {
def *(fold2: Fold[T]) = new Fold[T] {
type S = (fold1.S, fold2.S)
def sink = fold1.sink.zipWith(fold2.sink) {
(f1: T => Task[Unit], f2: T => Task[Unit]) =>
(t: T) => f1(t) >> f2(t)
}
def fold = (s12: (fold1.S, fold2.S), t: T) =>
(fold1.fold(s12._1, t), fold2.fold(s12._2, t))
def last(s12: (fold1.S, fold2.S)) =
fold1.last(s12._1) >> fold2.last(s12._2)
def init = (fold1.init, fold2.init)
}
}
implicit def foldMonoid[T] = new Monoid[Fold[T]] {
def append(fold1: Fold[T], fold2: =>Fold[T]): Fold[T] =
fold1 * fold2
val zero = Fold.unit[T]
}
def foldState[S, T](action: (S, T) => S)
(init: S): Process1[T, S] = {
def go(state: S): Process1[T, S] =
Process.receive1 { t: T =>
val newState = action(state, t)
emit(newState) fby go(newState)
}
go(init)
}
def foldState[T](fold: Fold[T]): Process1[T, fold.S] = {
def go(state: fold.S): Process1[T, fold.S] =
Process.receive1 { t: T =>
val newState = fold.fold(state, t)
emit(newState) fby go(newState)
}
go(fold.init)
}
  // Example wiring: placeholders (`???` throws if forced), kept lazy so that the
  // enclosing object can still be initialized and `runFold` below stays usable.
  lazy val console: Fold[Fragment] = ???
  lazy val fragments: Fragments = ???
  lazy val last: Task[console.S] =
    logged(fragments)
      .drainW(console.sink)
      .pipe(foldState(console))
      .runLastOr(console.init)
  lazy val finish = last.map(console.last)
def runFold[T](process: Process[Task, T], fold: Fold[T]) = {
val last: Task[fold.S] =
logged(process)
.drainW(fold.sink)
.pipe(foldState(fold))
.runLastOr(fold.init)
last.map(fold.last)
}
def runFolds[T](process: Process[Task, T], folds: List[Fold[T]]) =
runFold(process, folds.suml)
}
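// ---------------------------------------------------------------------------
// Usage sketch (added for illustration; not part of the original source).
// It shows one plausible way to exercise the definitions above: define a Fold,
// combine two folds with `*`, and drain an in-memory Process through them via
// runFold. The names FoldUsageSketch, count, both, source and program are
// hypothetical.
// ---------------------------------------------------------------------------
object FoldUsageSketch {
  import Fold.{Fold => F, zipFolds, runFold}

  // A fold that counts elements and reports the total once the stream ends.
  val count: F[String] = new F[String] {
    type S = Int
    val sink: Sink[Task, String] = F.unitSink[String]  // no per-element side effect
    def fold = (n: Int, _: String) => n + 1
    def init = 0
    def last(n: Int) = Task(println(s"seen $n elements"))
  }

  // Both folds observe every element; their states are paired.
  val both = count * F.unit[String]

  // Draining an in-memory source yields a Task describing the run, whose result
  // is the combined fold's final `last` action.
  val source: Process[Task, String] = Process.emitAll(List("a", "b", "c")).toSource
  val program = runFold(source, both)
}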
| etorreborre/lambdajam-2014 | src/main/scala/Fold.scala | Scala | mit | 2,470 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.pmml.export
import java.text.SimpleDateFormat
import java.util.{Date, Locale}
import scala.beans.BeanProperty
import org.dmg.pmml.{Application, Header, PMML, Timestamp}
private[mllib] trait PMMLModelExport {
/**
* Holder of the exported model in PMML format
*/
@BeanProperty
val pmml: PMML = {
val version = getClass.getPackage.getImplementationVersion
val app = new Application("Apache Spark MLlib").setVersion(version)
val timestamp = new Timestamp()
.addContent(new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss", Locale.US).format(new Date()))
val header = new Header()
.setApplication(app)
.setTimestamp(timestamp)
new PMML("4.2", header, null)
}
}
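// ---------------------------------------------------------------------------
// Illustrative sketch (added; not part of the original file). A hypothetical
// concrete exporter would mix in the trait and populate the shared `pmml`
// instance built above; only attaching an empty DataDictionary is shown here,
// and the model-specific elements are omitted.
// ---------------------------------------------------------------------------
private[mllib] class ExamplePMMLModelExport extends PMMLModelExport {
  import org.dmg.pmml.DataDictionary
  // `pmml` already carries the header (application + timestamp) from the trait;
  // a real exporter would also add its Model element.
  pmml.setDataDictionary(new DataDictionary())
}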
| wangyixiaohuihui/spark2-annotation | mllib/src/main/scala/org/apache/spark/mllib/pmml/export/PMMLModelExport.scala | Scala | apache-2.0 | 1,581 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.carbondata.iud
import org.apache.spark.sql.Row
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
class DeleteCarbonTableSubqueryTestCase extends QueryTest with BeforeAndAfterAll {
override def beforeAll {
sql("use default")
sql("drop database if exists iud_db_sub cascade")
sql("create database iud_db_sub")
sql(
"""create table iud_db_sub.source2 (
|c11 string,c22 int,c33 string,c55 string, c66 int) STORED AS carbondata""".stripMargin)
sql(
s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/source2.csv'
| INTO table iud_db_sub.source2""".stripMargin)
sql("use iud_db_sub")
}
test("delete data from carbon table[where IN (sub query) ]") {
sql("""drop table if exists iud_db_sub.dest""")
sql(
"""create table iud_db_sub.dest (
|c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""".stripMargin).collect()
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db_sub.dest""")
sql("""delete from iud_db_sub.dest where c1 IN (select c11 from source2)""")
.collect()
checkAnswer(
sql("""select c1 from iud_db_sub.dest"""),
Seq(Row("c"), Row("d"), Row("e"))
)
sql("drop table if exists iud_db_sub.dest")
}
test("delete data from carbon table[where IN (sub query with where clause) ]") {
sql("""drop table if exists iud_db_sub.dest""")
sql(
"""create table iud_db_sub.dest (
|c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""".stripMargin).collect()
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db_sub.dest""")
sql(
"""delete from iud_db_sub.dest
| where c1 IN (select c11 from source2 where c11 = 'b')""".stripMargin).collect()
checkAnswer(
sql("""select c1 from iud_db_sub.dest"""),
Seq(Row("a"), Row("c"), Row("d"), Row("e"))
)
sql("drop table if exists iud_db_sub.dest")
}
override def afterAll {
sql("use default")
sql("drop table if exists iud_db_sub.source2")
sql("drop database if exists iud_db_sub cascade")
}
}
| zzcclp/carbondata | integration/spark/src/test/scala/org/apache/spark/carbondata/iud/DeleteCarbonTableSubqueryTestCase.scala | Scala | apache-2.0 | 2,972 |
package org.jetbrains.plugins.scala
package testingSupport.test.specs2
import com.intellij.openapi.project.Project
import com.intellij.execution.configurations.{RunConfiguration, ConfigurationType}
import testingSupport.test.AbstractTestRunConfigurationFactory
/**
* User: Alexander Podkhalyuzin
* Date: 03.05.2009
*/
class Specs2RunConfigurationFactory(override val typez: ConfigurationType)
extends AbstractTestRunConfigurationFactory(typez) {
def createTemplateConfiguration(project: Project): RunConfiguration = {
val configuration = new Specs2RunConfiguration(project, this, "")
configuration
}
}
| consulo/consulo-scala | src/org/jetbrains/plugins/scala/testingSupport/test/specs2/Specs2RunConfigurationFactory.scala | Scala | apache-2.0 | 622 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.matchers
import org.scalatest._
import org.scalatest.exceptions.TestFailedException
class ShouldBeSymbolSpec extends Spec with ShouldMatchers with EmptyMocks {
object `The be ('symbol) syntax` {
def `should do nothing if the object has an appropriately named method, which returns true` {
emptyMock should be ('empty)
isEmptyMock should be ('empty)
}
def `should throw TestFailedException if no <symbol> or is<Symbol> method exists` {
val ex1 = intercept[TestFailedException] {
noPredicateMock should be ('empty)
}
ex1.getMessage should equal ("NoPredicateMock has neither an empty nor an isEmpty method")
// Check message for name that starts with a consonant (should use a instead of an)
val ex2 = intercept[TestFailedException] {
noPredicateMock should be ('full)
}
ex2.getMessage should equal ("NoPredicateMock has neither a full nor an isFull method")
}
def `should do nothing if the object has an appropriately named method, which returns true, even if the method contains operator characters` {
val opNames = new OperatorNames
opNames should be ('op_21_!)
opNames should be ('op_23_#)
opNames should be ('op_25_%)
opNames should be ('op_26_&)
opNames should be ('op_2a_*)
opNames should be ('op_2b_+)
opNames should be ('op_2d_-)
opNames should be ('op_2f_/)
opNames should be ('op_3a_:)
opNames should be ('op_3c_<)
opNames should be ('op_3d_=)
opNames should be ('op_3e_>)
opNames should be ('op_3f_?)
opNames should be ('op_40_@)
opNames should be ('op_5c_\)
opNames should be ('op_5e_^)
opNames should be ('op_7c_|)
opNames should be ('op_7e_~)
opNames should be (Symbol("!!!"))
opNames should be (Symbol("###"))
opNames should be (Symbol("%%%"))
opNames should be (Symbol("&&&"))
opNames should be (Symbol("***"))
opNames should be (Symbol("+++"))
opNames should be (Symbol("---"))
opNames should be (Symbol("/"))
opNames should be (Symbol(":::"))
opNames should be (Symbol("<<<"))
opNames should be (Symbol("==="))
opNames should be (Symbol(">>>"))
opNames should be (Symbol("???"))
opNames should be (Symbol("@@@"))
opNames should be (Symbol("\\\\\\"))
opNames should be (Symbol("^^^"))
opNames should be (Symbol("|||"))
opNames should be (Symbol("~~~"))
}
def `should do nothing if the object has an appropriately named method, which returns false when used with not` {
notEmptyMock should not { be ('empty) }
notEmptyMock should not be ('empty)
isNotEmptyMock should not { be ('empty) }
isNotEmptyMock should not be ('empty)
}
def `should throw TestFailedException if no <symbol> or is<Symbol> method exists, when used with not` {
val ex1 = intercept[TestFailedException] {
noPredicateMock should not { be ('empty) }
}
ex1.getMessage should equal ("NoPredicateMock has neither an empty nor an isEmpty method")
val ex2 = intercept[TestFailedException] {
noPredicateMock should not (be ('full))
}
ex2.getMessage should equal ("NoPredicateMock has neither a full nor an isFull method")
val ex3 = intercept[TestFailedException] {
noPredicateMock should not be ('empty)
}
ex3.getMessage should equal ("NoPredicateMock has neither an empty nor an isEmpty method")
val ex4 = intercept[TestFailedException] {
noPredicateMock should not be ('full)
}
ex4.getMessage should equal ("NoPredicateMock has neither a full nor an isFull method")
}
def `should do nothing if the object has an appropriately named method, which returns true, when used in a logical-and expression` {
emptyMock should ((be ('empty)) and (be ('empty)))
emptyMock should (be ('empty) and (be ('empty)))
emptyMock should (be ('empty) and be ('empty))
isEmptyMock should ((be ('empty)) and (be ('empty)))
isEmptyMock should (be ('empty) and (be ('empty)))
isEmptyMock should (be ('empty) and be ('empty))
}
def `should do nothing if the object has an appropriately named method, which returns true, when used in a logical-or expression` {
emptyMock should ((be ('full)) or (be ('empty)))
emptyMock should (be ('full) or (be ('empty)))
emptyMock should (be ('full) or be ('empty))
isEmptyMock should ((be ('full)) or (be ('empty)))
isEmptyMock should (be ('full) or (be ('empty)))
isEmptyMock should (be ('full) or be ('empty))
emptyMock should ((be ('empty)) or (be ('full)))
emptyMock should (be ('empty) or (be ('full)))
emptyMock should (be ('empty) or be ('full))
isEmptyMock should ((be ('empty)) or (be ('full)))
isEmptyMock should (be ('empty) or (be ('full)))
isEmptyMock should (be ('empty) or be ('full))
}
def `should do nothing if the object has an appropriately named method, which returns false, when used in a logical-and expression with not` {
notEmptyMock should (not (be ('empty)) and not (be ('empty)))
notEmptyMock should ((not be ('empty)) and (not be ('empty)))
notEmptyMock should (not be ('empty) and not be ('empty))
isNotEmptyMock should (not (be ('empty)) and not (be ('empty)))
isNotEmptyMock should ((not be ('empty)) and (not be ('empty)))
isNotEmptyMock should (not be ('empty) and not be ('empty))
}
def `should do nothing if the object has an appropriately named method, which returns false, when used in a logical-or expression with not` {
notEmptyMock should (not (be ('empty)) or not (be ('empty)))
notEmptyMock should ((not be ('empty)) or (not be ('empty)))
notEmptyMock should (not be ('empty) or not be ('empty))
isNotEmptyMock should (not (be ('empty)) or not (be ('empty)))
isNotEmptyMock should ((not be ('empty)) or (not be ('empty)))
isNotEmptyMock should (not be ('empty) or not be ('empty))
notEmptyMock should (not (be ('full)) or not (be ('empty)))
notEmptyMock should ((not be ('full)) or (not be ('empty)))
notEmptyMock should (not be ('full) or not be ('empty))
isNotEmptyMock should (not (be ('full)) or not (be ('empty)))
isNotEmptyMock should ((not be ('full)) or (not be ('empty)))
isNotEmptyMock should (not be ('full) or not be ('empty))
}
def `should throw TestFailedException if the object has an appropriately named method, which returns false` {
val caught1 = intercept[TestFailedException] {
notEmptyMock should be ('empty)
}
assert(caught1.getMessage === "NotEmptyMock was not empty")
val caught2 = intercept[TestFailedException] {
isNotEmptyMock should be ('empty)
}
assert(caught2.getMessage === "IsNotEmptyMock was not empty")
}
def `should throw TestFailedException if the object has an appropriately named method, which returns true when used with not` {
val caught1 = intercept[TestFailedException] {
emptyMock should not { be ('empty) }
}
assert(caught1.getMessage === "EmptyMock was empty")
val caught2 = intercept[TestFailedException] {
emptyMock should not be ('empty)
}
assert(caught2.getMessage === "EmptyMock was empty")
val caught3 = intercept[TestFailedException] {
isEmptyMock should not { be ('empty) }
}
assert(caught3.getMessage === "IsEmptyMock was empty")
val caught4 = intercept[TestFailedException] {
isEmptyMock should not be ('empty)
}
assert(caught4.getMessage === "IsEmptyMock was empty")
}
def `should throw TestFailedException if the object has an appropriately named method, which returns false, when used in a logical-and expression` {
val caught1 = intercept[TestFailedException] {
emptyMock should ((be ('empty)) and (be ('full)))
}
assert(caught1.getMessage === "EmptyMock was empty, but EmptyMock was not full")
val caught2 = intercept[TestFailedException] {
emptyMock should (be ('empty) and (be ('full)))
}
assert(caught2.getMessage === "EmptyMock was empty, but EmptyMock was not full")
val caught3 = intercept[TestFailedException] {
emptyMock should (be ('empty) and be ('full))
}
assert(caught3.getMessage === "EmptyMock was empty, but EmptyMock was not full")
val caught4 = intercept[TestFailedException] {
isEmptyMock should ((be ('empty)) and (be ('full)))
}
assert(caught4.getMessage === "IsEmptyMock was empty, but IsEmptyMock was not full")
val caught5 = intercept[TestFailedException] {
isEmptyMock should (be ('empty) and (be ('full)))
}
assert(caught5.getMessage === "IsEmptyMock was empty, but IsEmptyMock was not full")
val caught6 = intercept[TestFailedException] {
isEmptyMock should (be ('empty) and be ('full))
}
assert(caught6.getMessage === "IsEmptyMock was empty, but IsEmptyMock was not full")
}
def `should throw TestFailedException if the object has an appropriately named method, which returns false, when used in a logical-or expression` {
val caught1 = intercept[TestFailedException] {
notEmptyMock should ((be ('empty)) or (be ('empty)))
}
assert(caught1.getMessage === "NotEmptyMock was not empty, and NotEmptyMock was not empty")
val caught2 = intercept[TestFailedException] {
notEmptyMock should (be ('empty) or (be ('empty)))
}
assert(caught2.getMessage === "NotEmptyMock was not empty, and NotEmptyMock was not empty")
val caught3 = intercept[TestFailedException] {
notEmptyMock should (be ('empty) or be ('empty))
}
assert(caught3.getMessage === "NotEmptyMock was not empty, and NotEmptyMock was not empty")
val caught4 = intercept[TestFailedException] {
isNotEmptyMock should ((be ('empty)) or (be ('empty)))
}
assert(caught4.getMessage === "IsNotEmptyMock was not empty, and IsNotEmptyMock was not empty")
val caught5 = intercept[TestFailedException] {
isNotEmptyMock should (be ('empty) or (be ('empty)))
}
assert(caught5.getMessage === "IsNotEmptyMock was not empty, and IsNotEmptyMock was not empty")
val caught6 = intercept[TestFailedException] {
isNotEmptyMock should (be ('empty) or be ('empty))
}
assert(caught6.getMessage === "IsNotEmptyMock was not empty, and IsNotEmptyMock was not empty")
}
def `should throw TestFailedException if the object has an appropriately named method, which returns true, when used in a logical-and expression with not` {
val caught1 = intercept[TestFailedException] {
emptyMock should (not (be ('full)) and not (be ('empty)))
}
assert(caught1.getMessage === "EmptyMock was not full, but EmptyMock was empty")
val caught2 = intercept[TestFailedException] {
emptyMock should ((not be ('full)) and (not be ('empty)))
}
assert(caught2.getMessage === "EmptyMock was not full, but EmptyMock was empty")
val caught3 = intercept[TestFailedException] {
emptyMock should (not be ('full) and not be ('empty))
}
assert(caught3.getMessage === "EmptyMock was not full, but EmptyMock was empty")
val caught4 = intercept[TestFailedException] {
isEmptyMock should (not (be ('full)) and not (be ('empty)))
}
assert(caught4.getMessage === "IsEmptyMock was not full, but IsEmptyMock was empty")
val caught5 = intercept[TestFailedException] {
isEmptyMock should ((not be ('full)) and (not be ('empty)))
}
assert(caught5.getMessage === "IsEmptyMock was not full, but IsEmptyMock was empty")
val caught6 = intercept[TestFailedException] {
isEmptyMock should (not be ('full) and not be ('empty))
}
assert(caught6.getMessage === "IsEmptyMock was not full, but IsEmptyMock was empty")
// Check that the error message "short circuits"
val caught7 = intercept[TestFailedException] {
emptyMock should (not (be ('empty)) and not (be ('full)))
}
assert(caught7.getMessage === "EmptyMock was empty")
}
def `should throw TestFailedException if the object has an appropriately named method, which returns true, when used in a logical-or expression with not` {
val caught1 = intercept[TestFailedException] {
emptyMock should (not (be ('empty)) or not (be ('empty)))
}
assert(caught1.getMessage === "EmptyMock was empty, and EmptyMock was empty")
val caught2 = intercept[TestFailedException] {
emptyMock should ((not be ('empty)) or (not be ('empty)))
}
assert(caught2.getMessage === "EmptyMock was empty, and EmptyMock was empty")
val caught3 = intercept[TestFailedException] {
emptyMock should (not be ('empty) or not be ('empty))
}
assert(caught3.getMessage === "EmptyMock was empty, and EmptyMock was empty")
val caught4 = intercept[TestFailedException] {
isEmptyMock should (not (be ('empty)) or not (be ('empty)))
}
assert(caught4.getMessage === "IsEmptyMock was empty, and IsEmptyMock was empty")
val caught5 = intercept[TestFailedException] {
isEmptyMock should ((not be ('empty)) or (not be ('empty)))
}
assert(caught5.getMessage === "IsEmptyMock was empty, and IsEmptyMock was empty")
val caught6 = intercept[TestFailedException] {
isEmptyMock should (not be ('empty) or not be ('empty))
}
assert(caught6.getMessage === "IsEmptyMock was empty, and IsEmptyMock was empty")
}
object `(for the different types that have implicit conversions for should methods)` {
// FOR: implicit def convertToCollectionShouldWrapper[T](o: Collection[T])...
def `should work on a scala.Collection` {
val emptySet = Set[Int]()
emptySet should be ('empty)
val nonEmptySet = Set(1, 2, 3)
nonEmptySet should not { be ('empty) }
val caught1 = intercept[TestFailedException] {
nonEmptySet should be ('empty)
}
assert(caught1.getMessage === "Set(1, 2, 3) was not empty")
val caught2 = intercept[TestFailedException] {
nonEmptySet should not { be ('hasDefiniteSize) }
}
assert(caught2.getMessage === "Set(1, 2, 3) was hasDefiniteSize")
val caught3 = intercept[TestFailedException] {
nonEmptySet should not { be ('happy) }
}
assert(caught3.getMessage === "Set(1, 2, 3) has neither a happy nor an isHappy method")
}
// FOR: implicit def convertToSeqShouldWrapper[T](o: Seq[T])...
def `should work on a scala.Seq` {
import scala.collection.mutable.ListBuffer
val emptyListBuffer = new ListBuffer[Int]
emptyListBuffer should be ('empty)
val nonEmptyListBuffer = new ListBuffer[Int]
nonEmptyListBuffer += 1
nonEmptyListBuffer += 2
nonEmptyListBuffer += 3
nonEmptyListBuffer should not { be ('empty) }
val caught1 = intercept[TestFailedException] {
nonEmptyListBuffer should be ('empty)
}
assert(caught1.getMessage === "ListBuffer(1, 2, 3) was not empty")
val caught2 = intercept[TestFailedException] {
nonEmptyListBuffer should not { be ('hasDefiniteSize) }
}
assert(caught2.getMessage === "ListBuffer(1, 2, 3) was hasDefiniteSize")
val caught3 = intercept[TestFailedException] {
nonEmptyListBuffer should not { be ('happy) }
}
assert(caught3.getMessage === "ListBuffer(1, 2, 3) has neither a happy nor an isHappy method")
}
// implicit def convertToArrayShouldWrapper[T](o: Array[T]): ArrayShouldWrapper[T] = new ArrayShouldWrapper[T](o)
/* This no longer works as of Scala 2.8
def `should work on a scala.Array` {
val emptyArray = new Array[Int](0)
emptyArray should be ('empty)
val nonEmptyArray = Array(1, 2, 3)
nonEmptyArray should not be ('empty)
val caught1 = intercept[TestFailedException] {
nonEmptyArray should be ('empty)
}
assert(caught1.getMessage === "Array(1, 2, 3) was not empty")
val caught2 = intercept[TestFailedException] {
nonEmptyArray should not { be ('hasDefiniteSize) }
}
assert(caught2.getMessage === "Array(1, 2, 3) was hasDefiniteSize")
val caught3 = intercept[TestFailedException] {
nonEmptyArray should not { be ('happy) }
}
assert(caught3.getMessage === "Array(1, 2, 3) has neither a happy nor an isHappy method")
}
*/
// FOR: implicit def convertToListShouldWrapper[T](o: List[T])...
def `should work on a scala.List` {
val emptyList = List[Int]()
emptyList should be ('empty)
val nonEmptyList = List(1, 2, 3)
nonEmptyList should not { be ('empty) }
val caught1 = intercept[TestFailedException] {
nonEmptyList should be ('empty)
}
assert(caught1.getMessage === "List(1, 2, 3) was not empty")
val caught2 = intercept[TestFailedException] {
nonEmptyList should not { be ('hasDefiniteSize) }
}
assert(caught2.getMessage === "List(1, 2, 3) was hasDefiniteSize")
val caught3 = intercept[TestFailedException] {
nonEmptyList should not { be ('happy) }
}
assert(caught3.getMessage === "List(1, 2, 3) has neither a happy nor an isHappy method")
}
// implicit def convertToMapShouldWrapper[K, V](o: scala.collection.Map[K, V])...
def `should work on a scala.Map` {
val emptyMap = Map[Int, String]()
emptyMap should be ('empty)
val nonEmptyMap = Map("one" -> 1, "two" -> 2, "three" -> 3)
nonEmptyMap should not { be ('empty) }
val caught1 = intercept[TestFailedException] {
nonEmptyMap should be ('empty)
}
assert(caught1.getMessage === "Map(one -> 1, two -> 2, three -> 3) was not empty")
val caught2 = intercept[TestFailedException] {
nonEmptyMap should not { be ('hasDefiniteSize) }
}
assert(caught2.getMessage === "Map(one -> 1, two -> 2, three -> 3) was hasDefiniteSize")
val caught3 = intercept[TestFailedException] {
nonEmptyMap should not { be ('happy) }
}
assert(caught3.getMessage === "Map(one -> 1, two -> 2, three -> 3) has neither a happy nor an isHappy method")
}
// implicit def convertToStringShouldWrapper[K, V](o: String): StringShouldWrapper = new StringShouldWrapper(o)
def `should work on a String` {
val caught3 = intercept[TestFailedException] {
"howdy" should not be ('happy)
}
assert(caught3.getMessage === "\"howdy\" has neither a happy nor an isHappy method")
}
// FOR: implicit def convertToJavaCollectionShouldWrapper[T](o: java.util.Collection[T])...
def `should work on a java.util.Collection` {
val emptySet = new java.util.HashSet[Int]
emptySet should be ('empty)
val nonEmptySet = new java.util.HashSet[Int]
nonEmptySet.add(1)
nonEmptySet.add(2)
nonEmptySet.add(3)
nonEmptySet should not { be ('empty) }
val caught1 = intercept[TestFailedException] {
nonEmptySet should be ('empty)
}
assert(caught1.getMessage endsWith "] was not empty")
val caught3 = intercept[TestFailedException] {
nonEmptySet should not { be ('happy) }
}
assert(caught3.getMessage endsWith "] has neither a happy nor an isHappy method")
}
// FOR: implicit def convertToJavaListShouldWrapper[T](o: java.util.List[T])...
def `should work on a java.util.List` {
val emptyList = new java.util.ArrayList[Int]
emptyList should be ('empty)
val nonEmptyList = new java.util.ArrayList[Int]
nonEmptyList.add(1)
nonEmptyList.add(2)
nonEmptyList.add(3)
nonEmptyList should not { be ('empty) }
val caught1 = intercept[TestFailedException] {
nonEmptyList should be ('empty)
}
assert(caught1.getMessage === "[1, 2, 3] was not empty")
val caught3 = intercept[TestFailedException] {
nonEmptyList should not { be ('happy) }
}
assert(caught3.getMessage === "[1, 2, 3] has neither a happy nor an isHappy method")
}
}
}
object `The be matcher` {
object `(for symbols)` {
// TODO: Make sure to write a test for each conversion, because some are using ShouldMethodsForAny instead
// of ShouldMethodsForAnyRef.
def `should be invokable from be a Symbol and be an Symbol` {
val emptySet = Set()
emptySet should be a ('empty)
emptySet should be an ('empty)
val nonEmptySet = Set(1, 2, 3)
nonEmptySet should not { be a ('empty) }
nonEmptySet should not { be an ('empty) }
}
def `should call empty when passed 'empty` {
class EmptyMock {
def empty: Boolean = true
}
class NonEmptyMock {
def empty: Boolean = false
}
(new EmptyMock) should be ('empty)
(new NonEmptyMock) should not { be ('empty) }
// (new NonEmptyMock) shouldNot be ('empty)
}
// STOLE FROM HERE
def `should call the Scala-style method if both an empty and an isEmpty method exist` {
class EmptyMock {
def empty: Boolean = true
def isEmpty: Boolean = false
override def toString = "EmptyMock"
}
class NonEmptyMock {
def empty: Boolean = false
def isEmpty: Boolean = true
override def toString = "NonEmptyMock"
}
(new EmptyMock) should be ('empty)
(new NonEmptyMock) should not { be ('empty) }
}
def `should access an 'empty' val when passed 'empty` {
class EmptyMock {
val empty: Boolean = true
}
class NonEmptyMock {
val empty: Boolean = false
}
(new EmptyMock) should be ('empty)
(new NonEmptyMock) should not { be ('empty) }
}
}
}
object `the be ('empty) syntax` {
def `should call isEmpty` {
val emptySet = Set[Int]()
emptySet should be ('empty)
val nonEmptySet = Set(1, 2, 3)
nonEmptySet should not { be ('empty) }
}
def `should call empty when passed 'empty` {
class EmptyMock {
def empty: Boolean = true
}
class NonEmptyMock {
def empty: Boolean = false
}
(new EmptyMock) should be ('empty)
(new NonEmptyMock) should not { be ('empty) }
// (new NonEmptyMock) shouldNot be ('empty)
}
def `should throw TestFailedException if no empty or isEmpty method` {
class EmptyMock {
override def toString = "EmptyMock"
}
class NonEmptyMock {
override def toString = "NonEmptyMock"
}
val ex1 = intercept[TestFailedException] {
(new EmptyMock) should be ('empty)
}
ex1.getMessage should equal ("EmptyMock has neither an empty nor an isEmpty method")
val ex2 = intercept[TestFailedException] {
(new NonEmptyMock) should not { be ('empty) }
}
ex2.getMessage should equal ("NonEmptyMock has neither an empty nor an isEmpty method")
}
def `should call the Scala-style method if both an empty and an isEmpty method exist` {
class EmptyMock {
def empty: Boolean = true
def isEmpty: Boolean = false
override def toString = "EmptyMock"
}
class NonEmptyMock {
def empty: Boolean = false
def isEmpty: Boolean = true
override def toString = "NonEmptyMock"
}
(new EmptyMock) should be ('empty)
(new NonEmptyMock) should not { be ('empty) }
}
def `should access an 'empty' val when passed 'empty` {
class EmptyMock {
val empty: Boolean = true
}
class NonEmptyMock {
val empty: Boolean = false
}
(new EmptyMock) should be ('empty)
(new NonEmptyMock) should not { be ('empty) }
// (new NonEmptyMock) shouldNot be ('empty)
}
}
object `The be 'defined syntax` {
def `should do nothing when used with a Some` {
val someString: Some[String] = Some("hi")
someString should be ('defined)
val optionString: Option[String] = Some("hi")
optionString should be ('defined)
}
def `should throw TestFailedException when used with a None` {
val none: None.type = None
val caught1 = intercept[TestFailedException] {
none should be ('defined)
}
assert(caught1.getMessage === "None was not defined")
val option: Option[Int] = None
val caught2 = intercept[TestFailedException] {
option should be ('defined)
}
assert(caught2.getMessage === "None was not defined")
}
def `should call defined` {
class DefinedMock {
def defined: Boolean = true
}
class NonDefinedMock {
def defined: Boolean = false
}
(new DefinedMock) should be ('defined)
(new NonDefinedMock) should not { be ('defined) }
// (new NonDefinedMock) shouldNot be ('defined)
}
def `should throw TestFailedException if no defined or isDefined method` {
class DefinedMock {
override def toString = "DefinedMock"
}
class NonDefinedMock {
override def toString = "NonDefinedMock"
}
val ex1 = intercept[TestFailedException] {
(new DefinedMock) should be ('defined)
}
ex1.getMessage should equal ("DefinedMock has neither a defined nor an isDefined method")
val ex2 = intercept[TestFailedException] {
(new NonDefinedMock) should not { be ('defined) }
}
ex2.getMessage should equal ("NonDefinedMock has neither a defined nor an isDefined method")
}
def `should call the Scala-style method if both a defined and an isDefined method exist` {
class DefinedMock {
def defined: Boolean = true
def isDefined: Boolean = false
override def toString = "DefinedMock"
}
class NonDefinedMock {
def defined: Boolean = false
def isDefined: Boolean = true
override def toString = "NonDefinedMock"
}
(new DefinedMock) should be ('defined)
(new NonDefinedMock) should not { be ('defined) }
}
def `should access a 'defined' val` {
class DefinedMock {
val defined: Boolean = true
}
class NonDefinedMock {
val defined: Boolean = false
}
(new DefinedMock) should be ('defined)
(new NonDefinedMock) should not { be ('defined) }
// (new NonDefinedMock) shouldNot be ('defined)
}
}
}
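// ---------------------------------------------------------------------------
// Sketch (added; hypothetical). The EmptyMocks fixture mixed into the spec is
// not shown in this file; inferred from the assertions above, it plausibly
// provides objects along these lines. Names and members are guesses based on
// the expected failure messages, not the actual ScalaTest test fixture.
// ---------------------------------------------------------------------------
trait EmptyMocksSketch {
  class EmptyMock {
    def empty: Boolean = true
    def full: Boolean = false
    override def toString = "EmptyMock"
  }
  class IsEmptyMock {
    def isEmpty: Boolean = true
    def isFull: Boolean = false
    override def toString = "IsEmptyMock"
  }
  class NotEmptyMock {
    def empty: Boolean = false
    override def toString = "NotEmptyMock"
  }
  class IsNotEmptyMock {
    def isEmpty: Boolean = false
    override def toString = "IsNotEmptyMock"
  }
  class NoPredicateMock {
    override def toString = "NoPredicateMock"
  }
  val emptyMock = new EmptyMock
  val isEmptyMock = new IsEmptyMock
  val notEmptyMock = new NotEmptyMock
  val isNotEmptyMock = new IsNotEmptyMock
  val noPredicateMock = new NoPredicateMock
}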
| hubertp/scalatest | src/test/scala/org/scalatest/matchers/ShouldBeSymbolSpec.scala | Scala | apache-2.0 | 27,685 |
package org.skrushingiv
import play.api.libs.json._
import scala.collection.MapLike
import scala.language.implicitConversions
package object json {
implicit class PathAdditions(val path: JsPath) extends AnyVal {
/**
* Determines the correct empty JsValue to use for a collection type.
*
* @param evidence Evidence that the type parameter `A` is a `Map`, or `null` if it is not. This value is not actually
* provided by the compiler here, but in the two calling methods `readNullableWhenEmpty` and
* `formatNullableWhenEmpty` instead. If not provided in those methods, it would be eliminated
* by erasure.
*/
private def emptyValue[A](implicit evidence: A <:< MapLike[_,_,_]): JsValue =
if (evidence == null) JsArray() else Json.obj()
/**
* When an undefined or null value is encountered, returns an empty collection of the specified type.
*
* This differs from the standard de-serializers provided by the play api in that a missing value is
* deserialized as an empty collection, instead of needing to wrap the collection in an option.
*
* @param evidence Compiler-provided evidence that the type parameter `A` is a `Map`, or `null`.
* @param r A `Reads` for the collection type.
*/
def readNullableWhenEmpty[A <: Traversable[_]](implicit r: Reads[A], evidence: A <:< MapLike[_,_,_] = null) =
lazyReadNullableWhenEmpty(r)(evidence)
def lazyReadNullableWhenEmpty[A <: Traversable[_]](r: => Reads[A])(implicit evidence: A <:< MapLike[_,_,_] = null) = Reads[A] { json =>
path.applyTillLast(json).fold(
error => error,
result => result.fold(
invalid = (_) => r.reads(emptyValue[A]),
valid = {
case JsNull => r.reads(emptyValue[A])
case js => r.reads(js).repath(path)
})
)
}
/**
* When an empty collection is provided, does not emit the path.
*
* This differs from the standard serializers provided by the play api in that there is no need to
* wrap the collection in an Option in order to omit the key in the serialized representation.
*
* @param w A `Writes` for the collection type.
*/
def writeNullableWhenEmpty[A <: Traversable[_]](implicit w: Writes[A]) =
lazyWriteNullableWhenEmpty(w)
def lazyWriteNullableWhenEmpty[A <: Traversable[_]](w: => Writes[A]) = OWrites[A] { a =>
if (a.isEmpty) Json.obj()
else JsPath.createObj((path, w.writes(a)))
}
/**
* When writing, the property is omitted when the collection is empty;
* when reading, an undefined value or empty JsArray becomes an empty collection.
*
* @param evidence Compiler-provided evidence that the type parameter `A` is a `Map`, or `null`.
* @param r A `Reads` for the collection type.
* @param w A `Writes` for the collection type.
*/
def formatNullableWhenEmpty[A <: Traversable[_]](implicit r: Reads[A], w: Writes[A], evidence: A <:< MapLike[_,_,_] = null): OFormat[A] =
OFormat[A](readNullableWhenEmpty[A], writeNullableWhenEmpty[A])
}
/**
* Implicit conversion for simpler-reading code when instantiating DiscriminatedWrites
*/
implicit def wrapClassWrites[A](clazz:Class[A])(implicit w: Writes[A]): (Class[A], Writes[A]) = (clazz, w)
/**
* Implicit conversion for simpler-reading code when instantiating DiscriminatedFormat
*/
implicit def wrapClassFormat[A](clazz:Class[A])(implicit f:Format[A] = null, r: Reads[A], w: Writes[A]): (Class[A], Format[A]) =
if (f != null) (clazz, f) else (clazz, Format(r, w))
}
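// ---------------------------------------------------------------------------
// Usage sketch (added; not part of the original source). A hypothetical case
// class whose `tags` field is dropped from JSON when empty and read back as an
// empty list when missing, using the PathAdditions extensions above. `Article`
// and PathAdditionsUsageSketch are illustrative names.
// ---------------------------------------------------------------------------
object PathAdditionsUsageSketch {
  import json._                              // bring PathAdditions into scope
  import play.api.libs.functional.syntax._

  case class Article(title: String, tags: List[String])

  implicit val articleFormat: OFormat[Article] = (
    (JsPath \ "title").format[String] and
    (JsPath \ "tags").formatNullableWhenEmpty[List[String]]
  )(Article.apply, unlift(Article.unapply))

  // Expected behaviour, per the scaladoc above:
  //   Json.toJson(Article("a", Nil))        -> {"title":"a"}       (no "tags" key emitted)
  //   Json.obj("title" -> "a").as[Article]  -> Article("a", Nil)   (missing key reads as Nil)
}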
| srushingiv/org.skrushingiv | src/main/scala/org/skrushingiv/json/package.scala | Scala | mit | 3,669 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection
/**
* An append-only map that keeps track of its estimated size in bytes.
*/
private[spark] class SizeTrackingAppendOnlyMap[K, V]
extends AppendOnlyMap[K, V] with SizeTracker
{
override def update(key: K, value: V): Unit = {
super.update(key, value)
super.afterUpdate()
}
override def changeValue(key: K, updateFunc: (Boolean, V) => V): V = {
val newValue = super.changeValue(key, updateFunc)
super.afterUpdate()
newValue
}
override protected def growTable(): Unit = {
super.growTable()
resetSamples()
}
}
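// ---------------------------------------------------------------------------
// Illustrative sketch (added; not part of the original file): how Spark-internal
// code might exercise the map. Both mutation paths below trigger a size sample;
// `estimateSize()` comes from SizeTracker. The object and names are hypothetical.
// ---------------------------------------------------------------------------
private[spark] object SizeTrackingAppendOnlyMapSketch {
  def example(): Long = {
    val counts = new SizeTrackingAppendOnlyMap[String, Long]
    counts.update("spark", 1L)
    // changeValue is told whether the key already exists, and its old value if so
    counts.changeValue("spark", (exists, old) => if (exists) old + 1L else 1L)
    counts.estimateSize()  // estimated size of the map in bytes
  }
}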
| sh-cho/cshSpark | util/collection/SizeTrackingAppendOnlyMap.scala | Scala | apache-2.0 | 1,396 |
/*
* The MIT License
*
* Copyright (c) 2016 Fulcrum Genomics LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
package com.fulcrumgenomics.umi
import com.fulcrumgenomics.FgBioDef.forloop
import com.fulcrumgenomics.bam.api.{SamRecord, SamWriter}
import com.fulcrumgenomics.commons.util.LazyLogging
import com.fulcrumgenomics.umi.ConsensusCaller.Base
import com.fulcrumgenomics.umi.UmiConsensusCaller.ReadType._
import com.fulcrumgenomics.umi.UmiConsensusCaller._
import com.fulcrumgenomics.umi.VanillaUmiConsensusCallerOptions._
import com.fulcrumgenomics.util.NumericTypes._
import scala.util.Random
/**
* Holds the defaults for consensus caller options.
*/
object VanillaUmiConsensusCallerOptions {
/** Various default values for the consensus caller. */
val DefaultTag: String = ConsensusTags.MolecularId
val DefaultErrorRatePreUmi: PhredScore = 45.toByte
val DefaultErrorRatePostUmi: PhredScore = 40.toByte
val DefaultMinInputBaseQuality: PhredScore = 10.toByte
val DefaultMinConsensusBaseQuality: PhredScore = 40.toByte
val DefaultMinReads: Int = 2
val DefaultMaxReads: Int = Int.MaxValue
val DefaultProducePerBaseTags: Boolean = true
val DefaultQualityTrim: Boolean = false
}
/**
* Holds the parameters/options for consensus calling.
*/
case class VanillaUmiConsensusCallerOptions
(
tag: String = DefaultTag,
errorRatePreUmi: PhredScore = DefaultErrorRatePreUmi,
errorRatePostUmi: PhredScore = DefaultErrorRatePostUmi,
minInputBaseQuality: PhredScore = DefaultMinInputBaseQuality,
qualityTrim: Boolean = DefaultQualityTrim,
minConsensusBaseQuality: PhredScore = DefaultMinConsensusBaseQuality,
minReads: Int = DefaultMinReads,
maxReads: Int = DefaultMaxReads,
producePerBaseTags: Boolean = DefaultProducePerBaseTags
)
/**
* Stores information about a consensus read. All four arrays are of equal length.
*
* Depths and errors with values exceeding Short.MaxValue (32767) will be capped
* at Short.MaxValue.
*
* @param bases the base calls of the consensus read
* @param quals the calculated phred-scaled quality scores of the bases
* @param depths the number of raw reads that contributed to the consensus call at each position
* @param errors the number of contributing raw reads that disagree with the final consensus base at each position
*/
case class VanillaConsensusRead(id: String, bases: Array[Byte], quals: Array[Byte], depths: Array[Short], errors: Array[Short]) extends SimpleRead {
require(bases.length == quals.length, "Bases and qualities are not the same length.")
require(bases.length == depths.length, "Bases and depths are not the same length.")
require(bases.length == errors.length, "Bases and errors are not the same length.")
/** Truncates the read to the given length. If len > current length, the read is returned at current length. */
def truncate(len: Int): VanillaConsensusRead = {
if (len >= this.length) this
else this.copy(bases=bases.take(len), quals=quals.take(len), depths=depths.take(len), errors=errors.take(len))
}
}
/** Calls consensus reads by grouping consecutive reads with the same SAM tag.
*
* Consecutive reads with the same SAM tag are partitioned into fragments, first of pair, and
* second of pair reads, and a consensus read is created for each partition. A consensus read
* for a given partition may not be returned if any of the conditions are not met (ex. minimum
* number of reads, minimum mean consensus base quality, ...).
* */
class VanillaUmiConsensusCaller(override val readNamePrefix: String,
override val readGroupId: String = "A",
val options: VanillaUmiConsensusCallerOptions = new VanillaUmiConsensusCallerOptions(),
val rejects: Option[SamWriter] = None
) extends UmiConsensusCaller[VanillaConsensusRead] with LazyLogging {
private val NotEnoughReadsQual: PhredScore = 0.toByte // Score output when masking to N due to insufficient input reads
private val TooLowQualityQual: PhredScore = 2.toByte // Score output when masking to N due to too low consensus quality
private val caller = new ConsensusCaller(errorRatePreLabeling = options.errorRatePreUmi,
errorRatePostLabeling = options.errorRatePostUmi)
/** Map from input qual score to output qual score in the case where there is only one read going into the consensus. */
private val SingleInputConsensusQuals: Array[Byte] = Range.inclusive(0, PhredScore.MaxValue).map { q =>
val lnProbOne = LogProbability.fromPhredScore(q)
val lnProbTwo = LogProbability.fromPhredScore(Math.min(this.options.errorRatePreUmi, this.options.errorRatePostUmi))
PhredScore.fromLogProbability(LogProbability.probabilityOfErrorTwoTrials(lnProbOne, lnProbTwo))
}.toArray
private val random = new Random(42)
/** Returns a clone of this consensus caller in a state where no previous reads were processed. I.e. all counters
* are set to zero.*/
def emptyClone(): VanillaUmiConsensusCaller = {
new VanillaUmiConsensusCaller(
readNamePrefix = readNamePrefix,
readGroupId = readGroupId,
options = options,
rejects = rejects
)
}
/** Returns the value of the SAM tag directly. */
override def sourceMoleculeId(rec: SamRecord): String = rec(this.options.tag)
/** Takes in all the SamRecords for a single source molecule and produces consensus records. */
override protected def consensusSamRecordsFromSamRecords(recs: Seq[SamRecord]): Seq[SamRecord] = {
// partition the records to which end of a pair it belongs, or if it is a fragment read.
val (fragments, firstOfPair, secondOfPair) = subGroupRecords(recs)
val builder = IndexedSeq.newBuilder[SamRecord]
// fragment
consensusFromSamRecords(records=fragments).map { frag =>
builder += createSamRecord(read=frag, readType=Fragment, fragments.flatMap(_.get[String](ConsensusTags.UmiBases)))
}
// pairs
(consensusFromSamRecords(firstOfPair), consensusFromSamRecords(secondOfPair)) match {
case (None, Some(_)) => rejectRecords(secondOfPair, UmiConsensusCaller.FilterOrphan)
case (Some(_), None) => rejectRecords(firstOfPair, UmiConsensusCaller.FilterOrphan)
case (None, None) => rejectRecords(firstOfPair ++ secondOfPair, UmiConsensusCaller.FilterOrphan)
case (Some(r1), Some(r2)) =>
builder += createSamRecord(r1, FirstOfPair, firstOfPair.flatMap(_.get[String](ConsensusTags.UmiBases)))
builder += createSamRecord(r2, SecondOfPair, secondOfPair.flatMap(_.get[String](ConsensusTags.UmiBases)))
}
builder.result()
}
/** Creates a consensus read from the given records. If no consensus read was created, None is returned. */
protected[umi] def consensusFromSamRecords(records: Seq[SamRecord]): Option[VanillaConsensusRead] = {
if (records.size < this.options.minReads) {
rejectRecords(records, UmiConsensusCaller.FilterInsufficientSupport)
None
}
else {
val sourceRecords = records.flatMap(toSourceRead(_, this.options.minInputBaseQuality, this.options.qualityTrim))
val filteredRecords = filterToMostCommonAlignment(sourceRecords)
if (filteredRecords.size < records.size) {
val r = records.head
val n = if (r.paired && r.secondOfPair) "/2" else "/1"
val m = r[String](this.options.tag)
val discards = records.size - filteredRecords.size
logger.debug("Discarded ", discards, "/", records.size, " records due to mismatched alignments for ", m, n)
}
if (filteredRecords.size >= this.options.minReads) consensusCall(filteredRecords) else {
rejectRecords(filteredRecords.flatMap(_.sam), UmiConsensusCaller.FilterInsufficientSupport)
None
}
}
}
/** Creates a consensus read from the given read and qualities sequences.
* If no consensus read was created, None is returned.
*
* The same number of base sequences and quality sequences should be given.
* */
private[umi] def consensusCall(reads: Seq[SourceRead]): Option[VanillaConsensusRead] = {
// check to see if we have enough reads.
if (reads.size < this.options.minReads) {
None
}
else {
// First limit to max reads if necessary
val capped = if (reads.size <= this.options.maxReads) reads else this.random.shuffle(reads).take(this.options.maxReads)
// get the most likely consensus bases and qualities
val consensusLength = consensusReadLength(capped, this.options.minReads)
val consensusBases = new Array[Base](consensusLength)
val consensusQuals = new Array[PhredScore](consensusLength)
val consensusDepths = new Array[Short](consensusLength)
val consensusErrors = new Array[Short](consensusLength)
if (capped.length == 1) {
val inBases = capped.head.bases
val inQuals = capped.head.quals
forloop (from=0, until=consensusLength) { i =>
val rawBase = inBases(i)
val rawQual = SingleInputConsensusQuals(inQuals(i))
val (base, qual) = if (rawQual < this.options.minConsensusBaseQuality) (NoCall, TooLowQualityQual) else (rawBase, rawQual)
val isNoCall = base == NoCall
consensusBases(i) = base
consensusQuals(i) = qual
consensusDepths(i) = if (isNoCall) 0 else 1
consensusErrors(i) = 0
}
}
else {
var positionInRead = 0
val builder = this.caller.builder()
while (positionInRead < consensusLength) {
// Add the evidence from all reads that are long enough to cover this base
capped.foreach { read =>
if (read.length > positionInRead) {
val base = read.bases(positionInRead)
val qual = read.quals(positionInRead)
if (base != NoCall) builder.add(base=base, qual=qual)
}
}
val depth = builder.contributions // NB: cache this value, as it is re-computed each time
// Call the consensus and do any additional filtering
val (rawBase, rawQual) = builder.call()
val (base, qual) = {
if (depth < this.options.minReads) (NoCall, NotEnoughReadsQual)
else if (rawQual < this.options.minConsensusBaseQuality) (NoCall, TooLowQualityQual)
else (rawBase, rawQual)
}
consensusBases(positionInRead) = base
consensusQuals(positionInRead) = qual
// Generate the values for depth and count of errors
val errors = if (rawBase == NoCall) depth else depth - builder.observations(rawBase)
consensusDepths(positionInRead) = if (depth > Short.MaxValue) Short.MaxValue else depth.toShort
consensusErrors(positionInRead) = if (errors > Short.MaxValue) Short.MaxValue else errors.toShort
// Get ready for the next pass
builder.reset()
positionInRead += 1
}
}
Some(VanillaConsensusRead(id=capped.head.id, bases=consensusBases, quals=consensusQuals, depths=consensusDepths, errors=consensusErrors))
}
}
/**
* Calculates the length of the consensus read that should be produced. The length is calculated
* as the maximum length at which minReads reads still have bases.
*
* @param reads the set of reads being fed into the consensus
* @param minReads the minimum number of reads required
* @return the length of consensus read that should be created
*/
protected def consensusReadLength(reads: Seq[SourceRead], minReads: Int): Int = {
require(reads.size >= minReads, "Too few reads to create a consensus.")
reads.map(_.length).sortBy(len => -len).drop(minReads-1).head
}
/** If a reject writer was provided, emit the reads to that writer. */
override protected def rejectRecords(recs: Iterable[SamRecord], reason: String): Unit = {
super.rejectRecords(recs, reason)
this.rejects.foreach(rej => rej ++= recs)
}
/** Creates a `SamRecord` from the called consensus base and qualities. */
override protected def createSamRecord(read: VanillaConsensusRead, readType: ReadType, umis: Seq[String] = Seq.empty): SamRecord = {
val rec = super.createSamRecord(read, readType, umis)
// Set some additional information tags on the read
rec(ConsensusTags.PerRead.RawReadCount) = read.depths.max.toInt
rec(ConsensusTags.PerRead.MinRawReadCount) = read.depths.min.toInt
rec(ConsensusTags.PerRead.RawReadErrorRate) = sum(read.errors) / sum(read.depths).toFloat
if (this.options.producePerBaseTags) {
rec(ConsensusTags.PerBase.RawReadCount) = read.depths
rec(ConsensusTags.PerBase.RawReadErrors) = read.errors
}
rec
}
}
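// ---------------------------------------------------------------------------
// Construction sketch (added; not part of the original file): a hypothetical
// instantiation showing how the options case class above feeds the caller. The
// values and the object name are illustrative only.
// ---------------------------------------------------------------------------
object VanillaUmiConsensusCallerSketch {
  private val opts = VanillaUmiConsensusCallerOptions(
    minReads           = 3,     // require at least three raw reads per consensus
    maxReads           = 100,   // randomly downsample deeper read families
    producePerBaseTags = true
  )
  val caller = new VanillaUmiConsensusCaller(
    readNamePrefix = "consensus",
    readGroupId    = "A",
    options        = opts
  )
}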
| fulcrumgenomics/fgbio | src/main/scala/com/fulcrumgenomics/umi/VanillaUmiConsensusCaller.scala | Scala | mit | 14,127 |
package epic.features
import epic.framework.Feature
import breeze.linalg.Counter
import scala.Array
import scala.collection.mutable.ArrayBuffer
import StandardSpanFeatures._
case class FirstWordCapsAnd(f: Feature) extends Feature
case class NthWordCapsAnd(f: Feature) extends Feature
case class SentenceLengthFeature(length: Int) extends Feature
case object WholeSentenceIsUpperCaseFeature extends Feature
case class WordFeature(word: Any, kind: Symbol) extends Feature
case object BoundaryFeature extends Feature
trait SpanFeature extends Feature
object StandardSpanFeatures {
case class WordBoundary[L, W](label: L, w: W) extends SpanFeature
// Huang's WordEdges Feature without distance
case class WordEdges[L, W](label: L, left: W, right: W) extends SpanFeature
case class ShortUnary[W](rule: Int, w: W) extends SpanFeature
}
| jovilius/epic | src/main/scala/epic/features/StandardSurfaceFeaturizer.scala | Scala | apache-2.0 | 844 |
/*
* Test file for immutable queues.
*/
import scala.collection.immutable.Queue
object iq {
def main(): Unit = {
/* Create an empty queue. */
val q: Queue[Int] = Queue.empty
/* Test isEmpty.
* Expected: Empty
*/
if (q.isEmpty) {
Console.println("Empty")
}
/* Test enqueueing. */
val q2 = q.enqueue(42).enqueue(0)
val qa = q :+ 42 :+ 0
assert(q2 == qa)
val qb = 42 +: 0 +: q
assert(q2 == qb)
val qc = 42 +: q :+ 0
assert(q2 == qc)
assert(q ++ qa == qa)
val qdr = 1 +: 2 +: 3 +: 4 +: q
val qcon1 = 1 +: 2 +: q
val qcon2 = q :+ 3 :+ 4
val qd = qcon1 ++ qcon2
assert(qd == qdr)
Console.println("q2: " + q2)
Console.println("qa: " + qa)
Console.println("qb: " + qb)
Console.println("qc: " + qc)
/* Test isEmpty and dequeue.
* Expected: Head: 42
*/
val q4 =
if (q2.isEmpty) {
Console.println("Empty")
q2
}
else {
val (head, q3) = q2.dequeue
Console.println("Head: " + head)
q3
}
/* Test sequence enqueueing. */
val q5: Queue[Any] = q4.enqueueAll(List(1,2,3,4,5,6,7,8,9))
/* Test toString.
* Expected: q5: Queue(0,1,2,3,4,5,6,7,8,9)
*/
Console.println("q5: " + q5)
/* Test apply
* Expected: q5[5]: 5
*/
Console.println("q5[5]: " + q5(5))
val q5alt: Queue[Any] = q4.enqueueAll(collection.Iterable(1,2,3,4,5,6,7,8,9))
Console.println("q5alt: " + q5alt)
assert(q5alt.sameElements(q5))
val q5c: Queue[Int] = Queue.empty.enqueueAll(List(0, 1, 2, 3, 4, 5, 6, 7, 8, 9))
/* Testing ==
* Expected: q5 == q5c: true
* q5c == q5: true
*/
Console.println("q5 == q5c: " + (q5 == q5c))
Console.println("q5c == q5: " + (q5c == q5))
val (_, q6) = q5.dequeue
val (_, q7) = q6.dequeue
//val q8 = q7 + 10 + 11 //deprecated
val q8 = q7.enqueue(10).enqueue(11)
/* Test dequeue
* Expected: q8: Queue(2,3,4,5,6,7,8,9,10,11)
*/
Console.println("q8: " + q8)
val q9 = Queue(2,3,4,5,6,7,8,9,10,11)
/* Testing ==
* Expected: q8 == q9: true
*/
Console.println("q8 == q9: " + (q8 == q9))
/* Testing elements
* Expected: Elements: 1 2 3 4 5 6 7 8 9
*/
Console.print("Elements: ");
q6.iterator.foreach(e => Console.print(" "+ e + " "))
Console.println()
/* Testing mkString
* Expected: String: <1-2-3-4-5-6-7-8-9>
*/
Console.println("String: " + q6.mkString("<","-",">"))
/* Testing length
* Expected: Length: 9
*/
Console.println("Length: " + q6.length)
/* Testing front
* Expected: Front: 1
*/
Console.println("Front: " + q6.front);
}
}
object Test {
def main(args: Array[String]): Unit = {
iq.main()
}
}
| scala/scala | test/files/run/iq.scala | Scala | apache-2.0 | 2,835 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.2
* @date Sun Sep 23 21:14:14 EDT 2012
* @see LICENSE (MIT style license file).
*/
package scalation.analytics
import scalation.linalgebra.{VectorD, VectorI}
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Classifier` trait provides a common framework for several classifiers.
*/
trait Classifier
{
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Given a set of data vectors and their classifications, build a classifier.
* @param testStart Beginning of test region. (inclusive)
* @param testEnd End of test region. (exclusive)
*/
def train (testStart: Int, testEnd: Int)
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Train the classifier, i.e., calculate statistics and create conditional
* density (cd) functions. Assumes that conditional densities follow the
* Normal (Gaussian) distribution.
*/
def train () { train (0, 0) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Given a new discrete data vector z, determine which class it belongs to.
* @param z the vector to classify
*/
def classify (z: VectorI): Tuple2 [Int, String]
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Given a new continuous data vector z, determine which class it belongs to.
* @param z the vector to classify
*/
def classify (z: VectorD): Tuple2 [Int, String]
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Test the quality of the training with a test-set and return the fraction
* of correct classifications.
* @param testStart Beginning of test region. (inclusive)
* @param testEnd End of test region. (exclusive)
*/
def test (testStart: Int, testEnd: Int): Double
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Reset the frequency and probability tables.
*/
def reset ()
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Test the accuracy of the classified results by cross-validation, returning
* the accuracy. The "test data" starts at 'testStart' and ends at 'testEnd';
* the rest of the data is "training data".
* @param nx number of crosses and cross-validations (defaults to 5x).
*/
def crossValidate (nx: Int = 5): Double =
{
println ("------------------------------------------------------------")
println ("cross-validation:")
val testSize = size / nx
var sum = 0.0
for (i <- 0 until nx) {
val testStart = i * testSize
val testEnd = testStart + testSize
reset ()
// println (s"testStart = $testStart, testEnd = $testEnd, testSize = $testSize)
train (testStart, testEnd)
sum += test (testStart, testEnd)
} // for
val avg = sum / nx.toDouble
println ("Average accuracy = " + avg)
println ("------------------------------------------------------------")
avg
} // crossValidate
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Size of the feature set.
*/
def size: Int
} // Classifier trait
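//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Illustrative sketch (added; not part of the original file): a trivial classifier
 *  that always predicts class 0, just to show how 'crossValidate' drives the
 *  'reset'/'train'/'test' cycle. The class and its constructor argument 'y'
 *  (the known classifications) are hypothetical.
 */
class AlwaysZeroClassifier (y: VectorI) extends Classifier
{
    def size: Int = y.dim

    def train (testStart: Int, testEnd: Int) {}                // nothing to learn

    def classify (z: VectorI): Tuple2 [Int, String] = (0, "c0")
    def classify (z: VectorD): Tuple2 [Int, String] = (0, "c0")

    def test (testStart: Int, testEnd: Int): Double =          // fraction of class-0 rows in the test region
    {
        var correct = 0
        for (i <- testStart until testEnd if y(i) == 0) correct += 1
        correct / (testEnd - testStart).toDouble
    } // test

    def reset () {}                                            // no tables to reset

} // AlwaysZeroClassifier class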
| mvnural/scalation | src/main/scala/scalation/analytics/Classifier.scala | Scala | mit | 3,508 |
package com.azavea.opentransit.indicators.calculators
import com.azavea.gtfs._
import com.azavea.opentransit._
import com.azavea.opentransit.indicators._
import com.github.nscala_time.time.Imports._
import org.joda.time._
object TimeTraveledStops extends Indicator
with AggregatesByAll {
type Intermediate = Seq[Int]
val name = "time_traveled_stops"
def calculation(period: SamplePeriod): IndicatorCalculation = {
def map(trip: Trip): Seq[Int] =
trip.schedule match {
case Nil => Seq[Int]()
case schedule => {
schedule
.zip(schedule.tail)
.map { case (stop1, stop2) =>
Seconds.secondsBetween(stop1.departureTime, stop2.arrivalTime).getSeconds
}
}
}
def reduce(durations: Seq[Seq[Int]]): Double = {
val (sum, count) =
durations
.flatten
        .foldLeft((0.0, 0)) { case ((sum, count), seconds) =>
          (sum + seconds, count + 1)
        }
      if (count > 0) (sum / count) / 60 else 0.0 // mean seconds per hop, converted to minutes
}
perTripCalculation(map, reduce)
}
}
| WorldBank-Transport/open-transit-indicators | scala/opentransit/src/main/scala/com/azavea/opentransit/indicators/calculators/TimeTraveledStops.scala | Scala | gpl-3.0 | 1,135 |
/*
* Copyright (c) 2018 OVO Energy
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package com.ovoenergy.comms.model
package sms
import java.time.Instant
import com.ovoenergy.comms.KafkaMessage
import com.ovoenergy.comms.model.types.{ComposedEvent, ComposedEventV2}
import com.ovoenergy.comms.model.{MetadataV2, LoggableEvent, InternalMetadata}
import com.sksamuel.avro4s.{AvroDoc, SchemaFor}
@KafkaMessage
case class ComposedSMSV2(
metadata: MetadataV2,
internalMetadata: InternalMetadata,
@AvroDoc("Recipient of the given SMS") recipient: String,
@AvroDoc("SMS text body") textBody: String,
@AvroDoc(
"""
A timestamp denoting a time after which the comm should no longer be sent. Value is milliseconds since Unix Epoch - UTC time.
Note: the expiry will be checked "just in time", i.e. directly before issuing to the gateway used to deliver the comm, however,
once issued to the gateway the expiry time will no longer be adhered to and hence may be delivered after the specified time.
If the timestamp is in the past, the comm will be disregarded.
If no value is set, the comm will never expire.
""") expireAt: Option[Instant])
extends LoggableEvent
with ComposedEventV2 {
override def loggableString: Option[String] = prettyPrint(this, Seq("recipient", "textBody"))
override def mdcMap: Map[String, String] = combineMDCS(metadata.mdcMap, internalMetadata.mdcMap)
}
object ComposedSMSV2 {
//Required as we have a custom serialiser for Instant (to Long)
implicit val schemaFor: SchemaFor[ComposedSMSV2] = SchemaFor[ComposedSMSV2]
}
|
ovotech/comms-kafka-messages
|
modules/core/src/main/scala/com/ovoenergy/comms/model/sms/ComposedSMSV2.scala
|
Scala
|
mit
| 2,630
|
package scalamachine.core.tests
import org.specs2._
import mock._
import org.mockito.{Matchers => MM}
import scalamachine.core._
import Resource._
import v3.WebmachineDecisions
import HTTPHeaders._
import HTTPMethods._
import ReqRespData.Metadata
class V3ColCDEFSpecs extends Specification with Mockito with SpecsHelper with WebmachineDecisions { def is =
"Webmachine V3 Column C, D, E & F".title ^
p^
"C3 - Accept Exists?" ^
"If the Accept header doesn't exist" ^
"D4 is returned and 1st type in resources provided list is set in metadata" ! testMissingAcceptHeader ^
"If provided list empty, text/plain is set in metadata, D4 still returned" ! testMissingAcceptEmptyProvidedList ^p^
"If the Accept header exists decision C4 is returned" ! testAcceptHeaderExists ^
p^
"C4 - Acceptable Media Type Available?" ^
"if the media type is provided by the resource" ^
"Decision D4 is returned & the mediatype is set as content type in metadata" ! testMediaTypeProvided ^p^
"if the media type is not provided by the resource" ^
"response with code 406 is returned" ! testMediaTypeNotProvided ^
p^p^
"D4 - Accept-Language Exists?" ^
"if Accept-Language header exists decision D5 is returned" ! testHasAcceptLanguage ^
"otherwise decision E5 is returned" ! testMissingAcceptLanguage ^
p^
"D5 - Accept-Language Availble?" ^
"asks resource if language is available" ^
"if it is, decision E5 is returned" ! testIsLanguageAvailableTrue ^
"otherwise, a response with code 406 is returned" ! testIsLanguageAvailableFalse ^
p^p^
"E5 - Accept-Charset Exists?" ^
"If the Accept-Charset header exists decision E6 is returned" ! testAcceptCharsetExists ^
"Otherwise" ^
"""If "*" charset is acceptable to resource""" ^
"decision F6 is returned" ! testAcceptMissingStarAcceptable ^
"first charset provided by resource is set as chosen in metadata" ! testAcceptMissingStarOkCharsetChosen ^p^
"If resource specifies charset negotioation short circuting, F6 is returned" ! testAcceptMissingCharsetNegShortCircuit ^
"otherwise, a response with code 406 is returned" ! testAcceptMissingStarNotAcceptable ^
p^p^
"E6 - Accept-Charset Available?" ^
"If resource specifies charset negotiation short circuting, F6 is returned" ! testAcceptExistsCharsetNegShortCircuit ^
"If the charset is provided by the resource, F6 returned, chosen set in meta" ! testAcceptExistsAcceptableSetInMeta ^
"If charset is not provided by the resource, response w/ code 406 returned" ! testAcceptExistsNotAcceptable ^
p^
"F6 - Accept-Encoding Exists?" ^
"sets the chosen content type/charset in response content type header" ^
"""if both are None, "text/plain" is set""" ! testF6MediaAndCharsetNotChosen ^
"if just the content type is Some, its string value is set" ! testF6MediaChosenCharsetNot ^
"""if just the charset is Some, "text/plain; charset=<value>" is set""" ! testF6CharsetChosenMediaNot ^
"if both are set the entire string is set" ! testF6MediaAndCharsetChosen ^p^
"if the accept-encoding header exists, decision F7 is returned" ! testAcceptEncodingExists ^
"if the accept-encoding header is missing" ^
"""if "identity;q=1.0,*;q=0.5" is acceptable""" ^
"chosen is set as the value of Content-Encoding header,in meta, G7 returned"! testAcceptEncodingMissingDefaultAcceptable ^p^
"otherwise, a response with code 406 is returned" ! testAcceptEncodingMissingDefaultNotAcceptable ^
p^p^
"F7 - Accept Encoding Available?" ^
"If resource specifies encoding neg. short circuiting, G7 returned" ! testAcceptEncodingExistsShortCircuit ^
"If charset is provided by the resource, G7 returned, chosen set in resp./meta" ! testAcceptEncodingExistsAcceptable ^
"If charset is not provided, response w/ code 406 returned" ! testAcceptEncodingExistsNotAcceptable ^
end
// TODO: change D5 to do real language negotiation like ruby webmachine implementation
def testMissingAcceptHeader = {
val ctypes: ContentTypesProvided =
(ContentType("text/html"), (d: ReqRespData) => (d,(ValueRes(FixedLengthBody("".getBytes))))) ::
(ContentType("text/plain"), (d: ReqRespData) => ((d, ValueRes(FixedLengthBody("".getBytes))))) :: Nil
testDecisionReturnsDecisionAndData(c3,d4,_.contentTypesProvided(any) answers mkAnswer(ctypes)) {
_.metadata.contentType must beSome.like {
case ct => ct must beEqualTo(ctypes.head._1)
}
}
}
def testMissingAcceptEmptyProvidedList = {
val ctypes: ContentTypesProvided = Nil
testDecisionReturnsDecisionAndData(c3,d4,_.contentTypesProvided(any) answers mkAnswer(ctypes)) {
_.metadata.contentType must beSome.like {
case ct => ct must beEqualTo(ContentType("text/plain"))
}
}
}
def testAcceptHeaderExists = {
testDecisionReturnsDecision(c3,c4,_.contentTypesProvided(any) answers mkAnswer(Nil),data = createData(headers = Map(Accept -> "text/html")))
}
def testMediaTypeNotProvided = {
val ctypes: ContentTypesProvided = (ContentType("text/html"), (d: ReqRespData) => (d, ValueRes(FixedLengthBody("".getBytes)))) :: Nil
testDecisionReturnsData(c4,_.contentTypesProvided(any) answers { d => (d.asInstanceOf[ReqRespData], (ValueRes(ctypes))) }, data = createData(headers = Map(Accept -> "text/plain"))) {
_.statusCode must beEqualTo(406)
}
}
def testMediaTypeProvided = {
val ctypes: ContentTypesProvided = (ContentType("text/html"), (d: ReqRespData) => (d, ValueRes(FixedLengthBody("".getBytes)))) :: Nil
testDecisionReturnsDecisionAndData(c4,d4,_.contentTypesProvided(any) answers mkAnswer(ctypes), data = createData(headers = Map(Accept -> "text/html"))) {
_.metadata.contentType must beSome.like {
case ct => ct must beEqualTo(ContentType("text/html"))
}
}
}
def testMissingAcceptLanguage = {
testDecisionReturnsDecision(d4,e5,r => {})
}
def testHasAcceptLanguage = {
testDecisionReturnsDecision(d4,d5,r => {},data = createData(headers = Map(AcceptLanguage -> "en/us")))
}
def testIsLanguageAvailableFalse = {
testDecisionReturnsData(d5,_.isLanguageAvailable(any) answers mkAnswer(false), data = createData(headers = Map(AcceptLanguage -> "en/us"))) {
_.statusCode must beEqualTo(406)
}
}
def testIsLanguageAvailableTrue = {
testDecisionReturnsDecision(d5,e5,_.isLanguageAvailable(any) answers mkAnswer(true), data = createData(headers = Map(AcceptLanguage-> "en/us")))
}
def testAcceptCharsetExists = {
testDecisionReturnsDecision(e5,e6,r => {}, data = createData(headers = Map(AcceptCharset -> "*")))
}
def testAcceptMissingStarAcceptable = {
val provided: CharsetsProvided = Some(("abc", identity[Array[Byte]](_)) :: Nil)
testDecisionReturnsDecision(e5,f6,_.charsetsProvided(any) answers mkAnswer(provided))
}
def testAcceptMissingStarOkCharsetChosen = {
val provided: CharsetsProvided = Some(("abc", identity[Array[Byte]](_)) :: Nil)
testDecisionReturnsDecisionAndData(e5,f6,_.charsetsProvided(any) answers mkAnswer(provided)) {
_.metadata.chosenCharset must beSome.like { case c => c must beEqualTo("abc") }
}
}
def testAcceptMissingCharsetNegShortCircuit = {
val provided: CharsetsProvided = None
testDecisionReturnsDecision(e5,f6,_.charsetsProvided(any) answers mkAnswer(provided))
}
def testAcceptMissingStarNotAcceptable = {
val provided: CharsetsProvided = Some(Nil)
testDecisionReturnsData(e5,_.charsetsProvided(any) answers mkAnswer(provided)) {
_.statusCode must beEqualTo(406)
}
}
def testAcceptExistsCharsetNegShortCircuit = {
val provided: CharsetsProvided = None
testDecisionReturnsDecision(e6, f6, _.charsetsProvided(any) answers mkAnswer(provided), data = createData(headers = Map(AcceptCharset -> "ISO-8859-1")))
}
def testAcceptExistsAcceptableSetInMeta = {
val charset = "ISO-8859-1"
val provided: CharsetsProvided = Some((charset, identity[Array[Byte]](_)) :: Nil)
testDecisionReturnsDecisionAndData(e6, f6, _.charsetsProvided(any) answers mkAnswer(provided), data = createData(headers = Map(AcceptCharset -> charset))) {
_.metadata.chosenCharset must beSome.which { _ == charset }
}
}
def testAcceptExistsNotAcceptable = {
val provided: CharsetsProvided = Some(Nil)
testDecisionReturnsData(e6, _.charsetsProvided(any) answers mkAnswer(provided), data = createData(headers = Map(AcceptCharset -> "ISO-8859-1"))) {
_.statusCode must beEqualTo(406)
}
}
def testF6MediaAndCharsetNotChosen = {
val provided: EncodingsProvided = None
testDecisionResultHasData(f6, _.encodingsProvided(any) answers mkAnswer(provided)) {
_.responseHeader(ContentTypeHeader) must beSome.like {
case value => value must beEqualTo("text/plain")
}
}
}
def testF6MediaChosenCharsetNot = {
val provided: EncodingsProvided = None
val contentType = ContentType("application/json", Map("a" -> "b", "c" -> "d"))
testDecisionResultHasData(f6, _.encodingsProvided(any) answers mkAnswer(provided), data = createData(metadata = Metadata(contentType = Some(contentType)))) {
_.responseHeader(ContentTypeHeader) must beSome.like {
case value => value must beEqualTo(contentType.mediaType + ";a=b,c=d").ignoreSpace.ignoreCase
}
}
}
def testF6CharsetChosenMediaNot = {
val provided: EncodingsProvided = None
val charset = "ISO-8859-1"
testDecisionResultHasData(f6, _.encodingsProvided(any) answers mkAnswer(provided), data = createData(metadata = Metadata(chosenCharset = Some(charset)))) {
_.responseHeader(ContentTypeHeader) must beSome.like {
case value => value must beEqualTo("text/plain;charset=" + charset).ignoreSpace.ignoreCase
}
}
}
def testF6MediaAndCharsetChosen = {
val provided: EncodingsProvided = None
val contentType = ContentType("application/json", Map("a" -> "b", "c" -> "d"))
val charset = "ISO-8859-1"
testDecisionResultHasData(f6, _.encodingsProvided(any) answers mkAnswer(provided), data = createData(metadata = Metadata(contentType = Some(contentType), chosenCharset = Some(charset)))) {
_.responseHeader(ContentTypeHeader) must beSome.like {
case value => value must beEqualTo(contentType.mediaType + ";a=b,c=d;charset=" + charset).ignoreSpace.ignoreCase
}
}
}
def testAcceptEncodingExists = {
testDecisionReturnsDecision(f6,f7,r => {}, data = createData(headers = Map(AcceptEncoding -> "*")))
}
def testAcceptEncodingMissingDefaultAcceptable = {
val encoding = "some-encoding"
val provided: EncodingsProvided = Some((encoding, identity[Array[Byte]](_)) :: Nil)
testDecisionReturnsDecisionAndData(f6,g7,_.encodingsProvided(any) answers mkAnswer(provided)) {
d => (d.responseHeader(ContentEncoding) must beSome.like {
case enc => enc must beEqualTo(encoding)
}) and (d.metadata.chosenEncoding must beSome.like {
case enc => enc must beEqualTo(encoding)
})
}
}
def testAcceptEncodingMissingDefaultNotAcceptable = {
val provided: EncodingsProvided = Some(Nil)
testDecisionReturnsData(f6,_.encodingsProvided(any) answers mkAnswer(provided)) {
_.statusCode must beEqualTo(406)
}
}
def testAcceptEncodingExistsShortCircuit = {
val provided: EncodingsProvided = None
testDecisionReturnsDecision(f7,g7,_.encodingsProvided(any) answers mkAnswer(provided), data = createData(headers = Map(AcceptEncoding-> "gzip")))
}
def testAcceptEncodingExistsAcceptable = {
val encoding = "gzip"
val provided: EncodingsProvided = Some((encoding, identity[Array[Byte]](_)) :: Nil)
testDecisionReturnsDecisionAndData(f7,g7,_.encodingsProvided(any) answers mkAnswer(provided), data = createData(headers = Map(AcceptEncoding -> encoding))) {
d => (d.responseHeader(ContentEncoding) must beSome.like {
case enc => enc must beEqualTo(encoding)
}) and (d.metadata.chosenEncoding must beSome.like {
case enc => enc must beEqualTo(encoding)
})
}
}
def testAcceptEncodingExistsNotAcceptable = {
val provided: EncodingsProvided = Some(Nil)
testDecisionReturnsData(f7,_.encodingsProvided(any) answers mkAnswer(provided), data = createData(headers = Map(AcceptEncoding -> "ISO-8859-1"))) {
d => (d.responseHeader(ContentEncoding) must beNone) and (d.statusCode must beEqualTo(406))
}
}
}
|
stackmob/scalamachine
|
core/src/test/scala/scalamachine/core/tests/V3ColCDEFSpecs.scala
|
Scala
|
apache-2.0
| 14,534
|
/*
* Copyright © 2016 - 2020 Schlichtherle IT Services
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package global.namespace.neuron.di.scala
import scala.annotation.tailrec
// The error checks in this class must match the error checks in
// `global.namespace.neuron.di.internal.NeuronProcessor`!
private trait NeuronAnnotation extends MacroAnnotation {
import c.universe._
import Flag._
def apply(inputs: List[Tree]): Tree = {
val outputs = inputs match {
case ClassDef(mods@Modifiers(flags, privateWithin, annotations), tname@TypeName(name), tparams, impl) :: rest =>
if (!hasStaticContext) {
error("A neuron type must have a static context.")
}
if (mods hasFlag FINAL) {
error("A neuron class cannot be final.")
}
if (!(mods hasFlag ABSTRACT)) {
warn("A neuron class should be abstract.")
}
if (!hasEitherNoConstructorOrANonPrivateConstructorWithoutParameters(impl)) {
error("A neuron type must have either no constructor or a non-private constructor without parameters.")
}
if ((mods hasFlag INTERFACE) && !isCachingDisabled) {
warn("A neuron interface should not have a caching strategy.")
}
if (isSerializable(impl)) {
warn("A neuron type should not be serializable.")
}
if (c.hasErrors) {
inputs
} else {
val needsShim = (mods hasFlag TRAIT) && !(mods hasFlag INTERFACE)
val shim = {
if (needsShim) {
// Due to https://issues.scala-lang.org/browse/SI-7551 , we have to put the binary class name into the
// shim annotation instead of just the class literal which is bad because the naming schema is supposed to
// be an implementation detail of the Scala compiler which may change without notice.
val binaryName = {
binaryNameOf(enclosingOwner) +
(if (enclosingOwner.isPackage) '.' else '$') +
tname.encodedName +
"$$shim"
}
q"new _root_.global.namespace.neuron.di.internal.Shim(name = $binaryName)" :: Nil
} else {
Nil
}
}
val neuron = {
val Apply(_, args) = c.prefix.tree
val Apply(fun, _) = newNeuronAnnotationTerm
Apply(fun, args map {
case NamedArg(lhs@q"cachingStrategy", rhs: Tree) => NamedArg(lhs, scala2javaCachingStrategy(rhs))
case tree => tree
})
}
ClassDef(mods.mapAnnotations(shim ::: neuron :: _), tname, tparams, applyCachingAnnotation(impl)) :: {
if (needsShim) {
val shimMods = Modifiers(flags &~ (DEFAULTPARAM | TRAIT) | ABSTRACT | SYNTHETIC, privateWithin, neuron :: annotations)
val shimDef = q"$shimMods class $$shim[..$tparams] extends $tname[..${tparams.map(_.name)}]"
rest match {
case ModuleDef(moduleMods, moduleName, Template(parents, self, body)) :: moduleRest =>
ModuleDef(moduleMods, moduleName, Template(parents, self, shimDef :: body)) :: moduleRest
case _ =>
// This should be SYNTHETIC, however this would break SBT 1.1.2, 1.1.4, 1.2.7 et al.
val moduleMods = Modifiers(flags &~ (ABSTRACT | TRAIT | DEFAULTPARAM), privateWithin, annotations)
q"$moduleMods object ${TermName(name)} { $shimDef }" :: rest
}
} else {
rest
}
}
}
case other => other
}
q"..$outputs"
}
private def hasStaticContext = enclosingOwner.isStatic
private lazy val enclosingOwner = c.internal.enclosingOwner
private def hasEitherNoConstructorOrANonPrivateConstructorWithoutParameters(template: Template) = {
val Template(_, _, body) = template
val constructors = body collect { case c@DefDef(_, termNames.CONSTRUCTOR, _, _, _, _) => c }
constructors.isEmpty || (constructors exists isNonPrivateConstructorWithoutParameters)
}
private def isNonPrivateConstructorWithoutParameters(tree: Tree) = {
tree match {
case DefDef(mods, termNames.CONSTRUCTOR, _, Nil | List(Nil), _, _)
if !mods.hasFlag(PRIVATE) => true
case _ => false
}
}
private def isCachingDisabled = {
!c.prefix.tree.collect {
case q"cachingStrategy = ${Select(_, TermName(name: String))}" => name
case q"cachingStrategy = ${Ident(TermName(name: String))}" => name
}.exists(_ != "DISABLED")
}
private def isSerializable(template: Template) = {
template.parents.exists(_.toString == classOf[java.io.Serializable].getName)
}
private def binaryNameOf(symbol: Symbol): String = {
@tailrec
def binaryNameOf(symbol: Symbol, tail: String): String = {
symbol match {
case NoSymbol =>
"" + tail
case other if other.isPackage =>
other.fullName + tail
case other =>
val oo = other.owner
binaryNameOf(oo, (if (oo.isPackage) "." else "$") + other.name.encodedName + tail)
}
}
binaryNameOf(symbol, "")
}
private def applyCachingAnnotation(template: Template) = {
val Template(parents, self, body) = template
Template(parents, self, body map {
case ValDef(mods@Modifiers(_, _, annotations), name, tpt, EmptyTree)
if !annotations.exists(isCachingAnnotation) && !mods.hasFlag(PRIVATE) =>
ValDef(mods.mapAnnotations(newCachingAnnotationTerm :: _), name, tpt, EmptyTree)
case other =>
other
})
}
}
|
christian-schlichtherle/neuron-di
|
core-scala/src/main/scala/global/namespace/neuron/di/scala/NeuronAnnotation.scala
|
Scala
|
apache-2.0
| 6,199
|
package slinky.web
import slinky.core.ComponentWrapper
import slinky.core.facade.ReactElement
import org.scalajs.dom.{Element, document}
import scala.scalajs.js
import html._
import org.scalatest.funsuite.AnyFunSuite
object TestComponent extends ComponentWrapper {
type Props = Unit
type State = Unit
class Def(jsProps: js.Object) extends Definition(jsProps) {
override def initialState: Unit = ()
override def render(): ReactElement = {
a()
}
}
}
class ReactDOMTest extends AnyFunSuite {
test("Renders a single element into the DOM") {
val target = document.createElement("div")
ReactDOM.render(
a(),
target
)
assert(target.innerHTML == "<a></a>")
}
test("Finds a dom node for a component") {
val comp: ReactElement = TestComponent(())
val target = document.createElement("div")
val instance = ReactDOM.render(
comp,
target
).asInstanceOf[TestComponent.Def]
assert(target.childNodes(0).asInstanceOf[Element] == ReactDOM.findDOMNode(instance))
}
test("Renders portals to the appropriate container DOM node") {
val target = document.createElement("div")
val container = document.createElement("div")
ReactDOM.render(
div(
ReactDOM.createPortal(h1("hi"), container)
),
target
)
assert(container.innerHTML == "<h1>hi</h1>")
assert(target.innerHTML == "<div></div>")
}
test("unmountComponentAtNode clears out the container") {
val container = document.createElement("div")
ReactDOM.render(
div("hello"),
container
)
assert(container.innerHTML == "<div>hello</div>")
ReactDOM.unmountComponentAtNode(container)
assert(container.innerHTML.length == 0)
}
}
|
shadaj/slinky
|
tests/src/test/scala/slinky/web/ReactDOMTest.scala
|
Scala
|
mit
| 1,753
|
package com.sksamuel.elastic4s.searches.queries
case class BoostingQueryDefinition(positiveQuery: QueryDefinition,
negativeQuery: QueryDefinition,
queryName: Option[String] = None,
boost: Option[Double] = None,
negativeBoost: Option[Double] = None) extends QueryDefinition {
def withQueryName(queryName: String): BoostingQueryDefinition = copy(queryName = Option(queryName))
def boost(boost: Double): BoostingQueryDefinition = copy(boost = Option(boost))
def negativeBoost(negativeBoost: Double): BoostingQueryDefinition = copy(negativeBoost = Option(negativeBoost))
def queryName(queryName: String): BoostingQueryDefinition = copy(queryName = Option(queryName))
}
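// Editor's illustrative sketch (not part of the original source): building a boosting
// query from the case class above. `positive` and `negative` are hypothetical
// stand-ins for any two QueryDefinition values available in the DSL.
//
//   val q = BoostingQueryDefinition(positive, negative)
//     .boost(1.5)
//     .negativeBoost(0.2)
//     .withQueryName("demoted-colour")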
|
aroundus-inc/elastic4s
|
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/searches/queries/BoostingQueryDefinition.scala
|
Scala
|
apache-2.0
| 816
|
/*
Copyright (c) 2015, Raymond Dodge
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name "<PRODUCT NAME>" nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.rayrobdod.scriptSample
import java.net.URL
import scala.runtime.{AbstractFunction1 => AFunction1}
import com.codecommit.antixml.{Elem, Selector, Text,
QName, Node, Attributes => XmlAttrs, XML
}
import com.rayrobdod.script.ScriptElement
import com.rayrobdod.script.parser.{AttrsToUseFun => BaseAttrsToUseFun, ScriptFromXml}
/**
* Constructs base script elements from xml
*/
object SampleScriptFromXml extends ScriptFromXml {
def apply[A](useFun:BaseAttrsToUseFun[A], xml:Elem, base:URL, recurser:ScriptFromXml):ScriptElement[A] = xml match {
case Elem(_, "setName", attrs, _, _) => {
SetName
}
case Elem(_, "setGender", attrs, _, _) => {
SetGender
}
case Elem(_, name, _, _, _) => {
throw new IllegalArgumentException("Unexpected element: " + name)
}
// `case _` cannot be reached
}
/**
* Returns true iff apply will not throw an error if presented with
* this Xml Element
*/
def isDefinedAt(xml:Elem):Boolean = xml match {
case Elem(_, name, _, _, _) => name match {
case "setName" => true
case "setGender" => true
case _ => false
}
// `case _` cannot be reached
}
}
|
rayrobdod/script
|
src/main/scala/com/rayrobdod/scriptSample/ScriptFromXml.scala
|
Scala
|
bsd-3-clause
| 2,655
|
package com.harrys.hyppo.worker.data
import javax.inject.Inject
import akka.actor.{Actor, ActorLogging, ActorRef}
import com.harrys.hyppo.worker.api.proto.RemoteStorageLocation
import scala.concurrent.Future
import scala.util.{Failure, Success}
final class JarLoadingActor @Inject() (factory: JarFileLoader.Factory) extends Actor with ActorLogging {
import JarLoadingActor._
// Load the dispatcher as the default execution context
import context.dispatcher
val loader: JarFileLoader = factory(context.dispatcher)
override def postStop(): Unit = {
loader.shutdown()
super.postStop()
}
override def receive: Receive = {
case LoadJars(jarFiles) =>
loadJarFiles(jarFiles, sender())
}
def loadJarFiles(jarFiles: Seq[RemoteStorageLocation], recipient: ActorRef) : Unit = {
val downloads = jarFiles.map(jar => loader.loadJarFile(jar))
val combined = Future.sequence(downloads).map(jars => JarsResult(jars))
combined.onComplete {
case Success(result) =>
recipient ! result
case f @ Failure(c) =>
log.warning(s"Failed to download jars from ${jarFiles.mkString(", ")}", c)
recipient ! f
}
}
}
object JarLoadingActor {
final case class LoadJars(jars: Seq[RemoteStorageLocation])
final case class JarsResult(jars: Seq[LoadedJarFile])
}
|
harrystech/hyppo-worker
|
worker/src/main/scala/com/harrys/hyppo/worker/data/JarLoadingActor.scala
|
Scala
|
mit
| 1,329
|
package com.arcusys.learn.controllers.api
import com.arcusys.learn.facades.QuizFacadeContract
import com.arcusys.learn.ioc.Configuration
import com.arcusys.learn.liferay.permission._
import com.arcusys.learn.models.request.{ QuizActionType, QuizRequest }
import com.arcusys.learn.models.{ QuizQuestionPreviewContent, QuizQuestionPreviewRedirect }
import com.arcusys.valamis.lrs.serializer.StatementSerializer
import com.escalatesoft.subcut.inject.BindingModule
import org.json4s.{ DefaultFormats, Formats }
import PermissionUtil._
class QuizApiController(configuration: BindingModule) extends BaseApiController(configuration) {
def this() = this(Configuration)
def quiz: QuizFacadeContract = inject[QuizFacadeContract]
before() {
scentry.authenticate(LIFERAY_STRATEGY_NAME)
}
get("/quiz(/)") {
val lessonRequest = QuizRequest(this)
PermissionUtil.requirePermissionApi(ViewPermission, PortletName.LessonDesigner)
implicit val formats: Formats = DefaultFormats + new StatementSerializer
lessonRequest.actionType match {
case QuizActionType.GetAll => jsonAction(quiz.getAll(lessonRequest.courseId.toInt, lessonRequest.filter, lessonRequest.sortBy,
lessonRequest.isSortDirectionAsc, lessonRequest.page, lessonRequest.count))
case QuizActionType.GetContent => jsonAction(quiz.getContent(lessonRequest.id))
case QuizActionType.QuestionPreview => quiz.getQuestionPreview(lessonRequest.lessonId, lessonRequest.idString) match {
case QuizQuestionPreviewRedirect(url: String) => redirect(url)
case QuizQuestionPreviewContent(content: String) =>
response.setHeader("Content-Type", "text/html")
content
}
}
}
post("/quiz(/)")(jsonAction {
val data = QuizRequest(this)
data.actionType match {
case QuizActionType.Add |
QuizActionType.Delete |
QuizActionType.Update |
QuizActionType.UpdateLogo |
QuizActionType.Clone =>
PermissionUtil.requirePermissionApi(ModifyPermission, PortletName.LessonDesigner)
case QuizActionType.Publish => PermissionUtil.requirePermissionApi(PublishPermission, PortletName.LessonDesigner)
case _ =>
}
data.actionType match {
case QuizActionType.Add => quiz.create(data.title, data.description, data.logoOption.getOrElse(""), data.courseId.toInt, data.maxDuration)
case QuizActionType.Delete => quiz.delete(data.id)
case QuizActionType.Publish => quiz.publish(data.id, getUserId, data.courseId, data.publishType, data.theme, data.randomOrdering, data.questionPerUser, data.scoreLimit)
case QuizActionType.Update => quiz.update(data.id, data.title, data.description, data.maxDuration)
case QuizActionType.UpdateLogo => quiz.updateLogo(data.id, data.logo)
case QuizActionType.Clone => quiz.clone(data.id)
case QuizActionType.Convert => quiz.convert(data.id, data.courseId)
case QuizActionType.AddCategory => quiz.addCategory(data.lessonId, data.title)
case QuizActionType.AddQuestion => quiz.addQuestion(data.lessonId, data.categoryIdOption, data.bankQuestionId)
case QuizActionType.AddQuestions => data.bankQuestionIds.map(q => quiz.addQuestion(data.lessonId, data.categoryIdOption, q))
case QuizActionType.AddQuestionPlainText => quiz.addQuestionPlainText(data.lessonId, data.categoryIdOption, data.title, data.text)
case QuizActionType.AddQuestionRevealJS => quiz.addQuestionRevealJS(data.lessonId, data.categoryIdOption, data.title, data.text)
case QuizActionType.AddQuestionPDF => null
case QuizActionType.AddQuestionExternal => quiz.addQuestionExternal(data.lessonId, data.categoryIdOption, data.title, data.url)
case QuizActionType.AddVideo => quiz.addVideo(data.lessonId, data.categoryIdOption, data.title, data.url, data.videoFromDL, data.uuid, data.courseId)
case QuizActionType.UpdateCategory => quiz.updateCategory(data.lessonId, data.idString, data.title)
case QuizActionType.UpdateQuestion => quiz.updateQuestion(data.lessonId, data.idString, data.title, data.autoShowAnswer.getOrElse(false))
case QuizActionType.UpdateQuestionPlainText => quiz.updateQuestionPlainText(data.lessonId, data.idString, data.title)
case QuizActionType.UpdateQuestionRevealJS => quiz.updateQuestionRevealJS(data.lessonId, data.idString, data.title)
case QuizActionType.UpdateQuestionPDF => quiz.updateQuestionPDF(data.lessonId, data.idString, data.title)
case QuizActionType.UpdateQuestionPPTX => quiz.updateQuestionPPTX(data.idString, data.title)
case QuizActionType.UpdateQuestionExternal => quiz.updateQuestionExternal(data.lessonId, data.idString, data.title, data.url)
case QuizActionType.UpdateQuestionDLVideo => quiz.updateQuestion(data.lessonId, data.idString, data.title, data.autoShowAnswer.getOrElse(false))
case QuizActionType.DeleteCategory => quiz.deleteCategory(data.lessonId, data.idString)
case QuizActionType.DeleteQuestion => quiz.deleteQuestion(data.lessonId, data.idString)
case QuizActionType.MoveElement => quiz.moveElement(data.lessonId, data.idString, data.categoryIdOption, data.index)
}
})
}
|
ViLPy/Valamis
|
learn-portlet/src/main/scala/com/arcusys/learn/controllers/api/QuizApiController.scala
|
Scala
|
lgpl-3.0
| 5,448
|
// Starter Code for Exercise 1
// From "Fields" atom
import com.atomicscala.AtomicTest._
val cup2 = new Cup2
cup2.add(45) is 45
cup2.add(-15) is 30
cup2.add(-50) is -20
|
P7h/ScalaPlayground
|
Atomic Scala/atomic-scala-solutions/16_Fields/Starter-1.scala
|
Scala
|
apache-2.0
| 170
|
/**
* Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.routing
import scala.collection.immutable
import akka.actor.ActorContext
import akka.actor.Props
import akka.dispatch.Dispatchers
import com.typesafe.config.Config
import akka.actor.SupervisorStrategy
import akka.japi.Util.immutableSeq
import akka.actor.Address
import akka.actor.ExtendedActorSystem
import akka.actor.ActorSystem
import java.util.concurrent.atomic.AtomicReference
import akka.actor.ActorRef
import akka.serialization.SerializationExtension
import scala.util.control.NonFatal
import akka.event.Logging
import akka.actor.ActorPath
object ConsistentHashingRouter {
/**
* If you don't define the `hashMapping` when
* constructing the [[akka.routing.ConsistentHashingRouter]]
* the messages need to implement this interface to define what
* data to use for the consistent hash key. Note that it's not
* the hash, but the data to be hashed.
*
* If returning an `Array[Byte]` or String it will be used as is,
* otherwise the configured [[akka.serialization.Serializer]]
* will be applied to the returned data.
*
* If messages can't implement this interface themselves,
* it's possible to wrap the messages in
* [[akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope]],
   * or define the `hashMapping` of the router.
*/
trait ConsistentHashable {
def consistentHashKey: Any
}
/**
* If you don't define the `hashMapping` when
* constructing the [[akka.routing.ConsistentHashingRouter]]
* and messages can't implement [[akka.routing.ConsistentHashingRouter.ConsistentHashable]]
   * themselves they can be wrapped in this envelope instead. The
* router will only send the wrapped message to the destination,
* i.e. the envelope will be stripped off.
*/
@SerialVersionUID(1L)
final case class ConsistentHashableEnvelope(message: Any, hashKey: Any)
extends ConsistentHashable with RouterEnvelope {
override def consistentHashKey: Any = hashKey
}
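  /**
   * Editor's illustrative sketch (not part of the original source): the two
   * message-side options described above. `Get`, `router` and "alice" are hypothetical.
   *
   * {{{
   * final case class Get(key: String) extends ConsistentHashable {
   *   override def consistentHashKey: Any = key
   * }
   *
   * // or, without touching the message type:
   * router ! ConsistentHashableEnvelope(message = Get("alice"), hashKey = "alice")
   * }}}
   */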
/**
* Partial function from message to the data to
* use for the consistent hash key. Note that it's not
* the hash that is to be returned, but the data to be hashed.
*
* If returning an `Array[Byte]` or String it will be used as is,
* otherwise the configured [[akka.serialization.Serializer]]
* will be applied to the returned data.
*/
type ConsistentHashMapping = PartialFunction[Any, Any]
@SerialVersionUID(1L)
object emptyConsistentHashMapping extends ConsistentHashMapping {
def isDefinedAt(x: Any) = false
def apply(x: Any) = throw new UnsupportedOperationException("Empty ConsistentHashMapping apply()")
}
/**
* JAVA API
* Mapping from message to the data to use for the consistent hash key.
* Note that it's not the hash that is to be returned, but the data to be
* hashed.
*
* May return `null` to indicate that the message is not handled by
* this mapping.
*
* If returning an `Array[Byte]` or String it will be used as is,
* otherwise the configured [[akka.serialization.Serializer]]
* will be applied to the returned data.
*/
trait ConsistentHashMapper {
def hashKey(message: Any): Any
}
/**
* INTERNAL API
*/
private[akka] def hashMappingAdapter(mapper: ConsistentHashMapper): ConsistentHashMapping = {
case message if (mapper.hashKey(message).asInstanceOf[AnyRef] ne null) ⇒
mapper.hashKey(message)
}
}
object ConsistentHashingRoutingLogic {
/**
* Address to use for the selfAddress parameter
*/
def defaultAddress(system: ActorSystem): Address =
system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress
}
/**
* Uses consistent hashing to select a routee based on the sent message.
*
 * There are 3 ways to define what data to use for the consistent hash key.
*
* 1. You can define `hashMapping` / `withHashMapper`
* of the router to map incoming messages to their consistent hash key.
* This makes the decision transparent for the sender.
*
* 2. The messages may implement [[akka.routing.ConsistentHashingRouter.ConsistentHashable]].
* The key is part of the message and it's convenient to define it together
* with the message definition.
*
 * 3. The messages can be wrapped in a [[akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope]]
* to define what data to use for the consistent hash key. The sender knows
* the key to use.
*
 * These ways to define the consistent hash key can be used together and at
 * the same time for one router. The `hashMapping` is tried first.
*
* @param virtualNodesFactor number of virtual nodes per node, used in [[akka.routing.ConsistentHash]]
*
* @param hashMapping partial function from message to the data to
* use for the consistent hash key
*
* @param system the actor system hosting this router
*
*/
@SerialVersionUID(1L)
final case class ConsistentHashingRoutingLogic(
system: ActorSystem,
virtualNodesFactor: Int = 0,
hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping)
extends RoutingLogic {
import ConsistentHashingRouter._
/**
* Java API
* @param system the actor system hosting this router
*/
def this(system: ActorSystem) =
this(system, virtualNodesFactor = 0, hashMapping = ConsistentHashingRouter.emptyConsistentHashMapping)
private val selfAddress = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress
val vnodes =
if (virtualNodesFactor == 0) system.settings.DefaultVirtualNodesFactor
else virtualNodesFactor
private lazy val log = Logging(system, getClass)
/**
* Setting the number of virtual nodes per node, used in [[akka.routing.ConsistentHash]]
*/
def withVirtualNodesFactor(vnodes: Int): ConsistentHashingRoutingLogic = copy(virtualNodesFactor = vnodes)
/**
* Java API: Setting the mapping from message to the data to use for the consistent hash key.
*/
def withHashMapper(mapper: ConsistentHashingRouter.ConsistentHashMapper): ConsistentHashingRoutingLogic =
copy(hashMapping = ConsistentHashingRouter.hashMappingAdapter(mapper))
// tuple of routees and the ConsistentHash, updated together in updateConsistentHash
private val consistentHashRef = new AtomicReference[(immutable.IndexedSeq[Routee], ConsistentHash[ConsistentRoutee])]((null, null))
override def select(message: Any, routees: immutable.IndexedSeq[Routee]): Routee =
if (routees.isEmpty) NoRoutee
else {
// update consistentHash when routees has changed
// changes to routees are rare and when no changes this is a quick operation
def updateConsistentHash(): ConsistentHash[ConsistentRoutee] = {
val oldConsistentHashTuple = consistentHashRef.get
val (oldRoutees, oldConsistentHash) = oldConsistentHashTuple
if (routees ne oldRoutees) {
// when other instance, same content, no need to re-hash, but try to set routees
val consistentHash =
if (routees == oldRoutees) oldConsistentHash
else ConsistentHash(routees.map(ConsistentRoutee(_, selfAddress)), vnodes) // re-hash
// ignore, don't update, in case of CAS failure
consistentHashRef.compareAndSet(oldConsistentHashTuple, (routees, consistentHash))
consistentHash
} else oldConsistentHash
}
def target(hashData: Any): Routee = try {
        val currentConsistentHash = updateConsistentHash()
        if (currentConsistentHash.isEmpty) NoRoutee
        else hashData match {
          case bytes: Array[Byte] ⇒ currentConsistentHash.nodeFor(bytes).routee
          case str: String        ⇒ currentConsistentHash.nodeFor(str).routee
          case x: AnyRef          ⇒ currentConsistentHash.nodeFor(SerializationExtension(system).serialize(x).get).routee
}
} catch {
case NonFatal(e) ⇒
// serialization failed
log.warning("Couldn't route message with consistent hash key [{}] due to [{}]", hashData, e.getMessage)
NoRoutee
}
message match {
case _ if hashMapping.isDefinedAt(message) ⇒ target(hashMapping(message))
case hashable: ConsistentHashable ⇒ target(hashable.consistentHashKey)
case other ⇒
log.warning("Message [{}] must be handled by hashMapping, or implement [{}] or be wrapped in [{}]",
message.getClass.getName, classOf[ConsistentHashable].getName,
classOf[ConsistentHashableEnvelope].getName)
NoRoutee
}
}
}
/**
* A router pool that uses consistent hashing to select a routee based on the
* sent message. The selection is described in [[akka.routing.ConsistentHashingRoutingLogic]].
*
* The configuration parameter trumps the constructor arguments. This means that
* if you provide `nrOfInstances` during instantiation they will be ignored if
* the router is defined in the configuration file for the actor being used.
*
* <h1>Supervision Setup</h1>
*
* Any routees that are created by a router will be created as the router's children.
* The router is therefore also the children's supervisor.
*
* The supervision strategy of the router actor can be configured with
* [[#withSupervisorStrategy]]. If no strategy is provided, routers default to
* a strategy of “always escalate”. This means that errors are passed up to the
* router's supervisor for handling.
*
* The router's supervisor will treat the error as an error with the router itself.
* Therefore a directive to stop or restart will cause the router itself to stop or
* restart. The router, in turn, will cause its children to stop and restart.
*
* @param nrOfInstances initial number of routees in the pool
*
* @param resizer optional resizer that dynamically adjust the pool size
*
* @param virtualNodesFactor number of virtual nodes per node, used in [[akka.routing.ConsistentHash]]
*
* @param hashMapping partial function from message to the data to
* use for the consistent hash key
*
* @param supervisorStrategy strategy for supervising the routees, see 'Supervision Setup'
*
* @param routerDispatcher dispatcher to use for the router head actor, which handles
* supervision, death watch and router management messages
*/
@SerialVersionUID(1L)
final case class ConsistentHashingPool(
override val nrOfInstances: Int,
override val resizer: Option[Resizer] = None,
val virtualNodesFactor: Int = 0,
val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping,
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
override val usePoolDispatcher: Boolean = false)
extends Pool with PoolOverrideUnsetConfig[ConsistentHashingPool] {
def this(config: Config) =
this(
nrOfInstances = config.getInt("nr-of-instances"),
resizer = DefaultResizer.fromConfig(config),
usePoolDispatcher = config.hasPath("pool-dispatcher"))
/**
* Java API
* @param nr initial number of routees in the pool
*/
def this(nr: Int) = this(nrOfInstances = nr)
override def createRouter(system: ActorSystem): Router =
new Router(ConsistentHashingRoutingLogic(system, virtualNodesFactor, hashMapping))
override def nrOfInstances(sys: ActorSystem) = this.nrOfInstances
/**
* Setting the supervisor strategy to be used for the “head” Router actor.
*/
def withSupervisorStrategy(strategy: SupervisorStrategy): ConsistentHashingPool = copy(supervisorStrategy = strategy)
/**
* Setting the resizer to be used.
*/
def withResizer(resizer: Resizer): ConsistentHashingPool = copy(resizer = Some(resizer))
/**
* Setting the dispatcher to be used for the router head actor, which handles
* supervision, death watch and router management messages.
*/
def withDispatcher(dispatcherId: String): ConsistentHashingPool = copy(routerDispatcher = dispatcherId)
/**
* Setting the number of virtual nodes per node, used in [[akka.routing.ConsistentHash]]
*/
def withVirtualNodesFactor(vnodes: Int): ConsistentHashingPool = copy(virtualNodesFactor = vnodes)
/**
* Java API: Setting the mapping from message to the data to use for the consistent hash key.
*/
def withHashMapper(mapper: ConsistentHashingRouter.ConsistentHashMapper): ConsistentHashingPool =
copy(hashMapping = ConsistentHashingRouter.hashMappingAdapter(mapper))
/**
* Uses the resizer and/or the supervisor strategy of the given RouterConfig
* if this RouterConfig doesn't have one, i.e. the resizer defined in code is used if
* resizer was not defined in config.
* Uses the `hashMapping` defined in code, since that can't be defined in configuration.
*/
override def withFallback(other: RouterConfig): RouterConfig = other match {
case _: FromConfig | _: NoRouter ⇒ this.overrideUnsetConfig(other)
case otherRouter: ConsistentHashingPool ⇒ (copy(hashMapping = otherRouter.hashMapping)).overrideUnsetConfig(other)
case _ ⇒ throw new IllegalArgumentException("Expected ConsistentHashingPool, got [%s]".format(other))
}
}
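/**
 * Editor's illustrative sketch (not part of the original source): creating a pool of
 * 10 routees that hashes on a hypothetical `Get(key)` message. `system`, `Get` and
 * `CacheActor` are assumed to exist in user code.
 *
 * {{{
 * def hashMapping: ConsistentHashingRouter.ConsistentHashMapping = {
 *   case Get(key) ⇒ key
 * }
 *
 * val cache = system.actorOf(
 *   ConsistentHashingPool(10, hashMapping = hashMapping).props(Props[CacheActor]),
 *   name = "cache")
 * }}}
 */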
/**
* A router group that uses consistent hashing to select a routee based on the
* sent message. The selection is described in [[akka.routing.ConsistentHashingRoutingLogic]].
*
* The configuration parameter trumps the constructor arguments. This means that
* if you provide `paths` during instantiation they will be ignored if
* the router is defined in the configuration file for the actor being used.
*
* @param paths string representation of the actor paths of the routees, messages are
* sent with [[akka.actor.ActorSelection]] to these paths
*
* @param virtualNodesFactor number of virtual nodes per node, used in [[akka.routing.ConsistentHash]]
*
* @param hashMapping partial function from message to the data to
* use for the consistent hash key
*
* @param routerDispatcher dispatcher to use for the router head actor, which handles
* router management messages
*/
@SerialVersionUID(1L)
final case class ConsistentHashingGroup(
override val paths: immutable.Iterable[String],
val virtualNodesFactor: Int = 0,
val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping,
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config) =
this(paths = immutableSeq(config.getStringList("routees.paths")))
/**
* Java API
* @param routeePaths string representation of the actor paths of the routees, messages are
* sent with [[akka.actor.ActorSelection]] to these paths
*/
def this(routeePaths: java.lang.Iterable[String]) = this(paths = immutableSeq(routeePaths))
override def createRouter(system: ActorSystem): Router =
new Router(ConsistentHashingRoutingLogic(system, virtualNodesFactor, hashMapping))
/**
* Setting the dispatcher to be used for the router head actor, which handles
* router management messages
*/
def withDispatcher(dispatcherId: String): ConsistentHashingGroup = copy(routerDispatcher = dispatcherId)
/**
* Setting the number of virtual nodes per node, used in [[akka.routing.ConsistentHash]]
*/
def withVirtualNodesFactor(vnodes: Int): ConsistentHashingGroup = copy(virtualNodesFactor = vnodes)
/**
* Java API: Setting the mapping from message to the data to use for the consistent hash key.
*/
def withHashMapper(mapper: ConsistentHashingRouter.ConsistentHashMapper): ConsistentHashingGroup =
copy(hashMapping = ConsistentHashingRouter.hashMappingAdapter(mapper))
/**
* Uses the `hashMapping` defined in code, since that can't be defined in configuration.
*/
override def withFallback(other: RouterConfig): RouterConfig = other match {
case _: FromConfig | _: NoRouter ⇒ super.withFallback(other)
case otherRouter: ConsistentHashingGroup ⇒ copy(hashMapping = otherRouter.hashMapping)
case _ ⇒ throw new IllegalArgumentException("Expected ConsistentHashingGroup, got [%s]".format(other))
}
}
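/**
 * Editor's illustrative sketch (not part of the original source): routing to existing
 * actors by path with a group router. The two paths are hypothetical.
 *
 * {{{
 * val cacheGroup = system.actorOf(
 *   ConsistentHashingGroup(List("/user/cache-a", "/user/cache-b")).props(),
 *   name = "cacheGroup")
 * }}}
 */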
/**
* INTERNAL API
* Important to use ActorRef with full address, with host and port, in the hash ring,
* so that same ring is produced on different nodes.
* The ConsistentHash uses toString of the ring nodes, and the ActorRef itself
* isn't a good representation, because LocalActorRef doesn't include the
* host and port.
*/
private[akka] final case class ConsistentRoutee(routee: Routee, selfAddress: Address) {
override def toString: String = routee match {
case ActorRefRoutee(ref) ⇒ toStringWithfullAddress(ref.path)
case ActorSelectionRoutee(sel) ⇒ toStringWithfullAddress(sel.anchorPath) + sel.pathString
case other ⇒ other.toString
}
private def toStringWithfullAddress(path: ActorPath): String = {
path.address match {
case Address(_, _, None, None) ⇒ path.toStringWithAddress(selfAddress)
case a ⇒ path.toString
}
}
}
|
jmnarloch/akka.js
|
akka-js-actor/jvm/src/main/scala/akka/routing/ConsistentHashing.scala
|
Scala
|
bsd-3-clause
| 17,278
|
package ee.cone.c4proto
import java.nio.charset.StandardCharsets.UTF_8
import ee.cone.c4di.TypeKey
import okio.ByteString
import collection.immutable.Seq
import scala.annotation.StaticAnnotation
case class Id(id: Long) extends StaticAnnotation
case class ShortName(name: String) extends StaticAnnotation
class GenLens extends StaticAnnotation
// override def toString: String =
// s"TypeKey(${if (clName.endsWith(alias)) clName else s"$clName/$alias"}${if (args.isEmpty) "" else args.map(_.toString).mkString("[",", ", "]")})"
case class MetaProp(id: Int, propName: String, propShortName: Option[String], resultType: String, typeProp: TypeKey)
trait ProtoOrigMeta {
def id: Option[Long]
def categories: List[DataCategory]
def cl: Class[_]
def shortName: Option[String]
def metaProps: List[MetaProp]
}
trait HasId {
def protoOrigMeta: ProtoOrigMeta
def id: Long = protoOrigMeta.id.getOrElse(throw new Exception("This orig has no Id"))
def hasId: Boolean = protoOrigMeta.id.nonEmpty
lazy val className: String = protoOrigMeta.cl.getName
@deprecated("Deprecated, use OrigMeta[Orig].categories", "07/04/20")
def categories: List[DataCategory] = protoOrigMeta.categories
@deprecated("Deprecated, use OrigMeta[Orig].cl", "07/04/20")
def cl: Class[_] = protoOrigMeta.cl
@deprecated("Deprecated, use OrigMeta[Orig].shortName", "07/04/20")
def shortName: Option[String] = protoOrigMeta.shortName
@deprecated("Deprecated, use OrigMeta[Orig].fieldsMeta", "07/04/20")
def props: List[MetaProp] = protoOrigMeta.metaProps
}
object ToByteString {
def apply(data: Array[Byte]): ByteString = ByteString.of(data,0,data.length)
def apply(v: String): ByteString = apply(v.getBytes(UTF_8))
}
class replaceBy[T](factory: Object) extends StaticAnnotation
abstract class ArgAdapter[Value] {
def encodedSizeWithTag (tag: Int, value: Value): Int
def encodeWithTag(writer: ProtoWriter, tag: Int, value: Value): Unit
def defaultValue: Value
def decodeReduce(reader: ProtoReader, prev: Value): Value
def decodeFix(prev: Value): Value
}
object FieldEncoding {
val LENGTH_DELIMITED = com.squareup.wire.FieldEncoding.LENGTH_DELIMITED
}
|
conecenter/c4proto
|
base_lib/src/main/scala/ee/cone/c4proto/Protocol.scala
|
Scala
|
apache-2.0
| 2,173
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Strategy
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.First
import org.apache.spark.sql.catalyst.planning._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution
import org.apache.spark.sql.execution.columnar.{InMemoryRelation, InMemoryTableScanExec}
import org.apache.spark.sql.execution.command._
import org.apache.spark.sql.execution.exchange.ShuffleExchange
import org.apache.spark.sql.execution.joins.{BuildLeft, BuildRight}
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming.StreamingQuery
/**
* Converts a logical plan into zero or more SparkPlans. This API is exposed for experimenting
* with the query planner and is not designed to be stable across spark releases. Developers
* writing libraries should instead consider using the stable APIs provided in
* [[org.apache.spark.sql.sources]]
*/
abstract class SparkStrategy extends GenericStrategy[SparkPlan] {
override protected def planLater(plan: LogicalPlan): SparkPlan = PlanLater(plan)
}
case class PlanLater(plan: LogicalPlan) extends LeafExecNode {
override def output: Seq[Attribute] = plan.output
protected override def doExecute(): RDD[InternalRow] = {
throw new UnsupportedOperationException()
}
}
abstract class SparkStrategies extends QueryPlanner[SparkPlan] {
self: SparkPlanner =>
/**
* Plans special cases of limit operators.
*/
object SpecialLimits extends Strategy {
override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case logical.ReturnAnswer(rootPlan) => rootPlan match {
case logical.Limit(IntegerLiteral(limit), logical.Sort(order, true, child)) =>
execution.TakeOrderedAndProjectExec(limit, order, child.output, planLater(child)) :: Nil
case logical.Limit(
IntegerLiteral(limit),
logical.Project(projectList, logical.Sort(order, true, child))) =>
execution.TakeOrderedAndProjectExec(
limit, order, projectList, planLater(child)) :: Nil
case logical.Limit(IntegerLiteral(limit), child) =>
execution.CollectLimitExec(limit, planLater(child)) :: Nil
case other => planLater(other) :: Nil
}
case logical.Limit(IntegerLiteral(limit), logical.Sort(order, true, child)) =>
execution.TakeOrderedAndProjectExec(limit, order, child.output, planLater(child)) :: Nil
case logical.Limit(
IntegerLiteral(limit), logical.Project(projectList, logical.Sort(order, true, child))) =>
execution.TakeOrderedAndProjectExec(
limit, order, projectList, planLater(child)) :: Nil
case _ => Nil
}
}
/**
* Select the proper physical plan for join based on joining keys and size of logical plan.
*
* At first, uses the [[ExtractEquiJoinKeys]] pattern to find joins where at least some of the
* predicates can be evaluated by matching join keys. If found, Join implementations are chosen
* with the following precedence:
*
* - Broadcast: if one side of the join has an estimated physical size that is smaller than the
* user-configurable [[SQLConf.AUTO_BROADCASTJOIN_THRESHOLD]] threshold
* or if that side has an explicit broadcast hint (e.g. the user applied the
* [[org.apache.spark.sql.functions.broadcast()]] function to a DataFrame), then that side
* of the join will be broadcasted and the other side will be streamed, with no shuffling
   *     performed. If both sides of the join are eligible to be broadcasted, the right side is
   *     preferred, since the build-right case is matched first below.
* - Shuffle hash join: if the average size of a single partition is small enough to build a hash
* table.
* - Sort merge: if the matching join keys are sortable.
*
* If there is no joining keys, Join implementations are chosen with the following precedence:
* - BroadcastNestedLoopJoin: if one side of the join could be broadcasted
* - CartesianProduct: for Inner join
* - BroadcastNestedLoopJoin
*/
object JoinSelection extends Strategy with PredicateHelper {
/**
* Matches a plan whose output should be small enough to be used in broadcast join.
*/
private def canBroadcast(plan: LogicalPlan): Boolean = {
plan.stats(conf).hints.isBroadcastable.getOrElse(false) ||
(plan.stats(conf).sizeInBytes >= 0 &&
plan.stats(conf).sizeInBytes <= conf.autoBroadcastJoinThreshold)
}
/**
* Matches a plan whose single partition should be small enough to build a hash table.
*
     * Note: this assumes that the number of partitions is fixed; it requires additional work if it's
     * dynamic.
*/
private def canBuildLocalHashMap(plan: LogicalPlan): Boolean = {
plan.stats(conf).sizeInBytes < conf.autoBroadcastJoinThreshold * conf.numShufflePartitions
}
/**
* Returns whether plan a is much smaller (3X) than plan b.
*
     * The cost of building a hash map is higher than sorting, so we should only build a hash map on a
     * table that is much smaller than the other one. Since we do not have statistics for the number of
     * rows, we use the size in bytes here as an estimate.
*/
private def muchSmaller(a: LogicalPlan, b: LogicalPlan): Boolean = {
a.stats(conf).sizeInBytes * 3 <= b.stats(conf).sizeInBytes
}
private def canBuildRight(joinType: JoinType): Boolean = joinType match {
case _: InnerLike | LeftOuter | LeftSemi | LeftAnti => true
case j: ExistenceJoin => true
case _ => false
}
private def canBuildLeft(joinType: JoinType): Boolean = joinType match {
case _: InnerLike | RightOuter => true
case _ => false
}
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
// --- BroadcastHashJoin --------------------------------------------------------------------
case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
if canBuildRight(joinType) && canBroadcast(right) =>
Seq(joins.BroadcastHashJoinExec(
leftKeys, rightKeys, joinType, BuildRight, condition, planLater(left), planLater(right)))
case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
if canBuildLeft(joinType) && canBroadcast(left) =>
Seq(joins.BroadcastHashJoinExec(
leftKeys, rightKeys, joinType, BuildLeft, condition, planLater(left), planLater(right)))
// --- ShuffledHashJoin ---------------------------------------------------------------------
case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
if !conf.preferSortMergeJoin && canBuildRight(joinType) && canBuildLocalHashMap(right)
&& muchSmaller(right, left) ||
!RowOrdering.isOrderable(leftKeys) =>
Seq(joins.ShuffledHashJoinExec(
leftKeys, rightKeys, joinType, BuildRight, condition, planLater(left), planLater(right)))
case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
if !conf.preferSortMergeJoin && canBuildLeft(joinType) && canBuildLocalHashMap(left)
&& muchSmaller(left, right) ||
!RowOrdering.isOrderable(leftKeys) =>
Seq(joins.ShuffledHashJoinExec(
leftKeys, rightKeys, joinType, BuildLeft, condition, planLater(left), planLater(right)))
// --- SortMergeJoin ------------------------------------------------------------
case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
if RowOrdering.isOrderable(leftKeys) =>
joins.SortMergeJoinExec(
leftKeys, rightKeys, joinType, condition, planLater(left), planLater(right)) :: Nil
// --- Without joining keys ------------------------------------------------------------
// Pick BroadcastNestedLoopJoin if one side could be broadcasted
case j @ logical.Join(left, right, joinType, condition)
if canBuildRight(joinType) && canBroadcast(right) =>
joins.BroadcastNestedLoopJoinExec(
planLater(left), planLater(right), BuildRight, joinType, condition) :: Nil
case j @ logical.Join(left, right, joinType, condition)
if canBuildLeft(joinType) && canBroadcast(left) =>
joins.BroadcastNestedLoopJoinExec(
planLater(left), planLater(right), BuildLeft, joinType, condition) :: Nil
// Pick CartesianProduct for InnerJoin
case logical.Join(left, right, _: InnerLike, condition) =>
joins.CartesianProductExec(planLater(left), planLater(right), condition) :: Nil
case logical.Join(left, right, joinType, condition) =>
val buildSide =
if (right.stats(conf).sizeInBytes <= left.stats(conf).sizeInBytes) {
BuildRight
} else {
BuildLeft
}
// This join could be very slow or OOM
joins.BroadcastNestedLoopJoinExec(
planLater(left), planLater(right), buildSide, joinType, condition) :: Nil
// --- Cases where this strategy does not apply ---------------------------------------------
case _ => Nil
}
}
/**
* Used to plan aggregation queries that are computed incrementally as part of a
* [[StreamingQuery]]. Currently this rule is injected into the planner
* on-demand, only when planning in a [[org.apache.spark.sql.execution.streaming.StreamExecution]]
*/
object StatefulAggregationStrategy extends Strategy {
override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case EventTimeWatermark(columnName, delay, child) =>
EventTimeWatermarkExec(columnName, delay, planLater(child)) :: Nil
case PhysicalAggregation(
namedGroupingExpressions, aggregateExpressions, rewrittenResultExpressions, child) =>
aggregate.AggUtils.planStreamingAggregation(
namedGroupingExpressions,
aggregateExpressions,
rewrittenResultExpressions,
planLater(child))
case _ => Nil
}
}
/**
* Used to plan the streaming deduplicate operator.
*/
object StreamingDeduplicationStrategy extends Strategy {
override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case Deduplicate(keys, child, true) =>
StreamingDeduplicateExec(keys, planLater(child)) :: Nil
case _ => Nil
}
}
/**
* Used to plan the aggregate operator for expressions based on the AggregateFunction2 interface.
*/
object Aggregation extends Strategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case PhysicalAggregation(
groupingExpressions, aggregateExpressions, resultExpressions, child) =>
val (functionsWithDistinct, functionsWithoutDistinct) =
aggregateExpressions.partition(_.isDistinct)
if (functionsWithDistinct.map(_.aggregateFunction.children).distinct.length > 1) {
// This is a sanity check. We should not reach here when we have multiple distinct
          // column sets. Our MultipleDistinctRewriter should take care of this case.
sys.error("You hit a query analyzer bug. Please report your query to " +
"Spark user mailing list.")
}
val aggregateOperator =
if (functionsWithDistinct.isEmpty) {
aggregate.AggUtils.planAggregateWithoutDistinct(
groupingExpressions,
aggregateExpressions,
resultExpressions,
planLater(child))
} else {
aggregate.AggUtils.planAggregateWithOneDistinct(
groupingExpressions,
functionsWithDistinct,
functionsWithoutDistinct,
resultExpressions,
planLater(child))
}
aggregateOperator
case _ => Nil
}
}
protected lazy val singleRowRdd = sparkContext.parallelize(Seq(InternalRow()), 1)
object InMemoryScans extends Strategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case PhysicalOperation(projectList, filters, mem: InMemoryRelation) =>
pruneFilterProject(
projectList,
filters,
identity[Seq[Expression]], // All filters still need to be evaluated.
InMemoryTableScanExec(_, filters, mem)) :: Nil
case _ => Nil
}
}
/**
* This strategy is just for explaining `Dataset/DataFrame` created by `spark.readStream`.
* It won't affect the execution, because `StreamingRelation` will be replaced with
* `StreamingExecutionRelation` in `StreamingQueryManager` and `StreamingExecutionRelation` will
* be replaced with the real relation using the `Source` in `StreamExecution`.
*/
object StreamingRelationStrategy extends Strategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case s: StreamingRelation =>
StreamingRelationExec(s.sourceName, s.output) :: Nil
case s: StreamingExecutionRelation =>
StreamingRelationExec(s.toString, s.output) :: Nil
case _ => Nil
}
}
/**
* Strategy to convert [[FlatMapGroupsWithState]] logical operator to physical operator
* in streaming plans. Conversion for batch plans is handled by [[BasicOperators]].
*/
object FlatMapGroupsWithStateStrategy extends Strategy {
override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case FlatMapGroupsWithState(
func, keyDeser, valueDeser, groupAttr, dataAttr, outputAttr, stateEnc, outputMode, _,
timeout, child) =>
val execPlan = FlatMapGroupsWithStateExec(
func, keyDeser, valueDeser, groupAttr, dataAttr, outputAttr, None, stateEnc, outputMode,
timeout, batchTimestampMs = None, eventTimeWatermark = None, planLater(child))
execPlan :: Nil
case _ =>
Nil
}
}
// Can we automate these 'pass through' operations?
object BasicOperators extends Strategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case r: RunnableCommand => ExecutedCommandExec(r) :: Nil
case MemoryPlan(sink, output) =>
val encoder = RowEncoder(sink.schema)
LocalTableScanExec(output, sink.allData.map(r => encoder.toRow(r).copy())) :: Nil
case logical.Distinct(child) =>
throw new IllegalStateException(
"logical distinct operator should have been replaced by aggregate in the optimizer")
case logical.Intersect(left, right) =>
throw new IllegalStateException(
"logical intersect operator should have been replaced by semi-join in the optimizer")
case logical.Except(left, right) =>
throw new IllegalStateException(
"logical except operator should have been replaced by anti-join in the optimizer")
case logical.DeserializeToObject(deserializer, objAttr, child) =>
execution.DeserializeToObjectExec(deserializer, objAttr, planLater(child)) :: Nil
case logical.SerializeFromObject(serializer, child) =>
execution.SerializeFromObjectExec(serializer, planLater(child)) :: Nil
case logical.MapPartitions(f, objAttr, child) =>
execution.MapPartitionsExec(f, objAttr, planLater(child)) :: Nil
case logical.MapPartitionsInR(f, p, b, is, os, objAttr, child) =>
execution.MapPartitionsExec(
execution.r.MapPartitionsRWrapper(f, p, b, is, os), objAttr, planLater(child)) :: Nil
case logical.FlatMapGroupsInR(f, p, b, is, os, key, value, grouping, data, objAttr, child) =>
execution.FlatMapGroupsInRExec(f, p, b, is, os, key, value, grouping,
data, objAttr, planLater(child)) :: Nil
case logical.MapElements(f, _, _, objAttr, child) =>
execution.MapElementsExec(f, objAttr, planLater(child)) :: Nil
case logical.AppendColumns(f, _, _, in, out, child) =>
execution.AppendColumnsExec(f, in, out, planLater(child)) :: Nil
case logical.AppendColumnsWithObject(f, childSer, newSer, child) =>
execution.AppendColumnsWithObjectExec(f, childSer, newSer, planLater(child)) :: Nil
case logical.MapGroups(f, key, value, grouping, data, objAttr, child) =>
execution.MapGroupsExec(f, key, value, grouping, data, objAttr, planLater(child)) :: Nil
case logical.FlatMapGroupsWithState(
f, key, value, grouping, data, output, _, _, _, timeout, child) =>
execution.MapGroupsExec(
f, key, value, grouping, data, output, timeout, planLater(child)) :: Nil
case logical.CoGroup(f, key, lObj, rObj, lGroup, rGroup, lAttr, rAttr, oAttr, left, right) =>
execution.CoGroupExec(
f, key, lObj, rObj, lGroup, rGroup, lAttr, rAttr, oAttr,
planLater(left), planLater(right)) :: Nil
case logical.Repartition(numPartitions, shuffle, child) =>
if (shuffle) {
ShuffleExchange(RoundRobinPartitioning(numPartitions), planLater(child)) :: Nil
} else {
execution.CoalesceExec(numPartitions, planLater(child)) :: Nil
}
case logical.Sort(sortExprs, global, child) =>
execution.SortExec(sortExprs, global, planLater(child)) :: Nil
case logical.Project(projectList, child) =>
execution.ProjectExec(projectList, planLater(child)) :: Nil
case logical.Filter(condition, child) =>
execution.FilterExec(condition, planLater(child)) :: Nil
case f: logical.TypedFilter =>
execution.FilterExec(f.typedCondition(f.deserializer), planLater(f.child)) :: Nil
case e @ logical.Expand(_, _, child) =>
execution.ExpandExec(e.projections, e.output, planLater(child)) :: Nil
case logical.Window(windowExprs, partitionSpec, orderSpec, child) =>
execution.window.WindowExec(windowExprs, partitionSpec, orderSpec, planLater(child)) :: Nil
case logical.Sample(lb, ub, withReplacement, seed, child) =>
execution.SampleExec(lb, ub, withReplacement, seed, planLater(child)) :: Nil
case logical.LocalRelation(output, data) =>
LocalTableScanExec(output, data) :: Nil
case logical.LocalLimit(IntegerLiteral(limit), child) =>
execution.LocalLimitExec(limit, planLater(child)) :: Nil
case logical.GlobalLimit(IntegerLiteral(limit), child) =>
execution.GlobalLimitExec(limit, planLater(child)) :: Nil
case logical.Union(unionChildren) =>
execution.UnionExec(unionChildren.map(planLater)) :: Nil
case g @ logical.Generate(generator, join, outer, _, _, child) =>
execution.GenerateExec(
generator, join = join, outer = outer, g.qualifiedGeneratorOutput,
planLater(child)) :: Nil
case logical.OneRowRelation =>
execution.RDDScanExec(Nil, singleRowRdd, "OneRowRelation") :: Nil
case r: logical.Range =>
execution.RangeExec(r) :: Nil
case logical.RepartitionByExpression(expressions, child, numPartitions) =>
exchange.ShuffleExchange(HashPartitioning(
expressions, numPartitions), planLater(child)) :: Nil
case ExternalRDD(outputObjAttr, rdd) => ExternalRDDScanExec(outputObjAttr, rdd) :: Nil
case r: LogicalRDD =>
RDDScanExec(r.output, r.rdd, "ExistingRDD", r.outputPartitioning, r.outputOrdering) :: Nil
case h: ResolvedHint => planLater(h.child) :: Nil
case _ => Nil
}
}
}
|
mzl9039/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
|
Scala
|
apache-2.0
| 20,428
|
package com.github.meln1k.reactive.telegrambot.api
import java.util.UUID
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.HttpMethods._
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import com.github.meln1k.reactive.telegrambot.models._
import TelegramBotJsonProtocol._
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import spray.json._
import ApiHelper._
import akka.NotUsed
import scala.concurrent.Future
import scala.util.{Failure, Success, Try}
class TelegramRequestConsumer(private val token: String)(implicit actorSystem: ActorSystem, materializer: Materializer) {
import actorSystem.dispatcher
private val availableProcessors = Runtime.getRuntime.availableProcessors()
private def createEntity(params: Map[String, Any]): Future[RequestEntity] = {
def stringBodyPart(name: String, value: Any) = Multipart.FormData.BodyPart(
name,
HttpEntity(value.toString)
)
val formData =
Multipart.FormData(
params.toSeq.map { case (name, value) =>
value match {
case l: Long => stringBodyPart(name, l)
case s: String => stringBodyPart(name, s)
case b: Boolean => stringBodyPart(name, b)
case d: Double => stringBodyPart(name, d)
case rm: ReplyMarkup => stringBodyPart(name, rm.toJson.compactPrint)
case InputFile(file) =>
Multipart.FormData.BodyPart.fromFile(
name,
MediaTypes.`application/octet-stream`,
file,
100000
)
case StreamedInputFile(fileName, contentType, length, dataBytes) =>
Multipart.FormData.BodyPart(
name,
HttpEntity(
contentType,
length,
dataBytes
),
Map("filename" -> fileName)
)
}
}: _*
)
Marshal(formData).to[RequestEntity]
}
private val apiRequestToHttpRequest: Flow[ApiRequest, (HttpRequest, UUID), NotUsed] =
Flow[ApiRequest].mapAsyncUnordered(4) { case ApiRequest(method, id) =>
val filteredParams = method.allParams.filterNot(_._2 == None).map {
case (name, Some(value)) => name -> value
case e@(name, value) => e
}
val entity = createEntity(filteredParams)
      val uri = apiUri(method.name, token)
      entity.map { e =>
        HttpRequest(method = POST, uri = uri, entity = e) -> id
}
}
private val http = Http().superPool[UUID]()
private val httpResponseToApiResponse: Flow[(Try[HttpResponse], UUID), ApiResponse, NotUsed] =
Flow[(Try[HttpResponse], UUID)]
.mapAsync(availableProcessors) { case (tryResponse, id) =>
tryResponse.map { response =>
Unmarshal(response.entity).to[Response].map { resp =>
ApiResponse(id, Success(resp))
}
}.recover{
case e => Future.successful(ApiResponse(id, Failure(e)))
}.get
}
private val apiFlow: Flow[ApiRequest, ApiResponse, NotUsed] =
Flow[ApiRequest]
.via(apiRequestToHttpRequest)
.via(http)
.via(httpResponseToApiResponse)
def singleRequest(request: ApiRequest): Future[ApiResponse] =
Source.single(request)
.via(apiFlow)
.toMat(Sink.head)(Keep.right)
.run()
def flow: Flow[ApiRequest, ApiResponse, NotUsed] = apiFlow
def processor = apiFlow.toProcessor.run()
}
object TelegramRequestConsumer {
def apply(token: String)(implicit actorSystem: ActorSystem, materializer: Materializer) = new TelegramRequestConsumer(token)
}
|
meln1k/reactive-telegrambot
|
src/main/scala/com/github/meln1k/reactive/telegrambot/api/TelegramRequestConsumer.scala
|
Scala
|
mit
| 3,817
|
package shredzzz.kirkwood.junit.tests.vector.ints
import org.junit.Test
import shredzzz.kirkwood.cumath.CuValue
import shredzzz.kirkwood.cumath.tensor.CuVector
import shredzzz.kirkwood.junit._
class CuVectorBitwiseOpsTest extends IntCuVectorTester
{
@Test def testVal_unary_:~@() {
withCuContext(
implicit ctx => {
testMapFunc(
xArr = Array(1, 0, 5),
expected = Array(-2, -1, -6),
(x: CuVector[Int], res: CuVector[Int]) => {
res =: x.unary_:~@()
}
)
}
)
}
@Test def testVal_:&@() {
withCuContext(
implicit ctx => {
testVal(
xArr = Array(1, 0, 5),
cVal = 3,
expected = Array(1, 0, 1),
(x: CuVector[Int], c: CuValue[Int], res: CuVector[Int]) => {
res =: x :&@ c
}
)
}
)
}
@Test def testVal_:|@() {
withCuContext(
implicit ctx => {
testVal(
xArr = Array(1, 0, 5),
cVal = 3,
expected = Array(3, 3, 7),
(x: CuVector[Int], c: CuValue[Int], res: CuVector[Int]) => {
res =: x :|@ c
}
)
}
)
}
@Test def testVal_:^@() {
withCuContext(
implicit ctx => {
testVal(
xArr = Array(1, 0, 5),
cVal = 3,
expected = Array(2, 3, 6),
(x: CuVector[Int], c: CuValue[Int], res: CuVector[Int]) => {
res =: x :^|@ c
}
)
}
)
}
@Test def testVal_:>>@() {
withCuContext(
implicit ctx => {
testVal(
xArr = Array(10, 0, 5),
cVal = 1,
expected = Array(5, 0, 2),
(x: CuVector[Int], c: CuValue[Int], res: CuVector[Int]) => {
res =: x :>>@ c
}
)
}
)
}
@Test def testVal_:<<@() {
withCuContext(
implicit ctx => {
testVal(
xArr = Array(10, 0, 5),
cVal = 1,
expected = Array(20, 0, 10),
(x: CuVector[Int], c: CuValue[Int], res: CuVector[Int]) => {
res =: x :<<@ c
}
)
}
)
}
@Test def testMat_:&@() {
withCuContext(
implicit ctx => {
testVec(
xArr = Array(1, 0, 5),
yArr = Array(7, 1, 4),
expected = Array(1, 0, 4),
(x: CuVector[Int], y: CuVector[Int], res: CuVector[Int]) => {
res =: x :&@ y
}
)
}
)
}
@Test def testMat_:|@() {
withCuContext(
implicit ctx => {
testVec(
xArr = Array(1, 0, 5),
yArr = Array(7, 1, 4),
expected = Array(7, 1, 5),
(x: CuVector[Int], y: CuVector[Int], res: CuVector[Int]) => {
res =: x :|@ y
}
)
}
)
}
@Test def testMat_:^@() {
withCuContext(
implicit ctx => {
testVec(
xArr = Array(1, 0, 5),
yArr = Array(7, 1, 4),
expected = Array(6, 1, 1),
(x: CuVector[Int], y: CuVector[Int], res: CuVector[Int]) => {
res =: x :^|@ y
}
)
}
)
}
@Test def testMat_:>>@() {
withCuContext(
implicit ctx => {
testVec(
xArr = Array(10, 0, 5),
yArr = Array(2, 1, 1),
expected = Array(2, 0, 2),
(x: CuVector[Int], y: CuVector[Int], res: CuVector[Int]) => {
res =: x :>>@ y
}
)
}
)
}
@Test def testMat_:<<@() {
withCuContext(
implicit ctx => {
testVec(
xArr = Array(10, 0, 5),
yArr = Array(2, 1, 1),
expected = Array(40, 0, 10),
(x: CuVector[Int], y: CuVector[Int], res: CuVector[Int]) => {
res =: x :<<@ y
}
)
}
)
}
@Test def testVal_:&=() {
withCuContext(
implicit ctx => {
testValAssign(
resArr = Array(1, 0, 5),
cVal = 3,
expected = Array(1, 0, 1),
(c: CuValue[Int], res: CuVector[Int]) => {
res :&= c
}
)
}
)
}
@Test def testVal_:|=() {
withCuContext(
implicit ctx => {
testValAssign(
resArr = Array(1, 0, 5),
cVal = 3,
expected = Array(3, 3, 7),
(c: CuValue[Int], res: CuVector[Int]) => {
res :|= c
}
)
}
)
}
@Test def testVal_:^=() {
withCuContext(
implicit ctx => {
testValAssign(
resArr = Array(1, 0, 5),
cVal = 3,
expected = Array(2, 3, 6),
(c: CuValue[Int], res: CuVector[Int]) => {
res :^|= c
}
)
}
)
}
@Test def testVal_:>>=() {
withCuContext(
implicit ctx => {
testValAssign(
resArr = Array(10, 0, 5),
cVal = 1,
expected = Array(5, 0, 2),
(c: CuValue[Int], res: CuVector[Int]) => {
res :>>= c
}
)
}
)
}
@Test def testVal_:<<=() {
withCuContext(
implicit ctx => {
testValAssign(
resArr = Array(10, 0, 5),
cVal = 1,
expected = Array(20, 0, 10),
(c: CuValue[Int], res: CuVector[Int]) => {
res :<<= c
}
)
}
)
}
@Test def testMat_:&=() {
withCuContext(
implicit ctx => {
testVecAssign(
resArr = Array(1, 0, 5),
yArr = Array(7, 1, 4),
expected = Array(1, 0, 4),
(y: CuVector[Int], res: CuVector[Int]) => {
res :&= y
}
)
}
)
}
@Test def testMat_:|=() {
withCuContext(
implicit ctx => {
testVecAssign(
resArr = Array(1, 0, 5),
yArr = Array(7, 1, 4),
expected = Array(7, 1, 5),
(y: CuVector[Int], res: CuVector[Int]) => {
res :|= y
}
)
}
)
}
@Test def testMat_:^=() {
withCuContext(
implicit ctx => {
testVecAssign(
resArr = Array(1, 0, 5),
yArr = Array(7, 1, 4),
expected = Array(6, 1, 1),
(y: CuVector[Int], res: CuVector[Int]) => {
res :^|= y
}
)
}
)
}
@Test def testMat_:>>=() {
withCuContext(
implicit ctx => {
testVecAssign(
resArr = Array(10, 0, 5),
yArr = Array(2, 1, 1),
expected = Array(2, 0, 2),
(y: CuVector[Int], res: CuVector[Int]) => {
res :>>= y
}
)
}
)
}
@Test def testMat_:<<=() {
withCuContext(
implicit ctx => {
testVecAssign(
resArr = Array(10, 0, 5),
yArr = Array(2, 1, 1),
expected = Array(40, 0, 10),
(y: CuVector[Int], res: CuVector[Int]) => {
res :<<= y
}
)
}
)
}
@Test def testVal_unary_~@() {
withCuContext(
implicit ctx => {
testMapFunc(
xArr = Array(1, 0, 5),
expected = Array(-2, -1, -6),
(x: CuVector[Int], res: CuVector[Int]) => {
res =: x.unary_~@()
}
)
}
)
}
@Test def testVal_&@() {
withCuContext(
implicit ctx => {
testVal(
xArr = Array(1, 0, 5),
cVal = 3,
expected = Array(1, 0, 1),
(x: CuVector[Int], c: CuValue[Int], res: CuVector[Int]) => {
res =: (x &@ c)
}
)
}
)
}
@Test def testVal_|@() {
withCuContext(
implicit ctx => {
testVal(
xArr = Array(1, 0, 5),
cVal = 3,
expected = Array(3, 3, 7),
(x: CuVector[Int], c: CuValue[Int], res: CuVector[Int]) => {
res =: (x |@ c)
}
)
}
)
}
@Test def testVal_^@() {
withCuContext(
implicit ctx => {
testVal(
xArr = Array(1, 0, 5),
cVal = 3,
expected = Array(2, 3, 6),
(x: CuVector[Int], c: CuValue[Int], res: CuVector[Int]) => {
res =: (x ^|@ c)
}
)
}
)
}
@Test def testVal_>>@() {
withCuContext(
implicit ctx => {
testVal(
xArr = Array(10, 0, 5),
cVal = 1,
expected = Array(5, 0, 2),
(x: CuVector[Int], c: CuValue[Int], res: CuVector[Int]) => {
res =: (x >>@ c)
}
)
}
)
}
@Test def testVal_<<@() {
withCuContext(
implicit ctx => {
testVal(
xArr = Array(10, 0, 5),
cVal = 1,
expected = Array(20, 0, 10),
(x: CuVector[Int], c: CuValue[Int], res: CuVector[Int]) => {
res =: (x <<@ c)
}
)
}
)
}
@Test def testMat_&@() {
withCuContext(
implicit ctx => {
testVec(
xArr = Array(1, 0, 5),
yArr = Array(7, 1, 4),
expected = Array(1, 0, 4),
(x: CuVector[Int], y: CuVector[Int], res: CuVector[Int]) => {
res =: (x &@ y)
}
)
}
)
}
@Test def testMat_|@() {
withCuContext(
implicit ctx => {
testVec(
xArr = Array(1, 0, 5),
yArr = Array(7, 1, 4),
expected = Array(7, 1, 5),
(x: CuVector[Int], y: CuVector[Int], res: CuVector[Int]) => {
res =: (x |@ y)
}
)
}
)
}
@Test def testMat_^@() {
withCuContext(
implicit ctx => {
testVec(
xArr = Array(1, 0, 5),
yArr = Array(7, 1, 4),
expected = Array(6, 1, 1),
(x: CuVector[Int], y: CuVector[Int], res: CuVector[Int]) => {
res =: (x ^|@ y)
}
)
}
)
}
@Test def testMat_>>@() {
withCuContext(
implicit ctx => {
testVec(
xArr = Array(10, 0, 5),
yArr = Array(2, 1, 1),
expected = Array(2, 0, 2),
(x: CuVector[Int], y: CuVector[Int], res: CuVector[Int]) => {
res =: (x >>@ y)
}
)
}
)
}
@Test def testMat_<<@() {
withCuContext(
implicit ctx => {
testVec(
xArr = Array(10, 0, 5),
yArr = Array(2, 1, 1),
expected = Array(40, 0, 10),
(x: CuVector[Int], y: CuVector[Int], res: CuVector[Int]) => {
res =: (x <<@ y)
}
)
}
)
}
@Test def testVal_&=() {
withCuContext(
implicit ctx => {
testValAssign(
resArr = Array(1, 0, 5),
cVal = 3,
expected = Array(1, 0, 1),
(c: CuValue[Int], res: CuVector[Int]) => {
res &= c
}
)
}
)
}
@Test def testVal_|=() {
withCuContext(
implicit ctx => {
testValAssign(
resArr = Array(1, 0, 5),
cVal = 3,
expected = Array(3, 3, 7),
(c: CuValue[Int], res: CuVector[Int]) => {
res |= c
}
)
}
)
}
@Test def testVal_^=() {
withCuContext(
implicit ctx => {
testValAssign(
resArr = Array(1, 0, 5),
cVal = 3,
expected = Array(2, 3, 6),
(c: CuValue[Int], res: CuVector[Int]) => {
res ^|= c
}
)
}
)
}
@Test def testVal_>>=() {
withCuContext(
implicit ctx => {
testValAssign(
resArr = Array(10, 0, 5),
cVal = 1,
expected = Array(5, 0, 2),
(c: CuValue[Int], res: CuVector[Int]) => {
res >>= c
}
)
}
)
}
@Test def testVal_<<=() {
withCuContext(
implicit ctx => {
testValAssign(
resArr = Array(10, 0, 5),
cVal = 1,
expected = Array(20, 0, 10),
(c: CuValue[Int], res: CuVector[Int]) => {
res <<= c
}
)
}
)
}
@Test def testMat_&=() {
withCuContext(
implicit ctx => {
testVecAssign(
resArr = Array(1, 0, 5),
yArr = Array(7, 1, 4),
expected = Array(1, 0, 4),
(y: CuVector[Int], res: CuVector[Int]) => {
res &= y
}
)
}
)
}
@Test def testMat_|=() {
withCuContext(
implicit ctx => {
testVecAssign(
resArr = Array(1, 0, 5),
yArr = Array(7, 1, 4),
expected = Array(7, 1, 5),
(y: CuVector[Int], res: CuVector[Int]) => {
res |= y
}
)
}
)
}
@Test def testMat_^=() {
withCuContext(
implicit ctx => {
testVecAssign(
resArr = Array(1, 0, 5),
yArr = Array(7, 1, 4),
expected = Array(6, 1, 1),
(y: CuVector[Int], res: CuVector[Int]) => {
res ^|= y
}
)
}
)
}
@Test def testMat_>>=() {
withCuContext(
implicit ctx => {
testVecAssign(
resArr = Array(10, 0, 5),
yArr = Array(2, 1, 1),
expected = Array(2, 0, 2),
(y: CuVector[Int], res: CuVector[Int]) => {
res >>= y
}
)
}
)
}
@Test def testMat_<<=() {
withCuContext(
implicit ctx => {
testVecAssign(
resArr = Array(10, 0, 5),
yArr = Array(2, 1, 1),
expected = Array(40, 0, 10),
(y: CuVector[Int], res: CuVector[Int]) => {
res <<= y
}
)
}
)
}
}
|
shredzzz/kirkwood
|
src/test/scala/shredzzz/kirkwood/junit/tests/vector/ints/CuVectorBitwiseOpsTest.scala
|
Scala
|
apache-2.0
| 13,560
|
package com.github.vladminzatu.surfer
import com.github.vladminzatu.surfer.persist.{UpdatePersistenceStrategy, RestPersister}
import org.apache.spark._
import org.apache.spark.streaming._
object App {
val checkpointDir = "spark-checkpoint"
val mappingFunc = (key: String, timestamp: Option[Long], state: State[Score]) => {
if(timestamp.isDefined){
val score = state.getOption.getOrElse(Score(timestamp.get)) + timestamp.get
val output = (key, score)
state.update(score)
output
} else{
(key, state.get)
}
}
def main(args: Array[String]) {
val conf = new SparkConf().setMaster("local[4]").setAppName("Surfer")
val ssc = new StreamingContext(conf, Seconds(1))
val persistenceMode = new UpdatePersistenceStrategy(new RestPersister)
ssc.checkpoint(checkpointDir)
val events = ssc.socketTextStream("localhost", 9999)
val scores = events.map(x => (x, System.currentTimeMillis))
val initialRdd = ssc.sparkContext.parallelize(Array[(String, Score)]())
val stateDstream = scores.mapWithState(
StateSpec.function(mappingFunc).initialState(initialRdd).timeout(Minutes(60 * 24 * 10)))
stateDstream.checkpoint(Seconds(30))
persistenceMode.apply(stateDstream)
ssc.start()
ssc.awaitTermination()
}
}
|
VladMinzatu/surfer
|
src/main/scala/com/github/vladminzatu/surfer/App.scala
|
Scala
|
mit
| 1,304
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.dataflow.server.coordinator
import wvlet.dataflow.api.internal.Cluster.{Node, NodeInfo}
import wvlet.log.LogSupport
import java.net.InetAddress
import java.time.Instant
import java.util.concurrent.ConcurrentHashMap
/**
  * Tracks the worker nodes known to this coordinator, based on the heartbeats they report.
  */
class NodeManager(coordinatorConfig: CoordinatorConfig) extends LogSupport {
import scala.jdk.CollectionConverters._
private val self: Node = {
val localHost = InetAddress.getLocalHost
val localAddr = s"${localHost.getHostAddress}:${coordinatorConfig.serverAddress.port}"
Node(name = coordinatorConfig.name, address = localAddr, isCoordinator = true, startedAt = Instant.now())
}
private val heartBeatRecord = new ConcurrentHashMap[Node, Instant]().asScala
def heartBeat(node: Node): Unit = {
heartBeatRecord.getOrElseUpdate(
node, {
info(s"A new worker node is joined: ${node}")
Instant.now()
}
)
heartBeatRecord.put(node, Instant.now())
}
def listNodes: Seq[NodeInfo] = {
val b = Seq.newBuilder[NodeInfo]
b += NodeInfo(self, Instant.now())
heartBeatRecord.foreach { case (n, hb) => b += NodeInfo(n, hb) }
b.result()
}
def listWorkerNodes: Seq[NodeInfo] = listNodes.filterNot(_.isCoordinator)
}
|
wvlet/wvlet
|
wvlet-server/src/main/scala/wvlet/dataflow/server/coordinator/NodeManager.scala
|
Scala
|
apache-2.0
| 1,789
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.atlas.typesystem.json
import java.text.SimpleDateFormat
import com.google.common.collect.ImmutableList
import org.apache.atlas.AtlasException
import org.apache.atlas.typesystem.TypesDef
import org.apache.atlas.typesystem.types.DataTypes.{ArrayType, MapType, TypeCategory}
import org.apache.atlas.typesystem.types._
import org.json4s.JsonAST.JString
import org.json4s._
import org.json4s.native.Serialization._
import com.google.common.collect.ImmutableSet
/**
* Module for serializing to/from Json.
*
* @example {{{
* val j = TypesSerialization.toJson(typeSystem, "Employee", "Person", "Department", "SecurityClearance")
*
* val typesDef = TypesSerialization.fromJson(jsonStr)
* typesDef.enumTypes.foreach( typeSystem.defineEnumType(_))
 * typeSystem.defineTypes(ImmutableList.copyOf(typesDef.structTypes.toArray),
 *   ImmutableList.copyOf(typesDef.traitTypes.toArray),
 *   ImmutableList.copyOf(typesDef.classTypes.toArray)
 * )
* }}}
*
* @todo doesn't traverse includes directives. Includes are parsed into
* [[org.apache.atlas.tools.thrift.IncludeDef IncludeDef]] structures
* but are not traversed.
* @todo mixing in [[scala.util.parsing.combinator.PackratParsers PackratParsers]] is a placeholder. Need to
* change specific grammar rules to `lazy val` and `Parser[Elem]` to `PackratParser[Elem]`. Will do based on
* performance analysis.
* @todo Error reporting
*/
object TypesSerialization {
def toJsonValue(typ: IDataType[_])(implicit formats: Formats): JValue = {
typ.getTypeCategory match {
case TypeCategory.CLASS => {
Extraction.decompose(convertClassTypeToHierarchicalTypeDefinition(typ.asInstanceOf[ClassType]))
}
case TypeCategory.STRUCT => {
Extraction.decompose(convertStructTypeToStructDef(typ.asInstanceOf[StructType]))
}
case TypeCategory.TRAIT => {
Extraction.decompose(convertTraitTypeToHierarchicalTypeDefinition(typ.asInstanceOf[TraitType]))
}
case TypeCategory.ENUM => {
Extraction.decompose(convertEnumTypeToEnumTypeDef(typ.asInstanceOf[EnumType]))
}
case _ => JString(s"${typ.getName}")
}
}
def toJson(ts: TypeSystem, typName: String): String = {
toJson(ts, List(typName): _*)
}
def toJson(ts: TypeSystem, typNames: String*): String = {
toJson(ts, (typ: IDataType[_]) => typNames.contains(typ.getName))
}
import scala.collection.JavaConversions._
def toJson(ts: TypeSystem, typNames: java.util.List[String]): String = {
toJson(ts, typNames.toIndexedSeq: _*)
}
val _formats = new DefaultFormats {
override val dateFormatter = TypeSystem.getInstance().getDateFormat.asInstanceOf[SimpleDateFormat]
override val typeHints = NoTypeHints
}
def toJson(ts: TypeSystem, export: IDataType[_] => Boolean): String = {
implicit val formats = _formats + new MultiplicitySerializer
val typsDef = convertToTypesDef(ts, export)
writePretty(typsDef)
}
def fromJson(jsonStr: String): TypesDef = {
implicit val formats = _formats + new MultiplicitySerializer
read[TypesDef](jsonStr)
}
def toJson(typesDef : TypesDef) : String = {
implicit val formats = _formats + new MultiplicitySerializer
writePretty(typesDef)
}
def toJson(enumTypeDefinition: EnumTypeDefinition) : String = {
toJson(new TypesDef(enumTypeDefinition))
}
def toJson(structTypeDefinition: StructTypeDefinition) : String = {
toJson(new TypesDef(structTypeDefinition))
}
def toJson(typDef: HierarchicalTypeDefinition[_], isTrait : Boolean) : String = {
toJson(new TypesDef(typDef, isTrait))
}
private def convertAttributeInfoToAttributeDef(aInfo: AttributeInfo) = {
new AttributeDefinition(aInfo.name, aInfo.dataType().getName, aInfo.multiplicity,
aInfo.isComposite, aInfo.isUnique, aInfo.isIndexable, aInfo.reverseAttributeName)
}
private def convertEnumTypeToEnumTypeDef(et: EnumType) = {
val eVals: Seq[EnumValue] = et.valueMap.values().toSeq
new EnumTypeDefinition(et.name, et.description, eVals: _*)
}
private def convertStructTypeToStructDef(st: StructType): StructTypeDefinition = {
val aDefs: Iterable[AttributeDefinition] =
st.fieldMapping.fields.values().map(convertAttributeInfoToAttributeDef(_))
new StructTypeDefinition(st.name, st.description, aDefs.toArray)
}
private def convertTraitTypeToHierarchicalTypeDefinition(tt: TraitType): HierarchicalTypeDefinition[TraitType] = {
val aDefs: Iterable[AttributeDefinition] =
tt.immediateAttrs.map(convertAttributeInfoToAttributeDef(_))
new HierarchicalTypeDefinition[TraitType](classOf[TraitType], tt.name, tt.description, tt.superTypes, aDefs.toArray)
}
private def convertClassTypeToHierarchicalTypeDefinition(tt: ClassType): HierarchicalTypeDefinition[ClassType] = {
val aDefs: Iterable[AttributeDefinition] =
tt.immediateAttrs.map(convertAttributeInfoToAttributeDef(_))
new HierarchicalTypeDefinition[ClassType](classOf[ClassType], tt.name, tt.description, tt.superTypes, aDefs.toArray)
}
def convertToTypesDef(ts: TypeSystem, export: IDataType[_] => Boolean): TypesDef = {
var enumTypes: Seq[EnumTypeDefinition] = Nil
var structTypes: Seq[StructTypeDefinition] = Nil
var traitTypes: Seq[HierarchicalTypeDefinition[TraitType]] = Nil
var classTypes: Seq[HierarchicalTypeDefinition[ClassType]] = Nil
def toTyp(nm: String) = ts.getDataType(classOf[IDataType[_]], nm)
val typs: Iterable[IDataType[_]] = ts.getTypeNames.map(toTyp(_)).filter { (typ: IDataType[_]) =>
!(ts.getCoreTypes.contains(typ.getName)) && export(typ)
}
typs.foreach {
case typ: ArrayType => ()
case typ: MapType => ()
case typ: EnumType => enumTypes = enumTypes :+ convertEnumTypeToEnumTypeDef(typ)
case typ: StructType => structTypes = structTypes :+ convertStructTypeToStructDef(typ)
case typ: TraitType => traitTypes = traitTypes :+ convertTraitTypeToHierarchicalTypeDefinition(typ)
case typ: ClassType => classTypes = classTypes :+ convertClassTypeToHierarchicalTypeDefinition(typ)
}
TypesDef(enumTypes, structTypes, traitTypes, classTypes)
}
}
class MultiplicitySerializer extends CustomSerializer[Multiplicity](format => ( {
case JString(m) => m match {
case "optional" => Multiplicity.OPTIONAL
case "required" => Multiplicity.REQUIRED
case "collection" => Multiplicity.COLLECTION
case "set" => Multiplicity.SET
}
}, {
case m: Multiplicity => JString(m match {
case Multiplicity.OPTIONAL => "optional"
case Multiplicity.REQUIRED => "required"
case Multiplicity.COLLECTION => "collection"
case Multiplicity.SET => "set"
}
)
}
))
trait TypeHelpers {
def requiredAttr(name: String, dataType: IDataType[_]) =
new AttributeDefinition(name, dataType.getName, Multiplicity.REQUIRED, false, null)
def optionalAttr(name: String, dataTypeName: String) =
new AttributeDefinition(name, dataTypeName, Multiplicity.OPTIONAL, false, null)
def optionalAttr(name: String, dataType: IDataType[_]) =
new AttributeDefinition(name, dataType.getName, Multiplicity.OPTIONAL, false, null)
def structDef(name: String, attrs: AttributeDefinition*):
StructTypeDefinition = {
structDef(name, None, attrs:_*)
}
def structDef(name: String, description: Option[String], attrs: AttributeDefinition*) = {
new StructTypeDefinition(name, description.getOrElse(null), attrs.toArray)
}
def defineTraits(ts: TypeSystem, tDefs: HierarchicalTypeDefinition[TraitType]*) = {
ts.defineTraitTypes(tDefs: _*)
}
def createTraitTypeDef(name: String, superTypes: Seq[String], attrDefs: AttributeDefinition*):
HierarchicalTypeDefinition[TraitType] = {
createTraitTypeDef(name, None, superTypes, attrDefs:_*)
}
def createTraitTypeDef(name: String, description: Option[String], superTypes: Seq[String], attrDefs: AttributeDefinition*):
HierarchicalTypeDefinition[TraitType] = {
val sts = ImmutableSet.copyOf(superTypes.toArray)
return new HierarchicalTypeDefinition[TraitType](classOf[TraitType], name, description.getOrElse(null),
sts, attrDefs.toArray)
}
def createClassTypeDef(name: String, superTypes: Seq[String], attrDefs: AttributeDefinition*):
HierarchicalTypeDefinition[ClassType] = {
createClassTypeDef( name, None, superTypes, attrDefs:_*)
}
def createClassTypeDef(name: String, description: Option[String], superTypes: Seq[String], attrDefs: AttributeDefinition*):
HierarchicalTypeDefinition[ClassType] = {
val sts = ImmutableSet.copyOf(superTypes.toArray)
return new HierarchicalTypeDefinition[ClassType](classOf[ClassType], name, description.getOrElse(null),
sts, attrDefs.toArray)
}
@throws(classOf[AtlasException])
def defineClassType(ts: TypeSystem, classDef: HierarchicalTypeDefinition[ClassType]): ClassType = {
ts.defineTypes(ImmutableList.of[EnumTypeDefinition], ImmutableList.of[StructTypeDefinition],
ImmutableList.of[HierarchicalTypeDefinition[TraitType]],
ImmutableList.of[HierarchicalTypeDefinition[ClassType]](classDef))
return ts.getDataType(classOf[ClassType], classDef.typeName)
}
}
|
jnhagelberg/incubator-atlas
|
typesystem/src/main/scala/org/apache/atlas/typesystem/json/TypesSerialization.scala
|
Scala
|
apache-2.0
| 10,580
|
package com.git.huanghaifeng.spark.load
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import com.datastax.spark.connector._
object LoadCassandra {
def main(args: Array[String]) {
val sparkMaster = args(0)
val cassandraHost = args(1)
val conf = new SparkConf(true)
.set("spark.cassandra.connection.host", cassandraHost)
val sc = new SparkContext(sparkMaster, "BasicQueryCassandra", conf)
// entire table as an RDD
// assumes your table test was created as CREATE TABLE test.kv(key text PRIMARY KEY, value int);
val data = sc.cassandraTable("test", "kv")
// print some basic stats
println("stats " + data.map(row => row.getInt("value")).stats())
val rdd = sc.parallelize(List(("moremagic", 1)))
rdd.saveToCassandra("test", "kv", SomeColumns("key", "value"))
// save from a case class
val otherRdd = sc.parallelize(List(KeyValue("magic", 0)))
otherRdd.saveToCassandra("test", "kv")
}
}
case class KeyValue(key: String, value: Integer)
|
prucehuang/quickly-start-spark
|
src/main/scala/com/git/huanghaifeng/spark/load/LoadCassandra.scala
|
Scala
|
apache-2.0
| 1,092
|
package wdl
import org.scalatest.{Matchers, FlatSpec}
class WdlWorkflowOutputDeclarationSpec extends FlatSpec with Matchers {
"WorkflowOutputDeclaration" should "match outputs" in {
val declaration = WorkflowOutputWildcard("wf.mytask", wildcard = true, null)
declaration.outputMatchesDeclaration("wf.mytask.a", wildcardsAllowed = true) shouldBe true
declaration.outputMatchesDeclaration("wf.mytask.a", wildcardsAllowed = false) shouldBe false
declaration.outputMatchesDeclaration("wf.mytaskwithadifferentname.a", wildcardsAllowed = true) shouldBe false
declaration.outputMatchesDeclaration("wf.mytaskwithadifferentname.a", wildcardsAllowed = false) shouldBe false
}
}
|
ohsu-comp-bio/cromwell
|
wdl/src/test/scala/wdl/WdlWorkflowOutputDeclarationSpec.scala
|
Scala
|
bsd-3-clause
| 697
|
package fp_in_scala.chapter_02
import org.scalatest.{Matchers, FlatSpec}
class HigherOrderFunctionsSpec extends FlatSpec with Matchers {
// Types for making the intent of the function types clearer
type Curried = Int => Long => Double
type Uncurried = (Int, Long) => Double
it should "give a curried version of the function back" in {
val fn: Uncurried = (i: Int, l: Long) => i + l + 0.5
val curried: Curried = HigherOrderFunctions.curry(fn)
curried(1)(2L) should be (3.5)
}
it should "give an uncurried version of the function back" in {
val fn: Curried = (i: Int) => (l: Long) => i + l + 0.5
val uncurried: Uncurried = HigherOrderFunctions.uncurry(fn)
uncurried(1, 2L) should be (3.5)
}
it should "compose 2 functions" in {
val fn1 = (i: Int) => i * 2
val fn2 = (i: Int) => i + 3
val composed = HigherOrderFunctions.compose(fn2, fn1)
    // ((1 * 2) + 3) = 5, NOT ((1 + 3) * 2) = 8
composed(1) should be (5)
}
}
|
jankeesvanandel/fp-in-scala
|
src/test/scala/fp_in_scala/chapter_02/HigherOrderFunctionsSpec.scala
|
Scala
|
apache-2.0
| 986
|
package pl.touk.nussknacker.engine.flink.test
import java.util.concurrent.CompletableFuture
import org.apache.flink.api.common.{JobID, JobStatus}
import org.apache.flink.client.program.ClusterClient
import org.apache.flink.configuration._
import org.apache.flink.runtime.client.JobStatusMessage
import org.apache.flink.runtime.executiongraph.AccessExecutionGraph
import org.apache.flink.runtime.jobgraph.JobGraph
import org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration
import org.apache.flink.test.util.MiniClusterWithClientResource
import org.scalatest.concurrent.Eventually.{scaled, _}
import org.scalatest.time.{Millis, Seconds, Span}
import pl.touk.nussknacker.engine.flink.test.FlinkMiniClusterHolder._
import scala.collection.JavaConverters._
/**
  * This interface provides compatibility across Flink versions.
  * An instance of the mini cluster holder should be created only once and reused for many jobs.
*/
trait FlinkMiniClusterHolder {
protected def userFlinkClusterConfig: Configuration
protected def envConfig: AdditionalEnvironmentConfig
def start(): Unit
def stop(): Unit
def cancelJob(jobID: JobID): Unit
def submitJob(jobGraph: JobGraph): JobID
def runningJobs(): Iterable[JobID]
def listJobs(): Iterable[JobStatusMessage]
def createExecutionEnvironment(): MiniClusterExecutionEnvironment = {
new MiniClusterExecutionEnvironment(this, userFlinkClusterConfig, envConfig)
}
// We access miniCluster because ClusterClient doesn't expose getExecutionGraph and getJobStatus doesn't satisfy us
// It returns RUNNING even when some vertices are not started yet
def getExecutionGraph(jobId: JobID): CompletableFuture[_ <: AccessExecutionGraph]
}
class FlinkMiniClusterHolderImpl(flinkMiniCluster: MiniClusterWithClientResource,
protected val userFlinkClusterConfig: Configuration,
protected val envConfig: AdditionalEnvironmentConfig) extends FlinkMiniClusterHolder {
override def start(): Unit = {
flinkMiniCluster.before()
}
override def stop(): Unit = {
flinkMiniCluster.after()
}
override def cancelJob(jobID: JobID): Unit =
flinkMiniCluster.getClusterClient.cancel(jobID)
override def submitJob(jobGraph: JobGraph): JobID =
flinkMiniCluster.getClusterClient.submitJob(jobGraph).get()
override def listJobs(): List[JobStatusMessage] =
flinkMiniCluster.getClusterClient.listJobs().get().asScala.toList
override def runningJobs(): List[JobID] =
listJobs().filter(_.getJobState == JobStatus.RUNNING).map(_.getJobId)
def getClusterClient: ClusterClient[_] = flinkMiniCluster.getClusterClient
override def getExecutionGraph(jobId: JobID): CompletableFuture[_ <: AccessExecutionGraph] =
flinkMiniCluster.getMiniCluster.getExecutionGraph(jobId)
}
object FlinkMiniClusterHolder {
def apply(userFlinkClusterConfig: Configuration, envConfig: AdditionalEnvironmentConfig = AdditionalEnvironmentConfig()): FlinkMiniClusterHolder = {
userFlinkClusterConfig.setBoolean(CoreOptions.FILESYTEM_DEFAULT_OVERRIDE, true)
val resource = prepareMiniClusterResource(userFlinkClusterConfig)
new FlinkMiniClusterHolderImpl(resource, userFlinkClusterConfig, envConfig)
}
def prepareMiniClusterResource(userFlinkClusterConfig: Configuration): MiniClusterWithClientResource = {
val taskManagerNumber = ConfigOptions.key(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER).intType().defaultValue(ConfigConstants.DEFAULT_LOCAL_NUMBER_JOB_MANAGER)
val clusterConfig: MiniClusterResourceConfiguration = new MiniClusterResourceConfiguration.Builder()
.setNumberTaskManagers(userFlinkClusterConfig.get(taskManagerNumber))
.setNumberSlotsPerTaskManager(userFlinkClusterConfig.getInteger(TaskManagerOptions.NUM_TASK_SLOTS, TaskManagerOptions.NUM_TASK_SLOTS.defaultValue()))
.setConfiguration(userFlinkClusterConfig)
.build
new MiniClusterWithClientResource(clusterConfig)
}
case class AdditionalEnvironmentConfig(detachedClient: Boolean = true,
defaultWaitForStatePatience: PatienceConfig = PatienceConfig(timeout = scaled(Span(20, Seconds)), interval = scaled(Span(100, Millis))))
}
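// Minimal usage sketch (illustrative only; assumes a default Flink Configuration is acceptable):
//   val holder = FlinkMiniClusterHolder(new Configuration())
//   holder.start()
//   val env = holder.createExecutionEnvironment()
//   // ... build and run Flink jobs against `env` ...
//   holder.stop()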
|
TouK/nussknacker
|
engine/flink/test-utils/src/main/scala/pl/touk/nussknacker/engine/flink/test/FlinkMiniClusterHolder.scala
|
Scala
|
apache-2.0
| 4,236
|
package io.github.finagle.smtp
import com.twitter.concurrent.AsyncQueue
import com.twitter.finagle.transport.QueueTransport
import com.twitter.util.Await
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class ClientDispatcherTest extends FunSuite {
def newTestSet = {
val client = new AsyncQueue[Request]
val server = new AsyncQueue[UnspecifiedReply]
val transport = new QueueTransport[Request, UnspecifiedReply](client, server)
(client, server, transport)
}
def newTestSetWithGreeting = {
val (client, server, transport) = newTestSet
server.offer(ServiceReady("testdomain","testgreet"))
val dispatcher = new ClientDispatcher(transport)
(server, dispatcher)
}
test("receives correct greeting") {
val (server, dispatcher) = newTestSetWithGreeting
assert(dispatcher.isAvailable)
}
test("sends QUIT on close") {
val (client, server, transport) = newTestSet
server.offer(ServiceReady("testdomain","testgreet"))
val dispatcher = new ClientDispatcher(transport)
dispatcher.close()
assert(Await.result(client.poll()) match {
case Request.Quit => true
case _ => false
})
assert(dispatcher.isAvailable, "should wait for QUIT response")
server.offer(ClosingTransmission("QUIT"))
assert(!dispatcher.isAvailable)
}
test("closes on malformed greeting") {
val (client, server, transport) = newTestSet
server.offer(InvalidReply("wronggreet"))
val dispatcher = new ClientDispatcher(transport)
dispatcher(Request.Hello)
server.offer(ClosingTransmission("QUIT"))
assert(!dispatcher.isAvailable)
}
test("aggregates multiline replies") {
val (server, dispatcher) = newTestSetWithGreeting
val frep = dispatcher(Request.Noop)
server.offer(NonTerminalLine(250, "nonterminal"))
assert(!frep.isDefined)
server.offer(OK("terminal"))
val rep = Await result frep
assert(rep.isMultiline)
assert(rep.code === 250)
assert(rep.lines === Seq("nonterminal", "terminal"))
    assert(rep.info === "nonterminal\r\nterminal")
}
test("returns specified replies") {
val (server, dispatcher) = newTestSetWithGreeting
val specifiedOK = OK("specified")
val rep = dispatcher(Request.Noop)
server.offer(specifiedOK)
assert(Await.result(rep) === specifiedOK)
}
test("specifies unspecified replies") {
val (server, dispatcher) = newTestSetWithGreeting
val unspecifiedOK = new UnspecifiedReply {
val info: String = "unspecified"
val code: Int = 250
}
val rep = dispatcher(Request.Noop)
server.offer(unspecifiedOK)
assert(Await.result(rep).isInstanceOf[OK])
}
test("errors are exceptions") {
val (server, dispatcher) = newTestSetWithGreeting
val rep = dispatcher(Request.Noop)
server.offer(SyntaxError("error"))
assert(Await.result(rep.liftToTry).isThrow)
}
test("wraps unknown replies") {
val (server, dispatcher) = newTestSetWithGreeting
val unknownRep = new UnspecifiedReply {
val info: String = "unknown"
val code: Int = 666
}
val rep = dispatcher(Request.Noop)
server.offer(unknownRep)
rep onSuccess { _ =>
fail("should fail")
} onFailure {
case _: UnknownReplyCodeError =>
case _ => fail("should be UnknownReplyCodeError")
}
}
}
|
finagle/finagle-smtp
|
src/test/scala/io/github/finagle/smtp/ClientDispatcherTest.scala
|
Scala
|
apache-2.0
| 3,406
|
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package code.api
import code.domain.{GetNumbers, NextNumbers, NumbersActor}
import net.liftweb.common.Empty
import net.liftweb.http.rest.RestHelper
class Routes extends RestHelper {
serveJxa {
case JsonGet("api" :: "numbers" :: Nil, _) =>
NumbersActor !? GetNumbers
case Get("api" :: "numbers" :: "next" :: Nil, _) =>
NumbersActor ! NextNumbers
Empty
}
}
|
arkadius/lift-ng-workshop
|
src/main/scala/code/api/Routes.scala
|
Scala
|
apache-2.0
| 995
|
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.2
* @date Mon Sep 28 11:18:16 EDT 2015
* @see LICENSE (MIT style license file).
*/
package scalation.linalgebra.mem_mapped.bld
import java.io.{File, PrintWriter}
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `BldMatrix` object is used to build matrix classes for various base types.
* > run-main scalation.linalgebra.mem_mapped.bld.BldMatrix
*/
object BldMatrix extends App with BldParams
{
println ("BldMatrix: generate code for Matrix classes")
for (i <- 0 until kind.length-1) { // do not generate `MatrixS`
val VECTOR = kind(i)._1
val BASE = kind(i)._2
val MM_ARRAY = kind(i)._3
val VECTOR2 = kind(i)._4
val BASE2 = kind(i)._5
val FORMAT = kind(i)._6
val MATRI = kind(i)._7
val ZERO = kind(i)._9
val ONE = kind(i)._10
val BASE_LC = BASE.toLowerCase
val MATRIX = { val m = MATRI.splitAt (MATRI.size-1); m._1 + "x" + m._2 }
val IMPORT = if (CUSTOM contains BASE) s"scalation.math.$BASE.{abs => ABS, _}"
else "math.{abs => ABS}"
val IMPORT2 = if (BASE == "StrNum") "scalation.math.{StrO, oneIf}"
else if (CUSTOM contains BASE) s"scalation.math.{$BASE, oneIf}"
else s"scalation.math.{${BASE_LC}_exp, oneIf}"
// Beginning of string holding code template -----------------------------------
val code = raw"""
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @builder scalation.linalgebra.mem_mapped.bld.BldMatrix
* @version 1.2
* @date Mon Sep 28 11:18:16 EDT 2015
* @see LICENSE (MIT style license file).
*/
package scalation.linalgebra.mem_mapped
import java.io.PrintWriter
import io.Source.fromFile
import $IMPORT
import $IMPORT2
import scalation.util.{Error, $MM_ARRAY, PackageInfo}
import $MATRIX.eye
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `$MATRIX` class stores and operates on Numeric Matrices of type `$BASE`.
* This class follows the `gen.MatrixN` framework and is provided for efficiency.
* @param d1 the first/row dimension
* @param d2 the second/column dimension
* @param v the 2D array used to store matrix elements
*/
class $MATRIX (val d1: Int,
val d2: Int,
private var v: Array [$MM_ARRAY] = null)
extends $MATRI with Error with Serializable
{
/** Dimension 1
*/
lazy val dim1 = d1
/** Dimension 2
*/
lazy val dim2 = d2
if (v == null) {
v = Array.ofDim [$MM_ARRAY] (dim1)
for (i <- 0 until dim1) v(i) = $MM_ARRAY.ofDim (dim2)
} else if (dim1 != v.length || dim2 != v(0).length) {
flaw ("constructor", "dimensions are wrong")
} // if
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Construct a 'dim1' by 'dim1' square matrix.
* @param dim1 the row and column dimension
*/
def this (dim1: Int) { this (dim1, dim1) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Construct a 'dim1' by 'dim2' matrix and assign each element the value 'x'.
* @param dim1 the row dimension
     *  @param dim2  the column dimension
* @param x the scalar value to assign
*/
def this (dim1: Int, dim2: Int, x: $BASE)
{
this (dim1, dim2)
for (i <- range1; j <- range2) v(i)(j) = x
} // constructor
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Construct a matrix and assign values from array of arrays 'u'.
* @param u the 2D array of values to assign
*/
def this (u: Array [$MM_ARRAY]) { this (u.length, u(0).length, u) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Construct a matrix from repeated values.
* @param dim the (row, column) dimensions
* @param u the repeated values
*/
def this (dim: Tuple2 [Int, Int], u: $BASE*)
{
this (dim._1, dim._2)
for (i <- range1; j <- range2) v(i)(j) = u(i * dim2 + j)
} // constructor
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Construct a matrix and assign values from matrix 'b'.
* @param b the matrix of values to assign
*/
def this (b: $MATRIX)
{
this (b.d1, b.d2)
for (i <- range1; j <- range2) v(i)(j) = b.v(i)(j)
} // constructor
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get 'this' matrix's element at the 'i,j'-th index position.
* @param i the row index
* @param j the column index
*/
def apply (i: Int, j: Int): $BASE = v(i)(j)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get 'this' matrix's vector at the 'i'-th index position ('i'-th row).
* @param i the row index
*/
def apply (i: Int): $VECTOR = new $VECTOR (v(i))
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get a slice 'this' matrix row-wise on range 'ir' and column-wise on range 'jr'.
* Ex: b = a(2..4, 3..5)
* @param ir the row range
* @param jr the column range
*/
def apply (ir: Range, jr: Range): $MATRIX = slice (ir.start, ir.end, jr.start, jr.end)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set 'this' matrix's element at the 'i,j'-th index position to the scalar 'x'.
* @param i the row index
* @param j the column index
* @param x the scalar value to assign
*/
def update (i: Int, j: Int, x: $BASE) { v(i)(j) = x }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set 'this' matrix's row at the 'i'-th index position to the vector 'u'.
* @param i the row index
* @param u the vector value to assign
*/
def update (i: Int, u: $VECTOR) { v(i) = u() }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Set a slice 'this' matrix row-wise on range 'ir' and column-wise on range 'jr'.
* Ex: a(2..4, 3..5) = b
* @param ir the row range
* @param jr the column range
* @param b the matrix to assign
*/
def update (ir: Range, jr: Range, b: $MATRI)
{
if (b.isInstanceOf [$MATRIX]) {
val bb = b.asInstanceOf [$MATRIX]
for (i <- ir; j <- jr) v(i)(j) = bb.v(i - ir.start)(j - jr.start)
} else {
flaw ("update", "must convert b to a $MATRIX first")
} // if
} // update
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set all the elements in 'this' matrix to the scalar 'x'.
* @param x the scalar value to assign
*/
def set (x: $BASE) { for (i <- range1; j <- range2) v(i)(j) = x }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set all the values in 'this' matrix as copies of the values in 2D array 'u'.
* @param u the 2D array of values to assign
*/
def set (u: Array [Array [$BASE]]) { for (i <- range1; j <- range2) v(i)(j) = u(i)(j) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set 'this' matrix's 'i'-th row starting at column 'j' to the vector 'u'.
* @param i the row index
* @param u the vector value to assign
* @param j the starting column index
*/
def set (i: Int, u: $VECTOR, j: Int = 0) { for (k <- 0 until u.dim) v(i)(k+j) = u(k) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Slice 'this' matrix row-wise 'from' to 'end'.
* @param from the start row of the slice (inclusive)
* @param end the end row of the slice (exclusive)
*/
def slice (from: Int, end: Int): $MATRIX =
{
new $MATRIX (end - from, dim2, v.slice (from, end))
} // slice
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Slice 'this' matrix column-wise 'from' to 'end'.
* @param from the start column of the slice (inclusive)
* @param end the end column of the slice (exclusive)
*/
def sliceCol (from: Int, end: Int): $MATRIX =
{
val c = new $MATRIX (dim1, end - from)
for (i <- c.range1; j <- c.range2) c.v(i)(j) = v(i)(j + from)
c
} // sliceCol
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Slice 'this' matrix row-wise 'r_from' to 'r_end' and column-wise 'c_from' to 'c_end'.
* @param r_from the start of the row slice
* @param r_end the end of the row slice
* @param c_from the start of the column slice
* @param c_end the end of the column slice
*/
def slice (r_from: Int, r_end: Int, c_from: Int, c_end: Int): $MATRIX =
{
val c = new $MATRIX (r_end - r_from, c_end - c_from)
for (i <- c.range1; j <- c.range2) c.v(i)(j) = v(i + r_from)(j + c_from)
c
} // slice
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Slice 'this' matrix excluding the given row and/or column.
* @param row the row to exclude (0 until dim1, set to dim1 to keep all rows)
* @param col the column to exclude (0 until dim2, set to dim2 to keep all columns)
*/
def sliceExclude (row: Int, col: Int): $MATRIX =
{
val c = new $MATRIX (dim1 - oneIf (row < dim1), dim2 - oneIf (col < dim2))
for (i <- range1 if i != row) for (j <- range2 if j != col) {
c.v(i - oneIf (i > row))(j - oneIf (j > col)) = v(i)(j)
} // for
c
} // sliceExclude
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Select rows from 'this' matrix according to the given index/basis.
* @param rowIndex the row index positions (e.g., (0, 2, 5))
*/
def selectRows (rowIndex: Array [Int]): $MATRIX =
{
val c = new $MATRIX (rowIndex.length, dim2)
for (i <- c.range1) c.v(i) = v(rowIndex(i))
c
} // selectRows
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get column 'col' from the matrix, returning it as a vector.
* @param col the column to extract from the matrix
* @param from the position to start extracting from
*/
def col (col: Int, from: Int = 0): $VECTOR =
{
val u = new $VECTOR (dim1 - from)
for (i <- from until dim1) u(i-from) = v(i)(col)
u
} // col
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set column 'col' of the matrix to a vector.
* @param col the column to set
* @param u the vector to assign to the column
*/
def setCol (col: Int, u: $VECTOR) { for (i <- range1) v(i)(col) = u(i) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Select columns from 'this' matrix according to the given index/basis.
* Ex: Can be used to divide a matrix into a basis and a non-basis.
* @param colIndex the column index positions (e.g., (0, 2, 5))
*/
def selectCols (colIndex: Array [Int]): $MATRIX =
{
val c = new $MATRIX (dim1, colIndex.length)
for (j <- c.range2) c.setCol (j, col(colIndex(j)))
c
} // selectCols
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Transpose 'this' matrix (rows => columns).
*/
def t: $MATRIX =
{
val b = new $MATRIX (dim2, dim1)
for (i <- b.range1; j <- b.range2) b.v(i)(j) = v(j)(i)
b
} // t
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate (row) vector 'u' and 'this' matrix, i.e., prepend 'u' to 'this'.
* @param u the vector to be prepended as the new first row in new matrix
*/
def +: (u: $VECTOR): $MATRIX =
{
if (u.dim != dim2) flaw ("+:", "vector does not match row dimension")
val c = new $MATRIX (dim1 + 1, dim2)
for (i <- c.range1) c(i) = if (i == 0) u else this(i - 1)
c
} // +:
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate (column) vector 'u' and 'this' matrix, i.e., prepend 'u' to 'this'.
* @param u the vector to be prepended as the new first column in new matrix
*/
def +^: (u: $VECTOR): $MATRIX =
{
if (u.dim != dim1) flaw ("+^:", "vector does not match column dimension")
val c = new $MATRIX (dim1, dim2 + 1)
for (j <- c.range2) c.setCol (j, if (j == 0) u else col (j - 1))
c
} // +^:
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate 'this' matrix and (row) vector 'u', i.e., append 'u' to 'this'.
* @param u the vector to be appended as the new last row in new matrix
*/
def :+ (u: $VECTOR): $MATRIX =
{
if (u.dim != dim2) flaw (":+", "vector does not match row dimension")
val c = new $MATRIX (dim1 + 1, dim2)
for (i <- c.range1) c(i) = if (i < dim1) this(i) else u
c
} // :+
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate 'this' matrix and (column) vector 'u', i.e., append 'u' to 'this'.
* @param u the vector to be appended as the new last column in new matrix
*/
def :^+ (u: $VECTOR): $MATRIX =
{
if (u.dim != dim1) flaw (":^+", "vector does not match column dimension")
val c = new $MATRIX (dim1, dim2 + 1)
for (j <- c.range2) c.setCol (j, if (j < dim2) col (j) else u)
c
} // :^+
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate (row-wise) 'this' matrix and matrix 'b'.
* @param b the matrix to be concatenated as the new last rows in new matrix
*/
def ++ (b: $MATRI): $MATRIX =
{
if (b.dim2 != dim2) flaw ("++", "matrix b does not match row dimension")
val c = new $MATRIX (dim1 + b.dim1, dim2)
for (i <- c.range1) c(i) = if (i < dim1) this(i) else b(i - dim1)
c
} // ++
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate (column-wise) 'this' matrix and matrix 'b'.
* @param b the matrix to be concatenated as the new last columns in new matrix
*/
def ++^ (b: $MATRI): $MATRIX =
{
if (b.dim1 != dim1) flaw ("++^", "matrix b does not match column dimension")
val c = new $MATRIX (dim1, dim2 + b.dim2)
for (j <- c.range2) c.setCol (j, if (j < dim2) col (j) else b.col (j - dim2))
c
} // ++^
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add 'this' matrix and matrix 'b'.
* @param b the matrix to add (requires leDimensions)
*/
def + (b: $MATRIX): $MATRIX =
{
val c = new $MATRIX (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) + b.v(i)(j)
c
} // +
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add 'this' matrix and matrix 'b' for any type extending $MATRI.
* @param b the matrix to add (requires leDimensions)
*/
def + (b: $MATRI): $MATRIX =
{
val c = new $MATRIX (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) + b(i, j)
c
} // +
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add 'this' matrix and (row) vector 'u'.
* @param u the vector to add
*/
def + (u: $VECTOR): $MATRIX =
{
val c = new $MATRIX (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) + u(j)
c
} // +
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add 'this' matrix and scalar 'x'.
* @param x the scalar to add
*/
def + (x: $BASE): $MATRIX =
{
val c = new $MATRIX (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) + x
c
} // +
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add in-place 'this' matrix and matrix 'b'.
* @param b the matrix to add (requires leDimensions)
*/
def += (b: $MATRIX): $MATRIX =
{
for (i <- range1; j <- range2) v(i)(j) += b.v(i)(j)
this
} // +=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add in-place 'this' matrix and matrix 'b' for any type extending $MATRI.
* @param b the matrix to add (requires leDimensions)
*/
def += (b: $MATRI): $MATRIX =
{
for (i <- range1; j <- range2) v(i)(j) += b(i, j)
this
} // +=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add in-place 'this' matrix and (row) vector 'u'.
* @param u the vector to add
*/
def += (u: $VECTOR): $MATRIX =
{
for (i <- range1; j <- range2) v(i)(j) += u(j)
this
} // +=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add in-place 'this' matrix and scalar 'x'.
* @param x the scalar to add
*/
def += (x: $BASE): $MATRIX =
{
for (i <- range1; j <- range2) v(i)(j) += x
this
} // +=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' matrix subtract matrix 'b'.
* @param b the matrix to subtract (requires leDimensions)
*/
def - (b: $MATRIX): $MATRIX =
{
val c = new $MATRIX (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) - b.v(i)(j)
c
} // -
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' matrix subtract matrix 'b' for any type extending $MATRI.
* @param b the matrix to subtract (requires leDimensions)
*/
def - (b: $MATRI): $MATRIX =
{
val c = new $MATRIX (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) - b(i, j)
c
} // -
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' matrix subtract (row) vector 'u'.
* @param u the vector to subtract
*/
def - (u: $VECTOR): $MATRIX =
{
val c = new $MATRIX (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) - u(j)
c
} // -
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' matrix subtract scalar 'x'.
* @param x the scalar to subtract
*/
def - (x: $BASE): $MATRIX =
{
val c = new $MATRIX (dim1, dim2)
for (i <- c.range1; j <- c.range2) c.v(i)(j) = v(i)(j) - x
c
} // -
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' matrix subtract in-place matrix 'b'.
* @param b the matrix to subtract (requires leDimensions)
*/
def -= (b: $MATRIX): $MATRIX =
{
for (i <- range1; j <- range2) v(i)(j) -= b.v(i)(j)
this
} // -=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' matrix subtract in-place matrix 'b'.
* @param b the matrix to subtract (requires leDimensions)
*/
def -= (b: $MATRI): $MATRIX =
{
for (i <- range1; j <- range2) v(i)(j) -= b(i, j)
this
} // -=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' matrix subtract in-place (row) vector 'u'.
* @param u the vector to subtract
*/
def -= (u: $VECTOR): $MATRIX =
{
for (i <- range1; j <- range2) v(i)(j) -= u(j)
this
} // -=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' matrix subtract in-place scalar 'x'.
* @param x the scalar to subtract
*/
def -= (x: $BASE): $MATRIX =
{
for (i <- range1; j <- range2) v(i)(j) -= x
this
} // -=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' matrix by matrix 'b', transposing 'b' to improve efficiency.
* Use 'times' method to skip the transpose step.
* @param b the matrix to multiply by (requires sameCrossDimensions)
*/
def * (b: $MATRIX): $MATRIX =
{
if (dim2 != b.dim1) flaw ("*", "matrix * matrix - incompatible cross dimensions")
val c = new $MATRIX (dim1, b.dim2)
val bt = b.t // transpose the b matrix
for (i <- range1; j <- c.range2) {
val va = v(i); val vb = bt.v(j)
var sum = $ZERO
for (k <- range2) sum += va(k) * vb(k)
c.v(i)(j) = sum
} // for
c
} // *
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' matrix by matrix 'b', transposing 'b' to improve efficiency.
* Use 'times' method to skip the transpose step.
* @param b the matrix to multiply by (requires sameCrossDimensions)
*/
def * (b: $MATRI): $MATRIX =
{
if (dim2 != b.dim1) flaw ("*", "matrix * matrix - incompatible cross dimensions")
val c = new $MATRIX (dim1, b.dim2)
val bt = b.t // transpose the b matrix
for (i <- range1; j <- c.range2) {
val va = v(i); val vb = bt(j)
var sum = $ZERO
for (k <- range2) sum += va(k) * vb(k)
c.v(i)(j) = sum
} // for
c
} // *
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' matrix by vector 'u' (vector elements beyond 'dim2' ignored).
* @param u the vector to multiply by
*/
def * (u: $VECTOR): $VECTOR =
{
if (dim2 > u.dim) flaw ("*", "matrix * vector - vector dimension too small")
val c = new $VECTOR (dim1)
for (i <- range1) {
var sum = $ZERO
for (k <- range2) sum += v(i)(k) * u(k)
c(i) = sum
} // for
c
} // *
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' matrix by scalar 'x'.
* @param x the scalar to multiply by
*/
def * (x: $BASE): $MATRIX =
{
val c = new $MATRIX (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) * x
c
} // *
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply in-place 'this' matrix by matrix 'b', transposing 'b' to improve
* efficiency. Use 'times_ip' method to skip the transpose step.
* @param b the matrix to multiply by (requires square and sameCrossDimensions)
*/
def *= (b: $MATRIX): $MATRIX =
{
if (! b.isSquare) flaw ("*=", "matrix 'b' must be square")
if (dim2 != b.dim1) flaw ("*=", "matrix *= matrix - incompatible cross dimensions")
val bt = b.t // use the transpose of b
for (i <- range1) {
val row_i = new $VECTOR (dim2) // save ith row so not overwritten
for (j <- range2) row_i(j) = v(i)(j) // copy values from ith row of 'this' matrix
for (j <- range2) {
val vb = bt.v(j)
var sum = $ZERO
for (k <- range2) sum += row_i(k) * vb(k)
v(i)(j) = sum
} // for
} // for
this
} // *=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply in-place 'this' matrix by matrix 'b', transposing 'b' to improve
* efficiency. Use 'times_ip' method to skip the transpose step.
* @param b the matrix to multiply by (requires square and sameCrossDimensions)
*/
def *= (b: $MATRI): $MATRIX =
{
if (! b.isSquare) flaw ("*=", "matrix 'b' must be square")
if (dim2 != b.dim1) flaw ("*=", "matrix *= matrix - incompatible cross dimensions")
val bt = b.t // use the transpose of b
for (i <- range1) {
val row_i = new $VECTOR (dim2) // save ith row so not overwritten
for (j <- range2) row_i(j) = v(i)(j) // copy values from ith row of 'this' matrix
for (j <- range2) {
val vb = bt(j)
var sum = $ZERO
for (k <- range2) sum += row_i(k) * vb(k)
v(i)(j) = sum
} // for
} // for
this
} // *=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply in-place 'this' matrix by scalar 'x'.
* @param x the scalar to multiply by
*/
def *= (x: $BASE): $MATRIX =
{
for (i <- range1; j <- range2) v(i)(j) *= x
this
} // *=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the dot product of 'this' matrix and vector 'u', by first transposing
* 'this' matrix and then multiplying by 'u' (i.e., 'a dot u = a.t * u').
* @param u the vector to multiply by (requires same first dimensions)
*/
def dot (u: $VECTOR): $VECTOR =
{
if (dim1 != u.dim) flaw ("dot", "matrix dot vector - incompatible first dimensions")
val c = new $VECTOR (dim2)
val at = this.t // transpose the 'this' matrix
for (i <- range2) {
var sum = $ZERO
for (k <- range1) sum += at.v(i)(k) * u(k)
c(i) = sum
} // for
c
} // dot
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' matrix by matrix 'b' without first transposing 'b'.
* @param b the matrix to multiply by (requires sameCrossDimensions)
*/
def times (b: $MATRIX): $MATRIX =
{
if (dim2 != b.dim1) flaw ("times", "matrix * matrix - incompatible cross dimensions")
val c = new $MATRIX (dim1, b.dim2)
for (i <- range1; j <- c.range2) {
var sum = $ZERO
for (k <- range2) sum += v(i)(k) * b.v(k)(j)
c.v(i)(j) = sum
} // for
c
} // times
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply in-place 'this' matrix by matrix 'b' without first transposing 'b'.
* If 'b' and 'this' reference the same matrix ('b == this'), a copy of 'this'
* matrix is made.
* @param b the matrix to multiply by (requires square and sameCrossDimensions)
*/
def times_ip (b: $MATRIX)
{
if (! b.isSquare) flaw ("times_ip", "matrix 'b' must be square")
if (dim2 != b.dim1) flaw ("times_ip", "matrix * matrix - incompatible cross dimensions")
val bb = if (b == this) new $MATRIX (this) else b
for (i <- range1) {
val row_i = new $VECTOR (dim2) // save ith row so not overwritten
for (j <- range2) row_i(j) = v(i)(j) // copy values from ith row of 'this' matrix
for (j <- range2) {
var sum = $ZERO
for (k <- range2) sum += row_i(k) * bb.v(k)(j)
v(i)(j) = sum
} // for
} // for
} // times_ip
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' matrix by matrix 'b' using 'dot' product (concise solution).
* @param b the matrix to multiply by (requires sameCrossDimensions)
*/
def times_d (b: $MATRI): $MATRIX =
{
if (dim2 != b.dim1) flaw ("*", "matrix * matrix - incompatible cross dimensions")
val c = new $MATRIX (dim1, b.dim2)
for (i <- range1; j <- c.range2) c.v(i)(j) = this(i) dot b.col(j)
c
} // times_d
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' matrix by matrix b using the Strassen matrix multiplication
* algorithm. Both matrices ('this' and 'b') must be square. Although the
* algorithm is faster than the traditional cubic algorithm, it requires
* more memory and is often less stable (due to round-off errors).
* FIX: could be made more efficient using a virtual slice (vslice) method.
* @see http://en.wikipedia.org/wiki/Strassen_algorithm
* @param b the matrix to multiply by (it has to be a square matrix)
*/
def times_s (b: $MATRIX): $MATRIX =
{
if (dim2 != b.dim1) flaw ("*", "matrix * matrix - incompatible cross dimensions")
val c = new $MATRIX (dim1, dim1) // allocate result matrix
var d = dim1 / 2 // half dim1
if (d + d < dim1) d += 1 // if not even, increment by 1
val evenDim = d + d // equals dim1 if even, else dim1 + 1
// decompose to blocks (use vslice method if available)
val a11 = slice (0, d, 0, d)
val a12 = slice (0, d, d, evenDim)
val a21 = slice (d, evenDim, 0, d)
val a22 = slice (d, evenDim, d, evenDim)
val b11 = b.slice (0, d, 0, d)
val b12 = b.slice (0, d, d, evenDim)
val b21 = b.slice (d, evenDim, 0, d)
val b22 = b.slice (d, evenDim, d, evenDim)
// compute intermediate sub-matrices
val p1 = (a11 + a22) * (b11 + b22)
val p2 = (a21 + a22) * b11
val p3 = a11 * (b12 - b22)
val p4 = a22 * (b21 - b11)
val p5 = (a11 + a12) * b22
val p6 = (a21 - a11) * (b11 + b12)
val p7 = (a12 - a22) * (b21 + b22)
for (i <- c.range1; j <- c.range2) {
c.v(i)(j) = if (i < d && j < d) p1.v(i)(j) + p4.v(i)(j) - p5.v(i)(j) + p7.v(i)(j)
else if (i < d) p3.v(i)(j-d) + p5.v(i)(j-d)
else if (i >= d && j < d) p2.v(i-d)(j) + p4.v(i-d)(j)
else p1.v(i-d)(j-d) - p2.v(i-d)(j-d) + p3.v(i-d)(j-d) + p6.v(i-d)(j-d)
} // for
c // return result matrix
} // times_s
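// Usage sketch (illustrative only): for square matrices of equal size, 'times_s'
// should agree with '*' up to round-off, e.g.,
// val a = new $MATRIX ((2, 2), 1, 2,
//                              3, 4)
// println ((a times_s a) - (a * a))               // expected ~ zero matrix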
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' matrix by vector 'u' to produce another matrix '(a_ij * u_j)'.
* E.g., multiply a matrix by a diagonal matrix represented as a vector.
* @param u the vector to multiply by
*/
def ** (u: $VECTOR): $MATRIX =
{
val c = new $MATRIX (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) * u(j)
c
} // **
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply in-place 'this' matrix by vector 'u', updating 'this' matrix to '(a_ij * u_j)'.
* @param u the vector to multiply by
*/
def **= (u: $VECTOR): $MATRIX =
{
for (i <- range1; j <- range2) v(i)(j) = v(i)(j) * u(j)
this
} // **=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Divide 'this' matrix by scalar 'x'.
* @param x the scalar to divide by
*/
def / (x: $BASE): $MATRIX =
{
val c = new $MATRIX (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) / x
c
} // /
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Divide in-place 'this' matrix by scalar 'x'.
* @param x the scalar to divide by
*/
def /= (x: $BASE): $MATRIX =
{
for (i <- range1; j <- range2) v(i)(j) = v(i)(j) / x
this
} // /=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Raise 'this' matrix to the 'p'th power (for some integer 'p' >= 2).
* Caveat: should be replaced by a divide and conquer algorithm.
* @param p the power to raise 'this' matrix to
*/
def ~^ (p: Int): $MATRIX =
{
if (p < 2) flaw ("~^", "p must be an integer >= 2")
if (! isSquare) flaw ("~^", "only defined on square matrices")
val c = new $MATRIX (dim1, dim1)
for (i <- range1; j <- range1) {
var sum = $ZERO
for (k <- range1) sum += v(i)(k) * v(k)(j)
c.v(i)(j) = sum
} // for
if (p > 2) c ~^ (p-1) else c
} // ~^
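// As the caveat above notes, repeated squaring gives an O(log p) alternative.
// An illustrative (hypothetical) sketch, not part of this class:
// def pow (a: $MATRIX, p: Int): $MATRIX =
//     if (p == 1) a
//     else { val h = pow (a, p / 2); if (p % 2 == 0) h * h else h * h * a }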
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Find the maximum element in 'this' matrix.
* @param e the ending row index (exclusive) for the search
*/
def max (e: Int = dim1): $BASE =
{
var x = v(0)(0)
for (i <- 0 until e; j <- range2 if v(i)(j) > x) x = v(i)(j)   // include all of row 0
x
} // max
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Find the minimum element in 'this' matrix.
* @param e the ending row index (exclusive) for the search
*/
def min (e: Int = dim1): $BASE =
{
var x = v(0)(0)
for (i <- 0 until e; j <- range2 if v(i)(j) < x) x = v(i)(j)   // include all of row 0
x
} // min
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Factor 'this' matrix into the product of lower and upper triangular
* matrices '(l, u)' using the LU Factorization algorithm. This version uses
* no partial pivoting.
*/
def lud_npp: Tuple2 [$MATRIX, $MATRIX] =
{
val l = new $MATRIX (dim1, dim2) // lower triangular matrix
val u = new $MATRIX (this) // upper triangular matrix (a copy of this)
for (i <- u.range1) {
val pivot = u.v(i)(i)
if (pivot =~ $ZERO) flaw ("lud_npp", "use lud since you have a zero pivot")
l.v(i)(i) = $ONE
for (j <- i + 1 until u.dim2) l.v(i)(j) = $ZERO
for (k <- i + 1 until u.dim1) {
val mul = u.v(k)(i) / pivot
l.v(k)(i) = mul
for (j <- u.range2) u.v(k)(j) = u.v(k)(j) - mul * u.v(i)(j)
} // for
} // for
Tuple2 (l, u)
} // lud_npp
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Factor 'this' matrix into the product of lower and upper triangular
* matrices '(l, u)' using the LU Factorization algorithm. This version uses
* partial pivoting.
*/
def lud: Tuple2 [$MATRIX, $MATRIX] =
{
val l = new $MATRIX (dim1, dim2) // lower triangular matrix
val u = new $MATRIX (this) // upper triangular matrix (a copy of this)
for (i <- u.range1) {
var pivot = u.v(i)(i)
if (pivot =~ $ZERO) {
val k = partialPivoting (u, i) // find the maximum element below pivot
u.swap (i, k, i) // swap rows i and k from column k
pivot = u.v(i)(i) // reset the pivot
} // if
l.v(i)(i) = $ONE
for (j <- i + 1 until u.dim2) l.v(i)(j) = $ZERO
for (k <- i + 1 until u.dim1) {
val mul = u.v(k)(i) / pivot
l.v(k)(i) = mul
for (j <- u.range2) u.v(k)(j) = u.v(k)(j) - mul * u.v(i)(j)
} // for
} // for
Tuple2 (l, u)
} // lud
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Factor in-place 'this' matrix into the product of lower and upper triangular
* matrices '(l, u)' using the LU Factorization algorithm. This version uses
* partial pivoting.
*/
def lud_ip: Tuple2 [$MATRIX, $MATRIX] =
{
val l = new $MATRIX (dim1, dim2) // lower triangular matrix
val u = this // upper triangular matrix (this)
for (i <- u.range1) {
var pivot = u.v(i)(i)
if (pivot =~ $ZERO) {
val k = partialPivoting (u, i) // find the maximum element below pivot
u.swap (i, k, i) // swap rows i and k from column k
pivot = u.v(i)(i) // reset the pivot
} // if
l.v(i)(i) = $ONE
for (j <- i + 1 until u.dim2) l.v(i)(j) = $ZERO
for (k <- i + 1 until u.dim1) {
val mul = u.v(k)(i) / pivot
l.v(k)(i) = mul
for (j <- u.range2) u.v(k)(j) = u.v(k)(j) - mul * u.v(i)(j)
} // for
} // for
Tuple2 (l, u)
} // lud_ip
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Use partial pivoting to find a maximal non-zero pivot and return its row
* index, i.e., find the maximum element '(k, i)' below the pivot '(i, i)'.
* @param a the matrix to perform partial pivoting on
* @param i the row and column index for the current pivot
*/
private def partialPivoting (a: $MATRIX, i: Int): Int =
{
var max = a.v(i)(i) // initially set to the pivot
var kMax = i // initially the pivot row
for (k <- i + 1 until a.dim1 if ABS (a.v(k)(i)) > max) {
max = ABS (a.v(k)(i))
kMax = k
} // for
if (kMax == i) {
flaw ("partialPivoting", "unable to find a non-zero pivot for row " + i)
} // if
kMax
} // partialPivoting
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Solve for 'x' in the equation 'l*u*x = b' (see lud above).
* @param l the lower triangular matrix
* @param u the upper triangular matrix
* @param b the constant vector
*/
def solve (l: $MATRI, u: $MATRI, b: $VECTOR): $VECTOR =
{
val y = new $VECTOR (l.dim2)
for (k <- 0 until y.dim) { // solve for y in l*y = b
y(k) = b(k) - (l(k) dot y)
} // for
val x = new $VECTOR (u.dim2)
for (k <- x.dim - 1 to 0 by -1) { // solve for x in u*x = y
x(k) = (y(k) - (u(k) dot x)) / u(k, k)
} // for
x
} // solve
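// Typical usage (illustrative): factor once with 'lud', then solve 'a * x = b':
// val a = new $MATRIX ((2, 2), 4, 3,
//                              6, 3)
// val (l, u) = a.lud                              // a = l * u (with partial pivoting)
// val x = a.solve (l, u, $VECTOR (10, 12))        // forward then backward substitution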
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Solve for 'x' in the equation 'l*u*x = b' where 'l = this'. Requires
* 'l' to be lower triangular.
* @param u the upper triangular matrix
* @param b the constant vector
*/
def solve (u: $MATRI, b: $VECTOR): $VECTOR = solve (this, u, b)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Solve for 'x' in the equation 'a*x = b' where 'a' is 'this' matrix.
* @param b the constant vector.
*/
def solve (b: $VECTOR): $VECTOR = solve (lud, b)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Combine 'this' matrix with matrix 'b', placing them along the diagonal and
* filling in the bottom left and top right regions with zeros; '[this, b]'.
* @param b the matrix to combine with 'this' matrix
*/
def diag (b: $MATRI): $MATRIX =
{
val m = dim1 + b.dim1
val n = dim2 + b.dim2
val c = new $MATRIX (m, n)
for (i <- 0 until m; j <- 0 until n) {
c.v(i)(j) = if (i < dim1 && j < dim2) v(i)(j)
else if (i >= dim1 && j >= dim2) b(i-dim1, j-dim2)
else $ZERO
} // for
c
} // diag
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Form a matrix '[Ip, this, Iq]' where 'Ir' is an 'r-by-r' identity matrix, by
* positioning the three matrices 'Ip', 'this' and 'Iq' along the diagonal.
* Fill the rest of matrix with zeros.
* @param p the size of identity matrix Ip
* @param q the size of identity matrix Iq
*/
def diag (p: Int, q: Int = 0): $MATRIX =
{
if (! isSquare) flaw ("diag", "'this' matrix must be square")
val n = dim1 + p + q
val c = new $MATRIX (n, n)
for (i <- 0 until p) c.v(i)(i) = $ONE // Ip
for (i <- 0 until dim1; j <- 0 until dim1) c.v(i+p)(j+p) = v(i)(j) // this
for (i <- p + dim1 until n) c.v(i)(i) = $ONE // Iq
c
} // diag
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get the kth diagonal of 'this' matrix. Assumes 'dim2 >= dim1'.
* @param k how far above the main diagonal, e.g., (-1, 0, 1) for (sub, main, super)
*/
def getDiag (k: Int = 0): $VECTOR =
{
val c = new $VECTOR (dim1 - math.abs (k))
val (j, l) = (math.max (-k, 0), math.min (dim1-k, dim1))
for (i <- j until l) c(i-j) = v(i)(i+k)
c
} // getDiag
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set the kth diagonal of 'this' matrix to the vector 'u'. Assumes 'dim2 >= dim1'.
* @param u the vector to set the diagonal to
* @param k how far above the main diagonal, e.g., (-1, 0, 1) for (sub, main, super)
*/
def setDiag (u: $VECTOR, k: Int = 0)
{
val (j, l) = (math.max (-k, 0), math.min (dim1-k, dim1))
for (i <- j until l) v(i)(i+k) = u(i-j)
} // setDiag
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set the main diagonal of 'this' matrix to the scalar 'x'. Assumes 'dim2 >= dim1'.
* @param x the scalar to set the diagonal to
*/
def setDiag (x: $BASE) { for (i <- range1) v(i)(i) = x }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Invert 'this' matrix (requires a square matrix) without using partial pivoting.
*/
def inverse_npp: $MATRIX =
{
val b = new $MATRIX (this) // copy 'this' matrix into b
val c = eye (dim1) // let c represent the augmentation
for (i <- b.range1) {
val pivot = b.v(i)(i)
if (pivot =~ $ZERO) flaw ("inverse_npp", "use inverse since you have a zero pivot")
for (j <- b.range2) {
b.v(i)(j) /= pivot
c.v(i)(j) /= pivot
} // for
for (k <- 0 until b.dim1 if k != i) {
val mul = b.v(k)(i)
for (j <- b.range2) {
b.v(k)(j) -= mul * b.v(i)(j)
c.v(k)(j) -= mul * c.v(i)(j)
} // for
} // for
} // for
c
} // inverse_npp
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Invert 'this' matrix (requires a square matrix) using partial pivoting.
*/
def inverse: $MATRIX =
{
val b = new $MATRIX (this) // copy 'this' matrix into b
val c = eye (dim1) // let c represent the augmentation
for (i <- b.range1) {
var pivot = b.v(i)(i)
if (pivot =~ $ZERO) {
val k = partialPivoting (b, i) // find the maximum element below pivot
b.swap (i, k, i) // in b, swap rows i and k from column i
c.swap (i, k, 0) // in c, swap rows i and k from column 0
pivot = b.v(i)(i) // reset the pivot
} // if
for (j <- b.range2) {
b.v(i)(j) /= pivot
c.v(i)(j) /= pivot
} // for
for (k <- 0 until dim1 if k != i) {
val mul = b.v(k)(i)
for (j <- b.range2) {
b.v(k)(j) -= mul * b.v(i)(j)
c.v(k)(j) -= mul * c.v(i)(j)
} // for
} // for
} // for
c
} // inverse
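// Sanity check sketch (illustrative): for an invertible square matrix 'a',
// 'a * a.inverse' should be close to the identity, e.g.,
// val a = new $MATRIX ((2, 2), 4, 7,
//                              2, 6)
// println ((a * a.inverse - eye (2)).sumAbs)      // expected ~ 0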
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Invert in-place 'this' matrix (requires a square matrix) using partial pivoting.
* Note: this method turns the original matrix into the identity matrix.
* The inverse is returned and is captured by assignment.
*/
def inverse_ip: $MATRIX =
{
var b = this // use 'this' matrix for b
val c = eye (dim1) // let c represent the augmentation
for (i <- b.range1) {
var pivot = b.v(i)(i)
if (pivot =~ $ZERO) {
val k = partialPivoting (b, i) // find the maximum element below pivot
b.swap (i, k, i) // in b, swap rows i and k from column i
c.swap (i, k, 0) // in c, swap rows i and k from column 0
pivot = b.v(i)(i) // reset the pivot
} // if
for (j <- b.range2) {
b.v(i)(j) /= pivot
c.v(i)(j) /= pivot
} // for
for (k <- 0 until dim1 if k != i) {
val mul = b.v(k)(i)
for (j <- b.range2) {
b.v(k)(j) -= mul * b.v(i)(j)
c.v(k)(j) -= mul * c.v(i)(j)
} // for
} // for
} // for
c // return the solution
} // inverse_ip
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Use Gauss-Jordan reduction on 'this' matrix to make the left part embed an
* identity matrix. A constraint on this 'm-by-n' matrix is that 'n >= m'.
* It can be used to solve 'a * x = b': augment 'a' with 'b' and call reduce.
* Takes '[a | b]' to '[I | x]'.
*/
def reduce: $MATRIX =
{
if (dim2 < dim1) flaw ("reduce", "requires n (columns) >= m (rows)")
val b = new $MATRIX (this) // copy 'this' matrix into b
for (i <- b.range1) {
var pivot = b.v(i)(i)
if (pivot =~ $ZERO) {
val k = partialPivoting (b, i) // find the maximum element below pivot
b.swap (i, k, i) // in b, swap rows i and k from column i
pivot = b.v(i)(i) // reset the pivot
} // if
for (j <- b.range2) b.v(i)(j) /= pivot
for (k <- 0 until dim1 if k != i) {
val mul = b.v(k)(i)
for (j <- b.range2) b.v(k)(j) -= mul * b.v(i)(j)
} // for
} // for
b
} // reduce
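// Example sketch (illustrative): solve 'a * x = b' by augmenting 'a' with 'b'
// and reducing, taking '[a | b]' to '[I | x]':
// val a = new $MATRIX ((2, 2), 2, 1,
//                              1, 3)
// val x = (a :^+ $VECTOR (3, 5)).reduce.col (2)   // x is the last column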
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Use Gauss-Jordan reduction in-place on 'this' matrix to make the left part
* embed an identity matrix. A constraint on this 'm-by-n' matrix is that 'n >= m'.
* It can be used to solve 'a * x = b': augment 'a' with 'b' and call reduce.
* Takes '[a | b]' to '[I | x]'.
*/
def reduce_ip
{
if (dim2 < dim1) flaw ("reduce", "requires n (columns) >= m (rows)")
val b = this // use 'this' matrix for b
for (i <- b.range1) {
var pivot = b.v(i)(i)
if (pivot =~ $ZERO) {
val k = partialPivoting (b, i) // find the maximum element below pivot
b.swap (i, k, i) // in b, swap rows i and k from column i
pivot = b.v(i)(i) // reset the pivot
} // if
for (j <- b.range2) b.v(i)(j) /= pivot
for (k <- 0 until dim1 if k != i) {
val mul = b.v(k)(i)
for (j <- b.range2) b.v(k)(j) -= mul * b.v(i)(j)
} // for
} // for
} // reduce_ip
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Clean values in 'this' matrix at or below the threshold 'thres' by setting
* them to zero. Iterative algorithms give approximate values, and values very
* close to zero may throw off other calculations, e.g., in computing eigenvectors.
* @param thres the cutoff threshold (a small value)
* @param relative whether to use relative or absolute cutoff
*/
def clean (thres: Double, relative: Boolean = true): $MATRIX =
{
val s = if (relative) mag else $ONE // use matrix magnitude or 1
for (i <- range1; j <- range2) if (ABS (v(i)(j)) <= thres * s) v(i)(j) = $ZERO
this
} // clean
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the (right) nullspace of 'this' 'm-by-n' matrix (requires 'n = m+1')
* by performing Gauss-Jordan reduction and extracting the negation of the
* last column augmented by 1.
* <p>
* nullspace (a) = set of orthogonal vectors v s.t. a * v = 0
* <p>
* The left nullspace of matrix 'a' is the same as the right nullspace of 'a.t'.
* FIX: need a more robust algorithm for computing nullspace (@see Fac_QR.scala).
* FIX: remove the 'n = m+1' restriction.
* @see http://ocw.mit.edu/courses/mathematics/18-06sc-linear-algebra-fall-2011/ax-b-and-the-four-subspaces
* /solving-ax-0-pivot-variables-special-solutions/MIT18_06SCF11_Ses1.7sum.pdf
*/
def nullspace: $VECTOR =
{
if (dim2 != dim1 + 1) flaw ("nullspace", "requires n (columns) = m (rows) + 1")
reduce.col(dim2 - 1) * -$ONE ++ $ONE
} // nullspace
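// Example sketch (illustrative): for an 'm-by-(m+1)' matrix 'a', the product
// 'a * a.nullspace' should be (approximately) the zero vector:
// val a = new $MATRIX ((2, 3), 2, 3, 5,
//                              -4, 2, 3)
// println (a * a.nullspace)                       // expected ~ (0, 0)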
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute in-place the (right) nullspace of 'this' 'm-by-n' matrix (requires 'n = m+1')
* by performing Gauss-Jordan reduction and extracting the negation of the
* last column augmented by 1.
* <p>
* nullspace (a) = set of orthogonal vectors v s.t. a * v = 0
* <p>
* The left nullspace of matrix 'a' is the same as the right nullspace of 'a.t'.
* FIX: need a more robust algorithm for computing nullspace (@see Fac_QR.scala).
* FIX: remove the 'n = m+1' restriction.
* @see http://ocw.mit.edu/courses/mathematics/18-06sc-linear-algebra-fall-2011/ax-b-and-the-four-subspaces
* /solving-ax-0-pivot-variables-special-solutions/MIT18_06SCF11_Ses1.7sum.pdf
*/
def nullspace_ip: $VECTOR =
{
if (dim2 != dim1 + 1) flaw ("nullspace", "requires n (columns) = m (rows) + 1")
reduce_ip
col(dim2 - 1) * -$ONE ++ $ONE
} // nullspace_ip
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the trace of 'this' matrix, i.e., the sum of the elements on the
* main diagonal. Should also equal the sum of the eigenvalues.
* @see Eigen.scala
*/
def trace: $BASE =
{
if ( ! isSquare) flaw ("trace", "trace only works on square matrices")
var sum = $ZERO
for (i <- range1) sum += v(i)(i)
sum
} // trace
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the sum of 'this' matrix, i.e., the sum of its elements.
*/
def sum: $BASE =
{
var sum = $ZERO
for (i <- range1; j <- range2) sum += v(i)(j)
sum
} // sum
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the abs sum of 'this' matrix, i.e., the sum of the absolute value
* of its elements. This is useful for comparing matrices '(a - b).sumAbs'.
*/
def sumAbs: $BASE =
{
var sum = $ZERO
for (i <- range1; j <- range2) sum += ABS (v(i)(j))
sum
} // sumAbs
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the sum of the lower triangular region of 'this' matrix.
*/
def sumLower: $BASE =
{
var sum = $ZERO
for (i <- range1; j <- 0 until i) sum += v(i)(j)
sum
} // sumLower
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the determinant of 'this' matrix. The value of the determinant
* indicates, among other things, whether there is a unique solution to a
* system of linear equations (a nonzero determinant).
*/
def det: $BASE =
{
if ( ! isSquare) flaw ("det", "determinant only works on square matrices")
var sum = $ZERO
var b: $MATRIX = null
for (j <- range2) {
b = sliceExclude (0, j) // the submatrix that excludes row 0 and column j
sum += (if (j % 2 == 0) v(0)(j) * (if (b.dim1 == 1) b.v(0)(0) else b.det)
else -v(0)(j) * (if (b.dim1 == 1) b.v(0)(0) else b.det))
} // for
sum
} // det
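// Worked example (illustrative): for a 2-by-2 matrix the cofactor expansion above
// reduces to 'a(0,0)*a(1,1) - a(0,1)*a(1,0)', e.g.,
// val a = new $MATRIX ((2, 2), 1, 2,
//                              3, 2)
// println (a.det)                                 // 1*2 - 2*3 = -4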
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Check whether 'this' matrix is rectangular (all rows have the same number
* of columns).
*/
def isRectangular: Boolean =
{
for (i <- range1 if v(i).length != dim2) return false
true
} // isRectangular
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Convert 'this' real (double precision) matrix to a string.
*/
override def toString: String =
{
var sb = new StringBuilder ("\\n$MATRIX(")
if (dim1 == 0) return sb.append (")").mkString
for (i <- range1) {
for (j <- range2) {
sb.append (fString.format (v(i)(j)))
if (j == dim2-1) sb.replace (sb.length-1, sb.length, "\\n\\t")
} // for
} // for
sb.replace (sb.length-3, sb.length, ")").mkString
} // toString
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Write 'this' matrix to a CSV-formatted text file with name 'fileName'.
* @param fileName the name of file to hold the data
*/
def write (fileName: String)
{
val out = new PrintWriter (fileName)
for (i <- range1) {
for (j <- range2) { out.print (v(i)(j)); if (j < dim2-1) out.print (",") }
out.println ()
} // for
out.close
} // write
} // $MATRIX class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `$MATRIX` companion object provides operations for `$MATRIX` that don't require
* 'this' (like static methods in Java). It provides factory methods for building
* matrices from files or vectors.
*/
object $MATRIX extends Error
{
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a matrix and assign values from the array of vectors 'u'.
* @param u the array of vectors to assign
* @param columnwise whether the vectors are treated as column or row vectors
*/
def apply (u: Array [$VECTOR], columnwise: Boolean = true): $MATRIX =
{
var x: $MATRIX = null
val u_dim = u(0).dim
if (columnwise) {
x = new $MATRIX (u_dim, u.length)
for (j <- 0 until u.length) x.setCol (j, u(j)) // assign column vectors
} else {
x = new $MATRIX (u.length, u_dim)
for (i <- 0 until u.length) x(i) = u(i) // assign row vectors
} // if
x
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a matrix and assign values from the Scala `Vector` of vectors 'u'.
* Assumes vectors are column-wise.
* @param u the Vector of vectors to assign
*/
def apply (u: Vector [$VECTOR]): $MATRIX =
{
val u_dim = u(0).dim
val x = new $MATRIX (u_dim, u.length)
for (j <- 0 until u.length) x.setCol (j, u(j)) // assign column vectors
x
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a matrix by reading from a text file, e.g., a CSV file.
* @param fileName the name of file holding the data
*/
def apply (fileName: String): $MATRIX =
{
val sp = ',' // character separating the values
val lines = fromFile (fileName).getLines.toArray // get the lines from file
val (m, n) = (lines.length, lines(0).split (sp).length)
val x = new $MATRIX (m, n)
for (i <- 0 until m) x(i) = $VECTOR (lines(i).split (sp))
x
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create an 'm-by-n' identity matrix I (ones on main diagonal, zeros elsewhere).
* If 'n' is <= 0, set it to 'm' for a square identity matrix.
* @param m the row dimension of the matrix
* @param n the column dimension of the matrix (defaults to 0 => square matrix)
*/
def eye (m: Int, n: Int = 0): $MATRIX =
{
val nn = if (n <= 0) m else n // square matrix, if n <= 0
val mn = if (m <= nn) m else nn // length of main diagonal
val c = new $MATRIX (m, nn)
for (i <- 0 until mn) c.v(i)(i) = $ONE
c
} // eye
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate (row) vectors 'u' and 'w' to form a matrix with 2 rows.
* @param u the vector to be concatenated as the new first row in matrix
* @param w the vector to be concatenated as the new second row in matrix
*/
def ++ (u: $VECTOR, w: $VECTOR): $MATRIX =
{
if (u.dim != w.dim) flaw ("++", "vector dimensions do not match")
val c = new $MATRIX (2, u.dim)
c(0) = u
c(1) = w
c
} // ++
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate (column) vectors 'u' and 'w' to form a matrix with 2 columns.
* @param u the vector to be concatenated as the new first column in matrix
* @param w the vector to be concatenated as the new second column in matrix
*/
def ++^ (u: $VECTOR, w: $VECTOR): $MATRIX =
{
if (u.dim != w.dim) flaw ("++^", "vector dimensions do not match")
val c = new $MATRIX (u.dim, 2)
c.setCol (0, u)
c.setCol (1, w)
c
} // ++^
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply vector 'u' by matrix 'a'. Treat 'u' as a row vector.
* @param u the vector to multiply by
* @param a the matrix to multiply by (requires sameCrossDimensions)
*/
def times (u: $VECTOR, a: $MATRIX): $VECTOR =
{
if (u.dim != a.dim1) flaw ("times", "vector * matrix - incompatible cross dimensions")
val c = new $VECTOR (a.dim2)
for (j <- a.range2) {
var sum = $ZERO
for (k <- a.range1) sum += u(k) * a.v(k)(j)
c(j) = sum
} // for
c
} // times
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the outer product of vector 'x' and vector 'y'. The result of the
* outer product is a matrix where 'c(i, j)' is the product of the 'i'-th element
* of 'x' with the 'j'-th element of 'y'.
* @param x the first vector
* @param y the second vector
*/
def outer (x: $VECTOR, y: $VECTOR): $MATRIX =
{
val c = new $MATRIX (x.dim, y.dim)
for (i <- 0 until x.dim; j <- 0 until y.dim) c(i, j) = x(i) * y(j)
c
} // outer
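// Example sketch (illustrative): outer ($VECTOR (1, 2), $VECTOR (3, 4, 5)) yields
// the 2-by-3 matrix with rows (3, 4, 5) and (6, 8, 10).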
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Form a matrix from two vectors 'x' and 'y', row-wise.
* @param x the first vector -> row 0
* @param y the second vector -> row 1
*/
def form_rw (x: $VECTOR, y: $VECTOR): $MATRIX =
{
if (x.dim != y.dim) flaw ("form_rw", "dimensions of x and y must be the same")
val cols = x.dim
val c = new $MATRIX (2, cols)
c(0) = x
c(1) = y
c
} // form_rw
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Form a matrix from scalar 'x' and a vector 'y', row-wise.
* @param x the first scalar -> row 0 (repeat scalar)
* @param y the second vector -> row 1
*/
def form_rw (x: $BASE, y: $VECTOR): $MATRIX =
{
val cols = y.dim
val c = new $MATRIX (2, cols)
for (j <- 0 until cols) c(0, j) = x
c(1) = y
c
} // form_rw
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Form a matrix from a vector 'x' and a scalar 'y', row-wise.
* @param x the first vector -> row 0
* @param y the second scalar -> row 1 (repeat scalar)
*/
def form_rw (x: $VECTOR, y: $BASE): $MATRIX =
{
val cols = x.dim
val c = new $MATRIX (2, cols)
c(0) = x
for (j <- 0 until cols) c(1, j) = y
c
} // form_rw
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Form a matrix from two vectors 'x' and 'y', column-wise.
* @param x the first vector -> column 0
* @param y the second vector -> column 1
*/
def form_cw (x: $VECTOR, y: $VECTOR): $MATRIX =
{
if (x.dim != y.dim) flaw ("form_cw", "dimensions of x and y must be the same")
val rows = x.dim
val c = new $MATRIX (rows, 2)
c.setCol(0, x)
c.setCol(1, y)
c
} // form_cw
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Form a matrix from a scalar 'x' and a vector 'y', column-wise.
* @param x the first scalar -> column 0 (repeat scalar)
* @param y the second vector -> column 1
*/
def form_cw (x: $BASE, y: $VECTOR): $MATRIX =
{
val rows = y.dim
val c = new $MATRIX (rows, 2)
for (i <- 0 until rows) c(i, 0) = x
c.setCol(1, y)
c
} // form_cw
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Form a matrix from a vector 'x' and a scalar 'y', column-wise.
* @param x the first vector -> column 0
* @param y the second scalar -> column 1 (repeat scalar)
*/
def form_cw (x: $VECTOR, y: $BASE): $MATRIX =
{
val rows = x.dim
val c = new $MATRIX (rows, 2)
c.setCol(0, x)
for (i <- 0 until rows) c(i, 1) = y
c
} // form_cw
} // $MATRIX companion object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `${MATRIX}Test` object tests the operations provided by the `$MATRIX` class.
* > run-main scalation.linalgebra.${MATRIX}Test
*/
object ${MATRIX}Test extends App with PackageInfo
{
for (l <- 1 to 4) {
println ("\\n\\tTest $MATRIX on real matrices of dim " + l)
val x = new $MATRIX (l, l)
val y = new $MATRIX (l, l)
x.set (2)
y.set (3)
println ("x + y = " + (x + y))
println ("x - y = " + (x - y))
println ("x * y = " + (x * y))
println ("x * 4 = " + (x * 4))
} // for
println ("\\n\\tTest $MATRIX on additional operations")
val z = new $MATRIX ((2, 2), 1, 2,
3, 2)
val t = new $MATRIX ((3, 3), 1, 2, 3,
4, 3, 2,
1, 3, 1)
val zz = new $MATRIX ((3, 3), 3, 1, 0,
1, 4, 2,
0, 2, 5)
val bz = $VECTOR (5, 3, 6)
val b = $VECTOR (8, 7)
val lu = z.lud
val lu2 = z.lud_npp
println ("z = " + z)
println ("z.t = " + z.t)
println ("z.lud = " + lu)
println ("z.lud_npp = " + lu2)
println ("z.solve = " + z.solve (lu._1, lu._2, b))
println ("zz.solve = " + zz.solve (zz.lud, bz))
println ("z.inverse = " + z.inverse)
println ("z.inverse_ip = " + z.inverse_ip)
println ("t.inverse = " + t.inverse)
println ("t.inverse_ip = " + t.inverse_ip)
println ("z.inv * b = " + z.inverse * b)
println ("z.det = " + z.det)
println ("z = " + z)
z *= z // in-place matrix multiplication
println ("z squared = " + z)
val w = new $MATRIX ((2, 3), 2, 3, 5,
-4, 2, 3)
val v = new $MATRIX ((3, 2), 2, -4,
3, 2,
5, 3)
println ("w = " + w)
println ("v = " + v)
println ("w.reduce = " + w.reduce)
println ("right: w.nullspace = " + w.nullspace)
println ("check right nullspace = " + w * w.nullspace)
println ("left: v.t.nullspace = " + v.t.nullspace)
println ("check left nullspace = " + $MATRIX.times (v.t.nullspace, v))
for (row <- z) println ("row = " + row.deep)
val aa = new $MATRIX ((3, 2), 1, 2,
3, 4,
5, 6)
val bb = new $MATRIX ((2, 2), 1, 2,
3, 4)
println ("aa = " + aa)
println ("bb = " + bb)
println ("aa * bb = " + aa * bb)
aa *= bb
println ("aa *= bb = " + aa)
println ("aa dot bz = " + (aa dot bz))
println ("aa.t * bz = " + aa.t * bz)
val filename = getDataPath + "bb_matrix.csv"
bb.write (filename)
println ("bb_csv = " + $MATRIX (filename))
} // ${MATRIX}Test object
"""
// Ending of string holding code template --------------------------------------
// println (code)
val writer = new PrintWriter (new File (DIR + _l + MATRIX + ".scalaa"))
writer.write (code)
writer.close ()
} // for
} // BldMatrix object
|
scalation/fda
|
scalation_1.2/src/main/scala/scalation/linalgebra/mem_mapped/bld/BldMatrix.scala
|
Scala
|
mit
| 66,391
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import java.util.Locale
import java.util.concurrent.atomic.{AtomicInteger, AtomicLong}
import scala.collection.mutable
import scala.util.control.NonFatal
import org.apache.spark.broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.expressions.codegen.Block._
import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.catalyst.plans.physical.Partitioning
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.aggregate.HashAggregateExec
import org.apache.spark.sql.execution.columnar.InMemoryTableScanExec
import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, SortMergeJoinExec}
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.sql.vectorized.ColumnarBatch
import org.apache.spark.util.Utils
/**
* An interface for those physical operators that support codegen.
*/
trait CodegenSupport extends SparkPlan {
/** Prefix used in the current operator's variable names. */
private def variablePrefix: String = this match {
case _: HashAggregateExec => "agg"
case _: BroadcastHashJoinExec => "bhj"
case _: SortMergeJoinExec => "smj"
case _: RDDScanExec => "rdd"
case _: DataSourceScanExec => "scan"
case _: InMemoryTableScanExec => "memoryScan"
case _: WholeStageCodegenExec => "wholestagecodegen"
case _ => nodeName.toLowerCase(Locale.ROOT)
}
/**
* Creates a metric using the specified name.
*
* @return name of the variable representing the metric
*/
def metricTerm(ctx: CodegenContext, name: String): String = {
ctx.addReferenceObj(name, longMetric(name))
}
/**
* Whether this SparkPlan supports whole stage codegen or not.
*/
def supportCodegen: Boolean = true
/**
* Which SparkPlan is calling produce() of this one. It's itself for the first SparkPlan.
*/
protected var parent: CodegenSupport = null
/**
* Returns all the RDDs of InternalRow which generate the input rows.
*
* @note Right now we support up to two RDDs
*/
def inputRDDs(): Seq[RDD[InternalRow]]
/**
* Returns Java source code to process the rows from the input RDD.
*/
final def produce(ctx: CodegenContext, parent: CodegenSupport): String = executeQuery {
this.parent = parent
ctx.freshNamePrefix = variablePrefix
s"""
|${ctx.registerComment(s"PRODUCE: ${this.simpleString(SQLConf.get.maxToStringFields)}")}
|${doProduce(ctx)}
""".stripMargin
}
/**
* Generate the Java source code to process rows; this should be overridden by subclasses to support codegen.
*
* doProduce() usually generates the framework; for example, aggregation could generate this:
*
* if (!initialized) {
* # create a hash map, then build the aggregation hash map
* # call child.produce()
* initialized = true;
* }
* while (hashmap.hasNext()) {
* row = hashmap.next();
* # build the aggregation results
* # create variables for results
* # call consume(), which will call parent.doConsume()
* if (shouldStop()) return;
* }
*/
protected def doProduce(ctx: CodegenContext): String
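// Illustrative sketch (not necessarily Spark's actual implementation): a simple row-based
// scan might implement doProduce roughly as follows, driving the loop and delegating each
// row to consume(), which in turn calls parent.doConsume():
//
//   protected override def doProduce(ctx: CodegenContext): String = {
//     val input = ctx.addMutableState("scala.collection.Iterator", "input",
//       v => s"$v = inputs[0];")                  // hypothetical input iterator
//     val row = ctx.freshName("row")
//     s"""
//        |while ($limitNotReachedCond $input.hasNext()) {
//        |  InternalRow $row = (InternalRow) $input.next();
//        |  ${consume(ctx, null, row)}
//        |  $shouldStopCheckCode
//        |}
//      """.stripMargin
//   }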
private def prepareRowVar(ctx: CodegenContext, row: String, colVars: Seq[ExprCode]): ExprCode = {
if (row != null) {
ExprCode.forNonNullValue(JavaCode.variable(row, classOf[UnsafeRow]))
} else {
if (colVars.nonEmpty) {
val colExprs = output.zipWithIndex.map { case (attr, i) =>
BoundReference(i, attr.dataType, attr.nullable)
}
val evaluateInputs = evaluateVariables(colVars)
// generate the code to create a UnsafeRow
ctx.INPUT_ROW = row
ctx.currentVars = colVars
val ev = GenerateUnsafeProjection.createCode(ctx, colExprs, false)
val code = code"""
|$evaluateInputs
|${ev.code}
""".stripMargin
ExprCode(code, FalseLiteral, ev.value)
} else {
// There are no columns
ExprCode.forNonNullValue(JavaCode.variable("unsafeRow", classOf[UnsafeRow]))
}
}
}
/**
* Consume the generated columns or row from the current SparkPlan and call its parent's `doConsume()`.
*
* Note that `outputVars` and `row` can't both be null.
*/
final def consume(ctx: CodegenContext, outputVars: Seq[ExprCode], row: String = null): String = {
val inputVarsCandidate =
if (outputVars != null) {
assert(outputVars.length == output.length)
// outputVars will be used to generate the code for UnsafeRow, so we should copy them
outputVars.map(_.copy())
} else {
assert(row != null, "outputVars and row cannot both be null.")
ctx.currentVars = null
ctx.INPUT_ROW = row
output.zipWithIndex.map { case (attr, i) =>
BoundReference(i, attr.dataType, attr.nullable).genCode(ctx)
}
}
val inputVars = inputVarsCandidate match {
case stream: Stream[ExprCode] => stream.force
case other => other
}
val rowVar = prepareRowVar(ctx, row, outputVars)
// Set up the `currentVars` in the codegen context, as we generate the code of `inputVars`
// before calling `parent.doConsume`. We can't set up `INPUT_ROW`, because parent needs to
// generate code of `rowVar` manually.
ctx.currentVars = inputVars
ctx.INPUT_ROW = null
ctx.freshNamePrefix = parent.variablePrefix
val evaluated = evaluateRequiredVariables(output, inputVars, parent.usedInputs)
// Under certain conditions, we can put the logic to consume the rows of this operator into
// another function, so that a generated function does not grow too long to be optimized by the JIT.
// The conditions:
// 1. The config "spark.sql.codegen.splitConsumeFuncByOperator" is enabled.
// 2. `inputVars` are all materialized. That is guaranteed to be true if the parent plan uses
// all variables in output (see `requireAllOutput`).
// 3. The number of output variables must be less than the maximum number of parameters in a Java
//    method declaration.
val confEnabled = SQLConf.get.wholeStageSplitConsumeFuncByOperator
val requireAllOutput = output.forall(parent.usedInputs.contains(_))
val paramLength = CodeGenerator.calculateParamLength(output) + (if (row != null) 1 else 0)
val consumeFunc = if (confEnabled && requireAllOutput
&& CodeGenerator.isValidParamLength(paramLength)) {
constructDoConsumeFunction(ctx, inputVars, row)
} else {
parent.doConsume(ctx, inputVars, rowVar)
}
s"""
|${ctx.registerComment(s"CONSUME: ${parent.simpleString(SQLConf.get.maxToStringFields)}")}
|$evaluated
|$consumeFunc
""".stripMargin
}
/**
* To prevent the concatenated function from growing too long to be optimized by the JIT, we can
* separate the parent's `doConsume` code of a `CodegenSupport` operator into a function to call.
*/
private def constructDoConsumeFunction(
ctx: CodegenContext,
inputVars: Seq[ExprCode],
row: String): String = {
val (args, params, inputVarsInFunc) = constructConsumeParameters(ctx, output, inputVars, row)
val rowVar = prepareRowVar(ctx, row, inputVarsInFunc)
val doConsume = ctx.freshName("doConsume")
ctx.currentVars = inputVarsInFunc
ctx.INPUT_ROW = null
val doConsumeFuncName = ctx.addNewFunction(doConsume,
s"""
| private void $doConsume(${params.mkString(", ")}) throws java.io.IOException {
| ${parent.doConsume(ctx, inputVarsInFunc, rowVar)}
| }
""".stripMargin)
s"""
| $doConsumeFuncName(${args.mkString(", ")});
""".stripMargin
}
/**
* Returns the arguments used to call the consume function and the parameters of its method
* definition, along with the list of `ExprCode` for those parameters.
*/
private def constructConsumeParameters(
ctx: CodegenContext,
attributes: Seq[Attribute],
variables: Seq[ExprCode],
row: String): (Seq[String], Seq[String], Seq[ExprCode]) = {
val arguments = mutable.ArrayBuffer[String]()
val parameters = mutable.ArrayBuffer[String]()
val paramVars = mutable.ArrayBuffer[ExprCode]()
if (row != null) {
arguments += row
parameters += s"InternalRow $row"
}
variables.zipWithIndex.foreach { case (ev, i) =>
val paramName = ctx.freshName(s"expr_$i")
val paramType = CodeGenerator.javaType(attributes(i).dataType)
arguments += ev.value
parameters += s"$paramType $paramName"
val paramIsNull = if (!attributes(i).nullable) {
// Use constant `false` without passing `isNull` for non-nullable variable.
FalseLiteral
} else {
val isNull = ctx.freshName(s"exprIsNull_$i")
arguments += ev.isNull
parameters += s"boolean $isNull"
JavaCode.isNullVariable(isNull)
}
paramVars += ExprCode(paramIsNull, JavaCode.variable(paramName, attributes(i).dataType))
}
(arguments, parameters, paramVars)
}
/**
* Returns source code to evaluate all the variables and clears their code, to prevent
* them from being evaluated twice.
*/
protected def evaluateVariables(variables: Seq[ExprCode]): String = {
val evaluate = variables.filter(_.code.nonEmpty).map(_.code.toString).mkString("\\n")
variables.foreach(_.code = EmptyBlock)
evaluate
}
/**
* Returns source code to evaluate the variables for the required attributes and clears the code
* of the evaluated variables, to prevent them from being evaluated twice.
*/
protected def evaluateRequiredVariables(
attributes: Seq[Attribute],
variables: Seq[ExprCode],
required: AttributeSet): String = {
val evaluateVars = new StringBuilder
variables.zipWithIndex.foreach { case (ev, i) =>
if (ev.code.nonEmpty && required.contains(attributes(i))) {
evaluateVars.append(ev.code.toString + "\\n")
ev.code = EmptyBlock
}
}
evaluateVars.toString()
}
/**
* Returns source code to evaluate the variables for non-deterministic expressions and clears the
* code of the evaluated variables, to prevent them from being evaluated twice.
*/
protected def evaluateNondeterministicVariables(
attributes: Seq[Attribute],
variables: Seq[ExprCode],
expressions: Seq[NamedExpression]): String = {
val nondeterministicAttrs = expressions.filterNot(_.deterministic).map(_.toAttribute)
evaluateRequiredVariables(attributes, variables, AttributeSet(nondeterministicAttrs))
}
/**
* The subset of inputSet that should be evaluated before this plan.
*
* We will use this to insert some code to access those columns that are actually used by the
* current plan before calling doConsume().
*/
def usedInputs: AttributeSet = references
/**
* Generate the Java source code to process the rows from the child SparkPlan. This should only be
* called from `consume`.
*
* This should be overridden by subclasses to support codegen.
*
* Note: The operator should not assume the existence of an outer processing loop,
* which it can jump from with "continue;"!
*
* For example, filter could generate this:
* # code to evaluate the predicate expression, result is isNull1 and value2
* if (!isNull1 && value2) {
* # call consume(), which will call parent.doConsume()
* }
*
* Note: A plan can either consume the rows as UnsafeRow (row), or a list of variables (input).
* When consuming as a list of variables, the code to produce the input is already
* generated and `CodegenContext.currentVars` is already set. When consuming as UnsafeRow,
* implementations need to put `row.code` in the generated code and set
* `CodegenContext.INPUT_ROW` manually. Some plans may need more tweaks as they have
* different inputs (join build side, aggregate buffer, etc.), or other special cases.
*/
def doConsume(ctx: CodegenContext, input: Seq[ExprCode], row: ExprCode): String = {
throw new UnsupportedOperationException
}
/**
* Whether or not the result rows of this operator should be copied before putting into a buffer.
*
   * If any operator inside WholeStageCodegen generates multiple rows from a single row (for
* example, Join), this should be true.
*
* If an operator starts a new pipeline, this should be false.
*/
def needCopyResult: Boolean = {
if (children.isEmpty) {
false
} else if (children.length == 1) {
children.head.asInstanceOf[CodegenSupport].needCopyResult
} else {
throw new UnsupportedOperationException
}
}
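  // As a concrete illustration: when this flag is true, WholeStageCodegenExec.doConsume (further
  // down in this file) emits `append(row.copy());` instead of `append(row);`, so rows that share
  // a backing buffer are not overwritten after being buffered. A hypothetical join-like operator
  // that emits several output rows per input row would therefore override this to return true.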
/**
* Whether or not the children of this operator should generate a stop check when consuming input
* rows. This is used to suppress shouldStop() in a loop of WholeStageCodegen.
*
* This should be false if an operator starts a new pipeline, which means it consumes all rows
   * produced by its children but doesn't output rows to the buffer by calling append(), so the
   * children don't require shouldStop() in their row-producing loops.
*/
def needStopCheck: Boolean = parent.needStopCheck
/**
   * Helper that returns the default shouldStop() check code.
*/
def shouldStopCheckCode: String = if (needStopCheck) {
"if (shouldStop()) return;"
} else {
"// shouldStop check is eliminated"
}
/**
   * A sequence of checks which evaluate to true if the downstream Limit operators have not yet
   * received enough records to reach the limit. If the current node is a data-producing node, it can leverage
* this information to stop producing data and complete the data flow earlier. Common data
* producing nodes are leaf nodes like Range and Scan, and blocking nodes like Sort and Aggregate.
* These checks should be put into the loop condition of the data producing loop.
*/
def limitNotReachedChecks: Seq[String] = parent.limitNotReachedChecks
/**
* Check if the node is supposed to produce limit not reached checks.
*/
protected def canCheckLimitNotReached: Boolean = children.isEmpty
/**
* A helper method to generate the data producing loop condition according to the
* limit-not-reached checks.
*/
final def limitNotReachedCond: String = {
if (!canCheckLimitNotReached) {
val errMsg = "Only leaf nodes and blocking nodes need to call 'limitNotReachedCond' " +
"in its data producing loop."
if (Utils.isTesting) {
throw new IllegalStateException(errMsg)
} else {
logWarning(s"[BUG] $errMsg Please open a JIRA ticket to report it.")
}
}
if (parent.limitNotReachedChecks.isEmpty) {
""
} else {
parent.limitNotReachedChecks.mkString("", " && ", " &&")
}
}
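  // Worked example (the variable names are illustrative): if the parent reports
  // limitNotReachedChecks == Seq("count_0 < 100"), this returns "count_0 < 100 &&", which a leaf
  // node splices into its data producing loop as
  //   while (count_0 < 100 && input.hasNext()) { ... }
  // (see InputRDDCodegen.doProduce below). With no checks it returns "" and the loop condition is
  // just `input.hasNext()`.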
}
/**
 * A special kind of operator which supports whole-stage codegen. Blocking means these operators
 * will consume all the inputs first, before producing output. Typical blocking operators are
* sort and aggregate.
*/
trait BlockingOperatorWithCodegen extends CodegenSupport {
  // Blocking operators usually have some kind of buffer to keep the data before producing it, so
  // they don't need to copy their result even if their child does.
override def needCopyResult: Boolean = false
  // Blocking operators always consume all the input first, so their upstream operators don't need a
// stop check.
override def needStopCheck: Boolean = false
  // Blocking operators need to consume all the inputs before producing any output. This means a
  // Limit operator after this blocking operator will never reach its limit during the execution of
// this blocking operator's upstream operators. Here we override this method to return Nil, so
// that upstream operators will not generate useless conditions (which are always evaluated to
// false) for the Limit operators after this blocking operator.
override def limitNotReachedChecks: Seq[String] = Nil
// This is a blocking node so the node can produce these checks
override protected def canCheckLimitNotReached: Boolean = true
}
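// A minimal sketch of how this trait is meant to be used: a blocking operator (e.g. a sort- or
// aggregate-like node, per the comment above) only needs to mix the trait into its exec node to
// pick up all four overrides. The operator name below is made up for illustration and is not
// part of this file:
//
//   case class MyBlockingSortExec(child: SparkPlan) extends UnaryExecNode
//     with BlockingOperatorWithCodegen { /* buffers all input before producing any output */ }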
/**
* Leaf codegen node reading from a single RDD.
*/
trait InputRDDCodegen extends CodegenSupport {
def inputRDD: RDD[InternalRow]
// If the input can be InternalRows, an UnsafeProjection needs to be created.
protected val createUnsafeProjection: Boolean
override def inputRDDs(): Seq[RDD[InternalRow]] = {
inputRDD :: Nil
}
override def doProduce(ctx: CodegenContext): String = {
// Inline mutable state since an InputRDDCodegen is used once in a task for WholeStageCodegen
val input = ctx.addMutableState("scala.collection.Iterator", "input", v => s"$v = inputs[0];",
forceInline = true)
val row = ctx.freshName("row")
val outputVars = if (createUnsafeProjection) {
      // Creating the vars will make the parent's consume() add an unsafe projection.
ctx.INPUT_ROW = row
ctx.currentVars = null
output.zipWithIndex.map { case (a, i) =>
BoundReference(i, a.dataType, a.nullable).genCode(ctx)
}
} else {
null
}
val updateNumOutputRowsMetrics = if (metrics.contains("numOutputRows")) {
val numOutputRows = metricTerm(ctx, "numOutputRows")
s"$numOutputRows.add(1);"
} else {
""
}
s"""
| while ($limitNotReachedCond $input.hasNext()) {
| InternalRow $row = (InternalRow) $input.next();
| ${updateNumOutputRowsMetrics}
| ${consume(ctx, outputVars, if (createUnsafeProjection) null else row).trim}
| ${shouldStopCheckCode}
| }
""".stripMargin
}
}
/**
* InputAdapter is used to hide a SparkPlan from a subtree that supports codegen.
*
* This is the leaf node of a tree with WholeStageCodegen that is used to generate code
* that consumes an RDD iterator of InternalRow.
*/
case class InputAdapter(child: SparkPlan) extends UnaryExecNode with InputRDDCodegen {
override def output: Seq[Attribute] = child.output
override def outputPartitioning: Partitioning = child.outputPartitioning
override def outputOrdering: Seq[SortOrder] = child.outputOrdering
override def vectorTypes: Option[Seq[String]] = child.vectorTypes
  // This is not strictly needed because the codegen transformation happens after the columnar
  // transformation, but it is kept for consistency.
override def supportsColumnar: Boolean = child.supportsColumnar
override def doExecute(): RDD[InternalRow] = {
child.execute()
}
override def doExecuteBroadcast[T](): broadcast.Broadcast[T] = {
child.doExecuteBroadcast()
}
override def doExecuteColumnar(): RDD[ColumnarBatch] = {
child.executeColumnar()
}
// `InputAdapter` can only generate code to process the rows from its child. If the child produces
// columnar batches, there must be a `ColumnarToRowExec` above `InputAdapter` to handle it by
// overriding `inputRDDs` and calling `InputAdapter#executeColumnar` directly.
override def inputRDD: RDD[InternalRow] = child.execute()
// This is a leaf node so the node can produce limit not reached checks.
override protected def canCheckLimitNotReached: Boolean = true
// InputAdapter does not need UnsafeProjection.
protected val createUnsafeProjection: Boolean = false
override def generateTreeString(
depth: Int,
lastChildren: Seq[Boolean],
append: String => Unit,
verbose: Boolean,
prefix: String = "",
addSuffix: Boolean = false,
maxFields: Int,
printNodeId: Boolean): Unit = {
child.generateTreeString(
depth,
lastChildren,
append,
verbose,
prefix = "",
addSuffix = false,
maxFields,
printNodeId)
}
override def needCopyResult: Boolean = false
}
object WholeStageCodegenExec {
val PIPELINE_DURATION_METRIC = "duration"
private def numOfNestedFields(dataType: DataType): Int = dataType match {
case dt: StructType => dt.fields.map(f => numOfNestedFields(f.dataType)).sum
case m: MapType => numOfNestedFields(m.keyType) + numOfNestedFields(m.valueType)
case a: ArrayType => numOfNestedFields(a.elementType)
case u: UserDefinedType[_] => numOfNestedFields(u.sqlType)
case _ => 1
}
def isTooManyFields(conf: SQLConf, dataType: DataType): Boolean = {
numOfNestedFields(dataType) > conf.wholeStageMaxNumFields
}
// The whole-stage codegen generates Java code on the driver side and sends it to the Executors
// for compilation and execution. The whole-stage codegen can bring significant performance
// improvements with large dataset in distributed environments. However, in the test environment,
// due to the small amount of data, the time to generate Java code takes up a major part of the
// entire runtime. So we summarize the total code generation time and output it to the execution
  // log for easy analysis and viewing.
private val _codeGenTime = new AtomicLong
// Increase the total generation time of Java source code in nanoseconds.
// Visible for testing
def increaseCodeGenTime(time: Long): Unit = _codeGenTime.addAndGet(time)
// Returns the total generation time of Java source code in nanoseconds.
// Visible for testing
def codeGenTime: Long = _codeGenTime.get
// Reset generation time of Java source code.
// Visible for testing
def resetCodeGenTime(): Unit = _codeGenTime.set(0L)
}
/**
 * WholeStageCodegen compiles a subtree of plans that support codegen together into a single Java
* function.
*
 * Here is the call graph for generating Java source (plan A supports codegen, but plan B does not):
*
* WholeStageCodegen Plan A FakeInput Plan B
* =========================================================================
*
* -> execute()
* |
* doExecute() ---------> inputRDDs() -------> inputRDDs() ------> execute()
* |
* +-----------------> produce()
* |
* doProduce() -------> produce()
* |
* doProduce()
* |
* doConsume() <--------- consume()
* |
* doConsume() <-------- consume()
*
* SparkPlan A should override `doProduce()` and `doConsume()`.
*
* `doCodeGen()` will create a `CodeGenContext`, which will hold a list of variables for input,
 * used to generate code for [[BoundReference]].
*/
case class WholeStageCodegenExec(child: SparkPlan)(val codegenStageId: Int)
extends UnaryExecNode with CodegenSupport {
override def output: Seq[Attribute] = child.output
override def outputPartitioning: Partitioning = child.outputPartitioning
override def outputOrdering: Seq[SortOrder] = child.outputOrdering
  // This is not strictly needed because the codegen transformation happens after the columnar
  // transformation, but it is kept for consistency.
override def supportsColumnar: Boolean = child.supportsColumnar
override lazy val metrics = Map(
"pipelineTime" -> SQLMetrics.createTimingMetric(sparkContext,
WholeStageCodegenExec.PIPELINE_DURATION_METRIC))
override def nodeName: String = s"WholeStageCodegen (${codegenStageId})"
def generatedClassName(): String = if (conf.wholeStageUseIdInClassName) {
s"GeneratedIteratorForCodegenStage$codegenStageId"
} else {
"GeneratedIterator"
}
/**
* Generates code for this subtree.
*
* @return the tuple of the codegen context and the actual generated source.
*/
def doCodeGen(): (CodegenContext, CodeAndComment) = {
val startTime = System.nanoTime()
val ctx = new CodegenContext
val code = child.asInstanceOf[CodegenSupport].produce(ctx, this)
// main next function.
ctx.addNewFunction("processNext",
s"""
protected void processNext() throws java.io.IOException {
${code.trim}
}
""", inlineToOuterClass = true)
val className = generatedClassName()
val source = s"""
public Object generate(Object[] references) {
return new $className(references);
}
${ctx.registerComment(
s"""Codegend pipeline for stage (id=$codegenStageId)
|${this.treeString.trim}""".stripMargin,
"wsc_codegenPipeline")}
${ctx.registerComment(s"codegenStageId=$codegenStageId", "wsc_codegenStageId", true)}
final class $className extends ${classOf[BufferedRowIterator].getName} {
private Object[] references;
private scala.collection.Iterator[] inputs;
${ctx.declareMutableStates()}
public $className(Object[] references) {
this.references = references;
}
public void init(int index, scala.collection.Iterator[] inputs) {
partitionIndex = index;
this.inputs = inputs;
${ctx.initMutableStates()}
${ctx.initPartition()}
}
${ctx.emitExtraCode()}
${ctx.declareAddedFunctions()}
}
""".trim
// try to compile, helpful for debug
val cleanedSource = CodeFormatter.stripOverlappingComments(
new CodeAndComment(CodeFormatter.stripExtraNewLines(source), ctx.getPlaceHolderToComments()))
val duration = System.nanoTime() - startTime
WholeStageCodegenExec.increaseCodeGenTime(duration)
logDebug(s"\\n${CodeFormatter.format(cleanedSource)}")
(ctx, cleanedSource)
}
override def doExecuteColumnar(): RDD[ColumnarBatch] = {
// Code generation is not currently supported for columnar output, so just fall back to
// the interpreted path
child.executeColumnar()
}
override def doExecute(): RDD[InternalRow] = {
val (ctx, cleanedSource) = doCodeGen()
// try to compile and fallback if it failed
val (_, compiledCodeStats) = try {
CodeGenerator.compile(cleanedSource)
} catch {
case NonFatal(_) if !Utils.isTesting && sqlContext.conf.codegenFallback =>
        // We should have already seen the error message.
logWarning(s"Whole-stage codegen disabled for plan (id=$codegenStageId):\\n $treeString")
return child.execute()
}
// Check if compiled code has a too large function
if (compiledCodeStats.maxMethodCodeSize > sqlContext.conf.hugeMethodLimit) {
logInfo(s"Found too long generated codes and JIT optimization might not work: " +
s"the bytecode size (${compiledCodeStats.maxMethodCodeSize}) is above the limit " +
s"${sqlContext.conf.hugeMethodLimit}, and the whole-stage codegen was disabled " +
s"for this plan (id=$codegenStageId). To avoid this, you can raise the limit " +
s"`${SQLConf.WHOLESTAGE_HUGE_METHOD_LIMIT.key}`:\\n$treeString")
return child.execute()
}
val references = ctx.references.toArray
val durationMs = longMetric("pipelineTime")
// Even though rdds is an RDD[InternalRow] it may actually be an RDD[ColumnarBatch] with
// type erasure hiding that. This allows for the input to a code gen stage to be columnar,
// but the output must be rows.
val rdds = child.asInstanceOf[CodegenSupport].inputRDDs()
assert(rdds.size <= 2, "Up to two input RDDs can be supported")
if (rdds.length == 1) {
rdds.head.mapPartitionsWithIndex { (index, iter) =>
val (clazz, _) = CodeGenerator.compile(cleanedSource)
val buffer = clazz.generate(references).asInstanceOf[BufferedRowIterator]
buffer.init(index, Array(iter))
new Iterator[InternalRow] {
override def hasNext: Boolean = {
val v = buffer.hasNext
if (!v) durationMs += buffer.durationMs()
v
}
override def next: InternalRow = buffer.next()
}
}
} else {
// Right now, we support up to two input RDDs.
rdds.head.zipPartitions(rdds(1)) { (leftIter, rightIter) =>
Iterator((leftIter, rightIter))
// a small hack to obtain the correct partition index
}.mapPartitionsWithIndex { (index, zippedIter) =>
val (leftIter, rightIter) = zippedIter.next()
val (clazz, _) = CodeGenerator.compile(cleanedSource)
val buffer = clazz.generate(references).asInstanceOf[BufferedRowIterator]
buffer.init(index, Array(leftIter, rightIter))
new Iterator[InternalRow] {
override def hasNext: Boolean = {
val v = buffer.hasNext
if (!v) durationMs += buffer.durationMs()
v
}
override def next: InternalRow = buffer.next()
}
}
}
}
override def inputRDDs(): Seq[RDD[InternalRow]] = {
throw new UnsupportedOperationException
}
override def doProduce(ctx: CodegenContext): String = {
throw new UnsupportedOperationException
}
override def doConsume(ctx: CodegenContext, input: Seq[ExprCode], row: ExprCode): String = {
val doCopy = if (needCopyResult) {
".copy()"
} else {
""
}
s"""
|${row.code}
|append(${row.value}$doCopy);
""".stripMargin.trim
}
override def generateTreeString(
depth: Int,
lastChildren: Seq[Boolean],
append: String => Unit,
verbose: Boolean,
prefix: String = "",
addSuffix: Boolean = false,
maxFields: Int,
printNodeId: Boolean): Unit = {
child.generateTreeString(
depth,
lastChildren,
append,
verbose,
if (printNodeId) "* " else s"*($codegenStageId) ",
false,
maxFields,
printNodeId)
}
override def needStopCheck: Boolean = true
override def limitNotReachedChecks: Seq[String] = Nil
override protected def otherCopyArgs: Seq[AnyRef] = Seq(codegenStageId.asInstanceOf[Integer])
}
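// For inspecting what doCodeGen() produced for a query, Spark's debug helpers can usually be
// used from a SparkSession (a sketch, assuming the `org.apache.spark.sql.execution.debug`
// package is on the classpath and `spark` is a SparkSession):
//
//   import org.apache.spark.sql.execution.debug._
//   spark.range(10).selectExpr("id + 1").debugCodegen()  // prints the generated Java source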
/**
* Find the chained plans that support codegen, collapse them together as WholeStageCodegen.
*
* The `codegenStageCounter` generates ID for codegen stages within a query plan.
* It does not affect equality, nor does it participate in destructuring pattern matching
* of WholeStageCodegenExec.
*
* This ID is used to help differentiate between codegen stages. It is included as a part
* of the explain output for physical plans, e.g.
*
* == Physical Plan ==
* *(5) SortMergeJoin [x#3L], [y#9L], Inner
* :- *(2) Sort [x#3L ASC NULLS FIRST], false, 0
* : +- Exchange hashpartitioning(x#3L, 200)
* : +- *(1) Project [(id#0L % 2) AS x#3L]
* : +- *(1) Filter isnotnull((id#0L % 2))
* : +- *(1) Range (0, 5, step=1, splits=8)
* +- *(4) Sort [y#9L ASC NULLS FIRST], false, 0
* +- Exchange hashpartitioning(y#9L, 200)
* +- *(3) Project [(id#6L % 2) AS y#9L]
* +- *(3) Filter isnotnull((id#6L % 2))
* +- *(3) Range (0, 5, step=1, splits=8)
*
* where the ID makes it obvious that not all adjacent codegen'd plan operators are of the
* same codegen stage.
*
* The codegen stage ID is also optionally included in the name of the generated classes as
* a suffix, so that it's easier to associate a generated class back to the physical operator.
* This is controlled by SQLConf: spark.sql.codegen.useIdInClassName
*
* The ID is also included in various log messages.
*
* Within a query, a codegen stage in a plan starts counting from 1, in "insertion order".
* WholeStageCodegenExec operators are inserted into a plan in depth-first post-order.
* See CollapseCodegenStages.insertWholeStageCodegen for the definition of insertion order.
*
* 0 is reserved as a special ID value to indicate a temporary WholeStageCodegenExec object
* is created, e.g. for special fallback handling when an existing WholeStageCodegenExec
* failed to generate/compile code.
*/
case class CollapseCodegenStages(
conf: SQLConf,
codegenStageCounter: AtomicInteger = new AtomicInteger(0))
extends Rule[SparkPlan] {
private def supportCodegen(e: Expression): Boolean = e match {
case e: LeafExpression => true
// CodegenFallback requires the input to be an InternalRow
case e: CodegenFallback => false
case _ => true
}
private def supportCodegen(plan: SparkPlan): Boolean = plan match {
case plan: CodegenSupport if plan.supportCodegen =>
val willFallback = plan.expressions.exists(_.find(e => !supportCodegen(e)).isDefined)
// the generated code will be huge if there are too many columns
val hasTooManyOutputFields =
WholeStageCodegenExec.isTooManyFields(conf, plan.schema)
val hasTooManyInputFields =
plan.children.exists(p => WholeStageCodegenExec.isTooManyFields(conf, p.schema))
!willFallback && !hasTooManyOutputFields && !hasTooManyInputFields
case _ => false
}
/**
* Inserts an InputAdapter on top of those that do not support codegen.
*/
private def insertInputAdapter(plan: SparkPlan): SparkPlan = {
plan match {
case p if !supportCodegen(p) =>
// collapse them recursively
InputAdapter(insertWholeStageCodegen(p))
case j: SortMergeJoinExec =>
// The children of SortMergeJoin should do codegen separately.
j.withNewChildren(j.children.map(
child => InputAdapter(insertWholeStageCodegen(child))))
case p => p.withNewChildren(p.children.map(insertInputAdapter))
}
}
/**
* Inserts a WholeStageCodegen on top of those that support codegen.
*/
private def insertWholeStageCodegen(plan: SparkPlan): SparkPlan = {
plan match {
      // For operators that output domain objects, do not insert WholeStageCodegen, as domain
      // objects cannot be written into unsafe rows.
case plan if plan.output.length == 1 && plan.output.head.dataType.isInstanceOf[ObjectType] =>
plan.withNewChildren(plan.children.map(insertWholeStageCodegen))
case plan: LocalTableScanExec =>
        // Do not make LocalTableScanExec the root of WholeStageCodegen
// to support the fast driver-local collect/take paths.
plan
case plan: CodegenSupport if supportCodegen(plan) =>
// The whole-stage-codegen framework is row-based. If a plan supports columnar execution,
// it can't support whole-stage-codegen at the same time.
assert(!plan.supportsColumnar)
WholeStageCodegenExec(insertInputAdapter(plan))(codegenStageCounter.incrementAndGet())
case other =>
other.withNewChildren(other.children.map(insertWholeStageCodegen))
}
}
def apply(plan: SparkPlan): SparkPlan = {
if (conf.wholeStageEnabled) {
insertWholeStageCodegen(plan)
} else {
plan
}
}
}
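// A minimal sketch of applying this rule to a physical plan (the `conf` and `physicalPlan`
// values are assumed to exist in the caller's scope):
//
//   val withCodegen = CollapseCodegenStages(conf).apply(physicalPlan)
//   // Supported subtrees are now wrapped in WholeStageCodegenExec(...)(stageId), with
//   // InputAdapter inserted below operators that cannot participate in codegen.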
|
spark-test/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegenExec.scala
|
Scala
|
apache-2.0
| 35,598
|
package org.pdfextractor.algorithm.config
import java.util.Locale
import org.pdfextractor.algorithm.io._
object PdfFilesListing {
def getPdfInvoicesListing(lang: Locale): Seq[String] = {
getFolderAsFile(lang.getLanguage)
.listFiles
.filter(_.isFile)
.filter(_.getName.endsWith(".pdf"))
.map(f => lang.getLanguage + "/" + f.getName)
}
}
|
kveskimae/pdfalg
|
src/test/scala/org/pdfextractor/algorithm/config/PdfFilesListing.scala
|
Scala
|
mit
| 373
|
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.doperables
import scala.language.reflectiveCalls
import org.apache.spark.ml
import org.apache.spark.ml.param.{Params, ParamMap => SparkParamMap}
import org.apache.spark.sql.types.StructType
import io.deepsense.deeplang.ExecutionContext
import io.deepsense.deeplang.doperables.dataframe.{DataFrame, DataFrameColumnsGetter}
import io.deepsense.deeplang.doperables.multicolumn.SingleColumnParams.SingleColumnInPlaceChoice
import io.deepsense.deeplang.doperables.multicolumn.SingleColumnParams.SingleTransformInPlaceChoices.{NoInPlaceChoice, YesInPlaceChoice}
import io.deepsense.deeplang.doperables.multicolumn._
import io.deepsense.deeplang.doperables.spark.wrappers.params.common.{HasInputColumn, HasOutputColumn}
import io.deepsense.deeplang.params.Param
import io.deepsense.deeplang.params.wrappers.spark.ParamsWithSparkWrappers
abstract class SparkSingleColumnModelWrapper[
MD <: ml.Model[MD]{ val outputCol: ml.param.Param[String]},
E <: ml.Estimator[MD]{ val outputCol: ml.param.Param[String]}]
extends SparkModelWrapper[MD, E]
with ParamsWithSparkWrappers
with HasInputColumn
with HasSingleInPlaceParam
with HasSpecificParams {
def convertInputNumericToVector: Boolean = false
def convertOutputVectorToDouble: Boolean = false
private var outputColumnValue: Option[String] = None
override lazy val params: Array[Param[_]] =
Array(inputColumn, singleInPlaceChoice) ++ getSpecificParams
override private[deeplang] def _transform(ctx: ExecutionContext, df: DataFrame): DataFrame = {
val schema = df.schema.get
val inputColumnName = DataFrameColumnsGetter.getColumnName(schema, $(inputColumn))
val conversionDoubleToVectorIsNecessary = convertInputNumericToVector &&
NumericToVectorUtils.isColumnNumeric(schema, inputColumnName)
val convertedDataFrame = if (conversionDoubleToVectorIsNecessary) {
// Automatically convert numeric input column to one-element vector column
DataFrame.fromSparkDataFrame(NumericToVectorUtils.convertDataFrame(df, inputColumnName, ctx))
} else {
df
}
val transformedDataFrame = $(singleInPlaceChoice) match {
case YesInPlaceChoice() =>
SingleColumnTransformerUtils.transformSingleColumnInPlace(
convertedDataFrame.getColumnName($(inputColumn)),
convertedDataFrame,
ctx,
transformTo(ctx, convertedDataFrame))
case no: NoInPlaceChoice =>
transformTo(ctx, convertedDataFrame)(no.getOutputColumn)
}
if(conversionDoubleToVectorIsNecessary && convertOutputVectorToDouble) {
val expectedSchema = _transformSchema(schema)
val revertedTransformedDf =
NumericToVectorUtils.revertDataFrame(
transformedDataFrame.sparkDataFrame,
expectedSchema.get,
inputColumnName,
getOutputColumnName(inputColumnName),
ctx,
convertOutputVectorToDouble)
DataFrame.fromSparkDataFrame(revertedTransformedDf)
} else {
transformedDataFrame
}
}
override private[deeplang] def _transformSchema(schema: StructType): Option[StructType] = {
val inputColumnName = DataFrameColumnsGetter.getColumnName(schema, $(inputColumn))
val conversionDoubleToVectorIsNecessary = convertInputNumericToVector &&
NumericToVectorUtils.isColumnNumeric(schema, inputColumnName)
val convertedSchema = if (conversionDoubleToVectorIsNecessary) {
// Automatically convert numeric input column to one-element vector column
NumericToVectorUtils.convertSchema(schema, inputColumnName)
} else {
schema
}
val transformedSchemaOption = $(singleInPlaceChoice) match {
case YesInPlaceChoice() =>
val temporaryColumnName =
DataFrameColumnsGetter.uniqueSuffixedColumnName(inputColumnName)
val temporarySchema: Option[StructType] =
transformSchemaTo(convertedSchema, temporaryColumnName)
temporarySchema.map { schema =>
StructType(schema.collect {
case field if field.name == inputColumnName =>
schema(temporaryColumnName).copy(name = inputColumnName)
case field if field.name != temporaryColumnName =>
field
})
}
case no: NoInPlaceChoice =>
transformSchemaTo(convertedSchema, no.getOutputColumn)
}
if(conversionDoubleToVectorIsNecessary && convertOutputVectorToDouble) {
transformedSchemaOption.map { case transformedSchema =>
NumericToVectorUtils.revertSchema(
transformedSchema,
inputColumnName,
getOutputColumnName(inputColumnName),
convertOutputVectorToDouble)
}
} else {
transformedSchemaOption
}
}
override def sparkParamMap(sparkEntity: Params, schema: StructType): SparkParamMap = {
val map = super.sparkParamMap(sparkEntity, schema).put(
ml.param.ParamPair(
parentEstimator.sparkEstimator.outputCol, outputColumnValue.orNull))
if (serializableModel != null) {
map.put(ml.param.ParamPair(sparkModel.outputCol, outputColumnValue.orNull))
} else {
map
}
}
def setSingleInPlaceParam(value: SingleColumnInPlaceChoice): this.type = {
set(singleInPlaceChoice -> value)
}
private def transformTo(
ctx: ExecutionContext,
df: DataFrame)(outputColumnName: String): DataFrame = {
withOutputColumnValue(outputColumnName) {
super._transform(ctx, df)
}
}
private def transformSchemaTo(
schema: StructType,
temporaryColumnName: String): Option[StructType] = {
withOutputColumnValue(temporaryColumnName) {
super._transformSchema(schema)
}
}
private def withOutputColumnValue[T](columnName: String)(f: => T): T = {
outputColumnValue = Some(columnName)
try {
f
} finally {
outputColumnValue = None
}
}
private def getOutputColumnName(inputColumnName: String): String = {
$(singleInPlaceChoice) match {
case YesInPlaceChoice() => inputColumnName
case no: NoInPlaceChoice => no.getOutputColumn
}
}
override def replicate(
extra: io.deepsense.deeplang.params.ParamMap): SparkSingleColumnModelWrapper.this.type = {
val model = super.replicate(extractParamMap(extra))
model.outputColumnValue = outputColumnValue
model
}
}
|
deepsense-io/seahorse-workflow-executor
|
deeplang/src/main/scala/io/deepsense/deeplang/doperables/SparkSingleColumnModelWrapper.scala
|
Scala
|
apache-2.0
| 6,945
|
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.performance.feeder
import io.gatling.core.config.GatlingConfiguration
import org.scalatest.{Matchers, WordSpec}
class CsvFeederSpec extends WordSpec with Matchers {
implicit val configuration = GatlingConfiguration.loadForTest()
"The feeder" should {
"return values by key" in {
val feeder: CsvFeeder = new CsvFeeder("data/helloworld.csv")
val next: Map[String, String] = feeder.next()
next("username") shouldBe "bob"
next("password") shouldBe "12345678"
}
"create only one random per request" in {
val feeder: CsvFeeder = new CsvFeeder("data/randomFeeder.csv")
val next: Map[String, String] = feeder.next()
next("username") shouldBe next("password")
}
"replace the random placeholder" in {
val feeder: CsvFeeder = new CsvFeeder("data/helloworld.csv")
val next: Map[String, String] = feeder.next()
next("email") matches "^[0-9]*@somemail.com"
}
"replace the currentTime placeholder" in {
val feeder: CsvFeeder = new CsvFeeder("data/helloworld.csv")
val next: Map[String, String] = feeder.next()
next("time") matches "^[0-9]*"
}
"replace the range place holder" in {
val feeder: CsvFeeder = new CsvFeeder("data/range.csv")
feeder.next()("username") shouldBe "bob-000001"
feeder.next()("username") shouldBe "bob-000002"
}
}
"the range" should {
"be reused in the same journey" in {
val feeder: CsvFeeder = new CsvFeeder("data/range.csv")
val next: Map[String, String] = feeder.next()
next("username") shouldBe "bob-000001"
next("password") shouldBe "000001"
}
"should work in the middle of a string" in {
val feeder: CsvFeeder = new CsvFeeder("data/range.csv")
val next: Map[String, String] = feeder.next()
next("withRangeInTheMiddle") shouldBe "90000019"
}
"restart from 1 once max is reached" in {
val feeder: CsvFeeder = new CsvFeeder("data/range.csv")
feeder.next()("other") shouldBe "1"
feeder.next()("other") shouldBe "2"
feeder.next()("other") shouldBe "3"
feeder.next()("other") shouldBe "4"
feeder.next()("other") shouldBe "5"
feeder.next()("other") shouldBe "6"
feeder.next()("other") shouldBe "7"
feeder.next()("other") shouldBe "8"
feeder.next()("other") shouldBe "9"
feeder.next()("other") shouldBe "1"
}
"use different counters for different range sizes" in {
val feeder: CsvFeeder = new CsvFeeder("data/range.csv")
feeder.next()("other") shouldBe "1"
feeder.next()("other") shouldBe "2"
feeder.next()("other") shouldBe "3"
feeder.next()("other") shouldBe "4"
feeder.next()("other") shouldBe "5"
feeder.next()("other") shouldBe "6"
feeder.next()("other") shouldBe "7"
feeder.next()("other") shouldBe "8"
feeder.next()("other") shouldBe "9"
val next: Map[String, String] = feeder.next()
next("other") shouldBe "1"
next("password") shouldBe "000010"
}
"Throw an exception if the feeder file is not found" in {
val thrown = intercept[IllegalArgumentException] {
new CsvFeeder("data/notAvailable.csv")
}
thrown.getMessage shouldBe "Could not locate feeder file; Resource data/notAvailable.csv not found"
}
}
}
|
hmrc/performance-test-runner
|
src/test/scala/uk/gov/hmrc/performance/feeder/CsvFeederSpec.scala
|
Scala
|
apache-2.0
| 4,007
|
package sheetkram.model
case class Sheet private (
name : String,
columns : IndexedSeq[ Column ],
rows : IndexedSeq[ Row ] ) {
def updateCell( colIdx : Int, rowIdx : Int, cell : Cell ) : Sheet = {
val _columns : IndexedSeq[ Column ] =
if ( colIdx >= columns.size ) {
if ( rowIdx >= rows.size ) {
columns.map { col => col.ensure( rowIdx ) } ++
IndexedSeq.fill( colIdx - columns.size )( Column( rowIdx + 1 ) ) :+
Column( rowIdx + 1 ).updateCell( rowIdx, cell )
} else {
columns ++
IndexedSeq.fill( colIdx - columns.size )( Column( rows.size ) ) :+
Column( rows.size ).updateCell( rowIdx, cell )
}
} else {
if ( rowIdx >= rows.size ) {
columns.map { col => col.ensure( rowIdx ) }.
updated( colIdx, columns( colIdx ).updateCell( rowIdx, cell ) )
} else {
columns.updated( colIdx, columns( colIdx ).updateCell( rowIdx, cell ) )
}
}
val _rows : IndexedSeq[ Row ] =
if ( rowIdx >= rows.size ) {
if ( colIdx >= columns.size ) {
rows.map { row => row.ensure( colIdx ) } ++
IndexedSeq.fill( rowIdx - rows.size )( Row( colIdx + 1 ) ) :+
Row( colIdx + 1 ).updateCell( colIdx, cell )
} else {
rows ++
IndexedSeq.fill( rowIdx - rows.size )( Row( columns.size ) ) :+
Row( columns.size ).updateCell( colIdx, cell )
}
} else {
if ( colIdx >= columns.size ) {
rows.map { row => row.ensure( colIdx ) }.
updated( rowIdx, rows( rowIdx ).updateCell( colIdx, cell ) )
} else {
rows.updated( rowIdx, rows( rowIdx ).updateCell( colIdx, cell ) )
}
}
copy( columns = _columns, rows = _rows )
}
}
object Sheet {
def apply( name : String ) : Sheet = Sheet( name, IndexedSeq(), IndexedSeq() )
}
|
kfwalkow/sheetkram
|
src/main/scala/sheetkram/model/Sheet.scala
|
Scala
|
bsd-3-clause
| 1,936
|
/*
* VETest.scala
* Variable elimination tests.
*
* Created By: Avi Pfeffer (apfeffer@cra.com)
* Creation Date: Jan 1, 2009
*
* Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email figaro@cra.com for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.test.algorithm.factored
import org.scalatest.Matchers
import org.scalatest.{ WordSpec, PrivateMethodTester }
import math.log
import com.cra.figaro.algorithm._
import com.cra.figaro.algorithm.factored._
import com.cra.figaro.algorithm.factored.factors._
import com.cra.figaro.algorithm.sampling._
import com.cra.figaro.language._
import com.cra.figaro.library.compound._
import com.cra.figaro.library.atomic.continuous._
import com.cra.figaro.util._
import com.cra.figaro.test._
import scala.collection.mutable.Map
import com.cra.figaro.test.tags.Performance
import com.cra.figaro.test.tags.NonDeterministic
class VETest extends WordSpec with Matchers {
"A VEGraph" when {
"initially" should {
"associate each element with all its factors and neighbors" in {
Universe.createNew()
val e1 = Flip(0.1)
val e2 = Constant(8)
val e3 = Select(0.2 -> "a", 0.3 -> "b", 0.5 -> "c")
val e4 = Flip(0.7)
val e5 = Constant('a)
val e6 = Select(0.1 -> 1.5, 0.9 -> 2.5)
val v1 = Variable(e1)
val v2 = Variable(e2)
val v3 = Variable(e3)
val v4 = Variable(e4)
val v5 = Variable(e5)
val v6 = Variable(e6)
val f = Factory.simpleMake[Double](List(v1, v2, v3, v4))
val g = Factory.simpleMake[Double](List(v5, v3, v2, v6))
val af = AbstractFactor(f.variables)
val ag = AbstractFactor(g.variables)
val graph = new VEGraph(List(f, g))
val info = graph.info
info(v1) should equal(VariableInfo(Set(af), Set(v1, v2, v3, v4)))
info(v2) should equal(VariableInfo(Set(af, ag), Set(v1, v2, v3, v4, v5, v6)))
info(v3) should equal(VariableInfo(Set(af, ag), Set(v1, v2, v3, v4, v5, v6)))
info(v4) should equal(VariableInfo(Set(af), Set(v1, v2, v3, v4)))
info(v5) should equal(VariableInfo(Set(ag), Set(v2, v3, v5, v6)))
info(v6) should equal(VariableInfo(Set(ag), Set(v2, v3, v5, v6)))
}
}
"computing the cost of a set of factors" should {
"return the sum over the factors of the product of the ranges of the variables in each factor" in {
Universe.createNew()
val e1 = Flip(0.1)
val e2 = Constant(8)
val e3 = Select(0.2 -> "a", 0.3 -> "b", 0.5 -> "c")
val e4 = Flip(0.7)
val e5 = Constant('a)
val e6 = Select(0.1 -> 1.5, 0.9 -> 2.5)
Values()(e1)
Values()(e2)
Values()(e3)
Values()(e4)
Values()(e5)
Values()(e6)
val v1 = Variable(e1)
val v2 = Variable(e2)
val v3 = Variable(e3)
val v4 = Variable(e4)
val v5 = Variable(e5)
val v6 = Variable(e6)
val f = Factory.simpleMake[Double](List(v1, v2, v3, v4))
val g = Factory.simpleMake[Double](List(v5, v3, v2, v6))
val af = AbstractFactor(f.variables)
val ag = AbstractFactor(g.variables)
VEGraph.cost(List(af, ag)) should equal(18) // 2*1*3*2 + 1*3*1*2
}
}
"computing the score of eliminating a variable" should {
"return the cost of the new factor minus the costs of the old factors when eliminating a variable" in {
Universe.createNew()
val e1 = Flip(0.1)
val e2 = Constant(8)
val e3 = Select(0.2 -> "a", 0.3 -> "b", 0.5 -> "c")
val e4 = Flip(0.7)
val e5 = Constant('a)
val e6 = Select(0.1 -> 1.5, 0.9 -> 2.5)
val e7 = Flip(0.9)
Values()(e1)
Values()(e2)
Values()(e3)
Values()(e4)
Values()(e5)
Values()(e6)
Values()(e7)
val v1 = Variable(e1)
val v2 = Variable(e2)
val v3 = Variable(e3)
val v4 = Variable(e4)
val v5 = Variable(e5)
val v6 = Variable(e6)
val v7 = Variable(e7)
val f = Factory.simpleMake[Double](List(v1, v2, v3, v4))
val g = Factory.simpleMake[Double](List(v5, v3, v2, v6))
val h = Factory.simpleMake[Double](List(v1, v7))
val graph1 = new VEGraph(List(f, g, h))
val score = graph1.score(v3)
score should equal(-10) // 2*1*2*1*2 - (2*1*3*2 + 1*3*1*2)
}
}
"after eliminating a variable" should {
"contain a factor involving all its neighbors and not contain " +
"any of the original factors involving the variable" in {
Universe.createNew()
val e1 = Flip(0.1)
val e2 = Constant(8)
val e3 = Select(0.2 -> "a", 0.3 -> "b", 0.5 -> "c")
val e4 = Flip(0.7)
val e5 = Constant('a)
val e6 = Select(0.1 -> 1.5, 0.9 -> 2.5)
val e7 = Flip(0.9)
val v1 = Variable(e1)
val v2 = Variable(e2)
val v3 = Variable(e3)
val v4 = Variable(e4)
val v5 = Variable(e5)
val v6 = Variable(e6)
val v7 = Variable(e7)
val f = Factory.simpleMake[Double](List(v1, v2, v3, v4))
val g = Factory.simpleMake[Double](List(v5, v3, v2, v6))
val h = Factory.simpleMake[Double](List(v1, v7))
val graph1 = new VEGraph(List(f, g, h))
val graph2 = graph1.eliminate(v3)
val VariableInfo(v1Factors, v1Neighbors) = graph2.info(v1)
v1Factors.size should equal(2) // h and the new factor
assert(v1Factors exists ((factor: AbstractFactor) => factor.variables.size == 5)) // all except v3 and v7
}
"not have any other variables have the variable as a neighbor" in {
Universe.createNew()
val e1 = Flip(0.1)
val e2 = Constant(8)
val e3 = Select(0.2 -> "a", 0.3 -> "b", 0.5 -> "c")
val e4 = Flip(0.7)
val e5 = Constant('a)
val e6 = Select(0.1 -> 1.5, 0.9 -> 2.5)
val e7 = Flip(0.9)
val v1 = Variable(e1)
val v2 = Variable(e2)
val v3 = Variable(e3)
val v4 = Variable(e4)
val v5 = Variable(e5)
val v6 = Variable(e6)
val v7 = Variable(e7)
val f = Factory.simpleMake[Double](List(v1, v2, v3, v4))
val g = Factory.simpleMake[Double](List(v5, v3, v2, v6))
val h = Factory.simpleMake[Double](List(v1, v7))
val graph1 = new VEGraph(List(f, g, h))
val graph2 = graph1.eliminate(v3)
val VariableInfo(v1Factors, v1Neighbors) = graph2.info(v1)
v1Neighbors should not contain (v3)
}
}
}
"Computing an elimination order" should {
"select an elimination order that eliminates all except the variables to preserve while greedily minimizing " +
"cost" in {
Universe.createNew()
val e1 = Flip(0.1)
val e2 = Constant(8)
val e3 = Select(0.2 -> "a", 0.3 -> "b", 0.5 -> "c")
val e4 = Flip(0.7)
val e5 = Constant('a)
val e6 = Select(0.1 -> 1.5, 0.9 -> 2.5)
val e7 = Flip(0.9)
val e8 = Flip(0.3)
Values()(e1)
Values()(e2)
Values()(e3)
Values()(e4)
Values()(e5)
Values()(e6)
Values()(e7)
Values()(e8)
val v1 = Variable(e1)
val v2 = Variable(e2)
val v3 = Variable(e3)
val v4 = Variable(e4)
val v5 = Variable(e5)
val v6 = Variable(e6)
val v7 = Variable(e7)
val v8 = Variable(e8)
val f = Factory.simpleMake[Double](List(v1, v2, v3, v4))
val g = Factory.simpleMake[Double](List(v5, v3, v2, v6))
val h = Factory.simpleMake[Double](List(v1, v7))
val i = Factory.simpleMake[Double](List(v8, v1, v3))
val order = VariableElimination.eliminationOrder(List(f, g, h, i), Set(v5, v8))._2
assert(order == List(v3, v4, v1, v6, v7, v2) ||
order == List(v3, v4, v1, v7, v6, v2) ||
order == List(v3, v4, v6, v1, v7, v2) ||
order == List(v3, v6, v1, v4, v7, v2) ||
order == List(v3, v6, v1, v7, v4, v2) ||
order == List(v3, v6, v4, v1, v7, v2))
}
// While this test is non-deterministic, it tests time as a "less than" and the usual T-Test will not work well here
// Also, we shouldn't test our performance this way.... every machine is different so this isn't likely to pass often
"take O(|factors| log |variables|)" taggedAs (Performance, NonDeterministic) in {
Universe.createNew()
val small = 100
val large = 200
def make(numVars: Int): Traversable[Factor[Double]] = {
val universe = new Universe
val a: List[Variable[_]] = List.tabulate(numVars)(i => Variable(Flip(0.3)("", universe)))
for { i <- 0 to numVars - 2 } yield Factory.simpleMake[Double](List(a(i), a(i + 1)))
}
val factors1 = make(small)
val factors2 = make(large)
def order(factors: Traversable[Factor[Double]])() =
VariableElimination.eliminationOrder(factors, List())._2
val time1 = measureTime(order(factors1), 20, 100)
val time2 = measureTime(order(factors2), 20, 100)
val slack = 1.1
time2 / time1 should be < (large / small * log(large) / log(small) * slack)
}
}
"Running VariableElimination" should {
"with no conditions or constraints produce the correct result" in {
Universe.createNew()
val u = Select(0.25 -> 0.3, 0.25 -> 0.5, 0.25 -> 0.7, 0.25 -> 0.9)
val f = Flip(u)
val a = If(f, Select(0.3 -> 1, 0.7 -> 2), Constant(2))
test(f, (b: Boolean) => b, 0.6)
}
"with a condition on a dependent element produce the result with the correct probability" in {
Universe.createNew()
val u = Select(0.25 -> 0.3, 0.25 -> 0.5, 0.25 -> 0.7, 0.25 -> 0.9)
val f = Flip(u)
val a = If(f, Select(0.3 -> 1, 0.7 -> 2), Constant(2))
a.setCondition((i: Int) => i == 2)
      // U(true) = \\int_{0.2}^{1.0} 0.7 p = 0.35 * 0.96
      // U(false) = \\int_{0.2}^{1.0} (1-p)
val u1 = 0.35 * 0.96
val u2 = 0.32
test(f, (b: Boolean) => b, u1 / (u1 + u2))
}
"with a constraint on a dependent element produce the result with the correct probability" in {
Universe.createNew()
val u = Select(0.25 -> 0.3, 0.25 -> 0.5, 0.25 -> 0.7, 0.25 -> 0.9)
val f = Flip(u)
val a = If(f, Select(0.3 -> 1, 0.7 -> 2), Constant(2))
a.setConstraint((i: Int) => i.toDouble)
// U(true) = \\int_{0.2}^{1.0} (0.3 + 2 * 0.7) p = 0.85 * 0.96
      // U(false) = \\int_{0.2}^{1.0} (2 * (1-p)) = 0.64
val u1 = 0.85 * 0.96
val u2 = 0.64
test(f, (b: Boolean) => b, u1 / (u1 + u2))
}
"with an element that uses another element multiple times, " +
"always produce the same value for the different uses" in {
Universe.createNew()
val f = Flip(0.5)
val e = f === f
test(e, (b: Boolean) => b, 1.0)
}
"with a constraint on an element that is used multiple times, only factor in the constraint once" in {
Universe.createNew()
val f1 = Flip(0.5)
val f2 = Flip(0.3)
val e1 = f1 === f1
val e2 = f1 === f2
val d = Dist(0.5 -> e1, 0.5 -> e2)
f1.setConstraint((b: Boolean) => if (b) 3.0; else 2.0)
// Probability that f1 is true = 0.6
// Probability that e1 is true = 1.0
// Probability that e2 is true = 0.6 * 0.3 + 0.4 * 0.7 = 0.46
// Probability that d is true = 0.5 * 1 + 0.5 * 0.46 = 0.73
test(d, (b: Boolean) => b, 0.73)
}
"with elements that are not used by the query or evidence, produce the correct result" in {
val u1 = Universe.createNew()
val u = Select(0.25 -> 0.3, 0.25 -> 0.5, 0.25 -> 0.7, 0.25 -> 0.9)
val f = Flip(u)
val a = If(f, Select(0.3 -> 1, 0.7 -> 2), Constant(2))
test(f, (b: Boolean) => b, 0.6)
}
"on a different universe from the current universe, produce the correct result" in {
val u1 = Universe.createNew()
val u = Select(0.25 -> 0.3, 0.25 -> 0.5, 0.25 -> 0.7, 0.25 -> 0.9)
val f = Flip(u)
Universe.createNew()
val tolerance = 0.0000001
val algorithm = VariableElimination(f)(u1)
algorithm.start()
algorithm.probability(f, (b: Boolean) => b) should be(0.6 +- tolerance)
algorithm.kill()
}
"with a model using chain and no conditions or constraints, produce the correct answer" in {
Universe.createNew()
val f = Flip(0.3)
val s1 = Select(0.1 -> 1, 0.9 -> 2)
val s2 = Select(0.7 -> 1, 0.2 -> 2, 0.1 -> 3)
val c = Chain(f, (b: Boolean) => if (b) s1; else s2)
test(c, (i: Int) => i == 1, 0.3 * 0.1 + 0.7 * 0.7)
}
"with a model using chain and a condition on the result, correctly condition the parent" in {
Universe.createNew()
val f = Flip(0.3)
val s1 = Select(0.1 -> 1, 0.9 -> 2)
val s2 = Select(0.7 -> 1, 0.2 -> 2, 0.1 -> 3)
val c = Chain(f, (b: Boolean) => if (b) s1; else s2)
c.observe(1)
test(f, (b: Boolean) => b, 0.3 * 0.1 / (0.3 * 0.1 + 0.7 * 0.7))
}
"with a model using chain and a condition on one of the outcome elements, correctly condition the result " +
"but not change the belief about the parent" in {
Universe.createNew()
val f = Flip(0.3)
val s1 = Select(0.1 -> 1, 0.9 -> 2)
val s2 = Select(0.7 -> 1, 0.2 -> 2, 0.1 -> 3)
val c = Chain(f, (b: Boolean) => if (b) s1; else s2)
s1.observe(1)
test(c, (i: Int) => i == 1, 0.3 * 1 + 0.7 * 0.7)
test(f, (b: Boolean) => b, 0.3)
}
"with a dependent universe, correctly take into account probability of evidence in the dependent universe" in {
Universe.createNew()
val x = Flip(0.1)
val y = Flip(0.2)
val dependentUniverse = new Universe(List(x, y))
val u1 = Uniform(0.0, 1.0)("", dependentUniverse)
val u2 = Uniform(0.0, 2.0)("", dependentUniverse)
val a = CachingChain(x, y, (x: Boolean, y: Boolean) => if (x || y) u1; else u2)("a", dependentUniverse)
val condition = (d: Double) => d < 0.5
val ve = VariableElimination(List((dependentUniverse, List(NamedEvidence("a", Condition(condition))))), x)
ve.start()
val peGivenXTrue = 0.5
val peGivenXFalse = 0.2 * 0.5 + 0.8 * 0.25
val unnormalizedPXTrue = 0.1 * peGivenXTrue
val unnormalizedPXFalse = 0.9 * peGivenXFalse
val pXTrue = unnormalizedPXTrue / (unnormalizedPXTrue + unnormalizedPXFalse)
ve.probability(x, true) should be(pXTrue +- 0.01)
ve.kill()
}
"with a contingent condition, correctly take into account the contingency" in {
Universe.createNew()
val x = Flip(0.1)
val y = Flip(0.2)
y.setCondition((b: Boolean) => b, List(Element.ElemVal(x, true)))
// Probability of y should be (0.1 * 0.2 + 0.9 * 0.2) / (0.1 * 0.2 + 0.9 * 0.2 + 0.9 * 0.8) (because the case where x is true and y is false has been ruled out)
val ve = VariableElimination(y)
ve.start
ve.probability(y, true) should be(((0.1 * 0.2 + 0.9 * 0.2) / (0.1 * 0.2 + 0.9 * 0.2 + 0.9 * 0.8)) +- 0.0000000001)
ve.kill
}
"with a very wide model produce the correct result" in {
Universe.createNew()
var root = Flip(0.5)
val rand = new scala.util.Random(System.currentTimeMillis)
for (_ <- 0 until 1000) {
val v = If(root, Flip(0.5), Flip(0.5))
if (rand.nextBoolean) {
v.observe(true)
} else {
v.observe(false)
}
}
test(root, (r: Boolean) => r == true, 0.50)
}
}
"MPEVariableElimination" should {
"compute the most likely values of all the variables given the conditions and constraints" in {
Universe.createNew()
val e1 = Flip(0.5)
e1.setConstraint((b: Boolean) => if (b) 3.0; else 1.0)
val e2 = If(e1, Flip(0.4), Flip(0.9))
val e3 = If(e1, Flip(0.52), Flip(0.4))
val e4 = e2 === e3
e4.observe(true)
// p(e1=T,e2=T,e3=T) = 0.75 * 0.4 * 0.52 = .156
// p(e1=T,e2=F,e3=F) = 0.75 * 0.6 * 0.48 = .216
// p(e1=F,e2=T,e3=T) = 0.25 * 0.9 * 0.4 = .09
// p(e1=F,e2=F,e3=F) = 0.25 * 0.1 * 0.6 = .015
// MPE: e1=T,e2=F,e3=F,e4=T
val alg = MPEVariableElimination()
alg.start
alg.mostLikelyValue(e1) should equal(true)
alg.mostLikelyValue(e2) should equal(false)
alg.mostLikelyValue(e3) should equal(false)
alg.mostLikelyValue(e4) should equal(true)
alg.kill
}
}
def test[T](target: Element[T], predicate: T => Boolean, prob: Double) {
val tolerance = 0.0000001
val algorithm = VariableElimination(target)
algorithm.start()
algorithm.probability(target, predicate) should be(prob +- tolerance)
algorithm.kill()
}
}
|
jyuhuan/figaro
|
Figaro/src/test/scala/com/cra/figaro/test/algorithm/factored/VETest.scala
|
Scala
|
bsd-3-clause
| 16,981
|
package org.phenoscape.kb.matrix.reports
import java.io.File
import scala.collection.JavaConversions._
import org.obo.datamodel.impl.OBOSessionImpl
import org.phenoscape.io.NeXMLReader
import org.phenoscape.model.AssociationSupport
import org.phenoscape.model.DataSet
object CharacterReport {
def report(filePath: String, termID: String): String = {
val dataset = new NeXMLReader(new File(filePath), new OBOSessionImpl()).getDataSet
val taxonToSupports = Report.directAndIndirectSupportsForCharacterValuesByTaxon(dataset, termID)
println("taxon\tdirects\tindirects")
val lines = for ((taxon, (directs, indirects)) <- taxonToSupports) yield {
val taxonName = Report.findTaxonLabel(taxon, dataset).getOrElse(taxon)
(s"$taxonName\t${directs.size}\t${indirects.size}")
}
lines.mkString("\n")
}
}
|
phenoscape/ontotrace
|
src/main/scala/org/phenoscape/kb/matrix/reports/CharacterReport.scala
|
Scala
|
mit
| 839
|
/* sbt -- Simple Build Tool
* Copyright 2010 Mark Harrah
*/
package sbt
package inc
import xsbti.api.Source
import java.io.File
import APIs.getAPI
trait APIs
{
/** The API for the source file `src` at the time represented by this instance.
* This method returns an empty API if the file had no API or is not known to this instance. */
def internalAPI(src: File): Source
/** The API for the external class `ext` at the time represented by this instance.
* This method returns an empty API if the file had no API or is not known to this instance. */
def externalAPI(ext: String): Source
def allExternals: collection.Set[String]
def allInternalSources: collection.Set[File]
def ++ (o: APIs): APIs
def markInternalSource(src: File, api: Source): APIs
def markExternalAPI(ext: String, api: Source): APIs
def removeInternal(remove: Iterable[File]): APIs
def filterExt(keep: String => Boolean): APIs
def internal: Map[File, Source]
def external: Map[String, Source]
}
object APIs
{
def apply(internal: Map[File, Source], external: Map[String, Source]): APIs = new MAPIs(internal, external)
def empty: APIs = apply(Map.empty, Map.empty)
val emptyAPI = new xsbti.api.SourceAPI(Array(), Array())
val emptyCompilation = new xsbti.api.Compilation(-1, Array())
val emptySource = new xsbti.api.Source(emptyCompilation, Array(), emptyAPI, 0, false)
def getAPI[T](map: Map[T, Source], src: T): Source = map.getOrElse(src, emptySource)
}
private class MAPIs(val internal: Map[File, Source], val external: Map[String, Source]) extends APIs
{
def allInternalSources: collection.Set[File] = internal.keySet
def allExternals: collection.Set[String] = external.keySet
def ++ (o: APIs): APIs = new MAPIs(internal ++ o.internal, external ++ o.external)
def markInternalSource(src: File, api: Source): APIs =
new MAPIs(internal.updated(src, api), external)
def markExternalAPI(ext: String, api: Source): APIs =
new MAPIs(internal, external.updated(ext, api))
def removeInternal(remove: Iterable[File]): APIs = new MAPIs(internal -- remove, external)
def filterExt(keep: String => Boolean): APIs = new MAPIs(internal, external.filterKeys(keep))
def internalAPI(src: File) = getAPI(internal, src)
def externalAPI(ext: String) = getAPI(external, ext)
}
|
gilt/xsbt
|
compile/inc/APIs.scala
|
Scala
|
bsd-3-clause
| 2,289
|
import java.lang.reflect.ParameterizedType
object Test {
def main(args: Array[String]): Unit = {
val objectB = classOf[Foo[Any]].getClasses
val returnType = objectB(1).getDeclaredMethod("m").getGenericReturnType.asInstanceOf[ParameterizedType]
val out1 = "Test$Foo.Test$Foo$A<Test.Test$Foo<T1>.B$>" // Windows
val out2 = "Test$Foo$A<Test$Foo<T1>$B$>" // Linux, OSX and sometimes Windows
if (scala.util.Properties.isWin)
assert(returnType.toString == out1 || returnType.toString == out2)
else
assert(returnType.toString == out2)
}
class Foo[T1] {
class A[T2]
object B {
def m: A[B.type] = ???
}
}
}
|
dotty-staging/dotty
|
tests/generic-java-signatures/derivedNames.scala
|
Scala
|
apache-2.0
| 678
|
package hello
object world extends App {
println("hello dotty!")
/*/* one
*/
two
*/
println("foo")
}
|
lampepfl/dotty
|
tests/pos/i1052.scala
|
Scala
|
apache-2.0
| 114
|
package net.paploo.diestats.expression.evaluator
/**
* The base trait for evaluators over a numeric domain.
*
* Defines functionality that is expected of a numeric domain.
*
* Note that plus is almost always a synonym for convolution.
*
* @tparam A The domain type
* @tparam R The evaluation result type
*/
trait NumericEvaluator[A, R] extends OrderedEvaluator[A, R] {
def plus(x: R, y: R): R = convolve(x, y)
def minus(x: R, y: R): R
def times(x: R, y: R): R
def quot(x: R, y: R): R
def negate(x: R): R
def rangedValues(max: A): R
def rangedValues(min: A, max: A): R
}
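// As an illustration of why plus defaults to convolution (a sketch, not part of this project's
// evaluators): with a result type R = Map[Int, Long] of outcome frequencies over an integer-like
// domain, convolve would combine two distributions by summing pairwise outcomes, e.g.
//
//   def convolve(x: Map[Int, Long], y: Map[Int, Long]): Map[Int, Long] =
//     (for ((xi, xc) <- x.toSeq; (yi, yc) <- y.toSeq) yield (xi + yi) -> xc * yc)
//       .groupBy(_._1).map { case (k, vs) => k -> vs.map(_._2).sum }
//
// which is exactly the "plus" of two independent die rolls.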
|
paploo/DieStats
|
src/main/scala/net/paploo/diestats/expression/evaluator/NumericEvaluator.scala
|
Scala
|
bsd-3-clause
| 605
|
package com.timgroup.matchless
import MatcherMatchers._
import org.specs2.matcher.MustMatchers._
import org.specs2.Specification
class MatcherMatcherSpec extends Specification {
def is =
"A MatcherMatcher expecting success" ^
"matches when the matcher matches the value" ! {
contain("hello") must matchTheValue("hello world")
} ^
"fails to match when the matcher doesn't match the value" ! {
matchTheValue("goodbye sweetheart") must failToMatchTheValue(contain("hello"))
} ^ end ^
"A MatcherMatcher expecting failure" ^
"matches when the matcher fails to match the value" ! {
contain("xyzzy") must failToMatchTheValue("abracadabra")
} ^
"fails to match when the matcher matches the value" ! {
failToMatchTheValue("abracadabra") must failToMatchTheValue(contain("braca"))
} ^ end ^
"A MatcherMatcher expecting a failure message" ^
"matches when the matcher fails, but the failure message matches" ! {
contain("hello") must failToMatchTheValue("goodbye sweetheart").withTheMessage("'goodbye sweetheart' doesn't contain 'hello'")
} ^
"fails to match when the matcher fails, but the failure message does not match" ! {
failToMatchTheValue("goodbye sweetheart").withMessageLike(contain("eels up inside yer")) must
failToMatchTheValue(contain("hello")).withMessageLike(contain("doesn't contain 'eels up inside yer'"))
} ^ end
}
|
tim-group/matchless
|
src/test/scala/com/timgroup/matchless/MatcherMatcherSpec.scala
|
Scala
|
mit
| 1,428
|
/*
* Copyright (c) 2013-2017 Snowplow Analytics Ltd.
* All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache
* License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied.
*
* See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package stream
package sources
// NSQ
import com.snowplowanalytics.client.nsq.NSQConsumer
import com.snowplowanalytics.client.nsq.lookup.DefaultNSQLookup
import com.snowplowanalytics.client.nsq.NSQMessage
import com.snowplowanalytics.client.nsq.NSQConfig
import com.snowplowanalytics.client.nsq.callbacks.NSQMessageCallback
import com.snowplowanalytics.client.nsq.callbacks.NSQErrorCallback
import com.snowplowanalytics.client.nsq.exceptions.NSQException
// Iglu
import iglu.client.Resolver
// Snowplow
import common.enrichments.EnrichmentRegistry
// Tracker
import com.snowplowanalytics.snowplow.scalatracker.Tracker
// Logging
import org.slf4j.LoggerFactory
// This project
import model._
/**
* Source to read raw events from NSQ.
*
* @param config Configuration for NSQ
* @param igluResolver Instance of resolver for iglu
* @param enrichmentRegistry EnrichmentRegistry instance
* @param tracker Tracker instance
*/
class NsqSource(
config: EnrichConfig,
igluResolver: Resolver,
enrichmentRegistry: EnrichmentRegistry,
tracker: Option[Tracker]
) extends AbstractSource(config, igluResolver, enrichmentRegistry, tracker) {
lazy val log = LoggerFactory.getLogger(getClass())
/**
   * Starts the consumer, which then waits for new messages.
*/
override def run(): Unit = {
val nsqCallback = new NSQMessageCallback {
override def message(msg: NSQMessage): Unit = {
val bytes = msg.getMessage()
enrichAndStoreEvents(List(bytes)) match {
case true => msg.finished()
case false => log.error(s"Error while enriching the event")
}
}
}
val errorCallback = new NSQErrorCallback {
override def error(e: NSQException): Unit =
log.error(s"Exception while consuming topic $config.streams.in.raw", e)
}
// use NSQLookupd
val lookup = new DefaultNSQLookup
lookup.addLookupAddress(config.streams.nsq.host, config.streams.nsq.lookupPort)
val consumer = new NSQConsumer(lookup,
config.streams.in.raw,
config.streams.nsq.rawChannel,
nsqCallback,
new NSQConfig(),
errorCallback)
consumer.start()
}
}
|
aldemirenes/snowplow
|
3-enrich/stream-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich.stream/sources/NsqSource.scala
|
Scala
|
apache-2.0
| 3,067
|
package eu.stratosphere.peel.flink
import org.apache.flink.api.scala._
/**
* Example Apache Flink Job that counts the word occurrences in a given dataset.
*
* This example is built on top of the Apache Flink Scala API.
*/
object Wordcount {
def main (args: Array[String]) {
if (args.length != 2) {
return
}
val inputPath = args(0)
val outputPath = args(1)
val env = ExecutionEnvironment.getExecutionEnvironment
env.readTextFile(inputPath)
.flatMap { _.toLowerCase.split("\\W+") }
.map { (_, 1) }
.groupBy(0)
.sum(1)
.writeAsCsv(outputPath)
env.execute()
}
}
|
carabolic/peel-wordcount-bundle
|
peel-wordcount-flink-jobs/src/main/scala/eu/stratosphere/peel/flink/Wordcount.scala
|
Scala
|
apache-2.0
| 637
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.plans.logical
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.catalyst.trees.CurrentOrigin
import org.apache.spark.sql.types.StructType
abstract class LogicalPlan extends QueryPlan[LogicalPlan] with Logging {
private var _analyzed: Boolean = false
/**
* Marks this plan as already analyzed. This should only be called by CheckAnalysis.
*/
private[catalyst] def setAnalyzed(): Unit = { _analyzed = true }
/**
   * Returns true if this node and its children have already gone through analysis and
* verification. Note that this is only an optimization used to avoid analyzing trees that
* have already been analyzed, and can be reset by transformations.
*/
def analyzed: Boolean = _analyzed
/** Returns true if this subtree contains any streaming data sources. */
  def isStreaming: Boolean = children.exists(_.isStreaming)
/**
* Returns a copy of this node where `rule` has been recursively applied first to all of its
* children and then itself (post-order). When `rule` does not apply to a given node, it is left
* unchanged. This function is similar to `transformUp`, but skips sub-trees that have already
* been marked as analyzed.
*
   * @param rule the function used to transform this node's children
*/
def resolveOperators(rule: PartialFunction[LogicalPlan, LogicalPlan]): LogicalPlan = {
if (!analyzed) {
val afterRuleOnChildren = transformChildren(rule, (t, r) => t.resolveOperators(r))
if (this fastEquals afterRuleOnChildren) {
CurrentOrigin.withOrigin(origin) {
rule.applyOrElse(this, identity[LogicalPlan])
}
} else {
CurrentOrigin.withOrigin(origin) {
rule.applyOrElse(afterRuleOnChildren, identity[LogicalPlan])
}
}
} else {
this
}
}
/**
* Recursively transforms the expressions of a tree, skipping nodes that have already
* been analyzed.
*/
def resolveExpressions(r: PartialFunction[Expression, Expression]): LogicalPlan = {
this resolveOperators {
case p => p.transformExpressions(r)
}
}
/**
* Computes [[Statistics]] for this plan. The default implementation assumes the output
   * cardinality is the product of all child plans' cardinalities, i.e. it applies in the case
* of cartesian joins.
*
* [[LeafNode]]s must override this.
*/
def statistics: Statistics = {
if (children.isEmpty) {
throw new UnsupportedOperationException(s"LeafNode $nodeName must implement statistics.")
}
Statistics(sizeInBytes = children.map(_.statistics.sizeInBytes).product)
}
/**
* Returns the maximum number of rows that this plan may compute.
*
   * Any operator that a Limit can be pushed past should override this function (e.g., Union).
* Any operator that can push through a Limit should override this function (e.g., Project).
*/
def maxRows: Option[Long] = None
/**
* Returns true if this expression and all its children have been resolved to a specific schema
* and false if it still contains any unresolved placeholders. Implementations of LogicalPlan
* can override this (e.g.
* [[org.apache.spark.sql.catalyst.analysis.UnresolvedRelation UnresolvedRelation]]
* should return `false`).
*/
lazy val resolved: Boolean = expressions.forall(_.resolved) && childrenResolved
override protected def statePrefix = if (!resolved) "'" else super.statePrefix
/**
   * Returns true if all the children of this query plan have been resolved.
*/
def childrenResolved: Boolean = children.forall(_.resolved)
override lazy val canonicalized: LogicalPlan = EliminateSubqueryAliases(this)
/**
* Resolves a given schema to concrete [[Attribute]] references in this query plan. This function
* should only be called on analyzed plans since it will throw [[AnalysisException]] for
* unresolved [[Attribute]]s.
*/
def resolve(schema: StructType, resolver: Resolver): Seq[Attribute] = {
schema.map { field =>
resolve(field.name :: Nil, resolver).map {
case a: AttributeReference => a
case other => sys.error(s"can not handle nested schema yet... plan $this")
}.getOrElse {
throw new AnalysisException(
s"Unable to resolve ${field.name} given [${output.map(_.name).mkString(", ")}]")
}
}
}
/**
* Optionally resolves the given strings to a [[NamedExpression]] using the input from all child
* nodes of this LogicalPlan. The attribute is expressed as
   * a string in the following form: `[scope].AttributeName.[nested].[fields]...`.
*/
def resolveChildren(
nameParts: Seq[String],
resolver: Resolver): Option[NamedExpression] =
resolve(nameParts, children.flatMap(_.output), resolver)
/**
* Optionally resolves the given strings to a [[NamedExpression]] based on the output of this
   * LogicalPlan. The attribute is expressed as a string in the following form:
* `[scope].AttributeName.[nested].[fields]...`.
*/
def resolve(
nameParts: Seq[String],
resolver: Resolver): Option[NamedExpression] =
resolve(nameParts, output, resolver)
/**
* Given an attribute name, split it to name parts by dot, but
* don't split the name parts quoted by backticks, for example,
* `ab.cd`.`efg` should be split into two parts "ab.cd" and "efg".
*/
def resolveQuoted(
name: String,
resolver: Resolver): Option[NamedExpression] = {
resolve(UnresolvedAttribute.parseAttributeName(name), output, resolver)
}
/**
* Resolve the given `name` string against the given attribute, returning either 0 or 1 match.
*
* This assumes `name` has multiple parts, where the 1st part is a qualifier
* (i.e. table name, alias, or subquery alias).
   * See the comment above the `candidates` variable in resolve() for the semantics of the returned data.
*/
private def resolveAsTableColumn(
nameParts: Seq[String],
resolver: Resolver,
attribute: Attribute): Option[(Attribute, List[String])] = {
assert(nameParts.length > 1)
if (attribute.qualifier.exists(resolver(_, nameParts.head))) {
// At least one qualifier matches. See if remaining parts match.
val remainingParts = nameParts.tail
resolveAsColumn(remainingParts, resolver, attribute)
} else {
None
}
}
/**
* Resolve the given `name` string against the given attribute, returning either 0 or 1 match.
*
* Different from resolveAsTableColumn, this assumes `name` does NOT start with a qualifier.
   * See the comment above the `candidates` variable in resolve() for the semantics of the returned data.
*/
private def resolveAsColumn(
nameParts: Seq[String],
resolver: Resolver,
attribute: Attribute): Option[(Attribute, List[String])] = {
if (!attribute.isGenerated && resolver(attribute.name, nameParts.head)) {
Option((attribute.withName(nameParts.head), nameParts.tail.toList))
} else {
None
}
}
/** Performs attribute resolution given a name and a sequence of possible attributes. */
protected def resolve(
nameParts: Seq[String],
input: Seq[Attribute],
resolver: Resolver): Option[NamedExpression] = {
// A sequence of possible candidate matches.
// Each candidate is a tuple. The first element is a resolved attribute, followed by a list
// of parts that are to be resolved.
// For example, consider an example where "a" is the table name, "b" is the column name,
// and "c" is the struct field name, i.e. "a.b.c". In this case, Attribute will be "a.b",
// and the second element will be List("c").
var candidates: Seq[(Attribute, List[String])] = {
// If the name has 2 or more parts, try to resolve it as `table.column` first.
if (nameParts.length > 1) {
input.flatMap { option =>
resolveAsTableColumn(nameParts, resolver, option)
}
} else {
Seq.empty
}
}
// If none of attributes match `table.column` pattern, we try to resolve it as a column.
if (candidates.isEmpty) {
candidates = input.flatMap { candidate =>
resolveAsColumn(nameParts, resolver, candidate)
}
}
def name = UnresolvedAttribute(nameParts).name
candidates.distinct match {
// One match, no nested fields, use it.
case Seq((a, Nil)) => Some(a)
// One match, but we also need to extract the requested nested field.
case Seq((a, nestedFields)) =>
        // The foldLeft adds an ExtractValue for every remaining part of the identifier,
        // and aliases the result with the last part of the name.
// For example, consider "a.b.c", where "a" is resolved to an existing attribute.
// Then this will add ExtractValue("c", ExtractValue("b", a)), and alias the final
// expression as "c".
val fieldExprs = nestedFields.foldLeft(a: Expression)((expr, fieldName) =>
ExtractValue(expr, Literal(fieldName), resolver))
Some(Alias(fieldExprs, nestedFields.last)())
// No matches.
case Seq() =>
logTrace(s"Could not find $name in ${input.mkString(", ")}")
None
// More than one match.
case ambiguousReferences =>
val referenceNames = ambiguousReferences.map(_._1).mkString(", ")
throw new AnalysisException(
s"Reference '$name' is ambiguous, could be: $referenceNames.")
}
}
/**
* Refreshes (or invalidates) any metadata/data cached in the plan recursively.
*/
def refresh(): Unit = children.foreach(_.refresh())
}
/**
* A logical plan node with no children.
*/
abstract class LeafNode extends LogicalPlan {
override final def children: Seq[LogicalPlan] = Nil
override def producedAttributes: AttributeSet = outputSet
}
/**
* A logical plan node with single child.
*/
abstract class UnaryNode extends LogicalPlan {
def child: LogicalPlan
override final def children: Seq[LogicalPlan] = child :: Nil
/**
* Generates an additional set of aliased constraints by replacing the original constraint
* expressions with the corresponding alias
*/
protected def getAliasedConstraints(projectList: Seq[NamedExpression]): Set[Expression] = {
var allConstraints = child.constraints.asInstanceOf[Set[Expression]]
projectList.foreach {
case a @ Alias(e, _) =>
// For every alias in `projectList`, replace the reference in constraints by its attribute.
allConstraints ++= allConstraints.map(_ transform {
case expr: Expression if expr.semanticEquals(e) =>
a.toAttribute
})
allConstraints += EqualNullSafe(e, a.toAttribute)
case _ => // Don't change.
}
allConstraints -- child.constraints
}
override protected def validConstraints: Set[Expression] = child.constraints
override def statistics: Statistics = {
    // There should be some overhead in the Row object, so the size should not be zero when there
    // are no columns; this helps to prevent divide-by-zero errors.
val childRowSize = child.output.map(_.dataType.defaultSize).sum + 8
val outputRowSize = output.map(_.dataType.defaultSize).sum + 8
// Assume there will be the same number of rows as child has.
var sizeInBytes = (child.statistics.sizeInBytes * outputRowSize) / childRowSize
if (sizeInBytes == 0) {
// sizeInBytes can't be zero, or sizeInBytes of BinaryNode will also be zero
// (product of children).
sizeInBytes = 1
}
child.statistics.copy(sizeInBytes = sizeInBytes)
}
}
/**
* A logical plan node with a left and right child.
*/
abstract class BinaryNode extends LogicalPlan {
def left: LogicalPlan
def right: LogicalPlan
override final def children: Seq[LogicalPlan] = Seq(left, right)
}
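
/*
 * Hypothetical usage sketch (not part of the original file): illustrates the
 * backtick-aware splitting that resolveQuoted above relies on. The quoted part
 * `ab.cd` keeps its inner dot, so the name splits into "ab.cd" and "efg".
 */
object AttributeNameParsingExample {
  def demo(): Seq[String] =
    UnresolvedAttribute.parseAttributeName("`ab.cd`.`efg`") // Seq("ab.cd", "efg")
}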
|
Panos-Bletsos/spark-cost-model-optimizer
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala
|
Scala
|
apache-2.0
| 12,835
|
package gie.utils
package object loan {
def acquire[T <: AutoCloseable, U](resource: =>T)(fun: T=>U):U = {
val allocatedResource = resource
val r = try {
fun(allocatedResource)
} catch {
case ex: Throwable =>
try{
allocatedResource.close()
} catch {
case exx:Throwable => ex.addSuppressed(exx)
}
throw ex
}
allocatedResource.close()
r
}
def acquire[T1 <: AutoCloseable, T2 <: AutoCloseable, U](resource1: =>T1, resource2: =>T2)(fun: (T1,T2)=>U):U = {
acquire(resource1){res1=>
acquire(resource2){res2 =>
fun(res1, res2)
}
}
}
}
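
/*
 * Hypothetical usage sketch (not part of the original file): shows the loan
 * pattern above closing both streams even if the body throws, with any
 * secondary failure from close() attached as a suppressed exception.
 * The file paths are placeholders supplied by the caller.
 */
object LoanPatternExample {
  import java.io.FileInputStream

  def firstBytes(path1: String, path2: String): (Int, Int) =
    loan.acquire(new FileInputStream(path1), new FileInputStream(path2)) { (in1, in2) =>
      (in1.read(), in2.read())
    }
}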
|
gienanesobaka/ggdrive
|
src/main/scala/gie/utils/try_with_stream.scala
|
Scala
|
gpl-2.0
| 670
|
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.stats
import java.util.Date
import com.typesafe.scalalogging.LazyLogging
import com.vividsolutions.jts.geom.{Coordinate, Geometry, Point}
import org.geotools.geometry.jts.JTSFactoryFinder
import org.locationtech.geomesa.curve.TimePeriod.TimePeriod
import org.locationtech.geomesa.curve.{BinnedTime, TimePeriod, Z3SFC}
import org.locationtech.geomesa.utils.stats.MinMax.MinMaxGeometry
import org.locationtech.sfcurve.zorder.Z3
import org.opengis.feature.simple.SimpleFeature
/**
* The histogram's state is stored in an indexed array, where the index is the bin number
* and the values are the counts.
*
* Tracks geometry and date attributes as a single value.
*
* @param geomIndex geometry attribute index in the sft
* @param dtgIndex date attribute index in the sft
* @param period time period to use for z index
* @param length number of bins the histogram has, per period
*/
class Z3Histogram(val geomIndex: Int, val dtgIndex: Int, val period: TimePeriod, val length: Int)
extends Stat with LazyLogging {
import Z3Histogram._
override type S = Z3Histogram
private val sfc = Z3SFC(period)
private val timeToBin = BinnedTime.timeToBinnedTime(period)
private val binToDate = BinnedTime.binnedTimeToDate(period)
private val minZ = sfc.index(minGeom.getX, minGeom.getY, sfc.time.min.toLong).z
private val maxZ = sfc.index(maxGeom.getX, maxGeom.getY, sfc.time.max.toLong).z
private lazy val jsonFormat = period match {
case TimePeriod.Day => s"$period-%05d"
case TimePeriod.Week => s"$period-%04d"
case TimePeriod.Month => s"$period-%03d"
case TimePeriod.Year => s"$period-%02d"
}
private [stats] val binMap = scala.collection.mutable.Map.empty[Short, BinnedLongArray]
private [stats] def newBins = new BinnedLongArray(length, (minZ, maxZ))
def timeBins: Seq[Short] = binMap.keys.toSeq.sorted
def count(timeBin: Short, i: Int): Long = binMap.get(timeBin).map(_.counts(i)).getOrElse(0L)
def directIndex(timeBin: Short, value: Long): Int = binMap.get(timeBin).map(_.indexOf(value)).getOrElse(-1)
def indexOf(value: (Geometry, Date)): (Short, Int) = {
val (timeBin, z) = toKey(value._1, value._2)
(timeBin, directIndex(timeBin, z))
}
def medianValue(timeBin: Short, i: Int): (Geometry, Date) = fromKey(timeBin, binMap(timeBin).medianValue(i))
private def toKey(geom: Geometry, dtg: Date): (Short, Long) = {
import org.locationtech.geomesa.utils.geotools.Conversions.RichGeometry
val BinnedTime(bin, offset) = timeToBin(dtg.getTime)
val centroid = geom.safeCentroid()
val z = sfc.index(centroid.getX, centroid.getY, offset).z
(bin, z)
}
private def fromKey(timeBin: Short, z: Long): (Geometry, Date) = {
val (x, y, t) = sfc.invert(new Z3(z))
val dtg = Date.from(binToDate(BinnedTime(timeBin, t)).toInstant)
val geom = Z3Histogram.gf.createPoint(new Coordinate(x, y))
(geom, dtg)
}
/**
* Split the stat into a separate stat per time bin of z data. Allows for separate handling of the reduced
* data set.
*
* @return
*/
def splitByTime: Seq[(Short, Z3Histogram)] = {
binMap.toSeq.map { case (w, bins) =>
val hist = new Z3Histogram(geomIndex, dtgIndex, period, length)
hist.binMap.put(w, bins)
(w, hist)
}
}
override def observe(sf: SimpleFeature): Unit = {
val geom = sf.getAttribute(geomIndex).asInstanceOf[Geometry]
val dtg = sf.getAttribute(dtgIndex).asInstanceOf[Date]
if (geom != null && dtg != null) {
try {
val (timeBin, z3) = toKey(geom, dtg)
binMap.getOrElseUpdate(timeBin, newBins).add(z3, 1L)
} catch {
case e: Exception => logger.warn(s"Error observing geom '$geom' and date '$dtg': ${e.toString}")
}
}
}
override def unobserve(sf: SimpleFeature): Unit = {
val geom = sf.getAttribute(geomIndex).asInstanceOf[Geometry]
val dtg = sf.getAttribute(dtgIndex).asInstanceOf[Date]
if (geom != null && dtg != null) {
try {
val (timeBin, z3) = toKey(geom, dtg)
binMap.get(timeBin).foreach(_.add(z3, -1L))
} catch {
case e: Exception => logger.warn(s"Error un-observing geom '$geom' and date '$dtg': ${e.toString}")
}
}
}
/**
* Creates a new histogram by combining another histogram with this one
*/
override def +(other: Z3Histogram): Z3Histogram = {
val plus = new Z3Histogram(geomIndex, dtgIndex, period, length)
plus += this
plus += other
plus
}
/**
   * Merges another histogram's counts into this one
*/
override def +=(other: Z3Histogram): Unit = {
if (length != other.length) {
throw new NotImplementedError("Can only add z3 histograms with the same length")
}
other.binMap.foreach { case (w, bins) =>
binMap.get(w) match {
case None => binMap.put(w, bins) // note: sharing a reference now
case Some(b) =>
var i = 0
while (i < b.length) {
b.counts(i) += bins.counts(i)
i += 1
}
}
}
}
override def toJsonObject: Any =
binMap.toSeq.sortBy(_._1)
.map { case (p, bins) => (String.format(jsonFormat, Short.box(p)), bins) }
.map { case (label, bins) => Map(label-> Map("bins" -> bins.counts)) }
override def isEmpty: Boolean = binMap.values.forall(_.counts.forall(_ == 0))
override def clear(): Unit = binMap.values.foreach(_.clear())
override def isEquivalent(other: Stat): Boolean = other match {
case that: Z3Histogram =>
geomIndex == that.geomIndex && dtgIndex == that.dtgIndex && period == that.period &&
length == that.length && binMap.keySet == that.binMap.keySet &&
binMap.forall { case (w, bins) => java.util.Arrays.equals(bins.counts, that.binMap(w).counts) }
case _ => false
}
}
object Z3Histogram {
private val gf = JTSFactoryFinder.getGeometryFactory
val minGeom: Point = MinMaxGeometry.min.asInstanceOf[Point]
val maxGeom: Point = MinMaxGeometry.max.asInstanceOf[Point]
}
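
/*
 * Hypothetical usage sketch (not part of the original file): partial histograms
 * computed independently (for example one per data partition) can be combined
 * with `+`, provided they were created with the same length; time bins present
 * on only one side are carried over as-is.
 */
object Z3HistogramExamples {
  def merge(partials: Seq[Z3Histogram]): Option[Z3Histogram] =
    partials.reduceOption(_ + _)
}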
|
jahhulbert-ccri/geomesa
|
geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/stats/Z3Histogram.scala
|
Scala
|
apache-2.0
| 6,517
|
package graphique.backends
import graphique.images.ImageAttributes
/**
* A Graphique backend.
*
*/
class Backend(images: ImageManager, urls: UrlProvider) {
/**
* Submits a new image.
*
* @param image the image content
* @throws InvalidImageError when the submitted content is not a valid image
* @throws IOError
* @return the tag of the submitted image
*/
def submitImage(image: Array[Byte]): ImageTag = images submit image
/**
* Requests that an image matching the specified attributes should be created.
*
* @param tag the image identifier
* @param attributes the attributes of the requested image
* @throws IOError
* @throws ImageProcessingError
* @throws SourceImageNotFoundError when the image requested is unavailable
*/
def createImage(tag: ImageTag, attributes: ImageAttributes): Unit =
images createImage Image(tag, attributes)
def imageAvailable(tag: ImageTag, attributes: ImageAttributes): Boolean = images has Image(tag, attributes)
/**
* Makes sure that the requested image exists (or creates it) then returns its URL.
*
* @param tag
* @param attributes
* @throws ImageProcessingError
* @throws SourceImageNotFoundError when the image requested is unavailable
*/
def urlForExistingImage(tag: ImageTag, attributes: ImageAttributes): String = {
if (!imageAvailable(tag, attributes))
createImage(tag, attributes)
imageUrlFor(tag, attributes)
}
/**
   * Requests a publicly-servable URL for the image identified by the given tag and carrying the given
   * attributes. The image is only available if this was preceded by a call to createImage() with the
* same arguments.
*
* @param tag the identifier of the requested image
* @param attributes the desired attributes of the requested image
*/
def imageUrlFor(tag: ImageTag, attributes: ImageAttributes): String = {
urls(Image(tag, attributes).id)
}
}
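
/*
 * Hypothetical usage sketch (not part of the original file): a typical flow is to
 * submit the raw bytes once and then ask for a URL for a given set of attributes;
 * urlForExistingImage creates the derived image on demand. `backend`, `rawImage`
 * and `attributes` stand in for caller-supplied values.
 */
object BackendUsageSketch {
  def storedImageUrl(backend: Backend, rawImage: Array[Byte], attributes: ImageAttributes): String = {
    val tag = backend.submitImage(rawImage)      // may throw InvalidImageError
    backend.urlForExistingImage(tag, attributes) // creates the variant if it is missing
  }
}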
|
amrhassan/graphique
|
src/main/scala/graphique/backends/Backend.scala
|
Scala
|
mit
| 1,944
|
package com.twitter.finagle.http
import com.twitter.conversions.time._
import org.jboss.netty.buffer.ChannelBuffers
import org.specs.SpecificationWithJUnit
import org.specs.util.DataTables
class MessageSpec extends SpecificationWithJUnit with DataTables {
"Message" should {
"empty message" in {
val response = Response()
response.length must_== 0
response.contentString must_== ""
}
"headers" in {
val response = Request()
response.allow.toList must_== Nil
response.allow = Method.Get :: Method.Head :: Nil
response.allow must beSome("GET,HEAD")
response.accept.toList must_== Nil
response.accept = "text/plain; q=0.5" :: "text/html" :: Nil
response.accept.toList must_== "text/plain; q=0.5" :: "text/html" :: Nil
response.accept = "A,,c;param,;d,;"
response.accept.toList must_== "A" :: "c;param" :: ";d" :: ";" :: Nil
response.acceptMediaTypes.toList must_== "a" :: "c" :: Nil
}
"charset" in {
"header" | "charset" |>
"x; charset=a" ! "a" |
"x;charset=a" ! "a" |
"x; charset=a " ! "a" |
"x;y;charset=a" ! "a" |
"x; charset=" ! "" |
"x; charset==" ! "=" |
"x; charset" ! null |
"x" ! null |
";;;;;;" ! null |
{ (header: String, expected: String) =>
val request = Request()
request.headers.set("Content-Type", header)
request.charset must_== Option(expected)
}
}
"charset=" in {
"header" | "charset" | "expected" |>
"x; charset=a" ! "b" ! "x;charset=b" |
"x" ! "b" ! "x;charset=b" |
"x;p1" ! "b" ! "x;charset=b;p1" |
"x;p1; p2 ;p3" ! "b" ! "x;charset=b;p1; p2 ;p3" |
"x;p1;charset=a;p3" ! "b" ! "x;p1;charset=b;p3" |
"x;" ! "b" ! "x;charset=b" |
";" ! "b" ! ";charset=b" |
"" ! "b" ! ";charset=b" |
{ (header: String, charset: String, expected: String) =>
val request = Request()
request.headers.set("Content-Type", header)
request.charset = charset
request.headers.get("Content-Type") must_== expected
}
}
"mediaType" in {
"header" | "type" |>
"application/json" ! "application/json" |
"application/json;charset=utf-8" ! "application/json" |
"" ! "" |
";" ! "" |
";;;;;;;;;;" ! "" |
"application/json;" ! "application/json" |
" application/json ; charset=utf-8 " ! "application/json" |
"APPLICATION/JSON" ! "application/json" |
{ (header: String, expected: String) =>
val request = Request()
request.headers.set("Content-Type", header)
if (!expected.isEmpty) // do this because None doesn't work in DataTables
request.mediaType must_== Some(expected)
else
request.mediaType must_== None
}
val request = Request()
request.mediaType must_== None
}
"mediaType=" in {
"header" | "mediaType" | "expected" |>
"x" ! "y" ! "y" |
"x; charset=a" ! "y" ! "y; charset=a" |
"x;p1; p2 ;p3" ! "y" ! "y;p1; p2 ;p3" |
"x;" ! "y" ! "y" |
";" ! "y" ! "y" |
"" ! "y" ! "y" |
{ (header: String, mediaType: String, expected: String) =>
val request = Request()
request.headers.set("Content-Type", header)
request.mediaType = mediaType
request.headers.get("Content-Type") must_== expected
}
}
"clearContent" in {
val response = Response()
response.write("hello")
response.clearContent()
response.contentString must_== ""
response.length must_== 0
}
"contentString" in {
val response = Response()
response.setContent(ChannelBuffers.wrappedBuffer("hello".getBytes))
response.contentString must_== "hello"
response.contentString must_== "hello"
}
"cacheControl" in {
val response = Response()
response.cacheControl = 15123.milliseconds
response.cacheControl must_== Some("max-age=15, must-revalidate")
}
"withInputStream" in {
val response = Response()
response.setContent(ChannelBuffers.wrappedBuffer("hello".getBytes))
response.withInputStream { inputStream =>
val bytes = new Array[Byte](5)
inputStream.read(bytes)
new String(bytes) must_== "hello"
}
}
"withReader" in {
val response = Response()
response.setContent(ChannelBuffers.wrappedBuffer("hello".getBytes))
response.withReader { reader =>
val bytes = new Array[Char](5)
reader.read(bytes)
new String(bytes) must_== "hello"
}
}
"write(String)" in {
val response = Response()
response.write("hello")
response.length must_== 5
response.contentString must_== "hello"
}
"write(String), multiple writes" in {
val response = Response()
response.write("h")
response.write("e")
response.write("l")
response.write("l")
response.write("o")
response.contentString must_== "hello"
response.length must_== 5
}
"withOutputStream" in {
val response = Response()
response.withOutputStream { outputStream =>
outputStream.write("hello".getBytes)
}
response.contentString must_== "hello"
response.length must_== 5
}
"withOutputStream, multiple writes" in {
val response = Response()
response.write("h")
response.withOutputStream { outputStream =>
outputStream.write("e".getBytes)
outputStream.write("ll".getBytes)
}
response.write("o")
response.contentString must_== "hello"
response.length must_== 5
}
"withWriter" in {
val response = Response()
response.withWriter { writer =>
writer.write("hello")
}
response.contentString must_== "hello"
response.length must_== 5
}
"withWriter, multiple writes" in {
val response = Response()
response.write("h")
response.withWriter { writer =>
writer.write("e")
writer.write("ll")
}
response.write("o")
response.contentString must_== "hello"
response.length must_== 5
}
}
}
|
JustinTulloss/finagle
|
finagle-http/src/test/scala/com/twitter/finagle/http/MessageSpec.scala
|
Scala
|
apache-2.0
| 7,093
|
package com.shocktrade.services
import com.shocktrade.server.services.yahoo.YahooFinanceKeyStatisticsService._
import io.scalajs.nodejs.console
import utest._
import scala.scalajs.concurrent.JSExecutionContext.Implicits.queue
/**
* Yahoo Finance! Statistics Service Test
* @author Lawrence Daniels <lawrence.daniels@gmail.com>
*/
class YahooFinanceStatisticsServiceTests extends TestSuite {
private val service = new YahooFinanceKeyStatisticsService()(require)
override val tests = this {
"Yahoo! Finance Statistics Service should return stock quotes" - {
service("AAPL") foreach { quote =>
assert(quote.flatMap(_.symbol.toOption).contains("AAPL"))
console.log("quote: %j", quote)
}
}
}
tests.runAsync() map { results =>
console.log(s"results: $results")
results
}
}
|
ldaniels528/shocktrade.js
|
app/server/services/src/test/scala/com/shocktrade/services/YahooFinanceStatisticsServiceTests.scala
|
Scala
|
apache-2.0
| 834
|
/*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.flaminem.flamy.utils
trait MergeableNamed[T] extends Mergeable[T] with Named {
}
|
flaminem/flamy
|
src/main/scala/com/flaminem/flamy/utils/MergeableNamed.scala
|
Scala
|
apache-2.0
| 662
|
object Atbash {
private val org = "abcdefghijklmnopqrstuvwxyz12345679".split("")
private val shifted = "zyxwvutsrqponmlkjihgfedcba12345679"split("")
val cipherMap = org.zip(shifted).toMap
}
case class Atbash() {
import Atbash._
def encode(in: String): String =
in.toLowerCase
.split("")
.filter(cipherMap.contains(_))
.map(cipherMap)
.grouped(5)
.map(_.mkString)
.mkString(" ")
}
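
/*
 * Hypothetical usage sketch (not part of the original exercise solution): the
 * cipher lower-cases its input, drops characters outside the cipher map,
 * substitutes each letter with its reverse-alphabet counterpart and groups the
 * result into blocks of five characters.
 */
object AtbashExample {
  def main(args: Array[String]): Unit = {
    println(Atbash().encode("The quick brown fox")) // prints "gsvjf rxpyi ldmul c"
  }
}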
|
daewon/til
|
exercism/scala/atbash-cipher/src/main/scala/atbashCipher.scala
|
Scala
|
mpl-2.0
| 437
|
package org.derekwyatt.shorty
import com.github.mauricio.async.db.QueryResult
import scala.collection.mutable
import scala.concurrent.{ExecutionContext, Future, future}
import scala.reflect.runtime.universe._
trait TestShortyDBComponent extends ShortyDBComponent with NilDBComponent {
val hashToUrl = mutable.Map.empty[String, String]
val urlToHash = mutable.Map.empty[String, String]
val hashClicks = mutable.Map.empty[String, Long]
val shortydb = new TestShortyDB
class TestShortyDB extends ShortyDB {
    def extractOne[T : TypeTag](queryResult: QueryResult, colName: String, f: Any => Option[T]): Option[T] = None
def insertHash(hash: String, url: String)(implicit ec: ExecutionContext): Future[Unit] = {
hashToUrl += (hash -> url)
urlToHash += (url -> hash)
future { () }
}
def hashExists(hash: String)(implicit ec: ExecutionContext): Future[Boolean] = {
if (hashToUrl.contains(hash))
future(true)
else
future(false)
}
def getUrl(hash: String)(implicit ec: ExecutionContext): Future[Option[String]] = {
val url = hashToUrl.get(hash)
future(url)
}
def getHash(url: String)(implicit ec: ExecutionContext): Future[Option[String]] = {
val hash = urlToHash.get(url)
future(hash)
}
def addClick(hash: String, ipaddr: String)(implicit ec: ExecutionContext): Future[Unit] = {
hashClicks += (hash -> (hashClicks.getOrElse(hash, 0L) + 1L))
future { () }
}
def getNumClicks(hash: String)(implicit ec: ExecutionContext): Future[Long] = {
val result = hashClicks.getOrElse(hash, 0L)
future(result)
}
}
}
|
derekwyatt/shorty-dqw
|
src/test/scala/org/derekwyatt/shorty/TestShortyDB.scala
|
Scala
|
apache-2.0
| 1,659
|
package com.persist.logging
import com.persist.JsonOps._
import scala.collection.mutable
import scala.concurrent.Promise
private[logging] object TimeActorMessages {
private[logging] trait TimeActorMessage
private[logging] case class TimeStart(id: RequestId, name: String, uid: String, time: Long) extends TimeActorMessage
private[logging] case class TimeEnd(id: RequestId, name: String, uid: String, time: Long) extends TimeActorMessage
private[logging] case object TimeDone extends TimeActorMessage
}
private[logging] class TimeActor(tdone: Promise[Unit]) extends ActorLogging {
import TimeActorMessages._
private case class TimeStep(name: String, start: Long, var end: Long = 0, first: Boolean = false)
private case class TimeItem(start: Long, steps: mutable.HashMap[String, TimeStep] = mutable.HashMap[String, TimeStep]())
private val items = mutable.HashMap[String, TimeItem]()
def start(id: RequestId, time: Long) {
val key = s"${id.trackingId}\\t${id.spanId}"
items += (key -> TimeItem(time))
}
def end(id: RequestId) {
val key = s"${id.trackingId}\\t${id.spanId}"
items.get(key) map {
case timeItem =>
val jitems0 = timeItem.steps map {
case (key1, timeStep) =>
val j1 = JsonObject("name" -> timeStep.name, "time0" -> timeStep.start)
val j2 = if (timeStep.end == 0) {
emptyJsonObject
} else {
JsonObject("time1" -> timeStep.end, "total" -> (timeStep.end - timeStep.start))
}
j1 ++ j2
}
val traceId = JsonArray(id.trackingId, id.spanId)
val jitems = jitems0.toSeq.sortBy(jgetInt(_, "time0"))
val j = JsonObject("@traceId" -> traceId, "items" -> jitems)
log.alternative("time", j)
items -= key
}
}
def logStart(id: RequestId, name: String, uid: String, time: Long) {
val key = s"${id.trackingId}\\t${id.spanId}"
val key1 = s"${name}\\t${uid}"
val first = if (!items.isDefinedAt(key)) {
start(id, time)
true
} else {
false
}
items.get(key) map {
case timeItem => timeItem.steps += (key1 -> TimeStep(name, time - timeItem.start, first = first))
}
}
def logEnd(id: RequestId, name: String, uid: String, time: Long) {
val key = s"${id.trackingId}\\t${id.spanId}"
val key1 = s"${name}\\t${uid}"
items.get(key) map {
case timeItem =>
timeItem.steps.get(key1) map {
case timeStep =>
timeStep.end = time - timeItem.start
if (timeStep.first) end(id)
}
}
}
def closeAll(): Unit = {
for ((key, steps) <- items) {
val parts = key.split("\\t")
if (parts.size == 2) {
end(RequestId(parts(0), parts(1)))
}
}
}
def receive = {
case TimeStart(id, name, uid, time) =>
logStart(id, name, uid, time)
case TimeEnd(id, name, uid, time) =>
logEnd(id, name, uid, time)
case TimeDone =>
closeAll()
tdone.success(())
context.stop(self)
case _ =>
}
}
|
nestorpersist/logging
|
logger/src/main/scala/com/persist/logging/TimeActor.scala
|
Scala
|
apache-2.0
| 3,069
|
import sbt._
class ArgumentTest(info: ProjectInfo) extends DefaultProject(info)
{
val st = "org.scalatest" % "scalatest" % "1.3"
override def testOptions =
super.testOptions ++
args("success1", "-n", "test2 test3") ++
args("success2", "-n", "test2") ++
args("success3", "-n", "test3") ++
args("failure1", "-n", "test1") ++
args("failure2", "-n", "test1 test4") ++
args("failure3", "-n", "test1 test3")
def args(path: Path, args: String*): Seq[TestOption] = if(path.exists) TestArgument(args : _*) :: Nil else Nil
}
|
gilt/xsbt
|
sbt/src/sbt-test/tests/arguments/project/build/ArgumentTest.scala
|
Scala
|
bsd-3-clause
| 533
|
package skinny.engine.base
import java.net.URI
import javax.servlet.http.{ HttpServletRequest, HttpServletResponse }
import skinny.engine.Handler
import skinny.engine.implicits.ServletApiImplicits
/**
* Redirects unsecured requests to the corresponding secure URL.
*/
trait UnsecuredRequestRedirector extends Handler with ServletApiImplicits {
abstract override def handle(req: HttpServletRequest, res: HttpServletResponse) {
if (!req.isSecure) {
val oldUri = req.uri
val port = securePortMap.lift(oldUri.getPort) getOrElse 443
val uri = new URI(
"https",
oldUri.getRawUserInfo,
oldUri.getHost,
port,
oldUri.getPath,
oldUri.getQuery,
oldUri.getFragment
).toString
res.redirect(uri)
} else {
super.handle(req, res)
}
}
/**
* Maps unsecured ports to secure ports.
* By default, 80 redirects to 443, and 8080 to 8443.
*/
protected def securePortMap: PartialFunction[Int, Int] = {
Map(80 -> 443, 8080 -> 8443)
}
}
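
/*
 * Hypothetical usage sketch (not part of the original file): an application can
 * extend the default port mapping by overriding securePortMap. The 9080 -> 9443
 * entry below is purely illustrative.
 */
trait CustomPortRedirector extends UnsecuredRequestRedirector {
  override protected def securePortMap: PartialFunction[Int, Int] =
    Map(80 -> 443, 8080 -> 8443, 9080 -> 9443)
}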
|
holycattle/skinny-framework
|
engine/src/main/scala/skinny/engine/base/UnsecuredRequestRedirector.scala
|
Scala
|
mit
| 1,045
|
package com.jrende.commands
case class Exit(params : Seq[String]) extends Command {
}
|
Jrende/Skvitter
|
src/main/scala/com/jrende/commands/Exit.scala
|
Scala
|
mit
| 87
|
package gitbucket.core.view
import gitbucket.core.controller.Context
import gitbucket.core.service.RepositoryService.RepositoryInfo
import org.scalatest.FunSpec
import org.scalatest.mockito.MockitoSugar
import java.util.Date
import java.util.TimeZone
class HelpersSpec extends FunSpec with MockitoSugar {
private implicit val context = mock[Context]
private val repository = mock[RepositoryInfo]
import helpers._
describe("urlLink and decorateHtml") {
it("should pass identical string when no link is present") {
val before = "Description"
val after = decorateHtml(urlLink(before), repository)
assert(after == before)
}
it("should convert a single link") {
val before = "http://example.com"
val after = decorateHtml(urlLink(before), repository)
assert(after == """<a href="http://example.com">http://example.com</a>""")
}
it("should convert a single link within trailing text") {
val before = "Example Project. http://example.com"
val after = decorateHtml(urlLink(before), repository)
assert(after == """Example Project. <a href="http://example.com">http://example.com</a>""")
}
it("should convert a multiple links within text") {
val before = "Example Project. http://example.com. (See also https://github.com/)"
val after = decorateHtml(urlLink(before), repository)
assert(
after == """Example Project. <a href="http://example.com">http://example.com</a>. (See also <a href="https://github.com/">https://github.com/</a>)"""
)
}
it("should properly escape html metacharacters") {
val before = "<>&"
val after = decorateHtml(urlLink(before), repository)
assert(after == """<>&""")
}
it("should escape html metacharacters adjacent to a link") {
val before = "<http://example.com>"
val after = decorateHtml(urlLink(before), repository)
assert(after == """<<a href="http://example.com">http://example.com</a>>""")
}
it("should stop link recognition at a metacharacter") {
val before = "http://exa<mple.com"
val after = decorateHtml(urlLink(before), repository)
assert(after == """<a href="http://exa">http://exa</a><mple.com""")
}
it("should make sure there are no double quotes in the href attribute") {
val before = "http://exa\\"mple.com"
val after = decorateHtml(urlLink(before), repository)
assert(after == """<a href="http://exa"mple.com">http://exa"mple.com</a>""")
}
}
describe("datetimeAgo") {
it("should render a time within a minute") {
val time = System.currentTimeMillis()
val datetime = datetimeAgo(new Date(time))
assert(datetime == "just now")
}
it("should render a time 1 minute ago") {
val time = System.currentTimeMillis() - (60 * 1000)
val datetime = datetimeAgo(new Date(time))
assert(datetime == "1 minute ago")
}
it("should render a time 2 minute ago") {
val time = System.currentTimeMillis() - (60 * 1000 * 2)
val datetime = datetimeAgo(new Date(time))
assert(datetime == "2 minutes ago")
}
}
describe("datetimeRFC3339") {
it("should format date as RFC3339 format") {
val time = 1546961077224L
val datetime = datetimeRFC3339(new Date(time))
assert(datetime == "2019-01-08T15:24:37Z")
}
}
describe("date") {
it("should format date as yyyy-MM-dd with default timezone") {
val defaultTimeZone = TimeZone.getDefault
try {
val time = 1546961077247L
TimeZone.setDefault(TimeZone.getTimeZone("UTC"))
val datetimeUTC = date(new Date(time))
assert(datetimeUTC == "2019-01-08")
TimeZone.setDefault(TimeZone.getTimeZone("JST"))
val datetimeJST = date(new Date(time))
assert(datetimeJST == "2019-01-09")
} finally {
TimeZone.setDefault(defaultTimeZone)
}
}
}
describe("hashDate") {
it("should format date as yyyyMMDDHHmmss with default timezone") {
val defaultTimeZone = TimeZone.getDefault
try {
val time = 1546961077247L
TimeZone.setDefault(TimeZone.getTimeZone("UTC"))
val hash = hashDate(new Date(time))
assert(hash == "20190108152437")
} finally {
TimeZone.setDefault(defaultTimeZone)
}
}
}
describe("hashQuery") {
it("should return same value for multiple calls") {
val time = 1546961077247L
val hash1 = hashQuery
Thread.sleep(500)
val hash2 = hashQuery
assert(hash1 == hash2)
}
}
}
|
McFoggy/gitbucket
|
src/test/scala/gitbucket/core/view/HelpersSpec.scala
|
Scala
|
apache-2.0
| 4,607
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2.orc
import scala.collection.JavaConverters._
import org.apache.orc.mapreduce.OrcInputFormat
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.connector.read.{Scan, SupportsPushDownFilters}
import org.apache.spark.sql.execution.datasources.PartitioningAwareFileIndex
import org.apache.spark.sql.execution.datasources.orc.OrcFilters
import org.apache.spark.sql.execution.datasources.v2.FileScanBuilder
import org.apache.spark.sql.sources.Filter
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
case class OrcScanBuilder(
sparkSession: SparkSession,
fileIndex: PartitioningAwareFileIndex,
schema: StructType,
dataSchema: StructType,
options: CaseInsensitiveStringMap)
extends FileScanBuilder(sparkSession, fileIndex, dataSchema) with SupportsPushDownFilters {
lazy val hadoopConf = {
val caseSensitiveMap = options.asCaseSensitiveMap.asScala.toMap
// Hadoop Configurations are case sensitive.
sparkSession.sessionState.newHadoopConfWithOptions(caseSensitiveMap)
}
override protected val supportsNestedSchemaPruning: Boolean = true
override def build(): Scan = {
OrcScan(sparkSession, hadoopConf, fileIndex, dataSchema,
readDataSchema(), readPartitionSchema(), options, pushedFilters())
}
private var _pushedFilters: Array[Filter] = Array.empty
override def pushFilters(filters: Array[Filter]): Array[Filter] = {
if (sparkSession.sessionState.conf.orcFilterPushDown) {
OrcFilters.createFilter(schema, filters).foreach { f =>
// The pushed filters will be set in `hadoopConf`. After that, we can simply use the
// changed `hadoopConf` in executors.
OrcInputFormat.setSearchArgument(hadoopConf, f, schema.fieldNames)
}
val dataTypeMap = schema.map(f => f.name -> f.dataType).toMap
_pushedFilters = OrcFilters.convertibleFilters(schema, dataTypeMap, filters).toArray
}
filters
}
override def pushedFilters(): Array[Filter] = _pushedFilters
}
|
jkbradley/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/orc/OrcScanBuilder.scala
|
Scala
|
apache-2.0
| 2,895
|
package org.pico.disposal
import java.io.Closeable
import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}
import org.pico.atomic.syntax.std.atomicBoolean._
import org.pico.atomic.syntax.std.atomicReference._
import org.pico.disposal.std.autoCloseable._
import org.pico.disposal.syntax.disposable._
/** A simple trait that other types can inherit to acquire the ability to track disposable objects
* for disposal upon close.
*/
trait SimpleDisposer extends Disposer {
private val closed = new AtomicBoolean(false)
private val disposables = new AtomicReference[Closeable](Closed)
@inline
final override def release(): Closeable = disposables.getAndSet(Closed)
@inline
final override def disposes[D: Disposable](disposable: D): D = {
disposables.update(_ :+: disposable.asCloseable)
if (closed.value) {
      // It is possible that the disposer was closed concurrently with the registration above. If
      // that's the case we want to ensure that the `disposable` argument is also closed.
disposables.getAndSet(Closed).dispose()
}
disposable
}
@inline
final override def close(): Unit = {
closed.set(true)
disposables.getAndSet(Closed).dispose()
}
}
|
pico-works/pico-disposal
|
pico-disposal/src/main/scala/org/pico/disposal/SimpleDisposer.scala
|
Scala
|
bsd-3-clause
| 1,206
|
trait A { type Result }
class PolyTests {
def wrong(x: A { type Result = Int })
: A { type Result = String} = x
}
|
yusuke2255/dotty
|
tests/untried/neg/t8177a.scala
|
Scala
|
bsd-3-clause
| 130
|
package de.fosd.typechef.crewrite
import de.fosd.typechef.featureexpr.FeatureExprFactory
import de.fosd.typechef.conditional.{Choice, Opt}
import de.fosd.typechef.parser.c.{EnforceTreeHelper, AST}
import org.kiama.rewriting.Rewriter._
object ProductDerivation extends EnforceTreeHelper {
def deriveProduct[T <: Product](ast: T, selectedFeatures: Set[String]): T = {
assert(ast != null)
val prod = manytd(rule[Product] {
case l: List[_] if l.forall(_.isInstanceOf[Opt[_]]) => {
var res: List[Opt[_]] = List()
        // iterate over l.reverse and prepend to res so the original order is preserved
        // without a final reverse (alternatively, iterate in order and append with += or ++=)
for (o <- l.reverse.asInstanceOf[List[Opt[_]]])
if (o.condition.evaluate(selectedFeatures)) {
res ::= o.copy(condition = FeatureExprFactory.True)
}
res
}
case Choice(feature, thenBranch, elseBranch) => {
if (feature.evaluate(selectedFeatures)) thenBranch
else elseBranch
}
case a: AST => a.clone()
})
val cast = prod(ast).get.asInstanceOf[T]
copyPositions(ast, cast)
cast
}
}
|
ckaestne/TypeChef
|
CRewrite/src/main/scala/de/fosd/typechef/crewrite/ProductDerivation.scala
|
Scala
|
lgpl-3.0
| 1,249
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.tuning
import java.io.File
import java.nio.file.{Files, StandardCopyOption}
import org.scalatest.Assertions
import org.apache.spark.ml.param.{ParamMap, ParamPair, Params}
import org.apache.spark.ml.util.{Identifiable, MLReader, MLWritable}
object ValidatorParamsSuiteHelpers extends Assertions {
/**
* Assert sequences of estimatorParamMaps are identical.
* If the values for a parameter are not directly comparable with ===
* and are instead Params types themselves then their corresponding paramMaps
* are compared against each other.
*/
def compareParamMaps(pMaps: Array[ParamMap], pMaps2: Array[ParamMap]): Unit = {
assert(pMaps.length === pMaps2.length)
pMaps.zip(pMaps2).foreach { case (pMap, pMap2) =>
assert(pMap.size === pMap2.size)
pMap.toSeq.foreach { case ParamPair(p, v) =>
assert(pMap2.contains(p))
val otherParam = pMap2(p)
v match {
case estimator: Params =>
otherParam match {
case estimator2: Params =>
val estimatorParamMap = Array(estimator.extractParamMap())
val estimatorParamMap2 = Array(estimator2.extractParamMap())
compareParamMaps(estimatorParamMap, estimatorParamMap2)
case other =>
fail(s"Expected parameter of type Params but found ${otherParam.getClass.getName}")
}
case _ =>
assert(otherParam === v)
}
}
}
}
/**
* When nested estimators (ex. OneVsRest) are saved within meta-algorithms such as
* CrossValidator and TrainValidationSplit, relative paths should be used to store
* the path of the estimator so that if the parent directory changes, loading the
* model still works.
*/
def testFileMove[T <: Params with MLWritable](instance: T, tempDir: File): Unit = {
val uid = instance.uid
val subdirName = Identifiable.randomUID("test")
val subdir = new File(tempDir, subdirName)
val subDirWithUid = new File(subdir, uid)
instance.save(subDirWithUid.getPath)
val newSubdirName = Identifiable.randomUID("test_moved")
val newSubdir = new File(tempDir, newSubdirName)
val newSubdirWithUid = new File(newSubdir, uid)
Files.createDirectory(newSubdir.toPath)
Files.createDirectory(newSubdirWithUid.toPath)
Files.move(subDirWithUid.toPath, newSubdirWithUid.toPath, StandardCopyOption.ATOMIC_MOVE)
val loader = instance.getClass.getMethod("read").invoke(null).asInstanceOf[MLReader[T]]
val newInstance = loader.load(newSubdirWithUid.getPath)
assert(uid == newInstance.uid)
}
}
|
pgandhi999/spark
|
mllib/src/test/scala/org/apache/spark/ml/tuning/ValidatorParamsSuiteHelpers.scala
|
Scala
|
apache-2.0
| 3,448
|
package org.vitrivr.adampro.communication
import java.util.concurrent.TimeUnit
import scalapb.json4s.JsonFormat
import io.grpc.internal.DnsNameResolverProvider
import io.grpc.netty.NettyChannelBuilder
import io.grpc.stub.StreamObserver
import io.grpc.{ManagedChannel, ManagedChannelBuilder}
import org.vitrivr.adampro.communication.datastructures._
import org.vitrivr.adampro.grpc.grpc.AdamDefinitionGrpc.{AdamDefinitionBlockingStub, AdamDefinitionStub}
import org.vitrivr.adampro.grpc.grpc.AdamSearchGrpc.{AdamSearchBlockingStub, AdamSearchStub}
import org.vitrivr.adampro.grpc.grpc.AdaptScanMethodsMessage.IndexCollection.{EXISTING_INDEXES, NEW_INDEXES}
import org.vitrivr.adampro.grpc.grpc.AdaptScanMethodsMessage.QueryCollection.{LOGGED_QUERIES, RANDOM_QUERIES}
import org.vitrivr.adampro.grpc.grpc.DistanceMessage.DistanceType
import org.vitrivr.adampro.grpc.grpc.RepartitionMessage.PartitionOptions
import org.vitrivr.adampro.grpc.grpc.{AttributeType, _}
import org.vitrivr.adampro.utils.Logging
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, TimeoutException}
import scala.util.{Failure, Success, Try}
/**
* adamtwo
*
* Ivan Giangreco
* March 2016
*/
class RPCClient(channel: ManagedChannel,
private[adampro] val definerBlocking: AdamDefinitionBlockingStub,
private[adampro] val definer: AdamDefinitionStub,
private[adampro] val searcherBlocking: AdamSearchBlockingStub,
private[adampro] val searcher: AdamSearchStub) extends Logging {
/**
*
* @param desc description
* @param op operation
* @return
*/
private def execute[T](desc: String)(op: => Try[T]): Try[T] = {
try {
log.debug("starting " + desc)
val t1 = System.currentTimeMillis
val res = op
val t2 = System.currentTimeMillis
log.debug("performed " + desc + " in " + (t2 - t1) + " msecs")
res
} catch {
case e: Exception =>
log.error("error in " + desc, e)
Failure(e)
}
}
/**
* Create an entity.
*
* @param entityname name of entity
* @param attributes attributes of new entity
* @return
*/
def entityCreate(entityname: String, attributes: Seq[RPCAttributeDefinition]): Try[String] = {
execute("create entity operation") {
val attributeMessages = attributes.map { attribute =>
var adm = AttributeDefinitionMessage(attribute.name, getGrpcType(attribute.datatype), params = attribute.params)
//add handler information if available
if (attribute.storagehandlername.isDefined) {
adm = adm.withHandler(attribute.storagehandlername.get)
}
adm
}
val res = definerBlocking.createEntity(CreateEntityMessage(entityname, attributeMessages))
if (res.code == AckMessage.Code.OK) {
return Success(res.message)
} else {
return Failure(new Exception(res.message))
}
}
}
/**
* Check if entity exists.
*
* @param entityname name of entity
* @return
*/
def entityExists(entityname: String): Try[Boolean] = {
execute("entity exists operation") {
val res = definerBlocking.existsEntity(EntityNameMessage(entityname))
if (res.ack.get.code == AckMessage.Code.OK) {
return Success(res.exists)
} else {
return Failure(new Exception(res.ack.get.message))
}
}
}
/**
* Generate random data and fill into entity.
*
* @param entityname name of entity
* @param tuples number of tuples
* @param dimensions dimensionality for feature fields
* @param sparsity sparsity of data for feature fields
* @param min min value for feature fields
* @param max max value for feature fields
* @param distribution distribution for random data
* @return
*/
def entityGenerateRandomData(entityname: String, tuples: Int, dimensions: Int, sparsity: Float, min: Float, max: Float, distribution: Option[String]): Try[Void] = {
execute("entity generate random data operation") {
var options: Map[String, String] = Map("fv-dimensions" -> dimensions, "fv-sparsity" -> sparsity, "fv-min" -> min, "fv-max" -> max).mapValues(_.toString)
if (distribution.isDefined) {
options += "fv-distribution" -> distribution.get
}
val res = definerBlocking.generateRandomData(GenerateRandomDataMessage(entityname, tuples, options))
if (res.code == AckMessage.Code.OK) {
return Success(null)
} else {
return Failure(new Exception(res.message))
}
}
}
/**
* Insert data into entity.
*
* @param insertMessage insert message
* @return
*/
def entityInsert(insertMessage: InsertMessage): Try[Void] = {
execute("insert operation") {
val res = definerBlocking.insert(insertMessage)
if (res.code == AckMessage.Code.OK) {
return Success(null)
} else {
return Failure(new Exception(res.message))
}
}
}
/**
* Insert data into entity (streaming).
*
* @param insertMessages sequence of insert messages
* @return
*/
def entityStreamInsert(insertMessages: Seq[InsertMessage]): Try[Void] = {
val so = new StreamObserver[AckMessage]() {
override def onError(throwable: Throwable): Unit = {
log.error("error in insert", throwable)
}
override def onCompleted(): Unit = {
log.info("completed insert")
}
override def onNext(ack: AckMessage): Unit = {
if (ack.code == AckMessage.Code.OK) {
//no output on success
} else {
log.error("error in insert: " + ack.message)
}
}
}
val insertSo = definer.streamInsert(so)
    insertMessages.foreach(insertSo.onNext)
Success(null)
}
/**
* Import data to entity.
*
* @param path path
* @param out stream observer
* @return
*/
def entityProtoImport(path: String, out: StreamObserver[(Boolean, String)]): Try[Void] = {
execute("entity import operation") {
val so = new StreamObserver[AckMessage]() {
override def onError(throwable: Throwable): Unit = out.onError(throwable)
override def onCompleted(): Unit = out.onCompleted()
override def onNext(ack: AckMessage): Unit = out.onNext((ack.code == AckMessage.Code.OK, ack.message))
}
definer.protoImportData(ProtoImportMessage(path), so)
Success(null)
}
}
/**
* Export data from entity.
*
* @param path path
* @param entity
* @return
*/
def entityProtoExport(path: String, entity: String): Try[Void] = {
execute("entity import operation") {
val res = definerBlocking.protoExportData(ProtoExportMessage(path, entity))
if (res.code == AckMessage.Code.OK) {
return Success(null)
} else {
return Failure(new Exception(res.message))
}
}
}
/**
* Import data to entity.
*
* @param host host
* @param database database
* @param username username
* @param password password
* @return
*/
def entityImport(host: String, database: String, username: String, password: String): Try[Void] = {
execute("entity import operation") {
definerBlocking.importData(ImportMessage(host, database, username, password))
Success(null)
}
}
/**
* List all entities.
*
* @return
*/
def entityList(): Try[Seq[String]] = {
execute("list entities operation") {
Success(definerBlocking.listEntities(EmptyMessage()).entities.sorted)
}
}
/**
* Get details for entity.
*
* @param entityname name of entity
* @param options options for operation
* @return
*/
def entityDetails(entityname: String, options: Map[String, String] = Map()): Try[Map[String, String]] = {
execute("get details of entity operation") {
val properties = definerBlocking.getEntityProperties(EntityPropertiesMessage(entityname, options)).properties
Success(properties)
}
}
/**
* Get details for attribute.
*
* @param entityname name of entity
* @param attribute name of attribute
* @param options options for operation
* @return
*/
def entityAttributeDetails(entityname: String, attribute: String, options: Map[String, String] = Map()): Try[Map[String, String]] = {
execute("get details of attribute operation") {
val properties = definerBlocking.getAttributeProperties(AttributePropertiesMessage(entityname, attribute, options)).properties
Success(properties)
}
}
/**
* Get details for index.
*
* @param indexname name of index
* @param options options for operation
* @return
*/
def indexDetails(indexname: String, options: Map[String, String] = Map()): Try[Map[String, String]] = {
execute("get details of index operation") {
val properties = definerBlocking.getIndexProperties(IndexPropertiesMessage(indexname, options)).properties
Success(properties)
}
}
/**
* Partition entity.
*
* @param entityname name of entity
* @param npartitions number of partitions
* @param attribute name of attribute
* @param materialize materialize partitioning
* @param replace replace partitioning
* @param partitionername partitioner
* @return
*/
def entityPartition(entityname: String, npartitions: Int, attribute: Option[String] = None, materialize: Boolean, replace: Boolean, partitionername: String = "spark"): Try[String] = {
execute("repartition entity operation") {
      val option = if (replace) {
        PartitionOptions.REPLACE_EXISTING
      } else if (materialize) {
        PartitionOptions.CREATE_NEW
      } else {
        PartitionOptions.CREATE_TEMP
      }
val partitioner = partitionername match {
case "random" => RepartitionMessage.Partitioner.RANDOM
case "ecp" => RepartitionMessage.Partitioner.ECP
case "spark" => RepartitionMessage.Partitioner.SPARK
case _ => RepartitionMessage.Partitioner.SPARK
}
val res = definerBlocking.repartitionEntityData(RepartitionMessage(entityname, npartitions, option = option, partitioner = partitioner))
if (res.code == AckMessage.Code.OK) {
Success(res.message)
} else {
        Failure(new Exception(res.message))
}
}
}
/**
* Transfer storage of entity.
*
* @param entityname name of entity
* @param attributes names of attribute
* @param newhandler new storage handler
* @return
*/
def entityTransferStorage(entityname: String, attributes : Seq[String], newhandler : String): Try[String] = {
execute("transfer storage of entity operation") {
val res = definerBlocking.transferStorageHandler(TransferStorageHandlerMessage(entityname, attributes, newhandler))
if (res.code == AckMessage.Code.OK) {
Success(res.message)
} else {
        Failure(new Exception(res.message))
}
}
}
/**
* Read data of entity.
*
* @param entityname name of entity
*/
def entityPreview(entityname: String): Try[Seq[RPCQueryResults]] = {
execute("get entity data operation") {
val res = searcherBlocking.preview(PreviewMessage(entityname))
Success(res.responses.map(new RPCQueryResults(_)))
}
}
/**
* Caches an entity.
*
* @param entityname
*/
def entityCache(entityname: String): Try[Boolean] = {
execute("cache entity") {
val res = searcherBlocking.cacheEntity(EntityNameMessage(entityname))
if (res.code.isOk) {
Success(res.code.isOk)
} else {
throw new Exception("caching not possible: " + res.message)
}
}
}
/**
* Benchmark entity and update scan weights.
*
* @param entityname name of entity
* @param attribute name of feature attribute
* @param optimizername optimizer name
* @param generateNewIndexes generate new indexes
* @param loggedQueries use logged queries for test (alternative: use random queries)
* @param nqueries number of queries during training phase
* @param nruns number of runs per query during training phase
* @return
*/
def entityAdaptScanMethods(entityname: String, attribute: String, optimizername: Option[String] = None, generateNewIndexes: Boolean = true, loggedQueries: Boolean = false, nqueries: Option[Int] = None, nruns: Option[Int] = None): Try[Void] = {
execute("benchmark entity scans and reset weights operation") {
val ic = if (generateNewIndexes) {
NEW_INDEXES
} else {
EXISTING_INDEXES
}
val qc = if (loggedQueries) {
LOGGED_QUERIES
} else {
RANDOM_QUERIES
}
var options: Map[String, String] = Map()
if (qc == RANDOM_QUERIES) {
options += "nqueries" -> nqueries.getOrElse(100).toString
} else if (nqueries.isDefined) {
options += "nqueries" -> nqueries.get.toString
}
if (nruns.isDefined) {
options += "nruns" -> nruns.get.toString
}
val optimizer = optimizername.getOrElse("svm") match {
case "svm" => Optimizer.SVM_OPTIMIZER
case "naive" => Optimizer.NAIVE_OPTIMIZER
case "lr" => Optimizer.LR_OPTIMIZER
case _ => Optimizer.LR_OPTIMIZER
}
definerBlocking.adaptScanMethods(AdaptScanMethodsMessage(entityname, attribute, ic, qc, options, optimizer))
Success(null)
}
}
/**
* Sparsify entity and store feature vectors as sparse vectors.
*
* @param entityname name of entity
* @param attribute name of feature attribute
* @return
*/
def entitySparsify(entityname: String, attribute: String): Try[Void] = {
execute("benchmark entity scans and reset weights operation") {
definerBlocking.sparsifyEntity(SparsifyEntityMessage(entityname, attribute))
Success(null)
}
}
/**
* Vacuum entity.
*
* @param entityname name of entity
*/
def entityVacuum(entityname: String): Try[Void] = {
execute("vacuum entity operation") {
definerBlocking.vacuumEntity(EntityNameMessage(entityname))
Success(null)
}
}
/**
* Drop entity.
*
* @param entityname name of entity
*/
def entityDrop(entityname: String): Try[Void] = {
execute("drop entity operation") {
definerBlocking.dropEntity(EntityNameMessage(entityname))
Success(null)
}
}
/**
* Create all indexes for entity.
*
* @param entityname name of entity
* @param attributes name of attributes
* @param norm norm for distance function
* @return
*/
def entityCreateAllIndexes(entityname: String, attributes: Seq[String], norm: Int): Try[Seq[String]] = {
execute("create all indexes operation") {
val res = attributes.map { attribute => definerBlocking.generateAllIndexes(IndexMessage(entity = entityname, attribute = attribute, distance = Some(DistanceMessage(DistanceType.minkowski, options = Map("norm" -> norm.toString)))))
}
if (res.exists(_.code != AckMessage.Code.OK)) {
val message = res.filter(_.code != AckMessage.Code.OK).map(_.message).mkString("; ")
return Failure(new Exception(message))
} else {
Success(res.flatMap(_.message.split(",")))
}
}
}
/**
* Create specific index.
*
* @param entityname name of entity
* @param attribute name of attribute
* @param indextype type of index
* @param norm norm
* @param options index creation options
* @return
*/
def indexCreate(entityname: String, attribute: String, indextype: String, norm: Int, options: Map[String, String]): Try[String] = {
execute("create index operation") {
val indexMessage = IndexMessage(entityname, attribute, getIndexType(indextype), Some(DistanceMessage(DistanceType.minkowski, Map("norm" -> norm.toString))), options)
val res = definerBlocking.index(indexMessage)
if (res.code == AckMessage.Code.OK) {
return Success(res.message)
} else {
throw new Exception(res.message)
}
}
}
/**
* List all indexes for given entity.
*
* @param entityname name of entity
* @return (indexname, attribute, indextypename)
*/
def indexList(entityname: String): Try[Seq[(String, String, IndexType)]] = {
execute("list indexes operation") {
Success(definerBlocking.listIndexes(EntityNameMessage(entityname)).indexes.map(i => (i.index, i.attribute, i.indextype)))
}
}
/**
* Check if index exists.
*
* @param entityname name of entity
* @param attribute name of attribute
* @param indextype type of index
* @param acceptStale accept also stale indexes
* @return
*/
def indexExists(entityname: String, attribute: String, indextype: String, acceptStale : Boolean = false): Try[Boolean] = {
execute("index exists operation") {
val res = definerBlocking.existsIndex(IndexExistsMessage(entityname, attribute, getIndexType(indextype), acceptStale = acceptStale))
if (res.ack.get.code == AckMessage.Code.OK) {
return Success(res.exists)
} else {
return Failure(new Exception(res.ack.get.message))
}
}
}
/**
* Caches an index.
*
* @param indexname
*/
def indexCache(indexname: String): Try[Boolean] = {
execute("cache index operation") {
val res = searcherBlocking.cacheIndex(IndexNameMessage(indexname))
if (res.code.isOk) {
Success(res.code.isOk)
} else {
throw new Exception("caching not possible: " + res.message)
}
}
}
/**
* Drop index.
*
* @param indexname name of index
*/
def indexDrop(indexname: String): Try[Void] = {
execute("drop index operation") {
val res = definerBlocking.dropIndex(IndexNameMessage(indexname))
if (res.code.isOk) {
Success(null)
} else {
throw new Exception("dropping index not possible: " + res.message)
}
}
}
/**
* Resolves the index type from its string name.
*
* @param s name of the index type
* @return
*/
private def getIndexType(s: String) = s match {
case "ecp" => IndexType.ecp
case "lsh" => IndexType.lsh
case "mi" => IndexType.mi
case "pq" => IndexType.pq
case "sh" => IndexType.sh
case "vaf" => IndexType.vaf
case "vav" => IndexType.vav
case "vap" => IndexType.vap
case _ => throw new Exception("no indextype of name " + s + " known")
}
/**
* Partition index.
*
* @param indexname name of index
* @param npartitions number of partitions
* @param attribute name of attribute
* @param materialize materialize partitioning
* @param replace replace partitioning
* @param partitionername partitioner
* @return
*/
def indexPartition(indexname: String, npartitions: Int, attribute: Option[String] = None, materialize: Boolean, replace: Boolean, partitionername: String = "spark"): Try[String] = {
execute("partition index operation") {
val option = if (replace) {
PartitionOptions.REPLACE_EXISTING
} else if (materialize) {
PartitionOptions.CREATE_NEW
} else {
PartitionOptions.CREATE_TEMP
}
val partitioner = partitionername match {
case "random" => RepartitionMessage.Partitioner.RANDOM
case "ecp" => RepartitionMessage.Partitioner.ECP
case "spark" => RepartitionMessage.Partitioner.SPARK
case _ => RepartitionMessage.Partitioner.SPARK
}
val res = definerBlocking.repartitionIndexData(RepartitionMessage(indexname, npartitions, option = option, partitioner = partitioner))
if (res.code.isOk) {
Success(res.message)
} else {
throw new Exception(res.message)
}
}
}
/**
* Perform a search.
*
* @param qo search request
* @return
*/
def doQuery(qo: RPCGenericQueryObject): Try[Seq[RPCQueryResults]] = {
execute("compound query operation") {
val res = searcherBlocking.doQuery(qo.prepare.buildQueryMessage)
if (res.ack.get.code.isOk) {
return Success(res.responses.map(new RPCQueryResults(_)))
} else {
throw new Exception(res.ack.get.message)
}
}
}
/**
* Perform a search.
*
* @param qo search request
* @param timeout timeout in seconds
* @return
*/
def doQuery(qo: RPCGenericQueryObject, timeout: Long): Try[Seq[RPCQueryResults]] = {
execute("compound query operation") {
val fut = searcher.doQuery(qo.prepare.buildQueryMessage)
try {
val res = Await.result(fut, Duration.apply(timeout, "seconds"))
if (res.ack.get.code.isOk) {
return Success(res.responses.map(new RPCQueryResults(_)))
} else {
throw new Exception(res.ack.get.message)
}
} catch {
case e: TimeoutException => {
searcherBlocking.stopQuery(StopQueryMessage(qo.id))
throw e
}
}
}
}
/**
* Perform a progressive search.
*
* @param qo search request
* @param next function for next result
* @param completed function for final result
* @return
*/
def doProgressiveQuery(qo: RPCGenericQueryObject, next: (Try[RPCQueryResults]) => (Unit), completed: (String) => (Unit)): Try[Seq[RPCQueryResults]] = {
execute("progressive query operation") {
val so = new StreamObserver[QueryResultsMessage]() {
override def onError(throwable: Throwable): Unit = {
log.error("error in progressive querying", throwable)
}
override def onCompleted(): Unit = {
completed(qo.id)
}
override def onNext(qr: QueryResultsMessage): Unit = {
log.info("new progressive results arrived")
if (qr.ack.get.code == AckMessage.Code.OK && qr.responses.nonEmpty) {
next(Success(new RPCQueryResults(qr.responses.head)))
} else {
next(Failure(new Exception(qr.ack.get.message)))
}
}
}
searcher.doProgressiveQuery(qo.prepare.buildQueryMessage, so)
Success(null)
}
}
/**
* Perform a parallel search.
*
* @param qo search request
* @param next function for next result
* @param completed function for final result
* @return
*/
def doParallelQuery(qo: RPCGenericQueryObject, next: (Try[RPCQueryResults]) => (Unit), completed: (String) => (Unit)): Try[Seq[RPCQueryResults]] = {
execute("parallel query operation") {
val so = new StreamObserver[QueryResultsMessage]() {
override def onError(throwable: Throwable): Unit = {
log.error("error in parallel querying", throwable)
}
override def onCompleted(): Unit = {
completed(qo.id)
}
override def onNext(qr: QueryResultsMessage): Unit = {
log.info("new parallel results arrived")
if (qr.ack.get.code == AckMessage.Code.OK && qr.responses.nonEmpty) {
next(Success(new RPCQueryResults(qr.responses.head)))
} else {
next(Failure(new Exception(qr.ack.get.message)))
}
}
}
searcher.doParallelQuery(qo.prepare.buildQueryMessage, so)
Success(null)
}
}
def getScoredQueryExecutionPaths(qo: RPCGenericQueryObject, optimizername: String = "svm"): Try[Seq[(String, String, Double)]] = {
execute("collecting scored query execution paths operation") {
val optimizer = optimizername match {
case "svm" => Optimizer.SVM_OPTIMIZER
case "naive" => Optimizer.NAIVE_OPTIMIZER
case "lr" => Optimizer.LR_OPTIMIZER
case _ => throw new Exception("optimizer name is not known")
}
val res = searcherBlocking.getScoredExecutionPath(RPCSimulationQueryObject(qo, optimizer).buildQueryMessage)
if (res.ack.get.code.isOk) {
return Success(res.executionpaths.map(x => (x.scan, x.scantype, x.score)))
} else {
throw new Exception(res.ack.get.message)
}
}
}
/**
* Perform a search given a JSON-encoded query message.
*
* @param json query message as a JSON string
* @return
*/
def doQuery(json: String): Try[Seq[RPCQueryResults]] = {
execute("json query operation") {
val query = JsonFormat.fromJsonString[QueryMessage](json)
val res = searcherBlocking.doQuery(query)
if (res.ack.get.code.isOk) {
return Success(res.responses.map(new RPCQueryResults(_)))
} else {
throw new Exception(res.ack.get.message)
}
}
}
/**
* Returns registered storage handlers.
*
* @return
*/
def storageHandlerList(): Try[Map[String, Seq[String]]] = {
execute("get storage handlers operation") {
Success(definerBlocking.listStorageHandlers(EmptyMessage()).handlers.map(handler => handler.name -> handler.attributetypes.map(_.toString)).toMap)
}
}
/**
* Shutdown connection.
*/
def shutdown(): Unit = {
channel.shutdown.awaitTermination(5, TimeUnit.SECONDS)
}
val str2grpcTypes = Map("auto" -> AttributeType.AUTO, "long" -> AttributeType.LONG, "int" -> AttributeType.INT, "float" -> AttributeType.FLOAT,
"double" -> AttributeType.DOUBLE, "string" -> AttributeType.STRING, "text" -> AttributeType.TEXT, "boolean" -> AttributeType.BOOLEAN, "geography" -> AttributeType.GEOGRAPHY,
"geometry" -> AttributeType.GEOMETRY, "vector" -> AttributeType.VECTOR, "sparsevector" -> AttributeType.SPARSEVECTOR)
val grpc2strTypes = str2grpcTypes.map(_.swap)
/**
*
* @param s string of field type name
* @return
*/
private def getGrpcType(s: String): AttributeType = str2grpcTypes.get(s).orNull
private def getStrType(a: AttributeType): String = grpc2strTypes.get(a).orNull
//TODO: add a getAttributes method for an entity, to retrieve the attributes to display
}
object RPCClient {
def apply(host: String, port: Int): RPCClient = {
val channel = NettyChannelBuilder.forAddress(host, port)
.usePlaintext(true)
.nameResolverFactory(new DnsNameResolverProvider())
.maxMessageSize(12582912)
.asInstanceOf[ManagedChannelBuilder[_]]
.build()
new RPCClient(
channel,
AdamDefinitionGrpc.blockingStub(channel),
AdamDefinitionGrpc.stub(channel),
AdamSearchGrpc.blockingStub(channel),
AdamSearchGrpc.stub(channel)
)
}
}
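// Hedged usage sketch (editorial addition, not part of the original source):
// shows how the client defined above might be obtained and used. The host, port
// and the entity name "myentity" are hypothetical placeholders; an ADAMpro server
// must be reachable for these calls to succeed.
object RPCClientUsageExample extends App {
  val client = RPCClient("localhost", 5890)
  // list the indexes of an (assumed existing) entity: (indexname, attribute, indextype)
  client.indexList("myentity").foreach(_.foreach(println))
  // list the registered storage handlers and the attribute types they support
  client.storageHandlerList().foreach(println)
  client.shutdown()
}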
|
dbisUnibas/ADAMpro
|
grpcclient/src/main/scala/org/vitrivr/adampro/communication/RPCClient.scala
|
Scala
|
mit
| 26,696
|
package org.jetbrains.plugins.scala.testingSupport.scalatest.scala2_11.scalatest2_2_1
import org.jetbrains.plugins.scala.base.libraryLoaders._
import org.jetbrains.plugins.scala.testingSupport.scalatest.ScalaTestTestCase
/**
* @author Roman.Shein
* @since 22.01.2015
*/
abstract class Scalatest2_11_2_2_1_Base extends ScalaTestTestCase {
override protected def additionalLibraries: Seq[ThirdPartyLibraryLoader] = {
implicit val module = getModule
Seq(ScalaTestLoader("2.2.1", IvyLibraryLoader.Bundles), ScalaXmlLoader())
}
}
|
loskutov/intellij-scala
|
test/org/jetbrains/plugins/scala/testingSupport/scalatest/scala2_11/scalatest2_2_1/Scalatest2_11_2_2_1_Base.scala
|
Scala
|
apache-2.0
| 543
|
/*
* Copyright (C) 2016 DANS - Data Archiving and Networked Services (info@dans.knaw.nl)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.multideposit.actions
import java.util.UUID
import nl.knaw.dans.easy.multideposit.TestSupportFixture
import nl.knaw.dans.easy.multideposit.model.AudioVideo
import org.apache.commons.configuration.PropertiesConfiguration
import org.scalatest.BeforeAndAfterEach
import scala.collection.JavaConverters._
class AddPropertiesToDepositSpec extends TestSupportFixture with BeforeAndAfterEach {
private val depositId = "ds1"
private val datamanagerId = "dm"
private val datamanagerEmail = "dm@test.org"
private val dansDoiPrefix = "10.17026/"
private val action = new AddPropertiesToDeposit(dansDoiPrefix)
override def beforeEach(): Unit = {
val path = stagingDir / s"sd-$depositId"
if (path.exists) path.delete()
path.createDirectories()
}
"addDepositProperties" should "generate the properties file and write the properties in it" in {
val uuid = UUID.randomUUID()
action.addDepositProperties(testInstructions1.copy(audioVideo = AudioVideo()).toDeposit().copy(bagId = uuid), datamanagerId, datamanagerEmail) shouldBe right[Unit]
val props = stagingPropertiesFile(testInstructions1.depositId)
props.toJava should exist
val resultProps = new PropertiesConfiguration {
setDelimiterParsingDisabled(true)
load(props.toJava)
}
resultProps.getKeys.asScala.toList should {
contain only(
"bag-store.bag-id",
"creation.timestamp",
"state.label",
"state.description",
"depositor.userId",
"curation.datamanager.email",
"curation.datamanager.userId",
"curation.required",
"curation.performed",
"identifier.dans-doi.registered",
"identifier.dans-doi.action",
"bag-store.bag-name",
"deposit.origin",
) and contain noneOf(
"springfield.domain",
"springfield.user",
"springfield.collection",
"springfield.playmode",
)
}
resultProps.getString("bag-store.bag-id") shouldBe uuid.toString
resultProps.getString("depositor.userId") shouldBe "ruimtereiziger1"
resultProps.getString("curation.datamanager.email") shouldBe datamanagerEmail
resultProps.getString("curation.datamanager.userId") shouldBe datamanagerId
resultProps.getString("curation.required") shouldBe "yes"
resultProps.getString("curation.performed") shouldBe "yes"
resultProps.getString("identifier.dans-doi.registered") shouldBe "no"
resultProps.getString("identifier.dans-doi.action") shouldBe "create"
resultProps.getString("deposit.origin") shouldBe "SMD"
}
it should "generate the properties file and write the properties in it with dans-doi.action 'update'" in {
val uuid = UUID.randomUUID()
action.addDepositProperties(testInstructions2.copy(audioVideo = AudioVideo()).toDeposit().copy(bagId = uuid), datamanagerId, datamanagerEmail) shouldBe right[Unit]
val props = stagingPropertiesFile(testInstructions2.depositId)
props.toJava should exist
val resultProps = new PropertiesConfiguration {
setDelimiterParsingDisabled(true)
load(props.toJava)
}
resultProps.getKeys.asScala.toList should {
contain only(
"bag-store.bag-id",
"creation.timestamp",
"state.label",
"state.description",
"depositor.userId",
"curation.datamanager.email",
"curation.datamanager.userId",
"curation.required",
"curation.performed",
"identifier.dans-doi.registered",
"identifier.dans-doi.action",
"bag-store.bag-name",
"deposit.origin",
) and contain noneOf(
"springfield.domain",
"springfield.user",
"springfield.collection",
"springfield.playmode",
)
}
resultProps.getString("bag-store.bag-id") shouldBe uuid.toString
resultProps.getString("depositor.userId") shouldBe "ruimtereiziger2"
resultProps.getString("curation.datamanager.email") shouldBe datamanagerEmail
resultProps.getString("curation.datamanager.userId") shouldBe datamanagerId
resultProps.getString("curation.required") shouldBe "yes"
resultProps.getString("curation.performed") shouldBe "yes"
resultProps.getString("identifier.dans-doi.registered") shouldBe "no"
resultProps.getString("identifier.dans-doi.action") shouldBe "update"
resultProps.getString("deposit.origin") shouldBe "SMD"
}
it should "generate the properties file with springfield fields and write the properties in it" in {
val uuid = UUID.randomUUID()
action.addDepositProperties(testInstructions1.toDeposit().copy(bagId = uuid), datamanagerId, datamanagerEmail) shouldBe right[Unit]
val props = stagingPropertiesFile(testInstructions1.depositId)
props.toJava should exist
val resultProps = new PropertiesConfiguration {
setDelimiterParsingDisabled(true)
load(props.toJava)
}
resultProps.getKeys.asScala.toList should {
contain only(
"bag-store.bag-id",
"creation.timestamp",
"state.label",
"state.description",
"depositor.userId",
"curation.datamanager.email",
"curation.datamanager.userId",
"curation.required",
"curation.performed",
"springfield.domain",
"springfield.user",
"springfield.collection",
"springfield.playmode",
"identifier.dans-doi.registered",
"identifier.dans-doi.action",
"bag-store.bag-name",
"deposit.origin",
)
}
resultProps.getString("bag-store.bag-id") shouldBe uuid.toString
resultProps.getString("depositor.userId") shouldBe "ruimtereiziger1"
resultProps.getString("curation.datamanager.email") shouldBe datamanagerEmail
resultProps.getString("curation.datamanager.userId") shouldBe datamanagerId
resultProps.getString("curation.required") shouldBe "yes"
resultProps.getString("curation.performed") shouldBe "yes"
resultProps.getString("springfield.domain") shouldBe "dans"
resultProps.getString("springfield.user") shouldBe "janvanmansum"
resultProps.getString("springfield.collection") shouldBe "Jans-test-files"
resultProps.getString("springfield.playmode") shouldBe "menu"
resultProps.getString("identifier.dans-doi.registered") shouldBe "no"
resultProps.getString("identifier.dans-doi.action") shouldBe "create"
resultProps.getString("bag-store.bag-name") shouldBe bagDirName
resultProps.getString("deposit.origin") shouldBe "SMD"
}
}
|
DANS-KNAW/easy-process-sip
|
src/test/scala/nl.knaw.dans.easy.multideposit/actions/AddPropertiesToDepositSpec.scala
|
Scala
|
apache-2.0
| 7,169
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.test
import scala.concurrent.duration._
import org.scalatest.{BeforeAndAfterEach, Suite}
import org.scalatest.concurrent.Eventually
import org.apache.spark.{DebugFilesystem, SparkConf}
import org.apache.spark.sql.{SparkSession, SQLContext}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.oap.OapRuntime
trait OapSharedSQLContext extends SQLTestUtils with OapSharedSparkSession
/**
* Helper trait for SQL test suites where all tests share a single [[SparkSession]].
*/
trait OapSharedSparkSession
extends SQLTestUtilsBase
with BeforeAndAfterEach
with Eventually { self: Suite =>
protected def sparkConf: SparkConf = {
new SparkConf()
.set("spark.hadoop.fs.file.impl", classOf[DebugFilesystem].getName)
.set("spark.unsafe.exceptionOnMemoryLeak", "true")
.set(SQLConf.CODEGEN_FALLBACK.key, "false")
.set("spark.sql.testkey", "true")
.set(SQLConf.SHUFFLE_PARTITIONS.key, "5")
.set("spark.memory.offHeap.size", "100m")
}
/**
* The [[SparkSession]] to use for all tests in this suite.
*
* By default, the underlying [[org.apache.spark.SparkContext]] will be run in local
* mode with the default test configurations.
*/
private var _spark: SparkSession = _
/**
* The [[SparkSession]] to use for all tests in this suite.
*/
protected implicit def spark: SparkSession = _spark
/**
* The [[SQLContext]] to use for all tests in this suite.
*/
protected implicit def sqlContext: SQLContext = _spark.sqlContext
/**
* SubClass should override this method to construct [[SparkSession]]
*/
protected def createSparkSession: SparkSession
/**
* Initialize the [[SparkSession]]. Generally, this is just called from
* beforeAll; however, in tests using styles other than FunSuite, there is
* often code that relies on the session between test group constructs and
* the actual tests, which may need this session. It is purely a semantic
* difference, but semantically, it makes more sense to call
* 'initializeSession' between a 'describe' and an 'it' call than it does to
* call 'beforeAll'.
*/
protected def initializeSession(): Unit = {
if (_spark == null) {
_spark = createSparkSession
}
}
/**
* Make sure the [[SparkSession]] is initialized before any tests are run.
*/
protected override def beforeAll(): Unit = {
initializeSession()
// Ensure we have initialized the context before calling parent code
super.beforeAll()
}
/**
* Stop the underlying [[org.apache.spark.SparkContext]], if any.
*/
protected override def afterAll(): Unit = {
try {
super.afterAll()
} finally {
try {
if (_spark != null) {
try {
_spark.sessionState.catalog.reset()
} finally {
OapRuntime.stop()
_spark.stop()
_spark = null
}
}
} finally {
SparkSession.clearActiveSession()
SparkSession.clearDefaultSession()
}
}
}
protected override def beforeEach(): Unit = {
super.beforeEach()
DebugFilesystem.clearOpenStreams()
}
protected override def afterEach(): Unit = {
super.afterEach()
// Clear all persistent datasets after each test
spark.sharedState.cacheManager.clearCache()
// files can be closed from other threads, so wait a bit
// normally this doesn't take more than 1s
eventually(timeout(10.seconds), interval(2.seconds)) {
DebugFilesystem.assertNoOpenStreams()
}
}
}
|
Intel-bigdata/OAP
|
oap-cache/oap/src/test/scala/org/apache/spark/sql/test/OapSharedSQLContext.scala
|
Scala
|
apache-2.0
| 4,391
|
package mesosphere.marathon
package core.deployment
import com.typesafe.scalalogging.StrictLogging
import mesosphere.marathon.core.condition.Condition
import mesosphere.marathon.core.condition.Condition.UnreachableInactive
import mesosphere.marathon.core.instance.{Goal, Instance}
import mesosphere.marathon.state.{KillSelection, PathId, Timestamp}
case class ScalingProposition(toDecommission: Seq[Instance], toStart: Int)
object ScalingProposition extends StrictLogging {
def propose(
instances: Seq[Instance],
toDecommission: Seq[Instance],
meetConstraints: ((Seq[Instance], Int) => Seq[Instance]),
scaleTo: Int,
killSelection: KillSelection,
runSpecId: PathId): ScalingProposition = {
val instancesGoalRunning: Map[Instance.Id, Instance] = instances
.filter(_.state.goal == Goal.Running)
.filter(_.state.condition != UnreachableInactive)
.map(instance => instance.instanceId -> instance)(collection.breakOut)
val toDecommissionMap: Map[Instance.Id, Instance] = toDecommission.map(instance => instance.instanceId -> instance)(collection.breakOut)
val (sentencedAndRunningMap, notSentencedAndRunningMap) = instancesGoalRunning partition {
case (instanceId, _) =>
toDecommissionMap.contains(instanceId)
}
// overall number of tasks that need to be killed
val decommissionCount = math.max(instancesGoalRunning.size - scaleTo, sentencedAndRunningMap.size)
// tasks that should be killed to meet constraints – pass notSentenced & consider the sentenced 'already killed'
val killToMeetConstraints = meetConstraints(
notSentencedAndRunningMap.values.to[Seq],
decommissionCount - sentencedAndRunningMap.size
)
// rest are tasks that are not sentenced and need not be killed to meet constraints
val rest: Seq[Instance] = (notSentencedAndRunningMap -- killToMeetConstraints.map(_.instanceId)).values.to[Seq]
val orderedDecommissionCandidates =
Seq(sentencedAndRunningMap.values, killToMeetConstraints, rest.sortWith(sortByConditionAndDate(killSelection))).flatten
val instancesToDecommission = orderedDecommissionCandidates.take(decommissionCount)
val numberOfInstancesToStart = scaleTo - instancesGoalRunning.size + decommissionCount
if (numberOfInstancesToStart > 0) {
logger.info(s"Need to scale $runSpecId from ${instancesGoalRunning.size} up to $scaleTo instances")
}
if (instancesToDecommission.nonEmpty) {
logger.info(s"Going to decommission instances '${instancesToDecommission.map(_.instanceId).mkString(",")}'." +
s" of runspec $runSpecId. We have ${instancesGoalRunning.size} and we need $scaleTo instances.")
}
ScalingProposition(instancesToDecommission, numberOfInstancesToStart)
}
// TODO: this should evaluate a task's health as well
/**
* If we need to kill tasks, the order should be LOST - UNREACHABLE - UNHEALTHY - STAGING - (EVERYTHING ELSE)
* If two tasks are staging, kill the one with the latest staging timestamp.
* If both are started, kill one according to the KillSelection.
*
* @param select Defines which instance to kill first if both have the same state.
* @param a The instance that is compared to b
* @param b The instance that is compared to a
* @return true if a comes before b
*/
def sortByConditionAndDate(select: KillSelection)(a: Instance, b: Instance): Boolean = {
val weightA = weight(a.state.condition)
val weightB = weight(b.state.condition)
if (weightA < weightB) true
else if (weightA > weightB) false
else if (a.state.condition == Condition.Staging && b.state.condition == Condition.Staging) {
// Both are staging.
select(stagedAt(a), stagedAt(b))
} else if (a.state.condition == Condition.Starting && b.state.condition == Condition.Starting) {
select(a.state.since, b.state.since)
} else {
// Both are assumed to be started.
// None is actually an error case :/
(a.state.activeSince, b.state.activeSince) match {
case (None, Some(_)) => true
case (Some(_), None) => false
case (Some(left), Some(right)) => select(left, right)
case (None, None) => true
}
}
}
/** tasks with lower weight should be killed first */
private val weight: Map[Condition, Int] = Map[Condition, Int](
Condition.Unreachable -> 1,
Condition.Staging -> 2,
Condition.Starting -> 3,
Condition.Running -> 4).withDefaultValue(5)
private def stagedAt(instance: Instance): Timestamp = {
val stagedTasks = instance.tasksMap.values.map(_.status.stagedAt)
if (stagedTasks.nonEmpty) stagedTasks.max else Timestamp.now()
}
}
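// Editorial note (worked example, not part of the original source):
// with 5 instances whose goal is Running, scaleTo = 3 and 1 of them already
// sentenced for decommission, the proposition computes
//   decommissionCount        = max(5 - 3, 1) = 2
//   numberOfInstancesToStart = 3 - 5 + 2     = 0
// so two instances are decommissioned (sentenced ones first, then the ones
// ranked worst by sortByConditionAndDate) and none are started.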
|
gsantovena/marathon
|
src/main/scala/mesosphere/marathon/core/deployment/ScalingProposition.scala
|
Scala
|
apache-2.0
| 4,692
|
package io.eels.component.kudu
import com.sksamuel.exts.Logging
import io.eels.schema._
import io.eels.{CloseableIterator, Part, Row, Source}
import org.apache.kudu.client.{KuduClient, RowResultIterator}
import scala.collection.JavaConverters._
case class KuduSource(tableName: String)(implicit client: KuduClient) extends Source with Logging {
override lazy val schema: StructType = {
val schema = client.openTable(tableName).getSchema
KuduSchemaFns.fromKuduSchema(schema)
}
override def parts(): Seq[Part] = Seq(new KuduPart(tableName))
class KuduPart(tableName: String) extends Part {
override def iterator(): CloseableIterator[Seq[Row]] = {
val projectColumns = schema.fieldNames()
val table = client.openTable(tableName)
val scanner = client.newScannerBuilder(table)
.setProjectedColumnNames(projectColumns.asJava)
.build()
val _iter = new Iterator[Row] {
var iter: Iterator[Row] = Iterator.empty
override def hasNext: Boolean = iter.hasNext || {
if (scanner.hasMoreRows) {
iter = ResultsIterator(schema, scanner.nextRows)
iter.hasNext
} else {
false
}
}
override def next(): Row = iter.next()
}
new CloseableIterator[Seq[Row]] {
override val iterator: Iterator[Seq[Row]] = _iter.grouped(100).withPartial(true)
}
}
}
}
object ResultsIterator {
def apply(schema: StructType, iter: RowResultIterator) = new Iterator[Row] {
val zipped = schema.fields.zipWithIndex
override def hasNext: Boolean = iter.hasNext
override def next(): Row = {
val next = iter.next()
val values = zipped.map { case (field, index) =>
field.dataType match {
case BinaryType => BinaryValueReader.read(next, index)
case BooleanType => BooleanValueReader.read(next, index)
case _: ByteType => ByteValueReader.read(next, index)
case DoubleType => DoubleValueReader.read(next, index)
case FloatType => FloatValueReader.read(next, index)
case _: IntType => IntValueReader.read(next, index)
case _: LongType => LongValueReader.read(next, index)
case _: ShortType => ShortValueReader.read(next, index)
case StringType => StringValueReader.read(next, index)
}
}
Row(schema, values)
}
}
}
object KuduSource {
def apply(master: String, table: String): KuduSource = {
implicit val client = new KuduClient.KuduClientBuilder(master).build()
KuduSource(table)
}
}
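// Hedged usage sketch (editorial addition, not part of the original source):
// "kudu-master:7051" and "impala::default.users" are hypothetical placeholders
// for a reachable Kudu master address and an existing table name.
object KuduSourceExample extends App {
  val source = KuduSource("kudu-master:7051", "impala::default.users")
  println(source.schema)        // eel StructType derived from the Kudu table schema
  println(source.parts().size)  // this simple source exposes a single part
}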
|
stheppi/eel
|
eel-kudu/src/main/scala/io/eels/component/kudu/KuduSource.scala
|
Scala
|
apache-2.0
| 2,593
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.csv
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.hadoop.mapreduce._
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{AnalysisException, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.util.CompressionCodecs
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._
import org.apache.spark.util.SerializableConfiguration
/**
* Provides access to CSV data from pure SQL statements.
*/
class CSVFileFormat extends TextBasedFileFormat with DataSourceRegister {
override def shortName(): String = "csv"
override def isSplitable(
sparkSession: SparkSession,
options: Map[String, String],
path: Path): Boolean = {
val parsedOptions =
new CSVOptions(options, sparkSession.sessionState.conf.sessionLocalTimeZone)
val csvDataSource = CSVDataSource(parsedOptions)
csvDataSource.isSplitable && super.isSplitable(sparkSession, options, path)
}
override def inferSchema(
sparkSession: SparkSession,
options: Map[String, String],
files: Seq[FileStatus]): Option[StructType] = {
require(files.nonEmpty, "Cannot infer schema from an empty set of files")
val parsedOptions =
new CSVOptions(options, sparkSession.sessionState.conf.sessionLocalTimeZone)
CSVDataSource(parsedOptions).infer(sparkSession, files, parsedOptions)
}
override def prepareWrite(
sparkSession: SparkSession,
job: Job,
options: Map[String, String],
dataSchema: StructType): OutputWriterFactory = {
CSVUtils.verifySchema(dataSchema)
val conf = job.getConfiguration
val csvOptions = new CSVOptions(options, sparkSession.sessionState.conf.sessionLocalTimeZone)
csvOptions.compressionCodec.foreach { codec =>
CompressionCodecs.setCodecConfiguration(conf, codec)
}
new OutputWriterFactory {
override def newInstance(
path: String,
dataSchema: StructType,
context: TaskAttemptContext): OutputWriter = {
new CsvOutputWriter(path, dataSchema, context, csvOptions)
}
override def getFileExtension(context: TaskAttemptContext): String = {
".csv" + CodecStreams.getCompressionExtension(context)
}
}
}
override def buildReader(
sparkSession: SparkSession,
dataSchema: StructType,
partitionSchema: StructType,
requiredSchema: StructType,
filters: Seq[Filter],
options: Map[String, String],
hadoopConf: Configuration): (PartitionedFile) => Iterator[InternalRow] = {
CSVUtils.verifySchema(dataSchema)
val broadcastedHadoopConf =
sparkSession.sparkContext.broadcast(new SerializableConfiguration(hadoopConf))
val parsedOptions = new CSVOptions(
options,
sparkSession.sessionState.conf.sessionLocalTimeZone,
sparkSession.sessionState.conf.columnNameOfCorruptRecord)
// Check a field requirement for corrupt records here to throw an exception in a driver side
dataSchema.getFieldIndex(parsedOptions.columnNameOfCorruptRecord).foreach { corruptFieldIndex =>
val f = dataSchema(corruptFieldIndex)
if (f.dataType != StringType || !f.nullable) {
throw new AnalysisException(
"The field for corrupt records must be string type and nullable")
}
}
(file: PartitionedFile) => {
val conf = broadcastedHadoopConf.value.value
val parser = new UnivocityParser(dataSchema, requiredSchema, parsedOptions)
CSVDataSource(parsedOptions).readFile(conf, file, parser, parsedOptions)
}
}
override def toString: String = "CSV"
override def hashCode(): Int = getClass.hashCode()
override def equals(other: Any): Boolean = other.isInstanceOf[CSVFileFormat]
}
private[csv] class CsvOutputWriter(
path: String,
dataSchema: StructType,
context: TaskAttemptContext,
params: CSVOptions) extends OutputWriter with Logging {
private val writer = CodecStreams.createOutputStreamWriter(context, new Path(path))
private val gen = new UnivocityGenerator(dataSchema, writer, params)
override def write(row: InternalRow): Unit = gen.write(row)
override def close(): Unit = gen.close()
}
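// Hedged usage sketch (editorial addition, not part of the original source):
// reading a hypothetical CSV file through the "csv" short name registered by
// CSVFileFormat above, assuming an active SparkSession named `spark`:
//
//   val df = spark.read
//     .format("csv")
//     .option("header", "true")
//     .option("inferSchema", "true")
//     .load("/path/to/people.csv")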
|
jianran/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVFileFormat.scala
|
Scala
|
apache-2.0
| 5,169
|
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
case class CP501(value: Option[Int]) extends CtBoxIdentifier(name = "Gross income from property") with CtOptionalInteger with Input with ValidatableBox[ComputationsBoxRetriever] {
override def validate(boxRetriever: ComputationsBoxRetriever): Set[CtValidation] = validateIntegerRange("CP501", this, 0, 5200)
}
object CP501 {
def apply(int: Int): CP501 = CP501(Some(int))
}
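// Illustrative example (editorial addition, not part of the original source):
// CP501 holds gross income from property and is validated against the inclusive
// range 0 to 5200; values outside that range produce a CtValidation error when
// validate is called with a ComputationsBoxRetriever.
object CP501Example {
  val inRange = CP501(1200)   // Some(1200), within 0..5200
  val blank = CP501(None)     // optional box left empty
}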
|
pncampbell/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/computations/CP501.scala
|
Scala
|
apache-2.0
| 1,123
|
/*
* Copyright 2016 Nikolay Donets
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.nikdon.telepooz.model.payments
/**
* This object contains basic information about a successful payment.
*
* @param currency Three-letter ISO 4217 currency code
* @param total_amount Total price in the smallest units of the currency (integer, not float/double).
* For example, for a price of US$ 1.45 pass amount = 145.
* See the exp parameter in currencies.json, it shows the number of digits past
* the decimal point for each currency (2 for the majority of currencies).
* @param invoice_payload Bot specified invoice payload
* @param shipping_option_id Optional. Identifier of the shipping option chosen by the user
* @param order_info Optional. Order info provided by the user
* @param telegram_payment_charge_id Telegram payment identifier
* @param provider_payment_charge_id Provider payment identifier
*/
case class SuccessfulPayment(
currency: String,
total_amount: Int,
invoice_payload: String,
shipping_option_id: Option[String] = None,
order_info: Option[OrderInfo] = None,
telegram_payment_charge_id: String,
provider_payment_charge_id: String
)
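// Illustrative example (editorial addition, not part of the original source):
// a payment of US$ 1.45 is encoded with total_amount = 145 (smallest currency
// units); the payload and charge identifiers below are hypothetical placeholders.
object SuccessfulPaymentExample {
  val payment = SuccessfulPayment(
    currency = "USD",
    total_amount = 145,
    invoice_payload = "order-42",
    telegram_payment_charge_id = "tg-charge-0001",
    provider_payment_charge_id = "provider-charge-0001"
  )
}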
|
nikdon/telepooz
|
src/main/scala/com/github/nikdon/telepooz/model/payments/SuccessfulPayment.scala
|
Scala
|
apache-2.0
| 1,899
|
package controllers
import java.util.concurrent.CopyOnWriteArrayList
import com.bryzek.apidoc.test.full.v0.models._
import com.bryzek.apidoc.test.full.v0.models.json._
import play.api.libs.json.{JsError, JsSuccess, Json}
import play.api.mvc._
import scala.collection.JavaConversions._
class Users @javax.inject.Inject() () extends Controller with BasicAuthController {
val users = new CopyOnWriteArrayList[User]()
/**
* Purposely freak out if a header isn't present to help client side testing
*/
def get() = BasicAuth { implicit request =>
if (request.headers.get("test-header").contains("test-header-value")) {
Ok(
Json.toJson(users.take(30))
)
} else {
BadRequest
}
}
def getById(id: String) = BasicAuth { implicit request =>
val user = users.find {
case RegisteredUser(usrId, email) => usrId == id
case GuestUser(usrId, email) => usrId == id
case UserString(usrId) => usrId == id
case UserUndefinedType(name) => name == id
case SystemUser.System => id == "system"
case SystemUser.Anonymous => id == "anonymous"
case SystemUser.UNDEFINED(name) => id == name
case _ => false
}
user.map(u => Ok(Json.toJson(u))).getOrElse(NotFound)
}
def post() = BasicAuth(parse.json) { implicit request =>
request.body.validate[User] match {
case e:JsError => {
UnprocessableEntity
}
case s:JsSuccess[User] => {
val userForm = s.get
val newUser = userForm match {
case RegisteredUser(id, email) => RegisteredUser(id, email)
case GuestUser(id, email) => GuestUser(id, email)
case UserString(id) => UserString(id)
case UserUndefinedType(name) => UserUndefinedType(name)
case SystemUser.System => SystemUser.System
case SystemUser.Anonymous => SystemUser.Anonymous
case SystemUser.UNDEFINED(name) => SystemUser.UNDEFINED(name)
}
users.add(newUser)
Created(Json.toJson(newUser))
}
}
}
}
|
mbryzek/apidoc-test
|
full/app/controllers/Users.scala
|
Scala
|
mit
| 2,057
|
package com.dominikgruber.scalatorrent.actor
import akka.actor.{ActorRef, Props, Actor}
import akka.io.{IO, Tcp}
import com.dominikgruber.scalatorrent.tracker.{Peer => PeerInformation}
import java.net.InetSocketAddress
object ConnectionHandler {
case class CreatePeerConnection(peer: PeerInformation)
case class PeerConnectionCreated(connection: ActorRef, peer: PeerInformation)
}
class ConnectionHandler(endpoint: InetSocketAddress, internalPeerId: String) extends Actor {
import Tcp._
import context.system
import ConnectionHandler._
// Torrent coordinator actor
val coordinator = context.parent
// Start listening to incoming connections
IO(Tcp) ! Tcp.Bind(self, endpoint)
override def receive = {
case CommandFailed(_: Bind) =>
// TODO: Handle failure
case c @ Connected(remoteAddress, _) =>
val handler = createPeerConnectionActor(remoteAddress)
sender ! Register(handler)
case CreatePeerConnection(peer) =>
val peerConnection = createPeerConnectionActor(peer.inetSocketAddress)
sender ! PeerConnectionCreated(peerConnection, peer)
}
private def createPeerConnectionActor(remoteAddress: InetSocketAddress) =
context.actorOf(Props(classOf[PeerConnection], remoteAddress, internalPeerId, coordinator), "peer-connection-" + remoteAddress.toString.replace("/", ""))
}
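// Hedged usage sketch (editorial addition, not part of the original source):
// spawns the handler in a hypothetical actor system; port 6881 and the peer id
// string are placeholders. In the real application the handler is created as a
// child of the torrent coordinator, which then receives PeerConnectionCreated messages.
object ConnectionHandlerExample extends App {
  import akka.actor.ActorSystem
  val system = ActorSystem("scala-torrent-example")
  val endpoint = new InetSocketAddress("0.0.0.0", 6881)
  val handler = system.actorOf(
    Props(classOf[ConnectionHandler], endpoint, "-SC0001-123456789012"),
    "connection-handler")
}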
|
TheDom/scala-torrent
|
src/main/scala/com/dominikgruber/scalatorrent/actor/ConnectionHandler.scala
|
Scala
|
mit
| 1,347
|
package scorex.crypto.hash
/**
* Thread-unsafe hash classes may be used for performance purposes
*/
trait ThreadUnsafeHash[D <: Digest] extends CryptographicHash[D]
|
ScorexProject/scrypto
|
src/main/scala/scorex/crypto/hash/ThreadUnsafeHash.scala
|
Scala
|
cc0-1.0
| 171
|
package main.scala
import java.io.BufferedInputStream
import java.io.BufferedOutputStream
import java.net.Socket
import java.io.ByteArrayOutputStream
class ClientRequestHandler {
def execRemoteObject(data: Array[Byte], socket: Socket): Array[Byte] = {
var is:BufferedInputStream = new BufferedInputStream(socket.getInputStream());
var os:BufferedOutputStream = new BufferedOutputStream(socket.getOutputStream());
sendData(data, os);
var result: Array[Byte] = recieveDataFromServer(is);
closeConnection(socket, is, os);
result;
}
def sendData(data: Array[Byte], os:BufferedOutputStream) {
os.write(data, 0, data.length);
os.flush();
}
def recieveDataFromServer(is:BufferedInputStream): Array[Byte] = {
var byteOutput: ByteArrayOutputStream = new ByteArrayOutputStream();
var buffer: Array[Byte] = Array.ofDim[Byte](4096);
var len: Int = is.read(buffer);
while (len > 0) {
byteOutput.write(buffer, 0, len); // write only the bytes actually read, not the whole buffer
if (is.available() > 0) {
len = is.read(buffer);
} else {
len = 0;
}
}
var resultData: Array[Byte] = byteOutput.toByteArray();
byteOutput.close();
resultData;
}
def closeConnection(socket: Socket, is:BufferedInputStream, os:BufferedOutputStream) {
if (is != null) {
is.close();
}
if (os != null) {
os.close();
}
if (socket != null) {
socket.close();
}
}
}
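// Hedged usage sketch (editorial addition, not part of the original source):
// sends a byte payload to a hypothetical FLiMSy server on localhost:9999 and
// prints the size of the reply; the handler closes the socket afterwards.
object ClientRequestHandlerExample {
  def main(args: Array[String]): Unit = {
    val socket = new Socket("localhost", 9999)
    val handler = new ClientRequestHandler
    val reply = handler.execRemoteObject("ping".getBytes, socket)
    println(s"received ${reply.length} bytes")
  }
}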
|
labs2/FLiMSy
|
ServerFLiMSy/src/main/scala/ClientRequestHandler.scala
|
Scala
|
apache-2.0
| 1,453
|
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package scalaguide
// #scalaexample
import javax.inject.Inject
import javax.net.ssl._
import play.core.ApplicationProvider
import play.server.api._
class CustomSSLEngineProvider @Inject() (appProvider: ApplicationProvider) extends SSLEngineProvider {
override def createSSLEngine(): SSLEngine = {
// change it to your custom implementation
sslContext().createSSLEngine
}
override def sslContext(): SSLContext = {
// change it to your custom implementation
SSLContext.getDefault
}
}
// #scalaexample
|
benmccann/playframework
|
documentation/manual/working/commonGuide/production/code/scalaguide/CustomSSLEngineProvider.scala
|
Scala
|
apache-2.0
| 594
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.plans.logical.statsEstimation
import org.apache.spark.sql.catalyst.expressions.AttributeMap
import org.apache.spark.sql.catalyst.plans.logical.{Project, Statistics}
object ProjectEstimation {
import EstimationUtils._
def estimate(project: Project): Option[Statistics] = {
if (rowCountsExist(project.child)) {
val childStats = project.child.stats
val aliasStats = EstimationUtils.getAliasStats(project.expressions, childStats.attributeStats)
val outputAttrStats =
getOutputMap(AttributeMap(childStats.attributeStats.toSeq ++ aliasStats), project.output)
Some(childStats.copy(
sizeInBytes = getOutputSize(project.output, childStats.rowCount.get, outputAttrStats),
attributeStats = outputAttrStats))
} else {
None
}
}
}
|
shaneknapp/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statsEstimation/ProjectEstimation.scala
|
Scala
|
apache-2.0
| 1,633
|
#!/usr/bin/env scalas
/***
scalaVersion := "2.11.6"
*/
/*
Problem:
In the calculation below, different letters stand for different digits,
the same letter stands for the same digit,
and the digits range from 1 to 9 (0 excluded).
How many solutions are possible?
A B
- C D
-------
E F
+ G H
-------
P P P
*/
/*
Solution outline:
Since the sum of any two positive two-digit numbers can never exceed 200, P must be 1; each pair of digits is then merged into a single two-digit number:
W
- X
-----
Y
+ Z
-----
111
Since 0 and 1 are already taken, W, X, Y, Z must all be numbers greater than 20.
Since W - X must be positive, W > X.
The conditions above lead to:
for(
w <- (21..98),
x <- (21..98),
z <- (21..98)
)when(
w > x,
y = w - x,
y + z == 111,
r = (w.toString + x.toString + y.toString + z.toString),
r.toSet.length == 8,
!r.contains('0')
!r.contains('1')
).length
*/
val answers = for {
w <- (21 to 98)
x <- (21 to 98)
z <- (21 to 98)
if w > x
y = w - x
if y + z == 111
r = s"$w$x$y$z"
if r.toSet.size == 8
if !r.contains("0")
if !r.contains("1")
} yield (w, x, y, z)
println(s"1 ~ 9\ntotal answers is ${answers.size},\ndetails :\n${answers}\n")
/*
If the range is 0 to 9
*/
val answers09 = for {
w <- (2 to 98)
x <- (2 to 98)
z <- (2 to 98)
if w > x
y = w - x
if y + z == 111
r = f"${w}%02d${x}%02d${y}%02d${z}%02d"
if r.toSet.size == 8
if !r.contains("1")
} yield (w, x, y, z)
println(s"0 ~ 9\ntotal answers is ${answers09.size},\ndetails :\n${answers09}\n")
/*
If the range includes the minus sign, 0 to 9
*/
val answersMinus = for {
w <- (-9 to 98)
x <- (-9 to 98)
z <- (-9 to 98)
y = w - x
if y > -10
if y + z == 111 || y + z == 0
r = f"${w}%02d${x}%02d${y}%02d${z}%02d"
if r.toSet.size == 8
if !r.contains("1")
} yield (w, x, y, z)
println(s"- , 0 ~ 9\ntotal answers is ${answersMinus.size},\ndetails :\n${answersMinus}\n")
|
hotdog929/interesting-question
|
InterestingQuestion1.scala
|
Scala
|
mit
| 1,861
|
package org.rebeam.boxes.core.demo
//
//import org.rebeam.boxes.core.{Txn, TxnR, ShelfDefault}
//
//object TransactDemo {
//
// def thread(f: => Unit) = new Thread(new Runnable{
// def run() = f
// }).start()
//
// def main(args: Array[String]): Unit = {
// val s = ShelfDefault()
//
// val a = s.now.create("a")
// val b = s.now.create("b")
//
// val view = s.now.view{
// implicit t: TxnR => {
// println("view, a = " + a() + ", b = " + b())
// }
// }
//
// s.transact{
// implicit t: Txn => {
// println("a = " + a() + ", b = " + b())
// a() = "a2"
// println("a = " + a() + ", b = " + b())
// }
// }
//
// s.transact{
// implicit t: Txn => {
// println("a = " + a() + ", b = " + b())
// b()= "b2"
// println("a = " + a() + ", b = " + b())
// }
// }
//
// println(s.read{
// implicit t: TxnR => {
// "read a = " + a() + ", b = " + b()
// }
// })
//
//
// val x = s.now.create(0.0)
// val y = s.now.create(0.0)
//
// Range(0, 5).foreach(i => thread{
// Range(0, 1000).foreach{_ =>
// val a = Math.random() - 0.5
// s.transact{
// implicit t: Txn => {
// x() = x() - a
// y() = y() + a
// Thread.sleep(((a + 0.5) * 2).asInstanceOf[Long])
// }
// }
// }
// println("Thread " + i + " done")
// s.read{
// implicit t: TxnR => {
// println("x = " + x())
// println("y = " + y())
// }
// }
// })
//
// thread{
// Range(0, 1000).foreach{_ =>
// s.read {
// implicit t: TxnR => {
// println("x = " + x() + ", y = " + y())
// Thread.sleep(5)
// }
// }
// }
// }
//
//
// }
//}
|
trepidacious/boxes-core
|
src/main/scala/org/rebeam/boxes/core/demo/TransactDemo.scala
|
Scala
|
gpl-2.0
| 1,816
|
object Solution {
import scala.collection.mutable.HashMap
case class Memo[I <% K, K, O](f: I => O) extends (I => O) {
val cache = HashMap.empty[K, O]
def apply(x: I) = cache getOrElseUpdate (x, f(x))
}
type I = (Int, Int)
type K = I
type O = Int
type Choices = Memo[I, K, O]
val p = 1e9.toInt
lazy val nCr: Choices = Memo {
case (_, k) if k < 0 => 0
case (_, 0) => 1
case (n, k) if k > n / 2 => nCr(n, n - k)
case (n, k) => (nCr(n - 1, k) + nCr(n - 1, k - 1)) % p
}
def choicesOfCandy(n: Int, k: Int) = nCr(n + k - 1, k)
def main(args: Array[String]) {
for (_ <- 1 to readLine.toInt) {
val n = readInt
val k = readInt
println(choicesOfCandy(n, k))
}
}
}
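// Editorial note (worked example, not part of the original source):
// choicesOfCandy(n, k) counts multisets of size k drawn from n candy types,
// i.e. the stars-and-bars value C(n + k - 1, k) modulo 1e9. For n = 2 types
// and k = 3 candies: C(4, 3) = 4, the multisets AAA, AAB, ABB, BBB.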
|
advancedxy/hackerrank
|
algorithms/combinatorics/KCandyStore.scala
|
Scala
|
mit
| 745
|
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.compendium.types
/**
* Created by adarr on 3/25/2017.
*/
trait Giantese extends MainType {
override val mainTypes = Some(MonsterType.Giant)
}
|
adarro/ddo-calc
|
subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/compendium/types/Giantese.scala
|
Scala
|
apache-2.0
| 849
|
package app.adapters.database
import org.scalatest.{Matchers, WordSpec}
import scala.slick.jdbc.meta.MTable
/**
* User: asalvadore
*/
class DbInitializerSpec extends WordSpec with Matchers with DbSpec {
import dbProfile.profile.simple._
"createTable" should {
conn.withSession { implicit session =>
"increases the number of tables in the database" in {
tables.size should equal(1)
}
"create a table with the expected name" in {
val tableNames = tables.map(_.name.name)
tableNames should contain("tasks")
}
}
}
private def tables(implicit session: Session) = MTable.getTables("tasks").list
}
|
mericano1/spray-akka-slick-postgres
|
src/test/scala/app/adapters/database/DbInitializerSpec.scala
|
Scala
|
mit
| 667
|
// Copyright: 2010 - 2018 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.util
import shapeless._
// workaround lack of automatic enums for sealed traits by using shapeless
package enums {
trait SingletonByName[A, C <: Coproduct] {
def lookup: Map[String, A]
}
object SingletonByName {
implicit def CNilSingleton[A]: SingletonByName[A, CNil] =
new SingletonByName[A, CNil] {
override def lookup: Map[String, A] = Map.empty
}
implicit def coproductSingletons[A, H <: A, T <: Coproduct](
implicit
tsbn: SingletonByName[A, T],
witness: Witness.Aux[H],
tpe: Typeable[H]
): SingletonByName[A, H :+: T] = new SingletonByName[A, H :+: T] {
override def lookup: Map[String, A] = {
val label = tpe.describe.replaceAll(".type", "")
tsbn.lookup + (label -> witness.value)
}
}
}
trait AdtToMap[A] {
def lookup: Map[String, A]
}
object AdtToMap {
implicit def fromSingletonByName[A, C <: Coproduct](
implicit
@deprecated("local", "") gen: Generic.Aux[A, C],
singletonByName: SingletonByName[A, C]
): AdtToMap[A] = new AdtToMap[A] {
override def lookup: Map[String, A] = singletonByName.lookup
}
}
}
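// Hedged usage sketch (editorial addition, not part of the original source):
// derives a name -> value map for a hypothetical sealed family of case objects
// via the AdtToMap machinery defined above.
object EnumExampleAdt {
  sealed trait Colour
  case object Red extends Colour
  case object Blue extends Colour
}
object EnumExampleUsage {
  import EnumExampleAdt._
  // expected to yield Map("Red" -> Red, "Blue" -> Blue)
  val byName: Map[String, Colour] = implicitly[enums.AdtToMap[Colour]].lookup
}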
|
yyadavalli/ensime-server
|
util/src/main/scala/org/ensime/util/enums.scala
|
Scala
|
gpl-3.0
| 1,314
|
/**
* Copyright 2013-2015, AlwaysResolve Project (alwaysresolve.org), MOYD.CO LTD
* This file incorporates work covered by the following copyright and permission notice:
*
* Copyright 2012 silenteh
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models
import payload.RRData
import java.io.ObjectOutputStream
import java.io.ByteArrayOutputStream
import records.NULL
import com.fasterxml.jackson.annotation.JsonProperty
import com.fasterxml.jackson.annotation.JsonIgnoreProperties
import utils.HostnameUtils
@JsonIgnoreProperties(Array("typ"))
case class NullHost(
@JsonProperty("class") cls: String = null,
@JsonProperty("name") name: String = null,
@JsonProperty("value") value: Any,
@JsonProperty("ttl") timetolive: Long
) extends Host("NULL") {
def setName(newname: String) = NullHost(cls, newname, value, timetolive)
def getRData = new NULL(valueByteArray, timetolive)
override def toAbsoluteNames(domain: ExtendedDomain) =
NullHost(cls, HostnameUtils.absoluteHostName(name, domain.fullName), value, timetolive)
override def equals(other: Any) = other match {
case h: NullHost => h.cls == cls && h.name == name && h.value.equals(value)
case _ => false
}
lazy val valueByteArray = value match {
case v: String => v.getBytes
case v: Int => RRData.intToBytes(v)
case v: Array[String] => v.map(_.getBytes).flatten
case v: Array[Int] => v.map(RRData.intToBytes(_)).flatten
case _ => {
val bos = new ByteArrayOutputStream
val oos = new ObjectOutputStream(bos)
oos.writeObject(value)
val bytes = bos.toByteArray
oos.close
bos.close
bytes
}
}
}
|
Moydco/AlwaysResolveDNS
|
src/main/scala/models/NullHost.scala
|
Scala
|
apache-2.0
| 2,200
|
/*
* GeneralizedEM.scala
* Expectation maximization algorithm using any ProbQueryAlgorithm as the inference algorithm.
*
* Created By: Michael Howard (mhoward@cra.com)
* Creation Date: Jun 1, 2013
*
* Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email figaro@cra.com for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.algorithm.learning
import com.cra.figaro.language._
import com.cra.figaro.algorithm.{ Algorithm, ParameterLearner, ProbQueryAlgorithm, OneTime }
import com.cra.figaro.algorithm.factored.beliefpropagation.BeliefPropagation
import com.cra.figaro.algorithm.sampling.{ Importance, MetropolisHastings, ProposalScheme }
import com.cra.figaro.algorithm.factored.factors.Factory
import com.cra.figaro.patterns.learning.ModelParameters
import com.cra.figaro.algorithm.factored.SufficientStatisticsVariableElimination
import com.cra.figaro.algorithm.online.Online
/**
* Expectation maximization iteratively produces an estimate of sufficient statistics for learnable parameters,
* then maximizes the parameters according to the estimate. This trait can be extended with a different expectation
* or maximization algorithm; see the code for details.
*/
trait ExpectationMaximization extends Algorithm with ParameterLearner {
protected val paramMap: Map[Parameter[_], Seq[Double]] = Map(targetParameters.map(p => p -> p.zeroSufficientStatistics): _*)
protected def doExpectationStep(): Map[Parameter[_], Seq[Double]]
protected def doStart(): Unit = {
em()
}
/*
* Stop the algorithm from computing. The algorithm is still ready to provide answers after it returns.
*/
protected def doStop(): Unit = {}
/*
* Resume the computation of the algorithm, if it has been stopped.
*/
protected def doResume(): Unit = {}
/*
* Kill the algorithm so that it is inactive. It will no longer be able to provide answers.
*/
protected def doKill(): Unit = {}
val terminationCriteria: () => EMTerminationCriteria
val targetParameters: Seq[Parameter[_]]
var sufficientStatistics: Map[Parameter[_], Seq[Double]] = Map.empty[Parameter[_], Seq[Double]]
var debug = false
protected def em(): Unit = {
//Instantiate termination criteria here.
val shouldTerminate = terminationCriteria()
if (debug) println("Entering EM loop")
while (!shouldTerminate(sufficientStatistics)) {
iteration()
}
}
protected def doMaximizationStep(parameterMapping: Map[Parameter[_], Seq[Double]]): Unit = {
for (p <- targetParameters) yield {
p.maximize(parameterMapping(p))
}
}
def iteration(): Unit = {
sufficientStatistics = doExpectationStep()
doMaximizationStep(sufficientStatistics)
if (debug) println("Completed iteration")
}
}
/**
* An EM algorithm which learns parameters incrementally
*/
trait OnlineExpectationMaximization extends Online with ExpectationMaximization {
override def doStart = {}
protected var lastIterationStatistics: Map[Parameter[_], Seq[Double]] = Map(targetParameters.map(p => p -> p.zeroSufficientStatistics): _*)
override val initial: Universe
override val transition: Function0[Universe]
protected var currentUniverse: Universe = initial
private def updateStatistics(newStatistics: Map[Parameter[_], Seq[Double]]): Map[Parameter[_], Seq[Double]] = {
Map((for (p <- paramMap.keys) yield {
val updatedStatistics = (lastIterationStatistics(p) zip newStatistics(p)).map((pair: (Double, Double)) => pair._1 + pair._2)
(p, updatedStatistics)
}).toSeq: _*)
}
/**
* Observe new evidence and perform one expectation step and one maximization step
*/
def update(evidence: Seq[NamedEvidence[_]] = Seq()): Unit = {
currentUniverse = transition()
currentUniverse.assertEvidence(evidence)
val newStatistics = doExpectationStep
val updated = updateStatistics(newStatistics)
doMaximizationStep(updated)
lastIterationStatistics = updated
}
}
/**
* An EM algorithm which learns parameters using a factored algorithm
*/
class ExpectationMaximizationWithFactors(val universe: Universe, val targetParameters: Parameter[_]*)(val terminationCriteria: () => EMTerminationCriteria) extends ExpectationMaximization {
protected def doExpectationStep(): Map[Parameter[_], Seq[Double]] = {
val algorithm = SufficientStatisticsVariableElimination(paramMap)(universe)
algorithm.start
val result = algorithm.getSufficientStatisticsForAllParameters
algorithm.kill
result
}
}
/**
* An online EM algorithm which learns parameters using a factored algorithm
*/
class OnlineExpectationMaximizationWithFactors(override val initial: Universe, override val transition: Function0[Universe], val targetParameters: Parameter[_]*)(val terminationCriteria: () => EMTerminationCriteria)
extends OnlineExpectationMaximization {
def doExpectationStep = {
val algorithm = SufficientStatisticsVariableElimination(paramMap)(currentUniverse)
algorithm.start
algorithm.stop
val newStatistics = algorithm.getSufficientStatisticsForAllParameters
algorithm.kill
newStatistics
}
}
/**
* An EM algorithm which learns parameters using an inference algorithm provided as an argument
*/
class GeneralizedEM(inferenceAlgorithmConstructor: Seq[Element[_]] => Universe => ProbQueryAlgorithm with OneTime, val universe: Universe, val targetParameters: Parameter[_]*)(val terminationCriteria: () => EMTerminationCriteria) extends ExpectationMaximization {
//Dependent universe doesn't work the same way.
protected def doExpectationStep(): Map[Parameter[_], Seq[Double]] = {
val inferenceTargets =
universe.activeElements.filter(_.isInstanceOf[Parameterized[_]]).map(_.asInstanceOf[Parameterized[_]])
val algorithm = inferenceAlgorithmConstructor(inferenceTargets)(universe)
algorithm.start()
var result: Map[Parameter[_], Seq[Double]] = Map()
for { parameter <- targetParameters } {
var stats = parameter.zeroSufficientStatistics
for {
target <- universe.directlyUsedBy(parameter)
} {
val t: Parameterized[target.Value] = target.asInstanceOf[Parameterized[target.Value]]
val distribution: Stream[(Double, target.Value)] = algorithm.distribution(t)
val newStats = t.distributionToStatistics(parameter, distribution)
stats = (stats.zip(newStats)).map(pair => pair._1 + pair._2)
}
result += parameter -> stats
}
algorithm.kill()
result
}
}
/**
* An EM algorithm which learns parameters using an inference algorithm provided as an argument
*/
class GeneralizedOnlineEM(inferenceAlgorithmConstructor: Seq[Element[_]] => Universe => ProbQueryAlgorithm with OneTime, override val initial: Universe, override val transition: Function0[Universe], val targetParameters: Parameter[_]*)(val terminationCriteria: () => EMTerminationCriteria) extends OnlineExpectationMaximization {
protected def usesParameter(l: List[Element[_]]): Map[Parameter[_], Iterable[Parameterized[_]]] = {
(l.map { x => x match { case p: Parameterized[_] => { p -> p.parameters.head } } }).groupBy(_._2).mapValues(_.map(_._1))
}
protected def doExpectationStep(): Map[Parameter[_], Seq[Double]] = {
val inferenceTargets =
currentUniverse.activeElements.filter(_.isInstanceOf[Parameterized[_]]).map(_.asInstanceOf[Parameterized[_]])
val algorithm = inferenceAlgorithmConstructor(inferenceTargets)(currentUniverse)
algorithm.start()
//println("universe: " + currentUniverse.hashCode)
var result: Map[Parameter[_], Seq[Double]] = Map()
val uses = usesParameter(inferenceTargets)
println("built map")
for { parameter <- targetParameters } {
var stats = parameter.zeroSufficientStatistics
for {
target <- uses(parameter)
} {
println("found used by...")
val t: Parameterized[target.Value] = target.asInstanceOf[Parameterized[target.Value]]
val distribution: Stream[(Double, target.Value)] = algorithm.distribution(t)
val newStats = t.distributionToStatistics(parameter, distribution)
stats = (stats.zip(newStats)).map(pair => pair._1 + pair._2)
}
result += parameter -> stats
}
algorithm.kill()
result
}
}
object EMWithBP {
private val defaultBPIterations = 10
def online(transition: () => Universe, p: Parameter[_]*)(implicit universe: Universe) = {
new GeneralizedOnlineEM((targets: Seq[Element[_]]) => (universe: Universe) => makeBP(defaultBPIterations, targets)(universe), universe, transition, p: _*)(EMTerminationCriteria.maxIterations(10))
}
def online(transition: () => Universe, p: ModelParameters)(implicit universe: Universe) = {
new GeneralizedOnlineEM((targets: Seq[Element[_]]) => (universe: Universe) => makeBP(defaultBPIterations, targets)(universe), universe, transition, p.convertToParameterList: _*)(EMTerminationCriteria.maxIterations(10))
}
private def makeBP(numIterations: Int, targets: Seq[Element[_]])(universe: Universe) = {
Factory.removeFactors()
BeliefPropagation(numIterations, targets: _*)(universe)
}
/**
* An expectation maximization algorithm using Belief Propagation for inference.
*
* @param params parameters to target with EM algorithm
*/
def apply(params: ModelParameters)(implicit universe: Universe) = {
val parameters = params.convertToParameterList
new GeneralizedEM((targets: Seq[Element[_]]) => (universe: Universe) => makeBP(defaultBPIterations, targets)(universe), universe, parameters: _*)(EMTerminationCriteria.maxIterations(10))
}
/**
* An expectation maximization algorithm using Belief Propagation for inference.
* @param emIterations number of iterations of the EM algorithm
* @param bpIterations number of iterations of the BP algorithm
* @param params parameters to target with EM algorithm
*/
def apply(emIterations: Int, bpIterations: Int, p: ModelParameters)(implicit universe: Universe) = {
val parameters = p.convertToParameterList
new GeneralizedEM((targets: Seq[Element[_]]) => (universe: Universe) => makeBP(bpIterations, targets)(universe), universe, parameters: _*)(EMTerminationCriteria.maxIterations(emIterations))
}
/**
* An expectation maximization algorithm using Belief Propagation for inference.
* @param params parameters to target with EM algorithm
*/
def apply(params: Parameter[_]*)(implicit universe: Universe) =
new GeneralizedEM((targets: Seq[Element[_]]) => (universe: Universe) => makeBP(defaultBPIterations, targets)(universe), universe, params: _*)(EMTerminationCriteria.maxIterations(10))
/**
* An expectation maximization algorithm using Belief Propagation for inference.
* @param emIterations number of iterations of the EM algorithm
* @param bpIterations number of iterations of the BP algorithm
* @param params parameters to target with EM algorithm
*/
def apply(emIterations: Int, bpIterations: Int, params: Parameter[_]*)(implicit universe: Universe) =
new GeneralizedEM((targets: Seq[Element[_]]) => (universe: Universe) => makeBP(bpIterations, targets)(universe), universe, params: _*)(EMTerminationCriteria.maxIterations(emIterations))
/**
* An expectation maximization algorithm using Belief Propagation for inference.
* @param terminationCriteria criteria for stopping the EM algorithm
* @param bpIterations number of iterations of the BP algorithm
* @param params parameters to target with EM algorithm
*/
def apply(terminationCriteria: () => EMTerminationCriteria, bpIterations: Int, params: Parameter[_]*)(implicit universe: Universe) =
new GeneralizedEM((targets: Seq[Element[_]]) => (universe: Universe) => makeBP(bpIterations, targets)(universe), universe, params: _*)(terminationCriteria)
}
object EMWithImportance {
private val defaultImportanceParticles = 100000
private def makeImportance(numParticles: Int, targets: Seq[Element[_]])(universe: Universe) = {
Importance(numParticles, targets: _*)(universe)
}
def online(transition: () => Universe, p: Parameter[_]*)(implicit universe: Universe) = {
new GeneralizedOnlineEM((targets: Seq[Element[_]]) => (universe: Universe) => makeImportance(defaultImportanceParticles, targets)(universe), universe, transition, p: _*)(EMTerminationCriteria.maxIterations(10))
}
def online(transition: () => Universe, p: ModelParameters)(implicit universe: Universe) = {
new GeneralizedOnlineEM((targets: Seq[Element[_]]) => (universe: Universe) => makeImportance(defaultImportanceParticles, targets)(universe), universe, transition, p.convertToParameterList: _*)(EMTerminationCriteria.maxIterations(10))
}
/**
* An expectation maximization algorithm using importance sampling for inference.
*
* @param emIterations number of iterations of the EM algorithm
* @param importanceParticles number of particles of the importance sampling algorithm
*/
def apply(emIterations: Int, importanceParticles: Int, p: Parameter[_]*)(implicit universe: Universe) =
new GeneralizedEM((targets: Seq[Element[_]]) => (universe: Universe) => makeImportance(importanceParticles, targets)(universe), universe, p: _*)(EMTerminationCriteria.maxIterations(emIterations))
/**
* An expectation maximization algorithm using importance sampling for inference.
*
* @param terminationCriteria criteria for stopping the EM algorithm
* @param importanceParticles number of particles of the importance sampling algorithm
*/
def apply(terminationCriteria: () => EMTerminationCriteria, importanceParticles: Int, p: Parameter[_]*)(implicit universe: Universe) =
new GeneralizedEM((targets: Seq[Element[_]]) => (universe: Universe) => makeImportance(importanceParticles, targets)(universe), universe, p: _*)(terminationCriteria)
/**
* An expectation maximization algorithm using importance sampling for inference.
*
* @param params parameters to target with EM algorithm
*/
def apply(params: ModelParameters)(implicit universe: Universe) = {
val parameters = params.convertToParameterList
new GeneralizedEM((targets: Seq[Element[_]]) => (universe: Universe) => makeImportance(defaultImportanceParticles, targets)(universe), universe, parameters: _*)(EMTerminationCriteria.maxIterations(10))
}
/**
* An expectation maximization algorithm using importance sampling for inference.
*
* @param emIterations number of iterations of the EM algorithm
* @param importanceParticles number of particles of the importance sampling algorithm
* @param params parameters to target with EM algorithm
*/
def apply(emIterations: Int, importanceParticles: Int, params: ModelParameters)(implicit universe: Universe) = {
val parameters = params.convertToParameterList
new GeneralizedEM((targets: Seq[Element[_]]) => (universe: Universe) => makeImportance(importanceParticles, targets)(universe), universe, parameters: _*)(EMTerminationCriteria.maxIterations(emIterations))
}
/**
* An expectation maximization algorithm using importance sampling for inference.
*
* @param terminationCriteria criteria for stopping the EM algorithm
* @param importanceParticles number of particles of the importance sampling algorithm
* @param params parameters to target with EM algorithm
*/
def apply(terminationCriteria: () => EMTerminationCriteria, importanceParticles: Int, params: ModelParameters)(implicit universe: Universe) = {
val parameters = params.convertToParameterList
new GeneralizedEM((targets: Seq[Element[_]]) => (universe: Universe) => makeImportance(importanceParticles, targets)(universe), universe, parameters: _*)(terminationCriteria)
}
}
object EMWithMH {
private val defaultMHParticles = 100000
private def makeImportance(numParticles: Int, targets: Seq[Element[_]])(universe: Universe) = {
Importance(numParticles, targets: _*)(universe)
}
def online(transition: () => Universe, p: Parameter[_]*)(implicit universe: Universe) = {
new GeneralizedOnlineEM((targets: Seq[Element[_]]) => (universe: Universe) => makeMH(defaultMHParticles, ProposalScheme.default(universe), targets)(universe), universe, transition, p: _*)(EMTerminationCriteria.maxIterations(10))
}
def online(transition: () => Universe, p: ModelParameters)(implicit universe: Universe) = {
new GeneralizedOnlineEM((targets: Seq[Element[_]]) => (universe: Universe) => makeMH(defaultMHParticles, ProposalScheme.default(universe), targets)(universe), universe, transition, p.convertToParameterList: _*)(EMTerminationCriteria.maxIterations(10))
}
private def makeMH(numParticles: Int, proposalScheme: ProposalScheme, targets: Seq[Element[_]])(universe: Universe) = {
MetropolisHastings(numParticles, proposalScheme, targets: _*)(universe)
}
/**
* An expectation maximization algorithm using Metropolis Hastings for inference.
*
* @param emIterations number of iterations of the EM algorithm
* @param mhParticles number of particles of the MH algorithm
*/
def apply(emIterations: Int, mhParticles: Int, p: Parameter[_]*)(implicit universe: Universe) =
new GeneralizedEM((targets: Seq[Element[_]]) => (universe: Universe) => makeMH(mhParticles, ProposalScheme.default(universe), targets)(universe), universe, p: _*)(EMTerminationCriteria.maxIterations(emIterations))
/**
* An expectation maximization algorithm using Metropolis Hastings for inference.
* @param terminationCriteria criteria for stopping the EM algorithm
* @param mhParticles number of particles of the MH algorithm
* @param params parameters to target in EM algorithm
*/
def apply(terminationCriteria: () => EMTerminationCriteria, mhParticles: Int, params: Parameter[_]*)(implicit universe: Universe) =
new GeneralizedEM((targets: Seq[Element[_]]) => (universe: Universe) => makeMH(mhParticles, ProposalScheme.default(universe), targets)(universe), universe, params: _*)(terminationCriteria)
/**
* An expectation maximization algorithm using Metropolis Hastings for inference.
*
* @param emIterations number of iterations of the EM algorithm
* @param mhParticles number of particles of the MH algorithm
* @param proposalScheme proposal scheme for MH algorithm
* @param params parameters to target in EM algorithm
*/
def apply(emIterations: Int, mhParticles: Int, proposalScheme: ProposalScheme, params: Parameter[_]*)(implicit universe: Universe) =
new GeneralizedEM((targets: Seq[Element[_]]) => (universe: Universe) => makeMH(mhParticles, proposalScheme, targets)(universe), universe, params: _*)(EMTerminationCriteria.maxIterations(emIterations))
/**
* An expectation maximization algorithm using Metropolis Hastings for inference.
* @param params parameters to target in EM algorithm
*/
def apply(p: ModelParameters)(implicit universe: Universe) = {
val parameters = p.convertToParameterList
new GeneralizedEM((targets: Seq[Element[_]]) => (universe: Universe) => makeMH(defaultMHParticles, ProposalScheme.default(universe), targets)(universe), universe, parameters: _*)(EMTerminationCriteria.maxIterations(10))
}
/**
* An expectation maximization algorithm using Metropolis Hastings for inference.
*
* @param emIterations number of iterations of the EM algorithm
* @param mhParticles number of particles of the MH algorithm
* @param proposalScheme proposal scheme for MH algorithm
* @param params parameters to target in EM algorithm
*/
def apply(emIterations: Int, mhParticles: Int, proposalScheme: ProposalScheme, p: ModelParameters)(implicit universe: Universe) = {
val parameters = p.convertToParameterList
new GeneralizedEM((targets: Seq[Element[_]]) => (universe: Universe) => makeMH(mhParticles, proposalScheme, targets)(universe), universe, parameters: _*)(EMTerminationCriteria.maxIterations(emIterations))
}
/**
* An expectation maximization algorithm using Metropolis Hastings for inference.
*
* @param terminationCriteria criteria for stopping the EM algorithm
* @param mhParticles number of particles of the MH algorithm
* @param proposalScheme proposal scheme for MH algorithm
* @param params parameters to target in EM algorithm
*/
def apply(terminationCriteria: () => EMTerminationCriteria, mhParticles: Int, proposalScheme: ProposalScheme, params: ModelParameters)(implicit universe: Universe) = {
val parameters = params.convertToParameterList
new GeneralizedEM((targets: Seq[Element[_]]) => (universe: Universe) => makeMH(mhParticles, proposalScheme, targets)(universe), universe, parameters: _*)(terminationCriteria)
}
}
object EMWithVE {
/**
* An expectation maximization algorithm which will run for the default of 10 iterations.
*/
def apply(p: Parameter[_]*)(implicit universe: Universe) =
new ExpectationMaximizationWithFactors(universe, p: _*)(EMTerminationCriteria.maxIterations(10))
/**
* An expectation maximization algorithm which will run for the default of 10 iterations.
*/
def apply(p: ModelParameters)(implicit universe: Universe) =
new ExpectationMaximizationWithFactors(universe, p.convertToParameterList: _*)(EMTerminationCriteria.maxIterations(10))
def online(transition: () => Universe, p: Parameter[_]*)(implicit universe: Universe) = {
new OnlineExpectationMaximizationWithFactors(universe, transition, p: _*)(EMTerminationCriteria.maxIterations(10))
}
def online(transition: () => Universe, p: ModelParameters)(implicit universe: Universe) = {
new OnlineExpectationMaximizationWithFactors(universe, transition, p.convertToParameterList: _*)(EMTerminationCriteria.maxIterations(10))
}
/**
* An expectation maximization algorithm which will run for the number of iterations specified.
*/
def apply(iterations: Int, p: ModelParameters)(implicit universe: Universe) =
new ExpectationMaximizationWithFactors(universe, p.convertToParameterList: _*)(EMTerminationCriteria.maxIterations(iterations))
/**
* An expectation maximization algorithm which will run for the number of iterations specified.
*/
def apply(iterations: Int, p: Parameter[_]*)(implicit universe: Universe) =
new ExpectationMaximizationWithFactors(universe, p: _*)(EMTerminationCriteria.maxIterations(iterations))
/**
* An expectation maximization algorithm which will stop according to a user specified termination criteria.
*/
def apply(terminationCriteria: () => EMTerminationCriteria, p: Parameter[_]*)(implicit universe: Universe) =
new ExpectationMaximizationWithFactors(universe, p: _*)(terminationCriteria)
}
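// --- Hedged usage sketch (not part of the original file) ---
// A minimal example of driving one of the factory objects above. It assumes the
// standard Figaro element library (Beta, Flip, Universe) and that a learned Beta
// parameter exposes MAPValue; exact signatures may differ across Figaro versions.
import com.cra.figaro.library.atomic.continuous.Beta
object EMWithVEUsageSketch {
  def main(args: Array[String]): Unit = {
    implicit val universe: Universe = Universe.createNew()
    // A Beta parameter modelling the bias of a coin, plus a few observed flips.
    val bias = Beta(1, 1)
    Seq(true, true, false, true).foreach { outcome =>
      val flip = Flip(bias) // compound Flip parameterized by the Beta
      flip.observe(outcome)
    }
    // Run EM with variable elimination for 10 iterations, then read the estimate.
    val algorithm = EMWithVE(10, bias)
    algorithm.start()
    println(s"Learned bias (MAP): ${bias.MAPValue}")
    algorithm.kill()
  }
}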
|
lfkellogg/figaro
|
Figaro/src/main/scala/com/cra/figaro/algorithm/learning/GeneralizedEM.scala
|
Scala
|
bsd-3-clause
| 22,764
|
package builder
import me.apidoc.swagger.SwaggerServiceValidator
import me.apidoc.avro.AvroIdlServiceValidator
import api_json.{ApiJsonServiceValidator, ServiceJsonServiceValidator}
import lib.{ServiceConfiguration, ServiceValidator}
import core.{ServiceFetcher, VersionMigration}
import com.bryzek.apidoc.api.v0.models.{Original, OriginalType}
import com.bryzek.apidoc.spec.v0.models.Service
object OriginalValidator {
// TODO: if valid, need to use ServiceSpecValidator.scala
def apply(
config: ServiceConfiguration,
original: Original,
fetcher: ServiceFetcher,
migration: VersionMigration = VersionMigration(internal = false)
): ServiceValidator[Service] = {
val validator = original.`type` match {
case OriginalType.ApiJson => {
ApiJsonServiceValidator(config, original.data, fetcher, migration)
}
case OriginalType.AvroIdl => {
AvroIdlServiceValidator(config, original.data)
}
case OriginalType.ServiceJson => {
ServiceJsonServiceValidator(original.data)
}
case OriginalType.SwaggerJson => {
SwaggerServiceValidator(config, original.data)
}
case OriginalType.UNDEFINED(other) => {
sys.error(s"Invalid original type[$other]")
}
}
WithServiceSpecValidator(validator)
}
case class WithServiceSpecValidator(underlying: ServiceValidator[Service]) extends ServiceValidator[Service] {
override def validate(): Either[Seq[String], Service] = {
underlying.validate() match {
case Left(errors) => Left(errors)
case Right(service) => {
ServiceSpecValidator(service).errors match {
case Nil => Right(service)
case errors => Left(errors)
}
}
}
}
}
}
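// --- Hedged usage sketch (not part of the original file) ---
// Shows how a caller would typically combine the factory above with the
// Either-based validate(). The three inputs are assumed to be supplied by the
// surrounding application, and reading `service.name` assumes the generated
// spec model exposes a name field.
object OriginalValidatorUsageSketch {
  def describe(
    config: ServiceConfiguration,
    original: Original,
    fetcher: ServiceFetcher
  ): String = {
    OriginalValidator(config, original, fetcher).validate() match {
      case Left(errors) => s"Invalid original: ${errors.mkString(", ")}"
      case Right(service) => s"Valid service: ${service.name}"
    }
  }
}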
|
Seanstoppable/apidoc
|
core/src/main/scala/core/builder/OriginalValidator.scala
|
Scala
|
mit
| 1,779
|
/*
* Copyright 2015 Dennis Vriend
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.test.week2.observer
trait Subscriber {
def handler(pub: Publisher): Unit
}
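// --- Hedged usage sketch (not part of the original file) ---
// A minimal Subscriber that just counts how often it is notified. Publisher is
// defined elsewhere in this package; nothing beyond the handler signature above
// is assumed about it here.
class CountingSubscriber extends Subscriber {
  private var notifications = 0
  def handler(pub: Publisher): Unit = notifications += 1
  def count: Int = notifications
}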
|
dnvriend/reactive-programming
|
src/test/scala/com/test/week2/observer/Subscriber.scala
|
Scala
|
apache-2.0
| 688
|
import sbt._
import sbt.Keys._
object Dependencies {
// Some common dependencies here so they don't need to be declared over and over
val specsBuild = "org.specs2" %% "specs2" % "2.1.1"
val specsSbt = specsBuild
val scalaIoFile = "com.github.scala-incubator.io" %% "scala-io-file" % "0.4.2"
val guava = "com.google.guava" % "guava" % "14.0.1"
// Needed by guava
val findBugs = "com.google.code.findbugs" % "jsr305" % "2.0.1"
val jdbcDeps = Seq(
"com.jolbox" % "bonecp" % "0.8.0.RELEASE" exclude ("com.google.guava", "guava"),
// bonecp needs it, but due to guava's stupid version numbering of older versions ("r08"), we need to explicitly
// declare a dependency on the newer version so that ivy can know which one to include
guava,
"com.h2database" % "h2" % "1.3.172",
"tyrex" % "tyrex" % "1.0.1",
specsBuild % "test")
val ebeanDeps = Seq(
"org.avaje.ebeanorm" % "avaje-ebeanorm" % "3.2.2" exclude ("javax.persistence", "persistence-api"),
"org.avaje.ebeanorm" % "avaje-ebeanorm-agent" % "3.2.1" exclude ("javax.persistence", "persistence-api")
)
val jpaDeps = Seq(
"org.hibernate.javax.persistence" % "hibernate-jpa-2.0-api" % "1.0.1.Final")
val javaDeps = Seq(
"org.yaml" % "snakeyaml" % "1.12",
"org.hibernate" % "hibernate-validator" % "5.0.1.Final",
("org.springframework" % "spring-context" % "3.2.3.RELEASE" notTransitive ())
.exclude("org.springframework", "spring-aop")
.exclude("org.springframework", "spring-beans")
.exclude("org.springframework", "spring-core")
.exclude("org.springframework", "spring-expression")
.exclude("org.springframework", "spring-asm"),
("org.springframework" % "spring-core" % "3.2.3.RELEASE" notTransitive ())
.exclude("org.springframework", "spring-asm")
.exclude("commons-logging", "commons-logging"),
("org.springframework" % "spring-beans" % "3.2.3.RELEASE" notTransitive ())
.exclude("org.springframework", "spring-core"),
"org.javassist" % "javassist" % "3.18.0-GA",
("org.reflections" % "reflections" % "0.9.8" notTransitive ())
.exclude("com.google.guava", "guava")
.exclude("javassist", "javassist"),
guava,
findBugs,
"javax.servlet" % "javax.servlet-api" % "3.0.1",
specsBuild % "test")
val runtime = Seq(
"io.netty" % "netty" % "3.7.1.Final",
"com.typesafe.netty" % "netty-http-pipelining" % "1.1.2",
"org.slf4j" % "slf4j-api" % "1.7.5",
"org.slf4j" % "jul-to-slf4j" % "1.7.5",
"org.slf4j" % "jcl-over-slf4j" % "1.7.5",
"ch.qos.logback" % "logback-core" % "1.0.13",
"ch.qos.logback" % "logback-classic" % "1.0.13",
scalaIoFile,
"com.typesafe.akka" %% "akka-actor" % "2.2.0",
"com.typesafe.akka" %% "akka-slf4j" % "2.2.0",
"org.scala-stm" %% "scala-stm" % "0.7",
"joda-time" % "joda-time" % "2.2",
"org.joda" % "joda-convert" % "1.3.1",
"org.apache.commons" % "commons-lang3" % "3.1",
("com.ning" % "async-http-client" % "1.7.18" notTransitive ())
.exclude("org.jboss.netty", "netty"),
"oauth.signpost" % "signpost-core" % "1.2.1.2",
"oauth.signpost" % "signpost-commonshttp4" % "1.2.1.2",
"com.fasterxml.jackson.core" % "jackson-core" % "2.2.2",
"com.fasterxml.jackson.core" % "jackson-annotations" % "2.2.2",
"com.fasterxml.jackson.core" % "jackson-databind" % "2.2.2",
"xerces" % "xercesImpl" % "2.11.0",
"javax.transaction" % "jta" % "1.1",
specsBuild % "test",
"org.mockito" % "mockito-all" % "1.9.5" % "test",
"com.novocode" % "junit-interface" % "0.10" % "test" exclude("junit", "junit-dep"),
("org.fluentlenium" % "fluentlenium-festassert" % "0.9.0" % "test")
.exclude("org.jboss.netty", "netty"),
"org.scala-lang" % "scala-reflect" % BuildSettings.buildScalaVersion,
"org.databene" % "contiperf" % "2.2.0" % "test",
"junit" % "junit" % "4.11" % "test")
val link = Seq(
"org.javassist" % "javassist" % "3.18.0-GA")
val routersCompilerDependencies = Seq(
scalaIoFile,
specsSbt % "test"
)
val templatesCompilerDependencies = Seq(
scalaIoFile,
specsSbt % "test"
)
val sbtDependencies = Seq(
"org.scala-lang" % "scala-reflect" % BuildSettings.buildScalaVersionForSbt % "provided",
"com.typesafe" % "config" % "1.0.2",
"org.mozilla" % "rhino" % "1.7R4",
("com.google.javascript" % "closure-compiler" % "v20130603")
.exclude("args4j", "args4j")
.exclude("com.google.protobuf", "protobuf-java")
.exclude("com.google.code.findbugs", "jsr305"),
guava,
scalaIoFile,
"org.avaje.ebeanorm" % "avaje-ebeanorm-agent" % "3.2.1" exclude ("javax.persistence", "persistence-api"),
"com.h2database" % "h2" % "1.3.172",
"org.javassist" % "javassist" % "3.18.0-GA",
"net.contentobjects.jnotify" % "jnotify" % "0.94",
"com.typesafe.sbteclipse" % "sbteclipse-plugin" % "2.4.0" extra("sbtVersion" -> BuildSettings.buildSbtVersionBinaryCompatible, "scalaVersion" -> BuildSettings.buildScalaBinaryVersionForSbt),
"com.github.mpeltonen" % "sbt-idea" % "1.5.1" extra("sbtVersion" -> BuildSettings.buildSbtVersionBinaryCompatible, "scalaVersion" -> BuildSettings.buildScalaBinaryVersionForSbt),
"com.typesafe.sbt" % "sbt-native-packager" % "0.6.4" extra("sbtVersion" -> BuildSettings.buildSbtVersionBinaryCompatible, "scalaVersion" -> BuildSettings.buildScalaBinaryVersionForSbt),
specsSbt
)
val playDocsDependencies = Seq(
"com.typesafe.play" %% "play-doc" % "1.0.3"
)
val consoleDependencies = Seq(
scalaIoFile,
"org.scala-sbt" % "launcher-interface" % BuildSettings.buildSbtVersion,
"jline" % "jline" % "2.11"
)
val templatesDependencies = Seq(
scalaIoFile,
specsBuild % "test")
val iterateesDependencies = Seq(
"org.scala-stm" %% "scala-stm" % "0.7",
"com.typesafe" % "config" % "1.0.2",
specsBuild % "test")
val jsonDependencies = Seq(
"joda-time" % "joda-time" % "2.2",
"org.joda" % "joda-convert" % "1.3.1",
"com.fasterxml.jackson.core" % "jackson-annotations" % "2.2.2",
"com.fasterxml.jackson.core" % "jackson-core" % "2.2.2",
"com.fasterxml.jackson.core" % "jackson-databind" % "2.2.2",
"org.scala-lang" % "scala-reflect" % BuildSettings.buildScalaVersion,
specsBuild % "test")
val testDependencies = Seq(
"junit" % "junit" % "4.11",
specsBuild,
"com.novocode" % "junit-interface" % "0.10" exclude("junit", "junit-dep"),
guava,
findBugs,
("org.fluentlenium" % "fluentlenium-festassert" % "0.8.0")
.exclude("org.jboss.netty", "netty")
.exclude("com.google.guava","guava"))
val playCacheDeps = Seq(
"net.sf.ehcache" % "ehcache-core" % "2.6.6",
specsBuild % "test"
)
}
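// --- Hedged usage sketch (not part of the original file) ---
// Shows how one of the dependency groups above would typically be attached to a
// project elsewhere in this sbt build; the project ids and base directories are
// hypothetical, not taken from the real Play build definition.
object DependenciesUsageSketch {
  import Dependencies._
  lazy val runtimeProject: Project = Project("Play-Runtime-Sketch", file("src/play"))
    .settings(libraryDependencies ++= runtime)
  lazy val jsonProject: Project = Project("Play-Json-Sketch", file("src/play-json"))
    .settings(libraryDependencies ++= jsonDependencies)
}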
|
vangav/vos_backend
|
play-2.2.6/framework/project/Dependencies.scala
|
Scala
|
mit
| 6,799
|
/*******************************************************************************
Copyright (c) 2013-2014, KAIST, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing.models.DOMHtml
import scala.collection.mutable.{Map=>MMap, HashMap=>MHashMap}
import kr.ac.kaist.jsaf.analysis.typing.domain._
import kr.ac.kaist.jsaf.analysis.typing.domain.{BoolFalse => F, BoolTrue => T}
import kr.ac.kaist.jsaf.analysis.typing.models._
import org.w3c.dom.Node
import org.w3c.dom.Element
import kr.ac.kaist.jsaf.analysis.cfg.CFG
import kr.ac.kaist.jsaf.analysis.typing.models.AbsConstValue
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
import kr.ac.kaist.jsaf.Shell
object HTMLLegendElement extends DOM {
private val name = "HTMLLegendElement"
/* predefined locations */
val loc_cons = newSystemRecentLoc(name + "Cons")
val loc_proto = newSystemRecentLoc(name + "Proto")
val loc_ins = newSystemRecentLoc(name + "Ins")
/* constructor */
private val prop_cons: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Function")))),
("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
("@hasinstance", AbsConstValue(PropValueNullTop)),
("length", AbsConstValue(PropValue(ObjectValue(Value(AbsNumber.alpha(0)), F, F, F)))),
("prototype", AbsConstValue(PropValue(ObjectValue(Value(loc_proto), F, F, F))))
)
/* instance */
private val prop_ins: List[(String, AbsProperty)] =
HTMLElement.getInsList2() ++ List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
("@proto", AbsConstValue(PropValue(ObjectValue(loc_proto, F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
// DOM Level 1
("form", AbsConstValue(PropValue(ObjectValue(Value(HTMLFormElement.loc_ins), F, T, T)))),
("accessKey", AbsConstValue(PropValue(ObjectValue(StrTop, T, T, T)))),
("align", AbsConstValue(PropValue(ObjectValue(StrTop, T, T, T))))
)
/* prototype */
private val prop_proto: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
("@proto", AbsConstValue(PropValue(ObjectValue(Value(HTMLElement.loc_proto), F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue)))
)
/* global */
private val prop_global: List[(String, AbsProperty)] = List(
(name, AbsConstValue(PropValue(ObjectValue(loc_cons, T, F, T))))
)
def getInitList(): List[(Loc, List[(String, AbsProperty)])] = if(Shell.params.opt_Dommodel2) List(
(loc_cons, prop_cons), (loc_proto, prop_proto), (GlobalLoc, prop_global), (loc_ins, prop_ins)
) else List(
(loc_cons, prop_cons), (loc_proto, prop_proto), (GlobalLoc, prop_global) )
def getSemanticMap(): Map[String, SemanticFun] = {
Map()
}
def getPreSemanticMap(): Map[String, SemanticFun] = {
Map()
}
def getDefMap(): Map[String, AccessFun] = {
Map()
}
def getUseMap(): Map[String, AccessFun] = {
Map()
}
/* semantics */
// no function
/* instance */
override def getInstance(cfg: CFG): Option[Loc] = Some(newRecentLoc())
/* list of properties in the instance object */
override def getInsList(node: Node): List[(String, PropValue)] = node match {
case e: Element =>
// This object has all properties of the HTMLElement object
HTMLElement.getInsList(node) ++ List(
("@class", PropValue(AbsString.alpha("Object"))),
("@proto", PropValue(ObjectValue(loc_proto, F, F, F))),
("@extensible", PropValue(BoolTrue)),
// DOM Level 1
("accessKey", PropValue(ObjectValue(AbsString.alpha(e.getAttribute("accessKey")), T, T, T))),
("align", PropValue(ObjectValue(AbsString.alpha(e.getAttribute("align")), T, T, T))))
// TODO: 'form' in DOM Level 1
case _ => {
System.err.println("* Warning: " + node.getNodeName + " cannot have instance objects.")
List()
}
}
def getInsList(accessKey: PropValue, align: PropValue, xpath: PropValue): List[(String, PropValue)] = List(
("@class", PropValue(AbsString.alpha("Object"))),
("@proto", PropValue(ObjectValue(loc_proto, F, F, F))),
("@extensible", PropValue(BoolTrue)),
// DOM Level 1
("accessKey", accessKey),
("align", align),
("xpath", xpath)
)
override def default_getInsList(): List[(String, PropValue)] = {
val accessKey = PropValue(ObjectValue(AbsString.alpha(""), T, T, T))
val align = PropValue(ObjectValue(AbsString.alpha(""), T, T, T))
val xpath = PropValue(ObjectValue(AbsString.alpha(""), F, F, F))
// This object has all properties of the HTMLElement object
HTMLElement.default_getInsList :::
getInsList(accessKey, align, xpath)
}
}
|
darkrsw/safe
|
src/main/scala/kr/ac/kaist/jsaf/analysis/typing/models/DOMHtml/HTMLLegendElement.scala
|
Scala
|
bsd-3-clause
| 5,043
|
package controllers
import javax.inject.Inject
import models.Table
import play.api.i18n.MessagesApi
import scala.concurrent.ExecutionContext.Implicits.global
class Tables @Inject() (val messagesApi: MessagesApi) extends api.ApiController {
def list = ApiAction { implicit request =>
Table.list.flatMap { list =>
ok(list)
}
}
def info(id: Long) = ApiAction { implicit request =>
Table.findById(id).flatMap { table =>
ok(table)
}
}
def goal(id: Long, isPlayer1: Boolean) = SecuredApiAction { implicit request =>
Table.increment(id, isPlayer1, 1).flatMap { table =>
ok(table)
}
}
def redact(id: Long, isPlayer1: Boolean) = SecuredApiAction { implicit request =>
Table.increment(id, isPlayer1, -1).flatMap { table =>
ok(table)
}
}
def reset(id: Long) = SecuredApiAction { implicit request =>
Table.reset(id).flatMap { table =>
ok(table)
}
}
def setOnline(id: Long) = SecuredApiAction { implicit request =>
Table.setOnline(id, isOnline = true).flatMap { table =>
ok(table)
}
}
def setOffline(id: Long) = SecuredApiAction { implicit request =>
Table.setOnline(id, isOnline = false).flatMap { table =>
ok(table)
}
}
}
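// --- Hedged usage sketch (not part of the original file) ---
// Typical conf/routes entries wiring the actions above; the URL shapes and HTTP
// verbs are assumptions for illustration, not taken from the real routes file.
//
//   GET   /tables                        controllers.Tables.list
//   GET   /tables/:id                    controllers.Tables.info(id: Long)
//   PUT   /tables/:id/goal/:isPlayer1    controllers.Tables.goal(id: Long, isPlayer1: Boolean)
//   PUT   /tables/:id/redact/:isPlayer1  controllers.Tables.redact(id: Long, isPlayer1: Boolean)
//   PUT   /tables/:id/reset              controllers.Tables.reset(id: Long)
//   PUT   /tables/:id/online             controllers.Tables.setOnline(id: Long)
//   PUT   /tables/:id/offline            controllers.Tables.setOffline(id: Long)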
|
davidgraig/foosball
|
server/app/controllers/Tables.scala
|
Scala
|
mit
| 1,246
|
package japgolly.scalajs.react.test.raw
import scala.scalajs.js
import scala.scalajs.js.annotation._
import japgolly.scalajs.react.raw._
/** https://facebook.github.io/react/docs/test-utils.html */
@JSImport("react-dom/test-utils", JSImport.Namespace, "ReactTestUtils")
@js.native
object ReactTestUtils extends ReactTestUtils
@js.native
trait ReactTestUtils extends js.Object {
final val Simulate: Simulate = js.native
/** Render a component into a detached DOM node in the document. This function requires a DOM. */
final def renderIntoDocument(element: React.Element): React.ComponentUntyped = js.native
/**
* Pass a mocked component module to this method to augment it with useful methods that allow it to be used as a dummy
* React component. Instead of rendering as usual, the component will become a simple <div> (or other tag if
* mockTagName is provided) containing any provided children.
*/
final def mockComponent[P <: js.Object, S <: js.Object](c: React.ComponentClass[P, S], mockTagName: String = js.native): React.ComponentClass[P, S] = js.native
final type Mounted = React.ComponentUntyped
/** Returns true if instance is an instance of a React componentClass. */
final def isComponentOfType(instance: React.Element, c: React.ComponentClassUntyped): Boolean = js.native
/** Returns true if instance is a DOM component (such as a <div> or <span>). */
final def isDOMComponent(instance: React.Element): Boolean = js.native
/** Returns true if instance is a composite component (created with React.createClass()) */
final def isCompositeComponent(instance: React.Element): Boolean = js.native
/** The combination of [[isComponentOfType()]] and [[isCompositeComponent()]]. */
final def isCompositeComponentWithType(instance: React.Element, c: React.ComponentClassUntyped): Boolean = js.native
/**
* Traverse all components in tree and accumulate all components where test(component) is true.
* This is not that useful on its own, but it's used as a primitive for other test utils.
*/
final def findAllInRenderedTree(tree: Mounted, test: js.Function1[Mounted, Boolean]): js.Array[Mounted] = js.native
/**
* Finds all instance of components in the rendered tree that are DOM components with the class name
* matching className.
*/
final def scryRenderedDOMComponentsWithClass(tree: Mounted, className: String): js.Array[Mounted] = js.native
/**
* Like [[scryRenderedDOMComponentsWithClass()]] but expects there to be one result, and returns that one result, or
* throws exception if there is any other number of matches besides one.
*/
final def findRenderedDOMComponentWithClass(tree: Mounted, className: String): Mounted = js.native
/**
* Finds all instance of components in the rendered tree that are DOM components with the tag name
* matching tagName.
*/
final def scryRenderedDOMComponentsWithTag(tree: Mounted, tagName: String): js.Array[Mounted] = js.native
/**
* Like [[scryRenderedDOMComponentsWithTag()]] but expects there to be one result, and returns that one result, or
* throws exception if there is any other number of matches besides one.
*/
final def findRenderedDOMComponentWithTag(tree: Mounted, tagName: String): Mounted = js.native
/** Finds all instances of components with type equal to componentClass. */
final def scryRenderedComponentsWithType(tree: Mounted, c: React.ComponentClassUntyped): js.Array[Mounted] = js.native
/**
* Same as [[scryRenderedComponentsWithType()]] but expects there to be one result and returns that one result, or throws
* exception if there is any other number of matches besides one.
*/
final def findRenderedComponentWithType(tree: Mounted, c: React.ComponentClassUntyped): Mounted = js.native
}
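// --- Hedged usage sketch (not part of the original facade) ---
// Shows how the facade above is typically driven from a Scala.js test. The element
// is assumed to be built by the caller with the higher-level scalajs-react API, and
// the test must run with a DOM available (e.g. jsdom).
object ReactTestUtilsUsageSketch {
  def renderAndInspect(element: React.Element): Unit = {
    // Mount the element into a detached DOM node in the document.
    val mounted = ReactTestUtils.renderIntoDocument(element)
    // Look up the single rendered <div>; throws if zero or several divs match.
    val div = ReactTestUtils.findRenderedDOMComponentWithTag(mounted, "div")
    println(s"Rendered tree contains a <div>: $div")
  }
}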
|
matthughes/scalajs-react
|
test/src/main/scala/japgolly/scalajs/react/test/raw/ReactTestUtils.scala
|
Scala
|
apache-2.0
| 3,818
|
package testcases
object UnificationTest {
sealed abstract class Tree
case class Leaf() extends Tree
case class Node(left: Tree, value: Int, right: Tree) extends Tree
// Proved by unifier
def mkTree(a: Int, b: Int, c: Int) = {
Node(Node(Leaf(), a, Leaf()), b, Node(Leaf(), c, Leaf()))
//Node(Leaf(), b, Node(Leaf(), c, Leaf()))
} ensuring ( res => {
res.left != Leaf() &&
res.value == b &&
res.right == Node(Leaf(), c, Leaf())
})
sealed abstract class Term
case class F(t1: Term, t2: Term, t3: Term, t4: Term) extends Term
case class G(s1: Term, s2: Term) extends Term
case class H(r1: Term, r2: Term) extends Term
case class A() extends Term
case class B() extends Term
def examplePage268(x1: Term, x2: Term, x3: Term, x4: Term, x5: Term) = {
F(G(H(A(), x5), x2), x1, H(A(), x4), x4)
} //ensuring ( _ == F(x1, G(x2, x3), x2, B()) )
case class Tuple3(_1: Term, _2: Term, _3: Term)
def examplePage269(x1: Term, x2: Term, x3: Term, x4: Term) = {
Tuple3(H(x1, x1), H(x2, x2), H(x3, x3))
} /*ensuring ( res => {
x2 == res._1 &&
x3 == res._2 &&
x4 == res._3
})*/
// Cannot be solved yet, due to the presence of an if expression
def insert(tree: Tree, value: Int) : Node = (tree match {
case Leaf() => Node(Leaf(), value, Leaf())
case n @ Node(l, v, r) => if(v < value) {
Node(l, v, insert(r, value))
} else if(v > value) {
Node(insert(l, value), v, r)
} else {
n
}
}) ensuring(_ != Leaf())
}
|
epfl-lara/leon
|
testcases/graveyard/UnificationTest.scala
|
Scala
|
gpl-3.0
| 1,579
|
package fpinscala.datastructures
trait L[+A]
case object N extends L[Nothing]
case class C[+A](head: A, tail: L[A]) extends L[A] {
override def toString: String = {
@annotation.tailrec
def loop(cons: L[A], string: String, ending: String): String = {
cons match {
case N =>
string + ending
case C(head, N) =>
val prepend = if(string.isEmpty) string else s"$string, "
loop(N, s"${prepend}C($head, N)", ending)
case C(head, tail) =>
val prepend = if(string.isEmpty) string else s"$string, "
loop(tail, s"${prepend}C($head", s"${ending})")
}
}
loop(this, "", "")
}
}
object L {
def apply[A](as: A*): L[A] =
if(as.isEmpty) N
else C(as.head, apply(as.tail: _*))
def make(int: Int): L[Int] = {
@annotation.tailrec
def loop(i: Int, list: L[Int]): L[Int] = {
if(i == 0) list
else loop(i -1, C(i, list))
}
loop(int, N)
}
}
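// --- Hedged usage sketch (not part of the original listing) ---
// Demonstrates the two constructors above and the flattened toString of C.
object LDemo {
  def main(args: Array[String]): Unit = {
    val fromVarargs = L(1, 2, 3)
    val fromMake = L.make(3)
    println(fromVarargs)              // C(1, C(2, C(3, N)))
    println(fromVarargs == fromMake)  // true: both build the same structure
  }
}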
|
pjberry/functional-programming-in-scala
|
src/com/bitmotif/part_1/Listing_3_1_1.scala
|
Scala
|
mit
| 1,021
|
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.