| code (string, 5-1M chars) | repo_name (string, 5-109 chars) | path (string, 6-208 chars) | language (1 distinct value) | license (15 distinct values) | size (int64, 5-1M) |
|---|---|---|---|---|---|
/*                     __                                               *\
**     ________ ___   / /  ___      __ ____  Scala.js Test Suite        **
**    / __/ __// _ | / /  / _ | __ / // __/  (c) 2013, LAMP/EPFL        **
**  __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \    http://scala-js.org/       **
** /____/\___/_/ |_/____/_/ | |__/ /____/                               **
**                          |/____/                                     **
\*                                                                      */
package org.scalajs.testsuite.javalib.net
import java.net.{URI, URISyntaxException}
import org.junit.Assert._
import org.junit.Test
import org.scalajs.testsuite.utils.AssertThrows._
import org.scalajs.testsuite.utils.Platform.executingInJVM
class URITest {
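/** Checks the accessors of `uri`: the second parameter list carries the expected
 *  decoded components, the third the raw (still percent-encoded) components,
 *  defaulting to the decoded values. A few checks are skipped when running on
 *  the JVM, where the reference implementation differs.
 */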
def expectURI(uri: URI, isAbsolute: Boolean, isOpaque: Boolean)(
authority: String = null, fragment: String = null,
host: String = null, path: String = null, port: Int = -1,
query: String = null, scheme: String = null, userInfo: String = null,
schemeSpecificPart: String = null)(rawAuthority: String = authority,
rawFragment: String = fragment, rawPath: String = path,
rawQuery: String = query, rawUserInfo: String = userInfo,
rawSchemeSpecificPart: String = schemeSpecificPart): Unit = {
if (!executingInJVM)
assertEquals(authority, uri.getAuthority())
assertEquals(fragment, uri.getFragment())
assertEquals(host, uri.getHost())
if (!executingInJVM)
assertEquals(path, uri.getPath())
assertEquals(port, uri.getPort())
assertEquals(query, uri.getQuery())
if (!executingInJVM) {
assertEquals(rawAuthority, uri.getRawAuthority())
assertEquals(rawFragment, uri.getRawFragment())
assertEquals(rawPath, uri.getRawPath())
}
assertEquals(rawQuery, uri.getRawQuery())
assertEquals(rawSchemeSpecificPart, uri.getRawSchemeSpecificPart())
assertEquals(rawUserInfo, uri.getRawUserInfo())
assertEquals(scheme, uri.getScheme())
assertEquals(schemeSpecificPart, uri.getSchemeSpecificPart())
assertEquals(userInfo, uri.getUserInfo())
assertEquals(isAbsolute, uri.isAbsolute())
assertEquals(isOpaque, uri.isOpaque())
}
@Test def should_parse_vanilla_absolute_URIs(): Unit = {
expectURI(new URI("http://java.sun.com/j2se/1.3/"), true, false)(
scheme = "http",
host = "java.sun.com",
path = "/j2se/1.3/",
authority = "java.sun.com",
schemeSpecificPart = "//java.sun.com/j2se/1.3/")()
}
@Test def should_parse_absolute_URIs_with_IPv6(): Unit = {
val uri = new URI("http://hans@[ffff::0:128.4.5.3]:345/~hans/")
expectURI(uri, true, false)(
scheme = "http",
host = "[ffff::0:128.4.5.3]",
userInfo = "hans",
port = 345,
path = "/~hans/",
authority = "hans@[ffff::0:128.4.5.3]:345",
schemeSpecificPart = "//hans@[ffff::0:128.4.5.3]:345/~hans/")()
}
@Test def should_parse_absolute_URIs_without_authority(): Unit = {
expectURI(new URI("file:/~/calendar"), true, false)(
scheme = "file",
path = "/~/calendar",
schemeSpecificPart = "/~/calendar")()
}
@Test def should_parse_absolute_URIs_with_empty_authority(): Unit = {
expectURI(new URI("file:///~/calendar"), true, false)(
authority = "",
scheme = "file",
path = "/~/calendar",
schemeSpecificPart = "///~/calendar")()
}
@Test def should_parse_opaque_URIs(): Unit = {
expectURI(new URI("mailto:java-net@java.sun.com"), true, true)(
scheme = "mailto",
schemeSpecificPart = "java-net@java.sun.com")()
expectURI(new URI("news:comp.lang.java"), true, true)(
scheme = "news",
schemeSpecificPart = "comp.lang.java")()
expectURI(new URI("urn:isbn:096139210x"), true, true)(
scheme = "urn",
schemeSpecificPart = "isbn:096139210x")()
}
@Test def should_parse_relative_URIs(): Unit = {
expectURI(new URI("docs/guide/collections/designfaq.html#28"), false, false)(
path = "docs/guide/collections/designfaq.html",
fragment = "28",
schemeSpecificPart = "docs/guide/collections/designfaq.html")()
expectURI(new URI("../../../demo/jfc/SwingSet2/src/SwingSet2.java"), false, false)(
path = "../../../demo/jfc/SwingSet2/src/SwingSet2.java",
schemeSpecificPart = "../../../demo/jfc/SwingSet2/src/SwingSet2.java")()
}
@Test def should_parse_relative_URIs_with_IPv4(): Unit = {
expectURI(new URI("//123.5.6.3:45/bar"), false, false)(
authority = "123.5.6.3:45",
host = "123.5.6.3",
port = 45,
path = "/bar",
schemeSpecificPart = "//123.5.6.3:45/bar")()
}
@Test def should_parse_relative_URIs_with_registry_based_authority(): Unit = {
expectURI(new URI("//foo:bar"), false, false)(
authority = "foo:bar",
schemeSpecificPart = "//foo:bar")()
}
@Test def should_parse_relative_URIs_with_escapes(): Unit = {
expectURI(new URI("//ma%5dx:secret@example.com:8000/foo"), false, false)(
authority = "ma]x:secret@example.com:8000",
userInfo = "ma]x:secret",
host = "example.com",
port = 8000,
path = "/foo",
schemeSpecificPart = "//ma]x:secret@example.com:8000/foo")(
rawUserInfo = "ma%5dx:secret",
rawAuthority = "ma%5dx:secret@example.com:8000",
rawSchemeSpecificPart = "//ma%5dx:secret@example.com:8000/foo")
}
@Test def should_parse_relative_URIs_with_fragment_only(): Unit = {
expectURI(new URI("#foo"), false, false)(
fragment = "foo",
path = "",
schemeSpecificPart = "")()
}
@Test def should_parse_relative_URIs_with_query_and_fragment(): Unit = {
expectURI(new URI("?query=1#foo"), false, false)(
query = "query=1",
fragment = "foo",
path = "",
schemeSpecificPart = "?query=1")()
}
@Test def should_provide_compareTo(): Unit = {
val x = new URI("http://example.com/asdf%6a")
val y = new URI("http://example.com/asdf%6A")
val z = new URI("http://example.com/asdfj")
val rel = new URI("/foo/bar")
assertTrue(x.compareTo(y) > 0)
assertTrue(x.compareTo(z) < 0)
assertTrue(y.compareTo(z) < 0)
assertEquals(0, x.compareTo(x))
assertEquals(0, y.compareTo(y))
assertEquals(0, z.compareTo(z))
assertTrue(x.compareTo(rel) > 0)
assertTrue(y.compareTo(rel) > 0)
assertTrue(z.compareTo(rel) > 0)
assertEquals(0, rel.compareTo(rel))
}
@Test def should_provide_equals(): Unit = {
val x = new URI("http://example.com/asdf%6a")
val y = new URI("http://example.com/asdf%6A")
val z = new URI("http://example.com/asdfj")
assertTrue(x == y)
assertFalse(x == z)
assertFalse(y == z)
assertTrue(x == x)
assertTrue(y == y)
assertTrue(z == z)
assertNotEquals(new URI("foo:helloWorld%6b%6C"), new URI("foo:helloWorld%6C%6b"))
}
@Test def should_provide_normalize(): Unit = {
expectURI(new URI("http://example.com/../asef/../../").normalize, true, false)(
scheme = "http",
host = "example.com",
authority = "example.com",
path = "/../../",
schemeSpecificPart = "//example.com/../../")()
expectURI(new URI("http://example.com/../as/./ef/foo/../../").normalize, true, false)(
scheme = "http",
host = "example.com",
authority = "example.com",
path = "/../as/",
schemeSpecificPart = "//example.com/../as/")()
expectURI(new URI("bar/../fo:o/./bar").normalize, false, false)(
path = "./fo:o/bar",
schemeSpecificPart = "./fo:o/bar")()
expectURI(new URI("bar/..//fo:o//./bar").normalize, false, false)(
path = "./fo:o/bar",
schemeSpecificPart = "./fo:o/bar")()
val x = new URI("http://www.example.com/foo/bar")
assertTrue(x.normalize eq x)
}
@Test def should_provide_resolve__JavaDoc_examples(): Unit = {
val base = "http://java.sun.com/j2se/1.3/"
val relative1 = "docs/guide/collections/designfaq.html#28"
val resolved1 =
"http://java.sun.com/j2se/1.3/docs/guide/collections/designfaq.html#28"
val relative2 = "../../../demo/jfc/SwingSet2/src/SwingSet2.java"
val resolved2 =
"http://java.sun.com/j2se/1.3/demo/jfc/SwingSet2/src/SwingSet2.java"
assertEquals(resolved1, new URI(base).resolve(relative1).toString)
assertEquals(resolved2, new URI(resolved1).resolve(relative2).toString)
}
@Test def should_provide_resolve_RFC2396_examples(): Unit = {
val base = new URI("http://a/b/c/d;p?q")
def resTest(ref: String, trg: String): Unit =
assertEquals(trg, base.resolve(ref).toString)
// Normal examples
resTest("g:h", "g:h")
resTest("g", "http://a/b/c/g")
resTest("./g", "http://a/b/c/g")
resTest("g/", "http://a/b/c/g/")
resTest("/g", "http://a/g")
resTest("//g", "http://g")
resTest("?y", "http://a/b/c/?y")
resTest("g?y", "http://a/b/c/g?y")
resTest("#s", "http://a/b/c/d;p?q#s")
resTest("g#s", "http://a/b/c/g#s")
resTest("g?y#s", "http://a/b/c/g?y#s")
resTest(";x", "http://a/b/c/;x")
resTest("g;x", "http://a/b/c/g;x")
resTest("g;x?y#s", "http://a/b/c/g;x?y#s")
resTest(".", "http://a/b/c/")
resTest("./", "http://a/b/c/")
resTest("..", "http://a/b/")
resTest("../", "http://a/b/")
resTest("../g", "http://a/b/g")
resTest("../..", "http://a/")
resTest("../../", "http://a/")
resTest("../../g", "http://a/g")
// Abnormal examples
resTest("../../../g", "http://a/../g")
resTest("../../../../g", "http://a/../../g")
resTest("/./g", "http://a/./g")
resTest("/../g", "http://a/../g")
resTest("g.", "http://a/b/c/g.")
resTest(".g", "http://a/b/c/.g")
resTest("g..", "http://a/b/c/g..")
resTest("..g", "http://a/b/c/..g")
resTest("./../g", "http://a/b/g")
resTest("./g/.", "http://a/b/c/g/")
resTest("g/./h", "http://a/b/c/g/h")
resTest("g/../h", "http://a/b/c/h")
resTest("g;x=1/./y", "http://a/b/c/g;x=1/y")
resTest("g;x=1/../y", "http://a/b/c/y")
resTest("g?y/./x", "http://a/b/c/g?y/./x")
resTest("g?y/../x", "http://a/b/c/g?y/../x")
resTest("g#s/./x", "http://a/b/c/g#s/./x")
resTest("g#s/../x", "http://a/b/c/g#s/../x")
resTest("http:g", "http:g")
}
@Test def should_provide_normalize__examples_derived_from_RFC_relativize(): Unit = {
expectURI(new URI("http://a/b/c/..").normalize, true, false)(
scheme = "http",
host = "a",
authority = "a",
path = "/b/",
schemeSpecificPart = "//a/b/")()
expectURI(new URI("http://a/b/c/.").normalize, true, false)(
scheme = "http",
host = "a",
authority = "a",
path = "/b/c/",
schemeSpecificPart = "//a/b/c/")()
}
@Test def should_provide_relativize(): Unit = {
val x = new URI("http://f%4Aoo@asdf/a")
val y = new URI("http://fJoo@asdf/a/b/")
val z = new URI("http://f%4aoo@asdf/a/b/")
assertTrue(x.relativize(y) eq y)
assertEquals("b/", x.relativize(z).toString())
def relTest(base: String, trg: String, exp: String): Unit =
assertEquals(exp, new URI(base).relativize(new URI(trg)).toString())
relTest("http://a.ch/a", "http://a.ch/a/b", "b")
relTest("http://a.ch/a/", "http://a.ch/a/b", "b")
relTest("https://a.ch/a", "http://a.ch/a/b", "http://a.ch/a/b")
relTest("/a/b/c", "/a/b/c/d/e", "d/e")
relTest("/a/b/c/", "/a/b/c/d/e", "d/e")
relTest("/a/b/c/", "/a/b/c/foo:e/d", "foo:e/d") // see bug JDK-7037120
relTest("../a/b", "../a/b/c", "c")
}
@Test def should_provide_hashCode(): Unit = {
if (!executingInJVM) { // Fails on JDK6 and JDK7
assertEquals(new URI("http://example.com/asdf%6a").hashCode,
new URI("http://example.com/asdf%6A").hashCode)
}
}
@Test def should_allow_non_ASCII_characters(): Unit = {
expectURI(new URI("http://cs.dbpedia.org/resource/Víno"), true, false)(
scheme = "http",
host = "cs.dbpedia.org",
path = "/resource/Víno",
authority = "cs.dbpedia.org",
schemeSpecificPart = "//cs.dbpedia.org/resource/Víno")()
}
@Test def should_decode_UTF_8(): Unit = {
expectURI(new URI("http://cs.dbpedia.org/resource/V%C3%ADno"), true, false)(
scheme = "http",
host = "cs.dbpedia.org",
path = "/resource/Víno",
authority = "cs.dbpedia.org",
schemeSpecificPart = "//cs.dbpedia.org/resource/Víno")(
rawPath = "/resource/V%C3%ADno",
rawSchemeSpecificPart = "//cs.dbpedia.org/resource/V%C3%ADno")
expectURI(new URI("%e3%81%93a%e3%82%93%e3%81%AB%e3%81%a1%e3%81%af"), false, false)(
path = "こaんにちは",
schemeSpecificPart = "こaんにちは")(
rawPath = "%e3%81%93a%e3%82%93%e3%81%AB%e3%81%a1%e3%81%af",
rawSchemeSpecificPart = "%e3%81%93a%e3%82%93%e3%81%AB%e3%81%a1%e3%81%af")
}
@Test def should_support_toASCIIString(): Unit = {
def cmp(base: String, encoded: String): Unit =
assertEquals(encoded, new URI(base).toASCIIString())
cmp("http://cs.dbpedia.org/resource/Víno",
"http://cs.dbpedia.org/resource/V%C3%ADno")
cmp("http://こaんにちは/",
"http://%E3%81%93a%E3%82%93%E3%81%AB%E3%81%A1%E3%81%AF/")
cmp("foo://bar/\\uD800\\uDCF5/",
"foo://bar/%F0%90%83%B5/")
}
@Test def should_replace_when_bad_surrogates_are_present(): Unit = {
expectURI(new URI("http://booh/%E3a"), true, false)(
scheme = "http",
host = "booh",
path = "/�a",
authority = "booh",
schemeSpecificPart = "//booh/�a")(
rawPath = "/%E3a",
rawSchemeSpecificPart = "//booh/%E3a")
// lowercase e is kept
expectURI(new URI("http://booh/%e3a"), true, false)(
scheme = "http",
host = "booh",
path = "/�a",
authority = "booh",
schemeSpecificPart = "//booh/�a")(
rawPath = "/%e3a",
rawSchemeSpecificPart = "//booh/%e3a")
// %E3%81 is considered as 1 malformed
expectURI(new URI("http://booh/%E3%81a"), true, false)(
scheme = "http",
host = "booh",
path = "/�a",
authority = "booh",
schemeSpecificPart = "//booh/�a")(
rawPath = "/%E3%81a",
rawSchemeSpecificPart = "//booh/%E3%81a")
if (!executingInJVM) { // Fails on JDK6 and JDK7
// %E3%E3 is considered as 2 malformed
expectURI(new URI("http://booh/%E3%E3a"), true, false)(
scheme = "http",
host = "booh",
path = "/��a",
authority = "booh",
schemeSpecificPart = "//booh/��a")(
rawPath = "/%E3%E3a",
rawSchemeSpecificPart = "//booh/%E3%E3a")
}
}
@Test def should_throw_on_bad_escape_sequences(): Unit = {
expectThrows(classOf[URISyntaxException], new URI("http://booh/%E"))
expectThrows(classOf[URISyntaxException], new URI("http://booh/%Ep"))
}
}
| jasonchaffee/scala-js | test-suite/shared/src/test/scala/org/scalajs/testsuite/javalib/net/URITest.scala | Scala | bsd-3-clause | 15,055 |
// Demonstrate identities as used by the request-reply pattern.
//
// @author Giovanni Ruggiero
// @email giovanni.ruggiero@gmail.com
import org.zeromq.ZMQ
import ZHelpers._
object identity {
def main(args: Array[String]): Unit = {
  val context = ZMQ.context(1)
  // Sink socket: receives each request together with the sender's identity frame.
  val sink = context.socket(ZMQ.DEALER)
  sink.bind("inproc://example")
  // First REQ socket sets no identity, so the peer sees a generated 5-byte identity.
  val anonymous = context.socket(ZMQ.REQ)
  anonymous.connect("inproc://example")
  anonymous.send("ROUTER uses a generated 5 byte identity".getBytes, 0)
  dump(sink)
  // Second REQ socket sets an explicit identity, which the peer sees instead.
  val identified = context.socket(ZMQ.REQ)
  identified.setIdentity("PEER2".getBytes)
  identified.connect("inproc://example")
  identified.send("ROUTER socket uses REQ's socket identity".getBytes, 0)
  dump(sink)
  identified.close()
  anonymous.close()
  sink.close()
  context.term()
}
}
| soscpd/bee | root/tests/zguide/examples/Scala/identity.scala | Scala | mit | 729 |
package breeze.collection.mutable
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import breeze.storage.{Storage, ConfigurableDefault, DefaultArrayValue}
import java.util
import scala.reflect.ClassTag
import scala.util.hashing.MurmurHash3
/**
* This is a Sparse Array implementation backed by a linear-probing
* open address hash table.
*
* @author dlwh
*/
@SerialVersionUID(1L)
final class OpenAddressHashArray[@specialized(Int, Float, Long, Double) Elem] private[mutable] (protected var _index: Array[Int],
protected var _data: Array[Elem],
protected var load: Int,
val size: Int,
val default: ConfigurableDefault[Elem] = ConfigurableDefault.default[Elem])
(implicit protected val manElem: ClassTag[Elem],
val defaultArrayValue: DefaultArrayValue[Elem]) extends Storage[Elem] with ArrayLike[Elem] with Serializable {
require(size > 0, "Size must be positive, but got " + size)
def this(size: Int, default: ConfigurableDefault[Elem],
initialSize: Int)
(implicit manElem: ClassTag[Elem],
defaultArrayValue: DefaultArrayValue[Elem]) = {
this(OpenAddressHashArray.emptyIndexArray(OpenAddressHashArray.calculateSize(initialSize)),
default.makeArray(OpenAddressHashArray.calculateSize(initialSize)),
0,
size,
default)
}
def this(size: Int,
default: ConfigurableDefault[Elem])
(implicit manElem: ClassTag[Elem],
defaultArrayValue: DefaultArrayValue[Elem]) = {
this(size, default, 16)
}
def this(size: Int)(implicit manElem: ClassTag[Elem], defaultArrayValue: DefaultArrayValue[Elem]) = {
this(size, ConfigurableDefault.default[Elem])
}
def data = _data
def index = _index
def defaultValue = default.value(defaultArrayValue)
/**
* Only iterates "active" elements
*/
def valuesIterator = activeValuesIterator
def valueAt(i: Int) = data(i)
def indexAt(i: Int) = index(i)
def keysIterator = index.iterator.filter(_ >= 0)
def activeSize = load
def contains(i: Int) = index(locate(i)) >= 0
def isActive(i: Int) = index(i) >= 0
def allVisitableIndicesActive = false
final def apply(i: Int) = {
if(i < 0 || i >= size) throw new IndexOutOfBoundsException()
if(index.length == 0) default.value
else data(locate(i))
}
final def update(i: Int, v: Elem) {
if(i < 0 || i >= size) throw new IndexOutOfBoundsException(i + " is out of bounds for size " + size)
val pos = locate(i)
_data(pos) = v
if(_index(pos) != i) {
load += 1
if(load * 4 > _index.length * 3) {
rehash()
update(i, v)
} else {
_index(pos) = i
}
}
}
def activeKeysIterator = keysIterator
def activeValuesIterator = activeIterator.map(_._2)
def activeIterator = (index.iterator zip data.iterator).filter(_._1 >= 0)
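// Linear probing: start at the slot given by hashCodeFor(i), then walk forward
// (wrapping at the end of the table) until we hit either the slot holding i or
// an empty slot (marked by -1).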
private def locate(i: Int) = {
if(i >= size) throw new IndexOutOfBoundsException(i + " greater than size of " + size)
if(i < 0) throw new IndexOutOfBoundsException(i + " less than 0")
val index = this.index
val len = index.length
var hash = hashCodeFor(i) & (len - 1)
while(index(hash) != i && index(hash) >= 0) {
hash += 1
if (hash >= len) {
hash = 0
}
}
hash
}
private def hashCodeFor(i: Int): Int = {
// based on what's in HashTable.scala and scala.util.hashing (inlined because of 2.9.2 support)
// doing this so that i and i +1 aren't near each other.
var code = i.##
code *= 0x9e3775cd
code = java.lang.Integer.reverseBytes(code)
code *= 0x9e3775cd
val rotated = (code >>> 11) | (code << 21)
rotated
}
final protected def rehash() {
val oldIndex = index
val oldValues = data
val newSize = OpenAddressHashArray.calculateSize(oldIndex.size+1)
_index = new Array[Int](newSize)
util.Arrays.fill(_index, -1)
_data = new Array[Elem](newSize)
default.fillArray(_data, default.value)
load = 0
var i = 0
while(i < oldIndex.length) {
if(oldIndex(i) >= 0) {
update(oldIndex(i),oldValues(i))
}
i += 1
}
}
/**
* How many elements must be iterated over using valueAt/indexAt.
* @return
*/
override def iterableSize = index.length
override def toString: String = activeIterator.mkString("OpenAddressHashArray(",", ", ")")
def copy:OpenAddressHashArray[Elem] = {
new OpenAddressHashArray[Elem](util.Arrays.copyOf(_index, _index.length),
breeze.util.ArrayUtil.copyOf(_data, _data.length),
load, size, default
)
}
// This hash code must be symmetric in the contents but ought not
// collide trivially. based on hashmap.hashcode
override def hashCode() = MurmurHash3.unorderedHash(iterator.filter(_._2 != default.value), 43)
override def equals(that: Any): Boolean = that match {
case that: OpenAddressHashArray[Elem] =>
(this eq that) ||
(this.size == that.size) && {
try {
this.iterator forall {
case (k, v) => that(k) match {
case `v` =>
true
case _ => false
}
}
} catch {
case ex: ClassCastException =>
false
}}
case _ =>
false
}
}
object OpenAddressHashArray {
def apply[@specialized(Int, Float, Long, Double) T:ClassTag:DefaultArrayValue](values : T*) = {
val rv = new OpenAddressHashArray[T](values.length)
val default = implicitly[DefaultArrayValue[T]].value
for( (v,i) <- values.zipWithIndex if v != default) {
rv(i) = v
}
rv
}
private def calculateSize(size: Int): Int = {
if(size < 4) 4
else nextPowerOfTwo(size-1)
}
private def nextPowerOfTwo(size: Int): Int = {
require(size < (1<<30) )
// http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
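// e.g. nextPowerOfTwo(17): 10001 -> 11001 -> 11111 (after all shifts) -> +1 = 100000 = 32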
var v = size
v |= v >> 1
v |= v >> 2
v |= v >> 4
v |= v >> 8
v |= v >> 16
v += 1
v
}
private def emptyIndexArray(size: Int) = {
val arr = new Array[Int](size)
util.Arrays.fill(arr, -1)
arr
}
}
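// ---------------------------------------------------------------------------
// Added illustration (not part of the original breeze source): a minimal usage
// sketch of the open-address hash array above. Unset indices read back the
// configured default without consuming storage, and update() rehashes once the
// load exceeds 3/4 of the table capacity. The object and value names below are
// hypothetical; the implicit DefaultArrayValue[Double] instance is assumed to
// be provided by breeze.storage.
object OpenAddressHashArrayDemo {
  def main(args: Array[String]): Unit = {
    val arr = new OpenAddressHashArray[Double](1000) // logical size 1000, 16 physical slots to start
    arr(3) = 1.5                                     // goes through update(), storing index and value
    arr(999) = -2.0
    println(arr(3))                                  // 1.5
    println(arr(10))                                 // 0.0 -- the default, nothing stored for index 10
    println(arr.activeSize)                          // 2  -- only explicitly written entries are active
    arr.activeIterator.foreach { case (i, v) => println(s"$i -> $v") }
  }
}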
| wavelets/breeze | src/main/scala/breeze/collection/mutable/OpenAddressHashArray.scala | Scala | apache-2.0 | 6,755 |
package com.coreyoconnor.stoa
trait Exprs[Rep] {
type TypeRep
val types: Types[TypeRep]
def ref: Int => Rep
def abs: TypeRep => Rep => Rep
def app: Rep => Rep => Rep
def int: Int => Rep
def bool: Boolean => Rep
}
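// ---------------------------------------------------------------------------
// Added illustration (not part of the original file): Exprs is a final
// (object-algebra) encoding of a simply-typed lambda calculus. Each method
// returns an abstract Rep, variables are de Bruijn indices via `ref`, and
// `abs` takes the bound variable's type before the body. The consumer below is
// hypothetical; it only assumes the signatures declared in the trait and that
// the caller supplies some TypeRep for Int.
object ExprsUsageSketch {
  /** Builds the term `(\x: Int. x) 42` in whatever representation E provides. */
  def identityApp[Rep](E: Exprs[Rep])(intTy: E.TypeRep): Rep =
    E.app(E.abs(intTy)(E.ref(0)))(E.int(42))
}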
| coreyoconnor/simply-typed-object-algebra | src/main/scala/com/coreyoconnor/stoa/Exprs.scala | Scala | bsd-3-clause | 229 |
package kidstravel.client.components
import kidstravel.client.services.{GetCityCandidates, UpdateCityCandidates}
import kidstravel.shared.geo.CityLabel
/**
* Created by nobby on 26.08.16.
*/
object CitySearchBox extends SearchBox {
override type T = CityLabel
override def getAction = GetCityCandidates(_)
override def updateAction = UpdateCityCandidates(_)
override def asString = _.asString
}
| devkat/kidstravel | client/src/main/scala/kidstravel/client/components/CitySearchBox.scala | Scala | apache-2.0 | 412 |
/*
* Copyright 2013 Akiyoshi Sugiki, University of Tsukuba
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kumoi.impl.aaa.ref
import kumoi.shell.event._
import kumoi.shell.aaa._
import kumoi.core.rmi._
import kumoi.core.or._
/**
* An implementation for roles.
*
* @author Akiyoshi SUGIKI
*/
class HotRoleImpl(cold: ColdRole, auth: AAA) extends ORObject[HotRole] with HotRole {
private var pname = cold.name(auth)
//def name_=(n: String) { pname = n }
def name_=(na: (String, AAA)) { pname = na._1 }
//def id_=(u: String) {}
def id_=(u: (String, AAA)) {}
def id(implicit auth: AAA) = null
override def name(implicit auth: AAA) = { pname }
/*
override def hashCode = uuid.hashCode
override def equals(that: Any) = that match {
case that: HotRole =>
/*(that canEqual this) &&*/ (uuid == that.uuid)
case _ => false
}
def canEqual(that: Any) = that.isInstanceOf[HotRole]
*/
override def genEvent(e: Exception) = RoleError(pname, null, e)
override def toString = pname
}
| axi-sugiki/kumoi | src/kumoi/impl/aaa/ref/HotRoleImpl.scala | Scala | apache-2.0 | 1,525 |
/*
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
package com.krux.hyperion.expression
/**
* All fields that the object exposes as reference expressions.
*/
trait RunnableObject { self =>
def objectName: Option[String] = None
/**
* The date and time that the scheduled run actually ended.
*/
case object ActualEndTime extends ReferenceExpression with DateTimeExp {
override val objectName = self.objectName
val isRuntime = true
val referenceName = "actualEndTime"
}
/**
* The date and time that the scheduled run actually started.
*/
case object ActualStartTime extends ReferenceExpression with DateTimeExp {
override val objectName = self.objectName
val isRuntime = true
val referenceName = "actualStartTime"
}
/**
* The date and time that the run was scheduled to end.
*/
case object ScheduledEndTime extends ReferenceExpression with DateTimeExp {
override val objectName = self.objectName
val isRuntime = true
val referenceName = "scheduledEndTime"
}
/**
* The date and time that the run was scheduled to start.
*/
case object ScheduledStartTime extends ReferenceExpression with DateTimeExp {
override val objectName = self.objectName
val isRuntime = true
val referenceName = "scheduledStartTime"
}
/**
* The last time that Task Runner, or other code that is processing the tasks, called the ReportTaskProgress operation.
*/
case object ReportProgressTime extends ReferenceExpression with DateTimeExp {
override val objectName = self.objectName
val isRuntime = true
val referenceName = "reportProgressTime"
}
/**
* The host name of client that picked up the task attempt.
*/
case object Hostname extends ReferenceExpression with DateTimeExp {
override val objectName = self.objectName
val isRuntime = true
val referenceName = "hostname"
}
/**
* The status of this object. Possible values are: pending, waiting_on_dependencies, running, waiting_on_runner, successful, and failed.
*/
case object Status extends ReferenceExpression with StringExp {
override val objectName = self.objectName
val isRuntime = true
val referenceName = "status"
}
/**
* A list of all objects that this object is waiting on before it can enter the RUNNING state.
*/
case object WaitingOn extends ReferenceExpression with StringExp {
override val objectName = self.objectName
val isRuntime = true
val referenceName = "waitingOn"
}
/**
* The number of attempted runs remaining before setting the status of this object to failed.
*/
case object TriesLeft extends ReferenceExpression with IntExp {
override val objectName = self.objectName
val isRuntime = true
val referenceName = "triesLeft"
}
/**
* The reason for the failure to create the resource.
*/
case object FailureReason extends ReferenceExpression with StringExp {
override val objectName = self.objectName
val isRuntime = true
val referenceName = "failureReason"
}
case object PipelineId extends ReferenceExpression with StringExp {
override val objectName = self.objectName
val isRuntime = true
val referenceName = "pipelineId"
}
case object Name extends ReferenceExpression with StringExp {
override val objectName = self.objectName
val isRuntime = false
val referenceName = "name"
}
}
object RunnableObject extends RunnableObject
| realstraw/hyperion | core/src/main/scala/com/krux/hyperion/expression/RunnableObject.scala | Scala | bsd-3-clause | 3,606 |
package org.scalaide.core.sbtbuilder
import org.junit.Test
import org.scalaide.core.internal.project.ScalaInstallation.platformInstallation
import org.scalaide.core.internal.ScalaPlugin
import org.junit.Assert
class CompilerBridgeStoreTest {
@Test
def platformCompilerBridgeWorks(): Unit = {
val store = ScalaPlugin().compilerBridgeStore
store.purgeCache()
Assert.assertTrue("successful compiler bridge compilation", store.compilerBridgeFor(platformInstallation)(null).isRight)
Assert.assertEquals("Zero hits and one miss", (0, 1), store.getStats)
}
@Test
def platformCompilerBridgeCachesCompilers(): Unit = {
val store = ScalaPlugin().compilerBridgeStore
store.purgeCache()
Assert.assertTrue("successful compiler bridge compilation", store.compilerBridgeFor(platformInstallation)(null).isRight)
Assert.assertTrue("Second try successful", store.compilerBridgeFor(platformInstallation)(null).isRight)
Assert.assertEquals("One hit and one miss", (1, 1), store.getStats)
}
}
| scala-ide/scala-ide | org.scala-ide.sdt.core.tests/src/org/scalaide/core/sbtbuilder/CompilerBridgeStoreTest.scala | Scala | bsd-3-clause | 1,026 |
// Copyright (C) 2014 Fehmi Can Saglam (@fehmicans) and contributors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package reactivemongo.extensions.fixtures
import scala.concurrent.{ Future, ExecutionContext }
import play.api.libs.iteratee.Enumerator
import play.api.libs.json.JsObject
import reactivemongo.bson.BSONDocument
import reactivemongo.api.DB
import reactivemongo.api.commands.WriteResult
import reactivemongo.api.collections.bson.BSONCollection
import play.modules.reactivemongo.json.BSONFormats
import reactivemongo.extensions.util.Logger
class BsonFixtures(db: => DB)(implicit ec: ExecutionContext) extends Fixtures[BSONDocument] {
def map(document: JsObject): BSONDocument =
BSONFormats.BSONDocumentFormat.reads(document).get
def bulkInsert(collectionName: String, documents: Stream[BSONDocument]): Future[Int] = db.collection[BSONCollection](
collectionName).bulkInsert(documents, ordered = true).map(_.n)
def removeAll(collectionName: String): Future[WriteResult] =
db.collection[BSONCollection](collectionName).
remove(query = BSONDocument.empty, firstMatchOnly = false)
def drop(collectionName: String): Future[Unit] =
db.collection[BSONCollection](collectionName).drop()
}
object BsonFixtures {
def apply(db: DB)(implicit ec: ExecutionContext): BsonFixtures =
new BsonFixtures(db)
}
| ReactiveMongo/ReactiveMongo-Extensions | bson/src/main/scala/fixtures/BsonFixtures.scala | Scala | apache-2.0 | 1,974 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2.orc
import java.net.URI
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce.{JobID, TaskAttemptID, TaskID, TaskType}
import org.apache.hadoop.mapreduce.lib.input.FileSplit
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.orc.{OrcConf, OrcFile, TypeDescription}
import org.apache.orc.mapred.OrcStruct
import org.apache.orc.mapreduce.OrcInputFormat
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.JoinedRow
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.execution.datasources.{PartitionedFile, PartitioningUtils}
import org.apache.spark.sql.execution.datasources.orc.{OrcColumnarBatchReader, OrcDeserializer, OrcUtils}
import org.apache.spark.sql.execution.datasources.v2._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources.v2.reader.{InputPartition, PartitionReader}
import org.apache.spark.sql.types.{AtomicType, StructType}
import org.apache.spark.sql.vectorized.ColumnarBatch
import org.apache.spark.util.SerializableConfiguration
/**
* A factory used to create Orc readers.
*
* @param sqlConf SQL configuration.
* @param broadcastedConf Broadcast serializable Hadoop Configuration.
* @param dataSchema Schema of orc files.
* @param readDataSchema Required data schema in the batch scan.
* @param partitionSchema Schema of partitions.
*/
case class OrcPartitionReaderFactory(
sqlConf: SQLConf,
broadcastedConf: Broadcast[SerializableConfiguration],
dataSchema: StructType,
readDataSchema: StructType,
partitionSchema: StructType) extends FilePartitionReaderFactory {
private val resultSchema = StructType(readDataSchema.fields ++ partitionSchema.fields)
private val isCaseSensitive = sqlConf.caseSensitiveAnalysis
private val capacity = sqlConf.orcVectorizedReaderBatchSize
override def supportColumnarReads(partition: InputPartition): Boolean = {
sqlConf.orcVectorizedReaderEnabled && sqlConf.wholeStageEnabled &&
resultSchema.length <= sqlConf.wholeStageMaxNumFields &&
resultSchema.forall(_.dataType.isInstanceOf[AtomicType])
}
override def buildReader(file: PartitionedFile): PartitionReader[InternalRow] = {
val conf = broadcastedConf.value.value
val resultSchemaString = OrcUtils.orcTypeDescriptionString(resultSchema)
OrcConf.MAPRED_INPUT_SCHEMA.setString(conf, resultSchemaString)
OrcConf.IS_SCHEMA_EVOLUTION_CASE_SENSITIVE.setBoolean(conf, isCaseSensitive)
val filePath = new Path(new URI(file.filePath))
val fs = filePath.getFileSystem(conf)
val readerOptions = OrcFile.readerOptions(conf).filesystem(fs)
val reader = OrcFile.createReader(filePath, readerOptions)
val requestedColIdsOrEmptyFile = OrcUtils.requestedColumnIds(
isCaseSensitive, dataSchema, readDataSchema, reader, conf)
if (requestedColIdsOrEmptyFile.isEmpty) {
new EmptyPartitionReader[InternalRow]
} else {
val requestedColIds = requestedColIdsOrEmptyFile.get
assert(requestedColIds.length == readDataSchema.length,
"[BUG] requested column IDs do not match required schema")
val taskConf = new Configuration(conf)
val fileSplit = new FileSplit(filePath, file.start, file.length, Array.empty)
val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
val taskAttemptContext = new TaskAttemptContextImpl(taskConf, attemptId)
val orcRecordReader = new OrcInputFormat[OrcStruct]
.createRecordReader(fileSplit, taskAttemptContext)
val deserializer = new OrcDeserializer(dataSchema, readDataSchema, requestedColIds)
val fileReader = new PartitionReader[InternalRow] {
override def next(): Boolean = orcRecordReader.nextKeyValue()
override def get(): InternalRow = deserializer.deserialize(orcRecordReader.getCurrentValue)
override def close(): Unit = orcRecordReader.close()
}
new PartitionReaderWithPartitionValues(fileReader, readDataSchema,
partitionSchema, file.partitionValues)
}
}
override def buildColumnarReader(file: PartitionedFile): PartitionReader[ColumnarBatch] = {
val conf = broadcastedConf.value.value
val resultSchemaString = OrcUtils.orcTypeDescriptionString(resultSchema)
OrcConf.MAPRED_INPUT_SCHEMA.setString(conf, resultSchemaString)
OrcConf.IS_SCHEMA_EVOLUTION_CASE_SENSITIVE.setBoolean(conf, isCaseSensitive)
val filePath = new Path(new URI(file.filePath))
val fs = filePath.getFileSystem(conf)
val readerOptions = OrcFile.readerOptions(conf).filesystem(fs)
val reader = OrcFile.createReader(filePath, readerOptions)
val requestedColIdsOrEmptyFile = OrcUtils.requestedColumnIds(
isCaseSensitive, dataSchema, readDataSchema, reader, conf)
if (requestedColIdsOrEmptyFile.isEmpty) {
new EmptyPartitionReader
} else {
val requestedColIds = requestedColIdsOrEmptyFile.get ++ Array.fill(partitionSchema.length)(-1)
assert(requestedColIds.length == resultSchema.length,
"[BUG] requested column IDs do not match required schema")
val taskConf = new Configuration(conf)
val fileSplit = new FileSplit(filePath, file.start, file.length, Array.empty)
val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
val taskAttemptContext = new TaskAttemptContextImpl(taskConf, attemptId)
val batchReader = new OrcColumnarBatchReader(capacity)
batchReader.initialize(fileSplit, taskAttemptContext)
val requestedPartitionColIds =
Array.fill(readDataSchema.length)(-1) ++ Range(0, partitionSchema.length)
batchReader.initBatch(
TypeDescription.fromString(resultSchemaString),
resultSchema.fields,
requestedColIds,
requestedPartitionColIds,
file.partitionValues)
new PartitionRecordReader(batchReader)
}
}
}
| pgandhi999/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/orc/OrcPartitionReaderFactory.scala | Scala | apache-2.0 | 6,904 |
package com.github.ligangty.scala.jsoup.parser
import com.github.ligangty.scala.jsoup.Jsoup
import com.github.ligangty.scala.jsoup.nodes.{Attributes, Element}
import com.github.ligangty.scala.jsoup.select.Elements
import org.scalatest.FunSuite
/**
* Test suite for attribute parser.
*/
class AttributeParseTest extends FunSuite{
test("parsesRoughAttributeString") {
val html: String = "<a id=\\"123\\" class=\\"baz = 'bar'\\" style = 'border: 2px'qux zim foo = 12 mux=18 />"
val el: Element = Jsoup.parse(html).getElementsByTag("a").get(0)
val attr: Attributes = el.attributes
assert(7== attr.size)
assert("123"== attr.get("id"))
assert("baz = 'bar'"== attr.get("class"))
assert("border: 2px"== attr.get("style"))
assert(""== attr.get("qux"))
assert(""== attr.get("zim"))
assert("12"== attr.get("foo"))
assert("18"== attr.get("mux"))
}
test("handlesNewLinesAndReturns") {
val html: String = "<a\\r\\nfoo='bar\\r\\nqux'\\r\\nbar\\r\\n=\\r\\ntwo>One</a>"
val el: Element = Jsoup.parse(html).select("a").first()
assert(2== el.attributes.size)
assert("bar\\r\\nqux"== el.attr("foo"))
assert("two"== el.attr("bar"))
}
test("parsesEmptyString") {
val html: String = "<a />"
val el: Element = Jsoup.parse(html).getElementsByTag("a").get(0)
val attr: Attributes = el.attributes
assert(0== attr.size)
}
test("canStartWithEq") {
val html: String = "<a =empty />"
val el: Element = Jsoup.parse(html).getElementsByTag("a").get(0)
val attr: Attributes = el.attributes
assert(1== attr.size)
assert(attr.hasKey("=empty"))
assert(""== attr.get("=empty"))
}
test("strictAttributeUnescapes") {
val html: String = "<a id=1 href='?foo=bar&mid<=true'>One</a> <a id=2 href='?foo=bar<qux&lg=1'>Two</a>"
val els: Elements = Jsoup.parse(html).select("a")
assert("?foo=bar&mid<=true"== els.first().attr("href"))
assert("?foo=bar<qux&lg=1"== els.last.attr("href"))
}
test("moreAttributeUnescapes") {
val html: String = "<a href='&wr_id=123&mid-size=true&ok=&wr'>Check</a>"
val els: Elements = Jsoup.parse(html).select("a")
assert("&wr_id=123&mid-size=true&ok=&wr"== els.first.attr("href"))
}
}
| ligangty/scalajsoup | src/test/scala/com/github/ligangty/scala/jsoup/parser/AttributeParseTest.scala | Scala | mit | 2,224 |
/*
* FScape.scala
* (FScape)
*
* Copyright (c) 2001-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.proc
import de.sciss.fscape.Graph
import de.sciss.fscape.Graph.ProductReader
import de.sciss.fscape.lucre.impl.UGenGraphBuilderContextImpl
import de.sciss.fscape.stream.{Control => SControl}
import de.sciss.lucre.Event.Targets
import de.sciss.lucre.expr.graph.Ex
import de.sciss.lucre.impl.{DummyEvent, ExprTypeImpl}
import de.sciss.lucre.{Copy, Disposable, Elem, Event, EventLike, Expr, Ident, Obj, Observable, Publisher, Txn, Var => LVar, Workspace => LWorkspace}
import de.sciss.model.{Change => MChange}
import de.sciss.proc.Code.{Example, Import}
import de.sciss.proc.impl.{CodeImpl, FScapeImpl, FScapeOutputGenViewImpl, FScapeOutputImpl, FScapeRenderingImpl, FScapeRunnerImpl}
import de.sciss.serial.{ConstFormat, DataInput, DataOutput, TFormat}
import de.sciss.{fscape, model, proc}
import scala.collection.immutable.{IndexedSeq => Vec, Seq => ISeq}
import scala.concurrent.Future
import scala.util.Try
object FScape extends Obj.Type {
final val typeId = 0x1000B
// ---- implementation forwards ----
/** Registers this type and the graph object type.
* You can use this call to register all FScape components.
*/
override def init(): Unit = {
super .init()
Output .init()
GraphObj.init()
Code .init()
FScapeRunnerImpl.init()
Graph.addProductReaderSq({
import de.sciss.fscape.graph._
import de.sciss.fscape.lucre.{graph => l}
Seq[ProductReader[Product]](
AffineTransform2D,
ARCWindow,
ArithmSeq,
BinaryOp,
Biquad,
Bleach,
Blobs2D,
BlockSize.GE, BlockSize.Unit,
BufferMemory,
ChannelProxy,
Clip,
CombN,
ComplexBinaryOp,
ComplexUnaryOp,
Concat,
ConstQ,
ControlBlockSize,
Convolution,
DC,
DCT_II,
DebugSink,
DebugThrough,
DelayN,
DEnvGen,
DetectLocalMax,
Differentiate,
Distinct,
Done,
Drop,
DropRight,
DropWhile,
Elastic,
Empty,
ExpExp,
ExpLin,
Feed, Feed.Back,
/* FFT: */ Real1FFT, Real1IFFT, Real1FullFFT, Real1FullIFFT, Complex1FFT, Complex1IFFT,
/* FFT2: */ Real2FFT, Real2IFFT, Real2FullFFT, Real2FullIFFT, Complex2FFT, Complex2IFFT,
FilterSeq,
Flatten,
Fold,
FoldCepstrum,
Frames, Indices,
Gate,
GenWindow,
GeomSeq,
impl.GESeq,
GimpSlur,
GramSchmidtMatrix,
Hash,
HilbertCurve.From2D, HilbertCurve.To2D,
Histogram,
HPF,
/* IfElse: */ IfThen, ElseIfThen, ElseUnit, ElseGE,
ImageFile.Type.PNG, ImageFile.Type.JPG, ImageFile.SampleFormat.Int8, ImageFile.SampleFormat.Int16,
ImageFile.Spec,
Impulse,
Latch,
LeakDC,
Length,
LFSaw,
Limiter,
Line,
LinExp,
LinKernighanTSP,
LinLin,
Loudness,
LPF,
Masking2D,
MatchLen,
MatrixInMatrix,
MatrixOutMatrix,
MelFilter,
Metro,
Mix.MonoEqP,
NormalizeWindow,
NumChannels,
OffsetOverlapAdd,
OnePole,
OnePoleWindow,
OverlapAdd,
PeakCentroid1D, PeakCentroid2D,
Pearson,
PenImage,
PitchesToViterbi,
Poll,
PriorityQueue,
Progress,
ProgressFrames,
Reduce,
ReduceWindow,
RepeatWindow,
Resample,
ResizeWindow,
ReverseWindow,
RotateFlipMatrix,
RotateWindow,
RunningMax,
RunningMin,
RunningProduct,
RunningSum,
RunningWindowMax,
RunningWindowMin,
RunningWindowProduct,
RunningWindowSum,
SampleRate,
ScanImage,
SegModPhasor,
SetResetFF,
SinOsc,
Sliding,
SlidingPercentile,
SlidingWindowPercentile,
SortWindow,
StrongestLocalMaxima,
Take,
TakeRight,
TakeWhile,
Timer,
TransposeMatrix,
Trig,
TrigHold,
UnaryOp,
UnzipWindow, UnzipWindowN,
ValueIntSeq, ValueLongSeq, ValueDoubleSeq,
Viterbi,
WhiteNoise,
WindowApply,
WindowIndexWhere,
WindowMaxIndex,
Wrap,
Zip,
ZipWindow, ZipWindowN,
// lucre
l.Action,
l.Attribute, l.Attribute.Scalar, l.Attribute.Vector,
l.AudioFileIn, l.AudioFileIn.NumFrames, l.AudioFileIn.SampleRate, l.AudioFileIn.WithCue,
l.AudioFileOut, l.AudioFileOut.WithFile,
l.MkDouble,
l.MkDoubleVector,
l.MkInt,
l.MkIntVector,
l.MkLong,
l.OnComplete,
l.PhysicalIn, l.PhysicalOut,
)
})
FScapeImpl.init()
}
def apply[T <: Txn[T]]()(implicit tx: T): FScape[T] = FScapeImpl()
def read[T <: Txn[T]](in: DataInput)(implicit tx: T): FScape[T] = FScapeImpl.read(in)
implicit def format[T <: Txn[T]]: TFormat[T, FScape[T]] = FScapeImpl.format[T]
// ---- event types ----
/** An update is a sequence of changes */
final case class Update[T <: Txn[T]](proc: FScape[T], changes: Vec[Change[T]])
/** A change is either a state change, or a scan or a grapheme change */
sealed trait Change[T <: Txn[T]]
final case class GraphChange[T <: Txn[T]](change: model.Change[Graph]) extends Change[T]
/** An output change is either adding or removing an output */
sealed trait OutputsChange[T <: Txn[T]] extends Change[T] {
def output: Output[T]
}
final case class OutputAdded [T <: Txn[T]](output: Output[T]) extends OutputsChange[T]
final case class OutputRemoved[T <: Txn[T]](output: Output[T]) extends OutputsChange[T]
override def readIdentifiedObj[T <: Txn[T]](in: DataInput)(implicit tx: T): Obj[T] =
FScapeImpl.readIdentifiedObj(in)
// ----
object Rendering {
type State = GenView.State
val Completed: GenView.Completed .type = GenView.Completed
val Running : GenView.Running .type = GenView.Running
type Running = GenView.Running
val Cancelled: fscape.stream.Cancelled .type = fscape.stream.Cancelled
type Cancelled = fscape.stream.Cancelled
/** Creates a view with the default `UGenGraphBuilder.Context`. */
def apply[T <: Txn[T]](peer: FScape[T], config: SControl.Config, attr: Runner.Attr[T] = Runner.emptyAttr[T])
(implicit tx: T, universe: Universe[T]): Rendering[T] = {
val ugbCtx = new UGenGraphBuilderContextImpl.Default(peer, attr = attr)
FScapeRenderingImpl(peer, ugbCtx, config, force = true)
}
}
trait Rendering[T <: Txn[T]] extends Observable[T, Rendering.State] with Disposable[T] {
def state(implicit tx: T): Rendering.State
def result(implicit tx: T): Option[Try[Unit]]
def outputResult(output: Output.GenView[T])(implicit tx: T): Option[Try[Obj[T]]]
def control: SControl
/** Like `react` but invokes the function immediately with the current state. */
def reactNow(fun: T => Rendering.State => Unit)(implicit tx: T): Disposable[T]
def cancel()(implicit tx: T): Unit
}
// ---- Code ----
object Code extends proc.Code.Type {
final val id = 4
final val prefix = "FScape"
final val humanName = "FScape Graph"
type Repr = Code
override def examples: ISeq[Example] = List(
Example("Plot Sine", 'p',
"""val sr = 44100.0
|val sig = SinOsc(440 / sr)
|Plot1D(sig, 500)
|""".stripMargin
)
)
def docBaseSymbol: String = "de.sciss.fscape.graph"
private[this] lazy val _init: Unit = {
proc.Code.addType(this)
import Import._
proc.Code.registerImports(id, Vec(
// doesn't work:
// "Predef.{any2stringadd => _, _}", // cf. http://stackoverflow.com/questions/7634015/
Import("de.sciss.numbers.Implicits", All),
// "de.sciss.fscape.GE",
Import("de.sciss.fscape", All),
Import("de.sciss.fscape.graph", List(Ignore("AudioFileIn"), Ignore("AudioFileOut"), Ignore("ImageFileIn"),
Ignore("ImageFileOut"), Ignore("ImageFileSeqIn"), Ignore("ImageFileSeqOut"), Wildcard)),
Import("de.sciss.fscape.lucre.graph", All),
Import("de.sciss.fscape.lucre.graph.Ops", All)
))
}
// override because we need register imports
override def init(): Unit = _init
def mkCode(source: String): Repr = Code(source)
}
final case class Code(source: String) extends proc.Code {
type In = Unit
type Out = fscape.Graph
def tpe: proc.Code.Type = Code
private val resName = "Unit"
def compileBody()(implicit compiler: proc.Code.Compiler): Future[Unit] = {
CodeImpl.compileBody[In, Out, Unit, Code](this, resName = resName)
}
def execute(in: In)(implicit compiler: proc.Code.Compiler): Out =
Graph {
CodeImpl.compileThunk[Unit](this, resName = resName, execute = true)
}
def prelude : String = "object Main {\n"
def postlude: String = "\n}\n"
def updateSource(newText: String): Code = copy(source = newText)
}
object Output extends Obj.Type {
final val typeId = 0x1000D
def read[T <: Txn[T]](in: DataInput)(implicit tx: T): Output[T] = FScapeOutputImpl.read(in)
implicit def format[T <: Txn[T]]: TFormat[T, Output[T]] = FScapeOutputImpl.format
override def readIdentifiedObj[T <: Txn[T]](in: DataInput)(implicit tx: T): Obj[T] =
FScapeOutputImpl.readIdentifiedObj(in)
trait Reader {
def key: String
def tpe: Obj.Type
def readOutputValue(in: DataInput): Any
def readOutput[T <: Txn[T]](in: DataInput)(implicit tx: T, workspace: LWorkspace[T]): Obj[T]
}
trait Writer extends de.sciss.serial.Writable {
def outputValue: Any
}
object GenView {
def apply[T <: Txn[T]](config: SControl.Config, output: Output[T], rendering: Rendering[T])
(implicit tx: T, context: GenContext[T]): GenView[T] =
FScapeOutputGenViewImpl(config, output, rendering)
}
trait GenView[T <: Txn[T]] extends proc.GenView[T] {
def output(implicit tx: T): Output[T]
}
}
trait Output[T <: Txn[T]] extends Gen[T] /* with Publisher[T, Output.Update[T]] */ {
def fscape: FScape[T]
def key : String
}
trait Outputs[T <: Txn[T]] {
def get(key: String)(implicit tx: T): Option[Output[T]]
def keys(implicit tx: T): Set[String]
def iterator(implicit tx: T): Iterator[Output[T]]
/** Adds a new output by the given key and type.
* If an output by that name and type already exists, the old output is returned.
* If the type differs, removes the old output and creates a new one.
*/
def add (key: String, tpe: Obj.Type)(implicit tx: T): Output[T]
def remove(key: String)(implicit tx: T): Boolean
}
def genViewFactory(config: SControl.Config = defaultConfig): GenView.Factory =
FScapeImpl.genViewFactory(config)
@volatile
private[this] var _defaultConfig: SControl.Config = _
private lazy val _lazyDefaultConfig: SControl.Config = {
val b = SControl.Config()
b.useAsync = false
b.terminateActors = false
b
}
/** There is currently a problem with building `Config().build` multiple times,
* in that we create new actor systems and materializers that will not be shut down,
* unless an actual rendering is performed. As a work around, use this single
* instance which will reuse one and the same actor system.
*/
def defaultConfig: SControl.Config = {
if (_defaultConfig == null) _defaultConfig = _lazyDefaultConfig
_defaultConfig
}
def defaultConfig_=(value: SControl.Config): Unit =
_defaultConfig = value
type Config = SControl.Config
val Config = SControl.Config
// ---- GraphObj ----
object GraphObj extends ExprTypeImpl[Graph, GraphObj] {
final val typeId = 100
def tryParse(value: Any): Option[Graph] = value match {
case x: Graph => Some(x)
case _ => None
}
override protected def mkConst[T <: Txn[T]](id: Ident[T], value: A)(implicit tx: T): Const[T] =
new _Const[T](id, value)
override protected def mkVar[T <: Txn[T]](targets: Targets[T], vr: LVar[T, E[T]], connect: Boolean)
(implicit tx: T): Var[T] = {
val res = new _Var[T](targets, vr)
if (connect) res.connect()
res
}
override protected def mkProgram[T <: Txn[T]](targets: Targets[T], program: LVar[T, Ex[A]],
sources: LVar[T, Vec[Event[T, Any]]],
value: LVar[T, A], connect: Boolean)
(implicit tx: T): Program[T] =
throw new UnsupportedOperationException
private final class _Const[T <: Txn[T]](val id: Ident[T], val constValue: A)
extends ConstImpl[T] with GraphObj[T]
private final class _Var[T <: Txn[T]](val targets: Targets[T], val ref: LVar[T, E[T]])
extends VarImpl[T] with GraphObj[T]
final val valueName = "fscape.Graph"
override def defaultValue: A = emptyGraph
/** A format for graphs. */
object valueFormat extends ConstFormat[Graph] {
private final val SER_VERSION = 0x5347
def write(v: Graph, out: DataOutput): Unit = {
out.writeShort(SER_VERSION)
val ref = new Graph.RefMapOut(out)
ref.writeIdentifiedGraph(v)
}
def read(in: DataInput): Graph = {
val cookie = in.readShort()
require(cookie == SER_VERSION, s"Unexpected cookie $cookie")
val ref = new Graph.RefMapIn(in)
ref.readIdentifiedGraph()
}
}
private final val emptyCookie = 4
override protected def readCookie[T <: Txn[T]](in: DataInput, cookie: Byte)
(implicit tx: T): E[T] =
cookie match {
case `emptyCookie` =>
val id = tx.readId(in)
new Predefined(id, cookie)
case _ => super.readCookie(in, cookie)
}
private val emptyGraph = Graph(())
def empty[T <: Txn[T]](implicit tx: T): E[T] = apply(emptyCookie )
private def apply[T <: Txn[T]](cookie: Int)(implicit tx: T): E[T] = {
val id = tx.newId()
new Predefined(id, cookie)
}
private final class Predefined[T <: Txn[T]](val id: Ident[T], cookie: Int)
extends GraphObj[T] with Expr.Const[T, Graph] {
def event(slot: Int): Event[T, Any] = throw new UnsupportedOperationException
def tpe: Obj.Type = GraphObj
def copy[Out <: Txn[Out]]()(implicit tx: T, txOut: Out, context: Copy[T, Out]): Elem[Out] =
new Predefined(txOut.newId(), cookie) // .connect()
def write(out: DataOutput): Unit = {
out.writeInt(tpe.typeId)
out.writeByte(cookie)
id.write(out)
}
def value(implicit tx: T): Graph = constValue
def changed: EventLike[T, MChange[Graph]] = DummyEvent()
def dispose()(implicit tx: T): Unit = ()
def constValue: Graph = cookie match {
case `emptyCookie` => emptyGraph
case _ => sys.error(s"Unknown constant graph $cookie")
}
}
}
trait GraphObj[T <: Txn[T]] extends Expr[T, Graph]
}
/** The `FScape` trait is the basic entity representing a sound process. */
trait FScape[T <: Txn[T]] extends Obj[T] with Publisher[T, FScape.Update[T]] {
/** The variable synth graph function of the process. */
def graph: FScape.GraphObj.Var[T]
def outputs: FScape.Outputs[T]
def run(config: SControl.Config = FScape.defaultConfig, attr: Runner.Attr[T] = Runner.emptyAttr[T])
(implicit tx: T, universe: Universe[T]): FScape.Rendering[T]
}
| Sciss/FScape-next | lucre/shared/src/main/scala/de/sciss/proc/FScape.scala | Scala | agpl-3.0 | 16,503 |
package satisfaction
package hadoop
package hive
import ms.MetaStore
import ms.HiveTable
import ms.HiveDataOutput
import ms.HiveTablePartitionGroup
import satisfaction._
import scala.io.Source
import _root_.org.apache.hadoop.hive.conf.HiveConf
/**
* A Hive Goal executes a Hive Query
* to produce either a Hive Table,
* or a set of partitions in a HiveTable
*/
object HiveGoal {
def apply(name: String,
queryResource: String,
hiveOutput: HiveDataOutput,
depends: Set[(Witness => Witness, Goal)] = Set.empty )
(implicit track : Track, hiveConf : HiveConf)
: Goal = {
val tblVariables = hiveOutput match {
case tbl : HiveTable =>
tbl.ms.getVariablesForTable(tbl.dbName, tbl.tblName)
case partitionGrp : HiveTablePartitionGroup =>
partitionGrp.variables
}
val tblOutputs = collection.Set(hiveOutput.asInstanceOf[Evidence])
val hiveFactory : SatisfierFactory = Goal.SatisfierFactoryFromFunc( () => {
println(s" HIVE GOAL CREATING NEW HIVE SATISFIER ")
val hs = new HiveSatisfier(queryResource,hiveConf)
println(s" HIVE GOAL CREATING NEW HIVE SATISFIER $hs")
hs
})
println(s" HIVE GOAL SATISFIER FACTIRO = $hiveFactory")
new Goal(name = name,
satisfierFactory = hiveFactory,
variables = tblVariables,
depends,
evidence = tblOutputs)
}
}
| jeromebanks/satisfaction | modules/hive/src/main/scala/satisfaction/hadoop/hive/HiveGoal.scala | Scala | apache-2.0 | 1,559 |
package edu.gemini.mascot.gui.contour
import edu.gemini.ags.gems.mascot.Strehl
import breeze.linalg._
import breeze.util._
/**
* Utility class to create a contour plot from a Strehl object.
*/
object StrehlContourPlot {
// Create and return a contour plot for the given Strehl object.
def create(s: Strehl, size: Int) : ContourPlot = {
val m = s.strehl_map
val numCols = m.cols
val data = (for (i <- 0 until numCols) yield m(i, ::).t.toDenseVector.toArray).toArray
val cmap = ColorMap.getColormap("YlGn", ContourPlot.N_CONTOURS - 1, true)
ContourPlot.createPlot(size, size, data, cmap)
}
}
| fnussber/ocs | bundle/jsky.app.ot/src/main/scala/edu/gemini/mascot/gui/contour/StrehlContourPlot.scala | Scala | bsd-3-clause | 622 |
package org.bfn.ninetynineprobs
import org.scalatest._
class P73Spec extends UnitSpec {
// TODO
}
| bfontaine/99Scala | src/test/scala/P73Spec.scala | Scala | mit | 105 |
package org.embulk.input.dynamodb
import com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException
import org.embulk.config.{ConfigException, ConfigSource}
import org.embulk.input.dynamodb.aws.AwsCredentials
import org.embulk.input.dynamodb.testutil.EmbulkTestBase
import org.hamcrest.CoreMatchers._
import org.hamcrest.MatcherAssert.assertThat
import org.junit.{Assert, Test}
class AwsCredentialsTest extends EmbulkTestBase {
private val runAwsCredentialsTest: Boolean = Option(
System.getenv("RUN_AWS_CREDENTIALS_TEST")
) match {
case Some(x) =>
if (x == "false") false
else true
case None => true
}
private lazy val EMBULK_DYNAMODB_TEST_ACCESS_KEY =
getEnvironmentVariableOrShowErrorMessage("EMBULK_DYNAMODB_TEST_ACCESS_KEY")
private lazy val EMBULK_DYNAMODB_TEST_SECRET_KEY =
getEnvironmentVariableOrShowErrorMessage("EMBULK_DYNAMODB_TEST_SECRET_KEY")
private lazy val EMBULK_DYNAMODB_TEST_PROFILE_NAME =
getEnvironmentVariableOrShowErrorMessage(
"EMBULK_DYNAMODB_TEST_PROFILE_NAME"
)
private lazy val EMBULK_DYNAMODB_TEST_ASSUME_ROLE_ROLE_ARN =
getEnvironmentVariableOrShowErrorMessage(
"EMBULK_DYNAMODB_TEST_ASSUME_ROLE_ROLE_ARN"
)
def doTest(inConfig: ConfigSource): Unit = {
val task: PluginTask = inConfig.loadConfig(classOf[PluginTask])
val provider = AwsCredentials(task).createAwsCredentialsProvider
val cred = provider.getCredentials
assertThat(cred.getAWSAccessKeyId, notNullValue())
assertThat(cred.getAWSSecretKey, notNullValue())
}
def defaultInConfig: ConfigSource = {
loadConfigSourceFromYamlString(s"""
|type: dynamodb
|region: us-east-1
|table: hoge
|operation: scan
|columns:
| - {name: key1, type: string}
| - {name: key2, type: long}
| - {name: value1, type: string}
|""".stripMargin)
}
@deprecated(since = "0.3.0")
@Test
def notSetAuthMethod_SetCredentials_deprecated(): Unit =
if (runAwsCredentialsTest) {
val inConfig: ConfigSource = defaultInConfig
.set("access_key", EMBULK_DYNAMODB_TEST_ACCESS_KEY)
.set("secret_key", EMBULK_DYNAMODB_TEST_SECRET_KEY)
doTest(inConfig)
}
@Test
def notSetAuthMethod_SetCredentials(): Unit = if (runAwsCredentialsTest) {
val inConfig: ConfigSource = defaultInConfig
.set("access_key_id", EMBULK_DYNAMODB_TEST_ACCESS_KEY)
.set("secret_access_key", EMBULK_DYNAMODB_TEST_SECRET_KEY)
doTest(inConfig)
}
@deprecated(since = "0.3.0")
@Test
def setAuthMethod_Basic_deprecated(): Unit = if (runAwsCredentialsTest) {
val inConfig: ConfigSource = defaultInConfig
.set("auth_method", "basic")
.set("access_key", EMBULK_DYNAMODB_TEST_ACCESS_KEY)
.set("secret_key", EMBULK_DYNAMODB_TEST_SECRET_KEY)
doTest(inConfig)
}
@Test
def setAuthMethod_Basic(): Unit = if (runAwsCredentialsTest) {
val inConfig: ConfigSource = defaultInConfig
.set("auth_method", "basic")
.set("access_key_id", EMBULK_DYNAMODB_TEST_ACCESS_KEY)
.set("secret_access_key", EMBULK_DYNAMODB_TEST_SECRET_KEY)
doTest(inConfig)
}
@deprecated(since = "0.3.0")
@Test
def throwIfSetAccessKeyAndAccessKeyId(): Unit = if (runAwsCredentialsTest) {
val inConfig: ConfigSource = defaultInConfig
.set("auth_method", "basic")
.set("access_key", EMBULK_DYNAMODB_TEST_ACCESS_KEY)
.set("access_key_id", EMBULK_DYNAMODB_TEST_ACCESS_KEY)
.set("secret_key", EMBULK_DYNAMODB_TEST_SECRET_KEY)
Assert.assertThrows(classOf[ConfigException], () => {
doTest(inConfig)
})
}
@deprecated(since = "0.3.0")
@Test
def throwIfSetSecretKeyAndSecretAccessKeyId(): Unit =
if (runAwsCredentialsTest) {
val inConfig: ConfigSource = defaultInConfig
.set("auth_method", "basic")
.set("access_key", EMBULK_DYNAMODB_TEST_ACCESS_KEY)
.set("secret_key", EMBULK_DYNAMODB_TEST_SECRET_KEY)
.set("secret_access_key", EMBULK_DYNAMODB_TEST_SECRET_KEY)
Assert.assertThrows(classOf[ConfigException], () => {
doTest(inConfig)
})
}
@Test
def setAuthMethod_Basic_NotSet(): Unit = {
val inConfig: ConfigSource = defaultInConfig
.set("auth_method", "basic")
Assert.assertThrows(classOf[ConfigException], () => {
doTest(inConfig)
})
}
@Test
def setAuthMethod_Env(): Unit = if (runAwsCredentialsTest) {
    // NOTE: Requires env vars such as 'AWS_ACCESS_KEY_ID' to be set when testing.
val inConfig: ConfigSource = defaultInConfig
.set("auth_method", "env")
doTest(inConfig)
}
@Test
def setAuthMethod_Profile(): Unit = if (runAwsCredentialsTest) {
    // NOTE: Requires credentials to be configured in '~/.aws' when testing.
val inConfig: ConfigSource = defaultInConfig
.set("auth_method", "profile")
.set("profile_name", EMBULK_DYNAMODB_TEST_PROFILE_NAME)
doTest(inConfig)
}
@Test
def setAuthMethod_Profile_NotExistProfileName(): Unit = {
val inConfig: ConfigSource = defaultInConfig
.set("auth_method", "profile")
.set("profile_name", "DO_NOT_EXIST")
Assert.assertThrows(classOf[IllegalArgumentException], () => {
doTest(inConfig)
})
}
@Test
def setAuthMethod_assume_role(): Unit = if (runAwsCredentialsTest) {
val inConfig: ConfigSource = defaultInConfig
.set("auth_method", "assume_role")
.set("role_arn", EMBULK_DYNAMODB_TEST_ASSUME_ROLE_ROLE_ARN)
.set("role_session_name", "dummy")
doTest(inConfig)
}
@Test
def setAuthMethod_assume_role_NotExistRoleArn(): Unit =
if (runAwsCredentialsTest) {
val inConfig: ConfigSource = defaultInConfig
.set("auth_method", "assume_role")
.set("role_arn", "DO_NOT_EXIST")
.set("role_session_name", "dummy")
Assert.assertThrows(classOf[AWSSecurityTokenServiceException], () => {
doTest(inConfig)
})
}
}
|
lulichn/embulk-input-dynamodb
|
src/test/scala/org/embulk/input/dynamodb/AwsCredentialsTest.scala
|
Scala
|
mit
| 6,011
|
package com.gu.mobile.content.notifications.metrics
import com.amazonaws.services.cloudwatch.AmazonCloudWatch
import com.amazonaws.services.cloudwatch.model.PutMetricDataRequest
import org.mockito.Mockito._
import org.mockito.{ ArgumentCaptor, Matchers }
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{ MustMatchers, WordSpecLike }
import org.specs2.specification.Scope
class MetricsActorSpec extends WordSpecLike with MockitoSugar with MustMatchers {
"The Metric Actor Logic" should {
"not call cloudwatch if there is not data" in new MetricActorScope {
actorLogic.aggregatePoint(Nil)
verify(mockCloudWatch, times(0)).putMetricData(Matchers.any[PutMetricDataRequest])
}
"call cloudwatch once if there's one namespace with less than 20 points" in new MetricActorScope {
val metrics = List(
new MetricDataPoint("test", "m1", 0d),
new MetricDataPoint("test", "m1", 1d),
new MetricDataPoint("test", "m1", 2d)
)
actorLogic.aggregatePoint(metrics)
val requestCaptor = ArgumentCaptor.forClass(classOf[PutMetricDataRequest])
verify(mockCloudWatch, times(1)).putMetricData(requestCaptor.capture())
val metricData = requestCaptor.getValue.getMetricData
metricData must have size 1
metricData.get(0).getStatisticValues.getSampleCount mustEqual 3d
}
"call cloudwatch once but not aggregate if two metrics are recieved " in new MetricActorScope {
val metrics = List(
new MetricDataPoint("test", "m1", 0d),
new MetricDataPoint("test", "m1", 1d),
new MetricDataPoint("test", "m1", 2d),
new MetricDataPoint("test", "m2", 5d),
new MetricDataPoint("test", "m2", 6d)
)
actorLogic.aggregatePoint(metrics)
val requestCaptor = ArgumentCaptor.forClass(classOf[PutMetricDataRequest])
verify(mockCloudWatch, times(1)).putMetricData(requestCaptor.capture())
val metricData = requestCaptor.getValue.getMetricData
metricData must have size 2
metricData.get(0).getStatisticValues.getSampleCount mustEqual 3d
metricData.get(1).getStatisticValues.getSampleCount mustEqual 2d
}
"call cloudwatch once if there's more than one metric" in new MetricActorScope {
val metrics = List(
MetricDataPoint("test", "m1", 0d),
MetricDataPoint("test", "m2", 1d),
MetricDataPoint("test", "m3", 2d)
)
actorLogic.aggregatePoint(metrics)
verify(mockCloudWatch, times(1)).putMetricData(Matchers.any[PutMetricDataRequest])
}
"call cloudwatch as many times as we have namespaces" in new MetricActorScope {
val metrics = List(
MetricDataPoint("namespace1", "m1", 0d),
MetricDataPoint("namespace2", "m2", 1d),
MetricDataPoint("namespace2", "m1", 2d)
)
actorLogic.aggregatePoint(metrics)
verify(mockCloudWatch, times(2)).putMetricData(Matchers.any[PutMetricDataRequest])
}
"aggregate points into a MetricDatum" in new MetricActorScope {
val metrics = List(
MetricDataPoint("namespace1", "m1", 0d),
MetricDataPoint("namespace1", "m1", 1d),
MetricDataPoint("namespace1", "m1", 2d)
)
actorLogic.aggregatePoint(metrics)
val requestCaptor = ArgumentCaptor.forClass(classOf[PutMetricDataRequest])
verify(mockCloudWatch, times(1)).putMetricData(requestCaptor.capture())
val metricDataList = requestCaptor.getValue.getMetricData
metricDataList must have size 1
val metricData = metricDataList.get(0)
metricData.getMetricName mustEqual "m1"
val statisticValues = metricData.getStatisticValues
statisticValues.getSum mustEqual 3d
statisticValues.getMinimum mustEqual 0d
statisticValues.getMaximum mustEqual 2d
statisticValues.getSampleCount mustEqual 3d
}
"aggregate points into batches if there are more than 20 metrics per namespace" in new MetricActorScope {
val metrics = (1 to 21).toList.map { index =>
MetricDataPoint("namespace1", s"m$index", index)
}
actorLogic.aggregatePoint(metrics)
val requestCaptor = ArgumentCaptor.forClass(classOf[PutMetricDataRequest])
verify(mockCloudWatch, times(2)).putMetricData(Matchers.any[PutMetricDataRequest])
}
"not aggregate points into multiple batches if there are 20 metrics or less per namespace" in new MetricActorScope {
val metrics = (1 to 20).toList.map { index =>
MetricDataPoint("namespace1", s"m$index", index)
}
actorLogic.aggregatePoint(metrics)
val requestCaptor = ArgumentCaptor.forClass(classOf[PutMetricDataRequest])
verify(mockCloudWatch, times(1)).putMetricData(Matchers.any[PutMetricDataRequest])
}
}
trait MetricActorScope extends Scope {
val mockCloudWatch = mock[AmazonCloudWatch]
val actorLogic = new MetricActorLogic {
override def cloudWatch: AmazonCloudWatch = mockCloudWatch
override val stage: String = "CODE"
}
}
}
|
guardian/mobile-notifications-content
|
src/test/scala/com/gu/mobile/content/notifications/metrics/MetricsActorSpec.scala
|
Scala
|
apache-2.0
| 5,010
|
/**
* Copyright 2017 Interel
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package core3.http.controllers
import core3.security.UserTokenBase
import core3.utils.ActionScope
import play.api.mvc._
import scala.concurrent.Future
/**
* Play controller trait for implementing non-user services.
*/
trait ServiceControllerBase[T <: UserTokenBase] extends InjectedController {
/**
* User-aware action generator, for implementing actions that require both (non-interactive) client and user auth.
*
* @param requiredScope the required client scope
* @param okHandler the handler to be executed if access to the resource is allowed
* @param unauthorizedHandler the handler to be executed if access to the resource is not allowed
* @param forbiddenHandler the handler to be executed if the client does not have the required scope
* @return the generated action
*/
def UserAwareAction(
requiredScope: ActionScope,
okHandler: (Request[AnyContent], T) => Future[Result],
unauthorizedHandler: Option[(Request[AnyContent]) => Future[Result]] = None,
forbiddenHandler: Option[(Request[AnyContent]) => Future[Result]] = None
): Action[AnyContent]
/**
* Client-aware action generator, for implementing actions that require client-only authentication and authorization.
*
* @param requiredScope the required client scope
* @param okHandler the handler to be executed if access to the resource is allowed
* @param unauthorizedHandler the handler to be executed if access to the resource is not allowed
* @param forbiddenHandler the handler to be executed if the client does not have the required scope
* @return the generated action
*/
def ClientAwareAction(
requiredScope: ActionScope,
okHandler: (Request[AnyContent], String) => Future[Result],
unauthorizedHandler: Option[(Request[AnyContent]) => Future[Result]] = None,
forbiddenHandler: Option[(Request[AnyContent]) => Future[Result]] = None
): Action[AnyContent]
/**
* Public action generator, for implementing routes/services that do NOT require authentication and authorization.
*
* @param okHandler the handler to be executed
* @return the generated action
*/
def PublicAction(okHandler: (Request[AnyContent], Option[String]) => Future[Result]): Action[AnyContent]
}
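// A minimal sketch of the handler shapes the generators above expect (illustration
// only: the object and value names are arbitrary, and the returned results are
// placeholder values built with play.api.mvc.Results).
object ServiceControllerHandlerShapes {
  import scala.concurrent.Future
  import play.api.mvc.{AnyContent, Request, Result, Results}

  // okHandler for PublicAction: receives the request and, if available, the caller's client id.
  val publicOk: (Request[AnyContent], Option[String]) => Future[Result] =
    (request, clientId) => {
      val caller = clientId.getOrElse("anonymous")
      Future.successful(Results.Ok("client=" + caller))
    }

  // Shape shared by the optional unauthorizedHandler / forbiddenHandler parameters above.
  val deny: Request[AnyContent] => Future[Result] =
    _ => Future.successful(Results.Unauthorized)
}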
|
Interel-Group/core3
|
src/main/scala/core3/http/controllers/ServiceControllerBase.scala
|
Scala
|
apache-2.0
| 2,911
|
/**
* Copyright 2012 Jeffrey Cameron
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.pickles.crawler
import org.scalatest.FunSpec
import org.scalatest.matchers.ShouldMatchers
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
/**
* @author jeffrey
*
*/
@RunWith(classOf[JUnitRunner])
class RelevantFileDeterminerSpec extends FunSpec with ShouldMatchers {
describe("A RelevantFileDeterminer") {
it("should determine file with an extension of .feature are relevant") {
val builder = FileSystemBuilder.build()
builder.addFolder("ram://features/")
val file = builder.addFile("ram://features/my.feature")
val relevantFileDeterminer = new RelevantFileDeterminer()
relevantFileDeterminer.isRelevant(file) should be(true)
}
it("should determine file with an extension of .markdown are relevant") {
val builder = FileSystemBuilder.build()
val file = builder.addFile("ram://features/my.markdown")
val relevantFileDeterminer = new RelevantFileDeterminer()
relevantFileDeterminer.isRelevant(file) should be(true)
}
it("should determine file with an extension of .mdown are relevant") {
val builder = FileSystemBuilder.build()
val file = builder.addFile("ram://features/my.mdown")
val relevantFileDeterminer = new RelevantFileDeterminer()
relevantFileDeterminer.isRelevant(file) should be(true)
}
it("should determine file with an extension of .mkdn are relevant") {
val builder = FileSystemBuilder.build()
val file = builder.addFile("ram://features/my.mkdn")
val relevantFileDeterminer = new RelevantFileDeterminer()
relevantFileDeterminer.isRelevant(file) should be(true)
}
it("should determine file with an extension of .md are relevant") {
val builder = FileSystemBuilder.build()
val file = builder.addFile("ram://features/my.md")
val relevantFileDeterminer = new RelevantFileDeterminer()
relevantFileDeterminer.isRelevant(file) should be(true)
}
it("should determine file with an extension of .mdwn are relevant") {
val builder = FileSystemBuilder.build()
val file = builder.addFile("ram://features/my.mdwn")
val relevantFileDeterminer = new RelevantFileDeterminer()
relevantFileDeterminer.isRelevant(file) should be(true)
}
it("should determine file with an extension of .mdtxt are relevant") {
val builder = FileSystemBuilder.build()
val file = builder.addFile("ram://features/my.mdtxt")
val relevantFileDeterminer = new RelevantFileDeterminer()
relevantFileDeterminer.isRelevant(file) should be(true)
}
it("should determine file with an extension of .mdtext are relevant") {
val builder = FileSystemBuilder.build()
val file = builder.addFile("ram://features/my.mdtext")
val relevantFileDeterminer = new RelevantFileDeterminer()
relevantFileDeterminer.isRelevant(file) should be(true)
}
it("should determine file with an extension of .text are relevant") {
val builder = FileSystemBuilder.build()
val file = builder.addFile("ram://features/my.text")
val relevantFileDeterminer = new RelevantFileDeterminer()
relevantFileDeterminer.isRelevant(file) should be(true)
}
it("should determine file with an extension of .txt are relevant") {
val builder = FileSystemBuilder.build()
val file = builder.addFile("ram://features/my.txt")
val relevantFileDeterminer = new RelevantFileDeterminer()
relevantFileDeterminer.isRelevant(file) should be(true)
}
it("should determine folders are not relevant") {
val builder = FileSystemBuilder.build()
val folder = builder.addFolder("ram://features/")
val relevantFileDeterminer = new RelevantFileDeterminer()
relevantFileDeterminer.isRelevant(folder) should be(false)
}
}
}
|
picklesdoc/pickles-jvm
|
core/src/test/scala/org/pickles/crawler/RelevantFileDeterminerSpec.scala
|
Scala
|
apache-2.0
| 4,432
|
/**
* Copyright (c) 2014-2016 Tim Bruijnzeels
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of this software, nor the names of its contributors, nor
* the names of the contributors' employers may be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package nl.bruijnzeels.tim.rpki.app.web.controllers
import nl.bruijnzeels.tim.rpki.app.main.Dsl
import nl.bruijnzeels.tim.rpki.app.web.views.HomeView
import nl.bruijnzeels.tim.rpki.ca.{CertificateAuthority, CertificateAuthorityCommandDispatcher}
import nl.bruijnzeels.tim.rpki.publication.messages.ReferenceHash
import nl.bruijnzeels.tim.rpki.publication.server.PublicationServerCommandDispatcher
import org.scalatra.{FlashMapSupport, ScalatraBase}
trait ApplicationController extends ScalatraBase with FlashMapSupport {
def currentTa = CertificateAuthorityCommandDispatcher.load(Dsl.TrustAnchorId).get
def currentPublicationServer = PublicationServerCommandDispatcher.load(Dsl.PublicationServerId).get
val rrdpFileStore = Dsl.current.rrdpFileStore
val TrustAnchorCertificatePath = "ta/ta.cer"
  val RrdpNotifyPath = "notify/notify.xml"
  val RrdpFilesPath = "rrdp/"
get("/") {
new HomeView(currentTa, currentPublicationServer)
}
get("/ta/ta.cer") {
contentType = "application/octet-stream"
response.addHeader("Pragma", "public")
response.addHeader("Cache-Control", "no-cache")
response.getOutputStream().write(currentTa.resourceClasses.get(CertificateAuthority.DefaultResourceClassName).get.currentSigner.signingMaterial.currentCertificate.getEncoded())
}
get("/notify/notify.xml") {
contentType = "application/xml"
response.addHeader("Pragma", "public")
response.addHeader("Cache-Control", "no-cache")
response.getWriter().write(currentPublicationServer.notificationFile.toXml.toString)
}
get("/rrdp/:fileName") {
val fileName = (params("fileName"))
// Should end with .xml
val hash = ReferenceHash(fileName.stripSuffix(".xml"))
rrdpFileStore.retrieve(hash) match {
case Some(bytes) => {
contentType = "application/xml"
response.addHeader("Pragma", "public")
response.addHeader("Cache-Control", "no-cache")
response.getWriter().write(new String(bytes, "UTF-8"))
}
case None => halt(404)
}
}
}
|
timbru/rpki-ca
|
src/main/scala/nl/bruijnzeels/tim/rpki/app/web/controllers/ApplicationController.scala
|
Scala
|
bsd-3-clause
| 3,682
|
package slick.collection.heterogeneous
import scala.language.higherKinds
import scala.language.experimental.macros
import scala.annotation.unchecked.{uncheckedVariance => uv}
import scala.reflect.macros.whitebox.Context
import slick.lifted.{MappedScalaProductShape, Shape, ShapeLevel}
import scala.reflect.ClassTag
/** A heterogenous list where each element has its own type. */
sealed abstract class HList extends Product {
/** The type of this HList object */
type Self <: HList
/** The type of the first element */
type Head
/** The type of the tail of this HList */
type Tail <: HList
/** The type of a Fold operation on this HList */
type Fold[U, F[_ <: HList, _ <: U] <: U, Z <: U] <: U
// Some helper type projections to avoid type lambdas
protected[this] type TailOf[T <: HList] = T#Tail
protected[this] type HeadOf[T <: HList] = T#Head
protected[this] type PrependHead[X <: HList, Z <: HList] = Z # :: [X#Head]
/** Ignore X and increment Z by one (for counting with Fold) */
protected[this] type IncrementForFold[X, Z <: Nat] = Succ[Z]
/** Drop the first N elements from this HList and return the resulting type */
type Drop[N <: Nat] = N#Fold[HList, TailOf, Self]
/** Get the type of the Nth element of this HList */
type Apply[N <: Nat] = HeadOf[Drop[N]] // should be Drop[N]#Head (work-around for SI-5294)
/** Get the Nat type of the length of this HList */
type Length = Fold[Nat, IncrementForFold, Nat._0]
/** The type of prepending an element of type E to this HList */
type :: [E] = HCons[E, Self]
/** The type of concatenating another HList with this HList */
type ::: [L <: HList] = L#Fold[HList, PrependHead, Self]
/** Get the first element, or throw a NoSuchElementException if this HList is empty. */
def head: Head
/** Get the tail of the list, or throw a NoSuchElementException if this HList is empty. */
def tail: Tail
  /** Return this HList typed as `Self`. */
def self: Self
/** Fold the elements of this HList. */
def fold[U, F[_ <: HList, _ <: U] <: U, Z <: U](f: TypedFunction2[HList, U, U, F], z: Z): Fold[U, F, Z]
/** Check if this HList is non-empty. */
def nonEmpty: Boolean
/** Convert this HList to a `List[Any]`. */
def toList: List[Any]
/** Check if this list is empty. */
final def isEmpty = !nonEmpty
/** Get the length of this list as a `Nat`. */
@inline final def length: Length = Nat._unsafe[Length](productArity)
/** Get the length of this list as an `Int`. */
final def productArity: Int = {
var i = 0
var h = this
while(h.nonEmpty) {
i += 1
h = h.tail
}
i
}
/** Prepend an element to this HList, returning a new HList. */
@inline final def :: [@specialized E](elem: E): :: [E] = new HCons[E, Self](elem, this.asInstanceOf[Self])
/** Concatenate another HList to this HList, returning a new HList. */
final def ::: [L <: HList](l: L): ::: [L] = l.fold[HList, PrependHead, Self](
new TypedFunction2[HList, HList, HList, PrependHead] {
def apply[P1 <: HList, P2 <: HList](p1: P1, p2: P2) = p1.head :: p2
}, self)
/** Drop the first `n` elements from this HList. */
@inline final def drop [N <: Nat](n: N): Drop[N] = drop(n.value).asInstanceOf[Drop[N]]
/** Drop the first `n` elements from this HList. */
final def drop(i: Int): HList = {
var h = this
var ii = i
while(ii > 0) {
ii -= 1
h = h.tail
}
h
}
final def productElement(i: Int): Any = drop(i).head
@inline final def _unsafeApply [N <: Nat](i: Int): Apply[N] = productElement(i).asInstanceOf[Apply[N]]
/** Return the nth element from this HList, using the correct return type. */
@inline final def apply [N <: Nat](n: N): Apply[N] = _unsafeApply[N](n.value)
/** Return the nth element from this HList, using the correct return type if n is a literal, otherwise Any. */
final def apply(n: Int): Any = macro HListMacros.applyImpl
/** Evaluate a function for each element of this HList. */
final def foreach(f: Any => Unit) {
var h = this
while(h.nonEmpty) {
f(h.head)
h = h.tail
}
}
override final def toString = {
val b = new StringBuffer
foreach { v =>
v match {
case h: HList =>
b.append("(").append(v).append(")")
case _ =>
b.append(v)
}
b.append(" :: ") }
b.append("HNil").toString
}
override final lazy val hashCode: Int = toList.hashCode
override final def equals(that: Any) = that match {
case that: HList => toList == that.toList
case _ => false
}
final def canEqual(that: Any) = that.isInstanceOf[HList]
}
final object HList {
import syntax._
final class HListShape[Level <: ShapeLevel, M <: HList, U <: HList : ClassTag, P <: HList](val shapes: Seq[Shape[_, _, _, _]]) extends MappedScalaProductShape[Level, HList, M, U, P] {
def buildValue(elems: IndexedSeq[Any]) = elems.foldRight(HNil: HList)(_ :: _)
def copy(shapes: Seq[Shape[_ <: ShapeLevel, _, _, _]]) = new HListShape(shapes)
}
implicit def hnilShape[Level <: ShapeLevel] = new HListShape[Level, HNil.type, HNil.type, HNil.type](Nil)
implicit def hconsShape[Level <: ShapeLevel, M1, M2 <: HList, U1, U2 <: HList, P1, P2 <: HList](implicit s1: Shape[_ <: Level, M1, U1, P1], s2: HListShape[_ <: Level, M2, U2, P2]) =
new HListShape[Level, M1 :: M2, U1 :: U2, P1 :: P2](s1 +: s2.shapes)
}
// Separate object for macro impl to avoid dependency of companion class on scala.reflect, see https://github.com/xeno-by/sbt-example-paradise210/issues/1#issuecomment-21021396
final object HListMacros{
def applyImpl(ctx: Context { type PrefixType = HList })(n: ctx.Expr[Int]): ctx.Expr[Any] = {
import ctx.universe._
val _Succ = typeOf[Succ[_]].typeSymbol
val _Zero = reify(Zero).tree
n.tree match {
case t @ Literal(Constant(v: Int)) =>
val tt = (1 to v).foldLeft[Tree](SingletonTypeTree(_Zero)) { case (z, _) =>
AppliedTypeTree(Ident(_Succ), List(z))
}
ctx.Expr(
Apply(
TypeApply(
Select(ctx.prefix.tree, TermName("_unsafeApply")),
List(tt)
),
List(t)
)
)
case _ => reify(ctx.prefix.splice.productElement(n.splice))
}
}
}
/** A cons cell of an `HList`, containing an element type and the element */
final class HCons[@specialized +H, +T <: HList](val head: H, val tail: T) extends HList {
type Self = HCons[H @uv, T @uv]
type Head = H @uv
type Tail = T @uv
type Fold[U, F[_ <: HList, _ <: U] <: U, Z <: U] = F[Self @uv, (T @uv)#Fold[U, F, Z]]
def self = this
def fold[U, F[_ <: HList, _ <: U] <: U, Z <: U](f: TypedFunction2[HList, U, U, F], z: Z): Fold[U, F, Z] @uv =
f.apply[Self, T#Fold[U, F, Z]](self, tail.fold[U, F, Z](f, z))
def toList: List[Any] = head :: tail.toList
def nonEmpty = true
}
object HCons {
def unapply[H, T <: HList](l: HCons[H, T]) = Some((l.head, l.tail))
}
/** The empty `HList` */
final object HNil extends HList {
type Self = HNil.type
type Head = Nothing
type Tail = Nothing
type Fold[U, F[_ <: HList, _ <: U] <: U, Z <: U] = Z
def self = HNil
def head = throw new NoSuchElementException("HNil.head")
def tail = throw new NoSuchElementException("HNil.tail")
def fold[U, F[_ <: HList, _ <: U] <: U, Z <: U](f: TypedFunction2[HList, U, U, F], z: Z) = z
def toList = Nil
def nonEmpty = false
}
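/** A minimal usage sketch of the HList API defined above (illustration only; the
  * object name is arbitrary): construction with `::`, concatenation with `:::`,
  * and the Int-based `drop`. */
object HListUsageExample extends App {
  val l = 42 :: "foo" :: true :: HNil   // HCons[Int, HCons[String, HCons[Boolean, HNil.type]]]
  println(l.head)                       // 42, statically typed as Int
  println(l.tail.head)                  // foo, statically typed as String
  println(l.productArity)               // 3
  println(l.drop(2).toList)             // List(true)
  val combined = (1.0 :: HNil) ::: l    // prepends the left-hand list's elements
  println(combined.toList)              // List(1.0, 42, foo, true)
}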
|
AtkinsChang/slick
|
slick/src/main/scala/slick/collection/heterogeneous/HList.scala
|
Scala
|
bsd-2-clause
| 7,432
|
import sbt._
object Dependencies {
object Versions {
val scala = "2.11.8"
val atlas = "1.5.0-rc.6"
val iep = "0.4.2"
}
import Versions._
val atlasStandalone = "com.netflix.atlas_v1" % "atlas-standalone" % atlas
val iepModAdmin = "com.netflix.iep" % "iep-module-jmxport" % iep
val iepModArchaius2 = "com.netflix.iep" % "iep-module-jmxport" % iep
val iepModAwsMetrics = "com.netflix.iep" % "iep-module-jmxport" % iep
val iepModEureka = "com.netflix.iep" % "iep-module-jmxport" % iep
val iepModJmxPort = "com.netflix.iep" % "iep-module-jmxport" % iep
}
|
brharrington/atlas-pkg-example
|
project/Dependencies.scala
|
Scala
|
apache-2.0
| 642
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.hint
import org.apache.flink.table.api.config.TableConfigOptions
import org.apache.flink.table.api.{DataTypes, TableSchema, ValidationException}
import org.apache.flink.table.catalog.{CatalogViewImpl, ObjectPath}
import org.apache.flink.table.planner.JHashMap
import org.apache.flink.table.planner.plan.hint.OptionsHintTest.{IS_BOUNDED, Param}
import org.apache.flink.table.planner.plan.nodes.calcite.LogicalLegacySink
import org.apache.flink.table.planner.utils.{OptionsTableSink, TableTestBase, TableTestUtil, TestingStatementSet}
import org.hamcrest.Matchers._
import org.junit.Assert.{assertEquals, assertThat}
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import org.junit.runners.Parameterized.Parameters
import org.junit.{Before, Test}
import scala.collection.JavaConversions._
@RunWith(classOf[Parameterized])
class OptionsHintTest(param: Param)
extends TableTestBase {
private val util = param.utilSupplier.apply(this)
private val is_bounded = param.isBounded
@Before
def before(): Unit = {
util.tableEnv.getConfig.getConfiguration.setBoolean(
TableConfigOptions.TABLE_DYNAMIC_TABLE_OPTIONS_ENABLED,
true)
util.addTable(
s"""
|create table t1(
| a int,
| b varchar,
| c as a + 1
|) with (
| 'connector' = 'OPTIONS',
| '$IS_BOUNDED' = '$is_bounded',
| 'k1' = 'v1',
| 'k2' = 'v2'
|)
""".stripMargin)
util.addTable(
s"""
|create table t2(
| d int,
| e varchar,
| f bigint
|) with (
| 'connector' = 'OPTIONS',
| '$IS_BOUNDED' = '$is_bounded',
| 'k3' = 'v3',
| 'k4' = 'v4'
|)
""".stripMargin)
}
@Test
def testOptionsWithGlobalConfDisabled(): Unit = {
util.tableEnv.getConfig.getConfiguration.setBoolean(
TableConfigOptions.TABLE_DYNAMIC_TABLE_OPTIONS_ENABLED,
false)
expectedException.expect(isA(classOf[ValidationException]))
expectedException.expectMessage(s"OPTIONS hint is allowed only when "
+ s"${TableConfigOptions.TABLE_DYNAMIC_TABLE_OPTIONS_ENABLED.key} is set to true")
util.verifyPlan("select * from t1/*+ OPTIONS(connector='COLLECTION', k2='#v2') */")
}
@Test
def testInsertWithDynamicOptions(): Unit = {
val sql =
s"""
|insert into t1 /*+ OPTIONS(k1='#v1', k5='v5') */
|select d, e from t2
|""".stripMargin
val stmtSet = util.tableEnv.createStatementSet()
stmtSet.addInsertSql(sql)
val testStmtSet = stmtSet.asInstanceOf[TestingStatementSet]
val relNodes = testStmtSet.getOperations.map(util.getPlanner.translateToRel)
assertThat(relNodes.length, is(1))
assert(relNodes.head.isInstanceOf[LogicalLegacySink])
val sink = relNodes.head.asInstanceOf[LogicalLegacySink]
assertEquals("{k1=#v1, k2=v2, k5=v5}",
sink.sink.asInstanceOf[OptionsTableSink].props.toString)
}
@Test
def testAppendOptions(): Unit = {
util.verifyPlan("select * from t1/*+ OPTIONS(k5='v5', 'a.b.c'='fakeVal') */")
}
@Test
def testOverrideOptions(): Unit = {
util.verifyPlan("select * from t1/*+ OPTIONS(k1='#v1', k2='#v2') */")
}
@Test
def testJoinWithAppendedOptions(): Unit = {
val sql =
s"""
|select * from
|t1 /*+ OPTIONS(k5='v5', 'a.b.c'='fakeVal') */
|join
|t2 /*+ OPTIONS(k6='v6', 'd.e.f'='fakeVal') */
|on t1.a = t2.d
|""".stripMargin
util.verifyPlan(sql)
}
@Test
def testJoinWithOverriddenOptions(): Unit = {
val sql =
s"""
|select * from
|t1 /*+ OPTIONS(k1='#v1', k2='#v2') */
|join
|t2 /*+ OPTIONS(k3='#v3', k4='#v4') */
|on t1.a = t2.d
|""".stripMargin
util.verifyPlan(sql)
}
@Test
def testOptionsHintOnTableApiView(): Unit = {
val view1 = util.tableEnv.sqlQuery("select * from t1 join t2 on t1.a = t2.d")
util.tableEnv.createTemporaryView("view1", view1)
    // The table hints on the view are expected to be ignored.
val sql = "select * from view1/*+ OPTIONS(k1='#v1', k2='#v2', k3='#v3', k4='#v4') */"
util.verifyPlan(sql)
}
@Test
def testOptionsHintOnSQLView(): Unit = {
// Equivalent SQL:
// select * from t1 join t2 on t1.a = t2.d
val props = new JHashMap[String, String]
props.put("k1", "v1")
props.put("k2", "v2")
props.put("k3", "v3")
props.put("k4", "v4")
val view1 = new CatalogViewImpl(
"select * from t1 join t2 on t1.a = t2.d",
"select * from t1 join t2 on t1.a = t2.d",
TableSchema.builder()
.field("a", DataTypes.INT())
.field("b", DataTypes.STRING())
.field("c", DataTypes.INT())
.field("d", DataTypes.INT())
.field("e", DataTypes.STRING())
.field("f", DataTypes.BIGINT())
.build(),
props,
"a view table"
)
val catalog = util.tableEnv.getCatalog(util.tableEnv.getCurrentCatalog).get()
catalog.createTable(
new ObjectPath(util.tableEnv.getCurrentDatabase, "view1"),
view1,
false)
    // The table hints on the view are expected to be ignored.
val sql = "select * from view1/*+ OPTIONS(k1='#v1', k2='#v2', k3='#v3', k4='#v4') */"
util.verifyPlan(sql)
}
}
object OptionsHintTest {
val IS_BOUNDED = "is-bounded"
case class Param(utilSupplier: TableTestBase => TableTestUtil, isBounded: Boolean) {
override def toString: String = s"$IS_BOUNDED=$isBounded"
}
@Parameters(name = "{index}: {0}")
def parameters(): Array[Param] = {
Array(
Param(_.batchTestUtil(), isBounded = true),
Param(_.streamTestUtil(), isBounded = false))
}
}
|
hequn8128/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/hint/OptionsHintTest.scala
|
Scala
|
apache-2.0
| 6,573
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.io
import scala.util.Random
import org.apache.spark.SparkFunSuite
class ByteArrayChunkOutputStreamSuite extends SparkFunSuite {
test("empty output") {//空的输出
val o = new ByteArrayChunkOutputStream(1024)
assert(o.toArrays.length === 0)
}
test("write a single byte") {//写单字节
val o = new ByteArrayChunkOutputStream(1024)
o.write(10)
assert(o.toArrays.length === 1)
assert(o.toArrays.head.toSeq === Seq(10.toByte))
}
test("write a single near boundary") {//写一个单近边界
val o = new ByteArrayChunkOutputStream(10)
o.write(new Array[Byte](9))
o.write(99)
assert(o.toArrays.length === 1)
assert(o.toArrays.head(9) === 99.toByte)
}
test("write a single at boundary") {//写一个单一的边界
val o = new ByteArrayChunkOutputStream(10)
o.write(new Array[Byte](10))
o.write(99)
assert(o.toArrays.length === 2)
assert(o.toArrays(1).length === 1)
assert(o.toArrays(1)(0) === 99.toByte)
}
test("single chunk output") {//单块输出
val ref = new Array[Byte](8)
Random.nextBytes(ref)
val o = new ByteArrayChunkOutputStream(10)
o.write(ref)
val arrays = o.toArrays
assert(arrays.length === 1)
assert(arrays.head.length === ref.length)
assert(arrays.head.toSeq === ref.toSeq)
}
test("single chunk output at boundary size") {//边界大小的单块输出
val ref = new Array[Byte](10)
Random.nextBytes(ref)
val o = new ByteArrayChunkOutputStream(10)
o.write(ref)
val arrays = o.toArrays
assert(arrays.length === 1)
assert(arrays.head.length === ref.length)
assert(arrays.head.toSeq === ref.toSeq)
}
test("multiple chunk output") {//多块输出
val ref = new Array[Byte](26)
Random.nextBytes(ref)
val o = new ByteArrayChunkOutputStream(10)
o.write(ref)
val arrays = o.toArrays
assert(arrays.length === 3)
assert(arrays(0).length === 10)
assert(arrays(1).length === 10)
assert(arrays(2).length === 6)
assert(arrays(0).toSeq === ref.slice(0, 10))
assert(arrays(1).toSeq === ref.slice(10, 20))
assert(arrays(2).toSeq === ref.slice(20, 26))
}
test("multiple chunk output at boundary size") {//边界大小的多块输出
val ref = new Array[Byte](30)
Random.nextBytes(ref)
val o = new ByteArrayChunkOutputStream(10)
o.write(ref)
val arrays = o.toArrays
assert(arrays.length === 3)
assert(arrays(0).length === 10)
assert(arrays(1).length === 10)
assert(arrays(2).length === 10)
assert(arrays(0).toSeq === ref.slice(0, 10))
assert(arrays(1).toSeq === ref.slice(10, 20))
assert(arrays(2).toSeq === ref.slice(20, 30))
}
}
|
tophua/spark1.52
|
core/src/test/scala/org/apache/spark/util/io/ByteArrayChunkOutputStreamSuite.scala
|
Scala
|
apache-2.0
| 3,531
|
/*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.persistence
import com.google.inject.AbstractModule
import com.google.inject.multibindings.Multibinder
import com.netflix.iep.service.Service
class AppModule extends AbstractModule {
override def configure(): Unit = {
val serviceBinder = Multibinder.newSetBinder(binder(), classOf[Service])
serviceBinder.addBinding().to(classOf[S3CopyService])
serviceBinder.addBinding().to(classOf[LocalFilePersistService])
}
}
|
Netflix-Skunkworks/iep-apps
|
atlas-persistence/src/main/scala/com/netflix/atlas/persistence/AppModule.scala
|
Scala
|
apache-2.0
| 1,059
|
package sand.gcs.util
import com.typesafe.config.ConfigFactory
import java.io.File
/** Singleton object containing the parsed version of the configuration files. */
object Config {
private val defaultConfig = ConfigFactory.parseURL(getClass.getResource("/reference.conf"))
/** Parsed configuration file.
*
* Parsed configuration file. Defaults can be overridden by providing an "application.conf"
* file in the root project directory.
*
  * Configuration file parameters can be accessed via method calls such as getInt and getDouble.
* The parameter to pass in is a string denoting the path - e.g. gcs.logging-level.
*/
val config =
ConfigFactory.parseFile(new File("application.conf")).resolve().withFallback(defaultConfig)
}
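/** A minimal usage sketch (illustration only): the key below is the example path from
  * the comment above, and its type here is an assumption rather than something taken
  * from the real reference.conf. */
object ConfigUsageExample {
  // Values are looked up by dotted path; application.conf entries override reference.conf.
  val loggingLevel: Int =
    if (Config.config.hasPath("gcs.logging-level")) Config.config.getInt("gcs.logging-level")
    else 0
}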
|
snowland/scala-gcs
|
src/main/scala/sand/gcs/util/Config.scala
|
Scala
|
bsd-3-clause
| 765
|
package io.github.interestinglab.waterdrop.filter
import io.github.interestinglab.waterdrop.config.{Config, ConfigException, ConfigFactory}
import io.github.interestinglab.waterdrop.apis.BaseFilter
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Dataset, Row, SparkSession}
import org.apache.spark.sql.functions.col
import scala.collection.JavaConversions._
import scala.reflect.runtime.universe
import scala.util.{Failure, Success, Try}
class Table extends BaseFilter {
var config: Config = ConfigFactory.empty()
/**
* Set Config.
* */
override def setConfig(config: Config): Unit = {
this.config = config
}
/**
* Get Config.
* */
override def getConfig(): Config = {
this.config
}
override def checkConfig(): (Boolean, String) = {
Table.options.foldRight((true, ""))((option, result) => {
val (lastOptionPassed, msg) = result
lastOptionPassed match {
case true => {
option.get("name") match {
case Some(value) => {
val optName = value.asInstanceOf[String]
val required = option.getOrElse("required", false)
if (!config.hasPath(optName) && required == true) {
(false, "[" + optName + "] is requred")
} else if (config.hasPath(optName)) {
option.get("type") match {
case Some(v) => {
val optType = v.asInstanceOf[String]
optType match {
case "string" => {
Try(config.getString(optName)) match {
case Success(_) => (true, "")
case Failure(_: ConfigException.WrongType) =>
(false, "wrong type of [" + optName + "], expected: " + optType)
case Failure(ex) => (false, ex.getMessage)
}
}
case "string-list" => {
Try(config.getStringList(optName)) match {
case Success(_) => (true, "")
case Failure(_: ConfigException.WrongType) =>
(false, "wrong type of [" + optName + "], expected: " + optType)
case Failure(ex) => (false, ex.getMessage)
}
}
case "boolean" => {
Try(config.getBoolean(optName)) match {
case Success(_) => (true, "")
case Failure(_: ConfigException.WrongType) =>
(false, "wrong type of [" + optName + "], expected: " + optType)
case Failure(ex) => (false, ex.getMessage)
}
}
case "integer" => {
Try(config.getInt(optName)) match {
case Success(_) => (true, "")
case Failure(_: ConfigException.WrongType) =>
(false, "wrong type of [" + optName + "], expected: " + optType)
case Failure(ex) => (false, ex.getMessage)
}
}
case s: String => (false, "[Plugin Bug] unrecognized option type: " + s)
}
}
case None => (true, "")
}
} else {
(true, "")
}
}
case None => (true, "")
}
}
case false => result
}
})
}
override def prepare(spark: SparkSession): Unit = {
super.prepare(spark)
val defaultConfig = ConfigFactory.parseMap(
Map(
        // Cache (cache=true) the dataframe to avoid reloading data every time it is used.
        // Caching consumes memory, so avoid it for large dataframes.
        // If you need the data to stay up to date, set cache = false.
"cache" -> true,
"delimiter" -> ","
)
)
config = config.withFallback(defaultConfig)
val strRDD = spark.sparkContext
.textFile(config.getString("path"))
.map { str =>
str.split(config.getString("delimiter")).map(s => s.trim)
}
.map(s => Row.fromSeq(s))
strRDD.collect().foreach(println(_)) // debug
val fieldNames = config.getStringList("fields")
val initialSchema = fieldNames.map(name => StructField(name, StringType))
val encoder = RowEncoder(StructType(initialSchema))
var df = spark.createDataset(strRDD)(encoder)
df = config.hasPath("field_types") match {
case true => {
fieldNames.zip(config.getStringList("field_types")).foldRight(df) { (field, df1) =>
val (name, typeStr) = field
typeStr.toLowerCase.trim match {
case "string" => df1
case s: String => {
// change column type if necessary
val runtimeMirror = universe.runtimeMirror(getClass.getClassLoader)
val module =
runtimeMirror.staticModule("org.apache.spark.sql.types." + s.capitalize + "Type")
val obj = runtimeMirror.reflectModule(module)
val dataType = obj.instance.asInstanceOf[DataType]
df1.withColumn(name, col(name).cast(dataType))
}
}
}
}
case false => df
}
df.printSchema()
config.getBoolean("cache") match {
case true => df.cache()
case false =>
}
df.createOrReplaceTempView(config.getString("table_name"))
}
override def process(spark: SparkSession, df: Dataset[Row]): Dataset[Row] = {
df
}
}
object Table {
val options = List(
Map(
"name" -> "path",
"type" -> "string",
"required" -> true,
"defaultValue" -> None,
"allowedValues" -> None,
"checkers" -> List()),
Map(
"name" -> "delimiter",
"type" -> "string",
"required" -> false,
"defaultValue" -> ",",
"allowedValues" -> None,
"checkers" -> List()),
Map(
"name" -> "table_name",
"type" -> "string",
"required" -> true,
"defaultValue" -> None,
"allowedValues" -> None,
"checkers" -> List()),
Map(
"name" -> "fields",
"type" -> "string-list",
"required" -> true,
"defaultValue" -> None,
"allowedValues" -> None,
"checkers" -> List()),
Map(
"name" -> "field_types",
"type" -> "string-list",
"required" -> false,
"defaultValue" -> None,
"allowedValues" -> None,
"checkers" -> List()),
Map(
"name" -> "cache",
"type" -> "boolean",
"required" -> false,
"defaultValue" -> true,
"allowedValues" -> None,
"checkers" -> List())
)
}
|
InterestingLab/waterdrop
|
waterdrop-core/src/main/scala/io/github/interestinglab/waterdrop/filter/Table.scala
|
Scala
|
apache-2.0
| 6,939
|
/*
* ArchFriends.scala
* (Poirot)
*
* Copyright (c) 2013-2018 Hanns Holger Rutz. All rights reserved.
* Code is often based on or identical to the original JaCoP Scala wrappers by
* Krzysztof Kuchcinski and Radoslaw Szymanek.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.poirot
package examples
import Implicits._
/** A simple logic puzzle about shoe purchases.
*
* Logic Puzzle
*
* Title : Arch Friends
* Author : Mark T. Zegarelli
* Publication : Dell Logic Puzzles
* Issue : April, 1998
* Page : 7
* Stars : 1
*
* Description :
*
* Harriet, upon returning from the mall, is happily describing her
* four shoe purchases to her friend Aurora. Aurora just loves the four
* different kinds of shoes that Harriet bought (ecru espadrilles,
* fuchsia flats, purple pumps, and suede sandals), but Harriet can't
* recall at which different store (Foot Farm, Heels in a Handcart, The
* Shoe Palace, or Tootsies) she got each pair. Can you help these two
* figure out the order in which Harriet bought each pair of shoes, and
* where she bought each?
*
* @author Adam Plonka, Piotr Ogrodzki, and Radoslaw Szymanek, clean up by H. H. Rutz
*/
object ArchFriends extends App with Problem {
println("Program to solve ArchFriends problem")
  // Declaration of constants (names, variables' indexes)
val shoeNames = Vec("EcruEspadrilles", "FuchsiaFlats", "PurplePumps", "SuedeSandals")
val iFuchsiaFlats = 1; val iPurplePumps = 2; val iSuedeSandals = 3 /* iEcruEspadrilles = 0, */
val shopNames = Vec("FootFarm", "HeelsInAHandcart", "TheShoePalace", "Tootsies")
val iFootFarm = 0; val iHeelsInAHandcart = 1; val iTheShoePalace = 2; val iTootsies = 3
// Variables shoe and shop
// Each variable has a domain 1..4 as there are four different
// shoes and shops. Values 1 to 4 within variables shoe
// denote the order in which the shoes were bought.
val shoe = Vec.tabulate(4)(i => IntVar(shoeNames(i), 1, 4))
val shop = Vec.tabulate(4)(i => IntVar(shopNames(i), 1, 4))
// Each shoe, shop have to have a unique identifier.
shoe.allDifferent()
shop.allDifferent()
// Constraints given in the problem description.
// 1. Harriet bought fuchsia flats at Heels in a Handcart.
shoe(iFuchsiaFlats) #= shop(iHeelsInAHandcart)
// 2.The store she visited just after buying her purple pumps
// was not Tootsies.
// Nested constraint by applying constraint Not to constraint XplusCeqZ
// NOT( shoe(iPurplePumps) + 1 #= shop(iTootsies) )
shoe(iPurplePumps) + 1 #!= shop(iTootsies)
// 3. The Foot Farm was Harriet's second stop.
shop(iFootFarm) #= 2
// 4. Two stops after leaving The Shoe Place, Harriet
// bought her suede sandals.
shop(iTheShoePalace) + 2 #= shoe(iSuedeSandals)
val result = satisfyAll(search(shoe ++ shop, inputOrder, indomainMin))
}
|
Sciss/Poirot
|
src/test/scala/de/sciss/poirot/examples/ArchFriends.scala
|
Scala
|
agpl-3.0
| 3,035
|
/*******************************************************************************
Copyright (c) 2013-2014, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing.models.Tizen
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
import kr.ac.kaist.jsaf.analysis.cfg.{CFG, CFGExpr, InternalError}
import kr.ac.kaist.jsaf.analysis.typing.domain.{BoolFalse => F, BoolTrue => T, _}
import kr.ac.kaist.jsaf.analysis.typing.models._
import kr.ac.kaist.jsaf.analysis.typing._
import kr.ac.kaist.jsaf.analysis.typing.domain.Heap
import kr.ac.kaist.jsaf.analysis.typing.domain.Context
/* Abstract object */
object TIZENCalendarItem extends Tizen {
private val name = "CalendarItem"
/* predefined locations */
val loc_proto = newSystemRecentLoc(name + "Proto")
/* prototype */
private val prop_proto: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("CallbackObject")))),
("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
("@extensible", AbsConstValue(PropValue(T))),
("convertToString", AbsBuiltinFunc("tizen.CalendarItem.convertToString", 1)),
("clone", AbsBuiltinFunc("tizen.CalendarItem.clone", 0))
)
override def getInitList(): List[(Loc, List[(String, AbsProperty)])] = List(
(loc_proto, prop_proto)
)
override def getSemanticMap(): Map[String, SemanticFun] = {
Map(
("tizen.CalendarItem.convertToString" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
val v_1 = getArgValue(h, ctx, args, "0")
val es =
if (v_1._1._5 </ AbsString.alpha("ICALENDAR_20") || v_1._1._5 </ AbsString.alpha("VCALENDAR_20"))
Set[WebAPIException](TypeMismatchError)
else TizenHelper.TizenExceptionBot
val (h_e, ctx_e) = TizenHelper.TizenRaiseException(h, ctx, es)
((Helper.ReturnStore(h, Value(StrTop)), ctx), (he + h_e, ctxe + ctx_e))
}
)),
("tizen.CalendarItem.clone" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
val lset_env = h(SinglePureLocalLoc)("@env")._2._2
val set_addr = lset_env.foldLeft[Set[Address]](Set())((a, l) => a + locToAddr(l))
if (set_addr.size > 1) throw new InternalError("API heap allocation: Size of env address is " + set_addr.size)
val addr_env = (cp._1._1, set_addr.head)
val addr1 = cfg.getAPIAddress(addr_env, 0)
val l_r1 = addrToLoc(addr1, Recent)
val (h_1, ctx_1) = Helper.Oldify(h, ctx, addr1)
val lset_this = h(SinglePureLocalLoc)("@this")._2._2
val o_new = lset_this.foldLeft(Obj.empty)((o, l) => o + h_1(l))
val h_2 = h_1.update(l_r1, o_new)
val h_3 = h_2.update(l_r1, h_2(l_r1).update(AbsString.alpha("id"), PropValue(ObjectValue(Value(NullTop), F, T, T))))
((Helper.ReturnStore(h_3, Value(l_r1)), ctx_1), (he, ctxe))
}
))
)
}
override def getPreSemanticMap(): Map[String, SemanticFun] = {
Map()
}
override def getDefMap(): Map[String, AccessFun] = {
Map()
}
override def getUseMap(): Map[String, AccessFun] = {
Map()
}
}
|
darkrsw/safe
|
src/main/scala/kr/ac/kaist/jsaf/analysis/typing/models/Tizen/TIZENCalendarItem.scala
|
Scala
|
bsd-3-clause
| 3,545
|
import scala.io._
val in = Source.fromFile("rankedDocs").getLines zip Source.fromFile("translatedDocs").getLines
println("COSSIM,ACCIONES,ACTIONS,SHARES")
for((lf,lt) <- in) {
println(lf.substring(0,lf.indexOf(" ")) + "," +
(if(lf.contains("acciones")) { "1" } else { "0" }) + "," +
(if(lt.contains("actions")) { "1" } else { "0" }) + "," +
(if(lt.contains("shares")) { "1" } else { "0" }))
}
|
monnetproject/bliss
|
betalm/scripts/acciones.scala
|
Scala
|
bsd-3-clause
| 413
|
package pl.touk.nussknacker.ui.db
import slick.dbio.{DBIOAction, NoStream}
import slick.jdbc.{JdbcBackend, JdbcProfile}
import scala.concurrent.Future
case class DbConfig(db: JdbcBackend.Database, driver: JdbcProfile) {
def run[R](a: DBIOAction[R, NoStream, Nothing]): Future[R] = db.run(a)
}
|
TouK/nussknacker
|
ui/server/src/main/scala/pl/touk/nussknacker/ui/db/DbConfig.scala
|
Scala
|
apache-2.0
| 297
|
package polynomial
import algebra.ring.CommutativeRing
import core.InfixOps._
private [polynomial] trait PolynomialRingOps[A] {
implicit def coefficientRing: CommutativeRing[A]
implicit def param: FormalParameter
def zero: Polynomial[A] = Polynomial[A](param, coefficientRing.zero)
def one: Polynomial[A] = Polynomial[A](param, coefficientRing.one)
def plus(x: Polynomial[A], y: Polynomial[A]): Polynomial[A] = (x, y) match {
case (x, z) if z == zero => x
case (z, y) if z == zero => y
case _ =>
val sumsOfCoefficients = for {
d <- 0 to List(x.degree.toInt, y.degree.toInt).max
c1 = x.coefficient(d)
c2 = y.coefficient(d)
} yield c1.getOrElse(coefficientRing.zero) + c2.getOrElse(coefficientRing.zero)
Polynomial(param, sumsOfCoefficients.toList.reverse)
}
def negate(x: Polynomial[A]): Polynomial[A] = x match {
case z if z == zero => zero
case _ =>
val negatedCoefficients = for {
d <- 0 to x.degree.toInt
coefficient = x.coefficient(d)
} yield coefficient.get.negate
Polynomial(param, negatedCoefficients.toList.reverse)
}
def times(x: Polynomial[A], y: Polynomial[A]): Polynomial[A] = (x, y) match {
case (_, z) if z == zero => zero
case (z, _) if z == zero => zero
case _ =>
val terms = for {
d1 <- 0 to x.degree.toInt
d2 <- 0 to y.degree.toInt
c1 = x.coefficient(d1)
c2 = y.coefficient(d2)
} yield Polynomial[A](param, c1.get * c2.get :: List.fill(d1 + d2)(coefficientRing.zero))
terms.foldLeft(zero)(plus)
}
}
|
dkettlestrings/thunder
|
src/main/scala/polynomial/PolynomialRingOps.scala
|
Scala
|
gpl-3.0
| 1,619
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming.eventhubs.checkpoint
import scala.collection.mutable
import org.apache.hadoop.conf.Configuration
import org.apache.spark.eventhubscommon.{EventHubNameAndPartition, EventHubsConnector}
import org.apache.spark.eventhubscommon.progress.{PathTools, ProgressTrackerBase}
private[spark] class StructuredStreamingProgressTracker private[spark](
uid: String,
progressDir: String,
appName: String,
hadoopConfiguration: Configuration)
extends ProgressTrackerBase(progressDir, appName, hadoopConfiguration) {
private[spark] override lazy val progressDirectoryStr = PathTools.makeProgressDirectoryStr(
progressDir, appName, uid)
private[spark] override lazy val tempDirectoryStr = PathTools.makeTempDirectoryStr(progressDir,
appName, uid)
private[spark] override lazy val metadataDirectoryStr = PathTools.makeMetadataDirectoryStr(
progressDir, appName, uid)
override def eventHubNameAndPartitions: Map[String, List[EventHubNameAndPartition]] = {
val connector = StructuredStreamingProgressTracker.registeredConnectors(uid)
Map(connector.uid -> connector.connectedInstances)
}
private def initMetadataDirectory(): Unit = {
try {
val fs = metadataDirectoryPath.getFileSystem(hadoopConfiguration)
      val checkpointMetadataDirExisted = fs.exists(metadataDirectoryPath)
      if (!checkpointMetadataDirExisted) {
fs.mkdirs(metadataDirectoryPath)
}
} catch {
case ex: Exception =>
ex.printStackTrace()
throw ex
}
}
private def initProgressFileDirectory(): Unit = {
val fs = progressDirectoryPath.getFileSystem(hadoopConfiguration)
try {
val progressDirExist = fs.exists(progressDirectoryPath)
if (progressDirExist) {
val (validationPass, latestFile) = validateProgressFile(fs)
if (!validationPass) {
if (latestFile.isDefined) {
logWarning(s"latest progress file ${latestFile.get} corrupt, rebuild file...")
val latestFileTimestamp = fromPathToTimestamp(latestFile.get)
val progressRecords = collectProgressRecordsForBatch(latestFileTimestamp,
List(StructuredStreamingProgressTracker.registeredConnectors(uid)))
commit(progressRecords, latestFileTimestamp)
}
}
} else {
fs.mkdirs(progressDirectoryPath)
}
} catch {
case ex: Exception =>
ex.printStackTrace()
throw ex
}
}
override def init(): Unit = {
initProgressFileDirectory()
initMetadataDirectory()
}
}
object StructuredStreamingProgressTracker {
val registeredConnectors = new mutable.HashMap[String, EventHubsConnector]
private var _progressTrackers = new mutable.HashMap[String, StructuredStreamingProgressTracker]
private[spark] def reset(): Unit = {
registeredConnectors.clear()
    _progressTrackers.values.foreach(pt => pt.metadataCleanupFuture.cancel(true))
_progressTrackers.clear()
}
def getInstance(uid: String): ProgressTrackerBase[_ <: EventHubsConnector] =
_progressTrackers(uid)
private[spark] def initInstance(
uid: String,
progressDirStr: String,
appName: String,
hadoopConfiguration: Configuration): ProgressTrackerBase[_ <: EventHubsConnector] = {
this.synchronized {
// DirectDStream shall have singleton progress tracker
if (_progressTrackers.get(uid).isEmpty) {
_progressTrackers += uid -> new StructuredStreamingProgressTracker(uid, progressDirStr,
appName,
hadoopConfiguration)
}
_progressTrackers(uid).init()
}
_progressTrackers(uid)
}
}
|
CodingCat/spark-eventhubs
|
core/src/main/scala/org/apache/spark/sql/streaming/eventhubs/checkpoint/StructuredStreamingProgressTracker.scala
|
Scala
|
apache-2.0
| 4,457
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import scala.collection.mutable.HashMap
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.storage.RDDInfo
/**
* :: DeveloperApi ::
* Stores information about a stage to pass from the scheduler to SparkListeners.
*/
@DeveloperApi
class StageInfo(
val stageId: Int,
val attemptId: Int,
val name: String,
val numTasks: Int,
val rddInfos: Seq[RDDInfo],
val parentIds: Seq[Int],
val details: String) {
/** When this stage was submitted from the DAGScheduler to a TaskScheduler. */
var submissionTime: Option[Long] = None
/** Time when all tasks in the stage completed or when the stage was cancelled. */
var completionTime: Option[Long] = None
/** If the stage failed, the reason why. */
var failureReason: Option[String] = None
/** Terminal values of accumulables updated during this stage. */
val accumulables = HashMap[Long, AccumulableInfo]()
def stageFailed(reason: String) {
failureReason = Some(reason)
completionTime = Some(System.currentTimeMillis)
}
private[spark] def getStatusString: String = {
if (completionTime.isDefined) {
if (failureReason.isDefined) {
"failed"
} else {
"succeeded"
}
} else {
"running"
}
}
}
private[spark] object StageInfo {
/**
* Construct a StageInfo from a Stage.
*
* Each Stage is associated with one or many RDDs, with the boundary of a Stage marked by
* shuffle dependencies. Therefore, all ancestor RDDs related to this Stage's RDD through a
* sequence of narrow dependencies should also be associated with this Stage.
*/
def fromStage(stage: Stage, numTasks: Option[Int] = None): StageInfo = {
val ancestorRddInfos = stage.rdd.getNarrowAncestors.map(RDDInfo.fromRdd)
val rddInfos = Seq(RDDInfo.fromRdd(stage.rdd)) ++ ancestorRddInfos
new StageInfo(
stage.id,
stage.attemptId,
stage.name,
numTasks.getOrElse(stage.numTasks),
rddInfos,
stage.parents.map(_.id),
stage.details)
}
}
|
andrewor14/iolap
|
core/src/main/scala/org/apache/spark/scheduler/StageInfo.scala
|
Scala
|
apache-2.0
| 2,875
|
package moe.pizza.setthemred
import java.net.URLEncoder
import com.fasterxml.jackson.databind.{JsonMappingException, ObjectMapper}
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import moe.pizza.crestapi.CrestApi
import moe.pizza.eveapi.generated.eve.CharacterID
import moe.pizza.evewho.Evewho
import moe.pizza.setthemred.Types.Alert
import moe.pizza.sparkhelpers.SparkWebScalaHelpers._
import moe.pizza.eveapi.{EVEAPI, SyncableFuture}
import moe.pizza.zkapi.StatsTypes.SuperPilot
import moe.pizza.zkapi.ZKBAPI
import spark.Spark._
import spark._
import Utils._
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success, Try}
object Webapp extends App {
val configtext = getClass.getResourceAsStream("/config.json")
val OM = new ObjectMapper()
OM.registerModule(DefaultScalaModule)
val config = OM.readValue(configtext, classOf[Config])
val log = org.log4s.getLogger
val crest = new CrestApi(baseurl = config.login_url, cresturl = config.crest_url, config.clientID, config.secretKey, config.redirectUrl)
val eveapi = new EVEAPI()
val evewho = new Evewho(baseurl = "https://evewho.com/api.php")
val zkb = new ZKBAPI()
val defaultCrestScopes = List("characterContactsRead", "characterContactsWrite")
val SESSION = "session"
port(9020)
staticFileLocation("/static")
// index page
get("/", (req: Request, resp: Response) => {
req.getSession match {
case Some(s) => templates.html.base.apply("set.them.red", templates.html.main.apply(), Some(s))
case None => templates.html.base.apply("set.them.red", templates.html.landing.apply(), None)
}
})
after("/", (req: Request, resp: Response) => {
val session = req.getSession
session match {
case Some(s) => req.clearAlerts()
case _ => ()
}
})
// login redirect to use CCP auth
get("/login", (req: Request, resp: Response) => {
req.session(true)
resp.redirect(crest.redirect("", defaultCrestScopes))
})
// logout
get("/logout", (req: Request, resp: Response) => {
req.session.invalidate()
resp.redirect("/")
})
case class Pilot(characterID: Long, characterName: String)
def massAdd(s: Types.Session, name: String, pilots: List[Pilot], req: Request, standing: Int, watchlist: Boolean) = {
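    // Try to add each pilot as a contact at the requested standing; failed additions are
    // retried once at a -10 standing, and one flash message per outcome group is shown.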
pilots.map(c =>
      (c, Try { crest.contacts.createContact(s.characterID, s.accessToken, crest.contacts.createCharacterAddRequest(standing, c.characterID, c.characterName, watchlist)) })
)
.map(s => (s._1, s._2.map(_.sync(15 seconds))))
.groupBy(_._2.isSuccess)
.flatMap { kv =>
val (state, attempts) = kv
state match {
case true => attempts
case false =>
attempts.map { t =>
log.error ("failed with error %s".format (t._2.failed.get) )
(t._1, Try {
crest.contacts.createContact (s.characterID, s.accessToken, crest.contacts.createCharacterAddRequest (- 10, t._1.characterID, t._1.characterName, watchlist) )
})
}.map(s => (s._1, s._2.map(_.sync(15 seconds))))
}
}.groupBy{_._2.isSuccess}
.mapValues(_.size)
.foreach { kv =>
val (inputstatus, count) = kv
inputstatus match {
case true => req.flash(Alerts.success, "Successfully added %d contacts from %s to your watchlist.".format(count, name))
case false => req.flash(Alerts.danger, "Failed to add %d contacts from %s to your watchlist.".format(count, name))
}
}
}
// refresh our access token
before("/add/*", (req: Request, resp: Response) => {
req.getSession match {
case Some(s) =>
val refresh = crest.refresh(s.refreshToken).sync()
val newsession = s.copy(accessToken = refresh.access_token)
req.setSession(newsession)
case None => ()
}
})
// batch add
post("/add/characters", (req: Request, resp: Response) => {
val standing = req.queryParams("standing").toInt
val watchlist = Option(req.queryParams("watchlist")).isDefined
req.getSession match {
case Some(s) =>
val pilots = req.queryParams("names")
.split('\\n')
.map(_.trim)
.grouped(250)
.map(s => eveapi.eve.CharacterID(s))
.foldRight(Seq.empty[CharacterID.Row]){ (n, a) =>
n.sync().get.result ++ a
}
.map(c => Pilot(c.characterID.toLong, c.name)).toList
massAdd(s, "your list", pilots, req, standing, watchlist)
resp.redirect("/")
case None =>
resp.redirect("/")
}
()
})
post("/add/evewho", (req: Request, resp: Response) => {
val standing = req.queryParams("standing").toInt
val watchlist = Option(req.queryParams("watchlist")).isDefined
req.getSession match {
case Some(s) =>
val name = req.queryParams("corp")
val id = eveapi.eve.CharacterID(Seq(name)).sync().get.result.head.characterID.toLong
val typeOfThing = eveapi.eve.OwnerID(Seq(name)).sync().get.result.head.ownerGroupID.toInt
val evewholist = typeOfThing match {
case 2 => evewho.corporationList(id).sync().characters
case 32 => evewho.allianceList(id).sync().characters
}
massAdd(s, name, evewholist.map(c => Pilot(c.character_id, c.name)), req, standing, watchlist)
resp.redirect("/")
case None =>
resp.redirect("/")
}
()
})
post("/add/zkbsupers", (req: Request, resp: Response) => {
val standing = req.queryParams("standing").toInt
val watchlist = Option(req.queryParams("watchlist")).isDefined
val supers = Option(req.queryParams("supers")).isDefined
val titans = Option(req.queryParams("titans")).isDefined
req.getSession match {
case Some(s) =>
val name = req.queryParams("group")
if (name=="") {
req.flash(Alerts.danger, "Please enter a name.")
resp.redirect("/")
halt()
}
try {
val id = eveapi.eve.CharacterID(Seq(name)).sync().get.result.head.characterID.toLong
val typeOfThing = eveapi.eve.OwnerID(Seq(name)).sync().get.result.head.ownerGroupID.toInt
val zkblist = typeOfThing match {
case 0 =>
req.flash(Alerts.info, "Can't find an entity called %s".format(name))
List.empty[SuperPilot]
          case 1 =>
req.flash(Alerts.info, "I only found a player called %s, not a corporation or alliance".format(name))
List.empty[SuperPilot]
case 2 =>
val stats = zkb.stats.corporation(id).sync().get
val pilots = (supers, titans) match {
case (true, true) => stats.getSupers ++ stats.getTitans
case (true, false) => stats.getSupers
case (false, true) => stats.getTitans
case (false, false) => List.empty[SuperPilot]
}
if (pilots.isEmpty) {
req.flash(Alerts.info, "No pilots were found matching your query")
}
pilots
case 32 =>
val stats = zkb.stats.alliance(id).sync().get
val pilots = (supers, titans) match {
case (true, true) => stats.getSupers ++ stats.getTitans
case (true, false) => stats.getSupers
case (false, true) => stats.getTitans
case (false, false) => List.empty[SuperPilot]
}
if (pilots.isEmpty) {
req.flash(Alerts.info, "No pilots were found matching your query")
}
pilots
}
massAdd(s, name, zkblist.map(c => Pilot(c.characterID, c.characterName)), req, standing, watchlist)
} catch {
case e: JsonMappingException => req.flash(Alerts.info, "Unable to find any supercapital intel for %s".format(name))
}
resp.redirect("/")
case None =>
resp.redirect("/")
}
()
})
get("/autocomplete/:string", (req: Request, resp: Response) => {
val string = Option(URLEncoder.encode(req.params(":string"), "utf-8"))
string.map { s=>
val alliances = zkb.autocomplete(zkb.autocomplete.Filters.allianceID, s).sync()
val corps = zkb.autocomplete(zkb.autocomplete.Filters.corporationID, s).sync()
alliances ++ corps
} match {
case Some(s) => OM.writeValueAsString(s)
case None => "[]"
}
})
// callback for when CCP auth sends them back
get("/callback", (req: Request, resp: Response) => {
val code = req.queryParams("code")
val state = req.queryParams("state")
val callbackresults = crest.callback(code).sync()
val verify = crest.verify(callbackresults.access_token).sync()
val session = new Types.Session(callbackresults.access_token, callbackresults.refresh_token.get, verify.characterName, verify.characterID, List(new Alert("success", "Thanks for logging in %s".format(verify.characterName))))
req.setSession(session)
// go back to the index since we've just logged in
resp.redirect("/")
})
}
|
xxpizzaxx/set.them.red
|
src/main/scala/moe/pizza/setthemred/Webapp.scala
|
Scala
|
mit
| 9,202
|
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.index
import org.geotools.data.Query
import org.junit.runner.RunWith
import org.locationtech.geomesa.index.conf.QueryHints.RichHints
import org.locationtech.geomesa.index.planning.QueryPlanner
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.filter.Filter
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class IndexPackageObjectTest extends Specification {
"index" should {
"compute target schemas from transformation expressions" in {
val sftName = "targetSchemaTest"
val defaultSchema = "name:String,geom:Point:srid=4326,dtg:Date"
val origSFT = SimpleFeatureTypes.createType(sftName, defaultSchema)
origSFT.setDtgField("dtg")
val query = new Query(sftName, Filter.INCLUDE, Array("name", "helloName=strConcat('hello', name)", "geom"))
QueryPlanner.setQueryTransforms(query, origSFT)
val transform = query.getHints.getTransformSchema
transform must beSome
SimpleFeatureTypes.encodeType(transform.get) mustEqual "name:String,*geom:Point:srid=4326,helloName:String"
}
}
}
|
ronq/geomesa
|
geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/index/IndexPackageObjectTest.scala
|
Scala
|
apache-2.0
| 1,735
|
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import org.joda.time.LocalDate
import org.scalatest.prop.TableDrivenPropertyChecks._
import org.scalatest.prop.Tables.Table
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.ct.CountryOfRegistration
import uk.gov.hmrc.ct.accounts.frsse2008.retriever.Frsse2008AccountsBoxRetriever
import uk.gov.hmrc.ct.accounts.frsse2008.stubs.StubbedAccountsBoxRetriever
import uk.gov.hmrc.ct.computations.stubs.StubbedComputationsBoxRetriever
class LowEmissionCarsAcceptanceCriteriaSpec extends WordSpec with Matchers {
/* CPQ8 Ceased Trading
CP78 Written down value brought forward
CP666 Written down value of Special Rate Pool brought forward
CP79 First year allowance (FYA) expenditure
CP80 Other first year allowance expenditure
CP82 Additions Qualifying for writing down allowance
CP83 Expenditure qualifying for Annual Investment Allowance (AIA)
CP84 Disposal proceeds
CP673 Market value of unsold assets
CP667 Proceeds from disposals from special rate pool
CP672 Proceeds from disposals from main pool
CPaux1 FYA Rate Pool
CPaux2 Main Rate Pool
CPaux3 Special Rate Pool
CP87_Input FYA Claimed
CP87 ""
CP88 AIA Claimed
     CP89 Written down allowance claimed
CP668 Written down allowance claimed from special rate pool
CP90 Balance Allowance
CP186 Total Allowances claimed
CP91 Main pool balancing charge
CP671 Balancing charge
CP670 Special Rate pool balancing charge
CP92 Main pool written down value carried forward
CP669 Special Rate pool written down value carried forward
*/
"Low Emmission Cars calculations" should {
"calculate values for Companies Still Trading" in {
val companiesStillTrading =
Table(
("Scenario",
"LEC01",
"CPQ8", "CP78", "CP666", "CP79", "CP80", "CP82", "CP83", "CP667", "CP672", "CP87_Input","CP668", "CP670", "CP88", "CP89", "CP186", "CP91", "CP671", "CP92", "CP669"),
("Scenario 1 - Company still trading, some AIA can be claimed from the main pool, no disposals were made, user claims some but not all they're entitled to.",
List(mainRatePoolCar(100)),
Some(false), Some(50), None, Some(0), Some(0), Some(15), Some(40), None, Some(0), Some(0),Some(0), Some(0), Some(0), Some(30), Some(30), None, None, Some(175), Some(0)),
("Scenario 2 - Company still trading, some AIA, main pool allowance can be claimed from the main pool, there have been disposals on the main pool, but lower than the value of the pool. " +
"User claims some of the allowance but not all they're entitled to.",
List(mainRatePoolCar(100)),
Some(false), Some(50), None, Some(0), Some(0), Some(15), Some(40), None, Some(48), Some(0), None, Some(0), Some(0), Some(21), Some(21), None, None, Some(136), Some(0)),
("Scenario 3 - Company still trading, some AIA, there have been disposals on the main pool, higher than the value of the pool " +
"(there will be balancing charges). User can't claim anything from the main pool.",
List(mainRatePoolCar(270)),
Some(false), Some(50), None, Some(0), Some(0), Some(47), Some(69), None, Some(3000), Some(0),None, Some(0), Some(0), None, Some(0), Some(2564), Some(2564), Some(0), Some(0)),
("Scenario 4 - Company still trading, some AIA and FYA, there have been disposals on the main pool and secondary pool, " +
"higher than the value of both pools (there will be balancing charges). User can't claim anything from the main pool.",
List(mainRatePoolCar(270), specialRatePoolCar(594)),
Some(false), Some(11), Some(98), Some(31), Some(0), Some(43), Some(77), Some(2111), Some(3500), Some(0), None, Some(1419), None, None, Some(0), Some(3068), Some(3068), Some(0), Some(0)),
("Scenario 5 - Company still trading, some AIA and FYA (also FYA cars), there have been disposals on the main pool and secondary pool, " +
"the main disposals higher than the value of the main pool but the special rate disposals still leave some remaining special rate allowance " +
"to be claimed (there will be only balancing charges on the main pool). user can't claim anything from the main pool but can claim from the secondary pool.",
List(fyaRatePoolCar(25), mainRatePoolCar(50), specialRatePoolCar(600)),
Some(false), Some(11), Some(98), Some(30), Some(1), Some(43), Some(77), Some(4), Some(3500), Some(21), Some(20), Some(0), Some(64), None, Some(85), Some(3348), Some(3348), Some(0), Some(674))
)
forAll(companiesStillTrading) {
(scenario: String,
lec01: List[Car],
cpq8: Option[Boolean],
cp78: Option[Int],
cp666: Option[Int],
cp79: Option[Int],
cp80: Option[Int],
cp82: Option[Int],
cp83: Option[Int],
cp667: Option[Int],
cp672: Option[Int],
cp87_Input: Option[Int],
cp668: Option[Int],
cp670: Option[Int],
cp88: Option[Int],
cp89: Option[Int],
cp186: Option[Int],
cp91: Option[Int],
cp671: Option[Int],
cp92: Option[Int],
cp669: Option[Int]) => {
val retriever = new TestComputationsRetriever(
lec01 = lec01,
cpq8 = cpq8,
cp78 = cp78,
cp79 = cp79,
cp80 = cp80,
cp82 = cp82,
cp83 = cp83,
cp87Input = cp87_Input,
cp88 = cp88,
cp89 = cp89,
cp666 = cp666,
cp667 = cp667,
cp668 = cp668,
cp672 = cp672
) with StubbedAccountsBoxRetriever
assert(retriever.cp91().value equals cp91, clue("CP91", retriever.cp91().value, cp91))
assert(retriever.cp92().value equals cp92, clue("CP92", retriever.cp92().value, cp92))
assert(retriever.cp186().value equals cp186, clue("CP186", retriever.cp186().value, cp186))
assert(retriever.cp669().value equals cp669, clue("CP669", retriever.cp669().value, cp669))
assert(retriever.cp670().value equals cp670, clue("CP670", retriever.cp670().value, cp670))
assert(retriever.cp671().value equals cp671, clue("CP671", retriever.cp671().value, cp671))
}
}
}
val companiesNoLongerTrading =
Table(
("Scenario",
"LEC01",
"CPQ8", "CP78", "CP666", "CP674", "CP79", "CP80", "CP82", "CP83", "CP84", "CP673",
"CP90", "CP186", "CP91", "CP671", "CP92", "CP669"),
("Scenario 6 - Company not trading, higher disposals than allowances: balancing charges.",
List(fyaRatePoolCar(20), mainRatePoolCar(30), specialRatePoolCar(40)),
Some(true),
Some(20), Some(30), Some(30),Some(0), None, None, None, Some(1000), Some(300),
Some(0), Some(0), Some(1130), Some(1130), Some(0), Some(0)),
("Scenario 7 - Company not trading, lower disposals than allowances: balance allowances.",
List(fyaRatePoolCar(20), mainRatePoolCar(30), specialRatePoolCar(40)),
Some(true),
Some(500), Some(600), Some(1000),Some(0), None, None, None, Some(10), Some(5),
Some(2175), Some(2175), Some(0), Some(0), Some(0), Some(0))
)
"calculate values for Companies No Longer Trading" in {
forAll(companiesNoLongerTrading) {
(scenario: String,
lec01: List[Car],
cpq8: Option[Boolean],
cp78: Option[Int],
cp666: Option[Int],
cp674: Option[Int],
cp79: Option[Int],
cp80: Option[Int],
cp82: Option[Int],
cp83: Option[Int],
cp84: Option[Int],
cp673: Option[Int],
cp90: Option[Int],
cp186: Option[Int],
cp91: Option[Int],
cp671: Option[Int],
cp92: Option[Int],
cp669: Option[Int]) => {
val retriever = new TestComputationsRetriever(
lec01 = lec01,
cpq8 = cpq8,
cp78 = cp78,
cp79 = cp79,
cp80 = cp80,
cp82 = cp82,
cp83 = cp83,
cp84 = cp84,
cp666 = cp666,
cp673 = cp673,
cp674 = cp674
) with Frsse2008AccountsBoxRetriever
assert(retriever.cp90().value equals cp90, clue("CP90", retriever.cp90().value, cp90))
assert(retriever.cp91().value equals cp91, clue("CP91", retriever.cp91().value, cp91))
assert(retriever.cp92().value equals cp92, clue("CP92", retriever.cp92().value, cp92))
assert(retriever.cp186().value equals cp186, clue("CP186", retriever.cp186().value, cp186))
assert(retriever.cp669().value equals cp669, clue("CP669", retriever.cp669().value, cp669))
assert(retriever.cp671().value equals cp671, clue("CP671", retriever.cp671().value, cp671))
}
}
}
}
def testing(scenario: String, lec01: List[Car], cpq8: Option[Boolean], cp78: Option[Int], cp666: Option[Int], cp81_Input: Option[Int], cp82: Option[Int], cp83: Option[Int], cp667: Option[Int], cp672: Option[Int], cp87_Input: Option[Int], cp668: Option[Int], cp670: Option[Int], cp88: Option[Int], cp89: Option[Int], cp186: Option[Int], cp91: Option[Int], cp671: Option[Int], cp92: Option[Int], cp669: Option[Int]): Unit = {
}
private def clue(boxId: String, calcValue: Option[Int], expectedValue: Option[Int]) = s"Calculated value $boxId of $calcValue was not equal to expected $expectedValue"
private def fyaRatePoolCar(value: Int) = Car(regNumber = "ABC123Z", isNew = true, price = value, emissions = 110, dateOfPurchase = new LocalDate("2013-03-31"))
private def mainRatePoolCar(value: Int) = Car(regNumber = "XYZ123A", isNew = true, price = value, emissions = 160, dateOfPurchase = new LocalDate("2009-04-01"))
private def specialRatePoolCar(value: Int) = Car(regNumber = "XYZ789C", isNew = true, price = value, emissions = 161, dateOfPurchase = new LocalDate("2013-03-31"))
class TestComputationsRetriever(lec01: List[Car],
cpq8: Option[Boolean],
cp78: Option[Int] = None,
cp79: Option[Int] = None,
cp80: Option[Int] = None,
cp82: Option[Int] = None,
cp83: Option[Int] = None,
cp84: Option[Int] = None,
cp87Input: Option[Int] = None,
cp88: Option[Int] = None,
cp89: Option[Int] = None,
cp666: Option[Int] = None,
cp667: Option[Int] = None,
cp668: Option[Int] = None,
cp672: Option[Int] = None,
cp673: Option[Int] = None,
cp674: Option[Int] = None
) extends StubbedComputationsBoxRetriever {
self: Frsse2008AccountsBoxRetriever =>
override def lec01: LEC01 = LEC01(lec01)
override def cpQ8: CPQ8 = CPQ8(cpq8)
override def cp78: CP78 = CP78(cp78)
override def cp666: CP666 = CP666(cp666)
override def cp79: CP79 = CP79(cp79)
override def cp80: CP80 = CP80(cp80)
override def cp82: CP82 = CP82(cp82)
override def cp83: CP83 = CP83(cp83)
override def cp84: CP84 = CP84(cp84)
override def cp667: CP667 = CP667(cp667)
override def cp672: CP672 = CP672(cp672)
override def cp673: CP673 = CP673(cp673)
override def cp674: CP674 = CP674(cp674)
override def cp87Input: CP87Input = CP87Input(cp87Input)
override def cp88: CP88 = CP88(cp88)
override def cp89: CP89 = CP89(cp89)
override def cp668: CP668 = CP668(cp668)
override def countryOfRegistration(): CountryOfRegistration = CountryOfRegistration.EnglandWales
}
}
|
pncampbell/ct-calculations
|
src/test/scala/uk/gov/hmrc/ct/computations/LowEmissionCarsAcceptanceCriteriaSpec.scala
|
Scala
|
apache-2.0
| 13,002
|
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package common
import java.text.DecimalFormat
import java.time.format.DateTimeFormatter
import java.time.LocalDate
import play.api.i18n.{Lang, Messages}
import uk.gov.hmrc.play.views.helpers.MoneyPounds
object Display {
def currencyDisplayString(amt: BigDecimal): String = {
val amount = MoneyPounds(amt)
val minus = if (amount.isNegative) "-" else ""
val str = s"£$minus${amount.quantity}"
if (str.endsWith(".00")) {
str.takeWhile(_ != '.')
}
else str
}
  def dateDisplayString(date: LocalDate)(implicit lang: Lang, messages: Messages): String = {
    if (lang.language == "cy") {
      // translate the month name through the message bundle for Welsh dates
      val monthNum = date.getMonthValue
      val welshFormatter = DateTimeFormatter.ofPattern(s"""d '${messages(s"pla.month.$monthNum")}' yyyy""")
      date.format(welshFormatter)
    } else {
      val dateFormat = DateTimeFormatter.ofPattern("d MMMM yyyy")
      date.format(dateFormat)
    }
  }
def currencyInputDisplayFormat(amt: BigDecimal): BigDecimal = {
def df(n: BigDecimal): String = new DecimalFormat("0.00").format(n).replace(".00", "")
BigDecimal(df(amt))
}
}
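// Illustrative sketch (not part of the original file): expected behaviour of
// currencyInputDisplayFormat, assuming a locale that uses '.' as the decimal
// separator; the object name below is arbitrary.
object DisplayFormatExamples {
  val wholePounds = Display.currencyInputDisplayFormat(BigDecimal("1200.00")) // ".00" is dropped -> 1200
  val withPence = Display.currencyInputDisplayFormat(BigDecimal("1200.50")) // pence are kept -> 1200.50
}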
|
hmrc/pensions-lifetime-allowance-frontend
|
app/common/Display.scala
|
Scala
|
apache-2.0
| 1,816
|
/*
* Copyright 2013 TeamNexus
*
* TeamNexus Licenses this file to you under the MIT License (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://opensource.org/licenses/mit-license.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License
*/
package com.nexus.errorhandling
import java.util.concurrent.Callable
import scala.collection.mutable.ArrayBuffer
/**
* No description given
*
* @author jk-5
*/
class ErrorReportCategory(private final val report: ErrorReport, private final val description: String) {
private final val entries = ArrayBuffer[ErrorReportCategoryEntry]()
private final val stacktrace = Array[StackTraceElement]()
def addSection(name: String, c: Callable[_ <: Any]): Unit = try{
this.addSection(name, c.call())
}catch{
case t: Throwable => this.addSection(name, t)
}
def addSection(name: String, element: Any) = this.entries += new ErrorReportCategoryEntry(name, element)
  def appendTo(builder: StringBuilder): Unit = {
builder.append("-- ").append(this.description).append(" --\\n")
builder.append("Details:")
for(entry <- this.entries){
builder.append("\\n\\t")
builder.append(entry.getName).append(": ").append(entry.getElement)
}
if(this.stacktrace != null && this.stacktrace.length > 0){
builder.append("\\nStacktrace:")
for(row <- this.stacktrace){
builder.append("\\n\\tat ").append(row.toString)
}
}
}
}
|
crvidya/nexus-scala
|
src/main/scala/com/nexus/errorhandling/ErrorReportCategory.scala
|
Scala
|
mit
| 1,776
|
package controllers
import models.Greeting
import play.api.i18n.Langs
import play.api.libs.json.Json
import play.api.mvc.{Action, Controller}
import play.twirl.api.Html
import services.GreetingService
class GreeterController(greetingService: GreetingService, langs: Langs) extends Controller {
val greetingsList = Seq(
Greeting(1, greetingService.greetingMessage("en"), "sameer"),
Greeting(2, greetingService.greetingMessage("it"), "sam")
)
def greetings = Action {
Ok(Json.toJson(greetingsList))
}
def greetInMyLanguage = Action {
Ok(greetingService.greetingMessage(langs.preferred(langs.availables).language))
}
def index = Action {
Ok(Html("<h1>Welcome</h1><p>Your new application is ready.</p>"))
}
}
|
play2-maven-plugin/play2-maven-test-projects
|
play25/scala/macwire-di-example/app/controllers/GreeterController.scala
|
Scala
|
apache-2.0
| 748
|
import collection.immutable.Seq
import org.scalatest.FunSuite
import org.scalatest.OneInstancePerTest
import org.scalamock.scalatest.MockFactory
import se.ramn.bottfarmen.simulation.entity.Position
import se.ramn.bottfarmen.simulation.TileMap
import se.ramn.bottfarmen.simulation.Scenario
import se.ramn.bottfarmen.simulation.Geography
class GeographyTest extends FunSuite {
test("positionsWithinRange") {
val target = Geography
val range = 2
val result = target.positionsWithinRange(Position(row=3, col=3), range)
val p = Position
val expected = Set(
p(row=1, col=3),
p(row=2, col=2), p(row=2, col=3), p(row=2, col=4),
p(row=3, col=1), p(row=3, col=2), p(row=3, col=3), p(row=3, col=4), p(row=3, col=5),
p(row=4, col=2), p(row=4, col=3), p(row=4, col=4),
p(row=5, col=3)
)
assert(expected === result)
}
}
|
ramn/bottfarmen
|
common/src/test/scala/GeographyTest.scala
|
Scala
|
gpl-3.0
| 871
|
package graphics
import java.awt.Color
import java.awt.Graphics
import java.util.List
import structures._
/** Holder class for drawing the gravitational resultant effect.
* @author Sean Lewis
*/
object ResultantDrawer {
/** Draws the gravitational resultant from the ball.
* @param level the level in the game
* @param g the Graphics component to draw with
*/
def draw(level: Level, g: Graphics) = {
val shift = level.getShift()
val ball = level.getBall()
val bodies = level.getBodies()
val ballCent = new Point2d(ball.getCenter().x, ball.getCenter().y)
var totalX = 0.0
var totalY = 0.0
// finds gravitational forces from the given body
def addValues(b: Body): Unit = {
val bodyCent = new Point2d(b.getCenter.x, b.getCenter.y)
val angle = CalcHelp.getAngle(ballCent, bodyCent)
val length = level.getGravityStrength * GraphicEffect.ArrowLength *
b.getRadius / ballCent.distance(bodyCent) + 5
totalX += length * math.cos(angle)
totalY -= length * math.sin(angle)
CalcHelp.map(addValues, b.getMoons())
}
// sums up all forces
CalcHelp.map(addValues, bodies)
    g.setColor(Color.blue)
val tempPt1 = ball.getCenter().translate(shift)
val tempPt2 = tempPt1.translate(totalX, totalY)
GraphicEffect.drawArrow(tempPt1, tempPt2, g)
}
}
|
splewis/GravityGolf
|
src/main/scala/graphics/ResultantDrawer.scala
|
Scala
|
mit
| 1,356
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.datastream
import org.apache.flink.streaming.api.datastream.DataStream
import org.apache.flink.streaming.api.functions.{AssignerWithPeriodicWatermarks, AssignerWithPunctuatedWatermarks}
import org.apache.flink.streaming.api.watermark.Watermark
import org.apache.flink.table.api.{TableException, TableSchema}
import org.apache.flink.table.plan.nodes.PhysicalTableSourceScan
import org.apache.flink.table.plan.schema.RowSchema
import org.apache.flink.table.planner.StreamPlanner
import org.apache.flink.table.runtime.types.CRow
import org.apache.flink.table.sources._
import org.apache.flink.table.sources.wmstrategies.{PeriodicWatermarkAssigner, PreserveWatermarks, PunctuatedWatermarkAssigner}
import org.apache.flink.table.types.utils.TypeConversions
import org.apache.flink.table.types.utils.TypeConversions.fromLegacyInfoToDataType
import org.apache.flink.table.typeutils.TimeIndicatorTypeInfo
import org.apache.flink.table.utils.TypeMappingUtils
import org.apache.calcite.plan._
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.metadata.RelMetadataQuery
import java.util.function.{Function => JFunction}
import scala.collection.JavaConverters._
/** Flink RelNode to read data from an external source defined by a [[StreamTableSource]]. */
class StreamTableSourceScan(
cluster: RelOptCluster,
traitSet: RelTraitSet,
table: RelOptTable,
tableSchema: TableSchema,
tableSource: StreamTableSource[_],
selectedFields: Option[Array[Int]])
extends PhysicalTableSourceScan(
cluster,
traitSet,
table,
tableSchema,
tableSource,
selectedFields)
with StreamScan {
override def deriveRowType(): RelDataType = {
val rowType = table.getRowType
selectedFields.map(idxs => {
val fields = rowType.getFieldList
val builder = cluster.getTypeFactory.builder()
idxs.map(fields.get).foreach(builder.add)
builder.build()
}).getOrElse(rowType)
}
override def computeSelfCost (planner: RelOptPlanner, metadata: RelMetadataQuery): RelOptCost = {
val rowCnt = metadata.getRowCount(this)
planner.getCostFactory.makeCost(rowCnt, rowCnt, rowCnt * estimateRowSize(getRowType))
}
override def copy(traitSet: RelTraitSet, inputs: java.util.List[RelNode]): RelNode = {
new StreamTableSourceScan(
cluster,
traitSet,
getTable,
tableSchema,
tableSource,
selectedFields
)
}
override def copy(
traitSet: RelTraitSet,
newTableSource: TableSource[_]): PhysicalTableSourceScan = {
new StreamTableSourceScan(
cluster,
traitSet,
getTable,
tableSchema,
newTableSource.asInstanceOf[StreamTableSource[_]],
selectedFields
)
}
override def translateToPlan(planner: StreamPlanner): DataStream[CRow] = {
val config = planner.getConfig
val inputDataStream = tableSource.getDataStream(planner.getExecutionEnvironment)
.asInstanceOf[DataStream[Any]]
val outputSchema = new RowSchema(this.getRowType)
// Fix the nullability of row type info.
val inputDataType = fromLegacyInfoToDataType(inputDataStream.getType).notNull()
val producedDataType = tableSource.getProducedDataType
// check that declared and actual type of table source DataStream are identical
if (inputDataType != producedDataType) {
throw new TableException(s"TableSource of type ${tableSource.getClass.getName} " +
s"returned a DataStream of data type $inputDataType that does not match with the " +
s"data type $producedDataType declared by the TableSource.getProducedDataType() method. " +
s"Please validate the implementation of the TableSource.")
}
val nameMapping: JFunction[String, String] = tableSource match {
case mapping: DefinedFieldMapping if mapping.getFieldMapping != null =>
new JFunction[String, String] {
override def apply(t: String): String = mapping.getFieldMapping.get(t)
}
case _ => JFunction.identity()
}
// get expression to extract rowtime attribute
val rowtimeExpression = TableSourceUtil.getRowtimeAttributeDescriptor(
tableSource,
selectedFields)
.map(desc => TableSourceUtil.getRowtimeExtractionExpression(
desc.getTimestampExtractor,
producedDataType,
TypeConversions.fromLegacyInfoToDataType(TimeIndicatorTypeInfo.ROWTIME_INDICATOR),
planner.getRelBuilder,
nameMapping
))
val fieldIndexes = TypeMappingUtils.computePhysicalIndicesOrTimeAttributeMarkers(
tableSource,
selectedFields.map(_.map(tableSchema.getTableColumn(_).get()).toList.asJava)
.getOrElse(tableSchema.getTableColumns),
true,
nameMapping
)
// ingest table and convert and extract time attributes if necessary
val ingestedTable = convertToInternalRow(
outputSchema,
inputDataStream,
fieldIndexes,
config,
rowtimeExpression)
// generate watermarks for rowtime indicator
val rowtimeDesc: Option[RowtimeAttributeDescriptor] =
TableSourceUtil.getRowtimeAttributeDescriptor(tableSource, selectedFields)
val withWatermarks = if (rowtimeDesc.isDefined) {
val rowtimeFieldIdx = outputSchema.fieldNames.indexOf(rowtimeDesc.get.getAttributeName)
val watermarkStrategy = rowtimeDesc.get.getWatermarkStrategy
watermarkStrategy match {
case p: PeriodicWatermarkAssigner =>
val watermarkGenerator = new PeriodicWatermarkAssignerWrapper(rowtimeFieldIdx, p)
ingestedTable.assignTimestampsAndWatermarks(watermarkGenerator)
case p: PunctuatedWatermarkAssigner =>
val watermarkGenerator = new PunctuatedWatermarkAssignerWrapper(rowtimeFieldIdx, p)
ingestedTable.assignTimestampsAndWatermarks(watermarkGenerator)
case _: PreserveWatermarks =>
// The watermarks have already been provided by the underlying DataStream.
ingestedTable
}
} else {
// No need to generate watermarks if no rowtime attribute is specified.
ingestedTable
}
withWatermarks
}
}
/**
* Generates periodic watermarks based on a [[PeriodicWatermarkAssigner]].
*
* @param timeFieldIdx the index of the rowtime attribute.
* @param assigner the watermark assigner.
*/
private class PeriodicWatermarkAssignerWrapper(
timeFieldIdx: Int,
assigner: PeriodicWatermarkAssigner)
extends AssignerWithPeriodicWatermarks[CRow] {
override def getCurrentWatermark: Watermark = assigner.getWatermark
override def extractTimestamp(crow: CRow, previousElementTimestamp: Long): Long = {
val timestamp: Long = crow.row.getField(timeFieldIdx).asInstanceOf[Long]
assigner.nextTimestamp(timestamp)
0L
}
}
/**
* Generates periodic watermarks based on a [[PunctuatedWatermarkAssigner]].
*
* @param timeFieldIdx the index of the rowtime attribute.
* @param assigner the watermark assigner.
*/
private class PunctuatedWatermarkAssignerWrapper(
timeFieldIdx: Int,
assigner: PunctuatedWatermarkAssigner)
extends AssignerWithPunctuatedWatermarks[CRow] {
override def checkAndGetNextWatermark(crow: CRow, ts: Long): Watermark = {
val timestamp: Long = crow.row.getField(timeFieldIdx).asInstanceOf[Long]
assigner.getWatermark(crow.row, timestamp)
}
override def extractTimestamp(element: CRow, previousElementTimestamp: Long): Long = {
0L
}
}
|
rmetzger/flink
|
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/nodes/datastream/StreamTableSourceScan.scala
|
Scala
|
apache-2.0
| 8,323
|
import org.eclipse.jetty.server.Server
import org.eclipse.jetty.webapp.WebAppContext
import org.scalatra.servlet.ScalatraListener
import pl.edu.agh.mplt.web.MPLTServlet
object JettyLauncher {
def main(args: Array[String]) {
val port = if(System.getenv("PORT") != null) System.getenv("PORT").toInt else 8080
val server = new Server(port)
val context = new WebAppContext()
context.setContextPath("/")
context.setResourceBase("src/main/webapp")
context.setEventListeners(Array(new ScalatraListener))
context.addServlet(classOf[MPLTServlet], "/*")
server.setHandler(context)
server.start()
server.join()
}
}
|
marek1840/MPLT
|
src/main/scala/JettyLauncher.scala
|
Scala
|
mit
| 655
|
package com.marcosgarciacasado.ssbatch
/**
* Value aggregation class of sum and count operations.
*
* @author Marcos García Casado
*
*/
class AggregatedTempValue (sumc : Double, countc : Double) extends Equals{
override def toString() = ""+(sum/count)
/**
* Sum value.
*/
var sum : Double = sumc
/**
* Number of values.
*/
var count : Double = countc
def canEqual(other: Any) = {
other.isInstanceOf[com.marcosgarciacasado.ssbatch.AggregatedTempValue]
}
/**
* Gets the mean of the value stored.
*
* @return the mean value.
*/
def getMean() = {
sum / count
}
override def equals(other: Any) = {
other match {
case that: com.marcosgarciacasado.ssbatch.AggregatedTempValue =>
that.canEqual(AggregatedTempValue.this) &&
that.sum == this.sum &&
that.count == this.count
case _ => false
}
}
override def hashCode() = {
(""+sum+","+count).hashCode()
}
}
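// Illustrative usage sketch (not part of the original file): aggregating two readings
// (20.0 and 25.0) as sum = 45.0 and count = 2.0; the object name below is arbitrary.
object AggregatedTempValueExample {
  val aggregated = new AggregatedTempValue(45.0, 2.0)
  val mean = aggregated.getMean() // 22.5, which is also what toString() prints
}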
|
marcos-garcia/smartsantanderdataanalysis
|
ssbatch/src/main/scala/com/marcosgarciacasado/ssbatch/AggregatedTempValue.scala
|
Scala
|
gpl-2.0
| 1,005
|
package com.peterpotts.snake
import com.peterpotts.snake.predicate.{And, Predicate}
case class Query[T](
predicate: Option[Predicate[T]],
ordering: Option[Ordering[T]],
skip: Option[Int],
limit: Option[Int]) extends Queryable[Query, T] {
override def filter(predicate: Predicate[T]) = copy(
predicate = Some(this.predicate.map(left => And(Seq(left, predicate))).getOrElse(predicate)))
override def sorted(implicit ordering: Ordering[T]) = copy(ordering = Some(ordering))
override def drop(skip: Int): Query[T] = copy(
skip = Some(this.skip.map(_ + skip).getOrElse(skip)),
limit = limit.map(_ - skip))
override def take(limit: Int): Query[T] = copy(limit = Some(this.limit.map(math.min(_, limit)).getOrElse(limit)))
def :+(that: Query[T]): Query[T] = {
val filtered = that.predicate.map(filter).getOrElse(this)
val sorted = that.ordering.map(filtered.sorted(_)).getOrElse(filtered)
val dropped = that.skip.map(sorted.drop).getOrElse(sorted)
that.limit.map(dropped.take).getOrElse(dropped)
}
}
object Query {
def empty[T] = Query[T](None, None, None, None)
}
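// Illustrative sketch (not part of the original file): composing paging with the
// combinators above; the element type Int and the object name are arbitrary.
object QueryExample {
  val page: Query[Int] = Query.empty[Int].drop(20).take(10) // skip = Some(20), limit = Some(10)
  val merged: Query[Int] = page :+ Query.empty[Int].take(5) // limit tightened to Some(5)
}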
|
peterpotts/snake
|
src/main/scala/com/peterpotts/snake/Query.scala
|
Scala
|
mit
| 1,114
|
package model
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Message class - rps
*
* Author:
* Ryan Needham
*
* Issues:
*
* Notes:
*
* A message consists of the ID of the sender, a move and an
* MVar result to set by the referee and then be collected
* by the player post-game.
*
* Everything in this class is public as it needs to be accessed
* by referees, players and potentially the controller
*
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
final class Message (id: Int, hand: Shape) {
val sender = id
val move = hand
val result = new MVar[Int]
  /**
   * beats
   *
   * @param opponent the other player's message
   *
   * Returns true if the sending player's move beats
   * the opponent's move. This is achieved by deferring
   * to the Shape class's function of the same name.
   */
def beats (opponent: Message): Boolean = {
this.move beats opponent.move
}
}
|
MyForteIsTimeTravel/RockPaperScissors
|
src/model/Message.scala
|
Scala
|
gpl-3.0
| 1,052
|
/**
* MIT License
*
* Copyright (c) 2016-2018 James Sherwood-Jones <james.sherwoodjones@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.jsherz.luskydive.dao
import java.util.UUID
import com.jsherz.luskydive.core.CommitteeMember
import com.jsherz.luskydive.json.StrippedCommitteeMember
import com.jsherz.luskydive.services.DatabaseService
import scala.concurrent.{ExecutionContext, Future}
/**
* Used to store and retrieve information about committee members.
*/
trait CommitteeMemberDao {
/**
* Find the committee record for a member.
*/
def forMember(uuid: UUID): Future[Option[CommitteeMember]]
/**
* Get active committee members, sorted by name.
*
* @return
*/
def active(): Future[Seq[StrippedCommitteeMember]]
/**
* Get a committee member with the given UUID.
*
* @param uuid
* @return
*/
def get(uuid: UUID): Future[Option[CommitteeMember]]
}
/**
* Database backed committee member DAO.
*/
class CommitteeMemberDaoImpl(protected override val databaseService: DatabaseService)(implicit ec: ExecutionContext)
extends Tables(databaseService) with CommitteeMemberDao {
import driver.api._
/**
* Get active committee members, sorted by name.
*
* @return
*/
override def active(): Future[Seq[StrippedCommitteeMember]] = {
val lookup = for {
committee <- CommitteeMembers.filter(_.locked === false).sortBy(_.name)
} yield (committee.memberUuid, committee.name)
db.run(lookup.result).map(_.map(StrippedCommitteeMember.tupled(_)))
}
/**
* Get a committee member with the given UUID.
*
* @param uuid
* @return
*/
override def get(uuid: UUID): Future[Option[CommitteeMember]] = {
db.run(CommitteeMembers.filter(_.memberUuid === uuid).result.headOption)
}
/**
* Find the committee record for a member.
*
* @param maybeUuid Member's UUID
* @return
*/
override def forMember(uuid: UUID): Future[Option[CommitteeMember]] = {
db.run(CommitteeMembers.filter(_.memberUuid === uuid).result.headOption)
}
}
|
jSherz/lsd-members
|
backend/src/main/scala/com/jsherz/luskydive/dao/CommitteeMemberDao.scala
|
Scala
|
mit
| 3,156
|
package org.apache.mesos.chronos.utils
import org.apache.mesos.chronos.scheduler.config.SchedulerConfiguration
import org.apache.mesos.chronos.scheduler.jobs.{BaseJob, DependencyBasedJob, DockerContainer, EnvironmentVariable, NetworkMode, ScheduleBasedJob, Volume, VolumeMode}
import com.fasterxml.jackson.core.JsonParser
import com.fasterxml.jackson.databind.node.ObjectNode
import com.fasterxml.jackson.databind.{DeserializationContext, JsonDeserializer, JsonNode}
import org.joda.time.Period
import scala.collection.JavaConversions._
object JobDeserializer {
var config: SchedulerConfiguration = _
}
/**
* Custom JSON deserializer for jobs.
* @author Florian Leibert (flo@leibert.de)
*/
class JobDeserializer extends JsonDeserializer[BaseJob] {
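  // The JSON shape decides the job type: a "parents" field yields a DependencyBasedJob,
  // a "schedule" field yields a ScheduleBasedJob, and otherwise a ScheduleBasedJob with
  // the immediate "R1//PT24H" schedule is created.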
def deserialize(jsonParser: JsonParser, ctxt: DeserializationContext): BaseJob = {
val codec = jsonParser.getCodec
val node = codec.readTree[JsonNode](jsonParser)
val name = node.get("name").asText
val command = node.get("command").asText
val shell =
if (node.has("shell") && node.get("shell") != null) node.get("shell").asBoolean
else true
val epsilon = {
if (node.has("epsilon")) Period.parse(node.get("epsilon").asText) else Period.seconds(JobDeserializer.config.taskEpsilon())
}
val executor =
if (node.has("executor") && node.get("executor") != null) node.get("executor").asText
else ""
val executorFlags =
if (node.has("executorFlags") && node.get("executorFlags") != null) node.get("executorFlags").asText
else ""
val retries =
if (node.has("retries") && node.get("retries") != null) node.get("retries").asInt
else 2
val owner =
if (node.has("owner") && node.get("owner") != null) node.get("owner").asText
else ""
val async =
if (node.has("async") && node.get("async") != null) node.get("async").asBoolean
else false
val disabled =
if (node.has("disabled") && node.get("disabled") != null) node.get("disabled").asBoolean
else false
val softError =
if (node.has("softError") && node.get("softError") != null) node.get("softError").asBoolean
else false
val successCount =
if (node.has("successCount") && node.get("successCount") != null) node.get("successCount").asLong
else 0L
val errorCount =
if (node.has("errorCount") && node.get("errorCount") != null) node.get("errorCount").asLong
else 0L
val lastSuccess =
if (node.has("lastSuccess") && node.get("lastSuccess") != null) node.get("lastSuccess").asText
else ""
val lastError =
if (node.has("lastError") && node.get("lastError") != null) node.get("lastError").asText
else ""
val cpus =
if (node.has("cpus") && node.get("cpus") != null && node.get("cpus").asDouble != 0) node.get("cpus").asDouble
else if (JobDeserializer.config != null) JobDeserializer.config.mesosTaskCpu()
else 0
val disk =
if (node.has("disk") && node.get("disk") != null && node.get("disk").asDouble != 0) node.get("disk").asDouble
else if (JobDeserializer.config != null) JobDeserializer.config.mesosTaskDisk()
else 0
val mem =
if (node.has("mem") && node.get("mem") != null && node.get("mem").asDouble != 0) node.get("mem").asDouble
else if (JobDeserializer.config != null) JobDeserializer.config.mesosTaskMem()
else 0
val errorsSinceLastSuccess =
if (node.has("errorsSinceLastSuccess") && node.get("errorsSinceLastSuccess") != null)
node.get("errorsSinceLastSuccess").asLong
else 0L
var uris = scala.collection.mutable.ListBuffer[String]()
if (node.has("uris")) {
for (uri <- node.path("uris")) {
uris += uri.asText()
}
}
var arguments = scala.collection.mutable.ListBuffer[String]()
if (node.has("arguments")) {
for (argument <- node.path("arguments")) {
arguments += argument.asText()
}
}
var environmentVariables = scala.collection.mutable.ListBuffer[EnvironmentVariable]()
if (node.has("environmentVariables")) {
node.get("environmentVariables").elements().map {
case node: ObjectNode =>
EnvironmentVariable(node.get("name").asText(), node.get("value").asText)
}.foreach(environmentVariables.add)
}
val highPriority =
if (node.has("highPriority") && node.get("highPriority") != null) node.get("highPriority").asBoolean()
else false
val runAsUser =
if (node.has("runAsUser") && node.get("runAsUser") != null) node.get("runAsUser").asText
else JobDeserializer.config.user()
var container: DockerContainer = null
if (node.has("container")) {
val containerNode = node.get("container")
val networkMode =
if (containerNode.has("network") && containerNode.get("network") != null)
NetworkMode.withName(containerNode.get("network").asText)
else NetworkMode.HOST
// TODO: Add support for more containers when they're added.
val volumes = scala.collection.mutable.ListBuffer[Volume]()
if (containerNode.has("volumes")) {
containerNode.get("volumes").elements().map {
case node: ObjectNode =>
val hostPath =
if (node.has("hostPath")) Option(node.get("hostPath").asText)
else None
val mode =
if (node.has("mode")) Option(VolumeMode.withName(node.get("mode").asText.toUpperCase))
else None
Volume(hostPath, node.get("containerPath").asText, mode)
}.foreach(volumes.add)
}
container = DockerContainer(containerNode.get("image").asText, volumes, networkMode)
}
var parentList = scala.collection.mutable.ListBuffer[String]()
if (node.has("parents")) {
for (parent <- node.path("parents")) {
parentList += parent.asText
}
new DependencyBasedJob(parents = parentList.toSet,
name = name, command = command, epsilon = epsilon, successCount = successCount, errorCount = errorCount,
executor = executor, executorFlags = executorFlags, retries = retries, owner = owner, lastError = lastError,
lastSuccess = lastSuccess, async = async, cpus = cpus, disk = disk, mem = mem, disabled = disabled,
errorsSinceLastSuccess = errorsSinceLastSuccess, uris = uris, highPriority = highPriority,
runAsUser = runAsUser, container = container, environmentVariables = environmentVariables, shell = shell,
arguments = arguments, softError = softError)
} else if (node.has("schedule")) {
val scheduleTimeZone = if (node.has("scheduleTimeZone")) node.get("scheduleTimeZone").asText else ""
new ScheduleBasedJob(node.get("schedule").asText, name = name, command = command,
epsilon = epsilon, successCount = successCount, errorCount = errorCount, executor = executor,
executorFlags = executorFlags, retries = retries, owner = owner, lastError = lastError,
lastSuccess = lastSuccess, async = async, cpus = cpus, disk = disk, mem = mem, disabled = disabled,
errorsSinceLastSuccess = errorsSinceLastSuccess, uris = uris, highPriority = highPriority,
runAsUser = runAsUser, container = container, scheduleTimeZone = scheduleTimeZone,
environmentVariables = environmentVariables, shell = shell, arguments = arguments, softError = softError)
} else {
/* schedule now */
new ScheduleBasedJob("R1//PT24H", name = name, command = command, epsilon = epsilon, successCount = successCount,
errorCount = errorCount, executor = executor, executorFlags = executorFlags, retries = retries, owner = owner,
lastError = lastError, lastSuccess = lastSuccess, async = async, cpus = cpus, disk = disk, mem = mem,
disabled = disabled, errorsSinceLastSuccess = errorsSinceLastSuccess, uris = uris, highPriority = highPriority,
runAsUser = runAsUser, container = container, environmentVariables = environmentVariables, shell = shell,
arguments = arguments, softError = softError)
}
}
}
|
nvoron23/chronos
|
src/main/scala/org/apache/mesos/chronos/utils/JobDeserializer.scala
|
Scala
|
apache-2.0
| 8,096
|
package com.criteo.vizatra.vizsql.js.json
import com.criteo.vizatra.vizsql.{Column, INTEGER, Schema, Table}
import org.scalatest.{FlatSpec, Matchers}
class SchemaReaderSpec extends FlatSpec with Matchers {
"apply()" should "return a schema" in {
val res = SchemaReader(
"""
|{
| "name":"schema1",
| "tables": [
| {
| "name":"table1",
| "columns": [
| {"name": "col1", "type": "int4"}
| ]
| }
| ]
|}
""".stripMargin)
res shouldEqual Schema(
"schema1",
List(
Table("table1", List(Column("col1", INTEGER())))
)
)
}
}
|
criteo/vizsql
|
js/src/test/scala/com/criteo/vizatra/vizsql/js/json/SchemaReaderSpec.scala
|
Scala
|
apache-2.0
| 680
|
package com.twitter.finagle.buoyant
import com.twitter.conversions.time._
import com.twitter.finagle._
import com.twitter.finagle.factory.{NameTreeFactory, ServiceFactoryCache}
import com.twitter.finagle.naming._
import com.twitter.finagle.stats.{DefaultStatsReceiver, StatsReceiver}
import com.twitter.finagle.util.DefaultTimer
import com.twitter.logging.Logger
import com.twitter.util._
import java.util.concurrent.atomic.AtomicReference
trait DstBindingFactory[-Req, +Rsp] extends Closable {
final def apply(dst: Dst.Path): Future[Service[Req, Rsp]] =
apply(dst, ClientConnection.nil)
def apply(dst: Dst.Path, conn: ClientConnection): Future[Service[Req, Rsp]]
def status: Status
}
object DstBindingFactory {
private[buoyant] class RefCount {
// If non-None, refcount >= 0, indicating the number of active
// references. When None, the reference count may not change.
private[this] var refcount: Option[Long] = Some(0)
private[this] def update(f: Long => Long): Option[Long] = synchronized {
refcount = refcount.map(f).filter(_ > 0)
refcount
}
def get: Option[Long] = synchronized(refcount)
def incr(): Option[Long] = update(_ + 1)
def decr(): Option[Long] = update(_ - 1)
}
/**
   * Ensures that a DstBindingFactory is only closed when all users of
* the factory have closed it.
*
* Note that acquire() / close() are only expected to be called in
* the context of process configuration and not, for example, in the
* request serving path.
*/
class RefCounted[Req, Rsp](underlying: DstBindingFactory[Req, Rsp]) {
private[this] val refCount = new RefCount
def references: Long = refCount.get.getOrElse(0)
private[this] val release = new Closable {
def close(deadline: Time) = refCount.decr() match {
case None =>
underlying.close(deadline)
case Some(c) if (c <= 0) =>
Future.exception(new IllegalStateException(s"Closing factory with $c references"))
case _ =>
Future.Unit
}
}
def acquire(): DstBindingFactory[Req, Rsp] = refCount.incr() match {
case None =>
throw new IllegalStateException("Acquiring factory after it was closed")
case Some(c) if (c <= 0) =>
throw new IllegalStateException(s"Acquiring factory with $c references")
case _ =>
// Ensure that we can only decrement once for each acquisition
// by proxying close() on the underlying RpcClientFactory.
val closable = Closable.ref(new AtomicReference(release))
new DstBindingFactory[Req, Rsp] {
def apply(dst: Dst.Path, conn: ClientConnection) = underlying(dst, conn)
def status = underlying.status
def close(deadline: Time) = closable.close(deadline)
}
}
}
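  // Illustrative usage sketch (not part of the original file): the acquire()/close()
  // pairing described above, for some existing `underlying: DstBindingFactory[Req, Rsp]`.
  //
  //   val counted = DstBindingFactory.refcount(underlying)
  //   val a = counted.acquire()
  //   val b = counted.acquire()
  //   a.close(Time.now)  // one reference remains; underlying stays open
  //   b.close(Time.now)  // last reference released; underlying.close() is invoked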
def refcount[Req, Rsp](underlying: DstBindingFactory[Req, Rsp]): RefCounted[Req, Rsp] =
new RefCounted(underlying)
/**
* A convenience type for a function that modifies (e.g. filters) a
* ServiceFactory using a T-typed value.
*/
type Mk[T, Req, Rsp] = (T, ServiceFactory[Req, Rsp]) => ServiceFactory[Req, Rsp]
object Mk {
def identity[T, Req, Rsp]: Mk[T, Req, Rsp] =
(_: T, f: ServiceFactory[Req, Rsp]) => f
}
case class Namer(interpreter: NameInterpreter) {
/** For Java compatibility */
def mk(): (Namer, Stack.Param[Namer]) = (this, Namer)
}
implicit object Namer extends Stack.Param[Namer] {
val default = Namer(DefaultInterpreter)
}
/** The capacities for each layer of dst caching. */
case class Capacity(paths: Int, trees: Int, bounds: Int, clients: Int) {
/** For Java compatibility */
def mk(): (Capacity, Stack.Param[Capacity]) = (this, Capacity)
}
implicit object Capacity extends Stack.Param[Capacity] {
val default = Capacity(1000, 1000, 1000, 1000)
}
case class BindingTimeout(timeout: Duration)
implicit object BindingTimeout extends Stack.Param[BindingTimeout] {
val default = BindingTimeout(Duration.Top)
}
case class IdleTtl(timeout: Duration)
implicit object IdleTtl extends Stack.Param[IdleTtl] {
val default = IdleTtl(10.minutes)
}
/**
* Binds a Dst to a ServiceFactory.
*
* Here, we're basically replicating the logic from Finagle's
* BindingFactory. This is done so we bind a destination before
* creating a client so that multiple requests to a single bound
* destination may share connection pools etc.
*
* The logic has been changed to account for the way residuals play
* into naming. We use the helper classes Bound and BoundTree
* instead of Name.Bound and NameTree[Name.Bound] so that we can
* control when residual paths factor into caching.
*/
class Cached[-Req, +Rsp](
mkClient: Name.Bound => ServiceFactory[Req, Rsp],
pathMk: Mk[Dst.Path, Req, Rsp] = Mk.identity[Dst.Path, Req, Rsp],
boundMk: Mk[Dst.Bound, Req, Rsp] = Mk.identity[Dst.Bound, Req, Rsp],
namer: NameInterpreter = DefaultInterpreter,
statsReceiver: StatsReceiver = DefaultStatsReceiver,
capacity: Capacity = Capacity.default,
bindingTimeout: BindingTimeout = BindingTimeout.default,
idleTtl: IdleTtl = IdleTtl.default
)(implicit timer: Timer = DefaultTimer.twitter) extends DstBindingFactory[Req, Rsp] {
private[this]type Cache[Key] = ServiceFactoryCache[Key, Req, Rsp]
def apply(dst: Dst.Path, conn: ClientConnection): Future[Service[Req, Rsp]] = {
val exc = new RequestTimeoutException(bindingTimeout.timeout, s"binding ${dst.path.show}")
pathCache(dst, conn).raiseWithin(bindingTimeout.timeout, exc)
}
// The path cache is keyed on the resolution context and
// logical rpc name. It resolves the name with the Dtab and
// dispatches connections through the tree cache.
private[this] val pathCache: Cache[Dst.Path] = {
def mk(dst: Dst.Path): ServiceFactory[Req, Rsp] = {
// dtabs aren't available when NoBrokers is thrown so we add them here
// as well as add a binding timeout
val dyn = new ServiceFactoryProxy(new DynBoundFactory(dst.bind(namer), treeCache)) {
override def apply(conn: ClientConnection) = {
val exc = new RequestTimeoutException(bindingTimeout.timeout, s"dyn binding ${dst.path.show}")
self(conn).rescue(handleNoBrokers).raiseWithin(bindingTimeout.timeout, exc)
}
private val handleNoBrokers: PartialFunction[Throwable, Future[Service[Req, Rsp]]] = {
case e: NoBrokersAvailableException => nbae
}
private lazy val nbae = Future.exception(new NoBrokersAvailableException(
dst.path.show,
dst.baseDtab,
dst.localDtab
))
}
pathMk(dst, dyn)
}
new ServiceFactoryCache(mk, timer, statsReceiver.scope("path"), capacity.paths, idleTtl.timeout)
}
// The tree cache is effectively keyed on a NameTree of Bound names
// with their residual paths.
private[this] val treeCache: Cache[Dst.BoundTree] = {
def mk(tree: Dst.BoundTree): ServiceFactory[Req, Rsp] =
NameTreeFactory(tree.path, tree.nameTree, boundCache)
new ServiceFactoryCache(mk, timer, statsReceiver.scope("tree"), capacity.trees, idleTtl.timeout)
}
// The bound cache is effectively keyed on the underlying service id
// and the residual path. It rewrites downstream URIs as requests
// are dispatched to the underlying client.
private[this] val boundCache: Cache[Dst.Bound] = {
def mk(bound: Dst.Bound): ServiceFactory[Req, Rsp] = {
val client = new ServiceFactory[Req, Rsp] {
// The client cache doesn't take the residual Path into
// account, so we strip it here to reduce confusion.
val name = Name.Bound(bound.addr, bound.id, Path.empty)
def apply(conn: ClientConnection) = clientCache.apply(name, conn)
def close(deadline: Time) = Future.Done
override def status = clientCache.status(name)
}
boundMk(bound, client)
}
new ServiceFactoryCache(mk, timer, statsReceiver.scope("bound"), capacity.bounds, idleTtl.timeout)
}
// The bottom cache is effectively keyed on the bound destination id
// (i.e. concrete service name).
private[this] val clientCache: Cache[Name.Bound] =
new ServiceFactoryCache(mkClient, timer, statsReceiver.scope("client"), capacity.clients, idleTtl.timeout)
private[this] val caches: Seq[Cache[_]] =
Seq(pathCache, treeCache, boundCache, clientCache)
def close(deadline: Time) =
Closable.sequence(caches: _*).close(deadline)
def status = Status.worstOf[Cache[_]](caches, _.status)
}
}
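// --- Illustrative sketch (added for exposition, not part of the original file) ---
// The Cached factory above layers four caches (path -> tree -> bound -> client) so
// that distinct logical names resolving to the same concrete destination end up
// sharing one client. This minimal, self-contained sketch shows only the sharing
// idea with a synchronized map; the real ServiceFactoryCache also handles capacity,
// idle TTL and stats. All names below are hypothetical and Finagle-free.
object LayeredCacheSketch {
  final class SimpleCache[K, V](mk: K => V) {
    private[this] val cache = scala.collection.mutable.Map.empty[K, V]
    def apply(key: K): V = synchronized(cache.getOrElseUpdate(key, mk(key)))
    def size: Int = synchronized(cache.size)
  }

  // A toy "resolver": two logical paths bind to the same concrete address.
  private def resolve(path: String): String = path match {
    case "/svc/users-a" | "/svc/users-b" => "10.0.0.1:8080"
    case other => other
  }

  // "Clients" are created per concrete address and shared across logical paths.
  val clientCache = new SimpleCache[String, AnyRef](_ => new Object)
  val pathCache = new SimpleCache[String, AnyRef](path => clientCache(resolve(path)))

  def demo(): Unit = {
    val a = pathCache("/svc/users-a")
    val b = pathCache("/svc/users-b")
    assert(a eq b, "both logical paths reuse the same underlying client")
    assert(clientCache.size == 1)
  }
}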
|
pawelprazak/linkerd
|
router/core/src/main/scala/com/twitter/finagle/buoyant/DstBindingFactory.scala
|
Scala
|
apache-2.0
| 8,719
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import org.apache.spark.SparkFunSuite
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql.{DataFrame, Row}
object StopWordsRemoverSuite extends SparkFunSuite {
def testStopWordsRemover(t: StopWordsRemover, dataset: DataFrame): Unit = {
t.transform(dataset) // transform() turns one DataFrame into another DataFrame
.select("filtered", "expected")
.collect()
.foreach { case Row(tokens, wantedTokens) =>
assert(tokens === wantedTokens)
}
}
}
/**
* StopWordsRemover takes a sequence of strings (e.g. tokenizer output) as input and removes all stop words from it.
* Stop words are words that occur frequently in documents but carry little meaning; they should not be included in the algorithm's input.
*/
class StopWordsRemoverSuite extends SparkFunSuite with MLlibTestSparkContext {
import StopWordsRemoverSuite._
test("StopWordsRemover default") {//默认删除停用词
val remover = new StopWordsRemover()
.setInputCol("raw")//输入
.setOutputCol("filtered")//输出
val dataSet = sqlContext.createDataFrame(Seq(
(Seq("test", "test"), Seq("test", "test")),
(Seq("a", "b", "c", "d"), Seq("b", "c", "d")),
(Seq("a", "the", "an"), Seq()),
(Seq("A", "The", "AN"), Seq()),
(Seq(null), Seq(null)),
(Seq(), Seq())
)).toDF("raw", "expected")//期望值
/**
+------------+------------+
| raw| expected|
+------------+------------+
|[test, test]|[test, test]|
|[a, b, c, d]| [b, c, d]|
|[a, the, an]| []|
|[A, The, AN]| []|
| [null]| [null]|
| []| []|
+------------+------------+*/
dataSet.show()
testStopWordsRemover(remover, dataSet)
}
test("StopWordsRemover case sensitive") {//删除停用词区分大小写
val remover = new StopWordsRemover()
.setInputCol("raw")
.setOutputCol("filtered")
// the boolean parameter caseSensitive controls whether matching is case sensitive (false by default)
.setCaseSensitive(true)
val dataSet = sqlContext.createDataFrame(Seq(
(Seq("A"), Seq("A")),
(Seq("The", "the"), Seq("The"))
)).toDF("raw", "expected")
/**
+----------+--------+
| raw|expected|
+----------+--------+
| [A]| [A]|
|[The, the]| [The]|
+----------+--------+
*/
dataSet.show()
testStopWordsRemover(remover, dataSet)
}
test("StopWordsRemover with additional words") {//删除停用词 附加词
val stopWords = StopWords.EnglishStopWords ++ Array("python", "scala")
val remover = new StopWordsRemover()
.setInputCol("raw")
.setOutputCol("filtered")
.setStopWords(stopWords) // set the stop word list
val dataSet = sqlContext.createDataFrame(Seq(
(Seq("python", "scala", "a"), Seq()),
(Seq("Python", "Scala", "swift"), Seq("swift"))
)).toDF("raw", "expected")
/**
+--------------------+--------+
| raw|expected|
+--------------------+--------+
| [python, scala, a]| []|
|[Python, Scala, s...| [swift]|
+--------------------+--------+*/
dataSet.show()
testStopWordsRemover(remover, dataSet)
}
}
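// --- Illustrative sketch (added for exposition, not part of the original suite) ---
// The tests above exercise Spark's StopWordsRemover. The transformation itself boils
// down to dropping tokens that appear in a stop-word set, optionally ignoring case.
// This plain-Scala sketch mirrors that behaviour for illustration only; it is not
// Spark's implementation.
object StopWordFilterSketch {
  def removeStopWords(
      tokens: Seq[String],
      stopWords: Set[String],
      caseSensitive: Boolean): Seq[String] = {
    if (caseSensitive) tokens.filterNot(stopWords.contains)
    else {
      val lowered = stopWords.map(_.toLowerCase)
      // Null tokens are kept, matching the (Seq(null), Seq(null)) row in the default test.
      tokens.filterNot(t => t != null && lowered.contains(t.toLowerCase))
    }
  }

  def demo(): Unit = {
    val stop = Set("a", "the", "an")
    assert(removeStopWords(Seq("a", "b", "c", "d"), stop, caseSensitive = false) == Seq("b", "c", "d"))
    assert(removeStopWords(Seq("A", "The", "AN"), stop, caseSensitive = false) == Seq())
    assert(removeStopWords(Seq("A"), stop, caseSensitive = true) == Seq("A"))
  }
}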
|
tophua/spark1.52
|
mllib/src/test/scala/org/apache/spark/ml/feature/StopWordsRemoverSuite.scala
|
Scala
|
apache-2.0
| 4,111
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import kafka.utils._
import kafka.message._
import kafka.common._
import kafka.metrics.KafkaMetricsGroup
import kafka.server.{LogOffsetMetadata, FetchDataInfo, BrokerTopicStats}
import java.io.{IOException, File}
import java.util.concurrent.{ConcurrentNavigableMap, ConcurrentSkipListMap}
import java.util.concurrent.atomic._
import java.text.NumberFormat
import scala.collection.JavaConversions
import com.yammer.metrics.core.Gauge
object LogAppendInfo {
val UnknownLogAppendInfo = LogAppendInfo(-1, -1, NoCompressionCodec, NoCompressionCodec, -1, -1, false)
}
/**
* Struct to hold various quantities we compute about each message set before appending to the log
* @param firstOffset The first offset in the message set
* @param lastOffset The last offset in the message set
* @param shallowCount The number of shallow messages
* @param validBytes The number of valid bytes
* @param sourceCodec The source codec used in the message set (sent by the producer)
* @param targetCodec The target codec of the message set (after applying the broker compression configuration, if any)
* @param offsetsMonotonic Are the offsets in this message set monotonically increasing
*/
case class LogAppendInfo(var firstOffset: Long, var lastOffset: Long, sourceCodec: CompressionCodec, targetCodec: CompressionCodec, shallowCount: Int, validBytes: Int, offsetsMonotonic: Boolean)
/**
* An append-only log for storing messages.
*
* The log is a sequence of LogSegments, each with a base offset denoting the first message in the segment.
*
* New log segments are created according to a configurable policy that controls the size in bytes or time interval
* for a given segment.
*
* @param dir The directory in which log segments are created.
* @param config The log configuration settings
* @param recoveryPoint The offset at which to begin recovery--i.e. the first offset which has not been flushed to disk
* @param scheduler The thread pool scheduler used for background actions
* @param time The time instance used for checking the clock
*
*/
@threadsafe
class Log(val dir: File,
@volatile var config: LogConfig,
@volatile var recoveryPoint: Long = 0L,
scheduler: Scheduler,
time: Time = SystemTime) extends Logging with KafkaMetricsGroup {
import kafka.log.Log._
/* A lock that guards all modifications to the log */
private val lock = new Object
/* last time it was flushed */
private val lastflushedTime = new AtomicLong(time.milliseconds)
def initFileSize() : Int = {
if (config.preallocate)
config.segmentSize
else
0
}
/* the actual segments of the log */
private val segments: ConcurrentNavigableMap[java.lang.Long, LogSegment] = new ConcurrentSkipListMap[java.lang.Long, LogSegment]
loadSegments()
/* Calculate the offset of the next message */
@volatile var nextOffsetMetadata = new LogOffsetMetadata(activeSegment.nextOffset(), activeSegment.baseOffset, activeSegment.size.toInt)
val topicAndPartition: TopicAndPartition = Log.parseTopicPartitionName(dir)
info("Completed load of log %s with log end offset %d".format(name, logEndOffset))
val tags = Map("topic" -> topicAndPartition.topic, "partition" -> topicAndPartition.partition.toString)
newGauge("NumLogSegments",
new Gauge[Int] {
def value = numberOfSegments
},
tags)
newGauge("LogStartOffset",
new Gauge[Long] {
def value = logStartOffset
},
tags)
newGauge("LogEndOffset",
new Gauge[Long] {
def value = logEndOffset
},
tags)
newGauge("Size",
new Gauge[Long] {
def value = size
},
tags)
/** The name of this log */
def name = dir.getName()
/* Load the log segments from the log files on disk */
private def loadSegments() {
// create the log directory if it doesn't exist
dir.mkdirs()
var swapFiles = Set[File]()
// first do a pass through the files in the log directory and remove any temporary files
// and find any interrupted swap operations
for(file <- dir.listFiles if file.isFile) {
if(!file.canRead)
throw new IOException("Could not read file " + file)
val filename = file.getName
if(filename.endsWith(DeletedFileSuffix) || filename.endsWith(CleanedFileSuffix)) {
// if the file ends in .deleted or .cleaned, delete it
file.delete()
} else if(filename.endsWith(SwapFileSuffix)) {
// we crashed in the middle of a swap operation, to recover:
// if a log, delete the .index file, complete the swap operation later
// if an index just delete it, it will be rebuilt
val baseName = new File(CoreUtils.replaceSuffix(file.getPath, SwapFileSuffix, ""))
if(baseName.getPath.endsWith(IndexFileSuffix)) {
file.delete()
} else if(baseName.getPath.endsWith(LogFileSuffix)){
// delete the index
val index = new File(CoreUtils.replaceSuffix(baseName.getPath, LogFileSuffix, IndexFileSuffix))
index.delete()
swapFiles += file
}
}
}
// now do a second pass and load all the .log and .index files
for(file <- dir.listFiles if file.isFile) {
val filename = file.getName
if(filename.endsWith(IndexFileSuffix)) {
// if it is an index file, make sure it has a corresponding .log file
val logFile = new File(file.getAbsolutePath.replace(IndexFileSuffix, LogFileSuffix))
if(!logFile.exists) {
warn("Found an orphaned index file, %s, with no corresponding log file.".format(file.getAbsolutePath))
file.delete()
}
} else if(filename.endsWith(LogFileSuffix)) {
// if its a log file, load the corresponding log segment
val start = filename.substring(0, filename.length - LogFileSuffix.length).toLong
val indexFile = Log.indexFilename(dir, start)
val segment = new LogSegment(dir = dir,
startOffset = start,
indexIntervalBytes = config.indexInterval,
maxIndexSize = config.maxIndexSize,
rollJitterMs = config.randomSegmentJitter,
time = time,
fileAlreadyExists = true)
if(indexFile.exists()) {
try {
segment.index.sanityCheck()
} catch {
case e: java.lang.IllegalArgumentException =>
warn("Found a corrupted index file, %s, deleting and rebuilding index...".format(indexFile.getAbsolutePath))
segment.deleteIndex()
indexFile.delete()
segment.recover(config.maxMessageSize)
}
}
else {
error("Could not find index file corresponding to log file %s, rebuilding index...".format(segment.log.file.getAbsolutePath))
segment.recover(config.maxMessageSize)
}
segments.put(start, segment)
}
}
// Finally, complete any interrupted swap operations. To be crash-safe,
// log files that are replaced by the swap segment should be renamed to .deleted
// before the swap file is restored as the new segment file.
for (swapFile <- swapFiles) {
val logFile = new File(CoreUtils.replaceSuffix(swapFile.getPath, SwapFileSuffix, ""))
val fileName = logFile.getName
val startOffset = fileName.substring(0, fileName.length - LogFileSuffix.length).toLong
val indexFile = new File(CoreUtils.replaceSuffix(logFile.getPath, LogFileSuffix, IndexFileSuffix) + SwapFileSuffix)
val index = new OffsetIndex(file = indexFile, baseOffset = startOffset, maxIndexSize = config.maxIndexSize)
val swapSegment = new LogSegment(new FileMessageSet(file = swapFile),
index = index,
baseOffset = startOffset,
indexIntervalBytes = config.indexInterval,
rollJitterMs = config.randomSegmentJitter,
time = time)
info("Found log file %s from interrupted swap operation, repairing.".format(swapFile.getPath))
swapSegment.recover(config.maxMessageSize)
val oldSegments = logSegments(swapSegment.baseOffset, swapSegment.nextOffset)
replaceSegments(swapSegment, oldSegments.toSeq, isRecoveredSwapFile = true)
}
if(logSegments.size == 0) {
// no existing segments, create a new mutable segment beginning at offset 0
segments.put(0L, new LogSegment(dir = dir,
startOffset = 0,
indexIntervalBytes = config.indexInterval,
maxIndexSize = config.maxIndexSize,
rollJitterMs = config.randomSegmentJitter,
time = time,
fileAlreadyExists = false,
initFileSize = this.initFileSize(),
preallocate = config.preallocate))
} else {
recoverLog()
// reset the index size of the currently active log segment to allow more entries
activeSegment.index.resize(config.maxIndexSize)
}
}
private def updateLogEndOffset(messageOffset: Long) {
nextOffsetMetadata = new LogOffsetMetadata(messageOffset, activeSegment.baseOffset, activeSegment.size.toInt)
}
private def recoverLog() {
// if we have the clean shutdown marker, skip recovery
if(hasCleanShutdownFile) {
this.recoveryPoint = activeSegment.nextOffset
return
}
// okay, we need to actually recover this log
val unflushed = logSegments(this.recoveryPoint, Long.MaxValue).iterator
while(unflushed.hasNext) {
val curr = unflushed.next
info("Recovering unflushed segment %d in log %s.".format(curr.baseOffset, name))
val truncatedBytes =
try {
curr.recover(config.maxMessageSize)
} catch {
case e: InvalidOffsetException =>
val startOffset = curr.baseOffset
warn("Found invalid offset during recovery for log " + dir.getName +". Deleting the corrupt segment and " +
"creating an empty one with starting offset " + startOffset)
curr.truncateTo(startOffset)
}
if(truncatedBytes > 0) {
// we had an invalid message, delete all remaining log
warn("Corruption found in segment %d of log %s, truncating to offset %d.".format(curr.baseOffset, name, curr.nextOffset))
unflushed.foreach(deleteSegment)
}
}
}
/**
* Check if we have the "clean shutdown" file
*/
private def hasCleanShutdownFile() = new File(dir.getParentFile, CleanShutdownFile).exists()
/**
* The number of segments in the log.
* Take care! this is an O(n) operation.
*/
def numberOfSegments: Int = segments.size
/**
* Close this log
*/
def close() {
debug("Closing log " + name)
lock synchronized {
for(seg <- logSegments)
seg.close()
}
}
/**
* Append this message set to the active segment of the log, rolling over to a fresh segment if necessary.
*
* This method will generally be responsible for assigning offsets to the messages,
* however if the assignOffsets=false flag is passed we will only check that the existing offsets are valid.
*
* @param messages The message set to append
* @param assignOffsets Should the log assign offsets to this message set or blindly apply what it is given
*
* @throws KafkaStorageException If the append fails due to an I/O error.
*
* @return Information about the appended messages including the first and last offset.
*/
def append(messages: ByteBufferMessageSet, assignOffsets: Boolean = true): LogAppendInfo = {
val appendInfo = analyzeAndValidateMessageSet(messages)
// if we have any valid messages, append them to the log
if(appendInfo.shallowCount == 0)
return appendInfo
// trim any invalid bytes or partial messages before appending it to the on-disk log
var validMessages = trimInvalidBytes(messages, appendInfo)
try {
// they are valid, insert them in the log
lock synchronized {
appendInfo.firstOffset = nextOffsetMetadata.messageOffset
if(assignOffsets) {
// assign offsets to the message set
val offset = new AtomicLong(nextOffsetMetadata.messageOffset)
try {
validMessages = validMessages.validateMessagesAndAssignOffsets(offset, appendInfo.sourceCodec, appendInfo.targetCodec, config.compact)
} catch {
case e: IOException => throw new KafkaException("Error in validating messages while appending to log '%s'".format(name), e)
}
appendInfo.lastOffset = offset.get - 1
} else {
// we are taking the offsets we are given
if(!appendInfo.offsetsMonotonic || appendInfo.firstOffset < nextOffsetMetadata.messageOffset)
throw new IllegalArgumentException("Out of order offsets found in " + messages)
}
// re-validate message sizes since after re-compression some may exceed the limit
for(messageAndOffset <- validMessages.shallowIterator) {
if(MessageSet.entrySize(messageAndOffset.message) > config.maxMessageSize) {
// we record the original message set size instead of trimmed size
// to be consistent with pre-compression bytesRejectedRate recording
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).bytesRejectedRate.mark(messages.sizeInBytes)
BrokerTopicStats.getBrokerAllTopicsStats.bytesRejectedRate.mark(messages.sizeInBytes)
throw new MessageSizeTooLargeException("Message size is %d bytes which exceeds the maximum configured message size of %d."
.format(MessageSet.entrySize(messageAndOffset.message), config.maxMessageSize))
}
}
// check that the message set size does not exceed config.segmentSize
if(validMessages.sizeInBytes > config.segmentSize) {
throw new MessageSetSizeTooLargeException("Message set size is %d bytes which exceeds the maximum configured segment size of %d."
.format(validMessages.sizeInBytes, config.segmentSize))
}
// maybe roll the log if this segment is full
val segment = maybeRoll(validMessages.sizeInBytes)
// now append to the log
segment.append(appendInfo.firstOffset, validMessages)
// increment the log end offset
updateLogEndOffset(appendInfo.lastOffset + 1)
trace("Appended message set to log %s with first offset: %d, next offset: %d, and messages: %s"
.format(this.name, appendInfo.firstOffset, nextOffsetMetadata.messageOffset, validMessages))
if(unflushedMessages >= config.flushInterval)
flush()
appendInfo
}
} catch {
case e: IOException => throw new KafkaStorageException("I/O exception in append to log '%s'".format(name), e)
}
}
/**
* Validate the following:
* <ol>
* <li> each message matches its CRC
* <li> each message size is valid
* </ol>
*
* Also compute the following quantities:
* <ol>
* <li> First offset in the message set
* <li> Last offset in the message set
* <li> Number of messages
* <li> Number of valid bytes
* <li> Whether the offsets are monotonically increasing
* <li> Whether any compression codec is used (if many are used, then the last one is given)
* </ol>
*/
private def analyzeAndValidateMessageSet(messages: ByteBufferMessageSet): LogAppendInfo = {
var shallowMessageCount = 0
var validBytesCount = 0
var firstOffset, lastOffset = -1L
var sourceCodec: CompressionCodec = NoCompressionCodec
var monotonic = true
for(messageAndOffset <- messages.shallowIterator) {
// update the first offset if on the first message
if(firstOffset < 0)
firstOffset = messageAndOffset.offset
// check that offsets are monotonically increasing
if(lastOffset >= messageAndOffset.offset)
monotonic = false
// update the last offset seen
lastOffset = messageAndOffset.offset
val m = messageAndOffset.message
// Check if the message sizes are valid.
val messageSize = MessageSet.entrySize(m)
if(messageSize > config.maxMessageSize) {
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).bytesRejectedRate.mark(messages.sizeInBytes)
BrokerTopicStats.getBrokerAllTopicsStats.bytesRejectedRate.mark(messages.sizeInBytes)
throw new MessageSizeTooLargeException("Message size is %d bytes which exceeds the maximum configured message size of %d."
.format(messageSize, config.maxMessageSize))
}
// check the validity of the message by checking CRC
m.ensureValid()
shallowMessageCount += 1
validBytesCount += messageSize
val messageCodec = m.compressionCodec
if(messageCodec != NoCompressionCodec)
sourceCodec = messageCodec
}
// Apply broker-side compression if any
val targetCodec = BrokerCompressionCodec.getTargetCompressionCodec(config.compressionType, sourceCodec)
LogAppendInfo(firstOffset, lastOffset, sourceCodec, targetCodec, shallowMessageCount, validBytesCount, monotonic)
}
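// --- Illustrative sketch (added for exposition, not part of the original class) ---
// analyzeAndValidateMessageSet above walks the shallow messages once, tracking the
// first/last offsets and whether they are strictly increasing. The same single-pass
// bookkeeping over plain offsets looks like this; sizes, CRCs and codecs are omitted
// and this is not the Kafka implementation.
object OffsetScanSketch {
  final case class Summary(firstOffset: Long, lastOffset: Long, count: Int, monotonic: Boolean)

  def scan(offsets: Seq[Long]): Summary = {
    var first = -1L
    var last = -1L
    var count = 0
    var monotonic = true
    for (offset <- offsets) {
      if (first < 0) first = offset          // remember the first offset seen
      if (last >= offset) monotonic = false  // offsets must strictly increase
      last = offset
      count += 1
    }
    Summary(first, last, count, monotonic)
  }

  // scan(Seq(5L, 6L, 7L)) == Summary(5, 7, 3, monotonic = true)
  // scan(Seq(5L, 5L, 9L)) == Summary(5, 9, 3, monotonic = false)
}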
/**
* Trim any invalid bytes from the end of this message set (if there are any)
* @param messages The message set to trim
* @param info The general information of the message set
* @return A trimmed message set. This may be the same as what was passed in or it may not.
*/
private def trimInvalidBytes(messages: ByteBufferMessageSet, info: LogAppendInfo): ByteBufferMessageSet = {
val messageSetValidBytes = info.validBytes
if(messageSetValidBytes < 0)
throw new InvalidMessageSizeException("Illegal length of message set " + messageSetValidBytes + " Message set cannot be appended to log. Possible causes are corrupted produce requests")
if(messageSetValidBytes == messages.sizeInBytes) {
messages
} else {
// trim invalid bytes
val validByteBuffer = messages.buffer.duplicate()
validByteBuffer.limit(messageSetValidBytes)
new ByteBufferMessageSet(validByteBuffer)
}
}
/**
* Read messages from the log
*
* @param startOffset The offset to begin reading at
* @param maxLength The maximum number of bytes to read
* @param maxOffset -The offset to read up to, exclusive. (i.e. the first offset NOT included in the resulting message set).
*
* @throws OffsetOutOfRangeException If startOffset is beyond the log end offset or before the base offset of the first segment.
* @return The fetch data information including fetch starting offset metadata and messages read
*/
def read(startOffset: Long, maxLength: Int, maxOffset: Option[Long] = None): FetchDataInfo = {
trace("Reading %d bytes from offset %d in log %s of length %d bytes".format(maxLength, startOffset, name, size))
// Because we don't take a lock for reading, the synchronization is a little bit tricky.
// We create local variables to avoid race conditions with updates to the log.
val currentNextOffsetMetadata = nextOffsetMetadata
val next = currentNextOffsetMetadata.messageOffset
if(startOffset == next)
return FetchDataInfo(currentNextOffsetMetadata, MessageSet.Empty)
var entry = segments.floorEntry(startOffset)
// attempt to read beyond the log end offset is an error
if(startOffset > next || entry == null)
throw new OffsetOutOfRangeException("Request for offset %d but we only have log segments in the range %d to %d.".format(startOffset, segments.firstKey, next))
// Do the read on the segment with a base offset less than the target offset
// but if that segment doesn't contain any messages with an offset greater than that
// continue to read from successive segments until we get some messages or we reach the end of the log
while(entry != null) {
// If the fetch occurs on the active segment, there might be a race condition where two fetch requests occur after
// the message is appended but before the nextOffsetMetadata is updated. In that case the second fetch may
// cause OffsetOutOfRangeException. To solve that, we cap the reading up to exposed position instead of the log
// end of the active segment.
val maxPosition = {
if (entry == segments.lastEntry) {
val exposedPos = nextOffsetMetadata.relativePositionInSegment.toLong
// Check the segment again in case a new segment has just rolled out.
if (entry != segments.lastEntry)
// New log segment has rolled out, we can read up to the file end.
entry.getValue.size
else
exposedPos
} else {
entry.getValue.size
}
}
val fetchInfo = entry.getValue.read(startOffset, maxOffset, maxLength, maxPosition)
if(fetchInfo == null) {
entry = segments.higherEntry(entry.getKey)
} else {
return fetchInfo
}
}
// Okay, we are beyond the end of the last segment with no data fetched, although the start offset is in range;
// this can happen when all messages with offsets larger than the start offset have been deleted.
// In this case, we will return the empty set with log end offset metadata
FetchDataInfo(nextOffsetMetadata, MessageSet.Empty)
}
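// --- Illustrative sketch (added for exposition, not part of the original class) ---
// read() above relies on the skip-list's floorEntry to find the segment whose base
// offset is the greatest one <= startOffset, then moves forward with higherEntry when
// a segment has no data past that offset. The lookup pattern in isolation, with
// strings standing in for segments (hypothetical values, not Kafka code):
object SegmentLookupSketch {
  import java.util.concurrent.ConcurrentSkipListMap

  val segments = new ConcurrentSkipListMap[java.lang.Long, String]()
  segments.put(0L, "segment-0")
  segments.put(100L, "segment-100")
  segments.put(250L, "segment-250")

  def segmentFor(offset: Long): Option[String] =
    Option(segments.floorEntry(offset)).map(_.getValue)

  // segmentFor(180L) == Some("segment-100"): offsets 100..249 live in that segment.
  // segmentFor(250L) == Some("segment-250")
  // A follow-up read would use segments.higherEntry(100L) to move on to segment-250.
}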
/**
* Given a message offset, find its corresponding offset metadata in the log.
* If the message offset is out of range, return unknown offset metadata
*/
def convertToOffsetMetadata(offset: Long): LogOffsetMetadata = {
try {
val fetchDataInfo = read(offset, 1)
fetchDataInfo.fetchOffsetMetadata
} catch {
case e: OffsetOutOfRangeException => LogOffsetMetadata.UnknownOffsetMetadata
}
}
/**
* Delete any log segments matching the given predicate function,
* starting with the oldest segment and moving forward until a segment doesn't match.
* @param predicate A function that takes in a single log segment and returns true iff it is deletable
* @return The number of segments deleted
*/
def deleteOldSegments(predicate: LogSegment => Boolean): Int = {
// find any segments that match the user-supplied predicate UNLESS it is the final segment
// and it is empty (since we would just end up re-creating it)
val lastSegment = activeSegment
val deletable = logSegments.takeWhile(s => predicate(s) && (s.baseOffset != lastSegment.baseOffset || s.size > 0))
val numToDelete = deletable.size
if(numToDelete > 0) {
lock synchronized {
// we must always have at least one segment, so if we are going to delete all the segments, create a new one first
if(segments.size == numToDelete)
roll()
// remove the segments for lookups
deletable.foreach(deleteSegment(_))
}
}
numToDelete
}
/**
* The size of the log in bytes
*/
def size: Long = logSegments.map(_.size).sum
/**
* The earliest message offset in the log
*/
def logStartOffset: Long = logSegments.head.baseOffset
/**
* The offset metadata of the next message that will be appended to the log
*/
def logEndOffsetMetadata: LogOffsetMetadata = nextOffsetMetadata
/**
* The offset of the next message that will be appended to the log
*/
def logEndOffset: Long = nextOffsetMetadata.messageOffset
/**
* Roll the log over to a new empty log segment if necessary.
*
* @param messagesSize The messages set size in bytes
* logSegment will be rolled if one of the following conditions met
* <ol>
* <li> The logSegment is full
* <li> The maxTime has elapsed
* <li> The index is full
* </ol>
* @return The currently active segment after (perhaps) rolling to a new segment
*/
private def maybeRoll(messagesSize: Int): LogSegment = {
val segment = activeSegment
if (segment.size > config.segmentSize - messagesSize ||
segment.size > 0 && time.milliseconds - segment.created > config.segmentMs - segment.rollJitterMs ||
segment.index.isFull) {
debug("Rolling new log segment in %s (log_size = %d/%d, index_size = %d/%d, age_ms = %d/%d)."
.format(name,
segment.size,
config.segmentSize,
segment.index.entries,
segment.index.maxEntries,
time.milliseconds - segment.created,
config.segmentMs - segment.rollJitterMs))
roll()
} else {
segment
}
}
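// --- Illustrative sketch (added for exposition, not part of the original class) ---
// maybeRoll above rolls when the incoming batch would overflow the segment, when a
// non-empty segment has exceeded its (jitter-adjusted) age limit, or when the offset
// index is full. The decision in isolation, with all inputs passed explicitly and the
// jitter already folded into maxSegmentAgeMs (names are hypothetical, not Kafka's API):
object RollDecisionSketch {
  def shouldRoll(
      segmentBytes: Long,
      incomingBytes: Int,
      maxSegmentBytes: Long,
      segmentAgeMs: Long,
      maxSegmentAgeMs: Long,
      indexFull: Boolean): Boolean = {
    val wouldOverflow = segmentBytes > maxSegmentBytes - incomingBytes
    val tooOld = segmentBytes > 0 && segmentAgeMs > maxSegmentAgeMs
    wouldOverflow || tooOld || indexFull
  }

  // shouldRoll(900L, 200, 1000L, 0L, 3600000L, indexFull = false) == true    (size)
  // shouldRoll(10L, 10, 1000L, 7200000L, 3600000L, indexFull = false) == true (age)
}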
/**
* Roll the log over to a new active segment starting with the current logEndOffset.
* This will trim the index to the exact size of the number of entries it currently contains.
* @return The newly rolled segment
*/
def roll(): LogSegment = {
val start = time.nanoseconds
lock synchronized {
val newOffset = logEndOffset
val logFile = logFilename(dir, newOffset)
val indexFile = indexFilename(dir, newOffset)
for(file <- List(logFile, indexFile); if file.exists) {
warn("Newly rolled segment file " + file.getName + " already exists; deleting it first")
file.delete()
}
segments.lastEntry() match {
case null =>
case entry => {
entry.getValue.index.trimToValidSize()
entry.getValue.log.trim()
}
}
val segment = new LogSegment(dir,
startOffset = newOffset,
indexIntervalBytes = config.indexInterval,
maxIndexSize = config.maxIndexSize,
rollJitterMs = config.randomSegmentJitter,
time = time,
fileAlreadyExists = false,
initFileSize = initFileSize,
preallocate = config.preallocate)
val prev = addSegment(segment)
if(prev != null)
throw new KafkaException("Trying to roll a new log segment for topic partition %s with start offset %d while it already exists.".format(name, newOffset))
// We need to update the segment base offset and append position data of the metadata when log rolls.
// The next offset should not change.
updateLogEndOffset(nextOffsetMetadata.messageOffset)
// schedule an asynchronous flush of the old segment
scheduler.schedule("flush-log", () => flush(newOffset), delay = 0L)
info("Rolled new log segment for '" + name + "' in %.0f ms.".format((System.nanoTime - start) / (1000.0*1000.0)))
segment
}
}
/**
* The number of messages appended to the log since the last flush
*/
def unflushedMessages() = this.logEndOffset - this.recoveryPoint
/**
* Flush all log segments
*/
def flush(): Unit = flush(this.logEndOffset)
/**
* Flush log segments for all offsets up to offset-1
* @param offset The offset to flush up to (non-inclusive); the new recovery point
*/
def flush(offset: Long) : Unit = {
if (offset <= this.recoveryPoint)
return
debug("Flushing log '" + name + " up to offset " + offset + ", last flushed: " + lastFlushTime + " current time: " +
time.milliseconds + " unflushed = " + unflushedMessages)
for(segment <- logSegments(this.recoveryPoint, offset))
segment.flush()
lock synchronized {
if(offset > this.recoveryPoint) {
this.recoveryPoint = offset
lastflushedTime.set(time.milliseconds)
}
}
}
/**
* Completely delete this log directory and all contents from the file system with no delay
*/
private[log] def delete() {
lock synchronized {
removeLogMetrics()
logSegments.foreach(_.delete())
segments.clear()
CoreUtils.rm(dir)
}
}
/**
* Truncate this log so that it ends with the greatest offset < targetOffset.
* @param targetOffset The offset to truncate to, an upper bound on all offsets in the log after truncation is complete.
*/
private[log] def truncateTo(targetOffset: Long) {
info("Truncating log %s to offset %d.".format(name, targetOffset))
if(targetOffset < 0)
throw new IllegalArgumentException("Cannot truncate to a negative offset (%d).".format(targetOffset))
if(targetOffset > logEndOffset) {
info("Truncating %s to %d has no effect as the largest offset in the log is %d.".format(name, targetOffset, logEndOffset-1))
return
}
lock synchronized {
if(segments.firstEntry.getValue.baseOffset > targetOffset) {
truncateFullyAndStartAt(targetOffset)
} else {
val deletable = logSegments.filter(segment => segment.baseOffset > targetOffset)
deletable.foreach(deleteSegment(_))
activeSegment.truncateTo(targetOffset)
updateLogEndOffset(targetOffset)
this.recoveryPoint = math.min(targetOffset, this.recoveryPoint)
}
}
}
/**
* Delete all data in the log and start at the new offset
* @param newOffset The new offset to start the log with
*/
private[log] def truncateFullyAndStartAt(newOffset: Long) {
debug("Truncate and start log '" + name + "' to " + newOffset)
lock synchronized {
val segmentsToDelete = logSegments.toList
segmentsToDelete.foreach(deleteSegment(_))
addSegment(new LogSegment(dir,
newOffset,
indexIntervalBytes = config.indexInterval,
maxIndexSize = config.maxIndexSize,
rollJitterMs = config.randomSegmentJitter,
time = time,
fileAlreadyExists = false,
initFileSize = initFileSize,
preallocate = config.preallocate))
updateLogEndOffset(newOffset)
this.recoveryPoint = math.min(newOffset, this.recoveryPoint)
}
}
/**
* The time this log is last known to have been fully flushed to disk
*/
def lastFlushTime(): Long = lastflushedTime.get
/**
* The active segment that is currently taking appends
*/
def activeSegment = segments.lastEntry.getValue
/**
* All the log segments in this log ordered from oldest to newest
*/
def logSegments: Iterable[LogSegment] = {
import JavaConversions._
segments.values
}
/**
* Get all segments beginning with the segment that includes "from" and ending with the segment
* that includes up to "to-1" or the end of the log (if to > logEndOffset)
*/
def logSegments(from: Long, to: Long): Iterable[LogSegment] = {
import JavaConversions._
lock synchronized {
val floor = segments.floorKey(from)
if(floor eq null)
segments.headMap(to).values
else
segments.subMap(floor, true, to, false).values
}
}
override def toString() = "Log(" + dir + ")"
/**
* This method performs an asynchronous log segment delete by doing the following:
* <ol>
* <li>It removes the segment from the segment map so that it will no longer be used for reads.
* <li>It renames the index and log files by appending .deleted to the respective file name
* <li>It schedules an asynchronous delete operation to occur in the future
* </ol>
* This allows reads to happen concurrently without synchronization and without the possibility of physically
* deleting a file while it is being read from.
*
* @param segment The log segment to schedule for deletion
*/
private def deleteSegment(segment: LogSegment) {
info("Scheduling log segment %d for log %s for deletion.".format(segment.baseOffset, name))
lock synchronized {
segments.remove(segment.baseOffset)
asyncDeleteSegment(segment)
}
}
/**
* Perform an asynchronous delete on the given file if it exists (otherwise do nothing)
* @throws KafkaStorageException if the file can't be renamed and still exists
*/
private def asyncDeleteSegment(segment: LogSegment) {
segment.changeFileSuffixes("", Log.DeletedFileSuffix)
def deleteSeg() {
info("Deleting segment %d from log %s.".format(segment.baseOffset, name))
segment.delete()
}
scheduler.schedule("delete-file", deleteSeg, delay = config.fileDeleteDelayMs)
}
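// --- Illustrative sketch (added for exposition, not part of the original class) ---
// asyncDeleteSegment above keeps deletion safe for concurrent readers by first renaming
// the files with a ".deleted" suffix (so they are no longer found by lookups) and only
// physically deleting them later on a scheduler. A minimal stand-alone version of that
// rename-then-delete pattern (not Kafka's code; the scheduler here is a plain JDK one):
object AsyncDeleteSketch {
  import java.io.File
  import java.util.concurrent.{Executors, TimeUnit}

  private val scheduler = Executors.newSingleThreadScheduledExecutor()

  def scheduleDelete(file: File, delayMs: Long): Unit = {
    val renamed = new File(file.getPath + ".deleted")
    if (!file.renameTo(renamed))
      throw new java.io.IOException(s"Could not rename $file for deletion")
    val task = new Runnable {
      def run(): Unit = { renamed.delete(); () } // the physical delete happens later
    }
    scheduler.schedule(task, delayMs, TimeUnit.MILLISECONDS)
  }
}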
/**
* Swap a new segment in place and delete one or more existing segments in a crash-safe manner. The old segments will
* be asynchronously deleted.
*
* The sequence of operations is:
* <ol>
* <li> Cleaner creates new segment with suffix .cleaned and invokes replaceSegments().
* If broker crashes at this point, the clean-and-swap operation is aborted and
* the .cleaned file is deleted on recovery in loadSegments().
* <li> New segment is renamed .swap. If the broker crashes after this point before the whole
* operation is completed, the swap operation is resumed on recovery as described in the next step.
* <li> Old segment files are renamed to .deleted and asynchronous delete is scheduled.
* If the broker crashes, any .deleted files left behind are deleted on recovery in loadSegments().
* replaceSegments() is then invoked to complete the swap with newSegment recreated from
* the .swap file and oldSegments containing segments which were not renamed before the crash.
* <li> Swap segment is renamed to replace the existing segment, completing this operation.
* If the broker crashes, any .deleted files which may be left behind are deleted
* on recovery in loadSegments().
* </ol>
*
* @param newSegment The new log segment to add to the log
* @param oldSegments The old log segments to delete from the log
* @param isRecoveredSwapFile true if the new segment was created from a swap file during recovery after a crash
*/
private[log] def replaceSegments(newSegment: LogSegment, oldSegments: Seq[LogSegment], isRecoveredSwapFile : Boolean = false) {
lock synchronized {
// need to do this in two phases to be crash safe AND do the delete asynchronously
// if we crash in the middle of this we complete the swap in loadSegments()
if (!isRecoveredSwapFile)
newSegment.changeFileSuffixes(Log.CleanedFileSuffix, Log.SwapFileSuffix)
addSegment(newSegment)
// delete the old files
for(seg <- oldSegments) {
// remove the index entry
if(seg.baseOffset != newSegment.baseOffset)
segments.remove(seg.baseOffset)
// delete segment
asyncDeleteSegment(seg)
}
// okay we are safe now, remove the swap suffix
newSegment.changeFileSuffixes(Log.SwapFileSuffix, "")
}
}
/**
* remove deleted log metrics
*/
private[log] def removeLogMetrics(): Unit = {
removeMetric("NumLogSegments", tags)
removeMetric("LogStartOffset", tags)
removeMetric("LogEndOffset", tags)
removeMetric("Size", tags)
}
/**
* Add the given segment to the segments in this log. If this segment replaces an existing segment, delete it.
* @param segment The segment to add
*/
def addSegment(segment: LogSegment) = this.segments.put(segment.baseOffset, segment)
}
/**
* Helper functions for logs
*/
object Log {
/** a log file */
val LogFileSuffix = ".log"
/** an index file */
val IndexFileSuffix = ".index"
/** a file that is scheduled to be deleted */
val DeletedFileSuffix = ".deleted"
/** A temporary file that is being used for log cleaning */
val CleanedFileSuffix = ".cleaned"
/** A temporary file used when swapping files into the log */
val SwapFileSuffix = ".swap"
/** Clean shutdown file that indicates the broker was cleanly shut down in 0.8. This is required to maintain backwards compatibility
* with 0.8 and avoid unnecessary log recovery when upgrading from 0.8 to 0.8.1 */
/** TODO: Get rid of CleanShutdownFile in 0.8.2 */
val CleanShutdownFile = ".kafka_cleanshutdown"
/**
* Make log segment file name from offset bytes. All this does is pad out the offset number with zeros
* so that ls sorts the files numerically.
* @param offset The offset to use in the file name
* @return The filename
*/
def filenamePrefixFromOffset(offset: Long): String = {
val nf = NumberFormat.getInstance()
nf.setMinimumIntegerDigits(20)
nf.setMaximumFractionDigits(0)
nf.setGroupingUsed(false)
nf.format(offset)
}
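// --- Illustrative example (added for exposition, not part of the original object) ---
// filenamePrefixFromOffset above zero-pads offsets to 20 digits so that plain
// lexicographic sorting (e.g. `ls`) matches numeric order. An equivalent formatting
// for non-negative offsets, with the expected values spelled out:
object OffsetPaddingExample {
  def pad(offset: Long): String = "%020d".format(offset)

  def demo(): Unit = {
    assert(pad(0L) == "00000000000000000000")
    assert(pad(15400L) == "00000000000000015400")
    // "00000000000000015400.log" sorts after "00000000000000000000.log" both
    // lexicographically and numerically.
  }
}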
/**
* Construct a log file name in the given dir with the given base offset
* @param dir The directory in which the log will reside
* @param offset The base offset of the log file
*/
def logFilename(dir: File, offset: Long) =
new File(dir, filenamePrefixFromOffset(offset) + LogFileSuffix)
/**
* Construct an index file name in the given dir using the given base offset
* @param dir The directory in which the log will reside
* @param offset The base offset of the log file
*/
def indexFilename(dir: File, offset: Long) =
new File(dir, filenamePrefixFromOffset(offset) + IndexFileSuffix)
/**
* Parse the topic and partition out of the directory name of a log
*/
def parseTopicPartitionName(dir: File): TopicAndPartition = {
val name: String = dir.getName
if (name == null || name.isEmpty || !name.contains('-')) {
throwException(dir)
}
val index = name.lastIndexOf('-')
val topic: String = name.substring(0, index)
val partition: String = name.substring(index + 1)
if (topic.length < 1 || partition.length < 1) {
throwException(dir)
}
TopicAndPartition(topic, partition.toInt)
}
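// --- Illustrative example (added for exposition, not part of the original object) ---
// parseTopicPartitionName above splits a log directory name at the *last* dash, so
// topics that themselves contain dashes still parse correctly. For example, a
// directory named "web-events-12" yields topic "web-events" and partition 12
// (the directory names here are hypothetical):
object TopicPartitionParseExample {
  def parse(dirName: String): (String, Int) = {
    val idx = dirName.lastIndexOf('-')
    require(idx > 0 && idx < dirName.length - 1, s"'$dirName' is not in topic-partition form")
    (dirName.substring(0, idx), dirName.substring(idx + 1).toInt)
  }

  // parse("web-events-12") == ("web-events", 12)
  // parse("payments-0")    == ("payments", 0)
}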
def throwException(dir: File) {
throw new KafkaException("Found directory " + dir.getCanonicalPath + ", " +
"'" + dir.getName + "' is not in the form of topic-partition\\n" +
"If a directory does not contain Kafka topic data it should not exist in Kafka's log " +
"directory")
}
}
|
OpenPOWER-BigData/HDP-kafka
|
core/src/main/scala/kafka/log/Log.scala
|
Scala
|
apache-2.0
| 39,194
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.stream
import org.apache.flink.api.java.tuple.{Tuple5 => JTuple5}
import org.apache.flink.api.java.typeutils.TupleTypeInfo
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.datastream.DataStream
import org.apache.flink.streaming.api.environment.{StreamExecutionEnvironment => JStreamExecEnv}
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.java.internal.{StreamTableEnvironmentImpl => JStreamTableEnvironmentImpl}
import org.apache.flink.table.api.java.{StreamTableEnvironment => JStreamTableEnv}
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.{TableConfig, Types, ValidationException}
import org.apache.flink.table.catalog.{CatalogManager, FunctionCatalog, GenericInMemoryCatalog}
import org.apache.flink.table.executor.StreamExecutor
import org.apache.flink.table.planner.StreamPlanner
import org.apache.flink.table.runtime.utils.StreamTestData
import org.apache.flink.table.utils.{CatalogManagerMocks, TableTestBase}
import org.apache.flink.table.utils.TableTestUtil.{binaryNode, streamTableNode, term, unaryNode}
import org.apache.flink.types.Row
import org.junit.Test
import org.mockito.Mockito.{mock, when}
import java.lang.{Integer => JInt, Long => JLong}
import org.apache.flink.table.module.ModuleManager
class StreamTableEnvironmentTest extends TableTestBase {
@Test
def testSqlWithoutRegistering(): Unit = {
val util = streamTestUtil()
val table = util.addTable[(Long, Int, String)]("tableName", 'a, 'b, 'c)
val sqlTable = util.tableEnv.sqlQuery(s"SELECT a, b, c FROM $table WHERE b > 12")
val expected = unaryNode(
"DataStreamCalc",
streamTableNode(table),
term("select", "a, b, c"),
term("where", ">(b, 12)"))
util.verifyTable(sqlTable, expected)
val table2 = util.addTable[(Long, Int, String)]('d, 'e, 'f)
val sqlTable2 = util.tableEnv.sqlQuery(s"SELECT d, e, f FROM $table2 " +
s"UNION ALL SELECT a, b, c FROM $table")
val expected2 = binaryNode(
"DataStreamUnion",
streamTableNode(table2),
streamTableNode(table),
term("all", "true"),
term("union all", "d, e, f"))
util.verifyTable(sqlTable2, expected2)
}
@Test
def testToAppendSinkOnUpdatingTable(): Unit = {
expectedException.expect(classOf[ValidationException])
expectedException.expectMessage("Table is not an append-only table. Use the toRetractStream()" +
" in order to handle add and retract messages.")
val env = StreamExecutionEnvironment.getExecutionEnvironment
val tEnv = StreamTableEnvironment.create(env)
val t = StreamTestData.get3TupleDataStream(env).toTable(tEnv, 'id, 'num, 'text)
t.groupBy('text)
.select('text, 'id.count, 'num.sum)
.toAppendStream[Row]
// must fail because table is not append-only
env.execute()
}
@Test
def testProctimeAttributeWithAtomicInput(): Unit = {
val util = streamTestUtil()
// cannot replace an attribute with proctime
util.addTable[String]('s, 'pt.proctime)
}
@Test
def testReplacingRowtimeAttributeWithAtomicInput(): Unit = {
val util = streamTestUtil()
util.addTable[Long]('rt.rowtime)
}
@Test
def testAppendedRowtimeAttributeWithAtomicInput(): Unit = {
val util = streamTestUtil()
util.addTable[String]('s, 'rt.rowtime)
}
@Test
def testRowtimeAndProctimeAttributeWithAtomicInput1(): Unit = {
val util = streamTestUtil()
util.addTable[String]('s, 'rt.rowtime, 'pt.proctime)
}
@Test
def testRowtimeAndProctimeAttributeWithAtomicInput2(): Unit = {
val util = streamTestUtil()
util.addTable[String]('s, 'pt.proctime, 'rt.rowtime)
}
@Test
def testRowtimeAndProctimeAttributeWithAtomicInput3(): Unit = {
val util = streamTestUtil()
util.addTable[Long]('rt.rowtime, 'pt.proctime)
}
@Test
def testProctimeAttribute(): Unit = {
val util = streamTestUtil()
// cannot replace an attribute with proctime
util.addTable[(Long, Int, String, Int, Long)]('a, 'b, 'c, 'd, 'e, 'pt.proctime)
}
@Test
def testReplacedRowtimeAttribute(): Unit = {
val util = streamTestUtil()
util.addTable[(Long, Int, String, Int, Long)]('rt.rowtime, 'b, 'c, 'd, 'e)
}
@Test
def testAppendedRowtimeAttribute(): Unit = {
val util = streamTestUtil()
util.addTable[(Long, Int, String, Int, Long)]('a, 'b, 'c, 'd, 'e, 'rt.rowtime)
}
@Test
def testRowtimeAndProctimeAttribute1(): Unit = {
val util = streamTestUtil()
util.addTable[(Long, Int, String, Int, Long)]('a, 'b, 'c, 'd, 'e, 'rt.rowtime, 'pt.proctime)
}
@Test
def testRowtimeAndProctimeAttribute2(): Unit = {
val util = streamTestUtil()
util.addTable[(Long, Int, String, Int, Long)]('a, 'b, 'c, 'd, 'e, 'pt.proctime, 'rt.rowtime)
}
@Test
def testRowtimeAndProctimeAttribute3(): Unit = {
val util = streamTestUtil()
util.addTable[(Long, Int, String, Int, Long)]('rt.rowtime, 'b, 'c, 'd, 'e, 'pt.proctime)
}
@Test
def testProctimeAttributeParsed(): Unit = {
val (jTEnv, ds) = prepareSchemaExpressionParser
jTEnv.fromDataStream(ds, "a, b, c, d, e, pt.proctime")
}
@Test
def testReplacingRowtimeAttributeParsed(): Unit = {
val (jTEnv, ds) = prepareSchemaExpressionParser
jTEnv.fromDataStream(ds, "a.rowtime, b, c, d, e")
}
@Test
def testAppendingRowtimeAttributeParsed(): Unit = {
val (jTEnv, ds) = prepareSchemaExpressionParser
jTEnv.fromDataStream(ds, "a, b, c, d, e, rt.rowtime")
}
@Test
def testRowtimeAndProctimeAttributeParsed1(): Unit = {
val (jTEnv, ds) = prepareSchemaExpressionParser
jTEnv.fromDataStream(ds, "a, b, c, d, e, pt.proctime, rt.rowtime")
}
@Test
def testRowtimeAndProctimeAttributeParsed2(): Unit = {
val (jTEnv, ds) = prepareSchemaExpressionParser
jTEnv.fromDataStream(ds, "rt.rowtime, b, c, d, e, pt.proctime")
}
private def prepareSchemaExpressionParser:
(JStreamTableEnv, DataStream[JTuple5[JLong, JInt, String, JInt, JLong]]) = {
val jStreamExecEnv = mock(classOf[JStreamExecEnv])
when(jStreamExecEnv.getStreamTimeCharacteristic).thenReturn(TimeCharacteristic.EventTime)
val config = new TableConfig
val catalogManager = CatalogManagerMocks.createEmptyCatalogManager()
val moduleManager: ModuleManager = new ModuleManager
val executor: StreamExecutor = new StreamExecutor(jStreamExecEnv)
val functionCatalog = new FunctionCatalog(config, catalogManager, moduleManager)
val streamPlanner = new StreamPlanner(executor, config, functionCatalog, catalogManager)
val jTEnv = new JStreamTableEnvironmentImpl(
catalogManager,
moduleManager,
functionCatalog,
config,
jStreamExecEnv,
streamPlanner,
executor,
true)
val sType = new TupleTypeInfo(Types.LONG, Types.INT, Types.STRING, Types.INT, Types.LONG)
.asInstanceOf[TupleTypeInfo[JTuple5[JLong, JInt, String, JInt, JLong]]]
val ds = mock(classOf[DataStream[JTuple5[JLong, JInt, String, JInt, JLong]]])
when(ds.getType).thenReturn(sType)
(jTEnv, ds)
}
}
|
gyfora/flink
|
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/stream/StreamTableEnvironmentTest.scala
|
Scala
|
apache-2.0
| 8,018
|
package org.bitcoins.testkit.util
import grizzled.slf4j.Logging
import org.bitcoins.core.util.NetworkUtil.portIsBound
import org.bitcoins.tor.TorParams
import java.net.{InetAddress, InetSocketAddress}
import scala.util.Properties
object TorUtil extends Logging {
val torEnabled: Boolean = Properties
.envOrNone("TOR")
.isDefined
def torProxyAddress =
new InetSocketAddress(InetAddress.getLoopbackAddress,
TorParams.DefaultProxyPort)
def torControlAddress =
new InetSocketAddress(InetAddress.getLoopbackAddress,
TorParams.DefaultControlPort)
def torProxyEnabled: Boolean = portIsBound(torProxyAddress)
def torControlEnabled: Boolean = portIsBound(torControlAddress)
def verifyTorEnabled(): Unit = {
assume(
torProxyEnabled,
s"Tor daemon is not running or listening port ${TorParams.DefaultProxyPort}")
assume(
torControlEnabled,
s"Tor daemon is not running or listening port ${TorParams.DefaultControlPort}")
}
}
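// --- Illustrative sketch (added for exposition, not part of the original object) ---
// torProxyEnabled/torControlEnabled above only check whether something is listening on
// the loopback ports Tor uses. A check of that kind can be sketched by attempting a
// short TCP connection, as below; this is an assumption for illustration and not
// necessarily how org.bitcoins.core.util.NetworkUtil.portIsBound is implemented.
object PortCheckSketch {
  import java.net.{InetSocketAddress, Socket}
  import scala.util.Try

  def isPortBound(address: InetSocketAddress, timeoutMs: Int = 1000): Boolean =
    Try {
      val socket = new Socket()
      try socket.connect(address, timeoutMs)
      finally socket.close()
    }.isSuccess
}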
|
bitcoin-s/bitcoin-s
|
testkit/src/main/scala/org/bitcoins/testkit/util/TorUtil.scala
|
Scala
|
mit
| 1,034
|
/*
* Copyright 2014 Frugal Mechanic (http://frugalmechanic.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fm.lazyseq
import fm.common.Resource
/**
* For LazySeq.mergeCorresponding
*/
final private class MergeCorrespondingLazySeq[L, R, K](leftReader: LazySeq[L], rightReader: LazySeq[R], toLeftKey: L => K, toRightKey: R => K)(implicit ord: Ordering[K]) extends LazySeq[LazySeq.EitherOrBoth[L, R]] {
private[this] val asserts: Boolean = true
import LazySeq.{EitherOrBoth, Left, Right, Both}
// This uses a thread for the rightReader to create the iterator
final def foreach[U](f: EitherOrBoth[L, R] => U) = Resource(rightReader.toIterator()).use { rightIt =>
var prevLeftKey: K = null.asInstanceOf[K]
var prevLeftKeyDefined: Boolean = false
var prevRightKey: K = null.asInstanceOf[K]
var prevRightKeyDefined: Boolean = false
leftReader.foreach { left: L =>
if (asserts) {
val leftKey: K = toLeftKey(left)
if (prevLeftKeyDefined) assert(ord.lt(prevLeftKey, leftKey), "Incorrect usage of MergeCorrespondingLazySeq. Inputs are not sorted/unique!")
else prevLeftKeyDefined = true
prevLeftKey = leftKey
}
// If nothing left on the right then we use the left value
if (!rightIt.hasNext) f(Left(left))
else {
val leftKey: K = toLeftKey(left)
// Empty out any right side keys that are less than the current left key
while (rightIt.hasNext && ord.lt(toRightKey(rightIt.head), leftKey)) {
val right: R = rightIt.next
if (asserts) {
val rightKey: K = toRightKey(right)
if (prevRightKeyDefined) assert(ord.lt(prevRightKey, rightKey), "Incorrect usage of MergeCorrespondingLazySeq. Inputs are not sorted/unique!")
else prevRightKeyDefined = true
prevRightKey = rightKey
}
f(Right(right))
}
// Either the keys match and we emit a Both, OR there are no remaining right values,
// OR the right key is greater than the left key
if (rightIt.hasNext && ord.equiv(leftKey, toRightKey(rightIt.head))) {
val right: R = rightIt.next
if (asserts) {
val rightKey: K = toRightKey(right)
if (prevRightKeyDefined) assert(ord.lt(prevRightKey, rightKey), "Incorrect usage of MergeCorrespondingLazySeq. Inputs are not sorted/unique!")
else prevRightKeyDefined = true
prevRightKey = rightKey
}
f(Both(left, right))
}
else f(Left(left)) // No remaining right values OR the right key is greater than the left key
}
}
// Drain anything left over on the right side
while (rightIt.hasNext) {
val right: R = rightIt.next
if (asserts) {
val rightKey: K = toRightKey(right)
if (prevRightKeyDefined) assert(ord.lt(prevRightKey, rightKey), "Incorrect usage of MergeCorrespondingLazySeq. Inputs are not sorted/unique!")
else prevRightKeyDefined = true
prevRightKey = rightKey
}
f(Right(right))
}
}
}
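// --- Illustrative sketch (added for exposition, not part of the original file) ---
// MergeCorrespondingLazySeq above walks two key-sorted inputs in lock step and emits a
// left-only value for keys present only on the left, a right-only value for keys present
// only on the right, and a "both" value when the keys match. The same merge over plain
// sorted Lists, with a locally defined ADT (this is not the LazySeq API):
object MergeCorrespondingSketch {
  sealed trait EitherOrBoth[+L, +R]
  final case class LeftOnly[L](left: L) extends EitherOrBoth[L, Nothing]
  final case class RightOnly[R](right: R) extends EitherOrBoth[Nothing, R]
  final case class Both[L, R](left: L, right: R) extends EitherOrBoth[L, R]

  def merge[L, R, K: Ordering](lefts: List[L], rights: List[R])(leftKey: L => K, rightKey: R => K): List[EitherOrBoth[L, R]] = {
    val ord = implicitly[Ordering[K]]
    (lefts, rights) match {
      case (Nil, rs) => rs.map(RightOnly(_))
      case (ls, Nil) => ls.map(LeftOnly(_))
      case (l :: ls, r :: rs) =>
        val cmp = ord.compare(leftKey(l), rightKey(r))
        if (cmp < 0) LeftOnly(l) :: merge(ls, r :: rs)(leftKey, rightKey)
        else if (cmp > 0) RightOnly(r) :: merge(l :: ls, rs)(leftKey, rightKey)
        else Both(l, r) :: merge(ls, rs)(leftKey, rightKey)
    }
  }

  // merge(List(1, 3, 4), List(2, 3))(identity, identity) ==
  //   List(LeftOnly(1), RightOnly(2), Both(3, 3), LeftOnly(4))
}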
|
frugalmechanic/fm-lazyseq
|
src/main/scala/fm/lazyseq/MergeCorrespondingLazySeq.scala
|
Scala
|
apache-2.0
| 3,719
|
package delta.java
import java.util.Optional
import java.util.function.{ BiConsumer, BiFunction }
import delta.{ Revision, EventStore }
import delta.write.Metadata
import scala.concurrent._
import scala.util.control.NonFatal
import scuff.concurrent.Threads.PiggyBack
class EntityRepository[ESID, EVT, S >: Null, ID, ET](
entity: Entity[ID, ET, S, EVT] { type Type = ET },
eventStore: EventStore[ESID, _ >: EVT],
exeCtx: ExecutionContext,
idConv: Function1[ID, ESID]) {
private[this] val repo = new delta.write.EntityRepository[ESID, EVT, S, ID, ET](entity)(eventStore)(exeCtx, idConv)
private def toJInt(t: (Any, Int)): Integer = Integer.valueOf(t._2)
private def toJInt(t: (ET, Int)): (ET, Integer) = (t._1, Integer valueOf t._2)
def exists(id: ID): Future[Optional[Integer]] = repo.exists(id).map {
case Some(rev) => Optional.of(rev: Integer)
case None => Optional.empty[Integer]
}(PiggyBack)
def load(id: ID): Future[(ET, Integer)] = repo.load(id).map(toJInt)(PiggyBack)
def insert(id: => ID, entity: ET): Future[ID] =
repo.insert(id, entity)(Metadata.empty)
def insert(id: => ID, entity: ET, metadata: Metadata): Future[ID] =
repo.insert(id, entity)(metadata)
def update(
id: ID, expectedRevision: Option[Revision],
metadata: Metadata, consumer: BiConsumer[ET, Integer])
: Future[Integer] = {
repo.update(id, expectedRevision) {
case (entity, revision) =>
try Future successful consumer.accept(entity, revision) catch {
case NonFatal(th) => Future failed th
}
}(metadata).map(toJInt)(PiggyBack)
}
def update(id: ID, expectedRevision: Option[Revision], consumer: BiConsumer[ET, Integer]): Future[Integer] = {
repo.update(id, expectedRevision) {
case (entity, revision) =>
try Future successful consumer.accept(entity, revision) catch {
case NonFatal(th) => Future failed th
}
}(Metadata.empty).map(toJInt)(PiggyBack)
}
def update(id: ID, metadata: Metadata, consumer: BiConsumer[ET, Integer]): Future[Integer] = {
repo.update(id) {
case (entity, revision) =>
try Future successful consumer.accept(entity, revision) catch {
case NonFatal(th) => Future failed th
}
}(metadata).map(toJInt)(PiggyBack)
}
def update[R](id: ID, expectedRevision: Option[Revision], metadata: Metadata, withReturn: BiFunction[ET, Integer, R]): Future[RepoUpdate[R]] = {
repo.update(id, expectedRevision) {
case (entity, revision) =>
try Future successful withReturn(entity, revision) catch {
case NonFatal(th) => Future failed th
}
}(metadata).map(t => RepoUpdate(t._1, t._2))(PiggyBack)
}
def update[R](id: ID, expectedRevision: Option[Revision], withReturn: BiFunction[ET, Integer, R]): Future[RepoUpdate[R]] = {
repo.update(id, expectedRevision) {
case (entity, revision) =>
try Future successful withReturn(entity, revision) catch {
case NonFatal(th) => Future failed th
}
}(Metadata.empty).map(t => RepoUpdate(t._1, t._2))(PiggyBack)
}
def update[R](id: ID, metadata: Metadata, withReturn: BiFunction[ET, Integer, R]): Future[RepoUpdate[R]] = {
repo.update(id) {
case (entity, revision) =>
try Future successful withReturn(entity, revision) catch {
case NonFatal(th) => Future failed th
}
}(metadata).map(t => RepoUpdate(t._1, t._2))(PiggyBack)
}
def updateAsync[R](id: ID, expectedRevision: Option[Revision], metadata: Metadata, withReturn: BiFunction[ET, Integer, Future[R]]): Future[RepoUpdate[R]] = {
repo.update(id, expectedRevision) {
case (entity, revision) =>
try withReturn(entity, revision) catch {
case NonFatal(th) => Future failed th
}
}(metadata).map(t => RepoUpdate(t._1, t._2))(PiggyBack)
}
def updateAsync[R](id: ID, expectedRevision: Option[Revision], withReturn: BiFunction[ET, Integer, Future[R]]): Future[RepoUpdate[R]] = {
repo.update(id, expectedRevision) {
case (entity, revision) =>
try withReturn(entity, revision) catch {
case NonFatal(th) => Future failed th
}
}(Metadata.empty).map(t => RepoUpdate(t._1, t._2))(PiggyBack)
}
def updateAsync[R](id: ID, metadata: Metadata, withReturn: BiFunction[ET, Integer, Future[R]]): Future[RepoUpdate[R]] = {
repo.update(id) {
case (entity, revision) =>
try withReturn(entity, revision) catch {
case NonFatal(th) => Future failed th
}
}(metadata).map(t => RepoUpdate(t._1, t._2))(PiggyBack)
}
}
final case class RepoUpdate[R](returned: R, newRevision: Revision)
|
nilskp/delta
|
delta-java/src/main/scala/delta/java/EntityRepository.scala
|
Scala
|
mit
| 4,686
|
/*
* The MIT License (MIT)
*
* Copyright (c) 2014 MineFormers
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package de.mineformers.core.client.renderer
import de.mineformers.core.util.math.Vector3
import net.minecraft.client.renderer.tileentity.TileEntitySpecialRenderer
import net.minecraft.tileentity.TileEntity
import org.lwjgl.opengl.GL11
/**
* TileRenderer
*
* @author PaleoCrafter
*/
abstract class TileRenderer[T <: TileEntity] extends TileEntitySpecialRenderer {
protected val renderer = new Renderer
def render(tile: T, x: Double, y: Double, z: Double, partialTicks: Float)
override def renderTileEntityAt(tile: TileEntity, x: Double, y: Double, z: Double, partialTicks: Float, destroyProgress: Int): Unit = {
renderer.context.pos = Vector3(x, y, z)
renderer.context.block = RenderContext.Block(tile.getWorld, tile.getPos.getX, tile.getPos.getY, tile.getPos.getZ)
GL11.glPushMatrix()
GL11.glTranslated(x, y, z)
render(tile.asInstanceOf[T], x, y, z, partialTicks)
GL11.glPopMatrix()
}
}
|
MineFormers/MFCore
|
src/main/scala/de/mineformers/core/client/renderer/TileRenderer.scala
|
Scala
|
mit
| 2,071
|
package almond.interpreter
import almond.interpreter.api.{DisplayData, OutputHandler}
import scala.collection.mutable.ListBuffer
final class TestOutputHandler extends OutputHandler {
import TestOutputHandler._
private val output = new ListBuffer[Output]
private val lock = new Object
def stdout(s: String): Unit =
lock.synchronized {
output += Output.Stdout(s)
}
def stderr(s: String): Unit =
lock.synchronized {
output += Output.Stderr(s)
}
def display(displayData: DisplayData): Unit =
lock.synchronized {
output += Output.Display(displayData)
}
def updateDisplay(displayData: DisplayData): Unit =
lock.synchronized {
output += Output.UpdateDisplay(displayData)
}
def result(): Seq[Output] =
output.result()
}
object TestOutputHandler {
sealed abstract class Output extends Product with Serializable
object Output {
final case class Stdout(s: String) extends Output
final case class Stderr(s: String) extends Output
final case class Display(data: DisplayData) extends Output
final case class UpdateDisplay(data: DisplayData) extends Output
}
}
|
alexarchambault/jupyter-scala
|
modules/shared/test/src/main/scala/almond/interpreter/TestOutputHandler.scala
|
Scala
|
apache-2.0
| 1,159
|
package io.getquill
object iqContext extends MirrorContext[MirrorIdiom, Literal] with ImplicitQuery with TestEntities
object Test extends Function1[String, Test] {
def apply(a: String) = new Test(a)
}
class Test(val a: String) extends Product {
def canEqual(that: Any) = ???
def productArity: Int = ???
def productElement(n: Int) = ???
}
class ImplicitQuerySpec extends Spec {
import iqContext._
"allows querying a case class companion" in {
val q = quote {
TestEntity.filter(t => t.s == "s")
}
iqContext.run(q).string mustEqual
"""query[TestEntity].filter(t => t.s == "s").map(t => (t.s, t.i, t.l, t.o))"""
}
"fails if querying a non-case-class companion" in {
"""
val q = quote {
Test.filter(_.a == "s")
}
""" mustNot compile
}
"only attempts to convert case class derived AbstractFunctionN to Query" - {
"preserves inferred type of secondary join FunctionN argument" in {
"""
val q = quote {
for{
(a,b)<- TestEntity.join(TestEntity2).on(_.i == _.i)
c <- TestEntity3.leftJoin(_.i == a.i)
} yield(a,b,c)
}
""" must compile
}
}
}
|
jcranky/quill
|
quill-core/src/test/scala/io/getquill/ImplicitQuerySpec.scala
|
Scala
|
apache-2.0
| 1,178
|
object Test {
def foo = 0
scala.reflect.runtime.universe.reify {
foo/*#*/
}
identity {
foo/*#*/
}
}
|
felixmulder/scala
|
test/files/presentation/hyperlinks-macro/src/MacroCall.scala
|
Scala
|
bsd-3-clause
| 120
|
package challenge3
import core._, Syntax._
/*
 * A writer data type that pairs some accumulated writer
 * content (the log) with a produced value.
*/
case class Writer[W, A](log: W, value: A) {
def run: (W, A) =
(log, value)
/*
* Exercise 3.1:
*
* Implement map for Writer[W, A].
*
* The following laws must hold:
* 1) r.map(z => z) == r
* 2) r.map(z => f(g(z))) == r.map(g).map(f)
*
*/
def map[B](f: A => B): Writer[W, B] =
Writer(log, f(value))
/*
* Exercise 3.2:
*
* Implement flatMap (a.k.a. bind, a.k.a. >>=).
*
* The following law must hold:
* r.flatMap(f).flatMap(g) == r.flatMap(z => f(z).flatMap(g))
*
*/
def flatMap[B](f: A => Writer[W, B])(implicit M: Monoid[W]): Writer[W, B] = {
val w = f(value)
Writer(M.append(log, w.log), w.value)
}
}
object Writer {
/*
* Exercise 3.3:
*
* Implement value (a.k.a. return, point, pure) given a
* Monoid for W.
*
* Hint: Try using Writer constructor.
*/
def value[W: Monoid, A](a: A): Writer[W, A] =
Writer(Monoid[W].zero, a)
/*
* Exercise 3.4:
*
* Implement tell.
*
* Tell appends the writer content w and produces no value.
*
* Hint: Try using Writer constructor.
*/
def tell[W](w: W): Writer[W, Unit] =
Writer(w, ())
/*
* Exercise 3.5:
*
 * Sequence a list of Writers into a Writer of a List.
*/
def sequence[W: Monoid, A](writers: List[Writer[W, A]]): Writer[W, List[A]] =
???
class Writer_[W] {
type l[a] = Writer[W, a]
}
implicit def WriterMonad[W: Monoid]: Monad[Writer_[W]#l] =
new Monad[Writer_[W]#l] {
def point[A](a: => A) = value[W, A](a)
def bind[A, B](a: Writer[W, A])(f: A => Writer[W, B]) = a flatMap f
}
implicit def WriterEqual[W: Equal, A: Equal] =
Equal.from[Writer[W, A]]((a, b) => (a.log -> a.value) === (b.log -> b.value))
  implicit def WriterMonoid[W: Monoid, A: Monoid]: Monoid[Writer[W, A]] =
new Monoid[Writer[W, A]] {
def zero = Writer.value[W, A](Monoid[A].zero)
def append(l: Writer[W, A], r: => Writer[W, A]) =
Writer(Monoid[W].append(l.log, r.log), Monoid[A].append(l.value, r.value))
}
}
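/*
 * A minimal usage sketch (not one of the exercises): it assumes a Monoid[List[String]]
 * instance with the usual list-concatenation semantics is available from core/Syntax.
 * The log accumulates through flatMap via the Monoid while the value threads through.
 */
object WriterUsageSketch {
  def example(implicit M: Monoid[List[String]]): (List[String], Int) =
    Writer.value[List[String], Int](1)
      .flatMap(n => Writer(List("doubled"), n * 2))
      .flatMap(n => Writer(List("added one"), n + 1))
      .run
  // expected: (List("doubled", "added one"), 3)
}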
/*
* *Challenge* Exercise 3.6: Stocks + Stats.
*
* We have some stock prices over time, and we make a simple
* adjustment:
* Map across each ticker price and do an adjustment by adding
* 1000 cents to every value under 10000 and 10 cents to every
* value equal to or over 10000.
*
 * However, while we compute this answer we also want to calculate
* summary statistics for our data, specifically, min, max, total,
* and count.
*
* Use the Writer data type to compute stats whilst we calculate
* our adjustments.
*
 * Complete the implementation: some of the methods are provided;
 * fill in the remainder to complete the spec.
*/
object Example {
case class Stats(min: Int, max: Int, total: Int, count: Int)
case class Stock(ticker: String, date: String, cents: Int)
/**
 * Implement our algorithm.
*
* Hint: Writer(W, A) and Writer.sequence will be useful here.
*/
def stocks(data: List[Stock]): (Stats, List[Stock]) =
???
/**
* A monoid for Stats.
*/
implicit def StatsMonoid: Monoid[Stats] =
new Monoid[Stats] {
def zero =
???
def append(l: Stats, r: => Stats) =
???
}
def exampledata = List(
Stock("FAKE", "2012-01-01", 10000)
, Stock("FAKE", "2012-01-02", 10020)
, Stock("FAKE", "2012-01-03", 10022)
, Stock("FAKE", "2012-01-04", 10005)
, Stock("FAKE", "2012-01-05", 9911)
, Stock("FAKE", "2012-01-06", 6023)
, Stock("FAKE", "2012-01-07", 7019)
, Stock("CAKE", "2012-01-01", 1)
, Stock("CAKE", "2012-01-02", 2)
, Stock("CAKE", "2012-01-03", 3)
, Stock("CAKE", "2012-01-04", 4)
, Stock("CAKE", "2012-01-05", 5)
, Stock("CAKE", "2012-01-06", 6)
, Stock("CAKE", "2012-01-07", 7)
, Stock("BAKE", "2012-01-01", 99999)
, Stock("BAKE", "2012-01-02", 99999)
, Stock("BAKE", "2012-01-03", 99999)
, Stock("BAKE", "2012-01-04", 99999)
, Stock("BAKE", "2012-01-05", 99999)
, Stock("BAKE", "2012-01-06", 99999)
, Stock("BAKE", "2012-01-07", 99999)
, Stock("LAKE", "2012-01-01", 10012)
, Stock("LAKE", "2012-01-02", 7000)
, Stock("LAKE", "2012-01-03", 1234)
, Stock("LAKE", "2012-01-04", 10)
, Stock("LAKE", "2012-01-05", 6000)
, Stock("LAKE", "2012-01-06", 6099)
, Stock("LAKE", "2012-01-07", 5999)
)
}
|
Kimply/scala-challenge
|
src/main/scala/challenge3/Writer.scala
|
Scala
|
mit
| 4,505
|
import sbt._
class TestProject(info: ProjectInfo) extends ParentProject(info)
{
val sub = project("sub", "Sub Project", new SubProject(_))
def ivyCacheDirectory = outputPath / "ivy-cache"
override def updateOptions = CacheDirectory(ivyCacheDirectory) :: super.updateOptions.toList
class SubProject(info: ProjectInfo) extends DefaultProject(info)
{
def ivyCacheDirectory = outputPath / "ivy-cache"
override def updateOptions = CacheDirectory(ivyCacheDirectory) :: super.updateOptions.toList
override def ivyXML =
<dependencies>
<dependency org="com.camptocamp.tl.caltar" name="core" rev="0.5" transitive="false"/>
</dependencies>
}
}
|
matheshar/simple-build-tool
|
src/sbt-test/dependency-management/inherit-repo/project/build/src/TestProject.scala
|
Scala
|
bsd-3-clause
| 658
|
package jp.cappuccino.tools.markdown.jfx
import javafx.application.Platform
import javafx.scene.layout.Pane
import javafx.scene.control.ChoiceBox
import javafx.scene.control.Label
import javafx.scene.control.MenuBar
import javafx.scene.control.MenuItem
import javafx.scene.control.RadioMenuItem
import javafx.scene.control.Tab
import javafx.scene.control.TabPane
import javafx.scene.control.TextArea
import javafx.scene.control.TextField
import javafx.scene.web.WebView
import javafx.stage.Stage
abstract class InitializableController {
class RunProc(p: => Unit) extends Runnable {
def run {
p
}
}
private var _pane: Pane = _
private var _stage: Stage = _
def init(stage: Stage, pane: Pane) {
_stage = stage
_pane = pane
init
}
def init: Unit
protected def stage: Stage = _stage
protected def pane: Pane = _pane
protected def lookup[T](id: String): T =
pane.lookup("#" + id).asInstanceOf[T]
protected def label(id: String): Label = lookup[Label](id)
protected def textField(id: String): TextField = lookup[TextField](id)
protected def textArea(id: String): TextArea = lookup[TextArea](id)
protected def choiceBox[T](id: String): ChoiceBox[T] =
lookup[ChoiceBox[T]](id)
protected def webView(id: String): WebView = lookup[WebView](id)
protected def menuBar(id: String): MenuBar = lookup[MenuBar](id)
protected def tabPane(id: String): TabPane = lookup[TabPane](id)
protected def tab(id: String): Tab = lookup[Tab](id)
protected def radioMenuItem(id: String): RadioMenuItem =
lookup[RadioMenuItem](id)
protected def runLater(p: => Unit) {
Platform.runLater(new RunProc(p))
}
}
|
keisuken/caffemarkdown
|
src/jp/cappuccino/tools/markdown/jfx/InitializableController.scala
|
Scala
|
mit
| 1,670
|
package org.jetbrains.plugins.scala
package project
import com.intellij.openapi.options.Configurable
/**
* @author Pavel Fatin
*/
abstract class AbstractConfigurable(name: String) extends Configurable with Configurable.NoScroll {
def getDisplayName: String = name
}
|
jastice/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/project/AbstractConfigurable.scala
|
Scala
|
apache-2.0
| 272
|
package at.logic.gapt.utils.testing
import java.io.File
import java.nio.file.Files
import java.nio.file.StandardCopyOption._
/**
* Copy resources from the classpath as actual files into the temporary directory.
*/
trait ClasspathFileCopier {
private val basenameExtRegex = """(?:^|.*/)([^/]+)\\.([^./]+)""".r
/**
* Creates a temporary copy of a resource in the classpath. This temporary
* copy is automatically deleted on program termination.
*
* @param path Path of the resource, e.g. "test.xml" if the resource is located in
* "project/src/test/resources/test.xml" in the source tree.
* @return Path to the temporary copy.
*/
def tempCopyOfClasspathFile( path: String ): String = {
val ( basename, ext ) = path match {
case basenameExtRegex( basename, ext ) => ( basename, ext )
case _ => ( "copyFileFromBasename", "tmp" )
}
val tempFile = File.createTempFile( basename, ext )
tempFile.deleteOnExit()
Files.copy( getClass.getClassLoader.getResourceAsStream( path ), tempFile.toPath, REPLACE_EXISTING )
tempFile.getPath
}
}
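/**
 * A minimal usage sketch (illustrative; "test.xml" is the resource name already used as
 * an example in the scaladoc above): mix the trait into a test and hand the temporary
 * copy's path to code that expects a real file on disk.
 */
object ClasspathFileCopierSketch extends ClasspathFileCopier {
  def pathToTestXml: String = tempCopyOfClasspathFile( "test.xml" )
}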
|
gisellemnr/gapt
|
src/main/scala/at/logic/gapt/utils/testing/ClasspathFileCopier.scala
|
Scala
|
gpl-3.0
| 1,139
|
package com.datamountaineer.streamreactor.connect.source
import javax.naming.NameNotFoundException
import javax.jms.Session
import com.datamountaineer.streamreactor.connect.TestBase
import com.datamountaineer.streamreactor.connect.jms.JMSSessionProvider
import com.datamountaineer.streamreactor.connect.jms.config.{DestinationSelector, JMSConfig, JMSSettings}
import org.apache.activemq.ActiveMQConnection
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.Eventually
import scala.util.Try
class JMSSessionProviderTest extends TestBase with BeforeAndAfterAll with Eventually {
val forAJmsConsumer = false
val forAJmsProducer = true
"should only create JMS Queue Consumer when reading from JMS Queue" in testWithBrokerOnPort { (conn, brokerUrl) =>
val props = getProps1Queue(brokerUrl)
val config = JMSConfig(props)
val settings = JMSSettings(config, forAJmsConsumer)
val provider = JMSSessionProvider(settings, forAJmsConsumer)
provider.queueConsumers.size shouldBe 1
provider.queueProducers.size shouldBe 0
provider.topicsConsumers.size shouldBe 0
provider.topicProducers.size shouldBe 0
}
"should only create JMS Topic Consumer when reading from JMS Topic" in testWithBrokerOnPort { (conn, brokerUrl) =>
val props = getProps1Topic(brokerUrl)
val config = JMSConfig(props)
val settings = JMSSettings(config, forAJmsConsumer)
val provider = JMSSessionProvider(settings, forAJmsConsumer)
provider.queueConsumers.size shouldBe 0
provider.queueProducers.size shouldBe 0
provider.topicsConsumers.size shouldBe 1
provider.topicProducers.size shouldBe 0
}
"should only create JMS Queue Producer when writing to JMS Queue" in testWithBrokerOnPort { (conn, brokerUrl) =>
val props = getProps1Queue(brokerUrl)
val config = JMSConfig(props)
val settings = JMSSettings(config, forAJmsProducer)
val provider = JMSSessionProvider(settings, forAJmsProducer)
provider.queueConsumers.size shouldBe 0
provider.queueProducers.size shouldBe 1
provider.topicsConsumers.size shouldBe 0
provider.topicProducers.size shouldBe 0
}
"should only create JMS Topic Producer when writing to JMS Topic" in testWithBrokerOnPort { (conn, brokerUrl) =>
val props = getProps1Topic(brokerUrl)
val config = JMSConfig(props)
val settings = JMSSettings(config, forAJmsProducer)
val provider = JMSSessionProvider(settings, forAJmsProducer)
provider.queueConsumers.size shouldBe 0
provider.queueProducers.size shouldBe 0
provider.topicsConsumers.size shouldBe 0
provider.topicProducers.size shouldBe 1
}
"should close the connection when the task is stopped" in testWithBrokerOnPort { (conn, brokerUrl) =>
val props = getProps1Topic(brokerUrl)
val config = JMSConfig(props)
val settings = JMSSettings(config, forAJmsProducer)
val provider = JMSSessionProvider(settings, forAJmsProducer)
provider.close().isSuccess shouldBe true
Try(provider.connection.createSession(false, Session.CLIENT_ACKNOWLEDGE)).isFailure shouldBe true
}
"should close connection and free resources on exception when configuring session provider" in
testWithBroker(clientID = Some("static-client-id")) { brokerUrl =>
val props = getProps1Topic(brokerUrl)
val config = JMSConfig(props)
val validSettings = JMSSettings(config, forAJmsConsumer)
val invalidSettings = validSettings.copy(destinationSelector = DestinationSelector.JNDI)
assertThrows[NameNotFoundException] {
JMSSessionProvider(invalidSettings, forAJmsConsumer)
}
val provider = JMSSessionProvider(validSettings, forAJmsConsumer)
provider.connection.asInstanceOf[ActiveMQConnection].isClosed shouldBe false
provider.connection.close()
}
}
|
CodeSmell/stream-reactor
|
kafka-connect-jms/src/test/scala/com/datamountaineer/streamreactor/connect/source/JMSSessionProviderTest.scala
|
Scala
|
apache-2.0
| 3,818
|
/*
* Odessa State environmental University
* Copyright (C) 2013
*/
package ua.edu.odeku.ceem.mapRadar.tools
import javax.swing.JPanel
/**
* User: Aleo Bakalov
* Date: 10.12.13
* Time: 17:00
*/
trait PanelTool {
def rootPanel : JPanel
}
|
aleo72/ww-ceem-radar
|
src/main/scala/ua/edu/odeku/ceem/mapRadar/tools/PanelTool.scala
|
Scala
|
apache-2.0
| 250
|
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.utils.geohash
import com.vividsolutions.jts.geom.Point
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class GeomDistanceTest extends Specification with GeomDistance {
import Distance._
"1 kilometer" should {
"equate to 1000 meters" in {
val a : Double = 1 kilometer
val b : Double = 1000 meter
a must beCloseTo(b, 1e-6)
}
}
"Reference test Flinders Peak, AUS" should {
"be 54972.271 meters from Buninyong, AUS" in {
val ptFlindersPeak = WKTUtils.read("POINT(144.4248678889 -37.9510334167)").asInstanceOf[Point]
val ptBuninyong = WKTUtils.read("POINT(143.9264955278 -37.6528211389)").asInstanceOf[Point]
VincentyModel.getDistanceBetweenTwoPoints(ptFlindersPeak, ptBuninyong).getDistanceInMeters must beCloseTo(54972.271, 0.01)
}
}
"CCRi local" should {
"be 433.5 meters from Albemarle High School" in {
val ptCCRI = WKTUtils.read("POINT(-78.4953560 38.0752150)").asInstanceOf[Point]
val ptAHS = WKTUtils.read("POINT(-78.5002901 38.0754152)").asInstanceOf[Point]
VincentyModel.getDistanceBetweenTwoPoints(ptCCRI, ptAHS).getDistanceInMeters must beCloseTo(433.5, 0.01)
}
}
"CCRi remote" should {
"be 11422838.3 meters from Mount Potts, NZ" in {
val ptCCRI = WKTUtils.read("POINT(-78.4953560 38.0752150)").asInstanceOf[Point]
val ptEdoras = WKTUtils.read("POINT(170.919998 -43.498299)").asInstanceOf[Point]
VincentyModel.getDistanceBetweenTwoPoints(ptCCRI, ptEdoras).getDistanceInMeters must beCloseTo(14301344.142, 0.01)
}
}
"New Mexico" should {
"be 2300000 meters from CCRI" in {
val ptCCRI = WKTUtils.read("POINT(-78.4953560 38.0752150)").asInstanceOf[Point]
val ptNM = VincentyModel.moveWithBearingAndDistance(ptCCRI, -90.0, 2300000)
ptNM.getX must beCloseTo(-104.060, 0.01)
ptNM.getY must beCloseTo(35.236, 0.01)
val dist = VincentyModel.getDistanceBetweenTwoPoints(ptCCRI, ptNM).getDistanceInMeters
dist must beCloseTo(2300000, 0.1)
}
}
}
|
mmatz-ccri/geomesa
|
geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/geohash/GeomDistanceTest.scala
|
Scala
|
apache-2.0
| 2,820
|
/*
* Copyright (C) 2005, The OpenURP Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.prac.innovation.model
import org.beangle.data.model.pojo.{DateRange, Named}
import org.beangle.data.model.IntId
import org.beangle.commons.collection.Collections
import org.openurp.base.model.School
import scala.collection.mutable
/** Batch */
class Batch extends IntId with Named with DateRange {
var school: School = _
  /** Whether this batch has been archived */
var archived: Boolean = _
  /** Stages */
var stages: mutable.Buffer[Stage] = Collections.newBuffer[Stage]
def getStage(stageType: StageType): Option[Stage] = {
stages.find(_.stageType == stageType)
}
}
|
openurp/api
|
prac/src/main/scala/org/openurp/prac/innovation/model/Batch.scala
|
Scala
|
lgpl-3.0
| 1,309
|
/*******************************************************************************
* (C) Copyright 2015 ADP, LLC.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package unicorn.unibase.graph
import java.nio.ByteBuffer
import scala.io.Source
import unicorn.bigtable.{BigTable, Column, Row}
import unicorn.json._
import unicorn.oid.LongIdGenerator
import unicorn.unibase.UpdateOps
import unicorn.unibase.graph.Direction._
import unicorn.util.Logging
/** Graphs are mathematical structures used to model pairwise relations
* between objects. A graph is made up of vertices (nodes) which are
* connected by edges (arcs or lines). A graph may be undirected, meaning
* that there is no distinction between the two vertices associated with
* each edge, or its edges may be directed from one vertex to another.
* Directed graphs are also called digraphs and directed edges are also
* called arcs or arrows.
*
* A multigraph is a graph which is permitted to have multiple edges
* (also called parallel edges), that is, edges that have the same end
* nodes. The ability to support parallel edges simplifies modeling
* scenarios where there can be multiple relationships (e.g., co-worker
* and friend) between the same vertices.
*
* In a property graph, the generic mathematical graph is often extended
* to support user defined objects attached to each vertex and edge.
* The edges also have associated labels denoting the relationships,
* which are important in a multigraph.
*
* Unicorn supports directed property multigraphs. Documents from different
* tables can be added as vertices to a multigraph. It is also okay to add
* vertices without corresponding to documents. Each relationship/edge
* has a label and optional data (any valid JsValue, default value JsInt(1)).
*
* Unicorn stores graphs in adjacency lists. That is, a graph
* is stored as a BigTable whose rows are vertices with their adjacency list.
* The adjacency list of a vertex contains all of the vertex’s incident edges
* (in and out edges are in different column families).
*
* Because large graphs are usually very sparse, an adjacency list is
* significantly more space-efficient than an adjacency matrix.
* Besides, the neighbors of each vertex may be listed efficiently with
* an adjacency list, which is important in graph traversals.
* With our design, it is also possible to
* test whether two vertices are adjacent to each other
* for a given relationship in constant time.
*
* @param table Graph adjacency list table.
*
* @author Haifeng Li
*/
class ReadOnlyGraph(val table: BigTable, documentVertexTable: BigTable) {
import unicorn.unibase.$id
/** Graph serializer. */
val serializer = new GraphSerializer()
/** The column qualifier of \\$id field. */
val idColumnQualifier = serializer.vertexSerializer.str2PathBytes($id)
/** The graph name. */
val name = table.name
/** Cache of vertex string key to long id. */
private[unicorn] val keyMap = collection.mutable.Map[String, Long]()
/** Returns the vertex properties and its both outgoing and incoming edges. */
def apply(id: Long): Vertex = {
apply(id, Both)
}
/** Returns the vertex properties and its adjacency list.
*
* @param id vertex id
* @param direction what edges to load
*/
def apply(id: Long, direction: Direction): Vertex = {
val key = serializer.serialize(id)
val families = direction match {
case Outgoing => table.get(key, Seq((GraphVertexColumnFamily, Seq.empty), (GraphOutEdgeColumnFamily, Seq.empty)))
case Incoming => table.get(key, Seq((GraphVertexColumnFamily, Seq.empty), (GraphInEdgeColumnFamily, Seq.empty)))
case Both => table.get(key)
}
require(!families.isEmpty, s"Vertex $id doesn't exist in graph ${table.name}")
serializer.deserializeVertex(Row(key, families))
}
/** Returns a vertex by its string key. */
def apply(key: String): Vertex = {
apply(key, Both)
}
/** Returns a vertex by its string key. */
def apply(key: String, direction: Direction): Vertex = {
val _id = id(key)
require(_id.isDefined, s"Vertex $key doesn't exist")
apply(_id.get, direction)
}
/** Returns the vertex id of a document vertex.
  * Returns None if the vertex doesn't exist.
*/
def id(table: String, key: JsValue, tenant: JsValue = JsUndefined): Option[Long] = {
val _id = documentVertexTable(serializer.serialize(table, tenant, key), GraphVertexColumnFamily, name)
_id.map(ByteBuffer.wrap(_).getLong)
}
/** Translates a vertex string key to 64 bit id. */
def id(key: String): Option[Long] = {
val _id = keyMap.get(key)
if (_id.isDefined) _id
else {
val _id = id(name, key)
if (_id.isDefined) keyMap(key) = _id.get
_id
}
}
/** Returns true if the vertex exists. */
def contains(id: Long): Boolean = {
val key = serializer.serialize(id)
table.apply(key, GraphVertexColumnFamily, idColumnQualifier).isDefined
}
/** Returns true if the vertex exists. */
def contains(key: String): Boolean = {
documentVertexTable(serializer.serialize(name, JsUndefined, key), GraphVertexColumnFamily, name).isDefined
}
/** Returns the vertex of a document. */
def apply(table: String, key: JsValue, tenant: JsValue = JsUndefined, direction: Direction = Both): Vertex = {
val _id = id(table, key, tenant)
require(_id.isDefined, s"document vertex ($table, $key, $tenant) doesn't exist")
apply(_id.get)
}
/** Returns the edge between `from` and `to` with given label. */
def apply(from: Long, label: String, to: Long): Option[JsValue] = {
val fromKey = serializer.serialize(from)
val columnPrefix = serializer.edgeSerializer.str2Bytes(label)
val value = table(fromKey, GraphOutEdgeColumnFamily, serializer.serializeEdgeColumnQualifier(columnPrefix, to))
value.map { bytes =>
serializer.deserializeEdgeProperties(bytes)
}
}
/** Returns a Gremlin traversal machine. */
def traversal: Gremlin = {
new Gremlin(new SimpleTraveler(this, direction = Direction.Both))
}
/** Returns a Gremlin traversal machine starting at the given vertex. */
def v(id: Long): GremlinVertices = {
val g = traversal
g.v(id)
}
}
/** Graph with update operators.
*
* @param idgen 64-bit ID generator for vertex id.
*/
class Graph(override val table: BigTable, documentVertexTable: BigTable, idgen: LongIdGenerator) extends ReadOnlyGraph(table, documentVertexTable) with UpdateOps with Logging {
import unicorn.unibase.{$id, $tenant}
/** For UpdateOps. */
override val valueSerializer = serializer.vertexSerializer
/** Returns the column family of a property. */
override def familyOf(field: String): String = GraphVertexColumnFamily
override def key(id: JsValue): Array[Byte] = {
require(id.isInstanceOf[JsLong], "Graph vertex id must be 64-bit JsLong")
serializer.serialize(id.asInstanceOf[JsLong].value)
}
/** Shortcut to addVertex. Returns the vertex properties object. */
def update(id: Long, properties: JsObject): JsObject = {
addVertex(id, properties)
properties
}
/** Shortcut to addEdge. Returns the edge properties value. */
def update(from: Long, label: String, to: Long, properties: JsValue): JsValue = {
addEdge(from, label, to, properties)
properties
}
/** Updates a vertex's properties. The supported update operators include
*
* - \\$set: Sets the value of a property of the vertex.
* - \\$unset: Removes the specified property of the vertex.
*/
def update(doc: JsObject): Unit = {
val id = doc($id)
require(id != JsNull && id != JsUndefined, s"missing ${$id}")
    require(id.isInstanceOf[JsLong], s"${$id} must be JsLong")
val vertex = id.asInstanceOf[JsLong].value
val $set = doc("$set")
require($set == JsUndefined || $set.isInstanceOf[JsObject], "$$set is not an object: " + $set)
val $unset = doc("$unset")
require($unset == JsUndefined || $unset.isInstanceOf[JsObject], "$$unset is not an object: " + $unset)
if ($set.isInstanceOf[JsObject]) set(vertex, $set.asInstanceOf[JsObject])
if ($unset.isInstanceOf[JsObject]) unset(vertex, $unset.asInstanceOf[JsObject])
}
/** Adds a vertex with predefined ID, which must be unique.
*
* @param id The unique vertex id. Throws exception if the vertex id exists.
*/
def addVertex(id: Long): Unit = {
addVertex(id, JsObject())
}
/** Adds a vertex with predefined ID, which must be unique.
*
* @param id The unique vertex id. Throws exception if the vertex id exists.
* @param label Vertex label.
*/
def addVertex(id: Long, label: String): Unit = {
addVertex(id, JsObject("label" -> JsString(label)))
}
/** Adds a vertex with predefined ID, which must be unique.
*
* @param id The unique vertex id. Throws exception if the vertex id exists.
* @param properties Any vertex property data.
*/
def addVertex(id: Long, properties: JsObject): Unit = {
properties($id) = id
val key = serializer.serialize(id)
require(table.apply(key, GraphVertexColumnFamily, idColumnQualifier).isEmpty, s"Vertex $id already exists in graph ${table.name}")
val columns = serializer.serializeVertex(properties)
table.put(key, GraphVertexColumnFamily, columns: _*)
}
/** Creates a new vertex with automatic generated ID.
* ID generator must be set up.
*
* @param properties Any vertex property data.
* @return Vertex ID.
*/
def addVertex(properties: JsObject): Long = {
val id = idgen.next
addVertex(id, properties)
id
}
/** Adds a vertex with a string key. Many existing graphs
* have vertices with string key. This helper function
* generates and returns the internal 64 bit vertex id
* for the new vertex. One may access the vertex by its
    * string key later. The string key is also stored as the
    * vertex property `key`.
*
* @return the 64 bit vertex id. */
def addVertex(key: String): Long = {
addVertex(name, key, properties = JsObject("key" -> JsString(key)))
}
/** Adds a vertex with a string key. Many existing graphs
* have vertices with string key. This helper function
* generates and returns the internal 64 bit vertex id
* for the new vertex. One may access the vertex by its
    * string key later. The string key is stored as the vertex
    * property `key`, and the label as the property `label`.
*
* @return the 64 bit vertex id. */
def addVertex(key: String, label: String): Long = {
addVertex(name, key, properties = JsObject("key" -> key, "label" -> label))
}
/** Adds a vertex with a string key. Many existing graphs
* have vertices with string key. This helper function
* generates and returns the internal 64 bit vertex id
* for the new vertex. One may access the vertex by its
    * string key later. The string key is not added to the vertex
    * properties, so a `key` or `label` field already present in the
    * supplied properties object is left untouched.
*
* @return the 64 bit vertex id. */
def addVertex(key: String, properties: JsObject): Long = {
val id = addVertex(name, key, properties = properties)
keyMap(key) = id
id
}
/** Creates a new vertex corresponding to a document in
* another table with automatic generated ID.
* ID generator must be set up.
*
    * @param table The name of the table the document lives in.
* @param key The document id.
* @param tenant The tenant id of document if the table is multi-tenanted.
* @param properties Any vertex property data.
* @return Vertex ID.
*/
def addVertex(table: String, key: JsValue, tenant: JsValue = JsUndefined, properties: JsObject = JsObject()): Long = {
require(documentVertexTable(serializer.serialize(table, tenant, key), GraphVertexColumnFamily, name).isEmpty, s"Document vertex ($table, $key, $tenant) exists.")
val id = idgen.next
properties($doc) = JsObject(
$table -> table,
$id -> key,
$tenant -> tenant
)
addVertex(id, properties)
documentVertexTable(serializer.serialize(table, tenant, key), GraphVertexColumnFamily, name) = serializer.serialize(id)
id
}
/** Deletes a vertex and all associated edges. */
def deleteVertex(id: Long): Unit = {
val vertex = apply(id)
val doc = vertex.properties($doc)
if (doc != JsUndefined) {
documentVertexTable.delete(serializer.serialize(doc($table), doc($tenant), doc($id)), GraphVertexColumnFamily, name)
}
vertex.inE.foreach { case (label, edges) =>
edges.foreach { edge =>
deleteEdge(edge.from, edge.label, edge.to)
}
}
val key = serializer.serialize(id)
table.delete(key)
}
def deleteVertex(table: String, key: JsValue, tenant: JsValue = JsUndefined): Unit = {
val _id = id(table, key, tenant)
require(_id.isDefined, s"document vertex ($table, $key, $tenant) doesn't exist")
deleteVertex(_id.get)
documentVertexTable.delete(serializer.serialize(table, tenant, key), GraphVertexColumnFamily, name)
}
/** Adds a directed edge. If the edge exists, the associated data will be overwritten.
*
* @param from vertex id.
* @param to vertex id.
*/
def addEdge(from: Long, to: Long): Unit = {
addEdge(from, "", to, JsNull)
}
/** Adds a directed edge. If the edge exists, the associated data will be overwritten.
*
* @param from vertex id.
* @param label relationship label.
* @param to vertex id.
*/
def addEdge(from: Long, label: String, to: Long): Unit = {
addEdge(from, label, to, JsNull)
}
/** Adds a directed edge. If the edge exists, the associated data will be overwritten.
*
* @param from vertex id.
* @param label relationship label.
* @param to vertex id.
* @param properties optional data associated with the edge.
*/
def addEdge(from: Long, label: String, to: Long, properties: JsValue): Unit = {
val fromKey = serializer.serialize(from)
val toKey = serializer.serialize(to)
val columnPrefix = serializer.edgeSerializer.str2Bytes(label)
val value = serializer.serializeEdge(properties)
table.put(fromKey, GraphOutEdgeColumnFamily, Column(serializer.serializeEdgeColumnQualifier(columnPrefix, to), value))
table.put(toKey, GraphInEdgeColumnFamily, Column(serializer.serializeEdgeColumnQualifier(columnPrefix, from), value))
}
/** Adds an edge with the string key of vertices.
* Automatically add the vertex if it doesn't exist. */
def addEdge(from: String, to: String): Unit = {
addEdge(from, "", to, JsNull)
}
/** Adds an edge with the string key of vertices. */
def addEdge(from: String, label: String, to: String): Unit = {
addEdge(from, label, to, JsNull)
}
/** Adds an edge with the string key of vertices. */
def addEdge(from: String, label: String, to: String, properties: JsValue): Unit = {
val fromId = id(from).getOrElse(addVertex(from))
val toId = id(to).getOrElse(addVertex(to))
addEdge(fromId, label, toId, properties)
}
/** Deletes a directed edge.
*
* @param from vertex id.
* @param label relationship label.
* @param to vertex id.
*/
def deleteEdge(from: Long, label: String, to: Long): Unit = {
val fromKey = serializer.serialize(from)
val toKey = serializer.serialize(to)
val columnPrefix = serializer.edgeSerializer.str2Bytes(label)
table.delete(fromKey, GraphOutEdgeColumnFamily, serializer.serializeEdgeColumnQualifier(columnPrefix, to))
table.delete(toKey, GraphInEdgeColumnFamily, serializer.serializeEdgeColumnQualifier(columnPrefix, from))
}
/** Deletes an edge with the string key of vertices. */
def deleteEdge(from: String, label: String, to: String): Unit = {
val fromId = id(from)
require(fromId.isDefined, s"Vertex $from doesn't exist.")
val toId = id(to)
require(toId.isDefined, s"Vertex $to doesn't exist.")
deleteEdge(fromId.get, label, toId.get)
}
/** Imports a CSV file of edges into this graph.
*
* @param file input file of which each line is an edge.
* Each line must contains at least two elements,
* separated by a separator. The first element is
* source vertex id/key, the second element is the
* destination vertex id/key, and the third optional
* element is the edge label or weight.
* @param separator separator between elements (coma, semicolon, pipe, whitespace, etc.)
* @param comment comment line start character/string.
* @param longVertexId if true, the vertex id is an integer/long. Otherwise, it is
* a string key.
* @param weight if true, the third optional element is the edge weight.
* Otherwise, it is the edge label.
*/
def csv(file: String, separator: String = "\\\\s+", comment: String = "#", longVertexId: Boolean = false, weight: Boolean = false): Unit = {
Source.fromFile(file).getLines.foreach { line =>
if (!line.startsWith(comment)) {
        val tokens = line.split(separator, 3)
if (tokens.length < 2)
log.warn(s"Invalid edge line: $line")
val (label, data) = if (tokens.length == 2) {
("", JsNull)
} else {
if (weight) ("", JsDouble(tokens(2).toDouble)) else (tokens(2), JsNull)
}
if (longVertexId) {
addEdge(tokens(0).toLong, label, tokens(1).toLong, data)
} else {
addEdge(tokens(0), label, tokens(1), data)
}
}
}
}
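  /* Illustrative input for csv() (hypothetical file content, not part of the project):
   * with longVertexId = true and weight = true, a file such as
   *   # source destination weight
   *   1 2 0.5
   *   2 3 1.0
   * yields the edges 1 -> 2 carrying JsDouble(0.5) and 2 -> 3 carrying JsDouble(1.0),
   * both under the empty label "".
   */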
/** Imports a TGF (Trivial Graph Format) file into this graph.
*
* @param file input file.
*/
def tgf(file: String): Unit = {
var edgeMode = false
    // Don't check whether the vertex exists, to speed things up.
Source.fromFile(file).getLines.foreach { line =>
      if (line.startsWith("#")) {
edgeMode = true
} else if (edgeMode) {
val tokens = line.split("\\\\s+", 3)
if (tokens.length < 2)
log.warn(s"Invalid edge line: $line")
val from = tokens(0).toLong
val to = tokens(1).toLong
val fromKey = serializer.serialize(from)
val toKey = serializer.serialize(to)
val label = if (tokens.size == 3) tokens(2) else ""
val columnPrefix = serializer.edgeSerializer.str2Bytes(label)
val value = serializer.serializeEdge(JsNull)
table.put(fromKey, GraphOutEdgeColumnFamily, Column(serializer.serializeEdgeColumnQualifier(columnPrefix, to), value))
table.put(toKey, GraphInEdgeColumnFamily, Column(serializer.serializeEdgeColumnQualifier(columnPrefix, from), value))
} else {
val tokens = line.split("\\\\s+", 2)
if (tokens.length == 1)
addVertex(tokens(0).toLong)
else
addVertex(tokens(0).toLong, tokens(1))
}
}
}
/** Imports a RDF file into this graph.
*
* @param uri URI to read from (includes file: and a plain file name).
* @param lang Hint for the content type (Turtle, RDF/XML, N-Triples,
* JSON-LD, RDF/JSON, TriG, N-Quads, TriX, RDF Thrift).
* If not provided, the system will guess the format based
* on file extensions. See details at [[https://jena.apache.org/documentation/io/ Jena]]
*/
def rdf(uri: String, lang: Option[String] = None): Unit = {
import java.util.concurrent.Executors
import scala.collection.JavaConversions._
import org.apache.jena.graph.Triple
import org.apache.jena.riot.{RDFDataMgr, RDFLanguages}
import org.apache.jena.riot.lang.{PipedRDFIterator, PipedTriplesStream}
val iter = new PipedRDFIterator[Triple]()
val input = new PipedTriplesStream(iter)
// PipedRDFStream and PipedRDFIterator need to be on different threads
val executor = Executors.newSingleThreadExecutor()
// Create a runnable for our parser thread
val parser = new Runnable() {
override def run(): Unit = {
// Call the parsing process.
if (lang.isDefined)
RDFDataMgr.parse(input, uri, RDFLanguages.contentTypeToLang(lang.get))
else
RDFDataMgr.parse(input, uri)
}
}
// Start the parser on another thread
executor.submit(parser)
// Consume the input on the main thread here
// Iterate over data as it is parsed, parsing only runs as
// far ahead of our consumption as the buffer size allows
try {
iter.foreach { triple =>
addEdge(triple.getSubject.toString, triple.getPredicate.toString, triple.getObject.toString)
}
} catch {
case e: Exception =>
log.error(s"Failed to parse RDF $uri:", e)
}
executor.shutdown
}
}
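/**
 * A minimal usage sketch (illustrative only; `bigtable`, `docTable` and `idgen` stand
 * for whatever BigTable instances and LongIdGenerator the application has set up).
 * Two vertices are created, connected by a labeled edge carrying properties, and the
 * edge properties are then read back through the adjacency list.
 */
object GraphUsageSketch {
  def example(bigtable: BigTable, docTable: BigTable, idgen: LongIdGenerator): Option[JsValue] = {
    val graph = new Graph(bigtable, docTable, idgen)
    val alice = graph.addVertex(JsObject("name" -> JsString("Alice")))
    val bob = graph.addVertex(JsObject("name" -> JsString("Bob")))
    graph.addEdge(alice, "knows", bob, JsObject("since" -> JsInt(2015)))
    graph(alice, "knows", bob) // expected: Some(JsObject("since" -> JsInt(2015)))
  }
}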
|
adplabs/unicorn
|
unibase/src/main/scala/unicorn/unibase/graph/Graph.scala
|
Scala
|
apache-2.0
| 21,362
|
package me.sgrouples.rogue.cc
import io.fsq.rogue.codecs.{IntegerPrimitiveCodec, LongPrimitiveCodec}
import org.bson.codecs.configuration.CodecRegistries
import org.bson.types.ObjectId
import org.mongodb.scala.MongoClient
import java.time.Instant
object CcMongo {
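  /** Builds an ObjectId whose leading 4 bytes hold the instant's epoch seconds
    * (big-endian) and whose remaining 8 bytes are zero, so the result sorts at the
    * start of that second and can serve as a time-based boundary for _id ranges. */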
def oidFromInstant(d: Instant): ObjectId = {
val timestamp = d.getEpochSecond()
val bytes = new Array[Byte](12)
bytes(0) = (timestamp >> 24).toByte
bytes(1) = (timestamp >> 16).toByte
bytes(2) = (timestamp >> 8).toByte
bytes(3) = timestamp.toByte
new ObjectId(bytes)
}
val codecRegistry = CodecRegistries.fromRegistries(
MongoClient.DEFAULT_CODEC_REGISTRY,
CodecRegistries.fromCodecs(
new LongPrimitiveCodec,
new IntegerPrimitiveCodec
)
)
}
|
sgrouples/rogue-fsqio
|
cc/src/main/scala/me/sgrouples/rogue/cc/CcMongo.scala
|
Scala
|
apache-2.0
| 770
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.TaskContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.errors.attachTree
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.execution.{SparkPlan, UnaryNode}
import org.apache.spark.sql.hive.{CarbonMetastore, CarbonMetastoreTypes}
import org.apache.spark.sql.optimizer.CarbonDecoderRelation
import org.apache.spark.sql.types._
import org.apache.carbondata.core.cache.{Cache, CacheProvider, CacheType}
import org.apache.carbondata.core.cache.dictionary.{Dictionary, DictionaryColumnUniqueIdentifier}
import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, ColumnIdentifier}
import org.apache.carbondata.core.metadata.datatype.DataType
import org.apache.carbondata.core.metadata.encoder.Encoding
import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension
import org.apache.carbondata.core.stats._
import org.apache.carbondata.core.util.{CarbonTimeStatisticsFactory, DataTypeUtil}
import org.apache.carbondata.spark.CarbonAliasDecoderRelation
/**
* It decodes the dictionary key to value
*/
case class CarbonDictionaryDecoder(
relations: Seq[CarbonDecoderRelation],
profile: CarbonProfile,
aliasMap: CarbonAliasDecoderRelation,
child: SparkPlan)
(@transient sqlContext: SQLContext)
extends UnaryNode {
override def otherCopyArgs: Seq[AnyRef] = sqlContext :: Nil
override val output: Seq[Attribute] = {
child.output.map { a =>
val attr = aliasMap.getOrElse(a, a)
val relation = relations.find(p => p.contains(attr))
if(relation.isDefined && canBeDecoded(attr)) {
val carbonTable = relation.get.carbonRelation.carbonRelation.metaData.carbonTable
val carbonDimension = carbonTable
.getDimensionByName(carbonTable.getFactTableName, attr.name)
if (carbonDimension != null &&
carbonDimension.hasEncoding(Encoding.DICTIONARY) &&
!carbonDimension.hasEncoding(Encoding.DIRECT_DICTIONARY)) {
val newAttr = AttributeReference(a.name,
convertCarbonToSparkDataType(carbonDimension,
relation.get.carbonRelation.carbonRelation),
a.nullable,
a.metadata)(a.exprId,
a.qualifiers).asInstanceOf[Attribute]
newAttr
} else {
a
}
} else {
a
}
}
}
def canBeDecoded(attr: Attribute): Boolean = {
profile match {
case ip: IncludeProfile if ip.attributes.nonEmpty =>
ip.attributes
.exists(a => a.name.equalsIgnoreCase(attr.name) && a.exprId == attr.exprId)
case ep: ExcludeProfile =>
!ep.attributes
.exists(a => a.name.equalsIgnoreCase(attr.name) && a.exprId == attr.exprId)
case _ => true
}
}
def convertCarbonToSparkDataType(carbonDimension: CarbonDimension,
relation: CarbonRelation): types.DataType = {
carbonDimension.getDataType match {
case DataType.STRING => StringType
case DataType.SHORT => ShortType
case DataType.INT => IntegerType
case DataType.LONG => LongType
case DataType.DOUBLE => DoubleType
case DataType.BOOLEAN => BooleanType
case DataType.DECIMAL =>
val scale: Int = carbonDimension.getColumnSchema.getScale
val precision: Int = carbonDimension.getColumnSchema.getPrecision
if (scale == 0 && precision == 0) {
DecimalType(18, 2)
} else {
DecimalType(precision, scale)
}
case DataType.TIMESTAMP => TimestampType
case DataType.DATE => DateType
case DataType.STRUCT =>
CarbonMetastoreTypes
.toDataType(s"struct<${ relation.getStructChildren(carbonDimension.getColName) }>")
case DataType.ARRAY =>
CarbonMetastoreTypes
.toDataType(s"array<${ relation.getArrayChildren(carbonDimension.getColName) }>")
}
}
val getDictionaryColumnIds = {
val attributes = child.output
val dictIds: Array[(String, ColumnIdentifier, DataType)] = attributes.map { a =>
val attr = aliasMap.getOrElse(a, a)
val relation = relations.find(p => p.contains(attr))
if(relation.isDefined && canBeDecoded(attr)) {
val carbonTable = relation.get.carbonRelation.carbonRelation.metaData.carbonTable
val carbonDimension =
carbonTable.getDimensionByName(carbonTable.getFactTableName, attr.name)
if (carbonDimension != null &&
carbonDimension.hasEncoding(Encoding.DICTIONARY) &&
!carbonDimension.hasEncoding(Encoding.DIRECT_DICTIONARY)) {
(carbonTable.getFactTableName, carbonDimension.getColumnIdentifier,
carbonDimension.getDataType)
} else {
(null, null, null)
}
} else {
(null, null, null)
}
}.toArray
dictIds
}
override def outputsUnsafeRows: Boolean = true
override def canProcessUnsafeRows: Boolean = true
override def canProcessSafeRows: Boolean = true
override def doExecute(): RDD[InternalRow] = {
attachTree(this, "execute") {
val storePath = sqlContext.catalog.asInstanceOf[CarbonMetastore].storePath
val queryId = sqlContext.getConf("queryId", System.nanoTime() + "")
val absoluteTableIdentifiers = relations.map { relation =>
val carbonTable = relation.carbonRelation.carbonRelation.metaData.carbonTable
(carbonTable.getFactTableName, carbonTable.getAbsoluteTableIdentifier)
}.toMap
val recorder = CarbonTimeStatisticsFactory.createExecutorRecorder(queryId)
if (isRequiredToDecode) {
val dataTypes = child.output.map { attr => attr.dataType }
child.execute().mapPartitions { iter =>
val cacheProvider: CacheProvider = CacheProvider.getInstance
val forwardDictionaryCache: Cache[DictionaryColumnUniqueIdentifier, Dictionary] =
cacheProvider.createCache(CacheType.FORWARD_DICTIONARY, storePath)
val dicts: Seq[Dictionary] = getDictionary(absoluteTableIdentifiers,
forwardDictionaryCache)
val dictIndex = dicts.zipWithIndex.filter(x => x._1 != null).map(x => x._2)
          // add a task completion listener to clear the dictionaries; this is a
          // decisive factor for the LRU eviction policy
val dictionaryTaskCleaner = TaskContext.get
dictionaryTaskCleaner.addTaskCompletionListener(context =>
dicts.foreach { dictionary =>
if (null != dictionary) {
dictionary.clear()
}
}
)
new Iterator[InternalRow] {
val unsafeProjection = UnsafeProjection.create(output.map(_.dataType).toArray)
var flag = true
var total = 0L
override final def hasNext: Boolean = {
flag = iter.hasNext
if (!flag && total > 0) {
val queryStatistic = new QueryStatistic()
queryStatistic
.addFixedTimeStatistic(QueryStatisticsConstants.PREPARE_RESULT, total)
recorder.recordStatistics(queryStatistic)
recorder.logStatistics()
}
flag
}
override final def next(): InternalRow = {
val startTime = System.currentTimeMillis()
val row: InternalRow = iter.next()
val data = row.toSeq(dataTypes).toArray
dictIndex.foreach { index =>
if (data(index) != null) {
data(index) = DataTypeUtil.getDataBasedOnDataType(dicts(index)
.getDictionaryValueForKey(data(index).asInstanceOf[Int]),
getDictionaryColumnIds(index)._3)
}
}
val result = unsafeProjection(new GenericMutableRow(data))
total += System.currentTimeMillis() - startTime
result
}
}
}
} else {
child.execute()
}
}
}
  private def isRequiredToDecode =
    getDictionaryColumnIds.exists(_._1 != null)
private def getDictionary(atiMap: Map[String, AbsoluteTableIdentifier],
cache: Cache[DictionaryColumnUniqueIdentifier, Dictionary]) = {
val dicts: Seq[Dictionary] = getDictionaryColumnIds.map { f =>
if (f._2 != null) {
try {
cache.get(new DictionaryColumnUniqueIdentifier(
atiMap(f._1).getCarbonTableIdentifier,
f._2, f._3))
} catch {
case _: Throwable => null
}
} else {
null
}
}
dicts
}
}
|
JihongMA/incubator-carbondata
|
integration/spark/src/main/scala/org/apache/spark/sql/CarbonDictionaryDecoder.scala
|
Scala
|
apache-2.0
| 9,537
|
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views.tradingpremises
import forms.{EmptyForm, InvalidForm}
import jto.validation.{Path, ValidationError}
import org.scalatest.MustMatchers
import play.api.i18n.Messages
import utils.AmlsViewSpec
import views.Fixture
import views.html.tradingpremises.registering_agent_premises
class registering_agent_premisesSpec extends AmlsViewSpec with MustMatchers {
trait ViewFixture extends Fixture {
lazy val registering_agent_premises = app.injector.instanceOf[registering_agent_premises]
implicit val requestWithToken = addTokenForView()
}
"is_residential view" must {
"have correct title, heading, back link and load UI with empty form" in new ViewFixture {
val form2 = EmptyForm
val pageTitle = Messages("tradingpremises.agent.premises.title") + " - " +
Messages("summary.tradingpremises") + " - " +
Messages("title.amls") + " - " + Messages("title.gov")
def view = registering_agent_premises(form2, 1, false)
doc.title must be(pageTitle)
heading.html must be(Messages("tradingpremises.agent.premises.title"))
subHeading.html must include(Messages("summary.tradingpremises"))
doc.getElementsByAttributeValue("class", "link-back") must not be empty
doc.select("input[type=radio]").size() must be(2)
}
"show errors in the correct locations" in new ViewFixture {
val form2: InvalidForm = InvalidForm(Map.empty,
Seq(
(Path \\ "some path") -> Seq(ValidationError("not a message Key"))
))
def view = registering_agent_premises(form2, 1, true)
errorSummary.html() must include("not a message Key")
}
}
}
|
hmrc/amls-frontend
|
test/views/tradingpremises/registering_agent_premisesSpec.scala
|
Scala
|
apache-2.0
| 2,266
|
package BIDMach.causal
import BIDMat.{Mat,SBMat,CMat,DMat,FMat,IMat,HMat,GMat,GIMat,GSMat,SMat,SDMat}
import BIDMat.MatFunctions._
import BIDMat.SciFunctions._
import edu.berkeley.bid.CUMACH
import scala.concurrent.future
import scala.concurrent.ExecutionContext.Implicits.global
import java.util.concurrent.CountDownLatch
import BIDMach.datasources._
import BIDMach.updaters._
import BIDMach.mixins._
import BIDMach.models._
import BIDMach._
class IPTW(opts:IPTW.Opts) extends RegressionModel(opts) {
var mylinks:Mat = null
var otargets:Mat = null
var totflops = 0L
var ustep = 0
override def init() = {
super.init()
mylinks = if (useGPU) GIMat(opts.links) else opts.links
if (mask.asInstanceOf[AnyRef] != null) modelmats(0) ~ modelmats(0) ∘ mask
totflops = 0L
for (i <- 0 until opts.links.length) {
totflops += GLM.linkArray(opts.links(i)).fnflops
}
otargets = targets.rowslice(targets.nrows/2, targets.nrows);
val tmats = new Array[Mat](3)
tmats(0) = modelmats(0)
tmats(1) = modelmats(0).zeros(targets.nrows/2,1)
tmats(2) = modelmats(0).zeros(targets.nrows/2,1)
setmodelmats(tmats)
val umats = new Array[Mat](3)
umats(0) = updatemats(0)
umats(1) = updatemats(0).zeros(targets.nrows/2,1)
umats(2) = updatemats(0).zeros(targets.nrows/2,1)
updatemats = umats
ustep = 0
}
def mupdate(in:Mat, ipass:Int, pos:Long) = {
val targs = targets * in
mupdate2(in, targs, ipass, pos)
}
def mupdate2(in:Mat, targ:Mat, ipass:Int, pos:Long) = {
val ftarg = full(targ)
val treatment = ftarg.rowslice(0, ftarg.nrows/2);
val outcome = ftarg.rowslice(ftarg.nrows/2, ftarg.nrows)
val eta = modelmats(0) * in
val feta = eta + 0f
GLM.preds(eta, feta, mylinks, totflops)
val propensity = feta.rowslice(0, feta.nrows/2) // Propensity score
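    // iptw below is the inverse-probability-of-treatment-weighted outcome contrast;
    // aiptw further down augments it with the outcome-model terms (the doubly robust variant).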
val iptw = (treatment ∘ outcome) / propensity - ((1 - treatment) ∘ outcome) / (1 - propensity)
val tmodel = otargets ∘ modelmats(0).rowslice(targ.nrows/2, targ.nrows)
val vx0 = eta.rowslice(eta.nrows/2, eta.nrows) - tmodel * in // compute vx given T = 0
val vx1 = vx0 + sum(tmodel, 2) // compute vx given T = 1
GLM.preds(vx0, vx0, mylinks, totflops)
GLM.preds(vx1, vx1, mylinks, totflops)
val tdiff = treatment - propensity
val aiptw = iptw - (tdiff ∘ (vx0 / propensity + vx1 / (1 - propensity)))
// println("%d effect %f" format (ustep, mean(iptw,2).dv))
if (ustep > opts.cwait) {
updatemats(1) ~ mean(iptw, 2) - modelmats(1)
updatemats(2) ~ mean(aiptw, 2) - modelmats(2)
}
ustep += 1
GLM.derivs(feta, ftarg, feta, mylinks, totflops)
updatemats(0) ~ feta *^ in // update the primary predictors
if (mask.asInstanceOf[AnyRef] != null) {
updatemats(0) ~ updatemats(0) ∘ mask
}
}
def meval(in:Mat):FMat = {
val targs = targets * in
meval2(in, targs)
}
def meval2(in:Mat, targ:Mat):FMat = {
val ftarg = full(targ)
val eta = modelmats(0) * in
GLM.preds(eta, eta, mylinks, totflops)
val v = GLM.llfun(eta, ftarg, mylinks, totflops)
if (putBack >= 0) {ftarg <-- eta}
FMat(mean(v, 2))
}
}
object IPTW {
trait Opts extends RegressionModel.Opts {
var links:IMat = null
var cwait = 20
}
class Options extends Opts {}
def mkModel(fopts:Model.Opts) = {
new IPTW(fopts.asInstanceOf[IPTW.Opts])
}
def mkUpdater(nopts:Updater.Opts) = {
new ADAGrad(nopts.asInstanceOf[ADAGrad.Opts])
}
def mkRegularizer(nopts:Mixin.Opts):Array[Mixin] = {
Array(new L1Regularizer(nopts.asInstanceOf[L1Regularizer.Opts]))
}
def mkL2Regularizer(nopts:Mixin.Opts):Array[Mixin] = {
Array(new L2Regularizer(nopts.asInstanceOf[L2Regularizer.Opts]))
}
class LearnOptions extends Learner.Options with IPTW.Opts with MatSource.Opts with ADAGrad.Opts with L1Regularizer.Opts
// Basic in-memory learner with generated target
def learner(mat0:Mat) = {
val opts = new LearnOptions
opts.batchSize = math.min(10000, mat0.ncols/30 + 1)
opts.lrate = 1f
opts.links = 1
val nn = new Learner(
new MatSource(Array(mat0:Mat), opts),
new IPTW(opts),
mkRegularizer(opts),
new ADAGrad(opts),
null,
opts)
(nn, opts)
}
class LearnParOptions extends ParLearner.Options with IPTW.Opts with MatSource.Opts with ADAGrad.Opts with L1Regularizer.Opts
def learnPar(mat0:Mat, d:Int) = {
val opts = new LearnParOptions
opts.batchSize = math.min(10000, mat0.ncols/30 + 1)
opts.lrate = 1f
val nn = new ParLearnerF(
new MatSource(Array(mat0), opts),
opts, mkModel _,
opts, mkRegularizer _,
opts, mkUpdater _,
null, null,
opts)
(nn, opts)
}
def learnPar(mat0:Mat):(ParLearnerF, LearnParOptions) = learnPar(mat0, 0)
def learnPar(mat0:Mat, targ:Mat, d:Int) = {
val opts = new LearnParOptions
opts.batchSize = math.min(10000, mat0.ncols/30 + 1)
opts.lrate = 1f
if (opts.links == null) opts.links = izeros(targ.nrows,1)
opts.links.set(d)
val nn = new ParLearnerF(
new MatSource(Array(mat0, targ), opts),
opts, mkModel _,
opts, mkRegularizer _,
opts, mkUpdater _,
null, null,
opts)
(nn, opts)
}
def learnPar(mat0:Mat, targ:Mat):(ParLearnerF, LearnParOptions) = learnPar(mat0, targ, 0)
class LearnFParOptions extends ParLearner.Options with IPTW.Opts with SFileSource.Opts with ADAGrad.Opts with L1Regularizer.Opts
def learnFParx(
nstart:Int=FileSource.encodeDate(2012,3,1,0),
nend:Int=FileSource.encodeDate(2012,12,1,0),
d:Int = 0
) = {
val opts = new LearnFParOptions
opts.lrate = 1f
val nn = new ParLearnerxF(
null,
(dopts:DataSource.Opts, i:Int) => Experiments.Twitter.twitterWords(nstart, nend, opts.nthreads, i),
opts, mkModel _,
opts, mkRegularizer _,
opts, mkUpdater _,
null, null,
opts
)
(nn, opts)
}
def learnFPar(
nstart:Int=FileSource.encodeDate(2012,3,1,0),
nend:Int=FileSource.encodeDate(2012,12,1,0),
d:Int = 0
) = {
val opts = new LearnFParOptions
opts.lrate = 1f
val nn = new ParLearnerF(
Experiments.Twitter.twitterWords(nstart, nend),
opts, mkModel _,
opts, mkRegularizer _,
opts, mkUpdater _,
null, null,
opts
)
(nn, opts)
}
}
|
jamesjia94/BIDMach
|
src/main/scala/BIDMach/causal/IPTW.scala
|
Scala
|
bsd-3-clause
| 6,830
|
package io.buoyant.namer.consul
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.{Addr, Failure}
import com.twitter.util._
import io.buoyant.consul.v1
private[consul] object DcServices {
/**
* We use a shared stats object so that counters/stats are not
* created anew for each DC/service.
*/
case class Stats(stats0: StatsReceiver) {
val stats = stats0.scope("dc")
val opens = stats.counter("opens")
val closes = stats.counter("closes")
val errors = stats.counter("errors")
val updates = stats.counter("updates")
val adds = stats.counter("adds")
val removes = stats.counter("removes")
val service = SvcAddr.Stats(stats0.scope("service"))
}
/**
* Contains all cached serviceMap responses and the mapping of names
* to Addrs for a particular datacenter.
*
* If the named datacenter does not exist, the consul API will retry
* indefinitely. This is because missing datacenters cannot be
* distinguished from server errors.
*/
def apply(
consulApi: v1.ConsulApi,
name: String,
domain: Option[String],
consistency: Option[v1.ConsistencyMode],
preferServiceAddress: Option[Boolean] = None,
stats: Stats
): Activity[Map[SvcKey, Var[Addr]]] = {
def getServices(index: Option[String]): Future[v1.Indexed[Set[SvcKey]]] =
consulApi.serviceMap(
datacenter = Some(name),
blockingIndex = index,
consistency = consistency,
retry = true
).map(toServices)
val states = Var.async[Activity.State[Map[SvcKey, Var[Addr]]]](Activity.Pending) { state =>
stats.opens.incr()
@volatile var stopped: Boolean = false
def loop(index0: Option[String], cache: Map[SvcKey, Var[Addr]]): Future[Unit] = {
if (stopped) Future.Unit
else getServices(index0).transform {
case Throw(e) =>
// If an exception escaped getService's retries, we treat it as
// effectively fatal to DC observation. In the future, we
// may consider retrying certain failures (with backoff).
state() = Activity.Failed(e)
stats.errors.incr()
Future.exception(e)
case Return(v1.Indexed(_, None)) =>
// If consul didn't give us an index, all bets are off.
state() = Activity.Failed(NoIndexException)
stats.errors.incr()
Future.exception(NoIndexException)
case Return(v1.Indexed(keys, index1)) =>
stats.updates.incr()
cache.keys.foreach { k =>
if (!keys(k)) {
log.debug("consul deleted: %s", k)
stats.removes.incr()
}
}
// Create a Var[Addr] for each new service. These addrs
// are lazily evaluated, so no additional work is done
// until the addr is observed.
val updated = keys.map { k =>
val svc = cache.get(k) match {
case Some(svc) => svc
case None =>
log.debug("consul added: %s", k)
stats.adds.incr()
SvcAddr(consulApi, name, k, domain, consistency, preferServiceAddress, stats.service)
}
k -> svc
}.toMap
state() = Activity.Ok(updated)
loop(index1, updated)
}
}
val pending = loop(None, Map.empty)
Closable.make { _ =>
stopped = true
pending.raise(DcRelease)
stats.closes.incr()
Future.Unit
}
}
Activity(states)
}
private[this] val NoIndexException =
Failure("consul did not return an index")
private[this] val DcRelease =
Failure("dc observation released", Failure.Interrupted)
private[this] val toServices: v1.Indexed[Map[String, Seq[String]]] => v1.Indexed[Set[SvcKey]] = {
case v1.Indexed(services, idx) =>
val keys = services.flatMap {
case (svcName, tags) =>
tags.map(tag => SvcKey(svcName.toLowerCase, Some(tag.toLowerCase))) :+ SvcKey(svcName.toLowerCase, None)
}
v1.Indexed(keys.toSet, idx)
}
}
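// Illustrative usage sketch (hypothetical; not part of linkerd). It shows how the Activity
// returned by DcServices.apply above is typically observed: nothing is polled until the
// Activity is observed, and closing the observation stops the blocking-index loop.
// `api` and `dcStats` are illustration-only names, not linkerd identifiers.
//
//   val services: Activity[Map[SvcKey, Var[Addr]]] =
//     DcServices(api, name = "dc1", domain = None, consistency = None, stats = dcStats)
//   val obs: Closable = services.states.respond {
//     case Activity.Ok(svcs)  => log.debug("dc1 exposes %d services", svcs.size)
//     case Activity.Failed(e) => log.error(e, "dc1 observation failed")
//     case Activity.Pending   => ()
//   }
//   // ... later, when the namer is shut down:
//   // Await.result(obs.close())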
|
hhtpcd/linkerd
|
namer/consul/src/main/scala/io/buoyant/namer/consul/DcServices.scala
|
Scala
|
apache-2.0
| 4,165
|
/*
* Copyright 2014-2020 Rik van der Kleij
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package intellij.haskell.external.component
import com.intellij.openapi.editor.SelectionModel
import com.intellij.openapi.module.Module
import com.intellij.openapi.project.Project
import com.intellij.psi.{PsiElement, PsiFile}
import intellij.haskell.HaskellNotificationGroup
import intellij.haskell.cabal.PackageInfo
import intellij.haskell.external.component.DefinitionLocationComponent.DefinitionLocationResult
import intellij.haskell.external.component.NameInfoComponentResult.NameInfoResult
import intellij.haskell.external.component.TypeInfoComponentResult.TypeInfoResult
import intellij.haskell.external.execution.CompilationResult
import intellij.haskell.external.repl.StackRepl.StanzaType
import intellij.haskell.external.repl.StackReplsManager
import intellij.haskell.psi.{HaskellPsiUtil, HaskellQualifiedNameElement}
import intellij.haskell.util.index.{HaskellFileIndex, HaskellModuleNameIndex}
import intellij.haskell.util.{ApplicationUtil, GhcVersion, HaskellProjectUtil, ScalaFutureUtil}
import scala.concurrent._
object HaskellComponentsManager {
case class ComponentTarget(module: Module, modulePath: String, packageName: String, target: String, stanzaType: StanzaType, sourceDirs: Seq[String],
mainIs: Option[String], isImplicitPreludeActive: Boolean, buildDepends: Seq[String], exposedModuleNames: Seq[String] = Seq.empty)
def findModuleIdentifiersInCache(project: Project): Iterable[ModuleIdentifier] = {
import scala.concurrent.ExecutionContext.Implicits.global
val f = Future {
blocking {
BrowseModuleComponent.findModuleIdentifiersInCache(project)
}
}
ScalaFutureUtil.waitForValue(project, f, "find module identifiers in cache") match {
case Some(ids) => ids
case None => Iterable()
}
}
def clearLoadedModule(psiFile: PsiFile): Unit = {
val projectRepl = StackReplsManager.getProjectRepl(psiFile)
projectRepl.foreach(_.clearLoadedModule())
}
def findModuleIdentifiers(project: Project, moduleName: String)(implicit ec: ExecutionContext): Future[Option[Iterable[ModuleIdentifier]]] = {
BrowseModuleComponent.findModuleIdentifiers(project, moduleName)
}
def findDefinitionLocation(psiFile: PsiFile, qualifiedNameElement: HaskellQualifiedNameElement, importQualifier: Option[String]): DefinitionLocationResult = {
DefinitionLocationComponent.findDefinitionLocation(psiFile, qualifiedNameElement, importQualifier)
}
def findNameInfo(psiElement: PsiElement): NameInfoResult = {
NameInfoComponent.findNameInfo(psiElement)
}
def findAvailableModuleNamesWithIndex(stackComponentInfo: ComponentTarget): Iterable[String] = {
AvailableModuleNamesComponent.findAvailableModuleNamesWithIndex(stackComponentInfo)
}
def findAvailableModuleLibraryModuleNamesWithIndex(module: Module): Iterable[String] = {
AvailableModuleNamesComponent.findAvailableModuleLibraryModuleNamesWithIndex(module)
}
def findStackComponentGlobalInfo(stackComponentInfo: ComponentTarget): Option[StackComponentGlobalInfo] = {
StackComponentGlobalInfoComponent.findStackComponentGlobalInfo(stackComponentInfo)
}
def findStackComponentInfo(psiFile: PsiFile): Option[ComponentTarget] = {
HaskellModuleInfoComponent.findComponentTarget(psiFile)
}
def findComponentTarget(project: Project, filePath: String): Option[ComponentTarget] = {
HaskellModuleInfoComponent.findComponentTarget(project, filePath)
}
def getGlobalProjectInfo(project: Project): Option[GlobalProjectInfo] = {
GlobalProjectInfoComponent.findGlobalProjectInfo(project)
}
def getSupportedLanguageExtension(project: Project): Iterable[String] = {
GlobalProjectInfoComponent.findGlobalProjectInfo(project).map(_.supportedLanguageExtensions).getOrElse(Iterable())
}
def getGhcVersion(project: Project): Option[GhcVersion] = {
GlobalProjectInfoComponent.findGlobalProjectInfo(project).map(_.ghcVersion)
}
def getAvailableStackagePackages(project: Project): Iterable[String] = {
GlobalProjectInfoComponent.findGlobalProjectInfo(project).map(_.availableStackagePackageNames).getOrElse(Iterable())
}
def findProjectPackageNames(project: Project): Option[Iterable[String]] = {
StackReplsManager.getReplsManager(project).map(_.modulePackageInfos.map { case (_, ci) => ci.packageName })
}
def findCabalInfos(project: Project): Iterable[PackageInfo] = {
StackReplsManager.getReplsManager(project).map(_.modulePackageInfos.map { case (_, ci) => ci }).getOrElse(Iterable())
}
def loadHaskellFile(psiFile: PsiFile, fileModified: Boolean): Option[CompilationResult] = {
LoadComponent.load(psiFile, fileModified)
}
def invalidateFileInfos(psiFile: PsiFile): Unit = {
HaskellModuleInfoComponent.invalidate(psiFile)
}
def findProjectModulePackageNames(project: Project): Seq[(Module, String)] = {
findStackComponentInfos(project).map(info => (info.module, info.packageName)).distinct
}
def invalidateDefinitionLocations(project: Project): Unit = {
DefinitionLocationComponent.invalidate(project)
}
def findLibraryPackageInfos(project: Project): Seq[LibraryPackageInfo] = {
LibraryPackageInfoComponent.libraryPackageInfos(project).toSeq
}
def invalidateBrowseInfo(project: Project, moduleNames: Seq[String]): Unit = {
BrowseModuleComponent.invalidateModuleNames(project, moduleNames)
}
def findStackComponentInfos(project: Project): Seq[ComponentTarget] = {
StackReplsManager.getReplsManager(project).map(_.componentTargets.toSeq).getOrElse(Seq())
}
def invalidateGlobalCaches(project: Project): Unit = {
HaskellNotificationGroup.logInfoEvent(project, "Start to invalidate cache")
GlobalProjectInfoComponent.invalidate(project)
LibraryPackageInfoComponent.invalidate(project)
HaskellModuleInfoComponent.invalidate(project)
BrowseModuleComponent.invalidate(project)
NameInfoComponent.invalidateAll(project)
DefinitionLocationComponent.invalidateAll(project)
TypeInfoComponent.invalidateAll(project)
HaskellPsiUtil.invalidateAllModuleNames(project)
LibraryPackageInfoComponent.invalidate(project)
HaskellModuleNameIndex.invalidate(project)
FileModuleIdentifiers.invalidateAll(project)
StackComponentGlobalInfoComponent.invalidate(project)
HaskellNotificationGroup.logInfoEvent(project, "Finished with invalidating cache")
}
def preloadLibraryIdentifiersCaches(project: Project): Unit = {
HaskellNotificationGroup.logInfoEvent(project, "Start to preload library identifiers cache")
preloadLibraryIdentifiers(project)
HaskellNotificationGroup.logInfoEvent(project, "Finished with preloading library identifiers cache")
}
def preloadAllLibraryIdentifiersCaches(project: Project): Unit = {
HaskellNotificationGroup.logInfoEvent(project, "Start to preload all library identifiers cache")
preloadAllLibraryIdentifiers(project)
HaskellNotificationGroup.logInfoEvent(project, "Finished with preloading all library identifiers cache")
}
def preloadStackComponentInfoCache(project: Project): Unit = {
HaskellNotificationGroup.logInfoEvent(project, "Start to preload stack component info cache")
preloadStackComponentInfos(project)
HaskellNotificationGroup.logInfoEvent(project, "Finished with preloading stack component info cache")
}
def preloadLibraryFilesCache(project: Project): Unit = {
HaskellNotificationGroup.logInfoEvent(project, "Start to preload library files cache")
preloadLibraryFiles(project)
HaskellNotificationGroup.logInfoEvent(project, "Finished with preloading library files cache")
}
def findTypeInfoForElement(psiElement: PsiElement): TypeInfoResult = {
TypeInfoComponent.findTypeInfoForElement(psiElement)
}
def findTypeInfoForSelection(psiFile: PsiFile, selectionModel: SelectionModel): TypeInfoResult = {
TypeInfoComponent.findTypeInfoForSelection(psiFile, selectionModel)
}
private def preloadStackComponentInfos(project: Project): Unit = {
if (!project.isDisposed) {
findStackComponentInfos(project).foreach { info =>
findStackComponentGlobalInfo(info)
val projectModuleNames = AvailableModuleNamesComponent.findAvailableProjectModuleNames(info)
HaskellModuleNameIndex.fillCache(project, projectModuleNames)
}
}
}
private def preloadLibraryFiles(project: Project): Unit = {
if (!project.isDisposed) {
val libraryPackageInfos = LibraryPackageInfoComponent.libraryPackageInfos(project)
HaskellModuleNameIndex.fillCache(project, libraryPackageInfos.flatMap(libraryModuleNames => libraryModuleNames.exposedModuleNames ++ libraryModuleNames.hiddenModuleNames))
}
}
private def preloadLibraryIdentifiers(project: Project): Unit = {
import scala.concurrent.ExecutionContext.Implicits.global
if (!project.isDisposed) {
BrowseModuleComponent.findModuleIdentifiers(project, HaskellProjectUtil.Prelude)
}
if (!project.isDisposed) {
val projectHaskellFiles = ApplicationUtil.runReadActionWithFileAccess(project, HaskellFileIndex.findProjectHaskellFiles(project), "Find Haskell project files").toOption.getOrElse(Iterable())
val componentInfos = projectHaskellFiles.flatMap(f => HaskellComponentsManager.findStackComponentInfo(f)).toSeq.distinct
val importedLibraryModuleNames =
projectHaskellFiles.flatMap(f => {
if (project.isDisposed) {
Iterable()
} else {
val packageInfos = componentInfos.flatMap(HaskellComponentsManager.findStackComponentGlobalInfo).flatMap(_.packageInfos)
val exposedLibraryModuleNames = packageInfos.flatMap(_.exposedModuleNames).distinct
val importDeclarations = ApplicationUtil.runReadActionWithFileAccess(project, HaskellPsiUtil.findImportDeclarations(f), "In preloadLibraryIdentifiers findImportDeclarations").toOption.getOrElse(Iterable())
importDeclarations.flatMap(id => ApplicationUtil.runReadAction(id.getModuleName, Some(project))).filter(mn => exposedLibraryModuleNames.contains(mn)).filterNot(_ == HaskellProjectUtil.Prelude)
}
})
if (!project.isDisposed) {
if (StackReplsManager.getGlobalRepl(project).exists(_.available)) {
importedLibraryModuleNames.toSeq.distinct.foreach(mn => {
if (!project.isDisposed) {
BrowseModuleComponent.findModuleIdentifiersSync(project, mn)
}
})
}
}
}
}
private def preloadAllLibraryIdentifiers(project: Project): Unit = {
if (!project.isDisposed) {
val componentInfos = findStackComponentInfos(project)
val packageInfos = componentInfos.flatMap(info => findStackComponentGlobalInfo(info).map(_.packageInfos).getOrElse(Seq())).distinct
if (!project.isDisposed) {
if (StackReplsManager.getGlobalRepl(project).exists(_.available)) {
packageInfos.flatMap(_.exposedModuleNames).distinct.foreach(mn => {
if (!project.isDisposed) {
BrowseModuleComponent.findModuleIdentifiersSync(project, mn)
// We have to wait for other requests which have more priority because those are on dispatch thread
Thread.sleep(100)
}
})
}
}
}
}
}
|
rikvdkleij/intellij-haskell
|
src/main/scala/intellij/haskell/external/component/HaskellComponentsManager.scala
|
Scala
|
apache-2.0
| 11,920
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.status
import java.util.Date
import scala.collection.mutable.HashMap
import org.apache.spark.JobExecutionStatus
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.scheduler.{AccumulableInfo, StageInfo, TaskInfo}
import org.apache.spark.status.api.v1
import org.apache.spark.storage.RDDInfo
import org.apache.spark.ui.SparkUI
import org.apache.spark.util.AccumulatorContext
import org.apache.spark.util.collection.OpenHashSet
import org.apache.spark.util.kvstore.KVStore
/**
* A mutable representation of a live entity in Spark (jobs, stages, tasks, et al). Every live
* entity uses one of these instances to keep track of their evolving state, and periodically
* flush an immutable view of the entity to the app state store.
*/
private[spark] abstract class LiveEntity {
var lastWriteTime = 0L
def write(store: KVStore, now: Long): Unit = {
store.write(doUpdate())
lastWriteTime = now
}
/**
* Returns an updated view of entity data, to be stored in the status store, reflecting the
* latest information collected by the listener.
*/
protected def doUpdate(): Any
}
private class LiveJob(
val jobId: Int,
name: String,
submissionTime: Option[Date],
val stageIds: Seq[Int],
jobGroup: Option[String],
numTasks: Int) extends LiveEntity {
var activeTasks = 0
var completedTasks = 0
var failedTasks = 0
// Holds both the stage ID and the task index, packed into a single long value.
val completedIndices = new OpenHashSet[Long]()
var killedTasks = 0
var killedSummary: Map[String, Int] = Map()
var skippedTasks = 0
var skippedStages = Set[Int]()
var status = JobExecutionStatus.RUNNING
var completionTime: Option[Date] = None
var completedStages: Set[Int] = Set()
var activeStages = 0
var failedStages = 0
override protected def doUpdate(): Any = {
val info = new v1.JobData(
jobId,
name,
None, // description is always None?
submissionTime,
completionTime,
stageIds,
jobGroup,
status,
numTasks,
activeTasks,
completedTasks,
skippedTasks,
failedTasks,
killedTasks,
completedIndices.size,
activeStages,
completedStages.size,
skippedStages.size,
failedStages,
killedSummary)
new JobDataWrapper(info, skippedStages)
}
}
private class LiveTask(
var info: TaskInfo,
stageId: Int,
stageAttemptId: Int,
lastUpdateTime: Option[Long]) extends LiveEntity {
import LiveEntityHelpers._
private var recordedMetrics: v1.TaskMetrics = null
var errorMessage: Option[String] = None
/**
* Update the metrics for the task and return the difference between the previous and new
* values.
*/
def updateMetrics(metrics: TaskMetrics): v1.TaskMetrics = {
if (metrics != null) {
val old = recordedMetrics
recordedMetrics = new v1.TaskMetrics(
metrics.executorDeserializeTime,
metrics.executorDeserializeCpuTime,
metrics.executorRunTime,
metrics.executorCpuTime,
metrics.resultSize,
metrics.jvmGCTime,
metrics.resultSerializationTime,
metrics.memoryBytesSpilled,
metrics.diskBytesSpilled,
metrics.peakExecutionMemory,
new v1.InputMetrics(
metrics.inputMetrics.bytesRead,
metrics.inputMetrics.recordsRead),
new v1.OutputMetrics(
metrics.outputMetrics.bytesWritten,
metrics.outputMetrics.recordsWritten),
new v1.ShuffleReadMetrics(
metrics.shuffleReadMetrics.remoteBlocksFetched,
metrics.shuffleReadMetrics.localBlocksFetched,
metrics.shuffleReadMetrics.fetchWaitTime,
metrics.shuffleReadMetrics.remoteBytesRead,
metrics.shuffleReadMetrics.remoteBytesReadToDisk,
metrics.shuffleReadMetrics.localBytesRead,
metrics.shuffleReadMetrics.recordsRead),
new v1.ShuffleWriteMetrics(
metrics.shuffleWriteMetrics.bytesWritten,
metrics.shuffleWriteMetrics.writeTime,
metrics.shuffleWriteMetrics.recordsWritten))
if (old != null) calculateMetricsDelta(recordedMetrics, old) else recordedMetrics
} else {
null
}
}
/**
* Return a new TaskMetrics object containing the delta of the various fields of the given
* metrics objects. This is currently targeted at updating stage data, so it does not
* necessarily calculate deltas for all the fields.
*/
private def calculateMetricsDelta(
metrics: v1.TaskMetrics,
old: v1.TaskMetrics): v1.TaskMetrics = {
val shuffleWriteDelta = new v1.ShuffleWriteMetrics(
metrics.shuffleWriteMetrics.bytesWritten - old.shuffleWriteMetrics.bytesWritten,
0L,
metrics.shuffleWriteMetrics.recordsWritten - old.shuffleWriteMetrics.recordsWritten)
val shuffleReadDelta = new v1.ShuffleReadMetrics(
0L, 0L, 0L,
metrics.shuffleReadMetrics.remoteBytesRead - old.shuffleReadMetrics.remoteBytesRead,
metrics.shuffleReadMetrics.remoteBytesReadToDisk -
old.shuffleReadMetrics.remoteBytesReadToDisk,
metrics.shuffleReadMetrics.localBytesRead - old.shuffleReadMetrics.localBytesRead,
metrics.shuffleReadMetrics.recordsRead - old.shuffleReadMetrics.recordsRead)
val inputDelta = new v1.InputMetrics(
metrics.inputMetrics.bytesRead - old.inputMetrics.bytesRead,
metrics.inputMetrics.recordsRead - old.inputMetrics.recordsRead)
val outputDelta = new v1.OutputMetrics(
metrics.outputMetrics.bytesWritten - old.outputMetrics.bytesWritten,
metrics.outputMetrics.recordsWritten - old.outputMetrics.recordsWritten)
new v1.TaskMetrics(
0L, 0L,
metrics.executorRunTime - old.executorRunTime,
metrics.executorCpuTime - old.executorCpuTime,
0L, 0L, 0L,
metrics.memoryBytesSpilled - old.memoryBytesSpilled,
metrics.diskBytesSpilled - old.diskBytesSpilled,
0L,
inputDelta,
outputDelta,
shuffleReadDelta,
shuffleWriteDelta)
}
override protected def doUpdate(): Any = {
val duration = if (info.finished) {
info.duration
} else {
info.timeRunning(lastUpdateTime.getOrElse(System.currentTimeMillis()))
}
val task = new v1.TaskData(
info.taskId,
info.index,
info.attemptNumber,
new Date(info.launchTime),
if (info.gettingResult) Some(new Date(info.gettingResultTime)) else None,
Some(duration),
info.executorId,
info.host,
info.status,
info.taskLocality.toString(),
info.speculative,
newAccumulatorInfos(info.accumulables),
errorMessage,
Option(recordedMetrics))
new TaskDataWrapper(task, stageId, stageAttemptId)
}
}
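// Worked example (illustrative; not part of Spark) of the delta semantics implemented by
// calculateMetricsDelta above. Given an old snapshot and a new metrics view with
//   old.shuffleWriteMetrics.bytesWritten == 100L
//   new.shuffleWriteMetrics.bytesWritten == 150L
// the returned v1.TaskMetrics has shuffleWriteMetrics.bytesWritten == 50L, while fields the
// stage does not aggregate (executorDeserializeTime, resultSize, jvmGCTime, ...) are 0L.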
private class LiveExecutor(val executorId: String, _addTime: Long) extends LiveEntity {
var hostPort: String = null
var host: String = null
var isActive = true
var totalCores = 0
val addTime = new Date(_addTime)
var removeTime: Date = null
var removeReason: String = null
var rddBlocks = 0
var memoryUsed = 0L
var diskUsed = 0L
var maxTasks = 0
var maxMemory = 0L
var totalTasks = 0
var activeTasks = 0
var completedTasks = 0
var failedTasks = 0
var totalDuration = 0L
var totalGcTime = 0L
var totalInputBytes = 0L
var totalShuffleRead = 0L
var totalShuffleWrite = 0L
var isBlacklisted = false
var executorLogs = Map[String, String]()
// Memory metrics. They may not be recorded (e.g. old event logs) so if totalOnHeap is not
// initialized, the store will not contain this information.
var totalOnHeap = -1L
var totalOffHeap = 0L
var usedOnHeap = 0L
var usedOffHeap = 0L
def hasMemoryInfo: Boolean = totalOnHeap >= 0L
def hostname: String = if (host != null) host else hostPort.split(":")(0)
override protected def doUpdate(): Any = {
val memoryMetrics = if (totalOnHeap >= 0) {
Some(new v1.MemoryMetrics(usedOnHeap, usedOffHeap, totalOnHeap, totalOffHeap))
} else {
None
}
val info = new v1.ExecutorSummary(
executorId,
if (hostPort != null) hostPort else host,
isActive,
rddBlocks,
memoryUsed,
diskUsed,
totalCores,
maxTasks,
activeTasks,
failedTasks,
completedTasks,
totalTasks,
totalDuration,
totalGcTime,
totalInputBytes,
totalShuffleRead,
totalShuffleWrite,
isBlacklisted,
maxMemory,
addTime,
Option(removeTime),
Option(removeReason),
executorLogs,
memoryMetrics)
new ExecutorSummaryWrapper(info)
}
}
/** Metrics tracked per stage (both total and per executor). */
private class MetricsTracker {
var executorRunTime = 0L
var executorCpuTime = 0L
var inputBytes = 0L
var inputRecords = 0L
var outputBytes = 0L
var outputRecords = 0L
var shuffleReadBytes = 0L
var shuffleReadRecords = 0L
var shuffleWriteBytes = 0L
var shuffleWriteRecords = 0L
var memoryBytesSpilled = 0L
var diskBytesSpilled = 0L
def update(delta: v1.TaskMetrics): Unit = {
executorRunTime += delta.executorRunTime
executorCpuTime += delta.executorCpuTime
inputBytes += delta.inputMetrics.bytesRead
inputRecords += delta.inputMetrics.recordsRead
outputBytes += delta.outputMetrics.bytesWritten
outputRecords += delta.outputMetrics.recordsWritten
shuffleReadBytes += delta.shuffleReadMetrics.localBytesRead +
delta.shuffleReadMetrics.remoteBytesRead
shuffleReadRecords += delta.shuffleReadMetrics.recordsRead
shuffleWriteBytes += delta.shuffleWriteMetrics.bytesWritten
shuffleWriteRecords += delta.shuffleWriteMetrics.recordsWritten
memoryBytesSpilled += delta.memoryBytesSpilled
diskBytesSpilled += delta.diskBytesSpilled
}
}
private class LiveExecutorStageSummary(
stageId: Int,
attemptId: Int,
executorId: String) extends LiveEntity {
var taskTime = 0L
var succeededTasks = 0
var failedTasks = 0
var killedTasks = 0
val metrics = new MetricsTracker()
override protected def doUpdate(): Any = {
val info = new v1.ExecutorStageSummary(
taskTime,
failedTasks,
succeededTasks,
killedTasks,
metrics.inputBytes,
metrics.inputRecords,
metrics.outputBytes,
metrics.outputRecords,
metrics.shuffleReadBytes,
metrics.shuffleReadRecords,
metrics.shuffleWriteBytes,
metrics.shuffleWriteRecords,
metrics.memoryBytesSpilled,
metrics.diskBytesSpilled)
new ExecutorStageSummaryWrapper(stageId, attemptId, executorId, info)
}
}
private class LiveStage extends LiveEntity {
import LiveEntityHelpers._
var jobs = Seq[LiveJob]()
var jobIds = Set[Int]()
var info: StageInfo = null
var status = v1.StageStatus.PENDING
var description: Option[String] = None
var schedulingPool: String = SparkUI.DEFAULT_POOL_NAME
var activeTasks = 0
var completedTasks = 0
var failedTasks = 0
val completedIndices = new OpenHashSet[Int]()
var killedTasks = 0
var killedSummary: Map[String, Int] = Map()
var firstLaunchTime = Long.MaxValue
val metrics = new MetricsTracker()
val executorSummaries = new HashMap[String, LiveExecutorStageSummary]()
def executorSummary(executorId: String): LiveExecutorStageSummary = {
executorSummaries.getOrElseUpdate(executorId,
new LiveExecutorStageSummary(info.stageId, info.attemptId, executorId))
}
override protected def doUpdate(): Any = {
val update = new v1.StageData(
status,
info.stageId,
info.attemptId,
info.numTasks,
activeTasks,
completedTasks,
failedTasks,
killedTasks,
completedIndices.size,
metrics.executorRunTime,
metrics.executorCpuTime,
info.submissionTime.map(new Date(_)),
if (firstLaunchTime < Long.MaxValue) Some(new Date(firstLaunchTime)) else None,
info.completionTime.map(new Date(_)),
info.failureReason,
metrics.inputBytes,
metrics.inputRecords,
metrics.outputBytes,
metrics.outputRecords,
metrics.shuffleReadBytes,
metrics.shuffleReadRecords,
metrics.shuffleWriteBytes,
metrics.shuffleWriteRecords,
metrics.memoryBytesSpilled,
metrics.diskBytesSpilled,
info.name,
description,
info.details,
schedulingPool,
info.rddInfos.map(_.id),
newAccumulatorInfos(info.accumulables.values),
None,
None,
killedSummary)
new StageDataWrapper(update, jobIds)
}
}
private class LiveRDDPartition(val blockName: String) {
// Pointers used by RDDPartitionSeq.
@volatile var prev: LiveRDDPartition = null
@volatile var next: LiveRDDPartition = null
var value: v1.RDDPartitionInfo = null
def executors: Seq[String] = value.executors
def memoryUsed: Long = value.memoryUsed
def diskUsed: Long = value.diskUsed
def update(
executors: Seq[String],
storageLevel: String,
memoryUsed: Long,
diskUsed: Long): Unit = {
value = new v1.RDDPartitionInfo(
blockName,
storageLevel,
memoryUsed,
diskUsed,
executors)
}
}
private class LiveRDDDistribution(exec: LiveExecutor) {
val executorId = exec.executorId
var memoryUsed = 0L
var diskUsed = 0L
var onHeapUsed = 0L
var offHeapUsed = 0L
// Keep the last update handy. This avoids recomputing the API view when not needed.
var lastUpdate: v1.RDDDataDistribution = null
def toApi(): v1.RDDDataDistribution = {
if (lastUpdate == null) {
lastUpdate = new v1.RDDDataDistribution(
exec.hostPort,
memoryUsed,
exec.maxMemory - exec.memoryUsed,
diskUsed,
if (exec.hasMemoryInfo) Some(onHeapUsed) else None,
if (exec.hasMemoryInfo) Some(offHeapUsed) else None,
if (exec.hasMemoryInfo) Some(exec.totalOnHeap - exec.usedOnHeap) else None,
if (exec.hasMemoryInfo) Some(exec.totalOffHeap - exec.usedOffHeap) else None)
}
lastUpdate
}
}
private class LiveRDD(val info: RDDInfo) extends LiveEntity {
var storageLevel: String = info.storageLevel.description
var memoryUsed = 0L
var diskUsed = 0L
private val partitions = new HashMap[String, LiveRDDPartition]()
private val partitionSeq = new RDDPartitionSeq()
private val distributions = new HashMap[String, LiveRDDDistribution]()
def partition(blockName: String): LiveRDDPartition = {
partitions.getOrElseUpdate(blockName, {
val part = new LiveRDDPartition(blockName)
part.update(Nil, storageLevel, 0L, 0L)
partitionSeq.addPartition(part)
part
})
}
def removePartition(blockName: String): Unit = {
partitions.remove(blockName).foreach(partitionSeq.removePartition)
}
def distribution(exec: LiveExecutor): LiveRDDDistribution = {
distributions.getOrElseUpdate(exec.executorId, new LiveRDDDistribution(exec))
}
def removeDistribution(exec: LiveExecutor): Boolean = {
distributions.remove(exec.executorId).isDefined
}
def distributionOpt(exec: LiveExecutor): Option[LiveRDDDistribution] = {
distributions.get(exec.executorId)
}
override protected def doUpdate(): Any = {
val dists = if (distributions.nonEmpty) {
Some(distributions.values.map(_.toApi()).toSeq)
} else {
None
}
val rdd = new v1.RDDStorageInfo(
info.id,
info.name,
info.numPartitions,
partitions.size,
storageLevel,
memoryUsed,
diskUsed,
dists,
Some(partitionSeq))
new RDDStorageInfoWrapper(rdd)
}
}
private class SchedulerPool(name: String) extends LiveEntity {
var stageIds = Set[Int]()
override protected def doUpdate(): Any = {
new PoolData(name, stageIds)
}
}
private object LiveEntityHelpers {
def newAccumulatorInfos(accums: Iterable[AccumulableInfo]): Seq[v1.AccumulableInfo] = {
accums
.filter { acc =>
// We don't need to store internal or SQL accumulables as their values will be shown in
// other places, so drop them to reduce the memory usage.
!acc.internal && !acc.metadata.contains(AccumulatorContext.SQL_ACCUM_IDENTIFIER)
}
.map { acc =>
new v1.AccumulableInfo(
acc.id,
acc.name.orNull,
acc.update.map(_.toString()),
acc.value.map(_.toString()).orNull)
}
.toSeq
}
}
/**
* A custom sequence of partitions based on a mutable linked list.
*
* The external interface is an immutable Seq, which is thread-safe for traversal. There are no
* guarantees about consistency though - iteration might return elements that have been removed
* or miss added elements.
*
* Internally, the sequence is mutable, and elements can modify the data they expose. Additions and
* removals are O(1). It is not safe to do multiple writes concurrently.
*/
private class RDDPartitionSeq extends Seq[v1.RDDPartitionInfo] {
@volatile private var _head: LiveRDDPartition = null
@volatile private var _tail: LiveRDDPartition = null
@volatile var count = 0
override def apply(idx: Int): v1.RDDPartitionInfo = {
var curr = 0
var e = _head
while (curr < idx && e != null) {
curr += 1
e = e.next
}
if (e != null) e.value else throw new IndexOutOfBoundsException(idx.toString)
}
override def iterator: Iterator[v1.RDDPartitionInfo] = {
new Iterator[v1.RDDPartitionInfo] {
var current = _head
override def hasNext: Boolean = current != null
override def next(): v1.RDDPartitionInfo = {
if (current != null) {
val tmp = current
current = tmp.next
tmp.value
} else {
throw new NoSuchElementException()
}
}
}
}
override def length: Int = count
def addPartition(part: LiveRDDPartition): Unit = {
part.prev = _tail
if (_tail != null) {
_tail.next = part
}
if (_head == null) {
_head = part
}
_tail = part
count += 1
}
def removePartition(part: LiveRDDPartition): Unit = {
count -= 1
// Remove the partition from the list, but leave the pointers unchanged. That ensures a best
// effort at returning existing elements when iterations still reference the removed partition.
if (part.prev != null) {
part.prev.next = part.next
}
if (part eq _head) {
_head = part.next
}
if (part.next != null) {
part.next.prev = part.prev
}
if (part eq _tail) {
_tail = part.prev
}
}
}
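// Illustrative usage sketch (hypothetical; not part of Spark) of RDDPartitionSeq as driven by
// LiveRDD above: O(1) appends and removals, read back through the immutable Seq interface.
//
//   val seq  = new RDDPartitionSeq()
//   val part = new LiveRDDPartition("rdd_0_0")
//   part.update(Seq("exec-1"), "MEMORY_ONLY", memoryUsed = 1024L, diskUsed = 0L)
//   seq.addPartition(part)
//   assert(seq.length == 1 && seq.head.blockName == "rdd_0_0")
//   seq.removePartition(part)
//   assert(seq.isEmpty)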
|
adrian-ionescu/apache-spark
|
core/src/main/scala/org/apache/spark/status/LiveEntity.scala
|
Scala
|
apache-2.0
| 19,409
|
package renderer
import communication.PlayerClient
import gamelogic.GameState
import globalvariables.VariableStorage
import scala.scalajs.js
import scala.scalajs.js.JSApp
import scala.scalajs.js.timers.setTimeout
import globalvariables.VariableStorage.retrieveValue
import gui.UIParent
import org.scalajs.dom
import org.scalajs.dom.html
import org.scalajs.dom.raw.Event
import sharednodejsapis.BrowserWindow
object Renderer extends JSApp {
def main(): Unit = {
var playerClient: Option[PlayerClient] = None
dom.document.title match {
case "Oh Hell!" =>
try {
val playerName: String = retrieveValue("playerName").asInstanceOf[String]
val gameName: String = retrieveValue("gameName").asInstanceOf[String]
val address: String = retrieveValue("address").asInstanceOf[String]
val port: Int = retrieveValue("port").asInstanceOf[Int]
val password: Int = retrieveValue("password").asInstanceOf[Int]
val players: Vector[String] = retrieveValue("players").asInstanceOf[js.Array[String]].toVector
val maxNbrOfCards: Int = retrieveValue("maxNbrCards").asInstanceOf[Int]
playerClient = Some(new PlayerClient(
playerName, gameName, address, port, password, GameState.originalState(players, maxNbrOfCards)
))
} catch {
case e: Throwable =>
e.printStackTrace()
dom.window.alert("FATAL ERROR: something went wrong with connection data storage. Please try again.")
dom.window.location.href = "../../gamemenus/mainscreen/mainscreen.html"
}
case "Bets" =>
BetWindow
case "Score Board" =>
ScoreBoard
case "Card Viewer" =>
CardViewer
case "Score History" =>
ScoreHistory
case _ =>
println("I should not be here.")
dom.window.alert("FATAL ERROR: not a correct Html file loading this.")
dom.window.location.href = "../../gamemenus/mainscreen/mainscreen.html"
}
}
}
|
sherpal/oh-hell-card-game
|
gameplaying/src/main/scala/renderer/Renderer.scala
|
Scala
|
mit
| 2,087
|
package com.github.eerohele.expek
import java.math.BigInteger
import javax.xml.transform.Source
import net.sf.saxon.s9api.{XdmNode, XdmNodeKind, XdmValue}
import org.hamcrest.StringDescription
import org.specs2.matcher.{AnyMatchers, Expectable, MatchFailure, MatchResult, MatchResultCombinators, Matcher}
import org.xmlunit.matchers.CompareMatcher
import scala.xml.Node
/** A specs2 matcher that matches against the result of an XSLT transformation.
*
* The result of an XSLT transformation can either be a node, which can be one of:
*
* - Element
* - Document
* - Processing instruction
* - Text
* - Attribute
* - Comment
*
* Or it can be an atomic value, which, when converted into a native Java type, can be one of:
*
* - [[String]]
* - [[BigInteger]]
* - [[BigDecimal]]
* - [[Double]]
* - [[Float]]
* - [[Boolean]]
* - [[QName]]
* - [[URI]]
*
* Element and document nodes are converted into [[Source]] so that we can compare them with XMLUnit.
*
* [[Node]] instances in the `expected` [[Vector]] are also converted into a [[Source]] so that we can compare
* those to XSLT transformation results.
*
* Other node types are kept as is and we compare their string representations.
*
* Atomic values that are the result of an XSLT transformation are converted to native Java types with Saxon's
* `XdmAtomicValue.getValue()` method and compared to the expected values.
*
*/
sealed class XsltResultMatcher[T <: Transformation](expected: Vector[Any])(implicit matcher: Source => CompareMatcher)
extends Matcher[T] with MatchResultCombinators {
def apply[S <: T](expectable: Expectable[S]): MatchResult[S] = {
val actual: Vector[Any] = expectable.value.result
val result = expected.zip(actual).map((intoResult[S](expectable) _).tupled)
if (result.nonEmpty) {
result.reduceLeft(_ and _)
} else {
MatchFailure("ok", "The transformation produces an empty value.", expectable)
}
}
private def intoResult[S <: T](expectable: Expectable[S])(expected: Any, actual: Any): MatchResult[S] = {
(expected, actual) match {
/** If [[expected]] is a [[Source]] and [[actual]] is an [[XdmNode]], they are either XML element or document
* nodes. In that case, compare them with XMLUnit.
*/
case (e: Source, a: XdmNode) => {
val compareMatcher = matcher(e)
result(
compareMatcher.matches(a.asSource),
"ok",
createKoMessage(compareMatcher, a.asSource).toString,
expectable
)
}
/** If [[expected]] and [[actual]] are instances of [[XdmNode]], they are one of:
*
* - A `text()` node
* - A `processing-instruction()` node
* - An `attribute()` node
*
* In that case, compare their string representations.
*/
case (e: XdmNode, a: XdmNode) => {
result(
e.toString == a.toString, "ok", s"$a is not equal to $e", expectable
)
}
/** In any other case, [[expected]] and [[actual]] are native Java types and we can compare them directly.
*/
case (e: Any, a: Any) => result(
e == a,
"ok", s"$a (${a.getClass.getName}) is not equal to $e (${e.getClass.getName})",
expectable
)
}
}
private def createKoMessage(compareMatcher: CompareMatcher, actual: Source): StringDescription = {
val description = new StringDescription
description.appendDescriptionOf(compareMatcher).appendText("\n but ")
compareMatcher.describeMismatch(actual, description)
description
}
}
/** A trait you can mix in to your specs2 specification to compare the results of [[Transformation]] instances to
* a sequence of expected values.
*/
trait XsltResultMatchers extends AnyMatchers {
import utils.NodeConversions.nodeToSource
/** A function that transforms your expected XML before it's compared with the actual XML.
*
* NOOP by default. See [[NodeRefinery]] for examples on the kinds of functions you might want to use this with.
*/
def refineOutput: Node => Node = identity
/** Create a matcher that compares the supplied arguments against the result of an XML transformation.
*
* Example use:
*
* {{{
* class MySpecification extends mutable.Specification with XsltSpecification {
* val stylesheet = XSLT.file("my-stylesheet.xsl")
*
* "Convert a into b" in {
* // Apply the templates for the <a> element in the XSLT stylesheet and check the result.
* applying { <a>foo</a> } must produce { <b>foo</b> }
* }
* }
* }}}
*/
def produce(result: => Vector[Any])(implicit matcher: Source => CompareMatcher): Matcher[Transformation] = {
new XsltResultMatcher(result.map(convert))(matcher)
}
def produce(any: Any*)(implicit matcher: Source => CompareMatcher): Matcher[Transformation] = {
produce(any.toVector)(matcher)
}
/** Create a matcher that checks whether your transformation produces nothing.
*
* Example:
*
* {{{
* <!-- stylesheet.xsl -->
* <xsl:template match="a"/>
*
* // MySpecification.scala
* applying { <a/> } producesNothing
* }}}
*/
def produceNothing[T <: Transformation]: Matcher[T] = new Matcher[T] {
def apply[S <: T](iterable: Expectable[S]): MatchResult[S] = {
result(iterable.value.result.isEmpty,
iterable.description + " produces nothing",
iterable.description + " produces something", iterable)
}
}
// scalastyle:on method.name
protected def convert(value: Any) = {
value match {
/** An xs:integer is a [[BigInteger]], so we'll convert any [[Int]] that the user expects into a
* [[BigInteger]] so that they can be successfully compared without the user having to write
* `BigInteger.valueOf(n)` all over the place.
*/
case x: Int => BigInteger.valueOf(x)
/** If the expected value is an instance of [[Node]], convert it to a [[Source]] so that we can compare it
* with XMLUnit.
*/
case x: Node => (refineOutput andThen nodeToSource)(x)
/** If the expected value is an element() or a document-node(), convert it to a [[Source]] so that we can
* compare it with XMLUnit.
*/
case x: XdmNode if (x.getNodeKind == XdmNodeKind.ELEMENT || x.getNodeKind == XdmNodeKind.DOCUMENT) => {
x.asSource
}
case x: XdmNode => x
case x: XdmValue => x.asInstanceOf[XdmNode].asSource
case x: Any => x
}
}
}
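// Illustrative note (hypothetical; not part of expek) on why `convert` above widens Int to
// BigInteger: an XPath xs:integer comes back from Saxon as a java.math.BigInteger, so in a
// specification such as
//
//   applying { <a/> } must produce(2)
//
// the expected Int 2 only matches because it is first converted to BigInteger.valueOf(2).
// The element <a/> and the template producing the integer are assumptions for illustration.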
|
eerohele/expek
|
src/main/scala/com/github/eerohele/expek/XsltResultMatchers.scala
|
Scala
|
mit
| 7,163
|
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
import sbt.ScriptedPlugin._
import sbt._
import Keys.{version, _}
import com.typesafe.tools.mima.core._
import com.typesafe.tools.mima.plugin.MimaKeys._
import com.typesafe.tools.mima.plugin.MimaPlugin._
import de.heikoseeberger.sbtheader.HeaderKey._
import de.heikoseeberger.sbtheader.HeaderPattern
import scalariform.formatter.preferences._
import com.typesafe.sbt.SbtScalariform.scalariformSettings
import com.typesafe.sbt.SbtScalariform.ScalariformKeys
import bintray.BintrayPlugin.autoImport._
import interplay._
import interplay.Omnidoc.autoImport._
import interplay.PlayBuildBase.autoImport._
import scala.util.control.NonFatal
object BuildSettings {
// Argument for setting size of permgen space or meta space for all forked processes
val maxMetaspace = s"-XX:MaxMetaspaceSize=384m"
val snapshotBranch: String = {
try {
val branch = "git rev-parse --abbrev-ref HEAD".!!.trim
if (branch == "HEAD") {
// not on a branch, get the hash
"git rev-parse HEAD".!!.trim
} else branch
} catch {
case NonFatal(_) => "unknown"
}
}
/**
* File header settings
*/
val fileHeaderSettings = Seq(
excludes := Seq("*/cookie/encoding/*", "*/inject/SourceProvider.java"),
headers := Map(
"scala" -> (HeaderPattern.cStyleBlockComment,
"""|/*
| * Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
| */
|""".stripMargin),
"java" -> (HeaderPattern.cStyleBlockComment,
"""|/*
| * Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
| */
|""".stripMargin)
)
)
private val VersionPattern = """^(\d+)\.(\d+)\.(\d+)(-.*)?""".r
// Versions of previous minor releases being checked for binary compatibility
val mimaPreviousMinorReleaseVersions: Seq[String] = Seq("2.6.0")
def mimaPreviousPatchVersions(version: String): Seq[String] = version match {
case VersionPattern(epoch, major, minor, rest) => (0 until minor.toInt).map(v => s"$epoch.$major.$v")
case _ => sys.error(s"Cannot find previous versions for $version")
}
def mimaPreviousVersions(version: String): Set[String] =
mimaPreviousMinorReleaseVersions.toSet ++ mimaPreviousPatchVersions(version)
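// Worked example (illustrative only) of the two helpers above, for version "2.6.2":
//   mimaPreviousPatchVersions("2.6.2") == Seq("2.6.0", "2.6.1")
//   mimaPreviousVersions("2.6.2")      == Set("2.6.0", "2.6.1")
// i.e. every earlier patch release of the current minor plus the pinned minor releases.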
/**
* These settings are used by all projects
*/
def playCommonSettings: Seq[Setting[_]] = {
scalariformSettings ++ fileHeaderSettings ++ Seq(
ScalariformKeys.preferences := ScalariformKeys.preferences.value
.setPreference(SpacesAroundMultiImports, true)
.setPreference(SpaceInsideParentheses, false)
.setPreference(DanglingCloseParenthesis, Preserve)
.setPreference(PreserveSpaceBeforeArguments, true)
.setPreference(DoubleIndentClassDeclaration, true)
) ++ Seq(
homepage := Some(url("https://playframework.com")),
ivyLoggingLevel := UpdateLogging.DownloadOnly,
resolvers ++= Seq(
Resolver.sonatypeRepo("releases"),
Resolver.typesafeRepo("releases"),
Resolver.typesafeIvyRepo("releases")
),
scalacOptions in(Compile, doc) := {
// disable the new scaladoc feature for scala 2.12.0, might be removed in 2.12.0-1 (https://github.com/scala/scala-dev/issues/249)
CrossVersion.partialVersion(scalaVersion.value) match {
case Some((2, 12)) => Seq("-no-java-comments")
case _ => Seq()
}
},
fork in Test := true,
parallelExecution in Test := false,
testListeners in (Test,test) := Nil,
javaOptions in Test ++= Seq(maxMetaspace, "-Xmx512m", "-Xms128m"),
testOptions += Tests.Argument(TestFrameworks.JUnit, "-v"),
bintrayPackage := "play-sbt-plugin",
autoAPIMappings := true,
apiMappings += scalaInstance.value.libraryJar -> url(raw"""http://scala-lang.org/files/archive/api/${scalaInstance.value.actualVersion}/index.html"""),
apiMappings += {
// Maps JDK 1.8 jar into apidoc.
val rtJar: String = System.getProperty("sun.boot.class.path").split(java.io.File.pathSeparator).collectFirst {
case str: String if str.endsWith(java.io.File.separator + "rt.jar") => str
}.get // fail hard if not found
file(rtJar) -> url(Docs.javaApiUrl)
},
apiMappings ++= {
// Finds appropriate scala apidoc from dependencies when autoAPIMappings are insufficient.
// See the following:
//
// http://stackoverflow.com/questions/19786841/can-i-use-sbts-apimappings-setting-for-managed-dependencies/20919304#20919304
// http://www.scala-sbt.org/release/docs/Howto-Scaladoc.html#Enable+manual+linking+to+the+external+Scaladoc+of+managed+dependencies
// https://github.com/ThoughtWorksInc/sbt-api-mappings/blob/master/src/main/scala/com/thoughtworks/sbtApiMappings/ApiMappings.scala#L34
val ScalaLibraryRegex = """^.*[/\\]scala-library-([\d\.]+)\.jar$""".r
val JavaxInjectRegex = """^.*[/\\]java.inject-([\d\.]+)\.jar$""".r
val IvyRegex = """^.*[/\\]([\.\-_\w]+)[/\\]([\.\-_\w]+)[/\\](?:jars|bundles)[/\\]([\.\-_\w]+)\.jar$""".r
(for {
jar <- (dependencyClasspath in Compile in doc).value.toSet ++ (dependencyClasspath in Test in doc).value
fullyFile = jar.data
urlOption = fullyFile.getCanonicalPath match {
case ScalaLibraryRegex(v) =>
Some(url(raw"""http://scala-lang.org/files/archive/api/$v/index.html"""))
case JavaxInjectRegex(v) =>
// the jar file doesn't match up with $apiName-
Some(url(Docs.javaxInjectUrl))
case re@IvyRegex(apiOrganization, apiName, jarBaseFile) if jarBaseFile.startsWith(s"$apiName-") =>
val apiVersion = jarBaseFile.substring(apiName.length + 1, jarBaseFile.length)
apiOrganization match {
case "com.typesafe.akka" =>
Some(url(raw"http://doc.akka.io/api/akka/$apiVersion/"))
case default =>
val link = Docs.artifactToJavadoc(apiOrganization, apiName, apiVersion, jarBaseFile)
Some(url(link))
}
case other =>
None
}
url <- urlOption
} yield (fullyFile -> url))(collection.breakOut(Map.canBuildFrom))
}
)
}
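// Illustrative walk-through (not part of Play) of the apiMappings logic above. For a resolved
// dependency jar such as
//   ~/.ivy2/cache/com.typesafe.akka/akka-actor_2.12/jars/akka-actor_2.12-2.5.4.jar
// IvyRegex captures ("com.typesafe.akka", "akka-actor_2.12", "akka-actor_2.12-2.5.4"); the
// version "2.5.4" is sliced off the jar base name, and the organization match points the
// mapping at http://doc.akka.io/api/akka/2.5.4/. The path and version are example values.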
/**
* These settings are used by all projects that are part of the runtime, as opposed to development, mode of Play.
*/
def playRuntimeSettings: Seq[Setting[_]] = playCommonSettings ++ mimaDefaultSettings ++ Seq(
mimaPreviousArtifacts := {
// Binary compatibility is tested against these versions
val previousVersions = mimaPreviousVersions(version.value)
if (crossPaths.value) {
previousVersions.map(v => organization.value % s"${moduleName.value}_${scalaBinaryVersion.value}" % v)
} else {
previousVersions.map(v => organization.value % moduleName.value % v)
}
},
mimaBinaryIssueFilters ++= Seq(
// Changing return and parameter types from DefaultApplicationLifecycle (implementation) to ApplicationLifecycle (trait)
ProblemFilters.exclude[IncompatibleResultTypeProblem]("play.api.BuiltInComponents.applicationLifecycle"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("play.api.BuiltInComponentsFromContext.applicationLifecycle"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("play.core.server.AkkaHttpServerComponents.applicationLifecycle"),
ProblemFilters.exclude[DirectMissingMethodProblem]("play.core.server.AkkaHttpServerComponents.applicationLifecycle"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("play.core.server.AkkaHttpServerComponents.applicationLifecycle"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("play.api.ApplicationLoader.createContext$default$5"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("play.api.ApplicationLoader#Context.lifecycle"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("play.api.ApplicationLoader#Context.copy$default$5"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("play.core.ObjectMapperComponents.applicationLifecycle"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("play.core.server.NettyServerComponents.applicationLifecycle"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("play.api.http.CookiesConfiguration.serverEncoder"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("play.api.http.CookiesConfiguration.serverDecoder"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("play.api.http.CookiesConfiguration.clientEncoder"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("play.api.http.CookiesConfiguration.clientDecoder"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("play.api.ApplicationLoader.createContext"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("play.api.ApplicationLoader#Context.apply"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("play.api.ApplicationLoader#Context.copy"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("play.api.ApplicationLoader#Context.this"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("play.api.BuiltInComponents.applicationLifecycle"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("play.core.ObjectMapperComponents.applicationLifecycle"),
ProblemFilters.exclude[DirectMissingMethodProblem]("play.core.server.NettyServerComponents.applicationLifecycle"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("play.core.server.NettyServerComponents.applicationLifecycle"),
ProblemFilters.exclude[DirectMissingMethodProblem]("play.core.server.common.ServerResultUtils.sessionBaker"),
ProblemFilters.exclude[DirectMissingMethodProblem]("play.core.server.common.ServerResultUtils.cookieHeaderEncoding"),
ProblemFilters.exclude[DirectMissingMethodProblem]("play.core.server.common.ServerResultUtils.flashBaker"),
ProblemFilters.exclude[DirectMissingMethodProblem]("play.core.server.common.ServerResultUtils.this"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("play.api.http.HeaderNames.CONTENT_SECURITY_POLICY"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("play.api.http.HeaderNames.play$api$http$HeaderNames$_setter_$CONTENT_SECURITY_POLICY_="),
ProblemFilters.exclude[ReversedMissingMethodProblem]("play.api.http.HeaderNames.play$api$http$HeaderNames$_setter_$X_XSS_PROTECTION_="),
ProblemFilters.exclude[ReversedMissingMethodProblem]("play.api.http.HeaderNames.X_XSS_PROTECTION"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("play.api.http.HeaderNames.play$api$http$HeaderNames$_setter_$REFERRER_POLICY_="),
ProblemFilters.exclude[ReversedMissingMethodProblem]("play.api.http.HeaderNames.REFERRER_POLICY"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("play.api.http.HeaderNames.X_CONTENT_TYPE_OPTIONS"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("play.api.http.HeaderNames.play$api$http$HeaderNames$_setter_$X_CONTENT_TYPE_OPTIONS_="),
ProblemFilters.exclude[ReversedMissingMethodProblem]("play.api.http.HeaderNames.X_PERMITTED_CROSS_DOMAIN_POLICIES"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("play.api.http.HeaderNames.play$api$http$HeaderNames$_setter_$X_PERMITTED_CROSS_DOMAIN_POLICIES_="),
ProblemFilters.exclude[ReversedMissingMethodProblem]("play.api.http.HeaderNames.X_FRAME_OPTIONS"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("play.api.http.HeaderNames.play$api$http$HeaderNames$_setter_$X_FRAME_OPTIONS_="),
// private
ProblemFilters.exclude[DirectMissingMethodProblem]("play.core.server.akkahttp.AkkaModelConversion.this"),
// Added method to PlayBodyParsers, which is a Play API not meant to be extended by end users.
ProblemFilters.exclude[ReversedMissingMethodProblem]("play.api.mvc.PlayBodyParsers.byteString"),
// Refactoring to unify AkkaHttpServer and NettyServer fromRouter methods
ProblemFilters.exclude[IncompatibleResultTypeProblem]("play.core.server.NettyServer.fromRouter"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("play.core.server.AkkaHttpServer.fromRouter"),
// Moved play[private] out of from companion object to allow it to access member variables
ProblemFilters.exclude[DirectMissingMethodProblem]("play.api.test.TestServer.start"),
// Added component so configuration would work properly
ProblemFilters.exclude[ReversedMissingMethodProblem]("play.api.cache.ehcache.EhCacheComponents.actorSystem"),
// Changed this private[play] type to a Lock to allow explicit locking
ProblemFilters.exclude[IncompatibleResultTypeProblem]("play.api.test.PlayRunners.mutex")
),
unmanagedSourceDirectories in Compile += {
(sourceDirectory in Compile).value / s"scala-${scalaBinaryVersion.value}"
},
// Argument for setting size of permgen space or meta space for all forked processes
Docs.apiDocsInclude := true
)
def javaVersionSettings(version: String): Seq[Setting[_]] = Seq(
javacOptions ++= Seq("-source", version, "-target", version),
javacOptions in doc := Seq("-source", version)
)
/**
* A project that is shared between the SBT runtime and the Play runtime
*/
def PlayNonCrossBuiltProject(name: String, dir: String): Project = {
Project(name, file("src/" + dir))
.enablePlugins(PlaySbtLibrary)
.settings(playRuntimeSettings: _*)
.settings(omnidocSettings: _*)
.settings(
autoScalaLibrary := false,
crossPaths := false
)
}
/**
* A project that is only used when running in development.
*/
def PlayDevelopmentProject(name: String, dir: String): Project = {
Project(name, file("src/" + dir))
.enablePlugins(PlayLibrary)
.settings(playCommonSettings: _*)
.settings(
(javacOptions in compile) ~= (_.map {
case "1.8" => "1.6"
case other => other
})
)
}
/**
* A project that is in the Play runtime
*/
def PlayCrossBuiltProject(name: String, dir: String): Project = {
Project(name, file("src/" + dir))
.enablePlugins(PlayLibrary)
.settings(playRuntimeSettings: _*)
.settings(omnidocSettings: _*)
}
def omnidocSettings: Seq[Setting[_]] = Omnidoc.projectSettings ++ Seq(
omnidocSnapshotBranch := snapshotBranch,
omnidocPathPrefix := "framework/"
)
def playScriptedSettings: Seq[Setting[_]] = Seq(
ScriptedPlugin.scripted := ScriptedPlugin.scripted.tag(Tags.Test).evaluated,
scriptedLaunchOpts ++= Seq(
"-Xmx768m",
maxMetaspace,
"-Dscala.version=" + sys.props.get("scripted.scala.version").orElse(sys.props.get("scala.version")).getOrElse("2.12.4")
)
)
def playFullScriptedSettings: Seq[Setting[_]] = ScriptedPlugin.scriptedSettings ++ Seq(
ScriptedPlugin.scriptedLaunchOpts += s"-Dproject.version=${version.value}"
) ++ playScriptedSettings
/**
* A project that runs in the SBT runtime
*/
def PlaySbtProject(name: String, dir: String): Project = {
Project(name, file("src/" + dir))
.enablePlugins(PlaySbtLibrary)
.settings(playCommonSettings: _*)
}
/**
* A project that *is* an SBT plugin
*/
def PlaySbtPluginProject(name: String, dir: String): Project = {
Project(name, file("src/" + dir))
.enablePlugins(PlaySbtPlugin)
.settings(playCommonSettings: _*)
.settings(playScriptedSettings: _*)
.settings(
fork in Test := false
)
}
}
|
zaneli/playframework
|
framework/project/BuildSettings.scala
|
Scala
|
apache-2.0
| 15,796
|
object Sample {
var i = 1
def foo = {
i = i + 1
i
}
def main(args: Array[String]) {
"stop here"
}
}
|
consulo/consulo-scala
|
testdata/debugger/ScalaMethodEvaluation/changingFunction/src/Sample.scala
|
Scala
|
apache-2.0
| 121
|
package toguru.play
import java.util.UUID
import akka.util.Timeout
import org.scalatest.OptionValues._
import org.scalatest.matchers.must.Matchers
import org.scalatest.wordspec.AnyWordSpec
import play.api.http.HttpVerbs
import play.api.test.{FakeRequest, Helpers}
import toguru.api._
import toguru.impl.RemoteActivationsProvider
import toguru.test.TestActivations
import scala.concurrent.duration._
import scala.language.implicitConversions
class PlaySupportSpec extends AnyWordSpec with Matchers with RequestHelpers with ControllerHelpers {
"toguruClient method" should {
"create a PlayToguruClient" in {
val client = PlaySupport.toguruClient(_ => ClientInfo(), "http://localhost:9001")
client mustBe a[PlayToguruClient]
client.activationsProvider.asInstanceOf[RemoteActivationsProvider].close()
}
}
"ToggledAction helper" should {
"provide request with toggling information" in {
implicit val timeout = Timeout(2.seconds)
val toguru = PlaySupport.testToguruClient(client, TestActivations(toggle -> Condition.On)())
val controller = createMyController(toguru)
val request = FakeRequest(HttpVerbs.GET, "/")
val response = controller.myAction(request)
Helpers.contentAsString(response) mustBe "Toggle is on"
}
}
"Direct toggling info" should {
"provide toggling information" in {
implicit val timeout = Timeout(2.seconds)
val toguru = PlaySupport.testToguruClient(client, TestActivations(toggle -> Condition.On)())
val controller = createMyControllerWithOwnTogglingInfo(toguru)
val request = FakeRequest(HttpVerbs.GET, "/")
val response = controller.myAction(request)
Helpers.contentAsString(response) mustBe "Toggle is on"
}
}
"Conversion of request header to ClientInfo" should {
"extract culture attribute" in {
val clientInfo = client(request)
clientInfo.attributes.get("culture").value mustBe "de-DE"
}
"extract of user agent from user agent header" in {
val clientInfo = client(request)
clientInfo.attributes
.get(UserAgent)
.value mustBe "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
}
"extract of uuid from GUID or visitor cookie" in {
val clientInfo = client(request)
clientInfo.uuid.value mustBe UUID.fromString("a5f409eb-2fdd-4499-b65b-b22bd7e51aa2")
}
}
"Forcing feature toggles" should {
"override feature toggles by http header" in {
client(request).forcedToggle("feature1-Forced-By-HEADER").value mustBe true
client(request).forcedToggle("feature2-Forced-By-Header").value mustBe true
}
"override feature toggles by cookie" in {
client(request).forcedToggle("feature1-Forced-By-COOKIE").value mustBe true
client(request).forcedToggle("feature2-Forced-By-Cookie").value mustBe true
}
"override one feature toggle by query param" in {
val clientInfo: ClientInfo = client(request)
clientInfo.forcedToggle("feature-forced-by-query-param").value mustBe true
}
"override one feature toggle by query param with unusual case" in {
val clientInfo: ClientInfo = client(requestWithToggleIdUpppercased)
clientInfo.forcedToggle("feature-forced-by-query-param").value mustBe true
}
"override two feature toggles by query param" in {
val clientInfo: ClientInfo = client(requestWithTwoTogglesInQueryString)
clientInfo.forcedToggle("feature1-forced-by-query-param").value mustBe true
clientInfo.forcedToggle("feature2-forced-by-query-param").value mustBe false
}
"override one feature toggle twice by query param takes only first occurrence" in {
val clientInfo: ClientInfo = client(requestWithTwoTogglesInQueryString)
clientInfo.forcedToggle("feature1-forced-by-query-param").value mustBe true
}
}
}
|
AutoScout24/toguru-scala-client
|
play/src/test/scala/toguru/play/PlaySupportSpec.scala
|
Scala
|
mit
| 3,947
|
package sample.hello
import akka.actor._
import java.io._
import collection.JavaConversions._
import edu.stanford.nlp.pipeline.Annotation
import edu.stanford.nlp.pipeline.StanfordCoreNLP
import java.util.Properties
import edu.stanford.nlp.ling.CoreAnnotations.{TokenBeginAnnotation, LemmaAnnotation, TokensAnnotation, SentencesAnnotation}
/**
* Created by root on 3/3/15.
*/
class LineLemmaExtractor extends Actor with ActorLogging {
def manOf[T: Manifest](t: T): Manifest[T] = manifest[T]
def receive = {
case routingmessages2(line, source_receiver_ref,filename,file_handler,file_lines_size) =>
val err: PrintStream = System.err
System.setErr(new PrintStream(new OutputStream() {
def write(b: Int): Unit = {}
}))
val props:Properties=new Properties()
props.put("annotators","tokenize, ssplit, pos ,lemma")
val pipeline:StanfordCoreNLP=new StanfordCoreNLP(props)
val document_line :Annotation=new Annotation(line)
pipeline.annotate(document_line)
val sentences =document_line.get(classOf[SentencesAnnotation])
val lemmas1 = sentences flatMap { sentence =>
val tokens = sentence.get(classOf[TokensAnnotation])
tokens map { x=> x } }
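// lemmas1 is only used, via tmp_lem below, to inspect the character just before the closing
// ')' of the token buffer's string form; a '-' there is taken to mean the line ends in a
// hyphenated word, and the hyphen is re-attached to the last lemma further down.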
val tmp_lem :String= lemmas1.toString()
val last_char_line= tmp_lem.charAt(tmp_lem.length()-2)
val lemmas = sentences flatMap { sentence =>
val tokens = sentence.get(classOf[TokensAnnotation])
tokens map { _.get(classOf[LemmaAnnotation]) } }
val listed_lemmas_pre:List[String]=lemmas.toList.filterNot(_.forall(!_.isLetterOrDigit))
val listed_lemmas=
if(last_char_line=='-'){
listed_lemmas_pre.dropRight(1) :+(listed_lemmas_pre.last + "-")
}
else{
listed_lemmas_pre
}
System.setErr(err);
//println(listed_lemmas)
if(source_receiver_ref.toString().contains("plag_analysis")){
//println(context.actorSelection(source_receiver_ref.path.parent))
context.actorSelection(source_receiver_ref.path.parent).!(plag_file_transf(filename,listed_lemmas,file_lines_size,file_handler))(context.parent)
}
else if (source_receiver_ref.toString().contains("source_analysis")){
//context.actorSelection("../plag_analysis").!(source_file_transf(source_file_name, listed_lemmas))
source_receiver_ref.!(returned_line_lemmas(listed_lemmas,file_handler,file_lines_size,filename))
}
case ShutdownMessage(file_handler) =>
context.stop(self)
case _ =>
println("No line received")
}
}
|
SteliosKats/Plagiarism_Detection_System_Using_Akka
|
src/main/scala/sample/hello/LineLemmaExtractor.scala
|
Scala
|
cc0-1.0
| 2,590
|
/*
* Copyright 2010-2011 Vilius Normantas <code@norma.lt>
*
* This file is part of Crossbow library.
*
* Crossbow is free software: you can redistribute it and/or modify it under the terms of the GNU
* General Public License as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* Crossbow is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
* even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with Crossbow. If not,
* see <http://www.gnu.org/licenses/>.
*/
package lt.norma.crossbow.indicators
/** Calculates sum of the specified indicators of `Double` type. Empty indicators are ignored.
* `SumSet` results in `None` if all of the target indicators are empty.
* {{{SumSet = I1 + I2 + ... + In}}} */
class SumSet(indicators: Indicator[Double]*) extends FunctionalIndicator[Double] {
def name = "SumSet(" + (indicators map {
_.name
} mkString ("; ")) + ")"
private val defSum = new Sum(indicators.map(i => new Default(i, 0.0)): _*)
def dependencies = Set(defSum)
def calculate = {
if (indicators.exists(_.isSet)) {
defSum()
} else {
None
}
}
}
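// A hedged usage sketch (appended for illustration, in comment form only; `i1` and `i2` stand
// for hypothetical Indicator[Double] instances whose construction is not shown here):
//
//   val total = new SumSet(i1, i2)
//   // total.name == "SumSet(" + i1.name + "; " + i2.name + ")"
//   // Its value is i1 + i2 with empty indicators treated as 0.0,
//   // and it is empty only when both i1 and i2 are empty.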
|
ViliusN/Crossbow
|
crossbow-core/src/lt/norma/crossbow/indicators/SumSet.scala
|
Scala
|
gpl-3.0
| 1,374
|
/**
* Licensed to the Minutemen Group under one or more contributor license
* agreements. See the COPYRIGHT file distributed with this work for
* additional information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package silhouette.provider.social
/**
* Indicates that an error occurred during social state retrieval.
*
* @param msg The exception message.
* @param cause The exception cause.
*/
class SocialStateException(msg: String, cause: Option[Throwable] = None)
extends SocialProviderException(msg, cause)
|
mohiva/silhouette
|
modules/provider-social/src/main/scala/silhouette/provider/social/SocialStateException.scala
|
Scala
|
apache-2.0
| 1,072
|
package com.jd.common
import scala.collection.mutable.ListBuffer
import akka.actor.Actor
import akka.actor.ActorRef
import akka.actor.Props
import akka.actor.actorRef2Scala
import akka.routing.FromConfig
sealed class Helper(count: Int, replyTo: ActorRef) extends Actor {
val perfectNumbers = new ListBuffer[Int]
var nrOfResults = 0
def receive = {
case Result(num: Int, isPerfect: Boolean) =>
nrOfResults += 1
if (isPerfect)
perfectNumbers += num
if (nrOfResults == count) {
replyTo ! PerfectNumbers(perfectNumbers.toList)
context.stop(self)
}
}
}
class Master extends Actor {
// val workers = context.actorOf(Props(new Worker()), "workerRouter")
// val workers = context.actorOf(Props(new Worker()).withRouter(RoundRobinRouter(10)), "workerRouter")
val workers = context.actorOf(Props(new Worker()).withRouter(FromConfig()), "workerRouter")
def receive = {
case Find(start: Int, end: Int, replyTo: ActorRef) =>
require(start > 1 && end >= start)
val count = end - start + 1
val helper = context.actorOf(Props(new Helper(count, replyTo)))
(start to end).foreach(num =>
workers ! Work(num, helper)
// workers.tell(ConsistentHashableEnvelope(Work(num,helper), num), helper)
)
case PerfectNumbers(list: List[Int]) =>
println("\nFound Perfect Numbers:" + list.mkString(","))
}
}
|
pengyanhong/demoAkka
|
src/com/jd/common/MasterActor.scala
|
Scala
|
apache-2.0
| 1,407
|
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.config.base.traits
import com.datamountaineer.streamreactor.connect.config.base.const.TraitConfigConst._
/**
* Created by andrew@datamountaineer.com on 31/07/2017.
* stream-reactor
*/
trait SSLSettings extends BaseSettings {
val trustStorePath: String = s"$connectorPrefix.$TRUSTSTORE_PATH_SUFFIX"
val trustStorePass: String = s"$connectorPrefix.$TRUSTSTORE_PASS_SUFFIX"
val keyStorePath: String = s"$connectorPrefix.$KEYSTORE_PATH_SUFFIX"
val keyStorePass: String = s"$connectorPrefix.$KEYSTORE_PASS_SUFFIX"
val certificates: String = s"$connectorPrefix.$CERTIFICATES_SUFFIX"
val certificateKeyChain: String = s"$connectorPrefix.$CERTIFICATE_KEY_CHAIN_SUFFIX"
def getTrustStorePath = getString(trustStorePath)
def getTrustStorePass = getPassword(trustStorePass)
def getKeyStorePath = getString(keyStorePath)
def getKeyStorePass = getPassword(keyStorePass)
def getCertificates = getList(certificates)
def getCertificateKeyChain = getString(certificateKeyChain)
}
|
datamountaineer/kafka-connect-common
|
src/main/scala/com/datamountaineer/streamreactor/connect/config/base/traits/SSLSettings.scala
|
Scala
|
apache-2.0
| 1,658
|
package com.wordtrellis.projecteuler
/**
* Problem 30
*
* Surprisingly there are only three numbers that can be written as the sum of fourth powers
* of their digits:
*
* 1634 = 1^(4) + 6^(4) + 3^(4) + 4^(4)
* 8208 = 8^(4) + 2^(4) + 0^(4) + 8^(4)
* 9474 = 9^(4) + 4^(4) + 7^(4) + 4^(4)
*
* As 1 = 1^(4) is not a sum it is not included.
*
* The sum of these numbers is 1634 + 8208 + 9474 = 19316.
*
* Find the sum of all the numbers that can be written as the sum of fifth powers of their digits.
*
* @author : Todd Cook
*/
import scala.collection.mutable.ListBuffer
object problem_30 {
def main(args: Array[String]): Unit = {
println(answer)
}
def answer: Int = {
val solutionRange = 2 to 200000 // or wind it up to 10000000, same results
val solutions = new ListBuffer[Int]()
solutionRange.foreach(x => if (testNumberEqualsPower(x, 5)) solutions.append(x))
println(solutions.toList)
solutions.toList.sum
}
def testNumberEqualsPower(num: Int, power: Int): Boolean =
numberDigitsToPower(num, power) == num + 0d
  // note: the foldLeft seed must be 0d so the accumulator is typed as Double
def numberDigitsToPower(num: Int, power: Int): Double =
numberAsDigitList(num).map(a => math.pow(a, power)).foldLeft(0d)(_ + _)
def numberAsDigitList(num: Int): List[Int] = stringAsDigitList(num.toString)
  /**
   * note: each Char must be converted to a String (here via `a.toString`) before parsing;
   * otherwise the digit's character code would be used instead,
   * e.g. '1', '2', '3' -> 49, 50, 51
   */
  def stringAsDigitList(str: String): List[Int] =
    str.toList.map(a => java.lang.Integer.parseInt(a.toString))
}
/**
* Solution:
* List(4150, 4151, 54748, 92727, 93084, 194979)
* 443839
*
*/
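// A minimal sanity-check sketch (appended for illustration; not part of the original solution).
// It verifies the fourth-power examples quoted in the problem statement above, using only the
// standard library and independently of the helpers in `problem_30`.
object problem_30_examples_check {
  private def digitPowerSum(n: Int, power: Int): Int =
    n.toString.map(_.asDigit).map(d => math.pow(d, power).toInt).sum
  def main(args: Array[String]): Unit = {
    // 1634, 8208 and 9474 each equal the sum of the fourth powers of their digits,
    // and their total is 19316, as stated in the problem description.
    val examples = List(1634, 8208, 9474)
    assert(examples.forall(n => digitPowerSum(n, 4) == n))
    assert(examples.sum == 19316)
    println("fourth-power examples verified")
  }
}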
|
todd-cook/Effective-Scala-with-Project-Euler
|
src/main/scala/com/wordtrellis/projecteuler/problem_30.scala
|
Scala
|
mit
| 1,811
|
package com.github.agourlay.cornichon.http
import cats.Show
import cats.syntax.show._
import com.github.agourlay.cornichon.util.Printing._
import com.github.agourlay.cornichon.resolver.Resolvable
import io.circe.Encoder
import scala.concurrent.duration.FiniteDuration
case class HttpMethod(name: String)
object HttpMethods {
val DELETE = HttpMethod("DELETE")
val GET = HttpMethod("GET")
val HEAD = HttpMethod("HEAD")
val OPTIONS = HttpMethod("OPTIONS")
val PATCH = HttpMethod("PATCH")
val POST = HttpMethod("POST")
val PUT = HttpMethod("PUT")
}
trait BaseRequest {
def url: String
def params: Seq[(String, String)]
def headers: Seq[(String, String)]
def compactDescription: String
def paramsTitle: String = if (params.isEmpty) "" else s" with query parameters ${printArrowPairs(params)}"
def headersTitle: String = if (headers.isEmpty) "" else s" with headers ${printArrowPairs(headers)}"
}
case class HttpRequest[A: Show: Resolvable: Encoder](method: HttpMethod, url: String, body: Option[A], params: Seq[(String, String)], headers: Seq[(String, String)])
extends BaseRequest {
def withParams(params: (String, String)*) = copy(params = params)
def addParams(params: (String, String)*) = copy(params = this.params ++ params)
def withHeaders(headers: (String, String)*) = copy(headers = headers)
def addHeaders(headers: (String, String)*) = copy(headers = this.headers ++ headers)
def withBody[B: Show: Resolvable: Encoder](body: B) = copy(body = Some(body))
lazy val compactDescription: String = {
val base = s"${method.name} $url"
val payloadTitle = body.fold("")(p => s" with body\\n${p.show}")
base + payloadTitle + paramsTitle + headersTitle
}
}
trait HttpRequestsDsl {
import com.github.agourlay.cornichon.http.HttpMethods._
def get(url: String): HttpRequest[String] = HttpRequest[String](GET, url, None, Nil, Nil)
def head(url: String): HttpRequest[String] = HttpRequest[String](HEAD, url, None, Nil, Nil)
def options(url: String): HttpRequest[String] = HttpRequest[String](OPTIONS, url, None, Nil, Nil)
def delete(url: String): HttpRequest[String] = HttpRequest[String](DELETE, url, None, Nil, Nil)
def post(url: String): HttpRequest[String] = HttpRequest[String](POST, url, None, Nil, Nil)
def put(url: String): HttpRequest[String] = HttpRequest[String](PUT, url, None, Nil, Nil)
def patch(url: String): HttpRequest[String] = HttpRequest[String](PATCH, url, None, Nil, Nil)
}
object HttpRequest extends HttpRequestsDsl {
implicit def showRequest[A: Show]: Show[HttpRequest[A]] = new Show[HttpRequest[A]] {
def show(r: HttpRequest[A]): String = {
val body = r.body.fold("without body")(b => s"with body\\n${b.show}")
val params = if (r.params.isEmpty) "without parameters" else s"with parameters ${printArrowPairs(r.params)}"
val headers = if (r.headers.isEmpty) "without headers" else s"with headers ${printArrowPairs(r.headers)}"
s"""|HTTP ${r.method.name} request to ${r.url}
|$params
|$headers
|$body""".stripMargin
}
}
}
case class HttpStream(name: String) extends AnyVal
object HttpStreams {
val SSE = HttpStream("Server-Sent-Event")
val WS = HttpStream("WebSocket")
}
case class HttpStreamedRequest(stream: HttpStream, url: String, takeWithin: FiniteDuration, params: Seq[(String, String)], headers: Seq[(String, String)])
extends BaseRequest {
def withParams(params: (String, String)*) = copy(params = params)
def addParams(params: (String, String)*) = copy(params = this.params ++ params)
def withHeaders(headers: (String, String)*) = copy(headers = headers)
def addHeaders(headers: (String, String)*) = copy(headers = this.headers ++ headers)
lazy val compactDescription: String = {
val base = s"open ${stream.name} to $url"
base + paramsTitle + headersTitle
}
}
object HttpStreamedRequest {
implicit val showStreamedRequest = new Show[HttpStreamedRequest] {
def show(r: HttpStreamedRequest): String = {
val params = if (r.params.isEmpty) "without parameters" else s"with parameters ${printArrowPairs(r.params)}"
val headers = if (r.headers.isEmpty) "without headers" else s"with headers ${printArrowPairs(r.headers)}"
s"""|${r.stream.name} request to ${r.url}
|$params
|$headers""".stripMargin
}
}
}
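// A hedged usage sketch (appended for illustration; not part of the original file). It composes
// a request with the DSL above and prints its compact description; the URL, parameter and header
// values are made up, and only definitions from this file are used.
object HttpRequestDslSketch extends HttpRequestsDsl {
  def main(args: Array[String]): Unit = {
    val request = get("http://example.com/users")
      .addParams("limit" -> "10")
      .addHeaders("Accept" -> "application/json")
    // Prints e.g. "GET http://example.com/users with query parameters ... with headers ..."
    println(request.compactDescription)
  }
}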
|
agourlay/cornichon
|
cornichon-core/src/main/scala/com/github/agourlay/cornichon/http/HttpRequest.scala
|
Scala
|
apache-2.0
| 4,357
|
/*
* Shadowsocks - A shadowsocks client for Android
* Copyright (C) 2014 <max.c.lv@gmail.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
* ___====-_ _-====___
* _--^^^#####// \\\\#####^^^--_
* _-^##########// ( ) \\\\##########^-_
* -############// |\\^^/| \\\\############-
* _/############// (@::@) \\\\############\\_
* /#############(( \\\\// ))#############\\
* -###############\\\\ (oo) //###############-
* -#################\\\\ / VV \\ //#################-
* -###################\\\\/ \\//###################-
* _#/|##########/\\######( /\\ )######/\\##########|\\#_
* |/ |#/\\#/\\#/\\/ \\#/\\##\\ | | /##/\\#/ \\/\\#/\\#/\\#| \\|
* ` |/ V V ` V \\#\\| | | |/#/ V ' V V \\| '
* ` ` ` ` / | | | | \\ ' ' ' '
* ( | | | | )
* __\\ | | | | /__
* (vvv(VVV)(VVV)vvv)
*
* HERE BE DRAGONS
*
*/
package com.github.shadowsocks
import java.io.File
import java.util.Locale
import android.app._
import android.content._
import android.content.pm.{PackageInfo, PackageManager}
import android.net.VpnService
import android.os._
import android.util.Log
import android.widget.Toast
import com.github.shadowsocks.aidl.Config
import com.github.shadowsocks.utils._
import com.google.android.gms.analytics.HitBuilders
import org.apache.commons.net.util.SubnetUtils
import org.apache.http.conn.util.InetAddressUtils
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.ops._
class ShadowsocksVpnService extends VpnService with BaseService {
private lazy val application = getApplication.asInstanceOf[ShadowsocksApplication]
val TAG = "ShadowsocksVpnService"
val VPN_MTU = 1500
val PRIVATE_VLAN = "26.26.26.%s"
var conn: ParcelFileDescriptor = null
var notificationManager: NotificationManager = null
var receiver: BroadcastReceiver = null
var apps: Array[ProxiedApp] = null
var config: Config = null
var vpnThread: ShadowsocksVpnThread = null
def isByass(net: SubnetUtils): Boolean = {
val info = net.getInfo
info.isInRange(config.proxy)
}
  def isPrivateA(a: Int): Boolean =
    a == 10 || a == 192 || a == 172
  def isPrivateB(a: Int, b: Int): Boolean =
    a == 10 || (a == 192 && b == 168) || (a == 172 && b >= 16 && b < 32)
override def onBind(intent: Intent): IBinder = {
val action = intent.getAction
if (VpnService.SERVICE_INTERFACE == action) {
return super.onBind(intent)
} else if (Action.SERVICE == action) {
return binder
}
null
}
override def onDestroy() {
super.onDestroy()
if (vpnThread != null) vpnThread.stopThread()
}
override def onCreate() {
super.onCreate()
ConfigUtils.refresh(this)
notificationManager = getSystemService(Context.NOTIFICATION_SERVICE)
.asInstanceOf[NotificationManager]
vpnThread = new ShadowsocksVpnThread(this)
vpnThread.start()
}
override def onRevoke() {
stopRunner()
}
override def stopRunner() {
    // change the state
changeState(State.STOPPING)
// send event
application.tracker.send(new HitBuilders.EventBuilder()
.setCategory(TAG)
.setAction("stop")
.setLabel(getVersionName)
.build())
// reset VPN
killProcesses()
// close connections
if (conn != null) {
conn.close()
conn = null
}
// stop the service if no callback registered
if (getCallbackCount == 0) {
stopSelf()
}
// clean up the context
if (receiver != null) {
unregisterReceiver(receiver)
receiver = null
}
    // change the state
changeState(State.STOPPED)
}
def getVersionName: String = {
var version: String = null
try {
val pi: PackageInfo = getPackageManager.getPackageInfo(getPackageName, 0)
version = pi.versionName
} catch {
case e: PackageManager.NameNotFoundException =>
version = "Package name not found"
}
version
}
def killProcesses() {
for (task <- Array("ss-local", "ss-tunnel", "pdnsd", "tun2socks")) {
try {
val pid = scala.io.Source.fromFile(Path.BASE + task + "-vpn.pid").mkString.trim.toInt
Process.killProcess(pid)
} catch {
case e: Throwable => Log.e(TAG, "unable to kill " + task)
}
}
}
override def startRunner(c: Config) {
config = c
// ensure the VPNService is prepared
if (VpnService.prepare(this) != null) {
val i = new Intent(this, classOf[ShadowsocksRunnerActivity])
i.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
startActivity(i)
return
}
// send event
application.tracker.send(new HitBuilders.EventBuilder()
.setCategory(TAG)
.setAction("start")
.setLabel(getVersionName)
.build())
// register close receiver
val filter = new IntentFilter()
filter.addAction(Intent.ACTION_SHUTDOWN)
receiver = new BroadcastReceiver {
def onReceive(p1: Context, p2: Intent) {
        Toast.makeText(p1, R.string.stopping, Toast.LENGTH_SHORT).show()
stopRunner()
}
}
registerReceiver(receiver, filter)
changeState(State.CONNECTING)
spawn {
if (config.proxy == "198.199.101.152") {
val holder = getApplication.asInstanceOf[ShadowsocksApplication].containerHolder
try {
config = ConfigUtils.getPublicConfig(getBaseContext, holder.getContainer, config)
} catch {
case ex: Exception =>
changeState(State.STOPPED, getString(R.string.service_failed))
stopRunner()
config = null
}
}
if (config != null) {
// reset the context
killProcesses()
// Resolve the server address
var resolved: Boolean = false
if (!InetAddressUtils.isIPv4Address(config.proxy) &&
!InetAddressUtils.isIPv6Address(config.proxy)) {
Utils.resolve(config.proxy, enableIPv6 = true) match {
case Some(addr) =>
config.proxy = addr
resolved = true
case None => resolved = false
}
} else {
resolved = true
}
if (resolved && handleConnection) {
changeState(State.CONNECTED)
} else {
changeState(State.STOPPED, getString(R.string.service_failed))
stopRunner()
}
}
}
}
/** Called when the activity is first created. */
def handleConnection: Boolean = {
startShadowsocksDaemon()
if (!config.isUdpDns) {
startDnsDaemon()
startDnsTunnel()
}
val fd = startVpn()
if (fd == -1) {
false
} else {
Thread.sleep(1000)
if (System.sendfd(fd) == -1) {
false
} else {
true
}
}
}
def startShadowsocksDaemon() {
if (config.route != Route.ALL) {
val acl: Array[String] = config.route match {
case Route.BYPASS_LAN => getResources.getStringArray(R.array.private_route)
case Route.BYPASS_CHN => getResources.getStringArray(R.array.chn_route_full)
}
ConfigUtils.printToFile(new File(Path.BASE + "acl.list"))(p => {
acl.foreach(item => p.println(item))
})
}
val conf = ConfigUtils
.SHADOWSOCKS.formatLocal(Locale.ENGLISH, config.proxy, config.remotePort, config.localPort,
config.sitekey, config.encMethod, 10)
ConfigUtils.printToFile(new File(Path.BASE + "ss-local-vpn.conf"))(p => {
p.println(conf)
})
val cmd = new ArrayBuffer[String]
cmd +=(Path.BASE + "ss-local", "-V", "-u"
, "-b", "127.0.0.1"
, "-t", "600"
, "-c", Path.BASE + "ss-local-vpn.conf"
, "-f", Path.BASE + "ss-local-vpn.pid")
if (config.route != Route.ALL) {
cmd += "--acl"
cmd += (Path.BASE + "acl.list")
}
if (BuildConfig.DEBUG) Log.d(TAG, cmd.mkString(" "))
Console.runCommand(cmd.mkString(" "))
}
def startDnsTunnel() = {
val conf = ConfigUtils
.SHADOWSOCKS.formatLocal(Locale.ENGLISH, config.proxy, config.remotePort, 8163,
config.sitekey, config.encMethod, 10)
ConfigUtils.printToFile(new File(Path.BASE + "ss-tunnel-vpn.conf"))(p => {
p.println(conf)
})
val cmd = new ArrayBuffer[String]
cmd +=(Path.BASE + "ss-tunnel"
, "-V"
, "-u"
, "-t", "10"
, "-b", "127.0.0.1"
, "-l", "8163"
, "-L", "8.8.8.8:53"
, "-c", Path.BASE + "ss-tunnel-vpn.conf"
, "-f", Path.BASE + "ss-tunnel-vpn.pid")
if (BuildConfig.DEBUG) Log.d(TAG, cmd.mkString(" "))
Console.runCommand(cmd.mkString(" "))
}
def startDnsDaemon() {
val conf = {
if (config.route == Route.BYPASS_CHN) {
val reject = ConfigUtils.getRejectList(getContext, application)
val blackList = ConfigUtils.getBlackList(getContext, application)
ConfigUtils.PDNSD_DIRECT.formatLocal(Locale.ENGLISH, "0.0.0.0", 8153,
Path.BASE + "pdnsd-vpn.pid", reject, blackList, 8163)
} else {
ConfigUtils.PDNSD_LOCAL.formatLocal(Locale.ENGLISH, "0.0.0.0", 8153,
Path.BASE + "pdnsd-vpn.pid", 8163)
}
}
ConfigUtils.printToFile(new File(Path.BASE + "pdnsd-vpn.conf"))(p => {
p.println(conf)
})
val cmd = Path.BASE + "pdnsd -c " + Path.BASE + "pdnsd-vpn.conf"
if (BuildConfig.DEBUG) Log.d(TAG, cmd)
Console.runCommand(cmd)
}
override def getContext = getBaseContext
def startVpn(): Int = {
val builder = new Builder()
builder
.setSession(config.profileName)
.setMtu(VPN_MTU)
.addAddress(PRIVATE_VLAN.formatLocal(Locale.ENGLISH, "1"), 24)
.addDnsServer("8.8.8.8")
if (Utils.isLollipopOrAbove) {
builder.allowFamily(android.system.OsConstants.AF_INET6)
if (!config.isGlobalProxy) {
val apps = AppManager.getProxiedApps(this, config.proxiedAppString)
val pkgSet: mutable.HashSet[String] = new mutable.HashSet[String]
for (app <- apps) {
if (app.proxied) {
pkgSet.add(app.packageName)
}
}
for (pkg <- pkgSet) {
if (!config.isBypassApps) {
builder.addAllowedApplication(pkg)
} else {
builder.addDisallowedApplication(pkg)
}
}
}
}
if (config.route == Route.ALL) {
builder.addRoute("0.0.0.0", 0)
} else {
val privateList = getResources.getStringArray(R.array.bypass_private_route)
privateList.foreach(cidr => {
val addr = cidr.split('/')
builder.addRoute(addr(0), addr(1).toInt)
})
}
builder.addRoute("8.8.0.0", 16)
try {
conn = builder.establish()
} catch {
case ex: IllegalStateException =>
changeState(State.STOPPED, ex.getMessage)
conn = null
case ex: Exception => conn = null
}
if (conn == null) {
stopRunner()
return -1
}
val fd = conn.getFd
var cmd = (Path.BASE +
"tun2socks --netif-ipaddr %s "
+ "--netif-netmask 255.255.255.0 "
+ "--socks-server-addr 127.0.0.1:%d "
+ "--tunfd %d "
+ "--tunmtu %d "
+ "--loglevel 3 "
+ "--pid %stun2socks-vpn.pid")
.formatLocal(Locale.ENGLISH, PRIVATE_VLAN.formatLocal(Locale.ENGLISH, "2"), config.localPort, fd, VPN_MTU, Path.BASE)
if (config.isUdpDns)
cmd += " --enable-udprelay"
else
cmd += " --dnsgw %s:8153".formatLocal(Locale.ENGLISH, PRIVATE_VLAN.formatLocal(Locale.ENGLISH, "1"))
if (BuildConfig.DEBUG) Log.d(TAG, cmd)
Console.runCommand(cmd)
return fd
}
override def stopBackgroundService() {
stopSelf()
}
override def getTag = TAG
override def getServiceMode = Mode.VPN
}
|
ListFranz/shadowsocks-android
|
src/main/scala/com/github/shadowsocks/ShadowsocksVpnService.scala
|
Scala
|
gpl-3.0
| 12,713
|
package eventstore
import eventstore.akka.Settings
import eventstore.core.{settings => cs}
import eventstore.{core => c}
private[eventstore] trait CoreCompat {
private lazy val RequireMaster: Boolean = Settings.Default.requireMaster
private lazy val ResolveLinkTos: Boolean = Settings.Default.resolveLinkTos
private lazy val ReadBatchSize: Int = Settings.Default.readBatchSize
object WriteEvents {
def unapply(arg: c.WriteEvents) = c.WriteEvents.unapply(arg)
def apply(
streamId: c.EventStream.Id,
events: List[c.EventData],
expectedVersion: c.ExpectedVersion = c.ExpectedVersion.Any,
requireMaster: Boolean = RequireMaster
): c.WriteEvents = c.WriteEvents(streamId, events, expectedVersion, requireMaster)
object StreamMetadata {
def apply(
streamId: c.EventStream.Metadata,
data: c.Content,
expectedVersion: c.ExpectedVersion = c.ExpectedVersion.Any,
requireMaster: Boolean = RequireMaster
): c.WriteEvents = c.WriteEvents.StreamMetadata(streamId, data, randomUuid, expectedVersion, requireMaster)
}
}
object DeleteStream {
def unapply(arg: c.DeleteStream) = c.DeleteStream.unapply(arg)
def apply(
streamId: c.EventStream.Id,
expectedVersion: c.ExpectedVersion.Existing = c.ExpectedVersion.Any,
hard: Boolean = false,
requireMaster: Boolean = RequireMaster
): c.DeleteStream = c.DeleteStream(streamId, expectedVersion, hard, requireMaster)
}
object TransactionStart {
def unapply(arg: c.TransactionStart) = c.TransactionStart.unapply(arg)
def apply(
streamId: c.EventStream.Id,
expectedVersion: c.ExpectedVersion = c.ExpectedVersion.Any,
requireMaster: Boolean = RequireMaster
): c.TransactionStart = c.TransactionStart(streamId, expectedVersion, requireMaster)
}
object TransactionWrite {
def unapply(arg: c.TransactionWrite) = c.TransactionWrite.unapply(arg)
def apply(
transactionId: Long,
events: List[c.EventData],
requireMaster: Boolean = RequireMaster
): c.TransactionWrite = c.TransactionWrite(transactionId, events, requireMaster)
}
object TransactionCommit {
def unapply(arg: c.TransactionCommit) = c.TransactionCommit.unapply(arg)
def apply(
transactionId: Long,
requireMaster: Boolean = RequireMaster
): c.TransactionCommit = c.TransactionCommit(transactionId, requireMaster)
}
object ReadEvent {
def unapply(arg: c.ReadEvent) = c.ReadEvent.unapply(arg)
def apply(
streamId: c.EventStream.Id,
eventNumber: c.EventNumber = c.EventNumber.First,
resolveLinkTos: Boolean = ResolveLinkTos,
requireMaster: Boolean = RequireMaster
): c.ReadEvent = c.ReadEvent(streamId, eventNumber, resolveLinkTos, requireMaster)
object StreamMetadata {
def apply(
streamId: c.EventStream.Metadata,
eventNumber: c.EventNumber = c.EventNumber.Last,
resolveLinkTos: Boolean = ResolveLinkTos,
requireMaster: Boolean = RequireMaster
): c.ReadEvent = c.ReadEvent.StreamMetadata(streamId, eventNumber, resolveLinkTos, requireMaster)
}
}
object ReadStreamEvents {
def unapply(arg: c.ReadStreamEvents) = c.ReadStreamEvents.unapply(arg)
def apply(
streamId: c.EventStream.Id,
fromNumber: c.EventNumber = c.EventNumber.First,
maxCount: Int = ReadBatchSize,
direction: c.ReadDirection = c.ReadDirection.Forward,
resolveLinkTos: Boolean = ResolveLinkTos,
requireMaster: Boolean = RequireMaster
): c.ReadStreamEvents = c.ReadStreamEvents(streamId, fromNumber, maxCount, direction, resolveLinkTos, requireMaster)
}
object ReadAllEvents {
def unapply(arg: c.ReadAllEvents) = c.ReadAllEvents.unapply(arg)
def apply(
fromPosition: c.Position = c.Position.First,
maxCount: Int = ReadBatchSize,
direction: c.ReadDirection = c.ReadDirection.Forward,
resolveLinkTos: Boolean = ResolveLinkTos,
requireMaster: Boolean = RequireMaster
): c.ReadAllEvents = c.ReadAllEvents(fromPosition, maxCount, direction, resolveLinkTos, requireMaster)
}
object PersistentSubscriptionSettings {
import com.typesafe.config.{Config, ConfigFactory}
lazy val Default: cs.PersistentSubscriptionSettings = apply(ConfigFactory.load())
def apply(conf: Config): cs.PersistentSubscriptionSettings = cs.PersistentSubscriptionSettings(conf)
}
object PersistentSubscription {
import PersistentSubscriptionSettings.{Default => D}
import c.EventStream.Id
import c.{PersistentSubscription => PS}
import cs.{PersistentSubscriptionSettings => PSS}
def create(streamId: Id, groupName: String, settings: PSS): Create = Create(streamId, groupName, settings)
def update(streamId: Id, groupName: String, settings: PSS): Update = Update(streamId, groupName, settings)
def delete(streamId: Id, groupName: String): Delete = Delete(streamId, groupName)
object Create {
def unapply(arg: Create) = PS.Create.unapply(arg)
def apply(streamId: Id, groupName: String, settings: PSS = D): Create = PS.Create(streamId, groupName, settings)
}
object Update {
def unapply(arg: Update) = PS.Update.unapply(arg)
def apply(streamId: Id, groupName: String, settings: PSS = D): Update = PS.Update(streamId, groupName, settings)
}
type Create = PS.Create
val CreateCompleted = PS.CreateCompleted
type Update = PS.Update
val UpdateCompleted = PS.UpdateCompleted
type Delete = PS.Delete
val Delete = PS.Delete
val DeleteCompleted = PS.DeleteCompleted
type Ack = PS.Ack
val Ack = PS.Ack
type Nak = PS.Nak
val Nak = PS.Nak
type Connect = PS.Connect
val Connect = PS.Connect
type Connected = PS.Connected
val Connected = PS.Connected
type EventAppeared = PS.EventAppeared
val EventAppeared = PS.EventAppeared
}
object SubscribeTo {
def unapply(arg: c.SubscribeTo) = c.SubscribeTo.unapply(arg)
def apply(stream: c.EventStream, resolveLinkTos: Boolean = ResolveLinkTos): c.SubscribeTo = c.SubscribeTo(stream, resolveLinkTos)
}
}
object compat extends CoreCompat
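// A hedged usage sketch (appended for illustration, in comment form only): the `compat` object
// exposes the settings-backed defaults, so requests can be built with minimal arguments, e.g.
//
//   compat.ReadStreamEvents(streamId)      // streamId: eventstore.core.EventStream.Id
//   compat.WriteEvents(streamId, events)   // events: List[eventstore.core.EventData]
//
// with readBatchSize, resolveLinkTos and requireMaster falling back to Settings.Default.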
|
EventStore/EventStore.JVM
|
client/src/main/scala/eventstore/CoreCompat.scala
|
Scala
|
bsd-3-clause
| 6,813
|
package com.github.jeroenr.tepkin
import akka.actor.ActorRef
import akka.pattern.ask
import akka.stream.scaladsl.{Sink, Source}
import akka.util.Timeout
import com.github.jeroenr.bson.{BsonDocument, BsonValueNumber, Bulk}
import com.github.jeroenr.tepkin.protocol.WriteConcern
import com.github.jeroenr.tepkin.protocol.command._
import com.github.jeroenr.tepkin.protocol.message.{QueryMessage, QueryOptions, Reply}
import com.github.jeroenr.tepkin.protocol.result._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Random
class MongoCollection(databaseName: String,
collectionName: String,
pool: ActorRef) {
/**
* Calculates aggregate values for the data in this collection.
*
* @param pipeline A sequence of data aggregation operations or stages.
* @param explain Specifies to return the information on the processing of the pipeline.
* @param allowDiskUse Enables writing to temporary files. When set to true, aggregation operations can write data
* to the _tmp subdirectory in the dbPath directory.
* @param cursor Specifies the initial batch size for the cursor. The value of the cursor field is a document with
* the field batchSize.
*/
def aggregate(pipeline: List[BsonDocument],
explain: Option[Boolean] = None,
allowDiskUse: Option[Boolean] = None,
cursor: Option[BsonDocument] = None,
batchMultiplier: Int = 1000)
(implicit timeout: Timeout): Source[List[BsonDocument], ActorRef] = {
val source: Source[List[BsonDocument], ActorRef] = Source.actorPublisher(MongoCursor.props(
pool,
Aggregate(databaseName, collectionName, pipeline, explain, allowDiskUse, cursor),
reply => (s"$databaseName.$collectionName", reply.cursorID, reply.documents),
batchMultiplier,
timeout))
source.mapConcat(_.flatMap(_.getAsList[BsonDocument]("result")))
}
/**
* Counts the number of documents in this collection.
*
* @param query A query that selects which documents to count in a collection.
* @param limit The maximum number of matching documents to return.
* @param skip The number of matching documents to skip before returning results.
*/
def count(query: Option[BsonDocument] = None,
limit: Option[Int] = None,
skip: Option[Int] = None)
(implicit ec: ExecutionContext, timeout: Timeout): Future[CountResult] = {
(pool ? Count(databaseName, collectionName, query, limit, skip)).mapTo[Reply].map { reply =>
val document = reply.documents.head
CountResult(
document.getAs[Boolean]("missing"),
document.get[BsonValueNumber]("n").map(_.toInt).getOrElse(0),
document.get[BsonValueNumber]("ok").map(_.toInt).getOrElse(0) == 1
)
}
}
/**
* Creates indexes on this collection.
*/
def createIndexes(indexes: Index*)
(implicit ec: ExecutionContext, timeout: Timeout): Future[CreateIndexesResult] = {
(pool ? CreateIndexes(databaseName, collectionName, indexes: _*)).mapTo[Reply].map { reply =>
val document = reply.documents.head
CreateIndexesResult(document).convertErrorToException()
}
}
/**
* Removes documents from a collection.
*
* @param query Specifies deletion criteria using query operators.
* To delete all documents in a collection, pass an empty document ({}).
* @param justOne To limit the deletion to just one document, set to true.
* Omit to use the default value of false and delete all documents matching the deletion criteria.
* @param writeConcern A document expressing the write concern. Omit to use the default write concern.
* @return A WriteResult object that contains the status of the operation.
*/
def delete(query: BsonDocument, justOne: Boolean = false, writeConcern: Option[WriteConcern] = None)
(implicit ec: ExecutionContext, timeout: Timeout): Future[DeleteResult] = {
(pool ? Delete(
databaseName,
collectionName,
      deletes = Seq(DeleteElement(query, if (justOne) 1 else 0)),
writeConcern = writeConcern.map(_.toDoc))).mapTo[Reply].map { reply =>
val document = reply.documents.head
DeleteResult(
document.get[BsonValueNumber]("ok").map(_.toInt).getOrElse(0) == 1,
document.getAs[Int]("n").getOrElse(0),
operationError = OperationError(document),
writeErrors = document.getAsList[BsonDocument]("writeErrors").map(_.map(WriteError(_))),
writeConcernError = document.getAs[BsonDocument]("writeConcernError").map(WriteConcernError(_))
).convertErrorToException()
}
}
/**
* Finds the distinct values for a specified field across a single collection and returns the results in an array.
* @param field The field for which to return distinct values.
* @param query A query that specifies the documents from which to retrieve the distinct values.
*/
def distinct(field: String, query: Option[BsonDocument] = None)
(implicit ec: ExecutionContext, timeout: Timeout): Future[DistinctResult] = {
(pool ? Distinct(databaseName, collectionName, field, query)).mapTo[Reply].map { reply =>
val document = reply.documents.head
DistinctResult(document)
}
}
/** Drops this collection */
def drop()(implicit ec: ExecutionContext, timeout: Timeout): Future[Reply] = {
(pool ? Drop(databaseName, collectionName)).mapTo[Reply]
}
/**
* Selects documents in this collection.
*
* @param query Specifies selection criteria using query operators. To return all documents in a collection,
* pass an empty document ({}).
* @param fields Specifies the fields to return using projection operators. To return all fields in the matching
* document, omit this parameter.
*/
def find(query: BsonDocument,
fields: Option[BsonDocument] = None,
skip: Int = 0,
tailable: Boolean = false,
batchMultiplier: Int = 1000)
(implicit timeout: Timeout): Source[List[BsonDocument], ActorRef] = {
val flags = if (tailable) QueryOptions.TailableCursor | QueryOptions.AwaitData else 0
Source.actorPublisher(MongoCursor.props(
pool,
QueryMessage(s"$databaseName.$collectionName", query, fields = fields, flags = flags, numberToSkip = skip),
reply => (s"$databaseName.$collectionName", reply.cursorID, reply.documents),
batchMultiplier,
timeout))
}
/**
* Updates and returns a single document. It returns the old document by default.
*
* @param query The selection criteria for the update.
   * @param sort Determines which document the operation updates if the query selects multiple documents.
   * findAndUpdate() updates the first document in the sort order specified by this argument.
   * @param update Performs an update of the selected document.
   * @param returnNew When true, returns the updated document rather than the original.
   * @param fields A subset of fields to return.
   * @param upsert When true, findAndUpdate() creates a new document if no document matches the query.
*/
def findAndUpdate(query: Option[BsonDocument] = None,
sort: Option[BsonDocument] = None,
update: BsonDocument,
returnNew: Boolean = false,
fields: Option[Seq[String]] = None,
upsert: Boolean = false)
(implicit ec: ExecutionContext, timeout: Timeout): Future[Option[BsonDocument]] = {
(pool ? FindAndModify(databaseName, collectionName, query, sort, Right(update), returnNew, fields, upsert))
.mapTo[Reply]
.map(_.documents.headOption.flatMap(_.getAs("value")))
}
/**
* Removes and returns a single document.
*
* @param query The selection criteria for the remove.
   * @param sort Determines which document the operation removes if the query selects multiple documents.
   * findAndRemove() removes the first document in the sort order specified by this argument.
* @param fields A subset of fields to return.
*/
def findAndRemove(query: Option[BsonDocument] = None,
sort: Option[BsonDocument] = None,
fields: Option[Seq[String]] = None)
(implicit ec: ExecutionContext, timeout: Timeout): Future[Option[BsonDocument]] = {
(pool ? FindAndModify(databaseName, collectionName, query, sort, Left(true), fields = fields))
.mapTo[Reply]
.map(_.documents.headOption.flatMap(_.getAs("value")))
}
/** Retrieves at most one document matching the given selector. */
def findOne(query: BsonDocument = BsonDocument.empty, skip: Int = 0)
(implicit ec: ExecutionContext, timeout: Timeout): Future[Option[BsonDocument]] = {
(pool ? QueryMessage(s"$databaseName.$collectionName", query, numberToSkip = skip, numberToReturn = 1))
.mapTo[Reply]
.map(_.documents.headOption)
}
/** Retrieves a random document matching the given selector. */
def findRandom(query: Option[BsonDocument] = None)
(implicit ec: ExecutionContext, timeout: Timeout): Future[Option[BsonDocument]] = {
for {
count <- count(query)
index = if (count.n == 0) 0 else Random.nextInt(count.n)
random <- findOne(query.getOrElse(BsonDocument.empty), skip = index)
} yield random
}
/**
* Inserts document into this collection.
*
* @param document document to insert into the collection.
*/
def insert(document: BsonDocument)(implicit ec: ExecutionContext, timeout: Timeout): Future[InsertResult] = {
insert(Seq(document))
}
/**
* Inserts document into this collection.
*
* @param document document to insert into the collection.
*/
def insert(document: BsonDocument, writeConcern: WriteConcern)
(implicit ec: ExecutionContext, timeout: Timeout): Future[InsertResult] = {
insert(Seq(document), writeConcern = Some(writeConcern))
}
/**
* Inserts documents into a collection.
*
* @param documents A sequence of documents to insert into the collection.
* @param ordered If true, perform an ordered insert of the documents in the array, and if an error occurs
* with one of documents, MongoDB will return without processing the remaining documents in the array.
* If false, perform an unordered insert, and if an error occurs with one of documents,
* continue processing the remaining documents in the array.
* @param writeConcern A document expressing the write concern.
*/
def insert(documents: Seq[BsonDocument], ordered: Option[Boolean] = None, writeConcern: Option[WriteConcern] = None)
(implicit ec: ExecutionContext, timeout: Timeout): Future[InsertResult] = {
(pool ? Insert(databaseName, collectionName, documents, ordered, writeConcern.map(_.toDoc)))
.mapTo[Reply].map { reply =>
val document = reply.documents.head
InsertResult(
document.get[BsonValueNumber]("ok").map(_.toInt).getOrElse(0) == 1,
document.getAs[Int]("n").getOrElse(0),
operationError = OperationError(document),
writeErrors = document.getAsList[BsonDocument]("writeErrors").map(_.map(WriteError(_))),
writeConcernError = document.getAs[BsonDocument]("writeConcernError").map(WriteConcernError(_))
).convertErrorToException()
}
}
/**
* Inserts documents into a collection.
*
* @param source A source of documents to insert into the collection.
* @param ordered If true, perform an ordered insert of the documents in the array, and if an error occurs
* with one of documents, MongoDB will return without processing the remaining documents in the array.
* If false, perform an unordered insert, and if an error occurs with one of documents,
* continue processing the remaining documents in the array.
* @param writeConcern A document expressing the write concern.
*/
def insertFromSource[M](source: Source[List[BsonDocument], M],
ordered: Option[Boolean] = None,
writeConcern: Option[WriteConcern] = None)
(implicit ec: ExecutionContext, timeout: Timeout): Source[InsertResult, M] = {
source.mapAsync(1)(documents => insert(documents, ordered, writeConcern))
}
/**
* Returns a list of documents that identify and describe the existing indexes on this collection.
*/
def getIndexes()(implicit ec: ExecutionContext, timeout: Timeout): Future[List[Index]] = {
(pool ? ListIndexes(databaseName, collectionName)).mapTo[Reply].map { reply =>
reply.documents.head.getAs[BsonDocument]("cursor")
.flatMap(_.getAsList[BsonDocument]("firstBatch"))
.getOrElse(Nil)
.map(Index.apply)
}
}
/**
* Returns a sink which inserts bulk documents.
*
* @param parallelism max number of parallel insert commands.
*/
def sink(parallelism: Int = 1,
ordered: Option[Boolean] = None,
writeConcern: Option[WriteConcern] = None): Sink[Bulk, ActorRef] = {
Sink.actorSubscriber(InsertSink.props(databaseName, collectionName, pool, parallelism, ordered, writeConcern))
}
/**
* Modifies an existing document or documents in a collection. The method can modify specific fields of an existing
* document or documents or replace an existing document entirely, depending on the update parameter.
*
* @param query The selection criteria for the update. Use the same query selectors as used in the find() method.
* @param update The modifications to apply.
* @param upsert If set to true, creates a new document when no document matches the query criteria.
* The default value is false, which does not insert a new document when no match is found.
* @param multi If set to true, updates multiple documents that meet the query criteria.
* If set to false, updates one document. The default value is false.
* @param writeConcern A document expressing the write concern.
*/
def update(query: BsonDocument,
update: BsonDocument,
upsert: Option[Boolean] = None,
multi: Option[Boolean] = None,
writeConcern: Option[WriteConcern] = None)
(implicit ec: ExecutionContext, timeout: Timeout): Future[UpdateResult] = {
(pool ? Update(
databaseName,
collectionName,
updates = Seq(UpdateElement(q = query, u = update, upsert = upsert, multi = multi)),
writeConcern = writeConcern.map(_.toDoc)
)).mapTo[Reply].map { reply =>
val document = reply.documents.head
UpdateResult(
ok = document.get[BsonValueNumber]("ok").map(_.toInt).getOrElse(0) == 1,
n = document.get[BsonValueNumber]("n").map(_.toInt).getOrElse(0),
nModified = document.get[BsonValueNumber]("nModified").map(_.toInt).getOrElse(0),
upserted = document.getAsList[BsonDocument]("upserted"),
operationError = OperationError(document),
writeErrors = document.getAsList[BsonDocument]("writeErrors").map(_.map(WriteError(_))),
writeConcernError = document.getAs[BsonDocument]("writeConcernError").map(WriteConcernError(_))
).convertErrorToException()
}
}
/**
* Validates a collection. The method scans a collection’s data structures for correctness
* and returns a single document that describes the relationship between the logical collection
* and the physical representation of the data.
* @param full Specify true to enable a full validation and to return full statistics.
* MongoDB disables full validation by default because it is a potentially resource-intensive operation.
   * @param scandata If false, skips scanning the base collection's data while still scanning its indexes.
*/
def validate(full: Option[Boolean] = None, scandata: Option[Boolean] = None)
(implicit ec: ExecutionContext, timeout: Timeout): Future[BsonDocument] = {
(pool ? Validate(databaseName, collectionName)).mapTo[Reply].map(_.documents.head)
}
}
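// A hedged usage sketch (appended for illustration; not part of the original file). It assumes an
// already-initialised `collection: MongoCollection` backed by a connection pool actor, which is
// not shown here; the 5-second timeout is an arbitrary example value.
object MongoCollectionUsageSketch {
  import akka.util.Timeout
  import scala.concurrent.duration._
  import scala.concurrent.{ExecutionContext, Future}
  def countThenFetchFirst(collection: MongoCollection)
                         (implicit ec: ExecutionContext): Future[(Int, Option[BsonDocument])] = {
    implicit val timeout: Timeout = Timeout(5.seconds)
    for {
      count <- collection.count()   // total number of documents in the collection
      first <- collection.findOne() // first document, if the collection is non-empty
    } yield (count.n, first)
  }
}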
|
jeroenr/tepkin
|
tepkin/src/main/scala/com/github/jeroenr/tepkin/MongoCollection.scala
|
Scala
|
apache-2.0
| 16,357
|
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.commons.csv
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
class CsvTest extends AnyFunSpec with Matchers {
describe("CsvFormat") {
it("Format") {
val builder = new CsvFormat.Builder
builder.separator(CsvConstants.Comma).separator(CsvConstants.Semicolon)
.delimiter(CsvConstants.Quote)
val csv = new Csv(builder.build())
csv.format.isSeparator(CsvConstants.Comma) should be(true)
csv.format.isSeparator(CsvConstants.Semicolon) should be(true)
csv.format.delimiter should equal(CsvConstants.Quote)
}
}
}
|
beangle/commons
|
csv/src/test/scala/org/beangle/commons/csv/CsvTest.scala
|
Scala
|
lgpl-3.0
| 1,342
|
package net.ceedubs.scrutinator
package scalatra
import scalaz._
import shapeless._
import org.scalatra.validation.{ FieldName, ValidationError, ValidationFail => ScalatraValidationFail }
trait ScalatraSupport {
def validator[L <: HList](fields: L)(implicit binder: FieldBinder[L, Request]): Kleisli[ValidationErrorsOr, Request, binder.R] = ScalatraSupport.validator(fields)
}
object ScalatraSupport {
def validator[L <: HList](fields: L)(implicit binder: FieldBinder[L, Request]): Kleisli[ValidationErrorsOr, Request, binder.R] =
RequestBinding.fieldBinder(fields).
mapK[ValidationErrorsOr, binder.R](_.leftMap(
_.map(toValidationError)))
def toValidationError(v: ScopedValidationFail): ValidationError = {
val fieldName = v.fieldName
ValidationError(
message = v.failure.msg.getOrElse(s"${fieldName.getOrElse("value")} is invalid"),
field = v.fieldName.map(FieldName(_)),
code = Some(org.scalatra.validation.ValidationFail),
args = Seq.empty)
}
}
|
ceedubs/scrutinator
|
scalatra/src/main/scala/net/ceedubs/scrutinator/scalatra/ScalatraSupport.scala
|
Scala
|
mit
| 1,002
|
package com.twitter.finagle.tunable
import com.twitter.util.tunable.{NullTunableMap, JsonTunableMapper, ServiceLoadedTunableMap, TunableMap}
import com.twitter.finagle.server.ServerInfo
import java.util.concurrent.ConcurrentHashMap
import java.util.function.{Function => JFunction}
import scala.collection.JavaConverters._
/**
* Object used for getting the [[TunableMap]] for a given `id`. This [[TunableMap]] is composed
* from 3 sources, in order of priority:
*
* i. A mutable, in-process [[TunableMap.Mutable]].
* i. The dynamically loaded [[TunableMap]], provided via [[ServiceLoadedTunableMap.apply]].
* i. The JSON file-based [[TunableMap]], provided via [[JsonTunableMapper.loadJsonTunables]].
*
* The JSON file-based [[TunableMap]] is a composition of file-based per-instance and
* per-environment [[TunableMap]]s. [[TunableMap]]s are composed in the following priority order:
*
* i. Environment and instance-specific
* i. Environment-specific for all instances
* i. Instance-specific
* i. All instances
*
* For more information, see
* [[https://twitter.github.io/finagle/guide/Configuration.html#tunables]].
*/
private[twitter] object StandardTunableMap {
private[this] val clientMaps = new ConcurrentHashMap[String, TunableMap]()
private[this] def composeMap(mutable: TunableMap, serverInfo: ServerInfo) =
new JFunction[String, TunableMap] {
def apply(id: String): TunableMap = {
val json = loadJsonConfig(id, serverInfo)
TunableMap.of(
mutable,
ServiceLoadedTunableMap(id),
json
)
}
}
def apply(id: String): TunableMap =
apply(id, ServerInfo(), TunableMap.newMutable(s"Mutable($id)"))
// Exposed for testing
private[tunable] def apply(
id: String,
serverInfo: ServerInfo,
mutable: TunableMap
): TunableMap =
clientMaps.computeIfAbsent(id, composeMap(mutable, serverInfo))
/**
* Returns all registered [[TunableMap TunableMaps]] that have been
* created by [[apply]], keyed by `id`.
*/
def registeredIds: Map[String, TunableMap] =
clientMaps.asScala.toMap
/**
* Load `TunableMap`s from JSON configuration files and compose them in the path order from
* `JsonTunableMapper.pathsByPriority`.
*/
private[tunable] def loadJsonConfig(id: String, serverInfo: ServerInfo): TunableMap = {
val environmentOpt = serverInfo.environment
val instanceIdOpt = serverInfo.instanceId
val paths = JsonTunableMapper.pathsByPriority(
s"com/twitter/tunables/$id/", environmentOpt, instanceIdOpt)
paths.foldLeft(NullTunableMap: TunableMap) { case (map, path) =>
map.orElse(JsonTunableMapper().loadJsonTunables(id, path))
}
}
}
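// A hedged usage sketch (appended for illustration, in comment form only; "exampleClient" is a
// made-up id, and since the object is private[twitter] the caller must live under com.twitter):
//
//   val tunables: TunableMap = StandardTunableMap("exampleClient")
//   // Subsequent calls with the same id return the same composed (and cached) map,
//   // and the id now appears in StandardTunableMap.registeredIds.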
|
koshelev/finagle
|
finagle-tunable/src/main/scala/com/twitter/finagle/tunable/StandardTunableMap.scala
|
Scala
|
apache-2.0
| 2,729
|
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.geotools
import java.io.IOException
import java.util.Collections
import java.util.concurrent.TimeUnit
import com.github.benmanes.caffeine.cache.{CacheLoader, Caffeine}
import com.typesafe.scalalogging.LazyLogging
import org.geotools.data._
import org.locationtech.geomesa.index.FlushableFeatureWriter
import org.locationtech.geomesa.index.api.{IndexManager, _}
import org.locationtech.geomesa.index.conf.partition.TablePartition
import org.locationtech.geomesa.index.geotools.GeoMesaDataStore.VersionKey
import org.locationtech.geomesa.index.geotools.GeoMesaDataStoreFactory.GeoMesaDataStoreConfig
import org.locationtech.geomesa.index.index.attribute.AttributeIndex
import org.locationtech.geomesa.index.index.id.IdIndex
import org.locationtech.geomesa.index.metadata.GeoMesaMetadata.AttributesKey
import org.locationtech.geomesa.index.planning.QueryPlanner
import org.locationtech.geomesa.index.stats.HasGeoMesaStats
import org.locationtech.geomesa.index.utils.{ExplainLogging, Explainer}
import org.locationtech.geomesa.utils.conf.SemanticVersion.MinorOrdering
import org.locationtech.geomesa.utils.conf.{FeatureExpiration, GeoMesaProperties, IndexId, SemanticVersion}
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes.{AttributeOptions, Configs, InternalConfigs}
import org.locationtech.geomesa.utils.geotools.converters.FastConverter
import org.locationtech.geomesa.utils.index.IndexMode
import org.locationtech.geomesa.utils.io.CloseWithLogging
import org.locationtech.geomesa.utils.stats.IndexCoverage
import org.opengis.feature.simple.SimpleFeatureType
import org.opengis.filter.Filter
import scala.util.control.NonFatal
/**
* Abstract base class for data store implementations on top of distributed databases
*
* @param config common datastore configuration options - subclasses can extend this
* @tparam DS type of this data store
*/
abstract class GeoMesaDataStore[DS <: GeoMesaDataStore[DS]](val config: GeoMesaDataStoreConfig)
extends MetadataBackedDataStore(config) with HasGeoMesaStats {
this: DS =>
import scala.collection.JavaConverters._
val queryPlanner: QueryPlanner[DS] = new QueryPlanner(this)
val manager: IndexManager = new IndexManager(this)
@deprecated
protected def catalog: String = config.catalog
// abstract methods to be implemented by subclasses
def adapter: IndexAdapter[DS]
/**
* Returns all tables that may be created for the simple feature type. Note that some of these
* tables may be shared with other simple feature types, and the tables may not all currently exist.
*
* @param typeName simple feature type name
* @return
*/
def getAllTableNames(typeName: String): Seq[String] = Seq(config.catalog) ++ getAllIndexTableNames(typeName)
/**
* Returns all index tables that may be created for the simple feature type. Note that some of these
* tables may be shared with other simple feature types, and the tables may not all currently exist.
*
* @param typeName simple feature type name
* @return
*/
def getAllIndexTableNames(typeName: String): Seq[String] =
Option(getSchema(typeName)).toSeq.flatMap(sft => manager.indices(sft).flatMap(_.getTableNames(None)))
/**
* Optimized method to delete everything (all tables) associated with this datastore
* (index tables and catalog table)
* NB: We are *not* currently deleting the query table and/or query information.
*/
def delete(): Unit = adapter.deleteTables(getTypeNames.flatMap(getAllTableNames).distinct)
// hooks to allow extended functionality
/**
* Gets iterator versions as a string. Subclasses with distributed classpaths should override and implement.
*
* @return iterator versions
*/
protected def loadIteratorVersions: Set[String] = Set.empty
/**
* Update the local value for `sft.getIndices`. Only needed for legacy data stores with old index metadata
* encoding
*
* @param sft simple feature type
*/
protected def transitionIndices(sft: SimpleFeatureType): Unit =
throw new NotImplementedError("This data store does not support legacy index formats - please create a new schema")
@throws(classOf[IllegalArgumentException])
override protected def preSchemaCreate(sft: SimpleFeatureType): Unit = {
import Configs.{TableSplitterClass, TableSplitterOpts}
import InternalConfigs.{PartitionSplitterClass, PartitionSplitterOpts}
// check for old enabled indices and re-map them
// noinspection ScalaDeprecation
SimpleFeatureTypes.Configs.ENABLED_INDEX_OPTS.drop(1).find(sft.getUserData.containsKey).foreach { key =>
sft.getUserData.put(SimpleFeatureTypes.Configs.EnabledIndices, sft.getUserData.remove(key))
}
// validate column groups
adapter.groups.validate(sft)
// disable table sharing, no longer supported
// noinspection ScalaDeprecation
if (sft.isTableSharing) {
logger.warn("Table sharing is no longer supported - disabling table sharing")
sft.getUserData.remove(Configs.TableSharing)
sft.getUserData.remove(InternalConfigs.TableSharingPrefix)
}
// configure the indices to use
if (sft.getIndices.isEmpty) {
val indices = GeoMesaFeatureIndexFactory.indices(sft)
if (indices.isEmpty) {
throw new IllegalArgumentException("There are no available indices that support the schema " +
SimpleFeatureTypes.encodeType(sft))
}
sft.setIndices(indices)
}
// try to create the indices up front to ensure they are valid for the sft
GeoMesaFeatureIndexFactory.create(this, sft, sft.getIndices)
// remove the enabled indices after configuration so we don't persist them
sft.getUserData.remove(SimpleFeatureTypes.Configs.EnabledIndices)
// remove any 'index' flags in the attribute metadata, they have already been captured in the indices above
sft.getAttributeDescriptors.asScala.foreach(_.getUserData.remove(AttributeOptions.OptIndex))
// for partitioned schemas, persist the table partitioning keys
if (TablePartition.partitioned(sft)) {
Seq((TableSplitterClass, PartitionSplitterClass), (TableSplitterOpts, PartitionSplitterOpts)).foreach {
case (from, to) => Option(sft.getUserData.get(from)).foreach(sft.getUserData.put(to, _))
}
}
// set stats enabled based on the data store config if not explicitly set
if (!sft.getUserData.containsKey(SimpleFeatureTypes.Configs.StatsEnabled)) {
sft.setStatsEnabled(config.generateStats)
}
sft.getFeatureExpiration // validate any configured age-off
}
@throws(classOf[IllegalArgumentException])
override protected def preSchemaUpdate(sft: SimpleFeatureType, previous: SimpleFeatureType): Unit = {
// check for attributes flagged 'index' and convert them to sft-level user data
sft.getAttributeDescriptors.asScala.foreach { d =>
val index = d.getUserData.remove(AttributeOptions.OptIndex).asInstanceOf[String]
if (index == null || index.equalsIgnoreCase(IndexCoverage.NONE.toString) || index.equalsIgnoreCase("false")) {
// no-op
} else if (index.equalsIgnoreCase(IndexCoverage.FULL.toString) || java.lang.Boolean.valueOf(index)) {
val fields = Seq(d.getLocalName) ++ Option(sft.getGeomField) ++ sft.getDtgField
val attribute = IndexId(AttributeIndex.name, AttributeIndex.version, fields, IndexMode.ReadWrite)
val existing = sft.getIndices.map(GeoMesaFeatureIndex.identifier)
if (!existing.contains(GeoMesaFeatureIndex.identifier(attribute))) {
sft.setIndices(sft.getIndices :+ attribute)
}
} else {
throw new IllegalArgumentException(s"Configured index coverage '$index' is not valid: expected " +
IndexCoverage.FULL.toString)
}
}
// try to create the new indices to ensure they are valid for the sft
val previousIndices = previous.getIndices.map(GeoMesaFeatureIndex.identifier)
val newIndices = sft.getIndices.filterNot(i => previousIndices.contains(GeoMesaFeatureIndex.identifier(i)))
if (newIndices.nonEmpty) {
try { GeoMesaFeatureIndexFactory.create(this, sft, newIndices) } catch {
case NonFatal(e) => throw new IllegalArgumentException("Error configuring new feature index", e)
}
}
sft.getFeatureExpiration // validate any configured age-off
}
// create the index tables (if not using partitioned tables)
override protected def onSchemaCreated(sft: SimpleFeatureType): Unit = {
val indices = manager.indices(sft)
if (TablePartition.partitioned(sft)) {
logger.debug(s"Delaying creation of partitioned indices ${indices.map(_.identifier).mkString(", ")}")
} else {
logger.debug(s"Creating indices ${indices.map(_.identifier).mkString(", ")}")
indices.foreach(index => adapter.createTable(index, None, index.getSplits(None)))
}
}
// create the new index tables (if not using partitioned tables)
override protected def onSchemaUpdated(sft: SimpleFeatureType, previous: SimpleFeatureType): Unit = {
val partitioned = TablePartition.partitioned(sft)
// check for column renaming
val colMap = previous.getAttributeDescriptors.asScala.zipWithIndex.toMap.flatMap { case (prev, i) =>
val cur = sft.getDescriptor(i)
if (prev.getLocalName != cur.getLocalName) {
Map(prev.getLocalName -> cur.getLocalName)
} else {
Map.empty[String, String]
}
}
val indices = sft.getIndices
val indexChange = colMap.nonEmpty && indices.exists(_.attributes.exists(colMap.contains))
if (indexChange) {
val updated = indices.map { i =>
if (!i.attributes.exists(colMap.contains)) { i } else {
val update = i.copy(attributes = i.attributes.map(a => colMap.getOrElse(a, a)))
// side-effect - rewrite the table name keys for the renamed cols
val old = manager.index(previous, GeoMesaFeatureIndex.identifier(i))
val index = GeoMesaFeatureIndexFactory.create(this, sft, Seq(update)).headOption.getOrElse {
throw new IllegalArgumentException(
s"Error configuring new feature index: ${GeoMesaFeatureIndex.identifier(update)}")
}
val partitions = if (!partitioned) { Seq(None) } else {
// have to use the old table name key but the new sft name for looking up the partitions
val tableNameKey = old.tableNameKey(Some(""))
val offset = tableNameKey.length
metadata.scan(sft.getTypeName, tableNameKey).map { case (k, _) => Some(k.substring(offset)) }
}
partitions.foreach { p =>
metadata.read(sft.getTypeName, old.tableNameKey(p)).foreach { v =>
metadata.insert(sft.getTypeName, index.tableNameKey(p), v)
}
}
update
}
}
sft.setIndices(updated.distinct)
metadata.insert(sft.getTypeName, AttributesKey, SimpleFeatureTypes.encodeType(sft, includeUserData = true))
}
// configure any new indices
if (partitioned) {
logger.debug("Delaying creation of partitioned indices")
} else {
logger.debug(s"Ensuring indices ${manager.indices(sft).map(_.identifier).mkString(", ")}")
manager.indices(sft).foreach(index => adapter.createTable(index, None, index.getSplits(None)))
}
// update stats
if (previous.statsEnabled) {
if (!sft.statsEnabled) {
stats.writer.clear(previous)
} else if (sft.getTypeName != previous.getTypeName || colMap.nonEmpty) {
stats.writer.rename(sft, previous)
}
}
// rename tables to match the new sft name
if (sft.getTypeName != previous.getTypeName || indexChange) {
if (FastConverter.convertOrElse[java.lang.Boolean](sft.getUserData.get(Configs.UpdateRenameTables), false)) {
manager.indices(sft).foreach { index =>
val partitions = if (partitioned) { index.getPartitions.map(Option.apply) } else { Seq(None) }
partitions.foreach { partition =>
val key = index.tableNameKey(partition)
metadata.read(sft.getTypeName, key).foreach { table =>
metadata.remove(sft.getTypeName, key)
val renamed = index.configureTableName(partition, adapter.tableNameLimit)
if (renamed != table) {
logger.debug(s"Renaming table from '$table' to '$renamed'")
adapter.renameTable(table, renamed)
}
}
}
}
}
}
}
// delete the index tables
override protected def onSchemaDeleted(sft: SimpleFeatureType): Unit = {
// noinspection ScalaDeprecation
if (sft.isTableSharing && getTypeNames.exists(t => t != sft.getTypeName && getSchema(t).isTableSharing)) {
manager.indices(sft).par.foreach { index =>
if (index.keySpace.sharing.isEmpty) {
adapter.deleteTables(index.deleteTableNames(None))
} else {
adapter.clearTables(index.deleteTableNames(None), Some(index.keySpace.sharing))
}
}
} else {
manager.indices(sft).par.foreach(index => adapter.deleteTables(index.deleteTableNames(None)))
}
if (sft.statsEnabled) {
stats.writer.clear(sft)
}
}
// methods from org.geotools.data.DataStore
/**
* @see org.geotools.data.DataStore#getSchema(java.lang.String)
* @param typeName feature type name
* @return feature type, or null if it does not exist
*/
override def getSchema(typeName: String): SimpleFeatureType = {
var sft = super.getSchema(typeName)
if (sft != null) {
// ensure index metadata is correct
if (sft.getIndices.exists(i => i.attributes.isEmpty && i.name != IdIndex.name)) {
sft = SimpleFeatureTypes.mutable(sft)
// migrate index metadata to standardized versions and attributes
transitionIndices(sft)
sft = SimpleFeatureTypes.immutable(sft)
// validate indices
try { manager.indices(sft) } catch {
case NonFatal(e) =>
throw new IllegalStateException(s"The schema ${sft.getTypeName} was written with a older " +
"version of GeoMesa that is no longer supported. You may continue to use an older client, or " +
s"manually edit the metadata for '${InternalConfigs.IndexVersions}' to exclude the invalid indices.", e)
}
} else {
// validate indices
try { manager.indices(sft) } catch {
case NonFatal(e) =>
val versions = sft.getIndices.map(i => s"${i.name}:${i.version}").mkString(",")
val available = GeoMesaFeatureIndexFactory.available(sft).map(i => s"${i._1}:${i._2}").mkString(",")
logger.error(s"Trying to access schema ${sft.getTypeName} with invalid index versions '$versions' - " +
s"available indices are '$available'", e)
throw new IllegalStateException(s"The schema ${sft.getTypeName} was written with a newer " +
"version of GeoMesa than this client can handle. Please ensure that you are using the " +
"same GeoMesa jar versions across your entire workflow. For more information, see " +
"http://www.geomesa.org/documentation/user/installation_and_configuration.html#upgrading")
}
}
// check for sft-level stats flag and set it if not present
if (!sft.getUserData.containsKey(SimpleFeatureTypes.Configs.StatsEnabled)) {
val extra = Collections.singletonMap(SimpleFeatureTypes.Configs.StatsEnabled, config.generateStats.toString)
sft = SimpleFeatureTypes.immutable(sft, extra)
}
// get the remote version if it's available, but don't wait for it
GeoMesaDataStore.versions.get(new VersionKey(this)).getNow(Right(None)) match {
case Left(e) => throw e
case Right(version) =>
version.foreach { v =>
val userData = Collections.singletonMap[AnyRef, AnyRef](InternalConfigs.RemoteVersion, v.toString)
sft = SimpleFeatureTypes.immutable(sft, userData)
}
}
}
sft
}
/**
* @see org.geotools.data.DataStore#getFeatureSource(java.lang.String)
* @param typeName simple feature type name
* @return featureStore, suitable for reading and writing
*/
override def getFeatureSource(typeName: String): GeoMesaFeatureStore = {
val sft = getSchema(typeName)
if (sft == null) {
throw new IOException(s"Schema '$typeName' has not been initialized. Please call 'createSchema' first.")
}
if (config.caching) {
new GeoMesaFeatureStore(this, sft, queryPlanner) with GeoMesaFeatureSource.CachingFeatureSource
} else {
new GeoMesaFeatureStore(this, sft, queryPlanner)
}
}
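// Hedged usage sketch: reading through the feature store returned above. The type name is a
// placeholder and 'ds' stands in for a concrete data store instance.
//
//   val source = ds.getFeatureSource("example")
//   val count = source.getFeatures(Filter.INCLUDE).size()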
/**
* @see org.geotools.data.DataStore#getFeatureReader(org.geotools.data.Query, org.geotools.data.Transaction)
* @param query query to execute
* @param transaction transaction to use (currently ignored)
* @return feature reader
*/
override def getFeatureReader(query: Query, transaction: Transaction): GeoMesaFeatureReader = {
require(query.getTypeName != null, "Type name is required in the query")
val sft = getSchema(query.getTypeName)
if (sft == null) {
throw new IOException(s"Schema '${query.getTypeName}' has not been initialized. Please call 'createSchema' first.")
}
if (transaction != Transaction.AUTO_COMMIT) {
logger.warn("Ignoring transaction - not supported")
}
getFeatureReader(sft, query)
}
/**
* Internal method to get a feature reader without reloading the simple feature type. We don't expose this
* widely as we want to ensure that the sft has been loaded from our catalog
*
* @param sft simple feature type
* @param query query
* @return
*/
private [geotools] def getFeatureReader(sft: SimpleFeatureType, query: Query): GeoMesaFeatureReader =
GeoMesaFeatureReader(sft, query, queryPlanner, config.queryTimeout, config.audit)
/**
* Create a general purpose writer that is capable of updates and deletes.
* Does <b>not</b> allow inserts.
*
* @see org.geotools.data.DataStore#getFeatureWriter(java.lang.String, org.opengis.filter.Filter,
* org.geotools.data.Transaction)
* @param typeName feature type name
* @param filter cql filter to select features for update/delete
* @param transaction transaction (currently ignored)
* @return feature writer
*/
override def getFeatureWriter(typeName: String, filter: Filter, transaction: Transaction): FlushableFeatureWriter = {
val sft = getSchema(typeName)
if (sft == null) {
throw new IOException(s"Schema '$typeName' has not been initialized. Please call 'createSchema' first.")
}
if (transaction != Transaction.AUTO_COMMIT) {
logger.warn("Ignoring transaction - not supported")
}
getFeatureWriter(sft, Some(filter))
}
/**
* Creates a feature writer only for writing - does not allow updates or deletes.
*
* @see org.geotools.data.DataStore#getFeatureWriterAppend(java.lang.String, org.geotools.data.Transaction)
* @param typeName feature type name
* @param transaction transaction (currently ignored)
* @return feature writer
*/
override def getFeatureWriterAppend(typeName: String, transaction: Transaction): FlushableFeatureWriter = {
val sft = getSchema(typeName)
if (sft == null) {
throw new IOException(s"Schema '$typeName' has not been initialized. Please call 'createSchema' first.")
}
if (transaction != Transaction.AUTO_COMMIT) {
logger.warn("Ignoring transaction - not supported")
}
getFeatureWriter(sft, None)
}
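// Hedged usage sketch (names are placeholders, not from this file): appending a feature with
// the writer returned above; 'point' stands in for a JTS geometry constructed elsewhere.
//
//   val writer = ds.getFeatureWriterAppend("example", Transaction.AUTO_COMMIT)
//   try {
//     val sf = writer.next()
//     sf.setAttributes(Array[AnyRef]("alice", new java.util.Date(), point))
//     writer.write()
//   } finally {
//     writer.close()
//   }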
/**
* Internal method to get a feature writer without reloading the simple feature type. We don't expose this
* widely as we want to ensure that the sft has been loaded from our catalog
*
* @param sft simple feature type
* @param filter if defined, will do an updating write, otherwise will do an appending write
* @return
*/
private [geotools] def getFeatureWriter(sft: SimpleFeatureType, filter: Option[Filter]): FlushableFeatureWriter =
GeoMesaFeatureWriter(this, sft, manager.indices(sft, mode = IndexMode.Write), filter)
/**
* Writes to the specified indices
*
* @param typeName feature type name
* @param indices indices to write
* @return
*/
def getIndexWriterAppend(typeName: String, indices: Seq[GeoMesaFeatureIndex[_, _]]): FlushableFeatureWriter = {
val sft = getSchema(typeName)
if (sft == null) {
throw new IOException(s"Schema '$typeName' has not been initialized. Please call 'createSchema' first.")
}
GeoMesaFeatureWriter(this, sft, indices, None)
}
/**
* Cleanup any open connections, etc. Equivalent to java.io.Closeable.close()
*
* @see org.geotools.data.DataAccess#dispose()
*/
override def dispose(): Unit = {
CloseWithLogging(stats)
config.audit.foreach { case (writer, _, _) => CloseWithLogging(writer) }
super.dispose()
}
// end methods from org.geotools.data.DataStore
// other public methods
/**
* Gets the query plan for a given query. The query plan consists of the tables, ranges, iterators etc
* required to run a query against the data store.
*
* @param query query to execute
* @param index hint on the index to use to satisfy the query
* @return query plans
*/
def getQueryPlan(
query: Query,
index: Option[String] = None,
explainer: Explainer = new ExplainLogging): Seq[QueryPlan[DS]] = {
require(query.getTypeName != null, "Type name is required in the query")
val sft = getSchema(query.getTypeName)
if (sft == null) {
throw new IOException(s"Schema '${query.getTypeName}' has not been initialized. Please call 'createSchema' first.")
}
queryPlanner.planQuery(sft, query, index, explainer)
}
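// Hedged usage sketch: inspecting how a query would execute without running it. The ECQL
// expression and type name are illustrative placeholders.
//
//   import org.geotools.filter.text.ecql.ECQL
//
//   val query = new Query("example", ECQL.toFilter("bbox(geom, -80, 35, -75, 40)"))
//   ds.getQueryPlan(query).foreach(println)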
/**
* Gets the geomesa version
*
* @return client version
*/
def getClientVersion: SemanticVersion = SemanticVersion(GeoMesaProperties.ProjectVersion, lenient = true)
/**
* Gets the geomesa version
*
* @return iterator version, if data store has iterators
*/
def getDistributedVersion: Option[SemanticVersion] = {
GeoMesaDataStore.versions.get(new VersionKey(this)).get() match {
case Right(v) => v
case Left(e) => throw e
}
}
@deprecated("use getDistributedVersion")
def getDistributeVersion: Option[SemanticVersion] = getDistributedVersion
/**
* Gets the geomesa version
*
* @return (client version, iterator version)
*/
@deprecated("use getClientVersion and getDistributedVersion")
def getVersion: (String, Set[String]) = (GeoMesaProperties.ProjectVersion, loadIteratorVersions)
// end public methods
}
object GeoMesaDataStore extends LazyLogging {
import org.locationtech.geomesa.index.conf.SchemaProperties.{CheckDistributedVersion, ValidateDistributedClasspath}
private val loader = new CacheLoader[VersionKey, Either[Exception, Option[SemanticVersion]]]() {
override def load(key: VersionKey): Either[Exception, Option[SemanticVersion]] = {
if (key.ds.getTypeNames.length == 0) {
// short-circuit load - should try again next time cache is accessed
throw new RuntimeException("Can't load remote versions if there are no feature types")
}
if (CheckDistributedVersion.toBoolean.contains(false)) { Right(None) } else {
val clientVersion = key.ds.getClientVersion
// use lenient parsing to account for versions like 1.3.5.1
val iterVersions = key.ds.loadIteratorVersions.map(v => SemanticVersion(v, lenient = true))
def message: String = "Classpath errors detected: configured server-side iterators do not match " +
s"client version. Client version: $clientVersion, server versions: ${iterVersions.mkString(", ")}"
// take the newest one if there are multiple - probably an update went partially awry, so it's
// likely to match more tablet servers than the lower version
val version = iterVersions.reduceLeftOption((left, right) => if (right > left) { right } else { left })
// ensure matching versions
// return an error if the user has enabled strict checking and it's not a patch/pre-release version mismatch
// otherwise just log a warning
if (iterVersions.forall(_ == clientVersion)) {
Right(version)
} else if (ValidateDistributedClasspath.toBoolean.contains(false) ||
iterVersions.forall(MinorOrdering.compare(_, clientVersion) == 0)) {
logger.warn(message)
Right(version)
} else {
Left(new RuntimeException(s"$message. You may override this check by setting the system property " +
s"'-D${ValidateDistributedClasspath.property}=false'"))
}
}
}
}
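// Worked example of the check above (illustrative, not from the source): with a client at
// 3.4.1 and server-side iterators at 3.4.0, only the patch version differs, so MinorOrdering
// treats them as equal, a warning is logged, and the newest version is returned. With
// iterators at 3.3.0 the minor version differs, so a Left(RuntimeException) is returned
// unless ValidateDistributedClasspath is explicitly set to false.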
private val versions = Caffeine.newBuilder().refreshAfterWrite(1, TimeUnit.DAYS)
.buildAsync[VersionKey, Either[Exception, Option[SemanticVersion]]](loader)
/**
* Kick off an asynchronous call to load remote iterator versions
*
* @param ds datastore
*/
def initRemoteVersion(ds: GeoMesaDataStore[_]): Unit = {
// can't get remote version if there aren't any tables
if (ds.getTypeNames.length > 0) {
versions.get(new VersionKey(ds))
}
}
/**
* Cache key that bases equality on data store class and catalog, but allows for loading remote version
* from datastore
*
* @param ds data store
*/
private class VersionKey(val ds: GeoMesaDataStore[_]) {
override def equals(other: Any): Boolean = other match {
case that: VersionKey => ds.config.catalog == that.ds.config.catalog && ds.getClass == that.ds.getClass
case _ => false
}
override def hashCode(): Int =
Seq(ds.config.catalog, ds.getClass).map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
}
}
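A brief usage sketch for the data store above (illustrative only): 'ds' stands in for a hypothetical concrete GeoMesaDataStore implementation obtained elsewhere (e.g. via DataStoreFinder), and the type name "example" is a placeholder.
import org.geotools.data.{Query, Transaction}
val reader = ds.getFeatureReader(new Query("example"), Transaction.AUTO_COMMIT)
try {
  // each element is a SimpleFeature read through the index tables configured for the schema
  while (reader.hasNext) {
    println(reader.next())
  }
} finally {
  reader.close()
}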
|
elahrvivaz/geomesa
|
geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/geotools/GeoMesaDataStore.scala
|
Scala
|
apache-2.0
| 26,638
|
package mesosphere.marathon
package core.task
import mesosphere.UnitTest
import mesosphere.marathon.test.SettableClock
import mesosphere.marathon.core.condition.Condition
import mesosphere.marathon.core.instance.TestTaskBuilder
import mesosphere.marathon.core.task.bus.MesosTaskStatusTestHelper
import mesosphere.marathon.core.task.update.{ TaskUpdateEffect, TaskUpdateOperation }
import mesosphere.marathon.state.PathId
import org.apache.mesos.Protos.TaskState
import scala.concurrent.duration._
class TaskStatusUpdateTest extends UnitTest {
def unreachableEffect(effect: => TaskUpdateEffect): Unit = {
"return an effect that" should {
"result in an update" in {
effect shouldBe a[TaskUpdateEffect.Update]
}
"update to unreachable task status" in {
val newStatus = effect.asInstanceOf[TaskUpdateEffect.Update].newState.status.mesosStatus.get.getState
newStatus should be(TaskState.TASK_UNREACHABLE)
}
"update to unreachable instance status" in {
val newStatus = effect.asInstanceOf[TaskUpdateEffect.Update].newState.status.condition
newStatus should be(Condition.Unreachable)
}
}
}
"LaunchedEphemeral" when {
"updating a running task with a TASK_UNREACHABLE" should {
val f = new Fixture
val task = TestTaskBuilder.Helper.minimalRunning(appId = f.appId, since = f.clock.now())
f.clock += 5.seconds
val status = MesosTaskStatusTestHelper.unreachable(task.taskId, f.clock.now())
val update = TaskUpdateOperation.MesosUpdate(TaskCondition(status), status, f.clock.now())
val effect = task.update(update)
behave like unreachableEffect(effect)
}
}
"LaunchedOnReservation" when {
"updating a running task with a TASK_UNREACHABLE" should {
val f = new Fixture
val volumeId = Task.LocalVolumeId(f.appId, "persistent-volume", "uuid")
val task = TestTaskBuilder.Helper.residentLaunchedTask(f.appId, Seq(volumeId))
f.clock += 5.seconds
val status = MesosTaskStatusTestHelper.unreachable(task.taskId, f.clock.now())
val update = TaskUpdateOperation.MesosUpdate(TaskCondition(status), status, f.clock.now())
val effect = task.update(update)
behave like unreachableEffect(effect)
}
}
class Fixture {
val appId = PathId("/app")
val clock = new SettableClock()
}
}
|
Caerostris/marathon
|
src/test/scala/mesosphere/marathon/core/task/TaskStatusUpdateTest.scala
|
Scala
|
apache-2.0
| 2,384
|
package scuff.concurrent
import scala.concurrent._, duration._
import scala.util.control.NonFatal
import scala.util.Try
object JavaFutureConverter {
def apply(
failureReporter: Throwable => Unit,
sleep: FiniteDuration = 1.millisecond): JavaFutureConverter = {
val conv = new JavaFutureConverter(failureReporter, sleep)
conv.thread.start()
conv
}
}
final class JavaFutureConverter private (failureReporter: Throwable => Unit, sleep: FiniteDuration)
extends (java.util.concurrent.Future[Any] => Future[Any]) {
private type QueueItem = (Promise[Any], java.util.concurrent.Future[Any])
private[this] val queue = new collection.mutable.Queue[QueueItem]
private val thread = new Thread(classOf[JavaFutureConverter].getName) {
this setUncaughtExceptionHandler new Thread.UncaughtExceptionHandler {
def uncaughtException(t: Thread, e: Throwable): Unit = {
failureReporter(e)
}
}
override def run(): Unit = {
while (!Thread.currentThread.isInterrupted) {
val completed = queue.synchronized {
while (queue.isEmpty) {
queue.wait()
}
queue.dequeueAll(_._2.isDone())
}
if (completed.nonEmpty) {
completed.foreach {
case (promise, f) => promise complete Try(f.get)
}
} else {
sleep.unit.sleep(sleep.length)
}
}
}
}
def apply(f: java.util.concurrent.Future[Any]): Future[Any] = {
if (f.isDone) {
try Future successful f.get catch { case NonFatal(e) => Future failed e }
} else {
val promise = Promise[Any]()
queue.synchronized {
val notifyOnContent = queue.isEmpty
queue enqueue promise -> f
if (notifyOnContent) queue.notify()
}
promise.future
}
}
}
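A brief usage sketch for the converter above; the single-thread executor task and the stderr failure reporter are assumptions made for the example.
import java.util.concurrent.{Callable, Executors}
import scala.concurrent.Future
import scuff.concurrent.JavaFutureConverter
val converter = JavaFutureConverter(failureReporter = _.printStackTrace())
val executor = Executors.newSingleThreadExecutor()
val javaFuture = executor.submit(new Callable[Any] { def call(): Any = 42 })
// completes once the converter's polling thread observes javaFuture.isDone
val scalaFuture: Future[Any] = converter(javaFuture)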
|
nilskp/scuff
|
src/main/scala/scuff/concurrent/JavaFutureConverter.scala
|
Scala
|
mit
| 1,821
|
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers
import connectors.ApiConnector
import play.api.mvc._
import javax.inject.{Singleton, Inject}
import uk.gov.hmrc.play.bootstrap.controller.BackendController
import scala.concurrent.ExecutionContext.Implicits.global
@Singleton
class HelloWorld @Inject()(apiConnector: ApiConnector, cc: ControllerComponents) extends BackendController(cc) {
def hello = Action.async { implicit request =>
apiConnector.helloWorld() map (Ok(_))
}
}
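A hedged sketch of a connector shape compatible with the controller above; this is not the repository's actual ApiConnector, just one minimal form that would satisfy 'helloWorld() map (Ok(_))'.
package connectors
import javax.inject.Singleton
import scala.concurrent.Future
@Singleton
class ApiConnector {
  // hypothetical stand-in: a real connector would call the downstream API over HTTP
  def helloWorld(): Future[String] = Future.successful("""{"message":"Hello World"}""")
}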
|
hmrc/api-example-scala-client
|
app/controllers/HelloWorld.scala
|
Scala
|
apache-2.0
| 1,065
|
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.