| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 5-1M | stringlengths 5-109 | stringlengths 6-208 | stringclasses 1 value | stringclasses 15 values | int64 5-1M |
package gitbucket.core.service
import gitbucket.core.model.Issue
import gitbucket.core.util._
import gitbucket.core.util.StringUtil
import Directory._
import ControlUtil._
import org.eclipse.jgit.revwalk.RevWalk
import org.eclipse.jgit.treewalk.TreeWalk
import org.eclipse.jgit.lib.FileMode
import org.eclipse.jgit.api.Git
import gitbucket.core.model.Profile._
import profile.simple._
trait RepositorySearchService { self: IssuesService =>
import RepositorySearchService._
def countIssues(owner: String, repository: String, query: String)(implicit session: Session): Int =
searchIssuesByKeyword(owner, repository, query).length
def searchIssues(owner: String, repository: String, query: String)(implicit session: Session): List[IssueSearchResult] =
searchIssuesByKeyword(owner, repository, query).map { case (issue, commentCount, content) =>
IssueSearchResult(
issue.issueId,
issue.isPullRequest,
issue.title,
issue.openedUserName,
issue.registeredDate,
commentCount,
getHighlightText(content, query)._1)
}
def countFiles(owner: String, repository: String, query: String): Int =
using(Git.open(getRepositoryDir(owner, repository))){ git =>
if(JGitUtil.isEmpty(git)) 0 else searchRepositoryFiles(git, query).length
}
def searchFiles(owner: String, repository: String, query: String): List[FileSearchResult] =
using(Git.open(getRepositoryDir(owner, repository))){ git =>
if(JGitUtil.isEmpty(git)){
Nil
} else {
val files = searchRepositoryFiles(git, query)
val commits = JGitUtil.getLatestCommitFromPaths(git, files.map(_._1), "HEAD")
files.map { case (path, text) =>
val (highlightText, lineNumber) = getHighlightText(text, query)
FileSearchResult(
path,
commits(path).getCommitterIdent.getWhen,
highlightText,
lineNumber)
}
}
}
def countWikiPages(owner: String, repository: String, query: String): Int =
using(Git.open(Directory.getWikiRepositoryDir(owner, repository))){ git =>
if(JGitUtil.isEmpty(git)) 0 else searchRepositoryFiles(git, query).length
}
def searchWikiPages(owner: String, repository: String, query: String): List[FileSearchResult] =
using(Git.open(Directory.getWikiRepositoryDir(owner, repository))){ git =>
if(JGitUtil.isEmpty(git)){
Nil
} else {
val files = searchRepositoryFiles(git, query)
val commits = JGitUtil.getLatestCommitFromPaths(git, files.map(_._1), "HEAD")
files.map { case (path, text) =>
val (highlightText, lineNumber) = getHighlightText(text, query)
FileSearchResult(
path.replaceFirst("\\\\.md$", ""),
commits(path).getCommitterIdent.getWhen,
highlightText,
lineNumber)
}
}
}
def searchRepositoryFiles(git: Git, query: String): List[(String, String)] = {
val revWalk = new RevWalk(git.getRepository)
val objectId = git.getRepository.resolve("HEAD")
val revCommit = revWalk.parseCommit(objectId)
val treeWalk = new TreeWalk(git.getRepository)
treeWalk.setRecursive(true)
treeWalk.addTree(revCommit.getTree)
val keywords = StringUtil.splitWords(query.toLowerCase)
val list = new scala.collection.mutable.ListBuffer[(String, String)]
while (treeWalk.next()) {
val mode = treeWalk.getFileMode(0)
if(mode == FileMode.REGULAR_FILE || mode == FileMode.EXECUTABLE_FILE){
JGitUtil.getContentFromId(git, treeWalk.getObjectId(0), false).foreach { bytes =>
if(FileUtil.isText(bytes)){
val text = StringUtil.convertFromByteArray(bytes)
val lowerText = text.toLowerCase
val indices = keywords.map(lowerText.indexOf _)
if(!indices.exists(_ < 0)){
list.append((treeWalk.getPathString, text))
}
}
}
}
}
treeWalk.close()
revWalk.close()
list.toList
}
}
object RepositorySearchService {
val CodeLimit = 10
val IssueLimit = 10
def getHighlightText(content: String, query: String): (String, Int) = {
val keywords = StringUtil.splitWords(query.toLowerCase)
val lowerText = content.toLowerCase
val indices = keywords.map(lowerText.indexOf _)
if(!indices.exists(_ < 0)){
val lineNumber = content.substring(0, indices.min).split("\n").size - 1
val highlightText = StringUtil.escapeHtml(content.split("\n").drop(lineNumber).take(5).mkString("\n"))
.replaceAll("(?i)(" + keywords.map("\\Q" + _ + "\\E").mkString("|") + ")",
"<span class=\"highlight\">$1</span>")
(highlightText, lineNumber + 1)
} else {
(content.split("\\n").take(5).mkString("\\n"), 1)
}
}
case class SearchResult(
files : List[(String, String)],
issues: List[(Issue, Int, String)])
case class IssueSearchResult(
issueId: Int,
isPullRequest: Boolean,
title: String,
openedUserName: String,
registeredDate: java.util.Date,
commentCount: Int,
highlightText: String)
case class FileSearchResult(
path: String,
lastModified: java.util.Date,
highlightText: String,
highlightLineNumber: Int)
}
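The helper above locates the first line containing every keyword and wraps matches in a highlight span. A minimal sketch of the behaviour, assuming `StringUtil.splitWords` splits the query on whitespace and `escapeHtml` leaves plain ASCII text unchanged:

```scala
import gitbucket.core.service.RepositorySearchService.getHighlightText

val content = "first line\nsecond foo line\nthird line"
val (html, line) = getHighlightText(content, "foo")
// line == 2: the 1-based line of the first match
// html == "second <span class=\"highlight\">foo</span> line\nthird line"
```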
| zhoffice/gitbucket | src/main/scala/gitbucket/core/service/RepositorySearchService.scala | Scala | apache-2.0 | 5,303 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.runtime
import java.lang.invoke._
/**
* This class is only intended to be called by the synthetic `$deserializeLambda$` method that the Scala 2.12
* compiler will add to classes hosting lambdas.
*
* It is not intended to be consumed directly.
*/
object LambdaDeserializer {
/**
* Deserialize a lambda by calling `LambdaMetafactory.altMetafactory` to spin up a lambda class
* and instantiating this class with the captured arguments.
*
* A cache may be provided to ensure that subsequent deserialization of the same lambda expression
* is cheap; it amounts to a reflective call to the constructor of the previously created class.
* However, deserialization of the same lambda expression is not guaranteed to use the same class:
* concurrent deserialization of the same lambda expression may spin up more than one class.
*
* Assumptions:
* - No additional marker interfaces are required beyond `java.io.Serializable`. These are
* not stored in `SerializedLambda`, so we can't reconstitute them.
* - No additional bridge methods are passed to `altMetafactory`. Again, these are not stored.
*
* @param lookup The factory for method handles. Must have access to the implementation method, the
* functional interface class, and `java.io.Serializable`.
* @param cache A cache used to avoid spinning up a class for each deserialization of a given lambda. May be `null`
* @param serialized The lambda to deserialize. Note that this is typically created by the `readResolve`
* member of the anonymous class created by `LambdaMetaFactory`.
* @return An instance of the functional interface
*/
def deserializeLambda(lookup: MethodHandles.Lookup, cache: java.util.Map[String, MethodHandle],
targetMethodMap: java.util.Map[String, MethodHandle], serialized: SerializedLambda): AnyRef = {
val result = deserializeLambdaOrNull(lookup, cache, targetMethodMap, serialized)
if (result == null) throw new IllegalArgumentException("Illegal lambda deserialization")
else result
}
def deserializeLambdaOrNull(lookup: MethodHandles.Lookup, cache: java.util.Map[String, MethodHandle],
targetMethodMap: java.util.Map[String, MethodHandle], serialized: SerializedLambda): AnyRef = {
assert(targetMethodMap != null)
def slashDot(name: String) = name.replaceAll("/", ".")
val loader = lookup.lookupClass().getClassLoader
val implClass = loader.loadClass(slashDot(serialized.getImplClass))
val key = LambdaDeserialize.nameAndDescriptorKey(serialized.getImplMethodName, serialized.getImplMethodSignature)
def makeCallSite: CallSite = {
import serialized._
def parseDescriptor(s: String) =
MethodType.fromMethodDescriptorString(s, loader)
val funcInterfaceSignature = parseDescriptor(getFunctionalInterfaceMethodSignature)
val instantiated = parseDescriptor(getInstantiatedMethodType)
val functionalInterfaceClass = loader.loadClass(slashDot(getFunctionalInterfaceClass))
val implMethodSig = parseDescriptor(getImplMethodSignature)
// Construct the invoked type from the impl method type. This is the type of a factory
// that will be generated by the meta-factory. It is a method type, with param types
// coming form the types of the captures, and return type being the functional interface.
val invokedType: MethodType = {
// 1. Add receiver for non-static impl methods
val withReceiver = getImplMethodKind match {
case MethodHandleInfo.REF_invokeStatic | MethodHandleInfo.REF_newInvokeSpecial =>
implMethodSig
case _ =>
implMethodSig.insertParameterTypes(0, implClass)
}
// 2. Remove lambda parameters, leaving only captures. Note: the receiver may be a lambda parameter,
// such as in `Function<Object, String> s = Object::toString`
val lambdaArity = funcInterfaceSignature.parameterCount()
val from = withReceiver.parameterCount() - lambdaArity
val to = withReceiver.parameterCount()
// 3. Drop the lambda return type and replace with the functional interface.
withReceiver.dropParameterTypes(from, to).changeReturnType(functionalInterfaceClass)
}
// Lookup the implementation method
val implMethod: MethodHandle = if (targetMethodMap.containsKey(key)) {
targetMethodMap.get(key)
} else {
return null
}
val flags: Int = LambdaMetafactory.FLAG_SERIALIZABLE
LambdaMetafactory.altMetafactory(
lookup, getFunctionalInterfaceMethodName, invokedType,
/* samMethodType = */ funcInterfaceSignature,
/* implMethod = */ implMethod,
/* instantiatedMethodType = */ instantiated,
/* flags = */ flags.asInstanceOf[AnyRef]
)
}
val factory: MethodHandle = if (cache == null) {
val callSite = makeCallSite
if (callSite == null) return null
callSite.getTarget
} else cache.synchronized{
cache.get(key) match {
case null =>
val callSite = makeCallSite
if (callSite == null) return null
val temp = callSite.getTarget
cache.put(key, temp)
temp
case target => target
}
}
val captures = Array.tabulate(serialized.getCapturedArgCount)(n => serialized.getCapturedArg(n))
factory.invokeWithArguments(captures: _*)
}
}
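`deserializeLambda` is not called directly by user code; it is reached through the compiler-generated `$deserializeLambda$` bootstrap when a serializable lambda is read back. A minimal round-trip sketch (names chosen for illustration) that exercises that path on Scala 2.12+:

```scala
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, ObjectInputStream, ObjectOutputStream}

object LambdaRoundTrip extends App {
  val inc: Int => Int = _ + 1            // Scala 2.12+ compiles this to a serializable lambda

  // Serialize the lambda to bytes.
  val bos = new ByteArrayOutputStream()
  val out = new ObjectOutputStream(bos)
  out.writeObject(inc)
  out.close()

  // readObject resolves the SerializedLambda, which calls the capturing class's
  // $deserializeLambda$ and, through it, the deserializeLambda method above.
  val in = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray))
  val inc2 = in.readObject().asInstanceOf[Int => Int]
  assert(inc2(41) == 42)
}
```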
| scala/scala | src/library/scala/runtime/LambdaDeserializer.scala | Scala | apache-2.0 | 5,867 |
package com.twitter.util.tunable
import com.twitter.util.Var
/**
* A [[Tunable]] is an abstraction for an object that produces a Some(value) or None when applied.
* Implementations may enable mutation, such that successive applications of the [[Tunable]]
* produce different values.
*
* For more information about Tunables, see
* [[https://twitter.github.io/finagle/guide/Configuration.html#tunables]]
*
* @param id id of this [[Tunable]], used in `toString`. Must not be empty and should be unique.
* @tparam T type of value this [[Tunable]] holds
*/
sealed abstract class Tunable[T](val id: String) { self =>
/**
* Returns a [[Var]] that observes this [[Tunable]]. Observers of the [[Var]] will be notified
* whenever the value of this [[Tunable]] is updated.
*/
def asVar: Var[Option[T]]
// validate id is not empty
if (id.trim.isEmpty)
throw new IllegalArgumentException("Tunable id must not be empty")
/**
* Returns the current value of this [[Tunable]] as an `Option`.
* If the [[Tunable]] has a value, returns `Some(value)`. Otherwise, returns `None`.
*/
def apply(): Option[T]
override def toString: String =
s"Tunable($id)"
/**
* Compose this [[Tunable]] with another [[Tunable]]. Application
* of the returned [[Tunable]] will return the result of applying this [[Tunable]]
* if it is defined, and the result of applying the other [[Tunable]] if not.
*
* @note the returned [[Tunable]] will have the `id` of this [[Tunable]]
*/
def orElse(that: Tunable[T]): Tunable[T] =
new Tunable[T](id) {
def asVar: Var[Option[T]] = self.asVar.flatMap {
case Some(_) => self.asVar
case None => that.asVar
}
override def toString: String =
s"${self.toString}.orElse(${that.toString})"
def apply(): Option[T] =
self().orElse(that())
}
/**
* Returns a [[Tunable]] containing the result of applying `f` to this [[Tunable]]'s value.
*
* @note the returned [[Tunable]] will have the `id` of this [[Tunable]]
*/
def map[A](f: T => A): Tunable[A] =
new Tunable[A](id) {
def asVar: Var[Option[A]] = self.asVar.map(_.map(f))
def apply(): Option[A] = self().map(f)
}
}
object Tunable {
/**
* A [[Tunable]] that always returns `value` when applied.
*/
final class Const[T](id: String, value: T) extends Tunable[T](id) {
private val holder = Var.value(Some(value))
def apply(): Option[T] = holder()
def asVar: Var[Option[T]] = holder
}
object Const {
def unapply[T](tunable: Tunable[T]): Option[T] = tunable match {
case tunable: Tunable.Const[T] => tunable.holder()
case _ => None
}
}
/**
* Create a new [[Tunable.Const]] with id `id` and value `value`.
*/
def const[T](id: String, value: T): Const[T] = new Const(id, value)
private[this] val NoneTunable = new Tunable[Any]("com.twitter.util.tunable.NoneTunable") {
def apply(): Option[Any] =
None
val asVar: Var[Option[Any]] = Var.value(None)
}
/**
* Returns a [[Tunable]] that always returns `None` when applied.
*/
def none[T]: Tunable[T] =
NoneTunable.asInstanceOf[Tunable[T]]
/**
* A [[Tunable]] whose value can be changed. Operations are thread-safe.
*/
final class Mutable[T] private[tunable] (id: String, _value: Option[T]) extends Tunable[T](id) {
private[this] final val mutableHolder = Var(_value)
def asVar: Var[Option[T]] = mutableHolder
/**
* Set the value of the [[Tunable.Mutable]].
*
* Note that setting the value to `null` will result in a value of Some(null) when the
* [[Tunable]] is applied.
*/
def set(value: T): Unit = mutableHolder.update(Some(value))
/**
* Clear the value of the [[Tunable.Mutable]]. Calling `apply` on the [[Tunable.Mutable]]
* will produce `None`.
*/
def clear(): Unit = mutableHolder.update(None)
/**
* Get the current value of the [[Tunable.Mutable]]
*/
def apply(): Option[T] = mutableHolder()
}
/**
* Create a new [[Tunable.Mutable]] with id `id` and initial value `initialValue`.
*/
def mutable[T](id: String, initialValue: T): Mutable[T] =
new Mutable(id, Some(initialValue))
/**
* Create a [[Tunable.Mutable]] without an initial value
*/
def emptyMutable[T](id: String): Mutable[T] =
new Mutable(id, None)
}
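A hedged usage sketch of the API above (ids and values are illustrative):

```scala
import com.twitter.util.tunable.Tunable

object TunableExample extends App {
  val timeoutMs = Tunable.emptyMutable[Int]("example.request.timeoutMs")
  val withDefault = timeoutMs.orElse(Tunable.const("example.request.timeoutMs.default", 200))

  println(withDefault())                    // Some(200): the mutable Tunable is empty, so the constant wins
  timeoutMs.set(500)
  println(withDefault())                    // Some(500)
  println(withDefault.map(_ * 2).apply())   // Some(1000)
  timeoutMs.clear()
  println(withDefault())                    // Some(200) again
}
```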
| twitter/util | util-tunable/src/main/scala/com/twitter/util/tunable/Tunable.scala | Scala | apache-2.0 | 4,414 |
/**
* Copyright 2011-2013 StackMob
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stackmob.customcode.dev
package server
package sdk
package cache
import com.stackmob.sdkapi.caching.{Operation, CachingService}
import java.util.concurrent.{TimeUnit, ConcurrentHashMap}
import com.stackmob.sdkapi.caching.exceptions.{RateLimitedException, TTLTooBigException, DataSizeException, TimeoutException}
import scalaz.{Validation, Success, Failure}
import java.lang.{Boolean => JBoolean}
import com.twitter.util.Duration
import simulator.{ThrowableFrequency, Frequency, ErrorSimulator}
import CachingServiceImpl._
class CachingServiceImpl(getRateLimitThrowableFreq: ThrowableFrequency = DefaultGetRateLimitThrowableFreq,
setRateLimitThrowableFreq: ThrowableFrequency = DefaultSetRateLimitThrowableFreq,
getTimeoutThrowableFreq: ThrowableFrequency = DefaultGetTimeoutThrowableFreq,
setTimeoutThrowableFreq: ThrowableFrequency = DefaultSetTimeoutThrowableFreq)
extends CachingService {
private val maxKeySizeBytes = 1025 //1kb
private val maxValueSizeBytes = 16384 //16kb
private val maxSize = 1000
private type Value = (Array[Byte], Long)
private val cache = new ConcurrentHashMap[String, Value]()
case class CacheTooBigException() extends Exception("the cache is too big")
case class NoSuchKeyException(key: String) extends Exception("no such key %s".format(key))
case class NeedsRemovalException(key: String) extends Exception("%s needs to be removed from the cache".format(key))
private def checkCacheSize: Validation[CacheTooBigException, Unit] = {
if(cache.size > maxSize) {
Failure(CacheTooBigException())
} else {
Success(())
}
}
private def checkKeySize(operation: Operation, key: String): Validation[DataSizeException, Unit] = {
if(key.length > maxKeySizeBytes) {
Failure(new DataSizeException(operation))
} else {
Success(())
}
}
private def checkValueSize(operation: Operation, value: Array[Byte]): Validation[DataSizeException, Unit] = {
if (value.size > maxValueSizeBytes) {
Failure(new DataSizeException(operation))
} else {
Success(())
}
}
private def checkKeyNeedsRemoval(key: String, ttl: Long): Validation[NeedsRemovalException, Unit] = {
if(ttl > System.currentTimeMillis()) {
Failure(NeedsRemovalException(key))
} else {
Success(())
}
}
private def optionToValidation[FailType, SuccessType](mbSuccess: Option[SuccessType],
fail: FailType): Validation[FailType, SuccessType] = {
mbSuccess.map { success =>
Success[FailType, SuccessType](success)
}.getOrElse {
Failure[FailType, SuccessType](fail)
}
}
@throws(classOf[TimeoutException])
@throws(classOf[RateLimitedException])
@throws(classOf[DataSizeException])
override def getBytes(key: String): Array[Byte] = {
cache.synchronized {
ErrorSimulator(getRateLimitThrowableFreq :: getTimeoutThrowableFreq :: Nil) {
val v = for {
_ <- checkKeySize(Operation.GET, key)
value <- optionToValidation(Option(cache.get(key)), NoSuchKeyException(key))
_ <- checkKeyNeedsRemoval(key, value._2)
_ <- checkValueSize(Operation.SET, value._1)
} yield {
value
}
v.map { value =>
value._1
} valueOr {
case _: NoSuchKeyException => {
null: Array[Byte]
}
case _: NeedsRemovalException => {
cache.remove(key)
null: Array[Byte]
}
case otherEx: Throwable => {
(throw otherEx): Array[Byte]
}
}
}
}
}
@throws(classOf[TimeoutException])
@throws(classOf[RateLimitedException])
@throws(classOf[DataSizeException])
@throws(classOf[TTLTooBigException])
override def setBytes(key: String, value: Array[Byte], ttlMilliseconds: Long): JBoolean = {
cache.synchronized {
ErrorSimulator(setRateLimitThrowableFreq :: setTimeoutThrowableFreq :: Nil) {
val v = for {
_ <- checkKeySize(Operation.SET, key)
_ <- checkCacheSize
_ <- checkValueSize(Operation.SET, value)
expTime <- Success(System.currentTimeMillis() + ttlMilliseconds)
_ <- Success(cache.put(key, value -> expTime))
} yield {
()
}
v.map { _ =>
true: JBoolean
} valueOr {
case t: CacheTooBigException => false
case t => throw t
}
}
}
}
@throws(classOf[DataSizeException])
override def deleteEventually(key: String) {
cache.synchronized {
val v = for {
_ <- checkKeySize(Operation.DELETE, key)
_ <- Success(cache.remove(key))
} yield {
()
}
v getOrElse { t: Throwable =>
throw t
}
}
}
}
object CachingServiceImpl {
val DefaultGetRateLimitThrowableFreq = ThrowableFrequency(new RateLimitedException(Operation.GET), Frequency(1, Duration(1, TimeUnit.MINUTES)))
val DefaultSetRateLimitThrowableFreq = ThrowableFrequency(new RateLimitedException(Operation.SET), Frequency(1, Duration(1, TimeUnit.MINUTES)))
val DefaultGetTimeoutThrowableFreq = ThrowableFrequency(new TimeoutException(Operation.GET), Frequency(1, Duration(1, TimeUnit.MINUTES)))
val DefaultSetTimeoutThrowableFreq = ThrowableFrequency(new TimeoutException(Operation.SET), Frequency(1, Duration(1, TimeUnit.MINUTES)))
}
| matthewfarwell/stackmob-customcode-dev | src/main/scala/com/stackmob/customcode/dev/server/sdk/cache/CachingServiceImpl.scala | Scala | apache-2.0 | 6,090 |
/* RetryPolicySpec.scala
*
* Copyright (c) 2013-2014 linkedin.com
* Copyright (c) 2013-2015 zman.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package atmos
import rummage.Clock
import scala.concurrent.{ ExecutionContext, Future, Await }
import scala.concurrent.duration._
import scala.util.{ Failure, Success, Try }
import org.scalatest._
import org.scalamock.scalatest.MockFactory
/**
* Test suite for [[atmos.RetryPolicy]].
*/
class RetryPolicySpec extends FlatSpec with Matchers with MockFactory {
import ExecutionContext.Implicits._
import termination._
import dsl._
"RetryPolicy" should "synchronously retry until complete" in {
implicit val policy = RetryPolicy(LimitAttempts(2))
var counter = 0
retry() {
counter += 1
if (counter < 2) throw new TestException
counter
} shouldEqual 2
counter shouldEqual 2
}
it should "synchronously retry and swallow silent errors" in {
val mockMonitor = mock[EventMonitor]
implicit val policy = RetryPolicy(LimitAttempts(2), monitor = mockMonitor, classifier = {
case _: TestException => ErrorClassification.SilentlyRecoverable
})
val e = new TestException
(mockMonitor.retrying(_: Option[String], _: Try[Any], _: Int, _: FiniteDuration, _: Boolean)).expects(None, Failure(e), 1, *, true)
var counter = 0
retry() {
counter += 1
if (counter < 2) throw e
counter
} shouldEqual 2
counter shouldEqual 2
}
it should "synchronously retry until signaled to terminate" in {
implicit val policy = RetryPolicy(LimitAttempts(2))
var counter = 0
a[TestException] should be thrownBy {
retry("test") {
counter += 1
throw new TestException
}
}
counter shouldEqual 2
}
it should "synchronously retry until encountering a fatal error" in {
implicit val policy = RetryPolicy(LimitAttempts(2), classifier = {
case _: TestException => ErrorClassification.Fatal
})
var counter = 0
a[TestException] should be thrownBy {
retry(None) {
counter += 1
throw new TestException
}
}
counter shouldEqual 1
}
it should "synchronously block between retry attempts" in {
implicit val policy = RetryPolicy(LimitAttempts(2), backoff.ConstantBackoff(1.second))
val startAt = System.currentTimeMillis
var counter = 0
retry(Some("test")) {
counter += 1
if (counter < 2) throw new TestException
counter
} shouldEqual 2
counter shouldEqual 2
(System.currentTimeMillis - startAt).millis should be >= 1.second
}
it should "retry when unacceptable results are returned" in {
implicit val policy = RetryPolicy(LimitAttempts(2), results = ResultClassifier {
case i: Int if i < 2 => ResultClassification.Unacceptable()
})
var counter = 0
retry(None) {
counter += 1
counter
} shouldEqual 2
counter shouldEqual 2
}
it should "be interrupted by fatal errors by default" in {
implicit val policy = RetryPolicy(LimitAttempts(2))
var counter = 0
an[InterruptedException] should be thrownBy retry(None) {
counter += 1
throw new InterruptedException
}
counter shouldEqual 1
}
it should "asynchronously retry until complete" in {
implicit val policy = RetryPolicy(LimitAttempts(3))
@volatile var counter = 0
val future = retryAsync() {
if (counter == 0) {
counter += 1
throw new TestException
}
Future {
counter += 1
if (counter < 3) throw new TestException
counter
}
}
Await.result(future, Duration.Inf) shouldEqual 3
counter shouldEqual 3
}
it should "asynchronously retry and swallow silent errors" in {
val mockMonitor = mock[EventMonitor]
implicit val policy = RetryPolicy(LimitAttempts(3), monitor = mockMonitor, classifier = {
case _: TestException => ErrorClassification.SilentlyRecoverable
})
val e = new TestException
(mockMonitor.retrying(_: Option[String], _: Try[Any], _: Int, _: FiniteDuration, _: Boolean)).expects(None, Failure(e), 1, *, true)
(mockMonitor.retrying(_: Option[String], _: Try[Any], _: Int, _: FiniteDuration, _: Boolean)).expects(None, Failure(e), 2, *, true)
@volatile var counter = 0
val future = retryAsync() {
if (counter == 0) {
counter += 1
throw e
}
Future {
counter += 1
if (counter < 3) throw e
counter
}
}
Await.result(future, Duration.Inf) shouldEqual 3
counter shouldEqual 3
}
it should "asynchronously retry until signaled to terminate" in {
implicit val policy = RetryPolicy(LimitAttempts(2))
@volatile var counter = 0
val future = retryAsync("test") {
Future {
counter += 1
if (counter <= 2) throw new TestException
counter
}
}
a[TestException] should be thrownBy { Await.result(future, Duration.Inf) }
counter shouldEqual 2
}
it should "asynchronously retry until encountering a fatal error" in {
implicit val policy = RetryPolicy(LimitAttempts(1), classifier = {
case _: TestException => ErrorClassification.Fatal
}) retryFor { 2 attempts }
@volatile var counter = 0
val future = retryAsync(None) {
Future {
counter += 1
if (counter < 2) throw new TestException
counter
}
}
a[TestException] should be thrownBy { Await.result(future, Duration.Inf) }
counter shouldEqual 1
}
it should "asynchronously block between retry attempts" in {
implicit val policy = RetryPolicy(1.minute || 2.attempts, backoff.ConstantBackoff(1.second))
val startAt = System.currentTimeMillis
@volatile var counter = 0
val future = retryAsync(Some("test")) {
Future {
counter += 1
if (counter < 2) throw new TestException
counter
}
}
Await.result(future, Duration.Inf) shouldEqual 2
counter shouldEqual 2
(System.currentTimeMillis - startAt).millis should be >= 1.second
}
it should "predictably terminate in the presence of asynchronous concurrency errors" in {
implicit val policy = RetryPolicy(LimitAttempts(2), backoff.ConstantBackoff(1.second))
@volatile var limit = 1
@volatile var counter = 0
val mockFuture = new Future[Int] {
def value = ???
def isCompleted = ???
def ready(atMost: Duration)(implicit permit: scala.concurrent.CanAwait) = ???
def result(atMost: Duration)(implicit permit: scala.concurrent.CanAwait) = ???
def onComplete[U](f: Try[Int] => U)(implicit executor: ExecutionContext) = {
counter += 1
if (counter >= limit)
throw new TestException
else
executor.execute(new Runnable { override def run() = { f(Failure(new TestException)) } })
}
}
val future1 = retryAsync() { mockFuture }
a[TestException] should be thrownBy Await.result(future1, Duration.Inf)
limit = 2
counter = 0
val future2 = retryAsync("test") { mockFuture }
a[TestException] should be thrownBy Await.result(future2, Duration.Inf)
}
it should "predictably terminate in the presence of asynchronous clock errors" in {
implicit val policy = RetryPolicy(LimitAttempts(3), backoff.ConstantBackoff(1.second))
@volatile var limit = 1
@volatile var counter = 0
implicit val mockClock = new Clock {
def now: FiniteDuration = Clock.Default.now
def tick: FiniteDuration = Clock.Default.tick
def syncWait(timeout: FiniteDuration): FiniteDuration = Clock.Default.syncWait(timeout)
def asyncWait(timeout: FiniteDuration)(implicit ec: ExecutionContext): Future[FiniteDuration] = {
counter += 1
if (counter >= limit)
throw new TestException
else
Future.failed(new TestException)
}
}
val future1 = retryAsync() { Future { throw new RuntimeException } }
a[TestException] should be thrownBy Await.result(future1, Duration.Inf)
limit = 2
counter = 0
val future2 = retryAsync("test") { Future { throw new RuntimeException } }
a[TestException] should be thrownBy Await.result(future2, Duration.Inf)
}
private class TestException extends RuntimeException
}
| zmanio/atmos | src/test/scala/atmos/RetryPolicySpec.scala | Scala | apache-2.0 | 8,806 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.util
import java.net.URI
import java.util.UUID
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.spotify.scio.ScioContext
import org.apache.beam.sdk.extensions.gcp.options.GcpOptions
import org.apache.beam.sdk.extensions.gcp.util.Transport
import org.apache.beam.sdk.{PipelineResult, PipelineRunner}
import org.slf4j.LoggerFactory
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}
private[scio] object ScioUtil {
@transient private lazy val log = LoggerFactory.getLogger(this.getClass)
@transient lazy val jsonFactory = Transport.getJsonFactory
def isLocalUri(uri: URI): Boolean =
uri.getScheme == null || uri.getScheme == "file"
def isRemoteUri(uri: URI): Boolean = !isLocalUri(uri)
def isLocalRunner(runner: Class[_ <: PipelineRunner[_ <: PipelineResult]]): Boolean = {
require(runner != null, "Pipeline runner not set!")
// FIXME: cover Flink, Spark, etc. in local mode
runner.getName == "org.apache.beam.runners.direct.DirectRunner"
}
def isRemoteRunner(runner: Class[_ <: PipelineRunner[_ <: PipelineResult]]): Boolean =
!isLocalRunner(runner)
def classOf[T: ClassTag]: Class[T] =
implicitly[ClassTag[T]].runtimeClass.asInstanceOf[Class[T]]
def getScalaJsonMapper: ObjectMapper =
new ObjectMapper().registerModule(DefaultScalaModule)
def addPartSuffix(path: String, ext: String = ""): String =
if (path.endsWith("/")) s"${path}part-*$ext" else s"$path/part-*$ext"
def getTempFile(context: ScioContext, fileOrPath: String = null): String = {
val fop = Option(fileOrPath).getOrElse("scio-materialize-" + UUID.randomUUID().toString)
val uri = URI.create(fop)
if ((ScioUtil.isLocalUri(uri) && uri.toString.startsWith("/")) || uri.isAbsolute) {
fop
} else {
val filename = fop
val tmpDir = if (context.options.getTempLocation != null) {
context.options.getTempLocation
} else {
val m =
"Specify a temporary location via --tempLocation or PipelineOptions.setTempLocation."
Try(context.optionsAs[GcpOptions].getGcpTempLocation) match {
case Success(l) =>
log.warn(
"Using GCP temporary location as a temporary location to materialize data. " + m
)
l
case Failure(_) =>
throw new IllegalArgumentException("No temporary location was specified. " + m)
}
}
tmpDir + (if (tmpDir.endsWith("/")) "" else "/") + filename
}
}
def pathWithShards(path: String): String =
path.replaceAll("\\\\/+$", "") + "/part"
}
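A small worked example of the two path helpers above (the bucket name is illustrative; the object is `private[scio]`, so callers live inside the `com.spotify.scio` package):

```scala
ScioUtil.addPartSuffix("gs://my-bucket/output")           // "gs://my-bucket/output/part-*"
ScioUtil.addPartSuffix("gs://my-bucket/output/", ".txt")  // "gs://my-bucket/output/part-*.txt"
ScioUtil.pathWithShards("gs://my-bucket/output///")       // "gs://my-bucket/output/part"
```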
| regadas/scio | scio-core/src/main/scala/com/spotify/scio/util/ScioUtil.scala | Scala | apache-2.0 | 3,274 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.util.IdentityHashMap
import com.github.benmanes.caffeine.cache.{CacheLoader, Caffeine, LoadingCache}
import com.google.common.util.concurrent.{ExecutionError, UncheckedExecutionException}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode}
import org.apache.spark.sql.errors.QueryExecutionErrors
import org.apache.spark.sql.types.DataType
/**
* This class helps subexpression elimination for interpreted evaluation
* such as `InterpretedUnsafeProjection`. It maintains an evaluation cache.
* This class wraps `ExpressionProxy` around given expressions. The `ExpressionProxy`
* intercepts expression evaluation and loads from the cache first.
*/
class SubExprEvaluationRuntime(cacheMaxEntries: Int) {
// The id assigned to `ExpressionProxy`. `SubExprEvaluationRuntime` will use assigned ids of
// `ExpressionProxy` to decide the equality when loading from cache. `SubExprEvaluationRuntime`
// won't be used by multiple threads, so we don't need to consider concurrency here.
private var proxyExpressionCurrentId = 0
private[sql] val cache: LoadingCache[ExpressionProxy, ResultProxy] =
Caffeine.newBuilder().maximumSize(cacheMaxEntries)
// SPARK-34309: Use a custom Executor to be compatible with
// the data eviction behavior of Guava cache
.executor((command: Runnable) => command.run())
.build[ExpressionProxy, ResultProxy](
new CacheLoader[ExpressionProxy, ResultProxy]() {
override def load(expr: ExpressionProxy): ResultProxy = {
ResultProxy(expr.proxyEval(currentInput))
}
})
private var currentInput: InternalRow = null
def getEval(proxy: ExpressionProxy): Any = try {
cache.get(proxy).result
} catch {
// Cache.get() may wrap the original exception. See the following URL
// http://google.github.io/guava/releases/14.0/api/docs/com/google/common/cache/
// Cache.html#get(K,%20java.util.concurrent.Callable)
case e @ (_: UncheckedExecutionException | _: ExecutionError) =>
throw e.getCause
}
/**
* Sets given input row as current row for evaluating expressions. This cleans up the cache
* too as new input comes.
*/
def setInput(input: InternalRow = null): Unit = {
currentInput = input
cache.invalidateAll()
}
/**
* Recursively replaces expression with its proxy expression in `proxyMap`.
*/
private def replaceWithProxy(
expr: Expression,
equivalentExpressions: EquivalentExpressions,
proxyMap: IdentityHashMap[Expression, ExpressionProxy]): Expression = {
equivalentExpressions.getExprState(expr) match {
case Some(stats) if proxyMap.containsKey(stats.expr) => proxyMap.get(stats.expr)
case _ => expr.mapChildren(replaceWithProxy(_, equivalentExpressions, proxyMap))
}
}
/**
* Finds subexpressions and wraps them with `ExpressionProxy`.
*/
def proxyExpressions(expressions: Seq[Expression]): Seq[Expression] = {
val equivalentExpressions: EquivalentExpressions = new EquivalentExpressions
expressions.foreach(equivalentExpressions.addExprTree(_))
val proxyMap = new IdentityHashMap[Expression, ExpressionProxy]
val commonExprs = equivalentExpressions.getCommonSubexpressions
commonExprs.foreach { expr =>
val proxy = ExpressionProxy(expr, proxyExpressionCurrentId, this)
proxyExpressionCurrentId += 1
// We leverage `IdentityHashMap` so we compare expression keys by reference here.
// So for example, if there is one group of common exprs like Seq(common expr 1,
// common expr 2, ..., common expr n), we will insert into `proxyMap` some key/value
// pairs like Map(common expr 1 -> proxy(common expr 1), ...,
// common expr n -> proxy(common expr 1)).
proxyMap.put(expr, proxy)
}
// Only adding proxy if we find subexpressions.
if (!proxyMap.isEmpty) {
expressions.map(replaceWithProxy(_, equivalentExpressions, proxyMap))
} else {
expressions
}
}
}
/**
* A proxy for a Catalyst `Expression`. Given a runtime object `SubExprEvaluationRuntime`,
* when this is asked to evaluate, it will load from the evaluation cache in the runtime first.
*/
case class ExpressionProxy(
child: Expression,
id: Int,
runtime: SubExprEvaluationRuntime) extends UnaryExpression {
final override def dataType: DataType = child.dataType
final override def nullable: Boolean = child.nullable
// `ExpressionProxy` is for interpreted expression evaluation only. So cannot `doGenCode`.
final override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode =
throw QueryExecutionErrors.cannotGenerateCodeForExpressionError(this)
def proxyEval(input: InternalRow = null): Any = child.eval(input)
override def eval(input: InternalRow = null): Any = runtime.getEval(this)
override def equals(obj: Any): Boolean = obj match {
case other: ExpressionProxy => this.id == other.id
case _ => false
}
override def hashCode(): Int = this.id.hashCode()
override protected def withNewChildInternal(newChild: Expression): ExpressionProxy =
copy(child = newChild)
}
/**
* A simple wrapper for holding `Any` in the cache of `SubExprEvaluationRuntime`.
*/
case class ResultProxy(result: Any)
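A hedged sketch of driving the runtime above from Catalyst; the expressions are chosen for illustration and assume the standard `Literal`/`Add` expressions and `InternalRow.empty`:

```scala
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Add, Literal}

val runtime = new SubExprEvaluationRuntime(cacheMaxEntries = 16)
val shared  = Add(Literal(1), Literal(2))   // appears in both expression trees below
val proxied = runtime.proxyExpressions(
  Seq(Add(shared, Literal(3)), Add(shared, Literal(4))))

runtime.setInput(InternalRow.empty)         // sets the current row and clears the cache
proxied.map(_.eval(InternalRow.empty))      // Seq(6, 7); the shared subtree should be evaluated
                                            // once and then served from the cache
```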
| jiangxb1987/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SubExprEvaluationRuntime.scala | Scala | apache-2.0 | 6,195 |
package com.wix.fax.phaxio.model
/**
* @see <a href="http://www.phaxio.com/docs/api/send/sendFax/">Send a fax</a>
*/
case class SendResponse(success: Boolean,
message: String,
faxId: Option[Long] = None,
data: Option[FaxInfo] = None)
| wix/libfax | libfax-phaxio-core/src/main/scala/com/wix/fax/phaxio/model/SendResponse.scala | Scala | apache-2.0 | 316 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import org.apache.spark.sql.{QueryTest, Row, SaveMode}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.CatalogTableType
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.catalyst.plans.logical.SubqueryAlias
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.{ExamplePointUDT, SQLTestUtils}
import org.apache.spark.sql.types._
class HiveMetastoreCatalogSuite extends TestHiveSingleton with SQLTestUtils {
import spark.implicits._
test("struct field should accept underscore in sub-column name") {
val hiveTypeStr = "struct<a: int, b_1: string, c: string>"
val dataType = CatalystSqlParser.parseDataType(hiveTypeStr)
assert(dataType.isInstanceOf[StructType])
}
test("udt to metastore type conversion") {
val udt = new ExamplePointUDT
assertResult(udt.sqlType.catalogString) {
udt.catalogString
}
}
test("duplicated metastore relations") {
val df = spark.sql("SELECT * FROM src")
logInfo(df.queryExecution.toString)
df.as('a).join(df.as('b), $"a.key" === $"b.key")
}
test("should not truncate struct type catalog string") {
def field(n: Int): StructField = {
StructField("col" + n, StringType)
}
val dataType = StructType((1 to 100).map(field))
assert(CatalystSqlParser.parseDataType(dataType.catalogString) == dataType)
}
test("view relation") {
withView("vw1") {
spark.sql("create view vw1 as select 1 as id")
val plan = spark.sql("select id from vw1").queryExecution.analyzed
val aliases = plan.collect {
case x @ SubqueryAlias("vw1", _) => x
}
assert(aliases.size == 1)
}
}
test("Validate catalog metadata for supported data types") {
withTable("t") {
sql(
"""
|CREATE TABLE t (
|c1 boolean,
|c2 tinyint,
|c3 smallint,
|c4 short,
|c5 bigint,
|c6 long,
|c7 float,
|c8 double,
|c9 date,
|c10 timestamp,
|c11 string,
|c12 char(10),
|c13 varchar(10),
|c14 binary,
|c15 decimal,
|c16 decimal(10),
|c17 decimal(10,2),
|c18 array<string>,
|c19 array<int>,
|c20 array<char(10)>,
|c21 map<int,int>,
|c22 map<int,char(10)>,
|c23 struct<a:int,b:int>,
|c24 struct<c:varchar(10),d:int>
|)
""".stripMargin)
val schema = hiveClient.getTable("default", "t").schema
val expectedSchema = new StructType()
.add("c1", "boolean")
.add("c2", "tinyint")
.add("c3", "smallint")
.add("c4", "short")
.add("c5", "bigint")
.add("c6", "long")
.add("c7", "float")
.add("c8", "double")
.add("c9", "date")
.add("c10", "timestamp")
.add("c11", "string")
.add("c12", "string", true,
new MetadataBuilder().putString(HIVE_TYPE_STRING, "char(10)").build())
.add("c13", "string", true,
new MetadataBuilder().putString(HIVE_TYPE_STRING, "varchar(10)").build())
.add("c14", "binary")
.add("c15", "decimal")
.add("c16", "decimal(10)")
.add("c17", "decimal(10,2)")
.add("c18", "array<string>")
.add("c19", "array<int>")
.add("c20", "array<string>", true,
new MetadataBuilder().putString(HIVE_TYPE_STRING, "array<char(10)>").build())
.add("c21", "map<int,int>")
.add("c22", "map<int,string>", true,
new MetadataBuilder().putString(HIVE_TYPE_STRING, "map<int,char(10)>").build())
.add("c23", "struct<a:int,b:int>")
.add("c24", "struct<c:string,d:int>", true,
new MetadataBuilder().putString(HIVE_TYPE_STRING, "struct<c:varchar(10),d:int>").build())
assert(schema == expectedSchema)
}
}
}
class DataSourceWithHiveMetastoreCatalogSuite
extends QueryTest with SQLTestUtils with TestHiveSingleton {
import hiveContext._
import testImplicits._
private val testDF = range(1, 3).select(
('id + 0.1) cast DecimalType(10, 3) as 'd1,
'id cast StringType as 'd2
).coalesce(1)
Seq(
"parquet" -> ((
"org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat",
"org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat",
"org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"
)),
"orc" -> ((
"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat",
"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat",
"org.apache.hadoop.hive.ql.io.orc.OrcSerde"
))
).foreach { case (provider, (inputFormat, outputFormat, serde)) =>
test(s"Persist non-partitioned $provider relation into metastore as managed table") {
withTable("t") {
withSQLConf(SQLConf.PARQUET_WRITE_LEGACY_FORMAT.key -> "true") {
testDF
.write
.mode(SaveMode.Overwrite)
.format(provider)
.saveAsTable("t")
}
val hiveTable = sessionState.catalog.getTableMetadata(TableIdentifier("t", Some("default")))
assert(hiveTable.storage.inputFormat === Some(inputFormat))
assert(hiveTable.storage.outputFormat === Some(outputFormat))
assert(hiveTable.storage.serde === Some(serde))
assert(hiveTable.partitionColumnNames.isEmpty)
assert(hiveTable.tableType === CatalogTableType.MANAGED)
val columns = hiveTable.schema
assert(columns.map(_.name) === Seq("d1", "d2"))
assert(columns.map(_.dataType) === Seq(DecimalType(10, 3), StringType))
checkAnswer(table("t"), testDF)
assert(sparkSession.metadataHive.runSqlHive("SELECT * FROM t") === Seq("1.1\t1", "2.1\t2"))
}
}
test(s"Persist non-partitioned $provider relation into metastore as external table") {
withTempPath { dir =>
withTable("t") {
val path = dir.getCanonicalFile
withSQLConf(SQLConf.PARQUET_WRITE_LEGACY_FORMAT.key -> "true") {
testDF
.write
.mode(SaveMode.Overwrite)
.format(provider)
.option("path", path.toString)
.saveAsTable("t")
}
val hiveTable =
sessionState.catalog.getTableMetadata(TableIdentifier("t", Some("default")))
assert(hiveTable.storage.inputFormat === Some(inputFormat))
assert(hiveTable.storage.outputFormat === Some(outputFormat))
assert(hiveTable.storage.serde === Some(serde))
assert(hiveTable.tableType === CatalogTableType.EXTERNAL)
assert(hiveTable.storage.locationUri === Some(makeQualifiedPath(dir.getAbsolutePath)))
val columns = hiveTable.schema
assert(columns.map(_.name) === Seq("d1", "d2"))
assert(columns.map(_.dataType) === Seq(DecimalType(10, 3), StringType))
checkAnswer(table("t"), testDF)
assert(sparkSession.metadataHive.runSqlHive("SELECT * FROM t") ===
Seq("1.1\\t1", "2.1\\t2"))
}
}
}
test(s"Persist non-partitioned $provider relation into metastore as managed table using CTAS") {
withTempPath { dir =>
withTable("t") {
sql(
s"""CREATE TABLE t USING $provider
|OPTIONS (path '${dir.toURI}')
|AS SELECT 1 AS d1, "val_1" AS d2
""".stripMargin)
val hiveTable =
sessionState.catalog.getTableMetadata(TableIdentifier("t", Some("default")))
assert(hiveTable.storage.inputFormat === Some(inputFormat))
assert(hiveTable.storage.outputFormat === Some(outputFormat))
assert(hiveTable.storage.serde === Some(serde))
assert(hiveTable.partitionColumnNames.isEmpty)
assert(hiveTable.tableType === CatalogTableType.EXTERNAL)
val columns = hiveTable.schema
assert(columns.map(_.name) === Seq("d1", "d2"))
assert(columns.map(_.dataType) === Seq(IntegerType, StringType))
checkAnswer(table("t"), Row(1, "val_1"))
assert(sparkSession.metadataHive.runSqlHive("SELECT * FROM t") === Seq("1\tval_1"))
}
}
}
}
}
| minixalpha/spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala | Scala | apache-2.0 | 9,175 |
object Test {
val x = new InfixUsageWithTuple()
x foo (1 + 1, 33, true)
}
| ilinum/intellij-scala | testdata/changeSignature/fromJava/InfixUsageWithTuple_after.scala | Scala | apache-2.0 | 77 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package xml.pattern
import com.intellij.psi.xml.XmlTokenType
import org.jetbrains.plugins.scala.lang.lexer.ScalaXmlTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
/**
* @author Alexander Podkhalyuzin
* Date: 18.04.2008
*/
/*
* EmptyElemTagP ::= '<' Name [S]'/>'
*/
object EmptyElemTagP {
def parse(builder: ScalaPsiBuilder): Boolean = {
val tagMarker = builder.mark()
builder.getTokenType match {
case ScalaXmlTokenTypes.XML_START_TAG_START =>
builder.advanceLexer()
case _ =>
tagMarker.drop()
return false
}
builder.getTokenType match {
case ScalaXmlTokenTypes.XML_NAME =>
builder.advanceLexer()
case _ => builder error ErrMsg("xml.name.expected")
}
builder.getTokenType match {
case XmlTokenType.XML_WHITE_SPACE => builder.advanceLexer()
case _ =>
}
builder.getTokenType match {
case ScalaXmlTokenTypes.XML_EMPTY_ELEMENT_END =>
builder.advanceLexer()
tagMarker.done(ScalaElementTypes.XML_EMPTY_TAG)
true
case _ =>
tagMarker.rollbackTo()
false
}
}
}
| triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/parser/parsing/xml/pattern/EmptyElemTagP.scala | Scala | apache-2.0 | 1,238 |
package org.crudible.lift.util.decorators
import net.liftweb.http.js.JsCmd
import net.liftweb.http.js.JsCmds
import net.liftweb.http.SHtml
import org.crudible.lift.util.LabelCallback
case class YesActionDecorator(labelCallback: LabelCallback)
| rehei/crudible | crudible-lift/src/main/scala/org/crudible/lift/util/decorators/YesActionDecorator.scala | Scala | apache-2.0 | 244 |
package com.zobot.client.packet.definitions.serverbound.play
import com.zobot.client.packet.Packet
case class UseEntity(target: Int, propertyType: Int, targetX: Any, targetY: Any, targetZ: Any, hand: Any) extends Packet {
override lazy val packetId = 0x0A
override lazy val packetData: Array[Byte] =
fromVarInt(target) ++
fromVarInt(propertyType) ++
fromAny(targetX) ++
fromAny(targetY) ++
fromAny(targetZ) ++
fromAny(hand)
}
| BecauseNoReason/zobot | src/main/scala/com/zobot/client/packet/definitions/serverbound/play/UseEntity.scala | Scala | mit | 457 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
/**
* Executes a roll-up-style query against Apache logs.
*/
object LogQuery {
val exampleApacheLogs = List(
"""10.10.10.10 - "FRED" [18/Jan/2013:17:56:07 +1100] "GET http://images.com/2013/Generic.jpg
| HTTP/1.1" 304 315 "http://referall.com/" "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1;
| GTB7.4; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR
| 3.5.21022; .NET CLR 3.0.4506.2152; .NET CLR 1.0.3705; .NET CLR 1.1.4322; .NET CLR
| 3.5.30729; Release=ARP)" "UD-1" - "image/jpeg" "whatever" 0.350 "-" - "" 265 923 934 ""
| 62.24.11.25 images.com 1358492167 - Whatup""".stripMargin.lines.mkString,
"""10.10.10.10 - "FRED" [18/Jan/2013:18:02:37 +1100] "GET http://images.com/2013/Generic.jpg
| HTTP/1.1" 304 306 "http:/referall.com" "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1;
| GTB7.4; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR
| 3.5.21022; .NET CLR 3.0.4506.2152; .NET CLR 1.0.3705; .NET CLR 1.1.4322; .NET CLR
| 3.5.30729; Release=ARP)" "UD-1" - "image/jpeg" "whatever" 0.352 "-" - "" 256 977 988 ""
| 0 73.23.2.15 images.com 1358492557 - Whatup""".stripMargin.lines.mkString
)
def main(args: Array[String]) {
if (args.length == 0) {
System.err.println("Usage: LogQuery <master> [logFile]")
System.exit(1)
}
val sc = new SparkContext(args(0), "Log Query",
System.getenv("SPARK_HOME"), SparkContext.jarOfClass(this.getClass))
val dataSet =
if (args.length == 2) sc.textFile(args(1))
else sc.parallelize(exampleApacheLogs)
val apacheLogRegex =
"""^([\\d.]+) (\\S+) (\\S+) \\[([\\w\\d:/]+\\s[+\\-]\\d{4})\\] "(.+?)" (\\d{3}) ([\\d\\-]+) "([^"]+)" "([^"]+)".*""".r
/** Tracks the total query count and number of aggregate bytes for a particular group. */
class Stats(val count: Int, val numBytes: Int) extends Serializable {
def merge(other: Stats) = new Stats(count + other.count, numBytes + other.numBytes)
override def toString = "bytes=%s\tn=%s".format(numBytes, count)
}
def extractKey(line: String): (String, String, String) = {
apacheLogRegex.findFirstIn(line) match {
case Some(apacheLogRegex(ip, _, user, dateTime, query, status, bytes, referer, ua)) =>
if (user != "\\"-\\"") (ip, user, query)
else (null, null, null)
case _ => (null, null, null)
}
}
def extractStats(line: String): Stats = {
apacheLogRegex.findFirstIn(line) match {
case Some(apacheLogRegex(ip, _, user, dateTime, query, status, bytes, referer, ua)) =>
new Stats(1, bytes.toInt)
case _ => new Stats(1, 0)
}
}
dataSet.map(line => (extractKey(line), extractStats(line)))
.reduceByKey((a, b) => a.merge(b))
.collect().foreach{
case (user, query) => println("%s\t%s".format(user, query))}
}
}
| dotunolafunmiloye/spark | examples/src/main/scala/org/apache/spark/examples/LogQuery.scala | Scala | apache-2.0 | 3,832 |
package fi.proweb.train.helper
object HeadingConverter {
def headingToString(heading: Int): String = {
val directions: List[String] = List("N", "NE", "E", "SE", "S", "SW", "W", "NW", "N")
directions((Math.round((22 + heading.toDouble) % 360) / 45).toInt)
}
}
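Worked examples of the arithmetic above: add 22, wrap at 360, then integer-divide by 45 to index the direction list:

```scala
HeadingConverter.headingToString(0)    // "N"   -> (22 + 0)   % 360 = 22,  22 / 45 = 0
HeadingConverter.headingToString(45)   // "NE"  -> (22 + 45)  % 360 = 67,  67 / 45 = 1
HeadingConverter.headingToString(200)  // "S"   -> (22 + 200) % 360 = 222, 222 / 45 = 4
HeadingConverter.headingToString(350)  // "N"   -> (22 + 350) % 360 = 12,  12 / 45 = 0
```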
| roikonen/MissaJuna | app/fi/proweb/train/helper/HeadingConverter.scala | Scala | apache-2.0 | 275 |
package scredis.protocol
import org.scalatest._
import org.scalatest.concurrent._
import akka.util.ByteString
import scredis.PubSubMessage
class ProtocolSpec extends WordSpec
with GivenWhenThen
with BeforeAndAfterAll
with Matchers {
private val ArrayString = "*5\r\n-error\r\n+simple\r\n:1000\r\n:-1000\r\n$3\r\nlol\r\n"
private val Error = ByteString("-error\r\n")
private val SimpleString = ByteString("+simple\r\n")
private val PositiveInteger = ByteString(":1000\r\n")
private val NegativeInteger = ByteString(":-1000\r\n")
private val BulkString = ByteString("$3\r\nlol\r\n")
private val Array = ByteString(ArrayString)
private val All = Error ++
SimpleString ++
PositiveInteger ++
NegativeInteger ++
BulkString ++
Array
"count" when {
"receiving different types of response" should {
"correctly count them" in {
val buffer = All.toByteBuffer
Protocol.count(buffer) should be (6)
buffer.remaining should be (0)
val buffer2 = ByteString("*3\r\n$12\r\npunsubscribe\r\n$-1\r\n:0\r\n").toByteBuffer
Protocol.count(buffer2) should be (1)
buffer2.remaining should be (0)
}
}
"receiving different types of responses with nested arrays" should {
"correctly count them" in {
val nestedArrays = All ++ ByteString(
s"*3\\r\\n*2\\r\\n$ArrayString$ArrayString*2\\r\\n$ArrayString-error\\r\\n$$3\\r\\nlol\\r\\n"
)
val buffer = nestedArrays.toByteBuffer
Protocol.count(buffer) should be (7)
buffer.remaining should be (0)
}
}
"receiving fragmented responses" should {
"count them up to the last full response" in {
var fragmented = ByteString("").toByteBuffer
Protocol.count(fragmented) should be (0)
fragmented.remaining should be (0)
fragmented = ByteString("-").toByteBuffer
Protocol.count(fragmented) should be (0)
fragmented.position should be (0)
fragmented = ByteString("-error").toByteBuffer
Protocol.count(fragmented) should be (0)
fragmented.position should be (0)
fragmented = ByteString("-error\\r").toByteBuffer
Protocol.count(fragmented) should be (0)
fragmented.position should be (0)
fragmented = ByteString("-error\\r\\n+hello").toByteBuffer
Protocol.count(fragmented) should be (1)
fragmented.position should be (8)
fragmented = ByteString(
"*5\\r\\n-error\\r\\n+simple\\r\\n:1000\\r\\n:-1000\\r\\n$3\\r\\nlol\\r"
).toByteBuffer
Protocol.count(fragmented) should be (0)
fragmented.position should be (0)
fragmented = ByteString(
s"$ArrayString*3\\r\\n*2\\r\\n$ArrayString$ArrayString*2\\r\\n$ArrayString-error\\r\\n$$3\\r\\n"
).toByteBuffer
Protocol.count(fragmented) should be (1)
fragmented.position should be (ArrayString.size)
}
}
}
"decode" should {
"succeed" in {
val buffer = ByteString("*3\\r\\n$12\\r\\npunsubscribe\\r\\n$-1\\r\\n:0\\r\\n").toByteBuffer
val response = Protocol.decode(buffer)
response shouldBe an [ArrayResponse]
val arrayResponse = response.asInstanceOf[ArrayResponse]
arrayResponse.length should be (3)
}
}
}
| Livestream/scredis | src/test/scala/scredis/protocol/ProtocolSpec.scala | Scala | apache-2.0 | 3,320 |
/*^
===========================================================================
TwoBinManager
===========================================================================
Copyright (C) 2016-2017 Gianluca Costa
===========================================================================
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program. If not, see
<http://www.gnu.org/licenses/gpl-3.0.html>.
===========================================================================
*/
package info.gianlucacosta.twobinmanager.db
import java.util.UUID
import javax.persistence.RollbackException
import info.gianlucacosta.helios.jpa.Includes._
import info.gianlucacosta.twobinmanager.db.DbConversions._
import info.gianlucacosta.twobinpack.test.SimpleTestData.{ProblemA, ProblemB}
class TestDbProblemRepository extends DbTestBase {
"Retrieving a missing problem by id" should "return None" in {
val retrievedProblem =
problemRepository.findById(UUID.randomUUID())
retrievedProblem should be(None)
}
"Retrieving a saved problem by id" should "return it correctly" in {
entityManagerFactory.runTransaction(entityManager => {
val problemEntity: ProblemEntity =
ProblemA
entityManager.persist(problemEntity)
})
val retrievedProblem =
problemRepository.findById(ProblemA.id)
retrievedProblem should be(Some(ProblemA))
}
"Retrieving a missing problem by name" should "return None" in {
val retrievedProblem =
problemRepository.findByName("MISSING")
retrievedProblem should be(None)
}
"Retrieving a problem by name" should "return it correctly" in {
entityManagerFactory.runTransaction(entityManager => {
val problemEntity: ProblemEntity =
ProblemA
entityManager.persist(problemEntity)
})
val retrievedProblem =
problemRepository.findByName(ProblemA.name)
retrievedProblem should be(Some(ProblemA))
}
"Retrieving all sorted names in an empty db" should "return an empty list" in {
val retrievedNames =
problemRepository.findAllNamesSorted()
retrievedNames should be(List())
}
"Retrieving all sorted names in a db with saved problems" should "work correctly" in {
entityManagerFactory.runTransaction(entityManager => {
val problemEntityA: ProblemEntity =
ProblemA
val problemEntityB: ProblemEntity =
ProblemB
entityManager.persist(problemEntityA)
entityManager.persist(problemEntityB)
})
val retrievedNames =
problemRepository.findAllNamesSorted()
require(ProblemA.name < ProblemB.name)
retrievedNames should be(List(
ProblemA.name,
ProblemB.name
))
}
"Adding a new problem" should "work" in {
problemRepository.add(ProblemA)
val problemEntity: ProblemEntity =
ProblemA
val retrievedEntity =
entityManager.find(classOf[ProblemEntity], ProblemA.id)
retrievedEntity should be(problemEntity)
}
"Adding the same problem twice" should "fail" in {
problemRepository.add(ProblemA)
intercept[RollbackException] {
problemRepository.add(ProblemA)
}
}
"Adding a problem having no time limit" should "work" in {
require(ProblemB.timeLimitOption.isEmpty)
problemRepository.add(ProblemB)
}
"Retrieving a problem having no time limit" should "work" in {
require(ProblemB.timeLimitOption.isEmpty)
problemRepository.add(ProblemB)
val retrievedProblem =
problemRepository.findByName(ProblemB.name)
retrievedProblem should be(Some(ProblemB))
}
"Updating a new problem" should "fail" in {
problemRepository.update(ProblemA)
}
"Updating an existing problem" should "work" in {
problemRepository.add(ProblemA)
val updatedProblem =
ProblemA.copy(name = "Problem with dedicated new name")
val updatedProblemEntity: ProblemEntity =
updatedProblem
problemRepository.update(updatedProblem)
val retrievedProblemEntity =
entityManager.find(classOf[ProblemEntity], ProblemA.id)
retrievedProblemEntity should be(updatedProblemEntity)
}
"Removing an existing problem" should "work correctly" in {
entityManagerFactory.runTransaction(entityManager => {
val problemEntity: ProblemEntity =
ProblemA
entityManager.persist(problemEntity)
})
problemRepository.findById(ProblemA.id) should not be None
problemRepository.removeByName(ProblemA.name)
problemRepository.findById(ProblemA.id) should be(None)
}
"Removing a missing problem by name" should "do nothing" in {
problemRepository.removeByName("MISSING")
}
"Removing all problems from an empty db" should "do nothing" in {
problemRepository.removeAll()
}
"Removing all problems" should "work" in {
problemRepository.add(ProblemA)
problemRepository.add(ProblemB)
problemRepository.count() should be(2)
problemRepository.removeAll()
problemRepository.count() should be(0)
}
"Counting problems in an empty table" should "return 0" in {
problemRepository.count() should be(0)
}
"Counting problems" should "work" in {
problemRepository.add(ProblemA)
problemRepository.add(ProblemB)
problemRepository.count() should be(2)
}
}
|
giancosta86/TwoBinManager
|
src/test/scala/info/gianlucacosta/twobinmanager/db/TestDbProblemRepository.scala
|
Scala
|
gpl-3.0
| 5,828
|
package im.actor.server.user
import im.actor.api.rpc.users._
import im.actor.server.models
import im.actor.server.models.UserPhone
import scala.language.postfixOps
object UserUtils {
def defaultUserContactRecords(phones: Vector[Long], emails: Vector[String]): Vector[ContactRecord] = {
val phoneRecords = phones map { phone ⇒
ContactRecord(ContactType.Phone, stringValue = None, longValue = Some(phone), title = Some("Mobile phone"), subtitle = None)
}
val emailRecords = emails map { email ⇒
ContactRecord(ContactType.Email, stringValue = Some(email), longValue = None, title = Some("Email"), subtitle = None)
}
phoneRecords ++ emailRecords
}
def userContactRecords(phones: Vector[models.UserPhone], emails: Vector[models.UserEmail]): Vector[ContactRecord] = {
val phoneRecords = phones map { phone ⇒
ContactRecord(ContactType.Phone, stringValue = None, longValue = Some(phone.number), title = Some(phone.title), subtitle = None)
}
val emailRecords = emails map { email ⇒
ContactRecord(ContactType.Email, stringValue = Some(email.email), longValue = None, title = Some(email.title), subtitle = None)
}
phoneRecords ++ emailRecords
}
def userPhone(u: models.User, phones: Seq[UserPhone]): Option[Long] = {
phones.headOption match {
case Some(phone) ⇒ Some(phone.number)
case None ⇒ Some(0L)
}
}
def normalizeLocalName(name: Option[String]) = name match {
case n @ Some(name) if name.nonEmpty ⇒ n
case _ ⇒ None
}
}
|
winiceo/actor-platform
|
actor-server/actor-core/src/main/scala/im/actor/server/user/UserUtils.scala
|
Scala
|
mit
| 1,585
|
package org.odfi.indesign.core.module.ui.www
import org.odfi.indesign.core.harvest.HarvestedResource
import com.idyria.osi.wsb.webapp.localweb.LocalWebHTMLVIew
import com.idyria.osi.wsb.webapp.localweb.LocalWebHTMLVIewCompiler
class IndesignUIView extends LocalWebHTMLVIew with HarvestedResource with IndesignUIHtmlBuilder {
// Standalone
//--------------
/**
* If set, the main UI module will map this view to a specific path and open a link to a new tab
*/
var targetViewPath: Option[String] = None
def changeTargetViewPath(path: String) = {
this.targetViewPath = Some(path)
this
}
// ! Important: if the view is derived from a Scala source file, root it
//------------------
var isProxy = false
var proxiedView: Option[LocalWebHTMLVIew] = None
/*this.onProcess {
this.parentResource match {
case Some(p: ScalaSourceFile) =>
this.root
case _ =>
}
}*/
override def getClassLoader = proxiedView match {
case None => super.getClassLoader
case Some(v) => v.getClassLoader
}
// Actions
//---------------
override def getActions = proxiedView match {
case Some(v) => v.getActions
case None => super.getActions
}
// Infos
//------------
/**
* Alias for #getUIViewName
*/
//var name = getUIViewName
def getUIViewName = getClass.getSimpleName.replace("$", "")
def getId = getClass.getCanonicalName
var reloadEnable = true
// Rendering can be local or from source
//----------------------
override def render = {
this.isProxy match {
case true if proxiedView.isEmpty =>
this.parentResource match {
/*case Some(p: JavaSourceFile) =>
// Ensure Compilation is done
p.ensureCompiled
var cl = p.loadClass
println(s"View source file: " + cl)
// Create UI View
var view = LocalWebHTMLVIewCompiler.newInstance[LocalWebHTMLVIew](None, cl.asInstanceOf[Class[LocalWebHTMLVIew]])
// Set proxy on new view
view.proxy = Some(this)
this.proxiedView = Some(view)
view.viewPath = this.viewPath
// On reload, replace
p.onChange {
println(s"Modified scala source, trying to relaod view")
// Compile, and keep errors on the main proxying view
keepErrorsOn(this) {
p.ensureCompiled
// If we came up here, no errors
//p.getUpchainCompilingProject.resetClassDomain
var cl = p.loadClass
var view = LocalWebHTMLVIewCompiler.newInstance[LocalWebHTMLVIew](None, cl.asInstanceOf[Class[LocalWebHTMLVIew]])
// Close old view
//this.proxiedView.get.closeView
// Set proxy on new view
view.proxy = Some(this)
this.proxiedView = Some(view)
view.viewPath = this.viewPath
}
// Refresh
this.getTopParentView.@->("refresh")
}
proxiedView.get.rerender
*/
case _ =>
super.render
}
case true if proxiedView.isDefined =>
proxiedView.get.rerender
case _ =>
super.render
}
}
}
|
opendesignflow/indesign
|
indesign-wwwui/src/main/scala/org/odfi/indesign/core/module/ui/www/IndesignUIView.scala
|
Scala
|
gpl-3.0
| 3,442
|
package org.jetbrains.plugins.scala
package components
import javax.swing.SwingUtilities
import javax.swing.event.HyperlinkEvent
import com.intellij.ide.plugins._
import com.intellij.ide.plugins.cl.PluginClassLoader
import com.intellij.notification._
import com.intellij.openapi.application.{Application, ApplicationManager}
import com.intellij.openapi.components.ApplicationComponent
import com.intellij.openapi.diagnostic.Logger
import com.intellij.openapi.extensions.ExtensionPointName
/**
* @author Alefas
* @since 31.10.12
*/
abstract class ScalaPluginVersionVerifier {
def getSinceVersion: String
def getUntilVersion: String
}
object ScalaPluginVersionVerifier {
class Version(private val major: Int, private val minor: Int, private val build: Int) extends Ordered[Version] with Serializable {
def compare(that: Version) = implicitly[Ordering[(Int, Int, Int)]]
.compare((major, minor, build), (that.major, that.minor, that.build))
val presentation: String = if (major == Int.MaxValue) "SNAPSHOT" else s"$major.$minor.$build"
def isSnapshot = presentation == "SNAPSHOT"
override def equals(that: Any) = compare(that.asInstanceOf[Version]) == 0
override def toString = presentation
}
object Version {
object Snapshot extends Version(Int.MaxValue, Int.MaxValue, Int.MaxValue)
def parse(version: String): Option[Version] = {
val VersionRegex = "(\\\\d+)[.](\\\\d+)[.](\\\\d+)".r
version match {
case "VERSION" => Some(Snapshot)
case VersionRegex(major: String, minor: String, build: String) => Some(new Version(major.toInt, minor.toInt, build.toInt))
case _ => None
}
}
}
val EP_NAME: ExtensionPointName[ScalaPluginVersionVerifier] = ExtensionPointName.create("org.intellij.scala.scalaPluginVersionVerifier")
lazy val getPluginVersion: Option[Version] = {
getClass.getClassLoader match {
case pluginLoader: PluginClassLoader =>
Version.parse(PluginManager.getPlugin(pluginLoader.getPluginId).getVersion)
case _ => Some(Version.Snapshot)
}
}
def getPluginDescriptor = {
getClass.getClassLoader match {
case pluginLoader: PluginClassLoader =>
PluginManager.getPlugin(pluginLoader.getPluginId).asInstanceOf[IdeaPluginDescriptorImpl]
case other => throw new RuntimeException(s"Wrong plugin classLoader: $other")
}
}
}
object ScalaPluginVersionVerifierApplicationComponent {
private val LOG = Logger.getInstance("#org.jetbrains.plugins.scala.components.ScalaPluginVersionVerifierApplicationComponent")
}
class ScalaPluginVersionVerifierApplicationComponent extends ApplicationComponent {
import ScalaPluginVersionVerifier._
def getComponentName: String = "ScalaPluginVersionVerifierApplicationComponent"
def initComponent() {
def checkVersion() {
ScalaPluginVersionVerifier.getPluginVersion match {
case Some(version) =>
val extensions = ScalaPluginVersionVerifier.EP_NAME.getExtensions
for (extension <- extensions) {
var failed = false
def wrongVersion() {
failed = true
extension.getClass.getClassLoader match {
case pluginLoader: PluginClassLoader =>
val plugin = PluginManager.getPlugin(pluginLoader.getPluginId)
val message =
s"Plugin ${plugin.getName} of version ${plugin.getVersion} is " +
s"icompatible with Scala plugin of version $version. Do you want to disable ${plugin.getName} plugin?\\n" +
s"""<p/><a href="Yes">Yes, disable it</a>\\n""" +
s"""<p/><a href="No">No, leave it enabled</a>"""
if (ApplicationManager.getApplication.isUnitTestMode) {
ScalaPluginVersionVerifierApplicationComponent.LOG.error(message)
} else {
val Scala_Group = "Scala Plugin Incompatibility"
val app: Application = ApplicationManager.getApplication
if (!app.isDisposed) {
app.getMessageBus.syncPublisher(Notifications.TOPIC).register(Scala_Group, NotificationDisplayType.STICKY_BALLOON)
}
NotificationGroup.balloonGroup(Scala_Group)
val notification = new Notification(Scala_Group, "Incompatible plugin detected", message, NotificationType.ERROR, new NotificationListener {
def hyperlinkUpdate(notification: Notification, event: HyperlinkEvent) {
notification.expire()
val description = event.getDescription
description match {
case "Yes" =>
PluginManagerCore.disablePlugin(plugin.getPluginId.getIdString)
PluginManagerConfigurable.showRestartDialog()
case "No" => //do nothing it seems all is ok for the user
case _ => //do nothing it seems all is ok for the user
}
}
})
Notifications.Bus.notify(notification)
}
}
}
Version.parse(extension.getSinceVersion) match {
case Some(sinceVersion) =>
if (sinceVersion != version && version < sinceVersion) {
wrongVersion()
}
case _ =>
}
Version.parse(extension.getUntilVersion) match {
case Some(untilVersion) =>
if (untilVersion != version && untilVersion < version) {
wrongVersion()
}
case _ =>
}
}
case None =>
}
ScalaPluginUpdater.askUpdatePluginBranch()
}
SwingUtilities.invokeLater(new Runnable {
def run() {
ScalaPluginUpdater.upgradeRepo()
checkVersion()
ScalaPluginUpdater.postCheckIdeaCompatibility()
ScalaPluginUpdater.setupReporter()
}
})
}
def disposeComponent() {}
}
|
whorbowicz/intellij-scala
|
src/org/jetbrains/plugins/scala/components/PluginVersionVerifier.scala
|
Scala
|
apache-2.0
| 6,224
|
package pl.project13.scala.akka.raft
import akka.actor.ActorRef
sealed trait ClusterConfiguration {
def members: Set[ActorRef]
def sequenceNumber: Long
def isOlderThan(that: ClusterConfiguration) = this.sequenceNumber <= that.sequenceNumber
def isNewerThan(that: ClusterConfiguration) = this.sequenceNumber > that.sequenceNumber
def isTransitioning: Boolean
def transitionTo(newConfiguration: ClusterConfiguration): ClusterConfiguration
/**
* Basically "drop" ''old configuration'' and keep using only the new one.
*
* {{{
* StableConfiguration => StableConfiguration
* JointConsensusConfiguration(old, new) => StableConfiguration(new)
* }}}
*/
def transitionToStable: ClusterConfiguration
/** When in the middle of a configuration migration we may need to know if we're part of the new config (in order to step down if not) */
def isPartOfNewConfiguration(member: ActorRef): Boolean
}
object ClusterConfiguration {
def apply(members: Iterable[ActorRef]): ClusterConfiguration =
StableClusterConfiguration(0, members.toSet)
def apply(members: ActorRef*): ClusterConfiguration =
StableClusterConfiguration(0, members.toSet)
}
/**
* Used for times when the cluster is NOT undergoing membership changes.
* Use `transitionTo` in order to enter a [[pl.project13.scala.akka.raft.JointConsensusClusterConfiguration]] state.
*/
case class StableClusterConfiguration(sequenceNumber: Long, members: Set[ActorRef]) extends ClusterConfiguration {
val isTransitioning = false
/**
* Implementation detail: The resulting configuration's `sequenceNumber` will be equal to the current one.
*/
def transitionTo(newConfiguration: ClusterConfiguration): JointConsensusClusterConfiguration =
JointConsensusClusterConfiguration(sequenceNumber, members, newConfiguration.members)
def isPartOfNewConfiguration(ref: ActorRef) = members contains ref
def transitionToStable = this
override def toString = s"StableRaftConfiguration(${members.map(_.path.elements.last)})"
}
/**
* Configuration during transition to new configuration consists of both old / new member sets.
* As the configuration is applied, the old configuration may be discarded.
*
* During the transition phase:
*
* - Log entries are replicated to all members in both configurations
* - Any member from either configuration may serve as Leader
* - Agreement (for elections and entry commitment) requires majorities from ''both'' old and new configurations
*/
case class JointConsensusClusterConfiguration(sequenceNumber: Long, oldMembers: Set[ActorRef], newMembers: Set[ActorRef]) extends ClusterConfiguration {
/** Members from both configurations participate in the joint consensus phase */
val members = oldMembers union newMembers
val isTransitioning = true
/**
* Implementation detail: The resulting stable configuration's `sequenceNumber` will be incremented from the current one, to mark the following "stable phase".
*/
def transitionTo(newConfiguration: ClusterConfiguration) =
throw new IllegalStateException(s"Cannot start another configuration transition, already in progress! " +
s"Migrating from [${oldMembers.size}] $oldMembers to [${newMembers.size}] $newMembers")
/** When in the middle of a configuration migration we may need to know if we're part of the new config (in order to step down if not) */
def isPartOfNewConfiguration(member: ActorRef): Boolean = newMembers contains member
def transitionToStable = StableClusterConfiguration(sequenceNumber + 1, newMembers)
override def toString =
s"JointConsensusRaftConfiguration(old:${oldMembers.map(_.path.elements.last)}, new:${newMembers.map(_.path.elements.last)})"
}
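// ---------------------------------------------------------------------------
// Hedged usage sketch (illustrative, not part of akka-raft): walks a
// configuration through the joint-consensus transition described in the docs
// above. The object name, the actor names and the ActorSystem wiring are
// assumptions introduced here; assumes an Akka version where
// ActorSystem#terminate is available (use shutdown() on older releases).
// ---------------------------------------------------------------------------
object ConfigurationTransitionDemo extends App {
  import akka.actor.{ActorSystem, Props}

  val system = ActorSystem("raft-config-demo")
  val Seq(a, b, c, d) = Seq("a", "b", "c", "d").map(n => system.actorOf(Props.empty, n))

  val stable = ClusterConfiguration(a, b, c)                     // StableClusterConfiguration(0, {a, b, c})
  val joint = stable.transitionTo(ClusterConfiguration(b, c, d))
  // During joint consensus, members of BOTH configurations participate:
  assert(joint.isTransitioning && joint.members == Set(a, b, c, d))
  // Only the new member set counts as the "new" configuration:
  assert(joint.isPartOfNewConfiguration(d) && !joint.isPartOfNewConfiguration(a))
  // Dropping the old configuration keeps the new members and bumps the sequence number:
  val next = joint.transitionToStable
  assert(!next.isTransitioning && next.members == Set(b, c, d) && next.isNewerThan(stable))

  system.terminate()
}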
|
ktoso/akka-raft
|
src/main/scala/pl/project13/scala/akka/raft/ClusterConfiguration.scala
|
Scala
|
apache-2.0
| 3,750
|
package com.twitter.finagle.serverset2
import com.twitter.common.io.JsonCodec
import com.twitter.common.zookeeper.ServerSets
import com.twitter.conversions.time._
import com.twitter.finagle.serverset2.ServiceDiscoverer.ClientHealth
import com.twitter.finagle.serverset2.ZkOp.{GetData, GetChildrenWatch, ExistsWatch}
import com.twitter.finagle.stats.{NullStatsReceiver, StatsReceiver}
import com.twitter.finagle.serverset2.client._
import com.twitter.finagle.util.DefaultTimer
import com.twitter.io.Buf
import com.twitter.io.Buf.ByteArray
import com.twitter.thrift
import com.twitter.thrift.ServiceInstance
import com.twitter.util._
import org.junit.runner.RunWith
import org.mockito.Mockito.when
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.junit.JUnitRunner
import org.scalatest.FunSuite
import org.scalatest.mock.MockitoSugar
import java.util.concurrent.atomic.AtomicReference
@RunWith(classOf[JUnitRunner])
class ServiceDiscovererTest extends FunSuite with MockitoSugar with Eventually with IntegrationPatience {
class ServiceDiscovererWithExposedCache(
varZkSession: Var[ZkSession],
statsReceiver: StatsReceiver,
timer: Timer = DefaultTimer.twitter
) extends ServiceDiscoverer(varZkSession, statsReceiver, ForeverEpoch, timer) {
val cache = new ZkEntryCache("/foo/bar", NullStatsReceiver)
cache.setSession(varZkSession.sample)
override val entriesOf = Memoize { path: String =>
entitiesOf(path, cache, NullStatsReceiver.stat("meh"), ServiceDiscoverer.EndpointGlob)
}
}
def ep(port: Int) = Endpoint(Array(null), "localhost", port, Int.MinValue, Endpoint.Status.Alive, port.toString)
val ForeverEpoch = Epoch(Duration.Top)(new MockTimer)
val retryStream = RetryStream()
def createEntry(id: Int): Buf = {
val jsonCodec = JsonCodec.create(classOf[ServiceInstance])
val serviceInstance = new ServiceInstance()
serviceInstance.setShard(1)
serviceInstance.setStatus(thrift.Status.ALIVE)
serviceInstance.setServiceEndpoint(new thrift.Endpoint(s"$id.0.0.12", 32123))
ByteArray(ServerSets.serializeServiceInstance(serviceInstance, jsonCodec))
}
test("ServiceDiscoverer.zipWithWeights") {
val port1 = 80 // not bound
val port2 = 53 // ditto
val ents = Seq[Entry](ep(port1), ep(port2), ep(3), ep(4))
val v1 = Vector(Seq(
Descriptor(Selector.Host("localhost", port1), 1.1, 1),
Descriptor(Selector.Host("localhost", port2), 1.4, 1),
Descriptor(Selector.Member("3"), 3.1, 1)))
val v2 = Vector(Seq(Descriptor(Selector.Member(port2.toString), 2.0, 1)))
val vecs = Seq(v1, v2)
assert(ServiceDiscoverer.zipWithWeights(ents, vecs.toSet).toSet == Set(
ep(port1) -> 1.1,
ep(port2) -> 2.8,
ep(3) -> 3.1,
ep(4) -> 1.0))
}
test("New observation do not cause reads; entries are cached") {
implicit val timer = new MockTimer
val watchedZk = Watched(new OpqueueZkReader(), Var(WatchState.Pending))
val sd = new ServiceDiscoverer(Var.value(new ZkSession(retryStream, watchedZk, NullStatsReceiver)), NullStatsReceiver, ForeverEpoch, timer)
val f1 = sd("/foo/bar").states.filter(_ != Activity.Pending).toFuture()
val ew@ExistsWatch("/foo/bar") = watchedZk.value.opq(0)
val ewwatchv = Var[WatchState](WatchState.Pending)
ew.res() = Return(Watched(Some(Data.Stat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)), ewwatchv))
val gw@GetChildrenWatch("/foo/bar") = watchedZk.value.opq(1)
gw.res() = Return(Watched(Node.Children(Seq("member_1"), null), Var.value(WatchState.Pending)))
assert(!f1.isDefined)
val gd@GetData("/foo/bar/member_1") = watchedZk.value.opq(2)
gd.res() = Return(Node.Data(None, null))
assert(f1.isDefined)
val f2 = sd("/foo/bar").states.filter(_ != Activity.Pending).toFuture()
assert(f2.isDefined)
}
test("Removed entries are removed from cache") {
implicit val timer = new MockTimer
val watchedZk = Watched(new OpqueueZkReader(), Var(WatchState.Pending))
val sd = new ServiceDiscovererWithExposedCache(Var.value(new ZkSession(retryStream,
watchedZk, NullStatsReceiver)), NullStatsReceiver)
val f1 = sd("/foo/bar").states.filter(_ != Activity.Pending).toFuture()
val cache = sd.cache
val ew@ExistsWatch("/foo/bar") = watchedZk.value.opq(0)
val ewwatchv = Var[WatchState](WatchState.Pending)
ew.res() = Return(Watched(Some(Data.Stat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)), ewwatchv))
assert(cache.keys == Set.empty)
val gw@GetChildrenWatch("/foo/bar") = watchedZk.value.opq(1)
gw.res() = Return(Watched(Node.Children(Seq("member_1"), null), Var.value(new WatchState.Determined(NodeEvent.Created))))
val gd@GetData("/foo/bar/member_1") = watchedZk.value.opq(2)
gd.res() = Return(Node.Data(None, null))
assert(cache.keys == Set("member_1"))
val gw2@GetChildrenWatch("/foo/bar") = watchedZk.value.opq(3)
gw2.res() = Return(Watched(Node.Children(Seq.empty, null), Var.value(new WatchState.Determined(NodeEvent.Created))))
assert(cache.keys == Set.empty)
val gw3@GetChildrenWatch("/foo/bar") = watchedZk.value.opq(4)
gw3.res() = Return(Watched(Node.Children(Seq("member_2"), null), Var.value(new WatchState.Determined(NodeEvent.Created))))
val gd2@GetData("/foo/bar/member_2") = watchedZk.value.opq(5)
gd2.res() = Return(Node.Data(None, null))
assert(cache.keys == Set("member_2"))
val gw4@GetChildrenWatch("/foo/bar") = watchedZk.value.opq(6)
gw4.res() = Return(Watched(Node.Children(Seq("member_3", "member_4"), null), Var.value(new WatchState.Determined(NodeEvent.Created))))
val gd3@GetData("/foo/bar/member_3") = watchedZk.value.opq(7)
gd3.res() = Return(Node.Data(None, null))
val gd4@GetData("/foo/bar/member_4") = watchedZk.value.opq(8)
gd4.res() = Return(Node.Data(None, null))
assert(cache.keys == Set("member_3", "member_4"))
}
test("If all reads fail the serverset is in Failed state") {
implicit val timer = new MockTimer
val watchedZk = Watched(new OpqueueZkReader(), Var(WatchState.Pending))
val sd = new ServiceDiscovererWithExposedCache(Var.value(new ZkSession(retryStream,
watchedZk, NullStatsReceiver)), NullStatsReceiver, timer)
val f1 = sd("/foo/bar").states.filter(_ != Activity.Pending).toFuture()
val cache = sd.cache
val ew@ExistsWatch("/foo/bar") = watchedZk.value.opq(0)
val ewwatchv = Var[WatchState](WatchState.Pending)
ew.res() = Return(Watched(Some(Data.Stat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)), ewwatchv))
val gw@GetChildrenWatch("/foo/bar") = watchedZk.value.opq(1)
gw.res() = Return(Watched(Node.Children(Seq("member_1", "member_2"), null), Var.value(new WatchState.Determined(NodeEvent.Created))))
val gd@GetData("/foo/bar/member_1") = watchedZk.value.opq(2)
gd.res() = Throw(new Exception)
val gd2@GetData("/foo/bar/member_2") = watchedZk.value.opq(3)
gd2.res() = Throw(new Exception)
Await.result(f1, 1.second) match {
case Activity.Failed(ServiceDiscoverer.EntryLookupFailureException) => // great!
case other => fail(s"Expected entry lookup exception. Received $other")
}
}
test("Partial failures are successful and retried") {
Time.withCurrentTimeFrozen { timeControl =>
implicit val timer = new MockTimer
val watchedZk = Watched(new OpqueueZkReader(), Var(WatchState.Pending))
val sd = new ServiceDiscovererWithExposedCache(Var.value(new ZkSession(retryStream,
watchedZk, NullStatsReceiver)), NullStatsReceiver, timer)
val currentValue = new AtomicReference[Activity.State[Seq[(Entry, Double)]]]
sd("/foo/bar").states.filter(_ != Activity.Pending).register(Witness(currentValue))
val cache = sd.cache
val ew@ExistsWatch("/foo/bar") = watchedZk.value.opq(0)
val ewwatchv = Var[WatchState](WatchState.Pending)
ew.res() = Return(Watched(Some(Data.Stat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)), ewwatchv))
val gw@GetChildrenWatch("/foo/bar") = watchedZk.value.opq(1)
gw.res() = Return(Watched(Node.Children(Seq("member_1", "member_2"), null), Var.value(WatchState.Pending)))
val gd@GetData("/foo/bar/member_1") = watchedZk.value.opq(2)
gd.res() = Throw(new Exception)
val gd2@GetData("/foo/bar/member_2") = watchedZk.value.opq(3)
gd2.res() = Return(Node.Data(Some(createEntry(1)), null))
// Should succeed with only 1 resolved value
eventually {
currentValue.get match {
case Activity.Ok(seq) => assert(seq.size == 1) // member_2 has good data
case other => fail(s"Expected entry lookup exception. Received $other")
}
}
// member_1 will eventually be re-queried
eventually {
timeControl.advance(2.minutes)
timer.tick()
val gd3@GetData("/foo/bar/member_1") = watchedZk.value.opq(4)
gd3.res() = Return(Node.Data(Some(createEntry(2)), null))
}
// Then we should see 2 values in the serverset
currentValue.get match {
case Activity.Ok(seq) => assert(seq.size == 2) // both have good values now
case other => fail(s"Expected entry lookup exception. Received $other")
}
}
}
test("Consecutive observations do not cause reads; entries are cached") {
implicit val timer = new MockTimer
val watchedZk = Watched(new OpqueueZkReader(), Var(WatchState.Pending))
val sd = new ServiceDiscoverer(Var.value(new ZkSession(retryStream,watchedZk, NullStatsReceiver)), NullStatsReceiver, ForeverEpoch, timer)
val f1 = sd("/foo/bar").states.filter(_ != Activity.Pending).toFuture()
val f2 = sd("/foo/bar").states.filter(_ != Activity.Pending).toFuture()
val ew@ExistsWatch("/foo/bar") = watchedZk.value.opq(0)
val ewwatchv = Var[WatchState](WatchState.Pending)
ew.res() = Return(Watched(Some(Data.Stat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)), ewwatchv))
val gw@GetChildrenWatch("/foo/bar") = watchedZk.value.opq(1)
gw.res() = Return(Watched(Node.Children(Seq("member_1"), null), Var.value(WatchState.Pending)))
assert(!f1.isDefined)
assert(!f2.isDefined)
val gd@GetData("/foo/bar/member_1") = watchedZk.value.opq(2)
gd.res() = Return(Node.Data(None, null))
// ensure that we are hitting the cache: even though we called
// GetData only once, the two observations are fulfilled.
assert(f1.isDefined)
assert(f2.isDefined)
}
test("New sessions are used") {
implicit val timer = new MockTimer
val fakeWatchedZk = Watched(new OpqueueZkReader(), Var(WatchState.Pending))
val watchedZk = Watched(new OpqueueZkReader(), Var(WatchState.Pending))
val watchedZkVar = new ReadWriteVar(new ZkSession(retryStream, fakeWatchedZk, NullStatsReceiver))
val sd = new ServiceDiscoverer(watchedZkVar, NullStatsReceiver, ForeverEpoch, timer)
val f1 = sd("/foo/bar").states.filter(_ != Activity.Pending).toFuture()
val f2 = sd("/foo/bar").states.filter(_ != Activity.Pending).toFuture()
watchedZkVar.update(new ZkSession(retryStream, watchedZk, NullStatsReceiver))
val ew@ExistsWatch("/foo/bar") = watchedZk.value.opq(0)
val ewwatchv = Var[WatchState](WatchState.Pending)
ew.res() = Return(Watched(Some(Data.Stat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)), ewwatchv))
val gw@GetChildrenWatch("/foo/bar") = watchedZk.value.opq(1)
gw.res() = Return(Watched(Node.Children(Seq("member_1"), null), Var.value(WatchState.Pending)))
assert(!f1.isDefined)
assert(!f2.isDefined)
val gd@GetData("/foo/bar/member_1") = watchedZk.value.opq(2)
gd.res() = Return(Node.Data(None, null))
// ensure that we are hitting the cache: even though we called
// GetData only once, the two observations are fulfilled.
assert(f1.isDefined)
assert(f2.isDefined)
}
def newZkSession(): (ZkSession, Witness[WatchState]) = {
val mockZkSession = mock[ZkSession]
val watchStateEvent = Event[WatchState]()
val watchStateVar = Var[WatchState](WatchState.Pending, watchStateEvent)
when(mockZkSession.state).thenReturn(watchStateVar)
(mockZkSession, watchStateEvent)
}
test("ServiceDiscoverer stable health is reported correctly") {
Time.withCurrentTimeFrozen { timeControl =>
val zkSession = Event[ZkSession]()
val varZkSession = Var[ZkSession](ZkSession.nil, zkSession)
val period = 1.second
implicit val timer = new MockTimer
val sd = new ServiceDiscoverer(varZkSession, NullStatsReceiver, Epoch(period)(timer), timer)
val stabilizedHealth = new AtomicReference[ClientHealth](ClientHealth.Healthy)
sd.health.changes.register(Witness {
stabilizedHealth
})
// should start as healthy until updated otherwise
assert(stabilizedHealth.get == ClientHealth.Healthy)
val (session1, state1) = newZkSession()
zkSession.notify(session1)
assert(stabilizedHealth.get == ClientHealth.Healthy)
// make unhealthy without turning the clock
state1.notify(WatchState.SessionState(SessionState.Expired))
assert(stabilizedHealth.get == ClientHealth.Healthy)
timer.tick()
// advance past the health period to make the stabilized health unhealthy
timeControl.advance(period)
timer.tick()
assert(stabilizedHealth.get == ClientHealth.Unhealthy)
// flip to a new session
val (session2, state2) = newZkSession()
state2.notify(WatchState.SessionState(SessionState.SyncConnected))
zkSession.notify(session2)
assert(stabilizedHealth.get == ClientHealth.Healthy)
}
}
test("ServiceDiscoverer rawHealth is reported correctly") {
val zkSession = Event[ZkSession]()
val varZkSession = Var[ZkSession](ZkSession.nil, zkSession)
val sd = new ServiceDiscoverer(varZkSession, NullStatsReceiver, ForeverEpoch, DefaultTimer.twitter)
val health = new AtomicReference[ClientHealth](ClientHealth.Healthy)
sd.rawHealth.changes.register(Witness {
health
})
// should start as healthy until updated otherwise
assert(health.get == ClientHealth.Healthy)
val (session1, state1) = newZkSession()
zkSession.notify(session1)
assert(health.get == ClientHealth.Healthy)
// make unhealthy
state1.notify(WatchState.SessionState(SessionState.Expired))
assert(health.get == ClientHealth.Unhealthy)
// flip to a new session
val (session2, state2) = newZkSession()
state2.notify(WatchState.SessionState(SessionState.SyncConnected))
zkSession.notify(session2)
assert(health.get == ClientHealth.Healthy)
// pulse the bad session (which is NOT the current session) and ensure we stay healthy
state1.notify(WatchState.SessionState(SessionState.Disconnected))
assert(health.get == ClientHealth.Healthy)
// pulse the current session with an event that should be ignored
state2.notify(WatchState.Pending)
assert(health.get == ClientHealth.Healthy)
}
}
|
liamstewart/finagle
|
finagle-serversets/src/test/scala/com/twitter/finagle/serverset2/ServiceDiscovererTest.scala
|
Scala
|
apache-2.0
| 15,002
|
package com.twitter.logging
import com.twitter.concurrent.{NamedPoolThreadFactory, AsyncQueue}
import com.twitter.util._
import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicBoolean
import java.util.{logging => javalog}
object QueueingHandler {
private[this] val executor = Executors.newCachedThreadPool(
new NamedPoolThreadFactory("QueueingHandlerPool", makeDaemons = true)
)
private val DefaultFuturePool = new ExecutorServiceFuturePool(executor)
private object QueueClosedException extends RuntimeException
/**
* Generates a HandlerFactory that returns a QueueingHandler
*
* @param handler Wrapped handler that publishing is proxied to.
*
* @param maxQueueSize Maximum queue size. Records are sent to
* [[QueueingHandler.onOverflow]] when it is at capacity.
*/
// The type parameter exists to ease java interop
def apply[H <: Handler](
handler: () => H,
maxQueueSize: Int = Int.MaxValue,
inferClassNames: Boolean = false
): () => QueueingHandler =
() => new QueueingHandler(handler(), maxQueueSize, inferClassNames)
def apply(handler: HandlerFactory, maxQueueSize: Int): () => QueueingHandler =
apply(handler, maxQueueSize, false)
// java interop
def apply[H <: Handler](handler: () => H): () => QueueingHandler =
apply(handler, Int.MaxValue)
private case class RecordWithLocals(record: javalog.LogRecord, locals: Local.Context)
}
/**
* Proxy handler that queues log records and publishes them in another thread to
* a nested handler. Useful for when a handler may block.
*
* @param handler Wrapped handler that publishing is proxied to.
*
* @param maxQueueSize Maximum queue size. Records are sent to
* [[onOverflow]] when it is at capacity.
*
* @param inferClassNames [[com.twitter.logging.LogRecord]] and
* `java.util.logging.LogRecord` both attempt to infer the class and
* method name of the caller, but the inference needs the stack trace at
* the time that the record is logged. QueueingHandler breaks the
* inference because the log record is rendered out of band, so the stack
* trace is gone. Setting this option to true will cause the
* introspection to happen before the log record is queued, which means
* that the class name and method name will be available when the log
* record is passed to the underlying handler. This defaults to false
* because it loses some of the latency improvements of deferring
* logging by getting the stack trace synchronously.
*/
class QueueingHandler(handler: Handler, val maxQueueSize: Int, inferClassNames: Boolean)
extends ProxyHandler(handler) {
import QueueingHandler._
def this(handler: Handler, maxQueueSize: Int) =
this(handler, maxQueueSize, false)
def this(handler: Handler) =
this(handler, Int.MaxValue)
protected val dropLogNode: String = ""
protected val log: Logger = Logger(dropLogNode)
private[this] val queue = new AsyncQueue[RecordWithLocals](maxQueueSize)
private[this] val closed = new AtomicBoolean(false)
override def publish(record: javalog.LogRecord): Unit = {
// Calling getSourceClassName has the side-effect of inspecting the
// stack and filling in the class and method names if they have not
// already been set. See the description of inferClassNames for why
// we might do this here.
if (inferClassNames) record.getSourceClassName
DefaultFuturePool {
// We run this in a FuturePool to avoid satisfying pollers
// (which flush the record) inline.
if (!queue.offer(RecordWithLocals(record, Local.save())))
onOverflow(record)
}
}
private[this] def doPublish(record: RecordWithLocals): Unit = Local.let(record.locals) {
super.publish(record.record)
}
private[this] def loop(): Future[Unit] = {
queue.poll().map(doPublish).respond {
case Return(_) => loop()
case Throw(QueueClosedException) => // indicates we should shutdown
case Throw(e) =>
// `doPublish` can throw, and we want to keep on publishing...
e.printStackTrace()
loop()
}
}
// begin polling for log records
DefaultFuturePool {
loop()
}
override def close(): Unit = {
if (closed.compareAndSet(false, true)) {
queue.fail(QueueClosedException, discard = true)
// Propagate close
super.close()
}
}
override def flush(): Unit = {
// Publish all records in queue
queue.drain().map { records => records.foreach(doPublish) }
// Propagate flush
super.flush()
}
/**
* Called when record dropped. Default is to log to console.
*/
protected def onOverflow(record: javalog.LogRecord): Unit = {
Console.err.println(
String.format("[%s] log queue overflow - record dropped", Time.now.toString)
)
}
}
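// ---------------------------------------------------------------------------
// Hedged usage sketch (illustrative, not part of util-logging): wires a
// QueueingHandler in front of a console handler so that publish() returns
// quickly and the actual write happens on the background future pool. The
// object name and the 1024-record queue size are assumptions introduced here.
// ---------------------------------------------------------------------------
object QueueingHandlerDemo extends App {
  // Wrap a potentially slow handler; records beyond maxQueueSize are handed to onOverflow.
  val queued = new QueueingHandler(new ConsoleHandler(new Formatter(), None), 1024)

  val log = Logger.get("queueing-demo")
  log.addHandler(queued)
  log.info("this record is queued and published asynchronously")

  // Best-effort drain of anything already queued before the demo exits.
  queued.flush()
}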
|
twitter/util
|
util-logging/src/main/scala/com/twitter/logging/QueueingHandler.scala
|
Scala
|
apache-2.0
| 4,802
|
package shop.infrastructure
import slick.driver.PostgresDriver.simple._
// import org.joda.time.DateTime
// import com.github.tototoshi.slick.PostgresJodaSupport._
import shop.model._
import scala.slick.jdbc.meta.MTable
class ShoppingItemSchema(tag: Tag) extends Table[(Long,String,String,Long)](tag,"shopping_item"){
def id = column[Long]("id",O.PrimaryKey,O.AutoInc)
def name = column[String]("itemname")
def description = column[String]("description")
def listId = column[Long]("list_id")
def * = (id, name, description, listId)
}
class ShoppingItemRepository(implicit val registry: ComponentRegistry) extends Repository {
def findItems(listId: Long): Seq[ShoppingItem] = {
database.withSession{ implicit session =>
( for {
item <- shoppingItems if item.listId === listId
} yield (item.id,item.name) ).list.map{
case (itemId,itemName) => new ShoppingItem(itemId,itemName)
}
}
}
def findItem(listId: Long, itemId: Long): Option[ShoppingItem] = {
database.withSession{ implicit session =>
( for {
item <- shoppingItems
if item.listId === listId
if item.id === itemId
} yield (item.id,item.name) ).firstOption.map{
case (itemId,itemName) => new ShoppingItem(itemId,itemName)
}
}
}
def save(list: ShoppingList, item: ShoppingItem): Option[Long] = {
database.withSession{ implicit session =>
list.id.map { listId =>
(shoppingItems returning shoppingItems.map(_.id)) += (-1,item.name,item.description.getOrElse(""),listId)
}
}
}
def update(item: ShoppingItem) = {
database.withSession{ implicit session =>
item.id.map{ id =>
val nameQuery = for {
dbItem <- shoppingItems if dbItem.id === id
} yield dbItem.name
nameQuery.update(item.name)
item
}
}
}
}
|
flurdy/shoppinglist
|
shopservice/src/main/scala/infrastructure/ShoppingItemRepository.scala
|
Scala
|
mit
| 1,996
|
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt.internal.util
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
abstract class UnitSpec extends AnyFlatSpec with Matchers
|
sbt/sbt
|
internal/util-complete/src/test/scala/UnitSpec.scala
|
Scala
|
apache-2.0
| 320
|
package org.akkapulline
import scala.reflect.ClassTag
import akka.actor._
import scala.collection.mutable
import org.akkapulline.Messages._
import scala.util.Try
class PullLineAdapter[L: ClassTag, R: ClassTag](workerLocation: String,
leftLocation: String, rightLocation: String, bufferSize: Int)
extends Actor
with ActorLogging {
val worker = context.actorSelection(workerLocation)
val left = context.actorSelection(leftLocation)
val right = context.actorSelection(rightLocation)
val rightBuffer = mutable.Queue.empty[R]
override def postStop(): Unit = {
log.info("PullLineWorker stopped")
left ! PoisonPill
}
def empty: Receive = {
case Pull =>
log.debug("Pull requested from {}. State: empty", sender)
left ! Pull
case PullDone(result: Option[L]) => worker ! Work(result)
case WorkDone(result: Try[R]) =>
result.foreach { _result =>
rightBuffer.enqueue(_result)
right ! WorkIsReady
context.become(ready)
}
left ! Pull
case WorkIsReady =>
left ! Pull
case NoMoreData => context.become(noMoreData)
}
def ready: Receive = {
case Pull =>
log.debug("Pull requested from {}. State: ready", sender)
sender ! PullDone(Some(rightBuffer.dequeue()))
left ! Pull
if (rightBuffer.isEmpty)
context.become(empty)
case PullDone(result: Option[L]) => worker ! Work(result)
case WorkDone(result: Try[R]) =>
result.foreach(rightBuffer.enqueue(_))
if (rightBuffer.size < bufferSize)
left ! Pull
case WorkIsReady => left ! Pull
case NoMoreData => context.become(noMoreData)
}
def noMoreData: Receive = {
case Pull =>
log.debug("Pull requested from {}. State: noMoreData", sender)
if (rightBuffer.isEmpty) {
sender ! NoMoreData
worker ! PoisonPill
} else {
sender ! PullDone(Some(rightBuffer.dequeue()))
}
case WorkDone(result: Try[R]) => result.foreach(rightBuffer.enqueue(_))
case NoMoreData =>
}
def receive = empty
}
|
fehmicansaglam/akka-pulline
|
src/main/scala/org/akkapulline/PullLineAdapter.scala
|
Scala
|
unlicense
| 2,120
|
package com.github.tototoshi.play2.auth.social.core
trait OAuthAuthenticator {
type AccessToken
}
|
tototoshi/play2-auth
|
social/src/main/scala/com/github/tototoshi/play2/auth/social/core/OAuthAuthenticator.scala
|
Scala
|
apache-2.0
| 103
|
package monocle
import eu.timepit.refined._
import eu.timepit.refined.api.{Refined, Validate}
import eu.timepit.refined.char.{LowerCase, UpperCase}
import eu.timepit.refined.string.{EndsWith, StartsWith}
import eu.timepit.refined.numeric.Interval
package object refined {
type ZeroTo[T] = Int Refined Interval.Closed[W.`0`.T, T]
type ByteBits = ZeroTo[W.`7`.T]
type CharBits = ZeroTo[W.`15`.T]
type IntBits = ZeroTo[W.`31`.T]
type LongBits = ZeroTo[W.`63`.T]
type LowerCaseChar = Char Refined LowerCase
type UpperCaseChar = Char Refined UpperCase
type StartsWithString[T <: String] = String Refined StartsWith[T]
type EndsWithString[T <: String] = String Refined EndsWith[T]
private[refined] def refinedPrism[T, P](implicit v: Validate[T, P]): Prism[T, T Refined P] =
Prism.partial[T, T Refined P] {
case t if v.isValid(t) => Refined.unsafeApply(t)
} {
_.value
}
}
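// ---------------------------------------------------------------------------
// Hedged sketch (illustrative, not part of Monocle): builds a Prism for the
// LowerCase predicate with the same Prism.partial pattern used by refinedPrism
// above, but inlines the predicate check instead of going through Validate.
// The object name and the use of Char#isLower as the check are assumptions.
// ---------------------------------------------------------------------------
object LowerCaseCharPrismDemo extends App {
  import eu.timepit.refined.api.Refined
  import eu.timepit.refined.char.LowerCase

  // Only characters satisfying the predicate "match" the prism.
  val lowerCaseChar: Prism[Char, Char Refined LowerCase] =
    Prism.partial[Char, Char Refined LowerCase] {
      case c if c.isLower => Refined.unsafeApply(c)
    }(_.value)

  println(lowerCaseChar.getOption('a')) // Some(a): satisfies LowerCase
  println(lowerCaseChar.getOption('A')) // None: rejected by the predicate
  println(lowerCaseChar.reverseGet(Refined.unsafeApply('z'))) // z
}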
|
aoiroaoino/Monocle
|
refined/src/main/scala/monocle/refined/package.scala
|
Scala
|
mit
| 918
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.api.python
import java.io.OutputStream
import java.nio.{ByteBuffer, ByteOrder}
import java.nio.charset.StandardCharsets
import java.util.{ArrayList => JArrayList, List => JList, Map => JMap}
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
import net.razorvine.pickle._
import org.apache.spark.api.java.{JavaRDD, JavaSparkContext}
import org.apache.spark.api.python.SerDeUtil
import org.apache.spark.mllib.classification._
import org.apache.spark.mllib.clustering._
import org.apache.spark.mllib.evaluation.RankingMetrics
import org.apache.spark.mllib.feature._
import org.apache.spark.mllib.fpm.{FPGrowth, FPGrowthModel, PrefixSpan}
import org.apache.spark.mllib.linalg._
import org.apache.spark.mllib.linalg.distributed._
import org.apache.spark.mllib.optimization._
import org.apache.spark.mllib.random.{RandomRDDs => RG}
import org.apache.spark.mllib.recommendation._
import org.apache.spark.mllib.regression._
import org.apache.spark.mllib.stat.{KernelDensity, MultivariateStatisticalSummary, Statistics}
import org.apache.spark.mllib.stat.correlation.CorrelationNames
import org.apache.spark.mllib.stat.distribution.MultivariateGaussian
import org.apache.spark.mllib.stat.test.{ChiSqTestResult, KolmogorovSmirnovTestResult}
import org.apache.spark.mllib.tree.{DecisionTree, GradientBoostedTrees, RandomForest}
import org.apache.spark.mllib.tree.configuration.{Algo, BoostingStrategy, Strategy}
import org.apache.spark.mllib.tree.impurity._
import org.apache.spark.mllib.tree.loss.Losses
import org.apache.spark.mllib.tree.model.{DecisionTreeModel, GradientBoostedTreesModel,
RandomForestModel}
import org.apache.spark.mllib.util.{LinearDataGenerator, MLUtils}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.types.LongType
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.Utils
/**
* The Java stubs necessary for the Python mllib bindings. This class is called by Py4J on the Python side.
*/
private[python] class PythonMLLibAPI extends Serializable {
/**
* Loads and serializes labeled points saved with `RDD#saveAsTextFile`.
* @param jsc Java SparkContext
* @param path file or directory path in any Hadoop-supported file system URI
* @param minPartitions min number of partitions
* @return serialized labeled points stored in a JavaRDD of byte array
*/
def loadLabeledPoints(
jsc: JavaSparkContext,
path: String,
minPartitions: Int): JavaRDD[LabeledPoint] =
MLUtils.loadLabeledPoints(jsc.sc, path, minPartitions)
/**
* Loads and serializes vectors saved with `RDD#saveAsTextFile`.
* @param jsc Java SparkContext
* @param path file or directory path in any Hadoop-supported file system URI
* @return serialized vectors in a RDD
*/
def loadVectors(jsc: JavaSparkContext, path: String): RDD[Vector] =
MLUtils.loadVectors(jsc.sc, path)
private def trainRegressionModel(
learner: GeneralizedLinearAlgorithm[_ <: GeneralizedLinearModel],
data: JavaRDD[LabeledPoint],
initialWeights: Vector): JList[Object] = {
try {
val model = learner.run(data.rdd.persist(StorageLevel.MEMORY_AND_DISK), initialWeights)
if (model.isInstanceOf[LogisticRegressionModel]) {
val lrModel = model.asInstanceOf[LogisticRegressionModel]
List(lrModel.weights, lrModel.intercept, lrModel.numFeatures, lrModel.numClasses)
.map(_.asInstanceOf[Object]).asJava
} else {
List(model.weights, model.intercept).map(_.asInstanceOf[Object]).asJava
}
} finally {
data.rdd.unpersist()
}
}
/**
* Return the Updater from string
*/
def getUpdaterFromString(regType: String): Updater = {
if (regType == "l2") {
new SquaredL2Updater
} else if (regType == "l1") {
new L1Updater
} else if (regType == null || regType == "none") {
new SimpleUpdater
} else {
throw new IllegalArgumentException("Invalid value for 'regType' parameter."
+ " Can only be initialized using the following string values: ['l1', 'l2', None].")
}
}
/**
* Java stub for Python mllib BisectingKMeans.run()
*/
def trainBisectingKMeans(
data: JavaRDD[Vector],
k: Int,
maxIterations: Int,
minDivisibleClusterSize: Double,
seed: java.lang.Long): BisectingKMeansModel = {
val kmeans = new BisectingKMeans()
.setK(k)
.setMaxIterations(maxIterations)
.setMinDivisibleClusterSize(minDivisibleClusterSize)
if (seed != null) kmeans.setSeed(seed)
kmeans.run(data)
}
/**
* Java stub for Python mllib LinearRegressionWithSGD.train()
*/
def trainLinearRegressionModelWithSGD(
data: JavaRDD[LabeledPoint],
numIterations: Int,
stepSize: Double,
miniBatchFraction: Double,
initialWeights: Vector,
regParam: Double,
regType: String,
intercept: Boolean,
validateData: Boolean,
convergenceTol: Double): JList[Object] = {
val lrAlg = new LinearRegressionWithSGD(1.0, 100, 0.0, 1.0)
lrAlg.setIntercept(intercept)
.setValidateData(validateData)
lrAlg.optimizer
.setNumIterations(numIterations)
.setRegParam(regParam)
.setStepSize(stepSize)
.setMiniBatchFraction(miniBatchFraction)
.setConvergenceTol(convergenceTol)
lrAlg.optimizer.setUpdater(getUpdaterFromString(regType))
trainRegressionModel(
lrAlg,
data,
initialWeights)
}
/**
* Java stub for Python mllib LassoWithSGD.train()
*/
def trainLassoModelWithSGD(
data: JavaRDD[LabeledPoint],
numIterations: Int,
stepSize: Double,
regParam: Double,
miniBatchFraction: Double,
initialWeights: Vector,
intercept: Boolean,
validateData: Boolean,
convergenceTol: Double): JList[Object] = {
val lassoAlg = new LassoWithSGD(1.0, 100, 0.01, 1.0)
lassoAlg.setIntercept(intercept)
.setValidateData(validateData)
lassoAlg.optimizer
.setNumIterations(numIterations)
.setRegParam(regParam)
.setStepSize(stepSize)
.setMiniBatchFraction(miniBatchFraction)
.setConvergenceTol(convergenceTol)
trainRegressionModel(
lassoAlg,
data,
initialWeights)
}
/**
* Java stub for Python mllib RidgeRegressionWithSGD.train()
*/
def trainRidgeModelWithSGD(
data: JavaRDD[LabeledPoint],
numIterations: Int,
stepSize: Double,
regParam: Double,
miniBatchFraction: Double,
initialWeights: Vector,
intercept: Boolean,
validateData: Boolean,
convergenceTol: Double): JList[Object] = {
val ridgeAlg = new RidgeRegressionWithSGD(1.0, 100, 0.01, 1.0)
ridgeAlg.setIntercept(intercept)
.setValidateData(validateData)
ridgeAlg.optimizer
.setNumIterations(numIterations)
.setRegParam(regParam)
.setStepSize(stepSize)
.setMiniBatchFraction(miniBatchFraction)
.setConvergenceTol(convergenceTol)
trainRegressionModel(
ridgeAlg,
data,
initialWeights)
}
/**
* Java stub for Python mllib SVMWithSGD.train()
*/
def trainSVMModelWithSGD(
data: JavaRDD[LabeledPoint],
numIterations: Int,
stepSize: Double,
regParam: Double,
miniBatchFraction: Double,
initialWeights: Vector,
regType: String,
intercept: Boolean,
validateData: Boolean,
convergenceTol: Double): JList[Object] = {
val SVMAlg = new SVMWithSGD()
SVMAlg.setIntercept(intercept)
.setValidateData(validateData)
SVMAlg.optimizer
.setNumIterations(numIterations)
.setRegParam(regParam)
.setStepSize(stepSize)
.setMiniBatchFraction(miniBatchFraction)
.setConvergenceTol(convergenceTol)
SVMAlg.optimizer.setUpdater(getUpdaterFromString(regType))
trainRegressionModel(
SVMAlg,
data,
initialWeights)
}
/**
* Java stub for Python mllib LogisticRegressionWithSGD.train()
*/
def trainLogisticRegressionModelWithSGD(
data: JavaRDD[LabeledPoint],
numIterations: Int,
stepSize: Double,
miniBatchFraction: Double,
initialWeights: Vector,
regParam: Double,
regType: String,
intercept: Boolean,
validateData: Boolean,
convergenceTol: Double): JList[Object] = {
val LogRegAlg = new LogisticRegressionWithSGD(1.0, 100, 0.01, 1.0)
LogRegAlg.setIntercept(intercept)
.setValidateData(validateData)
LogRegAlg.optimizer
.setNumIterations(numIterations)
.setRegParam(regParam)
.setStepSize(stepSize)
.setMiniBatchFraction(miniBatchFraction)
.setConvergenceTol(convergenceTol)
LogRegAlg.optimizer.setUpdater(getUpdaterFromString(regType))
trainRegressionModel(
LogRegAlg,
data,
initialWeights)
}
/**
* Java stub for Python mllib LogisticRegressionWithLBFGS.train()
*/
def trainLogisticRegressionModelWithLBFGS(
data: JavaRDD[LabeledPoint],
numIterations: Int,
initialWeights: Vector,
regParam: Double,
regType: String,
intercept: Boolean,
corrections: Int,
tolerance: Double,
validateData: Boolean,
numClasses: Int): JList[Object] = {
val LogRegAlg = new LogisticRegressionWithLBFGS()
LogRegAlg.setIntercept(intercept)
.setValidateData(validateData)
.setNumClasses(numClasses)
LogRegAlg.optimizer
.setNumIterations(numIterations)
.setRegParam(regParam)
.setNumCorrections(corrections)
.setConvergenceTol(tolerance)
LogRegAlg.optimizer.setUpdater(getUpdaterFromString(regType))
trainRegressionModel(
LogRegAlg,
data,
initialWeights)
}
/**
* Java stub for NaiveBayes.train()
*/
def trainNaiveBayesModel(
data: JavaRDD[LabeledPoint],
lambda: Double): JList[Object] = {
val model = NaiveBayes.train(data.rdd, lambda)
List(Vectors.dense(model.labels), Vectors.dense(model.pi), model.theta.map(Vectors.dense)).
map(_.asInstanceOf[Object]).asJava
}
/**
* Java stub for Python mllib IsotonicRegression.run()
*/
def trainIsotonicRegressionModel(
data: JavaRDD[Vector],
isotonic: Boolean): JList[Object] = {
val isotonicRegressionAlg = new IsotonicRegression().setIsotonic(isotonic)
val input = data.rdd.map { x =>
(x(0), x(1), x(2))
}.persist(StorageLevel.MEMORY_AND_DISK)
try {
val model = isotonicRegressionAlg.run(input)
List[AnyRef](model.boundaryVector, model.predictionVector).asJava
} finally {
data.rdd.unpersist()
}
}
/**
* Java stub for Python mllib KMeans.run()
*/
def trainKMeansModel(
data: JavaRDD[Vector],
k: Int,
maxIterations: Int,
initializationMode: String,
seed: java.lang.Long,
initializationSteps: Int,
epsilon: Double,
initialModel: java.util.ArrayList[Vector]): KMeansModel = {
val kMeansAlg = new KMeans()
.setK(k)
.setMaxIterations(maxIterations)
.setInitializationMode(initializationMode)
.setInitializationSteps(initializationSteps)
.setEpsilon(epsilon)
if (seed != null) kMeansAlg.setSeed(seed)
if (!initialModel.isEmpty()) kMeansAlg.setInitialModel(new KMeansModel(initialModel))
try {
kMeansAlg.run(data.rdd.persist(StorageLevel.MEMORY_AND_DISK))
} finally {
data.rdd.unpersist()
}
}
/**
* Java stub for Python mllib KMeansModel.computeCost()
*/
def computeCostKmeansModel(
data: JavaRDD[Vector],
centers: java.util.ArrayList[Vector]): Double = {
new KMeansModel(centers).computeCost(data)
}
/**
* Java stub for Python mllib GaussianMixture.run()
* Returns a list containing weights, mean and covariance of each mixture component.
*/
def trainGaussianMixtureModel(
data: JavaRDD[Vector],
k: Int,
convergenceTol: Double,
maxIterations: Int,
seed: java.lang.Long,
initialModelWeights: java.util.ArrayList[Double],
initialModelMu: java.util.ArrayList[Vector],
initialModelSigma: java.util.ArrayList[Matrix]): GaussianMixtureModelWrapper = {
val gmmAlg = new GaussianMixture()
.setK(k)
.setConvergenceTol(convergenceTol)
.setMaxIterations(maxIterations)
if (initialModelWeights != null && initialModelMu != null && initialModelSigma != null) {
val gaussians = initialModelMu.asScala.toSeq.zip(initialModelSigma.asScala.toSeq).map {
case (x, y) => new MultivariateGaussian(x, y)
}
val initialModel = new GaussianMixtureModel(
initialModelWeights.asScala.toArray, gaussians.toArray)
gmmAlg.setInitialModel(initialModel)
}
if (seed != null) gmmAlg.setSeed(seed)
new GaussianMixtureModelWrapper(gmmAlg.run(data.rdd))
}
/**
* Java stub for Python mllib GaussianMixtureModel.predictSoft()
*/
def predictSoftGMM(
data: JavaRDD[Vector],
wt: Vector,
mu: Array[Object],
si: Array[Object]): RDD[Vector] = {
val weight = wt.toArray
val mean = mu.map(_.asInstanceOf[DenseVector])
val sigma = si.map(_.asInstanceOf[DenseMatrix])
val gaussians = Array.tabulate(weight.length) {
i => new MultivariateGaussian(mean(i), sigma(i))
}
val model = new GaussianMixtureModel(weight, gaussians)
model.predictSoft(data).map(Vectors.dense)
}
/**
* Java stub for Python mllib PowerIterationClustering.run(). This stub returns a
* handle to the Java object instead of the content of the Java object. Extra care
* needs to be taken in the Python code to ensure it gets freed on exit; see the
* Py4J documentation.
* @param data an RDD of (i, j, s,,ij,,) tuples representing the affinity matrix.
* @param k number of clusters.
* @param maxIterations maximum number of iterations of the power iteration loop.
* @param initMode the initialization mode. This can be either "random" to use
* a random vector as vertex properties, or "degree" to use
* normalized sum similarities. Default: random.
*/
def trainPowerIterationClusteringModel(
data: JavaRDD[Vector],
k: Int,
maxIterations: Int,
initMode: String): PowerIterationClusteringModel = {
val pic = new PowerIterationClustering()
.setK(k)
.setMaxIterations(maxIterations)
.setInitializationMode(initMode)
val model = pic.run(data.rdd.map(v => (v(0).toLong, v(1).toLong, v(2))))
new PowerIterationClusteringModelWrapper(model)
}
/**
* Java stub for Python mllib ALS.train(). This stub returns a handle
* to the Java object instead of the content of the Java object. Extra care
* needs to be taken in the Python code to ensure it gets freed on exit; see
* the Py4J documentation.
*/
def trainALSModel(
ratingsJRDD: JavaRDD[Rating],
rank: Int,
iterations: Int,
lambda: Double,
blocks: Int,
nonnegative: Boolean,
seed: java.lang.Long): MatrixFactorizationModel = {
val als = new ALS()
.setRank(rank)
.setIterations(iterations)
.setLambda(lambda)
.setBlocks(blocks)
.setNonnegative(nonnegative)
if (seed != null) als.setSeed(seed)
val model = als.run(ratingsJRDD.rdd)
new MatrixFactorizationModelWrapper(model)
}
/**
* Java stub for Python mllib ALS.trainImplicit(). This stub returns a
* handle to the Java object instead of the content of the Java object.
* Extra care needs to be taken in the Python code to ensure it gets freed on
* exit; see the Py4J documentation.
*/
def trainImplicitALSModel(
ratingsJRDD: JavaRDD[Rating],
rank: Int,
iterations: Int,
lambda: Double,
blocks: Int,
alpha: Double,
nonnegative: Boolean,
seed: java.lang.Long): MatrixFactorizationModel = {
val als = new ALS()
.setImplicitPrefs(true)
.setRank(rank)
.setIterations(iterations)
.setLambda(lambda)
.setBlocks(blocks)
.setAlpha(alpha)
.setNonnegative(nonnegative)
if (seed != null) als.setSeed(seed)
val model = als.run(ratingsJRDD.rdd)
new MatrixFactorizationModelWrapper(model)
}
/**
* Java stub for Python mllib LDA.run()
*/
def trainLDAModel(
data: JavaRDD[java.util.List[Any]],
k: Int,
maxIterations: Int,
docConcentration: Double,
topicConcentration: Double,
seed: java.lang.Long,
checkpointInterval: Int,
optimizer: String): LDAModelWrapper = {
val algo = new LDA()
.setK(k)
.setMaxIterations(maxIterations)
.setDocConcentration(docConcentration)
.setTopicConcentration(topicConcentration)
.setCheckpointInterval(checkpointInterval)
.setOptimizer(optimizer)
if (seed != null) algo.setSeed(seed)
val documents = data.rdd.map(_.asScala.toArray).map { r =>
r(0) match {
case i: java.lang.Integer => (i.toLong, r(1).asInstanceOf[Vector])
case i: java.lang.Long => (i.toLong, r(1).asInstanceOf[Vector])
case _ => throw new IllegalArgumentException("input values contains invalid type value.")
}
}
val model = algo.run(documents)
new LDAModelWrapper(model)
}
/**
* Load a LDA model
*/
def loadLDAModel(jsc: JavaSparkContext, path: String): LDAModelWrapper = {
val model = DistributedLDAModel.load(jsc.sc, path)
new LDAModelWrapper(model)
}
/**
* Java stub for Python mllib FPGrowth.train(). This stub returns a handle
* to the Java object instead of the content of the Java object. Extra care
* needs to be taken in the Python code to ensure it gets freed on exit; see
* the Py4J documentation.
*/
def trainFPGrowthModel(
data: JavaRDD[java.lang.Iterable[Any]],
minSupport: Double,
numPartitions: Int): FPGrowthModel[Any] = {
val fpg = new FPGrowth(minSupport, numPartitions)
val model = fpg.run(data.rdd.map(_.asScala.toArray))
new FPGrowthModelWrapper(model)
}
/**
* Java stub for Python mllib PrefixSpan.train(). This stub returns a handle
* to the Java object instead of the content of the Java object. Extra care
* needs to be taken in the Python code to ensure it gets freed on exit; see
* the Py4J documentation.
*/
def trainPrefixSpanModel(
data: JavaRDD[java.util.ArrayList[java.util.ArrayList[Any]]],
minSupport: Double,
maxPatternLength: Int,
localProjDBSize: Int ): PrefixSpanModelWrapper = {
val prefixSpan = new PrefixSpan()
.setMinSupport(minSupport)
.setMaxPatternLength(maxPatternLength)
.setMaxLocalProjDBSize(localProjDBSize)
val trainData = data.rdd.map(_.asScala.toArray.map(_.asScala.toArray))
val model = prefixSpan.run(trainData)
new PrefixSpanModelWrapper(model)
}
/**
* Java stub for Normalizer.transform()
*/
def normalizeVector(p: Double, vector: Vector): Vector = {
new Normalizer(p).transform(vector)
}
/**
* Java stub for Normalizer.transform()
*/
def normalizeVector(p: Double, rdd: JavaRDD[Vector]): JavaRDD[Vector] = {
new Normalizer(p).transform(rdd)
}
/**
* Java stub for StandardScaler.fit(). This stub returns a
* handle to the Java object instead of the content of the Java object.
* Extra care needs to be taken in the Python code to ensure it gets freed on
* exit; see the Py4J documentation.
*/
def fitStandardScaler(
withMean: Boolean,
withStd: Boolean,
data: JavaRDD[Vector]): StandardScalerModel = {
new StandardScaler(withMean, withStd).fit(data.rdd)
}
/**
* Java stub for ChiSqSelector.fit(). This stub returns a
* handle to the Java object instead of the content of the Java object.
* Extra care needs to be taken in the Python code to ensure it gets freed on
* exit; see the Py4J documentation.
*/
def fitChiSqSelector(
selectorType: String,
numTopFeatures: Int,
percentile: Double,
fpr: Double,
fdr: Double,
fwe: Double,
data: JavaRDD[LabeledPoint]): ChiSqSelectorModel = {
new ChiSqSelector()
.setSelectorType(selectorType)
.setNumTopFeatures(numTopFeatures)
.setPercentile(percentile)
.setFpr(fpr)
.setFdr(fdr)
.setFwe(fwe)
.fit(data.rdd)
}
/**
* Java stub for PCA.fit(). This stub returns a
* handle to the Java object instead of the content of the Java object.
* Extra care needs to be taken in the Python code to ensure it gets freed on
* exit; see the Py4J documentation.
*/
def fitPCA(k: Int, data: JavaRDD[Vector]): PCAModel = {
new PCA(k).fit(data.rdd)
}
/**
* Java stub for IDF.fit(). This stub returns a
* handle to the Java object instead of the content of the Java object.
* Extra care needs to be taken in the Python code to ensure it gets freed on
* exit; see the Py4J documentation.
*/
def fitIDF(minDocFreq: Int, dataset: JavaRDD[Vector]): IDFModel = {
new IDF(minDocFreq).fit(dataset)
}
/**
* Java stub for Python mllib Word2Vec fit(). This stub returns a
* handle to the Java object instead of the content of the Java object.
* Extra care needs to be taken in the Python code to ensure it gets freed on
* exit; see the Py4J documentation.
* @param dataJRDD input JavaRDD
* @param vectorSize size of vector
* @param learningRate initial learning rate
* @param numPartitions number of partitions
* @param numIterations number of iterations
* @param seed initial seed for random generator
* @param windowSize size of window
* @return A handle to the Java Word2VecModelWrapper instance used on the Python side
*/
def trainWord2VecModel(
dataJRDD: JavaRDD[java.util.ArrayList[String]],
vectorSize: Int,
learningRate: Double,
numPartitions: Int,
numIterations: Int,
seed: java.lang.Long,
minCount: Int,
windowSize: Int): Word2VecModelWrapper = {
val word2vec = new Word2Vec()
.setVectorSize(vectorSize)
.setLearningRate(learningRate)
.setNumPartitions(numPartitions)
.setNumIterations(numIterations)
.setMinCount(minCount)
.setWindowSize(windowSize)
if (seed != null) word2vec.setSeed(seed)
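// Word2Vec makes multiple passes over the corpus (vocabulary construction plus the training
// iterations), so cache the input and release it in the finally block even if fit() fails.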
try {
val model = word2vec.fit(dataJRDD.rdd.persist(StorageLevel.MEMORY_AND_DISK_SER))
new Word2VecModelWrapper(model)
} finally {
dataJRDD.rdd.unpersist()
}
}
/**
* Java stub for Python mllib DecisionTree.train().
* This stub returns a handle to the Java object instead of the content of the Java object.
* Extra care needs to be taken in the Python code to ensure it gets freed on exit;
* see the Py4J documentation.
* @param data Training data
* @param categoricalFeaturesInfo Categorical features info, as Java map
*/
def trainDecisionTreeModel(
data: JavaRDD[LabeledPoint],
algoStr: String,
numClasses: Int,
categoricalFeaturesInfo: JMap[Int, Int],
impurityStr: String,
maxDepth: Int,
maxBins: Int,
minInstancesPerNode: Int,
minInfoGain: Double): DecisionTreeModel = {
val algo = Algo.fromString(algoStr)
val impurity = Impurities.fromString(impurityStr)
val strategy = new Strategy(
algo = algo,
impurity = impurity,
maxDepth = maxDepth,
numClasses = numClasses,
maxBins = maxBins,
categoricalFeaturesInfo = categoricalFeaturesInfo.asScala.toMap,
minInstancesPerNode = minInstancesPerNode,
minInfoGain = minInfoGain)
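// Tree induction makes multiple passes over the training data, so cache it for the duration
// of training and unpersist it in the finally block even if training throws.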
try {
DecisionTree.train(data.rdd.persist(StorageLevel.MEMORY_AND_DISK), strategy)
} finally {
data.rdd.unpersist()
}
}
/**
* Java stub for Python mllib RandomForest.train().
* This stub returns a handle to the Java object instead of the content of the Java object.
* Extra care needs to be taken in the Python code to ensure it gets freed on exit;
* see the Py4J documentation.
*/
def trainRandomForestModel(
data: JavaRDD[LabeledPoint],
algoStr: String,
numClasses: Int,
categoricalFeaturesInfo: JMap[Int, Int],
numTrees: Int,
featureSubsetStrategy: String,
impurityStr: String,
maxDepth: Int,
maxBins: Int,
seed: java.lang.Long): RandomForestModel = {
val algo = Algo.fromString(algoStr)
val impurity = Impurities.fromString(impurityStr)
val strategy = new Strategy(
algo = algo,
impurity = impurity,
maxDepth = maxDepth,
numClasses = numClasses,
maxBins = maxBins,
categoricalFeaturesInfo = categoricalFeaturesInfo.asScala.toMap)
val cached = data.rdd.persist(StorageLevel.MEMORY_AND_DISK)
// Only done because methods below want an int, not an optional Long
val intSeed = getSeedOrDefault(seed).toInt
try {
if (algo == Algo.Classification) {
RandomForest.trainClassifier(cached, strategy, numTrees, featureSubsetStrategy, intSeed)
} else {
RandomForest.trainRegressor(cached, strategy, numTrees, featureSubsetStrategy, intSeed)
}
} finally {
cached.unpersist()
}
}
/**
* Java stub for Python mllib GradientBoostedTrees.train().
* This stub returns a handle to the Java object instead of the content of the Java object.
* Extra care needs to be taken in the Python code to ensure it gets freed on exit;
* see the Py4J documentation.
*/
def trainGradientBoostedTreesModel(
data: JavaRDD[LabeledPoint],
algoStr: String,
categoricalFeaturesInfo: JMap[Int, Int],
lossStr: String,
numIterations: Int,
learningRate: Double,
maxDepth: Int,
maxBins: Int): GradientBoostedTreesModel = {
val boostingStrategy = BoostingStrategy.defaultParams(algoStr)
boostingStrategy.setLoss(Losses.fromString(lossStr))
boostingStrategy.setNumIterations(numIterations)
boostingStrategy.setLearningRate(learningRate)
boostingStrategy.treeStrategy.setMaxDepth(maxDepth)
boostingStrategy.treeStrategy.setMaxBins(maxBins)
boostingStrategy.treeStrategy.categoricalFeaturesInfo = categoricalFeaturesInfo.asScala.toMap
val cached = data.rdd.persist(StorageLevel.MEMORY_AND_DISK)
try {
GradientBoostedTrees.train(cached, boostingStrategy)
} finally {
cached.unpersist()
}
}
def elementwiseProductVector(scalingVector: Vector, vector: Vector): Vector = {
new ElementwiseProduct(scalingVector).transform(vector)
}
def elementwiseProductVector(scalingVector: Vector, vector: JavaRDD[Vector]): JavaRDD[Vector] = {
new ElementwiseProduct(scalingVector).transform(vector)
}
/**
* Java stub for mllib Statistics.colStats(X: RDD[Vector]).
* TODO figure out return type.
*/
def colStats(rdd: JavaRDD[Vector]): MultivariateStatisticalSummary = {
Statistics.colStats(rdd.rdd)
}
/**
* Java stub for mllib Statistics.corr(X: RDD[Vector], method: String).
* Returns the correlation matrix serialized into a byte array understood by deserializers in
* pyspark.
*/
def corr(x: JavaRDD[Vector], method: String): Matrix = {
Statistics.corr(x.rdd, getCorrNameOrDefault(method))
}
/**
* Java stub for mllib Statistics.corr(x: RDD[Double], y: RDD[Double], method: String).
*/
def corr(x: JavaRDD[Double], y: JavaRDD[Double], method: String): Double = {
Statistics.corr(x.rdd, y.rdd, getCorrNameOrDefault(method))
}
/**
* Java stub for mllib Statistics.chiSqTest()
*/
def chiSqTest(observed: Vector, expected: Vector): ChiSqTestResult = {
if (expected == null) {
Statistics.chiSqTest(observed)
} else {
Statistics.chiSqTest(observed, expected)
}
}
/**
* Java stub for mllib Statistics.chiSqTest(observed: Matrix)
*/
def chiSqTest(observed: Matrix): ChiSqTestResult = {
Statistics.chiSqTest(observed)
}
/**
* Java stub for mllib Statistics.chiSqTest(RDD[LabelPoint])
*/
def chiSqTest(data: JavaRDD[LabeledPoint]): Array[ChiSqTestResult] = {
Statistics.chiSqTest(data.rdd)
}
// used by the corr methods to retrieve the name of the correlation method passed in via pyspark
private def getCorrNameOrDefault(method: String) = {
if (method == null) CorrelationNames.defaultCorrName else method
}
// Used by the *RDD methods to get default seed if not passed in from pyspark
private def getSeedOrDefault(seed: java.lang.Long): Long = {
if (seed == null) Utils.random.nextLong else seed
}
// Used by *RDD methods to get default numPartitions if not passed in from pyspark
private def getNumPartitionsOrDefault(numPartitions: java.lang.Integer,
jsc: JavaSparkContext): Int = {
if (numPartitions == null) {
jsc.sc.defaultParallelism
} else {
numPartitions
}
}
// Note: for the following methods, numPartitions and seed are boxed to allow nulls to be passed
// in for either argument from pyspark
/**
* Java stub for Python mllib RandomRDDGenerators.uniformRDD()
*/
def uniformRDD(jsc: JavaSparkContext,
size: Long,
numPartitions: java.lang.Integer,
seed: java.lang.Long): JavaRDD[Double] = {
val parts = getNumPartitionsOrDefault(numPartitions, jsc)
val s = getSeedOrDefault(seed)
RG.uniformRDD(jsc.sc, size, parts, s)
}
/**
* Java stub for Python mllib RandomRDDGenerators.normalRDD()
*/
def normalRDD(jsc: JavaSparkContext,
size: Long,
numPartitions: java.lang.Integer,
seed: java.lang.Long): JavaRDD[Double] = {
val parts = getNumPartitionsOrDefault(numPartitions, jsc)
val s = getSeedOrDefault(seed)
RG.normalRDD(jsc.sc, size, parts, s)
}
/**
* Java stub for Python mllib RandomRDDGenerators.logNormalRDD()
*/
def logNormalRDD(jsc: JavaSparkContext,
mean: Double,
std: Double,
size: Long,
numPartitions: java.lang.Integer,
seed: java.lang.Long): JavaRDD[Double] = {
val parts = getNumPartitionsOrDefault(numPartitions, jsc)
val s = getSeedOrDefault(seed)
RG.logNormalRDD(jsc.sc, mean, std, size, parts, s)
}
/**
* Java stub for Python mllib RandomRDDGenerators.poissonRDD()
*/
def poissonRDD(jsc: JavaSparkContext,
mean: Double,
size: Long,
numPartitions: java.lang.Integer,
seed: java.lang.Long): JavaRDD[Double] = {
val parts = getNumPartitionsOrDefault(numPartitions, jsc)
val s = getSeedOrDefault(seed)
RG.poissonRDD(jsc.sc, mean, size, parts, s)
}
/**
* Java stub for Python mllib RandomRDDGenerators.exponentialRDD()
*/
def exponentialRDD(jsc: JavaSparkContext,
mean: Double,
size: Long,
numPartitions: java.lang.Integer,
seed: java.lang.Long): JavaRDD[Double] = {
val parts = getNumPartitionsOrDefault(numPartitions, jsc)
val s = getSeedOrDefault(seed)
RG.exponentialRDD(jsc.sc, mean, size, parts, s)
}
/**
* Java stub for Python mllib RandomRDDGenerators.gammaRDD()
*/
def gammaRDD(jsc: JavaSparkContext,
shape: Double,
scale: Double,
size: Long,
numPartitions: java.lang.Integer,
seed: java.lang.Long): JavaRDD[Double] = {
val parts = getNumPartitionsOrDefault(numPartitions, jsc)
val s = getSeedOrDefault(seed)
RG.gammaRDD(jsc.sc, shape, scale, size, parts, s)
}
/**
* Java stub for Python mllib RandomRDDGenerators.uniformVectorRDD()
*/
def uniformVectorRDD(jsc: JavaSparkContext,
numRows: Long,
numCols: Int,
numPartitions: java.lang.Integer,
seed: java.lang.Long): JavaRDD[Vector] = {
val parts = getNumPartitionsOrDefault(numPartitions, jsc)
val s = getSeedOrDefault(seed)
RG.uniformVectorRDD(jsc.sc, numRows, numCols, parts, s)
}
/**
* Java stub for Python mllib RandomRDDGenerators.normalVectorRDD()
*/
def normalVectorRDD(jsc: JavaSparkContext,
numRows: Long,
numCols: Int,
numPartitions: java.lang.Integer,
seed: java.lang.Long): JavaRDD[Vector] = {
val parts = getNumPartitionsOrDefault(numPartitions, jsc)
val s = getSeedOrDefault(seed)
RG.normalVectorRDD(jsc.sc, numRows, numCols, parts, s)
}
/**
* Java stub for Python mllib RandomRDDGenerators.logNormalVectorRDD()
*/
def logNormalVectorRDD(jsc: JavaSparkContext,
mean: Double,
std: Double,
numRows: Long,
numCols: Int,
numPartitions: java.lang.Integer,
seed: java.lang.Long): JavaRDD[Vector] = {
val parts = getNumPartitionsOrDefault(numPartitions, jsc)
val s = getSeedOrDefault(seed)
RG.logNormalVectorRDD(jsc.sc, mean, std, numRows, numCols, parts, s)
}
/**
* Java stub for Python mllib RandomRDDGenerators.poissonVectorRDD()
*/
def poissonVectorRDD(jsc: JavaSparkContext,
mean: Double,
numRows: Long,
numCols: Int,
numPartitions: java.lang.Integer,
seed: java.lang.Long): JavaRDD[Vector] = {
val parts = getNumPartitionsOrDefault(numPartitions, jsc)
val s = getSeedOrDefault(seed)
RG.poissonVectorRDD(jsc.sc, mean, numRows, numCols, parts, s)
}
/**
* Java stub for Python mllib RandomRDDGenerators.exponentialVectorRDD()
*/
def exponentialVectorRDD(jsc: JavaSparkContext,
mean: Double,
numRows: Long,
numCols: Int,
numPartitions: java.lang.Integer,
seed: java.lang.Long): JavaRDD[Vector] = {
val parts = getNumPartitionsOrDefault(numPartitions, jsc)
val s = getSeedOrDefault(seed)
RG.exponentialVectorRDD(jsc.sc, mean, numRows, numCols, parts, s)
}
/**
* Java stub for Python mllib RandomRDDGenerators.gammaVectorRDD()
*/
def gammaVectorRDD(jsc: JavaSparkContext,
shape: Double,
scale: Double,
numRows: Long,
numCols: Int,
numPartitions: java.lang.Integer,
seed: java.lang.Long): JavaRDD[Vector] = {
val parts = getNumPartitionsOrDefault(numPartitions, jsc)
val s = getSeedOrDefault(seed)
RG.gammaVectorRDD(jsc.sc, shape, scale, numRows, numCols, parts, s)
}
/**
* Java stub for the constructor of Python mllib RankingMetrics
*/
def newRankingMetrics(predictionAndLabels: DataFrame): RankingMetrics[Any] = {
new RankingMetrics(predictionAndLabels.rdd.map(
r => (r.getSeq(0).toArray[Any], r.getSeq(1).toArray[Any])))
}
/**
* Java stub for the estimate method of KernelDensity
*/
def estimateKernelDensity(
sample: JavaRDD[Double],
bandwidth: Double, points: java.util.ArrayList[Double]): Array[Double] = {
new KernelDensity().setSample(sample).setBandwidth(bandwidth).estimate(
points.asScala.toArray)
}
/**
* Java stub for the update method of StreamingKMeansModel.
*/
def updateStreamingKMeansModel(
clusterCenters: JList[Vector],
clusterWeights: JList[Double],
data: JavaRDD[Vector],
decayFactor: Double,
timeUnit: String): JList[Object] = {
val model = new StreamingKMeansModel(
clusterCenters.asScala.toArray, clusterWeights.asScala.toArray)
.update(data, decayFactor, timeUnit)
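// Pack the result for Python as a two-element Java list: the updated cluster centers, plus
// the cluster weights wrapped in a dense vector so the existing vector SerDe can carry them.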
List[AnyRef](model.clusterCenters, Vectors.dense(model.clusterWeights)).asJava
}
/**
* Wrapper around the generateLinearInput method of LinearDataGenerator.
*/
def generateLinearInputWrapper(
intercept: Double,
weights: JList[Double],
xMean: JList[Double],
xVariance: JList[Double],
nPoints: Int,
seed: Int,
eps: Double): Array[LabeledPoint] = {
LinearDataGenerator.generateLinearInput(
intercept, weights.asScala.toArray, xMean.asScala.toArray,
xVariance.asScala.toArray, nPoints, seed, eps).toArray
}
/**
* Wrapper around the generateLinearRDD method of LinearDataGenerator.
*/
def generateLinearRDDWrapper(
sc: JavaSparkContext,
nexamples: Int,
nfeatures: Int,
eps: Double,
nparts: Int,
intercept: Double): JavaRDD[LabeledPoint] = {
LinearDataGenerator.generateLinearRDD(
sc, nexamples, nfeatures, eps, nparts, intercept)
}
/**
* Java stub for Statistics.kolmogorovSmirnovTest()
*/
def kolmogorovSmirnovTest(
data: JavaRDD[Double],
distName: String,
params: JList[Double]): KolmogorovSmirnovTestResult = {
val paramsSeq = params.asScala.toSeq
Statistics.kolmogorovSmirnovTest(data, distName, paramsSeq: _*)
}
/**
* Wrapper around RowMatrix constructor.
*/
def createRowMatrix(rows: JavaRDD[Vector], numRows: Long, numCols: Int): RowMatrix = {
new RowMatrix(rows.rdd, numRows, numCols)
}
def createRowMatrix(df: DataFrame, numRows: Long, numCols: Int): RowMatrix = {
require(df.schema.length == 1 && df.schema.head.dataType.getClass == classOf[VectorUDT],
"DataFrame must have a single vector type column")
new RowMatrix(df.rdd.map { case Row(vector: Vector) => vector }, numRows, numCols)
}
/**
* Wrapper around IndexedRowMatrix constructor.
*/
def createIndexedRowMatrix(rows: DataFrame, numRows: Long, numCols: Int): IndexedRowMatrix = {
// We use DataFrames for serialization of IndexedRows from Python,
// so map each Row in the DataFrame back to an IndexedRow.
require(rows.schema.length == 2 && rows.schema.head.dataType == LongType &&
rows.schema(1).dataType.getClass == classOf[VectorUDT],
"DataFrame must consist of a long type index column and a vector type column")
val indexedRows = rows.rdd.map {
case Row(index: Long, vector: Vector) => IndexedRow(index, vector)
}
new IndexedRowMatrix(indexedRows, numRows, numCols)
}
/**
* Wrapper around CoordinateMatrix constructor.
*/
def createCoordinateMatrix(rows: DataFrame, numRows: Long, numCols: Long): CoordinateMatrix = {
// We use DataFrames for serialization of MatrixEntry entries from
// Python, so map each Row in the DataFrame back to a MatrixEntry.
val entries = rows.rdd.map {
case Row(i: Long, j: Long, value: Double) => MatrixEntry(i, j, value)
}
new CoordinateMatrix(entries, numRows, numCols)
}
/**
* Wrapper around BlockMatrix constructor.
*/
def createBlockMatrix(blocks: DataFrame, rowsPerBlock: Int, colsPerBlock: Int,
numRows: Long, numCols: Long): BlockMatrix = {
// We use DataFrames for serialization of sub-matrix blocks from
// Python, so map each Row in the DataFrame back to a
// ((blockRowIndex, blockColIndex), sub-matrix) tuple.
val blockTuples = blocks.rdd.map {
case Row(Row(blockRowIndex: Long, blockColIndex: Long), subMatrix: Matrix) =>
((blockRowIndex.toInt, blockColIndex.toInt), subMatrix)
}
new BlockMatrix(blockTuples, rowsPerBlock, colsPerBlock, numRows, numCols)
}
/**
* Return the rows of an IndexedRowMatrix.
*/
def getIndexedRows(indexedRowMatrix: IndexedRowMatrix): DataFrame = {
// We use DataFrames for serialization of IndexedRows to Python,
// so return a DataFrame.
val sc = indexedRowMatrix.rows.sparkContext
val spark = SparkSession.builder().sparkContext(sc).getOrCreate()
spark.createDataFrame(indexedRowMatrix.rows)
}
/**
* Return the entries of a CoordinateMatrix.
*/
def getMatrixEntries(coordinateMatrix: CoordinateMatrix): DataFrame = {
// We use DataFrames for serialization of MatrixEntry entries to
// Python, so return a DataFrame.
val sc = coordinateMatrix.entries.sparkContext
val spark = SparkSession.builder().sparkContext(sc).getOrCreate()
spark.createDataFrame(coordinateMatrix.entries)
}
/**
* Return the sub-matrix blocks of a BlockMatrix.
*/
def getMatrixBlocks(blockMatrix: BlockMatrix): DataFrame = {
// We use DataFrames for serialization of sub-matrix blocks to
// Python, so return a DataFrame.
val sc = blockMatrix.blocks.sparkContext
val spark = SparkSession.builder().sparkContext(sc).getOrCreate()
spark.createDataFrame(blockMatrix.blocks)
}
/**
* Python-friendly version of [[MLUtils.convertVectorColumnsToML()]].
*/
def convertVectorColumnsToML(dataset: DataFrame, cols: JArrayList[String]): DataFrame = {
MLUtils.convertVectorColumnsToML(dataset, cols.asScala.toSeq: _*)
}
/**
* Python-friendly version of [[MLUtils.convertVectorColumnsFromML()]]
*/
def convertVectorColumnsFromML(dataset: DataFrame, cols: JArrayList[String]): DataFrame = {
MLUtils.convertVectorColumnsFromML(dataset, cols.asScala.toSeq: _*)
}
/**
* Python-friendly version of [[MLUtils.convertMatrixColumnsToML()]].
*/
def convertMatrixColumnsToML(dataset: DataFrame, cols: JArrayList[String]): DataFrame = {
MLUtils.convertMatrixColumnsToML(dataset, cols.asScala.toSeq: _*)
}
/**
* Python-friendly version of [[MLUtils.convertMatrixColumnsFromML()]]
*/
def convertMatrixColumnsFromML(dataset: DataFrame, cols: JArrayList[String]): DataFrame = {
MLUtils.convertMatrixColumnsFromML(dataset, cols.asScala.toSeq: _*)
}
}
/**
* Basic SerDe utility class.
*/
private[spark] abstract class SerDeBase {
val PYSPARK_PACKAGE: String
def initialize(): Unit
/**
* Base class used for pickle
*/
private[spark] abstract class BasePickler[T: ClassTag]
extends IObjectPickler with IObjectConstructor {
private val cls = implicitly[ClassTag[T]].runtimeClass
private val module = PYSPARK_PACKAGE + "." + cls.getName.split('.')(4)
private val name = cls.getSimpleName
// register this to Pickler and Unpickler
def register(): Unit = {
Pickler.registerCustomPickler(this.getClass, this)
Pickler.registerCustomPickler(cls, this)
Unpickler.registerConstructor(module, name, this)
}
def pickle(obj: Object, out: OutputStream, pickler: Pickler): Unit = {
if (obj == this) {
out.write(Opcodes.GLOBAL)
out.write((module + "\n" + name + "\n").getBytes(StandardCharsets.UTF_8))
} else {
pickler.save(this) // it will be memoized by the Pickler
saveState(obj, out, pickler)
out.write(Opcodes.REDUCE)
}
}
private[python] def saveObjects(out: OutputStream, pickler: Pickler, objects: Any*) = {
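// The fixed-arity TUPLE1/TUPLE2/TUPLE3 opcodes pop exactly that many stack items, so a MARK
// is only needed for the variable-length TUPLE opcode used when there are 0 or more than 3
// objects to pickle.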
if (objects.length == 0 || objects.length > 3) {
out.write(Opcodes.MARK)
}
objects.foreach(pickler.save)
val code = objects.length match {
case 1 => Opcodes.TUPLE1
case 2 => Opcodes.TUPLE2
case 3 => Opcodes.TUPLE3
case _ => Opcodes.TUPLE
}
out.write(code)
}
protected def getBytes(obj: Object): Array[Byte] = {
if (obj.getClass.isArray) {
obj.asInstanceOf[Array[Byte]]
} else {
// This must be ISO 8859-1 / Latin 1, not UTF-8, to interoperate correctly
obj.asInstanceOf[String].getBytes(StandardCharsets.ISO_8859_1)
}
}
private[python] def saveState(obj: Object, out: OutputStream, pickler: Pickler): Unit
}
def dumps(obj: AnyRef): Array[Byte] = {
obj match {
// The pickler on the Python side cannot deserialize a Scala Array normally. See SPARK-12834.
case array: Array[_] => new Pickler(/* useMemo = */ true,
/* valueCompare = */ false).dumps(array.toSeq.asJava)
case _ => new Pickler(/* useMemo = */ true,
/* valueCompare = */ false).dumps(obj)
}
}
def loads(bytes: Array[Byte]): AnyRef = {
new Unpickler().loads(bytes)
}
/* convert object into Tuple */
def asTupleRDD(rdd: RDD[Array[Any]]): RDD[(Int, Int)] = {
rdd.map(x => (x(0).asInstanceOf[Int], x(1).asInstanceOf[Int]))
}
/* convert RDD[Tuple2[,]] to RDD[Array[Any]] */
def fromTuple2RDD(rdd: RDD[(Any, Any)]): RDD[Array[Any]] = {
rdd.map(x => Array(x._1, x._2))
}
/**
* Convert an RDD of Java objects to an RDD of serialized Python objects usable by PySpark.
*/
def javaToPython(jRDD: JavaRDD[Any]): JavaRDD[Array[Byte]] = {
jRDD.rdd.mapPartitions { iter =>
initialize() // ensure the picklers are registered on the executor
new SerDeUtil.AutoBatchedPickler(iter)
}
}
/**
* Convert an RDD of serialized Python objects coming from PySpark into an RDD of Java objects.
*/
def pythonToJava(pyRDD: JavaRDD[Array[Byte]], batched: Boolean): JavaRDD[Any] = {
pyRDD.rdd.mapPartitions { iter =>
initialize() // ensure the picklers are registered on the executor
val unpickle = new Unpickler
iter.flatMap { row =>
val obj = unpickle.loads(row)
if (batched) {
obj match {
case list: JArrayList[_] => list.asScala
case arr: Array[_] => arr
}
} else {
Seq(obj)
}
}
}.toJavaRDD()
}
}
/**
* SerDe utility functions for PythonMLLibAPI.
*/
private[spark] object SerDe extends SerDeBase with Serializable {
override val PYSPARK_PACKAGE = "pyspark.mllib"
// Pickler for DenseVector
private[python] class DenseVectorPickler extends BasePickler[DenseVector] {
def saveState(obj: Object, out: OutputStream, pickler: Pickler): Unit = {
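// Copy the vector values into a native-order byte buffer, emit them as one length-prefixed
// BINSTRING, and wrap that string in a 1-tuple: the single argument passed to the
// constructor registered for this pickler.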
val vector: DenseVector = obj.asInstanceOf[DenseVector]
val bytes = new Array[Byte](8 * vector.size)
val bb = ByteBuffer.wrap(bytes)
bb.order(ByteOrder.nativeOrder())
val db = bb.asDoubleBuffer()
db.put(vector.values)
out.write(Opcodes.BINSTRING)
out.write(PickleUtils.integer_to_bytes(bytes.length))
out.write(bytes)
out.write(Opcodes.TUPLE1)
}
def construct(args: Array[Object]): Object = {
if (args.length != 1) {
throw new PickleException("should be 1")
}
val bytes = getBytes(args(0))
val bb = ByteBuffer.wrap(bytes, 0, bytes.length)
bb.order(ByteOrder.nativeOrder())
val db = bb.asDoubleBuffer()
val ans = new Array[Double](bytes.length / 8)
db.get(ans)
Vectors.dense(ans)
}
}
// Pickler for DenseMatrix
private[python] class DenseMatrixPickler extends BasePickler[DenseMatrix] {
def saveState(obj: Object, out: OutputStream, pickler: Pickler): Unit = {
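// Pickle the matrix as a (numRows, numCols, raw value bytes, isTransposed) tuple; the values
// are written into a native-order byte buffer so that construct() below can read them back
// with the same byte order.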
val m: DenseMatrix = obj.asInstanceOf[DenseMatrix]
val bytes = new Array[Byte](8 * m.values.length)
val order = ByteOrder.nativeOrder()
val isTransposed = if (m.isTransposed) 1 else 0
ByteBuffer.wrap(bytes).order(order).asDoubleBuffer().put(m.values)
out.write(Opcodes.MARK)
out.write(Opcodes.BININT)
out.write(PickleUtils.integer_to_bytes(m.numRows))
out.write(Opcodes.BININT)
out.write(PickleUtils.integer_to_bytes(m.numCols))
out.write(Opcodes.BINSTRING)
out.write(PickleUtils.integer_to_bytes(bytes.length))
out.write(bytes)
out.write(Opcodes.BININT)
out.write(PickleUtils.integer_to_bytes(isTransposed))
out.write(Opcodes.TUPLE)
}
def construct(args: Array[Object]): Object = {
if (args.length != 4) {
throw new PickleException("should be 4")
}
val bytes = getBytes(args(2))
val n = bytes.length / 8
val values = new Array[Double](n)
val order = ByteOrder.nativeOrder()
ByteBuffer.wrap(bytes).order(order).asDoubleBuffer().get(values)
val isTransposed = args(3).asInstanceOf[Int] == 1
new DenseMatrix(args(0).asInstanceOf[Int], args(1).asInstanceOf[Int], values, isTransposed)
}
}
// Pickler for SparseMatrix
private[python] class SparseMatrixPickler extends BasePickler[SparseMatrix] {
def saveState(obj: Object, out: OutputStream, pickler: Pickler): Unit = {
val s = obj.asInstanceOf[SparseMatrix]
val order = ByteOrder.nativeOrder()
val colPtrsBytes = new Array[Byte](4 * s.colPtrs.length)
val indicesBytes = new Array[Byte](4 * s.rowIndices.length)
val valuesBytes = new Array[Byte](8 * s.values.length)
val isTransposed = if (s.isTransposed) 1 else 0
ByteBuffer.wrap(colPtrsBytes).order(order).asIntBuffer().put(s.colPtrs)
ByteBuffer.wrap(indicesBytes).order(order).asIntBuffer().put(s.rowIndices)
ByteBuffer.wrap(valuesBytes).order(order).asDoubleBuffer().put(s.values)
out.write(Opcodes.MARK)
out.write(Opcodes.BININT)
out.write(PickleUtils.integer_to_bytes(s.numRows))
out.write(Opcodes.BININT)
out.write(PickleUtils.integer_to_bytes(s.numCols))
out.write(Opcodes.BINSTRING)
out.write(PickleUtils.integer_to_bytes(colPtrsBytes.length))
out.write(colPtrsBytes)
out.write(Opcodes.BINSTRING)
out.write(PickleUtils.integer_to_bytes(indicesBytes.length))
out.write(indicesBytes)
out.write(Opcodes.BINSTRING)
out.write(PickleUtils.integer_to_bytes(valuesBytes.length))
out.write(valuesBytes)
out.write(Opcodes.BININT)
out.write(PickleUtils.integer_to_bytes(isTransposed))
out.write(Opcodes.TUPLE)
}
def construct(args: Array[Object]): Object = {
if (args.length != 6) {
throw new PickleException("should be 6")
}
val order = ByteOrder.nativeOrder()
val colPtrsBytes = getBytes(args(2))
val indicesBytes = getBytes(args(3))
val valuesBytes = getBytes(args(4))
val colPtrs = new Array[Int](colPtrsBytes.length / 4)
val rowIndices = new Array[Int](indicesBytes.length / 4)
val values = new Array[Double](valuesBytes.length / 8)
ByteBuffer.wrap(colPtrsBytes).order(order).asIntBuffer().get(colPtrs)
ByteBuffer.wrap(indicesBytes).order(order).asIntBuffer().get(rowIndices)
ByteBuffer.wrap(valuesBytes).order(order).asDoubleBuffer().get(values)
val isTransposed = args(5).asInstanceOf[Int] == 1
new SparseMatrix(
args(0).asInstanceOf[Int], args(1).asInstanceOf[Int], colPtrs, rowIndices, values,
isTransposed)
}
}
// Pickler for SparseVector
private[python] class SparseVectorPickler extends BasePickler[SparseVector] {
def saveState(obj: Object, out: OutputStream, pickler: Pickler): Unit = {
val v: SparseVector = obj.asInstanceOf[SparseVector]
val n = v.indices.length
val indiceBytes = new Array[Byte](4 * n)
val order = ByteOrder.nativeOrder()
ByteBuffer.wrap(indiceBytes).order(order).asIntBuffer().put(v.indices)
val valueBytes = new Array[Byte](8 * n)
ByteBuffer.wrap(valueBytes).order(order).asDoubleBuffer().put(v.values)
out.write(Opcodes.BININT)
out.write(PickleUtils.integer_to_bytes(v.size))
out.write(Opcodes.BINSTRING)
out.write(PickleUtils.integer_to_bytes(indiceBytes.length))
out.write(indiceBytes)
out.write(Opcodes.BINSTRING)
out.write(PickleUtils.integer_to_bytes(valueBytes.length))
out.write(valueBytes)
out.write(Opcodes.TUPLE3)
}
def construct(args: Array[Object]): Object = {
if (args.length != 3) {
throw new PickleException("should be 3")
}
val size = args(0).asInstanceOf[Int]
val indiceBytes = getBytes(args(1))
val valueBytes = getBytes(args(2))
val n = indiceBytes.length / 4
val indices = new Array[Int](n)
val values = new Array[Double](n)
if (n > 0) {
val order = ByteOrder.nativeOrder()
ByteBuffer.wrap(indiceBytes).order(order).asIntBuffer().get(indices)
ByteBuffer.wrap(valueBytes).order(order).asDoubleBuffer().get(values)
}
new SparseVector(size, indices, values)
}
}
// Pickler for MLlib LabeledPoint
private[python] class LabeledPointPickler extends BasePickler[LabeledPoint] {
def saveState(obj: Object, out: OutputStream, pickler: Pickler): Unit = {
val point: LabeledPoint = obj.asInstanceOf[LabeledPoint]
saveObjects(out, pickler, point.label, point.features)
}
def construct(args: Array[Object]): Object = {
if (args.length != 2) {
throw new PickleException("should be 2")
}
new LabeledPoint(args(0).asInstanceOf[Double], args(1).asInstanceOf[Vector])
}
}
// Pickler for Rating
private[python] class RatingPickler extends BasePickler[Rating] {
def saveState(obj: Object, out: OutputStream, pickler: Pickler): Unit = {
val rating: Rating = obj.asInstanceOf[Rating]
saveObjects(out, pickler, rating.user, rating.product, rating.rating)
}
def construct(args: Array[Object]): Object = {
if (args.length != 3) {
throw new PickleException("should be 3")
}
new Rating(ratingsIdCheckLong(args(0)), ratingsIdCheckLong(args(1)),
args(2).asInstanceOf[Double])
}
private def ratingsIdCheckLong(obj: Object): Int = {
try {
obj.asInstanceOf[Int]
} catch {
case ex: ClassCastException =>
throw new PickleException(s"Ratings id ${obj.toString} exceeds " +
s"max integer value of ${Int.MaxValue}", ex)
}
}
}
var initialized = false
// This should be called before trying to serialize any of the classes above
// In cluster mode, this should be put in the closure
override def initialize(): Unit = {
SerDeUtil.initialize()
synchronized {
if (!initialized) {
new DenseVectorPickler().register()
new DenseMatrixPickler().register()
new SparseMatrixPickler().register()
new SparseVectorPickler().register()
new LabeledPointPickler().register()
new RatingPickler().register()
initialized = true
}
}
}
// will not be called automatically on executors
initialize()
}
|
chuckchen/spark
|
mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
|
Scala
|
apache-2.0
| 54,281
|
package spatial.codegen.pirgen
import argon.core._
import scala.collection.mutable
import scala.util.control.NoStackTrace
trait PIRSplitting extends PIRTraversal {
class SplitException(val msg: String) extends Exception("Unable to split!")
/**
CU splitting is graph partitioning, where each partition has a limited number of inputs, outputs, and nodes
Given a set of stages, we want to find a set of minimum cuts where each partition satisfies these limits
This is more restricted/complex than the standard graph partitioning problem as nodes and edges cannot be treated uniformly:
- Reduction stages represent multiple real ALU stages in the architecture (logV + 1 ?)
- Reduction stages cannot directly consume CU inputs (requires a bypass stage)
- Reduction stages cannot directly produce CU outputs (requires a bypass stage)
- Write address computation stages MUST be preserved/copied within consumer CUs
- Local vector writes: address and data must be available in the same CU
Nodes must be duplicated/created depending on how the graph is partitioned
Could model this as conditional costs for nodes in context of their partition?
**/
def splitCU(cu: CU, archCU: CUCost, archMU: MUCost, others: Seq[CU]): List[CU] = cu.style match {
case MemoryCU => splitPMU(cu, archMU, others)
case _:FringeCU => List(cu)
case _ if cu.computeStages.isEmpty => List(cu)
case _ => splitPCU(cu, archCU, others)
}
// TODO: PMU splitting. For now just throws an exception if it doesn't fit the specified constraints
def splitPMU(cu: CU, archMU: MUCost, others: Seq[CU]): List[CU] = {
if (cu.lanes > spec.lanes) {
var errReport = s"Failed splitting in PMU $cu"
errReport += s"\nCU had ${cu.lanes} lanes, greater than allowed ${spec.lanes}"
throw new SplitException(errReport) with NoStackTrace
}
val allStages = cu.allStages.toList
val ctrl = cu.cchains.find{case _:UnitCChain | _:CChainInstance => true; case _ => false}
val partitions = mutable.ArrayBuffer[Partition]()
val current = new MUPartition(mutable.ArrayBuffer.empty, mutable.ArrayBuffer.empty, ctrl, false) //TODO
val cost = getMUCost(current, partitions, allStages, others, cu)
if (cost > archMU) {
var errReport = s"Failed splitting in PMU $cu"
errReport += s"\nRead Stages: "
current.rstages.foreach{stage => errReport += s"\n $stage" }
errReport += s"\nWrite Stages: "
current.wstages.foreach{stage => errReport += s"\n $stage" }
errReport += "\nCost for last split option: "
errReport += s"\n$cost"
throw new SplitException(errReport) with NoStackTrace
}
List(cu)
}
def splitPCU(cu: CU, arch: CUCost, others: Seq[CU]): List[CU] = dbgblk(s"splitCU($cu)"){
dbgl(s"Compute: ") {
cu.computeStages.foreach{stage => dbgs(s"$stage")}
}
val allStages = cu.allStages.toList
val ctrl = cu.cchains.find{case _:UnitCChain | _:CChainInstance => true; case _ => false}
val partitions = mutable.ArrayBuffer[CUPartition]()
var current: CUPartition = Partition.emptyCU(ctrl, true)
val remote: CUPartition = new CUPartition(cu.computeStages, ctrl, false)
def getCost(p: CUPartition): CUCost = getCUCost(p, partitions, allStages, others, cu){p => p.cstages}
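// Greedy splitting heuristic: grow the current partition from the head of the remaining
// stages until the architectural cost is exceeded, then shed stages from its tail until it
// fits again. If no non-empty partition fits, splitting fails with the error report below.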
while (remote.nonEmpty) {
dbgs(s"Computing partition ${partitions.length}")
// Bite off a chunk to maximize compute usage
current addTail remote.popHead(arch.comp)
var cost = getCost(current)
while (!(cost > arch) && remote.nonEmpty) {
//dbgs(s"Adding stages until exceeds cost")
current addTail remote.popHead()
cost = getCost(current)
}
while (cost > arch && current.nonEmpty) {
//dbgs(s"Removing stage")
//TODO: This splitting strategy depends heavily on the linear schedule of the stages.
//It's possible that a different valid linear schedule could give a much better splitting
//result.
remote addHead current.popTail()
cost = getCost(current)
}
if (current.cstages.isEmpty) {
// Find first SRAM owner which can still be spliced
var errReport = s"Failed splitting in CU $cu"
errReport += s"\nCompute stages: "
remote.cstages.foreach{stage => errReport += s"\n $stage\n" }
errReport += "Cost for last split option: "
current addTail remote.popHead()
current.cstages.foreach{stage => errReport += s"\n $stage\n"}
val cost = getCost(current)
errReport += s"Arch: \n$arch\n"
errReport += s"Cost: \n$cost\n"
throw new SplitException(errReport) with NoStackTrace
} else {
dbgs(s"Partition ${partitions.length}")
dbgs(getCost(current))
dbgs(s" Compute stages: ")
current.cstages.foreach{stage => dbgs(s" $stage") }
partitions += current
current = Partition.emptyCU(ctrl, false)
}
} // end while
val parent = if (partitions.length > 1) {
val exp = mappingOf(cu)
val parent = ComputeUnit(cu.name, StreamCU)
mappingOf(exp) = parent
parent.parent = cu.parent
parent.cchains ++= cu.cchains
parent.memMap ++= cu.memMap.filter { case (e, m) => usedMem(cu.cchains).contains(m) }
Some(parent)
}
else None
val cus = partitions.zipWithIndex.map{case (p,i) =>
scheduleCUPartition(orig = cu, p, i, parent)
}
cus.zip(partitions).zipWithIndex.foreach{case ((cu,p), i) =>
dbgs(s"Partition #$i: $cu")
val cost = getCost(p)
dbgs(s"Cost: ")
dbgs(cost)
dbgs(s"Util: ")
reportUtil(getUtil(cu, cus.filterNot(_ == cu)++others))
dbgl(s"Compute stages: ") {
cu.computeStages.foreach{stage => dbgs(s"$stage") }
}
}
parent.toList ++ cus.toList
}
def scheduleCUPartition(orig: CU, part: CUPartition, i: Int, parent: Option[CU]): CU = dbgblk(s"scheduleCUPartition(orig=$orig, part=$part, i=$i, parent=$parent)"){
val isUnit = orig.lanes == 1
val cu = ComputeUnit(orig.name+"_"+i, orig.style)
mappingOf(mappingOf(orig)) = cu
cu.parent = if (parent.isDefined) parent else orig.parent
cu.innerPar = orig.innerPar
cu.fringeGlobals ++= orig.fringeGlobals
val local = part.cstages
val remote = orig.allStages.toList diff part.allStages
val localIns = local.flatMap(_.inputMems).toSet ++ cu.cchains.flatMap(localInputs)
val localOuts = local.flatMap(_.outputMems).toSet
val readMems = localIns.collect{case MemLoad(mem) => mem }
orig.memMap.foreach{case (k,mem) => if (readMems contains mem) cu.memMap += k -> mem }
val remoteIns = remote.flatMap(_.inputMems).toSet
val remoteOuts = remote.flatMap(_.outputMems).toSet
val ctx = ComputeContext(cu)
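// For a value that has to cross the new partition boundary, pick the global bus that carries
// it: existing scalar/vector/control ports keep their bus, memory loads get data buses
// (vector for SRAMs and vector FIFOs, scalar for scalar FIFOs/buffers), address wires get
// scalar buses, and anything else gets a fresh bus sized by the isScalar hint (unit CUs
// default to scalar).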
def globalBus(reg: LocalComponent, isScalar:Option[Boolean]): GlobalBus = {
val bus = reg match {
case ScalarIn(bus) => bus
case VectorIn(bus) => bus
case ScalarOut(bus) => bus
case VectorOut(bus) => bus
case ControlIn(bus) => bus
case ControlOut(bus) => bus
case MemLoad(mem) if List(SRAMType, VectorFIFOType).contains(mem.tpe) =>
val bus = CUVector(mem.name+"_vdata", cu.innerPar)
bus
case MemLoad(mem) if List(ScalarFIFOType, ScalarBufferType).contains(mem.tpe) =>
val bus = CUScalar(mem.name+"_sdata")
bus
case WriteAddrWire(mem) =>
val bus = CUScalar(mem.name+"_waddr")
bus
case ReadAddrWire(mem) =>
val bus = CUScalar(mem.name+"_raddr")
bus
case _ =>
val bus = if (isScalar.getOrElse(isUnit)) CUScalar("bus_" + reg.id)
else CUVector("bus_" + reg.id, cu.innerPar)
bus
}
dbgs(s"globalBus: reg=$reg ${scala.runtime.ScalaRunTime._toString(reg.asInstanceOf[Product])}, bus=$bus")
bus
}
def portOut(reg: LocalComponent, isScalar:Option[Boolean] = None) = globalBus(reg, isScalar) match {
case bus:ControlBus => ControlOut(bus)
case bus:ScalarBus => ScalarOut(bus)
case bus:VectorBus => VectorOut(bus)
}
def portIn(reg: LocalComponent, isScalar:Option[Boolean] = None) = {
val bus = globalBus(reg, isScalar)
val fifo = allocateRetimingFIFO(reg, bus, cu)
MemLoad(fifo)
}
def rerefIn(reg: LocalComponent): LocalRef = {
val in = reg match {
case _:ConstReg[_] | _:CounterReg => reg
case _:ValidReg | _:ControlReg => reg
case _:ReduceMem[_] => if (localOuts.contains(reg)) reg else portIn(reg)
case MemLoad(mem) =>
assert(localIns.contains(reg), s"localIns=$localIns doesn't contains $reg")
assert(cu.mems.contains(mem), s"cu.mems=${cu.mems} doesn't contains $mem")
reg
case _ if !remoteOuts.contains(reg) | cu.regs.contains(reg) => reg
case _ if remoteOuts.contains(reg) & !cu.regs.contains(reg) => portIn(reg)
}
cu.regs += in
ctx.refIn(in)
}
def rerefOut(reg: LocalComponent): List[LocalRef] = {
val outs = reg match {
case _:ControlOut => List(reg)
case _:ScalarOut => List(reg)
case _:VectorOut => List(reg)
case _ =>
val local = if (localIns.contains(reg)) List(reg) else Nil
val global = if (remoteIns.contains(reg)) List(portOut(reg)) else Nil
local ++ global
}
cu.regs ++= outs
outs.map{out => ctx.refOut(out)}
}
// --- Reconnect remotely computed read addresses (after write stages, before compute)
/*cu.mems.foreach{ sram =>
val remoteAddr = remoteOuts.find{case ReadAddrWire(`sram`) => true; case _ => false}
val localAddr = localOuts.find{case ReadAddrWire(`sram`) => true; case _ => false}
if (remoteAddr.isDefined && localAddr.isEmpty) {
val reg = ReadAddrWire(sram)
val addrIn = portIn(reg, isUnit)
ctx.addStage(MapStage(PIRBypass, List(ctx.refIn(addrIn)), List(ctx.refOut(reg))))
cu.regs += reg
cu.regs += addrIn
}
}*/
// --- Reschedule compute stages
part.cstages.foreach{
case MapStage(op, ins, outs) =>
val inputs = ins.map{in => rerefIn(in.reg) }
val outputs = outs.flatMap{out => rerefOut(out.reg) }
ctx.addStage(MapStage(op, inputs, outputs))
case ReduceStage(op, init, in, acc, accumParent) =>
var input = rerefIn(in.reg)
if (!input.reg.isInstanceOf[ReduceMem[_]]) {
val redReg = ReduceReg()
val reduce = ctx.refOut(redReg)
cu.regs += redReg
ctx.addStage(MapStage(PIRBypass, List(input), List(reduce)))
input = ctx.refIn(redReg)
}
cu.regs += acc
val newAccumParent =
if (accumParent==orig) parent.getOrElse(cu) // Take the StreamController of the split CU,
// or the current CU if there is only a single partition
else accumParent // outer controller, which should not be split
dbgs(s"accumParent==orig ${accumParent==orig}")
dbgs(s"accumParent=${accumParent} orig=${orig} parent=${parent.map(_.name)} cu=${cu.name}")
dbgs(s"newAccumParent=${newAccumParent.name}")
ctx.addStage(ReduceStage(op, init, input, acc, newAccumParent))
if (remoteIns.contains(acc)) {
val bus = portOut(acc, Some(true))
ctx.addStage(MapStage(PIRBypass, List(ctx.refIn(acc)), List(ctx.refOut(bus))))
}
}
// --- Add bypass stages for locally hosted, remotely read SRAMs
/*val remoteSRAMReads = remoteIns.collect{case MemLoad(sram) => sram}
val localBypasses = remoteSRAMReads intersect cu.mems
localBypasses.foreach{sram =>
val reg = MemLoad(sram)
val out = portOut(reg, isUnit)
if (!cu.computeStages.flatMap(_.outputMems).contains(out)) {
ctx.addStage(MapStage(PIRBypass, List(ctx.refIn(reg)), List(ctx.refOut(out))))
cu.regs += reg
cu.regs += out
}
}*/
// --- Reconnect split feedback paths
//val rescheduledOutputs = cu.computeStages.flatMap(_.outputMems).toSet
//TODO: readPort?
/*cu.mems.foreach{sram => sram.writePort match {
case Some(LocalVectorBus) =>
val dataReg = FeedbackDataReg(sram)
val addrReg = FeedbackAddrReg(sram)
// TODO: What is the correct thing to do here?
// TODO: Will the timing still be correct?
if (!rescheduledOutputs.contains(dataReg)) {
//sram.vector = Some(globalBus(dataReg, cu.isUnit))
val in = portIn(dataReg, cu.isUnit)
ctx.addStage(MapStage(PIRBypass, List(ctx.refIn(in)), List(ctx.refOut(dataReg))))
cu.regs += in
cu.regs += dataReg
}
if (!rescheduledOutputs.contains(addrReg)) {
val in = portIn(addrReg, cu.isUnit)
ctx.addStage(MapStage(PIRBypass, List(ctx.refIn(in)), List(ctx.refOut(addrReg))))
cu.regs += in
cu.regs += dataReg
}
case _ =>
}}*/
// --- TODO: Control logic
// --- Copy counters
parent.fold { // No split
cu.cchains ++= orig.cchains
} { parent => // split
val unitCtr = CUCounter(ConstReg(0), ConstReg(1), ConstReg(cu.innerPar), par=cu.innerPar)
cu.cchains += CChainInstance(s"${cu.name}_unit", Seq(unitCtr))
val f = copyIterators(cu, parent)
def tx(cc: CUCChain): CUCChain = f.getOrElse(cc, cc)
def swap_cchain_Reg(x: LocalComponent) = x match {
case CounterReg(cc,cIdx,iter) => CounterReg(tx(cc), cIdx, iter)
case ValidReg(cc,cIdx, valid) => ValidReg(tx(cc), cIdx, valid)
case _ => x
}
def swap_cchains_Ref(x: LocalRef) = x match {
case LocalRef(i, reg) => LocalRef(i, swap_cchain_Reg(reg))
}
cu.allStages.foreach{
case stage@MapStage(_,ins,_) => stage.ins = ins.map{in => swap_cchains_Ref(in) }
case _ =>
}
cu.mems.foreach{sram =>
//sram.readAddr = sram.readAddr.map{swap_cchain_Reg(_)}
//sram.writeAddr = sram.writeAddr.map{swap_cchain_Reg(_)}
sram.readPort.transform{ case (data, addr, top) => (data, addr.map(swap_cchain_Reg), top) }
sram.writePort.transform{ case (data, addr, top) => (data, addr.map(swap_cchain_Reg), top) }
}
}
cu
}
}
|
stanford-ppl/spatial-lang
|
spatial/core/src/spatial/codegen/pirgen/PIRSplitting.scala
|
Scala
|
mit
| 14,453
|
package mesosphere.marathon.core.launchqueue.impl
import akka.actor._
import akka.event.LoggingReceive
import mesosphere.marathon.core.base.Clock
import mesosphere.marathon.core.flow.OfferReviver
import mesosphere.marathon.core.launcher.{ TaskOp, TaskOpFactory }
import mesosphere.marathon.core.launchqueue.LaunchQueue.QueuedTaskInfo
import mesosphere.marathon.core.launchqueue.LaunchQueueConfig
import mesosphere.marathon.core.launchqueue.impl.AppTaskLauncherActor.RecheckIfBackOffUntilReached
import mesosphere.marathon.core.matcher.base
import mesosphere.marathon.core.matcher.base.OfferMatcher
import mesosphere.marathon.core.matcher.base.OfferMatcher.{ MatchedTaskOps, TaskOpWithSource }
import mesosphere.marathon.core.matcher.base.util.TaskOpSourceDelegate.TaskOpNotification
import mesosphere.marathon.core.matcher.base.util.{ ActorOfferMatcher, TaskOpSourceDelegate }
import mesosphere.marathon.core.matcher.manager.OfferMatcherManager
import mesosphere.marathon.core.task.bus.TaskChangeObservables.TaskChanged
import mesosphere.marathon.core.task.tracker.TaskTracker
import mesosphere.marathon.core.task.{ Task, TaskStateChange }
import mesosphere.marathon.state.{ AppDefinition, Timestamp }
import org.apache.mesos.{ Protos => Mesos }
import scala.concurrent.duration._
private[launchqueue] object AppTaskLauncherActor {
// scalastyle:off parameter.number
def props(
config: LaunchQueueConfig,
offerMatcherManager: OfferMatcherManager,
clock: Clock,
taskOpFactory: TaskOpFactory,
maybeOfferReviver: Option[OfferReviver],
taskTracker: TaskTracker,
rateLimiterActor: ActorRef)(
app: AppDefinition,
initialCount: Int): Props = {
Props(new AppTaskLauncherActor(
config,
offerMatcherManager,
clock, taskOpFactory,
maybeOfferReviver,
taskTracker, rateLimiterActor,
app, initialCount))
}
// scalastyle:on parameter.number
sealed trait Requests
/**
* Increase the task count of the receiver.
* The actor responds with a [[QueuedTaskInfo]] message.
*/
case class AddTasks(app: AppDefinition, count: Int) extends Requests
/**
* Get the current count.
* The actor responds with a [[QueuedTaskInfo]] message.
*/
case object GetCount extends Requests
/**
* Results in rechecking whether we may launch tasks.
*/
private case object RecheckIfBackOffUntilReached extends Requests
case object Stop extends Requests
private val TASK_OP_REJECTED_TIMEOUT_REASON: String =
"AppTaskLauncherActor: no accept received within timeout. " +
"You can reconfigure the timeout with --task_operation_notification_timeout."
}
/**
* Allows processing offers for starting tasks for the given app.
*/
// scalastyle:off parameter.number
private class AppTaskLauncherActor(
config: LaunchQueueConfig,
offerMatcherManager: OfferMatcherManager,
clock: Clock,
taskOpFactory: TaskOpFactory,
maybeOfferReviver: Option[OfferReviver],
taskTracker: TaskTracker,
rateLimiterActor: ActorRef,
private[this] var app: AppDefinition,
private[this] var tasksToLaunch: Int) extends Actor with ActorLogging with Stash {
// scalastyle:on parameter.number
private[this] var inFlightTaskOperations = Map.empty[Task.Id, Cancellable]
private[this] var recheckBackOff: Option[Cancellable] = None
private[this] var backOffUntil: Option[Timestamp] = None
/** tasks that are in flight and those in the tracker */
private[this] var tasksMap: Map[Task.Id, Task] = _
/** Decorator to use this actor as a [[base.OfferMatcher#TaskOpSource]] */
private[this] val myselfAsLaunchSource = TaskOpSourceDelegate(self)
override def preStart(): Unit = {
super.preStart()
log.info("Started appTaskLaunchActor for {} version {} with initial count {}",
app.id, app.version, tasksToLaunch)
tasksMap = taskTracker.tasksByAppSync.appTasksMap(app.id).taskMap
rateLimiterActor ! RateLimiterActor.GetDelay(app)
}
override def postStop(): Unit = {
OfferMatcherRegistration.unregister()
recheckBackOff.foreach(_.cancel())
if (inFlightTaskOperations.nonEmpty) {
log.warning("Actor shutdown but still some tasks in flight: {}", inFlightTaskOperations.keys.mkString(", "))
inFlightTaskOperations.values.foreach(_.cancel())
}
super.postStop()
log.info("Stopped appTaskLaunchActor for {} version {}", app.id, app.version)
}
override def receive: Receive = waitForInitialDelay
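// Lifecycle: the actor starts in waitForInitialDelay, stashing messages until the
// RateLimiterActor answers with the backoff delay for this app, then replays them and
// becomes `active`. A Stop request later switches it to `stopping`, where it waits for
// in-flight task operations to be confirmed or rejected before terminating.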
private[this] def waitForInitialDelay: Receive = LoggingReceive.withLabel("waitingForInitialDelay") {
case RateLimiterActor.DelayUpdate(delayApp, delayUntil) if delayApp == app =>
stash()
unstashAll()
context.become(active)
case msg @ RateLimiterActor.DelayUpdate(delayApp, delayUntil) if delayApp != app =>
log.warning("Received delay update for other app: {}", msg)
case message: Any => stash()
}
private[this] def active: Receive = LoggingReceive.withLabel("active") {
Seq(
receiveStop,
receiveDelayUpdate,
receiveTaskLaunchNotification,
receiveTaskUpdate,
receiveGetCurrentCount,
receiveAddCount,
receiveProcessOffers,
receiveUnknown
).reduce(_.orElse[Any, Unit](_))
}
private[this] def stopping: Receive = LoggingReceive.withLabel("stopping") {
Seq(
receiveStop,
receiveWaitingForInFlight,
receiveUnknown
).reduce(_.orElse[Any, Unit](_))
}
private[this] def receiveWaitingForInFlight: Receive = {
case notification: TaskOpNotification =>
receiveTaskLaunchNotification(notification)
waitForInFlightIfNecessary()
case AppTaskLauncherActor.Stop => // ignore, already stopping
case "waitingForInFlight" => sender() ! "waitingForInFlight" // for testing
}
private[this] def receiveUnknown: Receive = {
case msg: Any =>
// fail fast and do not let the sender time out
sender() ! Status.Failure(new IllegalStateException(s"Unhandled message: $msg"))
}
private[this] def receiveStop: Receive = {
case AppTaskLauncherActor.Stop =>
if (inFlightTaskOperations.nonEmpty) {
// try to stop gracefully but also schedule timeout
import context.dispatcher
log.info("schedule timeout for stopping in " + config.taskOpNotificationTimeout().milliseconds)
context.system.scheduler.scheduleOnce(config.taskOpNotificationTimeout().milliseconds, self, PoisonPill)
}
waitForInFlightIfNecessary()
}
private[this] def waitForInFlightIfNecessary(): Unit = {
if (inFlightTaskOperations.isEmpty) {
context.stop(self)
}
else {
val taskIds = inFlightTaskOperations.keys.take(3).mkString(", ")
log.info(
s"Stopping but still waiting for ${inFlightTaskOperations.size} in-flight messages, " +
s"first three task ids: $taskIds"
)
context.become(stopping)
}
}
/**
* Receive rate limiter updates.
*/
private[this] def receiveDelayUpdate: Receive = {
case RateLimiterActor.DelayUpdate(delayApp, delayUntil) if delayApp == app =>
if (backOffUntil != Some(delayUntil)) {
backOffUntil = Some(delayUntil)
recheckBackOff.foreach(_.cancel())
recheckBackOff = None
val now: Timestamp = clock.now()
if (backOffUntil.exists(_ > now)) {
import context.dispatcher
recheckBackOff = Some(
context.system.scheduler.scheduleOnce(now until delayUntil, self, RecheckIfBackOffUntilReached)
)
}
OfferMatcherRegistration.manageOfferMatcherStatus()
}
log.debug("After delay update {}", status)
case msg @ RateLimiterActor.DelayUpdate(delayApp, delayUntil) if delayApp != app =>
log.warning("Received delay update for other app: {}", msg)
case RecheckIfBackOffUntilReached => OfferMatcherRegistration.manageOfferMatcherStatus()
}
private[this] def receiveTaskLaunchNotification: Receive = {
case TaskOpSourceDelegate.TaskOpRejected(op, reason) if inFlight(op) =>
removeTask(op.taskId)
log.info("Task op '{}' for {} was REJECTED, reason '{}', rescheduling. {}",
op.getClass.getSimpleName, op.taskId, reason, status)
op match {
// only increment for launch ops, not for reservations:
case _: TaskOp.Launch => tasksToLaunch += 1
case _ => ()
}
OfferMatcherRegistration.manageOfferMatcherStatus()
case TaskOpSourceDelegate.TaskOpRejected(op, AppTaskLauncherActor.TASK_OP_REJECTED_TIMEOUT_REASON) =>
// This is a message that we scheduled in this actor.
// When we receive a launch confirmation or rejection, we cancel this timer but
// there is still a race and we might send ourselves the message nevertheless, so we just
// ignore it here.
log.debug("Unnecessary timeout message. Ignoring task launch rejected for task id '{}'.", op.taskId)
case TaskOpSourceDelegate.TaskOpRejected(op, reason) =>
log.warning("Unexpected task op '{}' rejected for {}.", op.getClass.getSimpleName, op.taskId)
case TaskOpSourceDelegate.TaskOpAccepted(op) =>
inFlightTaskOperations -= op.taskId
log.info("Task op '{}' for {} was accepted. {}", op.getClass.getSimpleName, op.taskId, status)
}
private[this] def receiveTaskUpdate: Receive = {
case TaskChanged(stateOp, stateChange) =>
stateChange match {
case TaskStateChange.Update(newState, _) =>
log.info("receiveTaskUpdate: updating status of {}", newState.taskId)
tasksMap += newState.taskId -> newState
case TaskStateChange.Expunge(task) =>
log.info("receiveTaskUpdate: {} finished", task.taskId)
removeTask(task.taskId)
// A) If the app has constraints, we need to reconsider offers that
// we already rejected. E.g. when a host:unique constraint prevented
// us from launching tasks on a particular node before, we need to reconsider offers
// of that node after a task on that node has died.
//
// B) If a reservation timed out, already rejected offers might become eligible for creating new reservations.
if (app.constraints.nonEmpty || (app.isResident && shouldLaunchTasks)) {
maybeOfferReviver.foreach(_.reviveOffers())
}
case _ =>
log.info("receiveTaskUpdate: ignoring stateChange {}", stateChange)
}
replyWithQueuedTaskCount()
}
private[this] def removeTask(taskId: Task.Id): Unit = {
inFlightTaskOperations.get(taskId).foreach(_.cancel())
inFlightTaskOperations -= taskId
tasksMap -= taskId
}
private[this] def receiveGetCurrentCount: Receive = {
case AppTaskLauncherActor.GetCount =>
replyWithQueuedTaskCount()
}
private[this] def receiveAddCount: Receive = {
case AppTaskLauncherActor.AddTasks(newApp, addCount) =>
val configChange = app.isUpgrade(newApp)
if (configChange || app.needsRestart(newApp) || app.isOnlyScaleChange(newApp)) {
app = newApp
tasksToLaunch = addCount
if (configChange) {
log.info(
"getting new app definition config for '{}', version {} with {} initial tasks",
app.id, app.version, addCount
)
suspendMatchingUntilWeGetBackoffDelayUpdate()
}
else {
log.info(
"scaling change for '{}', version {} with {} initial tasks",
app.id, app.version, addCount
)
}
}
else {
tasksToLaunch += addCount
}
OfferMatcherRegistration.manageOfferMatcherStatus()
replyWithQueuedTaskCount()
}
private[this] def suspendMatchingUntilWeGetBackoffDelayUpdate(): Unit = {
// signal no interest in new offers until we get the back off delay.
// this makes sure that we see unused offers again that we rejected for the old configuration.
OfferMatcherRegistration.unregister()
// get new back off delay, don't do anything until we get that.
backOffUntil = None
rateLimiterActor ! RateLimiterActor.GetDelay(app)
context.become(waitForInitialDelay)
}
private[this] def replyWithQueuedTaskCount(): Unit = {
val tasksLaunched = tasksMap.values.count(_.launched.isDefined)
val taskLaunchesInFlight = inFlightTaskOperations.keys
.count(taskId => tasksMap.get(taskId).exists(_.launched.isDefined))
sender() ! QueuedTaskInfo(
app,
inProgress = tasksToLaunch > 0 || inFlightTaskOperations.nonEmpty,
tasksLeftToLaunch = tasksToLaunch,
finalTaskCount = tasksToLaunch + taskLaunchesInFlight + tasksLaunched,
backOffUntil.getOrElse(clock.now())
)
}
private[this] def receiveProcessOffers: Receive = {
case ActorOfferMatcher.MatchOffer(deadline, offer) if clock.now() >= deadline || !shouldLaunchTasks =>
val deadlineReached = clock.now() >= deadline
log.debug("ignoring offer, offer deadline {}reached. {}", if (deadlineReached) "" else "NOT ", status)
sender ! MatchedTaskOps(offer.getId, Seq.empty)
case ActorOfferMatcher.MatchOffer(deadline, offer) =>
val matchRequest = TaskOpFactory.Request(app, offer, tasksMap, tasksToLaunch)
val taskOp: Option[TaskOp] = taskOpFactory.buildTaskOp(matchRequest)
taskOp match {
case Some(op) => handleTaskOp(op, offer)
case None => sender() ! MatchedTaskOps(offer.getId, Seq.empty)
}
}
private[this] def handleTaskOp(taskOp: TaskOp, offer: Mesos.Offer): Unit = {
def updateActorState(): Unit = {
val taskId = taskOp.taskId
taskOp match {
// only decrement for launched tasks, not for reservations:
case _: TaskOp.Launch => tasksToLaunch -= 1
case _ => ()
}
// We will receive the updated task once it's been persisted. Before that,
// we can only store the possible state, as we don't have the updated task
// yet.
taskOp.stateOp.possibleNewState.foreach { newState =>
tasksMap += taskId -> newState
scheduleTaskOpTimeout(taskOp)
}
OfferMatcherRegistration.manageOfferMatcherStatus()
}
log.info("Request {} for task '{}', version '{}'. {}",
taskOp.getClass.getSimpleName, taskOp.taskId.idString, app.version, status)
updateActorState()
sender() ! MatchedTaskOps(offer.getId, Seq(TaskOpWithSource(myselfAsLaunchSource, taskOp)))
}
private[this] def scheduleTaskOpTimeout(taskOp: TaskOp): Unit = {
val reject = TaskOpSourceDelegate.TaskOpRejected(
taskOp, AppTaskLauncherActor.TASK_OP_REJECTED_TIMEOUT_REASON
)
val cancellable = scheduleTaskOperationTimeout(context, reject)
inFlightTaskOperations += taskOp.taskId -> cancellable
}
private[this] def inFlight(task: TaskOp): Boolean = inFlightTaskOperations.contains(task.taskId)
protected def scheduleTaskOperationTimeout(
context: ActorContext,
message: TaskOpSourceDelegate.TaskOpRejected): Cancellable =
{
import context.dispatcher
context.system.scheduler.scheduleOnce(config.taskOpNotificationTimeout().milliseconds, self, message)
}
private[this] def backoffActive: Boolean = backOffUntil.forall(_ > clock.now())
private[this] def shouldLaunchTasks: Boolean = tasksToLaunch > 0 && !backoffActive
private[this] def status: String = {
val backoffStr = backOffUntil match {
case Some(until) if until > clock.now() => s"currently waiting for backoff($until)"
case _ => "not backing off"
}
val inFlight = inFlightTaskOperations.size
val tasksLaunchedOrRunning = tasksMap.values.count(_.launched.isDefined) - inFlight
val instanceCountDelta = tasksMap.size + tasksToLaunch - app.instances
val matchInstanceStr = if (instanceCountDelta == 0) "" else s"instance count delta $instanceCountDelta."
s"$tasksToLaunch tasksToLaunch, $inFlight in flight, " +
s"$tasksLaunchedOrRunning confirmed. $matchInstanceStr $backoffStr"
}
/** Manage registering this actor as offer matcher. Only register it if tasksToLaunch > 0. */
private[this] object OfferMatcherRegistration {
private[this] val myselfAsOfferMatcher: OfferMatcher = {
// set the precedence only if this app is resident
new ActorOfferMatcher(clock, self, app.residency.map(_ => app.id))
}
private[this] var registeredAsMatcher = false
/** Register/unregister as necessary */
def manageOfferMatcherStatus(): Unit = {
val shouldBeRegistered = shouldLaunchTasks
if (shouldBeRegistered && !registeredAsMatcher) {
log.debug("Registering for {}, {}.", app.id, app.version)
offerMatcherManager.addSubscription(myselfAsOfferMatcher)(context.dispatcher)
registeredAsMatcher = true
}
else if (!shouldBeRegistered && registeredAsMatcher) {
if (tasksToLaunch > 0) {
log.info("Backing off due to task failures. Stop receiving offers for {}, {}", app.id, app.version)
}
else {
log.info("No tasks left to launch. Stop receiving offers for {}, {}", app.id, app.version)
}
offerMatcherManager.removeSubscription(myselfAsOfferMatcher)(context.dispatcher)
registeredAsMatcher = false
}
}
def unregister(): Unit = {
if (registeredAsMatcher) {
log.info("Deregister as matcher.")
offerMatcherManager.removeSubscription(myselfAsOfferMatcher)(context.dispatcher)
registeredAsMatcher = false
}
}
}
}
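// Hedged sketch (not part of the original actor, values are illustrative): how OfferMatcherRegistration
// reacts to state changes via manageOfferMatcherStatus():
//   tasksToLaunch = 2, no backoff   -> matcher is registered, offers start arriving
//   backoff delay becomes active    -> shouldLaunchTasks is false, matcher is removed ("Backing off ...")
//   all tasks launched/confirmed    -> tasksToLaunch = 0, matcher is removed ("No tasks left to launch ...")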
|
titosand/marathon
|
src/main/scala/mesosphere/marathon/core/launchqueue/impl/AppTaskLauncherActor.scala
|
Scala
|
apache-2.0
| 17,604
|
package com.sksamuel.scapegoat.inspections.nulls
import com.sksamuel.scapegoat._
/** @author Stephen Samuel */
class NullParameter extends Inspection {
def inspector(context: InspectionContext): Inspector = new Inspector(context) {
override def postTyperTraverser = Some apply new context.Traverser {
import context.global._
def containsNull(trees: List[Tree]) = trees exists {
case Literal(Constant(null)) => true
case _ => false
}
override def inspect(tree: Tree): Unit = {
tree match {
case Apply(_, _) if tree.tpe.toString == "scala.xml.Elem" =>
case Apply(_, args) =>
if (containsNull(args))
warn(tree)
case DefDef(mods, _, _, _, _, _) if mods.hasFlag(Flag.SYNTHETIC) =>
case _ => continue(tree)
}
}
private def warn(tree: Tree): Unit = {
context.warn("Null parameter",
tree.pos,
Levels.Warning,
"Null is used as a method parameter: " + tree.toString().take(300),
NullParameter.this)
}
}
}
}
|
pwwpche/scalac-scapegoat-plugin
|
src/main/scala/com/sksamuel/scapegoat/inspections/nulls/NullParameter.scala
|
Scala
|
apache-2.0
| 1,116
|
/* Copyright 2015 Mario Pastorelli (pastorelli.mario@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package purecsv.unsafe.converter
import java.util.UUID
import purecsv.unsafe.converter.defaults.rawfields._
import purecsv.unsafe.converter.defaults.string._
import purecsv.util.serializeAndDeserialize
import org.scalatest.{FunSuite, Matchers}
import shapeless.{::, Generic, HNil}
case class Event(ts: Long, msg: String)
class ConverterSuite extends FunSuite with Matchers {
test("conversion String <-> Boolean works") {
StringConverter[Boolean].to(true) should be ("true")
StringConverter[Boolean].from("false") should be (false)
StringConverter[Boolean].from("1") should be (true)
StringConverter[Boolean].from("TRUE") should be (true)
}
test("conversion String <-> UUID works") {
val uuid = UUID.randomUUID()
StringConverter[UUID].to(uuid) should be (uuid.toString)
StringConverter[UUID].from(uuid.toString) should be (uuid)
StringConverter[UUID].from(uuid.toString.toLowerCase) should be (uuid)
StringConverter[UUID].from(uuid.toString.toUpperCase) should be (uuid)
}
test("conversion HNil <-> String works") {
RawFieldsConverter[HNil].to(HNil) should contain theSameElementsInOrderAs (Seq.empty)
RawFieldsConverter[HNil].from(Seq.empty) should be (HNil)
}
test("conversion HList <-> String works") {
val conv = RawFieldsConverter[String :: Int :: HNil]
conv.to("test" :: 1 :: HNil) should contain theSameElementsInOrderAs (Seq("\"test\"","1"))
conv.from(Seq("foo","2")) should be ("foo" :: 2 :: HNil)
}
test("conversion case class <-> String works") {
val conv = RawFieldsConverter[Event]
conv.to(Event(1,"foobar")) should contain theSameElementsInOrderAs(Seq("1","\"foobar\""))
conv.from(Seq("2","barfoo")) should be (Event(2,"barfoo"))
}
class Event2(val ts: Long, var msg: String) {
override def equals(o: Any): Boolean = o match {
case other:Event2 => (this.ts == other.ts && this.msg == other.msg)
case _ => false
}
override def toString: String = s"Event($ts, $msg)"
}
implicit val fooGeneric = new Generic[Event2] {
override type Repr = Long :: String :: HNil
override def from(r: Repr): Event2 = {
val ts :: msg :: HNil = r
new Event2(ts, msg)
}
override def to(t: Event2): Repr = t.ts :: t.msg :: HNil
}
test("conversion class with custom Generic <-> String works") {
val conv = RawFieldsConverter[Event2]
conv.to(new Event2(1,"foo")) should contain theSameElementsInOrderAs(Seq("1","\"foo\""))
conv.from(Seq("2","bar")) should be (new Event2(2,"bar"))
// Strings are quoted
val event = new Event2(1,"foo")
val expectedEvent = new Event2(1, "\"foo\"")
conv.from(conv.to(event)) should be (expectedEvent)
}
test("serializing a RawFieldsConverter should work") {
val conv = RawFieldsConverter[Event]
val convDeserialized = serializeAndDeserialize(conv)
convDeserialized.to(Event(1,"foobar")) should contain theSameElementsInOrderAs(Seq("1","\"foobar\""))
convDeserialized.from(Seq("2","barfoo")) should be (Event(2,"barfoo"))
}
}
|
melrief/PureCSV
|
shared/src/test/scala/purecsv/unsafe/converter/ConverterSuite.scala
|
Scala
|
apache-2.0
| 3,702
|
package lsh
import edu.berkeley.cs.amplab.spark.indexedrdd.IndexedRDD
import edu.berkeley.cs.amplab.spark.indexedrdd.IndexedRDD._
import org.apache.hadoop.fs.Path
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.util.{Loader, Saveable}
import org.apache.spark.rdd.RDD
import org.apache.spark.{Logging, SparkContext}
import org.json4s.JsonDSL._
import org.json4s._
import org.json4s.jackson.JsonMethods._
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.util.Random
/**
*
* @param numHashFunctions number of hash functions
* @param numHashTables number of hash tables
* @param dimension dimension of input data
* @param binLength length of bins
*/
class LSHModel(val numHashFunctions: Int, val numHashTables: Int, val dimension: Int, val binLength: Double)
extends Serializable with Saveable {
val bits = mutable.BitSet.empty
/** only compute k/2 * m functions to reduce time complexity of hash function computation */
val m = ((1 + Math.sqrt(1 + numHashTables << 3)) / 2).toInt
val halfK = numHashFunctions >>> 1
println(s"m:$m")
private val _hashFunctions = ListBuffer[PStableHasher]()
// private val _hashFunctions = ListBuffer[CosineHasher]()
// for (i <- 0 to numHashFunctions * numHashTables - 1)
for (i <- 0 until halfK * m)
_hashFunctions += PStableHasher(dimension, binLength)
// _hashFunctions += CosineHasher(dimension)
final var hashFunctions: List[(PStableHasher, Long)] = _hashFunctions.toList.zipWithIndex.map(x => (x._1, x._2.toLong))
// final var hashFunctions: List[(CosineHasher, Long)] = _hashFunctions.toList.zipWithIndex.map(x => (x._1, x._2.toLong))
/** hashTables ((tableId, hashKey), vectorId) */
// var hashTables: IndexedRDD[String, List[Long]] = null
var hashTables: RDD[(String, List[Long])] = null
/** computes hash value for a vector in each hashTable. Array(tableID, binID) */
def hashValue(data: Vector): Array[String] = {
val values = hashFunctions.map(f => (f._2 % m, f._1.hash(data)))
.groupBy(_._1)
.map(a => (a._1, a._2.map(_._2).mkString(""))).toList
var result = ListBuffer[String]()
for (i <- values.indices) {
for (j <- (i + 1) until values.length) {
result += (values(i)._2 + values(j)._2)
}
}
result.toArray.zipWithIndex.map(x => x._2 + x._1)
// result.toList.zipWithIndex.map(x => (x._2 + x._1).hashCode.toLong)
// result.toList.zipWithIndex.map(x => Hasher.FNVHash1(x._2 + x._1))
}
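// Worked example (assumed values, not from the original source): with numHashTables = 10,
// m = (1 + sqrt(1 + 80)) / 2 = 5, so hashValue groups the halfK * m hash values into m half-keys
// and concatenates the C(5, 2) = 10 unordered pairs, one key per hash table; the final map above
// prepends the table index to each concatenated key.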
/** returns candidates for a given vector with duplicates
*
* @param vec query point(vector)
* @return candidate points of vec
* */
def getCandidates0(vec: Vector): Array[Long] = {
val buckets = hashValue(vec)
val candidates = hashTables.asInstanceOf[IndexedRDD[String, List[Long]]].multiget(buckets).flatMap(x => x._2).toArray.distinct
candidates
}
def getCandidates1(vec: Vector): Array[Long] = {
val buckets = hashValue(vec)
val candidates = hashTables.filter(x => buckets.contains(x._1)).flatMap(x => x._2).distinct().collect()
candidates
}
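// Note: getCandidates0 assumes hashTables was built as an IndexedRDD (the cast fails otherwise)
// and does point lookups, while getCandidates1 works on any RDD but filters and collects all
// matching buckets.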
override def save(sc: SparkContext, path: String): Unit = {
LSHModel.SaveLoadV0_0_1.save(sc, this, path)
}
override protected def formatVersion: String = "0.1"
}
object LSHModel extends Loader[LSHModel] with Logging {
def load(sc: SparkContext, path: String): LSHModel = {
LSHModel.SaveLoadV0_0_1.load(sc, path)
}
private[lsh] object SaveLoadV0_0_1 {
// private val thisFormatVersion = "0.0.1"
// private val thisClassName = this.getClass.getName()
def thisFormatVersion: String = "0.1"
def thisClassName: String = this.getClass.getName
def save(sc: SparkContext, model: LSHModel, path: String): Unit = {
// save metadata in json format
val metadata =
compact(render(
("class" -> thisClassName)
~ ("version" -> thisFormatVersion)
~ ("numHashFunctions" -> model.numHashFunctions)
~ ("numHashTables" -> model.numHashTables)
~ ("dimension" -> model.dimension)
~ ("binLength" -> model.binLength)))
//save metadata info
sc.parallelize(Seq(metadata), 1).saveAsTextFile(Loader.metadataPath(path))
//save hash functions as (functionIndex, RandomVectorA, RandomNumberB, binLength, dimension)
sc.parallelize(model.hashFunctions
.map(f => (f._2, f._1.a.toArray.mkString(","), f._1.b))) //(table#, a, b)
// .map(f => (f._2, f._1.r.mkString(",")))) //(table#, a, b)
.map(_.productIterator.mkString("\t"))
.saveAsTextFile(Loader.hasherPath(path))
//save data as (hashTableId#+hashValue, vectorId)
model.hashTables
.map(x => x._1 + "\t" + x._2.mkString(","))
// .map(x => x._1 + "\t" + x._2.productIterator.mkString("\t"))
// .map(_.productIterator.mkString("\t"))
.saveAsTextFile(Loader.dataPath(path))
}
def load(sc: SparkContext, path: String): LSHModel = {
implicit val formats = DefaultFormats
val (className, formatVersion, numHashFunctions, numHashTables, dimension, binLength, metadata) = Loader.loadMetadata(sc, path)
assert(className == thisClassName)
assert(formatVersion == thisFormatVersion)
assert(numHashTables != 0, s"Loaded hashTables are empty")
assert(numHashFunctions != 0, s"Loaded hashFunctions are empty")
assert(dimension != 0, s"Loaded dimension is 0")
assert(binLength != 0, s"Loaded binLength is 0")
//load hashTables
// val hashTables_ = IndexedRDD(sc.textFile(Loader.dataPath(path), 256)
// .map(x => x.split("\t"))
//// .map(x => (x(0).toLong, x(1).split(",").map(a => (x(0).toLong, a.toLong))))
//// .flatMap(_._2))
// .map(x => (x(0), x(1).split(",").map(_.toLong).toList)))
val hashTables_ = sc.textFile(Loader.dataPath(path), 128)
.map(x => x.split("\t"))
// .map(x => (x(0).toLong, x(1).split(",").map(a => (x(0).toLong, a.toLong))))
// .flatMap(_._2))
.map(x => (x(0), x(1).split(",").map(_.toLong).toList))
//load hashFunctions
val hashFunctions_ = sc.textFile(Loader.hasherPath(path))
.map(a => a.split("\t"))
.map { x =>
val functionIndex = x(0).toLong
val a = Vectors.dense(x(1).split(",").map(y => y.toDouble))
val b = x(2).toDouble
// val r = x(1).split(",").map(_.toDouble)
val hasher = new PStableHasher(a, b, binLength)
// val hasher = new CosineHasher(r)
(hasher, functionIndex)
}.collect()
// compute parameters for LSHModel according to hashTables and hashFunctions
// val numHashTables: Int = hashTables.map(x => x._1._1).distinct().count().toInt
// val numHashTables: Int = hashTables.map(x => x._1).distinct().count().toInt
// val numHashFunctions: Int = hashFunctions.length / numHashTables
// val dimensions = hashFunctions.head._1.a.size
// val binLength = hashFunctions.head._1.w
//Validate loaded data
//check size of data
//check hashValue size. Should be equal to numHashFunc
// val hashKeyLength = hashTables.collect().head._1._2.length
// val hashKey1 = hashTables.collect().head._1._2
// println(s"numhashFunctions: ${numHashFunctions}, hahsKey: ${hashKey1} hashKeyLength: ${hashKeyLength}")
// assert(hashTables.map(x => x._1._2).filter(x => x.size != numHashFunctions).collect().size == 0,
// s"hashValues in data does not match with hash functions")
//create model
val model = new LSHModel(numHashFunctions, numHashTables, dimension, binLength)
model.hashFunctions = hashFunctions_.toList
model.hashTables = hashTables_
model
}
}
/** transform an array to a sparse vector
* @deprecated
* */
def ArrayToSparseVector(arr: Array[Double]): Vector = {
val size = arr.length
val arrWithIndex = arr.zipWithIndex
val index: Array[Int] = arrWithIndex.filter(_._1 != 0).unzip._2.toArray
val number: Array[Double] = arrWithIndex.filter(_._1 != 0).unzip._1.toArray
Vectors.sparse(size, index, number)
}
}
/** Helper functions for save/load data from mllib package.
* TODO: Remove and use Loader functions from mllib. */
private[lsh] object Loader {
/** Returns URI for path/data using the Hadoop filesystem */
def dataPath(path: String): String = new Path(path, "hashTables").toUri.toString
/** Returns URI for path/metadata using the Hadoop filesystem */
def metadataPath(path: String): String = new Path(path, "metadata").toUri.toString
/** Returns URI for path/metadata using the Hadoop filesystem */
def hasherPath(path: String): String = new Path(path, "hashFunctions").toUri.toString
/**
* Load metadata from the given path.
* @return (class name, version, metadata)
*/
def loadMetadata(sc: SparkContext, path: String): (String, String, Int, Int, Int, Double, JValue) = {
implicit val formats = DefaultFormats
val metadata = parse(sc.textFile(metadataPath(path)).first()) // parse the first line of the metadata file into a JSON value
val clazz = (metadata \ "class").extract[String]
val version = (metadata \ "version").extract[String]
val numHashFunctions = (metadata \ "numHashFunctions").extract[Int]
val numHashTables = (metadata \ "numHashTables").extract[Int]
val dimension = (metadata \ "dimension").extract[Int]
val binLength = (metadata \ "binLength").extract[Double]
(clazz, version, numHashFunctions, numHashTables, dimension, binLength, metadata)
}
}
|
KevinZwx/SES-LSH
|
main/scala/lsh/LSHModel.scala
|
Scala
|
gpl-2.0
| 9,719
|
/*
* Wire
* Copyright (C) 2016 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.waz.sync
import com.waz.api.IConversation.{Access, AccessRole}
import com.waz.api.NetworkMode
import com.waz.content.{UserPreferences, UsersStorage}
import com.waz.content.UserPreferences.{ShouldSyncConversations, ShouldSyncInitial}
import com.waz.log.BasicLogging.LogTag.DerivedLogTag
import com.waz.model.UserData.ConnectionStatus
import com.waz.model.otr.ClientId
import com.waz.model.sync.SyncJob.Priority
import com.waz.model.sync._
import com.waz.model.{AccentColor, Availability, _}
import com.waz.service._
import com.waz.service.assets2.UploadAssetStatus
import com.waz.sync.SyncResult.Failure
import com.waz.threading.Threading
import org.threeten.bp.Instant
import com.waz.log.LogSE._
import scala.concurrent.Future
import scala.concurrent.duration._
trait SyncServiceHandle {
def syncSearchQuery(query: SearchQuery): Future[SyncId]
def exactMatchHandle(handle: Handle): Future[SyncId]
def syncUsers(ids: Set[UserId]): Future[SyncId]
def syncSelfUser(): Future[SyncId]
def deleteAccount(): Future[SyncId]
def syncConversations(ids: Set[ConvId] = Set.empty, dependsOn: Option[SyncId] = None): Future[SyncId]
def syncConvLink(id: ConvId): Future[SyncId]
def syncTeam(dependsOn: Option[SyncId] = None): Future[SyncId]
def syncTeamMember(id: UserId): Future[SyncId]
def syncConnections(dependsOn: Option[SyncId] = None): Future[SyncId]
def syncRichMedia(id: MessageId, priority: Int = Priority.MinPriority): Future[SyncId]
def postAddBot(cId: ConvId, pId: ProviderId, iId: IntegrationId): Future[SyncId]
def postRemoveBot(cId: ConvId, botId: UserId): Future[SyncId]
def postSelfUser(info: UserInfo): Future[SyncId]
def postSelfPicture(picture: UploadAssetId): Future[SyncId]
def postSelfName(name: Name): Future[SyncId]
def postSelfAccentColor(color: AccentColor): Future[SyncId]
def postAvailability(status: Availability): Future[SyncId]
def postMessage(id: MessageId, conv: ConvId, editTime: RemoteInstant): Future[SyncId]
def postDeleted(conv: ConvId, msg: MessageId): Future[SyncId]
def postRecalled(conv: ConvId, currentMsgId: MessageId, recalledMsgId: MessageId): Future[SyncId]
def postAssetStatus(id: MessageId, conv: ConvId, exp: Option[FiniteDuration], status: UploadAssetStatus): Future[SyncId]
def postLiking(id: ConvId, liking: Liking): Future[SyncId]
def postConnection(user: UserId, name: Name, message: String): Future[SyncId]
def postConnectionStatus(user: UserId, status: ConnectionStatus): Future[SyncId]
def postReceiptMode(id: ConvId, receiptMode: Int): Future[SyncId]
def postConversationName(id: ConvId, name: Name): Future[SyncId]
def postConversationMemberJoin(id: ConvId, members: Seq[UserId]): Future[SyncId]
def postConversationMemberLeave(id: ConvId, member: UserId): Future[SyncId]
def postConversationState(id: ConvId, state: ConversationState): Future[SyncId]
def postConversation(id: ConvId, users: Set[UserId], name: Option[Name], team: Option[TeamId], access: Set[Access], accessRole: AccessRole, receiptMode: Option[Int]): Future[SyncId]
def postLastRead(id: ConvId, time: RemoteInstant): Future[SyncId]
def postCleared(id: ConvId, time: RemoteInstant): Future[SyncId]
def postAddressBook(ab: AddressBook): Future[SyncId]
def postTypingState(id: ConvId, typing: Boolean): Future[SyncId]
def postOpenGraphData(conv: ConvId, msg: MessageId, editTime: RemoteInstant): Future[SyncId]
def postReceipt(conv: ConvId, messages: Seq[MessageId], user: UserId, tpe: ReceiptType): Future[SyncId]
def postProperty(key: PropertyKey, value: Boolean): Future[SyncId]
def postProperty(key: PropertyKey, value: Int): Future[SyncId]
def postProperty(key: PropertyKey, value: String): Future[SyncId]
def registerPush(token: PushToken): Future[SyncId]
def deletePushToken(token: PushToken): Future[SyncId]
def syncSelfClients(): Future[SyncId]
def syncSelfPermissions(): Future[SyncId]
def postClientLabel(id: ClientId, label: String): Future[SyncId]
def syncClients(user: UserId): Future[SyncId]
def syncClientsLocation(): Future[SyncId]
def syncProperties(): Future[SyncId]
def syncPreKeys(user: UserId, clients: Set[ClientId]): Future[SyncId]
def postSessionReset(conv: ConvId, user: UserId, client: ClientId): Future[SyncId]
def performFullSync(): Future[Unit]
}
class AndroidSyncServiceHandle(account: UserId,
service: SyncRequestService,
timeouts: Timeouts,
userPreferences: UserPreferences,
usersStorage: UsersStorage) extends SyncServiceHandle with DerivedLogTag {
import Threading.Implicits.Background
import com.waz.model.sync.SyncRequest._
val shouldSyncAll = userPreferences(ShouldSyncInitial)
val shouldSyncConversations = userPreferences(ShouldSyncConversations)
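// On construction: if the corresponding preference flag is set, run a full sync (or a
// conversations-only sync) once, then clear both flags so the sync is not repeated on the next start.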
for {
all <- shouldSyncAll()
convs <- shouldSyncConversations()
_ <-
if (all) performFullSync()
else if (convs) syncConversations()
else Future.successful({})
_ <- shouldSyncAll := false
_ <- shouldSyncConversations := false
} yield {}
private def addRequest(req: SyncRequest, priority: Int = Priority.Normal, dependsOn: Seq[SyncId] = Nil, forceRetry: Boolean = false, delay: FiniteDuration = Duration.Zero): Future[SyncId] =
service.addRequest(account, req, priority, dependsOn, forceRetry, delay)
def syncSearchQuery(query: SearchQuery) = addRequest(SyncSearchQuery(query), priority = Priority.High)
def syncUsers(ids: Set[UserId]) = addRequest(SyncUser(ids))
def exactMatchHandle(handle: Handle) = addRequest(ExactMatchHandle(handle), priority = Priority.High)
def syncSelfUser() = addRequest(SyncSelf, priority = Priority.High)
def deleteAccount() = addRequest(DeleteAccount)
def syncConversations(ids: Set[ConvId], dependsOn: Option[SyncId]) =
if (ids.nonEmpty) addRequest(SyncConversation(ids), priority = Priority.Normal, dependsOn = dependsOn.toSeq)
else addRequest(SyncConversations, priority = Priority.High, dependsOn = dependsOn.toSeq)
def syncConvLink(id: ConvId) = addRequest(SyncConvLink(id))
def syncTeam(dependsOn: Option[SyncId] = None): Future[SyncId] = addRequest(SyncTeam, priority = Priority.High, dependsOn = dependsOn.toSeq)
def syncTeamMember(id: UserId): Future[SyncId] = addRequest(SyncTeamMember(id))
def syncConnections(dependsOn: Option[SyncId]) = addRequest(SyncConnections, dependsOn = dependsOn.toSeq)
def syncRichMedia(id: MessageId, priority: Int = Priority.MinPriority) = addRequest(SyncRichMedia(id), priority = priority)
def postSelfUser(info: UserInfo) = addRequest(PostSelf(info))
def postSelfPicture(picture: UploadAssetId) = addRequest(PostSelfPicture(picture))
def postSelfName(name: Name) = addRequest(PostSelfName(name))
def postSelfAccentColor(color: AccentColor) = addRequest(PostSelfAccentColor(color))
def postAvailability(status: Availability) = addRequest(PostAvailability(status))
def postMessage(id: MessageId, conv: ConvId, time: RemoteInstant) = addRequest(PostMessage(conv, id, time), forceRetry = true)
def postDeleted(conv: ConvId, msg: MessageId) = addRequest(PostDeleted(conv, msg))
def postRecalled(conv: ConvId, msg: MessageId, recalled: MessageId) = addRequest(PostRecalled(conv, msg, recalled))
def postAssetStatus(id: MessageId, conv: ConvId, exp: Option[FiniteDuration], status: UploadAssetStatus) = addRequest(PostAssetStatus(conv, id, exp, status))
def postAddressBook(ab: AddressBook) = addRequest(PostAddressBook(ab))
def postConnection(user: UserId, name: Name, message: String) = addRequest(PostConnection(user, name, message))
def postConnectionStatus(user: UserId, status: ConnectionStatus) = addRequest(PostConnectionStatus(user, Some(status)))
def postTypingState(conv: ConvId, typing: Boolean) = addRequest(PostTypingState(conv, typing))
def postConversationName(id: ConvId, name: Name) = addRequest(PostConvName(id, name))
def postConversationState(id: ConvId, state: ConversationState) = addRequest(PostConvState(id, state))
def postConversationMemberJoin(id: ConvId, members: Seq[UserId]) = addRequest(PostConvJoin(id, members.toSet))
def postConversationMemberLeave(id: ConvId, member: UserId) = addRequest(PostConvLeave(id, member))
def postConversation(id: ConvId, users: Set[UserId], name: Option[Name], team: Option[TeamId], access: Set[Access], accessRole: AccessRole, receiptMode: Option[Int]): Future[SyncId]
= addRequest(PostConv(id, users, name, team, access, accessRole, receiptMode))
def postReceiptMode(id: ConvId, receiptMode: Int): Future[SyncId] = addRequest(PostConvReceiptMode(id, receiptMode))
def postLiking(id: ConvId, liking: Liking): Future[SyncId] = addRequest(PostLiking(id, liking))
def postLastRead(id: ConvId, time: RemoteInstant) = addRequest(PostLastRead(id, time), priority = Priority.Low, delay = timeouts.messages.lastReadPostDelay)
def postCleared(id: ConvId, time: RemoteInstant) = addRequest(PostCleared(id, time))
def postOpenGraphData(conv: ConvId, msg: MessageId, time: RemoteInstant) = addRequest(PostOpenGraphMeta(conv, msg, time), priority = Priority.Low)
def postReceipt(conv: ConvId, messages: Seq[MessageId], user: UserId, tpe: ReceiptType): Future[SyncId] = addRequest(PostReceipt(conv, messages, user, tpe), priority = Priority.Optional)
def postAddBot(cId: ConvId, pId: ProviderId, iId: IntegrationId) = addRequest(PostAddBot(cId, pId, iId))
def postRemoveBot(cId: ConvId, botId: UserId) = addRequest(PostRemoveBot(cId, botId))
def postProperty(key: PropertyKey, value: Boolean): Future[SyncId] = addRequest(PostBoolProperty(key, value), forceRetry = true)
def postProperty(key: PropertyKey, value: Int): Future[SyncId] = addRequest(PostIntProperty(key, value), forceRetry = true)
def postProperty(key: PropertyKey, value: String): Future[SyncId] = addRequest(PostStringProperty(key, value), forceRetry = true)
def registerPush(token: PushToken) = addRequest(RegisterPushToken(token), priority = Priority.High, forceRetry = true)
def deletePushToken(token: PushToken) = addRequest(DeletePushToken(token), priority = Priority.Low)
def syncSelfClients() = addRequest(SyncSelfClients, priority = Priority.Critical)
def syncSelfPermissions() = addRequest(SyncSelfPermissions, priority = Priority.High)
def postClientLabel(id: ClientId, label: String) = addRequest(PostClientLabel(id, label))
def syncClients(user: UserId) = addRequest(SyncClients(user))
def syncClientsLocation() = addRequest(SyncClientsLocation)
def syncPreKeys(user: UserId, clients: Set[ClientId]) = addRequest(SyncPreKeys(user, clients))
def syncProperties(): Future[SyncId] = addRequest(SyncProperties, forceRetry = true)
def postSessionReset(conv: ConvId, user: UserId, client: ClientId) = addRequest(PostSessionReset(conv, user, client))
override def performFullSync(): Future[Unit] = {
verbose(l"SYNC performFullSync")
for {
id1 <- syncSelfUser()
id2 <- syncSelfClients()
id3 <- syncSelfPermissions()
id4 <- syncTeam()
id5 <- syncConversations()
id6 <- syncConnections()
id7 <- syncProperties()
userIds <- usersStorage.list().map(_.map(_.id).toSet)
id8 <- syncUsers(userIds)
_ = verbose(l"SYNC waiting for full sync to finish...")
_ <- service.await(Set(id1, id2, id3, id4, id5, id6, id7, id8))
_ = verbose(l"SYNC ... and done")
} yield ()
}
}
trait SyncHandler {
import SyncHandler._
def apply(account: UserId, req: SyncRequest)(implicit reqInfo: RequestInfo): Future[SyncResult]
}
object SyncHandler {
case class RequestInfo(attempt: Int, requestStart: Instant, network: Option[NetworkMode] = None)
}
class AccountSyncHandler(accounts: AccountsService) extends SyncHandler {
import SyncHandler._
import Threading.Implicits.Background
import com.waz.model.sync.SyncRequest._
override def apply(accountId: UserId, req: SyncRequest)(implicit reqInfo: RequestInfo): Future[SyncResult] =
accounts.getZms(accountId).flatMap {
case Some(zms) =>
req match {
case SyncSelfClients => zms.otrClientsSync.syncClients(accountId)
case SyncClients(user) => zms.otrClientsSync.syncClients(user)
case SyncClientsLocation => zms.otrClientsSync.syncClientsLocation()
case SyncPreKeys(user, clients) => zms.otrClientsSync.syncPreKeys(Map(user -> clients.toSeq))
case PostClientLabel(id, label) => zms.otrClientsSync.postLabel(id, label)
case SyncConversation(convs) => zms.conversationSync.syncConversations(convs.toSeq)
case SyncConversations => zms.conversationSync.syncConversations()
case SyncConvLink(conv) => zms.conversationSync.syncConvLink(conv)
case SyncUser(u) => zms.usersSync.syncUsers(u.toSeq: _*)
case SyncSearchQuery(query) => zms.usersearchSync.syncSearchQuery(query)
case ExactMatchHandle(query) => zms.usersearchSync.exactMatchHandle(query)
case SyncRichMedia(messageId) => zms.richmediaSync.syncRichMedia(messageId)
case DeletePushToken(token) => zms.gcmSync.deleteGcmToken(token)
case PostConnection(userId, name, message) => zms.connectionsSync.postConnection(userId, name, message)
case PostConnectionStatus(userId, status) => zms.connectionsSync.postConnectionStatus(userId, status)
case SyncTeam => zms.teamsSync.syncTeam()
case SyncTeamMember(userId) => zms.teamsSync.syncMember(userId)
case SyncConnections => zms.connectionsSync.syncConnections()
case SyncSelf => zms.usersSync.syncSelfUser()
case SyncSelfPermissions => zms.teamsSync.syncSelfPermissions()
case DeleteAccount => zms.usersSync.deleteAccount()
case PostSelf(info) => zms.usersSync.postSelfUser(info)
case PostSelfPicture(assetId) => zms.usersSync.postSelfPicture(assetId)
case PostSelfName(name) => zms.usersSync.postSelfName(name)
case PostSelfAccentColor(color) => zms.usersSync.postSelfAccentColor(color)
case PostAvailability(availability) => zms.usersSync.postAvailability(availability)
case PostAddressBook(ab) => zms.addressbookSync.postAddressBook(ab)
case RegisterPushToken(token) => zms.gcmSync.registerPushToken(token)
case PostLiking(convId, liking) => zms.reactionsSync.postReaction(convId, liking)
case PostAddBot(cId, pId, iId) => zms.integrationsSync.addBot(cId, pId, iId)
case PostRemoveBot(cId, botId) => zms.integrationsSync.removeBot(cId, botId)
case PostDeleted(convId, msgId) => zms.messagesSync.postDeleted(convId, msgId)
case PostLastRead(convId, time) => zms.lastReadSync.postLastRead(convId, time)
case PostOpenGraphMeta(conv, msg, time) => zms.openGraphSync.postMessageMeta(conv, msg, time)
case PostRecalled(convId, msg, recall) => zms.messagesSync.postRecalled(convId, msg, recall)
case PostSessionReset(conv, user, client) => zms.otrSync.postSessionReset(conv, user, client)
case PostReceipt(conv, msg, user, tpe) => zms.messagesSync.postReceipt(conv, msg, user, tpe)
case PostMessage(convId, messageId, time) => zms.messagesSync.postMessage(convId, messageId, time)
case PostAssetStatus(cid, mid, exp, status) => zms.messagesSync.postAssetStatus(cid, mid, exp, status)
case PostConvJoin(convId, u) => zms.conversationSync.postConversationMemberJoin(convId, u)
case PostConvLeave(convId, u) => zms.conversationSync.postConversationMemberLeave(convId, u)
case PostConv(convId, u, name, team, access, accessRole, receiptMode) => zms.conversationSync.postConversation(convId, u, name, team, access, accessRole, receiptMode)
case PostConvName(convId, name) => zms.conversationSync.postConversationName(convId, name)
case PostConvReceiptMode(convId, receiptMode) => zms.conversationSync.postConversationReceiptMode(convId, receiptMode)
case PostConvState(convId, state) => zms.conversationSync.postConversationState(convId, state)
case PostTypingState(convId, ts) => zms.typingSync.postTypingState(convId, ts)
case PostCleared(convId, time) => zms.clearedSync.postCleared(convId, time)
case PostBoolProperty(key, value) => zms.propertiesSyncHandler.postProperty(key, value)
case PostIntProperty(key, value) => zms.propertiesSyncHandler.postProperty(key, value)
case PostStringProperty(key, value) => zms.propertiesSyncHandler.postProperty(key, value)
case SyncProperties => zms.propertiesSyncHandler.syncProperties
case Unknown => Future.successful(Failure("Unknown sync request"))
}
case None => Future.successful(Failure(s"Account $accountId is not logged in"))
}
}
|
wireapp/wire-android-sync-engine
|
zmessaging/src/main/scala/com/waz/sync/SyncServiceHandle.scala
|
Scala
|
gpl-3.0
| 19,031
|
package cpup.mc.computers.content.computers
import io.netty.buffer.ByteBuf
trait Buffer {
def curWidth: Int
def curHeight: Int
def colors: ByteBuf
def data: Array[Char]
def fgIndex = Buffer.fgIndex(curWidth)_
def bgIndex = Buffer.bgIndex(curWidth)_
def charIndex = Buffer.charIndex(curWidth)_
def char(x: Int, y: Int) = data(charIndex(x, y))
def fg(x: Int, y: Int) = Color.fromByte(colors.getByte(fgIndex(x, y)))
def bg(x: Int, y: Int) = Color.fromByte(colors.getByte(bgIndex(x, y)))
def onUpdate(x: Int, y: Int, width: Int, height: Int) {}
def copyTo(_x: Int, _y: Int, _width: Int, _height: Int, other: Buffer, _dx: Int, _dy: Int) {
val (_, _, width, height, dx, dy) = Buffer.copy((curWidth, curHeight, colors, data), _x, _y, _width, _height, (other.curWidth, other.curHeight, other.colors, other.data), _dx, _dy)
other.onUpdate(dx, dy, width, height)
}
def write(x_ : Int, y_ : Int, fg: Color, bg: Color, text: String) {
val x = Math.max(x_, 0)
val y = Math.max(y_, 0)
val _width = Math.min(text.length, curWidth - x)
for(i <- 0 until _width) {
colors.setByte(fgIndex(x + i, y), fg.toByte)
colors.setByte(bgIndex(x + i, y), bg.toByte)
data(charIndex(x + i, y)) = text(i)
}
onUpdate(x, y, _width, 1)
}
def fill(x_ : Int, y_ : Int, width_ : Int, height_ : Int, fg: Color, bg: Color, char: Char) {
val x = Math.max(x_, 0)
val y = Math.max(y_, 0)
val _width = Math.min(width_, curWidth - x)
val _height = Math.min(height_, curHeight - y)
for(_x <- x until x + _width; _y <- y until y + _height) {
colors.setByte(fgIndex(_x, _y), fg.toByte)
colors.setByte(bgIndex(_x, _y), bg.toByte)
data(charIndex(_x, _y)) = char
}
onUpdate(x, y, _width, _height)
}
}
object Buffer {
def fgIndex(width: Int)(x: Int, y: Int) = y * 2 * width + x
def bgIndex(width: Int)(x: Int, y: Int) = (y * 2 + 1) * width + x
def charIndex(width: Int)(x: Int, y: Int) = y * width + x
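// Worked example (assumed width = 4, not from the original source): for (x, y) = (1, 2)
//   charIndex = 2 * 4 + 1       = 9
//   fgIndex   = 2 * 2 * 4 + 1   = 17  (foreground byte row for y = 2)
//   bgIndex   = (2*2 + 1)*4 + 1 = 21  (background byte row immediately after it)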
def copy(from: (Int, Int, ByteBuf, Array[Char]), _x: Int, _y: Int, _width: Int, _height: Int, dest: (Int, Int, ByteBuf, Array[Char]), _dx: Int, _dy: Int): (Int, Int, Int, Int, Int, Int) = {
val x = Math.max(_x, 0)
val y = Math.max(_y, 0)
val dx = Math.max(_dx, 0)
val dy = Math.max(_dy, 0)
val width = Math.min(Math.min(_width, dest._1 - dx), from._1 - x)
val height = Math.min(Math.min(_height, dest._2 - dy), from._2 - y)
for(_y <- 0 until height) {
val idx = fgIndex(from._1)(x, y + _y) + width
from._3.getBytes(fgIndex(from._1)(x, y + _y), dest._3, fgIndex(dest._1)(dx, dy + _y), width)
from._3.getBytes(bgIndex(from._1)(x, y + _y), dest._3, bgIndex(dest._1)(dx, dy + _y), width)
for(_x <- 0 until width) {
dest._4(charIndex(dest._1)(dx + _x, dy + _y)) = from._4(charIndex(from._1)(x + _x, y + _y))
}
}
(x, y, width, height, dx, dy)
}
}
|
CoderPuppy/cpup-computers-mc
|
src/main/scala/cpup/mc/computers/content/computers/Buffer.scala
|
Scala
|
mit
| 2,813
|
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.redis.sink.config
import java.util
import com.datamountaineer.streamreactor.connect.config.base.traits._
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}
object RedisConfig {
val config: ConfigDef = new ConfigDef()
.define(RedisConfigConstants.REDIS_HOST, Type.STRING, Importance.HIGH, RedisConfigConstants.REDIS_HOST_DOC,
"Connection", 2, ConfigDef.Width.MEDIUM, RedisConfigConstants.REDIS_HOST)
.define(RedisConfigConstants.REDIS_PORT, Type.INT, Importance.HIGH, RedisConfigConstants.REDIS_PORT_DOC,
"Connection", 3, ConfigDef.Width.MEDIUM, RedisConfigConstants.REDIS_PORT)
.define(RedisConfigConstants.REDIS_PASSWORD, Type.PASSWORD, null, Importance.LOW, RedisConfigConstants.REDIS_PASSWORD_DOC,
"Connection", 4, ConfigDef.Width.MEDIUM, RedisConfigConstants.REDIS_PASSWORD)
.define(RedisConfigConstants.KCQL_CONFIG, Type.STRING, Importance.HIGH, RedisConfigConstants.KCQL_CONFIG,
"Connection", 1, ConfigDef.Width.MEDIUM, RedisConfigConstants.KCQL_CONFIG)
.define(RedisConfigConstants.ERROR_POLICY, Type.STRING, RedisConfigConstants.ERROR_POLICY_DEFAULT,
Importance.HIGH, RedisConfigConstants.ERROR_POLICY_DOC,
"Connection", 5, ConfigDef.Width.MEDIUM, RedisConfigConstants.ERROR_POLICY)
.define(RedisConfigConstants.ERROR_RETRY_INTERVAL, Type.INT, RedisConfigConstants.ERROR_RETRY_INTERVAL_DEFAULT,
Importance.MEDIUM, RedisConfigConstants.ERROR_RETRY_INTERVAL_DOC,
"Connection", 6, ConfigDef.Width.MEDIUM, RedisConfigConstants.ERROR_RETRY_INTERVAL)
.define(RedisConfigConstants.NBR_OF_RETRIES, Type.INT, RedisConfigConstants.NBR_OF_RETIRES_DEFAULT,
Importance.MEDIUM, RedisConfigConstants.NBR_OF_RETRIES_DOC,
"Connection", 7, ConfigDef.Width.MEDIUM, RedisConfigConstants.NBR_OF_RETRIES)
.define(RedisConfigConstants.PROGRESS_COUNTER_ENABLED, Type.BOOLEAN, RedisConfigConstants.PROGRESS_COUNTER_ENABLED_DEFAULT,
Importance.MEDIUM, RedisConfigConstants.PROGRESS_COUNTER_ENABLED_DOC,
"Metrics", 1, ConfigDef.Width.MEDIUM, RedisConfigConstants.PROGRESS_COUNTER_ENABLED_DISPLAY)
}
/**
* <h1>RedisSinkConfig</h1>
*
* Holds config, extends AbstractConfig.
**/
case class RedisConfig(props: util.Map[String, String])
extends BaseConfig(RedisConfigConstants.CONNECTOR_PREFIX, RedisConfig.config, props)
with KcqlSettings
with ErrorPolicySettings
with NumberRetriesSettings
with UserSettings
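// Hedged usage sketch (property values are illustrative only, not from the original source):
//   import scala.collection.JavaConverters._
//   val props = Map(
//     RedisConfigConstants.REDIS_HOST  -> "localhost",
//     RedisConfigConstants.REDIS_PORT  -> "6379",
//     RedisConfigConstants.KCQL_CONFIG -> "INSERT INTO cache SELECT * FROM topicA"
//   ).asJava
//   val config = RedisConfig(props)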
|
CodeSmell/stream-reactor
|
kafka-connect-redis/src/main/scala/com/datamountaineer/streamreactor/connect/redis/sink/config/RedisConfig.scala
|
Scala
|
apache-2.0
| 3,137
|
/**
* Copyright (c) 2007-2011 Eric Torreborre <etorreborre@yahoo.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of
* the Software. Neither the name of specs nor the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
* TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package org.specs.specification
import org.specs.util.Configuration
/**
* This trait defines some optional behaviour for a specification such as executing examples in a copy of the specification
* to be isolated from any other example modifying local variables.
*/
trait SpecificationConfiguration {
/** get the configuration state */
private[specification] var oneSpecInstancePerExample: Boolean = Configuration.config.oneSpecInstancePerExample
/**
* use this method to use the same specification object to execute Examples, effectively sharing
* variables between them.
*/
def shareVariables() = shareVariablesIs(true)
/**
* use this method *not* to use the same specification object to execute Examples, effectively *not* sharing
* variables between them.
*/
def dontShareVariables() = shareVariablesIs(false)
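/** Note the inversion: sharing variables means reusing one spec instance, i.e. oneSpecInstancePerExample = false. */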
def shareVariablesIs(b: Boolean) = oneSpecInstancePerExample = !b
}
|
stuhood/specs
|
src/main/scala/org/specs/specification/SpecificationConfiguration.scala
|
Scala
|
mit
| 2,236
|
package org.jetbrains.plugins.scala
package codeInspection.methodSignature
import com.intellij.codeInspection._
import org.intellij.lang.annotations.Language
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunctionDefinition
import quickfix.RemoveEqualsSign
/**
* Pavel Fatin
*/
class UnitMethodDefinedWithEqualsSignInspection extends AbstractMethodSignatureInspection(
"ScalaUnitMethodDefinedWithEqualsSign", "Method with Unit result type defined with equals sign") {
def actionFor(holder: ProblemsHolder) = {
case f: ScFunctionDefinition if !f.hasExplicitType && f.hasUnitResultType && !f.isSecondaryConstructor =>
f.assignment.foreach { assignment =>
holder.registerProblem(assignment, getDisplayName, new RemoveEqualsSign(f))
}
}
}
|
consulo/consulo-scala
|
src/org/jetbrains/plugins/scala/codeInspection/methodSignature/UnitMethodDefinedWithEqualsSignInspection.scala
|
Scala
|
apache-2.0
| 784
|
package run.local
import org.apache.spark.sql.SparkSession
import run.shared.Word2VecExample
object Word2VecLocalExample {
def main(args: Array[String]): Unit = {
// default values
var filePath = Word2VecLocalExample.getClass.getClassLoader.getResource("hp1.txt").getPath
var masterInfo = "local[*]"
var targetWord = "Harry"
val ss = SparkSession.builder().appName("Word2Vec Local Example").master(masterInfo).getOrCreate()
// val filePath = "hdfs://master.atscluster:8020/hp1.txt"
// val masterInfo = "spark://master.atscluster:7077"
val result = Word2VecExample.runWord2Vec(ss,targetWord,filePath)
result.foreach(println)
}
}
|
Erwangf/wikipedia-mining
|
src/main/scala/run/local/Word2VecLocalExample.scala
|
Scala
|
mit
| 683
|
/*
* Copyright 1998-2018 Linux.org.ru
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ru.org.linux.topic
import org.junit.runner.RunWith
import org.junit.{Assert, Test}
import org.mockito.Mockito
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.context.annotation.{Bean, Configuration, ImportResource}
import org.springframework.test.context.ContextConfiguration
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner
import ru.org.linux.edithistory.{EditHistoryDao, EditHistoryService}
import ru.org.linux.gallery.{ImageDao, ImageService}
import ru.org.linux.group.GroupDao
import ru.org.linux.section.{SectionDao, SectionDaoImpl, SectionService}
import ru.org.linux.spring.SiteConfig
import ru.org.linux.spring.dao.{DeleteInfoDao, MsgbaseDao}
import ru.org.linux.topic.TopicDaoIntegrationTest._
import ru.org.linux.user.{IgnoreListDao, UserDao, UserLogDao, UserService}
import ru.org.linux.util.bbcode.LorCodeService
@RunWith (classOf[SpringJUnit4ClassRunner])
@ContextConfiguration (classes = Array (classOf[TopicDaoIntegrationTestConfiguration] ) )
class TopicDaoIntegrationTest {
@Autowired
var topicDao: TopicDao = _
@Test
def testLoadTopic(): Unit = {
val topic = topicDao.getById(TestTopic)
Assert.assertNotNull(topic)
Assert.assertEquals(TestTopic, topic.getId)
}
@Test
def testNextPrev():Unit = {
val topic = topicDao.getById(TestTopic)
val nextTopic = topicDao.getNextMessage(topic, null)
val prevTopic = topicDao.getPreviousMessage(topic, null)
Assert.assertNotSame(topic.getId, nextTopic.getId)
Assert.assertNotSame(topic.getId, prevTopic.getId)
}
}
object TopicDaoIntegrationTest {
val TestTopic = 1937347
}
@Configuration
@ImportResource (Array ("classpath:database.xml", "classpath:common.xml") )
class TopicDaoIntegrationTestConfiguration {
@Bean
def groupDao = new GroupDao()
@Bean
def sectionService(sectionDao:SectionDao) = new SectionService(sectionDao)
@Bean
def sectionDao = new SectionDaoImpl()
@Bean
def topicDao = new TopicDao()
@Bean
def userDao = new UserDao()
@Bean
def imageDao = new ImageDao()
@Bean
def imageService = Mockito.mock(classOf[ImageService])
@Bean
def ignoreListDao = new IgnoreListDao()
@Bean
def userService(siteConfig:SiteConfig, userDao:UserDao,
ignoreListDao:IgnoreListDao) = new UserService(siteConfig, userDao, ignoreListDao)
@Bean
def userLogDao = Mockito.mock(classOf[UserLogDao])
@Bean
def topicTagService = Mockito.mock(classOf[TopicTagService])
@Bean
def msgbaseDao = Mockito.mock(classOf[MsgbaseDao])
@Bean
def deleteInfoDao = Mockito.mock(classOf[DeleteInfoDao])
@Bean
def editHistoryService = Mockito.mock(classOf[EditHistoryService])
@Bean
def editHistoryDao = Mockito.mock(classOf[EditHistoryDao])
@Bean
def lorcodeService = Mockito.mock(classOf[LorCodeService])
}
|
hizel/lorsource
|
src/test/scala/ru/org/linux/topic/TopicDaoIntegrationTest.scala
|
Scala
|
apache-2.0
| 3,486
|
package latis.reader.tsml.ml
import latis.util.StringUtils
import scala.collection.Map
import scala.collection.Seq
import scala.collection.mutable
import scala.xml.Attribute
import scala.xml.Elem
import scala.xml.Node
import scala.xml.NodeSeq.seqToNodeSeq
/**
* Wrapper for an Element within the TSML that represents a Variable.
*/
abstract class VariableMl(xml: Node) {
def label: String = xml.label
/**
* Get the value of this element's attribute with the given name.
*/
def getAttribute(name: String): Option[String] = {
(xml \\ ("@"+name)).text match {
case s: String if s.length > 0 => Some(s)
case _ => None
}
}
def getAttributes: Map[String, String] = {
val atts = xml.attributes //(xml \\ ("@*"))
atts.map(att => (att.asInstanceOf[Attribute].key -> att.asInstanceOf[Attribute].value.text)).toMap
}
/**
* Get the text content of this element.
*/
def getContent(): Option[String] = {
xml.child.find(_.isInstanceOf[scala.xml.Text]) match {
case Some(text) => Some(text.text.trim())
case None => None
}
}
/**
* Find the first Element with the given label.
*/
def getElement(label: String): Option[Elem] = {
val nodes = xml \\ label
nodes.length match {
case 0 => None
case _ => Some(nodes.head.asInstanceOf[Elem])
}
}
/**
* Get all the metadata attributes from the tsml for this Variable as key/value pairs.
*/
def getMetadataAttributes: Map[String, String] = {
//Gather the XML attributes from the "metadata" element for this Variable.
val map = mutable.HashMap[String,String]()
val seq = for {
e <- xml \\ "metadata"
att <- e.attributes
} yield (att.key, StringUtils.resolveParameterizedString(att.value.text))
Map[String, String](seq: _*)
}
def hasName(name: String): Boolean = {
val names = ((xml \\ "@id").map(_.text)) ++ //id attribute
(((xml \\ "metadata").flatMap(_ \\ "@name")).map(_.text)) :+ //metadata element name
label //implicit names
names.contains(name)
}
def getName: String = {
val names = ((xml \\ "@id").map(_.text)) ++ //id attribute
(((xml \\ "metadata").flatMap(_ \\ "@name")).map(_.text)) :+ //metadata element name
label //implicit names
names.head
}
/**
* Find a VariableMl with the given name.
*/
def findVariableMl(name: String): Option[VariableMl] = {
if (hasName(name)) Some(this)
else if(this.isInstanceOf[ScalarMl]) None
else (xml \\ "_").flatMap(VariableMl(_).findVariableMl(name)).headOption
}
override def toString: String = xml.toString
}
object VariableMl {
def apply(xml: Node): VariableMl = {
xml.label match {
case "tuple" => new TupleMl(xml)
case "function" => new FunctionMl(xml)
case "time" => new TimeMl(xml)
case _ => new ScalarMl(xml)
}
}
/**
* If more than one, wrap in TupleMl.
*/
def apply(es: Seq[Node]): VariableMl = es.length match {
case 1 => VariableMl(es.head) //only one
case _ => new TupleMl(<tuple/>.copy(child = es)) //implicit Tuple
}
}
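// Hedged sketch (the tsml fragments are illustrative only): VariableMl dispatches on the element label,
//   VariableMl(<time units="milliseconds"/>)                       // -> TimeMl
//   VariableMl(<function><time/><scalar id="flux"/></function>)    // -> FunctionMl
//   VariableMl(Seq(<scalar id="a"/>, <scalar id="b"/>))            // -> implicit TupleMl wrapping both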
|
dlindhol/LaTiS
|
src/main/scala/latis/reader/tsml/ml/VariableMl.scala
|
Scala
|
epl-1.0
| 3,124
|
/*
* This file is part of CubeLoader.
* Copyright (c) 2016 - 2017, KitsuneAlex, All rights reserved.
*
* CubeLoader is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* CubeLoader is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with CubeLoader. If not, see <http://www.gnu.org/licenses/lgpl>.
*/
package de.keri.cubelib.client.render.item
import codechicken.lib.render.item.IItemRenderer
import net.minecraft.client.renderer.Tessellator
import net.minecraft.client.renderer.block.model.ItemCameraTransforms
import net.minecraft.client.renderer.vertex.DefaultVertexFormats
import net.minecraft.item.ItemStack
import net.minecraftforge.common.model.IModelState
import net.minecraftforge.fml.relauncher.{Side, SideOnly}
import org.lwjgl.opengl.GL11
@SideOnly(Side.CLIENT)
class ItemRenderingAdapter(handler: IItemRenderingHandler) extends IItemRenderer {
override def renderItem(stack: ItemStack, transformType: ItemCameraTransforms.TransformType): Unit = {
val buffer = Tessellator.getInstance().getBuffer
buffer.begin(GL11.GL_QUADS, DefaultVertexFormats.ITEM)
handler.renderInventory(stack, transformType, buffer)
Tessellator.getInstance().draw()
}
override def getTransforms: IModelState = handler.getTransformations
override def isGui3d: Boolean = handler.applyLighting
override def isAmbientOcclusion: Boolean = handler.isAmbientOcclusion
}
|
TeamMD5/CubeLoader
|
src/main/scala/de/keri/cubelib/client/render/item/ItemRenderingAdapter.scala
|
Scala
|
gpl-3.0
| 1,902
|
import java.io.File
import org.scalatestplus.play.FakeApplicationFactory
import play.api._
import play.api.inject._
trait MyApplicationFactory extends FakeApplicationFactory {
override def fakeApplication: Application = {
val env = Environment.simple(new File("."))
val context = ApplicationLoader.Context.create(env)
val loader = new MyApplicationLoader()
loader.load(context)
}
}
|
play2-maven-plugin/play2-maven-test-projects
|
play28/scala/compile-di-example/test/MyApplicationFactory.scala
|
Scala
|
apache-2.0
| 406
|
package im.actor.server.session
import java.util.concurrent.TimeUnit
import akka.actor.{ ActorLogging, ActorRef, Cancellable, Props }
import akka.stream.actor._
import com.typesafe.config.Config
import im.actor.api.rpc.{ RpcOk, UpdateBox, RpcResult ⇒ ApiRpcResult }
import im.actor.api.rpc.codecs.UpdateBoxCodec
import im.actor.api.rpc.sequence._
import im.actor.server.api.rpc.RpcResultCodec
import im.actor.server.mtproto.protocol._
import scodec.bits.BitVector
import scala.annotation.tailrec
import scala.collection.{ immutable, mutable }
import scala.concurrent.duration._
import scala.util.control.NoStackTrace
private[session] sealed trait ReSenderMessage
private[session] object ReSenderMessage {
final case class NewClient(client: ActorRef) extends ReSenderMessage
final case class IncomingAck(messageIds: Seq[Long]) extends ReSenderMessage
final case class IncomingRequestResend(messageId: Long) extends ReSenderMessage
// final case class OutgoingMessage(msg: ProtoMessage) extends ReSenderMessage
final case class OutgoingAck(messageIds: Seq[Long]) extends ReSenderMessage
final case class Push(ub: UpdateBox, reduceKey: Option[String]) extends ReSenderMessage
final case class RpcResult(rsp: ApiRpcResult, requestMessageId: Long) extends ReSenderMessage
final case class SetUpdateOptimizations(updateOptimizations: Set[ApiUpdateOptimization.Value]) extends ReSenderMessage
}
private[session] case class ReSenderConfig(ackTimeout: FiniteDuration, maxResendSize: Long, maxBufferSize: Long, maxPushBufferSize: Long)
private[session] object ReSenderConfig {
def fromConfig(config: Config): ReSenderConfig = {
ReSenderConfig(
ackTimeout = config.getDuration("ack-timeout", TimeUnit.SECONDS).seconds,
maxResendSize = config.getBytes("max-resend-size"),
maxBufferSize = config.getBytes("max-buffer-size"),
maxPushBufferSize = config.getBytes("max-push-buffer-size")
)
}
}
private[session] object ReSender {
private case class ScheduledResend(messageId: Long, item: ResendableItem)
private sealed trait ResendableItem {
val bitsSize: Long
val size = bitsSize / 8
val priority: Priority
}
private object RpcItem {
def apply(result: ApiRpcResult, requestMessageId: Long): RpcItem =
RpcItem(RpcResultCodec.encode(result).require, requestMessageId)
}
private final case class RpcItem(body: BitVector, requestMessageId: Long) extends ResendableItem {
override lazy val bitsSize = body.size
override val priority = Priority.RPC
}
private object PushItem {
def apply(ub: UpdateBox, reduceKeyOpt: Option[String]): PushItem = {
val priority = ub match {
case _: SeqUpdate | _: FatSeqUpdate ⇒ Priority.SeqPush
case _: WeakUpdate ⇒ Priority.WeakPush
}
PushItem(UpdateBoxCodec.encode(ub).require, reduceKeyOpt, priority)
}
}
private final case class PushItem(body: BitVector, reduceKeyOpt: Option[String], priority: Priority) extends ResendableItem {
override lazy val bitsSize = body.size
}
private final case class NewSessionItem(newSession: NewSession) extends ResendableItem {
override val bitsSize = 0L
override val priority = Priority.NewSession
}
sealed trait Priority {
val id: Int
}
object Priority {
object NewSession extends Priority {
override val id = 2
}
object Ack extends Priority {
override val id = 1
}
object RPC extends Priority {
override val id = 0
}
object SeqPush extends Priority {
override val id = -1
}
object WeakPush extends Priority {
override val id = -2
}
}
private case object BufferOverflow
def props(authId: Long, sessionId: Long, firstMessageId: Long)(implicit config: ReSenderConfig) =
Props(classOf[ReSender], authId, sessionId, firstMessageId, config)
}
private[session] class ReSender(authId: Long, sessionId: Long, firstMessageId: Long)(implicit config: ReSenderConfig)
extends ActorSubscriber with ActorPublisher[MessageBox] with ActorLogging with MessageIdHelper {
import ActorPublisherMessage._
import ActorSubscriberMessage._
import ReSender._
import ReSenderMessage._
import context.dispatcher
// TODO: configurable
private val AckTimeout = config.ackTimeout
private val MaxBufferSize = config.maxBufferSize
private val MaxResendSize = config.maxResendSize
def receive = resendingToNewClients
def resendingToNewClients: Receive = subscriber.orElse(publisher).orElse {
case NewClient(_) ⇒
log.debug("New client, sending all scheduled for resend")
this.mbQueue.clear()
this.resendBufferSize = 0
this.resendPushBufferSize = 0
this.newSessionBuffer foreach {
case (messageId, ni, scheduled) ⇒
scheduled.cancel()
enqueueNewSession(ni)
}
this.responseBuffer foreach {
case (messageId, (pi, scheduled)) ⇒
scheduled.cancel()
enqueueRpc(pi, nextMessageId())
}
this.pushBuffer foreach {
case (messageId, (pi, scheduled)) ⇒
scheduled.cancel()
enqueuePush(pi, nextMessageId())
}
}
private[this] var resendBufferSize = 0L
private[this] var resendPushBufferSize = 0L
private[this] var updateOptimizations = Set.empty[ApiUpdateOptimization.Value]
private[this] var newSessionBuffer: Option[(Long, NewSessionItem, Cancellable)] = None
private[this] var responseBuffer = immutable.SortedMap.empty[Long, (RpcItem, Cancellable)]
private[this] var pushBuffer = immutable.SortedMap.empty[Long, (PushItem, Cancellable)]
// Provides mapping from reduceKey to the last message with the reduceKey
private[this] var pushReduceMap = immutable.Map.empty[String, Long]
// Provides mapping from request messageId to a responseMessageId
// to prevent response duplicates when client re-requests with same messageId
type RequestMessageId = Long
type ResponseMessageId = Long
private[this] var rpcMap = immutable.Map.empty[RequestMessageId, ResponseMessageId]
// Used to prevent scheduling multiple updates at the same millisecond, which would result in out-of-order delivery
private[this] var lastScheduledResend = System.currentTimeMillis - 1
override def preStart(): Unit = {
super.preStart()
enqueueNewSession(NewSessionItem(NewSession(sessionId, firstMessageId)))
}
override def preRestart(reason: Throwable, message: Option[Any]): Unit = {
log.error(reason, "An error occurred while processing message: {}", message)
super.preRestart(reason, message)
}
// Subscriber-related
def subscriber: Receive = {
case OnNext(IncomingAck(messageIds)) ⇒
log.debug("Received Acks {}", messageIds)
messageIds foreach { messageId ⇒
getResendableItem(messageId) foreach {
case (item, scheduledResend) ⇒
decreaseBufferSize(item)
scheduledResend.cancel()
item match {
case PushItem(_, reduceKeyOpt, _) ⇒
reduceKeyOpt foreach { reduceKey ⇒
if (pushReduceMap.get(reduceKey).contains(messageId))
pushReduceMap -= reduceKey
}
pushBuffer -= messageId
case _: RpcItem ⇒
responseBuffer -= messageId
rpcMap -= messageId
case item: NewSessionItem ⇒
this.newSessionBuffer = None
}
}
}
case OnNext(OutgoingAck(messageIds)) ⇒
enqueueAcks(messageIds)
case OnNext(IncomingRequestResend(messageId)) ⇒
getResendableItem(messageId) foreach {
case (item, scheduled) ⇒
scheduled.cancel()
item match {
case pi: PushItem ⇒
enqueuePush(pi, nextMessageId())
case ri: RpcItem ⇒
enqueueRpc(ri, nextMessageId())
case ni: NewSessionItem ⇒
enqueueNewSession(ni)
}
}
case OnNext(RpcResult(rsp, requestMessageId)) ⇒
val item = RpcItem(rsp, requestMessageId)
this.rpcMap get requestMessageId match {
// we are already trying to deliver this response,
// so cancel the previously scheduled resend: by repeating the RPC request the client has effectively asked for a resend
case Some(responseMessageId) ⇒
responseBuffer.get(responseMessageId) map (_._2.cancel()) match {
case Some(false) ⇒
case _ ⇒ enqueueRpc(item, responseMessageId)
}
// it's a new rpc response
case None ⇒
val responseMessageId = nextMessageId()
this.rpcMap += (requestMessageId → responseMessageId)
enqueueRpc(item, responseMessageId)
}
case OnNext(p @ Push(_, reduceKey)) ⇒ enqueuePush(PushItem(p.ub, reduceKey), nextMessageId())
case OnNext(SetUpdateOptimizations(opts)) ⇒ this.updateOptimizations = opts
case OnComplete ⇒
log.debug("Stopping due to stream completion")
// TODO: cleanup scheduled resends
context.stop(self)
case OnError(cause) ⇒
log.error(cause, "Stopping due to stream error")
// TODO: cleanup scheduled resends
context.stop(self)
case ScheduledResend(messageId, item) ⇒
if (getResendableItem(messageId).isDefined) {
log.debug("Scheduled resend for messageId: {}, item: {}, resending", messageId, item)
decreaseBufferSize(item)
item match {
case ni: NewSessionItem ⇒ enqueueNewSession(ni)
case pi: PushItem ⇒
if (pi.size > MaxResendSize)
enqueueUnsentPush(pi, messageId)
else
enqueuePush(pi, messageId)
case ri: RpcItem ⇒
if (ri.size > MaxResendSize)
enqueueUnsentRpc(ri, messageId)
else
enqueueRpc(ri, messageId)
}
} else log.debug("ScheduledResend for messageId: {}, item: {}, ignoring (absent in buffer)", messageId, item)
case BufferOverflow ⇒
if (this.resendBufferSize > config.maxBufferSize) {
log.warning("Buffer overflow, stopping session")
this.onCompleteThenStop()
}
}
private def increaseBufferSize(item: ResendableItem): Unit = {
this.resendBufferSize += item.size
item match {
case p: PushItem ⇒
if (this.resendPushBufferSize > config.maxPushBufferSize)
clearPushBuffer()
else
this.resendPushBufferSize += item.size
case _ ⇒
}
}
private def decreaseBufferSize(item: ResendableItem): Unit = {
this.resendBufferSize -= item.size
item match {
case _: PushItem ⇒ this.resendPushBufferSize -= item.size
case _ ⇒
}
}
private def clearPushBuffer(): Unit = {
log.debug("Push buffer exceeded, clearing and sending SeqUpdateTooLong")
pushBuffer foreach {
case (messageId, (pi: PushItem, resend)) ⇒
pushBuffer -= messageId
decreaseBufferSize(pi)
resend.cancel()
case _ ⇒
}
enqueueSeqUpdateTooLong()
}
// Publisher-related
override val requestStrategy = WatermarkRequestStrategy(100) // TODO: configurable
// Outgoing queue: higher-priority entries (and, within one priority, higher messageIds) are dequeued first
private[this] val mbQueue = mutable.PriorityQueue.empty[(MessageBox, Priority)](Ordering.by { case (mb, p) ⇒ (p.id, mb.messageId) })
def publisher: Receive = {
case Request(n) ⇒
deliverBuf()
case Cancel ⇒
context.stop(self)
}
@tailrec final def deliverBuf(): Unit = {
if (isActive && totalDemand > 0 && mbQueue.nonEmpty)
mbQueue.dequeue() match {
case (mb, _) ⇒
onNext(mb)
deliverBuf()
}
}
override def unhandled(message: Any): Unit = {
super.unhandled(message)
log.error("Unhandled {}", message)
}
private def getResendableItem(messageId: Long): Option[(ResendableItem, Cancellable)] = {
responseBuffer
.get(messageId)
.orElse(pushBuffer.get(messageId))
.orElse {
this.newSessionBuffer match {
case Some((`messageId`, item, scheduled)) ⇒
Some((item, scheduled))
case _ ⇒ None
}
}
}
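// Note (added): calcScheduleDelay below keeps scheduled resend times strictly
// increasing. When two resends would be scheduled within the same millisecond,
// the later one gets AckTimeout plus a small extra delta, so ScheduledResend
// messages cannot fire out of order.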
private def calcScheduleDelay(): FiniteDuration = {
val currentTime = System.currentTimeMillis()
if (currentTime > this.lastScheduledResend) {
this.lastScheduledResend = currentTime
AckTimeout
} else {
val delta = this.lastScheduledResend - currentTime + 1
this.lastScheduledResend = currentTime + delta
AckTimeout + delta.milli
}
}
private def scheduleResend(item: ResendableItem, messageId: Long) = {
log.debug("Scheduling resend of messageId: {}, timeout: {}", messageId, AckTimeout)
// FIXME: increase resendBufferSize by real Unsent
if (resendBufferSize <= MaxBufferSize) {
val delay = calcScheduleDelay()
val scheduled = context.system.scheduler.scheduleOnce(delay, self, ScheduledResend(messageId, item))
item match {
case pi @ PushItem(_, reduceKeyOpt, _) ⇒
reduceKeyOpt foreach { reduceKey ⇒
for {
msgId ← pushReduceMap.get(reduceKey)
(ritem, resend) ← pushBuffer.get(msgId)
} yield {
this.pushBuffer -= msgId
decreaseBufferSize(ritem)
resend.cancel()
}
this.pushReduceMap += (reduceKey → messageId)
}
this.pushBuffer = this.pushBuffer.updated(messageId, (pi, scheduled))
case ni: NewSessionItem ⇒
this.newSessionBuffer = Some((messageId, ni, scheduled))
case ri: RpcItem ⇒
this.responseBuffer = this.responseBuffer.updated(messageId, (ri, scheduled))
}
} else bufferOverflow()
increaseBufferSize(item)
}
private def enqueueAcks(messageIds: Seq[Long]): Unit =
enqueue(MessageBox(nextMessageId(), MessageAck(messageIds.toVector)), Priority.Ack)
private def enqueueNewSession(item: NewSessionItem): Unit = {
val messageId = nextMessageId()
scheduleResend(item, messageId)
enqueue(MessageBox(messageId, item.newSession), Priority.NewSession)
}
private def enqueueSeqUpdateTooLong(): Unit =
enqueue(MessageBox(nextMessageId(), ProtoPush(UpdateBoxCodec.encode(SeqUpdateTooLong).require)), Priority.SeqPush)
private def enqueueRpc(item: RpcItem, messageId: Long): Unit = {
scheduleResend(item, messageId)
val mb = MessageBox(messageId, ProtoRpcResponse(item.requestMessageId, item.body))
enqueue(mb, Priority.RPC)
}
private def enqueueUnsentRpc(item: RpcItem, unsentMessageId: Long): Unit = {
scheduleResend(item, unsentMessageId)
val mb = MessageBox(nextMessageId(), UnsentResponse(unsentMessageId, item.requestMessageId, item.size.toInt))
enqueue(mb, Priority.RPC)
}
private def enqueuePush(item: PushItem, messageId: Long): Unit = {
scheduleResend(item, messageId)
val mb = MessageBox(messageId, ProtoPush(item.body))
enqueue(mb, item.priority)
}
private def enqueueUnsentPush(item: PushItem, unsentMessageId: Long): Unit = {
scheduleResend(item, unsentMessageId)
val mb = MessageBox(nextMessageId(), UnsentMessage(unsentMessageId, item.size.toInt))
enqueue(mb, item.priority)
}
private def enqueue(mb: MessageBox, priority: Priority): Unit = {
log.debug("Queue size: {}, bufferSize: {}, pushBufferSize: {}", mbQueue.size, resendBufferSize, resendPushBufferSize)
if (isActive && totalDemand > 0 && mbQueue.isEmpty) {
onNext(mb)
} else {
this.mbQueue.enqueue(mb → priority)
deliverBuf()
}
}
private def bufferOverflow(): Unit = {
self ! BufferOverflow
}
private def pushBufferSize = responseBuffer.size + pushBuffer.size + newSessionBuffer.map(_ ⇒ 1).getOrElse(0)
override def postStop(): Unit = {
super.postStop()
log.debug("Clearing resend buffers ({} items)", pushBufferSize)
responseBuffer.values foreach (_._2.cancel())
pushBuffer.values foreach (_._2.cancel())
newSessionBuffer foreach (_._3.cancel())
}
}
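// Illustrative sketch (added; not part of the original file). It models the
// mbQueue ordering above with a hypothetical SimpleBox stand-in for MessageBox:
// entries with a higher Priority.id are dequeued first and, within one
// priority, the larger messageId wins, because PriorityQueue yields the
// maximum of the ordering.
object PriorityOrderingSketch extends App {
  import scala.collection.mutable
  import ReSender.Priority

  final case class SimpleBox(messageId: Long) // hypothetical stand-in for MessageBox

  val queue = mutable.PriorityQueue.empty[(SimpleBox, Priority)](
    Ordering.by { case (mb, p) ⇒ (p.id, mb.messageId) })

  queue.enqueue(
    SimpleBox(10L) -> Priority.RPC,
    SimpleBox(11L) -> Priority.Ack,
    SimpleBox(12L) -> Priority.NewSession)

  // Prints the NewSession entry first, then Ack, then RPC.
  while (queue.nonEmpty) println(queue.dequeue())
}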
|
actorapp/actor-platform
|
actor-server/actor-session/src/main/scala/im/actor/server/session/Resender.scala
|
Scala
|
agpl-3.0
| 16,153
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.encoders
import scala.util.Random
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.{RandomDataGenerator, Row}
import org.apache.spark.sql.catalyst.util.{ArrayData, GenericArrayData}
import org.apache.spark.sql.types._
@SQLUserDefinedType(udt = classOf[ExamplePointUDT])
class ExamplePoint(val x: Double, val y: Double) extends Serializable {
override def hashCode: Int = 41 * (41 + x.toInt) + y.toInt
override def equals(that: Any): Boolean = {
if (that.isInstanceOf[ExamplePoint]) {
val e = that.asInstanceOf[ExamplePoint]
(this.x == e.x || (this.x.isNaN && e.x.isNaN) || (this.x.isInfinity && e.x.isInfinity)) &&
(this.y == e.y || (this.y.isNaN && e.y.isNaN) || (this.y.isInfinity && e.y.isInfinity))
} else {
false
}
}
}
/**
* User-defined type for [[ExamplePoint]].
*/
class ExamplePointUDT extends UserDefinedType[ExamplePoint] {
override def sqlType: DataType = ArrayType(DoubleType, false)
override def pyUDT: String = "pyspark.sql.tests.ExamplePointUDT"
override def serialize(p: ExamplePoint): GenericArrayData = {
val output = new Array[Any](2)
output(0) = p.x
output(1) = p.y
new GenericArrayData(output)
}
override def deserialize(datum: Any): ExamplePoint = {
datum match {
case values: ArrayData =>
if (values.numElements() > 1) {
new ExamplePoint(values.getDouble(0), values.getDouble(1))
} else {
val random = new Random()
new ExamplePoint(random.nextDouble(), random.nextDouble())
}
}
}
override def userClass: Class[ExamplePoint] = classOf[ExamplePoint]
private[spark] override def asNullable: ExamplePointUDT = this
}
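// Illustrative sketch (added; not part of the original file): a round trip
// through the UDT above. serialize stores the point as a two-element array of
// doubles and deserialize rebuilds the point from that array.
object ExamplePointUDTRoundTrip {
  def roundTrip(): ExamplePoint = {
    val udt = new ExamplePointUDT
    val serialized = udt.serialize(new ExamplePoint(1.0, 2.0)) // GenericArrayData(1.0, 2.0)
    udt.deserialize(serialized)                                // ExamplePoint(1.0, 2.0)
  }
}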
class RowEncoderSuite extends SparkFunSuite {
private val structOfString = new StructType().add("str", StringType)
private val structOfUDT = new StructType().add("udt", new ExamplePointUDT, false)
private val arrayOfString = ArrayType(StringType)
private val arrayOfNull = ArrayType(NullType)
private val mapOfString = MapType(StringType, StringType)
private val arrayOfUDT = ArrayType(new ExamplePointUDT, false)
encodeDecodeTest(
new StructType()
.add("null", NullType)
.add("boolean", BooleanType)
.add("byte", ByteType)
.add("short", ShortType)
.add("int", IntegerType)
.add("long", LongType)
.add("float", FloatType)
.add("double", DoubleType)
.add("decimal", DecimalType.SYSTEM_DEFAULT)
.add("string", StringType)
.add("binary", BinaryType)
.add("date", DateType)
.add("timestamp", TimestampType)
.add("udt", new ExamplePointUDT))
encodeDecodeTest(
new StructType()
.add("arrayOfNull", arrayOfNull)
.add("arrayOfString", arrayOfString)
.add("arrayOfArrayOfString", ArrayType(arrayOfString))
.add("arrayOfArrayOfInt", ArrayType(ArrayType(IntegerType)))
.add("arrayOfMap", ArrayType(mapOfString))
.add("arrayOfStruct", ArrayType(structOfString))
.add("arrayOfUDT", arrayOfUDT))
encodeDecodeTest(
new StructType()
.add("mapOfIntAndString", MapType(IntegerType, StringType))
.add("mapOfStringAndArray", MapType(StringType, arrayOfString))
.add("mapOfArrayAndInt", MapType(arrayOfString, IntegerType))
.add("mapOfArray", MapType(arrayOfString, arrayOfString))
.add("mapOfStringAndStruct", MapType(StringType, structOfString))
.add("mapOfStructAndString", MapType(structOfString, StringType))
.add("mapOfStruct", MapType(structOfString, structOfString)))
encodeDecodeTest(
new StructType()
.add("structOfString", structOfString)
.add("structOfStructOfString", new StructType().add("struct", structOfString))
.add("structOfArray", new StructType().add("array", arrayOfString))
.add("structOfMap", new StructType().add("map", mapOfString))
.add("structOfArrayAndMap",
new StructType().add("array", arrayOfString).add("map", mapOfString))
.add("structOfUDT", structOfUDT))
test("encode/decode decimal type") {
val schema = new StructType()
.add("int", IntegerType)
.add("string", StringType)
.add("double", DoubleType)
.add("java_decimal", DecimalType.SYSTEM_DEFAULT)
.add("scala_decimal", DecimalType.SYSTEM_DEFAULT)
.add("catalyst_decimal", DecimalType.SYSTEM_DEFAULT)
val encoder = RowEncoder(schema).resolveAndBind()
val javaDecimal = new java.math.BigDecimal("1234.5678")
val scalaDecimal = BigDecimal("1234.5678")
val catalystDecimal = Decimal("1234.5678")
val input = Row(100, "test", 0.123, javaDecimal, scalaDecimal, catalystDecimal)
val row = encoder.toRow(input)
val convertedBack = encoder.fromRow(row)
// Decimal will be converted back to Java BigDecimal when decoding.
assert(convertedBack.getDecimal(3).compareTo(javaDecimal) == 0)
assert(convertedBack.getDecimal(4).compareTo(scalaDecimal.bigDecimal) == 0)
assert(convertedBack.getDecimal(5).compareTo(catalystDecimal.toJavaBigDecimal) == 0)
}
test("RowEncoder should preserve decimal precision and scale") {
val schema = new StructType().add("decimal", DecimalType(10, 5), false)
val encoder = RowEncoder(schema).resolveAndBind()
val decimal = Decimal("67123.45")
val input = Row(decimal)
val row = encoder.toRow(input)
assert(row.toSeq(schema).head == decimal)
}
test("RowEncoder should preserve schema nullability") {
val schema = new StructType().add("int", IntegerType, nullable = false)
val encoder = RowEncoder(schema).resolveAndBind()
assert(encoder.serializer.length == 1)
assert(encoder.serializer.head.dataType == IntegerType)
assert(encoder.serializer.head.nullable == false)
}
test("RowEncoder should preserve nested column name") {
val schema = new StructType().add(
"struct",
new StructType()
.add("i", IntegerType, nullable = false)
.add(
"s",
new StructType().add("int", IntegerType, nullable = false),
nullable = false),
nullable = false)
val encoder = RowEncoder(schema).resolveAndBind()
assert(encoder.serializer.length == 1)
assert(encoder.serializer.head.dataType ==
new StructType()
.add("i", IntegerType, nullable = false)
.add(
"s",
new StructType().add("int", IntegerType, nullable = false),
nullable = false))
assert(encoder.serializer.head.nullable == false)
}
test("RowEncoder should support primitive arrays") {
val schema = new StructType()
.add("booleanPrimitiveArray", ArrayType(BooleanType, false))
.add("bytePrimitiveArray", ArrayType(ByteType, false))
.add("shortPrimitiveArray", ArrayType(ShortType, false))
.add("intPrimitiveArray", ArrayType(IntegerType, false))
.add("longPrimitiveArray", ArrayType(LongType, false))
.add("floatPrimitiveArray", ArrayType(FloatType, false))
.add("doublePrimitiveArray", ArrayType(DoubleType, false))
val encoder = RowEncoder(schema).resolveAndBind()
val input = Seq(
Array(true, false),
Array(1.toByte, 64.toByte, Byte.MaxValue),
Array(1.toShort, 255.toShort, Short.MaxValue),
Array(1, 10000, Int.MaxValue),
Array(1.toLong, 1000000.toLong, Long.MaxValue),
Array(1.1.toFloat, 123.456.toFloat, Float.MaxValue),
Array(11.1111, 123456.7890123, Double.MaxValue)
)
val row = encoder.toRow(Row.fromSeq(input))
val convertedBack = encoder.fromRow(row)
input.zipWithIndex.map { case (array, index) =>
assert(convertedBack.getSeq(index) === array)
}
}
test("RowEncoder should support array as the external type for ArrayType") {
val schema = new StructType()
.add("array", ArrayType(IntegerType))
.add("nestedArray", ArrayType(ArrayType(StringType)))
.add("deepNestedArray", ArrayType(ArrayType(ArrayType(LongType))))
val encoder = RowEncoder(schema).resolveAndBind()
val input = Row(
Array(1, 2, null),
Array(Array("abc", null), null),
Array(Seq(Array(0L, null), null), null))
val row = encoder.toRow(input)
val convertedBack = encoder.fromRow(row)
assert(convertedBack.getSeq(0) == Seq(1, 2, null))
assert(convertedBack.getSeq(1) == Seq(Seq("abc", null), null))
assert(convertedBack.getSeq(2) == Seq(Seq(Seq(0L, null), null), null))
}
test("RowEncoder should throw RuntimeException if input row object is null") {
val schema = new StructType().add("int", IntegerType)
val encoder = RowEncoder(schema)
val e = intercept[RuntimeException](encoder.toRow(null))
assert(e.getMessage.contains("Null value appeared in non-nullable field"))
assert(e.getMessage.contains("top level row object"))
}
test("RowEncoder should validate external type") {
val e1 = intercept[RuntimeException] {
val schema = new StructType().add("a", IntegerType)
val encoder = RowEncoder(schema)
encoder.toRow(Row(1.toShort))
}
assert(e1.getMessage.contains("java.lang.Short is not a valid external type"))
val e2 = intercept[RuntimeException] {
val schema = new StructType().add("a", StringType)
val encoder = RowEncoder(schema)
encoder.toRow(Row(1))
}
assert(e2.getMessage.contains("java.lang.Integer is not a valid external type"))
val e3 = intercept[RuntimeException] {
val schema = new StructType().add("a",
new StructType().add("b", IntegerType).add("c", StringType))
val encoder = RowEncoder(schema)
encoder.toRow(Row(1 -> "a"))
}
assert(e3.getMessage.contains("scala.Tuple2 is not a valid external type"))
val e4 = intercept[RuntimeException] {
val schema = new StructType().add("a", ArrayType(TimestampType))
val encoder = RowEncoder(schema)
encoder.toRow(Row(Array("a")))
}
assert(e4.getMessage.contains("java.lang.String is not a valid external type"))
}
private def encodeDecodeTest(schema: StructType): Unit = {
test(s"encode/decode: ${schema.simpleString}") {
val encoder = RowEncoder(schema).resolveAndBind()
val inputGenerator = RandomDataGenerator.forType(schema, nullable = false).get
var input: Row = null
try {
for (_ <- 1 to 5) {
input = inputGenerator.apply().asInstanceOf[Row]
val row = encoder.toRow(input)
val convertedBack = encoder.fromRow(row)
assert(input == convertedBack)
}
} catch {
case e: Exception =>
fail(
s"""
|schema: ${schema.simpleString}
|input: ${input}
""".stripMargin, e)
}
}
}
}
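// Illustrative sketch (added; not part of the original suite): the same
// toRow/fromRow round trip the tests above rely on, written as a plain method.
// The schema and values are arbitrary examples.
object RowEncoderRoundTripSketch {
  def roundTrip(): Row = {
    val schema = new StructType().add("id", IntegerType).add("name", StringType)
    val encoder = RowEncoder(schema).resolveAndBind()
    encoder.fromRow(encoder.toRow(Row(1, "test")))
  }
}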
|
Panos-Bletsos/spark-cost-model-optimizer
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/RowEncoderSuite.scala
|
Scala
|
apache-2.0
| 11,542
|
/*
Copyright 2013 Originate Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import play.api._
import play.api.mvc._
import play.api.db.DB
import java.util.UUID
import scala.slick.session.Database
import play.api.Play.current
object Application extends Controller {
lazy val database = Database.forDataSource(DB.getDataSource())
def index = Action {
implicit request =>
val session = request.session.get("s") match {
case Some(sessionId) => {
request.session
}
case _ =>
val sessionId = UUID.randomUUID().toString
Logger.info("Session created: " + sessionId)
request.session +("s", sessionId)
}
Ok(views.html.index("Your new application is ready.")).withSession(session)
}
}
|
Bowbaq/play2-websocket
|
sample-websocket-app/app/controllers/Application.scala
|
Scala
|
apache-2.0
| 1,307
|
package infra.sockjs.impl
import infra.sockjs.SockJsService
/**
* @author alari
* @since 12/16/13
*/
object CookieNeededEchoService extends SockJsService{
override val cookieNeeded = true
}
|
alari/play-sockjs
|
module-code/app/infra/sockjs/impl/CookieNeededEchoService.scala
|
Scala
|
apache-2.0
| 197
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.orc
import java.net.URI
import java.util.Properties
import scala.collection.JavaConverters._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.hadoop.hive.ql.io.orc._
import org.apache.hadoop.hive.serde2.objectinspector.{SettableStructObjectInspector, StructObjectInspector}
import org.apache.hadoop.hive.serde2.typeinfo.{StructTypeInfo, TypeInfoUtils}
import org.apache.hadoop.io.{NullWritable, Writable}
import org.apache.hadoop.mapred.{JobConf, OutputFormat => MapRedOutputFormat, RecordWriter, Reporter}
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat, FileSplit}
import org.apache.orc.OrcConf.COMPRESS
import org.apache.spark.TaskContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.execution.datasources.orc.OrcOptions
import org.apache.spark.sql.hive.{HiveInspectors, HiveShim}
import org.apache.spark.sql.sources.{Filter, _}
import org.apache.spark.sql.types._
import org.apache.spark.util.SerializableConfiguration
/**
* `FileFormat` for reading ORC files. If this is moved or renamed, please update
* `DataSource`'s backwardCompatibilityMap.
*/
class OrcFileFormat extends FileFormat with DataSourceRegister with Serializable {
override def shortName(): String = "orc"
override def toString: String = "ORC"
override def inferSchema(
sparkSession: SparkSession,
options: Map[String, String],
files: Seq[FileStatus]): Option[StructType] = {
val ignoreCorruptFiles = sparkSession.sessionState.conf.ignoreCorruptFiles
OrcFileOperator.readSchema(
files.map(_.getPath.toString),
Some(sparkSession.sessionState.newHadoopConf()),
ignoreCorruptFiles
)
}
override def prepareWrite(
sparkSession: SparkSession,
job: Job,
options: Map[String, String],
dataSchema: StructType): OutputWriterFactory = {
val orcOptions = new OrcOptions(options, sparkSession.sessionState.conf)
val configuration = job.getConfiguration
configuration.set(COMPRESS.getAttribute, orcOptions.compressionCodec)
configuration match {
case conf: JobConf =>
conf.setOutputFormat(classOf[OrcOutputFormat])
case conf =>
conf.setClass(
"mapred.output.format.class",
classOf[OrcOutputFormat],
classOf[MapRedOutputFormat[_, _]])
}
new OutputWriterFactory {
override def newInstance(
path: String,
dataSchema: StructType,
context: TaskAttemptContext): OutputWriter = {
new OrcOutputWriter(path, dataSchema, context)
}
override def getFileExtension(context: TaskAttemptContext): String = {
val compressionExtension: String = {
val name = context.getConfiguration.get(COMPRESS.getAttribute)
OrcFileFormat.extensionsForCompressionCodecNames.getOrElse(name, "")
}
compressionExtension + ".orc"
}
}
}
override def isSplitable(
sparkSession: SparkSession,
options: Map[String, String],
path: Path): Boolean = {
true
}
override def buildReader(
sparkSession: SparkSession,
dataSchema: StructType,
partitionSchema: StructType,
requiredSchema: StructType,
filters: Seq[Filter],
options: Map[String, String],
hadoopConf: Configuration): (PartitionedFile) => Iterator[InternalRow] = {
if (sparkSession.sessionState.conf.orcFilterPushDown) {
// Sets pushed predicates
OrcFilters.createFilter(requiredSchema, filters.toArray).foreach { f =>
hadoopConf.set(OrcFileFormat.SARG_PUSHDOWN, f.toKryo)
hadoopConf.setBoolean(ConfVars.HIVEOPTINDEXFILTER.varname, true)
}
}
val broadcastedHadoopConf =
sparkSession.sparkContext.broadcast(new SerializableConfiguration(hadoopConf))
val ignoreCorruptFiles = sparkSession.sessionState.conf.ignoreCorruptFiles
(file: PartitionedFile) => {
val conf = broadcastedHadoopConf.value.value
val filePath = new Path(new URI(file.filePath))
// SPARK-8501: Empty ORC files always have an empty schema stored in their footer. In this
// case, `OrcFileOperator.readSchema` returns `None`, and we can't read the underlying file
// using the given physical schema. Instead, we simply return an empty iterator.
val isEmptyFile =
OrcFileOperator.readSchema(Seq(filePath.toString), Some(conf), ignoreCorruptFiles).isEmpty
if (isEmptyFile) {
Iterator.empty
} else {
OrcFileFormat.setRequiredColumns(conf, dataSchema, requiredSchema)
val orcRecordReader = {
val job = Job.getInstance(conf)
FileInputFormat.setInputPaths(job, file.filePath)
val fileSplit = new FileSplit(filePath, file.start, file.length, Array.empty)
// Custom OrcRecordReader is used to get
// ObjectInspector during recordReader creation itself and can
// avoid NameNode call in unwrapOrcStructs per file.
// Specifically would be helpful for partitioned datasets.
val orcReader = OrcFile.createReader(filePath, OrcFile.readerOptions(conf))
new SparkOrcNewRecordReader(orcReader, conf, fileSplit.getStart, fileSplit.getLength)
}
val recordsIterator = new RecordReaderIterator[OrcStruct](orcRecordReader)
Option(TaskContext.get())
.foreach(_.addTaskCompletionListener[Unit](_ => recordsIterator.close()))
// Unwraps `OrcStruct`s to `UnsafeRow`s
OrcFileFormat.unwrapOrcStructs(
conf,
dataSchema,
requiredSchema,
Some(orcRecordReader.getObjectInspector.asInstanceOf[StructObjectInspector]),
recordsIterator)
}
}
}
override def supportDataType(dataType: DataType, isReadPath: Boolean): Boolean = dataType match {
case _: AtomicType => true
case st: StructType => st.forall { f => supportDataType(f.dataType, isReadPath) }
case ArrayType(elementType, _) => supportDataType(elementType, isReadPath)
case MapType(keyType, valueType, _) =>
supportDataType(keyType, isReadPath) && supportDataType(valueType, isReadPath)
case udt: UserDefinedType[_] => supportDataType(udt.sqlType, isReadPath)
case _: NullType => isReadPath
case _ => false
}
}
private[orc] class OrcSerializer(dataSchema: StructType, conf: Configuration)
extends HiveInspectors {
def serialize(row: InternalRow): Writable = {
wrapOrcStruct(cachedOrcStruct, structOI, row)
serializer.serialize(cachedOrcStruct, structOI)
}
private[this] val serializer = {
val table = new Properties()
table.setProperty("columns", dataSchema.fieldNames.mkString(","))
table.setProperty("columns.types", dataSchema.map(_.dataType.catalogString).mkString(":"))
val serde = new OrcSerde
serde.initialize(conf, table)
serde
}
// Object inspector converted from the schema of the relation to be serialized.
private[this] val structOI = {
val typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(dataSchema.catalogString)
OrcStruct.createObjectInspector(typeInfo.asInstanceOf[StructTypeInfo])
.asInstanceOf[SettableStructObjectInspector]
}
private[this] val cachedOrcStruct = structOI.create().asInstanceOf[OrcStruct]
// Wrapper functions used to wrap Spark SQL input arguments into Hive specific format
private[this] val wrappers = dataSchema.zip(structOI.getAllStructFieldRefs().asScala.toSeq).map {
case (f, i) => wrapperFor(i.getFieldObjectInspector, f.dataType)
}
private[this] def wrapOrcStruct(
struct: OrcStruct,
oi: SettableStructObjectInspector,
row: InternalRow): Unit = {
val fieldRefs = oi.getAllStructFieldRefs
var i = 0
val size = fieldRefs.size
while (i < size) {
oi.setStructFieldData(
struct,
fieldRefs.get(i),
wrappers(i)(row.get(i, dataSchema(i).dataType))
)
i += 1
}
}
}
private[orc] class OrcOutputWriter(
path: String,
dataSchema: StructType,
context: TaskAttemptContext)
extends OutputWriter {
private[this] val serializer = new OrcSerializer(dataSchema, context.getConfiguration)
// `OrcRecordWriter.close()` creates an empty file if no rows are written at all. We use this
// flag to decide whether `OrcRecordWriter.close()` needs to be called.
private var recordWriterInstantiated = false
private lazy val recordWriter: RecordWriter[NullWritable, Writable] = {
recordWriterInstantiated = true
new OrcOutputFormat().getRecordWriter(
new Path(path).getFileSystem(context.getConfiguration),
context.getConfiguration.asInstanceOf[JobConf],
path,
Reporter.NULL
).asInstanceOf[RecordWriter[NullWritable, Writable]]
}
override def write(row: InternalRow): Unit = {
recordWriter.write(NullWritable.get(), serializer.serialize(row))
}
override def close(): Unit = {
if (recordWriterInstantiated) {
recordWriter.close(Reporter.NULL)
}
}
}
private[orc] object OrcFileFormat extends HiveInspectors {
// This constant duplicates `OrcInputFormat.SARG_PUSHDOWN`, which is unfortunately not public.
private[orc] val SARG_PUSHDOWN = "sarg.pushdown"
// The extensions for ORC compression codecs
val extensionsForCompressionCodecNames = Map(
"NONE" -> "",
"SNAPPY" -> ".snappy",
"ZLIB" -> ".zlib",
"LZO" -> ".lzo")
def unwrapOrcStructs(
conf: Configuration,
dataSchema: StructType,
requiredSchema: StructType,
maybeStructOI: Option[StructObjectInspector],
iterator: Iterator[Writable]): Iterator[InternalRow] = {
val deserializer = new OrcSerde
val mutableRow = new SpecificInternalRow(requiredSchema.map(_.dataType))
val unsafeProjection = UnsafeProjection.create(requiredSchema)
def unwrap(oi: StructObjectInspector): Iterator[InternalRow] = {
val (fieldRefs, fieldOrdinals) = requiredSchema.zipWithIndex.map {
case (field, ordinal) =>
var ref = oi.getStructFieldRef(field.name)
if (ref == null) {
ref = oi.getStructFieldRef("_col" + dataSchema.fieldIndex(field.name))
}
ref -> ordinal
}.unzip
val unwrappers = fieldRefs.map(r => if (r == null) null else unwrapperFor(r))
iterator.map { value =>
val raw = deserializer.deserialize(value)
var i = 0
val length = fieldRefs.length
while (i < length) {
val fieldRef = fieldRefs(i)
val fieldValue = if (fieldRef == null) null else oi.getStructFieldData(raw, fieldRef)
if (fieldValue == null) {
mutableRow.setNullAt(fieldOrdinals(i))
} else {
unwrappers(i)(fieldValue, mutableRow, fieldOrdinals(i))
}
i += 1
}
unsafeProjection(mutableRow)
}
}
maybeStructOI.map(unwrap).getOrElse(Iterator.empty)
}
def setRequiredColumns(
conf: Configuration, dataSchema: StructType, requestedSchema: StructType): Unit = {
val ids = requestedSchema.map(a => dataSchema.fieldIndex(a.name): Integer)
val (sortedIDs, sortedNames) = ids.zip(requestedSchema.fieldNames).sorted.unzip
HiveShim.appendReadColumns(conf, sortedIDs, sortedNames)
}
}
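// Illustrative sketch (added; not part of the original file): how the writer
// side assembles the output file extension above. The codec name stored under
// COMPRESS is looked up in extensionsForCompressionCodecNames and ".orc" is
// appended.
object OrcExtensionSketch {
  def extensionFor(codecName: String): String =
    OrcFileFormat.extensionsForCompressionCodecNames.getOrElse(codecName, "") + ".orc"
  // extensionFor("SNAPPY") == ".snappy.orc"; extensionFor("NONE") == ".orc"
}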
|
michalsenkyr/spark
|
sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala
|
Scala
|
apache-2.0
| 12,376
|
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.feats
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
class MetaMagicFeatTest extends AnyFunSpec with Matchers {
describe("Meta Magics") {
they("can be bestowed as Feats") {
noException shouldBe thrownBy(MetaMagicFeat.values)
}
}
}
|
adarro/ddo-calc
|
subprojects/common/ddo-core/src/test/scala/io/truthencode/ddo/model/feats/MetaMagicFeatTest.scala
|
Scala
|
apache-2.0
| 967
|
/*
* Copyright (c) <2015-2016>, see CONTRIBUTORS
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package ch.usi.inf.l3.sana.ooj.eval
import ch.usi.inf.l3.sana
import sana.ooj
import sana.brokenj
import sana.primj
import sana.tiny
import sana.calcj
import tiny.core.TransformationComponent
import tiny.dsl._
import tiny.symbols.Symbol
import ooj.ast._
import ooj.ast.Implicits._
import ooj.ast.TreeExtractors._
import ooj.modifiers.Ops._
import ooj.symbols.SymbolUtils
import brokenj.ast.{TreeCopiers => _, TreeUtils => _, _}
import primj.ast.{TreeCopiers => _, MethodDefApi => PMethodDefApi,
ProgramApi => _, TreeUtils => _, _}
import calcj.ast.{TreeCopiers => _, _}
import tiny.ast.{TreeCopiers => _, _}
/**
* This phase, together with [[ConstantFoldingComponent]], performs constant
* folding of constant expressions. Doing this before type-checking is useful
* for case-guard distinctness and assignment-conversion checking. Constant
* expressions are defined as in the Java 1 specification.
*
* This phase runs first: it populates an environment with the final fields of
* the entire program, binding each field to its right-hand-side expression.
* The [[ConstantFoldingComponent]] phase then evaluates the whole program
* using the resulting environment.
*/
trait ConstantCollectingComponent
extends TransformationComponent[(Tree, Env), Env] {
def collect: ((Tree, Env)) => Env
}
@component(tree, env)
trait ProgramConstantCollectingComponent extends ConstantCollectingComponent {
(prg: ProgramApi) => {
prg.members.foldLeft(env){
(z, member) =>
collect((member, z))
}
}
}
@component(tree, env)
trait CompilationUnitConstantCollectingComponent extends
ConstantCollectingComponent {
(cunit: CompilationUnitApi) => {
collect((cunit.module, env))
}
}
@component(tree, env)
trait PackageDefConstantCollectingComponent
extends ConstantCollectingComponent {
(pkg: PackageDefApi) => {
pkg.members.foldLeft(env) {
(z, member) =>
collect((member, z))
}
}
}
@component(tree, env)
trait ClassDefConstantCollectingComponent
extends ConstantCollectingComponent {
(clazz: ClassDefApi) => {
collect((clazz.body, env))
}
}
@component(tree, env)
trait TemplateConstantCollectingComponent
extends ConstantCollectingComponent {
(template: TemplateApi) => {
template.members.foldLeft(env){
(z, member) =>
collect((member, z))
}
}
}
@component(tree, env)
trait ValDefConstantCollectingComponent
extends ConstantCollectingComponent {
(valdef: ValDefApi) => {
if(valdef.mods.isFinal && valdef.mods.isField &&
valdef.rhs != NoTree ) {
val newEnv2 = valdef.symbol.map { sym =>
env.bind(sym, ExprValue(valdef.rhs))
}.getOrElse(env)
newEnv2
} else env
}
}
@component(tree, env)
trait MethodDefConstantCollectingComponent
extends ConstantCollectingComponent {
(mthd: PMethodDefApi) => env
}
@component(tree, env)
trait BlockConstantCollectingComponent
extends ConstantCollectingComponent {
(block: BlockApi) => env
}
// @component(tree, env)
// trait SelectConstantCollectingComponent
// extends ConstantCollectingComponent {
// (select: SelectApi) => {
// if(isTypeSymbol(select.qual.symbol) &&
// isStatic(select.tree.symbol)) {
// select.symbol.map { sym =>
// env.getValue(sym) match {
// case TypeValue(tenv) =>
// val (v, _) = collect((select.tree, tenv))
// if(isConstantExpression(v)) {
// (v, env)
// } else {
// (select, env)
// }
// case _ =>
// (select, env)
// }
// }.getOrElse((select, env))
// } else {
// (select, env)
// }
// }
//
//
// def isTypeSymbol(sym: Option[Symbol]): Boolean =
// SymbolUtils.isTypeSymbol(sym)
//
// def isStatic(sym: Option[Symbol]): Boolean =
// sym.map(_.mods.isStatic).getOrElse(false)
//
// protected def isConstantExpression(tree: Tree): Boolean =
// TreeUtils.isConstantExpression(tree)
// }
//
//
// @component(tree, env)
// trait IdentConstantCollectingComponent
// extends ConstantCollectingComponent {
// (ident: IdentApi) => {
// ident.symbol.map { sym =>
// env.getValue(sym) match {
// case ExprValue(v) =>
// (v, env)
// case _ =>
// (ident, env)
// }
// }.getOrElse((ident, env))
// }
//
// protected def isConstantExpression(tree: Tree): Boolean =
// TreeUtils.isConstantExpression(tree)
// }
|
amanjpro/languages-a-la-carte
|
ooj/src/main/scala/eval/constantcollectors.scala
|
Scala
|
bsd-3-clause
| 6,151
|
/*
* Copyright (C) 2015 - 2017 Juergen Pfundt
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* This Parboiled2 grammar implements the
Internationalized Resource Identifiers (IRIs)
as described in
http://tools.ietf.org/html/rfc3987
Network Working Group M. Duerst
Request for Comments: 3987 W3C
Category: Standards Track M. Suignard
Microsoft Corporation
January 2005
IRIs are defined similarly to URIs in [RFC3986], but the class of
unreserved characters is extended by adding the characters of the UCS
(Universal Character Set, [ISO10646]) beyond U+007F, subject to the
limitations given in the syntax rules below and in section 6.1.
Otherwise, the syntax and use of components and reserved characters
is the same as that in [RFC3986]. All the operations defined in
[RFC3986], such as the resolution of relative references, can be
applied to IRIs by IRI-processing software in exactly the same way as
they are for URIs by URI-processing software.
Characters outside the US-ASCII repertoire are not reserved and
therefore MUST NOT be used for syntactical purposes, such as to
delimit components in newly defined schemes. For example, U+00A2,
CENT SIGN, is not allowed as a delimiter in IRIs, because it is in
the 'iunreserved' category. This is similar to the fact that it is
not possible to use '-' as a delimiter in URIs, because it is in the
'unreserved' category.
*/
package org.arktos
import org.parboiled2.{ ErrorFormatter, ParseError, ParserInput, CharPredicate }
import org.parboiled2.CharPredicate._
import scala.util.{ Failure, Success }
class IRIParser(input: ParserInput) extends URIParser(input: ParserInput) {
import IRIParser._
import URIAST._
//private = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
val `private` = CharPredicate('\uE000' to '\uF8FF')
def private_supplement = rule { `private` | !(str("\\U000FFFFE") | str("\\U000FFFFF") | str("\\U00010FFE") | str("\\U00010FFF")) ~ isHighSurrogate ~ isLowSurrogate }
val ucschar = CharPredicate('\u00A0' to '\uD7FF', '\uF900' to '\uFDCF', '\uFDF0' to '\uFFEF')
// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" / ucschar
override val unreserved = AlphaNum ++ '-' ++ '.' ++ '_' ++ '~' ++ ucschar
def ucschar_supplement = rule {
!(str("\\\\U0001FFFE") | str("\\\\U0001FFFF") |
str("\\\\U0002FFFE") | str("\\\\U0002FFFF") |
str("\\\\U0003FFFE") | str("\\\\U0003FFFF") |
str("\\\\U0004FFFE") | str("\\\\U0004FFFF") |
str("\\\\U0005FFFE") | str("\\\\U0005FFFF") |
str("\\\\U0006FFFE") | str("\\\\U0006FFFF") |
str("\\\\U0007FFFE") | str("\\\\U0007FFFF") |
str("\\\\U0008FFFE") | str("\\\\U0008FFFF") |
str("\\\\U0009FFFE") | str("\\\\U0009FFFF") |
str("\\\\U000AFFFE") | str("\\\\U000AFFFF") |
str("\\\\U000BFFFE") | str("\\\\U000BFFFF") |
str("\\\\U000CFFFE") | str("\\\\U000CFFFF") |
str("\\\\U000DFFFE") | str("\\\\U000DFFFF") |
str("\\\\U000EFFFE") | str("\\\\U000EFFFF")) ~ isHighSurrogate ~ isLowSurrogate
}
// URI-reference = URI / relative-ref
def IRI_reference = rule { (URI | relative_ref) ~ EOI ~> URI_Reference }
// user = *( unreserved / pct-encoded / sub-delims )
override def user = rule {
atomic(capture((unreserved | pct_encoded | sub_delims | ucschar_supplement).*)) ~> URI_User
}
// reg-name = *( unreserved / pct-encoded / sub-delims )
override def reg_name = rule { atomic(capture((unreserved | pct_encoded | sub_delims | ucschar_supplement).*)) ~> URI_Reg_Name }
//segment-nz-nc = 1*( unreserved / pct-encoded / sub-delims / "@" ) ; non-zero-length segment without any ':' ":"
override def segment_nz_nc = rule { (unreserved | pct_encoded | sub_delims | '@' | ucschar_supplement).+ }
override def qchar = rule { unreserved | pct_encoded | query_delims | ':' | '@' | ucschar_supplement | `private` }
}
object IRIParser {
import URI.URIType
import URIReturnValue._
val isHighSurrogate = CharPredicate.from(Character.isHighSurrogate)
val isLowSurrogate = CharPredicate.from(Character.isLowSurrogate)
def apply(input: ParserInput, validate: Boolean = false) = {
val parser = new IRIParser(input)
val result = parser.IRI_reference.run()
result match {
case Success(x) ⇒ Success(if (validate) {
Map.empty: URIType
} else {
(evalURI().eval(result.get): @unchecked) match {
case URIMap(m) ⇒ m
}
})
case Failure(e: ParseError) ⇒ Failure(new RuntimeException(parser.formatError(result.failed.get.asInstanceOf[org.parboiled2.ParseError], new ErrorFormatter())))
case Failure(e) ⇒ Failure(new RuntimeException("Unexpected error during parsing run: " + result.failed.get))
}
}
def validatePath(input: ParserInput, validate: Boolean = false) = {
val parser = new IRIParser(input)
val result = parser.path.run()
result match {
case Success(x) ⇒ Success(if (validate) { Map.empty: URIType }
else { (evalURI().eval(result.get): @unchecked) match { case URIMap(m) ⇒ m } })
case Failure(e: ParseError) ⇒ Failure(new RuntimeException(parser.formatError(result.failed.get.asInstanceOf[org.parboiled2.ParseError], new ErrorFormatter())))
case Failure(e) ⇒ Failure(new RuntimeException("Unexpected error during parsing run: " + result.failed.get))
}
}
}
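// Illustrative sketch (added; not part of the original file): minimal use of
// the companion apply above. The IRI literal is an arbitrary example; on
// success the parser yields a map of URI/IRI components.
object IRIParserUsageSketch {
  def parse(): Unit =
    IRIParser("http://example.org/res") match {
      case Success(componentMap) ⇒ println(componentMap)
      case Failure(error)        ⇒ println(error.getMessage)
    }
}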
|
JuPfu/arktos
|
shared/src/main/scala/org/arktos/IRIParser.scala
|
Scala
|
apache-2.0
| 6,143
|
package services
import play.api.Logger
import reactivemongo.api.collections.bson.BSONCollection
import reactivemongo.api.indexes.{Index, IndexType}
import reactivemongo.api.{Cursor, DefaultDB}
import reactivemongo.bson.{BSONDocument, BSONDocumentReader, BSONDocumentWriter, document}
import scala.concurrent.{ExecutionContext, Future}
class MongoService[T](database: Future[DefaultDB], val collectionType: String)(implicit executionContext: ExecutionContext) {
def count(selector: BSONDocument): Future[Int] = {
val collection: Future[BSONCollection] = database.map(_.collection(collectionType))
collection.flatMap(_.count(Option(selector)))
}
def find(query: BSONDocument)(implicit reader: BSONDocumentReader[T]): Future[Seq[T]] = {
val collection: Future[BSONCollection] = database.map(_.collection(collectionType))
collection.flatMap(_.find(query).cursor[T]().collect(-1, Cursor.ContOnError[Seq[T]]()))
}
def findAll()(implicit reader: BSONDocumentReader[T]): Future[Seq[T]] = {
val collection: Future[BSONCollection] = database.map(_.collection(collectionType))
collection.flatMap(_.find(document()).cursor[T]().collect(-1, Cursor.ContOnError[Seq[T]]()))
}
def upsert(selector: BSONDocument, entity: T)(implicit writer: BSONDocumentWriter[T]): Future[Unit] = {
val collection: Future[BSONCollection] = database.map(_.collection(collectionType))
collection.flatMap(_.update(selector, entity, upsert = true).map(_ => {}))
}
def test(selector: BSONDocument, entity: T)(implicit writer: BSONDocumentWriter[T]): Future[Unit] = {
val collection: Future[BSONCollection] = database.map(_.collection(collectionType))
collection.flatMap(_.update(selector, entity, upsert = true).map(_ => {}))
}
def distinct(fieldName: String, query: Option[BSONDocument] = None)(implicit reader: BSONDocumentReader[T]): Future[List[String]] = {
val collection: Future[BSONCollection] = database.map(_.collection(collectionType))
collection.flatMap(_.distinct[String, List](fieldName, query))
}
def ensureIndex(indexes: Seq[(String, IndexType)]): Future[Unit] = {
val collection: Future[BSONCollection] = database.map(_.collection(collectionType))
collection.flatMap(_.indexesManager.ensure(Index(indexes))).map(a => {
Logger.info(s"ensured index for $collectionType: $a")
})
}
}
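// Illustrative sketch (added; not part of the original file): wiring
// MongoService for a hypothetical Player model. Assumes reactivemongo's
// Macros-derived BSON handler and an implicit ExecutionContext in scope; the
// collection name "players" is arbitrary.
object MongoServiceUsageSketch {
  import reactivemongo.bson.Macros
  import scala.concurrent.ExecutionContext.Implicits.global

  case class Player(name: String, score: Int)         // hypothetical model
  implicit val playerHandler = Macros.handler[Player]  // BSON reader + writer

  def service(db: Future[DefaultDB]): MongoService[Player] =
    new MongoService[Player](db, "players")

  def saveAlice(players: MongoService[Player]): Future[Unit] =
    players.upsert(document("name" -> "alice"), Player("alice", 10))
}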
|
soniCaH/footbalisto-api
|
app/services/MongoService.scala
|
Scala
|
apache-2.0
| 2,367
|
package org.jetbrains.plugins.scala.testingSupport.scalatest.scala2_11.scalatest3_0_1
import org.jetbrains.plugins.scala.testingSupport.scalatest.staticStringTest._
/**
* @author Roman.Shein
* @since 10.03.2017
*/
class Scalatest2_11_3_0_1_StaticStringTest extends Scalatest2_11_3_0_1_Base with FeatureSpecStaticStringTest with
FlatSpecStaticStringTest with FreeSpecStaticStringTest with FunSpecStaticStringTest with FunSuiteStaticStringTest with
PropSpecStaticStringTest with WordSpecStaticStringTest with MethodsStaticStringTest {
}
|
loskutov/intellij-scala
|
test/org/jetbrains/plugins/scala/testingSupport/scalatest/scala2_11/scalatest3_0_1/Scalatest2_11_3_0_1_StaticStringTest.scala
|
Scala
|
apache-2.0
| 544
|
package org.jetbrains.plugins.scala
package lang
package psi
package api
package toplevel
import com.intellij.openapi.util.text.StringUtil
import com.intellij.psi.{PsiClass, PsiElement, PsiMethod}
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScClassParameter
import org.jetbrains.plugins.scala.lang.psi.fake.FakePsiMethod
import org.jetbrains.plugins.scala.lang.psi.light.{PsiClassWrapper, PsiTypedDefinitionWrapper, StaticPsiTypedDefinitionWrapper}
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.Parameter
import org.jetbrains.plugins.scala.lang.psi.types.result.{TypingContext, TypingContextOwner}
import org.jetbrains.plugins.scala.macroAnnotations.{Cached, ModCount}
/**
* Member definitions, classes, named patterns which have types
*/
trait ScTypedDefinition extends ScNamedElement with TypingContextOwner {
/**
* @return false for variable elements
*/
def isStable = true
private def typeArr2paramArr(a: Array[ScType]): Array[Parameter] = a.toSeq.mapWithIndex {
case (tpe, index) => new Parameter("", None, tpe, false, false, false, index)
}.toArray
@Cached(synchronized = false, modificationCount = ModCount.getOutOfCodeBlockModificationCount, this)
def getUnderEqualsMethod: PsiMethod = {
val hasModifierProperty: String => Boolean = nameContext match {
case v: ScModifierListOwner => v.hasModifierProperty
case _ => _ => false
}
val tType = getType(TypingContext.empty).getOrAny
new FakePsiMethod(this, name + "_=", typeArr2paramArr(Array[ScType](tType)), types.Unit, hasModifierProperty)
}
@Cached(synchronized = false, modificationCount = ModCount.getOutOfCodeBlockModificationCount, this)
def getGetBeanMethod: PsiMethod = {
val hasModifierProperty: String => Boolean = nameContext match {
case v: ScModifierListOwner => v.hasModifierProperty
case _ => _ => false
}
new FakePsiMethod(this, "get" + StringUtil.capitalize(this.name), Array.empty,
this.getType(TypingContext.empty).getOrAny, hasModifierProperty)
}
@Cached(synchronized = false, modificationCount = ModCount.getOutOfCodeBlockModificationCount, this)
def getSetBeanMethod: PsiMethod = {
val hasModifierProperty: String => Boolean = nameContext match {
case v: ScModifierListOwner => v.hasModifierProperty
case _ => _ => false
}
val tType = getType(TypingContext.empty).getOrAny
new FakePsiMethod(this, "set" + name.capitalize, typeArr2paramArr(Array[ScType](tType)), types.Unit, hasModifierProperty)
}
@Cached(synchronized = false, modificationCount = ModCount.getOutOfCodeBlockModificationCount, this)
def getIsBeanMethod: PsiMethod = {
val hasModifierProperty: String => Boolean = nameContext match {
case v: ScModifierListOwner => v.hasModifierProperty
case _ => _ => false
}
new FakePsiMethod(this, "is" + StringUtil.capitalize(this.name), Array.empty,
this.getType(TypingContext.empty).getOrAny, hasModifierProperty)
}
@Cached(synchronized = false, modificationCount = ModCount.getOutOfCodeBlockModificationCount, this)
def getBeanMethods: Seq[PsiMethod] = {
def valueSeq(v: ScAnnotationsHolder with ScModifierListOwner): Seq[PsiMethod] = {
val beanProperty = ScalaPsiUtil.isBeanProperty(v)
val booleanBeanProperty = ScalaPsiUtil.isBooleanBeanProperty(v)
if (beanProperty || booleanBeanProperty) {
Seq(if (beanProperty) getGetBeanMethod else getIsBeanMethod)
} else Seq.empty
}
def variableSeq(v: ScAnnotationsHolder with ScModifierListOwner): Seq[PsiMethod] = {
val beanProperty = ScalaPsiUtil.isBeanProperty(v)
val booleanBeanProperty = ScalaPsiUtil.isBooleanBeanProperty(v)
if (beanProperty || booleanBeanProperty) {
Seq(if (beanProperty) getGetBeanMethod else getIsBeanMethod, getSetBeanMethod)
} else Seq.empty
}
ScalaPsiUtil.nameContext(this) match {
case v: ScValue =>
valueSeq(v)
case v: ScVariable =>
variableSeq(v)
case v: ScClassParameter if v.isVal =>
valueSeq(v)
case v: ScClassParameter if v.isVar =>
variableSeq(v)
case _ => Seq.empty
}
}
import org.jetbrains.plugins.scala.lang.psi.light.PsiTypedDefinitionWrapper.DefinitionRole._
@Cached(synchronized = false, modificationCount = ModCount.getOutOfCodeBlockModificationCount, this)
def getTypedDefinitionWrapper(isStatic: Boolean, isInterface: Boolean, role: DefinitionRole,
cClass: Option[PsiClass] = None): PsiTypedDefinitionWrapper = {
new PsiTypedDefinitionWrapper(this, isStatic, isInterface, role, cClass)
}
@Cached(synchronized = false, modificationCount = ModCount.getOutOfCodeBlockModificationCount, this)
def getStaticTypedDefinitionWrapper(role: DefinitionRole, cClass: PsiClassWrapper): StaticPsiTypedDefinitionWrapper = {
new StaticPsiTypedDefinitionWrapper(this, role, cClass)
}
def nameContext: PsiElement = ScalaPsiUtil.nameContext(this)
def isVar: Boolean = false
def isVal: Boolean = false
def isAbstractMember: Boolean = ScalaPsiUtil.nameContext(this) match {
case _: ScFunctionDefinition | _: ScPatternDefinition | _: ScVariableDefinition => false
case cp: ScClassParameter => false
case _ => true
}
}
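// Illustrative sketch (added; not part of the original file): the bean-accessor
// naming used by getGetBeanMethod / getIsBeanMethod / getSetBeanMethod above,
// reduced to plain string functions. The real methods also wire up parameter
// and return types from the member's ScType.
object BeanNamingSketch {
  def getter(name: String): String        = "get" + name.capitalize // @BeanProperty
  def booleanGetter(name: String): String = "is" + name.capitalize  // @BooleanBeanProperty
  def setter(name: String): String        = "set" + name.capitalize // vars only
  // getter("count") == "getCount"; setter("count") == "setCount"
}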
|
JetBrains/intellij-scala-historical
|
src/org/jetbrains/plugins/scala/lang/psi/api/toplevel/ScTypedDefinition.scala
|
Scala
|
apache-2.0
| 5,472
|
package ml.sparkling.graph.api.operators.algorithms.shortestpaths
/**
* Created by Roman Bartusiak (roman.bartusiak@pwr.edu.pl http://riomus.github.io).
*/
object ShortestPathsTypes {
type JMap[K,V]=java.util.Map[K,V]
type JLong=java.lang.Long
type JDouble=java.lang.Double
type JSet[T]=java.util.Set[T]
type JList[T]=java.util.List[T]
type JCollection[T]=java.util.Collection[T]
type JPath=JList[JDouble]
type JPathCollection=JSet[JPath]
type WithPathContainer=JMap[JLong,JPathCollection]
}
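// Illustrative sketch (added; not part of the original file): building a tiny
// WithPathContainer by hand to make the nested aliases concrete. Vertex 1L is
// mapped to a single path [0.0, 1.0]; the values are arbitrary.
object ShortestPathsTypesSketch {
  import ShortestPathsTypes._

  def sample(): WithPathContainer = {
    val path: JPath = java.util.Arrays.asList(Double.box(0.0), Double.box(1.0))
    val paths: JPathCollection = new java.util.HashSet[JPath]()
    paths.add(path)
    val container: WithPathContainer = new java.util.HashMap[JLong, JPathCollection]()
    container.put(Long.box(1L), paths)
    container
  }
}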
|
sparkling-graph/sparkling-graph
|
api/src/main/scala/ml/sparkling/graph/api/operators/algorithms/shortestpaths/ShortestPathsTypes.scala
|
Scala
|
bsd-2-clause
| 513
|
package blackboard.monitor.mbean
import scala.collection.mutable.HashMap
import scala.collection.mutable.Map
import scala.collection.mutable.SynchronizedMap
import scala.compat.Platform
trait CacheableBean[T] {
object MapMaker {
def makeMap: Map[String, (Long, T)] = {
new HashMap[String, (Long, T)] with SynchronizedMap[String, (Long, T)]
}
}
private val beans: Map[String, (Long, T)] = MapMaker.makeMap
def get(name: String, validateTime: Long): Option[T] = {
beans.get(name) match {
case Some((timestamp, value)) =>
val duration = Platform.currentTime - timestamp
if (duration >= validateTime * 1000) None else Some(value)
case None => None
}
}
def put(name: String, value: T) = {
beans += name ->(Platform.currentTime, value)
}
}
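// Illustrative sketch (added; not part of the original file): hedged usage of
// CacheableBean for a hypothetical String cache. Entries older than the given
// number of seconds are reported as absent.
object CacheableBeanUsageSketch {
  object StringCache extends CacheableBean[String]

  def example(): Option[String] = {
    StringCache.put("greeting", "hello")
    StringCache.get("greeting", validateTime = 60) // Some("hello") while fresh
  }
}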
|
blackboard/monitor-bridge
|
src/main/scala/blackboard/monitor/mbean/CacheableBean.scala
|
Scala
|
bsd-3-clause
| 901
|
package controllers
import play.api.mvc._
/**
* Controller that renders example templates.
*/
object Application extends Controller {
def index = Action {
Ok(views.html.index())
}
/**
* Renders a minimal HTML template with no parameters.
*/
def minimal = Action {
Ok(views.html.minimal())
}
/**
* Renders a template with a String title parameter.
*/
def title = Action {
Ok(views.html.title("New Arrivals"))
}
}
|
Bubblemint/PlayForScala
|
ch03/Templates/app/controllers/Application.scala
|
Scala
|
apache-2.0
| 459
|
package org.ai4fm.proofprocess.ui.actions
import org.ai4fm.proofprocess.ProofFeatureDef
import org.ai4fm.proofprocess.ui.features.FeatureDefInfoDialog
import org.ai4fm.proofprocess.ui.util.SWTUtil.selectionElement
import org.eclipse.core.commands.{AbstractHandler, ExecutionEvent, ExecutionException}
import org.eclipse.ui.handlers.HandlerUtil
/**
* Opens a dialog to edit feature definition properties for the given selection.
*
* @author Andrius Velykis
*/
class FeatureDefInfoHandler extends AbstractHandler {
@throws(classOf[ExecutionException])
override def execute(event: ExecutionEvent): AnyRef = {
// get the selected element if available
val selection = selectionElement(HandlerUtil.getCurrentSelection(event))
selection match {
case Some(feature: ProofFeatureDef) => handleFeatureSelected(event, feature)
case _ => // ignore
}
// return value is reserved for future APIs
null
}
private def handleFeatureSelected(event: ExecutionEvent, feature: ProofFeatureDef) {
val shell = HandlerUtil.getActiveShell(event)
val dialog = new FeatureDefInfoDialog(shell, feature)
dialog.open()
}
}
|
andriusvelykis/proofprocess
|
org.ai4fm.proofprocess.ui/src/org/ai4fm/proofprocess/ui/actions/FeatureDefInfoHandler.scala
|
Scala
|
epl-1.0
| 1,175
|
package sample.cluster.factorial
import scala.concurrent.duration._
import com.typesafe.config.ConfigFactory
import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.ActorSystem
import akka.actor.Props
import akka.cluster.Cluster
import akka.routing.FromConfig
import akka.actor.ReceiveTimeout
//#frontend
class FactorialFrontend(upToN: Int, repeat: Boolean) extends Actor with ActorLogging {
val backend = context.actorOf(FromConfig.props(),
name = "factorialBackendRouter")
override def preStart(): Unit = {
sendJobs()
if (repeat) {
context.setReceiveTimeout(10.seconds)
}
}
def receive = {
case (n: Int, factorial: BigInt) =>
if (n == upToN) {
log.debug("{}! = {}", n, factorial)
if (repeat) sendJobs()
else context.stop(self)
}
case ReceiveTimeout =>
log.info("Timeout")
sendJobs()
}
def sendJobs(): Unit = {
log.info("Starting batch of factorials up to [{}]", upToN)
1 to upToN foreach { backend ! _ }
}
}
//#frontend
object FactorialFrontend {
def main(args: Array[String]): Unit = {
val upToN = 200
val config = ConfigFactory.parseString("akka.cluster.roles = [frontend]").
withFallback(ConfigFactory.load("factorial"))
val system = ActorSystem("ClusterSystem", config)
system.log.info("Factorials will start when 2 backend members in the cluster.")
//#registerOnUp
Cluster(system) registerOnMemberUp {
system.actorOf(Props(classOf[FactorialFrontend], upToN, true),
name = "factorialFrontend")
}
//#registerOnUp
}
}
|
linearregression/social_data_collector
|
src/main/scala/sample/cluster/factorial/FactorialFrontend.scala
|
Scala
|
cc0-1.0
| 1,603
|
package com.tribbloids.spookystuff.testutils
import org.json4s.JValue
import org.json4s.jackson.JsonMethods
import org.scalatest.Suite
trait Suitex {
self: Suite =>
final val ACTUAL =
"[ACTUAL / LEFT]"
final val EXPECTED =
"[EXPECTED / RIGHT]"
// CommonUtils.debugCPResource()
@transient implicit class TestStringView(str: String) {
//TODO: use reflection to figure out test name and annotate
def shouldBe(
gd: String = null,
sort: Boolean = false,
ignoreCase: Boolean = false,
superSet: Boolean = false
): Unit = {
val rows = str
.split("\\n")
.toList
var a: List[String] = rows
.filterNot(_.replaceAllLiterally(" ", "").isEmpty)
.map(v => ("|" + v).trim.stripPrefix("|"))
if (sort) a = a.sorted
if (ignoreCase) a = a.map(_.toLowerCase)
Option(gd) match {
case None =>
println(AssertionErrorObject(rows, null).actualInfo)
case Some(_gd) =>
var b = _gd
.split("\\n")
.toList
.filterNot(_.replaceAllLiterally(" ", "").isEmpty)
.map(v => ("|" + v).trim.stripPrefix("|"))
if (sort) b = b.sorted
if (ignoreCase) b = b.map(_.toLowerCase)
if (superSet) {
Predef.assert(
a.intersect(b).nonEmpty,
AssertionErrorObject(a, b)
)
} else {
Predef.assert(
a == b,
AssertionErrorObject(a, b)
)
}
}
}
//ignore sequence
// def jsonShouldBe(
// gd: String = null,
// mode: JSONCompareMode = JSONCompareMode.LENIENT
// ): Unit = {
//
// JSONAssert.assertEquals(str, gd, mode)
// }
def jsonShouldBe(gd: String): Unit = {
val selfJ = JsonMethods.parse(str)
val gdJ = JsonMethods.parse(gd)
assertValidDataInJson(selfJ, gdJ)
}
def rowsShouldBe(
gd: String = null
): Unit = shouldBe(gd, sort = true)
def shouldBeLike(
gd: String = null,
sort: Boolean = false,
ignoreCase: Boolean = false
): Unit = {
val aRaw: List[String] = str
.split("\\n")
.toList
.filterNot(_.replaceAllLiterally(" ", "").isEmpty)
.map(v => ("|" + v).trim.stripPrefix("|"))
val a =
if (sort) aRaw.sorted
else aRaw
Option(gd) match {
case None =>
println(AssertionErrorObject(a, null).actualInfo)
case Some(_gd) =>
var b = _gd
.split("\\n")
.toList
.filterNot(_.replaceAllLiterally(" ", "").isEmpty)
.map(v => ("|" + v).trim.stripPrefix("|"))
if (sort) b = b.sorted
if (ignoreCase) b = b.map(_.toLowerCase)
try {
a.zipAll(b, null, null).foreach { tuple =>
val fixes = tuple._2.split("[.]{6,}", 2)
Predef.assert(
tuple._1.startsWith(fixes.head)
)
Predef.assert(
tuple._1.endsWith(fixes.last)
)
}
} catch {
case e: Exception =>
throw new AssertionError("" + AssertionErrorObject(a, b), e)
}
}
}
def rowsShouldBeLike(gd: String = null): Unit = shouldBeLike(gd, sort = true)
// def uriContains(contains: String): Boolean = {
// str.contains(contains) &&
// str.contains(URLEncoder.encode(contains,"UTF-8"))
// }
//
// def assertUriContains(contains: String): Unit = {
// assert(
// str.contains(contains) &&
// str.contains(URLEncoder.encode(contains,"UTF-8")),
// s"$str doesn't contain either:\\n" +
// s"$contains OR\\n" +
// s"${URLEncoder.encode(contains,"UTF-8")}"
// )
// }
// from org.apache.spark.JsonTestUtils
def assertValidDataInJson(validateJson: JValue, expectedJson: JValue) {
import org.json4s._
val Diff(c, a, d) = validateJson.diff(expectedJson)
val validatePretty = JsonMethods.pretty(validateJson)
val expectedPretty = JsonMethods.pretty(expectedJson)
val errorMessage = s"Expected:\\n$expectedPretty\\nFound:\\n$validatePretty"
// import org.scalactic.TripleEquals._
assert(c == JNothing, s"$errorMessage\\nChanged:\\n${JsonMethods.pretty(c)}")
assert(a == JNothing, s"$errorMessage\\nAdded:\\n${JsonMethods.pretty(a)}")
assert(d == JNothing, s"$errorMessage\\nDeleted:\\n${JsonMethods.pretty(d)}")
}
}
//TODO: update to be on par with scalatest supported by IDE
case class AssertionErrorObject(actual: List[String], expected: List[String]) {
lazy val actualStr: String = actual.mkString("\\n")
lazy val actualInfo: String = s"\\n=============================== $ACTUAL ================================\\n\\n" +
actualStr
lazy val expectedStr: String = expected.mkString("\\n")
lazy val expectedInfo: String = s"\\n=============================== $EXPECTED ================================\\n\\n" +
expectedStr
override lazy val toString: String = {
val result = s"""
|"
|$actualInfo
|" did not equal "
|$expectedInfo
|"
""".stripMargin.split('\\n').filter(_.nonEmpty).mkString("\\n")
result
}
}
@transient implicit class TestMapView[K, V](map: scala.collection.Map[K, V]) {
assert(map != null)
def shouldBe(expected: scala.collection.Map[K, V]): Unit = {
val messages = expected.toSeq.flatMap { tuple =>
val messageOpt = map.get(tuple._1) match {
case None =>
Some(s"${tuple._1} doesn't exist in map")
case Some(v) =>
if (v == tuple._2) None
else Some(s"${tuple._1} mismatch: expected ${tuple._2} =/= actual $v")
}
messageOpt
}
if (messages.nonEmpty)
throw new AssertionError("Assertion failure: {\\n" + messages.mkString("\\n") + "\\n}")
}
def shouldBe(expected: (K, V)*): Unit = {
this.shouldBe(Map(expected: _*))
}
}
def bypass(f: => Unit): Unit = {}
// override def intercept[T <: AnyRef](f: => Any)(implicit manifest: Manifest[T]): T = {
// super.intercept{
// try f
// catch {
// case e: Exception =>
// println("Attempt to intercept:")
// e.printStackTrace()
// throw e
// }
// }
// }
}
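// Minimal usage sketch (hypothetical spec; assumes a ScalaTest version providing
// org.scalatest.funsuite.AnyFunSuite): mix Suitex into a suite and compare
// multi-line output against a "gold" string. shouldBe splits both sides into
// lines, drops blank lines and trailing whitespace, and compares the rest
// (optionally sorted and/or lower-cased).
//
//   class ExampleSpec extends org.scalatest.funsuite.AnyFunSuite with Suitex {
//     test("rendered rows match gold") {
//       val rendered = Seq("a 1", "b 2").mkString("\\n")
//       rendered.shouldBe(
//         """a 1
//           |b 2""".stripMargin
//       )
//     }
//   }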
|
tribbloid/spookystuff
|
mldsl/src/test/scala/com/tribbloids/spookystuff/testutils/Suitex.scala
|
Scala
|
apache-2.0
| 6,528
|
package im.mange.jetpac.input
import im.mange.jetboot.widget.form.FormInput
import im.mange.jetpac.Js
import net.liftweb.http.SHtml
case class CheckBox(field: Field, default: Boolean) extends FormInput {
var value = default
//TODO: we need to remove the wrapping span - but we can't because EventHandling needs an Elem and we have a NodeSeq
//TODO: as a result EventHandling isn't going to work on CheckBox, because the event handlers won't be attached to the input
//TODO: use the old attachHandlersToBase() trick
override def baseElement = <span>{SHtml.checkbox(default, onSubmit _, "id" → id, "style" → styles.render, "class" → classes.render)}</span>
//TODO: there should probably be a default on FormInput which should rarely be overridden (so far this is the only override)
private def onSubmit(value: Boolean) { this.value = value }
override def reset = if (default) Js.setAttributeValue(id, "checked", "checked") else Js.removeElementAttribute(id, "checked")
}
|
alltonp/jetboot
|
src/main/scala/im/mange/jetpac/input/CheckBox.scala
|
Scala
|
apache-2.0
| 1,001
|
// Copyright (C) 2010-2011 Monash University
//
// This file is part of Factotum.
//
// Factotum is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Factotum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Factotum. If not, see <http://www.gnu.org/licenses/>.
//
// Designed and implemented by Dmitri Nikulin.
//
// Repository: https://github.com/dnikulin/factotum
// Email: dnikulin+factotum@gmail.com
package com.dnikulin.factotum.engine
import scala.collection.mutable.{ArrayBuffer, HashMap}
import scala.xml._
import java.io._
import java.util.Date
import java.util.concurrent._
import com.google.common.io.Files
import net.liftweb.common._
import net.liftweb.http._
import net.liftweb.json._
import net.liftweb.mapper._
import com.dnikulin.vijil.file._
import com.dnikulin.vijil.parse._
import com.dnikulin.vijil.store._
import com.dnikulin.vijil.text._
import com.dnikulin.vijil.tools._
import com.dnikulin.vijil.traits._
import com.dnikulin.factotum.engine._
import com.dnikulin.factotum.model._
import com.dnikulin.factotum.report._
import com.dnikulin.factotum.snippet._
import com.dnikulin.factotum.web._
object SessionIndex extends SessionVar[UserIndex](new UserIndex)
case class StoreToken(
val index: UserIndex,
val owner: Box[User],
val isPublic: Boolean,
val isPermanent: Boolean,
val isGuest: Boolean
)
class TextStub(
val meta: TextFile,
val store: ObjectStore[TextFile],
val token: StoreToken
) extends KeyedByHash[TextStub] {
override val hash = meta.hash
def fetch(): Option[TextFile] =
store.get(hash)
}
object TextStore extends FromJson[TextFile] {
override def fromJValue(jv: JValue): Option[TextFile] =
TextFile.fromJValue(jv).map(buildPath)
def buildPath(root: String, span: TextSpan): TextSpan = {
// Extract block level and name.
val parts = span.tag("BlockLevel").take(1) ++ span.tag("BlockName").take(1)
// Construct new path.
val path = (root + (parts.map(_.trim).mkString(" ").trim)).trim
// Create tag for new path.
val tag = Tag("BlockPath", path)
val tags = tag :: span.tags.filter(_.name != "BlockPath")
// Create new path root, and invoke recursively.
val nroot = (path + ", ").replace(", ,", ",")
val spans = span.spans.map(buildPath(nroot, _))
// Modify only tags and child spans.
span.copy(tags = tags, spans = spans)
}
def buildPath(text: TextFile): TextFile = {
val spans = text.spans.map(buildPath("", _))
text.copy(spans = spans)
}
def cleanName(name: String): String =
name.replace(".txt", "")
}
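// Illustrative sketch of buildPath (hypothetical tag values): a span tagged
// BlockLevel -> "Chapter" and BlockName -> "1" receives BlockPath -> "Chapter 1";
// a child span tagged only BlockName -> "2" then receives
// BlockPath -> "Chapter 1, 2", because the parent path is passed down as the
// new root "Chapter 1, " and concatenated with the child's own level/name parts.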
class TextStore extends WorkerPool with HasTexts {
import FactotumWeb.log
import FactotumWords.words
import Hash.utf8
import TextStore._
// Use few workers to reduce peak memory pressure.
override val nworkers = 4
private val indices = new HashMap[Long, UserIndex]
val publicIndex = new UserIndex
/** Cached store for TextFile. */
val texts = TokyoStack (42771, TextStore)
/** Cached store for SpanPairsReport. */
val reports = TokyoStack (42772, SpanPairsReport)
/** Light store for raw text bytes. */
val files = SnappyStack (42773)
def makeSessionIndex: UserIndex =
SessionIndex.is
private def makeTextIndexStack: TextIndexStack = {
var stack = publicIndex :: Nil
if (User.loggedIn_?)
stack = userIndex(User.currentUser.get.id) :: stack
new TextIndexStack(makeSessionIndex :: stack)
}
private def userIndex(id: Long): UserIndex =
synchronized(indices.getOrElseUpdate(id, new UserIndex(id)))
def makeUserIndex: UserIndex =
userIndex(User.currentUser.get.id)
def makeRightIndex(permanent: Boolean): UserIndex =
if (permanent) makeUserIndex else makeSessionIndex
def indexText(text: TextFile, stub: TextStub) {
val token = stub.token
val owner = token.owner
if (token.isPermanent) {
val index = {
if (token.isPublic) publicIndex
else userIndex(owner.get.id)
}
// Create stub with 'this' as storage.
index.keepTexts(List(stub))
} else {
// Record text in direct (but sessional) storage.
// This will expire with the session.
token.index.keepTexts(List(stub))
}
val name = owner.map(_.openId).openOr("commons")
log.info("Listed '" + text.name + "' for user '" + name + "'")
// Clear similarity cache.
FindSimilar.clearCache()
}
def addText(text: TextFile, stub: TextStub) {
indexText(text, stub)
// Add database and disk entries.
storeText(text, stub)
}
private def storeText(text: TextFile, stub: TextStub) {
val token = stub.token
if (token.isPermanent) {
background {
texts.put(text.hash, text)
StoredText.
create.
name(text.name).
hash(text.hash).
dateAdded(new Date).
owner(token.owner).
isPublic(token.isPublic).
save()
}
}
}
def recallStoredTexts() {
val hashes = StoredText.findAll.map(_.hash.is).toSet
for (hash <- hashes) {
for (dbtext <- StoredText.find(By(StoredText.hash, hash))) {
background {
for (text <- texts.get(hash)) {
println("Recalled [%s] (%s)".format(hash, text.name))
val user = User.find(By(User.id, dbtext.owner.is))
val index = userIndex(dbtext.owner.is)
val token = StoreToken(index, user, dbtext.isPublic, true, false)
val meta = text.toMeta
val stub = new TextStub(meta, texts, token)
indexText(text, stub)
}
}
}
}
}
override def text(hash: String): Option[TextFile] =
makeTextIndexStack.text(hash)
def allTexts: List[TextStub] =
makeTextIndexStack.texts
def stubByHash(hash: String): Option[TextStub] =
allTexts.find(_.hash == hash)
private def removeStoredText(hash: String) {
// In demo mode, never delete texts
if (FactotumWeb.isDemo == true)
return
for (text <- StoredText.findAll(By(StoredText.hash, hash)))
text.delete_!
}
def removeTexts(texts: List[TextStub]) = {
makeTextIndexStack.removeTexts(texts)
for (text <- texts)
removeStoredText(text.hash)
// Clear similarity cache.
FindSimilar.clearCache()
}
def saveReport(report: SpanPairsReport, token: StoreToken): Option[String] = {
val hashes = reports.put(report)
for (hash <- hashes) {
if (token.isPermanent == true) {
StoredReport.
create.
name(report.name).
hash(hash).
dateAdded(new Date).
owner(token.owner).
isPublic(token.isPublic).
save()
}
}
return hashes
}
def preloadFile(path: File) {
background {
val bytes = Files.toByteArray(path)
// Attempt to read as Factotum XML.
val texts = XmlTools.readNode(bytes) match {
case Some(node) => // Factotum XML format.
ReadFactotumXML(node).map(buildPath)
case _ => // Plaintext format.
val name = cleanName(path.getName)
val data = new String(bytes, utf8)
Some(StringToText(name, data)).map(buildPath)
}
for (text <- texts) {
// Replicate in raw storage.
files.put(text.hash, bytes)
// Create storage token and stub.
val token = StoreToken(publicIndex, None, true, true, false)
val meta = text.toMeta
val stub = new TextStub(meta, this.texts, token)
addText(text, stub)
println("Read text '%s' [%s] from %d bytes".format
(text.name, text.hash, bytes.length))
}
}
}
}
|
dnikulin/factotum
|
src/main/scala/com/dnikulin/factotum/engine/TextStore.scala
|
Scala
|
agpl-3.0
| 8,202
|
package services.datetime
import org.specs2.mutable.Specification
import services.SDate
import services.graphstages.Crunch
class UtcDateRangeSpec extends Specification {
val startDateUtc = "2020-05-01T00:00Z"
val endDateUtc = "2020-05-03T00:00Z"
s"Given a UTC date of $startDateUtc, falling inside of BST" >> {
s"When I ask for the inclusive UTC date range between it and $endDateUtc" >> {
val day1 = "2020-05-01T00:00Z"
val day2 = "2020-05-02T00:00Z"
val day3 = "2020-05-03T00:00Z"
s"I should get $day1, $day2, $day3" >> {
val dates = Crunch.utcDaysInPeriod(SDate(startDateUtc), SDate(endDateUtc)).map(SDate(_).millisSinceEpoch)
val expected = Seq(day1, day2, day3).map(SDate(_).millisSinceEpoch)
dates === expected
}
}
}
s"Given a UTC date of $startDateUtc, falling inside of BST, and parsed to UTC" >> {
s"When I ask for the inclusive UTC date range between it and $endDateUtc, and parsed to UTC" >> {
val day1 = "2020-05-01T00:00Z"
val day2 = "2020-05-02T00:00Z"
val day3 = "2020-05-03T00:00Z"
s"I should get $day1, $day2, $day3" >> {
val dates = Crunch.utcDaysInPeriod(SDate(startDateUtc, Crunch.utcTimeZone), SDate(endDateUtc, Crunch.utcTimeZone)).map(SDate(_).millisSinceEpoch)
val expected = Seq(day1, day2, day3).map(SDate(_).millisSinceEpoch)
dates === expected
}
}
}
val startDateBst = "2020-05-01T00:00+01"
val endDateBst = "2020-05-03T00:00+01"
s"Given a BST date of $startDateBst, falling inside of BST" >> {
s"When I ask for the inclusive UTC date range between it and $endDateBst" >> {
val day1 = "2020-04-30T00:00Z"
val day2 = "2020-05-01T00:00Z"
val day3 = "2020-05-02T00:00Z"
s"I should get $day1, $day2, $day3 (because the 1hr offset pushed each date to the date before)" >> {
val dates = Crunch.utcDaysInPeriod(SDate(startDateBst), SDate(endDateBst)).map(SDate(_).millisSinceEpoch)
val expected = Seq(day1, day2, day3).map(SDate(_).millisSinceEpoch)
dates === expected
}
}
}
s"Given a BST date of $startDateBst, falling inside of BST, and parsed to Europe/London" >> {
s"When I ask for the inclusive UTC date range between it and $endDateBst, and parsed to Europe/London" >> {
val day1 = "2020-04-30T00:00Z"
val day2 = "2020-05-01T00:00Z"
val day3 = "2020-05-02T00:00Z"
s"I should get $day1, $day2, $day3 (because the 1hr offset pushed each date to the date before) - The timezone of the SDate should not impact the utc days" >> {
val dates = Crunch.utcDaysInPeriod(SDate(startDateBst, Crunch.europeLondonTimeZone), SDate(endDateBst, Crunch.europeLondonTimeZone)).map(SDate(_).millisSinceEpoch)
val expected = Seq(day1, day2, day3).map(SDate(_).millisSinceEpoch)
dates === expected
}
}
}
}
|
UKHomeOffice/drt-scalajs-spa-exploration
|
server/src/test/scala/services/datetime/UtcDateRangeSpec.scala
|
Scala
|
apache-2.0
| 2,885
|
/*
* Copyright 2014 - 2015 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package slamdata.engine.physical.mongodb
import slamdata.Predef._
import scala.Any
import scala.collection.JavaConverters._
import scala.Predef.{
boolean2Boolean, double2Double, int2Integer, long2Long,
Boolean2boolean, Double2double, Integer2int, Long2long}
import slamdata.engine.fp._
import slamdata.engine.javascript._
import org.threeten.bp.{Instant, ZoneOffset}
import org.threeten.bp.temporal.{ChronoUnit}
import org.bson.types
import scalaz._
import Scalaz._
/**
* A type-safe ADT for Mongo's native data format. Note that this representation
* is not suitable for efficiently storing large quantities of data.
*/
sealed trait Bson {
def repr: java.lang.Object
def toJs: Js.Expr
}
object Bson {
def fromRepr(obj: java.lang.Object): Bson = {
@SuppressWarnings(Array("org.brianmckenna.wartremover.warts.Null"))
def loop(v: Any): Bson = v match {
case null => Null
case x: String => Text(x)
case x: java.lang.Boolean => Bool(x)
case x: java.lang.Integer => Int32(x)
case x: java.lang.Long => Int64(x)
case x: java.lang.Double => Dec(x)
case list: java.util.ArrayList[_] => Arr(list.asScala.map(loop).toList)
case obj: org.bson.Document => Doc(obj.keySet.asScala.toList.map(k => k -> loop(obj.get(k))).toListMap)
case x: java.util.Date => Date(Instant.ofEpochMilli(x.getTime))
case x: types.ObjectId => ObjectId(x.toByteArray)
case x: types.Binary => Binary(x.getData)
case _: types.MinKey => MinKey
case _: types.MaxKey => MaxKey
case x: types.Symbol => Symbol(x.getSymbol)
case x: types.BSONTimestamp => Timestamp(Instant.ofEpochSecond(x.getTime), x.getInc)
case x: java.util.regex.Pattern => Regex(x.pattern)
case x: Array[Byte] => Binary(x)
case x: java.util.UUID =>
val bos = new java.io.ByteArrayOutputStream
val dos = new java.io.DataOutputStream(bos)
dos.writeLong(x.getLeastSignificantBits)
dos.writeLong(x.getMostSignificantBits)
Binary(bos.toByteArray.reverse)
// NB: the remaining types are not easily translated back to Bson,
// and we don't expect them to appear anyway.
// • JavaScript/JavaScriptScope: would require parsing a string to our
// Js type.
// • Any other value that might be produced by MongoDB which is unknown
// to us.
case _ => Undefined
}
loop(obj)
}
final case class Dec(value: Double) extends Bson {
def repr = value: java.lang.Double
def toJs = Js.Num(value, true)
}
final case class Text(value: String) extends Bson {
def repr = value
def toJs = Js.Str(value)
}
final case class Binary(value: ImmutableArray[Byte]) extends Bson {
def repr = value.toArray[Byte]
def toJs = Js.Call(Js.Ident("BinData"), List(Js.Num(0, false), Js.Str(new sun.misc.BASE64Encoder().encode(value.toArray))))
override def toString = "Binary(Array[Byte](" + value.mkString(", ") + "))"
override def equals(that: Any): Boolean = that match {
case Binary(value2) => value === value2
case _ => false
}
override def hashCode = java.util.Arrays.hashCode(value.toArray[Byte])
}
object Binary {
def apply(array: Array[Byte]): Binary = Binary(ImmutableArray.fromArray(array))
}
final case class Doc(value: ListMap[String, Bson]) extends Bson {
def repr: org.bson.Document = new org.bson.Document((value ∘ (_.repr)).asJava)
def toJs = Js.AnonObjDecl((value ∘ (_.toJs)).toList)
}
final case class Arr(value: List[Bson]) extends Bson {
def repr = new java.util.ArrayList(value.map(_.repr).asJava)
def toJs = Js.AnonElem(value ∘ (_.toJs))
}
final case class ObjectId(value: ImmutableArray[Byte]) extends Bson {
def repr = new types.ObjectId(value.toArray[Byte])
def str = repr.toHexString
def toJs = Js.Call(Js.Ident("ObjectId"), List(Js.Str(str)))
override def toString = "ObjectId(" + str + ")"
override def equals(that: Any): Boolean = that match {
case ObjectId(value2) => value === value2
case _ => false
}
override def hashCode = java.util.Arrays.hashCode(value.toArray[Byte])
}
object ObjectId {
def apply(array: Array[Byte]): ObjectId = ObjectId(ImmutableArray.fromArray(array))
def apply(str: String): Option[ObjectId] = {
\\/.fromTryCatchNonFatal(new types.ObjectId(str)).toOption.map(oid => ObjectId(oid.toByteArray))
}
}
final case class Bool(value: Boolean) extends Bson {
def repr = value: java.lang.Boolean
def toJs = Js.Bool(value)
}
final case class Date(value: Instant) extends Bson {
def repr = new java.util.Date(value.toEpochMilli)
def toJs =
Js.Call(Js.Ident("ISODate"), List(Js.Str(value.toString)))
}
final case object Null extends Bson {
@SuppressWarnings(Array("org.brianmckenna.wartremover.warts.Null"))
def repr = null
override def toJs = Js.Null
}
/** DEPRECATED in the spec, but the 3.0 Mongo driver returns it to us. */
final case object Undefined extends Bson {
def repr = new org.bson.BsonUndefined
override def toJs = Js.Undefined
}
final case class Regex(value: String) extends Bson {
def repr = java.util.regex.Pattern.compile(value)
def toJs = Js.New(Js.Call(Js.Ident("RegExp"), List(Js.Str(value))))
}
final case class JavaScript(value: Js.Expr) extends Bson {
def repr = new types.Code(value.render(2))
def toJs = value
}
final case class JavaScriptScope(code: Js.Expr, doc: Doc) extends Bson {
def repr = new types.CodeWithScope(code.render(2), doc.repr)
// FIXME: this loses scope, but I don’t know what it should look like
def toJs = code
}
final case class Symbol(value: String) extends Bson {
def repr = new types.Symbol(value)
def toJs = Js.Ident(value)
}
final case class Int32(value: Int) extends Bson {
def repr = value: java.lang.Integer
def toJs = Js.Call(Js.Ident("NumberInt"), List(Js.Num(value, false)))
}
final case class Int64(value: Long) extends Bson {
def repr = value: java.lang.Long
def toJs = Js.Call(Js.Ident("NumberLong"), List(Js.Num(value, false)))
}
final case class Timestamp private (epochSecond: Int, ordinal: Int) extends Bson {
def repr = new types.BSONTimestamp(epochSecond, ordinal)
def toJs = Js.Call(Js.Ident("Timestamp"),
List(Js.Num(epochSecond, false), Js.Num(ordinal, false)))
override def toString = "Timestamp(" + Instant.ofEpochSecond(epochSecond) + ", " + ordinal + ")"
}
object Timestamp {
def apply(instant: Instant, ordinal: Int): Timestamp =
Timestamp((instant.toEpochMilli/1000).toInt, ordinal)
}
final case object MinKey extends Bson {
def repr = new types.MinKey
def toJs = Js.Ident("MinKey")
}
final case object MaxKey extends Bson {
def repr = new types.MaxKey
def toJs = Js.Ident("MaxKey")
}
}
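// Minimal round-trip sketch (hypothetical values): fromRepr maps driver-level
// Java objects back into the ADT, e.g.
//   Bson.fromRepr("hi") == Bson.Text("hi")
//   Bson.fromRepr(java.lang.Integer.valueOf(1)) == Bson.Int32(1)
//   Bson.fromRepr(new org.bson.Document("a", java.lang.Long.valueOf(2))) ==
//     Bson.Doc(ListMap("a" -> Bson.Int64(2)))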
sealed trait BsonType {
def ordinal: Int
}
object BsonType {
private[BsonType] abstract class AbstractType(val ordinal: Int) extends BsonType
final case object Dec extends AbstractType(1)
final case object Text extends AbstractType(2)
final case object Doc extends AbstractType(3)
final case object Arr extends AbstractType(4)
final case object Binary extends AbstractType(5)
final case object Undefined extends AbstractType(6)
final case object ObjectId extends AbstractType(7)
final case object Bool extends AbstractType(8)
final case object Date extends AbstractType(9)
final case object Null extends AbstractType(10)
final case object Regex extends AbstractType(11)
final case object JavaScript extends AbstractType(13)
final case object JavaScriptScope extends AbstractType(15)
final case object Symbol extends AbstractType(14)
final case object Int32 extends AbstractType(16)
final case object Int64 extends AbstractType(18)
final case object Timestamp extends AbstractType(17)
final case object MinKey extends AbstractType(255)
final case object MaxKey extends AbstractType(127)
}
sealed trait BsonField {
def asText : String
def asField : String = "$" + asText
def asVar : String = "$$" + asText
def bson = Bson.Text(asText)
def bsonField = Bson.Text(asField)
def bsonVar = Bson.Text(asVar)
import BsonField._
def \\ (that: BsonField): BsonField = (this, that) match {
case (Path(x), Path(y)) => Path(NonEmptyList.nel(x.head, x.tail ++ y.list))
case (Path(x), y: Leaf) => Path(NonEmptyList.nel(x.head, x.tail :+ y))
case (y: Leaf, Path(x)) => Path(NonEmptyList.nel(y, x.list))
case (x: Leaf, y: Leaf) => Path(NonEmptyList.nels(x, y))
}
def \\\\ (tail: List[BsonField]): BsonField = if (tail.isEmpty) this else this match {
case Path(p) => Path(NonEmptyList.nel(p.head, p.tail ::: tail.flatMap(_.flatten.toList)))
case l: Leaf => Path(NonEmptyList.nel(l, tail.flatMap(_.flatten.toList)))
}
def flatten: NonEmptyList[Leaf]
def parent: Option[BsonField] =
BsonField(flatten.reverse.toList.drop(1).reverse)
def startsWith(that: BsonField) =
this.flatten.toList.startsWith(that.flatten.toList)
def toJs: JsFn =
this.flatten.foldLeft(JsFn.identity)((acc, leaf) =>
leaf match {
case Name(v) => JsFn(JsFn.base, JsCore.Access(acc(JsFn.base.fix), JsCore.Literal(Js.Str(v)).fix).fix)
case Index(v) => JsFn(JsFn.base, JsCore.Access(acc(JsFn.base.fix), JsCore.Literal(Js.Num(v, false)).fix).fix)
})
override def hashCode = this match {
case Name(v) => v.hashCode
case Index(v) => v.hashCode
case Path(v) if (v.tail.length == 0) => v.head.hashCode
case p @ Path(_) => p.flatten.hashCode
}
override def equals(that: Any): Boolean = (this, that) match {
case (Name(v1), Name(v2)) => v1 == v2
case (Name(_), Index(_)) => false
case (Index(v1), Index(v2)) => v1 == v2
case (Index(_), Name(_)) => false
case (v1: BsonField, v2: BsonField) => v1.flatten.equals(v2.flatten)
case _ => false
}
}
object BsonField {
sealed trait Root
final case object Root extends Root
def apply(v: List[BsonField.Leaf]): Option[BsonField] = v match {
case Nil => None
case head :: Nil => Some(head)
case head :: tail => Some(Path(NonEmptyList.nel(head, tail)))
}
sealed trait Leaf extends BsonField {
def asText = Path(NonEmptyList(this)).asText
def flatten = NonEmptyList(this)
// Distinction between these is artificial as far as BSON is concerned, so you
// can always translate a leaf to a Name (but not to an Index, since the key
// might not be numeric).
def toName: Name = this match {
case n @ Name(_) => n
case Index(idx) => Name(idx.toString)
}
}
final case class Name(value: String) extends Leaf {
override def toString = s"""BsonField.Name("$value")"""
}
final case class Index(value: Int) extends Leaf {
override def toString = s"BsonField.Index($value)"
}
private final case class Path(values: NonEmptyList[Leaf]) extends BsonField {
def flatten = values
def asText = (values.list.zipWithIndex.map {
case (Name(value), 0) => value
case (Name(value), _) => "." + value
case (Index(value), 0) => value.toString
case (Index(value), _) => "." + value.toString
}).mkString("")
override def toString = values.list.mkString(" \\\\ ")
}
}
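// Minimal usage sketch (hypothetical values): leaves compose into dotted paths,
// and the rendered forms follow Mongo's "$" / "$$" conventions:
//   val f = BsonField.Name("user") \\ BsonField.Index(0) \\ BsonField.Name("email")
//   f.asText   // "user.0.email"
//   f.asField  // "$user.0.email"
//   f.asVar    // "$$user.0.email"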
|
wemrysi/quasar
|
core/src/main/scala/slamdata/engine/physical/mongodb/bson.scala
|
Scala
|
apache-2.0
| 12,187
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.io.{InputStream, OutputStream}
import java.lang.reflect.Method
import java.rmi.server.UID
import scala.collection.JavaConverters._
import scala.language.implicitConversions
import scala.reflect.ClassTag
import com.google.common.base.Objects
import org.apache.avro.Schema
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hive.ql.exec.UDF
import org.apache.hadoop.hive.ql.plan.{FileSinkDesc, TableDesc}
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFMacro
import org.apache.hadoop.hive.serde2.ColumnProjectionUtils
import org.apache.hadoop.hive.serde2.avro.{AvroGenericRecordWritable, AvroSerdeUtils}
import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector
import org.apache.hadoop.io.Writable
import org.apache.spark.internal.Logging
import org.apache.spark.sql.types.Decimal
import org.apache.spark.util.Utils
private[hive] object HiveShim {
// Precision and scale to pass for unlimited decimals; these are the same as the precision and
// scale Hive 0.13 infers for BigDecimals from sources that don't specify them (e.g. UDFs)
val UNLIMITED_DECIMAL_PRECISION = 38
val UNLIMITED_DECIMAL_SCALE = 18
val HIVE_GENERIC_UDF_MACRO_CLS = "org.apache.hadoop.hive.ql.udf.generic.GenericUDFMacro"
/*
* This function became private in hive-0.13, but we have to do this to work around a Hive bug
*/
private def appendReadColumnNames(conf: Configuration, cols: Seq[String]): Unit = {
val old: String = conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, "")
val result: StringBuilder = new StringBuilder(old)
var first: Boolean = old.isEmpty
for (col <- cols) {
if (first) {
first = false
} else {
result.append(',')
}
result.append(col)
}
conf.set(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, result.toString)
}
/*
* Cannot use ColumnProjectionUtils.appendReadColumns directly, if ids is null
*/
def appendReadColumns(conf: Configuration, ids: Seq[Integer], names: Seq[String]): Unit = {
if (ids != null) {
ColumnProjectionUtils.appendReadColumns(conf, ids.asJava)
}
if (names != null) {
appendReadColumnNames(conf, names)
}
}
/*
* Bug introduced in hive-0.13. AvroGenericRecordWritable has a member recordReaderID that
* needs to be initialized before serialization.
*/
def prepareWritable(w: Writable, serDeProps: Seq[(String, String)]): Writable = {
w match {
case w: AvroGenericRecordWritable =>
w.setRecordReaderID(new UID())
// In Hive 1.1, the record's schema may need to be initialized manually or an NPE will
// be thrown.
if (w.getFileSchema() == null) {
serDeProps
.find(_._1 == AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName())
.foreach { kv =>
w.setFileSchema(new Schema.Parser().parse(kv._2))
}
}
case _ =>
}
w
}
def toCatalystDecimal(hdoi: HiveDecimalObjectInspector, data: Any): Decimal = {
if (hdoi.preferWritable()) {
Decimal(hdoi.getPrimitiveWritableObject(data).getHiveDecimal().bigDecimalValue,
hdoi.precision(), hdoi.scale())
} else {
Decimal(hdoi.getPrimitiveJavaObject(data).bigDecimalValue(), hdoi.precision(), hdoi.scale())
}
}
/**
* This class provides the UDF creation and also the UDF instance serialization and
* de-serialization cross process boundary.
*
* Detail discussion can be found at https://github.com/apache/spark/pull/3640
*
* @param functionClassName UDF class name
* @param instance optional UDF instance which contains additional information (for macro)
* @param clazz optional class instance to create UDF instance
*/
private[hive] case class HiveFunctionWrapper(
var functionClassName: String,
private var instance: AnyRef = null,
private var clazz: Class[_ <: AnyRef] = null) extends java.io.Externalizable {
// for Serialization
def this() = this(null)
override def hashCode(): Int = {
if (functionClassName == HIVE_GENERIC_UDF_MACRO_CLS) {
Objects.hashCode(functionClassName, instance.asInstanceOf[GenericUDFMacro].getBody())
} else {
functionClassName.hashCode()
}
}
override def equals(other: Any): Boolean = other match {
case a: HiveFunctionWrapper if functionClassName == a.functionClassName =>
// In case of udf macro, check to make sure they point to the same underlying UDF
if (functionClassName == HIVE_GENERIC_UDF_MACRO_CLS) {
a.instance.asInstanceOf[GenericUDFMacro].getBody() ==
instance.asInstanceOf[GenericUDFMacro].getBody()
} else {
true
}
case _ => false
}
private lazy val serUtilClass =
Utils.classForName("org.apache.hadoop.hive.ql.exec.SerializationUtilities")
private lazy val utilClass = Utils.classForName("org.apache.hadoop.hive.ql.exec.Utilities")
private val deserializeMethodName = "deserializeObjectByKryo"
private val serializeMethodName = "serializeObjectByKryo"
private def findMethod(klass: Class[_], name: String, args: Class[_]*): Method = {
val method = klass.getDeclaredMethod(name, args: _*)
method.setAccessible(true)
method
}
def deserializePlan[UDFType](is: java.io.InputStream, clazz: Class[_]): UDFType = {
if (HiveUtils.isHive23) {
val borrowKryo = serUtilClass.getMethod("borrowKryo")
val kryo = borrowKryo.invoke(serUtilClass)
val deserializeObjectByKryo = findMethod(serUtilClass, deserializeMethodName,
kryo.getClass.getSuperclass, classOf[InputStream], classOf[Class[_]])
try {
deserializeObjectByKryo.invoke(null, kryo, is, clazz).asInstanceOf[UDFType]
} finally {
serUtilClass.getMethod("releaseKryo", kryo.getClass.getSuperclass).invoke(null, kryo)
}
} else {
val runtimeSerializationKryo = utilClass.getField("runtimeSerializationKryo")
val threadLocalValue = runtimeSerializationKryo.get(utilClass)
val getMethod = threadLocalValue.getClass.getMethod("get")
val kryo = getMethod.invoke(threadLocalValue)
val deserializeObjectByKryo = findMethod(utilClass, deserializeMethodName,
kryo.getClass, classOf[InputStream], classOf[Class[_]])
deserializeObjectByKryo.invoke(null, kryo, is, clazz).asInstanceOf[UDFType]
}
}
def serializePlan(function: AnyRef, out: java.io.OutputStream): Unit = {
if (HiveUtils.isHive23) {
val borrowKryo = serUtilClass.getMethod("borrowKryo")
val kryo = borrowKryo.invoke(serUtilClass)
val serializeObjectByKryo = findMethod(serUtilClass, serializeMethodName,
kryo.getClass.getSuperclass, classOf[Object], classOf[OutputStream])
try {
serializeObjectByKryo.invoke(null, kryo, function, out)
} finally {
serUtilClass.getMethod("releaseKryo", kryo.getClass.getSuperclass).invoke(null, kryo)
}
} else {
val runtimeSerializationKryo = utilClass.getField("runtimeSerializationKryo")
val threadLocalValue = runtimeSerializationKryo.get(utilClass)
val getMethod = threadLocalValue.getClass.getMethod("get")
val kryo = getMethod.invoke(threadLocalValue)
val serializeObjectByKryo = findMethod(utilClass, serializeMethodName,
kryo.getClass, classOf[Object], classOf[OutputStream])
serializeObjectByKryo.invoke(null, kryo, function, out)
}
}
def writeExternal(out: java.io.ObjectOutput): Unit = {
// output the function name
out.writeUTF(functionClassName)
// Write a flag if instance is null or not
out.writeBoolean(instance != null)
if (instance != null) {
// Some of the UDFs are serializable, but others are not;
// Hive Utilities can handle both cases
val baos = new java.io.ByteArrayOutputStream()
serializePlan(instance, baos)
val functionInBytes = baos.toByteArray
// output the function bytes
out.writeInt(functionInBytes.length)
out.write(functionInBytes, 0, functionInBytes.length)
}
}
def readExternal(in: java.io.ObjectInput): Unit = {
// read the function name
functionClassName = in.readUTF()
if (in.readBoolean()) {
// if the instance is not null
// read the function in bytes
val functionInBytesLength = in.readInt()
val functionInBytes = new Array[Byte](functionInBytesLength)
in.readFully(functionInBytes)
// deserialize the function object via Hive Utilities
clazz = Utils.getContextOrSparkClassLoader.loadClass(functionClassName)
.asInstanceOf[Class[_ <: AnyRef]]
instance = deserializePlan[AnyRef](new java.io.ByteArrayInputStream(functionInBytes),
clazz)
}
}
def createFunction[UDFType <: AnyRef](): UDFType = {
if (instance != null) {
instance.asInstanceOf[UDFType]
} else {
if (clazz == null) {
clazz = Utils.getContextOrSparkClassLoader.loadClass(functionClassName)
.asInstanceOf[Class[_ <: AnyRef]]
}
val func = clazz.getConstructor().newInstance().asInstanceOf[UDFType]
if (!func.isInstanceOf[UDF]) {
// We cache the function if it's not a simple UDF,
// as we always have to create a new instance for a simple UDF
instance = func
}
func
}
}
}
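// Minimal usage sketch (hypothetical UDF class name): wrap a UDF by class name
// and instantiate it lazily. Non-simple UDFs (e.g. GenericUDF subclasses) are
// cached in `instance`, while plain org.apache.hadoop.hive.ql.exec.UDF
// subclasses get a fresh instance on every call.
//   val wrapper = HiveFunctionWrapper("org.example.MyGenericUDF")
//   val udf = wrapper.createFunction[org.apache.hadoop.hive.ql.udf.generic.GenericUDF]()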
/*
* Bug introduced in hive-0.13. FileSinkDesc is serializable, but its member path is not.
* Fix it through wrapper.
*/
implicit def wrapperToFileSinkDesc(w: ShimFileSinkDesc): FileSinkDesc = {
val f = new FileSinkDesc(new Path(w.dir), w.tableInfo, w.compressed)
f.setCompressCodec(w.compressCodec)
f.setCompressType(w.compressType)
f.setTableInfo(w.tableInfo)
f.setDestTableId(w.destTableId)
f
}
/*
* Bug introduced in hive-0.13. FileSinkDesc is serializable, but its member path is not.
* Fix it through wrapper.
*/
private[hive] class ShimFileSinkDesc(
var dir: String,
var tableInfo: TableDesc,
var compressed: Boolean)
extends Serializable with Logging {
var compressCodec: String = _
var compressType: String = _
var destTableId: Int = _
def setCompressed(compressed: Boolean): Unit = {
this.compressed = compressed
}
def getDirName(): String = dir
def setDestTableId(destTableId: Int): Unit = {
this.destTableId = destTableId
}
def setTableInfo(tableInfo: TableDesc): Unit = {
this.tableInfo = tableInfo
}
def setCompressCodec(intermediateCompressorCodec: String): Unit = {
compressCodec = intermediateCompressorCodec
}
def setCompressType(intermediateCompressType: String): Unit = {
compressType = intermediateCompressType
}
}
}
|
matthewfranglen/spark
|
sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveShim.scala
|
Scala
|
mit
| 11,894
|
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.testsuite.javalib.lang
import org.junit.Test
import org.junit.Assert._
class CharacterTestOnJDK7 {
@Test
def shouldProvideIsBmpCodePoint(): Unit = {
// 50 randomly chosen characters that produce true
assertTrue(Character.isBmpCodePoint('\\u0120'))
assertTrue(Character.isBmpCodePoint('\\u0147'))
assertTrue(Character.isBmpCodePoint('\\u028E'))
assertTrue(Character.isBmpCodePoint('\\u0464'))
assertTrue(Character.isBmpCodePoint('\\u0DB8'))
assertTrue(Character.isBmpCodePoint('\\u1909'))
assertTrue(Character.isBmpCodePoint('\\u1F26'))
assertTrue(Character.isBmpCodePoint('\\u1F7E'))
assertTrue(Character.isBmpCodePoint('\\u2C64'))
assertTrue(Character.isBmpCodePoint('\\u353E'))
assertTrue(Character.isBmpCodePoint('\\u39DB'))
assertTrue(Character.isBmpCodePoint('\\u3E74'))
assertTrue(Character.isBmpCodePoint('\\u3F14'))
assertTrue(Character.isBmpCodePoint('\\u3FB3'))
assertTrue(Character.isBmpCodePoint('\\u4656'))
assertTrue(Character.isBmpCodePoint('\\u4824'))
assertTrue(Character.isBmpCodePoint('\\u488D'))
assertTrue(Character.isBmpCodePoint('\\u4C03'))
assertTrue(Character.isBmpCodePoint('\\u4D87'))
assertTrue(Character.isBmpCodePoint('\\u4F3E'))
assertTrue(Character.isBmpCodePoint('\\u570D'))
assertTrue(Character.isBmpCodePoint('\\u57CF'))
assertTrue(Character.isBmpCodePoint('\\u5A1E'))
assertTrue(Character.isBmpCodePoint('\\u5A3E'))
assertTrue(Character.isBmpCodePoint('\\u5C29'))
assertTrue(Character.isBmpCodePoint('\\u6082'))
assertTrue(Character.isBmpCodePoint('\\u6518'))
assertTrue(Character.isBmpCodePoint('\\u7202'))
assertTrue(Character.isBmpCodePoint('\\u7B3F'))
assertTrue(Character.isBmpCodePoint('\\u93F4'))
assertTrue(Character.isBmpCodePoint('\\u9812'))
assertTrue(Character.isBmpCodePoint('\\u986B'))
assertTrue(Character.isBmpCodePoint('\\uA228'))
assertTrue(Character.isBmpCodePoint('\\uB05E'))
assertTrue(Character.isBmpCodePoint('\\uB4DD'))
assertTrue(Character.isBmpCodePoint('\\uB4EB'))
assertTrue(Character.isBmpCodePoint('\\uB824'))
assertTrue(Character.isBmpCodePoint('\\uB8D4'))
assertTrue(Character.isBmpCodePoint('\\uBA9D'))
assertTrue(Character.isBmpCodePoint('\\uC33E'))
assertTrue(Character.isBmpCodePoint('\\uD0A4'))
assertTrue(Character.isBmpCodePoint('\\uD0D7'))
assertTrue(Character.isBmpCodePoint('\\uD596'))
assertTrue(Character.isBmpCodePoint('\\uDF06'))
assertTrue(Character.isBmpCodePoint('\\uE419'))
assertTrue(Character.isBmpCodePoint('\\uE66A'))
assertTrue(Character.isBmpCodePoint('\\uED6B'))
assertTrue(Character.isBmpCodePoint('\\uEE2A'))
assertTrue(Character.isBmpCodePoint('\\uF116'))
assertTrue(Character.isBmpCodePoint('\\uFC7E'))
// 50 randomly chosen characters that produce false
assertFalse(Character.isBmpCodePoint(104494))
assertFalse(Character.isBmpCodePoint(125793))
assertFalse(Character.isBmpCodePoint(131158))
assertFalse(Character.isBmpCodePoint(163501))
assertFalse(Character.isBmpCodePoint(182050))
assertFalse(Character.isBmpCodePoint(190085))
assertFalse(Character.isBmpCodePoint(195066))
assertFalse(Character.isBmpCodePoint(197399))
assertFalse(Character.isBmpCodePoint(212487))
assertFalse(Character.isBmpCodePoint(220872))
assertFalse(Character.isBmpCodePoint(228423))
assertFalse(Character.isBmpCodePoint(229980))
assertFalse(Character.isBmpCodePoint(231638))
assertFalse(Character.isBmpCodePoint(250644))
assertFalse(Character.isBmpCodePoint(251190))
assertFalse(Character.isBmpCodePoint(262467))
assertFalse(Character.isBmpCodePoint(278850))
assertFalse(Character.isBmpCodePoint(279865))
assertFalse(Character.isBmpCodePoint(282442))
assertFalse(Character.isBmpCodePoint(298662))
assertFalse(Character.isBmpCodePoint(304549))
assertFalse(Character.isBmpCodePoint(329478))
assertFalse(Character.isBmpCodePoint(425451))
assertFalse(Character.isBmpCodePoint(432483))
assertFalse(Character.isBmpCodePoint(459609))
assertFalse(Character.isBmpCodePoint(468002))
assertFalse(Character.isBmpCodePoint(481274))
assertFalse(Character.isBmpCodePoint(493980))
assertFalse(Character.isBmpCodePoint(504523))
assertFalse(Character.isBmpCodePoint(531036))
assertFalse(Character.isBmpCodePoint(544623))
assertFalse(Character.isBmpCodePoint(580236))
assertFalse(Character.isBmpCodePoint(604965))
assertFalse(Character.isBmpCodePoint(605557))
assertFalse(Character.isBmpCodePoint(608870))
assertFalse(Character.isBmpCodePoint(611891))
assertFalse(Character.isBmpCodePoint(631390))
assertFalse(Character.isBmpCodePoint(679492))
assertFalse(Character.isBmpCodePoint(694553))
assertFalse(Character.isBmpCodePoint(879975))
assertFalse(Character.isBmpCodePoint(926901))
assertFalse(Character.isBmpCodePoint(934288))
assertFalse(Character.isBmpCodePoint(951314))
assertFalse(Character.isBmpCodePoint(952297))
assertFalse(Character.isBmpCodePoint(965942))
assertFalse(Character.isBmpCodePoint(1003588))
assertFalse(Character.isBmpCodePoint(1005139))
assertFalse(Character.isBmpCodePoint(1043178))
assertFalse(Character.isBmpCodePoint(1075598))
assertFalse(Character.isBmpCodePoint(1097392))
}
@Test
def shouldProvideIsAlphabetic(): Unit = {
// 50 randomly chosen characters that produce true
assertTrue(Character.isAlphabetic('\\u04F8'))
assertTrue(Character.isAlphabetic('\\u05DB'))
assertTrue(Character.isAlphabetic('\\u1314'))
assertTrue(Character.isAlphabetic('\\u3515'))
assertTrue(Character.isAlphabetic('\\u3780'))
assertTrue(Character.isAlphabetic('\\u391C'))
assertTrue(Character.isAlphabetic('\\u3B06'))
assertTrue(Character.isAlphabetic('\\u3FEF'))
assertTrue(Character.isAlphabetic('\\u47CF'))
assertTrue(Character.isAlphabetic('\\u5076'))
assertTrue(Character.isAlphabetic('\\u5684'))
assertTrue(Character.isAlphabetic('\\u5773'))
assertTrue(Character.isAlphabetic('\\u591C'))
assertTrue(Character.isAlphabetic('\\u59A0'))
assertTrue(Character.isAlphabetic('\\u5B09'))
assertTrue(Character.isAlphabetic('\\u6775'))
assertTrue(Character.isAlphabetic('\\u7434'))
assertTrue(Character.isAlphabetic('\\u83FB'))
assertTrue(Character.isAlphabetic('\\u8761'))
assertTrue(Character.isAlphabetic('\\u8993'))
assertTrue(Character.isAlphabetic('\\u947A'))
assertTrue(Character.isAlphabetic('\\u98AB'))
assertTrue(Character.isAlphabetic('\\u98DA'))
assertTrue(Character.isAlphabetic('\\u9B44'))
assertTrue(Character.isAlphabetic('\\uADFF'))
assertTrue(Character.isAlphabetic('\\uC091'))
assertTrue(Character.isAlphabetic('\\uC43F'))
assertTrue(Character.isAlphabetic('\\uCB5D'))
assertTrue(Character.isAlphabetic(133889))
assertTrue(Character.isAlphabetic(134427))
assertTrue(Character.isAlphabetic(134471))
assertTrue(Character.isAlphabetic(138909))
assertTrue(Character.isAlphabetic(139164))
assertTrue(Character.isAlphabetic(140493))
assertTrue(Character.isAlphabetic(148737))
assertTrue(Character.isAlphabetic(149345))
assertTrue(Character.isAlphabetic(151435))
assertTrue(Character.isAlphabetic(156857))
assertTrue(Character.isAlphabetic(158440))
assertTrue(Character.isAlphabetic(159937))
assertTrue(Character.isAlphabetic(159952))
assertTrue(Character.isAlphabetic(163859))
assertTrue(Character.isAlphabetic(166872))
assertTrue(Character.isAlphabetic(167076))
assertTrue(Character.isAlphabetic(168670))
assertTrue(Character.isAlphabetic(170390))
assertTrue(Character.isAlphabetic(170999))
assertTrue(Character.isAlphabetic(172036))
assertTrue(Character.isAlphabetic(173135))
assertTrue(Character.isAlphabetic(176898))
// 50 randomly chosen characters that produce false
assertFalse(Character.isAlphabetic(1002047))
assertFalse(Character.isAlphabetic(1009593))
assertFalse(Character.isAlphabetic(1042564))
assertFalse(Character.isAlphabetic(1052587))
assertFalse(Character.isAlphabetic(1061824))
assertFalse(Character.isAlphabetic(1077156))
assertFalse(Character.isAlphabetic(1077935))
assertFalse(Character.isAlphabetic(108164))
assertFalse(Character.isAlphabetic(117071))
assertFalse(Character.isAlphabetic(180747))
assertFalse(Character.isAlphabetic(235975))
assertFalse(Character.isAlphabetic(256440))
assertFalse(Character.isAlphabetic(291721))
assertFalse(Character.isAlphabetic(313351))
assertFalse(Character.isAlphabetic(333549))
assertFalse(Character.isAlphabetic(353806))
assertFalse(Character.isAlphabetic(390947))
assertFalse(Character.isAlphabetic(400920))
assertFalse(Character.isAlphabetic(403305))
assertFalse(Character.isAlphabetic(417636))
assertFalse(Character.isAlphabetic(419085))
assertFalse(Character.isAlphabetic(443247))
assertFalse(Character.isAlphabetic(468248))
assertFalse(Character.isAlphabetic(485549))
assertFalse(Character.isAlphabetic(491917))
assertFalse(Character.isAlphabetic(511059))
assertFalse(Character.isAlphabetic(530210))
assertFalse(Character.isAlphabetic(569030))
assertFalse(Character.isAlphabetic(595429))
assertFalse(Character.isAlphabetic(607797))
assertFalse(Character.isAlphabetic(654788))
assertFalse(Character.isAlphabetic(660783))
assertFalse(Character.isAlphabetic(715383))
assertFalse(Character.isAlphabetic(752828))
assertFalse(Character.isAlphabetic(778169))
assertFalse(Character.isAlphabetic(781077))
assertFalse(Character.isAlphabetic(796535))
assertFalse(Character.isAlphabetic(819655))
assertFalse(Character.isAlphabetic(850895))
assertFalse(Character.isAlphabetic(866871))
assertFalse(Character.isAlphabetic(885354))
assertFalse(Character.isAlphabetic(908455))
assertFalse(Character.isAlphabetic(908635))
assertFalse(Character.isAlphabetic(924461))
assertFalse(Character.isAlphabetic(930019))
assertFalse(Character.isAlphabetic(948273))
assertFalse(Character.isAlphabetic(974041))
assertFalse(Character.isAlphabetic(977329))
assertFalse(Character.isAlphabetic(99202))
assertFalse(Character.isAlphabetic(993967))
}
@Test
def shouldProvideIsIdeographic(): Unit = {
// 50 randomly chosen characters that produce true
assertTrue(Character.isIdeographic('\\u388F'))
assertTrue(Character.isIdeographic('\\u4711'))
assertTrue(Character.isIdeographic('\\u527E'))
assertTrue(Character.isIdeographic('\\u5328'))
assertTrue(Character.isIdeographic('\\u5922'))
assertTrue(Character.isIdeographic('\\u5BA2'))
assertTrue(Character.isIdeographic('\\u5CAC'))
assertTrue(Character.isIdeographic('\\u65AF'))
assertTrue(Character.isIdeographic('\\u694C'))
assertTrue(Character.isIdeographic('\\u8068'))
assertTrue(Character.isIdeographic('\\u8C34'))
assertTrue(Character.isIdeographic('\\u8C9D'))
assertTrue(Character.isIdeographic('\\u8D3D'))
assertTrue(Character.isIdeographic('\\u9C62'))
assertTrue(Character.isIdeographic(131994))
assertTrue(Character.isIdeographic(132852))
assertTrue(Character.isIdeographic(133501))
assertTrue(Character.isIdeographic(133591))
assertTrue(Character.isIdeographic(134246))
assertTrue(Character.isIdeographic(134328))
assertTrue(Character.isIdeographic(136431))
assertTrue(Character.isIdeographic(139867))
assertTrue(Character.isIdeographic(140528))
assertTrue(Character.isIdeographic(141460))
assertTrue(Character.isIdeographic(146741))
assertTrue(Character.isIdeographic(146759))
assertTrue(Character.isIdeographic(147539))
assertTrue(Character.isIdeographic(148459))
assertTrue(Character.isIdeographic(148689))
assertTrue(Character.isIdeographic(153593))
assertTrue(Character.isIdeographic(155694))
assertTrue(Character.isIdeographic(155818))
assertTrue(Character.isIdeographic(159961))
assertTrue(Character.isIdeographic(163220))
assertTrue(Character.isIdeographic(163464))
assertTrue(Character.isIdeographic(164167))
assertTrue(Character.isIdeographic(164197))
assertTrue(Character.isIdeographic(165508))
assertTrue(Character.isIdeographic(165973))
assertTrue(Character.isIdeographic(167743))
assertTrue(Character.isIdeographic(168585))
assertTrue(Character.isIdeographic(168758))
assertTrue(Character.isIdeographic(169731))
assertTrue(Character.isIdeographic(170186))
assertTrue(Character.isIdeographic(171240))
assertTrue(Character.isIdeographic(171988))
assertTrue(Character.isIdeographic(172886))
assertTrue(Character.isIdeographic(174236))
assertTrue(Character.isIdeographic(177495))
assertTrue(Character.isIdeographic(178011))
// 50 randomly chosen characters that produce false
assertFalse(Character.isIdeographic('\\uFB45'))
assertFalse(Character.isIdeographic(1005864))
assertFalse(Character.isIdeographic(1006626))
assertFalse(Character.isIdeographic(1009910))
assertFalse(Character.isIdeographic(1032559))
assertFalse(Character.isIdeographic(1040837))
assertFalse(Character.isIdeographic(1070571))
assertFalse(Character.isIdeographic(107607))
assertFalse(Character.isIdeographic(1084694))
assertFalse(Character.isIdeographic(1098896))
assertFalse(Character.isIdeographic(121214))
assertFalse(Character.isIdeographic(193874))
assertFalse(Character.isIdeographic(208650))
assertFalse(Character.isIdeographic(253670))
assertFalse(Character.isIdeographic(266437))
assertFalse(Character.isIdeographic(268828))
assertFalse(Character.isIdeographic(269494))
assertFalse(Character.isIdeographic(278691))
assertFalse(Character.isIdeographic(282114))
assertFalse(Character.isIdeographic(294021))
assertFalse(Character.isIdeographic(334194))
assertFalse(Character.isIdeographic(351339))
assertFalse(Character.isIdeographic(356942))
assertFalse(Character.isIdeographic(388239))
assertFalse(Character.isIdeographic(398495))
assertFalse(Character.isIdeographic(424210))
assertFalse(Character.isIdeographic(437688))
assertFalse(Character.isIdeographic(454763))
assertFalse(Character.isIdeographic(499908))
assertFalse(Character.isIdeographic(543025))
assertFalse(Character.isIdeographic(544352))
assertFalse(Character.isIdeographic(552973))
assertFalse(Character.isIdeographic(557901))
assertFalse(Character.isIdeographic(570614))
assertFalse(Character.isIdeographic(607804))
assertFalse(Character.isIdeographic(639906))
assertFalse(Character.isIdeographic(659980))
assertFalse(Character.isIdeographic(668239))
assertFalse(Character.isIdeographic(711022))
assertFalse(Character.isIdeographic(765532))
assertFalse(Character.isIdeographic(776989))
assertFalse(Character.isIdeographic(777331))
assertFalse(Character.isIdeographic(812822))
assertFalse(Character.isIdeographic(815221))
assertFalse(Character.isIdeographic(828259))
assertFalse(Character.isIdeographic(82920))
assertFalse(Character.isIdeographic(869335))
assertFalse(Character.isIdeographic(912462))
assertFalse(Character.isIdeographic(958559))
assertFalse(Character.isIdeographic(999076))
}
@Test
def shouldProvideIsSurrogate(): Unit = {
//non-surrogate
assertFalse(Character.isSurrogate((Character.MIN_SURROGATE - 1).toChar))
assertFalse(Character.isSurrogate((Character.MAX_SURROGATE + 1).toChar))
assertFalse(Character.isSurrogate('a'))
assertFalse(Character.isSurrogate('7'))
assertFalse(Character.isSurrogate('ö'))
assertFalse(Character.isSurrogate('\\t'))
//high surrogates
assertTrue(Character.isSurrogate(Character.MIN_SURROGATE))
assertTrue(Character.isSurrogate('\\uD800')) //min
assertTrue(Character.isSurrogate('\\uDBFF')) //max
assertTrue(Character.isSurrogate('\\uDAAA'))
assertTrue(Character.isSurrogate('\\uD999'))
assertTrue(Character.isSurrogate('\\uDBFE'))
//low surrogates
assertTrue(Character.isSurrogate(Character.MAX_SURROGATE))
assertTrue(Character.isSurrogate('\\uDFFF')) //max
assertTrue(Character.isSurrogate('\\uDC00')) //min
assertTrue(Character.isSurrogate('\\uDDDD'))
assertTrue(Character.isSurrogate('\\uDE99'))
assertTrue(Character.isSurrogate('\\uDFFE'))
assertTrue(Character.isSurrogate('\\uDC01'))
}
}
|
SebsLittleHelpers/scala-js
|
test-suite/shared/src/test/require-jdk7/org/scalajs/testsuite/javalib/lang/CharacterTestOnJDK7.scala
|
Scala
|
apache-2.0
| 16,792
|
package dotty.tools
package dotc
package typer
import core._
import ast.{Trees, untpd, tpd, TreeInfo}
import util.Positions._
import util.Stats.track
import Trees.Untyped
import Mode.ImplicitsEnabled
import Contexts._
import Flags._
import Denotations._
import NameOps._
import Symbols._
import Types._
import Decorators._
import ErrorReporting._
import Trees._
import Names._
import StdNames._
import ProtoTypes._
import EtaExpansion._
import collection.mutable
import reflect.ClassTag
import config.Printers._
import TypeApplications._
import language.implicitConversions
object Applications {
import tpd._
private val isNamedArg = (arg: Any) => arg.isInstanceOf[Trees.NamedArg[_]]
def hasNamedArg(args: List[Any]) = args exists isNamedArg
def wrapDefs(defs: mutable.ListBuffer[Tree], tree: Tree)(implicit ctx: Context): Tree =
if (defs != null && defs.nonEmpty) tpd.Block(defs.toList, tree) else tree
}
import Applications._
trait Applications extends Compatibility { self: Typer =>
import Applications._
import tpd.{ cpy => _, _ }
import untpd.cpy
/** @param Arg the type of arguments, could be tpd.Tree, untpd.Tree, or Type
* @param methRef the reference to the method of the application
* @param funType the type of the function part of the application
* @param args the arguments of the application
* @param resultType the expected result type of the application
*/
abstract class Application[Arg](methRef: TermRef, funType: Type, args: List[Arg], resultType: Type)(implicit ctx: Context) {
/** The type of typed arguments: either tpd.Tree or Type */
type TypedArg
/** Given an original argument and the type of the corresponding formal
* parameter, produce a typed argument.
*/
protected def typedArg(arg: Arg, formal: Type): TypedArg
/** Turn a typed tree into an argument */
protected def treeToArg(arg: Tree): Arg
/** Check that argument corresponds to type `formal` and
* possibly add it to the list of adapted arguments
*/
protected def addArg(arg: TypedArg, formal: Type): Unit
/** Is this an argument of the form `expr: _*` or a RepeatedParamType
* derived from such an argument?
*/
protected def isVarArg(arg: Arg): Boolean
/** If constructing trees, turn last `n` processed arguments into a
* `SeqLiteral` tree with element type `elemFormal`.
*/
protected def makeVarArg(n: Int, elemFormal: Type): Unit
/** Signal failure with given message at position of given argument */
protected def fail(msg: => String, arg: Arg): Unit
/** Signal failure with given message at position of the application itself */
protected def fail(msg: => String): Unit
protected def appPos: Position
/** If constructing trees, the current function part, which might be
* affected by lifting. EmptyTree otherwise.
*/
protected def normalizedFun: Tree
/** If constructing trees, pull out all parts of the function
* which are not idempotent into separate prefix definitions
*/
protected def liftFun(): Unit = ()
/** A flag signalling that typechecking the application has so far been successful */
private[this] var _ok = true
def ok = _ok
def ok_=(x: Boolean) = {
assert(x || ctx.errorsReported || !ctx.typerState.isCommittable) // !!! DEBUG
_ok = x
}
/** The function's type after widening and instantiating polytypes
* with polyparams in constraint set
*/
val methType = funType.widen match {
case funType: MethodType => funType
case funType: PolyType => constrained(funType).resultType
case tp => tp //was: funType
}
/** The arguments re-ordered so that each named argument matches the
* same-named formal parameter.
*/
lazy val orderedArgs =
if (hasNamedArg(args))
reorder(args.asInstanceOf[List[untpd.Tree]]).asInstanceOf[List[Arg]]
else
args
protected def init() = methType match {
case methType: MethodType =>
// apply the result type constraint, unless method type is dependent
if (!methType.isDependent)
if (!constrainResult(methType.resultType, resultType))
fail(err.typeMismatchStr(methType.resultType, resultType))
// match all arguments with corresponding formal parameters
matchArgs(orderedArgs, methType.paramTypes, 0)
case _ =>
if (methType.isError) ok = false
else fail(s"$methString does not take parameters")
}
/** Whether the application was successful */
def success = ok
protected def methodType = methType.asInstanceOf[MethodType]
private def methString: String = s"method ${methRef.name}: ${methType.show}"
/** Re-order arguments to correctly align named arguments */
def reorder[T >: Untyped](args: List[Trees.Tree[T]]): List[Trees.Tree[T]] = {
/** @param pnames The list of parameter names that are missing arguments
* @param args The list of arguments that are not yet passed, or that are waiting to be dropped
* @param nameToArg A map from as yet unseen names to named arguments
* @param toDrop A set of names that have already been passed as named arguments
*
* For a well-typed application we have the invariants
*
* 1. `(args diff toDrop)` can be reordered to match `pnames`
* 2. For every `(name -> arg)` in `nameToArg`, `arg` is an element of `args`
*/
def recur(pnames: List[Name], args: List[Trees.Tree[T]],
nameToArg: Map[Name, Trees.NamedArg[T]], toDrop: Set[Name]): List[Trees.Tree[T]] = pnames match {
case pname :: pnames1 if nameToArg contains pname =>
// there is a named argument for this parameter; pick it
nameToArg(pname) :: recur(pnames1, args, nameToArg - pname, toDrop + pname)
case _ =>
def pnamesRest = if (pnames.isEmpty) pnames else pnames.tail
args match {
case (arg @ NamedArg(aname, _)) :: args1 =>
if (toDrop contains aname) // argument is already passed
recur(pnames, args1, nameToArg, toDrop - aname)
else if ((nameToArg contains aname) && pnames.nonEmpty) // argument is missing, pass an empty tree
genericEmptyTree :: recur(pnames.tail, args, nameToArg, toDrop)
else { // name not (or no longer) available for named arg
def msg =
if (methodType.paramNames contains aname)
s"parameter $aname of $methString is already instantiated"
else
s"$methString does not have a parameter $aname"
fail(msg, arg.asInstanceOf[Arg])
arg :: recur(pnamesRest, args1, nameToArg, toDrop)
}
case arg :: args1 =>
arg :: recur(pnamesRest, args1, nameToArg, toDrop) // unnamed argument; pick it
case Nil => // no more args, continue to pick up any preceding named args
if (pnames.isEmpty) Nil
else recur(pnamesRest, args, nameToArg, toDrop)
}
}
val nameAssocs = for (arg @ NamedArg(name, _) <- args) yield (name, arg)
recur(methodType.paramNames, args, nameAssocs.toMap, Set())
}
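// Illustrative examples for `reorder` (assuming a hypothetical `def f(a: Int, b: Int, c: Int)`):
// `f(1, c = 3, b = 2)` arrives as List(1, NamedArg(c, 3), NamedArg(b, 2)) and is
// reordered to List(1, NamedArg(b, 2), NamedArg(c, 3)); `f(c = 3, b = 2)` becomes
// List(genericEmptyTree, NamedArg(b, 2), NamedArg(c, 3)), the empty tree marking the
// slot of `a`, to be filled later from a default argument.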
/** Splice new method reference into existing application */
def spliceMeth(meth: Tree, app: Tree): Tree = app match {
case Apply(fn, args) => Apply(spliceMeth(meth, fn), args)
case TypeApply(fn, targs) => TypeApply(spliceMeth(meth, fn), targs)
case _ => meth
}
/** Find reference to default parameter getter for parameter #n in current
* parameter list, or NoType if none was found
*/
def findDefaultGetter(n: Int)(implicit ctx: Context): Type = {
val meth = methRef.symbol
val prefix =
if ((meth is Synthetic) && meth.name == nme.apply) nme.CONSTRUCTOR else methRef.name
def getterName = prefix.defaultGetterName(n)
def ref(pre: Type, sym: Symbol): Type =
if (pre.exists && sym.isTerm) pre select sym else NoType
if (meth.hasDefaultParams)
methRef.prefix match {
case NoPrefix =>
def findDefault(cx: Context): Type = {
if (cx eq NoContext) NoType
else if (cx.scope != cx.outer.scope &&
cx.denotNamed(methRef.name).hasAltWith(_.symbol == meth)) {
val denot = cx.denotNamed(getterName)
assert(denot.exists, s"non-existent getter denotation ($denot) for getter($getterName)")
cx.owner.thisType.select(getterName, denot)
} else findDefault(cx.outer)
}
findDefault(ctx)
case mpre =>
val cls = meth.owner
val pre =
if (meth.isClassConstructor) {
// default getters for class constructors are found in the companion object
mpre.baseTypeRef(cls) match {
case tp: TypeRef => ref(tp.prefix, cls.companionModule)
case _ => NoType
}
} else mpre
val getter = pre.member(getterName)
ref(pre, getter.symbol)
}
else NoType
}
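// Note: default arguments are compiled to synthetic getter methods named by
// `defaultGetterName`, conventionally of the shape `f$default$n`; for class
// constructors these getters live on the companion object, which is why the
// prefix is switched to `cls.companionModule` above. (The `f$default$n` shape is
// the usual Scala convention, not something defined in this file.)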
/** Match re-ordered arguments against formal parameters
* @param n The position, within `methType`'s parameter list, of the first parameter in `formals`.
*/
def matchArgs(args: List[Arg], formals: List[Type], n: Int): Unit = {
if (success) formals match {
case formal :: formals1 =>
def addTyped(arg: Arg, formal: Type) =
addArg(typedArg(arg, formal), formal)
def missingArg(n: Int): Unit = {
val pname = methodType.paramNames(n)
fail(
if (pname contains '$') s"not enough arguments for $methString"
else s"missing argument for parameter $pname of $methString")
}
def tryDefault(n: Int, args1: List[Arg]): Unit = {
findDefaultGetter(n + numArgs(normalizedFun)) match {
case dref: NamedType =>
liftFun()
addTyped(treeToArg(spliceMeth(Ident(dref) withPos appPos, normalizedFun)), formal)
matchArgs(args1, formals1, n + 1)
case _ =>
missingArg(n)
}
}
if (formal.isRepeatedParam)
args match {
case arg :: Nil if isVarArg(arg) =>
addTyped(arg, formal)
case _ =>
val elemFormal = formal.argTypesLo.head
args foreach (addTyped(_, elemFormal))
makeVarArg(args.length, elemFormal)
}
else args match {
case EmptyTree :: args1 =>
tryDefault(n, args1)
case arg :: args1 =>
addTyped(arg, formal)
matchArgs(args1, formals1, n + 1)
case nil =>
tryDefault(n, args)
}
case nil =>
args match {
case arg :: args1 => fail(s"too many arguments for $methString", arg)
case nil =>
}
}
}
}
/** Subclass of Application for the cases where we are interested only
* in a "can/cannot apply" answer, without needing to construct trees or
* issue error messages.
*/
abstract class TestApplication[Arg](methRef: TermRef, funType: Type, args: List[Arg], resultType: Type)(implicit ctx: Context)
extends Application[Arg](methRef, funType, args, resultType) {
type TypedArg = Arg
type Result = Unit
/** The type of the given argument */
protected def argType(arg: Arg, formal: Type): Type
def typedArg(arg: Arg, formal: Type): Arg = arg
def addArg(arg: TypedArg, formal: Type) =
ok = ok & isCompatible(argType(arg, formal), formal)
def makeVarArg(n: Int, elemFormal: Type) = {}
def fail(msg: => String, arg: Arg) =
ok = false
def fail(msg: => String) =
ok = false
def appPos = NoPosition
def normalizedFun = EmptyTree
init()
}
/** Subclass of Application for applicability tests with type arguments and value
* argument trees.
*/
class ApplicableToTrees(methRef: TermRef, targs: List[Type], args: List[Tree], resultType: Type)(implicit ctx: Context)
extends TestApplication(methRef, methRef.widen.appliedTo(targs), args, resultType) {
def argType(arg: Tree, formal: Type): Type = normalize(arg.tpe, formal)
def treeToArg(arg: Tree): Tree = arg
def isVarArg(arg: Tree): Boolean = tpd.isWildcardStarArg(arg)
}
/** Subclass of Application for applicability tests with value argument types. */
class ApplicableToTypes(methRef: TermRef, args: List[Type], resultType: Type)(implicit ctx: Context)
extends TestApplication(methRef, methRef, args, resultType) {
def argType(arg: Type, formal: Type): Type = arg
def treeToArg(arg: Tree): Type = arg.tpe
def isVarArg(arg: Type): Boolean = arg.isRepeatedParam
}
/** Subclass of Application for type checking an Apply node, where
* types of arguments are either known or unknown.
*/
abstract class TypedApply[T >: Untyped](
app: untpd.Apply, fun: Tree, methRef: TermRef, args: List[Trees.Tree[T]], resultType: Type)(implicit ctx: Context)
extends Application(methRef, fun.tpe, args, resultType) {
type TypedArg = Tree
def isVarArg(arg: Trees.Tree[T]): Boolean = untpd.isWildcardStarArg(arg)
private var typedArgBuf = new mutable.ListBuffer[Tree]
private var liftedDefs: mutable.ListBuffer[Tree] = null
private var myNormalizedFun: Tree = fun
init()
def addArg(arg: Tree, formal: Type): Unit =
typedArgBuf += adaptInterpolated(arg, formal.widenExpr)
def makeVarArg(n: Int, elemFormal: Type): Unit = {
val args = typedArgBuf.takeRight(n).toList
typedArgBuf.trimEnd(n)
val seqLit = if (methodType.isJava) JavaSeqLiteral(args) else SeqLiteral(args)
typedArgBuf += seqToRepeated(seqLit)
}
override def appPos = app.pos
def fail(msg: => String, arg: Trees.Tree[T]) = {
ctx.error(msg, arg.pos)
ok = false
}
def fail(msg: => String) = {
ctx.error(msg, app.pos)
ok = false
}
def normalizedFun = myNormalizedFun
override def liftFun(): Unit =
if (liftedDefs == null) {
liftedDefs = new mutable.ListBuffer[Tree]
myNormalizedFun = liftApp(liftedDefs, myNormalizedFun)
}
/** The index of the first difference between lists of trees `xs` and `ys`,
* where `EmptyTree`s in the second list are skipped.
* -1 if there are no differences.
*/
private def firstDiff[T <: Trees.Tree[_]](xs: List[T], ys: List[T], n: Int = 0): Int = xs match {
case x :: xs1 =>
ys match {
case EmptyTree :: ys1 => firstDiff(xs1, ys1, n)
case y :: ys1 => if (x ne y) n else firstDiff(xs1, ys1, n + 1)
case nil => n
}
case nil =>
ys match {
case EmptyTree :: ys1 => firstDiff(xs, ys1, n)
case y :: ys1 => n
case nil => -1
}
}
private def sameSeq[T <: Trees.Tree[_]](xs: List[T], ys: List[T]): Boolean = firstDiff(xs, ys) < 0
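// Illustrative examples (hypothetical trees a, b, c):
//   firstDiff(List(a, b), List(a, b))            == -1  (no difference)
//   firstDiff(List(a, b), List(a, EmptyTree, b)) == -1  (EmptyTrees on the right are skipped)
//   firstDiff(List(a, b), List(a, c))            ==  1  (index of the first mismatch)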
val result = {
var typedArgs = typedArgBuf.toList
val app0 = cpy.Apply(app, normalizedFun, typedArgs)
val app1 =
if (!success) app0.withType(ErrorType)
else {
if (!sameSeq(args, orderedArgs)) {
// need to lift arguments to maintain evaluation order in the
// presence of argument reorderings.
liftFun()
val eqSuffixLength = firstDiff(app.args.reverse, orderedArgs.reverse)
val (liftable, rest) = typedArgs splitAt (typedArgs.length - eqSuffixLength)
typedArgs = liftArgs(liftedDefs, methType, liftable) ++ rest
}
if (sameSeq(typedArgs, args)) // trick to cut down on tree copying
typedArgs = args.asInstanceOf[List[Tree]]
assignType(app0, normalizedFun, typedArgs)
}
wrapDefs(liftedDefs, app1)
}
}
/** Subclass of Application for type checking an Apply node with untyped arguments. */
class ApplyToUntyped(app: untpd.Apply, fun: Tree, methRef: TermRef, proto: FunProto, resultType: Type)(implicit ctx: Context)
extends TypedApply(app, fun, methRef, proto.args, resultType) {
def typedArg(arg: untpd.Tree, formal: Type): TypedArg = proto.typedArg(arg, formal.widenExpr)
def treeToArg(arg: Tree): untpd.Tree = untpd.TypedSplice(arg)
}
/** Subclass of Application for type checking an Apply node with typed arguments. */
class ApplyToTyped(app: untpd.Apply, fun: Tree, methRef: TermRef, args: List[Tree], resultType: Type)(implicit ctx: Context)
extends TypedApply(app, fun, methRef, args, resultType) {
def typedArg(arg: Tree, formal: Type): TypedArg = arg
def treeToArg(arg: Tree): Tree = arg
}
def typedApply(tree: untpd.Apply, pt: Type)(implicit ctx: Context): Tree = {
def realApply(implicit ctx: Context): Tree = track("realApply") {
val proto = new FunProto(tree.args, pt, this)
val fun1 = typedExpr(tree.fun, proto)
methPart(fun1).tpe match {
case funRef: TermRef =>
tryEither { implicit ctx =>
val app =
if (proto.argsAreTyped) new ApplyToTyped(tree, fun1, funRef, proto.typedArgs, pt)
else new ApplyToUntyped(tree, fun1, funRef, proto, pt)
val result = app.result
ConstFold(result)
} { (failedVal, failedState) => fun1 match {
case Select(qual, name) =>
// try with prototype `[].name(args)`; this might succeed by inserting an
// implicit conversion around `[]` (an example is Int + BigInt).
tryEither { implicit ctx =>
val simpleFunProto = new FunProto(tree.args, WildcardType, this) // drop result type, because views are disabled
val selProto = SelectionProto(name, simpleFunProto, NoViewsAllowed)
val qual1 = adaptInterpolated(qual, selProto)
if (qual eq qual1) ctx.error("no progress")
if (ctx.reporter.hasErrors) qual1
else
typedApply(
cpy.Apply(tree,
cpy.Select(fun1, untpd.TypedSplice(qual1), name),
proto.typedArgs map untpd.TypedSplice),
pt)
} { (_, _) =>
failedState.commit()
failedVal
}
case _ =>
failedState.commit()
failedVal
}
}
case _ =>
fun1.tpe match {
case ErrorType =>
tree.withType(ErrorType)
case tp =>
throw new Error(s"unexpected type.\n fun1 = $fun1,\n methPart(fun1) = ${methPart(fun1)},\n methPart(fun1).tpe = ${methPart(fun1).tpe},\n tpe = $tp")
}
}
}
/** Convert expression like
*
* e += (args)
*
* where the lifted-for-assignment version of e is { val xs = es; e' } to
*
* { val xs = es; e' = e' + args }
*/
def typedOpAssign: Tree = track("typedOpAssign") {
val Apply(Select(lhs, name), rhss) = tree
val lhs1 = typedExpr(lhs)
val liftedDefs = new mutable.ListBuffer[Tree]
val lhs2 = untpd.TypedSplice(liftAssigned(liftedDefs, lhs1))
val assign = untpd.Assign(lhs2, untpd.Apply(untpd.Select(lhs2, name.init), rhss))
wrapDefs(liftedDefs, typed(assign))
}
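// Illustrative example (hypothetical names): if typing `x.count += 1` as a call to a
// `+=` method fails, typedOpAssign retries it as an assignment and produces roughly
// { val $x = x; $x.count = $x.count + 1 }, with the prefix definitions coming from
// liftAssigned.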
if (untpd.isOpAssign(tree))
tryEither {
implicit ctx => realApply
} { (failedVal, failedState) =>
tryEither {
implicit ctx => typedOpAssign
} { (_, _) =>
failedState.commit()
failedVal
}
}
else realApply
}
def typedTypeApply(tree: untpd.TypeApply, pt: Type)(implicit ctx: Context): Tree = track("typedTypeApply") {
val typedArgs = tree.args mapconserve (typedType(_))
val typedFn = typedExpr(tree.fun, PolyProto(typedArgs.tpes, pt))
typedFn.tpe.widen match {
case pt: PolyType => checkBounds(typedArgs, pt, tree.pos)
case _ =>
}
assignType(cpy.TypeApply(tree, typedFn, typedArgs), typedFn, typedArgs)
}
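// Illustrative example (hypothetical method): for `def f[A <: AnyVal](...)`, a call
// `f[String]` fails the bounds check above, since the type argument is checked against
// the corresponding type parameter bounds of the PolyType before the TypeApply node
// is assigned its type.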
def typedUnApply(tree: untpd.Apply, pt: Type)(implicit ctx: Context): Tree = track("typedUnApply") {
val Apply(qual, args) = tree
def notAnExtractor(tree: Tree) =
errorTree(tree, s"${qual.show} cannot be used as an extractor in a pattern because it lacks an unapply or unapplySeq method")
/** If this is a term ref tree, try to typecheck it as a type, using its type name.
 * If that resolves to a type alias, follow the alias; if the alias refers to
 * a class, return a reference to the class's companion module.
 */
def followTypeAlias(tree: untpd.Tree): untpd.Tree = {
tree match {
case tree: untpd.RefTree =>
val ttree = typedType(tree.withName(tree.name.toTypeName))
ttree.tpe match {
case alias: TypeRef if alias.info.isAlias =>
companionRef(alias) match {
case companion: TermRef => return untpd.ref(companion) withPos tree.pos
case _ =>
}
case _ =>
}
case _ =>
}
untpd.EmptyTree
}
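// Illustrative example (hypothetical names): given `type Id = Ident` where Ident is a
// case class, the pattern `Id(name)` has no `Id.unapply`; followTypeAlias resolves the
// alias to the class Ident and returns a reference to the companion object Ident, whose
// unapply is then tried by the caller.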
/** A typed qual.unapply or qual.unapplySeq tree, if this typechecks.
 * Otherwise fallBack with the (ill-typed) qual.unapply as argument
 */
def trySelectUnapply(qual: untpd.Tree)(fallBack: Tree => Tree): Tree = {
val unappProto = new UnapplyFunProto(this)
tryEither {
implicit ctx => typedExpr(untpd.Select(qual, nme.unapply), unappProto)
} {
(sel, _) =>
tryEither {
implicit ctx => typedExpr(untpd.Select(qual, nme.unapplySeq), unappProto) // for backwards compatibility; will be dropped
} {
(_, _) => fallBack(sel)
}
}
}
/** Produce a typed qual.unapply or qual.unapplySeq tree, or,
 * if this fails, follow a type alias and try again.
 */
val unapplyFn = trySelectUnapply(qual) { sel =>
val qual1 = followTypeAlias(qual)
if (qual1.isEmpty) notAnExtractor(sel)
else trySelectUnapply(qual1)(_ => notAnExtractor(sel))
}
def fromScala2x = unapplyFn.symbol.exists && (unapplyFn.symbol.owner is Scala2x)
def unapplyArgs(unapplyResult: Type)(implicit ctx: Context): List[Type] = {
def extractorMemberType(tp: Type, name: Name) = {
val ref = tp member name
if (ref.isOverloaded)
errorType(s"Overloaded reference to ${ref.show} is not allowed in extractor", tree.pos)
else if (ref.info.isInstanceOf[PolyType])
errorType(s"Reference to polymorphic ${ref.show}: ${ref.info.show} is not allowed in extractor", tree.pos)
else
ref.info.widenExpr.dealias
}
def productSelectors(tp: Type): List[Type] = {
val sels = for (n <- Iterator.from(0)) yield extractorMemberType(tp, nme.selectorName(n))
sels.takeWhile(_.exists).toList
}
def seqSelector = defn.RepeatedParamType.appliedTo(unapplyResult.elemType :: Nil)
def getSelectors(tp: Type): List[Type] =
if (defn.isProductSubType(tp) && args.length > 1) productSelectors(tp)
else tp :: Nil
def getTp = extractorMemberType(unapplyResult, nme.get)
// println(s"unapply $unapplyResult ${extractorMemberType(unapplyResult, nme.isDefined)}")
if (extractorMemberType(unapplyResult, nme.isDefined) isRef defn.BooleanClass) {
if (getTp.exists)
if (unapplyFn.symbol.name == nme.unapplySeq) {
val seqArg = boundsToHi(getTp.firstBaseArgInfo(defn.SeqClass))
if (seqArg.exists) return args map Function.const(seqArg)
}
else return getSelectors(getTp)
else if (defn.isProductSubType(unapplyResult)) return productSelectors(unapplyResult)
}
if (unapplyResult derivesFrom defn.SeqClass) seqSelector :: Nil
else if (unapplyResult isRef defn.BooleanClass) Nil
else {
ctx.error(s"${unapplyResult.show} is not a valid result type of an unapply method of an extractor", tree.pos)
Nil
}
}
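// Illustrative examples of result types handled above (hypothetical extractors):
// an unapply returning Option[(Int, String)] matched with two patterns yields the
// selector types List(Int, String); an unapplySeq returning Option[Seq[T]] assigns
// the element type T to every argument pattern; a plain Boolean result yields Nil,
// i.e. a test-only extractor with no argument patterns.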
/** Can `subtp` be made to be a subtype of `tp`, possibly by dropping some
* refinements in `tp`?
*/
def isSubTypeOfParent(subtp: Type, tp: Type): Boolean =
if (subtp <:< tp) true
else tp match {
case RefinedType(parent, _) => isSubTypeOfParent(subtp, parent)
case _ => false
}
unapplyFn.tpe.widen match {
case mt: MethodType if mt.paramTypes.length == 1 && !mt.isDependent =>
val unapplyArgType = mt.paramTypes.head
unapp.println(s"unapp arg tpe = ${unapplyArgType.show}, pt = ${pt.show}")
def wpt = widenForMatchSelector(pt) // needed?
val ownType =
if (pt <:< unapplyArgType) {
fullyDefinedType(unapplyArgType, "extractor argument", tree.pos)
unapp.println(i"case 1 $unapplyArgType ${ctx.typerState.constraint}")
pt
} else if (isSubTypeOfParent(unapplyArgType, wpt)) {
maximizeType(unapplyArgType) match {
case Some(tvar) =>
def msg =
i"""There is no best instantiation of pattern type $unapplyArgType
|that makes it a subtype of selector type $pt.
|Non-variant type variable ${tvar.origin} cannot be uniquely instantiated.""".stripMargin
if (fromScala2x) {
// We can't issue an error here, because in Scala 2, ::[B] is invariant
// whereas List[+T] is covariant. According to the strict rule, a pattern
// match of a List[C] against a case x :: xs is illegal, because
// B cannot be uniquely instantiated. Of course :: should have been
// covariant in the first place, but in the Scala libraries it isn't.
// So for now we allow these kinds of patterns, even though they
// can open unsoundness holes. See SI-7952 for an example of the hole this opens.
if (ctx.settings.verbose.value) ctx.warning(msg, tree.pos)
} else {
unapp.println(s" ${unapplyFn.symbol.owner} ${unapplyFn.symbol.owner is Scala2x}")
ctx.error(msg, tree.pos)
}
case _ =>
}
unapp.println(i"case 2 $unapplyArgType ${ctx.typerState.constraint}")
unapplyArgType
} else {
unapp.println("Neither sub nor super")
unapp.println(TypeComparer.explained(implicit ctx => unapplyArgType <:< wpt))
errorType(
i"Pattern type $unapplyArgType is neither a subtype nor a supertype of selector type $wpt",
tree.pos)
}
val dummyArg = dummyTreeOfType(unapplyArgType)
val unapplyApp = typedExpr(untpd.TypedSplice(Apply(unapplyFn, dummyArg :: Nil)))
val unapplyImplicits = unapplyApp match {
case Apply(Apply(unapply, `dummyArg` :: Nil), args2) => assert(args2.nonEmpty); args2
case Apply(unapply, `dummyArg` :: Nil) => Nil
}
var argTypes = unapplyArgs(unapplyApp.tpe)
for (argType <- argTypes) assert(!argType.isInstanceOf[TypeBounds], unapplyApp.tpe.show)
val bunchedArgs = argTypes match {
case argType :: Nil if argType.isRepeatedParam => untpd.SeqLiteral(args) :: Nil
case _ => args
}
if (argTypes.length != bunchedArgs.length) {
ctx.error(i"wrong number of argument patterns for $qual; expected: ($argTypes%, %)", tree.pos)
argTypes = argTypes.take(args.length) ++
List.fill(argTypes.length - args.length)(WildcardType)
}
val unapplyPatterns = (bunchedArgs, argTypes).zipped map (typed(_, _))
val result = assignType(cpy.UnApply(tree, unapplyFn, unapplyImplicits, unapplyPatterns), ownType)
unapp.println(s"unapply patterns = $unapplyPatterns")
if ((ownType eq pt) || ownType.isError) result
else Typed(result, TypeTree(ownType))
case tp =>
val unapplyErr = if (tp.isError) unapplyFn else notAnExtractor(unapplyFn)
val typedArgsErr = args mapconserve (typed(_, defn.AnyType))
cpy.UnApply(tree, unapplyErr, Nil, typedArgsErr) withType ErrorType
}
}
/** Is given method reference applicable to type arguments `targs` and argument trees `args`?
* @param resultType The expected result type of the application
*/
def isApplicable(methRef: TermRef, targs: List[Type], args: List[Tree], resultType: Type)(implicit ctx: Context): Boolean = {
val nestedContext = ctx.fresh.withExploreTyperState
new ApplicableToTrees(methRef, targs, args, resultType)(nestedContext).success
}
/** Is given method reference applicable to argument types `args`?
* @param resultType The expected result type of the application
*/
def isApplicable(methRef: TermRef, args: List[Type], resultType: Type)(implicit ctx: Context): Boolean = {
val nestedContext = ctx.fresh.withExploreTyperState
new ApplicableToTypes(methRef, args, resultType)(nestedContext).success
}
/** Is given type applicable to type arguments `targs` and argument trees `args`,
* possibly after inserting an `apply`?
* @param resultType The expected result type of the application
*/
def isApplicable(tp: Type, targs: List[Type], args: List[Tree], resultType: Type)(implicit ctx: Context): Boolean =
onMethod(tp, isApplicable(_, targs, args, resultType))
/** Is given type applicable to argument types `args`, possibly after inserting an `apply`?
* @param resultType The expected result type of the application
*/
def isApplicable(tp: Type, args: List[Type], resultType: Type)(implicit ctx: Context): Boolean =
onMethod(tp, isApplicable(_, args, resultType))
private def onMethod(tp: Type, p: TermRef => Boolean)(implicit ctx: Context): Boolean = tp match {
case methRef: TermRef if methRef.widenSingleton.isInstanceOf[SignedType] =>
p(methRef)
case mt: SignedType =>
p(mt.narrow)
case _ =>
tp.member(nme.apply).hasAltWith(d => p(TermRef(tp, nme.apply, d)))
}
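// Note: the final case above means a value that is not itself a method is still
// considered applicable if its type has a matching `apply` member, mirroring the
// insertion of `apply` at call sites.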
/** In a set of overloaded applicable alternatives, is `alt1` at least as good as
* `alt2`? `alt1` and `alt2` are nonoverloaded references.
*/
def isAsGood(alt1: TermRef, alt2: TermRef)(implicit ctx: Context): Boolean = track("isAsGood") { ctx.traceIndented(i"isAsGood($alt1, $alt2)", overload) {
assert(alt1 ne alt2)
/** Is class or module class `sym1` derived from class or module class `sym2`?
* Module classes also inherit the relationship from their companions.
*/
def isDerived(sym1: Symbol, sym2: Symbol): Boolean =
if (sym1 isSubClass sym2) true
else if (sym2 is Module) isDerived(sym1, sym2.companionClass)
else (sym1 is Module) && isDerived(sym1.companionClass, sym2)
/** Is alternative `alt1` with type `tp1` as specific as alternative
* `alt2` with type `tp2` ? This is the case if
*
* 1. `tp2` is a method or poly type but `tp1` isn't, or `tp1` is nullary.
* 2. `tp2` and `tp1` are method or poly types and `tp2` can be applied to the parameters of `tp1`.
* 3. Neither `tp1` nor `tp2` are method or poly types and `tp1` is compatible with `tp2`.
*/
def isAsSpecific(alt1: TermRef, tp1: Type, alt2: TermRef, tp2: Type): Boolean = ctx.traceIndented(i"isAsSpecific $tp1 $tp2", overload) { tp1 match {
case tp1: PolyType =>
def bounds(tparamRefs: List[TypeRef]) = tp1.paramBounds map (_.substParams(tp1, tparamRefs))
val tparams = ctx.newTypeParams(alt1.symbol.owner, tp1.paramNames, EmptyFlags, bounds)
isAsSpecific(alt1, tp1.instantiate(tparams map (_.typeRef)), alt2, tp2)
case tp1: MethodType =>
def repeatedToSingle(tp: Type) = if (tp.isRepeatedParam) tp.argTypesHi.head else tp
isApplicable(alt2, tp1.paramTypes map repeatedToSingle, WildcardType) ||
tp1.paramTypes.isEmpty && tp2.isInstanceOf[MethodOrPoly]
case _ =>
tp2 match {
case tp2: MethodOrPoly => true
case _ => isCompatible(tp1, tp2)
}
}}
/** Drop any implicit parameter section */
def stripImplicit(tp: Type) = tp match {
case mt: ImplicitMethodType if !mt.isDependent => mt.resultType // todo: make sure implicit method types are not dependent
case _ => tp
}
val owner1 = alt1.symbol.owner
val owner2 = alt2.symbol.owner
val tp1 = stripImplicit(alt1.widen)
val tp2 = stripImplicit(alt2.widen)
def winsOwner1 = isDerived(owner1, owner2)
def winsType1 = isAsSpecific(alt1, tp1, alt2, tp2)
def winsOwner2 = isDerived(owner2, owner1)
def winsType2 = isAsSpecific(alt2, tp2, alt1, tp1)
// Assume the following probabilities:
//
// P(winsOwnerX) = 2/3
// P(winsTypeX) = 1/3
//
// Then the call probabilities of the 4 basic operations are as follows:
//
// winsOwner1: 1/1
// winsOwner2: 1/1
// winsType1 : 7/9
// winsType2 : 4/9
if (winsOwner1) /* 6/9 */ !winsOwner2 || /* 4/9 */ winsType1 || /* 8/27 */ !winsType2
else if (winsOwner2) /* 2/9 */ winsType1 && /* 2/27 */ !winsType2
else /* 1/9 */ winsType1 || /* 2/27 */ !winsType2
}}
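// Illustrative example (hypothetical overloads): given `def f(x: Any)` and
// `def f(x: String)` applied to a String argument, the String alternative is as good
// as the Any one (its parameter type is more specific) while the reverse does not
// hold, so narrowMostSpecific below keeps only `f(x: String)`.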
def narrowMostSpecific(alts: List[TermRef])(implicit ctx: Context): List[TermRef] = track("narrowMostSpecific") {
(alts: @unchecked) match {
case alt :: alts1 =>
def winner(bestSoFar: TermRef, alts: List[TermRef]): TermRef = alts match {
case alt :: alts1 =>
winner(if (isAsGood(alt, bestSoFar)) alt else bestSoFar, alts1)
case nil =>
bestSoFar
}
val best = winner(alt, alts1)
def asGood(alts: List[TermRef]): List[TermRef] = alts match {
case alt :: alts1 =>
if ((alt eq best) || !isAsGood(alt, best)) asGood(alts1)
else alt :: asGood(alts1)
case nil =>
Nil
}
best :: asGood(alts)
}
}
/** Resolve overloaded alternative `alts`, given expected type `pt` and
* possibly also type argument `targs` that need to be applied to each alternative
* to form the method type.
* todo: use techniques similar to those for implicits to pick candidates quickly?
*/
def resolveOverloaded(alts: List[TermRef], pt: Type, targs: List[Type] = Nil)(implicit ctx: Context): List[TermRef] = track("resolveOverloaded") {
def isDetermined(alts: List[TermRef]) = alts.isEmpty || alts.tail.isEmpty
/** The shape of the given tree as a type; cannot handle named arguments. */
def typeShape(tree: untpd.Tree): Type = tree match {
case untpd.Function(args, body) =>
defn.FunctionType(args map Function.const(defn.AnyType), typeShape(body))
case _ =>
defn.NothingType
}
/** The shape of the given tree as a type; this is more expensive than
 * typeShape but can handle named arguments.
 */
def treeShape(tree: untpd.Tree): Tree = tree match {
case NamedArg(name, arg) =>
val argShape = treeShape(arg)
cpy.NamedArg(tree, name, argShape).withType(argShape.tpe)
case _ =>
dummyTreeOfType(typeShape(tree))
}
def narrowByTypes(alts: List[TermRef], argTypes: List[Type], resultType: Type): List[TermRef] =
alts filter (isApplicable(_, argTypes, resultType))
val candidates = pt match {
case pt @ FunProto(args, resultType, _) =>
val numArgs = args.length
def sizeFits(alt: TermRef, tp: Type): Boolean = tp match {
case tp: PolyType => sizeFits(alt, tp.resultType)
case MethodType(_, ptypes) =>
val numParams = ptypes.length
def isVarArgs = ptypes.nonEmpty && ptypes.last.isRepeatedParam
def hasDefault = alt.symbol.hasDefaultParams
if (numParams == numArgs) true
else if (numParams < numArgs) isVarArgs
else if (numParams > numArgs + 1) hasDefault
else isVarArgs || hasDefault
case _ =>
numArgs == 0
}
def narrowBySize(alts: List[TermRef]): List[TermRef] =
alts filter (alt => sizeFits(alt, alt.widen))
def narrowByShapes(alts: List[TermRef]): List[TermRef] =
if (args exists (_.isInstanceOf[untpd.Function]))
if (args exists (_.isInstanceOf[Trees.NamedArg[_]]))
narrowByTrees(alts, args map treeShape, resultType)
else
narrowByTypes(alts, args map typeShape, resultType)
else
alts
def narrowByTrees(alts: List[TermRef], args: List[Tree], resultType: Type): List[TermRef] =
alts filter (isApplicable(_, targs, args, resultType))
val alts1 = narrowBySize(alts)
if (isDetermined(alts1)) alts1
else {
val alts2 = narrowByShapes(alts1)
if (isDetermined(alts2)) alts2
else narrowByTrees(alts2, pt.typedArgs, resultType)
}
case pt @ PolyProto(targs, pt1) =>
val alts1 = alts filter pt.isMatchedBy
resolveOverloaded(alts1, pt1, targs)
case defn.FunctionType(args, resultType) =>
narrowByTypes(alts, args, resultType)
case pt =>
alts filter (normalizedCompatible(_, pt))
}
if (isDetermined(candidates)) candidates
else narrowMostSpecific(candidates)
}
}
/*
def typedApply(app: untpd.Apply, fun: Tree, methRef: TermRef, args: List[Tree], resultType: Type)(implicit ctx: Context): Tree = track("typedApply") {
new ApplyToTyped(app, fun, methRef, args, resultType).result
}
def typedApply(fun: Tree, methRef: TermRef, args: List[Tree], resultType: Type)(implicit ctx: Context): Tree =
typedApply(untpd.Apply(untpd.TypedSplice(fun), args), fun, methRef, args, resultType)
*/
|
DarkDimius/dotty
|
src/dotty/tools/dotc/typer/Applications.scala
|
Scala
|
bsd-3-clause
| 38,248
|
package io.vamp.http_api.ws
import java.util.UUID
import akka.actor.PoisonPill
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.ws.{ Message, TextMessage }
import akka.http.scaladsl.server.Route
import akka.stream._
import akka.stream.scaladsl.{ Flow, Sink, Source }
import akka.util.Timeout
import io.vamp.common.akka.IoC._
import io.vamp.common.http.{ HttpApiDirectives, HttpApiHandlers, TerminateFlowStage }
import io.vamp.common.{ Config, Namespace }
import io.vamp.http_api.ws.WebSocketActor.{ SessionClosed, SessionEvent, SessionOpened, SessionRequest }
import io.vamp.http_api.{ AbstractRoute, LogDirective }
import scala.concurrent.Future
trait WebSocketRoute extends AbstractRoute with WebSocketMarshaller with HttpApiHandlers {
this: HttpApiDirectives with LogDirective ⇒
implicit def materializer: Materializer
private lazy val limit = Config.int("vamp.http-api.websocket.stream-limit")
protected def websocketApiHandler(implicit namespace: Namespace, timeout: Timeout): Route
def websocketRoutes(implicit namespace: Namespace, timeout: Timeout) = {
pathEndOrSingleSlash {
get {
extractRequest { request ⇒
handleWebSocketMessages {
websocket(request)
}
}
}
}
}
protected def filterWebSocketOutput(message: AnyRef)(implicit namespace: Namespace, timeout: Timeout): Future[Boolean] = Future.successful(true)
private def apiHandler(implicit namespace: Namespace, timeout: Timeout) = Route.asyncHandler(log {
websocketApiHandler
})
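// The per-connection flow: incoming text messages (strict or streamed) are collected,
// unmarshalled into session requests and routed to the WebSocketActor sink; the
// outgoing side is an actor-backed source that is filtered through
// filterWebSocketOutput, marshalled back to TextMessage and terminated when a
// PoisonPill is seen.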
private def websocket(origin: HttpRequest)(implicit namespace: Namespace, timeout: Timeout): Flow[AnyRef, Message, Any] = {
val id = UUID.randomUUID()
val in = Flow[AnyRef].collect {
case TextMessage.Strict(message) ⇒ Future.successful(message)
case TextMessage.Streamed(stream) ⇒ stream.limit(limit()).completionTimeout(timeout.duration).runFold("")(_ + _)
}.mapAsync(parallelism = 3)(identity)
.mapConcat(unmarshall)
.map(SessionRequest(apiHandler, id, origin, _))
.to(Sink.actorRef[SessionEvent](actorFor[WebSocketActor], SessionClosed(id)))
val out = Source.actorRef[AnyRef](16, OverflowStrategy.dropHead)
.mapMaterializedValue(actorFor[WebSocketActor] ! SessionOpened(id, _))
.via(new TerminateFlowStage[AnyRef](_ == PoisonPill))
.mapAsync(parallelism = 3)(message ⇒ filterWebSocketOutput(message).map(f ⇒ f → message))
.collect { case (true, m) ⇒ m }
.map(message ⇒ TextMessage.Strict(marshall(message)))
Flow.fromSinkAndSource(in, out)
}
}
|
magneticio/vamp
|
http_api/src/main/scala/io/vamp/http_api/ws/WebSocketRoute.scala
|
Scala
|
apache-2.0
| 2,620
|
package org.broadinstitute.dsde.workbench.sam.api
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.headers.OAuth2BearerToken
import akka.http.scaladsl.testkit.ScalatestRouteTest
import cats.effect.IO
import org.broadinstitute.dsde.workbench.model.ErrorReportJsonSupport._
import org.broadinstitute.dsde.workbench.model._
import org.broadinstitute.dsde.workbench.sam.TestSupport.{configResourceTypes, genGoogleSubjectId, googleServicesConfig}
import org.broadinstitute.dsde.workbench.sam.api.TestSamRoutes.SamResourceActionPatterns
import org.broadinstitute.dsde.workbench.sam.dataAccess.{MockAccessPolicyDAO, MockDirectoryDAO, MockRegistrationDAO}
import org.broadinstitute.dsde.workbench.sam.model.RootPrimitiveJsonSupport._
import org.broadinstitute.dsde.workbench.sam.model.SamJsonSupport._
import org.broadinstitute.dsde.workbench.sam.model._
import org.broadinstitute.dsde.workbench.sam.service._
import org.broadinstitute.dsde.workbench.sam.util.SamRequestContext
import org.broadinstitute.dsde.workbench.sam.{TestSupport, model}
import org.mockito.ArgumentMatcher
import org.mockito.ArgumentMatchers.{any, argThat, eq => mockitoEq}
import org.mockito.Mockito._
import org.scalatest.AppendedClues
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.scalatestplus.mockito.MockitoSugar
import spray.json.DefaultJsonProtocol._
import spray.json.{JsBoolean, JsValue}
import scala.concurrent.Future
class ResourceRoutesV2Spec extends AnyFlatSpec with Matchers with TestSupport with ScalatestRouteTest with AppendedClues with MockitoSugar {
implicit val errorReportSource = ErrorReportSource("sam")
val defaultUserInfo = UserInfo(OAuth2BearerToken("accessToken"), WorkbenchUserId("user1"), WorkbenchEmail("user1@example.com"), 0)
val defaultResourceType = ResourceType(
ResourceTypeName("rt"),
Set.empty,
Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.getParent))),
ResourceRoleName("owner")
)
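// Builds SamRoutes backed by in-memory DAOs and Mockito mocks: the policy evaluator and
// resource service are mocked (with getResourceType stubbed per resource type), while
// the user, status and managed-group services are real instances wired against
// MockDirectoryDAO / MockRegistrationDAO, and the default user is created up front.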
private def createSamRoutes(resourceTypes: Map[ResourceTypeName, ResourceType] = Map(defaultResourceType.name -> defaultResourceType),
userInfo: UserInfo = defaultUserInfo): SamRoutes = {
val accessPolicyDAO = new MockAccessPolicyDAO(resourceTypes)
val directoryDAO = new MockDirectoryDAO()
val registrationDAO = new MockRegistrationDAO()
val emailDomain = "example.com"
val policyEvaluatorService = mock[PolicyEvaluatorService](RETURNS_SMART_NULLS)
val mockResourceService = mock[ResourceService](RETURNS_SMART_NULLS)
resourceTypes.map { case (resourceTypeName, resourceType) =>
when(mockResourceService.getResourceType(resourceTypeName)).thenReturn(IO(Option(resourceType)))
}
val mockUserService = new UserService(directoryDAO, NoExtensions, registrationDAO, Seq.empty, new TosService(directoryDAO, registrationDAO, googleServicesConfig.appsDomain, TestSupport.tosConfig))
val mockStatusService = new StatusService(directoryDAO, registrationDAO, NoExtensions, TestSupport.dbRef)
val mockManagedGroupService = new ManagedGroupService(mockResourceService, policyEvaluatorService, resourceTypes, accessPolicyDAO, directoryDAO, NoExtensions, emailDomain)
mockUserService.createUser(WorkbenchUser(defaultUserInfo.userId, genGoogleSubjectId(), defaultUserInfo.userEmail, None), samRequestContext)
new TestSamRoutes(mockResourceService, policyEvaluatorService, mockUserService, mockStatusService, mockManagedGroupService, userInfo, directoryDAO, registrationDAO)
}
private val managedGroupResourceType = configResourceTypes.getOrElse(ResourceTypeName("managed-group"), throw new Error("Failed to load managed-group resource type from reference.conf"))
private val defaultTestUser = WorkbenchUser(WorkbenchUserId("testuser"), genGoogleSubjectId(), WorkbenchEmail("testuser@foo.com"), None)
"GET /api/resources/v2/{resourceType}/{resourceId}/actions/{action}" should "404 for unknown resource type" in {
val samRoutes = TestSamRoutes(Map.empty)
Get("/api/resources/v2/foo/bar/action") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
responseAs[ErrorReport].message shouldEqual "resource type foo not found"
}
}
"GET /api/config/v1/resourceTypes" should "200 when listing all resource types" in {
val samRoutes = TestSamRoutes(Map.empty)
Get("/api/config/v1/resourceTypes") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
}
}
"POST /api/resources/v2/{resourceType}" should "204 create resource" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(ResourceActionPattern("run", "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(ResourceAction("run")))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
val createResourceRequest = CreateResourceRequest(ResourceId("foo"), Map(AccessPolicyName("goober") -> AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(ResourceAction("run")), Set(resourceType.ownerRoleName))), Set.empty)
Post(s"/api/resources/v2/${resourceType.name}", createResourceRequest) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
Get(s"/api/resources/v2/${resourceType.name}/foo/action/run") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
responseAs[JsValue] shouldEqual JsBoolean(true)
}
}
"POST /api/resources/v2/{resourceType} with returnResource = true" should "201 create resource with content" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(ResourceActionPattern("run", "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(ResourceAction("run")))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
val createResourceRequest = CreateResourceRequest(ResourceId("foo"), Map(AccessPolicyName("goober") -> AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(ResourceAction("run")), Set(resourceType.ownerRoleName))), Set.empty, Some(true))
Post(s"/api/resources/v2/${resourceType.name}", createResourceRequest) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Created
val r = responseAs[CreateResourceResponse]
r.resourceId shouldEqual createResourceRequest.resourceId
r.authDomain shouldEqual createResourceRequest.authDomain
r.resourceTypeName shouldEqual resourceType.name
val returnedNames = r.accessPolicies.map( x => x.id.accessPolicyName )
createResourceRequest.policies.keys.foreach { k =>
returnedNames.contains(k) shouldEqual true
}
}
}
"POST /api/resources/v2/{resourceType} with returnResource = false" should "204 create resource with content" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(ResourceActionPattern("run", "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(ResourceAction("run")))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
val createResourceRequest = CreateResourceRequest(ResourceId("foo"), Map(AccessPolicyName("goober") -> AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(ResourceAction("run")), Set(resourceType.ownerRoleName))), Set.empty, Some(false))
Post(s"/api/resources/v2/${resourceType.name}", createResourceRequest) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
}
it should "204 create resource with content with parent" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(ResourceActionPattern(SamResourceActions.setParent.value, "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.setParent, SamResourceActions.addChild))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
val createParentResourceRequest = CreateResourceRequest(ResourceId("parent"), Map(AccessPolicyName("goober") -> AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set.empty, Set(resourceType.ownerRoleName))), Set.empty, Some(false))
Post(s"/api/resources/v2/${resourceType.name}", createParentResourceRequest) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
val createResourceRequest = CreateResourceRequest(ResourceId("foo"), Map(AccessPolicyName("goober") -> AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set.empty, Set(resourceType.ownerRoleName))), Set.empty, Some(false), Some(FullyQualifiedResourceId(resourceType.name, createParentResourceRequest.resourceId)))
Post(s"/api/resources/v2/${resourceType.name}", createResourceRequest) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
}
it should "400 with parent when parents not allowed" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(ResourceActionPattern(SamResourceActions.setParent.value, "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.addChild))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
val createParentResourceRequest = CreateResourceRequest(ResourceId("parent"), Map(AccessPolicyName("goober") -> AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set.empty, Set(resourceType.ownerRoleName))), Set.empty, Some(false))
Post(s"/api/resources/v2/${resourceType.name}", createParentResourceRequest) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
val createResourceRequest = CreateResourceRequest(ResourceId("foo"), Map(AccessPolicyName("goober") -> AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set.empty, Set(resourceType.ownerRoleName))), Set.empty, Some(false), Some(FullyQualifiedResourceId(resourceType.name, createParentResourceRequest.resourceId)))
Post(s"/api/resources/v2/${resourceType.name}", createResourceRequest) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.BadRequest
}
}
it should "403 with parent when add_child not allowed on parent" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(ResourceActionPattern(SamResourceActions.setParent.value, "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.setParent))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
val createParentResourceRequest = CreateResourceRequest(ResourceId("parent"), Map(AccessPolicyName("goober") -> AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set.empty, Set(resourceType.ownerRoleName))), Set.empty, Some(false))
Post(s"/api/resources/v2/${resourceType.name}", createParentResourceRequest) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
val createResourceRequest = CreateResourceRequest(ResourceId("foo"), Map(AccessPolicyName("goober") -> AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set.empty, Set(resourceType.ownerRoleName))), Set.empty, Some(false), Some(FullyQualifiedResourceId(resourceType.name, createParentResourceRequest.resourceId)))
Post(s"/api/resources/v2/${resourceType.name}", createResourceRequest) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "403 with parent when parent does not exist" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(ResourceActionPattern(SamResourceActions.setParent.value, "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.setParent))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
val createResourceRequest = CreateResourceRequest(ResourceId("foo"), Map(AccessPolicyName("goober") -> AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set.empty, Set(resourceType.ownerRoleName))), Set.empty, Some(false), Some(FullyQualifiedResourceId(resourceType.name, ResourceId("parent"))))
Post(s"/api/resources/v2/${resourceType.name}", createResourceRequest) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "204 when valid auth domain is provided and the resource type is constrainable" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(ResourceActionPattern("run", "", true)), Set(ResourceRole(ResourceRoleName("owner"), Set(ResourceAction("run")))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType, managedGroupResourceType.name -> managedGroupResourceType))
resourceType.isAuthDomainConstrainable shouldEqual true
val authDomainId = ResourceId("myAuthDomain")
runAndWait(samRoutes.managedGroupService.createManagedGroup(authDomainId, defaultUserInfo, samRequestContext = samRequestContext))
val authDomain = Set(WorkbenchGroupName(authDomainId.value))
val createResourceRequest = CreateResourceRequest(ResourceId("foo"), Map(AccessPolicyName("goober") -> AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(ResourceAction("run")), Set(resourceType.ownerRoleName))), authDomain)
Post(s"/api/resources/v2/${resourceType.name}", createResourceRequest) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
Get(s"/api/resources/v2/${resourceType.name}/foo/action/run") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
responseAs[JsValue] shouldEqual JsBoolean(true)
}
}
it should "400 when resource type allows auth domains and id reuse" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(ResourceActionPattern("run", "", true)), Set(ResourceRole(ResourceRoleName("owner"), Set(ResourceAction("run")))), ResourceRoleName("owner"), true)
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
val createResourceRequest = CreateResourceRequest(ResourceId("foo"), Map(AccessPolicyName("goober") -> AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(ResourceAction("run")), Set(resourceType.ownerRoleName))), Set.empty)
Post(s"/api/resources/v2/${resourceType.name}", createResourceRequest) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.BadRequest
}
}
it should "400 when no policies are provided" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(ResourceActionPattern("run", "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(ResourceAction("run")))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
val createResourceRequest = CreateResourceRequest(ResourceId("foo"), Map.empty, Set.empty)
Post(s"/api/resources/v2/${resourceType.name}", createResourceRequest) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.BadRequest
}
}
it should "400 when auth domain group does not exist" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(ResourceActionPattern("run", "", true)), Set(ResourceRole(ResourceRoleName("owner"), Set(ResourceAction("run")))), ResourceRoleName("owner"))
val authDomainId = ResourceId("myAuthDomain")
val samRoutes = ManagedGroupRoutesSpec.createSamRoutesWithResource(Map(resourceType.name -> resourceType, managedGroupResourceType.name -> managedGroupResourceType), Resource(ManagedGroupService.managedGroupTypeName, authDomainId, Set.empty))
resourceType.isAuthDomainConstrainable shouldEqual true
val authDomain = Set(WorkbenchGroupName(authDomainId.value))
// Group is never persisted
val createResourceRequest = CreateResourceRequest(ResourceId("foo"), Map(AccessPolicyName("goober") -> AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(ResourceAction("run")), Set(resourceType.ownerRoleName))), authDomain)
Post(s"/api/resources/v2/${resourceType.name}", createResourceRequest) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.BadRequest
}
}
it should "400 when auth domain group exists but requesting user is not in that group" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(ResourceActionPattern("run", "", true)), Set(ResourceRole(ResourceRoleName("owner"), Set(ResourceAction("run")))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType, managedGroupResourceType.name -> managedGroupResourceType))
resourceType.isAuthDomainConstrainable shouldEqual true
val authDomainId = ResourceId("myAuthDomain")
val otherUser = UserInfo(OAuth2BearerToken("magicString"), WorkbenchUserId("bugsBunny"), WorkbenchEmail("bugsford_bunnington@example.com"), 0)
runAndWait(samRoutes.userService.createUser(WorkbenchUser(otherUser.userId, genGoogleSubjectId(), otherUser.userEmail, None), samRequestContext))
runAndWait(samRoutes.managedGroupService.createManagedGroup(authDomainId, otherUser, samRequestContext = samRequestContext))
val authDomain = Set(WorkbenchGroupName(authDomainId.value))
val createResourceRequest = CreateResourceRequest(ResourceId("foo"), Map(AccessPolicyName("goober") -> AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(ResourceAction("run")), Set(resourceType.ownerRoleName))), authDomain)
Post(s"/api/resources/v2/${resourceType.name}", createResourceRequest) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.BadRequest
}
}
"POST /api/resources/v2/{resourceType}/{resourceId}" should "204 create resource" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(ResourceActionPattern("run", "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(ResourceAction("run")))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
Post(s"/api/resources/v2/${resourceType.name}/foo") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
Get(s"/api/resources/v2/${resourceType.name}/foo/action/run") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
responseAs[JsValue] shouldEqual JsBoolean(true)
}
}
"GET /api/resources/v2/{resourceType}/{resourceId}/roles" should "200 on list resource roles" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(ResourceActionPattern("run", "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(ResourceAction("run")))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
Post(s"/api/resources/v2/${resourceType.name}/foo") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
Get(s"/api/resources/v2/${resourceType.name}/foo/roles") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
responseAs[Set[String]]
}
}
it should "404 on list resource roles when resource type doesnt exist" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(ResourceActionPattern("run", "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(ResourceAction("run")))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
Get(s"/api/resources/v2/doesntexist/foo/roles") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
"GET /api/resources/v2/{resourceType}/{resourceId}/actions" should "200 on list resource actions" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(ResourceActionPattern("run", "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(ResourceAction("run")))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
Post(s"/api/resources/v2/${resourceType.name}/foo") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
Get(s"/api/resources/v2/${resourceType.name}/foo/actions") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
responseAs[Set[String]]
}
}
it should "404 on list resource actions when resource type doesnt exist" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(ResourceActionPattern("run", "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(ResourceAction("run")))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
Get(s"/api/resources/v2/doesntexist/foo/actions") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
private def responsePayloadClue(str: String): String = s" -> Here is the response payload: $str"
private def createUserResourcePolicy(members: AccessPolicyMembership, resourceType: ResourceType, samRoutes: TestSamRoutes, resourceId: ResourceId, policyName: AccessPolicyName): Unit = {
val user = WorkbenchUser(samRoutes.userInfo.userId, genGoogleSubjectId(), samRoutes.userInfo.userEmail, None)
findOrCreateUser(user, samRoutes.userService)
Post(s"/api/resources/v2/${resourceType.name}/${resourceId.value}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent withClue responsePayloadClue(responseAs[String])
}
Put(s"/api/resources/v2/${resourceType.name}/${resourceId.value}/policies/${policyName.value}", members) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Created withClue responsePayloadClue(responseAs[String])
}
}
private def findOrCreateUser(user: WorkbenchUser, userService: UserService): UserStatus = {
runAndWait(userService.getUserStatus(user.id, samRequestContext = samRequestContext)) match {
case Some(userStatus) => userStatus
case None => runAndWait(userService.createUser(user, samRequestContext))
}
}
"DELETE /api/resources/v2/{resourceType}/{resourceId}" should "204 when deleting a resource and the user has permission to do so" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(SamResourceActionPatterns.alterPolicies, SamResourceActionPatterns.readPolicies, SamResourceActionPatterns.delete), Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.delete, SamResourceActions.readPolicies))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
//Create the resource
Post(s"/api/resources/v2/${resourceType.name}/foo") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
//Read the policies to make sure the resource exists
Get(s"/api/resources/v2/${resourceType.name}/foo/policies") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
}
//Delete the resource
Delete(s"/api/resources/v2/${resourceType.name}/foo") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
}
it should "403 when deleting a resource and the user has permission to see the resource but not delete" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(SamResourceActionPatterns.alterPolicies, SamResourceActionPatterns.readPolicies), Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.readPolicies))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
//Create the resource
Post(s"/api/resources/v2/${resourceType.name}/foo") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
//Read the policies to make sure the resource exists
Get(s"/api/resources/v2/${resourceType.name}/foo/policies") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
}
//Delete the resource
Delete(s"/api/resources/v2/${resourceType.name}/foo") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "404 when deleting a resource of a type that doesn't exist" in {
val samRoutes = TestSamRoutes(Map.empty)
//Delete the resource
Delete(s"/api/resources/v2/INVALID_RESOURCE_TYPE/foo") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
it should "404 when deleting a resource that exists but can't be seen by the user" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(SamResourceActionPatterns.alterPolicies, SamResourceActionPatterns.readPolicies), Set(ResourceRole(ResourceRoleName("owner"), Set(ResourceAction("run")))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
samRoutes.resourceService.createResourceType(resourceType, samRequestContext).unsafeRunSync()
runAndWait(samRoutes.userService.createUser(WorkbenchUser(WorkbenchUserId("user2"), genGoogleSubjectId(), WorkbenchEmail("user2@example.com"), None), samRequestContext))
runAndWait(samRoutes.resourceService.createResource(resourceType, ResourceId("foo"), UserInfo(OAuth2BearerToken("accessToken"), WorkbenchUserId("user2"), WorkbenchEmail("user2@example.com"), 0), samRequestContext))
//Verify resource exists by checking for conflict on recreate
Post(s"/api/resources/v2/${resourceType.name}/foo") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Conflict
}
//Delete the resource
Delete(s"/api/resources/v2/${resourceType.name}/foo") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
it should "204 deleting a child resource" in {
val childResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val currentParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("currentParent"))
val samRoutes = createSamRoutes(Map(defaultResourceType.name -> defaultResourceType))
setupParentRoutes(samRoutes, childResource,
currentParentOpt = Option(currentParentResource),
actionsOnChild = Set(SamResourceActions.setParent, SamResourceActions.delete))
//Delete the resource
Delete(s"/api/resources/v2/${defaultResourceType.name}/${childResource.resourceId.value}") ~> samRoutes.route ~> check {
withClue(responseAs[String]) {
status shouldEqual StatusCodes.NoContent
}
}
}
it should "400 when attempting to delete a resource with children" in {
val childResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val parentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("parent"))
val samRoutes = createSamRoutes(Map(defaultResourceType.name -> defaultResourceType))
setupParentRoutes(samRoutes, childResource,
currentParentOpt = Option(parentResource),
actionsOnChild = Set(SamResourceActions.setParent, SamResourceActions.delete),
actionsOnCurrentParent = Set(SamResourceActions.removeChild))
//Throw 400 exception when delete is called
when(samRoutes.resourceService.deleteResource(mockitoEq(childResource), any[SamRequestContext]))
.thenThrow(new WorkbenchExceptionWithErrorReport(ErrorReport(StatusCodes.BadRequest, "Cannot delete a resource with children. Delete the children first then try again.")))
//Delete the resource
Delete(s"/api/resources/v2/${defaultResourceType.name}/${childResource.resourceId.value}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.BadRequest
}
}
"GET /api/resources/v2/{resourceType}" should "200" in {
val resourceType = ResourceType(ResourceTypeName("rt"), Set(SamResourceActionPatterns.readPolicies, ResourceActionPattern("can_compute", "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.readPolicies))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
//Create a resource
Post(s"/api/resources/v2/${resourceType.name}/foo") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
//List the resources of this type that are visible to the user
Get(s"/api/resources/v2/${resourceType.name}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
responseAs[List[UserResourcesResponse]].size should equal(1)
}
}
"PUT /api/resources/v2/{resourceType}/{resourceId}/policies/{policyName}/memberEmails/{email}" should "204 adding a member" in {
// happy case
val resourceType = ResourceType(
ResourceTypeName("rt"),
Set(SamResourceActionPatterns.alterPolicies),
Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.alterPolicies))),
ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
runAndWait(samRoutes.resourceService.createResource(resourceType, ResourceId("foo"), defaultUserInfo, samRequestContext))
runAndWait(samRoutes.userService.createUser(defaultTestUser, samRequestContext))
Put(s"/api/resources/v2/${resourceType.name}/foo/policies/${resourceType.ownerRoleName}/memberEmails/${defaultTestUser.email}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
}
it should "204 adding a member with can share" in {
// happy case
val resourceType = ResourceType(
ResourceTypeName("rt"),
Set(SamResourceActionPatterns.sharePolicy),
Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.sharePolicy(AccessPolicyName("owner"))))),
ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
runAndWait(samRoutes.resourceService.createResource(resourceType, ResourceId("foo"), defaultUserInfo, samRequestContext))
runAndWait(samRoutes.userService.createUser(defaultTestUser, samRequestContext))
Put(s"/api/resources/v2/${resourceType.name}/foo/policies/${resourceType.ownerRoleName}/memberEmails/${defaultTestUser.email}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
}
it should "400 adding unknown subject" in {
// differs from happy case in that we don't create the user
val resourceType = ResourceType(ResourceTypeName("rt"), Set(SamResourceActionPatterns.alterPolicies, ResourceActionPattern("can_compute", "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.alterPolicies))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
runAndWait(samRoutes.resourceService.createResource(resourceType, ResourceId("foo"), defaultUserInfo, samRequestContext))
//runAndWait(samRoutes.userService.createUser(testUser))
Put(s"/api/resources/v2/${resourceType.name}/foo/policies/${resourceType.ownerRoleName}/memberEmails/${defaultTestUser.email}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.BadRequest
}
}
it should "403 adding without permission" in {
// differs from happy case in that owner role does not have alter_policies
val resourceType = ResourceType(ResourceTypeName("rt"), Set(SamResourceActionPatterns.alterPolicies, SamResourceActionPatterns.sharePolicy), Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.sharePolicy(AccessPolicyName("splat"))))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
runAndWait(samRoutes.resourceService.createResource(resourceType, ResourceId("foo"), defaultUserInfo, samRequestContext))
runAndWait(samRoutes.userService.createUser(defaultTestUser, samRequestContext))
Put(s"/api/resources/v2/${resourceType.name}/foo/policies/${resourceType.ownerRoleName}/memberEmails/${defaultTestUser.email}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "404 adding without any access" in {
// differs from happy case in that testUser creates the resource, not defaultUser, who makes the PUT call
val resourceType = ResourceType(ResourceTypeName("rt"), Set(SamResourceActionPatterns.alterPolicies, ResourceActionPattern("can_compute", "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(ResourceAction("can_compute")))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
val testUser = UserInfo(OAuth2BearerToken("token"), WorkbenchUserId("testuser"), WorkbenchEmail("testuser@foo.com"), 0)
runAndWait(samRoutes.userService.createUser(WorkbenchUser(testUser.userId, genGoogleSubjectId(), testUser.userEmail, None), samRequestContext))
runAndWait(samRoutes.resourceService.createResource(resourceType, ResourceId("foo"), testUser, samRequestContext))
Put(s"/api/resources/v2/${resourceType.name}/foo/policies/${resourceType.ownerRoleName}/memberEmails/${testUser.userEmail}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
"DELETE /api/resources/v2/{resourceType}/{resourceId}/policies/{policyName}/memberEmails/{email}" should "204 deleting a member" in {
// happy case
val resourceType = ResourceType(ResourceTypeName("rt"), Set(SamResourceActionPatterns.alterPolicies, ResourceActionPattern("can_compute", "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.alterPolicies))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
runAndWait(samRoutes.resourceService.createResource(resourceType, ResourceId("foo"), defaultUserInfo, samRequestContext))
runAndWait(samRoutes.userService.createUser(defaultTestUser, samRequestContext))
runAndWait(samRoutes.resourceService.addSubjectToPolicy(FullyQualifiedPolicyId(
FullyQualifiedResourceId(resourceType.name, ResourceId("foo")), AccessPolicyName(resourceType.ownerRoleName.value)), defaultTestUser.id, samRequestContext))
Delete(s"/api/resources/v2/${resourceType.name}/foo/policies/${resourceType.ownerRoleName}/memberEmails/${defaultTestUser.email}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
}
it should "204 deleting a member with can share" in {
// happy case
val resourceType = ResourceType(ResourceTypeName("rt"), Set(SamResourceActionPatterns.sharePolicy, ResourceActionPattern("can_compute", "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.sharePolicy(AccessPolicyName("owner"))))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
runAndWait(samRoutes.resourceService.createResource(resourceType, ResourceId("foo"), defaultUserInfo, samRequestContext))
runAndWait(samRoutes.userService.createUser(defaultTestUser, samRequestContext))
runAndWait(samRoutes.resourceService.addSubjectToPolicy(FullyQualifiedPolicyId(
FullyQualifiedResourceId(resourceType.name, ResourceId("foo")), AccessPolicyName(resourceType.ownerRoleName.value)), defaultTestUser.id, samRequestContext))
Delete(s"/api/resources/v2/${resourceType.name}/foo/policies/${resourceType.ownerRoleName}/memberEmails/${defaultTestUser.email}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
}
it should "400 deleting unknown subject" in {
// differs from happy case in that we don't create the user
val resourceType = ResourceType(ResourceTypeName("rt"), Set(SamResourceActionPatterns.alterPolicies, ResourceActionPattern("can_compute", "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.alterPolicies))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
runAndWait(samRoutes.resourceService.createResource(resourceType, ResourceId("foo"), defaultUserInfo, samRequestContext))
//runAndWait(samRoutes.userService.createUser(testUser))
//runAndWait(samRoutes.resourceService.addSubjectToPolicy(ResourceAndPolicyName(Resource(resourceType.name, ResourceId("foo")), AccessPolicyName(resourceType.ownerRoleName.value)), testUser.id))
Delete(s"/api/resources/v2/${resourceType.name}/foo/policies/${resourceType.ownerRoleName}/memberEmails/${defaultTestUser.email}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.BadRequest
}
}
it should "403 removing without permission" in {
// differs from happy case in that owner role does not have alter_policies
val resourceType = ResourceType(ResourceTypeName("rt"), Set(SamResourceActionPatterns.alterPolicies, SamResourceActionPatterns.sharePolicy), Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.sharePolicy(AccessPolicyName("splat"))))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
runAndWait(samRoutes.resourceService.createResource(resourceType, ResourceId("foo"), defaultUserInfo, samRequestContext))
runAndWait(samRoutes.userService.createUser(defaultTestUser, samRequestContext))
runAndWait(samRoutes.resourceService.addSubjectToPolicy(FullyQualifiedPolicyId(
FullyQualifiedResourceId(resourceType.name, ResourceId("foo")), AccessPolicyName(resourceType.ownerRoleName.value)), defaultTestUser.id, samRequestContext))
Delete(s"/api/resources/v2/${resourceType.name}/foo/policies/${resourceType.ownerRoleName}/memberEmails/${defaultTestUser.email}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "404 removing without any access" in {
// differs from happy case in that testUser creates the resource, not defaultUser, who makes the DELETE call
val resourceType = ResourceType(ResourceTypeName("rt"), Set(SamResourceActionPatterns.alterPolicies, ResourceActionPattern("can_compute", "", false)), Set(ResourceRole(ResourceRoleName("owner"), Set(ResourceAction("can_compute")))), ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
val testUser = UserInfo(OAuth2BearerToken("token"), WorkbenchUserId("testuser"), WorkbenchEmail("testuser@foo.com"), 0)
runAndWait(samRoutes.userService.createUser(WorkbenchUser(testUser.userId, genGoogleSubjectId(), testUser.userEmail, None), samRequestContext))
runAndWait(samRoutes.resourceService.createResource(resourceType, ResourceId("foo"), testUser, samRequestContext))
runAndWait(samRoutes.resourceService.addSubjectToPolicy(FullyQualifiedPolicyId(
FullyQualifiedResourceId(resourceType.name, ResourceId("foo")), AccessPolicyName(resourceType.ownerRoleName.value)), testUser.userId, samRequestContext))
Delete(s"/api/resources/v2/${resourceType.name}/foo/policies/${resourceType.ownerRoleName}/memberEmails/${testUser.userEmail}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
"GET /api/resources/v2/{resourceType}/{resourceId}/policies/{policyName}/public" should "200 if user has read_policies" in {
val resourceType = ResourceType(
ResourceTypeName("rt"),
Set(SamResourceActionPatterns.readPolicies),
Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.readPolicies))),
ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
val resourceId = ResourceId("foo")
runAndWait(samRoutes.resourceService.createResource(resourceType, resourceId, samRoutes.userInfo, samRequestContext))
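// "withClue responsePayloadClue(...)" appends the response body to the assertion's failure message, so a failed status check also shows what the server returned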
Get(s"/api/resources/v2/${resourceType.name}/${resourceId.value}/policies/${resourceType.ownerRoleName.value}/public") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK withClue responsePayloadClue(responseAs[String])
responseAs[Boolean] should equal(false)
}
}
it should "200 if user has read_policy::" in {
val resourceType = ResourceType(
ResourceTypeName("rt"),
Set(SamResourceActionPatterns.readPolicy),
Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.readPolicy(AccessPolicyName("owner"))))),
ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
val resourceId = ResourceId("foo")
runAndWait(samRoutes.resourceService.createResource(resourceType, resourceId, samRoutes.userInfo, samRequestContext))
Get(s"/api/resources/v2/${resourceType.name}/${resourceId.value}/policies/${resourceType.ownerRoleName.value}/public") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK withClue responsePayloadClue(responseAs[String])
responseAs[Boolean] should equal(false)
}
}
it should "403 if user cannot read policies" in {
val resourceType = ResourceType(
ResourceTypeName("rt"),
Set(SamResourceActionPatterns.delete),
Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.delete))),
ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
val resourceId = ResourceId("foo")
runAndWait(samRoutes.resourceService.createResource(resourceType, resourceId, samRoutes.userInfo, samRequestContext))
Get(s"/api/resources/v2/${resourceType.name}/${resourceId.value}/policies/${resourceType.ownerRoleName.value}/public") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden withClue responsePayloadClue(responseAs[String])
}
}
"PUT /api/resources/v2/{resourceType}/{resourceId}/policies/{policyName}/public" should "204 if user has alter_policies and set_public" in {
val resourceType = ResourceType(
ResourceTypeName("rt"),
Set(SamResourceActionPatterns.alterPolicies, SamResourceActionPatterns.readPolicies),
Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.alterPolicies, SamResourceActions.readPolicies))),
ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
runAndWait(samRoutes.resourceService.addSubjectToPolicy(model.FullyQualifiedPolicyId(
model.FullyQualifiedResourceId(TestSamRoutes.resourceTypeAdmin.name, ResourceId(resourceType.name.value)), AccessPolicyName(TestSamRoutes.resourceTypeAdmin.ownerRoleName.value)), samRoutes.userInfo.userId, samRequestContext))
val resourceId = ResourceId("foo")
runAndWait(samRoutes.resourceService.createResource(resourceType, resourceId, samRoutes.userInfo, samRequestContext))
Put(s"/api/resources/v2/${resourceType.name}/${resourceId.value}/policies/${resourceType.ownerRoleName.value}/public", true) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent withClue responsePayloadClue(responseAs[String])
}
}
it should "204 if user has share_policy:: and set_public::" in {
val resourceType = ResourceType(
ResourceTypeName("rt"),
Set(SamResourceActionPatterns.sharePolicy, SamResourceActionPatterns.readPolicies),
Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.sharePolicy(AccessPolicyName("owner")), SamResourceActions.readPolicies))),
ResourceRoleName("owner"))
val resourceTypeAdmin = ResourceType(
ResourceTypeName("resource_type_admin"),
Set(
SamResourceActionPatterns.alterPolicies,
SamResourceActionPatterns.readPolicies,
SamResourceActionPatterns.sharePolicy,
SamResourceActionPatterns.readPolicy,
SamResourceActionPatterns.setPublic,
SamResourceActionPatterns.setPolicyPublic
),
Set(
ResourceRole(
ResourceRoleName("owner"),
Set(SamResourceActions.alterPolicies, SamResourceActions.readPolicies, SamResourceActions.setPublicPolicy(AccessPolicyName("owner"))))),
ResourceRoleName("owner")
)
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
runAndWait(samRoutes.resourceService.addSubjectToPolicy(model.FullyQualifiedPolicyId(
model.FullyQualifiedResourceId(resourceTypeAdmin.name, ResourceId(resourceType.name.value)), AccessPolicyName(resourceTypeAdmin.ownerRoleName.value)), samRoutes.userInfo.userId, samRequestContext))
val resourceId = ResourceId("foo")
runAndWait(samRoutes.resourceService.createResource(resourceType, resourceId, samRoutes.userInfo, samRequestContext))
Put(s"/api/resources/v2/${resourceType.name}/${resourceId.value}/policies/${resourceType.ownerRoleName.value}/public", true) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent withClue responsePayloadClue(responseAs[String])
}
}
it should "403 if user does not have policy access" in {
val resourceType = ResourceType(
ResourceTypeName("rt"),
Set(SamResourceActionPatterns.readPolicies),
Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.readPolicies))),
ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
runAndWait(samRoutes.resourceService.addSubjectToPolicy(model.FullyQualifiedPolicyId(
model.FullyQualifiedResourceId(TestSamRoutes.resourceTypeAdmin.name, ResourceId(resourceType.name.value)), AccessPolicyName(TestSamRoutes.resourceTypeAdmin.ownerRoleName.value)), samRoutes.userInfo.userId, samRequestContext))
val resourceId = ResourceId("foo")
runAndWait(samRoutes.resourceService.createResource(resourceType, resourceId, samRoutes.userInfo, samRequestContext))
Put(s"/api/resources/v2/${resourceType.name}/${resourceId.value}/policies/${resourceType.ownerRoleName.value}/public", true) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden withClue responsePayloadClue(responseAs[String])
}
}
it should "404 if user does not have set public access" in {
val resourceType = ResourceType(
ResourceTypeName("rt"),
Set(SamResourceActionPatterns.alterPolicies, SamResourceActionPatterns.readPolicies),
Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.alterPolicies, SamResourceActions.readPolicies))),
ResourceRoleName("owner"))
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
val resourceId = ResourceId("foo")
runAndWait(samRoutes.resourceService.createResource(resourceType, resourceId, samRoutes.userInfo, samRequestContext))
Put(s"/api/resources/v2/${resourceType.name}/${resourceId.value}/policies/${resourceType.ownerRoleName.value}/public", true) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound withClue responsePayloadClue(responseAs[String])
}
}
"GET /api/resources/v2/{resourceType}/{resourceId}/authDomain" should "200 with auth domain if auth domain is set and user has read_auth_domain" in {
val managedGroupResourceType = initManagedGroupResourceType()
val authDomain = "authDomain"
val resourceType = ResourceType(
ResourceTypeName("rt"),
Set(SamResourceActionPatterns.readAuthDomain, SamResourceActionPatterns.use),
Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.readAuthDomain, ManagedGroupService.useAction))),
ResourceRoleName("owner")
)
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType, managedGroupResourceType.name -> managedGroupResourceType))
runAndWait(samRoutes.managedGroupService.createManagedGroup(ResourceId(authDomain), defaultUserInfo, samRequestContext = samRequestContext))
val resourceId = ResourceId("foo")
val policiesMap = Map(AccessPolicyName("ap") -> AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(SamResourceActions.readAuthDomain, ManagedGroupService.useAction), Set(ResourceRoleName("owner"))))
runAndWait(samRoutes.resourceService.createResource(resourceType, resourceId, policiesMap, Set(WorkbenchGroupName(authDomain)), None, defaultUserInfo.userId, samRequestContext))
Get(s"/api/resources/v2/${resourceType.name}/${resourceId.value}/authDomain") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
responseAs[Set[String]] shouldEqual Set(authDomain)
}
}
it should "200 with an empty set when the user has read_auth_domain but there is no auth domain set" in {
val managedGroupResourceType = initManagedGroupResourceType()
val resourceType = ResourceType(
ResourceTypeName("rt"),
Set(SamResourceActionPatterns.readAuthDomain, SamResourceActionPatterns.use),
Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.readAuthDomain, ManagedGroupService.useAction))),
ResourceRoleName("owner")
)
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType, managedGroupResourceType.name -> managedGroupResourceType))
val resourceId = ResourceId("foo")
val policiesMap = Map(AccessPolicyName("ap") -> AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(SamResourceActions.readAuthDomain, ManagedGroupService.useAction), Set(ResourceRoleName("owner"))))
runAndWait(samRoutes.resourceService.createResource(resourceType, resourceId, policiesMap, Set.empty, None, defaultUserInfo.userId, samRequestContext))
Get(s"/api/resources/v2/${resourceType.name}/${resourceId.value}/authDomain") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
responseAs[Set[String]] shouldEqual Set.empty
}
}
it should "403 when user does not have read_auth_domain" in {
val managedGroupResourceType = initManagedGroupResourceType()
val authDomain = "authDomain"
val resourceType = ResourceType(
ResourceTypeName("rt"),
Set(SamResourceActionPatterns.use),
Set(ResourceRole(ResourceRoleName("owner"), Set(ManagedGroupService.useAction))),
ResourceRoleName("owner")
)
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType, managedGroupResourceType.name -> managedGroupResourceType))
runAndWait(samRoutes.managedGroupService.createManagedGroup(ResourceId(authDomain), defaultUserInfo, samRequestContext = samRequestContext))
val resourceId = ResourceId("foo")
val policiesMap = Map(AccessPolicyName("ap") -> AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(ManagedGroupService.useAction), Set(ResourceRoleName("owner"))))
runAndWait(samRoutes.resourceService.createResource(resourceType, resourceId, policiesMap, Set(WorkbenchGroupName(authDomain)), None, defaultUserInfo.userId, samRequestContext))
Get(s"/api/resources/v2/${resourceType.name}/${resourceId.value}/authDomain") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "404 when resource or resource type is not found" in {
val managedGroupResourceType = initManagedGroupResourceType()
val authDomain = "authDomain"
val resourceType = ResourceType(
ResourceTypeName("rt"),
Set(SamResourceActionPatterns.readAuthDomain, SamResourceActionPatterns.use),
Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.readAuthDomain, ManagedGroupService.useAction))),
ResourceRoleName("owner")
)
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType, managedGroupResourceType.name -> managedGroupResourceType))
runAndWait(samRoutes.managedGroupService.createManagedGroup(ResourceId(authDomain), defaultUserInfo, samRequestContext = samRequestContext))
val resourceId = ResourceId("foo")
val policiesMap = Map(AccessPolicyName("ap") -> AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(SamResourceActions.readAuthDomain, ManagedGroupService.useAction), Set(ResourceRoleName("owner"))))
runAndWait(samRoutes.resourceService.createResource(resourceType, resourceId, policiesMap, Set(WorkbenchGroupName(authDomain)), None, defaultUserInfo.userId, samRequestContext))
Get(s"/api/resources/v2/fakeResourceTypeName/$resourceId/authDomain") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
Get(s"/api/resources/v2/${resourceType.name}/fakeResourceId/authDomain") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
it should "404 when the user is not a member of any policy on the resource" in {
val managedGroupResourceType = initManagedGroupResourceType()
val authDomain = "authDomain"
val resourceType = ResourceType(
ResourceTypeName("rt"),
Set(SamResourceActionPatterns.readAuthDomain, SamResourceActionPatterns.use),
Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.readAuthDomain, ManagedGroupService.useAction))),
ResourceRoleName("owner")
)
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType, managedGroupResourceType.name -> managedGroupResourceType))
runAndWait(samRoutes.managedGroupService.createManagedGroup(ResourceId(authDomain), defaultUserInfo, samRequestContext = samRequestContext))
val resourceId = ResourceId("foo")
val policiesMap = Map(AccessPolicyName("ap") -> AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(SamResourceActions.readAuthDomain, ManagedGroupService.useAction), Set(ResourceRoleName("owner"))))
runAndWait(samRoutes.resourceService.createResource(resourceType, resourceId, policiesMap, Set(WorkbenchGroupName(authDomain)), None, defaultUserInfo.userId, samRequestContext))
Get(s"/api/resources/v2/${resourceType.name}/${resourceId.value}/authDomain") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
responseAs[Set[String]] shouldEqual Set(authDomain)
}
val otherUserSamRoutes = TestSamRoutes(Map(resourceType.name -> resourceType), UserInfo(OAuth2BearerToken("accessToken"), WorkbenchUserId("user2"), WorkbenchEmail("user2@example.com"), 0))
Get(s"/api/resources/v2/${resourceType.name}/${resourceId.value}/authDomain") ~> otherUserSamRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
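// Builds a resource type mirroring the managed-group configuration used by ManagedGroupService
// (admin, member, and admin-notifier roles plus per-policy share/read actions) so the auth domain
// tests in this spec can create real managed groups to use as auth domains.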
private def initManagedGroupResourceType(): ResourceType = {
val accessPolicyNames = Set(ManagedGroupService.adminPolicyName, ManagedGroupService.memberPolicyName, ManagedGroupService.adminNotifierPolicyName)
val policyActions: Set[ResourceAction] = accessPolicyNames.flatMap(policyName => Set(SamResourceActions.sharePolicy(policyName), SamResourceActions.readPolicy(policyName)))
val resourceActions = Set(ResourceAction("delete"), ResourceAction("notify_admins"), ResourceAction("set_access_instructions"), ManagedGroupService.useAction) union policyActions
val resourceActionPatterns = resourceActions.map(action => ResourceActionPattern(action.value, "", false))
val defaultOwnerRole = ResourceRole(ManagedGroupService.adminRoleName, resourceActions)
val defaultMemberRole = ResourceRole(ManagedGroupService.memberRoleName, Set.empty)
val defaultAdminNotifierRole = ResourceRole(ManagedGroupService.adminNotifierRoleName, Set(ResourceAction("notify_admins")))
val defaultRoles = Set(defaultOwnerRole, defaultMemberRole, defaultAdminNotifierRole)
ResourceType(ManagedGroupService.managedGroupTypeName, resourceActionPatterns, defaultRoles, ManagedGroupService.adminRoleName)
}
"GET /api/resources/v2/{resourceTypeName}/{resourceId}/allUsers" should "200 with all users list when user has read_policies action" in {
val resourceType = ResourceType(
ResourceTypeName("rt"),
Set(SamResourceActionPatterns.readPolicies),
Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.readPolicies))),
ResourceRoleName("owner")
)
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
val resourceId = ResourceId("foo")
runAndWait(samRoutes.resourceService.createResource(resourceType, resourceId, samRoutes.userInfo, samRequestContext))
val user = samRoutes.directoryDAO.loadUser(samRoutes.userInfo.userId, samRequestContext).unsafeRunSync().get
val userIdInfo = UserIdInfo(user.id, user.email, user.googleSubjectId)
Get(s"/api/resources/v2/${resourceType.name}/${resourceId.value}/allUsers") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
responseAs[Set[UserIdInfo]].map(_.userSubjectId) shouldEqual Set(userIdInfo.userSubjectId)
}
}
it should "403 when user does not have read_policies action" in {
val resourceType = ResourceType(
ResourceTypeName("rt"),
Set.empty,
Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.readAuthDomain))), // any action except read_policies
ResourceRoleName("owner")
)
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
val resourceId = ResourceId("foo")
runAndWait(samRoutes.resourceService.createResource(resourceType, resourceId, samRoutes.userInfo, samRequestContext))
Get(s"/api/resources/v2/${resourceType.name}/${resourceId.value}/allUsers") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "404 when resource or resourceType does not exist" in {
val resourceType = ResourceType(
ResourceTypeName("rt"),
Set(SamResourceActionPatterns.readPolicies),
Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.readPolicies))),
ResourceRoleName("owner")
)
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
val resourceId = ResourceId("foo")
runAndWait(samRoutes.resourceService.createResource(resourceType, resourceId, samRoutes.userInfo, samRequestContext))
Get(s"/api/resources/v2/fakeResourceTypeName/${resourceId.value}/allUsers") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
Get(s"/api/resources/v2/${resourceType.name}/fakeResourceId/allUsers") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
it should "404 when user is not in any of the policies on the resource" in {
val resourceType = ResourceType(
ResourceTypeName("rt"),
Set(SamResourceActionPatterns.readPolicies),
Set(ResourceRole(ResourceRoleName("owner"), Set(SamResourceActions.readPolicies))),
ResourceRoleName("owner")
)
val samRoutes = TestSamRoutes(Map(resourceType.name -> resourceType))
val resourceId = ResourceId("foo")
runAndWait(samRoutes.resourceService.createResource(resourceType, resourceId, samRoutes.userInfo, samRequestContext))
val user = samRoutes.directoryDAO.loadUser(samRoutes.userInfo.userId, samRequestContext).unsafeRunSync().get
val userIdInfo = UserIdInfo(user.id, user.email, user.googleSubjectId)
Get(s"/api/resources/v2/${resourceType.name}/${resourceId.value}/allUsers") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
responseAs[Set[UserIdInfo]].map(_.userSubjectId) shouldEqual Set(userIdInfo.userSubjectId)
}
val otherUserSamRoutes = TestSamRoutes(Map(resourceType.name -> resourceType), UserInfo(OAuth2BearerToken("accessToken"), WorkbenchUserId("user2"), WorkbenchEmail("user2@example.com"), 0))
Get(s"/api/resources/v2/${resourceType.name}/${resourceId.value}/allUsers") ~> otherUserSamRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
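// Stubs the mocked PolicyEvaluatorService so that, on the given resource, the default user appears
// to hold exactly `actionsOnResource`: hasPermissionOneOf returns true only when the requested
// actions overlap that set, and listUserResourceActions returns the set itself.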
private def mockPermissionsForResource(samRoutes: SamRoutes,
resource: FullyQualifiedResourceId,
actionsOnResource: Set[ResourceAction]): Unit = {
val actionAllowed = new ArgumentMatcher[Iterable[ResourceAction]] {
override def matches(argument: Iterable[ResourceAction]): Boolean = actionsOnResource.intersect(argument.toSet).nonEmpty
}
val actionNotAllowed = new ArgumentMatcher[Iterable[ResourceAction]] {
override def matches(argument: Iterable[ResourceAction]): Boolean = actionsOnResource.intersect(argument.toSet).isEmpty
}
when(samRoutes.policyEvaluatorService.hasPermissionOneOf(mockitoEq(resource), argThat(actionAllowed), mockitoEq(defaultUserInfo.userId), any[SamRequestContext])).
thenReturn(IO.pure(true))
when(samRoutes.policyEvaluatorService.hasPermissionOneOf(mockitoEq(resource), argThat(actionNotAllowed), mockitoEq(defaultUserInfo.userId), any[SamRequestContext])).
thenReturn(IO.pure(false))
when(samRoutes.policyEvaluatorService.listUserResourceActions(mockitoEq(resource), mockitoEq(defaultUserInfo.userId), any[SamRequestContext])).
thenReturn(IO.pure(actionsOnResource))
}
// mock out common ResourceService and PolicyEvaluatorService calls to reduce duplication in the /parent tests
private def setupParentRoutes(samRoutes: SamRoutes,
childResource: FullyQualifiedResourceId,
currentParentOpt: Option[FullyQualifiedResourceId] = None,
newParentOpt: Option[FullyQualifiedResourceId] = None,
actionsOnChild: Set[ResourceAction],
actionsOnCurrentParent: Set[ResourceAction] = Set.empty,
actionsOnNewParent: Set[ResourceAction] = Set.empty): Unit = {
// mock responses for child resource
mockPermissionsForResource(samRoutes, childResource, actionsOnResource = actionsOnChild)
// mock responses for current parent resource
currentParentOpt match {
case Some(currentParent) =>
when(samRoutes.resourceService.getResourceParent(mockitoEq(childResource), any[SamRequestContext]))
.thenReturn(IO(Option(currentParent)))
when(samRoutes.resourceService.deleteResourceParent(mockitoEq(childResource), any[SamRequestContext]))
.thenReturn(IO.pure(true))
mockPermissionsForResource(samRoutes, currentParent,
actionsOnResource = actionsOnCurrentParent)
case None =>
when(samRoutes.resourceService.getResourceParent(mockitoEq(childResource), any[SamRequestContext]))
.thenReturn(IO(None))
when(samRoutes.resourceService.deleteResourceParent(mockitoEq(childResource), any[SamRequestContext]))
.thenReturn(IO.pure(false))
}
// mock responses for new parent resource
newParentOpt.foreach { newParent =>
when(samRoutes.resourceService.setResourceParent(mockitoEq(childResource), mockitoEq(newParent), any[SamRequestContext]))
.thenReturn(IO.unit)
mockPermissionsForResource(samRoutes, newParent, actionsOnResource = actionsOnNewParent)
}
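// mock successful deletion when the caller is allowed to delete the child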
if (actionsOnChild.contains(SamResourceActions.delete)) {
when(samRoutes.resourceService.deleteResource(mockitoEq(childResource), any[SamRequestContext])).thenReturn(Future.unit)
}
}
"GET /api/resources/v2/{resourceTypeName}/{resourceId}/parent" should "200 if user has get_parent on resource and resource has parent" in {
val fullyQualifiedChildResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val fullyQualifiedParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("parent"))
val samRoutes = createSamRoutes()
setupParentRoutes(samRoutes, fullyQualifiedChildResource,
currentParentOpt = Option(fullyQualifiedParentResource),
actionsOnChild = Set(SamResourceActions.getParent))
Get(s"/api/resources/v2/${defaultResourceType.name}/${fullyQualifiedChildResource.resourceId.value}/parent") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
responseAs[FullyQualifiedResourceId] shouldEqual fullyQualifiedParentResource
}
}
it should "403 if user is missing get_parent on resource" in {
val fullyQualifiedChildResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val fullyQualifiedParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("parent"))
val samRoutes = createSamRoutes()
setupParentRoutes(samRoutes, fullyQualifiedChildResource, currentParentOpt = Option(fullyQualifiedParentResource),
actionsOnChild = Set(SamResourceActions.readPolicies))
Get(s"/api/resources/v2/${defaultResourceType.name}/${fullyQualifiedChildResource.resourceId.value}/parent") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "404 if resource has no parent" in {
val fullyQualifiedChildResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val samRoutes = createSamRoutes()
setupParentRoutes(samRoutes, fullyQualifiedChildResource, None, actionsOnChild = Set(SamResourceActions.getParent))
Get(s"/api/resources/v2/${defaultResourceType.name}/${fullyQualifiedChildResource.resourceId.value}/parent") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
it should "404 if user doesn't have access to resource" in {
val fullyQualifiedChildResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val samRoutes = createSamRoutes()
setupParentRoutes(samRoutes, fullyQualifiedChildResource, actionsOnChild = Set.empty)
Get(s"/api/resources/v2/${defaultResourceType.name}/${fullyQualifiedChildResource.resourceId.value}/parent") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
"PUT /api/resources/v2/{resourceTypeName}/{resourceId}/parent" should "204 on success when there is not a parent already set" in {
val fullyQualifiedChildResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val fullyQualifiedParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("parent"))
val samRoutes = createSamRoutes()
setupParentRoutes(samRoutes, fullyQualifiedChildResource, newParentOpt = Option(fullyQualifiedParentResource),
actionsOnChild = Set(SamResourceActions.setParent),
actionsOnNewParent = Set(SamResourceActions.addChild))
Put(s"/api/resources/v2/${defaultResourceType.name}/${fullyQualifiedChildResource.resourceId.value}/parent", fullyQualifiedParentResource) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
}
it should "204 on success when there is a parent already set" in {
val fullyQualifiedChildResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val newParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("newParent"))
val currentParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("currentParent"))
val samRoutes = createSamRoutes()
setupParentRoutes(samRoutes, fullyQualifiedChildResource,
currentParentOpt = Option(currentParentResource),
newParentOpt = Option(newParentResource),
actionsOnChild = Set(SamResourceActions.setParent),
actionsOnCurrentParent = Set(SamResourceActions.removeChild),
actionsOnNewParent = Set(SamResourceActions.addChild))
Put(s"/api/resources/v2/${defaultResourceType.name}/${fullyQualifiedChildResource.resourceId.value}/parent", newParentResource) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
}
it should "403 if user is missing set_parent on child resource" in {
val fullyQualifiedChildResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val newParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("newParent"))
val currentParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("currentParent"))
val samRoutes = createSamRoutes()
setupParentRoutes(samRoutes, fullyQualifiedChildResource,
currentParentOpt = Option(currentParentResource),
newParentOpt = Option(newParentResource),
actionsOnChild = Set(SamResourceActions.readPolicies),
actionsOnCurrentParent = Set(SamResourceActions.removeChild),
actionsOnNewParent = Set(SamResourceActions.addChild))
Put(s"/api/resources/v2/${defaultResourceType.name}/${fullyQualifiedChildResource.resourceId.value}/parent", newParentResource) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "403 if user is missing add_child on new parent resource" in {
val fullyQualifiedChildResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val newParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("newParent"))
val currentParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("currentParent"))
val samRoutes = createSamRoutes()
setupParentRoutes(samRoutes, fullyQualifiedChildResource,
currentParentOpt = Option(currentParentResource),
newParentOpt = Option(newParentResource),
actionsOnChild = Set(SamResourceActions.setParent),
actionsOnCurrentParent = Set(SamResourceActions.removeChild),
actionsOnNewParent = Set(SamResourceActions.readPolicies))
Put(s"/api/resources/v2/${defaultResourceType.name}/${fullyQualifiedChildResource.resourceId.value}/parent", newParentResource) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "403 if user is missing remove_child on existing parent resource" in {
val fullyQualifiedChildResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val newParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("newParent"))
val currentParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("currentParent"))
val samRoutes = createSamRoutes()
setupParentRoutes(samRoutes, fullyQualifiedChildResource,
currentParentOpt = Option(currentParentResource),
newParentOpt = Option(newParentResource),
actionsOnChild = Set(SamResourceActions.setParent),
actionsOnCurrentParent = Set(SamResourceActions.readPolicies),
actionsOnNewParent = Set(SamResourceActions.addChild))
Put(s"/api/resources/v2/${defaultResourceType.name}/${fullyQualifiedChildResource.resourceId.value}/parent", newParentResource) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "404 if user doesn't have access to child resource" in {
val fullyQualifiedChildResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val newParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("newParent"))
val currentParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("currentParent"))
val samRoutes = createSamRoutes()
setupParentRoutes(samRoutes, fullyQualifiedChildResource,
currentParentOpt = Option(currentParentResource),
newParentOpt = Option(newParentResource),
actionsOnChild = Set.empty,
actionsOnCurrentParent = Set(SamResourceActions.removeChild),
actionsOnNewParent = Set(SamResourceActions.addChild))
Put(s"/api/resources/v2/${defaultResourceType.name}/${fullyQualifiedChildResource.resourceId.value}/parent", newParentResource) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
it should "403 if user doesn't have access to new parent resource" in {
val fullyQualifiedChildResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val newParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("newParent"))
val currentParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("currentParent"))
val samRoutes = createSamRoutes()
setupParentRoutes(samRoutes, fullyQualifiedChildResource,
currentParentOpt = Option(currentParentResource),
newParentOpt = Option(newParentResource),
actionsOnChild = Set(SamResourceActions.setParent),
actionsOnCurrentParent = Set(SamResourceActions.removeChild),
actionsOnNewParent = Set(SamResourceActions.readPolicies))
Put(s"/api/resources/v2/${defaultResourceType.name}/${fullyQualifiedChildResource.resourceId.value}/parent", newParentResource) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "403 if the new parent resource does not exist" in {
val fullyQualifiedChildResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val nonexistentParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("nonexistentParent"))
val samRoutes = createSamRoutes()
setupParentRoutes(samRoutes, fullyQualifiedChildResource,
currentParentOpt = None,
newParentOpt = Option(nonexistentParentResource),
actionsOnChild = Set(SamResourceActions.setParent),
actionsOnNewParent = Set(SamResourceActions.readPolicies)
)
Put(s"/api/resources/v2/${defaultResourceType.name}/${fullyQualifiedChildResource.resourceId.value}/parent", nonexistentParentResource) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "403 if user doesn't have access to existing parent resource" in {
val fullyQualifiedChildResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val newParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("newParent"))
val currentParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("currentParent"))
val samRoutes = createSamRoutes()
setupParentRoutes(samRoutes, fullyQualifiedChildResource,
currentParentOpt = Option(currentParentResource),
newParentOpt = Option(newParentResource),
actionsOnChild = Set(SamResourceActions.setParent),
actionsOnCurrentParent = Set.empty,
actionsOnNewParent = Set(SamResourceActions.addChild))
Put(s"/api/resources/v2/${defaultResourceType.name}/${fullyQualifiedChildResource.resourceId.value}/parent", newParentResource) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
"DELETE /api/resources/v2/{resourceTypeName}/{resourceId}/parent" should "204 on success" in {
val fullyQualifiedChildResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val currentParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("currentParent"))
val samRoutes = createSamRoutes()
setupParentRoutes(samRoutes, fullyQualifiedChildResource,
currentParentOpt = Option(currentParentResource),
actionsOnChild = Set(SamResourceActions.setParent),
actionsOnCurrentParent = Set(SamResourceActions.removeChild))
Delete(s"/api/resources/v2/${defaultResourceType.name}/${fullyQualifiedChildResource.resourceId.value}/parent") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
}
it should "403 if user is missing set_parent on child resource" in {
val fullyQualifiedChildResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val currentParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("currentParent"))
val samRoutes = createSamRoutes()
setupParentRoutes(samRoutes, fullyQualifiedChildResource,
currentParentOpt = Option(currentParentResource),
actionsOnChild = Set(SamResourceActions.readPolicies),
actionsOnCurrentParent = Set(SamResourceActions.removeChild))
Delete(s"/api/resources/v2/${defaultResourceType.name}/${fullyQualifiedChildResource.resourceId.value}/parent") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "403 if user is missing remove_child on parent resource if it exists" in {
val fullyQualifiedChildResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val currentParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("currentParent"))
val samRoutes = createSamRoutes()
setupParentRoutes(samRoutes, fullyQualifiedChildResource,
currentParentOpt = Option(currentParentResource),
actionsOnChild = Set(SamResourceActions.setParent),
actionsOnCurrentParent = Set(SamResourceActions.readPolicies))
Delete(s"/api/resources/v2/${defaultResourceType.name}/${fullyQualifiedChildResource.resourceId.value}/parent") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "404 if resource has no parent" in {
val fullyQualifiedChildResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val samRoutes = createSamRoutes()
setupParentRoutes(samRoutes, fullyQualifiedChildResource,
actionsOnChild = Set(SamResourceActions.setParent))
Delete(s"/api/resources/v2/${defaultResourceType.name}/${fullyQualifiedChildResource.resourceId.value}/parent") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
it should "404 if user doesn't have access to child resource" in {
val fullyQualifiedChildResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val currentParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("currentParent"))
val samRoutes = createSamRoutes()
setupParentRoutes(samRoutes, fullyQualifiedChildResource,
currentParentOpt = Option(currentParentResource),
actionsOnChild = Set.empty,
actionsOnCurrentParent = Set(SamResourceActions.removeChild))
Delete(s"/api/resources/v2/${defaultResourceType.name}/${fullyQualifiedChildResource.resourceId.value}/parent") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
it should "403 if user doesn't have access to existing parent resource" in {
val fullyQualifiedChildResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child"))
val currentParentResource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("currentParent"))
val samRoutes = createSamRoutes()
setupParentRoutes(samRoutes, fullyQualifiedChildResource,
currentParentOpt = Option(currentParentResource),
actionsOnChild = Set(SamResourceActions.setParent),
actionsOnCurrentParent = Set.empty)
Delete(s"/api/resources/v2/${defaultResourceType.name}/${fullyQualifiedChildResource.resourceId.value}/parent") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
"GET /api/resources/v2/{resourceTypeName}/{resourceId}/children" should "200 with list of children FullyQualifiedResourceIds on success" in {
val child1 = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child1"))
val child2 = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("child2"))
val parent = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("parent"))
val samRoutes = createSamRoutes()
mockPermissionsForResource(samRoutes, parent, actionsOnResource = Set(SamResourceActions.listChildren))
when(samRoutes.resourceService.listResourceChildren(mockitoEq(parent), any[SamRequestContext]))
.thenReturn(IO(Set(child1, child2)))
Get(s"/api/resources/v2/${defaultResourceType.name}/${parent.resourceId.value}/children") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
responseAs[Set[FullyQualifiedResourceId]] shouldEqual Set(child1, child2)
}
}
it should "403 if user is missing list_children on the parent resource" in {
val parent = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("parent"))
val otherPolicy = AccessPolicyWithoutMembers(FullyQualifiedPolicyId(parent, AccessPolicyName("not_owner")), WorkbenchEmail(""), Set.empty, Set.empty, false)
val samRoutes = createSamRoutes()
mockPermissionsForResource(samRoutes, parent, actionsOnResource = Set(SamResourceActions.readPolicies))
Get(s"/api/resources/v2/${defaultResourceType.name}/${parent.resourceId.value}/children") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "404 if user doesn't have access to parent resource" in {
val parent = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("parent"))
val samRoutes = createSamRoutes()
mockPermissionsForResource(samRoutes, parent,
actionsOnResource = Set.empty)
Get(s"/api/resources/v2/${defaultResourceType.name}/${parent.resourceId.value}/children") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
"DELETE /api/resources/v2/{resourceTypeName}/{resourceId}/policies/{policyName}" should "204 on success" in {
val resource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("resource"))
val policyToDelete = FullyQualifiedPolicyId(resource, AccessPolicyName("policyToDelete"))
val samRoutes = createSamRoutes()
mockPermissionsForResource(samRoutes, resource,
actionsOnResource = Set(SamResourceActions.alterPolicies, SamResourceActions.deletePolicy(policyToDelete.accessPolicyName)))
when(samRoutes.resourceService.deletePolicy(mockitoEq(policyToDelete), any[SamRequestContext]))
.thenReturn(IO.unit)
Delete(s"/api/resources/v2/${resource.resourceTypeName}/${resource.resourceId}/policies/${policyToDelete.accessPolicyName}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
}
it should "403 if user is missing both alter_policies and delete_policy on the resource" in {
val resource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("resource"))
val policyToDelete = FullyQualifiedPolicyId(resource, AccessPolicyName("policyToDelete"))
val otherPolicy = AccessPolicyWithoutMembers(FullyQualifiedPolicyId(resource, AccessPolicyName("not_owner")), WorkbenchEmail(""), Set.empty, Set.empty, false)
val samRoutes = createSamRoutes()
mockPermissionsForResource(samRoutes, resource,
actionsOnResource = Set(SamResourceActions.readPolicies))
Delete(s"/api/resources/v2/${resource.resourceTypeName}/${resource.resourceId}/policies/${policyToDelete.accessPolicyName}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "404 if user doesn't have access to the resource" in {
val resource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("resource"))
val policyToDelete = FullyQualifiedPolicyId(resource, AccessPolicyName("policyToDelete"))
val samRoutes = createSamRoutes()
mockPermissionsForResource(samRoutes, resource,
actionsOnResource = Set.empty)
Delete(s"/api/resources/v2/${resource.resourceTypeName}/${resource.resourceId}/policies/${policyToDelete.accessPolicyName}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
"GET /api/resources/v2/{resourceType}/{resourceId}/policies/{policyName}" should "200 on existing policy of a resource with read_policies" in {
val members = AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set.empty, Set.empty, None)
val resource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("resource"))
val policyName = AccessPolicyName("policy")
val samRoutes = createSamRoutes()
mockPermissionsForResource(samRoutes, resource,
actionsOnResource = Set(SamResourceActions.readPolicies))
// mock response to load policy
when(samRoutes.resourceService.loadResourcePolicy(mockitoEq(FullyQualifiedPolicyId(resource, policyName)), any[SamRequestContext]))
.thenReturn(IO(Option(members)))
Get(s"/api/resources/v2/${resource.resourceTypeName}/${resource.resourceId}/policies/${policyName.value}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
responseAs[AccessPolicyMembership] shouldEqual members
}
}
it should "200 on existing policy if user can read just that policy" in {
val members = AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set.empty, Set.empty, None)
val resource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("resource"))
val policyName = AccessPolicyName("policy")
val samRoutes = createSamRoutes()
mockPermissionsForResource(samRoutes, resource,
actionsOnResource = Set(SamResourceActions.readPolicy(policyName)))
// mock response to load policy
when(samRoutes.resourceService.loadResourcePolicy(mockitoEq(FullyQualifiedPolicyId(resource, policyName)), any[SamRequestContext]))
.thenReturn(IO(Option(members)))
Get(s"/api/resources/v2/${resource.resourceTypeName}/${resource.resourceId}/policies/${policyName.value}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
responseAs[AccessPolicyMembership] shouldEqual members
}
}
it should "403 on existing policy of a resource without read policies" in {
val resource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("resource"))
val policyName = AccessPolicyName("policy")
val samRoutes = createSamRoutes()
mockPermissionsForResource(samRoutes, resource,
actionsOnResource = Set(SamResourceActions.delete))
Get(s"/api/resources/v2/${resource.resourceTypeName}/${resource.resourceId}/policies/${policyName.value}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "404 on non existing policy of a resource" in {
val resource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("resource"))
val policyName = AccessPolicyName("policy")
val samRoutes = createSamRoutes()
mockPermissionsForResource(samRoutes, resource,
actionsOnResource = Set.empty)
Get(s"/api/resources/v2/${resource.resourceTypeName}/${resource.resourceId}/policies/${policyName.value}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
"PUT /api/resources/v2/{resourceType}/{resourceId}/policies/{policyName}" should "201 on a new policy being created for a resource" in {
val resource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("resource"))
val policyName = AccessPolicyName("policy")
val members = AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(ResourceAction("can_compute")), Set.empty, None)
val policy = AccessPolicy(FullyQualifiedPolicyId(resource, policyName), Set(defaultUserInfo.userId), WorkbenchEmail("policy@example.com"), members.roles, members.actions, members.getDescendantPermissions, false)
val samRoutes = createSamRoutes()
mockPermissionsForResource(samRoutes, resource,
actionsOnResource = Set(SamResourceActions.alterPolicies))
when(samRoutes.resourceService.overwritePolicy(any[ResourceType], mockitoEq(policyName), mockitoEq(resource), mockitoEq(members), any[SamRequestContext]))
.thenReturn(IO(policy))
Put(s"/api/resources/v2/${resource.resourceTypeName}/${resource.resourceId}/policies/${policyName}", members) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Created
}
}
it should "201 on a policy being updated" in {
val resource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("resource"))
val policyName = AccessPolicyName("policy")
val members = AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(ResourceAction("can_compute")), Set.empty, None)
val policy = AccessPolicy(FullyQualifiedPolicyId(resource, policyName), Set(defaultUserInfo.userId), WorkbenchEmail("policy@example.com"), members.roles, members.actions, members.getDescendantPermissions, false)
val samRoutes = createSamRoutes()
mockPermissionsForResource(samRoutes, resource,
actionsOnResource = Set(SamResourceActions.alterPolicies))
when(samRoutes.resourceService.overwritePolicy(any[ResourceType], mockitoEq(policyName), mockitoEq(resource), mockitoEq(members), any[SamRequestContext]))
.thenReturn(IO(policy))
Put(s"/api/resources/v2/${resource.resourceTypeName}/${resource.resourceId}/policies/${policyName}", members) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Created
}
// update existing policy
val members2 = AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(ResourceAction("can_compute"), ResourceAction("new_action")), Set.empty, None)
val policy2 = AccessPolicy(FullyQualifiedPolicyId(resource, policyName), Set(defaultUserInfo.userId), WorkbenchEmail("policy@example.com"), members2.roles, members2.actions, members2.getDescendantPermissions, false)
when(samRoutes.resourceService.overwritePolicy(any[ResourceType], mockitoEq(policyName), mockitoEq(resource), mockitoEq(members2), any[SamRequestContext]))
.thenReturn(IO(policy2))
Put(s"/api/resources/v2/${resource.resourceTypeName}/${resource.resourceId}/policies/${policyName}", members2) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Created
}
}
it should "400 when creating an invalid policy" in {
val resource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("resource"))
val policyName = AccessPolicyName("policy")
val members = AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(ResourceAction("can_compute")), Set.empty, None)
val samRoutes = createSamRoutes()
mockPermissionsForResource(samRoutes, resource,
actionsOnResource = Set(SamResourceActions.alterPolicies))
when(samRoutes.resourceService.overwritePolicy(any[ResourceType], mockitoEq(policyName), mockitoEq(resource), mockitoEq(members), any[SamRequestContext]))
.thenReturn(IO.raiseError(new WorkbenchExceptionWithErrorReport(ErrorReport(StatusCodes.BadRequest, "You have specified an invalid policy"))))
Put(s"/api/resources/v2/${resource.resourceTypeName}/${resource.resourceId}/policies/${policyName}", members) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.BadRequest
}
}
it should "403 when creating a policy on a resource when the user doesn't have alter_policies permission (but can see the resource)" in {
val resource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("resource"))
val policyName = AccessPolicyName("policy")
val members = AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(ResourceAction("can_compute")), Set.empty, None)
val samRoutes = createSamRoutes()
mockPermissionsForResource(samRoutes, resource,
actionsOnResource = Set(SamResourceActions.readPolicies))
Put(s"/api/resources/v2/${resource.resourceTypeName}/${resource.resourceId}/policies/${policyName}", members) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "404 when creating a policy on a resource that the user doesn't have permission to see" in {
val resource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("resource"))
val policyName = AccessPolicyName("policy")
val members = AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(ResourceAction("can_compute")), Set.empty, None)
val samRoutes = createSamRoutes()
mockPermissionsForResource(samRoutes, resource,
actionsOnResource = Set.empty)
Put(s"/api/resources/v2/${resource.resourceTypeName}/${resource.resourceId}/policies/${policyName}", members) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
"GET /api/resources/v2/{resourceType}/{resourceId}/policies" should "200 when listing policies for a resource and user has read_policies permission" in {
val resource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("resource"))
val policyName = AccessPolicyName("policy")
val members = AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(ResourceAction("can_compute")), Set.empty, None)
val response = AccessPolicyResponseEntry(policyName, members, WorkbenchEmail("policy@example.com"))
val samRoutes = createSamRoutes()
mockPermissionsForResource(samRoutes, resource,
actionsOnResource = Set(SamResourceActions.readPolicies))
when(samRoutes.resourceService.listResourcePolicies(mockitoEq(resource), any[SamRequestContext]))
.thenReturn(IO(LazyList(response)))
Get(s"/api/resources/v2/${resource.resourceTypeName}/${resource.resourceId}/policies") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
}
}
it should "403 when listing policies for a resource and user lacks read_policies permission (but can see the resource)" in {
val resource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("resource"))
val policyName = AccessPolicyName("policy")
val members = AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(ResourceAction("can_compute")), Set.empty, None)
val response = AccessPolicyResponseEntry(policyName, members, WorkbenchEmail("policy@example.com"))
val samRoutes = createSamRoutes()
mockPermissionsForResource(samRoutes, resource,
actionsOnResource = Set(SamResourceActions.delete))
when(samRoutes.resourceService.listResourcePolicies(mockitoEq(resource), any[SamRequestContext]))
.thenReturn(IO(LazyList(response)))
Get(s"/api/resources/v2/${resource.resourceTypeName}/${resource.resourceId}/policies") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Forbidden
}
}
it should "404 when listing policies for a resource when user can't see the resource" in {
val resource = FullyQualifiedResourceId(defaultResourceType.name, ResourceId("resource"))
val policyName = AccessPolicyName("policy")
val members = AccessPolicyMembership(Set(defaultUserInfo.userEmail), Set(ResourceAction("can_compute")), Set.empty, None)
val response = AccessPolicyResponseEntry(policyName, members, WorkbenchEmail("policy@example.com"))
val samRoutes = createSamRoutes()
mockPermissionsForResource(samRoutes, resource,
actionsOnResource = Set.empty)
when(samRoutes.resourceService.listResourcePolicies(mockitoEq(resource), any[SamRequestContext]))
.thenReturn(IO(LazyList(response)))
Get(s"/api/resources/v2/${resource.resourceTypeName}/${resource.resourceId}/policies") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
}
|
broadinstitute/sam
|
src/test/scala/org/broadinstitute/dsde/workbench/sam/api/ResourceRoutesV2Spec.scala
|
Scala
|
bsd-3-clause
| 94,070
|
/*
* Copyright (c) 2017-2022 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.eventpopulator
// joda time
import org.joda.time.DateTime
// specs2
import org.specs2.Specification
class MainSpec extends Specification { def is = s2"""
This is a specification for the Event Populator CLI parser
Main class should correctly parse S3 path and dynamodb options $e1
Main class should correctly parse since option $e2
Main class should reject dynamodb config invalidated by Iglu resolver $e3
"""
def e1 = {
val argv =
("--enriched-archive s3://enriched/archive " +
"--storage-config eyJzY2hlbWEiOiJpZ2x1OmNvbS5zbm93cGxvd2FuYWx5dGljcy5zbm93cGxvdy5zdG9yYWdlL2FtYXpvbl9keW5hbW9kYl9jb25maWcvanNvbnNjaGVtYS8xLTAtMCIsImRhdGEiOnsibmFtZSI6IkFXUyBEeW5hbW9EQiBkdXBsaWNhdGVzIHN0b3JhZ2UiLCJhY2Nlc3NLZXlJZCI6IkFERCBIRVJFIiwic2VjcmV0QWNjZXNzS2V5IjoiQUREIEhFUkUiLCJhd3NSZWdpb24iOiJBREQgSEVSRSIsImR5bmFtb2RiVGFibGUiOiJBREQgSEVSRSIsInB1cnBvc2UiOiJEVVBMSUNBVEVfVFJBQ0tJTkcifX0= " +
"--resolver eyJzY2hlbWEiOiJpZ2x1OmNvbS5zbm93cGxvd2FuYWx5dGljcy5pZ2x1L3Jlc29sdmVyLWNvbmZpZy9qc29uc2NoZW1hLzEtMC0yIiwiZGF0YSI6eyJjYWNoZVNpemUiOjUwMCwicmVwb3NpdG9yaWVzIjpbeyJuYW1lIjoiSWdsdSBDZW50cmFsIiwicHJpb3JpdHkiOjAsInZlbmRvclByZWZpeGVzIjpbImNvbS5zbm93cGxvd2FuYWx5dGljcyJdLCJjb25uZWN0aW9uIjp7Imh0dHAiOnsidXJpIjoiaHR0cDovL2lnbHVjZW50cmFsLmNvbSJ9fX1dfX0="
).split(" ")
val jobConf = Main.parse(argv).flatMap(_.toOption)
val expectedS3Path = "enriched/archive/"
val expectedStorageName = "AWS DynamoDB duplicates storage"
val pathResult = jobConf.map(_.enrichedInBucket) must beSome(expectedS3Path)
val nameResult = jobConf.map(_.storageConfig.asInstanceOf[DuplicateStorage.DynamoDbConfig].name) must beSome(expectedStorageName)
pathResult.and(nameResult)
}
def e2 = {
val argv =
("--enriched-archive s3://enriched/archive " +
"--since 2016-12-10 " +
"--storage-config eyJzY2hlbWEiOiJpZ2x1OmNvbS5zbm93cGxvd2FuYWx5dGljcy5zbm93cGxvdy5zdG9yYWdlL2FtYXpvbl9keW5hbW9kYl9jb25maWcvanNvbnNjaGVtYS8xLTAtMCIsImRhdGEiOnsibmFtZSI6IkFXUyBEeW5hbW9EQiBkdXBsaWNhdGVzIHN0b3JhZ2UiLCJhY2Nlc3NLZXlJZCI6IkFERCBIRVJFIiwic2VjcmV0QWNjZXNzS2V5IjoiQUREIEhFUkUiLCJhd3NSZWdpb24iOiJBREQgSEVSRSIsImR5bmFtb2RiVGFibGUiOiJBREQgSEVSRSIsInB1cnBvc2UiOiJEVVBMSUNBVEVfVFJBQ0tJTkcifX0= " +
"--resolver eyJzY2hlbWEiOiJpZ2x1OmNvbS5zbm93cGxvd2FuYWx5dGljcy5pZ2x1L3Jlc29sdmVyLWNvbmZpZy9qc29uc2NoZW1hLzEtMC0yIiwiZGF0YSI6eyJjYWNoZVNpemUiOjUwMCwicmVwb3NpdG9yaWVzIjpbeyJuYW1lIjoiSWdsdSBDZW50cmFsIiwicHJpb3JpdHkiOjAsInZlbmRvclByZWZpeGVzIjpbImNvbS5zbm93cGxvd2FuYWx5dGljcyJdLCJjb25uZWN0aW9uIjp7Imh0dHAiOnsidXJpIjoiaHR0cDovL2lnbHVjZW50cmFsLmNvbSJ9fX1dfX0="
).split(" ")
val expectedSince = DateTime.parse("2016-12-10")
val since = Main.parse(argv).flatMap(_.toOption).flatMap(_.since)
since must beSome(expectedSince)
}
def e3 = {
val argv =
("--enriched-archive s3://enriched/archive " +
"--storage-config eyJzY2hlbWEiOiAiaWdsdTpjb20uc25vd3Bsb3dhbmFseXRpY3Muc25vd3Bsb3cuc3RvcmFnZS9hbWF6b25fZHluYW1vZGJfY29uZmlnL2pzb25zY2hlbWEvMS0wLTAiLCAiZGF0YSI6IHt9fQ== " +
"--resolver eyJzY2hlbWEiOiJpZ2x1OmNvbS5zbm93cGxvd2FuYWx5dGljcy5pZ2x1L3Jlc29sdmVyLWNvbmZpZy9qc29uc2NoZW1hLzEtMC0yIiwiZGF0YSI6eyJjYWNoZVNpemUiOjUwMCwicmVwb3NpdG9yaWVzIjpbeyJuYW1lIjoiSWdsdSBDZW50cmFsIiwicHJpb3JpdHkiOjAsInZlbmRvclByZWZpeGVzIjpbImNvbS5zbm93cGxvd2FuYWx5dGljcyJdLCJjb25uZWN0aW9uIjp7Imh0dHAiOnsidXJpIjoiaHR0cDovL2lnbHVjZW50cmFsLmNvbSJ9fX1dfX0="
).split(" ")
val jobConf = Main.parse(argv).get.toEither
jobConf must beLeft
}
}
|
snowplow/snowplow
|
5-data-modeling/event-manifest-populator/src/test/scala/com/snowplowanalytics/snowplow/eventpopulator/MainSpec.scala
|
Scala
|
apache-2.0
| 4,254
|
package ch3
import scala.annotation.tailrec
import List._
object Exercise18 {
def map[A,B](ls: List[A])(f: A => B): List[B] = foldRight(ls, Nil:List[B])((x, acc) => Cons(f(x), acc))
}
import Exercise18._
/*
from repl you can test typing:
:load src/main/scala/fpinscala/ch3/List.scala
:load src/main/scala/fpinscala/ch3/Exercise18.scala
map(List(1,2,3))(_.toString)
*/
|
rucka/fpinscala
|
src/main/scala/fpinscala/ch3/Exercise18.scala
|
Scala
|
gpl-2.0
| 379
|
/*
* Copyright 2020 ABSA Group Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package za.co.absa.spline.gateway.rest.filter
import java.util.zip.GZIPInputStream
import javax.servlet.{ReadListener, ServletInputStream}
final class GZIPServletInputStream(val inputStream: ServletInputStream) extends ServletInputStream {
val gzipStream = new GZIPInputStream(inputStream)
override def read: Int = gzipStream.read
override def read(b: Array[Byte]): Int = gzipStream.read(b)
override def read(b: Array[Byte], off: Int, len: Int): Int = gzipStream.read(b, off, len)
override def available: Int = gzipStream.available
override def close(): Unit = gzipStream.close()
override def isFinished: Boolean = gzipStream.available() == 0
override def isReady: Boolean = true
override def setReadListener(readListener: ReadListener): Unit = throw new UnsupportedOperationException
}
|
AbsaOSS/spline
|
rest-gateway/src/main/scala/za/co/absa/spline/gateway/rest/filter/GZIPServletInputStream.scala
|
Scala
|
apache-2.0
| 1,422
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.message
import java.io.{InputStream, OutputStream}
import java.nio.ByteBuffer
import org.apache.kafka.common.utils.Crc32
class MessageWriter(segmentSize: Int) extends BufferingOutputStream(segmentSize) {
import Message._
def write(key: Array[Byte] = null, codec: CompressionCodec)(writePayload: OutputStream => Unit): Unit = {
withCrc32Prefix {
write(CurrentMagicValue)
var attributes: Byte = 0
if (codec.codec > 0)
attributes = (attributes | (CompressionCodeMask & codec.codec)).toByte
write(attributes)
// write the key
if (key == null) {
writeInt(-1)
} else {
writeInt(key.length)
write(key, 0, key.length)
}
// write the payload with length prefix
withLengthPrefix {
writePayload(this)
}
}
}
private def writeInt(value: Int): Unit = {
write(value >>> 24)
write(value >>> 16)
write(value >>> 8)
write(value)
}
private def writeInt(out: ReservedOutput, value: Int): Unit = {
out.write(value >>> 24)
out.write(value >>> 16)
out.write(value >>> 8)
out.write(value)
}
private def withCrc32Prefix(writeData: => Unit): Unit = {
// get a writer for CRC value
val crcWriter = reserve(CrcLength)
// save current position
var seg = currentSegment
val offset = currentSegment.written
// write data
writeData
// compute CRC32
val crc = new Crc32()
if (offset < seg.written) crc.update(seg.bytes, offset, seg.written - offset)
seg = seg.next
while (seg != null) {
if (seg.written > 0) crc.update(seg.bytes, 0, seg.written)
seg = seg.next
}
// write CRC32
writeInt(crcWriter, crc.getValue().toInt)
}
private def withLengthPrefix(writeData: => Unit): Unit = {
// get a writer for length value
val lengthWriter = reserve(ValueSizeLength)
// save current size
val oldSize = size
// write data
writeData
// write length value
writeInt(lengthWriter, size - oldSize)
}
}
/*
* OutputStream that buffers incoming data in segmented byte arrays
* This does not copy data when expanding its capacity
* It has the ability to
* - write data directly to a ByteBuffer
* - copy data from an input stream directly into its internal segmented arrays
* - hold a placeholder for an unknown value that can be filled in later
* (see the usage sketch after this class)
*/
class BufferingOutputStream(segmentSize: Int) extends OutputStream {
protected final class Segment(size: Int) {
val bytes = new Array[Byte](size)
var written = 0
var next: Segment = null
def freeSpace: Int = bytes.length - written
}
protected class ReservedOutput(seg: Segment, offset: Int, length: Int) extends OutputStream {
private[this] var cur = seg
private[this] var off = offset
private[this] var len = length
override def write(value: Int) = {
if (len <= 0) throw new IndexOutOfBoundsException()
if (cur.bytes.length <= off) {
cur = cur.next
off = 0
}
cur.bytes(off) = value.toByte
off += 1
len -= 1
}
}
protected var currentSegment = new Segment(segmentSize)
private[this] val headSegment = currentSegment
private[this] var filled = 0
def size(): Int = filled + currentSegment.written
override def write(b: Int): Unit = {
if (currentSegment.freeSpace <= 0) addSegment()
currentSegment.bytes(currentSegment.written) = b.toByte
currentSegment.written += 1
}
override def write(b: Array[Byte], off: Int, len: Int) {
if (off >= 0 && off <= b.length && len >= 0 && off + len <= b.length) {
var remaining = len
var offset = off
while (remaining > 0) {
if (currentSegment.freeSpace <= 0) addSegment()
val amount = math.min(currentSegment.freeSpace, remaining)
System.arraycopy(b, offset, currentSegment.bytes, currentSegment.written, amount)
currentSegment.written += amount
offset += amount
remaining -= amount
}
} else {
throw new IndexOutOfBoundsException()
}
}
def write(in: InputStream): Unit = {
var amount = 0
while (amount >= 0) {
currentSegment.written += amount
if (currentSegment.freeSpace <= 0) addSegment()
amount = in.read(currentSegment.bytes, currentSegment.written, currentSegment.freeSpace)
}
}
private def addSegment() = {
filled += currentSegment.written
val newSeg = new Segment(segmentSize)
currentSegment.next = newSeg
currentSegment = newSeg
}
private def skip(len: Int): Unit = {
if (len >= 0) {
var remaining = len
while (remaining > 0) {
if (currentSegment.freeSpace <= 0) addSegment()
val amount = math.min(currentSegment.freeSpace, remaining)
currentSegment.written += amount
remaining -= amount
}
} else {
throw new IndexOutOfBoundsException()
}
}
def reserve(len: Int): ReservedOutput = {
val out = new ReservedOutput(currentSegment, currentSegment.written, len)
skip(len)
out
}
def writeTo(buffer: ByteBuffer): Unit = {
var seg = headSegment
while (seg != null) {
buffer.put(seg.bytes, 0, seg.written)
seg = seg.next
}
}
}
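// A minimal usage sketch of the buffering behaviour described above (illustrative only,
// not part of the original file): bytes accumulate across fixed-size segments without
// copying, and are drained into a ByteBuffer at the end.
//
//   val out = new BufferingOutputStream(segmentSize = 8)
//   out.write("hello world".getBytes("UTF-8"))    // spans two 8-byte segments
//   val buf = java.nio.ByteBuffer.allocate(out.size)
//   out.writeTo(buf)                              // copies every segment, in order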
|
yobennett/kafka
|
core/src/main/scala/kafka/message/MessageWriter.scala
|
Scala
|
apache-2.0
| 6,050
|
/**
* Copyright (c) 2015 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.trustedanalytics.sparktk.frame.internal.ops.statistics.correlation
import org.trustedanalytics.sparktk.frame.{ FrameSchema, Column, DataTypes }
import org.trustedanalytics.sparktk.frame.internal.rdd.FrameRdd
import org.trustedanalytics.sparktk.testutils.TestingSparkContextWordSpec
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.expressions.GenericRow
import org.scalatest.Matchers
import org.apache.commons.lang.StringUtils
class CorrelationMatrixTest extends TestingSparkContextWordSpec with Matchers {
"correlation matrix calculations" should {
"return the correct values" in {
val inputArray: Array[Array[Double]] = Array(Array(90.0, 60.0, 90.0), Array(90.0, 90.0, 30.0), Array(60.0, 60.0, 60.0), Array(60.0, 60.0, 90.0), Array(30.0, 30.0, 30.0))
val arrGenericRow: Array[Row] = inputArray.map(row => {
val temp: Array[Any] = row.map(x => x)
new GenericRow(temp)
})
val rdd = sparkContext.parallelize(arrGenericRow)
val columnsList = List("col_0", "col_1", "col_2")
val inputDataColumnNamesAndTypes: Vector[Column] = columnsList.map({ name => Column(name, DataTypes.float64) }).toVector
val schema = FrameSchema(inputDataColumnNamesAndTypes)
val frameRdd = new FrameRdd(schema, rdd)
val result = CorrelationFunctions.correlationMatrix(frameRdd, columnsList).collect()
result.size shouldBe 3
result(0) shouldBe Row(1.0, 0.8451542547285167, 0.2988071523335984)
result(1) shouldBe Row(0.8451542547285167, 1.0, 0.0)
result(2) shouldBe Row(0.2988071523335984, 0.0, 1.0)
}
}
}
|
shibanis1/spark-tk
|
core/src/test/scala/org/trustedanalytics/sparktk/frame/internal/ops/statistics/correlation/CorrelationMatrixTest.scala
|
Scala
|
apache-2.0
| 2,318
|
/*
* Copyright 2009-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.linkedin.norbert.javacompat
package network
import cluster.BaseClusterClient
import com.linkedin.norbert.cluster.ClusterClient
import com.linkedin.norbert.network.Serializer
import com.linkedin.norbert.network.server.CallbackContext
class NettyNetworkServer(config: NetworkServerConfig) extends NetworkServer {
val c = new com.linkedin.norbert.network.netty.NetworkServerConfig
c.clusterClient = if (config.getClusterClient != null)
config.getClusterClient.asInstanceOf[BaseClusterClient].underlying
else ClusterClient(null, config.getServiceName, config.getZooKeeperConnectString, config.getZooKeeperSessionTimeoutMillis)
c.zooKeeperSessionTimeoutMillis = config.getZooKeeperSessionTimeoutMillis
c.requestThreadCorePoolSize = config.getRequestThreadCorePoolSize
c.requestThreadMaxPoolSize = config.getRequestThreadMaxPoolSize
c.requestThreadKeepAliveTimeSecs = config.getRequestThreadKeepAliveTimeSecs
val underlying = com.linkedin.norbert.network.server.NetworkServer(c)
def shutdown = underlying.shutdown
def markUnavailable = underlying.markUnavailable
def markAvailable(initialCapability: Long) = underlying.markAvailable(initialCapability)
def markAvailable = underlying.markAvailable
def getMyNode = underlying.myNode
def bind(nodeId: Int, markAvailable: Boolean, initialCapacity: Long) = underlying.bind(nodeId, markAvailable, initialCapacity)
def bind(nodeId: Int, markAvailable: Boolean) = underlying.bind(nodeId, markAvailable)
def bind(nodeId: Int) = underlying.bind(nodeId)
def bindByPort(port: Int) = underlying.bindByPort(port)
def registerHandler[RequestMsg, ResponseMsg](handler: RequestHandler[RequestMsg, ResponseMsg], serializer: Serializer[RequestMsg, ResponseMsg]) = {
underlying.registerHandler((request: RequestMsg) => handler.handleRequest(request))(serializer, serializer)
}
def registerAsyncHandler[RequestMsg, ResponseMsg](handler: CallbackRequestHandler[RequestMsg, ResponseMsg], serializer: Serializer[RequestMsg, ResponseMsg]) = {
underlying.registerAsyncHandler((request: RequestMsg, callback: CallbackContext[ResponseMsg]) => handler.onRequest(request, callback))(serializer, serializer)
}
}
|
linkedin/norbert
|
java-network/src/main/scala/com/linkedin/norbert/javacompat/network/NettyNetworkServer.scala
|
Scala
|
apache-2.0
| 2,805
|
package com.timgroup.eventstore.mysql
import com.timgroup.eventstore.api._
import com.fasterxml.jackson.core._
import com.fasterxml.jackson.databind._
import org.scalatest._
import org.scalatest.OptionValues._
class JsonEventCompatibilityTest extends FlatSpec with Matchers {
"compatibility predicate" should "regard identical events as compatible" in {
errorComparing(mkevent("TestEvent", "{ a: 1 }"), mkevent("TestEvent", "{ a: 1 }")) should not be ('defined)
}
it should "regard events with different types as incompatible" in {
errorComparing(mkevent("TestEventA", "{ a: 1 }"), mkevent("TestEventB", "{ a: 1 }")) should be ('defined)
}
it should "regard reordering fields as compatible" in {
errorComparing(mkevent("TestEvent", "{ a: 1, b: 2 }"), mkevent("TestEvent", "{ b: 2, a: 1 }")) should not be ('defined)
}
it should "regard changing a string value as incompatible" in {
errorComparing(mkevent("TestEvent", "{ a: 'x' }"), mkevent("TestEvent", "{ a: 'y' }")) should be ('defined)
}
it should "regard changing a string value to a number value as incompatible" in {
errorComparing(mkevent("TestEvent", "{ a: '1' }"), mkevent("TestEvent", "{ a: 1 }")) should be ('defined)
}
it should "regard changing null to a value as incompatible" in {
errorComparing(mkevent("TestEvent", "{ a: null }"), mkevent("TestEvent", "{ a: 'x' }")) should be ('defined)
}
it should "regard changing a value to null as incompatible" in {
errorComparing(mkevent("TestEvent", "{ a: 'x' }"), mkevent("TestEvent", "{ a: null }")) should be ('defined)
}
it should "regard adding a new field as compatible" in {
errorComparing(mkevent("TestEvent", "{ a: 'x' }"), mkevent("TestEvent", "{ a: 'x', b: 'y' }")) should not be ('defined)
}
it should "regard removing a field as incompatible" in {
errorComparing(mkevent("TestEvent", "{ a: 'x', b: 'y' }"), mkevent("TestEvent", "{ a: 'x' }")).value should include ("Element in current version, but not new version")
}
it should "regard adding an array value as incompatible" in {
errorComparing(mkevent("TestEvent", "{ a: ['x'] }"), mkevent("TestEvent", "{ a: ['x', 'y'] }")) should be ('defined)
}
it should "regard removing an array value as incompatible" in {
errorComparing(mkevent("TestEvent", "{ a: ['x', 'y'] }"), mkevent("TestEvent", "{ a: ['x'] }")) should be ('defined)
}
it should "regard changing a string value nested inside an object as incompatible" in {
errorComparing(mkevent("TestEvent", "{ a: { b: 'x' } }"), mkevent("TestEvent", "{ a: { b: 'y' } }")) should be ('defined)
}
it should "regard changing a string value nested inside an array as incompatible" in {
errorComparing(mkevent("TestEvent", "{ a: [ 'x' ] }"), mkevent("TestEvent", "{ a: [ 'y' ] }")) should be ('defined)
}
it should "regard reordering the values of an array as incompatible" in {
errorComparing(mkevent("TestEvent", "{ a: [ 'x', 'y' ] }"), mkevent("TestEvent", "{ a: [ 'y', 'x' ] }")) should be ('defined)
}
"error messages" should "include the current and new documents" in {
errorComparing(mkevent("TestEvent", "{ a: 'x' }"), mkevent("TestEvent", "{ a: 'y' }")).value should include ("""current: {"a":"x"}""")
errorComparing(mkevent("TestEvent", "{ a: 'x' }"), mkevent("TestEvent", "{ a: 'y' }")).value should include ("""new: {"a":"y"}""")
}
it should "indicate nesting within an object" in {
errorComparing(mkevent("TestEvent", "{ a: {b: 'x'} }"), mkevent("TestEvent", "{ a: {b: 'y'} }")).value should include ("""a.b""")
}
it should "indicate nesting within an array" in {
errorComparing(mkevent("TestEvent", "{ a: ['x'] }"), mkevent("TestEvent", "{ a: ['y'] }")).value should include ("""a[0]""")
}
it should "indicate the version number" in {
errorComparing(mkevent("TestEvent", "{ a: 'x' }"), mkevent("TestEvent", "{ a: 'y' }"), 12345).value should include ("""version 12345""")
}
private def mkevent(eventType: String, jsonString: String) = {
val objectMapper = new ObjectMapper
val jsonNode = objectMapper.reader
.`with`(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES)
.`with`(JsonParser.Feature.ALLOW_SINGLE_QUOTES)
.`with`(JsonParser.Feature.STRICT_DUPLICATE_DETECTION)
.readTree(jsonString)
val jsonBytes = objectMapper.writeValueAsBytes(jsonNode)
EventData(eventType, jsonBytes)
}
private def errorComparing(currentEvent: EventData, newEvent: EventData, version: Long = 1L): Option[String] = {
try {
if (JsonEventCompatibility.test(version, currentEvent, newEvent)) None else Some("")
} catch {
case e: IdempotentWriteFailure => Some(e.getMessage)
}
}
}
|
tim-group/tg-eventstore
|
mysql-legacy/src/test/scala/com/timgroup/eventstore/mysql/JsonEventCompatibilityTest.scala
|
Scala
|
bsd-2-clause
| 4,889
|
/*
* P40.scala
* Copyright (C) 2017 n3xtchen <echenwen@gmail.com>
*
* Distributed under terms of the GPL-2.0 license.
*/
package nextchen
object P40 {
// scala> 28.goldbach
// res0: (Int, Int) = (5,23)
implicit class S99Int(val n: Int) extends P35.S99Int(n) {
// Goldbach's conjecture
def goldbach() = {
def proc(ps: Stream[Int]): (Int, Int) = ps match {
case x #:: _ if (n-x).isPrime => (x, n-x)
case _ #:: tail => proc(tail)
}
proc(P35.S99Int.primes)
}
}
def main(args: Array[String]): Unit = {
println(28.goldbach)
}
}
|
n3xtchen/s-99
|
src/main/scala/nextchen/P40.scala
|
Scala
|
mit
| 582
|
package com.stulsoft.avro
import java.io.ByteArrayOutputStream
import org.apache.avro.Schema
import org.apache.avro.generic.{GenericData, GenericRecord}
import org.apache.avro.io.{DecoderFactory, EncoderFactory}
import org.apache.avro.specific.{SpecificDatumReader, SpecificDatumWriter}
import scala.io.Source
/** Playing with Avro
*
* @author Yuriy Stul.
* @see [[https://dzone.com/articles/kafka-avro-scala-example Kafka Avro Scala Example]]
*/
object Main1 extends App {
test1()
test2()
/**
* Binary encoding
*/
def test1(): Unit = {
println("==>test1")
val schema = new Schema.Parser().parse(Source.fromURL(getClass.getResource("/user.json")).mkString)
val genericUser = new GenericData.Record(schema)
genericUser.put("name", "test 1")
genericUser.put("favoriteNumber", 1)
genericUser.put("favoriteColor", "red")
println(genericUser)
val writer = new SpecificDatumWriter[GenericRecord](schema)
val out = new ByteArrayOutputStream()
val encoder = EncoderFactory.get().binaryEncoder(out, null)
writer.write(genericUser, encoder)
encoder.flush()
out.close()
val serializedBytes = out.toByteArray
println(serializedBytes)
println(s"serializedBytes.length=${serializedBytes.length}")
val reader = new SpecificDatumReader[GenericRecord](schema)
val decoder = DecoderFactory.get().binaryDecoder(serializedBytes, null)
val userData = reader.read(null, decoder)
// println(userData)
println(s"""${userData.get("name")} ${userData.get("favoriteNumber")} ${userData.get("favoriteColor")}""")
println("<==test1")
}
/**
* Json encoding
*/
def test2(): Unit = {
println("==>test2")
val schema = new Schema.Parser().parse(Source.fromURL(getClass.getResource("/user.json")).mkString)
val genericUser = new GenericData.Record(schema)
genericUser.put("name", "test 2")
genericUser.put("favoriteNumber", 1)
genericUser.put("favoriteColor", "red")
println(genericUser)
val writer = new SpecificDatumWriter[GenericRecord](schema)
val out = new ByteArrayOutputStream()
val encoder = EncoderFactory.get().jsonEncoder(schema, out)
writer.write(genericUser, encoder)
encoder.flush()
out.close()
val serializedString = out.toString
println(serializedString)
println(s"serializedString.length=${serializedString.length}")
val reader = new SpecificDatumReader[GenericRecord](schema)
val decoder = DecoderFactory.get().jsonDecoder(schema, serializedString)
val userData = reader.read(null, decoder)
// println(userData)
println(s"""${userData.get("name")} ${userData.get("favoriteNumber")} ${userData.get("favoriteColor")}""")
println("<==test2")
}
}
|
ysden123/poc
|
avro/src/main/scala/com/stulsoft/avro/Main1.scala
|
Scala
|
mit
| 2,764
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.directdictionary
import java.io.File
import java.sql.Date
import org.apache.spark.sql.Row
import org.apache.spark.sql.common.util.QueryTest
import org.apache.spark.sql.hive.HiveContext
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
/**
* Test class for detailed queries on the date data type with direct dictionary
*/
class DateDataTypeDirectDictionaryWithNoDictTestCase extends QueryTest with BeforeAndAfterAll {
var hiveContext: HiveContext = _
override def beforeAll {
try {
CarbonProperties.getInstance().addProperty("carbon.direct.dictionary", "true")
sql(
"""
CREATE TABLE IF NOT EXISTS directDictionaryTable
(empno String, doj Date, salary Int)
STORED BY 'org.apache.carbondata.format' TBLPROPERTIES ('DICTIONARY_EXCLUDE'='empno')"""
)
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
val csvFilePath = s"$resourcesPath/datasample.csv"
sql("LOAD DATA local inpath '" + csvFilePath + "' INTO TABLE directDictionaryTable OPTIONS"
+ "('DELIMITER'= ',', 'QUOTECHAR'= '\"')");
} catch {
case x: Throwable =>
x.printStackTrace()
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "dd-MM-yyyy")
}
}
test("select doj from directDictionaryTable") {
checkAnswer(
sql("select doj from directDictionaryTable"),
Seq(Row(Date.valueOf("2016-03-14")),
Row(Date.valueOf("2016-04-14")),
Row(null)
)
)
}
test("select doj from directDictionaryTable with equals filter") {
checkAnswer(
sql("select doj from directDictionaryTable where doj='2016-03-14 15:00:09'"),
Seq(Row(Date.valueOf("2016-03-14")))
)
}
test("select doj from directDictionaryTable with greater than filter") {
checkAnswer(
sql("select doj from directDictionaryTable where doj>'2016-03-14 15:00:09'"),
Seq(Row(Date.valueOf("2016-04-14")))
)
}
override def afterAll {
sql("drop table directDictionaryTable")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "dd-MM-yyyy")
CarbonProperties.getInstance().addProperty("carbon.direct.dictionary", "false")
}
}
|
JihongMA/incubator-carbondata
|
integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/directdictionary/DateDataTypeDirectDictionaryWithNoDictTestCase.scala
|
Scala
|
apache-2.0
| 3,243
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.io.File
import org.apache.commons.io.FileUtils
import org.apache.hadoop.fs.{FileSystem, Path, PathFilter}
import org.apache.parquet.format.converter.ParquetMetadataConverter.NO_FILTER
import org.apache.parquet.hadoop.ParquetFileReader
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
class ParquetInteroperabilitySuite extends ParquetCompatibilityTest with SharedSQLContext {
test("parquet files with different physical schemas but share the same logical schema") {
import ParquetCompatibilityTest._
// This test case writes two Parquet files, both representing the following Catalyst schema
//
// StructType(
// StructField(
// "f",
// ArrayType(IntegerType, containsNull = false),
// nullable = false))
//
// The first Parquet file comes with parquet-avro style 2-level LIST-annotated group, while the
// other one comes with parquet-protobuf style 1-level unannotated primitive field.
withTempDir { dir =>
val avroStylePath = new File(dir, "avro-style").getCanonicalPath
val protobufStylePath = new File(dir, "protobuf-style").getCanonicalPath
val avroStyleSchema =
"""message avro_style {
| required group f (LIST) {
| repeated int32 array;
| }
|}
""".stripMargin
writeDirect(avroStylePath, avroStyleSchema, { rc =>
rc.message {
rc.field("f", 0) {
rc.group {
rc.field("array", 0) {
rc.addInteger(0)
rc.addInteger(1)
}
}
}
}
})
logParquetSchema(avroStylePath)
val protobufStyleSchema =
"""message protobuf_style {
| repeated int32 f;
|}
""".stripMargin
writeDirect(protobufStylePath, protobufStyleSchema, { rc =>
rc.message {
rc.field("f", 0) {
rc.addInteger(2)
rc.addInteger(3)
}
}
})
logParquetSchema(protobufStylePath)
checkAnswer(
spark.read.parquet(dir.getCanonicalPath),
Seq(
Row(Seq(0, 1)),
Row(Seq(2, 3))))
}
}
test("parquet timestamp conversion") {
// Make a table with one parquet file written by impala, and one parquet file written by spark.
// We should only adjust the timestamps in the impala file, and only if the conf is set
val impalaFile = "test-data/impala_timestamp.parq"
// here are the timestamps in the impala file, as they were saved by impala
val impalaFileData =
Seq(
"2001-01-01 01:01:01",
"2002-02-02 02:02:02",
"2003-03-03 03:03:03"
).map(java.sql.Timestamp.valueOf)
val impalaPath = Thread.currentThread().getContextClassLoader.getResource(impalaFile)
.toURI.getPath
withTempPath { tableDir =>
val ts = Seq(
"2004-04-04 04:04:04",
"2005-05-05 05:05:05",
"2006-06-06 06:06:06"
).map { s => java.sql.Timestamp.valueOf(s) }
import testImplicits._
// match the column names of the file from impala
withSQLConf(SQLConf.PARQUET_OUTPUT_TIMESTAMP_TYPE.key ->
SQLConf.ParquetOutputTimestampType.INT96.toString) {
val df = spark.createDataset(ts).toDF().repartition(1)
.withColumnRenamed("value", "ts")
df.write.parquet(tableDir.getAbsolutePath)
}
FileUtils.copyFile(new File(impalaPath), new File(tableDir, "part-00001.parq"))
Seq(false, true).foreach { int96TimestampConversion =>
Seq(false, true).foreach { vectorized =>
withSQLConf(
(SQLConf.PARQUET_OUTPUT_TIMESTAMP_TYPE.key,
SQLConf.ParquetOutputTimestampType.INT96.toString),
(SQLConf.PARQUET_INT96_TIMESTAMP_CONVERSION.key, int96TimestampConversion.toString()),
(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key, vectorized.toString())
) {
val readBack = spark.read.parquet(tableDir.getAbsolutePath).collect()
assert(readBack.size === 6)
// if we apply the conversion, we'll get the "right" values, as saved by impala in the
// original file. Otherwise, they're off by the local timezone offset, set to
// America/Los_Angeles in tests
val impalaExpectations = if (int96TimestampConversion) {
impalaFileData
} else {
impalaFileData.map { ts =>
DateTimeUtils.toJavaTimestamp(DateTimeUtils.convertTz(
DateTimeUtils.fromJavaTimestamp(ts),
DateTimeUtils.TimeZoneUTC,
DateTimeUtils.getTimeZone(conf.sessionLocalTimeZone)))
}
}
val fullExpectations = (ts ++ impalaExpectations).map(_.toString).sorted.toArray
val actual = readBack.map(_.getTimestamp(0).toString).sorted
withClue(
s"int96TimestampConversion = $int96TimestampConversion; vectorized = $vectorized") {
assert(fullExpectations === actual)
// Now test that the behavior is still correct even with a filter which could get
// pushed down into parquet. We don't need extra handling for pushed down
// predicates because (a) in ParquetFilters, we ignore TimestampType and (b) parquet
// does not read statistics from int96 fields, as they are unsigned. See
// scalastyle:off line.size.limit
// https://github.com/apache/parquet-mr/blob/2fd62ee4d524c270764e9b91dca72e5cf1a005b7/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java#L419
// https://github.com/apache/parquet-mr/blob/2fd62ee4d524c270764e9b91dca72e5cf1a005b7/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java#L348
// scalastyle:on line.size.limit
//
// Just to be defensive in case anything ever changes in parquet, this test checks
// the assumption on column stats, and also the end-to-end behavior.
val hadoopConf = spark.sessionState.newHadoopConf()
val fs = FileSystem.get(hadoopConf)
val parts = fs.listStatus(new Path(tableDir.getAbsolutePath), new PathFilter {
override def accept(path: Path): Boolean = !path.getName.startsWith("_")
})
// grab the metadata from the parquet file. The next section of asserts just makes
// sure the test is configured correctly.
assert(parts.size == 2)
parts.foreach { part =>
val oneFooter =
ParquetFileReader.readFooter(hadoopConf, part.getPath, NO_FILTER)
assert(oneFooter.getFileMetaData.getSchema.getColumns.size === 1)
val typeName = oneFooter
.getFileMetaData.getSchema.getColumns.get(0).getPrimitiveType.getPrimitiveTypeName
assert(typeName === PrimitiveTypeName.INT96)
val oneBlockMeta = oneFooter.getBlocks().get(0)
val oneBlockColumnMeta = oneBlockMeta.getColumns().get(0)
// This is the important assert. Column stats are written, but they are ignored
// when the data is read back as mentioned above, b/c int96 is unsigned. This
// assert makes sure this holds even if we change parquet versions (if eg. there
// were ever statistics even on unsigned columns).
assert(!oneBlockColumnMeta.getStatistics.hasNonNullValue)
}
// These queries should return the entire dataset with the conversion applied,
// but if the predicates were applied to the raw values in parquet, they would
// incorrectly filter data out.
val query = spark.read.parquet(tableDir.getAbsolutePath)
.where("ts > '2001-01-01 01:00:00'")
val countWithFilter = query.count()
val exp = if (int96TimestampConversion) 6 else 5
assert(countWithFilter === exp, query)
}
}
}
}
}
}
}
|
aosagie/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetInteroperabilitySuite.scala
|
Scala
|
apache-2.0
| 9,332
|
package com.usercase.request.parser
import com.usercase.request.http.HttpData
import org.json.JSONObject
import scala.collection.mutable
/**
* Created by C.J.YOU on 2017/2/21.
*/
class RespondParserReflect(className: String) {
val RPClass = Class.forName(className)
/**
* Uses reflection to look up the response-parsing method that corresponds to a given URL
* @param methodName name of the parsing method to invoke
* @param url request URL
* @param parameter request parameters
* @return parsed response as a JSONObject (see the usage sketch after this class)
*/
type T = (String, String, mutable.HashMap[String,String], HttpData)
def runMethod(t: T):JSONObject= {
val constructor = RPClass.getConstructor(classOf[String], classOf[mutable.HashMap[String,String]], classOf[HttpData])
val RPObject = constructor.newInstance(t._2, t._3, t._4)
val result = RPClass.getMethod(t._1).invoke(RPObject)
result.asInstanceOf[JSONObject].put("RespondParser",className)
}
}
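// A hypothetical usage sketch (the parser class name, method name, and httpData value
// below are illustrative, not defined in this file): instantiate the wrapper with a
// fully qualified class name, then invoke one of its parsing methods reflectively.
//
//   val reflect = new RespondParserReflect("com.usercase.request.parser.ExampleParser")
//   val params  = mutable.HashMap("token" -> "abc")
//   val json    = reflect.runMethod(("parseBody", "http://example.com/api", params, httpData))
//   json.getString("RespondParser")   // => "com.usercase.request.parser.ExampleParser"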
|
bgfurfeature/AI
|
src/main/scala/com/usercase/request/parser/RespondParserReflect.scala
|
Scala
|
apache-2.0
| 901
|
package scala.models
import io.apibuilder.generator.v0.models.{File, InvocationForm}
import io.apibuilder.spec.v0.models.Service
import models.TestHelper._
import scala.util.matching.Regex
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
class Play26ControllersSpec extends AnyFunSpec with Matchers {
def fileContent(service: Service): File =
Play26Controllers.invoke(InvocationForm(service))
.fold(
{ msgs => Left(new Throwable(s"Generated errors: ${msgs.mkString("\n - ", "\n - ", "")}")) },
{
case one :: Nil => Right(one)
case _ :: _ => Left(new Throwable(s"Generated too many files"))
case Nil => Left(new Throwable(s"Generated no files"))
}
)
.fold(throwable => throw throwable, identity)
def count(regex: Regex, service: Service): Int = {
val contents = fileContent(service).contents
regex.findAllIn(contents).length
}
def importCount(service: Service): Int = count("import ".r, service)
def controllerCount(service: Service): Int = count("play\\.api\\.mvc\\.BaseController".r, service)
def responseCount(service: Service): Int = count("sealed trait [^ ]+".r, service)
def responseImplementationCount(service: Service): Int = count("case (class|object) HTTP[0-9]+".r, service)
def methodFinalCount(service: Service): Int = count("play\\.api\\.mvc\\.Action".r, service)
def methodAbstractCount(service: Service): Int = count("scala\\.concurrent\\.Future".r, service)
describe("for all services") {
List(
collectionJsonDefaultsService,
referenceApiService,
referenceWithImportsApiService,
generatorApiService,
apidocApiService,
dateTimeService,
builtInTypesService,
scalaKeywordsService,
generatorApiServiceWithUnionAndDescriminator,
generatorApiServiceWithUnionWithoutDescriminator,
emptyService,
).zipWithIndex.foreach { case (service, index) =>
describe(s"for services ${index}") {
it("generates all imports") {
assert(importCount(service) == 1 + service.imports.size)
}
it("generates all controllers") {
assert(controllerCount(service) == service.resources.size)
}
it("generates all responses") {
assert(responseCount(service) == service.resources.flatMap(_.operations).size)
}
it("generates all response implementations") {
assert(responseImplementationCount(service) == service.resources.flatMap(_.operations).flatMap(_.responses).size)
}
it("generates all methods final") {
assert(methodFinalCount(service) == service.resources.flatMap(_.operations).size)
}
it("generates all methods abstract") {
assert(methodAbstractCount(service) == service.resources.flatMap(_.operations).size)
}
}
}
}
}
|
mbryzek/apidoc-generator
|
scala-generator/src/test/scala/models/Play26ControllersSpec.scala
|
Scala
|
mit
| 2,907
|
object Foo {
def f()()(implicit ctx: String): Int = ???
def at[T](op: () => T): Unit = ()
at(() => f()) // error: missing ()
}
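// A sketch of the intended fix (illustrative, not part of the original test): `f()` still
// needs its second, empty argument list, so supplying it (with an implicit String in scope,
// and Foo's members imported) makes the closure well-typed.
//
//   implicit val ctx: String = "ctx"
//   at(() => f()())   // () => Int, as `at` expects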
|
lampepfl/dotty
|
tests/neg/i3542a.scala
|
Scala
|
apache-2.0
| 133
|
package io.plasmap.geo.preprocessing.test
import _root_.io.plasmap.generator.OsmObjectGenerator
import _root_.io.plasmap.geo.data.OsmBB
import _root_.io.plasmap.geo.mappings._
import _root_.io.plasmap.geo.preprocessing.{FlowError, OsmPreprocessor, WayFlow}
import _root_.io.plasmap.geo.preprocessing.OsmPreprocessor._
import _root_.io.plasmap.model._
import _root_.io.plasmap.model.geometry.{GeometryCollection, HashPoint, LineString, Point}
import _root_.io.plasmap.util.Denormalizer
import _root_.io.plasmap.util.test.OsmTestData
import akka.NotUsed
import akka.actor._
import akka.stream._
import akka.stream.scaladsl.{Source, _}
import org.joda.time.DateTime
import org.scalamock.proxy.ProxyMockFactory
import org.scalamock.specs2.IsolatedMockFactory
import org.specs2.mutable.Specification
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scalaz.{Sink => _, Source => _, _}
/**
* Specification for the way denormalization flow
*/
class WayFlowSpec
extends Specification
with IsolatedMockFactory
with ProxyMockFactory {
sequential
val system = ActorSystem("test")
val mat = ActorMaterializer()
import scala.concurrent.ExecutionContext.Implicits.global
import TestFixtures._
val gen = OsmObjectGenerator()
"The WayFlow" should {
"denormalize a way" in {
val mappings: Map[OsmId, Point] = (for {
way <- ways
nd <- way.nds
point = Point(gen.generatePoint.hash)
} yield nd -> point).toMap
val expectedWays = for {
way <- ways
} yield Denormalizer.denormalizeWay(way, mappings)
val numNds = ways.foldLeft(0)((acc, way) => way.nds.size + acc)
val mappingF = mockFunction[OsmId, Future[Option[OsmNodeMapping]]]
mappingF expects * onCall { id: OsmId =>
val mappingOpt: Option[Point] = mappings.get(id)
Future {
mappingOpt.map((mapping) => {
OsmNodeMapping(mapping.hash, id, DateTime.now)
})
}
} repeat numNds
val wayFlow: Flow[OsmWay, Disjunction[FlowError, OsmDenormalizedWay], NotUsed] = WayFlow.denormalizeWayFlow(mappingF)
val eventualDenormalizedWaysFut: Future[List[OsmDenormalizedWay]] =
Source(ways)
.via(wayFlow)
.filter(_.isRight)
.map(_.toOption.get)
.runFold(List.empty[OsmDenormalizedWay])((list, dway: OsmDenormalizedWay) => dway :: list)
val eventualDenormalizedWays: List[OsmDenormalizedWay] = Await.result(eventualDenormalizedWaysFut, 10 seconds)
eventualDenormalizedWays must containAllOf(expectedWays)
}
}
}
|
plasmap/plasmap
|
processing/src/test/scala/io/plasmap/geo/preprocessing/test/WayFlowSpec.scala
|
Scala
|
apache-2.0
| 2,594
|
package scala.meta.io
import java.io._
import java.nio.{file => nio}
import java.net._
import java.nio.file.Path
import java.nio.file.Paths
import scalapb.GeneratedMessage
import scala.meta.internal.io.PlatformPathIO
import scala.meta.internal.io.FileIO
import scala.meta.internal.io.PathIO
/** Wrapper around an absolute nio.Path. */
sealed abstract case class AbsolutePath(toNIO: nio.Path) {
require(toNIO.isAbsolute, s"$toNIO is not absolute!")
def toFile: File = toNIO.toFile
def toURI: URI = toNIO.toUri
def syntax: String = toString
def structure: String = s"""AbsolutePath("$syntax")"""
override def toString: String = toNIO.toString
def toRelative: RelativePath = toRelative(PathIO.workingDirectory)
def toRelative(prefix: AbsolutePath): RelativePath = RelativePath(prefix.toNIO.relativize(toNIO))
def resolve(other: RelativePath): AbsolutePath = AbsolutePath(toNIO.resolve(other.toNIO))(this)
def resolve(other: String): AbsolutePath = AbsolutePath(toNIO.resolve(other))(this)
def resolveSibling(f: String => String): AbsolutePath =
AbsolutePath(toNIO.resolveSibling(f(toNIO.getFileName.toString)))
def isFile: Boolean = FileIO.isFile(this)
def isDirectory: Boolean = FileIO.isDirectory(this)
def readAllBytes: Array[Byte] = FileIO.readAllBytes(this)
}
object AbsolutePath {
lazy val root = new AbsolutePath(Paths.get("").toAbsolutePath.getRoot) {}
// java.{io,nio} implicitly assume sys.props("user.dir") as the working directory.
// This assumption does not hold for JS runtimes.
implicit def workingDirectory: AbsolutePath =
new AbsolutePath(Paths.get(PlatformPathIO.workingDirectoryString)) {}
// Use working directory as cwd, that's the default behavior of java.io.File.
def apply(file: File)(implicit cwd: AbsolutePath): AbsolutePath = apply(file.toPath)(cwd)
def apply(path: String)(implicit cwd: AbsolutePath): AbsolutePath = apply(Paths.get(path))(cwd)
def apply(path: Path)(implicit cwd: AbsolutePath): AbsolutePath =
if (path.isAbsolute) {
new AbsolutePath(path) {}
} else {
cwd.resolve(path.toString)
}
}
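// A minimal usage sketch (paths below are illustrative, not part of this file): string and
// java.io inputs are resolved against the implicit working directory unless an explicit
// cwd is supplied.
//
//   implicit val cwd: AbsolutePath = AbsolutePath("/tmp/project")(AbsolutePath.workingDirectory)
//   val readme = AbsolutePath("docs/README.md")   // relative input, resolved against cwd
//   readme.toRelative(cwd).toString               // "docs/README.md"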
|
olafurpg/scalameta
|
scalameta/io/shared/src/main/scala/scala/meta/io/AbsolutePath.scala
|
Scala
|
bsd-3-clause
| 2,109
|
/*
* Copyright (c) 2012-2018 Broad Institute, Inc.
*
* This file is part of Pilon.
*
* Pilon is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* Pilon is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Pilon. If not, see <http://www.gnu.org/licenses/>.
*
*/
package org.broadinstitute.pilon
/**
* Created by IntelliJ IDEA.
* User: bruce
* Date: 11/20/11
* Time: 5:19 PM
* To change this template use File | Settings | File Templates.
*/
import java.io._
class Tracks(val reference: GenomeFile) {
def standardTracks = {
makeBedTrack("Pilon.bed", "Pilon")
changesTrack("Changes.wig")
unconfirmedTrack("Unconfirmed.wig")
copyNumberTrack("CopyNumber.wig")
coverageTrack("Coverage.wig")
badCoverageTrack("BadCoverage.wig")
pctBadTrack("PctBad.wig")
//coverageTrackSD("CoverageSD.wig")
//badCoverageTrackSD("BadCoverageSD.wig")
deltaCoverageTrack("DeltaCoverage.wig")
dipCoverageTrack("DipCoverage.wig")
//fragCoverageTrack("FragCoverage.wig")
physicalCoverageTrack("PhysicalCoverage.wig")
//physicalCoverageTrackSD("PhysicalCoverageSD.wig")
//insertSizeTrack("InsertSize.wig")
//insertSizeTrackSD("InsertSizeSD.wig")
clippedAlignmentTrack("ClippedAlignments.wig")
weightedQualTrack("WeightedQual.wig")
weightedMqTrack("WeightedMq.wig")
gcTrack("GC.wig")
//kmerCopyNumberTrack("KmerCopyNumber.wig")
}
def changesTrack(file: String) = {
makeTrack(file, "Changes",
{ (r: GenomeRegion, i: Int) => if (r.changed(i)) 1 else 0 })
}
def unconfirmedTrack(file: String) = {
makeTrack(file, "Unconfirmed",
{ (r: GenomeRegion, i: Int) => if (r.confirmed(i)) 0 else 1 })
}
def copyNumberTrack(file: String) = {
makeTrack(file, "Copy Number",
{ (r: GenomeRegion, i: Int) => r.copyNumber(i) - 1 })
}
def kmerCopyNumberTrack(file: String) = {
makeTrack(file, "Kmer Copy Number",
{ (r: GenomeRegion, i: Int) => (r.kmerCopyNumber(i) - 1) max 0})
}
def coverageTrack(file: String) = {
makeTrack(file, "Coverage",
{ (r: GenomeRegion, i: Int) => r.coverage(i) })
}
def fragCoverageTrack(file: String) = {
makeTrack(file, "Frag Coverage",
{ (r: GenomeRegion, i: Int) => r.fragCoverage(i) })
}
def coverageTrackSD(file: String) = {
makeTrack(file, "Coverage SD",
{ (r: GenomeRegion, i: Int) => r.coverageDist.toSigma10x(r.coverage(i)) },
"viewLimits=-30:30")
}
def badCoverageTrack(file: String) = {
makeTrack(file, "Bad Coverage",
{ (r: GenomeRegion, i: Int) => r.badCoverage(i) })
}
def badCoverageTrackSD(file: String) = {
makeTrack(file, "Bad Coverage SD",
{ (r: GenomeRegion, i: Int) => r.badCoverageDist.toSigma10x(r.badCoverage(i)) },
"viewLimits=-30:30")
}
def deltaCoverageTrack(file: String, radius: Int = 100) = {
makeTrack(file, "Delta Coverage",
{ (r: GenomeRegion, i: Int) => r.deltaCoverage(i, radius) })
}
def dipCoverageTrack(file: String, radius: Int = 100) = {
makeTrack(file, "Dip Coverage",
{ (r: GenomeRegion, i: Int) => r.dipCoverage(i, radius) })
}
def physicalCoverageTrack(file: String) = {
makeTrack(file, "Physical Coverage",
{ (r: GenomeRegion, i: Int) => r.physCoverage(i) })
}
def physicalCoverageTrackSD(file: String) = {
makeTrack(file, "Physical Coverage SD",
{ (r: GenomeRegion, i: Int) => r.physCoverageDist.toSigma10x(r.physCoverage(i)) },
"viewLimits=-30:30")
}
def gcTrack(file: String) = {
makeTrack(file, "GC",
{ (r: GenomeRegion, i: Int) => r.gc(i) },
"graphType=heatmap midRange=35:65 midColor=0,255,0")
}
def insertSizeTrack(file: String) = {
makeTrack(file, "Insert Size",
{ (r: GenomeRegion, i: Int) => r.insertSize(i) })
}
def insertSizeTrackSD(file: String) = {
makeTrack(file, "Insert Size SD",
{ (r: GenomeRegion, i: Int) => r.insertSizeDist.toSigma10x(r.insertSize(i)) },
"viewLimits=-30:30")
}
def pctBadTrack(file: String) = {
makeTrack(file, "Pct Bad",
{ (r: GenomeRegion, i: Int) =>
val good = r.coverage(i)
val bad = r.badCoverage(i)
if (good+bad > 0) bad * 100 / (good + bad)
else 0
})
}
def weightedMqTrack(file: String) = {
makeTrack(file, "Weighted MQ",
//{ (r: GenomeRegion, i: Int) => r.weightedMqDist.toSigma10x(r.weightedMq(i)) },
//"viewLimits=-30:30")
{ (r: GenomeRegion, i: Int) => r.weightedMq(i) })
}
def weightedQualTrack(file: String) = {
makeTrack(file, "Weighted Qual",
{ (r: GenomeRegion, i: Int) => r.weightedQual(i) })
}
def clippedAlignmentTrack(file: String) = {
makeTrack(file, "Clipped Alignments",
{ (r: GenomeRegion, i: Int) => r.clips(i) })
}
def makeTrack(fileName: String, name: String, func: (GenomeRegion, Int) => Int, options: String = "") = {
val file = Pilon.outputFile(fileName)
println ("Creating " + name + " track in file " + file.getPath())
val writer = new PrintWriter(file)
var headLine = "track type=wiggle_0 graphType=line color=0,0,255 altColor=255,0,0 name=\"" + name + "\""
if (options != "") headLine += " " + options
writer.println(headLine)
for ((cName, regions) <- reference.regions) {
regions foreach { region =>
writer.println("fixedStep chrom=%s start=%d step=1".format(cName, region.start))
for (rIndex <- 0 to region.size-1) {
val value = func(region, rIndex)
writer.println(value)
}
}
}
writer.close()
}
def regionsToBed(regions: List[Region], name: String, writer: PrintWriter, rgb: String = "0,255,0") = {
regions map {region: Region =>
List(region.name, region.start-1, region.stop, name, "0", "+", region.start-1, region.stop, rgb) mkString("\t")
} foreach { writer.println(_) }
}
def makeBedTrack(fileName: String, name: String, options: String = "") = {
val file = Pilon.outputFile(fileName)
println ("Creating " + name + " track in file " + file.getPath())
val writer = new PrintWriter(file)
var headLine = "track description=\"Issues found by Pilon\" name=\"" + name + "\""
if (options != "") headLine += " " + options
writer.println(headLine)
for ((cName, regions) <- reference.regions) {
regions foreach { r: GenomeRegion =>
regionsToBed(r.unConfirmedRegions, "?", writer, "0,128,128")
regionsToBed(r.changeRegions, "X", writer, "255,0,0")
regionsToBed(r.lowCoverageRegions, "LowCov", writer, "128,0,128")
regionsToBed(r.possibleCollapsedRepeats, "Copy#", writer, "128,128,0")
regionsToBed(r.duplicationEvents, "Duplication", writer, "255,128,0")
regionsToBed(r.gaps, "Gap", writer, "0,0,0")
regionsToBed(r.possibleBreaks, "Break", writer, "255,0,255")
for ((region, outcome) <- r.reassemblyFixes) {
regionsToBed(List(region), outcome, writer, "0,128,0")
}
}
}
writer.close()
}
}
|
B-UMMI/INNUca
|
src/pilon_v1.23/pilon/src/main/scala/org/broadinstitute/pilon/Tracks.scala
|
Scala
|
gpl-3.0
| 7,490
|
/**
* Copyright (C) 2007 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.analysis
import org.orbeon.oxf.util.CollectionUtils._
import org.orbeon.oxf.xforms.analysis.model.{Instance, Model}
import org.orbeon.xforms.xbl.Scope
import scala.collection.mutable
// Part analysis: models and instances information
trait PartModelAnalysis extends TransientState {
self =>
def startScope: Scope
private[PartModelAnalysis] val modelsByScope = mutable.LinkedHashMap[Scope, mutable.Buffer[Model]]()
private[PartModelAnalysis] val modelsByPrefixedId = mutable.LinkedHashMap[String, Model]()
private[PartModelAnalysis] val modelByInstancePrefixedId = mutable.LinkedHashMap[String, Model]()
def iterateModels: Iterator[Model] =
for {
models <- modelsByScope.valuesIterator
model <- models.iterator
} yield
model
def getModel(prefixedId: String): Model =
modelsByPrefixedId.get(prefixedId).orNull
def getModelByInstancePrefixedId(prefixedId: String): Model =
modelByInstancePrefixedId.get(prefixedId).orNull
def getInstances(modelPrefixedId: String): Seq[Instance] =
modelsByPrefixedId.get(modelPrefixedId).toSeq flatMap (_.instances.values)
def defaultModel: Option[Model] =
getDefaultModelForScope(startScope)
def getDefaultModelForScope(scope: Scope): Option[Model] =
modelsByScope.get(scope) flatMap (_.headOption)
def getModelByScopeAndBind(scope: Scope, bindStaticId: String): Model =
modelsByScope.get(scope) flatMap
(_ find (_.bindsById.contains(bindStaticId))) orNull
def getModelsForScope(scope: Scope): collection.Seq[Model] =
modelsByScope.getOrElse(scope, Nil)
def findInstanceInScope(scope: Scope, instanceStaticId: String): Option[Instance] =
getModelsForScope(scope).iterator flatMap
(_.instances.iterator) collectFirst
{ case (`instanceStaticId`, instance) => instance }
// NOTE: This searches ancestor scopes as well.
def findInstancePrefixedId(startScope: Scope, instanceStaticId: String): Option[String] = {
val prefixedIdIt =
for {
scope <- Iterator.iterateOpt(startScope)(_.parent)
model <- getModelsForScope(scope)
if model.instances.contains(instanceStaticId)
} yield
scope.prefixedIdForStaticId(instanceStaticId)
prefixedIdIt.nextOption()
}
def indexModel(model: Model): Unit = {
val models = modelsByScope.getOrElseUpdate(model.scope, mutable.Buffer[Model]())
models += model
modelsByPrefixedId += model.prefixedId -> model
for (instance <- model.instances.values)
modelByInstancePrefixedId += instance.prefixedId -> model
}
def deindexModel(model: Model): Unit = {
modelsByScope.get(model.scope) foreach (_ -= model)
modelsByPrefixedId -= model.prefixedId
for (instance <- model.instances.values)
modelByInstancePrefixedId -= instance.prefixedId
}
override def freeTransientState(): Unit = {
super.freeTransientState()
for (model <- modelsByPrefixedId.values)
model.freeTransientState()
}
}
|
orbeon/orbeon-forms
|
xforms-analysis/shared/src/main/scala/org/orbeon/oxf/xforms/analysis/PartModelAnalysis.scala
|
Scala
|
lgpl-2.1
| 3,694
|
package org.http4s
package parser
import util.CaseInsensitiveString._
import org.specs2.mutable.Specification
class HeaderParserSpec extends Specification {
"Header parsing should catch errors" in {
val h2 = Header.Raw("Date".ci, "Fri, 06 Feb 0010 15:28:43 GMT") // Invalid year: must be >= 1800
h2.parsed must not (throwA[Exception])
}
}
|
m4dc4p/http4s
|
tests/src/test/scala/org/http4s/parser/HeaderParserSpec.scala
|
Scala
|
apache-2.0
| 357
|
package eu.ace_design.island.stdlib
import eu.ace_design.island.game._
import eu.ace_design.island.geom.Point
import eu.ace_design.island.map._
import eu.ace_design.island.stdlib.PointOfInterests.{Creek, EmergencySite}
import scala.util.Random
/**
 * Standard elements to be used as Points of Interest
**/
object PointOfInterests {
/**
   * A creek is used to support the landing operation
   * @param identifier the identifier of this creek
*/
case class Creek(override val identifier: String,
override val location: Option[Point]) extends PointOfInterest {}
case class Hideout(override val identifier: String,
override val location: Option[Point]) extends PointOfInterest {}
case class EmergencySite(override val identifier: String,
override val location: Option[Point]) extends PointOfInterest {}
}
/**
 * POIGenerators are functions used in the GameBoardBuilder to introduce Points of Interest in a board
*/
object POIGenerators {
/**
   * This class introduces creeks in the board
   * @param howMany number of creeks to add
*/
class WithCreeks(howMany: Int) extends POIGenerator {
override def apply(rand: Random = new Random(), loc: TileLocator)(board: GameBoard): GameBoard = {
// find locations:
val coasts = board.m.findVerticesWith(Set(IsCoast())).toSeq
// instantiate ports
val ports: IndexedSeq[((Int, Int), Creek)] = (0 until howMany) map { i =>
val idx = rand.nextInt(coasts.size-1)
loc(coasts(idx)) -> Creek(UUIDGenerator(), Some(coasts(idx)))
}
// enrich the board
(board /: ports) { (acc, poi) => acc addPOI poi }
}
}
object WithEmergencySite extends POIGenerator {
// We generate one emergency site near the coast
override def apply(rand: Random = new Random(), loc: TileLocator)(board: GameBoard): GameBoard = {
val faceRef = emergencyFaceAsBeach(board, rand) match {
case None => emergencyFaceAsHighFace(board, rand) match {
case None => throw new POIException("Unable to find a face for the emergency rescue site")
case Some(ref) => ref
}
case Some(ref) => ref
}
val emergencyPoint = board.m.vertex(board.m.face(faceRef).center)
board addPOI (loc(emergencyPoint) -> EmergencySite(UUIDGenerator(),Some(emergencyPoint)))
}
private def emergencyFaceAsBeach(board: GameBoard, rand: Random): Option[Int] = {
(board.m.faceProps.restrictedTo(HasForBiome()) filter { case (_, b) => b == Biomes.BEACH }).toList match {
case Nil => None
case list => Some(list(rand.nextInt(list.size))._1)
}
}
private def emergencyFaceAsHighFace(board: GameBoard, rand: Random): Option[Int] = {
val dataset = board.m.faceProps.restrictedTo(HasForHeight())
val max = dataset.values.max
(dataset filter { case (_, h) => h <= max * 0.4 }).toList match {
case Nil => None
case list => Some(list(rand.nextInt(list.size))._1)
}
}
}
}
class POIException(message: String) extends Exception(message)
|
ace-design/island
|
engine/src/main/scala/eu/ace_design/island/stdlib/PointOfInterests.scala
|
Scala
|
lgpl-3.0
| 3,121
|
class A
trait Foo {
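  // `A ?=> Int` is a context function type: producing the Int requires a given instance of A in scope.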
def foo: A ?=> Int
}
class Test {
new FooI{}
}
class FooI extends Foo {
def foo: A ?=> Int = 3
}
|
lampepfl/dotty
|
tests/pos/i4753.scala
|
Scala
|
apache-2.0
| 125
|
package org.scalatest
import Filter.IgnoreTag
/**
 * Filter whose <code>apply</code> method determines which of the passed tests to run and ignore based on tags to include and exclude passed
 * as class parameters.
*
* <p>
* This class handles the <code>org.scalatest.Ignore</code> tag specially, in that its <code>apply</code> method indicates which
* tests should be ignored based on whether they are tagged with <code>org.scalatest.Ignore</code>. If
* <code>"org.scalatest.Ignore"</code> is not passed in the <code>tagsToExclude</code> set, it will be implicitly added. However, if the
* <code>tagsToInclude</code> option is defined, and the contained set does not include <code>"org.scalatest.Ignore"</code>, then only those tests
* that are both tagged with <code>org.scalatest.Ignore</code> and at least one of the tags in the <code>tagsToInclude</code> set
* will be included in the result of <code>apply</code> and marked as ignored (so long as the test is not also
* marked with a tag other than <code>org.scalatest.Ignore</code> that is a member of the <code>tagsToExclude</code>
 * set). For example, if <code>SlowAsMolasses</code> is a member of the <code>tagsToInclude</code> set and a
* test is tagged with both <code>org.scalatest.Ignore</code> and <code>SlowAsMolasses</code>, and
* <code>SlowAsMolasses</code> appears in the <code>tagsToExclude</code> set, the
* <code>SlowAsMolasses</code> tag will "overpower" the <code>org.scalatest.Ignore</code> tag, and the
* test will be filtered out entirely rather than being ignored.
* </p>
*
* @param tagsToInclude an optional <code>Set</code> of <code>String</code> tag names to include (<em>i.e.</em>, not filter out) when filtering tests
* @param tagsToExclude a <code>Set</code> of <code>String</code> tag names to exclude (<em>i.e.</em>, filter out) when filtering tests
*
* @throws NullPointerException if either <code>tagsToInclude</code> or <code>tagsToExclude</code> are null
* @throws IllegalArgumentException if <code>tagsToInclude</code> is defined, but contains an empty set
*/
final class Filter(val tagsToInclude: Option[Set[String]], val tagsToExclude: Set[String], val includeNestedSuites: Boolean = true, val dynaTags: DynaTags = DynaTags(Map.empty, Map.empty)) extends Function2[Set[String], Map[String, Set[String]], List[(String, Boolean)]] {
if (tagsToInclude == null)
throw new NullPointerException("tagsToInclude was null")
if (tagsToExclude == null)
throw new NullPointerException("tagsToExclude was null")
tagsToInclude match {
case Some(tagsToInclude) =>
if (tagsToInclude.isEmpty)
throw new IllegalArgumentException("tagsToInclude was defined, but contained an empty set")
case None =>
}
private def includedTestNames(testNamesAsList: List[String], tags: Map[String, Set[String]]): List[String] =
tagsToInclude match {
case None => testNamesAsList
case Some(tagsToInclude) =>
for {
testName <- testNamesAsList
if tags contains testName
intersection = tagsToInclude intersect tags(testName)
if intersection.size > 0
} yield testName
}
private def verifyPreconditionsForMethods(testNames: Set[String], tags: Map[String, Set[String]]) {
val testWithEmptyTagSet = tags.find(tuple => tuple._2.isEmpty)
testWithEmptyTagSet match {
      case Some((testName, _)) => throw new IllegalArgumentException(testName + " was associated with an empty set in the map passed as tags")
case None =>
}
}
/**
* Filter test names based on their tags.
*
* <p>
* Each tuple in the returned list contains a <code>String</code>
* test name and a <code>Boolean</code> that indicates whether the test should be ignored. A test will be marked as ignored
* if <code>org.scalatest.Ignore</code> is in its tags set, and either <code>tagsToInclude</code> is <code>None</code>, or
* <code>tagsToInclude</code>'s value (a set) contains the test's name, unless another tag for that test besides <code>org.scalatest.Ignore</code>
* is also included in <code>tagsToExclude</code>. For example, if a test is tagged with
* both <code>org.scalatest.Ignore</code> and <code>SlowAsMolasses</code>, and <code>SlowAsMolasses</code>
* appears in the <code>tagsToExclude</code> set, the <code>SlowAsMolasses</code> tag will
* "overpower" the <code>org.scalatest.Ignore</code> tag, and this method will return
* a list that does not include the test name.
* </p>
*
* <pre class="stHighlight">
* for ((testName, ignoreTest) <- filter(testNames, tags))
* if (ignoreTest)
* // ignore the test
* else
* // execute the test
* </pre>
*
* @param testNames test names to be filtered
* @param tags a map from test name to tags, containing only test names included in the <code>testNames</code> set, and
* only test names that have at least one tag
*
* @throws IllegalArgumentException if any set contained in the passed <code>tags</code> map is empty
*/
@deprecated("Please use the apply method that takes a suiteId instead, the one with this signature: def apply(testNames: Set[String], testTags: Map[String, Set[String]], suiteId: String): List[(String, Boolean)]")
def apply(testNames: Set[String], tags: Map[String, Set[String]]): List[(String, Boolean)] = {
verifyPreconditionsForMethods(testNames, tags)
val testNamesAsList = testNames.toList // to preserve the order
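    // Keep a test if it is untagged, if its only excluded tag is org.scalatest.Ignore (it will be reported
    // as ignored), or if none of its tags appear in tagsToExclude; the Boolean marks whether it is ignored.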
val filtered =
for {
testName <- includedTestNames(testNamesAsList, tags)
if !tags.contains(testName) ||
(tags(testName).contains(IgnoreTag) && (tags(testName) intersect (tagsToExclude + "org.scalatest.Ignore")).size == 1) ||
(tags(testName) intersect tagsToExclude).size == 0
} yield (testName, tags.contains(testName) && tags(testName).contains(IgnoreTag))
filtered
}
def apply(testNames: Set[String], testTags: Map[String, Set[String]], suiteId: String): List[(String, Boolean)] = {
apply(testNames, testTags)
}
/**
* Filter one test name based on its tags.
*
* <p>
* The returned tuple contains a <code>Boolean</code>
* that indicates whether the test should be filtered, and if not, a <code>Boolean</code> that
* indicates whether the test should be ignored. A test will be marked as ignored
* if <code>org.scalatest.Ignore</code> is in its tags set, and either <code>tagsToInclude</code>
* is <code>None</code>, or <code>tagsToInclude</code>'s value (a set) contains the passed
* test name, unless another tag for that test besides <code>org.scalatest.Ignore</code>
* is also included in <code>tagsToExclude</code>. For example, if a test is tagged with
* both <code>org.scalatest.Ignore</code> and <code>SlowAsMolasses</code>, and <code>SlowAsMolasses</code>
* appears in the <code>tagsToExclude</code> set, the <code>SlowAsMolasses</code> tag will
* "overpower" the <code>org.scalatest.Ignore</code> tag, and this method will return
* (true, false).
* </p>
*
* <pre class="stHighlight">
* val (filterTest, ignoreTest) = filter(testName, tags)
* if (!filterTest)
* if (ignoreTest)
* // ignore the test
* else
* // execute the test
* </pre>
*
* @param testName the test name to be filtered
* @param tags a map from test name to tags, containing only test names that have at least one tag
*
* @throws IllegalArgumentException if any set contained in the passed <code>tags</code> map is empty
*/
@deprecated("Please use the apply method that takes a suiteId instead, the one with this signature: def apply(testName: String, testTags: Map[String, Set[String]], suiteId: String): (Boolean, Boolean)")
def apply(testName: String, tags: Map[String, Set[String]]): (Boolean, Boolean) = {
val list = apply(Set(testName), tags)
if (list.isEmpty)
(true, false)
else
(false, list.head._2)
}
def apply(testName: String, testTags: Map[String, Set[String]], suiteId: String): (Boolean, Boolean) = {
apply(testName, testTags)
}
/**
* Returns the number of tests that should be run after the passed <code>testNames</code> and <code>tags</code> have been filtered
* with the <code>tagsToInclude</code> and <code>tagsToExclude</code> class parameters.
*
* <p>
* The result of this method may be smaller than the number of
* elements in the list returned by <code>apply</code>, because the count returned by this method does not include ignored tests,
* and the list returned by <code>apply</code> does include ignored tests.
* </p>
*
* @param testNames test names to be filtered
* @param tags a map from test name to tags, containing only test names included in the <code>testNames</code> set, and
* only test names that have at least one tag
*
* @throws IllegalArgumentException if any set contained in the passed <code>tags</code> map is empty
*/
@deprecated("Please use the runnableTestCount method that takes a suiteId instead, the one with this signature: def runnableTestCount(testNames: Set[String], testTags: Map[String, Set[String]], suiteId: String): Int")
def runnableTestCount(testNames: Set[String], tags: Map[String, Set[String]]): Int = {
verifyPreconditionsForMethods(testNames, tags)
val testNamesAsList = testNames.toList // to preserve the order
val runnableTests =
for {
testName <- includedTestNames(testNamesAsList, tags)
if !tags.contains(testName) || (!tags(testName).contains(IgnoreTag) && (tags(testName) intersect tagsToExclude).size == 0)
} yield testName
runnableTests.size
}
def runnableTestCount(testNames: Set[String], testTags: Map[String, Set[String]], suiteId: String): Int = {
runnableTestCount(testNames, testTags)
}
  // pair._1 is filterSuite and pair._2 is ignoreSuite
/**
* <pre class="stHighlight">
* val (filterSuite, ignoreSuite) = filter(suite)
* if (!filterSuite)
* if (ignoreSuite)
* // ignore the Suite
* fireSuiteIgnored(suite, ...
* else
* // execute the Suite
* suite.run(...
* </pre>
*/
def apply(suite: Suite): (Boolean, Boolean) = {
(false, false)
}
// The boolean is ignoreSuite
/*
* <pre class="stHighlight">
* for ((suite, ignoreSuite) <- filter(nestedSuites))
* if (ignoreSuite)
* // ignore the Suite
* fireSuiteIgnored(...
* else
* // execute the Suite
* suite.run(...
* </pre>
*/
def apply(suites: List[Suite]): List[(Suite, Boolean)] = {
List.empty
}
}
object Filter {
private final val IgnoreTag = "org.scalatest.Ignore"
/**
* Factory method for a <code>Filter</code> initialized with the passed <code>tagsToInclude</code>
* and <code>tagsToExclude</code>.
*
* @param tagsToInclude an optional <code>Set</code> of <code>String</code> tag names to include (<em>i.e.</em>, not filter out) when filtering tests
* @param tagsToExclude a <code>Set</code> of <code>String</code> tag names to exclude (<em>i.e.</em>, filter out) when filtering tests
*
* @throws NullPointerException if either <code>tagsToInclude</code> or <code>tagsToExclude</code> are null
* @throws IllegalArgumentException if <code>tagsToInclude</code> is defined, but contains an empty set
*/
def apply(tagsToInclude: Option[Set[String]], tagsToExclude: Set[String]) =
new Filter(tagsToInclude, tagsToExclude)
/**
* Factory method for a <code>Filter</code> initialized with <code>None</code> for <code>tagsToInclude</code>
* and an empty set for <code>tagsToExclude</code>.
*
* @param tagsToInclude an optional <code>Set</code> of <code>String</code> tag names to include (<em>i.e.</em>, not filter out) when filtering tests
* @param tagsToExclude a <code>Set</code> of <code>String</code> tag names to exclude (<em>i.e.</em>, filter out) when filtering tests
*
* @throws NullPointerException if either <code>tagsToInclude</code> or <code>tagsToExclude</code> are null
* @throws IllegalArgumentException if <code>tagsToInclude</code> is defined, but contains an empty set
*/
def apply() =
new Filter(None, Set("org.scalatest.Ignore"))
}
|
epishkin/scalatest-google-code
|
src/main/scala/org/scalatest/Filter.scala
|
Scala
|
apache-2.0
| 12,250
|
/*
* Licensed to Intel Corporation under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* Intel Corporation licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.models.rnn
import com.intel.analytics.bigdl.dataset.{DataSet, LocalDataSet, MiniBatch, SampleToBatch}
import com.intel.analytics.bigdl.dataset.text.{LabeledSentence, LabeledSentenceToSample}
import com.intel.analytics.bigdl.nn.{LogSoftMax, Module}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.Engine
import org.apache.log4j.{Level, Logger}
import scala.util.Random
object Test {
Logger.getLogger("org").setLevel(Level.ERROR)
Logger.getLogger("akka").setLevel(Level.ERROR)
Logger.getLogger("breeze").setLevel(Level.ERROR)
Logger.getLogger("com.intel.analytics.bigdl.optim").setLevel(Level.INFO)
import Utils._
val logger = Logger.getLogger(getClass)
def main(args: Array[String]): Unit = {
testParser.parse(args, new TestParams()).map(param => {
val vocab = new Dictionary(param.folder)
val model = Module.load[Float](param.modelSnapshot.get)
Engine.setCoreNumber(param.coreNumber)
val logSoftMax = LogSoftMax[Float]()
val lines = readSentence(param.folder)
val input = lines.map(x =>
x.map(t => vocab.getIndex(t).toFloat))
var labeledInput = input.map(x =>
new LabeledSentence[Float](x, x))
val batchSize = 1
var index = 0
while (index < param.numOfWords.getOrElse(0)) {
index += 1
val validationSet = DataSet.array(labeledInput)
.transform(LabeledSentenceToSample(vocab.length + 1))
.transform(SampleToBatch(batchSize = batchSize))
.asInstanceOf[LocalDataSet[MiniBatch[Float]]]
val dataIter = validationSet.data(train = false)
val predict = dataIter.map(batch => {
require(batch.data.size(1) == 1, "predict sentence one by one")
val output = model.forward(batch.data)
.asInstanceOf[Tensor[Float]]
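        // Intended inverse-CDF sampling of the next word: exp() turns log-probabilities into probabilities,
        // the running sum builds a cumulative distribution, and the count of cumulative values below a random
        // draw (note: a fresh draw per element here) gives the sampled word index.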
val predictProbDist = logSoftMax.forward(output(output.size(1)))
.storage().map(x => math.exp(x).toFloat).toArray
.map {
var s = 0.0f; d => {
s += d; s
}
}
.filter(_ < Random.nextFloat())
(predictProbDist.length - 1).toFloat
}).toArray
labeledInput = (labeledInput zip predict).map(x => {
val addedInput = x._1.asInstanceOf[LabeledSentence[Float]]
.data() ++ Array(x._2)
new LabeledSentence[Float](addedInput, addedInput)
})
}
val results = labeledInput.map(x => x.data()
.map(t => vocab.getWord(t)))
results.foreach(x =>
logger.info(x.mkString(",")))
})
}
}
|
SeaOfOcean/BigDL
|
dl/src/main/scala/com/intel/analytics/bigdl/models/rnn/Test.scala
|
Scala
|
apache-2.0
| 3,432
|
package io.tabmo.aeroless
trait AsEncoder[A] {
self =>
def encode(a: A): AsValue
def contramap[B](f: B => A): AsEncoder[B] = new AsEncoder[B] {
override def encode(b: B): AsValue = self.encode(f(b))
}
}
object AsEncoder {
import shapeless._
import shapeless.labelled._
import shapeless.ops.hlist.IsHCons
def instance[A](f: A => AsValue) = new AsEncoder[A] {
override def encode(a: A): AsValue = f(a)
}
implicit val longEncoder: AsEncoder[Long] = instance(AsLong)
implicit val stringEncoder: AsEncoder[String] = instance(AsString)
implicit def mapEncoder[V](implicit evV: AsEncoder[V]): AsEncoder[Map[String, V]] = instance(kv => AsObject(kv.mapValues(evV.encode)))
implicit def listEncoder[A](implicit ev: AsEncoder[A]): AsEncoder[List[A]] = instance { list =>
AsArray(list.map(ev.encode).toIndexedSeq)
}
implicit def optionEncoder[A](implicit ev: AsEncoder[A]): AsEncoder[Option[A]] = instance {
case Some(a) => ev.encode(a)
case None => AsNull
}
implicit val hnilEncoder: AsEncoder[HNil] = instance(_ => AsObject())
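  // Derives an encoder for a labelled HList: the head field is encoded under its record key and merged with
  // the encoding of the tail, so case classes can be encoded generically via LabelledGeneric below.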
implicit def hlistEncoder[K <: Symbol, H, T <: shapeless.HList](
implicit witness: Witness.Aux[K],
isHCons: IsHCons.Aux[H :: T, H, T],
hEncoder: Lazy[AsEncoder[H]],
tEncoder: Lazy[AsEncoder[T]]
): AsEncoder[FieldType[K, H] :: T] = instance { o =>
val head = AsObject(witness.value.name, hEncoder.value.encode(isHCons.head(o)))
val tail = tEncoder.value.encode(isHCons.tail(o))
head ++ tail.asInstanceOf[AsObject]
}
implicit def objectEncoder[A, Repr <: HList](
implicit gen: LabelledGeneric.Aux[A, Repr],
hlistEncoder: AsEncoder[Repr]
): AsEncoder[A] = instance { o =>
hlistEncoder.encode(gen.to(o))
}
def apply[A](implicit ev: AsEncoder[A]): AsEncoder[A] = ev
}
|
tabmo/Aeroless
|
src/main/scala/io/tabmo/aeroless/AsEncoder.scala
|
Scala
|
mit
| 1,803
|
/*
* Copyright 2017-2018 Azad Bolour
* Licensed under GNU Affero General Public License v3.0 -
* https://github.com/azadbolour/boardgame/blob/master/LICENSE.md
*/
package com.bolour.boardgame.scala.server.domain
import com.bolour.boardgame.scala.common.domain.PlayPiece
import com.bolour.plane.scala.domain.Point
import org.slf4j.LoggerFactory
/**
* Scores plays.
*
* @param pointValues 2D list of values of the board's points.
*/
class Scorer(pointValues: List[List[Int]]) {
val logger = LoggerFactory.getLogger(this.getClass)
// val multiplierGrid: Grid[ScoreMultiplier] = mkMultiplierGrid(dimension)
/**
* Score a play.
*
* @param playPieces Consecutive list of all play pieces for a play.
* Includes both moved and existing pieces forming the play word.
* @return The score of the play.
*/
def scorePlay(playPieces: List[PlayPiece]): Int = scoreWord(playPieces)
// playPieces map { pp => (pp.piece.value, pp.point, pp.moved)
/**
* Score an individual word.
*
* @param playPieces Consecutive list of all play pieces for a play.
* Includes both moved and existing pieces forming the play word.
* @return The score of the word.
*/
def scoreWord(playPieces: List[PlayPiece]): Int = {
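    // Only pieces newly placed in this play (moved) contribute to the score; letters already on the board add 0.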
val movedPoints = playPieces filter { _.moved } map {_.point}
def value: Point => Int = {case Point(row, col) => pointValues(row)(col)}
val score = movedPoints.foldLeft(0)((total, point) => total + value(point))
score
}
}
object Scorer {
type Score = Int
def apply(dimension: Int, trayCapacity: Int, pointValues: List[List[Int]]): Scorer =
new Scorer(pointValues)
}
|
azadbolour/boardgame
|
scala-server/app/com/bolour/boardgame/scala/server/domain/Scorer.scala
|
Scala
|
agpl-3.0
| 1,698
|
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.ctrl.utils
import cmwell.ctrl.checkers._
import cmwell.ctrl.config.Config
import cmwell.ctrl.hc._
import com.typesafe.scalalogging.LazyLogging
import org.slf4j.LoggerFactory
import play.api.libs.json.{JsString, Json}
/**
* Created by michael on 2/24/15.
*/
trait AlertReporter extends LazyLogging {
private[this] var shortNames = Map.empty[String, String]
private[this] var hostIps = Map.empty[String, Vector[String]]
def updateShortName(ip: String, sn: String) = {
val currentVal = shortNames.get(ip)
currentVal match {
case Some(v) => if (v != sn) shortNames = shortNames.updated(ip, sn)
case None => shortNames = shortNames.updated(ip, sn)
}
}
def updateIps(host: String, ips: Vector[String]) = {
hostIps = hostIps.updated(host, ips)
}
def getHostNameFromIp(ip: String): Option[String] = {
hostIps.find(_._2.contains(ip)).map(_._1)
}
val alertsLogger = LoggerFactory.getLogger("alerts")
object AlertColor {
def fromStatusColor(sc: StatusColor) = {
sc match {
case GreenStatus => GREEN
case YellowStatus => YELLOW
case RedStatus => RED
}
}
}
trait AlertColor {
def severity: String
}
case object GREEN extends AlertColor {
override def severity: String = "CLEAR"
}
case object YELLOW extends AlertColor {
override def severity: String = "MINOR"
}
case object RED extends AlertColor {
override def severity: String = "CRITICAL"
}
// def alert(msg : String, color : AlertColor = GREEN, host : Option[String] = None, eventKey : Option[String] = None, node : Option[String] = None) {
// logger.info(s"""CM-WELL-ALERT alert_status=$color severity=${color.severity} alert_message='$msg' EventSource=ctrl${if(host.isDefined)
// s" alert_host=${host.get}" else ""}${if(eventKey.isDefined) s" EventKey=${eventKey.get}" else ""}${if(node.isDefined) s" Node=${node.get}" else ""}""")
// }
case class Alert(msg: String,
color: AlertColor = GREEN,
host: Option[String] = None,
eventKey: Option[String] = None,
node: Option[String] = None) {
def json: String = {
val fields = Map(
"message" -> msg,
"host" -> host.flatMap(shortNames.get(_)).getOrElse(""),
"group" -> "CM-WELL",
"key" -> eventKey.getOrElse(""),
"severity" -> color.severity,
"status" -> color.toString,
"node" -> node.getOrElse("")
).filter(_._2.nonEmpty)
Json
.obj(
"event" -> JsString("trams.alert"),
"environment" -> JsString(Config.clusterName),
"trams" -> Json.obj(
"alert" -> Json.toJson(fields)
)
)
.toString
}
}
def alert(msg: String,
color: AlertColor = GREEN,
host: Option[String] = None,
eventKey: Option[String] = None,
node: Option[String] = None) {
val alertObj = Alert(msg, color, host, eventKey, node)
logger.info(
s"""CM-WELL-ALERT alert_status=$color severity=${color.severity} alert_message='$msg' EventSource=ctrl${if (host.isDefined)
s" alert_host=${host.get}"
else ""}${if (eventKey.isDefined) s" EventKey=${eventKey.get}" else ""}${if (node.isDefined) s" Node=${node.get}"
else ""}"""
)
alertsLogger.info(alertObj.json)
}
def alert(clusterEvent: ClusterEvent) {
clusterEvent match {
case NodesJoinedDetected(nodes) => alert(s"The nodes ${nodes.mkString(",")} joined the cluster.", GREEN)
case DownNodesDetected(nodes) => alert(s"The nodes ${nodes.mkString(",")} are down.", RED)
case EndOfGraceTime => alert("End of grace time.", YELLOW)
}
}
def alert(host: String, joinResponse: JoinResponse) {
joinResponse match {
case JoinOk => alert(s"$host was trying to join and was accepted.", GREEN, Some(host))
case JoinBootComponents =>
alert(
s"$host was trying to return to the cluster and was accepted. It will start cm-well components on this machine.",
GREEN,
Some(host)
)
case JoinShutdown =>
alert(s"$host was trying to return to the cluster but was rejected because to much time passed.",
YELLOW,
Some(host))
}
}
def alert(ce: ComponentEvent, hostName: Option[String]) {
ce match {
case WebNormalEvent(ip) => alert(s"Web is normal at $ip", GREEN, Some(ip), Some("Web"), hostName)
case WebBadCodeEvent(ip, code) =>
alert(s"Web is returning code $code at $ip", YELLOW, Some(ip), Some("Web"), hostName)
case WebDownEvent(ip) => alert(s"Web is down at $ip", RED, Some(ip), Some("Web"), hostName)
case BgOkEvent(ip) => alert(s"Bg is normal at $ip", GREEN, Some(ip), Some("Bg"), hostName)
case BgNotOkEvent(ip) => alert(s"Bg is not ok (stuck?) at $ip", RED, Some(ip), Some("Bg"), hostName)
case CassandraNodeNormalEvent(ip) =>
alert(s"Cassandra is normal at $ip", GREEN, getHostNameFromIp(ip), Some("Cassandra"), hostName)
case CassandraNodeDownEvent(ip) =>
alert(s"Cassandra is down at $ip", YELLOW, getHostNameFromIp(ip), Some("Cassandra"), hostName)
case CassandraNodeUnavailable(ip) =>
alert(s"Cassandra is unavailable at $ip", YELLOW, getHostNameFromIp(ip), Some("Cassandra"), hostName)
case ElasticsearchNodeGreenEvent(ip) =>
alert(s"Elasticsearch is green at $ip", GREEN, Some(ip), Some("Elasticsearch"), hostName)
case ElasticsearchNodeYellowEvent(ip) =>
alert(s"Elasticsearch is yellow at $ip", YELLOW, Some(ip), Some("Elasticsearch"), hostName)
case ElasticsearchNodeRedEvent(ip) =>
alert(s"Elasticsearch is red at $ip", RED, Some(ip), Some("Elasticsearch"), hostName)
case ElasticsearchNodeBadCodeEvent(ip, code) =>
alert(s"Elasticsearch is returning code $code at $ip", YELLOW, Some(ip), Some("Elasticsearch"), hostName)
case ElasticsearchNodeDownEvent(ip) =>
alert(s"Elasticsearch is down at $ip", RED, Some(ip), Some("Elasticsearch"), hostName)
case DcNormalEvent(dc, checker) =>
alert(s"Syncing data from data center $dc.", GREEN, Some(checker), Some("DataCenter"))
case DcNotSyncingEvent(dc, checker) =>
alert(s"Couldn't sync from data center $dc.", YELLOW, Some(checker), Some("DataCenter"))
case DcLostConnectionEvent(dc, checker) =>
alert(s"Couldn't sync from data center $dc for very long time.", RED, Some(checker), Some("DataCenter"))
case DcCouldNotGetDcEvent(dc, checker) =>
alert(s"Couldn't get meta data from the data center $dc.", YELLOW, Some(checker), Some("DataCenter"))
case DcCouldNotGetDcLongEvent(dc, checker) =>
alert(s"Couldn't get meta data from the data center $dc for long time.", RED, Some(checker), Some("DataCenter"))
case DcCouldNotGetDataEvent(checker) =>
alert(s"Couldn't get local data center meta data.", RED, Some(checker), Some("DataCenter"))
case ComponentChangedColorEvent(component, color) =>
alert(s"$component changed to $color", AlertColor.fromStatusColor(color), eventKey = Some(component))
case _ => //
}
}
}
|
hochgi/CM-Well
|
server/cmwell-controller/src/main/scala/cmwell/ctrl/utils/AlertReporter.scala
|
Scala
|
apache-2.0
| 7,896
|
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.race
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
/**
* Created by adarr on 3/17/2017.
*/
class Race$Test extends AnyFunSpec with Matchers {
describe("Race$Test") {
it("should determine Family") {
val elf = Race.HalfElf
val family = List(RaceFamily.Elven, RaceFamily.Human)
elf.families should contain atLeastOneElementOf family
}
}
}
|
adarro/ddo-calc
|
subprojects/common/ddo-core/src/test/scala/io/truthencode/ddo/model/race/Race$Test.scala
|
Scala
|
apache-2.0
| 1,127
|
package effechecka
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, SourceShape}
import akka.stream.scaladsl.{Concat, Flow, GraphDSL, Sink, Source}
import akka.util.ByteString
import com.typesafe.config.Config
import io.eels.Row
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import scala.concurrent.{Await, ExecutionContext}
import scala.concurrent.duration._
trait ChecklistFetcherHDFS
extends ChecklistFetcher
with ParquetReaderIterator
with HDFSUtil {
implicit def config: Config
protected implicit val configHadoop: Configuration
protected implicit val fs: FileSystem
implicit val materializer: ActorMaterializer
def tsvFor(checklist: ChecklistRequest): Source[ByteString, NotUsed] = {
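    // Streams the checklist as TSV: a header row ("taxonName", "taxonPath", "recordCount") followed by one
    // line per checklist item.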
val checklistGraph = GraphDSL.create(toSourceShape(checklist = checklist)) { implicit builder =>
(checklist) =>
import GraphDSL.Implicits._
val toByteString = Flow[ChecklistItem]
.map(item => {
val taxonName = taxonNameFor(item)
ByteString(s"\\n$taxonName\\t${item.taxon}\\t${item.recordcount}")
})
val out = builder.add(toByteString)
checklist ~> out
SourceShape(out.out)
}
val occurrenceSource = Source.fromGraph(checklistGraph)
    val header = Source.single[ByteString](ByteString(Seq("taxonName", "taxonPath", "recordCount").mkString("\t")))
Source.combine(header, occurrenceSource)(Concat[ByteString])
}
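  // The taxon path is pipe-delimited; the last non-empty segment (most specific rank) is used as the display name.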
protected def taxonNameFor(item: ChecklistItem): String = {
    item.taxon.split("""\|""").filter(_.trim.nonEmpty).reverse.headOption.getOrElse("")
}
private def sourceForItems(checklist: ChecklistRequest): Source[ChecklistItem, NotUsed] = {
Source.fromGraph(toSourceShape(checklist))
}
private def toSourceShape(checklist: ChecklistRequest) = {
GraphDSL.create(new ParquetReaderSourceShape(checklistPath(checklist, "checklist/", ""), checklist.limit)) { implicit builder =>
(checklist) =>
import GraphDSL.Implicits._
val toItems = Flow[Row]
.map(row => ChecklistItem(row.get("taxonPath").toString, Integer.parseInt(row.get("recordCount").toString)))
val out = builder.add(toItems)
checklist ~> out
SourceShape(out.out)
}
}
def itemsFor(checklist: ChecklistRequest): Iterator[ChecklistItem] = {
val runWith = sourceForItems(checklist).runWith(Sink.seq)
Await.result(runWith, 30.second).iterator
}
private def checklistExists(checklist: ChecklistRequest) = {
val runWith = sourceForItems(checklist).runWith(Sink.seq)
Await.result(runWith, 30.second).iterator.nonEmpty
}
private def checklistPath(checklist: ChecklistRequest, prefix: String, suffix: String) = {
patternFor(prefix + pathForChecklist(checklist.selector) + suffix)
}
def pathForChecklist(occurrenceSelector: Selector): String = {
pathForSelector(occurrenceSelector)
}
def statusOf(checklist: ChecklistRequest): Option[String] = {
if (checklistExists(checklist)) Some("ready") else None
}
}
|
jhpoelen/effechecka
|
src/main/scala/effechecka/ChecklistFetcherHDFS.scala
|
Scala
|
mit
| 3,102
|
package com.twitter.finagle.loadbalancer
import com.twitter.util.{Future, Time}
import com.twitter.finagle.{Service, ServiceFactory}
import com.twitter.finagle.stats.NullStatsReceiver
object Benchmark {
// todo: simulate distributions of loads.
private[this] val N = 1<<19
private[this] val W = 1 //1000
private[this] val F = 500
private[this] val loads = new Array[Int](F)
private[this] val service = new Service[Int, Int] {
def apply(req: Int) = null
}
private[this] class LoadedServiceFactory(i: Int) extends ServiceFactory[Int, Int] {
def make() = { loads(i) += 1; Future.value(service) }
def close = ()
}
private[this] val factories = 0 until F map { i => new LoadedServiceFactory(i) }
def reset() {
0 until loads.size foreach { i => loads(i) = 0 }
}
def bench(factory: ServiceFactory[_, _]) = Time.measure {
val outstanding = new Array[Service[_, _]](W)
0 until N foreach { i =>
val j = i % W
// todo: jitter in release. pick one at random.
if (outstanding(j) ne null) outstanding(j).release()
outstanding(j) = factory.make()()
}
}
def go(factory: ServiceFactory[_, _], name: String) = {
printf("warming up %s..\n", name)
bench(factory)
reset()
printf("measuring %s..\n", name)
val elapsed = bench(factory)
printf("%dms - %d ops/sec\n",
elapsed.inMilliseconds, 1000 * N/elapsed.inMilliseconds)
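    // Compress the sorted per-factory load counts into (load, occurrences) pairs to print a compact histogram.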
val histo = loads.sorted.foldLeft(Nil: List[(Int, Int)]) {
case ((load, count) :: rest, thisLoad) if load == thisLoad =>
(load, count + 1) :: rest
case (rest, thisLoad) =>
(thisLoad, 1) :: rest
}
printf("loads: %s\n", histo mkString " ")
}
def main(args: Array[String]) {
val leastQueued = new LoadBalancedFactory(
factories, NullStatsReceiver, new LeastQueuedStrategy)
val heap = new HeapBalancer(factories)
go(leastQueued, "LeastQueued")
go(heap, "Heap")
}
}
|
enachb/finagle_2.9_durgh
|
finagle-core/src/main/scala/com/twitter/finagle/loadbalancer/Benchmark.scala
|
Scala
|
apache-2.0
| 1,952
|
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.spark
import java.{util => ju}
import java.util.{Map => JMap}
import com.typesafe.scalalogging.LazyLogging
import com.vividsolutions.jts.geom.{Coordinate, GeometryFactory, Point}
import org.apache.spark.sql.{DataFrame, SQLContext, SparkSession}
import org.geotools.data.{DataStore, DataStoreFinder}
import org.geotools.geometry.jts.JTSFactoryFinder
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.interop.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import org.apache.spark.sql.SQLTypes
import org.apache.spark.sql.catalyst.plans.logical.Filter
import org.apache.spark.sql.execution.datasources.LogicalRelation
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class SparkSQLDataTest extends Specification with LazyLogging {
val createPoint = JTSFactoryFinder.getGeometryFactory.createPoint(_: Coordinate)
"sql data tests" should {
sequential
val dsParams: JMap[String, String] = Map("cqengine" -> "true", "geotools" -> "true")
var ds: DataStore = null
var spark: SparkSession = null
var sc: SQLContext = null
var df: DataFrame = null
var dfIndexed: DataFrame = null
var dfPartitioned: DataFrame = null
// before
step {
ds = DataStoreFinder.getDataStore(dsParams)
spark = SparkSQLTestUtils.createSparkSession()
sc = spark.sqlContext
SQLTypes.init(sc)
}
"ingest chicago" >> {
SparkSQLTestUtils.ingestChicago(ds)
df = spark.read
.format("geomesa")
.options(dsParams)
.option("geomesa.feature", "chicago")
.load()
logger.info(df.schema.treeString)
df.createOrReplaceTempView("chicago")
df.collect.length mustEqual 3
}
"create indexed relation" >> {
dfIndexed = spark.read
.format("geomesa")
.options(dsParams)
.option("geomesa.feature", "chicago")
.option("cache", "true")
.load()
logger.info(df.schema.treeString)
dfIndexed.createOrReplaceTempView("chicagoIndexed")
dfIndexed.collect.length mustEqual 3
}
"create spatially partitioned relation" >> {
dfPartitioned = spark.read
.format("geomesa")
.options(dsParams)
.option("geomesa.feature", "chicago")
.option("cache", "true")
.option("spatial","true")
.option("strategy", "RTREE")
.load()
logger.info(df.schema.treeString)
dfPartitioned.createOrReplaceTempView("chicagoPartitioned")
      // Deduplicate by feature id, since a feature may fall into multiple partition envelopes
// TODO: Better way
val hashSet = new ju.HashSet[String]()
dfPartitioned.collect.foreach{ row =>
hashSet.add(row.getAs[String]("__fid__"))
}
hashSet.size() mustEqual 3
}
"handle projections on in-memory store" >> {
val r = sc.sql("select geom from chicagoIndexed where case_number = 1")
val d = r.collect
d.length mustEqual 1
val row = d(0)
row.schema.fieldNames.length mustEqual 1
row.fieldIndex("geom") mustEqual 0
}
"basic sql indexed" >> {
val r = sc.sql("select * from chicagoIndexed where st_equals(geom, st_geomFromWKT('POINT(-76.5 38.5)'))")
val d = r.collect
d.length mustEqual 1
d.head.getAs[Point]("geom") mustEqual createPoint(new Coordinate(-76.5, 38.5))
}
"basic sql partitioned" >> {
sc.sql("select * from chicagoPartitioned").show()
val r = sc.sql("select * from chicagoPartitioned where st_equals(geom, st_geomFromWKT('POINT(-77 38)'))")
val d = r.collect
d.length mustEqual 1
d.head.getAs[Point]("geom") mustEqual createPoint(new Coordinate(-77, 38))
}
"basic sql 1" >> {
val r = sc.sql("select * from chicago where st_equals(geom, st_geomFromWKT('POINT(-76.5 38.5)'))")
val d = r.collect
d.length mustEqual 1
d.head.getAs[Point]("geom") mustEqual createPoint(new Coordinate(-76.5, 38.5))
}
"basic sql 4" >> {
val r = sc.sql("select 1 + 1 > 4")
val d = r.collect
d.length mustEqual 1
}
"basic sql 5" >> {
val r = sc.sql("select * from chicago where case_number = 1 and st_intersects(geom, st_makeBox2d(st_point(-77, 38), st_point(-76, 39)))")
val d = r.collect
d.length mustEqual 1
}
"basic sql 6" >> {
val r = sc.sql("select st_intersects(st_makeBox2d(st_point(-77, 38), st_point(-76, 39)), st_makeBox2d(st_point(-77, 38), st_point(-76, 39)))")
val d = r.collect
d.length mustEqual 1
}
"pushdown spatial predicates" >> {
val pushdown = sc.sql("select geom from chicago where st_intersects(st_makeBox2d(st_point(-77, 38), st_point(-76, 39)), geom)")
val pushdownPlan = pushdown.queryExecution.optimizedPlan
val pushdownDF = df.where("st_intersects(st_makeBox2D(st_point(-77, 38), st_point(-76, 39)), geom)")
val pushdownDFPlan = pushdownDF.queryExecution.optimizedPlan
val noPushdown = sc.sql("select geom from chicago where __fid__ = 1")
val noPushdownPlan = noPushdown.queryExecution.optimizedPlan
pushdownPlan.children.head.isInstanceOf[LogicalRelation] mustEqual true // filter is pushed down
pushdownDFPlan.isInstanceOf[LogicalRelation] mustEqual true // filter is pushed down
noPushdownPlan.children.head.isInstanceOf[Filter] mustEqual true // filter remains at top level
}
"pushdown attribute filters" >> {
val pushdown = sc.sql("select geom from chicago where case_number = 1")
val pushdownPlan = pushdown.queryExecution.optimizedPlan
val pushdownDF = df.where("case_number = 1")
val pushdownDFPlan = pushdownDF.queryExecution.optimizedPlan
val noPushdown = sc.sql("select geom from chicago where __fid__ = 1")
val noPushdownPlan = noPushdown.queryExecution.optimizedPlan
pushdownPlan.children.head.isInstanceOf[LogicalRelation] mustEqual true // filter is pushed down
pushdownDFPlan.isInstanceOf[LogicalRelation] mustEqual true // filter is pushed down
noPushdownPlan.children.head.isInstanceOf[Filter] mustEqual true // filter remains at top level
}
"pushdown attribute comparison filters" >> {
val pushdownLt = sc.sql("select case_number from chicago where case_number < 2")
val pushdownLte = sc.sql("select case_number from chicago where case_number <= 2")
val pushdownGt = sc.sql("select case_number from chicago where case_number > 2")
val pushdownGte = sc.sql("select case_number from chicago where case_number >= 2")
// ensure all 4 were pushed down
val queries = Seq(pushdownLt, pushdownLte, pushdownGt, pushdownGte)
val plans = queries.map{ q => q.queryExecution.optimizedPlan.children.head.getClass }.toArray
plans mustEqual Array.fill(4)(classOf[LogicalRelation])
// ensure correct results
pushdownLt.first().get(0) mustEqual 1
pushdownLte.collect().map{ r=> r.get(0) } mustEqual Array(1, 2)
pushdownGt.first().get(0) mustEqual 3
pushdownGte.collect().map{ r=> r.get(0) } mustEqual Array(2, 3)
}
"st_translate" >> {
"null" >> {
sc.sql("select st_translate(null, null, null)").collect.head(0) must beNull
}
"point" >> {
val r = sc.sql(
"""
|select st_translate(st_geomFromWKT('POINT(0 0)'), 5, 12)
""".stripMargin)
r.collect().head.getAs[Point](0) mustEqual WKTUtils.read("POINT(5 12)")
}
}
"where __fid__ equals" >> {
val r = sc.sql("select * from chicago where __fid__ = '1'")
val d = r.collect()
d.length mustEqual 1
d.head.getAs[Int]("case_number") mustEqual 1
}
"where attr equals" >> {
val r = sc.sql("select * from chicago where case_number = 2")
val d = r.collect()
d.length mustEqual 1
d.head.getAs[Int]("case_number") mustEqual 2
}
"where __fid__ in" >> {
val r = sc.sql("select * from chicago where __fid__ in ('1', '2')")
val d = r.collect()
d.length mustEqual 2
d.map(_.getAs[Int]("case_number")).toSeq must containTheSameElementsAs(Seq(1, 2))
}
"where attr in" >> {
val r = sc.sql("select * from chicago where case_number in (2, 3)")
val d = r.collect()
d.length mustEqual 2
d.map(_.getAs[Int]("case_number")).toSeq must containTheSameElementsAs(Seq(2, 3))
}
"sweepline join" >> {
val gf = new GeometryFactory
val points = SparkSQLTestUtils.generatePoints(gf, 1000)
SparkSQLTestUtils.ingestPoints(ds, "points", points)
val polys = SparkSQLTestUtils.generatePolys(gf, 1000)
SparkSQLTestUtils.ingestGeometries(ds, "polys", polys)
val polysDf = spark.read
.format("geomesa")
.options(dsParams)
.option("geomesa.feature", "polys")
.load()
val pointsDf = spark.read
.format("geomesa")
.options(dsParams)
.option("geomesa.feature", "points")
.load()
val partitionedPolys = spark.read
.format("geomesa")
.options(dsParams)
.option("geomesa.feature", "polys")
.option("spatial","true")
.option("strategy", "EARTH")
.option("partitions","10")
.load()
val partitionedPoints = spark.read
.format("geomesa")
.options(dsParams)
.option("geomesa.feature", "points")
.option("spatial","true")
.option("strategy", "EARTH")
.option("partitions","10")
.load()
partitionedPolys.createOrReplaceTempView("polysSpatial")
partitionedPoints.createOrReplaceTempView("pointsSpatial")
pointsDf.createOrReplaceTempView("points")
polysDf.createOrReplaceTempView("polys")
var now = System.currentTimeMillis()
val r1 = spark.sql("select * from polys join points on st_intersects(points.geom, polys.geom)")
val count = r1.count()
logger.info(s"Regular join took ${System.currentTimeMillis() - now}ms")
now = System.currentTimeMillis()
val r2 = spark.sql("select * from polysSpatial join pointsSpatial on st_intersects(pointsSpatial.geom, polysSpatial.geom)")
val sweeplineCount = r2.count()
logger.info(s"Sweepline join took ${System.currentTimeMillis() - now}ms")
sweeplineCount mustEqual count
}
// after
step {
ds.dispose()
spark.stop()
}
}
}
|
jahhulbert-ccri/geomesa
|
geomesa-spark/geomesa-spark-sql/src/test/scala/org/locationtech/geomesa/spark/SparkSQLDataTest.scala
|
Scala
|
apache-2.0
| 10,940
|