| code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 5-1M) |
|---|---|---|---|---|---|
package org.jetbrains.plugins.scala
package lang.refactoring.memberPullUp
import com.intellij.openapi.project.Project
import com.intellij.psi.codeStyle.CodeStyleManager
import com.intellij.psi.{PsiDocumentManager, PsiElement}
import com.intellij.refactoring.{BaseRefactoringProcessor, RefactoringBundle}
import com.intellij.usageView.{UsageInfo, UsageViewDescriptor}
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.{TypeAdjuster, ScalaPsiUtil}
import org.jetbrains.plugins.scala.lang.psi.api.ScalaRecursiveElementVisitor
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScBindingPattern
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScSimpleTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScMember, ScTemplateDefinition}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.types.result.{Success, TypingContext}
import org.jetbrains.plugins.scala.lang.refactoring.extractTrait.ScalaExtractMemberInfo
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaChangeContextUtil
import scala.collection.mutable.ArrayBuffer
/**
* Nikolay.Tropin
* 2014-05-27
*/
class ScalaPullUpProcessor(project: Project,
sourceClass: ScTemplateDefinition,
targetClass: ScTemplateDefinition,
memberInfos: Seq[ScalaExtractMemberInfo]) extends BaseRefactoringProcessor(project) {
override def createUsageViewDescriptor(usages: Array[UsageInfo]): UsageViewDescriptor =
new PullUpUsageViewDescriptor
override def getCommandName = RefactoringBundle.message("pullUp.command", sourceClass.name)
override def performRefactoring(usages: Array[UsageInfo]) = {
}
override def findUsages() = Array[UsageInfo]()
/**
* Should be invoked in write action
* */
def moveMembersToBase() {
val manager = targetClass.getManager
val extendsBlock = targetClass.extendsBlock
val templateBody = extendsBlock.templateBody match {
case Some(tb) => tb
case None => extendsBlock.add(ScalaPsiElementFactory.createTemplateBody(manager))
}
val anchor = templateBody.getLastChild
val collectImportScope = memberInfos.collect {
case ScalaExtractMemberInfo(m, false) => m
} //extracted declarations are handled with ScalaPsiUtil.adjustTypes
ScalaChangeContextUtil.encodeContextInfo(collectImportScope)
extensions.withDisabledPostprocessFormatting(project) {
val movedDefinitions = ArrayBuffer[ScMember]()
for {
info <- memberInfos
memberCopy <- memberCopiesToExtract(info)
} {
handleOldMember(info)
templateBody.addBefore(ScalaPsiElementFactory.createNewLine(manager), anchor)
val added = templateBody.addBefore(memberCopy, anchor).asInstanceOf[ScMember]
if (info.isToAbstract) TypeAdjuster.markToAdjust(added)
else movedDefinitions += added
}
templateBody.addBefore(ScalaPsiElementFactory.createNewLine(manager), anchor)
ScalaChangeContextUtil.decodeContextInfo(movedDefinitions)
}
for (tb <- sourceClass.extendsBlock.templateBody if tb.members.isEmpty) {
tb.delete()
}
reformatAfter()
}
private def reformatAfter() {
val documentManager = PsiDocumentManager.getInstance(project)
val csManager = CodeStyleManager.getInstance(project)
val targetDocument = documentManager.getDocument(targetClass.getContainingFile)
documentManager.doPostponedOperationsAndUnblockDocument(targetDocument)
csManager.reformat(targetClass)
val sourceDocument = documentManager.getDocument(sourceClass.getContainingFile)
documentManager.doPostponedOperationsAndUnblockDocument(sourceDocument)
csManager.adjustLineIndent(sourceClass.getContainingFile, sourceClass.getTextRange)
}
private def memberCopiesToExtract(info: ScalaExtractMemberInfo): Seq[ScMember] = {
info match {
case ScalaExtractMemberInfo(decl: ScDeclaration, _) =>
val member = decl.copy().asInstanceOf[ScMember]
Seq(member)
case ScalaExtractMemberInfo(m, true) =>
declarationsText(m).map(ScalaPsiElementFactory.createDeclarationFromText(_, m.getParent, m).asInstanceOf[ScMember])
case ScalaExtractMemberInfo(m, false) if m.hasModifierProperty("override") =>
val copy = m.copy().asInstanceOf[ScMember]
copy.setModifierProperty("override", value = false)
val shift = "override ".length
ScalaChangeContextUtil.shiftAssociations(copy, - shift)
Seq(copy)
case _ => Seq(info.getMember.copy().asInstanceOf[ScMember])
}
}
private def handleOldMember(info: ScalaExtractMemberInfo) = {
info match {
case ScalaExtractMemberInfo(m: ScDeclaration, _) => m.delete()
case ScalaExtractMemberInfo(m, false) => m.delete()
case ScalaExtractMemberInfo(m, true) => m.setModifierProperty("override", value = true)
}
}
private def declarationsText(m: ScMember): Seq[String] = {
def textForBinding(b: ScBindingPattern) = {
val typeText = b.getType(TypingContext.empty) match {
case Success(t, _) => s": ${t.canonicalText}"
case _ => ""
}
s"${b.name}$typeText"
}
m match {
case decl: ScDeclaration => Seq(decl.getText)
case funDef: ScFunctionDefinition =>
val copy = funDef.copy().asInstanceOf[ScFunctionDefinition]
copy.setModifierProperty("override", value = false)
Seq(copy.assignment, copy.body).flatten.foreach(_.delete())
copy.accept(new ScalaRecursiveElementVisitor() {
override def visitSimpleTypeElement(te: ScSimpleTypeElement) = {
val tpe = te.calcType
te.replace(ScalaPsiElementFactory.createTypeElementFromText(tpe.canonicalText, te.getManager))
}
})
Seq(copy.getText)
case valDef: ScPatternDefinition =>
val copy = valDef.copy().asInstanceOf[ScPatternDefinition]
copy.bindings.collect {
case b: ScBindingPattern => "val " + textForBinding(b)
}
case varDef: ScVariableDefinition =>
val copy = varDef.copy().asInstanceOf[ScVariableDefinition]
copy.bindings.collect {
case b: ScBindingPattern => "var " + textForBinding(b)
}
case ta: ScTypeAliasDefinition =>
val copy = ta.copy().asInstanceOf[ScTypeAliasDefinition]
Seq(
Option(copy.findFirstChildByType(ScalaTokenTypes.tASSIGN)),
Option(copy.findFirstChildByType(ScalaTokenTypes.tUPPER_BOUND)),
Option(copy.findFirstChildByType(ScalaTokenTypes.tLOWER_BOUND)),
Option(copy.aliasedTypeElement)
).flatten.foreach(_.delete())
Seq(copy.getText)
case _ => throw new IllegalArgumentException(s"Cannot create declaration text from member ${m.getText}")
}
}
private class PullUpUsageViewDescriptor extends UsageViewDescriptor {
def getProcessedElementsHeader: String = "Pull up members from"
def getElements: Array[PsiElement] = Array[PsiElement](sourceClass)
def getCodeReferencesText(usagesCount: Int, filesCount: Int): String =
s"Class to pull up members to ${targetClass.name}"
def getCommentReferencesText(usagesCount: Int, filesCount: Int): String = null
}
}
| whorbowicz/intellij-scala | src/org/jetbrains/plugins/scala/lang/refactoring/memberPullUp/ScalaPullUpProcessor.scala | Scala | apache-2.0 | 7,441 |
/**
* Copyright 2013 Jim Burton.
* Copyright 2014 Kangmo Kim
*
* Licensed under the MIT license (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://opensource.org/licenses/mit-license.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.nhnsoft.bitcoin.crypto;
import org.bitcoinj.wallet.Protos.Wallet.EncryptionType;
import org.spongycastle.crypto.params.KeyParameter;
import java.io.Serializable;
/**
* <p>A KeyCrypter can be used to encrypt and decrypt a message. The sequence of events to encrypt and then decrypt
* a message is as follows:</p>
*
* <p>(1) Ask the user for a password. deriveKey() is then called to create a KeyParameter. This contains the AES
* key that will be used for encryption.</p>
* <p>(2) Encrypt the message using encrypt(), providing the message bytes and the KeyParameter from (1). This returns
* an EncryptedData which contains the encryptedPrivateKey bytes and an initialisation vector.</p>
* <p>(3) To decrypt an EncryptedData, repeat step (1) to get a KeyParameter, then call decrypt().</p>
*
* <p>There can be different algorithms used for encryption/ decryption so the getUnderstoodEncryptionType is used
* to determine whether any given KeyCrypter can understand the type of encrypted data you have.</p>
*/
trait KeyCrypter extends Serializable {
/**
* Return the EncryptionType enum value which denotes the type of encryption/ decryption that this KeyCrypter
* can understand.
*/
def getUnderstoodEncryptionType() : EncryptionType
/**
* Create a KeyParameter (which typically contains an AES key)
* @param password
* @return KeyParameter The KeyParameter which typically contains the AES key to use for encrypting and decrypting
* @throws KeyCrypterException
*/
@throws( classOf[KeyCrypterException] )
def deriveKey(password : CharSequence) : KeyParameter
/**
* Decrypt the provided encrypted bytes, converting them into unencrypted bytes.
*
* @throws KeyCrypterException if decryption was unsuccessful.
*/
@throws( classOf[KeyCrypterException] )
def decrypt(encryptedBytesToDecode : EncryptedData, aesKey : KeyParameter) : Array[Byte]
/**
* Encrypt the supplied bytes, converting them into ciphertext.
*
* @return encryptedPrivateKey An encryptedPrivateKey containing the encrypted bytes and an initialisation vector.
* @throws KeyCrypterException if encryption was unsuccessful
*/
@throws( classOf[KeyCrypterException] )
def encrypt(plainBytes : Array[Byte], aesKey : KeyParameter) : EncryptedData
}
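// Hedged usage sketch (not part of the original file): a minimal illustration of the
// deriveKey -> encrypt -> decrypt sequence described in the trait's documentation above.
// It relies only on the KeyCrypter trait; the `crypter` instance, password and plain bytes
// are hypothetical values supplied by the caller.
object KeyCrypterUsageSketch {
  def roundTrip(crypter: KeyCrypter, password: CharSequence, plainBytes: Array[Byte]): Array[Byte] = {
    // (1) Derive the AES key (wrapped in a KeyParameter) from the user's password.
    val aesKey: KeyParameter = crypter.deriveKey(password)
    // (2) Encrypt the plain bytes; the result carries the ciphertext plus an initialisation vector.
    val encrypted: EncryptedData = crypter.encrypt(plainBytes, aesKey)
    // (3) Decrypt using a key derived from the same password (here, the same KeyParameter).
    crypter.decrypt(encrypted, aesKey)
  }
}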
| Kangmo/bitcoinj | core/src/main/scala/com/nhnsoft/bitcoin/crypto/KeyCrypter.scala | Scala | apache-2.0 | 2,978 |
package org.zalando.nakadi.client.utils
import java.util.Optional
import java.util.concurrent.TimeUnit
import scala.collection.JavaConversions._
import scala.concurrent.Await
import scala.concurrent.duration.Duration
object FutureConversions {
//
private def extractEither[T](either: Either[String, T]): T = either match {
case Left(error) => throw new RuntimeException(error)
case Right(t) => t
}
def fromOption2Optional[T](in: scala.concurrent.Future[Option[T]]): java.util.concurrent.Future[Optional[T]] = {
new MFuture[Option[T], Optional[T]](in, a => fromOptional2Optional(a))
}
def fromOption2Void[T](in: scala.concurrent.Future[Option[T]]): java.util.concurrent.Future[Void] = {
new MFuture[Option[T], Void](in, a => null)
}
def fromFuture2Future[T](in: scala.concurrent.Future[T]): java.util.concurrent.Future[T] = {
new MFuture[T, T](in, a => a)
}
def fromFuture2FutureVoid[T](in: scala.concurrent.Future[T]): java.util.concurrent.Future[Void] = {
new MFuture[T, Void](in, a => null)
}
private def fromSequenceToList[T](in: Seq[T]): Optional[java.util.List[T]] =
in match {
case Nil => Optional.empty()
case seq =>
Optional.of(new java.util.ArrayList[T](seq))
}
private def fromOptional2Optional[R](in: Option[R]): Optional[R] = in match {
case Some(value) => Optional.of(value)
case None => Optional.empty()
}
private def convert[T](x: scala.concurrent.Future[Either[String, T]]): java.util.concurrent.Future[T] =
new MFuture[Either[String, T], T](x, a => extractEither(a))
}
private class MFuture[A, B](f: scala.concurrent.Future[A], converter: A => B) extends java.util.concurrent.Future[B] {
override def isCancelled: Boolean =
throw new UnsupportedOperationException
override def get(): B =
converter.apply(Await.result(f, Duration.Inf))
override def get(timeout: Long, unit: TimeUnit): B =
converter.apply(Await.result(f, Duration.create(timeout, unit)))
override def cancel(mayInterruptIfRunning: Boolean): Boolean =
throw new UnsupportedOperationException
override def isDone: Boolean = f.isCompleted
}
| zalando/nakadi-klients | client/src/main/scala/org/zalando/nakadi/client/utils/FutureConversions.scala | Scala | mit | 2,164 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package unit.kafka.security.auth
import kafka.common.KafkaException
import kafka.security.auth.{ResourceType, Topic}
import org.junit.{Test, Assert}
import org.scalatest.junit.JUnitSuite
class ResourceTypeTest extends JUnitSuite {
@Test
def testFromString(): Unit = {
val resourceType = ResourceType.fromString("Topic")
Assert.assertEquals(Topic, resourceType)
try {
ResourceType.fromString("badName")
fail("Expected exception on invalid ResourceType name.")
} catch {
case e: KafkaException => "Expected."
}
}
}
| usakey/kafka | core/src/test/scala/unit/kafka/security/auth/ResourceTypeTest.scala | Scala | apache-2.0 | 1,361 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.action.ws2
import io.gatling.commons.validation.Validation
import io.gatling.core.action.{ Action, ExitableAction, RequestAction }
import io.gatling.core.session._
import io.gatling.core.stats.StatsEngine
import io.gatling.core.util.NameGen
import io.gatling.http.action.async.ws.WsAction
import io.gatling.http.action.ws2.fsm.SendTextMessage
class WsSend(
override val requestName: Expression[String],
wsName: String,
message: Expression[String],
checkSequences: List[WsCheckSequence],
val statsEngine: StatsEngine,
val next: Action
) extends RequestAction with WsAction with ExitableAction with NameGen {
override val name = genName("wsSend")
override def sendRequest(requestName: String, session: Session): Validation[Unit] =
for {
wsActor <- fetchActor(wsName, session)
message <- message(session)
} yield {
logger.info(s"Sending message $message with websocket '$wsName': Scenario '${session.scenario}', UserId #${session.userId}")
wsActor ! SendTextMessage(requestName, message, checkSequences, session, next)
}
}
| MykolaB/gatling | gatling-http/src/main/scala/io/gatling/http/action/ws2/WsSend.scala | Scala | apache-2.0 | 1,800 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.torch
import com.intel.analytics.bigdl.nn.Euclidean
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.RandomGenerator._
import scala.util.Random
@com.intel.analytics.bigdl.tags.Serial
class EuclideanSpec extends TorchSpec {
"A Euclidean " should "generate correct output and grad with input one dimension" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val input = Tensor[Double](7).apply1(e => Random.nextDouble())
val gradOutput = Tensor[Double](7).apply1(e => Random.nextDouble())
val code = "torch.manualSeed(" + seed + ")\\n" +
"module = nn.Euclidean(7, 7)\\n" +
"weight = module.weight\\n" +
"output = module:forward(input)\\n" +
"module:zeroGradParameters()\\n" +
"gradInput = module:backward(input,gradOutput)\\n" +
"gradWeight = module.gradWeight\\n" +
"_repeat2 = module._repeat2\\n"
val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "gradOutput" -> gradOutput),
Array("output", "gradInput", "weight", "gradWeight", "_repeat2"))
val luaOutput1 = torchResult("output").asInstanceOf[Tensor[Double]]
val luaOutput2 = torchResult("gradInput").asInstanceOf[Tensor[Double]]
val luaWeight = torchResult("weight").asInstanceOf[Tensor[Double]]
val luaGradWeight = torchResult("gradWeight").asInstanceOf[Tensor[Double]]
val module = new Euclidean[Double](7, 7)
val start = System.nanoTime()
val output = module.forward(input)
val gradInput = module.backward(input, gradOutput)
val weight = module.weight
val gradWeight = module.gradWeight
val end = System.nanoTime()
val scalaTime = end - start
weight should be(luaWeight)
output should be(luaOutput1)
gradInput should be(luaOutput2)
gradWeight should be(luaGradWeight)
println("Test case : Euclidean, Torch : " + luaTime +
" s, Scala : " + scalaTime / 1e9 + " s")
}
"A Euclidean " should "generate correct output and grad with input two dimensions" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val input = Tensor[Double](8, 7).apply1(e => Random.nextDouble())
val gradOutput = Tensor[Double](8, 7).apply1(e => Random.nextDouble())
val code = "torch.manualSeed(" + seed + ")\\n" +
"module = nn.Euclidean(7, 7)\\n" +
"weight = module.weight\\n" +
"output = module:forward(input)\\n" +
"module:zeroGradParameters()\\n" +
"gradInput = module:backward(input,gradOutput)\\n" +
"gradWeight = module.gradWeight\\n" +
"_repeat2 = module._repeat2\\n"
val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "gradOutput" -> gradOutput),
Array("output", "gradInput", "weight", "gradWeight", "_repeat2"))
val luaOutput1 = torchResult("output").asInstanceOf[Tensor[Double]]
val luaOutput2 = torchResult("gradInput").asInstanceOf[Tensor[Double]]
val luaWeight = torchResult("weight").asInstanceOf[Tensor[Double]]
val luaGradWeight = torchResult("gradWeight").asInstanceOf[Tensor[Double]]
val module = new Euclidean[Double](7, 7)
val start = System.nanoTime()
val output = module.forward(input)
val gradInput = module.backward(input, gradOutput)
val weight = module.weight
val gradWeight = module.gradWeight
val end = System.nanoTime()
val scalaTime = end - start
weight should be(luaWeight)
output should be(luaOutput1)
gradInput should be(luaOutput2)
gradWeight should be(luaGradWeight)
println("Test case : Euclidean, Torch : " + luaTime +
" s, Scala : " + scalaTime / 1e9 + " s")
}
}
| psyyz10/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/EuclideanSpec.scala | Scala | apache-2.0 | 4,240 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.sql.test.SharedSQLContext
class SubquerySuite extends QueryTest with SharedSQLContext {
import testImplicits._
setupTestData()
val row = identity[(java.lang.Integer, java.lang.Double)](_)
lazy val l = Seq(
row(1, 2.0),
row(1, 2.0),
row(2, 1.0),
row(2, 1.0),
row(3, 3.0),
row(null, null),
row(null, 5.0),
row(6, null)).toDF("a", "b")
lazy val r = Seq(
row(2, 3.0),
row(2, 3.0),
row(3, 2.0),
row(4, 1.0),
row(null, null),
row(null, 5.0),
row(6, null)).toDF("c", "d")
lazy val t = r.filter($"c".isNotNull && $"d".isNotNull)
protected override def beforeAll(): Unit = {
super.beforeAll()
l.createOrReplaceTempView("l")
r.createOrReplaceTempView("r")
t.createOrReplaceTempView("t")
}
test("SPARK-18854 numberedTreeString for subquery") {
val df = sql("select * from range(10) where id not in " +
"(select id from range(2) union all select id from range(2))")
// The depth first traversal of the plan tree
val dfs = Seq("Project", "Filter", "Union", "Project", "Range", "Project", "Range", "Range")
val numbered = df.queryExecution.analyzed.numberedTreeString.split("\n")
// There should be 8 plan nodes in total
assert(numbered.size == dfs.size)
for (i <- dfs.indices) {
val node = df.queryExecution.analyzed(i)
assert(node.nodeName == dfs(i))
assert(numbered(i).contains(node.nodeName))
}
}
test("rdd deserialization does not crash [SPARK-15791]") {
sql("select (select 1 as b) as b").rdd.count()
}
test("simple uncorrelated scalar subquery") {
checkAnswer(
sql("select (select 1 as b) as b"),
Array(Row(1))
)
checkAnswer(
sql("select (select (select 1) + 1) + 1"),
Array(Row(3))
)
// string type
checkAnswer(
sql("select (select 's' as s) as b"),
Array(Row("s"))
)
}
test("define CTE in CTE subquery") {
checkAnswer(
sql(
"""
| with t2 as (with t1 as (select 1 as b, 2 as c) select b, c from t1)
| select a from (select 1 as a union all select 2 as a) t
| where a = (select max(b) from t2)
""".stripMargin),
Array(Row(1))
)
checkAnswer(
sql(
"""
| with t2 as (with t1 as (select 1 as b, 2 as c) select b, c from t1),
| t3 as (
| with t4 as (select 1 as d, 3 as e)
| select * from t4 cross join t2 where t2.b = t4.d
| )
| select a from (select 1 as a union all select 2 as a)
| where a = (select max(d) from t3)
""".stripMargin),
Array(Row(1))
)
}
test("uncorrelated scalar subquery in CTE") {
checkAnswer(
sql("with t2 as (select 1 as b, 2 as c) " +
"select a from (select 1 as a union all select 2 as a) t " +
"where a = (select max(b) from t2) "),
Array(Row(1))
)
}
test("uncorrelated scalar subquery should return null if there is 0 rows") {
checkAnswer(
sql("select (select 's' as s limit 0) as b"),
Array(Row(null))
)
}
test("runtime error when the number of rows is greater than 1") {
val error2 = intercept[RuntimeException] {
sql("select (select a from (select 1 as a union all select 2 as a) t) as b").collect()
}
assert(error2.getMessage.contains(
"more than one row returned by a subquery used as an expression")
)
}
test("uncorrelated scalar subquery on a DataFrame generated query") {
val df = Seq((1, "one"), (2, "two"), (3, "three")).toDF("key", "value")
df.createOrReplaceTempView("subqueryData")
checkAnswer(
sql("select (select key from subqueryData where key > 2 order by key limit 1) + 1"),
Array(Row(4))
)
checkAnswer(
sql("select -(select max(key) from subqueryData)"),
Array(Row(-3))
)
checkAnswer(
sql("select (select value from subqueryData limit 0)"),
Array(Row(null))
)
checkAnswer(
sql("select (select min(value) from subqueryData" +
" where key = (select max(key) from subqueryData) - 1)"),
Array(Row("two"))
)
}
test("SPARK-15677: Queries against local relations with scalar subquery in Select list") {
withTempView("t1", "t2") {
Seq((1, 1), (2, 2)).toDF("c1", "c2").createOrReplaceTempView("t1")
Seq((1, 1), (2, 2)).toDF("c1", "c2").createOrReplaceTempView("t2")
checkAnswer(
sql("SELECT (select 1 as col) from t1"),
Row(1) :: Row(1) :: Nil)
checkAnswer(
sql("SELECT (select max(c1) from t2) from t1"),
Row(2) :: Row(2) :: Nil)
checkAnswer(
sql("SELECT 1 + (select 1 as col) from t1"),
Row(2) :: Row(2) :: Nil)
checkAnswer(
sql("SELECT c1, (select max(c1) from t2) + c2 from t1"),
Row(1, 3) :: Row(2, 4) :: Nil)
checkAnswer(
sql("SELECT c1, (select max(c1) from t2 where t1.c2 = t2.c2) from t1"),
Row(1, 1) :: Row(2, 2) :: Nil)
}
}
test("SPARK-14791: scalar subquery inside broadcast join") {
val df = sql("select a, sum(b) as s from l group by a having a > (select avg(a) from l)")
val expected = Row(3, 2.0, 3, 3.0) :: Row(6, null, 6, null) :: Nil
(1 to 10).foreach { _ =>
checkAnswer(r.join(df, $"c" === $"a"), expected)
}
}
test("EXISTS predicate subquery") {
checkAnswer(
sql("select * from l where exists (select * from r where l.a = r.c)"),
Row(2, 1.0) :: Row(2, 1.0) :: Row(3, 3.0) :: Row(6, null) :: Nil)
checkAnswer(
sql("select * from l where exists (select * from r where l.a = r.c) and l.a <= 2"),
Row(2, 1.0) :: Row(2, 1.0) :: Nil)
}
test("NOT EXISTS predicate subquery") {
checkAnswer(
sql("select * from l where not exists (select * from r where l.a = r.c)"),
Row(1, 2.0) :: Row(1, 2.0) :: Row(null, null) :: Row(null, 5.0) :: Nil)
checkAnswer(
sql("select * from l where not exists (select * from r where l.a = r.c and l.b < r.d)"),
Row(1, 2.0) :: Row(1, 2.0) :: Row(3, 3.0) ::
Row(null, null) :: Row(null, 5.0) :: Row(6, null) :: Nil)
}
test("EXISTS predicate subquery within OR") {
checkAnswer(
sql("select * from l where exists (select * from r where l.a = r.c)" +
" or exists (select * from r where l.a = r.c)"),
Row(2, 1.0) :: Row(2, 1.0) :: Row(3, 3.0) :: Row(6, null) :: Nil)
checkAnswer(
sql("select * from l where not exists (select * from r where l.a = r.c and l.b < r.d)" +
" or not exists (select * from r where l.a = r.c)"),
Row(1, 2.0) :: Row(1, 2.0) :: Row(3, 3.0) ::
Row(null, null) :: Row(null, 5.0) :: Row(6, null) :: Nil)
}
test("IN predicate subquery") {
checkAnswer(
sql("select * from l where l.a in (select c from r)"),
Row(2, 1.0) :: Row(2, 1.0) :: Row(3, 3.0) :: Row(6, null) :: Nil)
checkAnswer(
sql("select * from l where l.a in (select c from r where l.b < r.d)"),
Row(2, 1.0) :: Row(2, 1.0) :: Nil)
checkAnswer(
sql("select * from l where l.a in (select c from r) and l.a > 2 and l.b is not null"),
Row(3, 3.0) :: Nil)
}
test("NOT IN predicate subquery") {
checkAnswer(
sql("select * from l where a not in (select c from r)"),
Nil)
checkAnswer(
sql("select * from l where a not in (select c from r where c is not null)"),
Row(1, 2.0) :: Row(1, 2.0) :: Nil)
checkAnswer(
sql("select * from l where (a, b) not in (select c, d from t) and a < 4"),
Row(1, 2.0) :: Row(1, 2.0) :: Row(2, 1.0) :: Row(2, 1.0) :: Row(3, 3.0) :: Nil)
// Empty sub-query
checkAnswer(
sql("select * from l where (a, b) not in (select c, d from r where c > 10)"),
Row(1, 2.0) :: Row(1, 2.0) :: Row(2, 1.0) :: Row(2, 1.0) ::
Row(3, 3.0) :: Row(null, null) :: Row(null, 5.0) :: Row(6, null) :: Nil)
}
test("IN predicate subquery within OR") {
checkAnswer(
sql("select * from l where l.a in (select c from r)" +
" or l.a in (select c from r where l.b < r.d)"),
Row(2, 1.0) :: Row(2, 1.0) :: Row(3, 3.0) :: Row(6, null) :: Nil)
intercept[AnalysisException] {
sql("select * from l where a not in (select c from r)" +
" or a not in (select c from r where c is not null)")
}
}
test("complex IN predicate subquery") {
checkAnswer(
sql("select * from l where (a, b) not in (select c, d from r)"),
Nil)
checkAnswer(
sql("select * from l where (a, b) not in (select c, d from t) and (a + b) is not null"),
Row(1, 2.0) :: Row(1, 2.0) :: Row(2, 1.0) :: Row(2, 1.0) :: Row(3, 3.0) :: Nil)
}
test("same column in subquery and outer table") {
checkAnswer(
sql("select a from l l1 where a in (select a from l where a < 3 group by a)"),
Row(1) :: Row(1) :: Row(2) :: Row(2) :: Nil
)
}
test("having with function in subquery") {
checkAnswer(
sql("select a from l group by 1 having exists (select 1 from r where d < min(b))"),
Row(null) :: Row(1) :: Row(3) :: Nil)
}
test("SPARK-15832: Test embedded existential predicate sub-queries") {
withTempView("t1", "t2", "t3", "t4", "t5") {
Seq((1, 1), (2, 2)).toDF("c1", "c2").createOrReplaceTempView("t1")
Seq((1, 1), (2, 2)).toDF("c1", "c2").createOrReplaceTempView("t2")
Seq((1, 1), (2, 2), (1, 2)).toDF("c1", "c2").createOrReplaceTempView("t3")
checkAnswer(
sql(
"""
| select c1 from t1
| where c2 IN (select c2 from t2)
|
""".stripMargin),
Row(1) :: Row(2) :: Nil)
checkAnswer(
sql(
"""
| select c1 from t1
| where c2 NOT IN (select c2 from t2)
|
""".stripMargin),
Nil)
checkAnswer(
sql(
"""
| select c1 from t1
| where EXISTS (select c2 from t2)
|
""".stripMargin),
Row(1) :: Row(2) :: Nil)
checkAnswer(
sql(
"""
| select c1 from t1
| where NOT EXISTS (select c2 from t2)
|
""".stripMargin),
Nil)
checkAnswer(
sql(
"""
| select c1 from t1
| where NOT EXISTS (select c2 from t2) and
| c2 IN (select c2 from t3)
|
""".stripMargin),
Nil)
checkAnswer(
sql(
"""
| select c1 from t1
| where (case when c2 IN (select 1 as one) then 1
| else 2 end) = c1
|
""".stripMargin),
Row(1) :: Row(2) :: Nil)
checkAnswer(
sql(
"""
| select c1 from t1
| where (case when c2 IN (select 1 as one) then 1
| else 2 end)
| IN (select c2 from t2)
|
""".stripMargin),
Row(1) :: Row(2) :: Nil)
checkAnswer(
sql(
"""
| select c1 from t1
| where (case when c2 IN (select c2 from t2) then 1
| else 2 end)
| IN (select c2 from t3)
|
""".stripMargin),
Row(1) :: Row(2) :: Nil)
checkAnswer(
sql(
"""
| select c1 from t1
| where (case when c2 IN (select c2 from t2) then 1
| when c2 IN (select c2 from t3) then 2
| else 3 end)
| IN (select c2 from t1)
|
""".stripMargin),
Row(1) :: Row(2) :: Nil)
checkAnswer(
sql(
"""
| select c1 from t1
| where (c1, (case when c2 IN (select c2 from t2) then 1
| when c2 IN (select c2 from t3) then 2
| else 3 end))
| IN (select c1, c2 from t1)
|
""".stripMargin),
Row(1) :: Nil)
checkAnswer(
sql(
"""
| select c1 from t3
| where ((case when c2 IN (select c2 from t2) then 1 else 2 end),
| (case when c2 IN (select c2 from t3) then 2 else 3 end))
| IN (select c1, c2 from t3)
|
""".stripMargin),
Row(1) :: Row(2) :: Row(1) :: Nil)
checkAnswer(
sql(
"""
| select c1 from t1
| where ((case when EXISTS (select c2 from t2) then 1 else 2 end),
| (case when c2 IN (select c2 from t3) then 2 else 3 end))
| IN (select c1, c2 from t3)
|
""".stripMargin),
Row(1) :: Row(2) :: Nil)
checkAnswer(
sql(
"""
| select c1 from t1
| where (case when c2 IN (select c2 from t2) then 3
| else 2 end)
| NOT IN (select c2 from t3)
|
""".stripMargin),
Row(1) :: Row(2) :: Nil)
checkAnswer(
sql(
"""
| select c1 from t1
| where ((case when c2 IN (select c2 from t2) then 1 else 2 end),
| (case when NOT EXISTS (select c2 from t3) then 2
| when EXISTS (select c2 from t2) then 3
| else 3 end))
| NOT IN (select c1, c2 from t3)
|
""".stripMargin),
Row(1) :: Row(2) :: Nil)
checkAnswer(
sql(
"""
| select c1 from t1
| where (select max(c1) from t2 where c2 IN (select c2 from t3))
| IN (select c2 from t2)
|
""".stripMargin),
Row(1) :: Row(2) :: Nil)
}
}
test("correlated scalar subquery in where") {
checkAnswer(
sql("select * from l where b < (select max(d) from r where a = c)"),
Row(2, 1.0) :: Row(2, 1.0) :: Nil)
}
test("correlated scalar subquery in select") {
checkAnswer(
sql("select a, (select sum(b) from l l2 where l2.a = l1.a) sum_b from l l1"),
Row(1, 4.0) :: Row(1, 4.0) :: Row(2, 2.0) :: Row(2, 2.0) :: Row(3, 3.0) ::
Row(null, null) :: Row(null, null) :: Row(6, null) :: Nil)
}
test("correlated scalar subquery in select (null safe)") {
checkAnswer(
sql("select a, (select sum(b) from l l2 where l2.a <=> l1.a) sum_b from l l1"),
Row(1, 4.0) :: Row(1, 4.0) :: Row(2, 2.0) :: Row(2, 2.0) :: Row(3, 3.0) ::
Row(null, 5.0) :: Row(null, 5.0) :: Row(6, null) :: Nil)
}
test("correlated scalar subquery in aggregate") {
checkAnswer(
sql("select a, (select sum(d) from r where a = c) sum_d from l l1 group by 1, 2"),
Row(1, null) :: Row(2, 6.0) :: Row(3, 2.0) :: Row(null, null) :: Row(6, null) :: Nil)
}
test("SPARK-18504 extra GROUP BY column in correlated scalar subquery is not permitted") {
withTempView("t") {
Seq((1, 1), (1, 2)).toDF("c1", "c2").createOrReplaceTempView("t")
val errMsg = intercept[AnalysisException] {
sql("select (select sum(-1) from t t2 where t1.c2 = t2.c1 group by t2.c2) sum from t t1")
}
assert(errMsg.getMessage.contains(
"A GROUP BY clause in a scalar correlated subquery cannot contain non-correlated columns:"))
}
}
test("non-aggregated correlated scalar subquery") {
val msg1 = intercept[AnalysisException] {
sql("select a, (select b from l l2 where l2.a = l1.a) sum_b from l l1")
}
assert(msg1.getMessage.contains("Correlated scalar subqueries must be Aggregated"))
val msg2 = intercept[AnalysisException] {
sql("select a, (select b from l l2 where l2.a = l1.a group by 1) sum_b from l l1")
}
assert(msg2.getMessage.contains(
"The output of a correlated scalar subquery must be aggregated"))
}
test("non-equal correlated scalar subquery") {
val msg1 = intercept[AnalysisException] {
sql("select a, (select sum(b) from l l2 where l2.a < l1.a) sum_b from l l1")
}
assert(msg1.getMessage.contains(
"Correlated column is not allowed in a non-equality predicate:"))
}
test("disjunctive correlated scalar subquery") {
checkAnswer(
sql("""
|select a
|from l
|where (select count(*)
| from r
| where (a = c and d = 2.0) or (a = c and d = 1.0)) > 0
""".stripMargin),
Row(3) :: Nil)
}
test("SPARK-15370: COUNT bug in WHERE clause (Filter)") {
// Case 1: Canonical example of the COUNT bug
checkAnswer(
sql("select l.a from l where (select count(*) from r where l.a = r.c) < l.a"),
Row(1) :: Row(1) :: Row(3) :: Row(6) :: Nil)
// Case 2: count(*) = 0; could be rewritten to NOT EXISTS but currently uses
// a rewrite that is vulnerable to the COUNT bug
checkAnswer(
sql("select l.a from l where (select count(*) from r where l.a = r.c) = 0"),
Row(1) :: Row(1) :: Row(null) :: Row(null) :: Nil)
// Case 3: COUNT bug without a COUNT aggregate
checkAnswer(
sql("select l.a from l where (select sum(r.d) is null from r where l.a = r.c)"),
Row(1) :: Row(1) ::Row(null) :: Row(null) :: Row(6) :: Nil)
}
test("SPARK-15370: COUNT bug in SELECT clause (Project)") {
checkAnswer(
sql("select a, (select count(*) from r where l.a = r.c) as cnt from l"),
Row(1, 0) :: Row(1, 0) :: Row(2, 2) :: Row(2, 2) :: Row(3, 1) :: Row(null, 0)
:: Row(null, 0) :: Row(6, 1) :: Nil)
}
test("SPARK-15370: COUNT bug in HAVING clause (Filter)") {
checkAnswer(
sql("select l.a as grp_a from l group by l.a " +
"having (select count(*) from r where grp_a = r.c) = 0 " +
"order by grp_a"),
Row(null) :: Row(1) :: Nil)
}
test("SPARK-15370: COUNT bug in Aggregate") {
checkAnswer(
sql("select l.a as aval, sum((select count(*) from r where l.a = r.c)) as cnt " +
"from l group by l.a order by aval"),
Row(null, 0) :: Row(1, 0) :: Row(2, 4) :: Row(3, 1) :: Row(6, 1) :: Nil)
}
test("SPARK-15370: COUNT bug negative examples") {
// Case 1: Potential COUNT bug case that was working correctly prior to the fix
checkAnswer(
sql("select l.a from l where (select sum(r.d) from r where l.a = r.c) is null"),
Row(1) :: Row(1) :: Row(null) :: Row(null) :: Row(6) :: Nil)
// Case 2: COUNT aggregate but no COUNT bug due to > 0 test.
checkAnswer(
sql("select l.a from l where (select count(*) from r where l.a = r.c) > 0"),
Row(2) :: Row(2) :: Row(3) :: Row(6) :: Nil)
// Case 3: COUNT inside aggregate expression but no COUNT bug.
checkAnswer(
sql("select l.a from l where (select count(*) + sum(r.d) from r where l.a = r.c) = 0"),
Nil)
}
test("SPARK-15370: COUNT bug in subquery in subquery in subquery") {
checkAnswer(
sql("""select l.a from l
|where (
| select cntPlusOne + 1 as cntPlusTwo from (
| select cnt + 1 as cntPlusOne from (
| select sum(r.c) s, count(*) cnt from r where l.a = r.c having cnt = 0
| )
| )
|) = 2""".stripMargin),
Row(1) :: Row(1) :: Row(null) :: Row(null) :: Nil)
}
test("SPARK-15370: COUNT bug with nasty predicate expr") {
checkAnswer(
sql("select l.a from l where " +
"(select case when count(*) = 1 then null else count(*) end as cnt " +
"from r where l.a = r.c) = 0"),
Row(1) :: Row(1) :: Row(null) :: Row(null) :: Nil)
}
test("SPARK-15370: COUNT bug with attribute ref in subquery input and output ") {
checkAnswer(
sql(
"""
|select l.b, (select (r.c + count(*)) is null
|from r
|where l.a = r.c group by r.c) from l
""".stripMargin),
Row(1.0, false) :: Row(1.0, false) :: Row(2.0, true) :: Row(2.0, true) ::
Row(3.0, false) :: Row(5.0, true) :: Row(null, false) :: Row(null, true) :: Nil)
}
test("SPARK-16804: Correlated subqueries containing LIMIT - 1") {
withTempView("onerow") {
Seq(1).toDF("c1").createOrReplaceTempView("onerow")
checkAnswer(
sql(
"""
| select c1 from onerow t1
| where exists (select 1 from onerow t2 where t1.c1=t2.c1)
| and exists (select 1 from onerow LIMIT 1)""".stripMargin),
Row(1) :: Nil)
}
}
test("SPARK-16804: Correlated subqueries containing LIMIT - 2") {
withTempView("onerow") {
Seq(1).toDF("c1").createOrReplaceTempView("onerow")
checkAnswer(
sql(
"""
| select c1 from onerow t1
| where exists (select 1
| from (select 1 from onerow t2 LIMIT 1)
| where t1.c1=t2.c1)""".stripMargin),
Row(1) :: Nil)
}
}
test("SPARK-17337: Incorrect column resolution leads to incorrect results") {
withTempView("t1", "t2") {
Seq(1, 2).toDF("c1").createOrReplaceTempView("t1")
Seq(1).toDF("c2").createOrReplaceTempView("t2")
checkAnswer(
sql(
"""
| select *
| from (select t2.c2+1 as c3
| from t1 left join t2 on t1.c1=t2.c2) t3
| where c3 not in (select c2 from t2)""".stripMargin),
Row(2) :: Nil)
}
}
test("SPARK-17348: Correlated subqueries with non-equality predicate (good case)") {
withTempView("t1", "t2") {
Seq((1, 1)).toDF("c1", "c2").createOrReplaceTempView("t1")
Seq((1, 1), (2, 0)).toDF("c1", "c2").createOrReplaceTempView("t2")
// Simple case
checkAnswer(
sql(
"""
| select c1
| from t1
| where c1 in (select t2.c1
| from t2
| where t1.c2 >= t2.c2)""".stripMargin),
Row(1) :: Nil)
// More complex case with OR predicate
checkAnswer(
sql(
"""
| select t1.c1
| from t1, t1 as t3
| where t1.c1 = t3.c1
| and (t1.c1 in (select t2.c1
| from t2
| where t1.c2 >= t2.c2
| or t3.c2 < t2.c2)
| or t1.c2 >= 0)""".stripMargin),
Row(1) :: Nil)
}
}
test("SPARK-17348: Correlated subqueries with non-equality predicate (error case)") {
withTempView("t1", "t2", "t3", "t4") {
Seq((1, 1)).toDF("c1", "c2").createOrReplaceTempView("t1")
Seq((1, 1), (2, 0)).toDF("c1", "c2").createOrReplaceTempView("t2")
Seq((2, 1)).toDF("c1", "c2").createOrReplaceTempView("t3")
Seq((1, 1), (2, 2)).toDF("c1", "c2").createOrReplaceTempView("t4")
// Simplest case
intercept[AnalysisException] {
sql(
"""
| select t1.c1
| from t1
| where t1.c1 in (select max(t2.c1)
| from t2
| where t1.c2 >= t2.c2)""".stripMargin).collect()
}
// Add a HAVING on top and augmented within an OR predicate
intercept[AnalysisException] {
sql(
"""
| select t1.c1
| from t1
| where t1.c1 in (select max(t2.c1)
| from t2
| where t1.c2 >= t2.c2
| having count(*) > 0 )
| or t1.c2 >= 0""".stripMargin).collect()
}
// Add a HAVING on top and augmented within an OR predicate
intercept[AnalysisException] {
sql(
"""
| select t1.c1
| from t1, t1 as t3
| where t1.c1 = t3.c1
| and (t1.c1 in (select max(t2.c1)
| from t2
| where t1.c2 = t2.c2
| or t3.c2 = t2.c2)
| )""".stripMargin).collect()
}
// In Window expression: changing the data set to
// demonstrate if this query ran, it would return incorrect result.
intercept[AnalysisException] {
sql(
"""
| select c1
| from t3
| where c1 in (select max(t4.c1) over ()
| from t4
| where t3.c2 >= t4.c2)""".stripMargin).collect()
}
}
}
// This restriction applies to
// the permutation of { LOJ, ROJ, FOJ } x { EXISTS, IN, scalar subquery }
// where correlated predicates appear in right operand of LOJ,
// or in left operand of ROJ, or in either operand of FOJ.
// The test cases below cover the representatives of the patterns
test("Correlated subqueries in outer joins") {
withTempView("t1", "t2", "t3") {
Seq(1).toDF("c1").createOrReplaceTempView("t1")
Seq(2).toDF("c1").createOrReplaceTempView("t2")
Seq(1).toDF("c1").createOrReplaceTempView("t3")
// Left outer join (LOJ) in IN subquery context
intercept[AnalysisException] {
sql(
"""
| select t1.c1
| from t1
| where 1 IN (select 1
| from t3 left outer join
| (select c1 from t2 where t1.c1 = 2) t2
| on t2.c1 = t3.c1)""".stripMargin).collect()
}
// Right outer join (ROJ) in EXISTS subquery context
intercept[AnalysisException] {
sql(
"""
| select t1.c1
| from t1
| where exists (select 1
| from (select c1 from t2 where t1.c1 = 2) t2
| right outer join t3
| on t2.c1 = t3.c1)""".stripMargin).collect()
}
// SPARK-18578: Full outer join (FOJ) in scalar subquery context
intercept[AnalysisException] {
sql(
"""
| select (select max(1)
| from (select c1 from t2 where t1.c1 = 2 and t1.c1=t2.c1) t2
| full join t3
| on t2.c1=t3.c1)
| from t1""".stripMargin).collect()
}
}
}
// Generate operator
test("Correlated subqueries in LATERAL VIEW") {
withTempView("t1", "t2") {
Seq((1, 1), (2, 0)).toDF("c1", "c2").createOrReplaceTempView("t1")
Seq[(Int, Array[Int])]((1, Array(1, 2)), (2, Array(-1, -3)))
.toDF("c1", "arr_c2").createTempView("t2")
checkAnswer(
sql(
"""
| select c2
| from t1
| where exists (select *
| from t2 lateral view explode(arr_c2) q as c2
where t1.c1 = t2.c1)""".stripMargin),
Row(1) :: Row(0) :: Nil)
}
}
test("SPARK-19933 Do not eliminate top-level aliases in sub-queries") {
withTempView("t1", "t2") {
spark.range(4).createOrReplaceTempView("t1")
checkAnswer(
sql("select * from t1 where id in (select id as id from t1)"),
Row(0) :: Row(1) :: Row(2) :: Row(3) :: Nil)
spark.range(2).createOrReplaceTempView("t2")
checkAnswer(
sql("select * from t1 where id in (select id as id from t2)"),
Row(0) :: Row(1) :: Nil)
}
}
test("ListQuery and Exists should work even no correlated references") {
checkAnswer(
sql("select * from l, r where l.a = r.c AND (r.d in (select d from r) OR l.a >= 1)"),
Row(2, 1.0, 2, 3.0) :: Row(2, 1.0, 2, 3.0) :: Row(2, 1.0, 2, 3.0) ::
Row(2, 1.0, 2, 3.0) :: Row(3.0, 3.0, 3, 2.0) :: Row(6, null, 6, null) :: Nil)
checkAnswer(
sql("select * from l, r where l.a = r.c + 1 AND (exists (select * from r) OR l.a = r.c)"),
Row(3, 3.0, 2, 3.0) :: Row(3, 3.0, 2, 3.0) :: Nil)
}
}
| JerryLead/spark | sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala | Scala | apache-2.0 | 28,837 |
package com.arcusys.valamis.quiz.service
import com.arcusys.valamis.quiz.model.{ QuizQuestionCategory, QuizTreeElement }
import com.arcusys.valamis.quiz.storage.{ QuizQuestionCategoryStorage, QuizQuestionStorage, QuizTreeStorage }
trait QuizCategoryServiceImpl extends QuizQuestionServiceImpl with QuizService {
protected def categoryStorage: QuizQuestionCategoryStorage
protected def questionStorage: QuizQuestionStorage
protected def quizTreeStorage: QuizTreeStorage
def getCategory(categoryId: Int): QuizQuestionCategory = {
categoryStorage.getByID(categoryId).getOrElse(throw new Exception("quiz category not found, categoryId: " + categoryId))
}
def getCategories(quizId: Int, parentCategoryId: Option[Int]): Seq[QuizQuestionCategory] = {
categoryStorage.getChildren(quizId, parentCategoryId)
}
def createCategory(quizId: Int, title: String, description: String): QuizQuestionCategory = {
val categoryId = categoryStorage.createAndGetID(QuizQuestionCategory(0, title, description, quizId, None))
quizTreeStorage.createAndGetID(QuizTreeElement(0, quizId, "c_" + categoryId, true, None))
getCategory(categoryId)
}
def deleteCategory(quizId: Int, categoryId: Int) = {
questionStorage.getByCategory(quizId, Some(categoryId)).foreach(q => deleteQuestion(quizId, q.id))
quizTreeStorage.getByQuizAndElementID(quizId, "c_" + categoryId).map(e => quizTreeStorage.delete(e.id))
categoryStorage.delete(categoryId)
}
def updateCategory(categoryId: Int, title: String, description: String): QuizQuestionCategory = {
categoryStorage.modify(categoryId, title, description)
getCategory(categoryId)
}
def getCategoryIndex(quizId: Int, categoryId: Int): Int = {
val quizTreeElement = quizTreeStorage.getByQuizAndElementID(quizId, "c_" + categoryId)
quizTreeElement.map(_.arrangementIndex).getOrElse(1)
}
def moveCategory(quizId: Int, categoryId: Int, parentId: Option[Int], index: Int) = {
for (parentIdValue <- parentId)
if (quizTreeStorage.getByQuizAndElementID(quizId, "q_" + parentIdValue).isEmpty)
throw new Exception("can`t move quiz question, not parent " + parentIdValue)
for (quizTree <- quizTreeStorage.getByQuizAndElementID(quizId, "c_" + categoryId)) {
categoryStorage.updateParent(categoryId, parentId)
quizTreeStorage.move(quizTree.copy(parentID = parentId.map("c_" + _), arrangementIndex = index), quizTree.arrangementIndex, parentId.map("c_" + _))
}
}
}
| ViLPy/Valamis | valamis-quiz/src/main/scala/com/arcusys/valamis/quiz/service/QuizCategoryServiceImpl.scala | Scala | lgpl-3.0 | 2,490 |
package ch.wsl.box.rest.logic.notification
import java.util.Date
import ch.wsl.box.jdbc.Connection
import ch.wsl.box.services.Services
import org.postgresql.PGConnection
import scribe.Logging
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}
trait PgNotifier{
def stop()
}
object NotificationsHandler {
def create(channel:String,connection:Connection,callback: (String) => Future[Boolean])(implicit ec:ExecutionContext):PgNotifier = new PgNotifier {
val listener = new Listener(connection,channel,callback)
listener.start()
override def stop(): Unit = listener.stopRunning()
}
}
import java.sql.SQLException
class Listener(connection:Connection,channel:String,callback: (String) => Future[Boolean])(implicit ec:ExecutionContext) extends Thread with Logging {
val user = connection.adminUser
var pgconn: PGConnection = null
var conn:java.sql.Connection = null
private var running = true
def stopRunning() = {
running = false
}
private def reloadConnection() = {
conn = connection.dataSource(s"Notification $channel").getConnection()
val stmt = conn.createStatement
val listenQuery = s"""SET ROLE "$user"; LISTEN $channel"""
logger.info(listenQuery)
stmt.execute(listenQuery)
stmt.close
pgconn = conn.asInstanceOf[PGConnection]
}
def select1() = {
val stmt = conn.createStatement
val rs = stmt.executeQuery(s"SELECT 1")
rs.close()
stmt.close()
}
reloadConnection()
override def run(): Unit = {
while ( running ) {
Thread.sleep(500)
try {
// issue a dummy query to contact the backend
// and receive any pending notifications.
Try(select1()) match {
case Success(value) => value
case Failure(exception) => {
Thread.sleep(1000)
Try(conn.close())
reloadConnection()
select1()
}
}
val notifications = pgconn.getNotifications(1000)
if(notifications != null) {
notifications.foreach{ n =>
logger.info(s"""
|Received notification:
|timestamp: ${new Date().toString}
|name: ${n.getName}
|parameter: ${n.getParameter}
|""".stripMargin)
callback(n.getParameter).onComplete {
case Success(ok) => true
case Failure(exception) => {
exception.printStackTrace()
logger.error(exception.getMessage)
false
}
}
}
}
// wait a while before checking again for new
// notifications
//Thread.sleep(1000)
}
catch {
case sqle: SQLException =>
sqle.printStackTrace()
case ie: InterruptedException =>
ie.printStackTrace()
}
}
}
}
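// Hedged usage sketch (not part of the original file): shows how NotificationsHandler.create
// is meant to be wired together — a channel name, a Connection, and a callback that receives
// each notification payload. `boxConnection` and the channel name are hypothetical values.
object PgNotifierUsageSketch {
  def listen(boxConnection: Connection)(implicit ec: ExecutionContext): PgNotifier = {
    val notifier = NotificationsHandler.create(
      channel = "box_events",
      connection = boxConnection,
      callback = payload => Future.successful {
        // react to the NOTIFY payload; return true to signal successful handling
        println(s"received notification payload: $payload")
        true
      }
    )
    // call notifier.stop() when the listener thread should terminate
    notifier
  }
}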
| Insubric/box | server/src/main/scala/ch/wsl/box/rest/logic/notification/NotificationsHandler.scala | Scala | apache-2.0 | 2,990 |
package com.twitter.inject.modules
import com.twitter.finagle.stats.{LoadedStatsReceiver, StatsReceiver}
import com.twitter.inject.TwitterModule
object StatsReceiverModule extends TwitterModule {
override def configure() {
bindSingleton[StatsReceiver].toInstance(LoadedStatsReceiver)
}
}
| tom-chan/finatra | inject/inject-modules/src/main/scala/com/twitter/inject/modules/StatsReceiverModule.scala | Scala | apache-2.0 | 298 |
/*
* Copyright 2012-2014 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.comcast.xfinity.sirius.api.impl.status
import com.comcast.xfinity.sirius.api.SiriusConfiguration
import com.comcast.xfinity.sirius.admin.SiriusMonitorReader
import akka.actor.{Props, Actor}
import com.comcast.xfinity.sirius.api.impl.status.NodeStats._
object StatusWorker {
// trait this in case we want to add different
// kinds of more specific inquiries
/**
* Trait encapsulating all status inquiries
*/
sealed trait StatusQuery
/**
* General status inquiry, will return everything
*/
case object GetStatus extends StatusQuery
/**
* Create Props for an actor of this type.
* Create a StatusWorker, which will respond to any status queries
*
* @param supAddressString the string that this node identifies itself by;
* a smart engineer would pass in the supervisor's external
* address ;)
* @param config the node's configuration
*/
def props(supAddressString: String, config: SiriusConfiguration): Props = {
Props(classOf[StatusWorker], supAddressString, config, new SiriusMonitorReader)
}
}
/**
* Actor for receiving and replying to status inquiry messages
*
* Prefer companion object construction over this
*
* @param config Sirius's Configuration
* @param supAddressString fully qualified string address of
* this system's supervisor, as it should be referenced externally
* @param monitorReader SiriusMonitorReader for reading local
* registered MBeans, if so configured
*/
class StatusWorker(supAddressString: String,
config: SiriusConfiguration,
monitorReader: SiriusMonitorReader) extends Actor {
import StatusWorker._
def receive = {
case GetStatus =>
val memStats = getMemStats
val configMap = getConfigMap
val monitorStats = getMonitorStats
sender ! FullNodeStatus(supAddressString, memStats, configMap, monitorStats)
}
private def getMemStats = {
val runtime = Runtime.getRuntime
MemoryUsage(runtime.freeMemory, runtime.totalMemory)
}
private def getConfigMap = {
val configMap = config.getConfigMap
NodeConfig(configMap.map(kv => (kv._1, kv._2.toString)))
}
private def getMonitorStats = MonitorStats(monitorReader.getMonitorStats(config))
}
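// Hedged usage sketch (not part of the original file): follows the documented preference for
// companion-object construction — build the actor from StatusWorker.props and send it GetStatus.
// `system`, `supervisorAddress` and `config` are hypothetical caller-supplied values.
object StatusWorkerUsageSketch {
  import akka.actor.{ActorRef, ActorSystem}
  def spawn(system: ActorSystem, supervisorAddress: String, config: SiriusConfiguration): ActorRef = {
    val statusWorker = system.actorOf(StatusWorker.props(supervisorAddress, config))
    // the worker replies to the sender with a FullNodeStatus; use ask or send from
    // within an actor if the reply needs to be received
    statusWorker ! StatusWorker.GetStatus
    statusWorker
  }
}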
| mattinger/sirius | src/main/scala/com/comcast/xfinity/sirius/api/impl/status/StatusWorker.scala | Scala | apache-2.0 | 2,918 |
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Outworkers Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.builder.ops
import com.websudos.phantom.builder.QueryBuilder
import com.websudos.phantom.builder.clauses.{WhereClause, OrderingColumn, CompareAndSetClause}
import com.websudos.phantom.builder.primitives.Primitive
import com.websudos.phantom.column._
import com.websudos.phantom.dsl._
import com.websudos.phantom.keys.{Undroppable, Indexed}
import shapeless.<:!<
import scala.annotation.implicitNotFound
sealed class DropColumn[RR](val column: AbstractColumn[RR])
sealed class CasConditionalOperators[RR](col: AbstractColumn[RR]) {
/**
* DSL method used to chain "is" clauses in Compare-And-Set operations.
* Using a call to {{is}}, a column is only updated if the conditional clause of the compare-and-set is met.
*
* Example:
*
* {{{
* Recipes.update.where(_.url eqs recipe.url)
* .modify(_.description setTo updated)
* .onlyIf(_.description is recipe.description)
* .future()
* }}}
*
* @param value The value to compare against in the match clause.
* @return A compare and set clause usable in an "onlyIf" condition.
*/
final def is(value: RR): CompareAndSetClause.Condition = {
new CompareAndSetClause.Condition(QueryBuilder.Where.eqs(col.name, col.asCql(value)))
}
final def isNot(value: RR): CompareAndSetClause.Condition = {
new CompareAndSetClause.Condition(QueryBuilder.Where.notEqs(col.name, col.asCql(value)))
}
final def isGt(value: RR): CompareAndSetClause.Condition = {
new CompareAndSetClause.Condition(QueryBuilder.Where.gt(col.name, col.asCql(value)))
}
final def isGte(value: RR): CompareAndSetClause.Condition = {
new CompareAndSetClause.Condition(QueryBuilder.Where.gte(col.name, col.asCql(value)))
}
final def isLt(value: RR): CompareAndSetClause.Condition = {
new CompareAndSetClause.Condition(QueryBuilder.Where.lt(col.name, col.asCql(value)))
}
final def isLte(value: RR): CompareAndSetClause.Condition = {
new CompareAndSetClause.Condition(QueryBuilder.Where.lte(col.name, col.asCql(value)))
}
}
sealed class SetConditionals[T <: CassandraTable[T, R], R, RR](val col: AbstractSetColumn[T, R, RR]) {
/**
* Generates a Set CONTAINS clause that can be used inside a CQL Where condition.
* @param elem The element to check for in the contains clause.
* @return A Where clause.
*/
final def contains(elem: RR): WhereClause.Condition = {
new WhereClause.Condition(
QueryBuilder.Where.contains(col.name, col.valueAsCql(elem))
)
}
}
sealed class MapEntriesConditionals[K : Primitive, V : Primitive](val col: MapKeyUpdateClause[K, V]) {
/**
* Generates a Map CONTAINS ENTRY clause that can be used inside a CQL Where condition.
* This allows users to lookup records by their full entry inside a map column of a table.
*
* Key support is not yet enabled in phantom because index generation has to be done differently.
* Otherwise, there is no support for simultaneous indexing on both KEYS and VALUES of a MAP column.
* This limitation will be lifted in the future.
*
* @param entry The map entry to look for.
* @return A Where clause.
*/
final def eqs(entry: V): WhereClause.Condition = {
new WhereClause.Condition(
QueryBuilder.Where.containsEntry(col.column, col.keyName, Primitive[V].asCql(entry))
)
}
}
sealed class MapKeyConditionals[T <: CassandraTable[T, R], R, K, V](val col: AbstractMapColumn[T, R, K, V]) {
/**
* Generates a Map CONTAINS KEY clause that can be used inside a CQL Where condition.
* This allows users to lookup records by a KEY inside a map column of a table.
*
* Key support is not yet enabled in phantom because index generation has to be done differently.
* Otherwise, there is no support for simultaneous indexing on both KEYS and VALUES of a MAP column.
* This limitation will be lifted in the future.
*
* @param elem The element to check for in the contains clause.
* @return A Where clause.
*/
final def containsKey(elem: K): WhereClause.Condition = {
new WhereClause.Condition(
QueryBuilder.Where.containsKey(col.name, col.keyAsCql(elem))
)
}
}
sealed class MapConditionals[T <: CassandraTable[T, R], R, K, V](val col: AbstractMapColumn[T, R, K, V]) {
/**
* Generates a Map CONTAINS clause that can be used inside a CQL Where condition.
* This allows users to lookup records by a VALUE inside a map column of a table.
*
* @param elem The element to check for in the contains clause.
* @return A Where clause.
*/
final def contains(elem: K): WhereClause.Condition = {
new WhereClause.Condition(
QueryBuilder.Where.contains(col.name, col.keyAsCql(elem))
)
}
}
private[phantom] trait ImplicitMechanism extends ModifyMechanism {
@implicitNotFound(msg = "Compare-and-set queries can only be applied to non indexed primitive columns.")
implicit final def columnToCasCompareColumn[RR](col: AbstractColumn[RR])(implicit ev: col.type <:!< Indexed): CasConditionalOperators[RR] = {
new CasConditionalOperators[RR](col)
}
@implicitNotFound(msg = "Index columns and counters cannot be dropped!")
implicit final def columnToDropColumn[T](col: AbstractColumn[T])(implicit ev: col.type <:!< Undroppable): DropColumn[T] = new DropColumn[T](col)
implicit def indexedToQueryColumn[T : Primitive](col: AbstractColumn[T] with Indexed): QueryColumn[T] = new QueryColumn(col)
implicit def orderingColumn[RR](col: AbstractColumn[RR] with PrimaryKey[RR]): OrderingColumn[RR] = new OrderingColumn[RR](col)
implicit def setColumnToQueryColumn[T <: CassandraTable[T, R], R, RR](col: AbstractSetColumn[T, R, RR] with Index[Set[RR]]): SetConditionals[T, R, RR] = {
new SetConditionals(col)
}
/**
* Definition used to cast a comparison clause to Map entry lookup based on a secondary index.
* @param cond The column update clause generated from MapColumn.apply(keyValue)
* @tparam K The type of the key inside the MapColumn.
* @tparam V The type of the value held inside the MapColumn.
* @return A MapEntriesConditionals query that allows secondary index operators on map entries.
*/
implicit def mapColumnDefinitionToEntriesQueryColumn[
K : Primitive,
V: Primitive
](cond: MapKeyUpdateClause[K, V]): MapEntriesConditionals[K, V] = {
new MapEntriesConditionals[K, V](cond)
}
/**
* Definition used to cast an index map column with values indexed to a query-able definition.
* This will allow users to use "CONTAINS" clauses to search for matches based on map values.
*
* @param col The map column to cast to a Map column secondary index query.
* @tparam T The Cassandra table inner type.
* @tparam R The record type of the table.
* @tparam K The type of the key held in the map.
* @tparam V The type of the value held in the map.
* @return A MapConditionals class with CONTAINS support.
*/
implicit def mapColumnToQueryColumn[T <: CassandraTable[T, R], R, K, V](
col: AbstractMapColumn[T, R, K, V] with Index[Map[K, V]]
)(implicit ev: col.type <:!< Keys): MapConditionals[T, R, K, V] = {
new MapConditionals(col)
}
/**
* Definition used to cast an index map column with keys indexed to a query-able definition.
* This will allow users to use "CONTAINS KEY" clauses to search for matches based on map keys.
*
* @param col The map column to cast to a Map column secondary index query.
* @tparam T The Cassandra table inner type.
* @tparam R The record type of the table.
* @tparam K The type of the key held in the map.
* @tparam V The type of the value held in the map.
* @return A MapConditionals class with CONTAINS KEY support.
*/
implicit def mapKeysColumnToQueryColumn[T <: CassandraTable[T, R], R, K, V](
col: AbstractMapColumn[T, R, K, V] with Index[Map[K, V]] with Keys): MapKeyConditionals[T, R, K, V] = {
new MapKeyConditionals(col)
}
}
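// Illustrative sketch of the evidence used above (an assumption about its shape; the real `<:!<`
// is defined elsewhere in phantom): the "not a subtype" trick provides one implicit that always
// exists and two ambiguous ones that only exist when A really is a subtype of B, so implicit
// resolution fails exactly for subtypes and the conversion is withheld:
//   trait <:!<[A, B]
//   implicit def nsub[A, B]: A <:!< B = new <:!<[A, B] {}
//   implicit def nsubAmbig1[A, B >: A]: A <:!< B = sys.error("unreachable")
//   implicit def nsubAmbig2[A, B >: A]: A <:!< B = sys.error("unreachable")
// With this in scope, `implicitly[Int <:!< String]` compiles while `implicitly[Int <:!< Any]` does
// not, which is how columnToCasCompareColumn and columnToDropColumn reject Indexed and Undroppable
// columns at compile time.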
| levinson/phantom | phantom-dsl/src/main/scala/com/websudos/phantom/builder/ops/ImplicitMechanism.scala | Scala | bsd-2-clause | 9,482 |
package utils
import java.nio.charset.Charset
import java.nio.file.Files
import java.nio.file.Path
import java.io.{File, PrintWriter}
object FileUtils {
def readFileToString(file: File, encoding: Charset) = {
val encoded = Files.readAllBytes(file.toPath)
new String(encoded, encoding)
}
  def writeStringToFile(filename: String, contents: String) = {
    // Note: println appends a trailing newline; close the writer even if writing fails.
    val printWriter = new PrintWriter(filename)
    try printWriter.println(contents)
    finally printWriter.close()
  }
}
| j-c-w/mlc | src/main/scala/utils/FileUtils.scala | Scala | gpl-3.0 | 479 |
package org.precompiler.spark101.env
import java.io.File
/**
*
* @author Richard Li
*/
trait EnvSetup {
private val winutilsPath = getUserDir() + File.separator + "src" + File.separator + "main" + File.separator + "resources"
def setupMockHadoop(): Unit = {
if (isWindows()) {
System.setProperty("hadoop.home.dir", winutilsPath)
}
}
def isWindows(): Boolean = {
System.getProperty("os.name").startsWith("Windows")
}
def getUserDir(): String = {
System.getProperty("user.dir")
}
}
| precompiler/spark-101 | learning-spark/src/main/scala/org/precompiler/spark101/env/EnvSetup.scala | Scala | apache-2.0 | 527 |
package skinny.task
import org.scalatest._
class SkinnyTaskLauncherSpec extends FlatSpec with Matchers {
val launcher = new SkinnyTaskLauncher {}
var result: String = "ng"
it should "accept registered tasks" in {
launcher.register("echo", (params) => params.foreach(println))
launcher.register("save", (params) => params.headOption.foreach(p => result = p))
launcher.main(Array("save", "ok"))
result should equal("ok")
}
it should "show usage" in {
launcher.showUsage
}
}
| skinny-framework/skinny-framework | task/src/test/scala/skinny/task/SkinnyTaskLauncherSpec.scala | Scala | mit | 517 |
package unfiltered.response
import unfiltered.request._
trait ResponseFunction[-A] {
// A is contravariant so e.g. a ResponseFunction[Any] can be supplied
// when ResponseFunction[HttpServletResponse] is expected.
def apply[B <: A](res: HttpResponse[B]): HttpResponse[B]
}
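// Illustrative note (not in the original source; the servlet type is the one the comment above
// alludes to): contravariance in A means a response function written against the most general
// type can be supplied wherever a more specific one is expected, e.g.
//   val generic: ResponseFunction[Any] = Pass
//   def bind(rf: ResponseFunction[javax.servlet.http.HttpServletResponse]) = rf
//   bind(generic)  // compiles, since ResponseFunction[Any] <: ResponseFunction[HttpServletResponse]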
trait Responder[A] extends ResponseFunction[A] { self =>
def apply[B <: A](res: HttpResponse[B]) = {
respond(res)
res
}
def respond(res: HttpResponse[A])
def ~> [B <: A](that: ResponseFunction[B]) = new Responder[B] {
def respond(res: HttpResponse[B]) {
that(self(res))
}
}
}
/** Base class for composing a response function from others */
class ChainResponse[A](f: ResponseFunction[A]) extends
Responder[A] {
def respond(res: HttpResponse[A]) { f(res) }
}
/** Tells the binding implementation to treat the request as non-matching */
object Pass extends ResponseFunction[Any] {
def apply[T](res: HttpResponse[T]) = res
}
| softprops/Unfiltered | library/src/main/scala/response/functions.scala | Scala | mit | 944 |
/*
* Copyright 2014-15 Intelix Pty Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eventstreams.instructions
import _root_.core.sysevents.SyseventOps.symbolToSyseventOps
import _root_.core.sysevents.WithSyseventPublisher
import _root_.core.sysevents.ref.ComponentWithBaseSysevents
import eventstreams.Tools.{configHelper, _}
import eventstreams._
import eventstreams.instructions.Types.SimpleInstructionType
import org.joda.time.format.DateTimeFormat
import org.joda.time.{DateTime, DateTimeZone}
import play.api.libs.json.{JsValue, Json}
import scala.util.Try
import scalaz.Scalaz._
import scalaz._
/**
*
* Symbol Meaning Presentation Examples
* ------ ------- ------------ -------
* G era text AD
* C century of era (>=0) number 20
* Y year of era (>=0) year 1996
*
* x weekyear year 1996
* w week of weekyear number 27
* e day of week number 2
* E day of week text Tuesday; Tue
*
* y year year 1996
* D day of year number 189
* M month of year month July; Jul; 07
* d day of month number 10
*
* a halfday of day text PM
* K hour of halfday (0~11) number 0
* h clockhour of halfday (1~12) number 12
*
* H hour of day (0~23) number 0
* k clockhour of day (1~24) number 24
* m minute of hour number 30
* s second of minute number 55
* S fraction of second number 978
*
* z time zone text Pacific Standard Time; PST
* Z time zone offset/id zone -0800; -08:00; America/Los_Angeles
*
* ' escape for text delimiter
* double' single quote literal '
*
*/
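// Illustrative example (hypothetical values, not from the repository): with the symbols above, a
// pattern such as "yyyy-MM-dd HH:mm:ss Z" parses "2014-05-27 14:30:00 +0000", e.g.
//   DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss Z").parseDateTime("2014-05-27 14:30:00 +0000")
// which is the same mechanism DateInstruction below uses to build its source and target formatters.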
trait DateInstructionSysevents extends ComponentWithBaseSysevents {
val Built = 'Built.trace
val DateParsed = 'DateParsed.trace
val UnableToParseDate = 'UnableToParse.info
override def componentId: String = "Instruction.Date"
}
trait DateInstructionConstants extends InstructionConstants with DateInstructionSysevents {
val CfgFSource = "source"
val CfgFPattern = "pattern"
val CfgFSourceZone = "sourceZone"
val CfgFTargetZone = "targetZone"
val CfgFTargetPattern = "targetPattern"
val CfgFTargetFmtField = "targetFmtField"
val CfgFTargetTsField = "targetTsField"
val default = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZZ")
val default_targetFmtField = "date_fmt"
val default_targetTsField = "date_ts"
}
object DateInstructionConstants extends DateInstructionConstants
class DateInstruction extends SimpleInstructionBuilder with DateInstructionConstants with WithSyseventPublisher {
val configId = "date"
  override def simpleInstruction(props: JsValue, id: Option[String] = None): \/[Fail, SimpleInstructionType] =
for (
source <- props ~> CfgFSource orFail s"Invalid $configId instruction. Missing '$CfgFSource' value. Contents: ${Json.stringify(props)}";
targetZone = props ~> CfgFTargetZone;
zone = props ~> CfgFSourceZone;
_ <- Try(targetZone.foreach(DateTimeZone.forID)).toOption orFail s"Invalid $configId instruction. Invalid '$CfgFTargetZone' value. Contents: $targetZone";
_ <- Try(zone.foreach(DateTimeZone.forID)).toOption orFail s"Invalid $configId instruction. Invalid '$CfgFSourceZone' value. Contents: $zone"
) yield {
val pattern = (props ~> CfgFPattern).map(DateTimeFormat.forPattern)
val zone = props ~> CfgFSourceZone
val targetZone = props ~> CfgFTargetZone
var targetPattern = Try((props ~> CfgFTargetPattern).map(DateTimeFormat.forPattern)).getOrElse(Some(DateInstructionConstants.default)) | DateInstructionConstants.default
val sourcePattern = pattern.map { p =>
zone match {
case Some(l) if !l.isEmpty => p.withZone(DateTimeZone.forID(l))
        case _ => p
}
}
targetPattern = targetZone match {
case Some(l) if !l.isEmpty => targetPattern.withZone(DateTimeZone.forID(l))
        case _ => targetPattern
}
val targetFmtField = props ~> CfgFTargetFmtField | DateInstructionConstants.default_targetFmtField
val targetTsField = props ~> CfgFTargetTsField | DateInstructionConstants.default_targetTsField
val uuid = UUIDTools.generateShortUUID
Built >>('Config -> Json.stringify(props), 'InstructionInstanceId -> uuid)
fr: EventFrame => {
val sourceField = macroReplacement(fr, source)
Try {
sourcePattern match {
case Some(p) =>
val sourceValue = locateFieldValue(fr, sourceField)
(sourceValue, p.parseDateTime(sourceValue))
case None =>
val sourceValue = locateRawFieldValue(fr, sourceField, 0).asNumber.map(_.longValue()) | 0
(sourceValue, new DateTime(sourceValue))
}
}.map { case (s, dt) =>
val fmt = dt.toString(targetPattern)
val eventId = fr.eventIdOrNA
DateParsed >>> Seq('SourceValue -> s, 'SourceDate -> dt, 'ResultFmt -> fmt, 'Ts -> dt.getMillis, 'EventId -> eventId, 'InstructionInstanceId -> uuid)
List(
EventValuePath(targetTsField).setLongInto(
EventValuePath(targetFmtField).setStringInto(fr, fmt), dt.getMillis))
}.recover {
case x =>
UnableToParseDate >>('Source -> fr, 'InstructionInstanceId -> uuid)
List(fr)
}.get
}
}
}
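// Hedged illustration (field values are hypothetical; the keys come from DateInstructionConstants
// above): a `props` object accepted by simpleInstruction could look like
//   Json.obj(
//     "source" -> "timestamp_raw",
//     "pattern" -> "yyyy-MM-dd HH:mm:ss",
//     "sourceZone" -> "UTC",
//     "targetZone" -> "Europe/London",
//     "targetFmtField" -> "date_fmt",
//     "targetTsField" -> "date_ts"
//   )
// yielding an instruction that parses the source field and re-renders it with the default ISO pattern.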
| intelix/eventstreams | es-instructions/es-instructions-set/src/main/scala/eventstreams/instructions/DateInstruction.scala | Scala | apache-2.0 | 6,417 |
package ingraph.ire.listeners
case class AddListener(listener: ChangeListener)
| FTSRG/ingraph | ire/src/main/scala/ingraph/ire/listeners/AddListener.scala | Scala | epl-1.0 | 80 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.orc
import java.io.File
import java.nio.charset.StandardCharsets
import java.sql.Timestamp
import java.time.{LocalDateTime, ZoneOffset}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce.{JobID, TaskAttemptID, TaskID, TaskType}
import org.apache.hadoop.mapreduce.lib.input.FileSplit
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.orc.{OrcConf, OrcFile}
import org.apache.orc.OrcConf.COMPRESS
import org.apache.orc.mapred.OrcStruct
import org.apache.orc.mapreduce.OrcInputFormat
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.execution.FileSourceScanExec
import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation, RecordReaderIterator}
import org.apache.spark.sql.execution.datasources.v2.BatchScanExec
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
case class AllDataTypesWithNonPrimitiveType(
stringField: String,
intField: Int,
longField: Long,
floatField: Float,
doubleField: Double,
shortField: Short,
byteField: Byte,
booleanField: Boolean,
array: Seq[Int],
arrayContainsNull: Seq[Option[Int]],
map: Map[Int, Long],
mapValueContainsNull: Map[Int, Option[Long]],
data: (Seq[Int], (Int, String)))
case class BinaryData(binaryData: Array[Byte])
case class Contact(name: String, phone: String)
case class Person(name: String, age: Int, contacts: Seq[Contact])
abstract class OrcQueryTest extends OrcTest {
import testImplicits._
test("Read/write All Types") {
val data = (0 to 255).map { i =>
(s"$i", i, i.toLong, i.toFloat, i.toDouble, i.toShort, i.toByte, i % 2 == 0)
}
withOrcFile(data) { file =>
checkAnswer(
spark.read.orc(file),
data.toDF().collect())
}
}
test("Read/write binary data") {
withOrcFile(BinaryData("test".getBytes(StandardCharsets.UTF_8)) :: Nil) { file =>
val bytes = spark.read.orc(file).head().getAs[Array[Byte]](0)
assert(new String(bytes, StandardCharsets.UTF_8) === "test")
}
}
test("Read/write all types with non-primitive type") {
val data: Seq[AllDataTypesWithNonPrimitiveType] = (0 to 255).map { i =>
AllDataTypesWithNonPrimitiveType(
s"$i", i, i.toLong, i.toFloat, i.toDouble, i.toShort, i.toByte, i % 2 == 0,
0 until i,
(0 until i).map(Option(_).filter(_ % 3 == 0)),
(0 until i).map(i => i -> i.toLong).toMap,
(0 until i).map(i => i -> Option(i.toLong)).toMap + (i -> None),
(0 until i, (i, s"$i")))
}
withOrcFile(data) { file =>
checkAnswer(
spark.read.orc(file),
data.toDF().collect())
}
}
test("Read/write UserDefinedType") {
withTempPath { path =>
val data = Seq((1, new TestUDT.MyDenseVector(Array(0.25, 2.25, 4.25))))
val udtDF = data.toDF("id", "vectors")
udtDF.write.orc(path.getAbsolutePath)
val readBack = spark.read.schema(udtDF.schema).orc(path.getAbsolutePath)
checkAnswer(udtDF, readBack)
}
}
test("Creating case class RDD table") {
val data = (1 to 100).map(i => (i, s"val_$i"))
sparkContext.parallelize(data).toDF().createOrReplaceTempView("t")
withTempView("t") {
checkAnswer(sql("SELECT * FROM t"), data.toDF().collect())
}
}
test("Simple selection form ORC table") {
val data = (1 to 10).map { i =>
Person(s"name_$i", i, (0 to 1).map { m => Contact(s"contact_$m", s"phone_$m") })
}
withOrcTable(data, "t") {
// ppd:
// leaf-0 = (LESS_THAN_EQUALS age 5)
// expr = leaf-0
assert(sql("SELECT name FROM t WHERE age <= 5").count() === 5)
// ppd:
// leaf-0 = (LESS_THAN_EQUALS age 5)
// expr = (not leaf-0)
assertResult(10) {
sql("SELECT name, contacts FROM t where age > 5")
.rdd
.flatMap(_.getAs[scala.collection.Seq[_]]("contacts"))
.count()
}
// ppd:
// leaf-0 = (LESS_THAN_EQUALS age 5)
// leaf-1 = (LESS_THAN age 8)
// expr = (and (not leaf-0) leaf-1)
{
val df = sql("SELECT name, contacts FROM t WHERE age > 5 AND age < 8")
assert(df.count() === 2)
assertResult(4) {
df.rdd.flatMap(_.getAs[scala.collection.Seq[_]]("contacts")).count()
}
}
// ppd:
// leaf-0 = (LESS_THAN age 2)
// leaf-1 = (LESS_THAN_EQUALS age 8)
// expr = (or leaf-0 (not leaf-1))
{
val df = sql("SELECT name, contacts FROM t WHERE age < 2 OR age > 8")
assert(df.count() === 3)
assertResult(6) {
df.rdd.flatMap(_.getAs[scala.collection.Seq[_]]("contacts")).count()
}
}
}
}
test("save and load case class RDD with `None`s as orc") {
val data = (
Option.empty[Int],
Option.empty[Long],
Option.empty[Float],
Option.empty[Double],
Option.empty[Boolean]
) :: Nil
withOrcFile(data) { file =>
checkAnswer(
spark.read.orc(file),
Row(Seq.fill(5)(null): _*))
}
}
test("SPARK-16610: Respect orc.compress (i.e., OrcConf.COMPRESS) when compression is unset") {
// Respect `orc.compress` (i.e., OrcConf.COMPRESS).
withTempPath { file =>
spark.range(0, 10).write
.option(COMPRESS.getAttribute, "ZLIB")
.orc(file.getCanonicalPath)
val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".zlib.orc"))
assert(maybeOrcFile.isDefined)
val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
val conf = OrcFile.readerOptions(new Configuration())
Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
assert("ZLIB" === reader.getCompressionKind.name)
}
}
// `compression` overrides `orc.compress`.
withTempPath { file =>
spark.range(0, 10).write
.option("compression", "ZLIB")
.option(COMPRESS.getAttribute, "SNAPPY")
.orc(file.getCanonicalPath)
val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".zlib.orc"))
assert(maybeOrcFile.isDefined)
val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
val conf = OrcFile.readerOptions(new Configuration())
Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
assert("ZLIB" === reader.getCompressionKind.name)
}
}
}
test("Compression options for writing to an ORC file (SNAPPY, ZLIB and NONE)") {
withTempPath { file =>
spark.range(0, 10).write
.option("compression", "ZLIB")
.orc(file.getCanonicalPath)
val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".zlib.orc"))
assert(maybeOrcFile.isDefined)
val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
val conf = OrcFile.readerOptions(new Configuration())
Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
assert("ZLIB" === reader.getCompressionKind.name)
}
}
withTempPath { file =>
spark.range(0, 10).write
.option("compression", "SNAPPY")
.orc(file.getCanonicalPath)
val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".snappy.orc"))
assert(maybeOrcFile.isDefined)
val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
val conf = OrcFile.readerOptions(new Configuration())
Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
assert("SNAPPY" === reader.getCompressionKind.name)
}
}
withTempPath { file =>
spark.range(0, 10).write
.option("compression", "NONE")
.orc(file.getCanonicalPath)
val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".orc"))
assert(maybeOrcFile.isDefined)
val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
val conf = OrcFile.readerOptions(new Configuration())
Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
assert("NONE" === reader.getCompressionKind.name)
}
}
}
test("simple select queries") {
withOrcTable((0 until 10).map(i => (i, i.toString)), "t") {
checkAnswer(
sql("SELECT `_1` FROM t where t.`_1` > 5"),
(6 until 10).map(Row.apply(_)))
checkAnswer(
sql("SELECT `_1` FROM t as tmp where tmp.`_1` < 5"),
(0 until 5).map(Row.apply(_)))
}
}
test("appending") {
val data = (0 until 10).map(i => (i, i.toString))
spark.createDataFrame(data).toDF("c1", "c2").createOrReplaceTempView("tmp")
withOrcFile(data) { file =>
withTempView("t") {
spark.read.orc(file).createOrReplaceTempView("t")
checkAnswer(spark.table("t"), data.map(Row.fromTuple))
sql("INSERT INTO TABLE t SELECT * FROM tmp")
checkAnswer(spark.table("t"), (data ++ data).map(Row.fromTuple))
}
}
spark.sessionState.catalog.dropTable(
TableIdentifier("tmp"),
ignoreIfNotExists = true,
purge = false)
}
test("overwriting") {
val data = (0 until 10).map(i => (i, i.toString))
spark.createDataFrame(data).toDF("c1", "c2").createOrReplaceTempView("tmp")
withOrcTable(data, "t") {
sql("INSERT OVERWRITE TABLE t SELECT * FROM tmp")
checkAnswer(spark.table("t"), data.map(Row.fromTuple))
}
spark.sessionState.catalog.dropTable(
TableIdentifier("tmp"),
ignoreIfNotExists = true,
purge = false)
}
test("self-join") {
// 4 rows, cells of column 1 of row 2 and row 4 are null
val data = (1 to 4).map { i =>
val maybeInt = if (i % 2 == 0) None else Some(i)
(maybeInt, i.toString)
}
withOrcTable(data, "t") {
val selfJoin = sql("SELECT * FROM t x JOIN t y WHERE x.`_1` = y.`_1`")
val queryOutput = selfJoin.queryExecution.analyzed.output
assertResult(4, "Field count mismatches")(queryOutput.size)
assertResult(2, s"Duplicated expression ID in query plan:\\n $selfJoin") {
queryOutput.filter(_.name == "_1").map(_.exprId).size
}
checkAnswer(selfJoin, List(Row(1, "1", 1, "1"), Row(3, "3", 3, "3")))
}
}
test("nested data - struct with array field") {
val data = (1 to 10).map(i => Tuple1((i, Seq(s"val_$i"))))
withOrcTable(data, "t") {
checkAnswer(sql("SELECT `_1`.`_2`[0] FROM t"), data.map {
case Tuple1((_, Seq(string))) => Row(string)
})
}
}
test("nested data - array of struct") {
val data = (1 to 10).map(i => Tuple1(Seq(i -> s"val_$i")))
withOrcTable(data, "t") {
checkAnswer(sql("SELECT `_1`[0].`_2` FROM t"), data.map {
case Tuple1(Seq((_, string))) => Row(string)
})
}
}
test("columns only referenced by pushed down filters should remain") {
withOrcTable((1 to 10).map(Tuple1.apply), "t") {
checkAnswer(sql("SELECT `_1` FROM t WHERE `_1` < 10"), (1 to 9).map(Row.apply(_)))
}
}
test("SPARK-5309 strings stored using dictionary compression in orc") {
withOrcTable((0 until 1000).map(i => ("same", "run_" + i / 100, 1)), "t") {
checkAnswer(
sql("SELECT `_1`, `_2`, SUM(`_3`) FROM t GROUP BY `_1`, `_2`"),
(0 until 10).map(i => Row("same", "run_" + i, 100)))
checkAnswer(
sql("SELECT `_1`, `_2`, SUM(`_3`) FROM t WHERE `_2` = 'run_5' GROUP BY `_1`, `_2`"),
List(Row("same", "run_5", 100)))
}
}
test("SPARK-9170: Don't implicitly lowercase of user-provided columns") {
withTempPath { dir =>
val path = dir.getCanonicalPath
spark.range(0, 10).select('id as "Acol").write.orc(path)
spark.read.orc(path).schema("Acol")
intercept[IllegalArgumentException] {
spark.read.orc(path).schema("acol")
}
checkAnswer(spark.read.orc(path).select("acol").sort("acol"),
(0 until 10).map(Row(_)))
}
}
test("SPARK-10623 Enable ORC PPD") {
withTempPath { dir =>
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
import testImplicits._
val path = dir.getCanonicalPath
// For field "a", the first column has odds integers. This is to check the filtered count
// when `isNull` is performed. For Field "b", `isNotNull` of ORC file filters rows
// only when all the values are null (maybe this works differently when the data
// or query is complicated). So, simply here a column only having `null` is added.
val data = (0 until 10).map { i =>
val maybeInt = if (i % 2 == 0) None else Some(i)
val nullValue: Option[String] = None
(maybeInt, nullValue)
}
        // Repartition the data so that several ORC files are written, which lets ORC
        // skip whole stripes when the filter is pushed down.
spark.createDataFrame(data).toDF("a", "b").repartition(10).write.orc(path)
val df = spark.read.orc(path)
def checkPredicate(pred: Column, answer: Seq[Row]): Unit = {
val sourceDf = stripSparkFilter(df.where(pred))
val data = sourceDf.collect().toSet
val expectedData = answer.toSet
          // When a filter is pushed down to ORC, ORC can apply it while reading rows, so we can
          // check the number of rows returned from ORC to make sure the filter pushdown works.
          // A tricky part is that ORC does not filter rows exactly and may return extra candidate
          // rows. So this checks that the number of results is smaller than the original row
          // count, and then that the results contain the expected data.
assert(
sourceDf.count < 10 && expectedData.subsetOf(data),
s"No data was filtered for predicate: $pred")
}
checkPredicate('a === 5, List(5).map(Row(_, null)))
checkPredicate('a <=> 5, List(5).map(Row(_, null)))
checkPredicate('a < 5, List(1, 3).map(Row(_, null)))
checkPredicate('a <= 5, List(1, 3, 5).map(Row(_, null)))
checkPredicate('a > 5, List(7, 9).map(Row(_, null)))
checkPredicate('a >= 5, List(5, 7, 9).map(Row(_, null)))
checkPredicate('a.isNull, List(null).map(Row(_, null)))
checkPredicate('b.isNotNull, List())
checkPredicate('a.isin(3, 5, 7), List(3, 5, 7).map(Row(_, null)))
checkPredicate('a > 0 && 'a < 3, List(1).map(Row(_, null)))
checkPredicate('a < 1 || 'a > 8, List(9).map(Row(_, null)))
checkPredicate(!('a > 3), List(1, 3).map(Row(_, null)))
checkPredicate(!('a > 0 && 'a < 3), List(3, 5, 7, 9).map(Row(_, null)))
}
}
}
test("SPARK-14962 Produce correct results on array type with isnotnull") {
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
val data = (0 until 10).map(i => Tuple1(Array(i)))
withOrcFile(data) { file =>
val actual = spark
.read
.orc(file)
.where("_1 is not null")
val expected = data.toDF()
checkAnswer(actual, expected)
}
}
}
test("SPARK-15198 Support for pushing down filters for boolean types") {
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
val data = (0 until 10).map(_ => (true, false))
withOrcFile(data) { file =>
val df = spark.read.orc(file).where("_2 == true")
val actual = stripSparkFilter(df).count()
// ORC filter should be applied and the total count should be 0.
assert(actual === 0)
}
}
}
test("Support for pushing down filters for decimal types") {
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
val data = (0 until 10).map(i => Tuple1(BigDecimal.valueOf(i)))
checkPredicatePushDown(spark.createDataFrame(data).toDF("a"), 10, "a == 2")
}
}
test("Support for pushing down filters for timestamp types") {
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
val timeString = "2015-08-20 14:57:00"
val data = (0 until 10).map { i =>
val milliseconds = Timestamp.valueOf(timeString).getTime + i * 3600
Tuple1(new Timestamp(milliseconds))
}
checkPredicatePushDown(spark.createDataFrame(data).toDF("a"), 10, s"a == '$timeString'")
}
}
test("column nullability and comment - write and then read") {
val schema = (new StructType)
.add("cl1", IntegerType, nullable = false, comment = "test")
.add("cl2", IntegerType, nullable = true)
.add("cl3", IntegerType, nullable = true)
val row = Row(3, null, 4)
val df = spark.createDataFrame(sparkContext.parallelize(row :: Nil), schema)
val tableName = "tab"
withTable(tableName) {
df.write.format("orc").mode("overwrite").saveAsTable(tableName)
// Verify the DDL command result: DESCRIBE TABLE
checkAnswer(
sql(s"desc $tableName").select("col_name", "comment").where($"comment" === "test"),
Row("cl1", "test") :: Nil)
// Verify the schema
val expectedFields = schema.fields.map(f => f.copy(nullable = true))
assert(spark.table(tableName).schema == schema.copy(fields = expectedFields))
}
}
test("Empty schema does not read data from ORC file") {
val data = Seq((1, 1), (2, 2))
withOrcFile(data) { path =>
val conf = new Configuration()
conf.set(OrcConf.INCLUDE_COLUMNS.getAttribute, "")
conf.setBoolean("hive.io.file.read.all.columns", false)
val orcRecordReader = {
val file = new File(path).listFiles().find(_.getName.endsWith(".snappy.orc")).head
val split = new FileSplit(new Path(file.toURI), 0, file.length, Array.empty[String])
val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
val oif = new OrcInputFormat[OrcStruct]
oif.createRecordReader(split, hadoopAttemptContext)
}
val recordsIterator = new RecordReaderIterator[OrcStruct](orcRecordReader)
try {
assert(recordsIterator.next().toString == "{null, null}")
} finally {
recordsIterator.close()
}
}
}
test("read from multiple orc input paths") {
val path1 = Utils.createTempDir()
val path2 = Utils.createTempDir()
makeOrcFile((1 to 10).map(Tuple1.apply), path1)
makeOrcFile((1 to 10).map(Tuple1.apply), path2)
val df = spark.read.orc(path1.getCanonicalPath, path2.getCanonicalPath)
assert(df.count() == 20)
}
test("Enabling/disabling ignoreCorruptFiles") {
def testIgnoreCorruptFiles(): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
spark.range(1).toDF("a").write.orc(new Path(basePath, "first").toString)
spark.range(1, 2).toDF("a").write.orc(new Path(basePath, "second").toString)
spark.range(2, 3).toDF("a").write.json(new Path(basePath, "third").toString)
val df = spark.read.orc(
new Path(basePath, "first").toString,
new Path(basePath, "second").toString,
new Path(basePath, "third").toString)
checkAnswer(df, Seq(Row(0), Row(1)))
}
}
def testIgnoreCorruptFilesWithoutSchemaInfer(): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
spark.range(1).toDF("a").write.orc(new Path(basePath, "first").toString)
spark.range(1, 2).toDF("a").write.orc(new Path(basePath, "second").toString)
spark.range(2, 3).toDF("a").write.json(new Path(basePath, "third").toString)
val df = spark.read.schema("a long").orc(
new Path(basePath, "first").toString,
new Path(basePath, "second").toString,
new Path(basePath, "third").toString)
checkAnswer(df, Seq(Row(0), Row(1)))
}
}
def testAllCorruptFiles(): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
spark.range(1).toDF("a").write.json(new Path(basePath, "first").toString)
spark.range(1, 2).toDF("a").write.json(new Path(basePath, "second").toString)
val df = spark.read.orc(
new Path(basePath, "first").toString,
new Path(basePath, "second").toString)
assert(df.count() == 0)
}
}
def testAllCorruptFilesWithoutSchemaInfer(): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
spark.range(1).toDF("a").write.json(new Path(basePath, "first").toString)
spark.range(1, 2).toDF("a").write.json(new Path(basePath, "second").toString)
val df = spark.read.schema("a long").orc(
new Path(basePath, "first").toString,
new Path(basePath, "second").toString)
assert(df.count() == 0)
}
}
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "true") {
testIgnoreCorruptFiles()
testIgnoreCorruptFilesWithoutSchemaInfer()
val m1 = intercept[AnalysisException] {
testAllCorruptFiles()
}.getMessage
assert(m1.contains("Unable to infer schema for ORC"))
testAllCorruptFilesWithoutSchemaInfer()
}
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "false") {
val e1 = intercept[SparkException] {
testIgnoreCorruptFiles()
}
assert(e1.getMessage.contains("Malformed ORC file"))
val e2 = intercept[SparkException] {
testIgnoreCorruptFilesWithoutSchemaInfer()
}
assert(e2.getMessage.contains("Malformed ORC file"))
val e3 = intercept[SparkException] {
testAllCorruptFiles()
}
assert(e3.getMessage.contains("Could not read footer for file"))
val e4 = intercept[SparkException] {
testAllCorruptFilesWithoutSchemaInfer()
}
assert(e4.getMessage.contains("Malformed ORC file"))
}
}
test("SPARK-27160 Predicate pushdown correctness on DecimalType for ORC") {
withTempPath { dir =>
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
val path = dir.getCanonicalPath
Seq(BigDecimal(0.1), BigDecimal(0.2), BigDecimal(-0.3))
.toDF("x").write.orc(path)
val df = spark.read.orc(path)
checkAnswer(df.filter("x >= 0.1"), Seq(Row(0.1), Row(0.2)))
checkAnswer(df.filter("x > 0.1"), Seq(Row(0.2)))
checkAnswer(df.filter("x <= 0.15"), Seq(Row(0.1), Row(-0.3)))
checkAnswer(df.filter("x < 0.1"), Seq(Row(-0.3)))
checkAnswer(df.filter("x == 0.2"), Seq(Row(0.2)))
}
}
}
}
abstract class OrcQuerySuite extends OrcQueryTest with SharedSparkSession {
import testImplicits._
test("LZO compression options for writing to an ORC file") {
withTempPath { file =>
spark.range(0, 10).write
.option("compression", "LZO")
.orc(file.getCanonicalPath)
val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".lzo.orc"))
assert(maybeOrcFile.isDefined)
val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
val conf = OrcFile.readerOptions(new Configuration())
Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
assert("LZO" === reader.getCompressionKind.name)
}
}
}
test("Schema discovery on empty ORC files") {
// SPARK-8501 is fixed.
withTempPath { dir =>
val path = dir.getCanonicalPath
withTable("empty_orc") {
withTempView("empty", "single") {
spark.sql(
s"""CREATE TABLE empty_orc(key INT, value STRING)
|USING ORC
|LOCATION '${dir.toURI}'
""".stripMargin)
val emptyDF = Seq.empty[(Int, String)].toDF("key", "value").coalesce(1)
emptyDF.createOrReplaceTempView("empty")
          // This creates one empty ORC file with the ORC SerDe. We use this trick because
          // the Spark SQL ORC data source always avoids writing empty ORC files.
spark.sql(
s"""INSERT INTO TABLE empty_orc
|SELECT key, value FROM empty
""".stripMargin)
val df = spark.read.orc(path)
assert(df.schema === emptyDF.schema.asNullable)
checkAnswer(df, emptyDF)
}
}
}
}
test("SPARK-21791 ORC should support column names with dot") {
withTempDir { dir =>
val path = new File(dir, "orc").getCanonicalPath
Seq(Some(1), None).toDF("col.dots").write.orc(path)
assert(spark.read.orc(path).collect().length == 2)
}
}
test("SPARK-25579 ORC PPD should support column names with dot") {
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
checkPredicatePushDown(spark.range(10).toDF("col.dot"), 10, "`col.dot` == 2")
}
}
test("SPARK-20728 Make ORCFileFormat configurable between sql/hive and sql/core") {
withSQLConf(SQLConf.ORC_IMPLEMENTATION.key -> "hive") {
val e = intercept[AnalysisException] {
sql("CREATE TABLE spark_20728(a INT) USING ORC")
}
assert(e.message.contains("Hive built-in ORC data source must be used with Hive support"))
}
withSQLConf(SQLConf.ORC_IMPLEMENTATION.key -> "native") {
withTable("spark_20728") {
sql("CREATE TABLE spark_20728(a INT) USING ORC")
val fileFormat = sql("SELECT * FROM spark_20728").queryExecution.analyzed.collectFirst {
case l: LogicalRelation => l.relation.asInstanceOf[HadoopFsRelation].fileFormat.getClass
}
assert(fileFormat == Some(classOf[OrcFileFormat]))
}
}
}
test("SPARK-34862: Support ORC vectorized reader for nested column") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark.range(10).map { x =>
val stringColumn = s"$x" * 10
val structColumn = (x, s"$x" * 100)
val arrayColumn = (0 until 5).map(i => (x + i, s"$x" * 5))
val mapColumn = Map(
s"$x" -> (x * 0.1, (x, s"$x" * 100)),
(s"$x" * 2) -> (x * 0.2, (x, s"$x" * 200)),
(s"$x" * 3) -> (x * 0.3, (x, s"$x" * 300)))
(x, stringColumn, structColumn, arrayColumn, mapColumn)
}.toDF("int_col", "string_col", "struct_col", "array_col", "map_col")
df.write.format("orc").save(path)
withSQLConf(SQLConf.ORC_VECTORIZED_READER_NESTED_COLUMN_ENABLED.key -> "true") {
val readDf = spark.read.orc(path)
val vectorizationEnabled = readDf.queryExecution.executedPlan.find {
case scan @ (_: FileSourceScanExec | _: BatchScanExec) => scan.supportsColumnar
case _ => false
}.isDefined
assert(vectorizationEnabled)
checkAnswer(readDf, df)
}
}
}
test("SPARK-36594: ORC vectorized reader should properly check maximal number of fields") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark.range(10).map { x =>
val stringColumn = s"$x" * 10
val structColumn = (x, s"$x" * 100)
val arrayColumn = (0 until 5).map(i => (x + i, s"$x" * 5))
val mapColumn = Map(s"$x" -> (x * 0.1, (x, s"$x" * 100)))
(x, stringColumn, structColumn, arrayColumn, mapColumn)
}.toDF("int_col", "string_col", "struct_col", "array_col", "map_col")
df.write.format("orc").save(path)
Seq(("5", false), ("10", true)).foreach {
case (maxNumFields, vectorizedEnabled) =>
withSQLConf(SQLConf.ORC_VECTORIZED_READER_NESTED_COLUMN_ENABLED.key -> "true",
SQLConf.WHOLESTAGE_MAX_NUM_FIELDS.key -> maxNumFields) {
val scanPlan = spark.read.orc(path).queryExecution.executedPlan
assert(scanPlan.find {
case scan @ (_: FileSourceScanExec | _: BatchScanExec) => scan.supportsColumnar
case _ => false
}.isDefined == vectorizedEnabled)
}
}
}
}
test("Read/write all timestamp types") {
val data = (0 to 255).map { i =>
(new Timestamp(i), LocalDateTime.of(2019, 3, 21, 0, 2, 3, 456000000 + i))
} :+ (null, null)
withOrcFile(data) { file =>
withAllOrcReaders {
checkAnswer(spark.read.orc(file), data.toDF().collect())
}
}
}
test("SPARK-36346: can't read TimestampLTZ as TimestampNTZ") {
val data = (1 to 10).map { i =>
val ts = new Timestamp(i)
Row(ts)
}
val answer = (1 to 10).map { i =>
      // The second parameter is `nanoOfSecond`, while java.sql.Timestamp accepts milliseconds
      // as input, so the millisecond value is multiplied by NANOS_PER_MILLIS (1,000,000) here.
val ts = LocalDateTime.ofEpochSecond(0, i * 1000000, ZoneOffset.UTC)
Row(ts)
}
val actualSchema = StructType(Seq(StructField("time", TimestampType, false)))
val providedSchema = StructType(Seq(StructField("time", TimestampNTZType, false)))
withTempPath { file =>
val df = spark.createDataFrame(sparkContext.parallelize(data), actualSchema)
df.write.orc(file.getCanonicalPath)
withAllOrcReaders {
val msg = intercept[SparkException] {
spark.read.schema(providedSchema).orc(file.getCanonicalPath).collect()
}.getMessage
assert(msg.contains("Unable to convert timestamp of Orc to data type 'timestamp_ntz'"))
}
}
}
test("SPARK-36346: read TimestampNTZ as TimestampLTZ") {
val data = (1 to 10).map { i =>
      // The second parameter is `nanoOfSecond`, while java.sql.Timestamp accepts milliseconds
      // as input, so the millisecond value is multiplied by NANOS_PER_MILLIS (1,000,000) here.
val ts = LocalDateTime.ofEpochSecond(0, i * 1000000, ZoneOffset.UTC)
Row(ts)
}
val answer = (1 to 10).map { i =>
val ts = new java.sql.Timestamp(i)
Row(ts)
}
val actualSchema = StructType(Seq(StructField("time", TimestampNTZType, false)))
val providedSchema = StructType(Seq(StructField("time", TimestampType, false)))
withTempPath { file =>
val df = spark.createDataFrame(sparkContext.parallelize(data), actualSchema)
df.write.orc(file.getCanonicalPath)
withAllOrcReaders {
checkAnswer(spark.read.schema(providedSchema).orc(file.getCanonicalPath), answer)
}
}
}
}
class OrcV1QuerySuite extends OrcQuerySuite {
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_LIST, "orc")
}
class OrcV2QuerySuite extends OrcQuerySuite {
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_LIST, "")
}
| nchammas/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala | Scala | apache-2.0 | 31,390 |
package Repositories
import Database.UserSchema
import Models.User
import org.squeryl.PrimitiveTypeMode._
/**
* Created by sandman on 11/29/14.
*/
object UserRepository {
def addUser(name: String, email: String) = {
transaction {
val newUser = new User(name = name, email = email)
UserSchema.users.insert(newUser)
println("Inserted user")
}
}
}
| J4g0n/testingSqueryl | src/main/scala/Repositories/UserRepository.scala | Scala | mit | 379 |
package org.jetbrains.plugins.scala
package codeInspection
package collections
import com.intellij.codeInspection.ProblemHighlightType
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScExpression
import org.jetbrains.plugins.scala.lang.psi.types.ScTypeExt
import org.jetbrains.plugins.scala.lang.psi.types.result._
import scala.collection.immutable.ArraySeq
/**
* @author Nikolay.Tropin
*/
object RedundantCollectionConversion extends SimplificationType {
override def hint: String = ScalaInspectionBundle.message("redundant.collection.conversion")
override def getSimplification(expr: ScExpression): Option[Simplification] = {
val typeAfterConversion = expr.`type`().getOrAny
// note:
// will match <Seq(1, 2).to> and <Seq(1, 2).to[List]> but not <Seq(1, 2).to>[List]
// because of a check in MethodRepr in `.toCollection`
expr match {
// TODO infix notation?
case `.toCollection`(base@Typeable(baseType)) if baseType.conforms(typeAfterConversion) =>
val simplification = replace(expr).withText(base.getText).highlightFrom(base)
Some(simplification)
case _ => None
}
}
}
class RedundantCollectionConversionInspection extends OperationOnCollectionInspection {
override def highlightType = ProblemHighlightType.LIKE_UNUSED_SYMBOL
override def possibleSimplificationTypes: ArraySeq[SimplificationType] = ArraySeq(RedundantCollectionConversion)
}
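// Illustrative example (an assumption about a typical match, not taken from the inspection's test
// data): for an expression whose static type already conforms to the conversion target, e.g.
//   val xs: List[Int] = List(1, 2).toList
// the simplification above would replace `List(1, 2).toList` with `List(1, 2)`, highlighting the
// redundant conversion with the LIKE_UNUSED_SYMBOL style.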
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/codeInspection/collections/RedundantCollectionConversionInspection.scala | Scala | apache-2.0 | 1,427 |
package io.vamp.gateway_driver.haproxy
import io.vamp.common.util.HashUtil
case class HaProxy(
frontends: List[Frontend],
backends: List[Backend],
virtualHostFrontends: List[Frontend],
virtualHostBackends: List[Backend]
)
case class Frontend(
name: String,
lookup: String,
metadata: Map[String, Any],
bindIp: Option[String],
bindPort: Option[Int],
mode: Mode.Value,
unixSock: Option[String],
sockProtocol: Option[String],
conditions: List[Condition],
defaultBackend: Backend
)
case class Backend(
name: String,
lookup: String,
metadata: Map[String, Any],
mode: Mode.Value,
proxyServers: List[ProxyServer],
servers: List[Server],
rewrites: List[Rewrite],
sticky: Boolean,
balance: String
)
object Mode extends Enumeration {
val http, tcp = Value
}
case class Condition(name: String, destination: Backend, acls: Option[HaProxyAcls])
object Acl {
def apply(definition: String): Acl = Acl(HashUtil.hexSha1(definition).substring(0, 16), definition)
}
case class Acl(name: String, definition: String)
case class Rewrite(path: String, condition: String)
case class ProxyServer(name: String, lookup: String, unixSock: String, weight: Int)
case class Server(name: String, lookup: String, url: Option[String], host: Option[String], port: Option[Int], weight: Int, checkInterval: Option[Int] = None)
| magneticio/vamp | haproxy/src/main/scala/io/vamp/gateway_driver/haproxy/HaProxy.scala | Scala | apache-2.0 | 1,480 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
package tools
import StringReporter._
import Fragment.{countTrailingEOLs, countLeadingEOLs}
import SharedHelpers.thisLineNumber
import org.scalatest.exceptions.StackDepthException
import org.scalatest.exceptions.TestFailedException
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
class StringReporterSuite extends AnyFunSuite with Matchers {
test("Empty string should just come back as an empty string.") {
assert(Fragment("", AnsiCyan).toPossiblyColoredText(true) === "")
}
test("Blank string should just come back as the same string.") {
assert(Fragment(" ", AnsiCyan).toPossiblyColoredText(true) === " ")
assert(Fragment(" ", AnsiCyan).toPossiblyColoredText(true) === " ")
assert(Fragment(" ", AnsiCyan).toPossiblyColoredText(true) === " ")
}
val cyanRegex = "\\u001b\\\\[36m"
val resetRegex = "\\u001b\\\\[0m"
  // Pad the strings so that the number of occurrences of a marker is always one less than the
  // length of the array produced by splitting on it; split drops the trailing piece when the
  // marker sits at the very end of the string.
def pad(s: String) = " " + s + " "
def countCyan(s: String) = pad(s).split(cyanRegex).length - 1
def countReset(s: String) = pad(s).split(resetRegex).length - 1
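  // Worked example (illustrative): for s = ansiCyan + "a" + ansiReset, pad(s) is
  // " \u001b[36ma\u001b[0m ", which splits on cyanRegex into 2 pieces, so countCyan(s) == 2 - 1 == 1.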
test("A non-blank string with no EOLs should come back surrounded by ansiColor and ansiReset, " +
"with no other occurrences of them in the returned string") {
val aResult = Fragment("a", AnsiCyan).toPossiblyColoredText(true)
aResult should startWith (ansiCyan)
aResult should endWith (ansiReset)
countCyan(aResult) should equal (1)
countReset(aResult) should equal (1)
}
test("A non-blank string with one EOL should come back surrounded by ansiColor and ansiReset, " +
"with two occurrences of them in the returned string") {
val aResult = Fragment("a\\nb", AnsiCyan).toPossiblyColoredText(true)
aResult should startWith (ansiCyan)
aResult should endWith (ansiReset)
countCyan(aResult) should equal (2)
countReset(aResult) should equal (2)
}
test("countTrailingEOLs should return the number of EOLs at the end of the passed string") {
countTrailingEOLs("") should be (0)
countTrailingEOLs("\\n") should be (1)
countTrailingEOLs("\\n\\n") should be (2)
countTrailingEOLs("howdy\\n\\n") should be (2)
countTrailingEOLs("\\nhowdy\\n\\n") should be (2)
countTrailingEOLs("hohoho\\nhowdy\\n\\n") should be (2)
countTrailingEOLs("\\n\\nhohoho\\nhowdy\\n\\n") should be (2)
countTrailingEOLs("\\n\\nhohoho\\nhowdy\\n") should be (1)
countTrailingEOLs("\\n\\nhohoho\\nhowdy") should be (0)
countTrailingEOLs("not a single one ") should be (0)
countTrailingEOLs("not a single one") should be (0)
}
test("countLeadingEOLs should return the number of EOLs at the beginning of the passed string") {
countLeadingEOLs("") should be (0)
countLeadingEOLs("\\n") should be (0) // Allow these to be counted as at the end
countLeadingEOLs("\\n\\n") should be (0)
countLeadingEOLs("\\n\\nhowdy") should be (2)
countLeadingEOLs("\\n\\nhowdy\\n") should be (2)
countLeadingEOLs("\\n\\nhohoho\\nhowdy") should be (2)
countLeadingEOLs("\\n\\nhohoho\\nhowdy\\n\\n") should be (2)
countLeadingEOLs("\\nhohoho\\nhowdy\\n\\n") should be (1)
countLeadingEOLs("hohoho\\nhowdy\\n\\n") should be (0)
countLeadingEOLs("not a single one ") should be (0)
countLeadingEOLs("not a single one") should be (0)
}
test("A non-blank string with one EOL in the middle and one EOL at the end should come back with the first two " +
"strings surrounded by ansiColor and ansiReset, but nothing after the trailing EOL") {
val aResult = Fragment("a\\nb\\n", AnsiCyan).toPossiblyColoredText(true)
aResult should startWith (ansiCyan)
aResult should not endWith (ansiReset)
aResult should endWith (ansiReset + "\\n")
countCyan(aResult) should equal (2)
countReset(aResult) should equal (2)
}
test("A non-blank string with one EOL in the middle and two EOLs at the end should come back with the first two " +
"strings surrounded by ansiColor and ansiReset, but nothing after the trailing EOLs") {
val aResult = Fragment("a\\nb\\n\\n", AnsiCyan).toPossiblyColoredText(true)
aResult should startWith (ansiCyan)
aResult should not endWith (ansiReset)
aResult should endWith (ansiReset + "\\n\\n")
countCyan(aResult) should equal (2)
countReset(aResult) should equal (2)
}
test("A non-blank string with one EOL in the middle and one EOL at the beginning should come back with the last two " +
"strings surrounded by ansiColor and ansiReset, but nothing before the initial EOL") {
    val aResult = Fragment("\na\nb", AnsiCyan).toPossiblyColoredText(true)
    aResult should not startWith (ansiCyan)
    aResult should endWith (ansiReset)
    aResult should startWith ("\n" + ansiCyan)
    withClue("\"" + aResult.toList.mkString(" ").replaceAll("\n", "EOL") + "\"") {
countCyan(aResult) should equal (2)
countReset(aResult) should equal (2)
}
}
test("stringsToPrintOnError should include the message in unformatted mode") {
val msg = "A stitch in time saves nine."
val strings: Vector[String] =
StringReporter.stringsToPrintOnError(
noteMessageFun = Resources.infoProvidedNote,
errorMessageFun = Resources.infoProvided,
message = msg,
throwable = None,
analysis = Vector.empty,
formatter = None,
suiteName = None,
testName = None,
duration = None,
presentUnformatted = true,
presentAllDurations = false,
presentShortStackTraces = false,
presentFullStackTraces = false,
presentFilePathname = false
)
strings should have size 1
strings(0) should include (msg)
}
test("makeDurationString when duration < 1000") {
assert(makeDurationString(23) === "23 milliseconds")
}
test("makeDurationString when duration == 1") {
assert(makeDurationString(1) === "1 millisecond")
}
test("makeDurationString when duration == 1000") {
assert(makeDurationString(1000) === "1 second")
}
test("makeDurationString when duration == 1001") {
assert(makeDurationString(1001) === "1 second, 1 millisecond")
}
test("makeDurationString when duration == 1049") {
assert(makeDurationString(1049) === "1 second, 49 milliseconds")
}
test("makeDurationString when duration == 2000") {
assert(makeDurationString(2000) === "2 seconds")
}
test("makeDurationString when duration == 10000") {
assert(makeDurationString(10000) === "10 seconds")
}
test("makeDurationString when duration == 3049") {
assert(makeDurationString(3049) === "3 seconds, 49 milliseconds")
}
test("makeDurationString when duration == 60000") {
assert(makeDurationString(60000) === "1 minute")
}
test("makeDurationString when duration == 60001") {
assert(makeDurationString(60000) === "1 minute")
}
test("makeDurationString when duration == 60999") {
assert(makeDurationString(60000) === "1 minute")
}
test("makeDurationString when duration == 61000") {
assert(makeDurationString(61000) === "1 minute, 1 second")
}
test("makeDurationString when duration == 61999") {
assert(makeDurationString(61000) === "1 minute, 1 second")
}
test("makeDurationString when duration == 62000") {
assert(makeDurationString(62000) === "1 minute, 2 seconds")
}
test("makeDurationString when duration == 65388") {
assert(makeDurationString(65388) === "1 minute, 5 seconds")
}
test("makeDurationString when duration == 120000") {
assert(makeDurationString(120000) === "2 minutes")
}
test("makeDurationString when duration == 120999") {
assert(makeDurationString(120999) === "2 minutes")
}
test("makeDurationString when duration == 121000") {
assert(makeDurationString(121000) === "2 minutes, 1 second")
}
test("makeDurationString when duration == 241999") {
assert(makeDurationString(241999) === "4 minutes, 1 second")
}
test("makeDurationString when duration == 122000") {
assert(makeDurationString(122000) === "2 minutes, 2 seconds")
}
test("makeDurationString when duration == 299999") {
assert(makeDurationString(299999) === "4 minutes, 59 seconds")
}
test("makeDurationString when duration == 3600000") {
assert(makeDurationString(3600000) === "1 hour")
}
test("makeDurationString when duration == 3600999") {
assert(makeDurationString(3600999) === "1 hour")
}
test("makeDurationString when duration == 3601000") {
assert(makeDurationString(3601000) === "1 hour, 1 second")
}
test("makeDurationString when duration == 3601999") {
assert(makeDurationString(3601999) === "1 hour, 1 second")
}
test("makeDurationString when duration == 3602000") {
assert(makeDurationString(3602000) === "1 hour, 2 seconds")
}
test("makeDurationString when duration == 3659999") {
assert(makeDurationString(3659999) === "1 hour, 59 seconds")
}
test("makeDurationString when duration == 3660000") {
assert(makeDurationString(3660000) === "1 hour, 1 minute")
}
test("makeDurationString when duration == 3660999") {
assert(makeDurationString(3660999) === "1 hour, 1 minute")
}
test("makeDurationString when duration == 3661000") {
assert(makeDurationString(3661000) === "1 hour, 1 minute, 1 second")
}
test("makeDurationString when duration == 3661999") {
assert(makeDurationString(3661999) === "1 hour, 1 minute, 1 second")
}
test("makeDurationString when duration == 3662000") {
assert(makeDurationString(3662000) === "1 hour, 1 minute, 2 seconds")
}
test("makeDurationString when duration == 3719999") {
assert(makeDurationString(3719999) === "1 hour, 1 minute, 59 seconds")
}
test("makeDurationString when duration == 3720000") {
assert(makeDurationString(3720000) === "1 hour, 2 minutes")
}
test("makeDurationString when duration == 7140999") {
assert(makeDurationString(7140999) === "1 hour, 59 minutes")
}
test("makeDurationString when duration == 3721000") {
assert(makeDurationString(3721000) === "1 hour, 2 minutes, 1 second")
}
test("makeDurationString when duration == 7141999") {
assert(makeDurationString(7141999) === "1 hour, 59 minutes, 1 second")
}
test("makeDurationString when duration == 3722500") {
assert(makeDurationString(3722500) === "1 hour, 2 minutes, 2 seconds")
}
test("makeDurationString when duration == 7199999") {
assert(makeDurationString(7199999) === "1 hour, 59 minutes, 59 seconds")
}
test("makeDurationString when duration == 7200000") {
assert(makeDurationString(7200000) === "2 hours")
}
test("makeDurationString when duration == 360000000") {
assert(makeDurationString(360000000) === "100 hours")
}
test("makeDurationString when duration == 7201000") {
assert(makeDurationString(7201000) === "2 hours, 1 second")
}
test("makeDurationString when duration == 7201999") {
assert(makeDurationString(7201999) === "2 hours, 1 second")
}
test("makeDurationString when duration == 7202000") {
assert(makeDurationString(7202000) === "2 hours, 2 seconds")
}
test("makeDurationString when duration == 7259999") {
assert(makeDurationString(7259999) === "2 hours, 59 seconds")
}
test("makeDurationString when duration == 7260000") {
assert(makeDurationString(7260000) === "2 hours, 1 minute")
}
test("makeDurationString when duration == 7260999") {
assert(makeDurationString(7260999) === "2 hours, 1 minute")
}
test("makeDurationString when duration == 7261000") {
assert(makeDurationString(7261000) === "2 hours, 1 minute, 1 second")
}
test("makeDurationString when duration == 7261999") {
assert(makeDurationString(7261999) === "2 hours, 1 minute, 1 second")
}
test("makeDurationString when duration == 7262000") {
assert(makeDurationString(7262000) === "2 hours, 1 minute, 2 seconds")
}
test("makeDurationString when duration == 7319999") {
assert(makeDurationString(7319999) === "2 hours, 1 minute, 59 seconds")
}
test("makeDurationString when duration == 7320000") {
assert(makeDurationString(7320000) === "2 hours, 2 minutes")
}
test("makeDurationString when duration == 10740999") {
assert(makeDurationString(10740999) === "2 hours, 59 minutes")
}
test("makeDurationString when duration == 7321000") {
assert(makeDurationString(7321000) === "2 hours, 2 minutes, 1 second")
}
test("makeDurationString when duration == 10741999") {
assert(makeDurationString(10741999) === "2 hours, 59 minutes, 1 second")
}
test("makeDurationString when duration == 7322500") {
assert(makeDurationString(7322500) === "2 hours, 2 minutes, 2 seconds")
}
test("makeDurationString when duration == 10799999") {
assert(makeDurationString(10799999) === "2 hours, 59 minutes, 59 seconds")
}
test("withPossibleLineNumber returns simple file name on same line if presentFilePathname is false") {
import org.scalactic.source
import StringReporter.withPossibleLineNumber
val result = withPossibleLineNumber("oops", Some(new TestFailedException((_: StackDepthException) => Some("also oops"), None, Left(source.Position.here), None, Vector.empty)), false)
assert(result === "oops (StringReporterSuite.scala:" + (thisLineNumber - 1) + ")")
}
test("withPossibleLineNumber returns full file pathname on next line if presentFilePathname is true and it is available") {
import StringReporter.withPossibleLineNumber
import org.scalactic.source
val result = withPossibleLineNumber("oops", Some(new TestFailedException((_: StackDepthException) => Some("also oops"), None, Left(source.Position.here), None, Vector.empty)), true)
assert(result startsWith "oops\\n** ")
if (System.getenv("SCALACTIC_FILL_FILE_PATHNAMES") != null && System.getenv("SCALACTIC_FILL_FILE_PATHNAMES") == "yes")
assert(result endsWith "org/scalatest/tools/StringReporterSuite.scala:" + (thisLineNumber - 3) + " **")
else
assert(result endsWith "Please set the environment variable SCALACTIC_FILL_FILE_PATHNAMES to yes at compile time to enable this feature.:" + (thisLineNumber - 5) + " **")
}
class BuilderStringReporter(presentInColor: Boolean) extends StringReporter(
false,
presentInColor,
false,
false,
false,
false,
false,
false,
false,
false,
false
) {
val builder = new StringBuilder
protected def printPossiblyInColor(fragment: Fragment): Unit = {
builder.append(fragment.toPossiblyColoredText(presentInColor) + scala.compat.Platform.EOL)
}
protected def printNoColor(text: String): Unit = {
builder.append(text)
}
def dispose(): Unit = {
}
def content: String = builder.toString
}
case class Person(name: String, age: Int)
test("StringReporter should include difference analysis in the content it display") {
class ExampleSpec extends AnyFunSuite with Matchers {
test("test") {
Person("Student 1", 22) shouldEqual Person("Student 2", 23)
}
}
val rep = new BuilderStringReporter(false)
val suite = new ExampleSpec
suite.run(None, Args(rep))
val failingLineNumber = thisLineNumber - 6
assert(rep.content ==
s"""- test *** FAILED ***
| Person(Student 1,22) did not equal Person(Student 2,23) (StringReporterSuite.scala:$failingLineNumber)
| Analysis:
| StringReporterSuite$$Person(age: 22 -> 23, name: "Student [1]" -> "Student [2]")
|""".stripMargin
)
}
}
| scalatest/scalatest | jvm/scalatest-test/src/test/scala/org/scalatest/tools/StringReporterSuite.scala | Scala | apache-2.0 | 16,269 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rpc.netty
import java.io._
import java.net.{InetSocketAddress, URI}
import java.nio.ByteBuffer
import java.nio.channels.{Pipe, ReadableByteChannel, WritableByteChannel}
import java.util.concurrent._
import java.util.concurrent.atomic.AtomicBoolean
import javax.annotation.Nullable
import scala.concurrent.{Future, Promise}
import scala.reflect.ClassTag
import scala.util.{DynamicVariable, Failure, Success, Try}
import scala.util.control.NonFatal
import org.apache.spark.{SecurityManager, SparkConf, SparkContext}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.EXECUTOR_ID
import org.apache.spark.internal.config.Network._
import org.apache.spark.network.TransportContext
import org.apache.spark.network.client._
import org.apache.spark.network.crypto.{AuthClientBootstrap, AuthServerBootstrap}
import org.apache.spark.network.netty.SparkTransportConf
import org.apache.spark.network.server._
import org.apache.spark.rpc._
import org.apache.spark.serializer.{JavaSerializer, JavaSerializerInstance, SerializationStream}
import org.apache.spark.util.{ByteBufferInputStream, ByteBufferOutputStream, ThreadUtils, Utils}
private[netty] class NettyRpcEnv(
val conf: SparkConf,
javaSerializerInstance: JavaSerializerInstance,
host: String,
securityManager: SecurityManager,
numUsableCores: Int) extends RpcEnv(conf) with Logging {
val role = conf.get(EXECUTOR_ID).map { id =>
if (id == SparkContext.DRIVER_IDENTIFIER) "driver" else "executor"
}
private[netty] val transportConf = SparkTransportConf.fromSparkConf(
conf.clone.set(RPC_IO_NUM_CONNECTIONS_PER_PEER, 1),
"rpc",
conf.get(RPC_IO_THREADS).getOrElse(numUsableCores),
role)
private val dispatcher: Dispatcher = new Dispatcher(this, numUsableCores)
private val streamManager = new NettyStreamManager(this)
private val transportContext = new TransportContext(transportConf,
new NettyRpcHandler(dispatcher, this, streamManager))
private def createClientBootstraps(): java.util.List[TransportClientBootstrap] = {
if (securityManager.isAuthenticationEnabled()) {
java.util.Arrays.asList(new AuthClientBootstrap(transportConf,
securityManager.getSaslUser(), securityManager))
} else {
java.util.Collections.emptyList[TransportClientBootstrap]
}
}
private val clientFactory = transportContext.createClientFactory(createClientBootstraps())
/**
* A separate client factory for file downloads. This avoids using the same RPC handler as
* the main RPC context, so that events caused by these clients are kept isolated from the
* main RPC traffic.
*
* It also allows for different configuration of certain properties, such as the number of
* connections per peer.
*/
@volatile private var fileDownloadFactory: TransportClientFactory = _
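  // Created lazily by `downloadClient` below (double-checked locking), so the extra
  // factory is only instantiated when a file is actually downloaded through this RpcEnv.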
val timeoutScheduler = ThreadUtils.newDaemonSingleThreadScheduledExecutor("netty-rpc-env-timeout")
// Because TransportClientFactory.createClient is blocking, we need to run it in this thread pool
// to implement non-blocking send/ask.
// TODO: a non-blocking TransportClientFactory.createClient in future
private[netty] val clientConnectionExecutor = ThreadUtils.newDaemonCachedThreadPool(
"netty-rpc-connection",
conf.get(RPC_CONNECT_THREADS))
@volatile private var server: TransportServer = _
private val stopped = new AtomicBoolean(false)
/**
* A map for [[RpcAddress]] and [[Outbox]]. When we are connecting to a remote [[RpcAddress]],
* we just put messages to its [[Outbox]] to implement a non-blocking `send` method.
*/
private val outboxes = new ConcurrentHashMap[RpcAddress, Outbox]()
/**
* Remove the address's Outbox and stop it.
*/
private[netty] def removeOutbox(address: RpcAddress): Unit = {
val outbox = outboxes.remove(address)
if (outbox != null) {
outbox.stop()
}
}
def startServer(bindAddress: String, port: Int): Unit = {
val bootstraps: java.util.List[TransportServerBootstrap] =
if (securityManager.isAuthenticationEnabled()) {
java.util.Arrays.asList(new AuthServerBootstrap(transportConf, securityManager))
} else {
java.util.Collections.emptyList()
}
server = transportContext.createServer(bindAddress, port, bootstraps)
dispatcher.registerRpcEndpoint(
RpcEndpointVerifier.NAME, new RpcEndpointVerifier(this, dispatcher))
}
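  // In client mode `startServer` is never called, so `server` stays null and this
  // RpcEnv advertises no address of its own.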
@Nullable
override lazy val address: RpcAddress = {
if (server != null) RpcAddress(host, server.getPort()) else null
}
override def setupEndpoint(name: String, endpoint: RpcEndpoint): RpcEndpointRef = {
dispatcher.registerRpcEndpoint(name, endpoint)
}
def asyncSetupEndpointRefByURI(uri: String): Future[RpcEndpointRef] = {
val addr = RpcEndpointAddress(uri)
val endpointRef = new NettyRpcEndpointRef(conf, addr, this)
val verifier = new NettyRpcEndpointRef(
conf, RpcEndpointAddress(addr.rpcAddress, RpcEndpointVerifier.NAME), this)
verifier.ask[Boolean](RpcEndpointVerifier.CheckExistence(endpointRef.name)).flatMap { find =>
if (find) {
Future.successful(endpointRef)
} else {
Future.failed(new RpcEndpointNotFoundException(uri))
}
}(ThreadUtils.sameThread)
}
override def stop(endpointRef: RpcEndpointRef): Unit = {
require(endpointRef.isInstanceOf[NettyRpcEndpointRef])
dispatcher.stop(endpointRef)
}
private def postToOutbox(receiver: NettyRpcEndpointRef, message: OutboxMessage): Unit = {
if (receiver.client != null) {
message.sendWith(receiver.client)
} else {
require(receiver.address != null,
"Cannot send message to client endpoint with no listen address.")
val targetOutbox = {
val outbox = outboxes.get(receiver.address)
if (outbox == null) {
val newOutbox = new Outbox(this, receiver.address)
val oldOutbox = outboxes.putIfAbsent(receiver.address, newOutbox)
if (oldOutbox == null) {
newOutbox
} else {
oldOutbox
}
} else {
outbox
}
}
if (stopped.get) {
// It's possible that we put `targetOutbox` after stopping. So we need to clean it.
outboxes.remove(receiver.address)
targetOutbox.stop()
} else {
targetOutbox.send(message)
}
}
}
private[netty] def send(message: RequestMessage): Unit = {
val remoteAddr = message.receiver.address
if (remoteAddr == address) {
// Message to a local RPC endpoint.
try {
dispatcher.postOneWayMessage(message)
} catch {
case e: RpcEnvStoppedException => logDebug(e.getMessage)
}
} else {
// Message to a remote RPC endpoint.
postToOutbox(message.receiver, OneWayOutboxMessage(message.serialize(this)))
}
}
private[netty] def createClient(address: RpcAddress): TransportClient = {
clientFactory.createClient(address.host, address.port)
}
private[netty] def ask[T: ClassTag](message: RequestMessage, timeout: RpcTimeout): Future[T] = {
val promise = Promise[Any]()
val remoteAddr = message.receiver.address
def onFailure(e: Throwable): Unit = {
if (!promise.tryFailure(e)) {
e match {
case e : RpcEnvStoppedException => logDebug (s"Ignored failure: $e")
case _ => logWarning(s"Ignored failure: $e")
}
}
}
def onSuccess(reply: Any): Unit = reply match {
case RpcFailure(e) => onFailure(e)
case rpcReply =>
if (!promise.trySuccess(rpcReply)) {
logWarning(s"Ignored message: $reply")
}
}
try {
if (remoteAddr == address) {
val p = Promise[Any]()
p.future.onComplete {
case Success(response) => onSuccess(response)
case Failure(e) => onFailure(e)
}(ThreadUtils.sameThread)
dispatcher.postLocalMessage(message, p)
} else {
val rpcMessage = RpcOutboxMessage(message.serialize(this),
onFailure,
(client, response) => onSuccess(deserialize[Any](client, response)))
postToOutbox(message.receiver, rpcMessage)
promise.future.failed.foreach {
case _: TimeoutException => rpcMessage.onTimeout()
case _ =>
}(ThreadUtils.sameThread)
}
val timeoutCancelable = timeoutScheduler.schedule(new Runnable {
override def run(): Unit = {
onFailure(new TimeoutException(s"Cannot receive any reply from ${remoteAddr} " +
s"in ${timeout.duration}"))
}
}, timeout.duration.toNanos, TimeUnit.NANOSECONDS)
promise.future.onComplete { v =>
timeoutCancelable.cancel(true)
}(ThreadUtils.sameThread)
} catch {
case NonFatal(e) =>
onFailure(e)
}
promise.future.mapTo[T].recover(timeout.addMessageIfTimeout)(ThreadUtils.sameThread)
}
private[netty] def serialize(content: Any): ByteBuffer = {
javaSerializerInstance.serialize(content)
}
/**
* Returns [[SerializationStream]] that forwards the serialized bytes to `out`.
*/
private[netty] def serializeStream(out: OutputStream): SerializationStream = {
javaSerializerInstance.serializeStream(out)
}
private[netty] def deserialize[T: ClassTag](client: TransportClient, bytes: ByteBuffer): T = {
NettyRpcEnv.currentClient.withValue(client) {
deserialize { () =>
javaSerializerInstance.deserialize[T](bytes)
}
}
}
override def endpointRef(endpoint: RpcEndpoint): RpcEndpointRef = {
dispatcher.getRpcEndpointRef(endpoint)
}
override def shutdown(): Unit = {
cleanup()
}
override def awaitTermination(): Unit = {
dispatcher.awaitTermination()
}
private def cleanup(): Unit = {
if (!stopped.compareAndSet(false, true)) {
return
}
val iter = outboxes.values().iterator()
while (iter.hasNext()) {
val outbox = iter.next()
outboxes.remove(outbox.address)
outbox.stop()
}
if (timeoutScheduler != null) {
timeoutScheduler.shutdownNow()
}
if (dispatcher != null) {
dispatcher.stop()
}
if (server != null) {
server.close()
}
if (clientFactory != null) {
clientFactory.close()
}
if (clientConnectionExecutor != null) {
clientConnectionExecutor.shutdownNow()
}
if (fileDownloadFactory != null) {
fileDownloadFactory.close()
}
if (transportContext != null) {
transportContext.close()
}
}
override def deserialize[T](deserializationAction: () => T): T = {
NettyRpcEnv.currentEnv.withValue(this) {
deserializationAction()
}
}
override def fileServer: RpcEnvFileServer = streamManager
override def openChannel(uri: String): ReadableByteChannel = {
val parsedUri = new URI(uri)
require(parsedUri.getHost() != null, "Host name must be defined.")
require(parsedUri.getPort() > 0, "Port must be defined.")
require(parsedUri.getPath() != null && parsedUri.getPath().nonEmpty, "Path must be defined.")
val pipe = Pipe.open()
val source = new FileDownloadChannel(pipe.source())
Utils.tryWithSafeFinallyAndFailureCallbacks(block = {
val client = downloadClient(parsedUri.getHost(), parsedUri.getPort())
val callback = new FileDownloadCallback(pipe.sink(), source, client)
client.stream(parsedUri.getPath(), callback)
})(catchBlock = {
pipe.sink().close()
source.close()
})
source
}
private def downloadClient(host: String, port: Int): TransportClient = {
if (fileDownloadFactory == null) synchronized {
if (fileDownloadFactory == null) {
val module = "files"
val prefix = "spark.rpc.io."
val clone = conf.clone()
// Copy any RPC configuration that is not overridden in the spark.files namespace.
conf.getAll.foreach { case (key, value) =>
if (key.startsWith(prefix)) {
val opt = key.substring(prefix.length())
clone.setIfMissing(s"spark.$module.io.$opt", value)
}
}
val ioThreads = clone.getInt("spark.files.io.threads", 1)
val downloadConf = SparkTransportConf.fromSparkConf(clone, module, ioThreads)
val downloadContext = new TransportContext(downloadConf, new NoOpRpcHandler(), true)
fileDownloadFactory = downloadContext.createClientFactory(createClientBootstraps())
}
}
fileDownloadFactory.createClient(host, port)
}
private class FileDownloadChannel(source: Pipe.SourceChannel) extends ReadableByteChannel {
@volatile private var error: Throwable = _
def setError(e: Throwable): Unit = {
// This setError callback is invoked by internal RPC threads in order to propagate remote
// exceptions to application-level threads which are reading from this channel. When an
// RPC error occurs, the RPC system will call setError() and then will close the
// Pipe.SinkChannel corresponding to the other end of the `source` pipe. Closing of the pipe
// sink will cause `source.read()` operations to return EOF, unblocking the application-level
// reading thread. Thus there is no need to actually call `source.close()` here in the
// onError() callback and, in fact, calling it here would be dangerous because the close()
// would be asynchronous with respect to the read() call and could trigger race-conditions
// that lead to data corruption. See the PR for SPARK-22982 for more details on this topic.
error = e
}
override def read(dst: ByteBuffer): Int = {
Try(source.read(dst)) match {
// See the documentation above in setError(): if an RPC error has occurred then setError()
// will be called to propagate the RPC error and then `source`'s corresponding
// Pipe.SinkChannel will be closed, unblocking this read. In that case, we want to propagate
// the remote RPC exception (and not any exceptions triggered by the pipe close, such as
// ChannelClosedException), hence this `error != null` check:
case _ if error != null => throw error
case Success(bytesRead) => bytesRead
case Failure(readErr) => throw readErr
}
}
override def close(): Unit = source.close()
override def isOpen(): Boolean = source.isOpen()
}
private class FileDownloadCallback(
sink: WritableByteChannel,
source: FileDownloadChannel,
client: TransportClient) extends StreamCallback {
override def onData(streamId: String, buf: ByteBuffer): Unit = {
while (buf.remaining() > 0) {
sink.write(buf)
}
}
override def onComplete(streamId: String): Unit = {
sink.close()
}
override def onFailure(streamId: String, cause: Throwable): Unit = {
logDebug(s"Error downloading stream $streamId.", cause)
source.setError(cause)
sink.close()
}
}
}
private[netty] object NettyRpcEnv extends Logging {
/**
* When deserializing the [[NettyRpcEndpointRef]], it needs a reference to [[NettyRpcEnv]].
* Use `currentEnv` to wrap the deserialization codes. E.g.,
*
* {{{
* NettyRpcEnv.currentEnv.withValue(this) {
* your deserialization codes
* }
* }}}
*/
private[netty] val currentEnv = new DynamicVariable[NettyRpcEnv](null)
/**
* Similar to `currentEnv`, this variable references the client instance associated with an
* RPC, in case it's needed to find out the remote address during deserialization.
*/
private[netty] val currentClient = new DynamicVariable[TransportClient](null)
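  // Read back in NettyRpcEndpointRef.readObject so that a deserialized ref can be tied
  // to the connection it arrived on.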
}
private[rpc] class NettyRpcEnvFactory extends RpcEnvFactory with Logging {
def create(config: RpcEnvConfig): RpcEnv = {
val sparkConf = config.conf
// Use JavaSerializerInstance in multiple threads is safe. However, if we plan to support
// KryoSerializer in future, we have to use ThreadLocal to store SerializerInstance
val javaSerializerInstance =
new JavaSerializer(sparkConf).newInstance().asInstanceOf[JavaSerializerInstance]
val nettyEnv =
new NettyRpcEnv(sparkConf, javaSerializerInstance, config.advertiseAddress,
config.securityManager, config.numUsableCores)
if (!config.clientMode) {
val startNettyRpcEnv: Int => (NettyRpcEnv, Int) = { actualPort =>
nettyEnv.startServer(config.bindAddress, actualPort)
(nettyEnv, nettyEnv.address.port)
}
try {
Utils.startServiceOnPort(config.port, startNettyRpcEnv, sparkConf, config.name)._1
} catch {
case NonFatal(e) =>
nettyEnv.shutdown()
throw e
}
}
nettyEnv
}
}
/**
* The NettyRpcEnv version of RpcEndpointRef.
*
* This class behaves differently depending on where it's created. On the node that "owns" the
* RpcEndpoint, it's a simple wrapper around the RpcEndpointAddress instance.
*
* On other machines that receive a serialized version of the reference, the behavior changes. The
* instance will keep track of the TransportClient that sent the reference, so that messages
* to the endpoint are sent over the client connection, instead of needing a new connection to
* be opened.
*
* The RpcAddress of this ref can be null; what that means is that the ref can only be used through
* a client connection, since the process hosting the endpoint is not listening for incoming
* connections. These refs should not be shared with 3rd parties, since they will not be able to
* send messages to the endpoint.
*
* @param conf Spark configuration.
* @param endpointAddress The address where the endpoint is listening.
* @param nettyEnv The RpcEnv associated with this ref.
*/
private[netty] class NettyRpcEndpointRef(
@transient private val conf: SparkConf,
private val endpointAddress: RpcEndpointAddress,
@transient @volatile private var nettyEnv: NettyRpcEnv) extends RpcEndpointRef(conf) {
@transient @volatile var client: TransportClient = _
override def address: RpcAddress =
if (endpointAddress.rpcAddress != null) endpointAddress.rpcAddress else null
private def readObject(in: ObjectInputStream): Unit = {
in.defaultReadObject()
nettyEnv = NettyRpcEnv.currentEnv.value
client = NettyRpcEnv.currentClient.value
}
private def writeObject(out: ObjectOutputStream): Unit = {
out.defaultWriteObject()
}
override def name: String = endpointAddress.name
override def ask[T: ClassTag](message: Any, timeout: RpcTimeout): Future[T] = {
nettyEnv.ask(new RequestMessage(nettyEnv.address, this, message), timeout)
}
override def send(message: Any): Unit = {
require(message != null, "Message is null")
nettyEnv.send(new RequestMessage(nettyEnv.address, this, message))
}
override def toString: String = s"NettyRpcEndpointRef(${endpointAddress})"
final override def equals(that: Any): Boolean = that match {
case other: NettyRpcEndpointRef => endpointAddress == other.endpointAddress
case _ => false
}
final override def hashCode(): Int =
if (endpointAddress == null) 0 else endpointAddress.hashCode()
}
/**
* The message that is sent from the sender to the receiver.
*
* @param senderAddress the sender address. It's `null` if this message is from a client
* `NettyRpcEnv`.
* @param receiver the receiver of this message.
* @param content the message content.
*/
private[netty] class RequestMessage(
val senderAddress: RpcAddress,
val receiver: NettyRpcEndpointRef,
val content: Any) {
/** Manually serialize [[RequestMessage]] to minimize the size. */
def serialize(nettyEnv: NettyRpcEnv): ByteBuffer = {
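    // Wire layout: [sender address?][receiver address?][receiver name][serialized content],
    // where each optional address is a boolean presence flag followed by host (UTF) and port (Int).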
val bos = new ByteBufferOutputStream()
val out = new DataOutputStream(bos)
try {
writeRpcAddress(out, senderAddress)
writeRpcAddress(out, receiver.address)
out.writeUTF(receiver.name)
val s = nettyEnv.serializeStream(out)
try {
s.writeObject(content)
} finally {
s.close()
}
} finally {
out.close()
}
bos.toByteBuffer
}
private def writeRpcAddress(out: DataOutputStream, rpcAddress: RpcAddress): Unit = {
if (rpcAddress == null) {
out.writeBoolean(false)
} else {
out.writeBoolean(true)
out.writeUTF(rpcAddress.host)
out.writeInt(rpcAddress.port)
}
}
override def toString: String = s"RequestMessage($senderAddress, $receiver, $content)"
}
private[netty] object RequestMessage {
private def readRpcAddress(in: DataInputStream): RpcAddress = {
val hasRpcAddress = in.readBoolean()
if (hasRpcAddress) {
RpcAddress(in.readUTF(), in.readInt())
} else {
null
}
}
def apply(nettyEnv: NettyRpcEnv, client: TransportClient, bytes: ByteBuffer): RequestMessage = {
val bis = new ByteBufferInputStream(bytes)
val in = new DataInputStream(bis)
try {
val senderAddress = readRpcAddress(in)
val endpointAddress = RpcEndpointAddress(readRpcAddress(in), in.readUTF())
val ref = new NettyRpcEndpointRef(nettyEnv.conf, endpointAddress, nettyEnv)
ref.client = client
new RequestMessage(
senderAddress,
ref,
// The remaining bytes in `bytes` are the message content.
nettyEnv.deserialize(client, bytes))
} finally {
in.close()
}
}
}
/**
* A response that indicates some failure happens in the receiver side.
*/
private[netty] case class RpcFailure(e: Throwable)
/**
* Dispatches incoming RPCs to registered endpoints.
*
* The handler keeps track of all client instances that communicate with it, so that the RpcEnv
* knows which `TransportClient` instance to use when sending RPCs to a client endpoint (i.e.,
* one that is not listening for incoming connections, but rather needs to be contacted via the
* client socket).
*
* Events are sent on a per-connection basis, so if a client opens multiple connections to the
* RpcEnv, multiple connection / disconnection events will be created for that client (albeit
* with different `RpcAddress` information).
*/
private[netty] class NettyRpcHandler(
dispatcher: Dispatcher,
nettyEnv: NettyRpcEnv,
streamManager: StreamManager) extends RpcHandler with Logging {
// A variable to track the remote RpcEnv addresses of all clients
private val remoteAddresses = new ConcurrentHashMap[RpcAddress, RpcAddress]()
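  // Key: the socket-level address of a client connection; value: the listening address
  // advertised by the remote RpcEnv on that connection (if any).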
override def receive(
client: TransportClient,
message: ByteBuffer,
callback: RpcResponseCallback): Unit = {
val messageToDispatch = internalReceive(client, message)
dispatcher.postRemoteMessage(messageToDispatch, callback)
}
override def receive(
client: TransportClient,
message: ByteBuffer): Unit = {
val messageToDispatch = internalReceive(client, message)
dispatcher.postOneWayMessage(messageToDispatch)
}
private def internalReceive(client: TransportClient, message: ByteBuffer): RequestMessage = {
val addr = client.getChannel().remoteAddress().asInstanceOf[InetSocketAddress]
assert(addr != null)
val clientAddr = RpcAddress(addr.getHostString, addr.getPort)
val requestMessage = RequestMessage(nettyEnv, client, message)
if (requestMessage.senderAddress == null) {
// Create a new message with the socket address of the client as the sender.
new RequestMessage(clientAddr, requestMessage.receiver, requestMessage.content)
} else {
      // The remote RpcEnv listens on some port, so we should also fire a
      // RemoteProcessConnected for the listening address
val remoteEnvAddress = requestMessage.senderAddress
if (remoteAddresses.putIfAbsent(clientAddr, remoteEnvAddress) == null) {
dispatcher.postToAll(RemoteProcessConnected(remoteEnvAddress))
}
requestMessage
}
}
override def getStreamManager: StreamManager = streamManager
override def exceptionCaught(cause: Throwable, client: TransportClient): Unit = {
val addr = client.getChannel.remoteAddress().asInstanceOf[InetSocketAddress]
if (addr != null) {
val clientAddr = RpcAddress(addr.getHostString, addr.getPort)
dispatcher.postToAll(RemoteProcessConnectionError(cause, clientAddr))
      // If the remote RpcEnv listens on some address, we should also fire a
      // RemoteProcessConnectionError for the remote RpcEnv's listening address
val remoteEnvAddress = remoteAddresses.get(clientAddr)
if (remoteEnvAddress != null) {
dispatcher.postToAll(RemoteProcessConnectionError(cause, remoteEnvAddress))
}
} else {
// If the channel is closed before connecting, its remoteAddress will be null.
// See java.net.Socket.getRemoteSocketAddress
// Because we cannot get a RpcAddress, just log it
logError("Exception before connecting to the client", cause)
}
}
override def channelActive(client: TransportClient): Unit = {
val addr = client.getChannel().remoteAddress().asInstanceOf[InetSocketAddress]
assert(addr != null)
val clientAddr = RpcAddress(addr.getHostString, addr.getPort)
dispatcher.postToAll(RemoteProcessConnected(clientAddr))
}
override def channelInactive(client: TransportClient): Unit = {
val addr = client.getChannel.remoteAddress().asInstanceOf[InetSocketAddress]
if (addr != null) {
val clientAddr = RpcAddress(addr.getHostString, addr.getPort)
nettyEnv.removeOutbox(clientAddr)
dispatcher.postToAll(RemoteProcessDisconnected(clientAddr))
val remoteEnvAddress = remoteAddresses.remove(clientAddr)
      // If the remote RpcEnv listens on some address, we should also fire a
      // RemoteProcessDisconnected for the remote RpcEnv's listening address
if (remoteEnvAddress != null) {
dispatcher.postToAll(RemoteProcessDisconnected(remoteEnvAddress))
}
} else {
// If the channel is closed before connecting, its remoteAddress will be null. In this case,
// we can ignore it since we don't fire "Associated".
// See java.net.Socket.getRemoteSocketAddress
}
}
}
|
aosagie/spark
|
core/src/main/scala/org/apache/spark/rpc/netty/NettyRpcEnv.scala
|
Scala
|
apache-2.0
| 26,923
|
/*
Copyright (c) 2016, Rice University
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Rice University
nor the names of its contributors may be used to endorse or
promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.apache.spark.rdd.cl.tests
import java.util.LinkedList
import com.amd.aparapi.internal.writer.ScalaArrayParameter
import com.amd.aparapi.internal.model.Tuple2ClassModel
import org.apache.spark.rdd.cl.SyncCodeGenTest
import org.apache.spark.rdd.cl.CodeGenTest
import org.apache.spark.rdd.cl.CodeGenTests
import org.apache.spark.rdd.cl.CodeGenUtil
import com.amd.aparapi.internal.model.ClassModel
import com.amd.aparapi.internal.model.HardCodedClassModels
import com.amd.aparapi.internal.model.DenseVectorClassModel
import org.apache.spark.mllib.linalg.DenseVector
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.rdd.cl.DenseVectorInputBufferWrapperConfig
object Tuple2DenseOutputTest extends SyncCodeGenTest[Int, (Int, DenseVector)] {
def getExpectedException() : String = { return null }
def getExpectedKernel() : String = { getExpectedKernelHelper(getClass) }
def getExpectedNumInputs() : Int = {
1
}
def init() : HardCodedClassModels = {
val models = new HardCodedClassModels()
val inputClassType1Name = CodeGenUtil.cleanClassName("I")
val inputClassType2Name = CodeGenUtil.cleanClassName("org.apache.spark.mllib.linalg.DenseVector")
val tuple2ClassModel : Tuple2ClassModel = Tuple2ClassModel.create(
inputClassType1Name, inputClassType2Name, true)
models.addClassModelFor(classOf[Tuple2[_, _]], tuple2ClassModel)
val denseVectorModel : DenseVectorClassModel = DenseVectorClassModel.create()
models.addClassModelFor(classOf[DenseVector], denseVectorModel)
models
}
def complete(params : LinkedList[ScalaArrayParameter]) {
params.get(1).addTypeParameter("I", false)
params.get(1).addTypeParameter("Lorg.apache.spark.mllib.linalg.DenseVector;", true)
}
def getFunction() : Function1[Int, (Int, DenseVector)] = {
new Function[Int, (Int, DenseVector)] {
override def apply(in : Int) : Tuple2[Int, DenseVector] = {
val arr : Array[Double] = new Array[Double](in)
var i = 0
while (i < in) {
arr(i) = in
i += 1
}
(in, Vectors.dense(arr).asInstanceOf[DenseVector])
}
}
}
}
|
agrippa/spark-swat
|
swat/src/test/scala/org/apache/spark/rdd/cl/tests/Tuple2DenseOutputTest.scala
|
Scala
|
bsd-3-clause
| 3,668
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
* file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
* to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package kafka.api
import java.util.Properties
import java.util.concurrent.Future
import kafka.consumer.SimpleConsumer
import kafka.integration.KafkaServerTestHarness
import kafka.server.KafkaConfig
import kafka.utils.{ShutdownableThread, TestUtils}
import kafka.utils.Implicits._
import org.apache.kafka.clients.producer._
import org.apache.kafka.clients.producer.internals.ErrorLoggingCallback
import org.junit.Assert._
import org.junit.{Ignore, Test}
import scala.collection.mutable.ArrayBuffer
class ProducerBounceTest extends KafkaServerTestHarness {
private val producerBufferSize = 65536
private val serverMessageMaxBytes = producerBufferSize/2
val numServers = 4
val overridingProps = new Properties()
overridingProps.put(KafkaConfig.AutoCreateTopicsEnableProp, false.toString)
overridingProps.put(KafkaConfig.MessageMaxBytesProp, serverMessageMaxBytes.toString)
// Set a smaller value for the number of partitions for the offset commit topic (__consumer_offset topic)
// so that the creation of that topic/partition(s) and subsequent leader assignment doesn't take relatively long
overridingProps.put(KafkaConfig.OffsetsTopicPartitionsProp, 1.toString)
overridingProps.put(KafkaConfig.ControlledShutdownEnableProp, true.toString)
overridingProps.put(KafkaConfig.UncleanLeaderElectionEnableProp, false.toString)
overridingProps.put(KafkaConfig.AutoLeaderRebalanceEnableProp, false.toString)
  // This is one of the few tests we currently allow to preallocate ports, despite the fact that this can result in transient
// failures due to ports getting reused. We can't use random ports because of bad behavior that can result from bouncing
// brokers too quickly when they get new, random ports. If we're not careful, the client can end up in a situation
// where metadata is not refreshed quickly enough, and by the time it's actually trying to, all the servers have
// been bounced and have new addresses. None of the bootstrap nodes or current metadata can get them connected to a
// running server.
//
// Since such quick rotation of servers is incredibly unrealistic, we allow this one test to preallocate ports, leaving
// a small risk of hitting errors due to port conflicts. Hopefully this is infrequent enough to not cause problems.
override def generateConfigs = {
FixedPortTestUtils.createBrokerConfigs(numServers, zkConnect,enableControlledShutdown = true)
.map(KafkaConfig.fromProps(_, overridingProps))
}
private val topic1 = "topic-1"
/**
   * With replication, the producer should be able to find a new leader after it detects broker failure
*/
@Ignore // To be re-enabled once we can make it less flaky (KAFKA-2837)
@Test
def testBrokerFailure() {
val numPartitions = 3
val topicConfig = new Properties()
topicConfig.put(KafkaConfig.MinInSyncReplicasProp, 2.toString)
TestUtils.createTopic(zkUtils, topic1, numPartitions, numServers, servers, topicConfig)
val scheduler = new ProducerScheduler()
scheduler.start
// rolling bounce brokers
for (_ <- 0 until numServers) {
for (server <- servers) {
info("Shutting down server : %s".format(server.config.brokerId))
server.shutdown()
server.awaitShutdown()
info("Server %s shut down. Starting it up again.".format(server.config.brokerId))
server.startup()
info("Restarted server: %s".format(server.config.brokerId))
}
      // Make sure the producer does not see any exception in the returned metadata due to broker failures
assertFalse(scheduler.failed)
// Make sure the leader still exists after bouncing brokers
(0 until numPartitions).foreach(partition => TestUtils.waitUntilLeaderIsElectedOrChanged(zkUtils, topic1, partition))
}
scheduler.shutdown
    // Make sure the producer does not see any exception
    // when draining the remaining messages on shutdown
assertFalse(scheduler.failed)
// double check that the leader info has been propagated after consecutive bounces
val newLeaders = (0 until numPartitions).map(i => TestUtils.waitUntilMetadataIsPropagated(servers, topic1, i))
val fetchResponses = newLeaders.zipWithIndex.map { case (leader, partition) =>
// Consumers must be instantiated after all the restarts since they use random ports each time they start up
val consumer = new SimpleConsumer("localhost", boundPort(servers(leader)), 30000, 1024 * 1024, "")
val response = consumer.fetch(new FetchRequestBuilder().addFetch(topic1, partition, 0, Int.MaxValue).build()).messageSet(topic1, partition)
consumer.close
response
}
val messages = fetchResponses.flatMap(r => r.iterator.toList.map(_.message))
val uniqueMessages = messages.toSet
val uniqueMessageSize = uniqueMessages.size
info(s"number of unique messages sent: ${uniqueMessageSize}")
assertEquals(s"Found ${messages.size - uniqueMessageSize} duplicate messages.", uniqueMessageSize, messages.size)
assertEquals("Should have fetched " + scheduler.sent + " unique messages", scheduler.sent, messages.size)
}
private class ProducerScheduler extends ShutdownableThread("daemon-producer", false) {
val numRecords = 1000
var sent = 0
var failed = false
val producerConfig = new Properties()
producerConfig.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")
producerConfig.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "5")
val producerConfigWithCompression = new Properties()
producerConfigWithCompression ++= producerConfig
producerConfigWithCompression.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "lz4")
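    // Three producers with increasing buffer sizes and linger times; the last one also
    // sends lz4-compressed batches.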
val producers = List(
TestUtils.createNewProducer(brokerList, bufferSize = producerBufferSize / 4, retries = 10, props = Some(producerConfig)),
TestUtils.createNewProducer(brokerList, bufferSize = producerBufferSize / 2, retries = 10, lingerMs = 5000, props = Some(producerConfig)),
TestUtils.createNewProducer(brokerList, bufferSize = producerBufferSize, retries = 10, lingerMs = 10000, props = Some(producerConfigWithCompression))
)
override def doWork(): Unit = {
info("Starting to send messages..")
var producerId = 0
val responses = new ArrayBuffer[IndexedSeq[Future[RecordMetadata]]]()
for (producer <- producers) {
val response =
for (i <- sent+1 to sent+numRecords)
yield producer.send(new ProducerRecord[Array[Byte],Array[Byte]](topic1, null, null, ((producerId + 1) * i).toString.getBytes),
new ErrorLoggingCallback(topic1, null, null, true))
responses.append(response)
producerId += 1
}
try {
for (response <- responses) {
val futures = response.toList
futures.map(_.get)
sent += numRecords
}
info(s"Sent $sent records")
} catch {
case e : Exception =>
error(s"Got exception ${e.getMessage}")
e.printStackTrace()
failed = true
}
}
override def shutdown(){
super.shutdown()
for (producer <- producers) {
producer.close()
}
}
}
}
|
themarkypantz/kafka
|
core/src/test/scala/integration/kafka/api/ProducerBounceTest.scala
|
Scala
|
apache-2.0
| 7,960
|
/**
* (C) Copyright IBM Corp. 2015 - 2017
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.ibm.sparktc.sparkbench.datageneration
import java.io.File
import com.ibm.sparktc.sparkbench.testfixtures.{BuildAndTeardownData, SparkSessionProvider}
import com.ibm.sparktc.sparkbench.utils.SparkBenchException
import org.apache.spark.graphx.GraphLoader
import org.scalatest.{BeforeAndAfterEach, FlatSpec, Matchers}
class GraphDataGenTest extends FlatSpec with Matchers with BeforeAndAfterEach {
val cool = new BuildAndTeardownData("graph-data-gen")
val fileName = s"${cool.sparkBenchTestFolder}/${java.util.UUID.randomUUID.toString}.txt"
var file: File = _
override def beforeEach() {
cool.createFolders()
file = new File(fileName)
}
override def afterEach() {
cool.deleteFolders()
}
"GraphDataGeneration" should "generate data correctly with all default options" in {
val m = Map(
"name" -> "graph-data-generator",
"vertices" -> 100,
"output" -> fileName
)
val generator = GraphDataGen(m)
generator.doWorkload(spark = SparkSessionProvider.spark)
val res = GraphLoader.edgeListFile(SparkSessionProvider.spark.sparkContext, fileName)
res.vertices.count() shouldBe m("vertices")
}
it should "throw an error for any output format but .txt" in {
val m1 = Map(
"name" -> "graph-data-generator",
"vertices" -> 100,
"output" -> "/my-cool-file.csv"
)
val m2 = Map(
"name" -> "graph-data-generator",
"vertices" -> 100,
"output" -> "/my-cool-file.parquet"
)
val m3 = Map(
"name" -> "graph-data-generator",
"vertices" -> 100,
"output" -> "/my-cool-file.tsv"
)
a [SparkBenchException] should be thrownBy GraphDataGen(m1)
a [SparkBenchException] should be thrownBy GraphDataGen(m2)
a [SparkBenchException] should be thrownBy GraphDataGen(m3)
}
it should "throw errors when required values are missing" in {
// Missing vertices
val m1 = Map(
"name" -> "graph-data-generator",
"output" -> "/my-cool-file.csv"
)
// Missing output file name
val m2 = Map(
"name" -> "graph-data-generator",
"vertices" -> 100
)
a [SparkBenchException] should be thrownBy GraphDataGen(m1)
a [SparkBenchException] should be thrownBy GraphDataGen(m2)
}
}
|
SparkTC/spark-bench
|
cli/src/test/scala/com/ibm/sparktc/sparkbench/datageneration/GraphDataGenTest.scala
|
Scala
|
apache-2.0
| 2,887
|
/*
* =========================================================================================
* Copyright © 2017,2018 Workday, Inc.
* Copyright © 2013-2017 the kamon project <http://kamon.io/>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
* =========================================================================================
*/
package com.workday.prometheus.akka.impl
import org.scalatest.{Matchers, WordSpecLike}
class RegexPathFilterSpec extends WordSpecLike with Matchers {
"The RegexPathFilter" should {
"match a single expression" in {
val filter = new RegexPathFilter("/user/actor")
filter.accept("/user/actor") shouldBe true
filter.accept("/user/actor/something") shouldBe false
filter.accept("/user/actor/somethingElse") shouldBe false
}
"match arbitray expressions ending with wildcard" in {
val filter = new RegexPathFilter("/user/.*")
filter.accept("/user/actor") shouldBe true
filter.accept("/user/otherActor") shouldBe true
filter.accept("/user/something/actor") shouldBe true
filter.accept("/user/something/otherActor") shouldBe true
filter.accept("/otheruser/actor") shouldBe false
filter.accept("/otheruser/otherActor") shouldBe false
filter.accept("/otheruser/something/actor") shouldBe false
filter.accept("/otheruser/something/otherActor") shouldBe false
}
"match numbers" in {
val filter = new RegexPathFilter("/user/actor-\\\\d")
filter.accept("/user/actor-1") shouldBe true
filter.accept("/user/actor-2") shouldBe true
filter.accept("/user/actor-3") shouldBe true
filter.accept("/user/actor-one") shouldBe false
filter.accept("/user/actor-two") shouldBe false
filter.accept("/user/actor-tree") shouldBe false
}
}
}
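// Illustrative sketch of the behaviour exercised above, assuming the filter just compiles its
// path into a java.util.regex.Pattern and requires a full match; the name RegexPathFilterSketch
// is hypothetical and the real RegexPathFilter may be implemented differently.
class RegexPathFilterSketch(path: String) {
  private val pattern = java.util.regex.Pattern.compile(path)
  def accept(testPath: String): Boolean = pattern.matcher(testPath).matches()
}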
|
Workday/prometheus-akka
|
src/test/scala/com/workday/prometheus/akka/impl/RegexPathFilterSpec.scala
|
Scala
|
apache-2.0
| 2,313
|
package com.twitter.finagle.memcached.unit
import com.twitter.conversions.time._
import com.twitter.finagle.memcached._
import com.twitter.util.{Await, Awaitable}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.FunSuite
@RunWith(classOf[JUnitRunner])
class PoolingReadRepairClientTest extends FunSuite {
val TimeOut = 15.seconds
private def awaitResult[T](awaitable: Awaitable[T]): T = Await.result(awaitable, TimeOut)
class Context {
val full: MockClient = new MockClient(Map("key" -> "value", "foo" -> "bar"))
val partial: MockClient = new MockClient(Map("key" -> "value"))
val pooled: Client = new PoolingReadRepairClient(Seq(full, partial), 1, 1)
}
test("return the correct value") {
val context = new Context
import context._
assert(awaitResult(pooled.withStrings.get("key")) == Some("value"))
}
test("return the correct value and read-repair") {
val context = new Context
import context._
assert(partial.map.size == 1)
assert(awaitResult(pooled.withStrings.get("foo")) == Some("bar"))
assert(partial.map.size == 2)
}
}
|
mkhq/finagle
|
finagle-memcached/src/test/scala/com/twitter/finagle/memcached/unit/PoolingReadRepairClientTest.scala
|
Scala
|
apache-2.0
| 1,139
|
package atari.st.disk
import java.nio.file.Path
case class Disk(root: Path, info: DiskInfo)
|
suiryc/atari-st-tools
|
src/main/scala/atari/st/disk/Disk.scala
|
Scala
|
gpl-3.0
| 95
|
/*                     __                                               *\
**     ________ ___   / /  ___      __ ____  Scala.js tools             **
**    / __/ __// _ | / /  / _ | __ / // __/  (c) 2013-2014, LAMP/EPFL   **
**  __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \    http://scala-js.org/       **
** /____/\___/_/ |_/____/_/ | |__/ /____/                               **
**                          |/____/                                     **
\*                                                                      */
package org.scalajs.core.tools.io
import java.io.{File => JFile}
import org.scalajs.core.tools.io.IRFileCache.IRContainer
trait IRContainerPlatformExtensions { this: IRContainer.type =>
def fromClasspath(classpath: Seq[JFile]): Seq[IRContainer] = {
classpath flatMap { entry =>
if (!entry.exists)
Nil
else if (entry.isDirectory)
fromDirectory(entry)
else if (entry.getName.endsWith(".jar"))
fromJar(entry) :: Nil
else
throw new IllegalArgumentException("Illegal classpath entry " + entry)
}
}
def fromJar(jar: JFile): Jar = {
require(jar.isFile)
val vf = new FileVirtualBinaryFile(jar) with VirtualJarFile
Jar(vf)
}
def fromDirectory(dir: JFile): Seq[File] = {
require(dir.isDirectory)
val baseDir = dir.getAbsoluteFile
def walkForIR(dir: JFile): Seq[JFile] = {
val (subdirs, files) = dir.listFiles().partition(_.isDirectory)
subdirs.flatMap(walkForIR) ++ files.filter(_.getName.endsWith(".sjsir"))
}
for (ir <- walkForIR(baseDir)) yield {
val relDir = ir.getPath.stripPrefix(baseDir.getPath)
val vf = FileVirtualScalaJSIRFile.relative(ir, relDir)
File(vf)
}
}
}
|
mdedetrich/scala-js
|
tools/jvm/src/main/scala/org/scalajs/core/tools/io/IRContainerPlatformExtensions.scala
|
Scala
|
bsd-3-clause
| 1,740
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jp.gihyo.spark.ch06
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{StreamingContext, Seconds}
import org.apache.spark.streaming.dstream.InputDStream
object gihyo_6_3_reduceByKeyAndWindow {
def main(args: Array[String]) {
if (args.length != 2) {
      throw new IllegalArgumentException("Invalid arguments")
}
val targetHost = args(0)
val targetHostPort = args(1).toInt
val conf = new SparkConf().setAppName("NetworkWordCount")
val sc = new SparkContext(conf)
val ssc = new StreamingContext(sc, Seconds(5))
val lines = ssc.socketTextStream(targetHost, targetHostPort)
run(lines)
ssc.start
ssc.awaitTermination
}
def run(stream: InputDStream[String], windowLength: Int = 10, slideInterval: Int = 5) {
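    // Count occurrences of each input line over a sliding window of `windowLength` seconds,
    // recomputed every `slideInterval` seconds.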
val userList = stream.map(x => (x, 1))
.reduceByKeyAndWindow((a: Int, b: Int) =>
a + b, Seconds(windowLength), Seconds(slideInterval))
userList.print
}
}
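// Illustrative sketch: an incremental variant of `run` using the inverse-reduce overload of
// reduceByKeyAndWindow, which subtracts the values that fall out of the window instead of
// recomputing the whole window. The object name below is hypothetical, and this variant
// requires checkpointing to be enabled on the StreamingContext (e.g. ssc.checkpoint("dir")).
object gihyo_6_3_reduceByKeyAndWindowIncrementalSketch {
  def run(stream: InputDStream[String], windowLength: Int = 10, slideInterval: Int = 5) {
    val userList = stream.map(x => (x, 1))
      .reduceByKeyAndWindow(
        (a: Int, b: Int) => a + b,
        (a: Int, b: Int) => a - b,
        Seconds(windowLength), Seconds(slideInterval))
    userList.print
  }
}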
|
yu-iskw/gihyo-spark-book-example
|
src/main/scala/jp/gihyo/spark/ch06/gihyo_6_3_reduceByKeyAndWindow.scala
|
Scala
|
apache-2.0
| 1,794
|
package uk.zebington.junkcraft.proxy
import net.minecraftforge.fml.common.registry.GameRegistry
import uk.zebington.junkcraft._
import uk.zebington.junkcraft.common.tileentities.{TileEntitySpikeStation, TileEntityElectricFence}
/**
* Created by Charlotte on 19/02/2015.
*/
class CommonProxy {
def registerRenderers() {}
def registerTileEntities() {
GameRegistry registerTileEntity(classOf[TileEntityElectricFence], s"${Id}_$NElectricFence")
GameRegistry registerTileEntity(classOf[TileEntitySpikeStation], s"${Id}_$NSpikeStation")
}
}
|
zebington/JunkCraft
|
src/main/scala/uk/zebington/junkcraft/proxy/CommonProxy.scala
|
Scala
|
gpl-3.0
| 554
|
package scala.pickling.singleton.simple
import org.scalatest.FunSuite
import scala.pickling._, scala.pickling.Defaults._, json._
object D {
val shouldntSerializeMe = 42
}
class SingletonSimpleTest extends FunSuite {
test("main") {
val pickle = D.pickle
assert(pickle.toString === """
|JSONPickle({
| "$type": "scala.pickling.singleton.simple.D.type"
|})
""".stripMargin.trim)
assert((pickle.unpickle[D.type] eq D) === true)
}
}
|
scala/pickling
|
core/src/test/scala/scala/pickling/generation/SingletonSimpleTest.scala
|
Scala
|
bsd-3-clause
| 472
|
/*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.flaminem.flamy.commands
import com.flaminem.flamy.commands.utils.FlamySubcommand
import com.flaminem.flamy.conf.{Environment, FlamyContext, FlamyGlobalOptions}
import com.flaminem.flamy.exec.hive.HiveTableFetcher
import com.flaminem.flamy.exec.utils.{ReturnStatus, ReturnSuccess}
import com.flaminem.flamy.model._
import com.flaminem.flamy.model.names.{ItemName, SchemaName, TableName}
import com.flaminem.flamy.utils.DiffUtils
import com.flaminem.flamy.utils.prettyprint.Tabulator
import org.rogach.scallop.{ScallopConf, ScallopOption, Subcommand}
import scala.language.reflectiveCalls
/**
* Created by fpin on 5/22/15.
*/
class Diff extends Subcommand("diff") with FlamySubcommand{
val schemas = new Subcommand("schemas") {
banner("Show the schemas differences between the specified environment and the modeling environment")
val environment: ScallopOption[Environment] =
opt(name="on", descr="Specifies environment to run on", required=true, noshort=true)
}
val tables = new Subcommand("tables") {
banner("Show the table differences between the specified environment and the modeling environment")
val environment: ScallopOption[Environment] =
opt(name="on", descr="Specifies environment to run on", required=false, noshort=true)
val items: ScallopOption[List[ItemName]] =
trailArg[List[ItemName]](default=Some(List()),required=false)
}
val columns = new Subcommand("columns") {
banner("Show the table differences between the specified environment and the modeling environment")
val environment: ScallopOption[Environment] =
opt(name="on", descr="Specifies environment to run on", required=false, noshort=true)
val items: ScallopOption[List[ItemName]] =
trailArg[List[ItemName]](default=Some(List()),required=false)
}
private def diffSchemas(leftContext: FlamyContext, rightContext: FlamyContext) {
val leftFetcher = HiveTableFetcher(leftContext)
val rightFetcher = HiveTableFetcher(rightContext)
val leftSchemas: Set[SchemaName] = leftFetcher.listSchemaNames.toSet
val rightSchemas: Set[SchemaName] = rightFetcher.listSchemaNames.toSet
val diff = DiffUtils.hammingDiff(leftSchemas.toIndexedSeq.sorted, rightSchemas.toIndexedSeq.sorted, allowReplacements = false)
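    // Align the two sorted schema lists position by position; equal pairs are dropped below
    // so that only the differing entries end up in the table.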
val results: Seq[Seq[String]] =
diff.flatMap{
case (l, r) if l == r => None
case (l, r) =>
Some(l.map{_.toString}.getOrElse("")::r.map{_.toString}.getOrElse("")::Nil)
}
val header = Seq(leftContext.env,rightContext.env)
println(Tabulator.format(header+:results,leftJustify = true))
}
private[commands]
def diffTables(leftContext: FlamyContext, rightContext: FlamyContext, items: ItemName*): String = {
val itemFilter = new ItemFilter(items,true)
val leftFetcher = HiveTableFetcher(leftContext)
val rightFetcher = HiveTableFetcher(rightContext)
val leftTables: Iterable[TableName] = leftFetcher.listTableNames.filter{itemFilter}
val rightTables: Iterable[TableName] = rightFetcher.listTableNames.filter{itemFilter}
val diff = DiffUtils.hammingDiff(leftTables.toIndexedSeq.sorted, rightTables.toIndexedSeq.sorted, allowReplacements = false)
val results: Seq[Seq[String]] =
diff.flatMap{
case (l, r) if l == r => None
case (l, r) => Some(l.map{_.toString}.getOrElse("")::r.map{_.toString}.getOrElse("")::Nil)
}
val header = Seq(leftContext.env,rightContext.env)
Tabulator.format(header+:results,leftJustify = true)
}
private def formatColumn(col: Column): String = {
val comment = col.comment.map{c => s""" COMMENT "$c" """}.getOrElse("")
s" ${col.columnName}${col.columnType.map{t => " " + t.toUpperCase}.getOrElse("")}$comment"
}
private def formatPartition(col: PartitionKey): String = {
val comment = col.comment.map{c => s""" COMMENT "$c" """}.getOrElse("")
s" * ${col.columnName}${col.columnType.map{t => " " + t.toUpperCase}.getOrElse("")}$comment"
}
private def diffColumnsInTables(leftTable: TableInfo, rightTable: TableInfo): Seq[Seq[String]] = {
val columnDiff: Seq[Seq[String]] =
DiffUtils.hammingDiff(leftTable.columns.toIndexedSeq, rightTable.columns.toIndexedSeq, allowReplacements = true).flatMap {
case (l, r) if l == r =>
None
case (l, r) =>
Some(l.map{formatColumn}.getOrElse("") :: r.map{formatColumn}.getOrElse("") :: Nil)
}
val partitionDiff: Seq[Seq[String]] =
DiffUtils.hammingDiff(leftTable.partitions.toIndexedSeq, rightTable.partitions.toIndexedSeq, allowReplacements = true).flatMap {
case (l, r) if l == r =>
None
case (l, r) =>
Some(l.map{formatPartition}.getOrElse("") :: r.map{formatPartition}.getOrElse("") :: Nil)
}
val diff = columnDiff++partitionDiff
if (diff.isEmpty) {
Nil
}
else {
(leftTable.fullName.toString :: rightTable.fullName.toString :: Nil) +: diff
}
}
private[commands]
def diffColumns(leftContext: FlamyContext, rightContext: FlamyContext, items: ItemName*): String = {
val itemFilter = new ItemFilter(items, true)
val leftFetcher = HiveTableFetcher(leftContext)
val rightFetcher = HiveTableFetcher(rightContext)
val leftTables: Set[TableInfo] = leftFetcher.listTables{itemFilter}.toSet
val rightTables: Set[TableInfo] = rightFetcher.listTables{itemFilter}.toSet
val diff: IndexedSeq[(Option[TableInfo], Option[TableInfo])] =
DiffUtils.hammingDiff(leftTables.toIndexedSeq.sorted, rightTables.toIndexedSeq.sorted, allowReplacements = false)
val results: Seq[Seq[String]] =
diff.flatMap{
case (Some(leftTable), Some(rightTable)) if !leftTable.isView || !rightTable.isView =>
diffColumnsInTables(leftTable, rightTable)
case (l, r) if l == r => None
case (l, r) =>
Some(l.map{_.fullName.toString}.getOrElse("")::r.map{_.fullName.toString}.getOrElse("")::Nil)
}
val header = Seq(leftContext.env,rightContext.env)
Tabulator.format(header+:results,leftJustify = true)
}
override def doCommand(globalOptions: FlamyGlobalOptions, subCommands: List[ScallopConf]): ReturnStatus = {
subCommands match {
case (command@this.schemas) :: Nil =>
val leftContext = new FlamyContext(globalOptions)
val rightContext = new FlamyContext(globalOptions, command.environment.get)
diffSchemas(leftContext, rightContext)
case (command@this.tables) :: Nil =>
val leftContext = new FlamyContext(globalOptions)
val rightContext = new FlamyContext(globalOptions, command.environment.get)
println(diffTables(leftContext, rightContext, command.items():_*))
case (command@this.columns) :: Nil =>
val leftContext = new FlamyContext(globalOptions)
val rightContext = new FlamyContext(globalOptions, command.environment.get)
println(diffColumns(leftContext, rightContext, command.items():_*))
case _ => printHelp()
}
ReturnSuccess
}
}
|
flaminem/flamy
|
src/main/scala/com/flaminem/flamy/commands/Diff.scala
|
Scala
|
apache-2.0
| 7,620
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.s2graph.counter.helper
import com.stumbleupon.async.{Callback, Deferred}
import com.typesafe.config.Config
import org.apache.hadoop.hbase.client._
import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.s2graph.counter.config.S2CounterConfig
import org.hbase.async.HBaseClient
import org.slf4j.LoggerFactory
import scala.concurrent.{Future, Promise}
import scala.util.Try
class WithHBase(config: Config) {
lazy val logger = LoggerFactory.getLogger(this.getClass)
lazy val s2config = new S2CounterConfig(config)
lazy val zkQuorum = s2config.HBASE_ZOOKEEPER_QUORUM
lazy val defaultTableName = s2config.HBASE_TABLE_NAME
logger.info(s"$zkQuorum, $defaultTableName")
val hbaseConfig = HBaseConfiguration.create()
s2config.getConfigMap("hbase").foreach { case (k, v) =>
hbaseConfig.set(k, v)
}
// lazy val conn: HConnection = HConnectionManager.createConnection(hbaseConfig)
lazy val conn: Connection = ConnectionFactory.createConnection(hbaseConfig)
val writeBufferSize = 1024 * 1024 * 2 // 2MB
// def apply[T](op: Table => T): Try[T] = {
// Try {
// val table = conn.getTable(TableName.valueOf(defaultTableName))
// // do not keep failed operation in writer buffer
// table.setWriteBufferSize(writeBufferSize)
// try {
// op(table)
// } catch {
// case e: Throwable =>
// logger.error(s"Operation to table($defaultTableName) is failed: ${e.getMessage}")
// throw e
// } finally {
// table.close()
// }
// }
// }
def apply[T](tableName: String)(op: Table => T): Try[T] = {
Try {
val table = conn.getTable(TableName.valueOf(tableName))
// do not keep failed operation in writer buffer
// table.setWriteBufferSize(writeBufferSize)
try {
op(table)
} catch {
case ex: Exception =>
logger.error(s"$ex: Operation to table($tableName) is failed")
throw ex
} finally {
table.close()
}
}
}
}
case class WithAsyncHBase(config: Config) {
lazy val logger = LoggerFactory.getLogger(this.getClass)
lazy val s2config = new S2CounterConfig(config)
lazy val zkQuorum = s2config.HBASE_ZOOKEEPER_QUORUM
val hbaseConfig = HBaseConfiguration.create()
s2config.getConfigMap("hbase").foreach { case (k, v) =>
hbaseConfig.set(k, v)
}
// lazy val conn: HConnection = HConnectionManager.createConnection(hbaseConfig)
lazy val client: HBaseClient = new HBaseClient(zkQuorum)
val writeBufferSize = 1024 * 1024 * 2 // 2MB
def apply[T](op: HBaseClient => Deferred[T]): Future[T] = {
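    // Bridge asynchbase's Deferred into a Scala Future: the Deferred's callback completes the
    // Promise with the result and the errback fails it.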
val promise = Promise[T]()
op(client).addCallback(new Callback[Unit, T] {
def call(arg: T): Unit = {
promise.success(arg)
}
}).addErrback(new Callback[Unit, Exception] {
def call(ex: Exception): Unit = {
promise.failure(ex)
}
})
promise.future
}
}
|
jongwook/incubator-s2graph
|
s2counter_core/src/main/scala/org/apache/s2graph/counter/helper/WithHBase.scala
|
Scala
|
apache-2.0
| 3,763
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.python
import java.io.{ByteArrayOutputStream, DataOutputStream}
import org.apache.spark.SparkFunSuite
class PythonRDDSuite extends SparkFunSuite {
// Writing large strings to the worker
test("Writing large strings to the worker") {
val input: List[String] = List("a"*100000)
val buffer = new DataOutputStream(new ByteArrayOutputStream)
PythonRDD.writeIteratorToStream(input.iterator, buffer)
}
// Handle nulls gracefully
test("Handle nulls gracefully") {
val buffer = new DataOutputStream(new ByteArrayOutputStream)
// Should not have NPE when write an Iterator with null in it
// The correctness will be tested in Python
PythonRDD.writeIteratorToStream(Iterator("a", null), buffer)
PythonRDD.writeIteratorToStream(Iterator(null, "a"), buffer)
PythonRDD.writeIteratorToStream(Iterator("a".getBytes, null), buffer)
PythonRDD.writeIteratorToStream(Iterator(null, "a".getBytes), buffer)
PythonRDD.writeIteratorToStream(Iterator((null, null), ("a", null), (null, "b")), buffer)
PythonRDD.writeIteratorToStream(
Iterator((null, null), ("a".getBytes, null), (null, "b".getBytes)), buffer)
}
}
|
tophua/spark1.52
|
core/src/test/scala/org/apache/spark/api/python/PythonRDDSuite.scala
|
Scala
|
apache-2.0
| 1,964
|
package provingground.codeexperiments
import annotation.tailrec
//import scala.swing._
import java.awt.Dimension
import akka.actor._
import scala.concurrent._
import scala.concurrent.duration._
import scala.util._
import provingground.TextToInt._
import akka.util.Timeout.durationToTimeout
import akka.pattern.ask
import scala.language.postfixOps
/** This is to experiment with using actors for background computations
We use a Naive algorithm as this suits our purposes
*/
object Factorisation /*extends SimpleSwingApplication*/ {
/*
val s = new Dimension(1500, 1500)
case class FactoriseTask(n: Int)
class FactorActor extends Actor{
val factorsMemo = Stream.from(0) map (factorise(_))
def receive = {
case FactoriseTask(n:Int) =>
sender ! factorsMemo(n)
println(factorsMemo(n))
}
}
@tailrec def findPrimeFactor(n: Int, m:Int = 2): Int = {
if (m * m > n) 1
else if (n % m == 0) m
else findPrimeFactor(n, m+1)
}
@tailrec def factorise(n: Int, knownFactors: List[Int] =List()): List[Int] = {
val factor = findPrimeFactor(n)
if (factor == 1) n :: knownFactors else factorise(n/factor, factor :: knownFactors)
}
val factors = Stream.from(0) map (factorise(_))
val system=ActorSystem("MySystem")
import system.dispatcher
val factorActor = system.actorOf(Props[FactorActor], "FactorActor")
def askFactors(n: Int): Future[List[Int]] = {
factorActor.ask(FactoriseTask(n))(2 seconds).mapTo[List[Int]]
}
val toFactor = new TextArea(10, 10){
charWrap = true
}
val factorButton = new Button{
text = "Factorise"
verticalAlignment = Alignment.Top
}
val factorFrame = new FlowPanel{
contents += factorButton
contents += toFactor
border = Swing.EmptyBorder(20, 20, 20, 20)
}
val factorResult = new TextArea(10, 40){
charWrap = true
}
val leftPanel = new BoxPanel(Orientation.Vertical){
contents += new Label("Enter number to factorise")
contents += factorFrame
contents += new Label("Factors")
contents += factorResult
border = Swing.EmptyBorder(20, 20, 20, 20)
}
def top = new MainFrame{
title = "Factorising a Number"
contents = new BoxPanel(Orientation.Horizontal){
contents += leftPanel
minimumSize = s
}
}
listenTo(factorButton)
reactions +={
case swing.event.ButtonClicked(`factorButton`) =>
toFactor.text match {
case Int(m) if m>=0 =>
val ans = askFactors(m.toInt)
Await.ready(ans, 5 seconds)
ans onComplete {
case Success(s: List[Int]) => factorResult.text = s.toString; println(s)
case Failure(_) => factorResult.text = " could not compute the result"; println("failed")
}
case _ => factorResult.text = "I can only factorize non-negative integers"
}
}*/
}
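// ---------------------------------------------------------------------------
// Minimal sketch of the naive factorisation described in the commented-out
// code above, placed outside the original object so it compiles on its own.
// The names `findSmallestPrimeFactor` and `naiveFactorise` are illustrative.
// ---------------------------------------------------------------------------
object FactorisationSketch {
  import scala.annotation.tailrec

  @tailrec
  def findSmallestPrimeFactor(n: Int, m: Int = 2): Int =
    if (m * m > n) 1            // no factor below sqrt(n): n itself is prime
    else if (n % m == 0) m
    else findSmallestPrimeFactor(n, m + 1)

  @tailrec
  def naiveFactorise(n: Int, knownFactors: List[Int] = Nil): List[Int] = {
    val factor = findSmallestPrimeFactor(n)
    if (factor == 1) n :: knownFactors
    else naiveFactorise(n / factor, factor :: knownFactors)
  }

  // naiveFactorise(360) == List(5, 3, 3, 2, 2, 2)
}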
|
siddhartha-gadgil/ProvingGround
|
digressions/src/main/scala/provingground/codeexperiments/Factorisation.scala
|
Scala
|
mit
| 2,765
|
/**
* Original work: SecureSocial (https://github.com/jaliss/securesocial)
* Copyright 2013 Brian Porter (poornerd at gmail dot com) - twitter: @poornerd
*
* Derivative work: Silhouette (https://github.com/mohiva/play-silhouette)
* Modifications Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.impl.providers.oauth2
import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.api.util.HTTPLayer
import com.mohiva.play.silhouette.impl.exceptions.ProfileRetrievalException
import com.mohiva.play.silhouette.impl.providers._
import com.mohiva.play.silhouette.impl.providers.oauth2.FoursquareProvider._
import play.api.libs.concurrent.Execution.Implicits._
import play.api.libs.json.JsValue
import scala.concurrent.Future
/**
* Base Foursquare OAuth2 provider.
*
* @see https://developer.foursquare.com/overview/auth
* @see https://developer.foursquare.com/overview/responses
* @see https://developer.foursquare.com/docs/explore
*/
trait BaseFoursquareProvider extends OAuth2Provider {
/**
* The content type to parse a profile from.
*/
type Content = JsValue
/**
* The provider ID.
*/
val id = ID
/**
* Defines the URLs that are needed to retrieve the profile data.
*/
protected val urls = Map("api" -> API)
/**
* Builds the social profile.
*
* @param authInfo The auth info received from the provider.
* @return On success the built social profile, otherwise a failure.
*/
protected def buildProfile(authInfo: OAuth2Info): Future[Profile] = {
val version = settings.customProperties.getOrElse(APIVersion, DefaultAPIVersion)
httpLayer.url(urls("api").format(authInfo.accessToken, version)).get().flatMap { response =>
val json = response.json
val errorType = (json \ "meta" \ "errorType").asOpt[String]
(json \ "meta" \ "code").asOpt[Int] match {
case Some(code) if code != 200 =>
val errorDetail = (json \ "meta" \ "errorDetail").asOpt[String]
throw new ProfileRetrievalException(SpecifiedProfileError.format(id, code, errorType, errorDetail))
case _ =>
// Status code 200 and an existing errorType can only be a deprecated error
// https://developer.foursquare.com/overview/responses
if (errorType.isDefined) {
logger.info("This implementation may be deprecated! Please contact the Silhouette team for a fix!")
}
profileParser.parse(json)
}
}
}
}
/**
* The profile parser for the common social profile.
*
* @param settings The provider settings.
*/
class FoursquareProfileParser(settings: OAuth2Settings) extends SocialProfileParser[JsValue, CommonSocialProfile] {
/**
* Parses the social profile.
*
* @param json The content returned from the provider.
* @return The social profile from the given result.
*/
def parse(json: JsValue) = Future.successful {
val user = json \ "response" \ "user"
val userID = (user \ "id").as[String]
val lastName = (user \ "lastName").asOpt[String]
val firstName = (user \ "firstName").asOpt[String]
val avatarURLPart1 = (user \ "photo" \ "prefix").asOpt[String]
val avatarURLPart2 = (user \ "photo" \ "suffix").asOpt[String]
val email = (user \ "contact" \ "email").asOpt[String].filter(!_.isEmpty)
val resolution = settings.customProperties.getOrElse(AvatarResolution, DefaultAvatarResolution)
CommonSocialProfile(
loginInfo = LoginInfo(ID, userID),
firstName = firstName,
lastName = lastName,
avatarURL = for (prefix <- avatarURLPart1; postfix <- avatarURLPart2) yield prefix + resolution + postfix,
email = email)
}
}
/**
* The Foursquare OAuth2 Provider.
*
* @param httpLayer The HTTP layer implementation.
* @param stateProvider The state provider implementation.
* @param settings The provider settings.
*/
class FoursquareProvider(
protected val httpLayer: HTTPLayer,
protected val stateProvider: OAuth2StateProvider,
val settings: OAuth2Settings)
extends BaseFoursquareProvider with CommonSocialProfileBuilder {
/**
* The type of this class.
*/
type Self = FoursquareProvider
/**
* The profile parser implementation.
*/
val profileParser = new FoursquareProfileParser(settings)
/**
* Gets a provider initialized with a new settings object.
*
* @param f A function which gets the settings passed and returns different settings.
* @return An instance of the provider initialized with new settings.
*/
def withSettings(f: (Settings) => Settings) = new FoursquareProvider(httpLayer, stateProvider, f(settings))
}
/**
* The companion object.
*/
object FoursquareProvider {
/**
* The version of this implementation.
*
* @see https://developer.foursquare.com/overview/versioning
*/
val DefaultAPIVersion = "20140206"
/**
* The default avatar resolution.
*/
val DefaultAvatarResolution = "100x100"
/**
* Some custom properties for this provider.
*/
val APIVersion = "api.version"
val AvatarResolution = "avatar.resolution"
/**
* The error messages.
*/
val SpecifiedProfileError = "[Silhouette][%s] Error retrieving profile information. Error code: %s, type: %s, detail: %s"
/**
* The Foursquare constants.
*/
val ID = "foursquare"
val API = "https://api.foursquare.com/v2/users/self?oauth_token=%s&v=%s"
}
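// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file. The JSON shape mirrors
// the fields read by FoursquareProfileParser above; all field values and the
// object name are made up for the example.
// ---------------------------------------------------------------------------
object FoursquareProfileParserSketch {
  import play.api.libs.json.Json

  def example(settings: OAuth2Settings): Future[CommonSocialProfile] = {
    val json = Json.parse(
      """{"response": {"user": {
        |  "id": "42", "firstName": "Jane", "lastName": "Doe",
        |  "photo": {"prefix": "https://example.com/img/", "suffix": "/photo.jpg"},
        |  "contact": {"email": "jane@example.com"}
        |}}}""".stripMargin)
    // parse returns a Future even though it performs no I/O here.
    new FoursquareProfileParser(settings).parse(json)
  }
}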
|
rfranco/play-silhouette
|
silhouette/app/com/mohiva/play/silhouette/impl/providers/oauth2/FoursquareProvider.scala
|
Scala
|
apache-2.0
| 5,992
|
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
import org.scalatest._
class GeneralContainElementSpec extends Spec with Matchers with CheckedEquality {
object `the contain theSameElementsAs syntax` {
def `should work on different types` {
val jul: java.util.List[Int] = new java.util.ArrayList[Int]
jul.add(3)
jul.add(2)
jul.add(1)
List(1, 2, 3) should (contain theSameElementsAs jul)
}
}
}
|
travisbrown/scalatest
|
src/test/scala/org/scalactic/GeneralContainElementSpec.scala
|
Scala
|
apache-2.0
| 1,012
|
package com.gfeuillen.neo4j.sink
import java.util.Properties
import com.gfeuillen.neo4j.util.ConnectorProperties
import com.gfeuillen.neo4j.wrapper.ScalaSinkConnector
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.connect.connector.Task
import scala.collection.mutable
class Neo4JSinkConnector extends ScalaSinkConnector{
val properties:mutable.Map[String,String] = mutable.Map.empty
override def start(map: mutable.Map[String, String]): Unit ={
properties ++= map
}
override def sTaskConfigs(i: Int): Seq[mutable.Map[String, String]] = (1 to i).map(n => properties.clone() + ("task.number" -> n.toString))
override def taskClass(): Class[_ <: Task] = classOf[Neo4JSinkTask]
override def version(): String = ConnectorProperties.version
override def stop(): Unit = {}
override def config(): ConfigDef = {
new ConfigDef()
}
}
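// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: every task config is a
// copy of the connector properties plus its own "task.number". The property
// key used below is made up.
// ---------------------------------------------------------------------------
object Neo4JSinkConnectorSketch {
  def example(): Unit = {
    val connector = new Neo4JSinkConnector
    connector.start(mutable.Map("neo4j.server.uri" -> "bolt://localhost:7687"))
    val configs = connector.sTaskConfigs(2)
    // configs.head("task.number") == "1", configs(1)("task.number") == "2"
    assert(configs.size == 2)
  }
}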
|
Gfeuillen/neo4j-sink
|
sink/src/main/scala/com/gfeuillen/neo4j/sink/Neo4JSinkConnector.scala
|
Scala
|
mit
| 888
|
/*
* Copyright 2017 Iaroslav Zeigerman
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package akkeeper
import akka.actor.ActorRef
import akka.cluster.{Member, MemberStatus, UniqueAddress}
import akka.pattern.gracefulStop
import scala.concurrent.duration._
trait ActorTestUtils extends AwaitMixin {
protected val gracefulStopTimeout: FiniteDuration = 6 seconds
protected def gracefulActorStop(actor: ActorRef): Unit = {
await(gracefulStop(actor, gracefulStopTimeout))
}
protected def createTestMember(addr: UniqueAddress): Member = {
createTestMember(addr, MemberStatus.Up)
}
protected def createTestMember(addr: UniqueAddress, status: MemberStatus): Member = {
createTestMember(addr, status, Set.empty)
}
protected def createTestMember(addr: UniqueAddress, status: MemberStatus, roles: Set[String]): Member = {
val ctr = classOf[Member].getDeclaredConstructor(classOf[UniqueAddress], classOf[Int],
classOf[MemberStatus], classOf[Set[String]])
ctr.newInstance(addr, new Integer(1), status, roles + "dc-default")
}
}
|
akkeeper-project/akkeeper
|
akkeeper/src/test/scala/akkeeper/ActorTestUtils.scala
|
Scala
|
apache-2.0
| 1,581
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.controller.test
import org.junit.runner.RunWith
import org.scalatest.BeforeAndAfterEach
import org.scalatest.junit.JUnitRunner
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.model.Uri
import spray.json._
import spray.json.DefaultJsonProtocol._
import org.apache.openwhisk.core.controller.SwaggerDocs
/**
* Tests swagger routes.
*
* @Idioglossia
* "using Specification DSL to write unit tests, as in should, must, not, be"
* "using Specs2RouteTest DSL to chain HTTP requests for unit testing, as in ~>"
*/
@RunWith(classOf[JUnitRunner])
class SwaggerRoutesTests extends ControllerTestCommon with BeforeAndAfterEach {
behavior of "Swagger routes"
it should "server docs" in {
implicit val tid = transid()
val swagger = new SwaggerDocs(Uri.Path.Empty, "infoswagger.json")
Get("/docs") ~> Route.seal(swagger.swaggerRoutes) ~> check {
status shouldBe PermanentRedirect
header("location").get.value shouldBe "docs/index.html?url=/api-docs"
}
Get("/api-docs") ~> Route.seal(swagger.swaggerRoutes) ~> check {
status shouldBe OK
responseAs[JsObject].fields("swagger") shouldBe JsString("2.0")
}
}
}
|
cbickel/openwhisk
|
tests/src/test/scala/org/apache/openwhisk/core/controller/test/SwaggerRoutesTests.scala
|
Scala
|
apache-2.0
| 2,127
|
package models
import api.{Tweet, TweetLocation}
import models.traits.{BaseTable, BaseTableQueryOps, GenericDB}
import org.joda.time.DateTime
import play.api.libs.json.{Format, Json}
import scala.slick.driver.PostgresDriver.simple._
import scala.slick.jdbc.JdbcBackend.Database.dynamicSession
case class UserTweet(id: Long, twitterId: Long, userId: Long, tweet: Tweet, timestamp: DateTime) {
val user = UserTable.getById(userId)
}
trait TweetTableMappers {
implicit val apiLocationFormatter: Format[TweetLocation] = Json.format[TweetLocation]
implicit val apiTweetFormatter: Format[Tweet] = Json.format[Tweet]
implicit val tweetMapper = MappedColumnType.base[Tweet, String](
Json.toJson(_).toString(),
Json.parse(_).as[Tweet])
}
class UserTweetTableDef(tag: Tag) extends Table[UserTweet](tag, "UserTweet") with BaseTable[UserTweet] with TweetTableMappers {
lazy val twitterId = column[Long]("twitterId", O.NotNull)
lazy val userId = column[Long]("userId", O.NotNull)
lazy val tweet = column[Tweet]("tweet", O.NotNull)
lazy val timestamp = column[DateTime]("timestamp", O.NotNull)
def * = (id, twitterId, userId, tweet, timestamp) <>(UserTweet.tupled, UserTweet.unapply)
}
object UserTweetTable extends TableQuery(new UserTweetTableDef(_)) with BaseTableQueryOps[UserTweetTableDef, UserTweet] with TweetTableMappers {
self =>
lazy val db = GenericDB
def listOrdered(limit: Int, offset: Int): List[UserTweet] = {
db.withSession {
self.sortBy(_.timestamp.desc).drop(offset).take(limit).list
}
}
def listHashtag(filter: String, limit: Int, offset: Int): List[UserTweet] = {
db.withSession {
(for {
hashtag <- HashtagTable if hashtag.label like "%" + filter + "%"
userTweet <- this if userTweet.id === hashtag.tweetId
} yield userTweet).sortBy(_.timestamp).drop(offset).take(limit).list
}
}
def listUsername(filter: String, limit: Int, offset: Int): List[UserTweet] = {
db.withSession {
(for {
user <- UserTable if user.username like "%" + filter + "%"
userTweet <- this if userTweet.userId === user.id
} yield userTweet).sortBy(_.timestamp).drop(offset).take(limit).list
}
}
def listLocation(filter: String, limit: Int, offset: Int): List[UserTweet] = {
db.withSession {
(for {
location <- LocationTable if location.label like "%" + filter + "%"
userTweet <- this if userTweet.id === location.tweetId
} yield userTweet).sortBy(_.timestamp).drop(offset).take(limit).list
}
}
def listCount(limit: Int, offset: Int): Seq[(User, Int)] = {
db.withSession {
self.groupBy(_.userId).map {
case (t1, t2) =>
t1 -> t2.size
}.sortBy(_._2.desc).drop(offset).take(limit).list.map {
case (userId, count) =>
UserTable.getById(userId).map {
user =>
(user, count)
}
}.flatten
}
}
def getMaxTwitterId(userId: Long): Option[Long] = {
db.withSession {
self.filter(_.userId === userId).map(_.twitterId).max.run
}
}
}
|
rtfpessoa/distributed-twitter-crawler
|
app/models/UserTweet.scala
|
Scala
|
mit
| 3,097
|
package controllers
import java.util.{Calendar, Date}
import play.api.data.Form
import play.api.data.Forms._
import play.api.db.DB
import play.api.libs.json._
import play.api.mvc._
import anorm._
import play.api.Play.current
class Record extends Controller with Secured{
val recordFormTuple = Form(
tuple(
"id"->number,
"cost" -> bigDecimal,
"name" -> nonEmptyText,
"boughtdate" -> nonEmptyText,
"details" -> nonEmptyText
)
)
def records = withAuth{ account=> implicit request=>
Ok(views.html.records())
}
def record(id: Int) = withAuth{ account=> implicit request=>
var filledRecordFormTuple=recordFormTuple.fill((id,0.00,"","",""))
DB.withConnection{
implicit c=>
val findExpense=SQL("Select * from expenses where id={id}").on("id"->id).apply()
if(findExpense!=Stream.empty) {
val expense = findExpense.head;
val expenseMap= new Tuple5[Int,BigDecimal,String,String,String](expense[Int]("id"), BigDecimal(expense[String]("cost")),expense[String]("name"),expense[String]("boughtdate"),expense[String]("details"))
filledRecordFormTuple=recordFormTuple.fill(expenseMap)
}
}
Ok(views.html.record(filledRecordFormTuple))
}
def recordsubmit = withAuth{ account=> implicit request=>
recordFormTuple.bindFromRequest().fold(
hasErrors=>{
var error="";
hasErrors.errors.foreach(
e=> error = error+e.key+" "+e.message+", "
)
Redirect(routes.Record.record(hasErrors.data.get("id").get.toInt)).flashing("error"->error.dropRight(2))
},
success=>{
DB.withConnection{
implicit c=>
if(success._1==0){
val id: Option[Long]=SQL("insert into expenses(cost,name,boughtdate,details) values({cost},{name},{boughtdate},{details})").on("cost"->success._2,"name"->success._3, "boughtdate"->success._4, "details"->success._5).executeInsert()
if(!id.isEmpty){
Redirect(routes.Record.record(id.get.asInstanceOf[Int])).flashing("success"->"Save successfully")
}else{
Redirect(routes.Record.record(success._1)).flashing("error"->"Error in saving")
}
}else{
val rowsUpdated:Int=SQL("update expenses set cost={cost},name={name},boughtdate={boughtdate},details={details} where id={id}").on("cost"->success._2,"name"->success._3, "boughtdate"->success._4, "details"->success._5, "id"->success._1).executeUpdate()
if(rowsUpdated>0){
Redirect(routes.Record.record(success._1)).flashing("success"->"Save successfully")
}else{
Redirect(routes.Record.record(success._1)).flashing("error"->"Error in saving")
}
}
}
}
)
}
def recordsjson(page:Int,search:Option[String],filter:Option[String]) = Action{
implicit request=>
DB.withConnection {
implicit c =>
val sqlRecordsToJson = (records:Stream[Row]) => {
var expensesJson = JsArray()
records.foreach(row=>{
expensesJson=expensesJson.append(JsObject(Seq(
"id"->JsNumber(row[Int]("id")),
"cost"->JsString(row[String]("cost")),
"name"->JsString(row[String]("name")),
"boughtdate"->JsString(row[String]("boughtdate")),
"details"->JsString(row[String]("details"))
)))
})
expensesJson
}
val limit = 5
val offset = (page - 1) * limit
val getJsonRecordsWithFilter = (search: String, filter: String) => {
Map(
"records" ->
sqlRecordsToJson{
SQL(s"select * from expenses where $filter like '%$search%' order by id desc limit $limit offset $offset").apply()
},
"countRecords" -> {
var countRow = SQL(s"select count(*) as c from expenses where $filter like '%$search%'").apply().head
JsNumber(countRow[Long]("c"))
}
)
}
val getJsonRecords = {
Map(
"records" ->
sqlRecordsToJson {
SQL"select * from expenses order by id desc limit $limit offset $offset".apply()
},
"countRecords" -> {
var countRow = SQL"Select count(*) as c from expenses".apply().head
JsNumber(countRow[Long]("c"))
}
)
}
val records = search match {
case Some(s) => {
filter match {
case Some("name") => Some(getJsonRecordsWithFilter(s, "name"))
case Some("cost") => Some(getJsonRecordsWithFilter(s, "cost"))
case Some("date") => Some(getJsonRecordsWithFilter(s, "boughtdate"))
case Some("details") => Some(getJsonRecordsWithFilter(s, "details"))
case _ => None
}
}
case _ => Some(getJsonRecords)
}
val jsonRecords = records match {
case Some(s) => {
JsObject(Seq(
"total"->s.get("countRecords").get,
"per_page"->JsNumber(limit),
"current_page"->JsNumber(page),
"data"->s.get("records").get
))
}
case _ => {
JsObject(Seq(
"total"->JsNumber(0),
"per_page"->JsNumber(0),
"current_page"->JsNumber(0),
"data"->JsArray()
))
}
}
Ok(Json.toJson(jsonRecords))
}
}
def recorddelete = Action {
implicit request=>
Form("id"->number).bindFromRequest().fold(
hasErrors =>{
Ok("Error")
},
id =>{
DB.withConnection {
implicit c =>
val result: Int = SQL("delete from expenses where id = {id}").on("id"->id).executeUpdate()
if(result>0){
Ok("Ok")
}else{
Ok("Error")
}
}
}
)
}
def expensejson(startdate:String,enddate:String) = Action{
implicit request=>
val dateFormat = new java.text.SimpleDateFormat("yyyy-MM-dd")
var startDate = dateFormat.parse(startdate)
val endDate = dateFormat.parse(enddate)
val calendar = Calendar.getInstance();
var expensesJson=JsArray()
DB.withConnection {
implicit c =>
val selectExpenses = SQL(s"select sum(cost) as costtotal,boughtdate from expenses where date(boughtdate)>=date('${dateFormat.format(startDate)}') and date(boughtdate)<=date('${dateFormat.format(endDate)}') group by boughtdate")
val expensesList:List[(String,BigDecimal)] = selectExpenses().map(row =>
row[String]("boughtdate")->row[BigDecimal]("costtotal")
).toList
while(startDate.compareTo(endDate)<1){
val date = dateFormat.format(startDate)
val expenses:BigDecimal = expensesList.find{ a=> dateFormat.parse(a._1).compareTo(dateFormat.parse(date))==0 } match {
case None => 0
case x=> x.get._2
}
expensesJson = expensesJson.:+(JsObject(
Seq("date"->JsString(date),"expenses"->JsNumber(expenses))
))
calendar.setTime(startDate)
calendar.add(Calendar.DAY_OF_MONTH,1)
startDate=calendar.getTime
}
}
Ok(Json.toJson(expensesJson))
}
}
|
orlyngerano/myexpenses
|
app/controllers/Record.scala
|
Scala
|
apache-2.0
| 7,691
|
// Copyright 2014,2015,2016,2017,2018,2019,2020 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package commbank.grimlock.framework.metadata
import shapeless.{ :+:, CNil, Coproduct }
import shapeless.ops.coproduct.Inject
/** Trait for variable types. */
trait Type extends java.io.Serializable {
/** Returns the most general super type of `this`. */
def getRootType: Type = parent.map(_.getRootType).getOrElse(this)
/** Check if this is a sub-type of `that`. */
def isOfType(that: Type): Boolean = (this == that) || parent.map(_.isOfType(that)).getOrElse(false)
/** Return the first type in common between this and `that`, or `Mixed` in case of no common ancestor. */
def getCommonType(that: Type): Type = {
if (this == that) this
else if (this.isRootType) { if (that.isOfType(this)) this else MixedType }
else if (that.isRootType) { if (this.isOfType(that)) that else MixedType }
else sharedParent(that).getOrElse(MixedType)
}
override def toString: String = toShortString.capitalize + "Type"
/** Return a concise (terse) string representation of a type. */
def toShortString: String
protected val parent: Option[Type] = None
private def isRootType: Boolean = parent.isEmpty
private def sharedParent(that: Type): Option[Type] = parent
.flatMap(p => if (that.isOfType(p)) Option(p) else p.sharedParent(that))
}
/** Companion object to `Type` trait. */
object Type {
/** Type for a default Type co-product when parsing Types from string. */
type DefaultTypes = CategoricalType.type :+:
ContinuousType.type :+:
DateType.type :+:
DiscreteType.type :+:
MixedType.type :+:
NominalType.type :+:
NumericType.type :+:
OrdinalType.type :+:
CNil
/** Type that captures all constraints for parsing default Types from string. */
trait TextParseConstraints[C <: Coproduct] extends java.io.Serializable {
implicit val asCategorical: Inject[C, CategoricalType.type]
implicit val asContinuous: Inject[C, ContinuousType.type]
implicit val asDate: Inject[C, DateType.type]
implicit val asDiscrete: Inject[C, DiscreteType.type]
implicit val asMixed: Inject[C, MixedType.type]
implicit val asNominal: Inject[C, NominalType.type]
implicit val asNumeric: Inject[C, NumericType.type]
implicit val asOrdinal: Inject[C, OrdinalType.type]
}
/** Implicit meeting text parsing constraints for the default Types. */
implicit def typeTextParseConstraints[
C <: Coproduct
](implicit
ev1: Inject[C, CategoricalType.type],
ev2: Inject[C, ContinuousType.type],
ev3: Inject[C, DateType.type],
ev4: Inject[C, DiscreteType.type],
ev5: Inject[C, MixedType.type],
ev6: Inject[C, NominalType.type],
ev7: Inject[C, NumericType.type],
ev8: Inject[C, OrdinalType.type]
): TextParseConstraints[C] = new TextParseConstraints[C] {
implicit val asCategorical = ev1
implicit val asContinuous = ev2
implicit val asDate = ev3
implicit val asDiscrete = ev4
implicit val asMixed = ev5
implicit val asNominal = ev6
implicit val asNumeric = ev7
implicit val asOrdinal = ev8
}
/**
* Parse a type from a string.
*
* @param str String from which to parse the type.
*
* @return A `Some[C]` in case of success, `None` otherwise.
*/
def fromShortString[C <: Coproduct](str: String)(implicit ev: TextParseConstraints[C]): Option[C] = {
import ev._
str match {
case CategoricalType.name => Option(Coproduct(CategoricalType))
case ContinuousType.name => Option(Coproduct(ContinuousType))
case DateType.name => Option(Coproduct(DateType))
case DiscreteType.name => Option(Coproduct(DiscreteType))
case MixedType.name => Option(Coproduct(MixedType))
case NominalType.name => Option(Coproduct(NominalType))
case NumericType.name => Option(Coproduct(NumericType))
case OrdinalType.name => Option(Coproduct(OrdinalType))
case _ => None
}
}
}
/** Type for when the type is mixed. */
case object MixedType extends Type {
/** Short name for this type. */
val name = "mixed"
def toShortString: String = MixedType.name
}
/** Type for numeric types. */
trait NumericType extends Type {
def toShortString: String = NumericType.name
}
/** Companion object to `NumericType` trait. */
case object NumericType extends NumericType {
/** Short name for this type. */
val name = "numeric"
}
/** Type for continuous types. */
case object ContinuousType extends NumericType {
/** Short name for this type. */
val name = "continuous"
override def toShortString: String = ContinuousType.name
override protected val parent: Option[Type] = Some(NumericType)
}
/** Type for discrete types. */
case object DiscreteType extends NumericType {
/** Short name for this type. */
val name = "discrete"
override def toShortString: String = DiscreteType.name
override protected val parent: Option[Type] = Some(NumericType)
}
/** Type for categorical types. */
trait CategoricalType extends Type {
def toShortString: String = CategoricalType.name
}
/** Companion object to `CategoricalType` trait. */
case object CategoricalType extends CategoricalType {
/** Short name for this type. */
val name = "categorical"
}
/** Type for nominal types. */
case object NominalType extends CategoricalType {
/** Short name for this type. */
val name = "nominal"
override def toShortString: String = NominalType.name
override protected val parent: Option[Type] = Some(CategoricalType)
}
/** Type for ordinal types. */
case object OrdinalType extends CategoricalType {
/** Short name for this type. */
val name = "ordinal"
override def toShortString: String = OrdinalType.name
override protected val parent: Option[Type] = Some(CategoricalType)
}
/** Type for date types. */
case object DateType extends Type {
/** Short name for this type. */
val name = "date"
def toShortString: String = DateType.name
}
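// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file, showing how the type
// hierarchy above behaves. The object name is made up.
// ---------------------------------------------------------------------------
object TypeHierarchySketch {
  // Continuous and Discrete meet at their shared parent, Numeric.
  val common: Type = ContinuousType.getCommonType(DiscreteType) // NumericType

  // Ordinal is a sub-type of Categorical, but not of Numeric.
  val ordinalIsCategorical: Boolean = OrdinalType.isOfType(CategoricalType) // true
  val ordinalIsNumeric: Boolean = OrdinalType.isOfType(NumericType) // false

  // Two types with no shared ancestor fall back to Mixed.
  val noCommonAncestor: Type = OrdinalType.getCommonType(ContinuousType) // MixedType

  // Parsing goes through the DefaultTypes coproduct and its parse constraints.
  val parsed: Option[Type.DefaultTypes] =
    Type.fromShortString[Type.DefaultTypes]("ordinal")
}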
|
CommBank/grimlock
|
grimlock-core/src/main/scala/commbank/grimlock/framework/Type.scala
|
Scala
|
apache-2.0
| 6,509
|
/*
* Copyright 2019 Google LLC All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.cloud.bqsh
import scopt.OptionParser
object QueryOptionParser extends OptionParser[QueryConfig]("query") with ArgParser[QueryConfig] {
def parse(args: Seq[String]): Option[QueryConfig] =
parse(args, QueryConfig())
head("query", Bqsh.UserAgent)
help("help")
.text("prints this usage text")
// z/OS Options
opt[Seq[String]]("parameters_from_file")
.text("Comma-separated query parameters in the form [NAME]:[TYPE]:[DDNAME]. An empty name creates a positional parameter. [TYPE] may be omitted to assume a STRING value in the form: name::ddname or ::ddname. NULL produces a null value.")
.action((x,c) => c.copy(parametersFromFile = x))
opt[Unit]("create_if_needed")
.text("When specified, create destination table. The default value is false.")
.action((x,c) => c.copy(createIfNeeded = true))
opt[Unit]('m', "allow_multiple_queries")
.text("When specified, allow multiple queries. The default value is false.")
.action((x,c) => c.copy(allowMultipleQueries = true))
// Standard Options
opt[Unit]("allow_large_results")
.text("When specified, enables large destination table sizes for legacy SQL queries.")
.action((x,c) => c.copy(allowLargeResults = true))
opt[Unit]("append_table")
.text("When specified, append data to a destination table. The default value is false.")
.action((x,c) => c.copy(appendTable = true))
opt[Unit]("batch")
.text("When specified, run the query in batch mode. The default value is false.")
.action((_,c) => c.copy(batch = true))
opt[Seq[String]]("clustering_fields")
.text("If specified, a comma-separated list of columns is used to cluster the destination table in a query. This flag must be used with the time partitioning flags to create either an ingestion-time partitioned table or a table partitioned on a DATE or TIMESTAMP column. When specified, the table is first partitioned, and then it is clustered using the supplied columns.")
.action((x,c) => c.copy(clusteringFields = x))
opt[String]("destination_kms_key")
.text("The Cloud KMS key used to encrypt the destination table data.")
.action((x,c) => c.copy(destinationKmsKey = x))
opt[String]("destination_schema")
.text("The path to a local JSON schema file or a comma-separated list of column definitions in the form [FIELD]:[DATA_TYPE],[FIELD]:[DATA_TYPE]. The default value is ''.")
.action((x,c) => c.copy(destinationSchema = x))
opt[String]("destination_table")
.text("The name of the destination table for writing query results. The default value is ''")
.action((x,c) => c.copy(destinationTable = x))
opt[Unit]("dry_run")
.text("When specified, the query is validated but not run.")
.action((_,c) => c.copy(dryRun = true))
opt[String]("external_table_definition")
.text("The table name and schema definition used in an external table query. The schema can be a path to a local JSON schema file or a comma-separated list of column definitions in the form [FIELD]:[DATA_TYPE],[FIELD]:[DATA_TYPE]. The format for supplying the table name and schema is: [TABLE]::[PATH_TO_FILE] or [TABLE]::[SCHEMA]@[SOURCE_FORMAT]=[CLOUD_STORAGE_URI]. Repeat this flag to query multiple tables.")
.action((x,c) => c.copy(externalTableDefinition = x))
opt[String]("label")
.text("A label to apply to a query job in the form [KEY]:[VALUE]. Repeat this flag to specify multiple labels.")
.action((x,c) => c.copy(label = x))
opt[Long]("maximum_bytes_billed")
.text("An integer that limits the bytes billed for the query. If the query goes beyond the limit, it fails (without incurring a charge). If not specified, the bytes billed is set to the project default.")
.action((x,c) => c.copy(maximumBytesBilled = x))
opt[Seq[String]]("parameters")
.text("comma-separated query parameters in the form [NAME]:[TYPE]:[VALUE]. An empty name creates a positional parameter. [TYPE] may be omitted to assume a STRING value in the form: name::value or ::value. NULL produces a null value.")
.validate{x =>
if (x.exists(_.split(':').length != 3))
failure("parameter must be in the form [NAME]:[TYPE]:[VALUE]")
else
success
}
.action((x,c) => c.copy(parameters = x))
opt[Unit]("replace")
.text("If specified, overwrite the destination table with the query results. The default value is false.")
.action((_,c) => c.copy(replace = true))
opt[Unit]("require_cache")
.text("If specified, run the query only if results can be retrieved from the cache.")
.action((_,c) => c.copy(requireCache = true))
opt[Boolean]("require_partition_filter")
.text("If specified, a partition filter is required for queries over the supplied table. This flag can only be used with a partitioned table.")
.action((x,c) => c.copy(requirePartitionFilter = x))
opt[Seq[String]]("schema_update_option")
.text("When appending data to a table (in a load job or a query job), or when overwriting a table partition, specifies how to update the schema of the destination table. Possible values include:\\n\\n ALLOW_FIELD_ADDITION: Allow\\nnew fields to be added\\n ALLOW_FIELD_RELAXATION: Allow relaxing REQUIRED fields to NULLABLE")
.action((x,c) => c.copy(schemaUpdateOption = x))
opt[Long]("time_partitioning_expiration")
.text("An integer that specifies (in seconds) when a time-based partition should be deleted. The expiration time evaluates to the partition's UTC date plus the integer value. A negative number indicates no expiration.")
.action((x,c) => c.copy(timePartitioningExpiration = x))
opt[String]("time_partitioning_field")
.text("The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.")
.action((x,c) => c.copy(timePartitioningField = x))
opt[String]("time_partitioning_type")
.text("Enables time-based partitioning on a table and sets the partition type. Currently, the only possible value is DAY which generates one partition per day.")
.action((x,c) => c.copy(timePartitioningType = x))
opt[Boolean]("use_cache")
.text("When specified, caches the query results. The default value is true.")
.action((x,c) => c.copy(useCache = x))
opt[Unit]("use_legacy_sql")
.text("When set to false, runs a standard SQL query. The default value is false (uses Standard SQL).")
.action((x,c) => c.copy(useLegacySql = true))
// Global options
opt[String]("dataset_id")
.text(GlobalConfig.datasetIdText)
.action((x,c) => c.copy(datasetId = x))
opt[Unit]("debug_mode")
.text(GlobalConfig.debugModeText)
.action((x,c) => c.copy(debugMode = true))
opt[String]("job_id")
.text(GlobalConfig.jobIdText)
.action((x,c) => c.copy(jobId = x))
opt[String]("location")
.text(GlobalConfig.locationText)
.action((x,c) => c.copy(location = x))
opt[String]("project_id")
.text(GlobalConfig.projectIdText)
.action((x,c) => c.copy(projectId = x))
opt[Boolean]("synchronous_mode")
.text(GlobalConfig.synchronousModeText)
.action((x,c) => c.copy(sync = x))
opt[Boolean]("sync")
.text(GlobalConfig.syncText)
.action((x,c) => c.copy(sync = x))
// Custom Options
opt[String]("stats_table")
.optional()
.text("tablespec of table to insert stats")
.action((x,c) => c.copy(statsTable = x))
}
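// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: parsing a hand-written
// argument list into a QueryConfig. All flag values below are made up.
// ---------------------------------------------------------------------------
object QueryOptionParserSketch {
  def example(): Unit = {
    val args = Seq(
      "--project_id", "my-project",
      "--destination_table", "mydataset.mytable",
      "--replace",
      "--maximum_bytes_billed", "1000000000")

    QueryOptionParser.parse(args) match {
      case Some(cfg) =>
        println(s"replace=${cfg.replace} destination=${cfg.destinationTable}")
      case None =>
        println("invalid arguments")
    }
  }
}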
|
CloudVLab/professional-services
|
tools/bigquery-zos-mainframe-connector/src/main/scala/com/google/cloud/bqsh/QueryOptionParser.scala
|
Scala
|
apache-2.0
| 8,057
|
package com.twitter.finagle.util
import java.net.InetSocketAddress
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class InetSocketAddressUtilTest extends FunSuite {
test("parseHosts") {
assert(InetSocketAddressUtil.parseHosts("").isEmpty)
assert(InetSocketAddressUtil.parseHosts(",").isEmpty)
intercept[IllegalArgumentException] { InetSocketAddressUtil.parseHosts("gobble-d-gook") }
assert(InetSocketAddressUtil.parseHosts("127.0.0.1:11211") === Seq(new InetSocketAddress("127.0.0.1", 11211)))
assert(InetSocketAddressUtil.parseHosts("127.0.0.1:11211") === Seq(new InetSocketAddress("127.0.0.1", 11211)))
assert(InetSocketAddressUtil.parseHosts("127.0.0.1:11211,") === Seq(new InetSocketAddress("127.0.0.1", 11211)))
assert(InetSocketAddressUtil.parseHosts(",127.0.0.1:11211,") === Seq(new InetSocketAddress("127.0.0.1", 11211)))
assert(InetSocketAddressUtil.parseHosts("127.0.0.1:11211 ") === Seq(new InetSocketAddress("127.0.0.1", 11211)))
assert(InetSocketAddressUtil.parseHosts(" 127.0.0.1:11211 ") === Seq(new InetSocketAddress("127.0.0.1", 11211)))
assert(InetSocketAddressUtil.parseHosts("127.0.0.1:11211,127.0.0.1:11212") ===
Seq(new InetSocketAddress("127.0.0.1", 11211), new InetSocketAddress("127.0.0.1", 11212)))
assert(InetSocketAddressUtil.parseHosts("127.0.0.1:11211 127.0.0.1:11212") ===
Seq(new InetSocketAddress("127.0.0.1", 11211), new InetSocketAddress("127.0.0.1", 11212)))
assert(InetSocketAddressUtil.parseHosts(":11211") === Seq(new InetSocketAddress("0.0.0.0", 11211)))
}
}
|
firebase/finagle
|
finagle-core/src/test/scala/com/twitter/finagle/util/InetSocketAddressUtilTest.scala
|
Scala
|
apache-2.0
| 1,652
|
package gitbucket.core
import java.io.FileOutputStream
import java.nio.charset.StandardCharsets
import java.sql.Connection
import java.util.UUID
import gitbucket.core.model.Activity
import gitbucket.core.util.Directory.ActivityLog
import gitbucket.core.util.JDBCUtil
import io.github.gitbucket.solidbase.Solidbase
import io.github.gitbucket.solidbase.migration.{LiquibaseMigration, Migration, SqlMigration}
import io.github.gitbucket.solidbase.model.{Module, Version}
import org.json4s.{Formats, NoTypeHints}
import org.json4s.jackson.Serialization
import org.json4s.jackson.Serialization.write
import scala.util.Using
object GitBucketCoreModule
extends Module(
"gitbucket-core",
new Version(
"4.0.0",
new LiquibaseMigration("update/gitbucket-core_4.0.xml"),
new SqlMigration("update/gitbucket-core_4.0.sql")
),
new Version("4.1.0"),
new Version("4.2.0", new LiquibaseMigration("update/gitbucket-core_4.2.xml")),
new Version("4.2.1"),
new Version("4.3.0"),
new Version("4.4.0"),
new Version("4.5.0"),
new Version("4.6.0", new LiquibaseMigration("update/gitbucket-core_4.6.xml")),
new Version(
"4.7.0",
new LiquibaseMigration("update/gitbucket-core_4.7.xml"),
new SqlMigration("update/gitbucket-core_4.7.sql")
),
new Version("4.7.1"),
new Version("4.8"),
new Version("4.9.0", new LiquibaseMigration("update/gitbucket-core_4.9.xml")),
new Version("4.10.0"),
new Version("4.11.0", new LiquibaseMigration("update/gitbucket-core_4.11.xml")),
new Version("4.12.0"),
new Version("4.12.1"),
new Version("4.13.0"),
new Version(
"4.14.0",
new LiquibaseMigration("update/gitbucket-core_4.14.xml"),
new SqlMigration("update/gitbucket-core_4.14.sql")
),
new Version("4.14.1"),
new Version("4.15.0"),
new Version("4.16.0"),
new Version("4.17.0"),
new Version("4.18.0"),
new Version("4.19.0"),
new Version("4.19.1"),
new Version("4.19.2"),
new Version("4.19.3"),
new Version("4.20.0"),
new Version("4.21.0", new LiquibaseMigration("update/gitbucket-core_4.21.xml")),
new Version("4.21.1"),
new Version("4.21.2"),
new Version("4.22.0", new LiquibaseMigration("update/gitbucket-core_4.22.xml")),
new Version("4.23.0", new LiquibaseMigration("update/gitbucket-core_4.23.xml")),
new Version("4.23.1"),
new Version("4.24.0", new LiquibaseMigration("update/gitbucket-core_4.24.xml")),
new Version("4.24.1"),
new Version("4.25.0", new LiquibaseMigration("update/gitbucket-core_4.25.xml")),
new Version("4.26.0"),
new Version("4.27.0", new LiquibaseMigration("update/gitbucket-core_4.27.xml")),
new Version("4.28.0"),
new Version("4.29.0"),
new Version("4.30.0"),
new Version("4.30.1"),
new Version("4.31.0", new LiquibaseMigration("update/gitbucket-core_4.31.xml")),
new Version("4.31.1"),
new Version("4.31.2"),
new Version("4.32.0", new LiquibaseMigration("update/gitbucket-core_4.32.xml")),
new Version("4.33.0"),
new Version(
"4.34.0",
new Migration() {
override def migrate(moduleId: String, version: String, context: java.util.Map[String, AnyRef]): Unit = {
implicit val formats: Formats = Serialization.formats(NoTypeHints)
import JDBCUtil._
val conn = context.get(Solidbase.CONNECTION).asInstanceOf[Connection]
val list = conn.select("SELECT * FROM ACTIVITY ORDER BY ACTIVITY_ID") {
rs =>
Activity(
activityId = UUID.randomUUID().toString,
userName = rs.getString("USER_NAME"),
repositoryName = rs.getString("REPOSITORY_NAME"),
activityUserName = rs.getString("ACTIVITY_USER_NAME"),
activityType = rs.getString("ACTIVITY_TYPE"),
message = rs.getString("MESSAGE"),
additionalInfo = {
val additionalInfo = rs.getString("ADDITIONAL_INFO")
if (rs.wasNull()) None else Some(additionalInfo)
},
activityDate = rs.getTimestamp("ACTIVITY_DATE")
)
}
Using.resource(new FileOutputStream(ActivityLog, true)) { out =>
list.foreach { activity =>
out.write((write(activity) + "\n").getBytes(StandardCharsets.UTF_8))
}
}
}
},
new LiquibaseMigration("update/gitbucket-core_4.34.xml")
),
new Version("4.35.0", new LiquibaseMigration("update/gitbucket-core_4.35.xml")),
new Version("4.35.1"),
new Version("4.35.2"),
new Version("4.35.3"),
new Version("4.36.0", new LiquibaseMigration("update/gitbucket-core_4.36.xml")),
new Version("4.36.1"),
new Version("4.36.2"),
new Version("4.37.0", new LiquibaseMigration("update/gitbucket-core_4.37.xml")),
new Version("4.37.1"),
new Version("4.37.2")
)
|
gitbucket/gitbucket
|
src/main/scala/gitbucket/core/GitBucketCoreModule.scala
|
Scala
|
apache-2.0
| 5,158
|
package be.wegenenverkeer.atomium.server.jdbc
trait PostgresDialect extends Dialect {
protected override def createEntryTableStatement(entryTableName: String): String = {
s"""CREATE TABLE IF NOT EXISTS $entryTableName (
|${EntryDbModel.Table.idColumn} SERIAL primary key,
|${EntryDbModel.Table.uuidColumn} varchar,
|${EntryDbModel.Table.valueColumn} text,
|${EntryDbModel.Table.timestampColumn} timestamp not null);""".stripMargin
}
protected override def dropEntryTable(entryTableName: String)(implicit jdbcContext: JdbcContext): Unit = {
sqlUpdate(s"DROP TABLE $entryTableName")
}
override def fetchFeed(feedName: String)(implicit jdbcContext: JdbcContext): Option[FeedDbModel] = {
val feeds = sqlQuery(
s"""SELECT * FROM ${FeedDbModel.Table.name}
| WHERE ${FeedDbModel.Table.nameColumn} = '$feedName';
""".stripMargin, None, FeedDbModel.apply)
feeds.headOption
}
override def addFeed(feed: FeedDbModel)(implicit jdbcContext: JdbcContext): Unit = {
val titleData = feed.title.orNull
sqlUpdatePepared(
s"""INSERT INTO ${FeedDbModel.Table.name} (${FeedDbModel.Table.nameColumn}, ${FeedDbModel.Table.titleColumn})
|VALUES (?, ?);
""".stripMargin, feed.name, titleData)
}
override def fetchFeedEntries(entryTableName: String, start: Long, count: Int, ascending: Boolean)(implicit jdbcContext: JdbcContext): List[EntryDbModel] = {
val (comparator, direction) = if (ascending) (">=", "ASC") else ("<=", "DESC")
sqlQuery(
s"""SELECT * FROM $entryTableName
|WHERE ${EntryDbModel.Table.idColumn} $comparator $start ORDER BY ${EntryDbModel.Table.idColumn} $direction;
""".stripMargin,
Some(count),
EntryDbModel.apply
)
}
override def fetchMostRecentFeedEntries(entryTableName: String, count: Int)(implicit jdbcContext: JdbcContext): List[EntryDbModel] = {
sqlQuery(
s"""SELECT * FROM $entryTableName
|ORDER BY ${EntryDbModel.Table.idColumn} DESC;
""".stripMargin,
Some(count),
EntryDbModel.apply
)
}
override def addFeedEntry(entryTableName: String, entryData: EntryDbModel)(implicit jdbcContext: JdbcContext): Unit = {
val preparedSql =
s"""INSERT INTO $entryTableName (${EntryDbModel.Table.uuidColumn}, ${EntryDbModel.Table.valueColumn}, ${
EntryDbModel.Table.timestampColumn
})
|VALUES (?,?,?);
""".stripMargin
sqlUpdatePepared(preparedSql, entryData.uuid, entryData.value, entryData.timestamp)
}
/**
* Fetch the largest entry ID from the database.
*
* @param entryTableName The name of the entry table.
* @param jdbcContext The JDBC context to use.
* @return The largest entry id for a given entry table, or -1 if the entry table is empty.
*/
override def fetchMaxEntryId(entryTableName: String)(implicit jdbcContext: JdbcContext): Long = {
// maxList is a list with one single element
// - if table is empty, 'max' == null and maxList will have one null element
// - if table is non empty, maxList will have one element with the computed max
// therefore, to avoid a List with a null object, we map it to a List[Option[Long]]
val maxList = sqlQuery[Option[Long]](
s"SELECT max(${EntryDbModel.Table.idColumn}) as max FROM $entryTableName;",
None,
rs => Option(rs.getLong("max"))
)
maxList.headOption.flatten.getOrElse(-1)
}
override def fetchEntryCountLowerThan(entryTableName: String, sequenceNo: Long, inclusive: Boolean)(implicit jdbcContext: JdbcContext): Long = {
val comparator = if (inclusive) "<=" else "<"
val countList = sqlQuery[Long](
s"""SELECT count(*) as total FROM $entryTableName
|WHERE ${EntryDbModel.Table.idColumn} $comparator $sequenceNo;
""".stripMargin,
None,
_.getLong("total")
)
countList.headOption.getOrElse(0)
}
}
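// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file, of the Option wrapping
// used in fetchMaxEntryId above: the single-row `max(...)` result, which may
// be SQL NULL, is lifted into Option so an empty table yields -1 rather than
// a null value.
// ---------------------------------------------------------------------------
object MaxEntryIdSketch {
  def maxOrDefault(maxList: List[Option[Long]]): Long =
    maxList.headOption.flatten.getOrElse(-1L)

  // maxOrDefault(List(Some(42L))) == 42L  (non-empty table)
  // maxOrDefault(List(None))      == -1L  (empty table: max(...) was NULL)
}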
|
joachimvda/atomium
|
modules/server-jdbc/src/main/scala/be/wegenenverkeer/atomium/server/jdbc/PostgresDialect.scala
|
Scala
|
mit
| 3,955
|
/*
* Created on 2010/11/04
* Copyright (c) 2010-2014, Wei-ju Wu.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Wei-ju Wu nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package org.zmpp.glulx
import java.util.logging._
class ChannelIOSystem(vm: GlulxVM, rock: Int) extends IOSystem(vm, rock) {
def id = 20
def streamChar(c: Char) {
logger.info("streamChar(%c)".format(c))
}
def streamUniChar(c: Int) {
logger.info("streamUniChar(%c)".format(c.asInstanceOf[Char]))
}
// streamstr actions
def handleChar8(c: Char, inBetween: Boolean, currentStreamByte: Int,
currentStreamBit: Int) = {
logger.info("handleChar8(%c)".format(c))
StreamStrState.Continue
}
def handleChar32(c: Int, inBetween: Boolean, currentStreamByte: Int,
currentStreamBit: Int) = {
logger.info("handleChar32(%d)".format(c))
StreamStrState.Continue
}
def handleHuffmanCString(nodeAddr: Int,
currentStreamByte: Int, currentStreamBit: Int,
inBetween: Boolean): StreamStrState = {
logger.info("handleHuffmanCString(%04x)".format(nodeAddr))
StreamStrState.Continue
}
def handleHuffmanUnicodeString(nodeAddr: Int,
currentStreamByte: Int, currentStreamBit: Int,
inBetween: Boolean): StreamStrState = {
logger.info("handleHuffmanUnicodeString(%04x)".format(nodeAddr))
StreamStrState.Continue
}
}
|
weiju/zmpp2
|
zmpp-glulx/src/main/scala/org/zmpp/glulx/ChannelIO.scala
|
Scala
|
bsd-3-clause
| 2,884
|
/*
*
* * Copyright 2014 websudos ltd.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package com.websudos.phantom.testing
import java.io.IOException
import java.net.ServerSocket
import org.apache.commons.io.IOUtils
import org.slf4j.LoggerFactory
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{ExecutionContext, blocking}
import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import org.scalatest._
import org.scalatest.concurrent.{AsyncAssertions, ScalaFutures}
import com.datastax.driver.core.Session
import com.twitter.util.NonFatal
import com.websudos.phantom.zookeeper.{DefaultZookeeperConnector, ZookeeperInstance}
private[testing] object CassandraStateManager {
val logger = LoggerFactory.getLogger("com.websudos.phantom.testing")
private[this] def isPortAvailable(port: Int): Boolean = {
try {
new ServerSocket(port).close() // binding succeeded, so the port is free; release it immediately
logger.info(s"Port $port available")
true
} catch {
case ex: IOException => {
logger.info(s"Port $port not available")
false
}
}
}
/**
* This does a dummy check to see if Cassandra is started.
* It checks for default ports for embedded Cassandra and local Cassandra.
* @return A boolean saying if Cassandra is started.
*/
def isEmbeddedCassandraRunning: Boolean = {
!isPortAvailable(9142)
}
def isLocalCassandraRunning: Boolean = {
!isPortAvailable(9042)
}
def cassandraRunning(): Boolean = {
try {
val runtime = Runtime.getRuntime
val p1 = runtime.exec("ps -ef")
val input = p1.getInputStream
val p2 = runtime.exec("grep cassandra")
val output = p2.getOutputStream
IOUtils.copy(input, output)
output.close(); // signals grep to finish
val result = IOUtils.readLines(p2.getInputStream)
result.size() > 1
} catch {
case NonFatal(e) => false
}
}
/**
* This checks whether either the default embedded Cassandra port or the default local Cassandra port is in use.
* @return A boolean saying if Cassandra appears to be started on either port.
*/
def isCassandraStarted: Boolean = {
!isPortAvailable(9042) || !isPortAvailable(9142)
}
}
private[testing] object ZooKeeperManager {
lazy val zkInstance = new ZookeeperInstance()
private[this] var isStarted = false
def start(): Unit = Lock.synchronized {
if (!isStarted) {
zkInstance.start()
isStarted = true
}
}
}
private[testing] object Lock
trait CassandraSetup {
/**
* This method tries to check if a local Cassandra instance is found and if not start an embedded version.
* For the time being, the detection mechanism is not completely reliable: we have yet to reach the sweet spot of killing Cassandra and stale JVMs, and we
* also cannot reliably detect a running Cassandra cluster using the above methods.
*
* This improved method (in 1.4.1) will try to perform both a port and process check before starting Cassandra in embedded mode.
*/
def setupCassandra(): Unit = {
Lock.synchronized {
blocking {
if (!(CassandraStateManager.cassandraRunning() || CassandraStateManager.isCassandraStarted)) {
try {
CassandraStateManager.logger.info("Starting Cassandra in Embedded mode.")
EmbeddedCassandraServerHelper.mkdirs()
} catch {
case NonFatal(e) => {
CassandraStateManager.logger.error(e.getMessage)
}
}
EmbeddedCassandraServerHelper.startEmbeddedCassandra("cassandra.yaml")
} else {
CassandraStateManager.logger.info("Cassandra is already running.")
}
}
}
}
}
trait TestZookeeperConnector extends DefaultZookeeperConnector with CassandraSetup {
val keySpace = "phantom"
ZooKeeperManager.start()
}
trait CassandraTest extends ScalaFutures
with Matchers with Assertions
with AsyncAssertions with CassandraSetup
with BeforeAndAfterAll {
self : BeforeAndAfterAll with Suite =>
implicit def session: Session
implicit lazy val context: ExecutionContext = global
override def beforeAll() {
super.beforeAll()
setupCassandra()
}
}
trait BaseTest extends FlatSpec with CassandraTest with TestZookeeperConnector
trait FeatureBaseTest extends FeatureSpec with CassandraTest with TestZookeeperConnector
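// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: the same combined
// process + port check that setupCassandra() uses to decide whether the
// embedded server needs to be booted. The object name is made up.
// ---------------------------------------------------------------------------
private[testing] object CassandraDetectionSketch {
  def embeddedServerNeeded: Boolean =
    !(CassandraStateManager.cassandraRunning() || CassandraStateManager.isCassandraStarted)
}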
|
nosheenzaza/phantom-data-centric
|
phantom-testing/src/main/scala/com/websudos/phantom/testing/BaseTest.scala
|
Scala
|
gpl-2.0
| 4,821
|
package com.sksamuel.elastic4s
import org.elasticsearch.action.get.GetResponse
import org.elasticsearch.client.{Client, Requests}
import org.elasticsearch.index.VersionType
import org.elasticsearch.index.get.GetField
import org.elasticsearch.search.fetch.source.FetchSourceContext
import scala.concurrent.Future
import scala.language.implicitConversions
/** @author Stephen Samuel */
trait GetDsl {
class GetWithIdExpectsFrom(id: String) {
def from(index: IndexAndTypes): GetDefinition = new GetDefinition(index, id)
}
implicit object GetDefinitionExecutable extends Executable[GetDefinition, GetResponse, RichGetResponse] {
override def apply(c: Client, t: GetDefinition): Future[RichGetResponse] = {
injectFutureAndMap(c.get(t.build, _))(RichGetResponse)
}
}
}
case class GetDefinition(indexTypes: IndexAndTypes, id: String) {
private val _builder = Requests.getRequest(indexTypes.index).`type`(indexTypes.types.headOption.orNull).id(id)
def build = _builder
def fetchSourceContext(context: Boolean) = {
_builder.fetchSourceContext(new FetchSourceContext(context))
this
}
def fetchSourceContext(context: FetchSourceContext) = {
_builder.fetchSourceContext(context)
this
}
def fields(fs: String*): GetDefinition = fields(fs)
def fields(fs: Iterable[String]): GetDefinition = {
_builder.fields(fs.toSeq: _*)
this
}
def ignoreErrorsOnGeneratedFields(ignoreErrorsOnGeneratedFields: Boolean) = {
_builder.ignoreErrorsOnGeneratedFields(ignoreErrorsOnGeneratedFields)
this
}
def parent(p: String) = {
_builder.parent(p)
this
}
def preference(pref: Preference): GetDefinition = preference(pref.elastic)
def preference(pref: String): GetDefinition = {
_builder.preference(pref)
this
}
def realtime(r: Boolean) = {
_builder.realtime(r)
this
}
def refresh(refresh: Boolean) = {
_builder.refresh(refresh)
this
}
def routing(r: String) = {
_builder.routing(r)
this
}
def version(version: Long) = {
_builder.version(version)
this
}
def versionType(versionType: VersionType) = {
_builder.versionType(versionType)
this
}
}
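// Usage sketch (hypothetical values): GetDefinition is a mutable builder around the underlying
// GetRequest, so calls can be chained before the executable invokes `build`, e.g.
//   GetDefinition(someIndexAndTypes, "42").fields("name", "age").realtime(true).routing("r1")
// where `someIndexAndTypes` is an IndexAndTypes value assumed to be in scope.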
case class RichGetResponse(original: GetResponse) extends AnyVal {
import scala.collection.JavaConverters._
@deprecated("use field(name)", "2.0.0")
def getField(name: String): GetField = field(name)
def field(name: String): GetField = original.getField(name)
def fieldOpt(name: String): Option[GetField] = Option(field(name))
@deprecated("use fields", "2.0.0")
def getFields = original.getFields
def fields: Map[String, GetField] = original.getFields.asScala.toMap
@deprecated("use id", "2.0.0")
def getId: String = id
def id: String = original.getId
@deprecated("use index", "2.0.0")
def getIndex: String = index
def index: String = original.getIndex
def source = original.getSource
def sourceAsBytes = original.getSourceAsBytes
def sourceAsString: String = original.getSourceAsString
@deprecated("use `type`", "2.0.0")
def getType: String = `type`
def `type`: String = original.getType
@deprecated("use version", "2.0.0")
def getVersion: Long = version
def version: Long = original.getVersion
def isExists: Boolean = original.isExists
def isSourceEmpty: Boolean = original.isSourceEmpty
def iterator: Iterator[GetField] = original.iterator.asScala
}
case class RichGetField(original: GetField) extends AnyVal {
import scala.collection.JavaConverters._
def name: String = original.getName
def value: AnyRef = original.getValue
def values: Seq[AnyRef] = original.getValues.asScala
def isMetadataField: Boolean = original.isMetadataField
def iterator: Iterator[AnyRef] = original.iterator.asScala
}
|
k4200/elastic4s
|
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/GetDsl.scala
|
Scala
|
apache-2.0
| 3,767
|
/*
* Copyright 2017-2018 Azad Bolour
* Licensed under GNU Affero General Public License v3.0 -
* https://github.com/azadbolour/boardgame/blob/master/LICENSE.md
*/
package com.bolour.boardgame.scala.server.domain
import com.bolour.boardgame.scala.common.domain.{Piece, PlayPiece}
import com.bolour.plane.scala.domain.{Axis, Point}
import org.slf4j.LoggerFactory
import scala.collection.immutable.Nil
import scala.collection.mutable
import com.bolour.language.scala.domain.WordDictionary
import com.bolour.plane.scala.domain.Axis.Axis
import com.bolour.boardgame.scala.server.util.WordUtil
import com.bolour.boardgame.scala.server.util.WordUtil.{DictWord, LetterCombo, NumBlanks}
/**
* StripMatcher finds the best word match to a given board.
*
* A match is the tuple (word, strip) where the word exists in the
* dictionary and can legally be played onto the strip: that is,
* the blanks of the strip can be filled by letters in the tray,
* and all crosswords formed by playing the word exist in
* the dictionary.
*
* The algorithm first groups the strips of the board by the "value"
* of a play on them. The value of a strip is a positive integer
* that reflects the expected added score by playing a word on that strip.
* So the algorithm checks for matches in decreasing order of strip value,
* stopping as soon as a match is found.
*
* The naive valuation used here initially simply uses the number of
* blank characters within the strip, in the hope that in general
* the more characters played to form a word the higher the score.
* For now, this naive valuation scheme works reasonably well.
* To experiment with different valuation schemes, sub-class this
* trait and override the "stripValuation" function.
*
* Additionally within each group of equally-valued strips, the strips
* of the group are further grouped by the number of blanks appearing
* in each. Then for each sub-group of a given blank-count,
* all combinations of tray letters of size blank-count are
* tried against all strips of the sub-group.
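*
* Usage sketch (hypothetical values): mix the trait into an object that supplies the
* abstract members and request the best match, e.g.
* {{{
* val matcher = new StripMatcher {
*   override def dictionary = myDictionary // assumed to be in scope
*   override def board = myBoard
*   override def tray = myTray
* }
* val playPieces: List[PlayPiece] = matcher.bestMatch()
* }}}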
*/
trait StripMatcher {
// abstract members
def dictionary: WordDictionary
def board: Board
def tray: Tray
// end abstract
import StripMatcher._
protected[this] val logger = LoggerFactory.getLogger(this.getClass)
protected[this] val dimension = board.dimension
protected[this] val trayLetters = tray.pieces.map(_.value).mkString
protected[this] val trayCombosByLength = WordUtil.computeCombosGroupedByLength(trayLetters)
// TODO. Improve strip valuation by summing the point values of its blanks.
protected[this] val stripValuation: Strip => StripValue = _.numBlanks
protected[this] val playableStripsGroupedByValueAndBlanks: Map[StripValue, Map[NumBlanks, List[Strip]]] =
groupPlayableStrips(stripValuation)
protected[this] val allValues = playableStripsGroupedByValueAndBlanks.keySet
protected[this] val maxStripValue = if (allValues.isEmpty) 0 else allValues.max
protected[this] val crossWordFinder = new CrossWordFinder(board)
/**
* Main entry point - find the best match if any (empty list means none found).
*/
def bestMatch(): List[PlayPiece] = {
bestMatchUpToValue(maxStripValue) match {
case None => Nil
case Some((strip, word)) => matchedStripPlayPieces(strip, word)
}
}
/**
* A match is represented internally as the tuple (strip, word)
* meaning the word matches (and is to be played on) the strip -
* convert the match to a list of play pieces (needed by clients).
*/
def matchedStripPlayPieces(strip: Strip, word: String): List[PlayPiece] = {
// Buffer used to peel off played letters from tray pieces - leaving tray immutable.
val restTrayPieces: mutable.Buffer[Piece] = tray.pieces.toBuffer
def removeTrayChar(letter: Char) = {
restTrayPieces.remove(restTrayPieces.indexWhere(_.value == letter))
}
def toPlayPiece(stripOffset: Int) = {
val point = strip.point(stripOffset)
val stripLetter = strip.content(stripOffset)
val wordLetter = word(stripOffset)
val moved = WordUtil.isBlankChar(stripLetter)
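// A blank slot in the strip means the letter is moved from the tray; otherwise the piece already on the board is reused.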
val piece = if (moved) removeTrayChar(wordLetter) else board.getPiece(point).get
PlayPiece(piece, point, moved)
}
val stripOffsets = (0 until strip.content.length).toList
stripOffsets.map(offset => toPlayPiece(offset))
}
/**
* Find the best word match on the board among all matches whose
* values are less than or equal to a given value.
*/
def bestMatchUpToValue(maxValue: StripValue): StripMatch = {
if (maxValue <= 0)
return None
findMatchForValue(maxValue) orElse bestMatchUpToValue(maxValue - 1)
}
def findMatchForValue(value: StripValue): StripMatch = {
// TODO. Try not to special-case empty board here. Only in getting playable strips.
if (board.isEmpty)
return findMatchForValueOnEmptyBoard(value)
for /* option */ {
stripsByBlanks <- playableStripsGroupedByValueAndBlanks.get(value)
optimal <- findMatchForStrips(stripsByBlanks)
} yield optimal
}
/**
* First match on empty board is special - no anchor.
* For the first play we use an all-blank center strip of the given length.
*/
def findMatchForValueOnEmptyBoard(len: StripValue): StripMatch = {
for /* option */ {
combos <- trayCombosByLength.get(len)
// _ = println(combos)
optimal <- findMatchForStrip(emptyCenterStrip(len), combos)
} yield optimal
}
private def emptyCenterStrip(len: StripValue) = {
val center = dimension / 2
val mid = len / 2
val content = List.fill(len)(' ').mkString
val strip = Strip(Axis.X, center, center - mid, center + (len - mid) - 1, content)
strip
}
/**
* Find the best match for all strips of a given length - they are indexed by the
* number of blank slots. TODO. Should length be a parameter?
*/
def findMatchForStrips(stripsByBlanks: Map[NumBlanks, List[Strip]]): StripMatch = {
/*
* For each set of strips with a given number of blanks, get the
* corresponding combos of tray letters that would fill their blanks exactly.
* The result is a list of (numBlanks, List[Strip], List[LetterCombo]).
* Sort that list in descending order on the number of blanks -
* making it possible to prioritize matches by the number of filled blanks.
*/
val groupedStripsAndCombos = stripsByBlanks.toList.map {
case (blanks, strips) => (blanks, strips, trayCombosByLength(blanks))
}
val sortedGroups = groupedStripsAndCombos.sortWith(_._1 > _._1)
findMatchForStripsAndCombosGroupedByBlanks(sortedGroups)
}
/**
* Find a match for corresponding strips and tray combos
* grouped by the number of blanks in strips and equivalently by the
* length of combos in tray, so that the tray combos may exactly
* fill in the blanks of the corresponding strips.
*
* The groups are ordered in decreasing order of the number of blanks.
* The first match found in that order is returned - otherwise recurse.
*/
private def findMatchForStripsAndCombosGroupedByBlanks(
groupedStripsAndCombos: List[(NumBlanks, List[Strip], List[LetterCombo])]): StripMatch =
groupedStripsAndCombos match {
case Nil => None
case (blanks, strips, combos) :: groups =>
val headMatch = findMatchForCorrespondingStripsAndCombos(blanks, strips, combos)
headMatch match {
case Some(_) => headMatch
case None => findMatchForStripsAndCombosGroupedByBlanks(groups)
}
}
/**
* Find a match for a set of strips and a set of tray combos
* each of which can exactly fill in the blanks of each of the strips.
*
* @param blanks The number of blanks in each strip and the number
* of letters in each combo. // TODO. Unnecessary??
* @param strips List of strips to try.
* @param combos List of combos to try.
*/
private def findMatchForCorrespondingStripsAndCombos(
blanks: NumBlanks, strips: List[Strip], combos: List[LetterCombo]): StripMatch =
strips match {
case Nil => None
case strip :: rest =>
val bestStripMatch = findMatchForStrip(strip, combos)
bestStripMatch match {
case Some(_) => bestStripMatch
case None => findMatchForCorrespondingStripsAndCombos(blanks, rest, combos)
}
}
/**
* Given a list of tray letter combinations each of which can fill in
* the blank slots of a strip exactly, find a combination that when
* played on the strip produces a legal play.
*/
def findMatchForStrip(strip: Strip, combos: List[LetterCombo]): StripMatch = {
combos match {
case Nil => None
case combo :: restCombos =>
val wordCombo = WordUtil.mergeLetterCombos(strip.letters, combo)
val words = dictionary.permutations(wordCombo)
// TODO. Find all fitting words and check each for crossword compliance.
val fittingWords = strip.findFittingWords(words)
val crossCheckedFittingWords = fittingWords.filter { word =>
crossWordFinder.findStripCrossWords(strip, word).forall(crossWord => dictionary hasWord crossWord)
}
// strip.findFittingWord(words) match {
crossCheckedFittingWords.headOption match {
case None => findMatchForStrip(strip, restCombos)
case Some(word) => Some((strip, word))
}
}
}
def crossings(strip: Strip, word: String): List[String] =
crossWordFinder.findStripCrossWords(strip, word)
def groupPlayableStrips(valuation: Strip => Int): Map[StripValue, Map[NumBlanks, List[Strip]]] = {
val conformantStrips = if (board.isEmpty)
board.playableEmptyStrips(tray.pieces.length)
else board.playableStrips(tray.pieces.length)
val stripsByValue = conformantStrips.groupBy(valuation)
stripsByValue.mapValues(_.groupBy(_.numBlanks))
}
}
object StripMatcher {
/**
* The integer "value" associated with each strip.
* Optimality is based on this value.
*
* Values start at 1 - and 0 is the basis of recursion
* for decreasing values.
*/
type StripValue = Int
/**
* A possible match found on a strip - if one exists, it includes
* the strip and the matching word.
*/
type StripMatch = Option[(Strip, DictWord)]
private def findDenselyEnclosedBlanks(board: Board, maxBlanks: Int, axis: Axis) = {
def allDense(strips: List[Strip]) = strips forall { _.isDense(maxBlanks) }
val blanksToStrips = board.playableEnclosingStripsOfBlankPoints(axis)
blanksToStrips filter { case (_, strips) => allDense(strips)}
}
def findAndSetBoardBlackPoints(dictionary: WordDictionary)(board: Board): (Board, List[Point]) = {
val directDeadPoints = StripMatcher.findBlackPoints(board, dictionary).toList
val newBoard = board.setBlackPoints(directDeadPoints)
directDeadPoints match {
case Nil => (newBoard, directDeadPoints)
case _ =>
val (b, moreDeadPoints) = findAndSetBoardBlackPoints(dictionary)(newBoard)
val allDeadPoints = directDeadPoints ++ moreDeadPoints
(b, allDeadPoints)
}
}
val Caps = 'A' to 'Z'
/**
* Find blank points that can never be covered.
*
* The algorithm uses a precomputed set of masked words. A masked word
* is a word some of whose letters have been changed to blanks. If a strip
* is at all playable, then its content as a masked word must exist in the
* masked words index. However, we do not store all masked versions of
* a word: only those that are "dense", that is, those that only have a few
* blanks.
*
* The dictionary contains all masked words with up to maxMaskedLetters blanks.
* We find the points that are covered only by dense strips of at most maxMaskedLetters + 1 blanks.
* Then we try all letters from A to Z on each such point. The resulting strips covering
* that point now have at most maxMaskedLetters blanks, and their content can be looked up
* as masked words in the dictionary.
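*
* Worked example (hypothetical): with maxMaskedLetters = 2, a blank point enclosed only by a
* dense strip whose content is "B__D" (two blanks) is a candidate. Trying the letter 'A' at the
* point yields the content "BA_D", which has one remaining blank and is looked up as a masked
* word; the lookup succeeds if some dictionary word such as "BAND" or "BALD" matches. Only if
* every letter from A to Z fails this check (subject to the anchoring conditions in
* noMatchInStripsForPoint) is the point marked as black.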
*/
def findBlackPoints(board: Board, dictionary: WordDictionary): Set[Point] = {
val maxBlanks = dictionary.maxMaskedLetters + 1
val hEnclosures: Map[Point, List[Strip]] =
findDenselyEnclosedBlanks(board, maxBlanks, Axis.X)
val vEnclosures: Map[Point, List[Strip]] =
findDenselyEnclosedBlanks(board, maxBlanks, Axis.Y)
val points = hEnclosures.keySet ++ vEnclosures.keySet
def stripListForPoint(point: Point): List[(Axis, List[Strip])] = {
val hStrips: List[Strip] = hEnclosures.getOrElse(point, Nil)
val vStrips: List[Strip] = vEnclosures.getOrElse(point, Nil)
List((Axis.X, hStrips), (Axis.Y, vStrips))
}
points filter { point =>
val stripList = stripListForPoint(point)
Caps forall { ch => noMatchInStripsForPoint(board, dictionary, point, ch, stripList)}
}
}
type Anchored = Boolean
type MaskedStripContentExists = Boolean
/**
* For a blank point that is covered only by dense strips in some direction (X or Y),
* determine whether, if the given letter were played at that point, no word could cover it.
*
* @param board The existing board.
* @param dictionary The word dictionary.
* @param point The point.
* @param letter The desired letter to cover the point.
* @param enclosingDenseStrips
* A list of two 2-tuples, one each for the X and Y axis,
* each providing the list of dense strips covering the point.
* If the strip list is empty, some non-dense strip in the given
* direction may cover the point. If the strip list is non-empty,
* we know that only the given strips, all of which are dense,
* cover the point in that direction.
* @return True if we know for sure that no word can be played that
* covers the given point with the given letter.
*/
def noMatchInStripsForPoint(board: Board, dictionary: WordDictionary,
point: Point, letter: Char, enclosingDenseStrips: List[(Axis, List[Strip])]): Boolean = {
val statuses: List[(Anchored, MaskedStripContentExists)] =
enclosingDenseStrips map {
case (axis, strips) =>
val anchored = board.hasRealNeighbor(point, axis)
val filledContents = strips map { _.fillBlankInStrip(point, letter) }
// If the point has no dense enclosing strips, for all we know some non-dense
// strip can cover it. So pretend that it is covered by a match.
val filledContentExists = filledContents match {
case Nil => true
case _ => filledContents exists dictionary.hasMaskedWord
}
(anchored, filledContentExists)
}
val (anchored1, exists1) = statuses(0)
val (anchored2, exists2) = statuses(1)
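// No play can cover the point if neither axis admits a matching masked word, or if an axis is
// anchored (a letter placed here necessarily extends an existing word on that axis) yet has no
// matching masked word on that axis.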
!exists1 && !exists2 || !exists1 && anchored1 || !exists2 && anchored2
}
}
|
azadbolour/boardgame
|
scala-server/app/com/bolour/boardgame/scala/server/domain/StripMatcher.scala
|
Scala
|
agpl-3.0
| 14,950
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.columnar
import org.apache.commons.lang3.StringUtils
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.catalyst.plans.logical
import org.apache.spark.sql.catalyst.plans.logical.{HintInfo, LogicalPlan, Statistics}
import org.apache.spark.sql.catalyst.util.truncatedString
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.LongAccumulator
/**
* CachedBatch is a cached batch of rows.
*
* @param numRows The total number of rows in this batch
* @param buffers The buffers for serialized columns
* @param stats The statistics of the columns in this batch
*/
private[columnar]
case class CachedBatch(numRows: Int, buffers: Array[Array[Byte]], stats: InternalRow)
case class CachedRDDBuilder(
useCompression: Boolean,
batchSize: Int,
storageLevel: StorageLevel,
@transient cachedPlan: SparkPlan,
tableName: Option[String])(
@transient private var _cachedColumnBuffers: RDD[CachedBatch] = null) {
val sizeInBytesStats: LongAccumulator = cachedPlan.sqlContext.sparkContext.longAccumulator
def cachedColumnBuffers: RDD[CachedBatch] = {
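// Lazily build the cached buffers once, using double-checked locking so that concurrent callers share the same RDD.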
if (_cachedColumnBuffers == null) {
synchronized {
if (_cachedColumnBuffers == null) {
_cachedColumnBuffers = buildBuffers()
}
}
}
_cachedColumnBuffers
}
def clearCache(blocking: Boolean = true): Unit = {
if (_cachedColumnBuffers != null) {
synchronized {
if (_cachedColumnBuffers != null) {
_cachedColumnBuffers.unpersist(blocking)
_cachedColumnBuffers = null
}
}
}
}
def withCachedPlan(cachedPlan: SparkPlan): CachedRDDBuilder = {
new CachedRDDBuilder(
useCompression,
batchSize,
storageLevel,
cachedPlan = cachedPlan,
tableName
)(_cachedColumnBuffers)
}
private def buildBuffers(): RDD[CachedBatch] = {
val output = cachedPlan.output
val cached = cachedPlan.execute().mapPartitionsInternal { rowIterator =>
new Iterator[CachedBatch] {
def next(): CachedBatch = {
val columnBuilders = output.map { attribute =>
ColumnBuilder(attribute.dataType, batchSize, attribute.name, useCompression)
}.toArray
var rowCount = 0
var totalSize = 0L
while (rowIterator.hasNext && rowCount < batchSize
&& totalSize < ColumnBuilder.MAX_BATCH_SIZE_IN_BYTE) {
val row = rowIterator.next()
// Added for SPARK-6082. This assertion can be useful for scenarios when something
// like Hive TRANSFORM is used. The external data generation script used in TRANSFORM
// may result in malformed rows, causing ArrayIndexOutOfBoundsException, which is somewhat
// hard to decipher.
assert(
row.numFields == columnBuilders.length,
s"Row column number mismatch, expected ${output.size} columns, " +
s"but got ${row.numFields}." +
s"\\nRow content: $row")
var i = 0
totalSize = 0
while (i < row.numFields) {
columnBuilders(i).appendFrom(row, i)
totalSize += columnBuilders(i).columnStats.sizeInBytes
i += 1
}
rowCount += 1
}
sizeInBytesStats.add(totalSize)
val stats = InternalRow.fromSeq(
columnBuilders.flatMap(_.columnStats.collectedStatistics))
CachedBatch(rowCount, columnBuilders.map { builder =>
JavaUtils.bufferToArray(builder.build())
}, stats)
}
def hasNext: Boolean = rowIterator.hasNext
}
}.persist(storageLevel)
cached.setName(
tableName.map(n => s"In-memory table $n")
.getOrElse(StringUtils.abbreviate(cachedPlan.toString, 1024)))
cached
}
}
object InMemoryRelation {
def apply(
useCompression: Boolean,
batchSize: Int,
storageLevel: StorageLevel,
child: SparkPlan,
tableName: Option[String],
logicalPlan: LogicalPlan): InMemoryRelation = {
val cacheBuilder = CachedRDDBuilder(useCompression, batchSize, storageLevel, child, tableName)()
new InMemoryRelation(child.output, cacheBuilder, logicalPlan.outputOrdering)(
statsOfPlanToCache = logicalPlan.stats)
}
def apply(cacheBuilder: CachedRDDBuilder, logicalPlan: LogicalPlan): InMemoryRelation = {
new InMemoryRelation(cacheBuilder.cachedPlan.output, cacheBuilder, logicalPlan.outputOrdering)(
statsOfPlanToCache = logicalPlan.stats)
}
}
case class InMemoryRelation(
output: Seq[Attribute],
@transient cacheBuilder: CachedRDDBuilder,
override val outputOrdering: Seq[SortOrder])(
statsOfPlanToCache: Statistics)
extends logical.LeafNode with MultiInstanceRelation {
override protected def innerChildren: Seq[SparkPlan] = Seq(cachedPlan)
override def doCanonicalize(): logical.LogicalPlan =
copy(output = output.map(QueryPlan.normalizeExprId(_, cachedPlan.output)),
cacheBuilder,
outputOrdering)(
statsOfPlanToCache)
override def producedAttributes: AttributeSet = outputSet
@transient val partitionStatistics = new PartitionStatistics(output)
def cachedPlan: SparkPlan = cacheBuilder.cachedPlan
override def computeStats(): Statistics = {
if (cacheBuilder.sizeInBytesStats.value == 0L) {
// Underlying columnar RDD hasn't been materialized, use the stats from the plan to cache.
// Note that we should drop the hint info here. We may cache a plan whose root node is a hint
// node. When we lookup the cache with a semantically same plan without hint info, the plan
// returned by cache lookup should not have hint info. If we lookup the cache with a
// semantically same plan with a different hint info, `CacheManager.useCachedData` will take
// care of it and retain the hint info in the lookup input plan.
statsOfPlanToCache.copy(hints = HintInfo())
} else {
Statistics(sizeInBytes = cacheBuilder.sizeInBytesStats.value.longValue)
}
}
def withOutput(newOutput: Seq[Attribute]): InMemoryRelation = {
InMemoryRelation(newOutput, cacheBuilder, outputOrdering)(statsOfPlanToCache)
}
override def newInstance(): this.type = {
new InMemoryRelation(
output.map(_.newInstance()),
cacheBuilder,
outputOrdering)(
statsOfPlanToCache).asInstanceOf[this.type]
}
override protected def otherCopyArgs: Seq[AnyRef] = Seq(statsOfPlanToCache)
override def simpleString: String =
s"InMemoryRelation [${truncatedString(output, ", ")}], ${cacheBuilder.storageLevel}"
}
|
mdespriee/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/InMemoryRelation.scala
|
Scala
|
apache-2.0
| 7,779
|
package com.openstudy.sbt
import java.lang.Runtime
import scala.collection.JavaConversions._
import org.scalatest._
import mock._
import org.mockito.Mockito._
import org.mockito.Matchers._
import java.io._
import _root_.sbt.{File => SbtFile, _}
import Keys.{baseDirectory, resourceDirectory, streams, target, _}
class SassCompilationSpec extends FunSpec with MockitoSugar {
describe("SassCompilation") {
describe("should execute the correct compass command") {
it("with force unspecified") {
val mockTaskStreams = mock[TaskStreams]
val mockBaseDirectory = mock[File]
val mockRuntime = mock[Runtime]
val mockProcess = mock[java.lang.Process]
val mockLogger = mock[Logger]
val environment: Map[String, String] = Map("bacon" -> "wakka", "apple" -> "2")
val testSassCompilation = new SassCompilation {
val runtime = mockRuntime
val systemEnvironment = environment
}
when(mockTaskStreams.log).thenReturn(mockLogger)
when(mockRuntime.exec(isA(classOf[Array[String]]), anyObject(), anyObject())).thenReturn(mockProcess)
when(mockProcess.waitFor).thenReturn(0)
testSassCompilation.doSassCompile(
mockTaskStreams,
mockBaseDirectory,
bucket = None,
force = false,
production = true
)
verify(mockRuntime).exec(
Array[String]("compass", "compile", "-e", "production"),
environment.map { case (key, value) => key + "=" + value }.toArray,
mockBaseDirectory
)
}
it("with force on") {
val mockTaskStreams = mock[TaskStreams]
val mockBaseDirectory = mock[File]
val mockRuntime = mock[Runtime]
val mockProcess = mock[java.lang.Process]
val mockLogger = mock[Logger]
val environment: Map[String, String] = Map("bacon" -> "wakka", "apple" -> "2")
val testSassCompilation = new SassCompilation {
val runtime = mockRuntime
val systemEnvironment = environment
}
when(mockTaskStreams.log).thenReturn(mockLogger)
when(mockRuntime.exec(isA(classOf[Array[String]]), anyObject(), anyObject())).thenReturn(mockProcess)
when(mockProcess.waitFor).thenReturn(0)
testSassCompilation.doSassCompile(
mockTaskStreams,
mockBaseDirectory,
bucket = None,
force = true,
production = true
)
verify(mockRuntime).exec(
Array[String]("compass", "compile", "-e", "production", "--force"),
environment.map { case (key, value) => key + "=" + value }.toArray,
mockBaseDirectory
)
}
it("with production off") {
val mockTaskStreams = mock[TaskStreams]
val mockBaseDirectory = mock[File]
val mockRuntime = mock[Runtime]
val mockProcess = mock[java.lang.Process]
val mockLogger = mock[Logger]
val environment: Map[String, String] = Map("bacon" -> "wakka", "apple" -> "2")
val testSassCompilation = new SassCompilation {
val runtime = mockRuntime
val systemEnvironment = environment
}
when(mockTaskStreams.log).thenReturn(mockLogger)
when(mockRuntime.exec(isA(classOf[Array[String]]), anyObject(), anyObject())).thenReturn(mockProcess)
when(mockProcess.waitFor).thenReturn(0)
testSassCompilation.doSassCompile(
mockTaskStreams,
mockBaseDirectory,
bucket = None,
force = false,
production = false
)
verify(mockRuntime).exec(
Array[String]("compass", "compile"),
environment.map { case (key, value) => key + "=" + value }.toArray,
mockBaseDirectory
)
}
}
it("should set the asset_domain env variable if bucket is defined") {
val mockTaskStreams = mock[TaskStreams]
val mockBaseDirectory = mock[File]
val mockRuntime = mock[Runtime]
val mockProcess = mock[java.lang.Process]
val mockLogger = mock[Logger]
val environment: Map[String, String] = Map("bacon" -> "wakka", "apple" -> "2", "asset_domain" -> "bacon")
val testSassCompilation = new SassCompilation {
val runtime = mockRuntime
val systemEnvironment = environment
}
when(mockTaskStreams.log).thenReturn(mockLogger)
when(mockRuntime.exec(isA(classOf[Array[String]]), anyObject(), anyObject())).thenReturn(mockProcess)
when(mockProcess.waitFor).thenReturn(0)
testSassCompilation.doSassCompile(
mockTaskStreams,
mockBaseDirectory,
bucket = Some("bacon"),
force = true,
production = true
)
verify(mockRuntime).exec(
Array[String]("compass", "compile", "-e", "production", "--force"),
environment.map { case (key, value) => key + "=" + value }.toArray,
mockBaseDirectory
)
}
it("should throw a RuntimeException if compass exits nonzero") {
val mockTaskStreams = mock[TaskStreams]
val mockBaseDirectory = mock[File]
val mockRuntime = mock[Runtime]
val mockProcess = mock[java.lang.Process]
val mockLogger = mock[Logger]
val environment: Map[String, String] = Map("bacon" -> "wakka", "apple" -> "2", "asset_domain" -> "bacon")
val testSassCompilation = new SassCompilation {
val runtime = mockRuntime
val systemEnvironment = environment
}
when(mockTaskStreams.log).thenReturn(mockLogger)
when(mockRuntime.exec(isA(classOf[Array[String]]), anyObject(), anyObject())).thenReturn(mockProcess)
when(mockProcess.waitFor).thenReturn(1)
intercept[RuntimeException] {
testSassCompilation.doSassCompile(
mockTaskStreams,
mockBaseDirectory,
bucket = Some("bacon"),
force = true,
production = true
)
}
}
}
}
|
Shadowfiend/sbt-resource-management
|
src/test/scala/com/openstudy/sbt/SassCompilationSpec.scala
|
Scala
|
mit
| 5,975
|
package com.transgee.ebook.pdf
private abstract class GraphicsOperations {
def showGlyph(): Unit
def appendRectangle(): Unit
def clip(): Unit
def moveTo(): Unit
def lineTo(): Unit
def curveTo(): Unit
def closePath(): Unit
def endPath(): Unit
def strokePath(): Unit
def fillPath(): Unit
def fillAndStrokePath(): Unit
def shadingFill(): Unit
}
|
zenkiezhu/scala-ebook-clipper
|
src/main/scala/com/transgee/ebook/pdf/GraphicsOperations.scala
|
Scala
|
apache-2.0
| 369
|
package net.sansa_stack.query.tests
import org.scalatest.Tag
object UnsupportedFeature extends Tag("net.sansa_stack.query.UnsupportedFeature")
object ConformanceTestSuite extends Tag("net.sansa_stack.query.ConformanceTestSuite")
|
SANSA-Stack/SANSA-RDF
|
sansa-query/sansa-query-tests/src/main/scala/net/sansa_stack/query/tests/Tags.scala
|
Scala
|
apache-2.0
| 231
|
package codebook.runtime.server
import codebook.runtime.protocol.{OptimizedRequestQueue, Request, RequestQueue}
import scala.concurrent.duration.Duration
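/**
* Throttles request execution for a user service actor: a request popped from the queue is only
* dispatched when at least `minimumExecutionInterval` plus the accumulated penalty has elapsed
* since the last execution; otherwise a "RequestInterval" timer is set to fire once the
* remaining interval has passed.
*/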
trait LimitedIntervalFeeder[S,D] { _ : UserServiceActorBase[S,D] =>
private var _lastExecution:Long = 0L
private var _penalty = 0L
private var _waiting = false
val minimumExecutionInterval:Long = 200L
val requestQueue:RequestQueue = new OptimizedRequestQueue {}
private case object Fire
final def penalty_=(d:Long):Unit = _penalty += math.abs(d)
final def penalty = _penalty
private def execute():Unit = {
val t = System.nanoTime() / (1000L * 1000L)
val d = minimumExecutionInterval + _penalty
if (t - _lastExecution >= d) {
requestQueue.pop.foreach {
req =>
self ! req
_lastExecution = t
_penalty = 0
}
} else {
if (!_waiting) {
val ts = _lastExecution + d - t
setTimer("RequestInterval",Fire,Duration(ts,"ms"))
_waiting = true
}
}
}
override def pushRequest(req: Request): Unit = {
requestQueue.push(req)
execute()
}
def intervalExecution:StateFunction = {
case Event(Fire,_) =>
execute()
_waiting = false
stay()
}
addUncategorizedHandlers(intervalExecution)
}
|
RustyRaven/CodebookRuntime
|
scala/src/main/scala/codebook/runtime/server/LimitedIntervalFeeder.scala
|
Scala
|
mit
| 1,325
|
package mesosphere.marathon
import akka.Done
import mesosphere.AkkaUnitTest
import mesosphere.marathon.test.SettableClock
import mesosphere.marathon.core.condition.Condition
import mesosphere.marathon.core.health.HealthCheckManager
import mesosphere.marathon.core.instance.{ Instance, TestInstanceBuilder }
import mesosphere.marathon.core.launcher.impl.LaunchQueueTestHelper
import mesosphere.marathon.core.launchqueue.LaunchQueue
import mesosphere.marathon.core.task.termination.{ KillReason, KillService }
import mesosphere.marathon.core.task.tracker.InstanceTracker
import mesosphere.marathon.core.task.tracker.InstanceTracker.{ InstancesBySpec, SpecInstances }
import mesosphere.marathon.state.{ AppDefinition, PathId, RootGroup, Timestamp }
import mesosphere.marathon.storage.repository.GroupRepository
import mesosphere.marathon.stream.Implicits._
import mesosphere.marathon.test.MarathonTestHelper
import org.apache.mesos.SchedulerDriver
import org.mockito.Mockito.verifyNoMoreInteractions
import org.scalatest.concurrent.PatienceConfiguration
import org.scalatest.time.{ Millis, Span }
import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
class SchedulerActionsTest extends AkkaUnitTest {
"SchedulerActions" should {
"Task reconciliation sends known running and staged tasks and empty list" in {
val f = new Fixture
val app = AppDefinition(id = PathId("/myapp"))
val rootGroup: RootGroup = RootGroup(apps = Map((app.id, app)))
val runningInstance = TestInstanceBuilder.newBuilder(app.id).addTaskRunning().getInstance()
val stagedInstance = TestInstanceBuilder.newBuilder(app.id).addTaskStaged().getInstance()
val stagedInstanceWithSlaveId = TestInstanceBuilder.newBuilder(app.id)
.addTaskWithBuilder().taskStaged().build()
.withAgentInfo(agentId = Some("slave 1"))
.getInstance()
val instances = Seq(runningInstance, stagedInstance, stagedInstanceWithSlaveId)
f.instanceTracker.instancesBySpec() returns Future.successful(InstancesBySpec.of(SpecInstances.forInstances(app.id, instances)))
f.groupRepo.root() returns Future.successful(rootGroup)
f.scheduler.reconcileTasks(f.driver).futureValue(5.seconds)
val statuses = Set(
runningInstance,
stagedInstance,
stagedInstanceWithSlaveId
).flatMap(_.tasksMap.values).flatMap(_.status.mesosStatus)
verify(f.driver, withinTimeout()).reconcileTasks(statuses.asJavaCollection)
verify(f.driver).reconcileTasks(java.util.Arrays.asList())
}
"Task reconciliation only one empty list, when no tasks are present in Marathon" in {
val f = new Fixture
f.instanceTracker.instancesBySpec() returns Future.successful(InstancesBySpec.empty)
f.groupRepo.root() returns Future.successful(RootGroup())
f.scheduler.reconcileTasks(f.driver).futureValue
verify(f.driver, times(1)).reconcileTasks(java.util.Arrays.asList())
}
"Kill orphaned task" in {
val f = new Fixture
val app = AppDefinition(id = PathId("/myapp"))
val orphanedApp = AppDefinition(id = PathId("/orphan"))
val instance = TestInstanceBuilder.newBuilder(app.id).addTaskRunning().getInstance()
val orphanedInstance = TestInstanceBuilder.newBuilder(orphanedApp.id).addTaskRunning().getInstance()
val tasksOfApp = SpecInstances.forInstances(app.id, Seq(instance))
val tasksOfOrphanedApp = SpecInstances.forInstances(orphanedApp.id, Seq(orphanedInstance))
f.instanceTracker.instancesBySpec() returns Future.successful(InstancesBySpec.of(tasksOfApp, tasksOfOrphanedApp))
val rootGroup: RootGroup = RootGroup(apps = Map((app.id, app)))
f.groupRepo.root() returns Future.successful(rootGroup)
f.scheduler.reconcileTasks(f.driver).futureValue(5.seconds)
verify(f.killService, times(1)).killInstance(orphanedInstance, KillReason.Orphaned)
}
"Scale up correctly in case of lost tasks (active queue)" in {
val f = new Fixture
Given("An active queue and unreachable tasks")
val app = MarathonTestHelper.makeBasicApp().copy(instances = 15)
val unreachableInstances = Seq.fill(5)(TestInstanceBuilder.newBuilder(app.id).addTaskUnreachableInactive().getInstance())
val runningInstances = Seq.fill(10)(TestInstanceBuilder.newBuilder(app.id).addTaskRunning().getInstance())
f.instanceTracker.specInstances(eq(app.id))(any[ExecutionContext]) returns Future.successful(unreachableInstances ++ runningInstances)
f.queue.get(eq(app.id)) returns Some(LaunchQueueTestHelper.zeroCounts)
When("the app is scaled")
f.scheduler.scale(app).futureValue
Then("5 tasks should be placed onto the launchQueue")
verify(f.queue, times(1)).add(app, 5)
}
"Scale up with some tasks in launch queue" in {
val f = new Fixture
Given("an app with 10 instances and an active queue with 4 tasks")
val app = MarathonTestHelper.makeBasicApp().copy(instances = 10)
f.queue.get(app.id) returns Some(LaunchQueueTestHelper.instanceCounts(instancesLeftToLaunch = 4, finalInstanceCount = 10))
f.instanceTracker.specInstances(app.id) returns Future.successful(Seq.empty[Instance])
When("app is scaled")
f.scheduler.scale(app).futureValue
Then("6 more tasks are added to the queue")
verify(f.queue, times(1)).add(app, 6)
}
"Scale up with enough tasks in launch queue" in {
val f = new Fixture
Given("an app with 10 instances and an active queue with 10 tasks")
val app = MarathonTestHelper.makeBasicApp().copy(instances = 10)
f.queue.get(app.id) returns Some(LaunchQueueTestHelper.instanceCounts(instancesLeftToLaunch = 10, finalInstanceCount = 10))
f.instanceTracker.specInstances(app.id) returns Future.successful(Seq.empty[Instance])
When("app is scaled")
f.scheduler.scale(app).futureValue
Then("no tasks are added to the queue")
verify(f.queue, never).add(eq(app), any[Int])
}
// This test was an explicit wish by Matthias E.
"Scale up with too many tasks in launch queue" in {
val f = new Fixture
Given("an app with 10 instances and an active queue with 10 tasks")
val app = MarathonTestHelper.makeBasicApp().copy(instances = 10)
f.queue.get(app.id) returns Some(LaunchQueueTestHelper.instanceCounts(instancesLeftToLaunch = 15, finalInstanceCount = 10))
f.instanceTracker.specInstances(app.id) returns Future.successful(Seq.empty[Instance])
When("app is scaled")
f.scheduler.scale(app).futureValue
Then("no tasks are added to the queue")
verify(f.queue, never).add(eq(app), any[Int])
}
// This scenario is the following:
// - There's an active queue and Marathon has 10 running + 5 staged tasks
// - Marathon receives StatusUpdates for 5 previously LOST tasks
// - A scale is initiated and Marathon realizes there are 5 tasks over capacity
// => We expect Marathon to kill the 5 staged tasks
"Kill staged tasks in correct order in case lost tasks reappear" in {
val f = new Fixture
Given("an active queue, staged tasks and 5 overCapacity")
val app = MarathonTestHelper.makeBasicApp().copy(instances = 5)
def stagedInstance(stagedAt: Long) = TestInstanceBuilder.newBuilder(app.id).addTaskStaged(Timestamp.apply(stagedAt)).getInstance()
def runningInstance() = TestInstanceBuilder.newBuilder(app.id).addTaskRunning().getInstance()
val staged_2 = stagedInstance(2L)
val staged_3 = stagedInstance(3L)
val tasks: Seq[Instance] = Seq(
runningInstance(),
stagedInstance(1L),
runningInstance(),
staged_3,
runningInstance(),
staged_2,
runningInstance()
)
f.queue.asyncPurge(app.id) returns Future.successful(Done)
f.instanceTracker.specInstances(app.id) returns Future.successful(tasks)
When("the app is scaled")
f.scheduler.scale(app).futureValue
Then("the queue is purged")
verify(f.queue, times(1)).asyncPurge(app.id)
And("the youngest STAGED tasks are killed")
verify(f.killService, withinTimeout()).killInstances(List(staged_3, staged_2), KillReason.OverCapacity)
verifyNoMoreInteractions(f.driver)
verifyNoMoreInteractions(f.killService)
}
"Kill running tasks in correct order in case of lost tasks" in {
val f = new Fixture
Given("an inactive queue, running tasks and some overCapacity")
val app: AppDefinition = MarathonTestHelper.makeBasicApp().copy(instances = 5)
def runningInstance(stagedAt: Long) = {
val instance = TestInstanceBuilder.newBuilder(app.id).addTaskRunning(stagedAt = Timestamp.apply(stagedAt), startedAt = Timestamp.apply(stagedAt)).getInstance()
val state = instance.state.copy(condition = Condition.Running)
instance.copy(state = state)
}
val running_6 = runningInstance(stagedAt = 6L)
val running_7 = runningInstance(stagedAt = 7L)
val instances = Seq(
runningInstance(stagedAt = 3L),
running_7,
runningInstance(stagedAt = 1L),
runningInstance(stagedAt = 4L),
runningInstance(stagedAt = 5L),
running_6,
runningInstance(stagedAt = 2L)
)
f.queue.get(app.id) returns None
f.queue.asyncPurge(app.id) returns Future.successful(Done)
f.instanceTracker.specInstances(app.id) returns Future.successful(instances)
When("the app is scaled")
f.scheduler.scale(app).futureValue
Then("the queue is purged")
verify(f.queue, times(1)).asyncPurge(app.id)
And("the youngest RUNNING tasks are killed")
verify(f.killService, withinTimeout()).killInstances(List(running_7, running_6), KillReason.OverCapacity)
verifyNoMoreInteractions(f.driver)
verifyNoMoreInteractions(f.killService)
}
"Kill staged and running tasks in correct order in case of lost tasks" in {
val f = new Fixture
Given("an active queue, running tasks and some overCapacity")
val app = MarathonTestHelper.makeBasicApp().copy(instances = 3)
def stagedInstance(stagedAt: Long) = {
val instance = TestInstanceBuilder.newBuilder(app.id).addTaskStaged(Timestamp.apply(stagedAt)).getInstance()
val state = instance.state.copy(condition = Condition.Staging)
instance.copy(state = state)
}
def runningInstance(stagedAt: Long) = {
val instance = TestInstanceBuilder.newBuilder(app.id).addTaskRunning(stagedAt = Timestamp.apply(stagedAt), startedAt = Timestamp.apply(stagedAt)).getInstance()
val state = instance.state.copy(condition = Condition.Running)
instance.copy(state = state)
}
val staged_1 = stagedInstance(1L)
val running_4 = runningInstance(stagedAt = 4L)
val tasks: Seq[Instance] = Seq(
runningInstance(stagedAt = 3L),
running_4,
staged_1,
runningInstance(stagedAt = 1L),
runningInstance(stagedAt = 2L)
)
f.queue.asyncPurge(app.id) returns Future.successful(Done)
f.instanceTracker.specInstances(app.id) returns Future.successful(tasks)
When("the app is scaled")
f.scheduler.scale(app).futureValue
Then("the queue is purged")
verify(f.queue, times(1)).asyncPurge(app.id)
And("all STAGED tasks plus the youngest RUNNING tasks are killed")
verify(f.killService, withinTimeout()).killInstances(List(staged_1, running_4), KillReason.OverCapacity)
verifyNoMoreInteractions(f.driver)
verifyNoMoreInteractions(f.killService)
}
import scala.language.implicitConversions
implicit def durationToPatienceConfigTimeout(d: FiniteDuration): PatienceConfiguration.Timeout = {
PatienceConfiguration.Timeout(Span(d.toMillis, Millis))
}
class Fixture {
val queue = mock[LaunchQueue]
val groupRepo = mock[GroupRepository]
val instanceTracker = mock[InstanceTracker]
val driver = mock[SchedulerDriver]
val killService = mock[KillService]
val clock = new SettableClock()
val scheduler = new SchedulerActions(
groupRepo,
mock[HealthCheckManager],
instanceTracker,
queue,
system.eventStream,
killService
)
}
}
}
|
janisz/marathon
|
src/test/scala/mesosphere/marathon/SchedulerActionsTest.scala
|
Scala
|
apache-2.0
| 12,382
|
package com.clackjones.connectivitymap.referenceprofile
trait ReferenceSetLoaderComponent {
def referenceSetLoader : ReferenceSetLoader
/**
* A class to take a [[ReferenceSet]] object and load all of its
* constituent [[ReferenceProfile]]s from file.
*/
trait ReferenceSetLoader {
/**
* load all [[ReferenceProfile]]s associated with a
* [[ReferenceSet]]
* @param set the [[ReferenceSet]]
* @return A set of [[ReferenceProfile]] objects
*/
def retrieveAllProfiles(set: ReferenceSet): Set[ReferenceProfile]
/**
* creates a [[ReferenceProfile]] whose fold-change for each Probe ID
* is the average of the fold change for each Probe ID of each
* ReferenceProfile in this [[ReferenceSet]].
* @param set the [[ReferenceSet]]
* @return a [[ReferenceProfile]] with average fold changes of this set
*/
def retrieveAverageReference(set: ReferenceSet): ReferenceProfile
}
}
trait ReferenceSetFileLoaderComponent extends ReferenceSetLoaderComponent {
this: ReferenceProfileLoaderComponent =>
val referenceSetLoader = new ReferenceSetFileLoader
/**
* A [[ReferenceSetLoader]] that uses a
* [[ReferenceProfileLoaderComponent]] to retrieve all the
* [[ReferenceProfile]]s of a [[ReferenceSet]]
*/
class ReferenceSetFileLoader extends ReferenceSetLoader {
def retrieveAllProfiles(set: ReferenceSet): Set[ReferenceProfile] = {
(set.filenames map (referenceProfileLoader.loadReferenceProfile(_))).toSet
}
def retrieveAverageReference(set: ReferenceSet): ReferenceProfile = {
val geneFoldChanges = set.filenames map (referenceProfileLoader.loadReferenceProfile(_).geneFoldChange)
val geneIds = geneFoldChanges.head.keys
val profileCount = geneFoldChanges.size.toFloat
val avgFoldChange : Map[String, Float] = (geneIds map (gID => {
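// For each gene ID, sum its fold change across all loaded profiles and divide by the number of profiles.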
val sumFoldChange = geneFoldChanges.foldLeft(0f)(_ + _(gID))
(gID, sumFoldChange / profileCount)
})).toMap
new ReferenceProfile(set.name, avgFoldChange)
}
}
}
|
hiraethus/scala-connectivity-map
|
src/main/scala/com/clackjones/connectivitymap/referenceprofile/ReferenceSetLoader.scala
|
Scala
|
gpl-3.0
| 2,076
|
package com.twitter.finagle.client
import com.twitter.finagle.{ClientConnection, Service, ServiceFactory}
import com.twitter.finagle.stats.InMemoryStatsReceiver
import com.twitter.util.{Await, Future, Time}
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class DefaultPoolTest extends FunSuite {
class MockServiceFactory extends ServiceFactory[Unit, Unit] {
override def apply(conn: ClientConnection): Future[Service[Unit, Unit]] =
Future.value(new MockService())
override def close(deadline: Time): Future[Unit] = Future.Done
}
class MockService extends Service[Unit, Unit] {
@volatile var closed = false
override def apply(unit: Unit): Future[Unit] =
if (closed) Future.exception(new Exception) else Future.Done
override def close(deadline: Time): Future[Unit] = {
closed = true
Future.Done
}
override def isAvailable: Boolean = !closed
}
trait DefaultPoolHelper {
val underlying = new MockServiceFactory()
val sr = new InMemoryStatsReceiver()
val factory = DefaultPool[Unit, Unit](2, 3)(sr)(underlying)
}
test("DefaultPool should be able to maintain high - low connections in the " +
"pool, and low connection in watermark") {
new DefaultPoolHelper {
val c1 = Await.result(factory())
assert(sr.gauges(Seq("pool_cached"))() === 0)
assert(sr.gauges(Seq("pool_size"))() === 1)
val c2 = Await.result(factory())
assert(sr.gauges(Seq("pool_cached"))() === 0)
assert(sr.gauges(Seq("pool_size"))() === 2)
val c3 = Await.result(factory())
assert(sr.gauges(Seq("pool_cached"))() === 0)
assert(sr.gauges(Seq("pool_size"))() === 3)
c1.close()
assert(sr.gauges(Seq("pool_cached"))() === 1)
assert(sr.gauges(Seq("pool_size"))() === 2)
c2.close()
assert(sr.gauges(Seq("pool_cached"))() === 1)
assert(sr.gauges(Seq("pool_size"))() === 2)
c3.close()
assert(sr.gauges(Seq("pool_cached"))() === 1)
assert(sr.gauges(Seq("pool_size"))() === 2)
}
}
test("DefaultPool should be able to reuse connections after they have been " +
"released.") {
new DefaultPoolHelper {
val c1 = Await.result(factory())
val c2 = Await.result(factory())
val c3 = Await.result(factory())
c1.close()
c2.close()
c3.close()
val c4 = Await.result(factory())
val c5 = Await.result(factory())
val c6 = Await.result(factory())
// should not throw exceptions
assert(Await.result(c4(())) === ())
assert(Await.result(c5(())) === ())
assert(Await.result(c6(())) === ())
}
}
}
|
JustinTulloss/finagle
|
finagle-core/src/test/scala/com/twitter/finagle/client/DefaultPoolTest.scala
|
Scala
|
apache-2.0
| 2,714
|
class Y
class H extends Y
class ScalaLowerBound[t >: H]
new ScalaLowerBound[Y<caret>]
//t >: H
|
ilinum/intellij-scala
|
testdata/parameterInfo/typeParameterInfo/Extends/ScalaLowerBound.scala
|
Scala
|
apache-2.0
| 95
|
/**
* Copyright (c) 2013-2015 Patrick Nicolas - Scala for Machine Learning - All rights reserved
*
* The source code in this file is provided by the author for the sole purpose of illustrating the
* concepts and algorithms presented in "Scala for Machine Learning". It should not be used to
* build commercial applications.
* ISBN: 978-1-783355-874-2 Packt Publishing.
* Unless required by applicable law or agreed to in writing, software is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* Version 0.98
*/
package org.scalaml.supervised.hmm
import scala.util.{Try, Success, Failure}
import org.apache.log4j.Logger
import org.scalaml.core.Matrix
import org.scalaml.util.DisplayUtils
import HMMConfig._
/**
* <p>Implementation of the Beta or backward pass of the
* HMM algorithm to compute the probability of a sequence of observations. The beta
* matrix is computed as part of the instantiation of the class. It measures the probability
* of being in state S(i) given the observations {t+1, t+2, ... T-1}</p>
* @constructor Create a Beta (or backward) pass for the 1st canonical form of HMM
* @throws IllegalArgumentException if lambda model or the observations are undefined
* @param lambda Lambda (pi, A, B) model for the HMM composed of the initial state
* probabilities, the state-transition probabilities matrix and the emission probabilities matrix.
* @param obs Array of observations as integer (categorical data)
* @see Chapter 7 Sequential Data Models / Hidden Markov model / Evaluation / Beta pass
* @see org.scalaml.supervised.hmm.Pass
*
* @author Patrick Nicolas
* @since March 14, 2014
* @note Scala for Machine Learning Chapter 7 Sequential data models / Hidden Markov Model /
* Evaluation
*/
protected class Beta(lambda: HMMLambda, obs: Array[Int]) extends Pass(lambda, obs) {
private val logger = Logger.getLogger("Beta")
/**
* Initializes the Beta values (alphaBeta is used as alpha for the Alpha pass and
* beta for the Beta pass). The initialization implements the formula M7 which
* computes the beta value at observation t as the summation of the Beta values at
* observation t+1 multiplied by the transition probability aij and the emission
* probabilities bj for the observation at t+1
* @see Chapter 7 Sequential Data Models / Hidden Markov model / Evaluation / Beta pass
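* In standard HMM notation the backward recursion computed here is
* beta_t(i) = sum over j of ( a(i,j) * b_j(o(t+1)) * beta_(t+1)(j) ), with beta_(T-1)(i) = 1,
* where a is the state-transition matrix and b the emission matrix; the exact indexing is
* delegated to lambda.beta in updateBeta.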
*/
val complete = {
Try {
// Creates the matrix of probabilities of a state given the
// observations, and initialize the probability for the last observation
// (index T-1) as 1.0
alphaBeta = Matrix[Double](lambda.getT, lambda.getN)
alphaBeta += (lambda.d_1, 1.0)
// Normalize by computing (ct)
normalize(lambda.d_1)
// Compute the beta probabilities for all the observations.
sumUp
}
match {
case Success(t) => true
case Failure(e) => DisplayUtils.error("Beta.complete failed", logger, e); false
}
}
/*
* Update the beta values from observation T-2 down to the first observation
* (index 0). The values are then normalized by the scaling factor c(t).
* @see Chapter 7 Sequential Data Models / Hidden Markov model / Evaluation / Alpha pass
*/
private def sumUp: Unit = {
// Update and normalize the beta probabilities for all
// the observations, starting with index T-2.
(lambda.getT-2 to 0 by -1).foreach( t =>{
updateBeta(t)
normalize(t)
})
}
/*
* Implements the update of beta(t) from beta(t+1) for all the states using
* the transition probabilities A and the emission matrix B
*/
private def updateBeta(t: Int): Unit =
foreach(lambda.getN, i =>
alphaBeta += (t, i, lambda.beta(alphaBeta(t+1, i), i, obs(t+1))))
}
/**
* Companion object for the Beta pass that defines the constructor apply
* @author Patrick Nicolas
* @since March 14, 2014
* @note Scala for Machine Learning Chapter 7 Sequential data models / Hidden Markov Model /
* Evaluation
*/
object Beta {
/**
* Default constructor for the Beta class of forward/backward passes in HMM
* @param lambda Lambda (pi, A, B) model for the HMM composed of the initial state probabilities, the state-transition probabilities matrix and the emission probabilities matrix.
* @param obs Array of observations as integer (categorical data)
*/
def apply(lambda: HMMLambda, obs: Array[Int]): Beta = new Beta(lambda, obs)
}
// -------------------------------- EOF -------------------------------------
|
batermj/algorithm-challenger
|
books/cs/machine-learning/scala-for-machine-learning/1rst-edition/original-src-from-the-book/src/main/scala/org/scalaml/supervised/hmm/Beta.scala
|
Scala
|
apache-2.0
| 4,567
|
package lila.analyse
import play.api.libs.json._
import lila.common.PimpedJson._
import lila.game.Game
object JsonView {
def moves(analysis: Analysis) = JsArray(analysis.infoAdvices map {
case ((info, adviceOption)) => Json.obj(
"eval" -> info.score.map(_.centipawns),
"mate" -> info.mate,
"best" -> info.best.map(_.uci),
"variation" -> info.variation.nonEmpty.option(info.variation mkString " "),
"judgment" -> adviceOption.map { a =>
Json.obj(
"glyph" -> Json.obj(
"name" -> a.judgment.glyph.name,
"symbol" -> a.judgment.glyph.symbol
),
"name" -> a.judgment.name,
"comment" -> a.makeComment(false, true)
)
}
).noNull
})
def player(pov: lila.game.Pov)(analysis: Analysis) =
analysis.summary.find(_._1 == pov.color).map(_._2).map(s =>
JsObject(s map {
case (nag, nb) => nag.toString.toLowerCase -> JsNumber(nb)
}) ++ lila.analyse.Accuracy.mean(pov, analysis).fold(Json.obj()) { acpl =>
Json.obj("acpl" -> acpl)
}
)
def bothPlayers(game: Game, analysis: Analysis) = Json.obj(
"white" -> player(game.whitePov)(analysis),
"black" -> player(game.blackPov)(analysis))
def mobile(game: Game, analysis: Analysis) = Json.obj(
"summary" -> bothPlayers(game, analysis),
"moves" -> moves(analysis)
)
}
|
clarkerubber/lila
|
modules/analyse/src/main/JsonView.scala
|
Scala
|
agpl-3.0
| 1,392
|
package com.wavesplatform.crypto
import com.wavesplatform.account.KeyPair
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.crypto
import com.wavesplatform.test.PropSpec
class SigningFunctionsSpecification extends PropSpec {
property("signed message should be verifiable with appropriate public key") {
forAll { (seed1: Array[Byte], seed2: Array[Byte], message1: Array[Byte], message2: Array[Byte]) =>
whenever(!seed1.sameElements(seed2) && !message1.sameElements(message2)) {
val acc = KeyPair(ByteStr(seed1))
val sig = crypto.sign(acc.privateKey, message1)
val rightKey = acc
crypto.verify(sig, message1, rightKey.publicKey) should be(true)
val wrongKey = KeyPair(ByteStr(seed2))
crypto.verify(sig, message1, wrongKey.publicKey) shouldNot be(true)
crypto.verify(sig, message2, rightKey.publicKey) shouldNot be(true)
}
}
}
}
|
wavesplatform/Waves
|
node/src/test/scala/com/wavesplatform/crypto/SigningFunctionsSpecification.scala
|
Scala
|
mit
| 942
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver.ui
import java.util.Locale
import javax.servlet.http.HttpServletRequest
import org.mockito.Mockito.{mock, when, RETURNS_SMART_NULLS}
import org.apache.spark.SparkFunSuite
import org.apache.spark.scheduler.SparkListenerJobStart
import org.apache.spark.sql.hive.thriftserver.HiveThriftServer2
import org.apache.spark.sql.hive.thriftserver.HiveThriftServer2.HiveThriftServer2Listener
import org.apache.spark.sql.internal.SQLConf
class ThriftServerPageSuite extends SparkFunSuite {
/**
* Run a dummy session and return the listener
*/
private def getListener: HiveThriftServer2Listener = {
val listener = new HiveThriftServer2Listener(mock(classOf[HiveThriftServer2]), new SQLConf)
listener.onSessionCreated("localhost", "sessionid", "user")
listener.onStatementStart("id", "sessionid", "dummy query", "groupid", "user")
listener.onStatementParsed("id", "dummy plan")
listener.onJobStart(SparkListenerJobStart(0, System.currentTimeMillis(), Seq()))
listener.onStatementFinish("id")
listener.onOperationClosed("id")
listener.onSessionClosed("sessionid")
listener
}
test("thriftserver page should load successfully") {
val request = mock(classOf[HttpServletRequest])
val tab = mock(classOf[ThriftServerTab], RETURNS_SMART_NULLS)
when(tab.listener).thenReturn(getListener)
when(tab.appName).thenReturn("testing")
when(tab.headerTabs).thenReturn(Seq.empty)
val page = new ThriftServerPage(tab)
val html = page.render(request).toString().toLowerCase(Locale.ROOT)
// session statistics and sql statistics tables should load successfully
assert(html.contains("session statistics (1)"))
assert(html.contains("sql statistics (1)"))
assert(html.contains("dummy query"))
assert(html.contains("dummy plan"))
// Pagination support
assert(html.contains("<label>1 pages. jump to</label>"))
// Hiding table support
assert(html.contains("class=\\"collapse-aggregated-sessionstat" +
" collapse-table\\" onclick=\\"collapsetable"))
}
test("thriftserver session page should load successfully") {
val request = mock(classOf[HttpServletRequest])
when(request.getParameter("id")).thenReturn("sessionid")
val tab = mock(classOf[ThriftServerTab], RETURNS_SMART_NULLS)
when(tab.listener).thenReturn(getListener)
when(tab.appName).thenReturn("testing")
when(tab.headerTabs).thenReturn(Seq.empty)
val page = new ThriftServerSessionPage(tab)
val html = page.render(request).toString().toLowerCase(Locale.ROOT)
// session sql statistics table should load successfully
assert(html.contains("sql statistics"))
assert(html.contains("user"))
assert(html.contains("groupid"))
// Pagination support
assert(html.contains("<label>1 pages. jump to</label>"))
// Hiding table support
assert(html.contains("collapse-aggregated-sqlsessionstat collapse-table\\"" +
" onclick=\\"collapsetable"))
}
}
|
caneGuy/spark
|
sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ui/ThriftServerPageSuite.scala
|
Scala
|
apache-2.0
| 3,812
|
package com.twitter.finatra_client
import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers
import com.twitter.finagle.Service
import com.twitter.finagle.http.{Http, RichHttp, Response, Request}
import com.twitter.finagle.builder.ClientBuilder
import org.jboss.netty.handler.codec.http.HttpMethod
class FinatraHttpRequestSpec extends FlatSpec with ShouldMatchers {
val client: Service[Request, Response] = ClientBuilder()
.codec(new RichHttp[Request](Http()))
.hosts("localhost:7070")
.hostConnectionLimit(1)
.build()
val request = new FinatraHttpRequest(client)
".method()" should "set the http method" in {
val r = request.method(HttpMethod.PUT).build
r.method should equal(HttpMethod.PUT)
}
".params()" should "set the GET params" in {
val r = request.params("screen_name" -> "twoism").build
r.uri should equal("/?screen_name=twoism")
}
".params()" should "set the POST body params" in {
val r = request.method(HttpMethod.POST).params("screen_name" -> "twoism").build
r.getContentString() should equal("?screen_name=twoism")
}
".headers()" should "set HTTP Headers" in {
val r = request.headers("X-RateLimit" -> "100").build
r.getHeader("X-RateLimit") should equal("100")
}
}
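// A small composed-usage sketch of the builder exercised above. It only uses calls shown in the
// spec (method, headers, params, build) and assumes, as the chained calls above suggest, that each
// builder call returns the builder; the header and param values are hypothetical.
object FinatraHttpRequestUsageSketch {
  def jsonSearch(request: FinatraHttpRequest) =
    request
      .method(HttpMethod.POST)
      .headers("Accept" -> "application/json")
      .params("q" -> "finatra")
      .build
}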
|
twoism/finatra_client
|
src/test/scala/com/twitter/finatra_client/FinatraHttpRequestSpec.scala
|
Scala
|
apache-2.0
| 1,300
|
/*
* Copyright 2015 University of Basel, Graphics and Vision Research Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalismo.image.filter
import scalismo.common.interpolation.{BSplineImageInterpolator, BSplineImageInterpolator3D}
import scalismo.common.{Scalar, ScalarArray}
import scalismo.geometry._
import scalismo.image.DiscreteImage
import scalismo.utils.{CanConvertToVtk, ImageConversion}
import vtk.{vtkImageCast, vtkImageEuclideanDistance, vtkImageGaussianSmooth, vtkObjectBase}
import scala.reflect.ClassTag
import scala.reflect.runtime.universe.TypeTag
object DiscreteImageFilter {
/**
* Computes a (signed) distance transform of the image.
   * @note The returned value is not the Euclidean distance unless the image has unit spacing; worse, the distance may depend on the spacing of the image.
*/
def distanceTransform[D: NDSpace: CanConvertToVtk: BSplineImageInterpolator.Create, A: Scalar: ClassTag: TypeTag](
img: DiscreteImage[D, A]
): DiscreteImage[D, Float] = {
val scalar = implicitly[Scalar[A]]
def doDistanceTransformVTK(img: DiscreteImage[D, A]) = {
val imgvtk = ImageConversion.imageToVtkStructuredPoints(img)
val vtkdistTransform = new vtkImageEuclideanDistance()
vtkdistTransform.SetMaximumDistance(100000)
vtkdistTransform.SetAlgorithmToSaito()
vtkdistTransform.InitializeOn()
vtkdistTransform.ReleaseDataFlagOn()
vtkdistTransform.SetConsiderAnisotropy(1)
vtkdistTransform.SetInputData(imgvtk)
      // vtk gives double precision; we want float instead
val caster = new vtkImageCast()
caster.SetOutputScalarTypeToFloat()
caster.SetInputConnection(vtkdistTransform.GetOutputPort())
caster.Update()
val dtvtk = caster.GetOutput()
val dt = ImageConversion
.vtkStructuredPointsToScalarImage[D, Float](dtvtk)
.map { dt =>
dt.map(v => math.sqrt(v).toFloat)
}
.get // this is safe here, as it can never fail since we converted back and forth
caster.Delete()
imgvtk.Delete()
dtvtk.Delete()
vtkdistTransform.Delete()
      System.gc() // make sure it deletes the intermediate results
dt.interpolateDifferentiable(BSplineImageInterpolator(degree = 0)).discretize(img.domain, 0)
}
val dt1 = doDistanceTransformVTK(img)
val invImg = img.map[A](v => if (v == 0) scalar.fromShort(1) else scalar.fromShort(0))
val dt2 = doDistanceTransformVTK(invImg)
val newPixelValues = dt1.values.zip(dt2.values).map { case (p1, p2) => p1 - p2 }.toArray
DiscreteImage(dt1.domain, ScalarArray(newPixelValues))
}
/**
* Smoothing of an image using a Gaussian filter kernel with the given stddev
*/
def gaussianSmoothing[D: NDSpace, A: Scalar: ClassTag: TypeTag](img: DiscreteImage[D, A], stddev: Double)(
implicit
vtkConversion: CanConvertToVtk[D]
): DiscreteImage[D, A] = {
val vtkImg = vtkConversion.toVtk[A](img)
val dim = NDSpace[D].dimensionality
val gaussianFilter = new vtkImageGaussianSmooth()
gaussianFilter.SetInputData(vtkImg)
val unitsAdjustedSpacing = img.domain.pointSet.spacing.map(s => stddev * (1f / s))
unitsAdjustedSpacing.dimensionality match {
case 2 => gaussianFilter.SetStandardDeviation(unitsAdjustedSpacing(0), unitsAdjustedSpacing(1))
case 3 =>
gaussianFilter.SetStandardDeviation(unitsAdjustedSpacing(0), unitsAdjustedSpacing(1), unitsAdjustedSpacing(2))
case _ =>
throw new IllegalArgumentException(
s"Bad dimensionality for gaussianSmoothing. Got $dim encountered but require 2 or 3."
)
}
gaussianFilter.Update()
val vtkRes = gaussianFilter.GetOutput()
    // it is safe to call get here, as an error can only occur when the pixel type is not supported.
    // But as we converted the image to vtk ourselves, the conversion back is always possible.
val imgRes = vtkConversion.fromVtk(vtkRes).get
// prevent memory leaks caused by VTK
vtkObjectBase.JAVA_OBJECT_MANAGER.gc(false)
imgRes
}
}
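// A minimal usage sketch of the two filters above, combined. The image `img` is assumed to be
// supplied by the caller (its construction is out of scope here), and scalismo's native VTK
// libraries are assumed to be initialised (e.g. via scalismo.initialize()) before calling this.
object DiscreteImageFilterUsageSketch {
  def smoothThenDistanceTransform(img: DiscreteImage[_3D, Short]): DiscreteImage[_3D, Float] = {
    // smooth first to suppress noise, then compute the (signed) distance transform;
    // remember that the result is only a Euclidean distance for unit-spacing images
    val smoothed = DiscreteImageFilter.gaussianSmoothing(img, stddev = 2.0)
    DiscreteImageFilter.distanceTransform(smoothed)
  }
}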
|
unibas-gravis/scalismo
|
src/main/scala/scalismo/image/filter/DiscreteImageFilter.scala
|
Scala
|
apache-2.0
| 4,607
|
package com.rgcase.playground
object HelloWorld extends App {
println("Hello com.rgcase.playground!")
}
|
rgcase/testplayground
|
src/main/scala/App.scala
|
Scala
|
apache-2.0
| 107
|
package uk.co.morleydev.zander.client.data.map
import uk.co.morleydev.zander.client.model.arg.Project
import uk.co.morleydev.zander.client.model.arg.BuildCompiler._
import uk.co.morleydev.zander.client.model.arg.BuildMode._
import uk.co.morleydev.zander.client.model.store.ArtefactDetails
import uk.co.morleydev.zander.client.data.ProcessProjectArtefactDetailsMap
object RemoveOverlappingFilesFromArtefactDetails extends ProcessProjectArtefactDetailsMap {
def apply(details : Map[(Project, BuildCompiler, BuildMode), ArtefactDetails])
: Map[(Project, BuildCompiler, BuildMode), ArtefactDetails] = {
val filesToRemove = details
.flatMap(detail => detail._2.files)
.groupBy({x => x})
.filter({case (_, x) => x.size > 1})
.keys
.toSeq
details.map(d => (d._1, new ArtefactDetails(d._2.version, d._2.files.diff(filesToRemove)))).toMap
}
}
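// The core idiom used above, shown on plain standard-library data so it can run in isolation
// (the map keys and file names below are made up for illustration):
object RemoveOverlappingFilesIdiomExample extends App {
  val details = Map("projectA" -> Seq("x.h", "shared.h"), "projectB" -> Seq("shared.h", "z.h"))
  // a file appearing under more than one key counts as overlapping
  val overlapping = details.values.flatten.groupBy(identity).filter(_._2.size > 1).keys.toSeq
  val cleaned = details.map { case (key, files) => key -> files.diff(overlapping) }
  println(cleaned) // Map(projectA -> List(x.h), projectB -> List(z.h))
}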
|
MorleyDev/zander.client
|
src/main/scala/uk/co/morleydev/zander/client/data/map/RemoveOverlappingFilesFromArtefactDetails.scala
|
Scala
|
mit
| 884
|
package silhouette
trait SecuredUser extends com.mohiva.play.silhouette.api.Identity
|
fredericoramos78/play-scala-angularjs
|
app/silhouette/SecuredUser.scala
|
Scala
|
gpl-3.0
| 86
|
package controllers
import javax.inject.Singleton
import com.fasterxml.jackson.annotation.JsonValue
import org.slf4j.{Logger, LoggerFactory}
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.api.libs.json._
import play.api.mvc._
import play.modules.reactivemongo.MongoController
import play.modules.reactivemongo.json.collection.JSONCollection
import reactivemongo.api.Cursor
import services.PlansService
import scala.collection.immutable.ListMap
import scala.concurrent.Future
/**
 * The Plans controller encapsulates the REST endpoints and the interaction with MongoDB, via the ReactiveMongo
 * Play plugin. This provides a non-blocking driver for MongoDB as well as some useful additions for handling JSON.
* @see https://github.com/ReactiveMongo/Play-ReactiveMongo
*/
@Singleton
class Plans extends Controller with MongoController {
private final val logger: Logger = LoggerFactory.getLogger(classOf[Plans])
/*
* Get a JSONCollection (a Collection implementation that is designed to work
* with JsObject, Reads and Writes.)
* Note that the `collection` is not a `val`, but a `def`. We do _not_ store
* the collection reference to avoid potential problems in development with
* Play hot-reloading.
*/
def collection: JSONCollection = db.collection[JSONCollection]("plans")
// ------------------------------------------ //
// Using case classes + Json Writes and Reads //
// ------------------------------------------ //
import models.PlanJsonFormats._
import models._
def createPlan = Action.async(parse.json) {
request =>
/*
* request.body is a JsValue.
* There is an implicit Writes that turns this JsValue as a JsObject,
* so you can call insert() with this JsValue.
* (insert() takes a JsObject as parameter, or anything that can be
* turned into a JsObject using a Writes.)
*/
request.body.validate[Plan].map {
plan =>
// `plan` is an instance of the case class `models.Plan`
collection.insert(plan).map {
lastError =>
logger.debug(s"Successfully inserted with LastError: $lastError")
Created(s"Plan Added")
}
}.getOrElse(Future.successful(BadRequest("invalid json")))
}
def updatePlan(id: String) = Action.async(parse.json) {
request =>
request.body.validate[Plan].map {
plan =>
          // find our plan by id
val nameSelector = Json.obj("_id" -> Json.obj("$oid"->id))
collection.update(nameSelector, plan).map {
lastError =>
logger.debug(s"Successfully updated with LastError: $lastError")
Created(s"Plan Changed")
}
}.getOrElse(Future.successful(BadRequest("invalid json")))
}
def softDeletePlan(id: String) = Action.async {
request =>
implicit val objectId = id;
      // find our plan by id
val nameSelector = Json.obj("_id" -> Json.obj("$oid"->id))
collection.update(nameSelector, Json.obj("$set"->Json.obj("active"->"false"))).map {
lastError =>
logger.debug(s"Successfully deleted with LastError: $lastError")
Ok(s"Plan deleted")
}
}
def rawDeletePlan(id: String) = Action.async {
collection.remove(Json.obj("_id" -> Json.obj("$oid"->id))).map(_ => Ok)
}
def findPlan(id: String) = Action.async {
val futurePlans: Future[Option[Plan]] = collection.
find(Json.obj("_id" -> Json.obj("$oid"->id),"active" -> true)).one[Plan]
futurePlans.map{
case plan:Some[Plan] => Ok(Json.toJson(plan))
case None => NoContent
}
}
def fetchPlansFromService:Future[List[Plan]] = {
// let's do our query
val cursor: Cursor[Plan] = collection.
// find all
find(Json.obj("active" -> true)).
// sort them by creation date
sort(Json.obj("created" -> -1)).
// perform the query and get a cursor of JsObject
cursor[Plan]
// collect the JsonObjects in List
val futurePlansList: Future[List[Plan]] = cursor.collect[List]()
futurePlansList
}
def findPlans = Action.async {
val futurePersonsJsonArray: Future[JsArray] = fetchPlansFromService.map { plans =>
Json.arr(plans)
}
// map as json array
futurePersonsJsonArray.map {
plans =>
Ok(plans(0))
}
}
def findAllStores = Action.async{
val futureStoreList = fetchPlansFromService.map { plans => {
for {
plan <- plans
stores <- plan.store
} yield (stores)
}
}
val futureStoreArray = futureStoreList.map{ stores =>
Json.arr(stores.distinct)
}
futureStoreArray.map {
plans =>
Ok(plans(0))
}
}
def findPlansByHash = Action.async{
val futureTupleList = fetchPlansFromService.map{ plans => {
val storeToPlan:List[(String,Plan)] = for{
plan <- plans
stores <- plan.store
}yield (stores,plan)
      val plansByTag = storeToPlan.groupBy {
        case (store, plan) => store
      }.mapValues { storeToPlanList =>
        storeToPlanList.map { storeAndPlan => storeAndPlan._2 }
      }
plansByTag
}
}
val futurePersonsJsonArray: Future[JsArray] = futureTupleList.map { plans =>
Json.arr(plans)
}
futurePersonsJsonArray.map {
plans =>
Ok(plans(0))
}
}
}
|
ranraj/reactive-play-angular
|
app/controllers/Plans.scala
|
Scala
|
apache-2.0
| 5,405
|
package models
import db.{CouponDto, CruitedProductDto}
import models.client.OrderReceivedFromClient
case class Order(id: Option[Long],
editionId: Long,
containedProductCodes: List[String],
couponId: Option[Long],
cvFileName: Option[String],
coverLetterFileName: Option[String],
linkedinProfileFileName: Option[String],
positionSought: Option[String],
employerSought: Option[String],
jobAdUrl: Option[String],
jobAdFileName: Option[String],
customerComment: Option[String],
accountId: Option[Long],
status: Int,
creationTimestamp: Option[Long]) {
def this(orderReceivedFromClient: OrderReceivedFromClient, id: Long) = this(
id = Some(id),
editionId = orderReceivedFromClient.editionId,
containedProductCodes = orderReceivedFromClient.containedProductCodes,
couponId = orderReceivedFromClient.couponCode match {
case None => None
case Some(couponCode) => CouponDto.getOfCode(couponCode) match {
case None => None
case Some(coupon) => Some(coupon.id)
}
},
cvFileName = orderReceivedFromClient.cvFileName,
coverLetterFileName = orderReceivedFromClient.coverLetterFileName,
linkedinProfileFileName = None,
positionSought = orderReceivedFromClient.positionSought,
employerSought = orderReceivedFromClient.employerSought,
jobAdUrl = None,
jobAdFileName = None,
customerComment = None,
accountId = orderReceivedFromClient.accountId,
status = orderReceivedFromClient.status,
creationTimestamp = None
)
}
object Order {
val fileNamePrefixSeparator = "-"
val typeStringSeparator = ","
val statusIdNotPaid = -1
val statusIdPaid = 0
val statusIdInProgress = 1
val statusIdAwaitingFeedback = 4
val statusIdScheduled = 3
val statusIdComplete = 2
def getTypeForDb(containedProductCodes: List[String]): String = {
containedProductCodes.length match {
case 0 => ""
case nbAboveZero =>
val allProducts = CruitedProductDto.getAll
nbAboveZero match {
case 1 =>
allProducts.filter(p => p.code == containedProductCodes.head).head
.getTypeForDb
case nbAboveOne =>
// First item handled differently - no comma prefix
val firstProduct = allProducts.filter(p => p.code == containedProductCodes.head).head
var result = firstProduct.getTypeForDb
for (i <- 1 to nbAboveOne - 1) {
val product = allProducts.filter(p => p.code == containedProductCodes(i)).head
result = result + typeStringSeparator + product.getTypeForDb
}
result
}
}
}
def getContainedProductCodesFromTypesString(docTypes: String): List[String] = {
getContainedProductCodesFromTypesArray(docTypes.split(typeStringSeparator).toList)
}
def getContainedProductCodesFromTypesArray(docTypes: List[String]): List[String] = {
docTypes.map { typeForDb => CruitedProduct.getCodeFromType(typeForDb)}
.toList
}
def getFileNameWithoutPrefix(fileName: Option[String]): Option[String] = {
fileName match {
case None => None
case Some(fileNameWithPrefix) =>
val indexFileNameAfterPrefix = fileNameWithPrefix.indexOf(Order.fileNamePrefixSeparator, 1) + Order.fileNamePrefixSeparator.length
Some(fileNameWithPrefix.substring(indexFileNameAfterPrefix))
}
}
}
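// A small runnable illustration of getFileNameWithoutPrefix; the file names are hypothetical,
// following the "<prefix>-<original name>" convention implied by fileNamePrefixSeparator.
object OrderFileNameExample extends App {
  println(Order.getFileNameWithoutPrefix(Some("1423-cv.pdf"))) // Some(cv.pdf)
  println(Order.getFileNameWithoutPrefix(None)) // None
}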
|
PanzerKunst/redesigned-cruited.com-frontend
|
document-web-service/app/models/Order.scala
|
Scala
|
gpl-3.0
| 3,602
|
/*
* Copyright 2001-2009 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatestexamples.testng
import org.scalatestexamples._
import org.testng.annotations.Test
import org.scalatest.testng.TestNGSuite
class ErrorTestNGSuite extends TestNGSuite {
@Test def testThrownException {
throw new RuntimeException
}
@Test def testThrownOutOfMemoryError {
throw new OutOfMemoryError
}
}
|
hubertp/scalatest
|
src/examples/scala/org/scalatestexamples/testng/ErrorTestNGSuite.scala
|
Scala
|
apache-2.0
| 938
|
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.kryo
import com.esotericsoftware.kryo.io.Output
import com.typesafe.scalalogging.LazyLogging
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class NonMutatingInputTest extends Specification with LazyLogging {
"NonMutatingInput" should {
"read and write strings" in {
foreach(Seq("a", "foo", "nihao你好")) { s =>
val out = new Output(128)
out.writeString(s)
val in = new NonMutatingInput()
in.setBuffer(out.toBytes)
in.readString() mustEqual s
}
}
}
}
|
ccri/geomesa
|
geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/kryo/NonMutatingInputTest.scala
|
Scala
|
apache-2.0
| 1,116
|
package lila.gameSearch
import chess.Mode
import org.joda.time.DateTime
import play.api.data._
import play.api.data.Forms._
import lila.common.Form._
import lila.search.Range
final private[gameSearch] class GameSearchForm {
val search = Form(
mapping(
"players" -> mapping(
"a" -> optional(nonEmptyText),
"b" -> optional(nonEmptyText),
"winner" -> optional(nonEmptyText),
"loser" -> optional(nonEmptyText),
"white" -> optional(nonEmptyText),
"black" -> optional(nonEmptyText)
)(SearchPlayer.apply)(SearchPlayer.unapply),
"winnerColor" -> optional(numberIn(Query.winnerColors)),
"perf" -> optional(numberIn(lila.rating.PerfType.nonPuzzle.map(_.id))),
"source" -> optional(numberIn(Query.sources)),
"mode" -> optional(numberIn(Query.modes)),
"turnsMin" -> optional(numberIn(Query.turns)),
"turnsMax" -> optional(numberIn(Query.turns)),
"ratingMin" -> optional(numberIn(Query.averageRatings)),
"ratingMax" -> optional(numberIn(Query.averageRatings)),
"hasAi" -> optional(numberIn(Query.hasAis)),
"aiLevelMin" -> optional(numberIn(Query.aiLevels)),
"aiLevelMax" -> optional(numberIn(Query.aiLevels)),
"durationMin" -> optional(numberIn(Query.durations)),
"durationMax" -> optional(numberIn(Query.durations)),
"clock" -> mapping(
"initMin" -> optional(numberIn(Query.clockInits)),
"initMax" -> optional(numberIn(Query.clockInits)),
"incMin" -> optional(numberIn(Query.clockIncs)),
"incMax" -> optional(numberIn(Query.clockIncs))
)(SearchClock.apply)(SearchClock.unapply),
"dateMin" -> GameSearchForm.dateField,
"dateMax" -> GameSearchForm.dateField,
"status" -> optional(numberIn(Query.statuses)),
"analysed" -> optional(number),
"sort" -> optional(
mapping(
"field" -> stringIn(Sorting.fields),
"order" -> stringIn(Sorting.orders)
)(SearchSort.apply)(SearchSort.unapply)
)
)(SearchData.apply)(SearchData.unapply)
) fill SearchData()
}
private[gameSearch] object GameSearchForm {
val dateField = optional(ISODateOrTimestamp.isoDateOrTimestamp)
}
private[gameSearch] case class SearchData(
players: SearchPlayer = SearchPlayer(),
winnerColor: Option[Int] = None,
perf: Option[Int] = None,
source: Option[Int] = None,
mode: Option[Int] = None,
turnsMin: Option[Int] = None,
turnsMax: Option[Int] = None,
ratingMin: Option[Int] = None,
ratingMax: Option[Int] = None,
hasAi: Option[Int] = None,
aiLevelMin: Option[Int] = None,
aiLevelMax: Option[Int] = None,
durationMin: Option[Int] = None,
durationMax: Option[Int] = None,
clock: SearchClock = SearchClock(),
dateMin: Option[DateTime] = None,
dateMax: Option[DateTime] = None,
status: Option[Int] = None,
analysed: Option[Int] = None,
sort: Option[SearchSort] = None
) {
def sortOrDefault = sort | SearchSort()
def query =
Query(
user1 = players.cleanA,
user2 = players.cleanB,
winner = players.cleanWinner,
loser = players.cleanLoser,
winnerColor = winnerColor,
perf = perf,
source = source,
rated = mode flatMap Mode.apply map (_.rated),
turns = Range(turnsMin, turnsMax),
averageRating = Range(ratingMin, ratingMax),
hasAi = hasAi map (_ == 1),
aiLevel = Range(aiLevelMin, aiLevelMax),
duration = Range(durationMin, durationMax),
clock = Clocking(clock.initMin, clock.initMax, clock.incMin, clock.incMax),
date = Range(dateMin, dateMax),
status = status,
analysed = analysed map (_ == 1),
whiteUser = players.cleanWhite,
blackUser = players.cleanBlack,
sorting = Sorting(sortOrDefault.field, sortOrDefault.order)
)
def nonEmptyQuery = Some(query).filter(_.nonEmpty)
}
private[gameSearch] case class SearchPlayer(
a: Option[String] = None,
b: Option[String] = None,
winner: Option[String] = None,
loser: Option[String] = None,
white: Option[String] = None,
black: Option[String] = None
) {
lazy val cleanA = clean(a)
lazy val cleanB = clean(b)
def cleanWinner = oneOf(winner)
def cleanLoser = oneOf(loser)
def cleanWhite = oneOf(white)
def cleanBlack = oneOf(black)
private def oneOf(s: Option[String]) = clean(s).filter(List(cleanA, cleanB).flatten.contains)
private def clean(s: Option[String]) = s map (_.trim.toLowerCase) filter (_.nonEmpty)
}
private[gameSearch] case class SearchSort(
field: String = Sorting.default.f,
order: String = Sorting.default.order
)
private[gameSearch] case class SearchClock(
initMin: Option[Int] = None,
initMax: Option[Int] = None,
incMin: Option[Int] = None,
incMax: Option[Int] = None
)
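// A minimal sketch of binding the form above from request-style parameters. The user name is
// hypothetical and only the "players.a" key is supplied, so all other (optional) fields bind to None.
private[gameSearch] object GameSearchFormBindingSketch {
  def exampleQuery(form: GameSearchForm): Option[Query] =
    form.search
      .bind(Map("players.a" -> "someuser"))
      .fold(_ => None, _.nonEmptyQuery)
}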
|
luanlv/lila
|
modules/gameSearch/src/main/GameSearchForm.scala
|
Scala
|
mit
| 4,889
|
import scala.reflect.runtime.universe._
object Test extends App {
def weakTypeTagIsnotManifest[T: WeakTypeTag] = {
println(manifest[T])
}
weakTypeTagIsnotManifest[Int]
weakTypeTagIsnotManifest[String]
weakTypeTagIsnotManifest[Array[Int]]
}
|
felixmulder/scala
|
test/files/neg/interop_abstypetags_arenot_manifests.scala
|
Scala
|
bsd-3-clause
| 255
|
package japgolly.scalajs.react
import scala.scalajs.js.{Any => JAny, Array => JArray, _}
import Internal._
object ReactComponentB {
// ===================================================================================================================
// Builder
@inline def apply[Props](name: String) = new P[Props](name)
implicit def defaultDomType[P,S,B](c: PSBN[P,S,B]) = c.domType[TopNode]
implicit def defaultProps[P,S,B,N <: TopNode](c: ReactComponentB[P,S,B,N]) = c.propsRequired
implicit def defaultDomTypeAndProps[P,S,B](c: PSBN[P,S,B]) = defaultProps(defaultDomType(c))
// ===================================================================================================================
// Convenience
/**
* Create a component that always displays the same content, never needs to be redrawn, never needs vdom diffing.
*/
def static(name: String, content: ReactElement) =
ReactComponentB[Unit](name)
.stateless
.noBackend
.render(_ => content)
.shouldComponentUpdate((_, _, _) => false)
// ===================================================================================================================
final class P[Props] private[ReactComponentB](name: String) {
// getInitialState is how it's named in React
def getInitialState[State](f: Props => State) = initialStateP(f)
def initialStateP[State](f: Props => State) = new PS(name, f)
def initialState[State](s: => State) = initialStateP(_ => s)
def stateless = initialState(())
def render(f: Props => ReactElement) = stateless.render((p,_) => f(p))
def render(f: (Props, PropsChildren) => ReactElement) = stateless.render((p,c,_) => f(p,c))
}
// ===================================================================================================================
final class PS[Props, State] private[ReactComponentB](name: String, initF: Props => State) {
def backend[Backend](f: BackendScope[Props, State] => Backend) = new PSB(name, initF, f)
def noBackend = backend(_ => ())
def render(f: (Props, State) => ReactElement) = noBackend.render((p,s,_) => f(p,s))
def render(f: (Props, PropsChildren, State) => ReactElement) = noBackend.render((p,c,s,_) => f(p,c,s))
def render(f: ComponentScopeU[Props, State, Unit] => ReactElement) = noBackend.render(f)
def renderS(f: (ComponentScopeU[Props, State, Unit], Props, State) => ReactElement) = noBackend.renderS(f)
}
// ===================================================================================================================
final class PSB[P, S, B] private[ReactComponentB](name: String, initF: P => S, backF: BackendScope[P, S] => B) {
def render(f: ComponentScopeU[P, S, B] => ReactElement): PSBN[P, S, B] =
new PSBN(name, initF, backF, f)
def render(f: (P, S, B) => ReactElement): PSBN[P, S, B] =
render(s => f(s.props, s.state, s.backend))
def render(f: (P, PropsChildren, S, B) => ReactElement): PSBN[P, S, B] =
render(s => f(s.props, s.propsChildren, s.state, s.backend))
def renderS(f: (ComponentScopeU[P, S, B], P, S) => ReactElement): PSBN[P, S, B] =
render(T => f(T, T.props, T.state))
}
// ===================================================================================================================
final class PSBN[P, S, B] private[ReactComponentB](name: String, initF: P => S, backF: BackendScope[P, S] => B, rendF: ComponentScopeU[P, S, B] => ReactElement) {
def domType[N <: TopNode]: ReactComponentB[P, S, B, N] =
new ReactComponentB(name, initF, backF, rendF, emptyLifeCycle, Vector.empty)
}
// ===================================================================================================================
private[react] case class LifeCycle[P,S,B,N <: TopNode](
configureSpec : UndefOr[ReactComponentSpec[P, S, B, N] => Unit],
getDefaultProps : UndefOr[() => P],
componentWillMount : UndefOr[ComponentScopeU[P, S, B] => Unit],
componentDidMount : UndefOr[ComponentScopeM[P, S, B, N] => Unit],
componentWillUnmount : UndefOr[ComponentScopeM[P, S, B, N] => Unit],
componentWillUpdate : UndefOr[(ComponentScopeWU[P, S, B, N], P, S) => Unit],
componentDidUpdate : UndefOr[(ComponentScopeM[P, S, B, N], P, S) => Unit],
componentWillReceiveProps: UndefOr[(ComponentScopeM[P, S, B, N], P) => Unit],
shouldComponentUpdate : UndefOr[(ComponentScopeM[P, S, B, N], P, S) => Boolean])
private[react] def emptyLifeCycle[P,S,B,N <: TopNode] =
LifeCycle[P,S,B,N](undefined, undefined, undefined, undefined, undefined, undefined, undefined, undefined, undefined)
}
import ReactComponentB.LifeCycle
final class ReactComponentB[P,S,B,N <: TopNode](val name: String,
initF : P => S,
backF : BackendScope[P, S] => B,
rendF : ComponentScopeU[P, S, B] => ReactElement,
lc : LifeCycle[P, S, B, N],
jsMixins: Vector[JAny]) {
@inline private def copy(name : String = name ,
initF : P => S = initF ,
backF : BackendScope[P, S] => B = backF ,
rendF : ComponentScopeU[P, S, B] => ReactElement = rendF ,
lc : LifeCycle[P, S, B, N] = lc ,
jsMixins: Vector[JAny] = jsMixins): ReactComponentB[P, S, B, N] =
new ReactComponentB(name, initF, backF, rendF, lc, jsMixins)
@inline private implicit def lcmod(a: LifeCycle[P, S, B, N]): ReactComponentB[P, S, B, N] =
copy(lc = a)
def configureSpec(modify: ReactComponentSpec[P, S, B, N] => Unit): ReactComponentB[P, S, B, N] =
lc.copy(configureSpec = modify)
def configure(fs: (ReactComponentB[P, S, B, N] => ReactComponentB[P, S, B, N])*): ReactComponentB[P, S, B, N] =
fs.foldLeft(this)((a,f) => f(a))
def getDefaultProps(p: => P): ReactComponentB[P, S, B, N] =
lc.copy(getDefaultProps = () => p)
def componentWillMount(f: ComponentScopeU[P, S, B] => Unit): ReactComponentB[P, S, B, N] =
lc.copy(componentWillMount = fcUnit(lc.componentWillMount, f))
def componentDidMount(f: ComponentScopeM[P, S, B, N] => Unit): ReactComponentB[P, S, B, N] =
lc.copy(componentDidMount = fcUnit(lc.componentDidMount, f))
def componentWillUnmount(f: ComponentScopeM[P, S, B, N] => Unit): ReactComponentB[P, S, B, N] =
lc.copy(componentWillUnmount = fcUnit(lc.componentWillUnmount, f))
def componentWillUpdate(f: (ComponentScopeWU[P, S, B, N], P, S) => Unit): ReactComponentB[P, S, B, N] =
lc.copy(componentWillUpdate = fcUnit(lc.componentWillUpdate, f))
def componentDidUpdate(f: (ComponentScopeM[P, S, B, N], P, S) => Unit): ReactComponentB[P, S, B, N] =
lc.copy(componentDidUpdate = fcUnit(lc.componentDidUpdate, f))
def componentWillReceiveProps(f: (ComponentScopeM[P, S, B, N], P) => Unit): ReactComponentB[P, S, B, N] =
lc.copy(componentWillReceiveProps = fcUnit(lc.componentWillReceiveProps, f))
def shouldComponentUpdate(f: (ComponentScopeM[P, S, B, N], P, S) => Boolean): ReactComponentB[P, S, B, N] =
lc.copy(shouldComponentUpdate = fcEither(lc.shouldComponentUpdate, f))
/**
* Install a pure-JS React mixin.
*
   * Beware: some mixins won't work correctly because they make assumptions that don't hold for Scala.
* If a mixin expects to inspect your props or state, forget about it; Scala-land owns that data.
*/
def mixinJS(mixins: JAny*): ReactComponentB[P, S, B, N] =
copy(jsMixins = jsMixins ++ mixins)
/**
* Modify the render function.
*/
def reRender(f: (ComponentScopeU[P, S, B] => ReactElement) => ComponentScopeU[P, S, B] => ReactElement): ReactComponentB[P, S, B, N] =
copy(rendF = f(rendF))
// ===================================================================================================================
@inline private def builder[C](cc: ReactComponentCU[P,S,B,N] => C) = new Builder(cc)
def propsRequired = builder(new ReactComponentC.ReqProps [P,S,B,N](_, undefined, undefined))
def propsDefault(p: => P) = builder(new ReactComponentC.DefaultProps[P,S,B,N](_, undefined, undefined, () => p))
def propsConst (p: => P) = builder(new ReactComponentC.ConstProps [P,S,B,N](_, undefined, undefined, () => p))
def propsUnit(implicit ev: Unit =:= P) = propsConst(ev(()))
def buildU (implicit ev: Unit =:= P) = propsUnit.build
final class Builder[C] private[ReactComponentB](cc: ReactComponentCU[P,S,B,N] => C) {
def buildSpec: ReactComponentSpec[P, S, B, N] = {
val spec = Dynamic.literal(
"displayName" -> name,
"backend" -> 0,
"render" -> (rendF: ThisFunction)
)
@inline def setFnPS[T, R](fn: UndefOr[(T, P, S) => R], name: String): Unit =
fn.foreach { f =>
val g = (t: T, p: WrapObj[P], s: WrapObj[S]) => f(t, p.v, s.v)
spec.updateDynamic(name)(g: ThisFunction)
}
val componentWillMount2 = (t: ComponentScopeU[P, S, B]) => {
val scopeB = t.asInstanceOf[BackendScope[P, S]]
t.asInstanceOf[Dynamic].updateDynamic("backend")(backF(scopeB).asInstanceOf[JAny])
lc.componentWillMount.foreach(g => g(t))
}
spec.updateDynamic("componentWillMount")(componentWillMount2: ThisFunction)
val initStateFn: ComponentScopeU[P, S, B] => WrapObj[S] = scope => WrapObj(initF(scope.props))
spec.updateDynamic("getInitialState")(initStateFn: ThisFunction)
lc.getDefaultProps.foreach(f => spec.updateDynamic("getDefaultProps")(f: Function))
lc.componentWillUnmount.foreach(f => spec.updateDynamic("componentWillUnmount")(f: ThisFunction))
lc.componentDidMount.foreach(f => spec.updateDynamic("componentDidMount")(f: ThisFunction))
setFnPS(lc.componentWillUpdate, "componentWillUpdate")
setFnPS(lc.componentDidUpdate, "componentDidUpdate")
setFnPS(lc.shouldComponentUpdate, "shouldComponentUpdate")
lc.componentWillReceiveProps.foreach { f =>
val g = (t: ComponentScopeM[P, S, B, N], p: WrapObj[P]) => f(t, p.v)
spec.updateDynamic("componentWillReceiveProps")(g: ThisFunction)
}
if (jsMixins.nonEmpty) {
val mixins = JArray(jsMixins: _*)
spec.updateDynamic("mixins")(mixins)
}
val spec2 = spec.asInstanceOf[ReactComponentSpec[P, S, B, N]]
lc.configureSpec.foreach(_(spec2))
spec2
}
def build: C =
cc(React.createFactory(React.createClass(buildSpec)))
}
}
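// A minimal sketch of the builder phases defined above. The element itself is supplied by the
// caller (so no vdom import is assumed here), and the component/prop names are hypothetical.
object ReactComponentBUsageSketch {
  def greetingComponent(renderGreeting: String => ReactElement) =
    ReactComponentB[String]("Greeting") // P[String]
      .stateless                        // PS[String, Unit]
      .noBackend                        // PSB[String, Unit, Unit]
      .render((props, _) => renderGreeting(props)) // PSBN[String, Unit, Unit]
      .build // uses the default TopNode/propsRequired implicits declared above
}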
|
chandu0101/scalajs-react
|
core/src/main/scala/japgolly/scalajs/react/ReactComponentB.scala
|
Scala
|
apache-2.0
| 11,156
|
package org.humanistika.exist.index.algolia
import DOMHelper._
import java.io.{ByteArrayInputStream, StringWriter}
import java.nio.charset.StandardCharsets
import javax.xml.namespace.QName
import javax.xml.parsers.DocumentBuilderFactory
import com.fasterxml.jackson.databind.ObjectMapper
import org.humanistika.exist.index.algolia.Serializer.{serializeElementForAttribute, serializeElementForObject}
import org.specs2.Specification
import org.w3c.dom.{Attr, Document, Element, Node}
import scalaz._
import Scalaz._
import cats.effect.{IO, Resource}
import scala.util.{Failure, Success}
class IndexableRootObjectJsonSerializerSpec extends Specification { def is = s2"""
This is a specification to check the JSON Serialization of IndexableRootObject
The basic JSON serialized result must
have a document id $e1
prefer the user specified document id $e2
have a nodeId (if provided) $e3
prefer the user specified node id $e4
The JSON serialized result attributes for DOM Attributes must
be constructable $e5
be float convertible $e6
be int convertible $e7
be boolean convertible $e8
allow multiple $e9
support arrays $e10
The JSON serialized result attributes for DOM Elements must
be constructable $e11
be float convertible $e12
be int convertible $e13
be boolean convertible $e14
allow multiple $e15
serialize all text nodes $e16
serialize all text nodes and not attributes $e17
support arrays $e18
support arrays (of text nodes and not attributes) $e19
be valid when only child text nodes are provided $e20
The JSON serialized result objects for DOM Attributes must
be the same as a result attribute $e21
support arrays $e22
The JSON serialized result objects for DOM Elements must
be constructable $e23
write nested elements $e24
write array $e25
write nested array $e26
support arrays $e27
be valid when only child text nodes are provided $e28
be valid when only attributes are provided $e29
be valid when only child text nodes and attributes are provided $e30
"""
def e1 = {
val indexableRootObject = IndexableRootObject("/db/a1", 5, 46, None, None, None, Seq.empty)
serializeJson(indexableRootObject) mustEqual """{"objectID":"5/46/0","collection":"/db/a1","documentID":46}"""
}
def e2 = {
val indexableRootObject = IndexableRootObject("/db/a1", 5, 46, Some("my-document-id"), None, None, Seq.empty)
serializeJson(indexableRootObject) mustEqual """{"objectID":"5/46/0","collection":"/db/a1","documentID":"my-document-id"}"""
}
def e3 = {
val indexableRootObject = IndexableRootObject("/db/a1", 6, 47, None, Some("1.2.2"), None, Seq.empty)
serializeJson(indexableRootObject) mustEqual """{"objectID":"6/47/1.2.2","collection":"/db/a1","documentID":47}"""
}
def e4 = {
val indexableRootObject = IndexableRootObject("/db/a1", 5, 46, None, None, Some("my-node-id"), Seq.empty)
serializeJson(indexableRootObject) mustEqual """{"objectID":"my-node-id","collection":"/db/a1","documentID":46}"""
}
def e5 = {
val attr1_kv = new AttributeKV(new QName("value"), "hello")
val attributes = Seq(-\\/(IndexableAttribute("attr1", Seq(IndexableValue("1.1", \\/-(attr1_kv))), LiteralTypeConfig.String)))
val indexableRootObject = IndexableRootObject("/db/a1", 7, 48, None, Some("1"), None, attributes)
serializeJson(indexableRootObject) mustEqual """{"objectID":"7/48/1","collection":"/db/a1","documentID":48,"attr1":"hello"}"""
}
def e6 = {
val attr1_kv = new AttributeKV(new QName("value"), "99.9")
val attributes = Seq(-\\/(IndexableAttribute("attr1", Seq(IndexableValue("1.1", \\/-(attr1_kv))), LiteralTypeConfig.Float)))
val indexableRootObject = IndexableRootObject("/db/a1", 2, 49, None, Some("1"), None, attributes)
serializeJson(indexableRootObject) mustEqual """{"objectID":"2/49/1","collection":"/db/a1","documentID":49,"attr1":99.9}"""
}
def e7 = {
val attr1_kv = new AttributeKV(new QName("value"), "1012")
val attributes = Seq(-\\/(IndexableAttribute("attr1", Seq(IndexableValue("1.1", \\/-(attr1_kv))), LiteralTypeConfig.Integer)))
val indexableRootObject = IndexableRootObject("/db/a1", 9, 50, None, Some("1"), None, attributes)
serializeJson(indexableRootObject) mustEqual """{"objectID":"9/50/1","collection":"/db/a1","documentID":50,"attr1":1012}"""
}
def e8 = {
val attr1_kv = new AttributeKV(new QName("value"), "true")
val attributes = Seq(-\\/(IndexableAttribute("attr1", Seq(IndexableValue("1.1", \\/-(attr1_kv))), LiteralTypeConfig.Boolean)))
val indexableRootObject = IndexableRootObject("/db/a1", 3, 51, None, Some("1"), None, attributes)
serializeJson(indexableRootObject) mustEqual """{"objectID":"3/51/1","collection":"/db/a1","documentID":51,"attr1":true}"""
}
def e9 = {
val attr1_kv = new AttributeKV(new QName("x"), "99.9")
val attr2_kv = new AttributeKV(new QName("y"), "11.4")
val attributes = Seq(-\\/(IndexableAttribute("attr1", Seq(IndexableValue("1.1", \\/-(attr1_kv))), LiteralTypeConfig.Float)), -\\/(IndexableAttribute("attr2", Seq(IndexableValue("1.2", \\/-(attr2_kv))), LiteralTypeConfig.Float)))
val indexableRootObject = IndexableRootObject("/db/a1", 3, 52, None, Some("1"), None, attributes)
serializeJson(indexableRootObject) mustEqual """{"objectID":"3/52/1","collection":"/db/a1","documentID":52,"attr1":99.9,"attr2":11.4}"""
}
def e10 = {
val attr1_1_kv = new AttributeKV(new QName("x"), "99.9")
val attr1_2_kv = new AttributeKV(new QName("x"), "202.2")
val attr2_1_kv = new AttributeKV(new QName("y"), "11.4")
val attr2_2_kv = new AttributeKV(new QName("y"), "10.2")
val attributes = Seq(
-\\/(IndexableAttribute("xx", Seq(IndexableValue("1.1", \\/-(attr1_1_kv)), IndexableValue("2.1", \\/-(attr1_2_kv))), LiteralTypeConfig.Float)),
-\\/(IndexableAttribute("yy", Seq(IndexableValue("1.2", \\/-(attr2_1_kv)), IndexableValue("2.2", \\/-(attr2_2_kv))), LiteralTypeConfig.Float))
)
val indexableRootObject = IndexableRootObject("/db/a1", 7, 42, None, Some("1"), None, attributes)
serializeJson(indexableRootObject) mustEqual """{"objectID":"7/42/1","collection":"/db/a1","documentID":42,"xx":[99.9,202.2],"yy":[11.4,10.2]}"""
}
def e11 = {
val elem1_kv = new ElementKV(new QName("w"), "hello")
val attributes = Seq(-\\/(IndexableAttribute("elem1", Seq(IndexableValue("1.1", -\\/(elem1_kv))), LiteralTypeConfig.String)))
val indexableRootObject = IndexableRootObject("/db/a1", 6, 48, None, Some("1"), None, attributes)
serializeJson(indexableRootObject) mustEqual """{"objectID":"6/48/1","collection":"/db/a1","documentID":48,"elem1":"hello"}"""
}
def e12 = {
val elem1_kv = new ElementKV(new QName("x"), "99.9")
val attributes = Seq(-\\/(IndexableAttribute("elem1", Seq(IndexableValue("1.1", -\\/(elem1_kv))), LiteralTypeConfig.Float)))
val indexableRootObject = IndexableRootObject("/db/a1", 7, 48, None, Some("1"), None, attributes)
serializeJson(indexableRootObject) mustEqual """{"objectID":"7/48/1","collection":"/db/a1","documentID":48,"elem1":99.9}"""
}
def e13 = {
val elem1_kv = new ElementKV(new QName("y"), "1012")
val attributes = Seq(-\\/(IndexableAttribute("elem1", Seq(IndexableValue("1.1", -\\/(elem1_kv))), LiteralTypeConfig.Integer)))
val indexableRootObject = IndexableRootObject("/db/a1", 2, 48, None, Some("1"), None, attributes)
serializeJson(indexableRootObject) mustEqual """{"objectID":"2/48/1","collection":"/db/a1","documentID":48,"elem1":1012}"""
}
def e14 = {
val elem1_kv = new ElementKV(new QName("z"), "true")
val attributes = Seq(-\\/(IndexableAttribute("elem1", Seq(IndexableValue("1.1", -\\/(elem1_kv))), LiteralTypeConfig.Boolean)))
val indexableRootObject = IndexableRootObject("/db/a1", 1, 48, None, Some("1"), None, attributes)
serializeJson(indexableRootObject) mustEqual """{"objectID":"1/48/1","collection":"/db/a1","documentID":48,"elem1":true}"""
}
def e15 = {
val elem1_kv = new ElementKV(new QName("x"), "99.9")
val elem2_kv = new ElementKV(new QName("y"), "11.3")
val attributes = Seq(-\\/(IndexableAttribute("elem1", Seq(IndexableValue("1.1", -\\/(elem1_kv))), LiteralTypeConfig.Float)), -\\/(IndexableAttribute("elem2", Seq(IndexableValue("1.2", -\\/(elem2_kv))), LiteralTypeConfig.Float)))
val indexableRootObject = IndexableRootObject("/db/a1", 7, 48, None, Some("1"), None, attributes)
serializeJson(indexableRootObject) mustEqual """{"objectID":"7/48/1","collection":"/db/a1","documentID":48,"elem1":99.9,"elem2":11.3}"""
}
def e16 = {
val elem1_kv = new ElementKV(new QName("x"), "hello world")
val attributes = Seq(-\\/(IndexableAttribute("elem1", Seq(IndexableValue("1.1", -\\/(elem1_kv))), LiteralTypeConfig.String)))
val indexableRootObject = IndexableRootObject("/db/a1", 23, 48, None, Some("1"), None, attributes)
serializeJson(indexableRootObject) mustEqual """{"objectID":"23/48/1","collection":"/db/a1","documentID":48,"elem1":"hello world"}"""
}
def e17 = {
val elem1 = elem(dom("""<x y="17">hello <b>world</b></x>"""), "x")
val elem1_kv = new ElementKV(new QName("x"), serializeElementForAttribute(elem1).valueOr(ts => throw ts.head))
val attributes = Seq(-\\/(IndexableAttribute("elem1", Seq(IndexableValue("1.1", -\\/(elem1_kv))), LiteralTypeConfig.String)))
val indexableRootObject = IndexableRootObject("/db/a1", 23, 48, None, Some("1"), None, attributes)
serializeJson(indexableRootObject) mustEqual """{"objectID":"23/48/1","collection":"/db/a1","documentID":48,"elem1":"hello world"}"""
}
def e18 = {
val dom1 = dom("""<loc><pos><x>123.4</x><y>-17.45</y></pos><pos><x>456.12</x><y>15.67</y></pos></loc>""")
val pos = elems(dom1, "pos")
val elem1_1_kv = new ElementKV(new QName("x"), serializeElementForAttribute(childElem(pos(0), "x")).valueOr(ts => throw ts.head))
val elem1_2_kv = new ElementKV(new QName("x"), serializeElementForAttribute(childElem(pos(1), "x")).valueOr(ts => throw ts.head))
val elem2_1_kv = new ElementKV(new QName("y"), serializeElementForAttribute(childElem(pos(0), "y")).valueOr(ts => throw ts.head))
val elem2_2_kv = new ElementKV(new QName("y"), serializeElementForAttribute(childElem(pos(1), "y")).valueOr(ts => throw ts.head))
val attributes = Seq(
-\\/(IndexableAttribute("xx", Seq(IndexableValue("1.1", -\\/(elem1_1_kv)), IndexableValue("2.1", -\\/(elem1_2_kv))), LiteralTypeConfig.Float)),
-\\/(IndexableAttribute("yy", Seq(IndexableValue("1.2", -\\/(elem2_1_kv)), IndexableValue("2.2", -\\/(elem2_2_kv))), LiteralTypeConfig.Float))
)
val indexableRootObject = IndexableRootObject("/db/a1", 7, 42, None, Some("1"), None, attributes)
serializeJson(indexableRootObject) mustEqual """{"objectID":"7/42/1","collection":"/db/a1","documentID":42,"xx":[123.4,456.12],"yy":[-17.45,15.67]}"""
}
def e19 = {
val dom1 = dom("""<loc><pos><x a="1">123.4</x><y b="2">-17.45</y></pos><pos><x a="8">456.12</x><y b="9">15.67</y></pos></loc>""")
val pos = elems(dom1, "pos")
val elem1_1_kv = new ElementKV(new QName("x"), serializeElementForAttribute(childElem(pos(0), "x")).valueOr(ts => throw ts.head))
val elem1_2_kv = new ElementKV(new QName("x"), serializeElementForAttribute(childElem(pos(1), "x")).valueOr(ts => throw ts.head))
val elem2_1_kv = new ElementKV(new QName("y"), serializeElementForAttribute(childElem(pos(0), "y")).valueOr(ts => throw ts.head))
val elem2_2_kv = new ElementKV(new QName("y"), serializeElementForAttribute(childElem(pos(1), "y")).valueOr(ts => throw ts.head))
val attributes = Seq(
-\\/(IndexableAttribute("xx", Seq(IndexableValue("1.1", -\\/(elem1_1_kv)), IndexableValue("2.1", -\\/(elem1_2_kv))), LiteralTypeConfig.Float)),
-\\/(IndexableAttribute("yy", Seq(IndexableValue("1.2", -\\/(elem2_1_kv)), IndexableValue("2.2", -\\/(elem2_2_kv))), LiteralTypeConfig.Float))
)
val indexableRootObject = IndexableRootObject("/db/a1", 7, 42, None, Some("1"), None, attributes)
serializeJson(indexableRootObject) mustEqual """{"objectID":"7/42/1","collection":"/db/a1","documentID":42,"xx":[123.4,456.12],"yy":[-17.45,15.67]}"""
}
def e20 = {
val dom1 = dom("""<parts><w><x>hello</x></w></parts>""")
val elem1 = firstElem(dom1, "x").get
val elem1_kv = new ElementKV(new QName("x"), serializeElementForAttribute(elem1).valueOr(ts => throw ts.head))
val attributes = Seq(
-\\/(IndexableAttribute("obj1", Seq(IndexableValue("1.1", -\\/(elem1_kv))), LiteralTypeConfig.String))
)
val indexableRootObject = IndexableRootObject("/db/a1", 6, 53, None, Some("1"), None, attributes)
serializeJson(indexableRootObject) mustEqual
"""{"objectID":"6/53/1","collection":"/db/a1","documentID":53,"obj1":"hello"}""".stripMargin
}
def e21 = {
val attr1_kv = new AttributeKV(new QName("value"), "hello")
val objects = Seq(\\/-(IndexableObject("obj1", Seq(IndexableValue("1.1", \\/-(attr1_kv))))))
val indexableRootObject = IndexableRootObject("/db/a1", 45, 48, None, Some("1"), None, objects)
serializeJson(indexableRootObject) mustEqual """{"objectID":"45/48/1","collection":"/db/a1","documentID":48,"obj1":"hello"}"""
}
def e22 = {
val attr1_1_kv = new AttributeKV(new QName("value"), "hello")
val attr1_2_kv = new AttributeKV(new QName("value"), "world")
val objects = Seq(\\/-(IndexableObject("obj1", Seq(
IndexableValue("1.1.1", \\/-(attr1_1_kv)),
IndexableValue("1.2.1", \\/-(attr1_2_kv))
))))
val indexableRootObject = IndexableRootObject("/db/a1", 46, 49, None, Some("1"), None, objects)
serializeJson(indexableRootObject) mustEqual """{"objectID":"46/49/1","collection":"/db/a1","documentID":49,"obj1":["hello","world"]}"""
}
def e23 = {
val elem1 = elem(dom("""<w><x>hello</x><y>world</y></w>"""), "w")
val elem1_kv = new ElementKV(new QName("w"), serializeElementForObject("obj1", Map.empty, Map.empty)(elem1).valueOr(ts => throw ts.head))
val objects = Seq(\\/-(IndexableObject("obj1", Seq(IndexableValue("1.1", -\\/(elem1_kv))))))
val indexableRootObject = IndexableRootObject("/db/a1", 5, 48, None, Some("1"), None, objects)
serializeJson(indexableRootObject) mustEqual """{"objectID":"5/48/1","collection":"/db/a1","documentID":48,"obj1":{"nodeId":"1.1","x":"hello","y":"world"}}"""
}
def e24 = {
val elem1 = elem(dom("""<w><x>hello</x><y><z>world</z><zz>again</zz></y></w>"""), "w")
val elem1_kv = new ElementKV(new QName("w"), serializeElementForObject("obj1", Map.empty, Map.empty)(elem1).valueOr(ts => throw ts.head))
val objects = Seq(\\/-(IndexableObject("obj1", Seq(IndexableValue("1.1", -\\/(elem1_kv))))))
val indexableRootObject = IndexableRootObject("/db/a1", 2, 49, None, Some("1"), None, objects)
serializeJson(indexableRootObject) mustEqual """{"objectID":"2/49/1","collection":"/db/a1","documentID":49,"obj1":{"nodeId":"1.1","x":"hello","y":{"z":"world","zz":"again"}}}"""
}
def e25 = {
val elem1 = elem(dom("""<w><x>hello</x><y>world</y><y>again</y></w>"""), "w")
val elem1_kv = new ElementKV(new QName("w"), serializeElementForObject("obj1", Map.empty, Map.empty)(elem1).valueOr(ts => throw ts.head))
val objects = Seq(\\/-(IndexableObject("obj1", Seq(IndexableValue("1.1", -\\/(elem1_kv))))))
val indexableRootObject = IndexableRootObject("/db/a1", 3, 50, None, Some("1"), None, objects)
serializeJson(indexableRootObject) mustEqual """{"objectID":"3/50/1","collection":"/db/a1","documentID":50,"obj1":{"nodeId":"1.1","x":"hello","y":["world","again"]}}"""
}
def e26 = {
val elem1 = elem(dom("""<w><x>hello</x><y><yy>world</yy><yy>again</yy></y></w>"""), "w")
val elem1_kv = new ElementKV(new QName("w"), serializeElementForObject("obj1", Map.empty, Map.empty)(elem1).valueOr(ts => throw ts.head))
val objects = Seq(\\/-(IndexableObject("obj1", Seq(IndexableValue("1.1", -\\/(elem1_kv))))))
val indexableRootObject = IndexableRootObject("/db/a1", 6, 51, None, Some("1"), None, objects)
serializeJson(indexableRootObject) mustEqual """{"objectID":"6/51/1","collection":"/db/a1","documentID":51,"obj1":{"nodeId":"1.1","x":"hello","y":{"yy":["world","again"]}}}"""
}
def e27 = {
val dom1 = dom("""<parts><w><x>hello</x><y><yy>world</yy><yy>again</yy></y></w><w><x>goodbye</x><y><yy>until</yy><yy>next time</yy></y></w></parts>""")
val ww = elems(dom1, "w")
val elem1_kv = new ElementKV(new QName("w"), serializeElementForObject("obj1", Map.empty, Map.empty)(ww(0)).valueOr(ts => throw ts.head))
val elem2_kv = new ElementKV(new QName("w"), serializeElementForObject("obj1", Map.empty, Map.empty)(ww(1)).valueOr(ts => throw ts.head))
val objects = Seq(\\/-(IndexableObject("obj1", Seq(
IndexableValue("1.1", -\\/(elem1_kv)),
IndexableValue("1.2", -\\/(elem2_kv))
))))
val indexableRootObject = IndexableRootObject("/db/a1", 6, 52, None, Some("1"), None, objects)
serializeJson(indexableRootObject) mustEqual """{"objectID":"6/52/1","collection":"/db/a1","documentID":52,"obj1":[{"nodeId":"1.1","x":"hello","y":{"yy":["world","again"]}},{"nodeId":"1.2","x":"goodbye","y":{"yy":["until","next time"]}}]}"""
}
def e28 = {
val dom1 = dom("""<parts><w><x>hello</x></w></parts>""")
val elem1 = firstElem(dom1, "x").get
val elem1_kv = new ElementKV(new QName("x"), serializeElementForObject("obj1", Map.empty, Map.empty)(elem1).valueOr(ts => throw ts.head))
val objects = Seq(\\/-(IndexableObject("obj1", Seq(
IndexableValue("1.1", -\\/(elem1_kv))
))))
val indexableRootObject = IndexableRootObject("/db/a1", 6, 53, None, Some("1"), None, objects)
serializeJson(indexableRootObject) mustEqual
"""{"objectID":"6/53/1","collection":"/db/a1","documentID":53,"obj1":{"nodeId":"1.1","#text":"hello"}}""".stripMargin
}
def e29 = {
val dom1 = dom("""<parts><w><x type="something"/></w></parts>""")
val elem1 = firstElem(dom1, "x").get
val elem1_kv = new ElementKV(new QName("x"), serializeElementForObject("obj1", Map.empty, Map.empty)(elem1).valueOr(ts => throw ts.head))
val objects = Seq(\\/-(IndexableObject("obj1", Seq(
IndexableValue("1.1", -\\/(elem1_kv))
))))
val indexableRootObject = IndexableRootObject("/db/a1", 6, 53, None, Some("1"), None, objects)
serializeJson(indexableRootObject) mustEqual
"""{"objectID":"6/53/1","collection":"/db/a1","documentID":53,"obj1":{"nodeId":"1.1","type":"something"}}""".stripMargin
}
def e30 = {
val dom1 = dom("""<parts><w><x type="something">hello</x></w></parts>""")
val elem = firstElem(dom1, "x").get
val elem1_kv = new ElementKV(new QName("x"), serializeElementForObject("obj1", Map.empty, Map.empty)(elem).valueOr(ts => throw ts.head))
val objects = Seq(\\/-(IndexableObject("obj1", Seq(
IndexableValue("1.1", -\\/(elem1_kv))
))))
val indexableRootObject = IndexableRootObject("/db/a1", 6, 53, None, Some("1"), None, objects)
serializeJson(indexableRootObject) mustEqual
"""{"objectID":"6/53/1","collection":"/db/a1","documentID":53,"obj1":{"nodeId":"1.1","type":"something","#text":"hello"}}""".stripMargin
}
private def serializeJson(indexableRootObject: IndexableRootObject): String = {
Resource.fromAutoCloseable(IO {new StringWriter()}).use { writer =>
IO {
val mapper = new ObjectMapper
mapper.writeValue(writer, indexableRootObject)
writer.toString
}
}.redeem(_.left, _.right)
.unsafeRunSync() match {
      case \/-(s) =>
        s
      case -\/(t) =>
throw t
}
}
}
|
BCDH/exist-algolia-index
|
src/test/scala/org/humanistika/exist/index/algolia/IndexableRootObjectJsonSerializerSpec.scala
|
Scala
|
gpl-3.0
| 19,750
|
import scala.collection.mutable.ListBuffer
def dfs(nums: Array[Int], target: Int, index: Int, path: ListBuffer[Int], res: ListBuffer[ListBuffer[Int]]): Unit = {
if (target == 0) {
res += path
return
}
for (i <- index to nums.length-1) {
val next = target - nums(i)
if (next < 0) {
return
}
dfs(nums, next, i, path ++ ListBuffer[Int](nums(i)), res)
}
}
def solve(candidates: Array[Int], target: Int): ListBuffer[ListBuffer[Int]] = {
val res = new ListBuffer[ListBuffer[Int]]()
val sorted_candidates = candidates.sorted
dfs(sorted_candidates, target, 0, new ListBuffer[Int](), res)
res
}
var a = Array(2, 3, 6, 7)
println(solve(a, 7)) // should be [[2, 2, 3], [7]]
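// One more quick check of the same routine on a different input; with sorted candidates
// (2, 3, 5) and target 8 the expected combinations are [[2, 2, 2, 2], [2, 3, 3], [3, 5]].
val b = Array(2, 3, 5)
println(solve(b, 8))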
|
marcosfede/algorithms
|
backtrack/combination_sum/combination_sum.scala
|
Scala
|
gpl-3.0
| 711
|
package com.codahale.jerkson.ser
import org.codehaus.jackson.JsonGenerator
import org.codehaus.jackson.map.{SerializerProvider, JsonSerializer}
import org.codehaus.jackson.map.annotate.JsonCachable
@JsonCachable
class MapSerializer extends JsonSerializer[collection.Map[_ ,_]] {
def serialize(map: collection.Map[_,_], json: JsonGenerator, provider: SerializerProvider) {
json.writeStartObject()
for ((key, value) <- map) {
provider.defaultSerializeField(key.toString, value, json)
}
json.writeEndObject()
}
}
|
cphylabs/jerkson-old
|
src/main/scala/com/codahale/jerkson/ser/MapSerializer.scala
|
Scala
|
mit
| 538
|
package ml.combust.mleap.bundle.ops.regression
import ml.combust.bundle.BundleContext
import ml.combust.bundle.dsl._
import ml.combust.bundle.op.OpModel
import ml.combust.mleap.bundle.ops.MleapOp
import ml.combust.mleap.core.regression.AFTSurvivalRegressionModel
import ml.combust.mleap.runtime.MleapContext
import ml.combust.mleap.runtime.transformer.regression.AFTSurvivalRegression
import org.apache.spark.ml.linalg.Vectors
/**
* Created by hollinwilkins on 12/28/16.
*/
class AFTSurvivalRegressionOp extends MleapOp[AFTSurvivalRegression, AFTSurvivalRegressionModel] {
override val Model: OpModel[MleapContext, AFTSurvivalRegressionModel] = new OpModel[MleapContext, AFTSurvivalRegressionModel] {
override val klazz: Class[AFTSurvivalRegressionModel] = classOf[AFTSurvivalRegressionModel]
override def opName: String = Bundle.BuiltinOps.regression.aft_survival_regression
override def store(model: Model, obj: AFTSurvivalRegressionModel)
(implicit context: BundleContext[MleapContext]): Model = {
model.withValue("coefficients", Value.vector(obj.coefficients.toArray)).
withValue("intercept", Value.double(obj.intercept)).
withValue("quantile_probabilities", Value.doubleList(obj.quantileProbabilities)).
withValue("scale", Value.double(obj.scale))
}
override def load(model: Model)
(implicit context: BundleContext[MleapContext]): AFTSurvivalRegressionModel = {
AFTSurvivalRegressionModel(coefficients = Vectors.dense(model.value("coefficients").getTensor[Double].toArray),
intercept = model.value("intercept").getDouble,
quantileProbabilities = model.value("quantile_probabilities").getDoubleList.toArray,
scale = model.value("scale").getDouble)
}
}
override def model(node: AFTSurvivalRegression): AFTSurvivalRegressionModel = node.model
}
|
combust/mleap
|
mleap-runtime/src/main/scala/ml/combust/mleap/bundle/ops/regression/AFTSurvivalRegressionOp.scala
|
Scala
|
apache-2.0
| 1,891
|
package org.zbizaca.electric
/**
* Represents a network based on Nodes and Links
* and an energy propagator
*
* Created by zbizaca on 10/12/16.
*/
trait Network {
def addPotential(nodeId: Long, potential:Result) = {
getNode(nodeId).map { node =>
node.addResult(potential)
}
}
  protected def getNode(nodeId: Long): Option[Node] = None
protected def processLink(linkId: Long, potential: Result)
}
case class StaticNetwork(
nodes: scala.collection.mutable.Map[Long, Node],
links: scala.collection.mutable.Map[Long, Link],
modifiedLinks: collection.mutable.Stack[Long],
modifiedNodes: collection.mutable.Stack[Long],
nodeCapacity: Int
) extends Network {
override protected def getNode(nodeId: Long): Option[Node] =
nodes.get(nodeId)
override protected def processLink(linkId: Long, potential: Result): Unit = {
}
}
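// A minimal construction sketch for the case class above; Node and Link are assumed to be
// defined elsewhere in this package, and the nodeCapacity value below is arbitrary.
object EmptyStaticNetworkExample {
  def empty: StaticNetwork = StaticNetwork(
    nodes = scala.collection.mutable.Map.empty[Long, Node],
    links = scala.collection.mutable.Map.empty[Long, Link],
    modifiedLinks = collection.mutable.Stack.empty[Long],
    modifiedNodes = collection.mutable.Stack.empty[Long],
    nodeCapacity = 16
  )
}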
|
zbizaca/toy-land
|
src/main/scala/org/zbizaca/electric/Network.scala
|
Scala
|
apache-2.0
| 1,016
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.io._
import java.lang.reflect.Field
import java.net.{BindException, ServerSocket, URI}
import java.nio.{ByteBuffer, ByteOrder}
import java.nio.charset.StandardCharsets.UTF_8
import java.text.DecimalFormatSymbols
import java.util.Locale
import java.util.concurrent.TimeUnit
import java.util.zip.GZIPOutputStream
import scala.collection.mutable.ListBuffer
import scala.util.Random
import com.google.common.io.Files
import org.apache.commons.io.IOUtils
import org.apache.commons.lang3.{JavaVersion, SystemUtils}
import org.apache.commons.math3.stat.inference.ChiSquareTest
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.{SparkConf, SparkException, SparkFunSuite, TaskContext}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Tests.IS_TESTING
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.network.util.{ByteUnit, JavaUtils}
import org.apache.spark.scheduler.SparkListener
import org.apache.spark.util.io.ChunkedByteBufferInputStream
class UtilsSuite extends SparkFunSuite with ResetSystemProperties with Logging {
test("timeConversion") {
// Test -1
assert(Utils.timeStringAsSeconds("-1") === -1)
// Test zero
assert(Utils.timeStringAsSeconds("0") === 0)
assert(Utils.timeStringAsSeconds("1") === 1)
assert(Utils.timeStringAsSeconds("1s") === 1)
assert(Utils.timeStringAsSeconds("1000ms") === 1)
assert(Utils.timeStringAsSeconds("1000000us") === 1)
assert(Utils.timeStringAsSeconds("1m") === TimeUnit.MINUTES.toSeconds(1))
assert(Utils.timeStringAsSeconds("1min") === TimeUnit.MINUTES.toSeconds(1))
assert(Utils.timeStringAsSeconds("1h") === TimeUnit.HOURS.toSeconds(1))
assert(Utils.timeStringAsSeconds("1d") === TimeUnit.DAYS.toSeconds(1))
assert(Utils.timeStringAsMs("1") === 1)
assert(Utils.timeStringAsMs("1ms") === 1)
assert(Utils.timeStringAsMs("1000us") === 1)
assert(Utils.timeStringAsMs("1s") === TimeUnit.SECONDS.toMillis(1))
assert(Utils.timeStringAsMs("1m") === TimeUnit.MINUTES.toMillis(1))
assert(Utils.timeStringAsMs("1min") === TimeUnit.MINUTES.toMillis(1))
assert(Utils.timeStringAsMs("1h") === TimeUnit.HOURS.toMillis(1))
assert(Utils.timeStringAsMs("1d") === TimeUnit.DAYS.toMillis(1))
// Test invalid strings
intercept[NumberFormatException] {
Utils.timeStringAsMs("600l")
}
intercept[NumberFormatException] {
Utils.timeStringAsMs("This breaks 600s")
}
intercept[NumberFormatException] {
Utils.timeStringAsMs("This breaks 600ds")
}
intercept[NumberFormatException] {
Utils.timeStringAsMs("600s This breaks")
}
intercept[NumberFormatException] {
Utils.timeStringAsMs("This 123s breaks")
}
}
test("Test byteString conversion") {
// Test zero
assert(Utils.byteStringAsBytes("0") === 0)
assert(Utils.byteStringAsGb("1") === 1)
assert(Utils.byteStringAsGb("1g") === 1)
assert(Utils.byteStringAsGb("1023m") === 0)
assert(Utils.byteStringAsGb("1024m") === 1)
assert(Utils.byteStringAsGb("1048575k") === 0)
assert(Utils.byteStringAsGb("1048576k") === 1)
assert(Utils.byteStringAsGb("1k") === 0)
assert(Utils.byteStringAsGb("1t") === ByteUnit.TiB.toGiB(1))
assert(Utils.byteStringAsGb("1p") === ByteUnit.PiB.toGiB(1))
assert(Utils.byteStringAsMb("1") === 1)
assert(Utils.byteStringAsMb("1m") === 1)
assert(Utils.byteStringAsMb("1048575b") === 0)
assert(Utils.byteStringAsMb("1048576b") === 1)
assert(Utils.byteStringAsMb("1023k") === 0)
assert(Utils.byteStringAsMb("1024k") === 1)
assert(Utils.byteStringAsMb("3645k") === 3)
assert(Utils.byteStringAsMb("1024gb") === 1048576)
assert(Utils.byteStringAsMb("1g") === ByteUnit.GiB.toMiB(1))
assert(Utils.byteStringAsMb("1t") === ByteUnit.TiB.toMiB(1))
assert(Utils.byteStringAsMb("1p") === ByteUnit.PiB.toMiB(1))
assert(Utils.byteStringAsKb("1") === 1)
assert(Utils.byteStringAsKb("1k") === 1)
assert(Utils.byteStringAsKb("1m") === ByteUnit.MiB.toKiB(1))
assert(Utils.byteStringAsKb("1g") === ByteUnit.GiB.toKiB(1))
assert(Utils.byteStringAsKb("1t") === ByteUnit.TiB.toKiB(1))
assert(Utils.byteStringAsKb("1p") === ByteUnit.PiB.toKiB(1))
assert(Utils.byteStringAsBytes("1") === 1)
assert(Utils.byteStringAsBytes("1k") === ByteUnit.KiB.toBytes(1))
assert(Utils.byteStringAsBytes("1m") === ByteUnit.MiB.toBytes(1))
assert(Utils.byteStringAsBytes("1g") === ByteUnit.GiB.toBytes(1))
assert(Utils.byteStringAsBytes("1t") === ByteUnit.TiB.toBytes(1))
assert(Utils.byteStringAsBytes("1p") === ByteUnit.PiB.toBytes(1))
// Overflow handling, 1073741824p exceeds Long.MAX_VALUE if converted straight to Bytes
// This demonstrates that we can have e.g. 1024^3 PiB without overflowing.
assert(Utils.byteStringAsGb("1073741824p") === ByteUnit.PiB.toGiB(1073741824))
assert(Utils.byteStringAsMb("1073741824p") === ByteUnit.PiB.toMiB(1073741824))
// Run this to confirm it doesn't throw an exception
assert(Utils.byteStringAsBytes("9223372036854775807") === 9223372036854775807L)
assert(ByteUnit.PiB.toPiB(9223372036854775807L) === 9223372036854775807L)
// Test overflow exception
intercept[IllegalArgumentException] {
// This value exceeds Long.MAX when converted to bytes
Utils.byteStringAsBytes("9223372036854775808")
}
// Test overflow exception
intercept[IllegalArgumentException] {
// This value exceeds Long.MAX when converted to TiB
ByteUnit.PiB.toTiB(9223372036854775807L)
}
// Test fractional string
intercept[NumberFormatException] {
Utils.byteStringAsMb("0.064")
}
// Test fractional string
intercept[NumberFormatException] {
Utils.byteStringAsMb("0.064m")
}
// Test invalid strings
intercept[NumberFormatException] {
Utils.byteStringAsBytes("500ub")
}
// Test invalid strings
intercept[NumberFormatException] {
Utils.byteStringAsBytes("This breaks 600b")
}
intercept[NumberFormatException] {
Utils.byteStringAsBytes("This breaks 600")
}
intercept[NumberFormatException] {
Utils.byteStringAsBytes("600gb This breaks")
}
intercept[NumberFormatException] {
Utils.byteStringAsBytes("This 123mb breaks")
}
}
test("bytesToString") {
assert(Utils.bytesToString(10) === "10.0 B")
assert(Utils.bytesToString(1500) === "1500.0 B")
assert(Utils.bytesToString(2000000) === "1953.1 KiB")
assert(Utils.bytesToString(2097152) === "2.0 MiB")
assert(Utils.bytesToString(2306867) === "2.2 MiB")
assert(Utils.bytesToString(5368709120L) === "5.0 GiB")
assert(Utils.bytesToString(5L * (1L << 40)) === "5.0 TiB")
assert(Utils.bytesToString(5L * (1L << 50)) === "5.0 PiB")
assert(Utils.bytesToString(5L * (1L << 60)) === "5.0 EiB")
assert(Utils.bytesToString(BigInt(1L << 11) * (1L << 60)) === "2.36E+21 B")
}
test("copyStream") {
// input array initialization
val bytes = Array.ofDim[Byte](9000)
Random.nextBytes(bytes)
val os = new ByteArrayOutputStream()
Utils.copyStream(new ByteArrayInputStream(bytes), os)
assert(os.toByteArray.toList.equals(bytes.toList))
}
test("copyStreamUpTo") {
// input array initialization
val bytes = Array.ofDim[Byte](1200)
Random.nextBytes(bytes)
val limit = 1000
// testing for inputLength less than, equal to and greater than limit
(limit - 2 to limit + 2).foreach { inputLength =>
val in = new ByteArrayInputStream(bytes.take(inputLength))
val mergedStream = Utils.copyStreamUpTo(in, limit)
try {
// Get a handle on the buffered data, to make sure memory gets freed once we read past the
// end of it. Need to use reflection to get handle on inner structures for this check
val byteBufferInputStream = if (mergedStream.isInstanceOf[ChunkedByteBufferInputStream]) {
assert(inputLength < limit)
mergedStream.asInstanceOf[ChunkedByteBufferInputStream]
} else {
assert(inputLength >= limit)
val sequenceStream = mergedStream.asInstanceOf[SequenceInputStream]
val fieldValue = getFieldValue(sequenceStream, "in")
assert(fieldValue.isInstanceOf[ChunkedByteBufferInputStream])
fieldValue.asInstanceOf[ChunkedByteBufferInputStream]
}
(0 until inputLength).foreach { idx =>
assert(bytes(idx) === mergedStream.read().asInstanceOf[Byte])
if (idx == limit) {
assert(byteBufferInputStream.chunkedByteBuffer === null)
}
}
assert(mergedStream.read() === -1)
assert(byteBufferInputStream.chunkedByteBuffer === null)
} finally {
JavaUtils.closeQuietly(mergedStream)
JavaUtils.closeQuietly(in)
}
}
}
private def getFieldValue(obj: AnyRef, fieldName: String): Any = {
val field: Field = obj.getClass().getDeclaredField(fieldName)
if (field.isAccessible()) {
field.get(obj)
} else {
field.setAccessible(true)
val result = field.get(obj)
field.setAccessible(false)
result
}
}
test("memoryStringToMb") {
assert(Utils.memoryStringToMb("1") === 0)
assert(Utils.memoryStringToMb("1048575") === 0)
assert(Utils.memoryStringToMb("3145728") === 3)
assert(Utils.memoryStringToMb("1024k") === 1)
assert(Utils.memoryStringToMb("5000k") === 4)
assert(Utils.memoryStringToMb("4024k") === Utils.memoryStringToMb("4024K"))
assert(Utils.memoryStringToMb("1024m") === 1024)
assert(Utils.memoryStringToMb("5000m") === 5000)
assert(Utils.memoryStringToMb("4024m") === Utils.memoryStringToMb("4024M"))
assert(Utils.memoryStringToMb("2g") === 2048)
assert(Utils.memoryStringToMb("3g") === Utils.memoryStringToMb("3G"))
assert(Utils.memoryStringToMb("2t") === 2097152)
assert(Utils.memoryStringToMb("3t") === Utils.memoryStringToMb("3T"))
}
test("splitCommandString") {
assert(Utils.splitCommandString("") === Seq())
assert(Utils.splitCommandString("a") === Seq("a"))
assert(Utils.splitCommandString("aaa") === Seq("aaa"))
assert(Utils.splitCommandString("a b c") === Seq("a", "b", "c"))
assert(Utils.splitCommandString(" a b\t c ") === Seq("a", "b", "c"))
assert(Utils.splitCommandString("a 'b c'") === Seq("a", "b c"))
assert(Utils.splitCommandString("a 'b c' d") === Seq("a", "b c", "d"))
assert(Utils.splitCommandString("'b c'") === Seq("b c"))
assert(Utils.splitCommandString("a \"b c\"") === Seq("a", "b c"))
assert(Utils.splitCommandString("a \"b c\" d") === Seq("a", "b c", "d"))
assert(Utils.splitCommandString("\"b c\"") === Seq("b c"))
assert(Utils.splitCommandString("a 'b\" c' \"d' e\"") === Seq("a", "b\" c", "d' e"))
assert(Utils.splitCommandString("a\t'b\nc'\nd") === Seq("a", "b\nc", "d"))
assert(Utils.splitCommandString("a \"b\\\\c\"") === Seq("a", "b\\c"))
assert(Utils.splitCommandString("a \"b\\\"c\"") === Seq("a", "b\"c"))
assert(Utils.splitCommandString("a 'b\\\"c'") === Seq("a", "b\\\"c"))
assert(Utils.splitCommandString("'a'b") === Seq("ab"))
assert(Utils.splitCommandString("'a''b'") === Seq("ab"))
assert(Utils.splitCommandString("\"a\"b") === Seq("ab"))
assert(Utils.splitCommandString("\"a\"\"b\"") === Seq("ab"))
assert(Utils.splitCommandString("''") === Seq(""))
assert(Utils.splitCommandString("\"\"") === Seq(""))
}
test("string formatting of time durations") {
val second = 1000
val minute = second * 60
val hour = minute * 60
def str: (Long) => String = Utils.msDurationToString(_)
val sep = new DecimalFormatSymbols(Locale.US).getDecimalSeparator
assert(str(123) === "123 ms")
assert(str(second) === "1" + sep + "0 s")
assert(str(second + 462) === "1" + sep + "5 s")
assert(str(hour) === "1" + sep + "00 h")
assert(str(minute) === "1" + sep + "0 m")
assert(str(minute + 4 * second + 34) === "1" + sep + "1 m")
assert(str(10 * hour + minute + 4 * second) === "10" + sep + "02 h")
assert(str(10 * hour + 59 * minute + 59 * second + 999) === "11" + sep + "00 h")
}
def getSuffix(isCompressed: Boolean): String = {
if (isCompressed) {
".gz"
} else {
""
}
}
def writeLogFile(path: String, content: Array[Byte]): Unit = {
val outputStream = if (path.endsWith(".gz")) {
new GZIPOutputStream(new FileOutputStream(path))
} else {
new FileOutputStream(path)
}
IOUtils.write(content, outputStream)
outputStream.close()
content.size
}
private val workerConf = new SparkConf()
def testOffsetBytes(isCompressed: Boolean): Unit = {
withTempDir { tmpDir2 =>
val suffix = getSuffix(isCompressed)
val f1Path = tmpDir2 + "/f1" + suffix
writeLogFile(f1Path, "1\n2\n3\n4\n5\n6\n7\n8\n9\n".getBytes(UTF_8))
val f1Length = Utils.getFileLength(new File(f1Path), workerConf)
// Read first few bytes
assert(Utils.offsetBytes(f1Path, f1Length, 0, 5) === "1\n2\n3")
// Read some middle bytes
assert(Utils.offsetBytes(f1Path, f1Length, 4, 11) === "3\n4\n5\n6")
// Read last few bytes
assert(Utils.offsetBytes(f1Path, f1Length, 12, 18) === "7\n8\n9\n")
// Read some nonexistent bytes in the beginning
assert(Utils.offsetBytes(f1Path, f1Length, -5, 5) === "1\n2\n3")
// Read some nonexistent bytes at the end
assert(Utils.offsetBytes(f1Path, f1Length, 12, 22) === "7\n8\n9\n")
// Read some nonexistent bytes on both ends
assert(Utils.offsetBytes(f1Path, f1Length, -3, 25) === "1\n2\n3\n4\n5\n6\n7\n8\n9\n")
}
}
test("reading offset bytes of a file") {
testOffsetBytes(isCompressed = false)
}
test("reading offset bytes of a file (compressed)") {
testOffsetBytes(isCompressed = true)
}
def testOffsetBytesMultipleFiles(isCompressed: Boolean): Unit = {
withTempDir { tmpDir =>
val suffix = getSuffix(isCompressed)
val files = (1 to 3).map(i =>
new File(tmpDir, i.toString + suffix)) :+ new File(tmpDir, "4")
writeLogFile(files(0).getAbsolutePath, "0123456789".getBytes(UTF_8))
writeLogFile(files(1).getAbsolutePath, "abcdefghij".getBytes(UTF_8))
writeLogFile(files(2).getAbsolutePath, "ABCDEFGHIJ".getBytes(UTF_8))
writeLogFile(files(3).getAbsolutePath, "9876543210".getBytes(UTF_8))
val fileLengths = files.map(Utils.getFileLength(_, workerConf))
// Read first few bytes in the 1st file
assert(Utils.offsetBytes(files, fileLengths, 0, 5) === "01234")
// Read bytes within the 1st file
assert(Utils.offsetBytes(files, fileLengths, 5, 8) === "567")
// Read bytes across 1st and 2nd file
assert(Utils.offsetBytes(files, fileLengths, 8, 18) === "89abcdefgh")
// Read bytes across 1st, 2nd and 3rd file
assert(Utils.offsetBytes(files, fileLengths, 5, 24) === "56789abcdefghijABCD")
// Read bytes across 3rd and 4th file
assert(Utils.offsetBytes(files, fileLengths, 25, 35) === "FGHIJ98765")
// Read some nonexistent bytes in the beginning
assert(Utils.offsetBytes(files, fileLengths, -5, 18) === "0123456789abcdefgh")
// Read some nonexistent bytes at the end
assert(Utils.offsetBytes(files, fileLengths, 18, 45) === "ijABCDEFGHIJ9876543210")
// Read some nonexistent bytes on both ends
assert(Utils.offsetBytes(files, fileLengths, -5, 45) ===
"0123456789abcdefghijABCDEFGHIJ9876543210")
}
}
test("reading offset bytes across multiple files") {
testOffsetBytesMultipleFiles(isCompressed = false)
}
test("reading offset bytes across multiple files (compressed)") {
testOffsetBytesMultipleFiles(isCompressed = true)
}
test("deserialize long value") {
val testval : Long = 9730889947L
val bbuf = ByteBuffer.allocate(8)
assert(bbuf.hasArray)
bbuf.order(ByteOrder.BIG_ENDIAN)
bbuf.putLong(testval)
assert(bbuf.array.length === 8)
assert(Utils.deserializeLongValue(bbuf.array) === testval)
}
test("writeByteBuffer should not change ByteBuffer position") {
// Test a buffer with an underlying array, for both writeByteBuffer methods.
val testBuffer = ByteBuffer.wrap(Array[Byte](1, 2, 3, 4))
assert(testBuffer.hasArray)
val bytesOut = new ByteBufferOutputStream(4096)
Utils.writeByteBuffer(testBuffer, bytesOut)
assert(testBuffer.position() === 0)
val dataOut = new DataOutputStream(bytesOut)
Utils.writeByteBuffer(testBuffer, dataOut: DataOutput)
assert(testBuffer.position() === 0)
// Test a buffer without an underlying array, for both writeByteBuffer methods.
val testDirectBuffer = ByteBuffer.allocateDirect(8)
assert(!testDirectBuffer.hasArray())
Utils.writeByteBuffer(testDirectBuffer, bytesOut)
assert(testDirectBuffer.position() === 0)
Utils.writeByteBuffer(testDirectBuffer, dataOut: DataOutput)
assert(testDirectBuffer.position() === 0)
}
test("get iterator size") {
val empty = Seq[Int]()
assert(Utils.getIteratorSize(empty.toIterator) === 0L)
val iterator = Iterator.range(0, 5)
assert(Utils.getIteratorSize(iterator) === 5L)
}
test("getIteratorZipWithIndex") {
val iterator = Utils.getIteratorZipWithIndex(Iterator(0, 1, 2), -1L + Int.MaxValue)
assert(iterator.toArray === Array(
(0, -1L + Int.MaxValue), (1, 0L + Int.MaxValue), (2, 1L + Int.MaxValue)
))
intercept[IllegalArgumentException] {
Utils.getIteratorZipWithIndex(Iterator(0, 1, 2), -1L)
}
}
test("SPARK-35907: createDirectory") {
val tmpDir = new File(System.getProperty("java.io.tmpdir"))
val testDir = new File(tmpDir, "createDirectory" + System.nanoTime())
val testDirPath = testDir.getCanonicalPath
// 1. Directory created successfully
val scenario1 = new File(testDir, "scenario1")
assert(Utils.createDirectory(scenario1))
assert(scenario1.exists())
assert(Utils.createDirectory(testDirPath, "scenario1").exists())
// 2. Illegal file path
val scenario2 = new File(testDir, "scenario2" * 256)
assert(!Utils.createDirectory(scenario2))
assert(!scenario2.exists())
assertThrows[IOException](Utils.createDirectory(testDirPath, "scenario2" * 256))
// 3. The parent directory cannot read
val scenario3 = new File(testDir, "scenario3")
assert(testDir.canRead)
assert(testDir.setReadable(false))
assert(Utils.createDirectory(scenario3))
assert(scenario3.exists())
assert(Utils.createDirectory(testDirPath, "scenario3").exists())
assert(testDir.setReadable(true))
// 4. The parent directory cannot write
val scenario4 = new File(testDir, "scenario4")
assert(testDir.canWrite)
assert(testDir.setWritable(false))
assert(!Utils.createDirectory(scenario4))
assert(!scenario4.exists())
assertThrows[IOException](Utils.createDirectory(testDirPath, "scenario4"))
assert(testDir.setWritable(true))
// 5. The parent directory cannot execute
val scenario5 = new File(testDir, "scenario5")
assert(testDir.canExecute)
assert(testDir.setExecutable(false))
assert(!Utils.createDirectory(scenario5))
assert(!scenario5.exists())
assertThrows[IOException](Utils.createDirectory(testDirPath, "scenario5"))
assert(testDir.setExecutable(true))
// The following 3 scenarios are only for the method: createDirectory(File)
// 6. Symbolic link
val scenario6 = java.nio.file.Files.createSymbolicLink(new File(testDir, "scenario6")
.toPath, scenario1.toPath).toFile
assert(!Utils.createDirectory(scenario6))
assert(scenario6.exists())
// 7. Directory exists
assert(scenario1.exists())
assert(Utils.createDirectory(scenario1))
assert(scenario1.exists())
// 8. Not directory
val scenario8 = new File(testDir.getCanonicalPath + File.separator + "scenario8")
assert(scenario8.createNewFile())
assert(!Utils.createDirectory(scenario8))
}
test("doesDirectoryContainFilesNewerThan") {
// create some temporary directories and files
withTempDir { parent =>
// The parent directory has two child directories
val child1: File = Utils.createTempDir(parent.getCanonicalPath)
val child2: File = Utils.createTempDir(parent.getCanonicalPath)
val child3: File = Utils.createTempDir(child1.getCanonicalPath)
// set the last modified time of child1 to 30 secs old
child1.setLastModified(System.currentTimeMillis() - (1000 * 30))
// although child1 is old, child2 is still new so return true
assert(Utils.doesDirectoryContainAnyNewFiles(parent, 5))
child2.setLastModified(System.currentTimeMillis - (1000 * 30))
assert(Utils.doesDirectoryContainAnyNewFiles(parent, 5))
parent.setLastModified(System.currentTimeMillis - (1000 * 30))
// although parent and its immediate children are old, child3 is still new,
// so we expect a full recursive search for new files to return true.
assert(Utils.doesDirectoryContainAnyNewFiles(parent, 5))
child3.setLastModified(System.currentTimeMillis - (1000 * 30))
assert(!Utils.doesDirectoryContainAnyNewFiles(parent, 5))
}
}
test("resolveURI") {
def assertResolves(before: String, after: String): Unit = {
// This should test only single paths
assert(before.split(",").length === 1)
def resolve(uri: String): String = Utils.resolveURI(uri).toString
assert(resolve(before) === after)
assert(resolve(after) === after)
// Repeated invocations of resolveURI should yield the same result
assert(resolve(resolve(after)) === after)
assert(resolve(resolve(resolve(after))) === after)
}
val rawCwd = System.getProperty("user.dir")
val cwd = if (Utils.isWindows) s"/$rawCwd".replace("\\", "/") else rawCwd
assertResolves("hdfs:/root/spark.jar", "hdfs:/root/spark.jar")
assertResolves("hdfs:///root/spark.jar#app.jar", "hdfs:///root/spark.jar#app.jar")
assertResolves("spark.jar", s"file:$cwd/spark.jar")
assertResolves("spark.jar#app.jar", s"file:$cwd/spark.jar#app.jar")
assertResolves("path to/file.txt", s"file:$cwd/path%20to/file.txt")
if (Utils.isWindows) {
assertResolves("C:\\path\\to\\file.txt", "file:/C:/path/to/file.txt")
assertResolves("C:\\path to\\file.txt", "file:/C:/path%20to/file.txt")
}
assertResolves("file:/C:/path/to/file.txt", "file:/C:/path/to/file.txt")
assertResolves("file:///C:/path/to/file.txt", "file:///C:/path/to/file.txt")
assertResolves("file:/C:/file.txt#alias.txt", "file:/C:/file.txt#alias.txt")
assertResolves("file:foo", "file:foo")
assertResolves("file:foo:baby", "file:foo:baby")
}
test("resolveURIs with multiple paths") {
def assertResolves(before: String, after: String): Unit = {
def resolve(uri: String): String = Utils.resolveURIs(uri)
assert(resolve(before) === after)
assert(resolve(after) === after)
// Repeated invocations of resolveURIs should yield the same result
assert(resolve(resolve(after)) === after)
assert(resolve(resolve(resolve(after))) === after)
}
val rawCwd = System.getProperty("user.dir")
val cwd = if (Utils.isWindows) s"/$rawCwd".replace("\\", "/") else rawCwd
assertResolves("jar1,jar2", s"file:$cwd/jar1,file:$cwd/jar2")
assertResolves("file:/jar1,file:/jar2", "file:/jar1,file:/jar2")
assertResolves("hdfs:/jar1,file:/jar2,jar3", s"hdfs:/jar1,file:/jar2,file:$cwd/jar3")
assertResolves("hdfs:/jar1,file:/jar2,jar3,jar4#jar5,path to/jar6",
s"hdfs:/jar1,file:/jar2,file:$cwd/jar3,file:$cwd/jar4#jar5,file:$cwd/path%20to/jar6")
if (Utils.isWindows) {
assertResolves("""hdfs:/jar1,file:/jar2,jar3,C:\pi.py#py.pi,C:\path to\jar4""",
s"hdfs:/jar1,file:/jar2,file:$cwd/jar3,file:/C:/pi.py%23py.pi,file:/C:/path%20to/jar4")
}
assertResolves(",jar1,jar2", s"file:$cwd/jar1,file:$cwd/jar2")
// Also test resolveURIs with single paths
assertResolves("hdfs:/root/spark.jar", "hdfs:/root/spark.jar")
}
test("nonLocalPaths") {
assert(Utils.nonLocalPaths("spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("file:/spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("file:///spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("local:/spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("local:///spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("hdfs:/spark.jar") === Array("hdfs:/spark.jar"))
assert(Utils.nonLocalPaths("hdfs:///spark.jar") === Array("hdfs:///spark.jar"))
assert(Utils.nonLocalPaths("file:/spark.jar,local:/smart.jar,family.py") === Array.empty)
assert(Utils.nonLocalPaths("local:/spark.jar,file:/smart.jar,family.py") === Array.empty)
assert(Utils.nonLocalPaths("hdfs:/spark.jar,s3:/smart.jar") ===
Array("hdfs:/spark.jar", "s3:/smart.jar"))
assert(Utils.nonLocalPaths("hdfs:/spark.jar,path to/a.jar,s3:/smart.jar") ===
Array("hdfs:/spark.jar", "s3:/smart.jar"))
assert(Utils.nonLocalPaths("hdfs:/spark.jar,s3:/smart.jar,local.py,file:/hello/pi.py") ===
Array("hdfs:/spark.jar", "s3:/smart.jar"))
assert(Utils.nonLocalPaths("local.py,hdfs:/spark.jar,file:/hello/pi.py,s3:/smart.jar") ===
Array("hdfs:/spark.jar", "s3:/smart.jar"))
// Test Windows paths
assert(Utils.nonLocalPaths("C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("file:/C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("file:///C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("local:/C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("local:///C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("hdfs:/a.jar,C:/my.jar,s3:/another.jar", testWindows = true) ===
Array("hdfs:/a.jar", "s3:/another.jar"))
assert(Utils.nonLocalPaths("D:/your.jar,hdfs:/a.jar,s3:/another.jar", testWindows = true) ===
Array("hdfs:/a.jar", "s3:/another.jar"))
assert(Utils.nonLocalPaths("hdfs:/a.jar,s3:/another.jar,e:/our.jar", testWindows = true) ===
Array("hdfs:/a.jar", "s3:/another.jar"))
}
test("isBindCollision") {
// Negatives
assert(!Utils.isBindCollision(null))
assert(!Utils.isBindCollision(new Exception))
assert(!Utils.isBindCollision(new Exception(new Exception)))
assert(!Utils.isBindCollision(new Exception(new BindException)))
// Positives
val be = new BindException("Random Message")
val be1 = new Exception(new BindException("Random Message"))
val be2 = new Exception(new Exception(new BindException("Random Message")))
assert(Utils.isBindCollision(be))
assert(Utils.isBindCollision(be1))
assert(Utils.isBindCollision(be2))
// Actual bind exception
var server1: ServerSocket = null
var server2: ServerSocket = null
try {
server1 = new java.net.ServerSocket(0)
server2 = new java.net.ServerSocket(server1.getLocalPort)
} catch {
case e: Exception =>
assert(e.isInstanceOf[java.net.BindException])
assert(Utils.isBindCollision(e))
} finally {
Option(server1).foreach(_.close())
Option(server2).foreach(_.close())
}
}
// Test for using the util function to change our log levels.
test("log4j log level change") {
val current = org.apache.log4j.Logger.getRootLogger().getLevel()
try {
Utils.setLogLevel(org.apache.log4j.Level.ALL)
assert(log.isInfoEnabled())
Utils.setLogLevel(org.apache.log4j.Level.ERROR)
assert(!log.isInfoEnabled())
assert(log.isErrorEnabled())
} finally {
// Best effort at undoing changes this test made.
Utils.setLogLevel(current)
}
}
test("deleteRecursively") {
val tempDir1 = Utils.createTempDir()
assert(tempDir1.exists())
Utils.deleteRecursively(tempDir1)
assert(!tempDir1.exists())
val tempDir2 = Utils.createTempDir()
val sourceFile1 = new File(tempDir2, "foo.txt")
Files.touch(sourceFile1)
assert(sourceFile1.exists())
Utils.deleteRecursively(sourceFile1)
assert(!sourceFile1.exists())
val tempDir3 = new File(tempDir2, "subdir")
assert(tempDir3.mkdir())
val sourceFile2 = new File(tempDir3, "bar.txt")
Files.touch(sourceFile2)
assert(sourceFile2.exists())
Utils.deleteRecursively(tempDir2)
assert(!tempDir2.exists())
assert(!tempDir3.exists())
assert(!sourceFile2.exists())
}
test("loading properties from file") {
withTempDir { tmpDir =>
val outFile = File.createTempFile("test-load-spark-properties", "test", tmpDir)
System.setProperty("spark.test.fileNameLoadB", "2")
Files.write("spark.test.fileNameLoadA true\n" +
"spark.test.fileNameLoadB 1\n", outFile, UTF_8)
val properties = Utils.getPropertiesFromFile(outFile.getAbsolutePath)
properties
.filter { case (k, v) => k.startsWith("spark.")}
.foreach { case (k, v) => sys.props.getOrElseUpdate(k, v)}
val sparkConf = new SparkConf
assert(sparkConf.getBoolean("spark.test.fileNameLoadA", false))
assert(sparkConf.getInt("spark.test.fileNameLoadB", 1) === 2)
}
}
test("timeIt with prepare") {
var cnt = 0
val prepare = () => {
cnt += 1
Thread.sleep(1000)
}
val time = Utils.timeIt(2)({}, Some(prepare))
require(cnt === 2, "prepare should be called twice")
require(time < TimeUnit.MILLISECONDS.toNanos(500), "preparation time should not count")
}
test("fetch hcfs dir") {
withTempDir { tempDir =>
val sourceDir = new File(tempDir, "source-dir")
sourceDir.mkdir()
val innerSourceDir = Utils.createTempDir(root = sourceDir.getPath)
val sourceFile = File.createTempFile("someprefix", "somesuffix", innerSourceDir)
val targetDir = new File(tempDir, "target-dir")
Files.write("some text", sourceFile, UTF_8)
val path =
if (Utils.isWindows) {
new Path("file:/" + sourceDir.getAbsolutePath.replace("\\", "/"))
} else {
new Path("file://" + sourceDir.getAbsolutePath)
}
val conf = new Configuration()
val fs = Utils.getHadoopFileSystem(path.toString, conf)
assert(!targetDir.isDirectory())
Utils.fetchHcfsFile(path, targetDir, fs, new SparkConf(), conf, false)
assert(targetDir.isDirectory())
// Copy again to make sure it doesn't error if the dir already exists.
Utils.fetchHcfsFile(path, targetDir, fs, new SparkConf(), conf, false)
val destDir = new File(targetDir, sourceDir.getName())
assert(destDir.isDirectory())
val destInnerDir = new File(destDir, innerSourceDir.getName)
assert(destInnerDir.isDirectory())
val destInnerFile = new File(destInnerDir, sourceFile.getName)
assert(destInnerFile.isFile())
val filePath =
if (Utils.isWindows) {
new Path("file:/" + sourceFile.getAbsolutePath.replace("\\", "/"))
} else {
new Path("file://" + sourceFile.getAbsolutePath)
}
val testFileDir = new File(tempDir, "test-filename")
val testFileName = "testFName"
val testFilefs = Utils.getHadoopFileSystem(filePath.toString, conf)
Utils.fetchHcfsFile(filePath, testFileDir, testFilefs, new SparkConf(),
conf, false, Some(testFileName))
val newFileName = new File(testFileDir, testFileName)
assert(newFileName.isFile())
}
}
test("shutdown hook manager") {
val manager = new SparkShutdownHookManager()
val output = new ListBuffer[Int]()
val hook1 = manager.add(1, () => output += 1)
manager.add(3, () => output += 3)
manager.add(2, () => output += 2)
manager.add(4, () => output += 4)
manager.add(Int.MinValue, () => output += Int.MinValue)
manager.add(Int.MinValue, () => output += Int.MinValue)
manager.add(Int.MaxValue, () => output += Int.MaxValue)
manager.add(Int.MaxValue, () => output += Int.MaxValue)
manager.remove(hook1)
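// hooks run in descending priority order; the removed priority-1 hook must not fire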
manager.runAll()
assert(output.toList === List(Int.MaxValue, Int.MaxValue, 4, 3, 2, Int.MinValue, Int.MinValue))
}
test("isInDirectory") {
val tmpDir = new File(sys.props("java.io.tmpdir"))
val parentDir = new File(tmpDir, "parent-dir")
val childDir1 = new File(parentDir, "child-dir-1")
val childDir1b = new File(parentDir, "child-dir-1b")
val childFile1 = new File(parentDir, "child-file-1.txt")
val childDir2 = new File(childDir1, "child-dir-2")
val childDir2b = new File(childDir1, "child-dir-2b")
val childFile2 = new File(childDir1, "child-file-2.txt")
val childFile3 = new File(childDir2, "child-file-3.txt")
val nullFile: File = null
parentDir.mkdir()
childDir1.mkdir()
childDir1b.mkdir()
childDir2.mkdir()
childDir2b.mkdir()
childFile1.createNewFile()
childFile2.createNewFile()
childFile3.createNewFile()
// Identity
assert(Utils.isInDirectory(parentDir, parentDir))
assert(Utils.isInDirectory(childDir1, childDir1))
assert(Utils.isInDirectory(childDir2, childDir2))
// Valid ancestor-descendant pairs
assert(Utils.isInDirectory(parentDir, childDir1))
assert(Utils.isInDirectory(parentDir, childFile1))
assert(Utils.isInDirectory(parentDir, childDir2))
assert(Utils.isInDirectory(parentDir, childFile2))
assert(Utils.isInDirectory(parentDir, childFile3))
assert(Utils.isInDirectory(childDir1, childDir2))
assert(Utils.isInDirectory(childDir1, childFile2))
assert(Utils.isInDirectory(childDir1, childFile3))
assert(Utils.isInDirectory(childDir2, childFile3))
// Inverted ancestor-descendant pairs should fail
assert(!Utils.isInDirectory(childDir1, parentDir))
assert(!Utils.isInDirectory(childDir2, parentDir))
assert(!Utils.isInDirectory(childDir2, childDir1))
assert(!Utils.isInDirectory(childFile1, parentDir))
assert(!Utils.isInDirectory(childFile2, parentDir))
assert(!Utils.isInDirectory(childFile3, parentDir))
assert(!Utils.isInDirectory(childFile2, childDir1))
assert(!Utils.isInDirectory(childFile3, childDir1))
assert(!Utils.isInDirectory(childFile3, childDir2))
// Non-existent files or directories should fail
assert(!Utils.isInDirectory(parentDir, new File(parentDir, "one.txt")))
assert(!Utils.isInDirectory(parentDir, new File(parentDir, "one/two.txt")))
assert(!Utils.isInDirectory(parentDir, new File(parentDir, "one/two/three.txt")))
// Siblings should fail
assert(!Utils.isInDirectory(childDir1, childDir1b))
assert(!Utils.isInDirectory(childDir1, childFile1))
assert(!Utils.isInDirectory(childDir2, childDir2b))
assert(!Utils.isInDirectory(childDir2, childFile2))
// Null files should fail without throwing NPE
assert(!Utils.isInDirectory(parentDir, nullFile))
assert(!Utils.isInDirectory(childFile3, nullFile))
assert(!Utils.isInDirectory(nullFile, parentDir))
assert(!Utils.isInDirectory(nullFile, childFile3))
}
test("circular buffer: if nothing was written to the buffer, display nothing") {
val buffer = new CircularBuffer(4)
assert(buffer.toString === "")
}
test("circular buffer: if the buffer isn't full, print only the contents written") {
val buffer = new CircularBuffer(10)
val stream = new PrintStream(buffer, true, UTF_8.name())
stream.print("test")
assert(buffer.toString === "test")
}
test("circular buffer: data written == size of the buffer") {
val buffer = new CircularBuffer(4)
val stream = new PrintStream(buffer, true, UTF_8.name())
// fill the buffer to its exact size so that it just hits overflow
stream.print("test")
assert(buffer.toString === "test")
// add more data to the buffer
stream.print("12")
assert(buffer.toString === "st12")
}
test("circular buffer: multiple overflow") {
val buffer = new CircularBuffer(25)
val stream = new PrintStream(buffer, true, UTF_8.name())
stream.print("test circular test circular test circular test circular test circular")
assert(buffer.toString === "st circular test circular")
}
test("isDynamicAllocationEnabled") {
val conf = new SparkConf()
conf.set("spark.master", "yarn")
conf.set(SUBMIT_DEPLOY_MODE, "client")
assert(Utils.isDynamicAllocationEnabled(conf) === false)
assert(Utils.isDynamicAllocationEnabled(
conf.set(DYN_ALLOCATION_ENABLED, false)) === false)
assert(Utils.isDynamicAllocationEnabled(
conf.set(DYN_ALLOCATION_ENABLED, true)))
assert(Utils.isDynamicAllocationEnabled(
conf.set("spark.executor.instances", "1")))
assert(Utils.isDynamicAllocationEnabled(
conf.set("spark.executor.instances", "0")))
assert(Utils.isDynamicAllocationEnabled(conf.set("spark.master", "local")) === false)
assert(Utils.isDynamicAllocationEnabled(conf.set(DYN_ALLOCATION_TESTING, true)))
}
test("getDynamicAllocationInitialExecutors") {
val conf = new SparkConf()
assert(Utils.getDynamicAllocationInitialExecutors(conf) === 0)
assert(Utils.getDynamicAllocationInitialExecutors(
conf.set(DYN_ALLOCATION_MIN_EXECUTORS, 3)) === 3)
assert(Utils.getDynamicAllocationInitialExecutors( // should use minExecutors
conf.set("spark.executor.instances", "2")) === 3)
assert(Utils.getDynamicAllocationInitialExecutors( // should use executor.instances
conf.set("spark.executor.instances", "4")) === 4)
assert(Utils.getDynamicAllocationInitialExecutors( // should use executor.instances
conf.set(DYN_ALLOCATION_INITIAL_EXECUTORS, 3)) === 4)
assert(Utils.getDynamicAllocationInitialExecutors( // should use initialExecutors
conf.set(DYN_ALLOCATION_INITIAL_EXECUTORS, 5)) === 5)
assert(Utils.getDynamicAllocationInitialExecutors( // should use minExecutors
conf.set(DYN_ALLOCATION_INITIAL_EXECUTORS, 2)
.set("spark.executor.instances", "1")) === 3)
}
test("Set Spark CallerContext") {
val context = "test"
new CallerContext(context).setCurrentContext()
if (CallerContext.callerContextSupported) {
val callerContext = Utils.classForName("org.apache.hadoop.ipc.CallerContext")
assert(s"SPARK_$context" ===
callerContext.getMethod("getCurrent").invoke(null).toString)
}
}
test("encodeFileNameToURIRawPath") {
assert(Utils.encodeFileNameToURIRawPath("abc") === "abc")
assert(Utils.encodeFileNameToURIRawPath("abc xyz") === "abc%20xyz")
assert(Utils.encodeFileNameToURIRawPath("abc:xyz") === "abc:xyz")
}
test("decodeFileNameInURI") {
assert(Utils.decodeFileNameInURI(new URI("files:///abc/xyz")) === "xyz")
assert(Utils.decodeFileNameInURI(new URI("files:///abc")) === "abc")
assert(Utils.decodeFileNameInURI(new URI("files:///abc%20xyz")) === "abc xyz")
}
test("Kill process") {
// Verify that we can terminate a process even if it is in a bad state. This is only run
// on UNIX since it does some OS specific things to verify the correct behavior.
if (SystemUtils.IS_OS_UNIX) {
def getPid(p: Process): Int = {
val f = p.getClass().getDeclaredField("pid")
f.setAccessible(true)
f.get(p).asInstanceOf[Int]
}
def pidExists(pid: Int): Boolean = {
val p = Runtime.getRuntime.exec(s"kill -0 $pid")
p.waitFor()
p.exitValue() == 0
}
def signal(pid: Int, s: String): Unit = {
val p = Runtime.getRuntime.exec(s"kill -$s $pid")
p.waitFor()
}
// Start up a process that runs 'sleep 10'. Terminate the process and assert that it takes
// less than the 5 second timeout and that the process is no longer there.
val startTimeNs = System.nanoTime()
val process = new ProcessBuilder("sleep", "10").start()
val pid = getPid(process)
try {
assert(pidExists(pid))
val terminated = Utils.terminateProcess(process, 5000)
assert(terminated.isDefined)
process.waitFor(5, TimeUnit.SECONDS)
val durationNs = System.nanoTime() - startTimeNs
assert(durationNs < TimeUnit.SECONDS.toNanos(5))
assert(!pidExists(pid))
} finally {
// Forcibly kill the test process just in case.
signal(pid, "SIGKILL")
}
if (SystemUtils.isJavaVersionAtLeast(JavaVersion.JAVA_1_8)) {
// We'll make sure that forcibly terminating a process works by
// creating a very misbehaving process. It ignores SIGTERM and has been SIGSTOPed. On
// older versions of java, this will *not* terminate.
val file = File.createTempFile("temp-file-name", ".tmp")
file.deleteOnExit()
val cmd =
s"""
|#!/bin/bash
|trap "" SIGTERM
|sleep 10
""".stripMargin
Files.write(cmd.getBytes(UTF_8), file)
file.getAbsoluteFile.setExecutable(true)
val process = new ProcessBuilder(file.getAbsolutePath).start()
val pid = getPid(process)
assert(pidExists(pid))
try {
signal(pid, "SIGSTOP")
val startNs = System.nanoTime()
val terminated = Utils.terminateProcess(process, 5000)
assert(terminated.isDefined)
process.waitFor(5, TimeUnit.SECONDS)
val duration = System.nanoTime() - startNs
// add a little extra time to allow a force kill to finish
assert(duration < TimeUnit.SECONDS.toNanos(6))
assert(!pidExists(pid))
} finally {
signal(pid, "SIGKILL")
}
}
}
}
test("chi square test of randomizeInPlace") {
// Parameters
val arraySize = 10
val numTrials = 1000
val threshold = 0.05
val seed = 1L
// results(i)(j): how many times Utils.randomize moves an element from position j to position i
val results = Array.ofDim[Long](arraySize, arraySize)
// This must be seeded because even a fair random process will fail this test with
// probability equal to the value of `threshold`, which is inconvenient for a unit test.
val rand = new java.util.Random(seed)
val range = 0 until arraySize
for {
_ <- 0 until numTrials
trial = Utils.randomizeInPlace(range.toArray, rand)
i <- range
} results(i)(trial(i)) += 1L
val chi = new ChiSquareTest()
// We expect an even distribution; this array will be rescaled by `chiSquareTest`
val expected = Array.fill(arraySize * arraySize)(1.0)
val observed = results.flatten
// Performs Pearson's chi-squared test. Using the sum-of-squares as the test statistic, gives
// the probability of a uniform distribution producing results as extreme as `observed`
val pValue = chi.chiSquareTest(expected, observed)
assert(pValue > threshold)
}
test("redact sensitive information") {
val sparkConf = new SparkConf
// Set some secret keys
val secretKeys = Seq(
"spark.executorEnv.HADOOP_CREDSTORE_PASSWORD",
"spark.hadoop.fs.s3a.access.key",
"spark.my.password",
"spark.my.sECreT")
secretKeys.foreach { key => sparkConf.set(key, "sensitive_value") }
// Set a non-secret key
sparkConf.set("spark.regular.property", "regular_value")
sparkConf.set("spark.hadoop.fs.s3a.access_key", "regular_value")
// Set a property with a regular key but secret in the value
sparkConf.set("spark.sensitive.property", "has_secret_in_value")
// Redact sensitive information
val redactedConf = Utils.redact(sparkConf, sparkConf.getAll).toMap
// Assert that secret information got redacted while the regular property remained the same
secretKeys.foreach { key => assert(redactedConf(key) === Utils.REDACTION_REPLACEMENT_TEXT) }
assert(redactedConf("spark.regular.property") === "regular_value")
assert(redactedConf("spark.sensitive.property") === Utils.REDACTION_REPLACEMENT_TEXT)
assert(redactedConf("spark.hadoop.fs.s3a.access.key") === Utils.REDACTION_REPLACEMENT_TEXT)
assert(redactedConf("spark.hadoop.fs.s3a.access_key") === "regular_value")
}
test("redact sensitive information in command line args") {
val sparkConf = new SparkConf
// Set some secret keys
val secretKeysWithSameValue = Seq(
"spark.executorEnv.HADOOP_CREDSTORE_PASSWORD",
"spark.my.password",
"spark.my.sECreT")
val cmdArgsForSecretWithSameValue = secretKeysWithSameValue.map(s => s"-D$s=sensitive_value")
val secretKeys = secretKeysWithSameValue ++ Seq("spark.your.password")
val cmdArgsForSecret = cmdArgsForSecretWithSameValue ++ Seq(
// Have '=' twice
"-Dspark.your.password=sensitive=sensitive2"
)
val ignoredArgs = Seq(
// starts with -D but no assignment
"-Ddummy",
// contains a secret value but does not start with -D (we don't care about this case for now)
"spark.my.password=sensitive_value",
// edge case: not started with -D, but matched pattern after first '-'
"--Dspark.my.password=sensitive_value")
val cmdArgs = cmdArgsForSecret ++ ignoredArgs ++ Seq(
// Set a non-secret key
"-Dspark.regular.property=regular_value",
// Set a property with a regular key but secret in the value
"-Dspark.sensitive.property=has_secret_in_value")
// Redact sensitive information
val redactedCmdArgs = Utils.redactCommandLineArgs(sparkConf, cmdArgs)
// These arguments should be left as they were:
// 1) argument without -D option is not applied
// 2) -D option without key-value assignment is not applied
assert(ignoredArgs.forall(redactedCmdArgs.contains))
val redactedCmdArgMap = redactedCmdArgs.filterNot(ignoredArgs.contains).map { cmd =>
val keyValue = cmd.substring("-D".length).split("=")
keyValue(0) -> keyValue.tail.mkString("=")
}.toMap
// Assert that secret information got redacted while the regular property remained the same
secretKeys.foreach { key =>
assert(redactedCmdArgMap(key) === Utils.REDACTION_REPLACEMENT_TEXT)
}
assert(redactedCmdArgMap("spark.regular.property") === "regular_value")
assert(redactedCmdArgMap("spark.sensitive.property") === Utils.REDACTION_REPLACEMENT_TEXT)
}
test("redact sensitive information in sequence of key value pairs") {
val secretKeys = Some("my.password".r)
assert(Utils.redact(secretKeys, Seq(("spark.my.password", "12345"))) ===
Seq(("spark.my.password", Utils.REDACTION_REPLACEMENT_TEXT)))
assert(Utils.redact(secretKeys, Seq(("anything", "spark.my.password=12345"))) ===
Seq(("anything", Utils.REDACTION_REPLACEMENT_TEXT)))
assert(Utils.redact(secretKeys, Seq((999, "spark.my.password=12345"))) ===
Seq((999, Utils.REDACTION_REPLACEMENT_TEXT)))
// Do not redact when value type is not string
assert(Utils.redact(secretKeys, Seq(("my.password", 12345))) ===
Seq(("my.password", 12345)))
}
test("tryWithSafeFinally") {
var e = new Error("Block0")
val finallyBlockError = new Error("Finally Block")
var isErrorOccurred = false
// if the try and finally blocks throw different exception instances
try {
Utils.tryWithSafeFinally { throw e }(finallyBlock = { throw finallyBlockError })
} catch {
case t: Error =>
assert(t.getSuppressed.head == finallyBlockError)
isErrorOccurred = true
}
assert(isErrorOccurred)
// if the try and finally blocks throw the same exception instance then it should not
// try to add it to its own suppressed list (which would throw IllegalArgumentException)
e = new Error("Block1")
isErrorOccurred = false
try {
Utils.tryWithSafeFinally { throw e }(finallyBlock = { throw e })
} catch {
case t: Error =>
assert(t.getSuppressed.length == 0)
isErrorOccurred = true
}
assert(isErrorOccurred)
// if the try throws the exception and finally doesn't throw exception
e = new Error("Block2")
isErrorOccurred = false
try {
Utils.tryWithSafeFinally { throw e }(finallyBlock = {})
} catch {
case t: Error =>
assert(t.getSuppressed.length == 0)
isErrorOccurred = true
}
assert(isErrorOccurred)
// if the try and finally block don't throw exception
Utils.tryWithSafeFinally {}(finallyBlock = {})
}
test("tryWithSafeFinallyAndFailureCallbacks") {
var e = new Error("Block0")
val catchBlockError = new Error("Catch Block")
val finallyBlockError = new Error("Finally Block")
var isErrorOccurred = false
TaskContext.setTaskContext(TaskContext.empty())
// if the try, catch and finally blocks throw different exception instances
try {
Utils.tryWithSafeFinallyAndFailureCallbacks { throw e }(
catchBlock = { throw catchBlockError }, finallyBlock = { throw finallyBlockError })
} catch {
case t: Error =>
assert(t.getSuppressed.head == catchBlockError)
assert(t.getSuppressed.last == finallyBlockError)
isErrorOccurred = true
}
assert(isErrorOccurred)
// if the try, catch and finally blocks throw the same exception instance then it should not
// try to add it to its own suppressed list (which would throw IllegalArgumentException)
e = new Error("Block1")
isErrorOccurred = false
try {
Utils.tryWithSafeFinallyAndFailureCallbacks { throw e }(catchBlock = { throw e },
finallyBlock = { throw e })
} catch {
case t: Error =>
assert(t.getSuppressed.length == 0)
isErrorOccurred = true
}
assert(isErrorOccurred)
// if the try throws the exception, catch and finally don't throw exceptions
e = new Error("Block2")
isErrorOccurred = false
try {
Utils.tryWithSafeFinallyAndFailureCallbacks { throw e }(catchBlock = {}, finallyBlock = {})
} catch {
case t: Error =>
assert(t.getSuppressed.length == 0)
isErrorOccurred = true
}
assert(isErrorOccurred)
// if the try, catch and finally blocks don't throw exceptions
Utils.tryWithSafeFinallyAndFailureCallbacks {}(catchBlock = {}, finallyBlock = {})
TaskContext.unset
}
test("load extensions") {
val extensions = Seq(
classOf[SimpleExtension],
classOf[ExtensionWithConf],
classOf[UnregisterableExtension]).map(_.getName())
val conf = new SparkConf(false)
val instances = Utils.loadExtensions(classOf[Object], extensions, conf)
assert(instances.size === 2)
assert(instances.count(_.isInstanceOf[SimpleExtension]) === 1)
val extWithConf = instances.find(_.isInstanceOf[ExtensionWithConf])
.map(_.asInstanceOf[ExtensionWithConf])
.get
assert(extWithConf.conf eq conf)
class NestedExtension { }
val invalid = Seq(classOf[NestedExtension].getName())
intercept[SparkException] {
Utils.loadExtensions(classOf[Object], invalid, conf)
}
val error = Seq(classOf[ExtensionWithError].getName())
intercept[IllegalArgumentException] {
Utils.loadExtensions(classOf[Object], error, conf)
}
val wrongType = Seq(classOf[ListenerImpl].getName())
intercept[IllegalArgumentException] {
Utils.loadExtensions(classOf[Seq[_]], wrongType, conf)
}
}
test("check Kubernetes master URL") {
val k8sMasterURLHttps = Utils.checkAndGetK8sMasterUrl("k8s://https://host:port")
assert(k8sMasterURLHttps === "k8s://https://host:port")
val k8sMasterURLHttp = Utils.checkAndGetK8sMasterUrl("k8s://http://host:port")
assert(k8sMasterURLHttp === "k8s://http://host:port")
val k8sMasterURLWithoutScheme = Utils.checkAndGetK8sMasterUrl("k8s://127.0.0.1:8443")
assert(k8sMasterURLWithoutScheme === "k8s://https://127.0.0.1:8443")
val k8sMasterURLWithoutScheme2 = Utils.checkAndGetK8sMasterUrl("k8s://127.0.0.1")
assert(k8sMasterURLWithoutScheme2 === "k8s://https://127.0.0.1")
intercept[IllegalArgumentException] {
Utils.checkAndGetK8sMasterUrl("k8s:https://host:port")
}
intercept[IllegalArgumentException] {
Utils.checkAndGetK8sMasterUrl("k8s://foo://host:port")
}
intercept[IllegalArgumentException] {
Utils.checkAndGetK8sMasterUrl("k8s:///https://host:port")
}
}
test("stringHalfWidth") {
// scalastyle:off nonascii
assert(Utils.stringHalfWidth(null) == 0)
assert(Utils.stringHalfWidth("") == 0)
assert(Utils.stringHalfWidth("ab c") == 4)
assert(Utils.stringHalfWidth("1098") == 4)
assert(Utils.stringHalfWidth("mø") == 2)
assert(Utils.stringHalfWidth("γύρ") == 3)
assert(Utils.stringHalfWidth("pê") == 2)
assert(Utils.stringHalfWidth("ー") == 2)
assert(Utils.stringHalfWidth("测") == 2)
assert(Utils.stringHalfWidth("か") == 2)
assert(Utils.stringHalfWidth("걸") == 2)
assert(Utils.stringHalfWidth("à") == 1)
assert(Utils.stringHalfWidth("焼") == 2)
assert(Utils.stringHalfWidth("羍む") == 4)
assert(Utils.stringHalfWidth("뺭ᾘ") == 3)
assert(Utils.stringHalfWidth("\u0967\u0968\u0969") == 3)
// scalastyle:on nonascii
}
test("trimExceptCRLF standalone") {
val crlfSet = Set("\r", "\n")
val nonPrintableButCRLF = (0 to 32).map(_.toChar.toString).toSet -- crlfSet
// identity for CRLF
crlfSet.foreach { s => Utils.trimExceptCRLF(s) === s }
// empty for other non-printables
nonPrintableButCRLF.foreach { s => assert(Utils.trimExceptCRLF(s) === "") }
// identity for a printable string
assert(Utils.trimExceptCRLF("a") === "a")
// identity for strings with CRLF
crlfSet.foreach { s =>
assert(Utils.trimExceptCRLF(s"${s}a") === s"${s}a")
assert(Utils.trimExceptCRLF(s"a${s}") === s"a${s}")
assert(Utils.trimExceptCRLF(s"b${s}b") === s"b${s}b")
}
// trim nonPrintableButCRLF except when inside a string
nonPrintableButCRLF.foreach { s =>
assert(Utils.trimExceptCRLF(s"${s}a") === "a")
assert(Utils.trimExceptCRLF(s"a${s}") === "a")
assert(Utils.trimExceptCRLF(s"b${s}b") === s"b${s}b")
}
}
test("pathsToMetadata") {
val paths = (0 to 4).map(i => new Path(s"path$i"))
assert(Utils.buildLocationMetadata(paths, 10) == "(5 paths)[...]")
// 11 is the minimum threshold to print at least one path
assert(Utils.buildLocationMetadata(paths, 11) == "(5 paths)[path0, ...]")
// 11 + 5 + 2 = 18 is the minimum threshold to print two paths
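// (the extra 5 + 2 is the length of "path1" plus the ", " separator)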
assert(Utils.buildLocationMetadata(paths, 18) == "(5 paths)[path0, path1, ...]")
}
test("checkHost supports both IPV4 and IPV6") {
// IPV4 ips
Utils.checkHost("0.0.0.0")
var e: AssertionError = intercept[AssertionError] {
Utils.checkHost("0.0.0.0:0")
}
assert(e.getMessage.contains("Expected hostname or IP but got 0.0.0.0:0"))
e = intercept[AssertionError] {
Utils.checkHost("0.0.0.0:")
}
assert(e.getMessage.contains("Expected hostname or IP but got 0.0.0.0:"))
// IPV6 ips
Utils.checkHost("[::1]")
e = intercept[AssertionError] {
Utils.checkHost("[::1]:0")
}
assert(e.getMessage.contains("Expected hostname or IPv6 IP enclosed in [] but got [::1]:0"))
e = intercept[AssertionError] {
Utils.checkHost("[::1]:")
}
assert(e.getMessage.contains("Expected hostname or IPv6 IP enclosed in [] but got [::1]:"))
// hostname
Utils.checkHost("localhost")
e = intercept[AssertionError] {
Utils.checkHost("localhost:0")
}
assert(e.getMessage.contains("Expected hostname or IP but got localhost:0"))
e = intercept[AssertionError] {
Utils.checkHost("localhost:")
}
assert(e.getMessage.contains("Expected hostname or IP but got localhost:"))
}
test("checkHostPort support IPV6 and IPV4") {
// IPV4 ips
Utils.checkHostPort("0.0.0.0:0")
var e: AssertionError = intercept[AssertionError] {
Utils.checkHostPort("0.0.0.0")
}
assert(e.getMessage.contains("Expected host and port but got 0.0.0.0"))
// IPV6 ips
Utils.checkHostPort("[::1]:0")
e = intercept[AssertionError] {
Utils.checkHostPort("[::1]")
}
assert(e.getMessage.contains("Expected host and port but got [::1]"))
// hostname
Utils.checkHostPort("localhost:0")
e = intercept[AssertionError] {
Utils.checkHostPort("localhost")
}
assert(e.getMessage.contains("Expected host and port but got localhost"))
}
test("parseHostPort support IPV6 and IPV4") {
// IPV4 ips
var hostnamePort = Utils.parseHostPort("0.0.0.0:80")
assert(hostnamePort._1.equals("0.0.0.0"))
assert(hostnamePort._2 === 80)
hostnamePort = Utils.parseHostPort("0.0.0.0")
assert(hostnamePort._1.equals("0.0.0.0"))
assert(hostnamePort._2 === 0)
hostnamePort = Utils.parseHostPort("0.0.0.0:")
assert(hostnamePort._1.equals("0.0.0.0"))
assert(hostnamePort._2 === 0)
// IPV6 ips
hostnamePort = Utils.parseHostPort("[::1]:80")
assert(hostnamePort._1.equals("[::1]"))
assert(hostnamePort._2 === 80)
hostnamePort = Utils.parseHostPort("[::1]")
assert(hostnamePort._1.equals("[::1]"))
assert(hostnamePort._2 === 0)
hostnamePort = Utils.parseHostPort("[::1]:")
assert(hostnamePort._1.equals("[::1]"))
assert(hostnamePort._2 === 0)
// hostname
hostnamePort = Utils.parseHostPort("localhost:80")
assert(hostnamePort._1.equals("localhost"))
assert(hostnamePort._2 === 80)
hostnamePort = Utils.parseHostPort("localhost")
assert(hostnamePort._1.equals("localhost"))
assert(hostnamePort._2 === 0)
hostnamePort = Utils.parseHostPort("localhost:")
assert(hostnamePort._1.equals("localhost"))
assert(hostnamePort._2 === 0)
}
test("executorOffHeapMemorySizeAsMb when MEMORY_OFFHEAP_ENABLED is false") {
val executorOffHeapMemory = Utils.executorOffHeapMemorySizeAsMb(new SparkConf())
assert(executorOffHeapMemory == 0)
}
test("executorOffHeapMemorySizeAsMb when MEMORY_OFFHEAP_ENABLED is true") {
val offHeapMemoryInMB = 50
val offHeapMemory: Long = offHeapMemoryInMB * 1024 * 1024
val sparkConf = new SparkConf()
.set(MEMORY_OFFHEAP_ENABLED, true)
.set(MEMORY_OFFHEAP_SIZE, offHeapMemory)
val executorOffHeapMemory = Utils.executorOffHeapMemorySizeAsMb(sparkConf)
assert(executorOffHeapMemory == offHeapMemoryInMB)
}
test("executorMemoryOverhead when MEMORY_OFFHEAP_ENABLED is true, " +
"but MEMORY_OFFHEAP_SIZE not config scene") {
val sparkConf = new SparkConf()
.set(MEMORY_OFFHEAP_ENABLED, true)
val expected =
s"${MEMORY_OFFHEAP_SIZE.key} must be > 0 when ${MEMORY_OFFHEAP_ENABLED.key} == true"
val message = intercept[IllegalArgumentException] {
Utils.executorOffHeapMemorySizeAsMb(sparkConf)
}.getMessage
assert(message.contains(expected))
}
test("isPushBasedShuffleEnabled when PUSH_BASED_SHUFFLE_ENABLED " +
"and SHUFFLE_SERVICE_ENABLED are both set to true in YARN mode with maxAttempts set to 1") {
val conf = new SparkConf()
assert(Utils.isPushBasedShuffleEnabled(conf) === false)
conf.set(PUSH_BASED_SHUFFLE_ENABLED, true)
conf.set(IS_TESTING, false)
assert(Utils.isPushBasedShuffleEnabled(conf) === false)
conf.set(SHUFFLE_SERVICE_ENABLED, true)
conf.set(SparkLauncher.SPARK_MASTER, "yarn")
conf.set("spark.yarn.maxAttempts", "1")
assert(Utils.isPushBasedShuffleEnabled(conf) === true)
conf.set("spark.yarn.maxAttempts", "2")
assert(Utils.isPushBasedShuffleEnabled(conf) === true)
}
}
private class SimpleExtension
private class ExtensionWithConf(val conf: SparkConf)
private class UnregisterableExtension {
throw new UnsupportedOperationException()
}
private class ExtensionWithError {
throw new IllegalArgumentException()
}
private class ListenerImpl extends SparkListener
| jiangxb1987/spark | core/src/test/scala/org/apache/spark/util/UtilsSuite.scala | Scala | apache-2.0 | 60,775 |
/*
Copyright 2014 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.serialization.macros.impl.ordered_serialization.providers
import scala.language.experimental.macros
import scala.reflect.macros.Context
import java.io.InputStream
import com.twitter.scalding._
import com.twitter.scalding.serialization.macros.impl.ordered_serialization.{ CompileTimeLengthTypes, ProductLike, TreeOrderedBuf }
import CompileTimeLengthTypes._
import com.twitter.scalding.serialization.OrderedSerialization
import scala.reflect.ClassTag
import scala.{ collection => sc }
import scala.collection.{ immutable => sci }
sealed trait ShouldSort
case object DoSort extends ShouldSort
case object NoSort extends ShouldSort
sealed trait MaybeArray
case object IsArray extends MaybeArray
case object NotArray extends MaybeArray
object TraversablesOrderedBuf {
def dispatch(c: Context)(buildDispatcher: => PartialFunction[c.Type, TreeOrderedBuf[c.type]]): PartialFunction[c.Type, TreeOrderedBuf[c.type]] = {
case tpe if tpe.erasure =:= c.universe.typeOf[Iterable[Any]] => TraversablesOrderedBuf(c)(buildDispatcher, tpe, NoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[sci.Iterable[Any]] => TraversablesOrderedBuf(c)(buildDispatcher, tpe, NoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[List[Any]] => TraversablesOrderedBuf(c)(buildDispatcher, tpe, NoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[sci.List[Any]] => TraversablesOrderedBuf(c)(buildDispatcher, tpe, NoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[Seq[Any]] => TraversablesOrderedBuf(c)(buildDispatcher, tpe, NoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[sc.Seq[Any]] => TraversablesOrderedBuf(c)(buildDispatcher, tpe, NoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[sci.Seq[Any]] => TraversablesOrderedBuf(c)(buildDispatcher, tpe, NoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[Vector[Any]] => TraversablesOrderedBuf(c)(buildDispatcher, tpe, NoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[sci.Vector[Any]] => TraversablesOrderedBuf(c)(buildDispatcher, tpe, NoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[IndexedSeq[Any]] => TraversablesOrderedBuf(c)(buildDispatcher, tpe, NoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[sci.IndexedSeq[Any]] => TraversablesOrderedBuf(c)(buildDispatcher, tpe, NoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[sci.Queue[Any]] => TraversablesOrderedBuf(c)(buildDispatcher, tpe, NoSort, NotArray)
// Arrays are special in that the erasure doesn't do anything
case tpe if tpe.typeSymbol == c.universe.typeOf[Array[Any]].typeSymbol => TraversablesOrderedBuf(c)(buildDispatcher, tpe, NoSort, IsArray)
// Set is non-covariant, so its erasure is Set[_]; we must compare erasures on both sides for sets
case tpe if tpe.erasure =:= c.universe.typeOf[Set[Any]].erasure => TraversablesOrderedBuf(c)(buildDispatcher, tpe, DoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[sc.Set[Any]].erasure => TraversablesOrderedBuf(c)(buildDispatcher, tpe, DoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[sci.Set[Any]].erasure => TraversablesOrderedBuf(c)(buildDispatcher, tpe, DoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[sci.HashSet[Any]].erasure => TraversablesOrderedBuf(c)(buildDispatcher, tpe, DoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[sci.ListSet[Any]].erasure => TraversablesOrderedBuf(c)(buildDispatcher, tpe, DoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[Map[Any, Any]].erasure => TraversablesOrderedBuf(c)(buildDispatcher, tpe, DoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[sc.Map[Any, Any]].erasure => TraversablesOrderedBuf(c)(buildDispatcher, tpe, DoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[sci.Map[Any, Any]].erasure => TraversablesOrderedBuf(c)(buildDispatcher, tpe, DoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[sci.HashMap[Any, Any]].erasure => TraversablesOrderedBuf(c)(buildDispatcher, tpe, DoSort, NotArray)
case tpe if tpe.erasure =:= c.universe.typeOf[sci.ListMap[Any, Any]].erasure => TraversablesOrderedBuf(c)(buildDispatcher, tpe, DoSort, NotArray)
}
def apply(c: Context)(buildDispatcher: => PartialFunction[c.Type, TreeOrderedBuf[c.type]],
outerType: c.Type,
maybeSort: ShouldSort,
maybeArray: MaybeArray): TreeOrderedBuf[c.type] = {
import c.universe._
def freshT(id: String) = newTermName(c.fresh(s"fresh_$id"))
val dispatcher = buildDispatcher
val companionSymbol = outerType.typeSymbol.companionSymbol
// When dealing with a map we have two type args, and we need to generate the tuple
// type the entries would correspond to if we called .toList on the Map.
val innerType = if (outerType.asInstanceOf[TypeRefApi].args.size == 2) {
val (tpe1, tpe2) = (outerType.asInstanceOf[TypeRefApi].args(0), outerType.asInstanceOf[TypeRefApi].args(1)) // linter:ignore
val containerType = typeOf[Tuple2[Any, Any]].asInstanceOf[TypeRef]
import compat._
TypeRef.apply(containerType.pre, containerType.sym, List(tpe1, tpe2))
} else {
outerType.asInstanceOf[TypeRefApi].args.head
}
val innerTypes = outerType.asInstanceOf[TypeRefApi].args
val innerBuf: TreeOrderedBuf[c.type] = dispatcher(innerType)
// TODO it would be nice to capture one instance of this rather
// than allocate in every call in the materialized class
val ioa = freshT("ioa")
val iob = freshT("iob")
val innerOrd = q"""
new _root_.scala.math.Ordering[${innerBuf.tpe}] {
def compare(a: ${innerBuf.tpe}, b: ${innerBuf.tpe}) = {
val $ioa = a
val $iob = b
${innerBuf.compare(ioa, iob)}
}
}
"""
new TreeOrderedBuf[c.type] {
override val ctx: c.type = c
override val tpe = outerType
override def compareBinary(inputStreamA: ctx.TermName, inputStreamB: ctx.TermName) = {
val innerCompareFn = freshT("innerCompareFn")
val a = freshT("a")
val b = freshT("b")
q"""
val $innerCompareFn = { (a: _root_.java.io.InputStream, b: _root_.java.io.InputStream) =>
val $a = a
val $b = b
${innerBuf.compareBinary(a, b)}
};
_root_.com.twitter.scalding.serialization.macros.impl.ordered_serialization.runtime_helpers.TraversableHelpers.rawCompare($inputStreamA, $inputStreamB)($innerCompareFn)
"""
}
override def put(inputStream: ctx.TermName, element: ctx.TermName) = {
val asArray = freshT("asArray")
val bytes = freshT("bytes")
val len = freshT("len")
val pos = freshT("pos")
val innerElement = freshT("innerElement")
val cmpRes = freshT("cmpRes")
maybeSort match {
case DoSort =>
q"""
val $len = $element.size
$inputStream.writePosVarInt($len)
if($len > 0) {
val $asArray = $element.toArray[${innerBuf.tpe}]
// Sorting in-memory with the generated ordering gives the same order as sorting the binary form
_root_.scala.util.Sorting.quickSort[${innerBuf.tpe}]($asArray)($innerOrd)
var $pos = 0
while($pos < $len) {
val $innerElement = $asArray($pos)
${innerBuf.put(inputStream, innerElement)}
$pos += 1
}
}
"""
case NoSort =>
q"""
val $len: Int = $element.size
$inputStream.writePosVarInt($len)
$element.foreach { case $innerElement =>
${innerBuf.put(inputStream, innerElement)}
}
"""
}
}
override def hash(element: ctx.TermName): ctx.Tree = {
val currentHash = freshT("currentHash")
val len = freshT("len")
val target = freshT("target")
maybeSort match {
case NoSort =>
q"""
var $currentHash: Int = _root_.com.twitter.scalding.serialization.MurmurHashUtils.seed
var $len = 0
$element.foreach { t =>
val $target = t
$currentHash =
_root_.com.twitter.scalding.serialization.MurmurHashUtils.mixH1($currentHash, ${innerBuf.hash(target)})
// go ahead and compute the length so we don't traverse twice for lists
$len += 1
}
_root_.com.twitter.scalding.serialization.MurmurHashUtils.fmix($currentHash, $len)
"""
case DoSort =>
// We don't actually sort here, which would be expensive, but combine with a commutative operation
// so the order in which we see items won't matter. For this we use XOR
q"""
var $currentHash: Int = _root_.com.twitter.scalding.serialization.MurmurHashUtils.seed
var $len = 0
$element.foreach { t =>
val $target = t
$currentHash = $currentHash ^ ${innerBuf.hash(target)}
$len += 1
}
// Might as well be fancy when we mix in the length
_root_.com.twitter.scalding.serialization.MurmurHashUtils.fmix($currentHash, $len)
"""
}
}
override def get(inputStream: ctx.TermName): ctx.Tree = {
val len = freshT("len")
val firstVal = freshT("firstVal")
val travBuilder = freshT("travBuilder")
val iter = freshT("iter")
val extractionTree = maybeArray match {
case IsArray =>
q"""val $travBuilder = new Array[..$innerTypes]($len)
var $iter = 0
while($iter < $len) {
$travBuilder($iter) = ${innerBuf.get(inputStream)}
$iter = $iter + 1
}
$travBuilder : $outerType
"""
case NotArray =>
q"""val $travBuilder = $companionSymbol.newBuilder[..$innerTypes]
$travBuilder.sizeHint($len)
var $iter = 0
while($iter < $len) {
$travBuilder += ${innerBuf.get(inputStream)}
$iter = $iter + 1
}
$travBuilder.result : $outerType
"""
}
q"""
val $len: Int = $inputStream.readPosVarInt
if($len > 0) {
if($len == 1) {
val $firstVal: $innerType = ${innerBuf.get(inputStream)}
$companionSymbol.apply($firstVal) : $outerType
} else {
$extractionTree : $outerType
}
} else {
$companionSymbol.empty : $outerType
}
"""
}
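// Editor's note: an illustrative sketch added for this document, not part of the
// original file. The NotArray branch of get above rebuilds the collection through
// the companion's newBuilder with a sizeHint; the same pattern at runtime, with
// Vector standing in for the companion and readOne standing in for innerBuf.get:
def readN[T](n: Int, readOne: () => T): Vector[T] = {
  val builder = Vector.newBuilder[T]
  builder.sizeHint(n) // tell the builder how many elements to expect
  var i = 0
  while (i < n) { builder += readOne(); i += 1 }
  builder.result()
}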
override def compare(elementA: ctx.TermName, elementB: ctx.TermName): ctx.Tree = {
val a = freshT("a")
val b = freshT("b")
val cmpFnName = freshT("cmpFnName")
maybeSort match {
case DoSort =>
q"""
_root_.com.twitter.scalding.serialization.macros.impl.ordered_serialization.runtime_helpers.TraversableHelpers.sortedCompare[${innerBuf.tpe}]($elementA, $elementB)($innerOrd)
"""
case NoSort =>
q"""
_root_.com.twitter.scalding.serialization.macros.impl.ordered_serialization.runtime_helpers.TraversableHelpers.iteratorCompare[${innerBuf.tpe}]($elementA.iterator, $elementB.iterator)($innerOrd)
"""
}
}
override val lazyOuterVariables: Map[String, ctx.Tree] = innerBuf.lazyOuterVariables
override def length(element: Tree): CompileTimeLengthTypes[c.type] = {
innerBuf.length(q"$element.head") match {
case const: ConstantLengthCalculation[_] =>
FastLengthCalculation(c)(q"""{
posVarIntSize($element.size) + $element.size * ${const.toInt}
}""")
case m: MaybeLengthCalculation[_] =>
val maybeRes = freshT("maybeRes")
MaybeLengthCalculation(c)(q"""
if($element.isEmpty) {
val sizeOfZero = 1 // writing the constant 0, for length, takes 1 byte
_root_.com.twitter.scalding.serialization.macros.impl.ordered_serialization.runtime_helpers.DynamicLen(sizeOfZero)
} else {
val maybeRes = ${m.asInstanceOf[MaybeLengthCalculation[c.type]].t}
maybeRes match {
case _root_.com.twitter.scalding.serialization.macros.impl.ordered_serialization.runtime_helpers.ConstLen(constSize) =>
val sizeOverhead = posVarIntSize($element.size)
_root_.com.twitter.scalding.serialization.macros.impl.ordered_serialization.runtime_helpers.DynamicLen(constSize * $element.size + sizeOverhead)
// TODO: maybe we should support this case,
// where we can visit every member of the list relatively quickly to ask
// its length. Or should we care about sizes instead?
case _root_.com.twitter.scalding.serialization.macros.impl.ordered_serialization.runtime_helpers.DynamicLen(_) =>
_root_.com.twitter.scalding.serialization.macros.impl.ordered_serialization.runtime_helpers.NoLengthCalculation
case _ => _root_.com.twitter.scalding.serialization.macros.impl.ordered_serialization.runtime_helpers.NoLengthCalculation
}
}
""")
// Something whose size we can't work out ahead of time
case _ => MaybeLengthCalculation(c)(q"""
if($element.isEmpty) {
val sizeOfZero = 1 // writing the constant 0, for length, takes 1 byte
_root_.com.twitter.scalding.serialization.macros.impl.ordered_serialization.runtime_helpers.DynamicLen(sizeOfZero)
} else {
_root_.com.twitter.scalding.serialization.macros.impl.ordered_serialization.runtime_helpers.NoLengthCalculation
}
""")
}
}
}
}
}
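// Editor's note: the object below is an illustrative sketch added for this document,
// not part of the original TraversablesOrderedBuf.scala. It shows, at runtime and
// without macros, the idea behind the DoSort hash branch above: element hashes are
// combined with XOR (a commutative operation), so the result is independent of
// iteration order, and the collection size is mixed in at the end. Plain hashCode
// stands in for the macro-generated innerBuf.hash.
object UnorderedHashSketch {
  def hashUnordered[T](elems: Iterable[T]): Int = {
    var acc = 0
    var len = 0
    elems.foreach { t =>
      acc = acc ^ t.hashCode // XOR: traversal order does not matter
      len += 1
    }
    31 * acc + len // mix in the size so that e.g. Set.empty and Set(0) differ
  }
  // hashUnordered(Set(1, 2, 3)) == hashUnordered(List(3, 2, 1))
}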
|
tresata/scalding
|
scalding-serialization/src/main/scala/com/twitter/scalding/serialization/macros/impl/ordered_serialization/providers/TraversablesOrderedBuf.scala
|
Scala
|
apache-2.0
| 14,529
|
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.rdd.read
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.instrumentation.Timers._
import org.bdgenomics.adam.models.{ ReferencePositionPair, ReferencePositionWithOrientation, SingleReadBucket }
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.formats.avro.AlignmentRecord
private[rdd] object MarkDuplicates extends Serializable {
private def markReadsInBucket(bucket: SingleReadBucket, primaryAreDups: Boolean, secondaryAreDups: Boolean) {
bucket.primaryMapped.foreach(read => {
read.setDuplicateRead(primaryAreDups)
})
bucket.secondaryMapped.foreach(read => {
read.setDuplicateRead(secondaryAreDups)
})
bucket.unmapped.foreach(read => {
read.setDuplicateRead(false)
})
}
// Calculates the sum of the phred scores that are greater than or equal to 15
def score(record: AlignmentRecord): Int = {
record.qualityScores.filter(15 <=).sum
}
private def scoreBucket(bucket: SingleReadBucket): Int = {
bucket.primaryMapped.map(score).sum
}
private def markReads(reads: Iterable[(ReferencePositionPair, SingleReadBucket)], areDups: Boolean) {
markReads(reads, primaryAreDups = areDups, secondaryAreDups = areDups, ignore = None)
}
private def markReads(reads: Iterable[(ReferencePositionPair, SingleReadBucket)], primaryAreDups: Boolean, secondaryAreDups: Boolean,
ignore: Option[(ReferencePositionPair, SingleReadBucket)] = None) = MarkReads.time {
reads.foreach(read => {
if (ignore.isEmpty || read != ignore.get) {
markReadsInBucket(read._2, primaryAreDups, secondaryAreDups)
}
})
}
def apply(rdd: RDD[AlignmentRecord]): RDD[AlignmentRecord] = {
// Group by library and left position
def leftPositionAndLibrary(p: (ReferencePositionPair, SingleReadBucket)): (Option[ReferencePositionWithOrientation], String) = {
(p._1.read1refPos, p._2.allReads.head.getRecordGroupLibrary)
}
// Group by right position
def rightPosition(p: (ReferencePositionPair, SingleReadBucket)): Option[ReferencePositionWithOrientation] = {
p._1.read2refPos
}
rdd.adamSingleReadBuckets().keyBy(ReferencePositionPair(_)).groupBy(leftPositionAndLibrary)
.flatMap(kv => PerformDuplicateMarking.time {
val leftPos: Option[ReferencePositionWithOrientation] = kv._1._1
val readsAtLeftPos: Iterable[(ReferencePositionPair, SingleReadBucket)] = kv._2
leftPos match {
// These are all unmapped reads. There is no way to determine if they are duplicates
case None =>
markReads(readsAtLeftPos, areDups = false)
// These reads have their left position mapped
case Some(leftPosWithOrientation) =>
val readsByRightPos = readsAtLeftPos.groupBy(rightPosition)
val groupCount = readsByRightPos.size
readsByRightPos.foreach(e => {
val rightPos = e._1
val reads = e._2
val groupIsFragments = rightPos.isEmpty
// We have no pairs (only fragments) if the current group is a group of fragments
// and there is only one group in total
val onlyFragments = groupIsFragments && groupCount == 1
// If there are only fragments then score the fragments. Otherwise, if there are not only
// fragments (there are pairs as well) mark all fragments as duplicates.
// If the group does not contain fragments (it contains pairs) then always score it.
if (onlyFragments || !groupIsFragments) {
// Find the highest-scoring read and mark it as not a duplicate. Mark all the other reads in this group as duplicates.
val highestScoringRead = reads.max(ScoreOrdering)
markReadsInBucket(highestScoringRead._2, primaryAreDups = false, secondaryAreDups = true)
markReads(reads, primaryAreDups = true, secondaryAreDups = true, ignore = Some(highestScoringRead))
} else {
markReads(reads, areDups = true)
}
})
}
readsAtLeftPos.flatMap(read => { read._2.allReads })
})
}
private object ScoreOrdering extends Ordering[(ReferencePositionPair, SingleReadBucket)] {
override def compare(x: (ReferencePositionPair, SingleReadBucket), y: (ReferencePositionPair, SingleReadBucket)): Int = {
// This is safe because scores are Ints
scoreBucket(x._2) - scoreBucket(y._2)
}
}
}
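// Editor's note: the object below is an illustrative sketch added for this document,
// not part of the original MarkDuplicates.scala. It isolates the core rule used
// above ("within each group, keep the highest-scoring bucket and flag everything
// else as a duplicate") on plain case classes, without Spark or the ADAM models.
// All names here are hypothetical.
object MarkDuplicatesSketch {
  final case class Read(name: String, groupKey: String, score: Int, duplicate: Boolean = false)
  def mark(reads: Seq[Read]): Seq[Read] =
    reads.groupBy(_.groupKey).values.flatMap { group =>
      val best = group.maxBy(_.score)
      // mark every read except the single best-scoring instance
      group.map(r => r.copy(duplicate = !(r eq best)))
    }.toSeq
  // mark(Seq(Read("a", "g1", 10), Read("b", "g1", 30))) marks "a" as a duplicate.
}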
|
tomwhite/adam
|
adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/MarkDuplicates.scala
|
Scala
|
apache-2.0
| 5,365
|
package atari.st.util.zip
import java.io.InputStream
import java.util.zip.ZipInputStream
class ZipEntryInputStream(zip: ZipInputStream)
extends InputStream
{
private var eof = false
@inline private def doAndEOF[T](f: => T, ifEOF: T = ()): T =
if (eof) ifEOF
else {
val r = f
eof = available() == 0
r
}
override def available(): Int =
zip.available()
override def read(): Int =
doAndEOF(zip.read(), -1)
override def read(b: Array[Byte], off: Int, len: Int): Int =
doAndEOF(zip.read(b, off, len), -1)
override def skip(n: Long): Long =
doAndEOF(zip.skip(n), 0)
override def close(): Unit =
doAndEOF(zip.closeEntry())
}
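// Editor's note: the object below is an illustrative usage sketch added for this
// document, not part of the original ZipEntryInputStream.scala. It builds a
// one-entry zip archive in memory and reads it back through ZipEntryInputStream,
// which reports EOF at the end of the current entry (via ZipInputStream.available).
object ZipEntryInputStreamSketch {
  import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
  import java.util.zip.{ZipEntry, ZipInputStream, ZipOutputStream}
  def roundTrip(): String = {
    val bytes = {
      val bos = new ByteArrayOutputStream()
      val zos = new ZipOutputStream(bos)
      zos.putNextEntry(new ZipEntry("hello.txt"))
      zos.write("hello".getBytes("UTF-8"))
      zos.closeEntry()
      zos.close()
      bos.toByteArray
    }
    val zis = new ZipInputStream(new ByteArrayInputStream(bytes))
    zis.getNextEntry() // position on the single entry
    val in = new ZipEntryInputStream(zis)
    scala.io.Source.fromInputStream(in).mkString // reads only this entry: "hello"
  }
}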
|
suiryc/atari-st-tools
|
src/main/scala/atari/st/util/zip/ZipEntryInputStream.scala
|
Scala
|
gpl-3.0
| 692
|
package com.twitter.finagle.exception
import com.fasterxml.jackson.databind.{JsonNode, ObjectMapper}
import com.twitter.finagle.core.util.InetAddressUtil
import com.twitter.util.{GZIPStringEncoder, Time}
import java.lang.{Throwable, StackTraceElement => javaSTE}
import java.net.{SocketAddress, InetSocketAddress, InetAddress}
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import scala.collection.JavaConverters._
/**
* An object that generates a service exception and verifies its JSON representation.
*/
private[exception] class TestServiceException(
serviceName: String,
exceptionMessage: String,
time: Option[Time] = None,
traceId: Option[Long] = None,
clientAddress: Option[String] = None,
sourceAddress: Option[String] = Some(InetAddressUtil.Loopback.getHostName),
cardinality: Option[Int] = None) {
private val ste = new javaSTE("badclass", "badmethod", "badfile", 42)
val throwable = new Throwable(exceptionMessage)
throwable.setStackTrace(Array(ste,ste))
private def constructServiceException = {
var se = new ServiceException(serviceName, throwable, time.getOrElse(Time.now), traceId.getOrElse(0L))
clientAddress foreach (ca => se = se.withClient(ca))
sourceAddress foreach (sa => se = se.withSource(sa))
cardinality foreach (c => se = se.incremented(c))
se
}
lazy val serviceException = constructServiceException
def verifyCompressedJSON(json: String) = {
verifyJSON(GZIPStringEncoder.decodeString(json))
}
def verifyJSON(json: String) = {
def verify[T](actual: T, expected: T, message: String, previous: Boolean = false) = {
assert(!previous, message + ": variable already set")
assert(actual == expected, message + ": " + actual)
true
}
def verifyOption[T](received: T, expected: Option[T], fieldName: String, previous: Boolean = false, enforced: Boolean = true) = {
if (enforced) {
assert(expected.isDefined, "received key for non-defined field: " + fieldName)
verify(received, expected.get, "incorrect value for " + fieldName, previous)
} else true
}
def fail(badKey: String, location: String) {
assert(false, "unknown element found in " + location + ": " + badKey)
}
val mapper = new ObjectMapper
val s: JsonNode = mapper.readTree(json)
var hasName = false
var hasTraceId = false
var hasTimestamp = false
var hasExceptionContents = false
var hasExceptionClass = false
var hasMessage = false
var hasStackTrace = false
var hasClient = false
var hasSource = false
var hasCardinality = false
assert(s.isObject)
s.fields.asScala foreach { mapEntry =>
val jsonValue = mapEntry.getValue
mapEntry.getKey match {
case "name" =>
assert(jsonValue.isTextual)
hasName = verify(jsonValue.textValue, serviceName, "bad service name", hasName)
case "traceId" =>
assert(jsonValue.isNumber)
hasTraceId = verifyOption(jsonValue.longValue, traceId, "bad traceId", hasTraceId, false)
case "timestamp" =>
assert(jsonValue.isNumber)
hasTimestamp = verifyOption(jsonValue.longValue, time, "incorrect time", hasTimestamp, false)
case "exceptionContents" => {
assert(!hasExceptionContents, "got exception contents >1 times")
hasExceptionContents = true
assert(jsonValue.isObject)
jsonValue.fields.asScala foreach { contentsMapEntry =>
val contentsJsonValue = contentsMapEntry.getValue
contentsMapEntry.getKey match {
case "exceptionClass" =>
assert(contentsJsonValue.isTextual)
hasExceptionClass = verify(contentsJsonValue.textValue, "java.lang.Throwable", "bad exception class", hasExceptionClass)
case "message" =>
assert(contentsJsonValue.isTextual)
hasMessage = verify(contentsJsonValue.textValue, exceptionMessage, "bad exception message", hasMessage)
case "stackTrace" =>
assert(contentsJsonValue.isTextual)
hasStackTrace = verify(contentsJsonValue.textValue, ste.toString + "\\n" + ste.toString, "bad stacktrace", hasStackTrace)
case a => fail(a, "exception contents")
}
}
}
case "peer" =>
assert(jsonValue.isTextual)
hasClient = verifyOption(jsonValue.textValue, clientAddress, "peer", hasClient)
case "sourceAddress" =>
assert(jsonValue.isTextual)
hasSource = verifyOption(jsonValue.textValue, sourceAddress, "source", hasSource)
case "cardinality" =>
assert(jsonValue.isNumber)
hasCardinality = verifyOption(jsonValue.intValue, cardinality map { _+1 }, "cardinality", hasCardinality, false)
case a => fail(a, "service exception")
}
}
assert(hasName, "no name")
assert(hasTraceId, "no trace id")
assert(hasTimestamp, "no timestamp")
assert(hasExceptionContents, "no exception contents")
assert(hasExceptionClass, "no exception class")
assert(hasMessage, "no message")
assert(hasStackTrace, "no stacktrace")
def optionalAssertDefined(o: Option[_], defined: Boolean, msg: String) {
if (o.isDefined) assert(defined, msg + " expected but not found")
}
optionalAssertDefined(clientAddress, hasClient, "peer")
optionalAssertDefined(sourceAddress, hasSource, "source")
optionalAssertDefined(cardinality, hasCardinality, "cardinality")
true
}
}
class ServiceExceptionTest extends FunSuite {
test("with no endpoint reporting serialize to JSON in the proper format") {
val tse = new TestServiceException("service16", "my cool message", Some(Time.now), Some(124564L))
assert(tse.verifyJSON(tse.serviceException.toJson))
}
test("with no endpoint reporting with >1 cardinality serialize to JSON in the proper format") {
val tse = new TestServiceException("service16", "my cool message", Some(Time.now), Some(124564L), cardinality = Some(12))
assert(tse.verifyJSON(tse.serviceException.toJson))
}
test("with client endpoint reporting serialize to JSON in the proper format") {
val tse = new TestServiceException("service16", "my cool message", Some(Time.now), Some(124564L), clientAddress = Some("myendpoint"))
assert(tse.verifyJSON(tse.serviceException.toJson))
}
test("with client endpoint reporting with >1 cardinality serialize to JSON in the proper format") {
val tse = new TestServiceException("service16", "my cool message", Some(Time.now), Some(124564L), clientAddress = Some("myendpoint"), cardinality = Some(9))
assert(tse.verifyJSON(tse.serviceException.toJson))
}
test("with source endpoint reporting serialize to JSON in the proper format") {
val tse = new TestServiceException("service16", "my cool message", Some(Time.now), Some(124564L), sourceAddress = Some("myendpoint"))
assert(tse.verifyJSON(tse.serviceException.toJson))
}
test("with source endpoint reporting with >1 cardinality serialize to JSON in the proper format") {
val tse = new TestServiceException("service16", "my cool message", Some(Time.now), Some(124564L), sourceAddress = Some("myendpoint"), cardinality = Some(7))
assert(tse.verifyJSON(tse.serviceException.toJson))
}
test("with both client and source endpoint reporting serialize to JSON in the proper format") {
val tse = new TestServiceException("service16", "my cool message", Some(Time.now), Some(124564L), clientAddress = Some("myClientAddress"), sourceAddress = Some("mySourceAddress"))
assert(tse.verifyJSON(tse.serviceException.toJson))
}
test("with both client and source endpoint reporting with >1 cardinality serialize to JSON in the proper format") {
val tse = new TestServiceException("service16", "my cool message", Some(Time.now), Some(124564L), clientAddress = Some("myClientAddress"), sourceAddress = Some("mySourceAddress"), cardinality = Some(8))
assert(tse.verifyJSON(tse.serviceException.toJson))
}
}
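// Editor's note: the object below is an illustrative sketch added for this document,
// not part of the original ServiceExceptionTest.scala. It isolates the Jackson
// pattern that verifyJSON relies on above: parse a JSON string into a tree and walk
// its top-level fields. The payload in the example comment is hypothetical.
private[exception] object JsonFieldWalkSketch {
  import com.fasterxml.jackson.databind.ObjectMapper
  import scala.collection.JavaConverters._
  def topLevelFields(json: String): Set[String] = {
    val root = (new ObjectMapper).readTree(json)
    root.fields.asScala.map(_.getKey).toSet
  }
  // topLevelFields("""{"name":"svc","traceId":7}""") == Set("name", "traceId")
}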
|
jamescway/finagle
|
finagle-exception/src/test/scala/com/twitter/finagle/exception/ServiceExceptionTest.scala
|
Scala
|
apache-2.0
| 8,124
|
package com.twitter.finagle.toggle
import com.twitter.util.{Return, Throw, Try}
import org.scalacheck.Arbitrary.arbitrary
import org.scalatest.FunSuite
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
import scala.collection.JavaConverters._
class JsonToggleMapTest extends FunSuite with ScalaCheckDrivenPropertyChecks {
import JsonToggleMap.{DescriptionIgnored, DescriptionRequired}
private def assertParseFails(input: String): Unit = {
assertParseFails(input, DescriptionIgnored)
assertParseFails(input, DescriptionRequired)
}
private def assertParseFails(
input: String,
descriptionMode: JsonToggleMap.DescriptionMode
): Unit =
JsonToggleMap.parse(input, descriptionMode) match {
case Return(_) => fail(s"Parsing should not succeed for $input")
case Throw(_) => // expected
}
test("parse invalid JSON string with no toggles") {
assertParseFails("{ }")
}
test("parse invalid JSON string with no id") {
assertParseFails("""
|{"toggles": [
| { "description": "Dude, where's my id?",
| "fraction": 0.0
| }
| ]
|}""".stripMargin)
}
test("parse invalid JSON string with duplicate ids") {
assertParseFails("""
|{"toggles": [
| { "id": "com.twitter.duplicate",
| "description": "cannot have duplicate ids even if other fields differ",
| "fraction": 0.0
| },
| { "id": "com.twitter.duplicate",
| "description": "this is a duplicate",
| "fraction": 1.0
| }
| ]
|}""".stripMargin)
}
test("parse invalid JSON string with empty description") {
assertParseFails(
"""
|{"toggles": [
| { "id": "com.twitter.EmptyDescription",
| "description": " ",
| "fraction": 0.0
| }
| ]
|}""".stripMargin,
DescriptionRequired
)
}
private val jsonWithNoDescription = """
|{"toggles": [
| { "id": "com.twitter.NoDescription",
| "fraction": 0.0
| }
| ]
|}""".stripMargin
test("parse JSON string with no description and is required") {
assertParseFails(jsonWithNoDescription, DescriptionRequired)
}
test("parse JSON string with no description and is ignored") {
JsonToggleMap.parse(jsonWithNoDescription, DescriptionIgnored) match {
case Throw(t) =>
fail(t)
case Return(tm) =>
assert(tm.iterator.size == 1)
}
}
test("parse invalid JSON string with invalid fraction") {
assertParseFails("""
|{"toggles": [
| { "id": "com.twitter.BadFraction",
| "description": "fractions should be 0-1",
| "fraction": 1.1
| }
| ]
|}""".stripMargin)
}
test("parse invalid JSON string with no fraction") {
assertParseFails("""
|{"toggles": [
| { "id": "com.twitter.NoFraction",
| "description": "fractions must be present"
| }
| ]
|}""".stripMargin)
}
// NOTE: this input should match what's in the resources file for
// com/twitter/toggles/configs/com.twitter.finagle.toggle.tests.Valid.json
private val validInput = """
|{
| "toggles": [
| {
| "id": "com.twitter.off",
| "description": "Always disabled, yo.",
| "fraction": 0.0
| },
| {
| "id": "com.twitter.on",
| "description": "Always enabled, dawg.",
| "fraction": 1.0,
| "comment": "Look, I'm on!"
| }
| ]
|}""".stripMargin
private def validateParsedJson(toggleMap: Try[ToggleMap]): Unit = {
toggleMap match {
case Throw(t) =>
fail(t)
case Return(map) =>
assert(map.iterator.size == 2)
assert(map.iterator.exists(_.id == "com.twitter.off"))
assert(map.iterator.exists(_.id == "com.twitter.on"))
val on = map("com.twitter.on")
val off = map("com.twitter.off")
val doesNotExist = map("com.twitter.lolcat")
forAll(arbitrary[Int]) { i =>
assert(on.isDefinedAt(i))
assert(on(i))
assert(off.isDefinedAt(i))
assert(!off(i))
assert(!doesNotExist.isDefinedAt(i))
}
}
}
test("parse valid JSON String") {
validateParsedJson(JsonToggleMap.parse(validInput, DescriptionIgnored))
validateParsedJson(JsonToggleMap.parse(validInput, DescriptionRequired))
}
test("parse valid JSON String with empty toggles") {
val in = """
|{
| "toggles": [ ]
|}""".stripMargin
JsonToggleMap.parse(in, DescriptionRequired) match {
case Throw(t) =>
fail(t)
case Return(map) =>
assert(0 == map.iterator.size)
}
}
test("parse valid JSON resource file") {
val rscs = getClass.getClassLoader
.getResources(
"com/twitter/toggles/configs/com.twitter.finagle.toggle.tests.Valid.json"
)
.asScala
.toSeq
assert(1 == rscs.size)
validateParsedJson(JsonToggleMap.parse(rscs.head, DescriptionIgnored))
validateParsedJson(JsonToggleMap.parse(rscs.head, DescriptionRequired))
}
test("parse invalid JSON resource file") {
// this json file is missing an "id" on a toggle definition and therefore
// should fail to parse.
val rscs = getClass.getClassLoader
.getResources(
"com/twitter/toggles/configs/com.twitter.finagle.toggle.tests.Invalid.json"
)
.asScala
.toSeq
assert(1 == rscs.size)
JsonToggleMap.parse(rscs.head, DescriptionIgnored) match {
case Return(_) => fail(s"Parsing should not succeed")
case Throw(_) => // expected
}
}
}
|
luciferous/finagle
|
finagle-toggle/src/test/scala/com/twitter/finagle/toggle/JsonToggleMapTest.scala
|
Scala
|
apache-2.0
| 5,743
|
package at.logic.gapt.proofs.algorithms.ceres
import at.logic.gapt.proofs.lk.algorithms.subsumption.StillmanSubsumptionAlgorithmHOL
import at.logic.gapt.proofs.lk._
import at.logic.gapt.proofs.lksk.TypeSynonyms.Label
import at.logic.gapt.proofs.lksk.algorithms.applySubstitution
import at.logic.gapt.proofs.occurrences.FormulaOccurrence
import at.logic.gapt.expr._
import at.logic.gapt.expr.Ti
import at.logic.gapt.proofs.algorithms.ceres.struct.Struct
import at.logic.gapt.utils.dssupport.ListSupport._
import at.logic.gapt.proofs.lk.base.{ Sequent, LKProof }
import at.logic.gapt.proofs.lksk
import at.logic.gapt.proofs.lksk.{ Axiom => LKSKAxiom, _ }
import at.logic.gapt.proofs.resolution.ral._
/**
* Created by marty on 10/6/14.
*/
object ceres_omega extends ceres_omega
class ceres_omega {
import at.logic.gapt.formats.llk.LLKFormatter._
private def check_es( s: LabelledSequent, c: LabelledSequent, es: LabelledSequent ): LabelledSequent = {
s
}
/**
* Applies the CERES_omega method to a proof.
* @param projections This is the set of projections to use. A projection to reflexivity is generated if needed.
* @param ralproof The R_al proof to use as skeleton.
* @param es The end-sequent of the original proof.
* @param struct The struct of the original proof. (unused at the moment)
* @return a pair of an LKProof with atomic cuts only and the sub-sequent of its root which corresponds to the Ral sequent
*/
def apply( projections: Set[LKProof], ralproof: RalResolutionProof[LabelledSequent], es: LabelledSequent, struct: Struct ): ( LKProof, LabelledSequent ) = ralproof match {
//reflexivity as initial rule
case InitialSequent( s @ LabelledSequent( Nil, List( LabelledFormulaOccurrence( Eq( x, y ), anc, label ) ) ) ) if ( x == y ) && ( x.exptype == Ti ) =>
val rule = LKSKAxiom( s.toFSequent, ( List(), List( label ) ) )
val reflexivity_occ = rule.root.succedent( 0 ).asInstanceOf[LabelledFormulaOccurrence]
val weakened_left = es.l_antecedent.foldLeft( rule )( ( r, fo ) => lksk.WeakeningLeftRule( r, fo.formula, fo.skolem_label ) )
val weakened_right = es.l_succedent.foldLeft( weakened_left )( ( r, fo ) => lksk.WeakeningRightRule( r, fo.formula, fo.skolem_label ) )
val reflexivity_successor = pickFOWithAncestor( sequentToLabelledSequent( weakened_right.root ).l_succedent, reflexivity_occ )
val clause = LabelledSequent( Nil, List( reflexivity_successor ) )
require( weakened_right.root.occurrences.size == es.occurrences.size + clause.occurrences.size, "The size of the generated end-sequent " + rule.root + " is not the size of the end-sequent " + es + " + the size of the clause " + clause )
require( ( clause.occurrences diff weakened_right.root.occurrences ).isEmpty )
( weakened_right, clause )
case InitialSequent( root ) =>
val candidates = projections.toList.flatMap( x => {
val pes = filterEndsequent( sequentToLabelledSequent( x.root ), es, struct )
StillmanSubsumptionAlgorithmHOL.subsumes_by( pes.toFSequent, root.toFSequent ) match {
case None => Nil
case Some( sub ) => List( ( x, sub ) )
}
} )
candidates match {
case ( proof, sub ) :: _ =>
val subproof = applySubstitution( proof, sub )._1
val clause = filterEndsequent( sequentToLabelledSequent( subproof.root ), es, struct )
val tocontract = LabelledSequent(
diffModuloOccurrence( clause.l_antecedent, root.l_antecedent ),
diffModuloOccurrence( clause.l_succedent, root.l_succedent ) )
val acontr = tocontract.l_antecedent.foldLeft( subproof )( ( p, occ ) =>
p.root.antecedent.find( x => x != occ && x.formula == occ.formula && x.asInstanceOf[LabelledFormulaOccurrence].skolem_label == occ.skolem_label ) match {
case Some( c ) =>
ContractionLeftRule( p, occ, c )
case None => throw new Exception( "Could not find an element to contract for " + f( occ ) + " in " + f( root ) )
} )
val scontr = tocontract.l_succedent.foldLeft( acontr )( ( p, occ ) =>
p.root.succedent.find( x => x != occ && x.formula == occ.formula && x.asInstanceOf[LabelledFormulaOccurrence].skolem_label == occ.skolem_label ) match {
case Some( c ) =>
ContractionRightRule( p, occ, c )
case None => throw new Exception( "Could not find an element to contract for " + f( occ ) + " in " + f( root ) )
} )
val nclause = filterEndsequent( sequentToLabelledSequent( scontr.root ), es, struct )
require( scontr.root.syntacticMultisetEquals( nclause compose es ), "The root " + f( scontr.root ) + " must consist of the clause " + f( nclause ) + " plus the end-sequent " + f( es ) )
require( scontr.root.occurrences.size == es.occurrences.size + nclause.occurrences.size, "The size of the generated end-sequent " + f( root ) + " is not the size of the end-sequent " + f( es ) + " + the size of the clause " + nclause )
require( ( nclause.occurrences diff scontr.root.occurrences ).isEmpty )
( scontr, nclause )
case Nil =>
throw new Exception( "Could not find a projection for the clause " + f( root ) + " in " + projections.map( x => filterEndsequent( sequentToLabelledSequent( x.root ), es, struct ) ).map( f( _ ) ).mkString( "\\n" ) )
}
case Cut( root, parent1, parent2, p1occs, p2occs ) =>
val ( lkparent1, clause1 ) = ceres_omega( projections, parent1, es, struct )
val ( lkparent2, clause2 ) = ceres_omega( projections, parent2, es, struct )
require( ( clause1.occurrences diff lkparent1.root.occurrences ).isEmpty )
require( ( clause2.occurrences diff lkparent2.root.occurrences ).isEmpty )
val leftcutformulas = p1occs.foldLeft( List[LabelledFormulaOccurrence]() )( ( list, fo ) => findAuxByFormulaAndLabel( fo.asInstanceOf[LabelledFormulaOccurrence], clause1.l_succedent, list ) :: list ).reverse
val rightcutformulas = p2occs.foldLeft( List[LabelledFormulaOccurrence]() )( ( list, fo ) => findAuxByFormulaAndLabel( fo.asInstanceOf[LabelledFormulaOccurrence], clause2.l_antecedent, list ) :: list ).reverse
val ( c1, caux1, c2, caux2 ) = ( leftcutformulas, rightcutformulas ) match {
case ( x :: xs, y :: ys ) => ( x, xs, y, ys )
case ( Nil, _ ) => throw new Exception( "Could not find the cut formula " + p1occs( 0 ).formula + " in left parent " + lkparent1.root )
case ( _, Nil ) => throw new Exception( "Could not find the cut formula " + p2occs( 0 ).formula + " in right parent " + lkparent2.root )
}
val cleft = caux1.foldLeft( lkparent1 )( ( proof, occ ) =>
ContractionRightRule( proof, pickFOWithAncestor( proof.root.succedent, c1 ), pickFOWithAncestor( proof.root.succedent, occ ) ) )
val cright = caux2.foldLeft( lkparent2 )( ( proof, occ ) =>
ContractionLeftRule( proof, pickFOWithAncestor( proof.root.antecedent, c2 ), pickFOWithAncestor( proof.root.antecedent, occ ) ) )
val cutfleft = pickFOWithAncestor( cleft.root.succedent, c1 ).asInstanceOf[LabelledFormulaOccurrence]
val cutfright = pickFOWithAncestor( cright.root.antecedent, c2 ).asInstanceOf[LabelledFormulaOccurrence]
require( cutfleft.formula == cutfright.formula,
"Found the wrong cut formulas:\\n" + cutfleft.formula + "\\n and\\n" + cutfright.formula )
// require(cutfleft.formula syntaxEquals cutfright.formula,
// "Cut formulas are alpha equal, but not syntax:\\n"+cutfleft.formula+"\\n and\\n"+cutfright.formula)
require( cutfleft.skolem_label == cutfright.skolem_label,
"Found the wrong cut labels:\\n" + cutfleft.skolem_label + "\\n and\\n" + cutfright.skolem_label )
val rule = CutRule( cleft, cright, cutfleft, cutfright )
val crule = contractEndsequent( rule, es )
val nclauses = filterByAncestor( crule.root, clause1 compose clause2 )
require( nclauses.toFSequent multiSetEquals root.toFSequent, "We tracked the clauses wrong:\\n calculated clause: " + f( nclauses ) + "\\n real clause: " + f( root ) )
require( crule.root.occurrences.size == es.occurrences.size + nclauses.occurrences.size, "The size of the generated end-sequent " + rule.root + " is not the size of the end-sequent " + es + " + the size of the clause " + nclauses )
( crule, nclauses )
case AFactorF( root, parent, contr, aux, _ ) =>
val ( lkparent, clause1 ) = ceres_omega( projections, parent, es, struct )
require( ( clause1.occurrences diff lkparent.root.occurrences ).isEmpty )
aux.length match {
case 0 => ( lkparent, clause1 ) //trivial, skipping factor inference
case 1 =>
val c1 = findAuxByFormulaAndLabel( contr, clause1.l_antecedent, Nil )
val c2 = findAuxByFormulaAndLabel( contr, clause1.l_antecedent, c1 :: Nil )
val rule = ContractionLeftRule( lkparent, c1, c2 )
val nclauses = filterByAncestor( rule.root, clause1 )
require( nclauses.toFSequent multiSetEquals root.toFSequent, "We tracked the clauses wrong:\\n calculated clause: " + f( nclauses ) + "\\n real clause: " + f( root ) )
require( rule.root.occurrences.size == es.occurrences.size + nclauses.occurrences.size, "The size of the generated end-sequent " + rule.root + " is not the size of the end-sequent " + es + " + the size of the clause " + nclauses )
( rule, nclauses )
case _ => throw new Exception( "Factor of more than two literals not supported yet!" )
}
case AFactorT( root, parent, contr, aux, _ ) =>
val ( lkparent, clause1 ) = ceres_omega( projections, parent, es, struct )
require( ( clause1.occurrences diff lkparent.root.occurrences ).isEmpty )
aux.length match {
// case 0 => throw new Exception("At least one auxiliary formula is necessary for a factor rule!")
case 1 =>
val c1 = findAuxByFormulaAndLabel( contr, clause1.l_succedent, Nil )
val c2 = findAuxByFormulaAndLabel( contr, clause1.l_succedent, c1 :: Nil )
val rule = ContractionRightRule( lkparent, c1, c2 )
val nclauses = filterByAncestor( rule.root, clause1 )
require( nclauses.toFSequent multiSetEquals root.toFSequent, "We tracked the clauses wrong:\\n calculated clause: " + f( nclauses ) + "\\n real clause: " + f( root ) )
require( rule.root.occurrences.size == es.occurrences.size + nclauses.occurrences.size, "The size of the generated end-sequent " + root + " is not the size of the end-sequent " + es + " + the size of the clause " + nclauses )
( rule, nclauses )
case 0 => ( lkparent, clause1 ) //trivial, skipping factor inference
case _ => throw new Exception( "Factor of more than two literals not supported yet!" )
}
case ParaF( root, parent1, parent2, p1occ, p2occ, principial, flipped ) =>
val ( lkparent1, clause1 ) = ceres_omega( projections, parent1, es, struct )
val ( lkparent2, clause2 ) = ceres_omega( projections, parent2, es, struct )
require( ( clause1.occurrences diff lkparent1.root.occurrences ).isEmpty )
require( ( clause2.occurrences diff lkparent2.root.occurrences ).isEmpty )
val eqn: FormulaOccurrence = findAuxByFormulaAndLabel( p1occ, clause1.l_succedent, Nil )
val modulant: FormulaOccurrence = findAuxByFormulaAndLabel( p2occ.asInstanceOf[LabelledFormulaOccurrence], clause2.l_antecedent, Nil )
val rule = EquationLeftMacroRule( lkparent1, lkparent2, eqn, modulant, principial.formula )
val crule = contractEndsequent( rule, es )
val nclauses = filterByAncestor( crule.root, clause1 compose clause2 )
require( nclauses.toFSequent multiSetEquals root.toFSequent, "We tracked the clauses wrong:\\n calculated clause: " + f( nclauses ) + "\\n real clause: " + f( root ) )
require( crule.root.occurrences.size == es.occurrences.size + nclauses.occurrences.size, "The size of the generated end-sequent " + rule.root + " is not the size of the end-sequent " + es + " + the size of the clause " + nclauses )
( crule, nclauses )
case ParaT( root, parent1, parent2, p1occ, p2occ, principial, flipped ) =>
val ( lkparent1, clause1 ) = ceres_omega( projections, parent1, es, struct )
val ( lkparent2, clause2 ) = ceres_omega( projections, parent2, es, struct )
require( ( clause1.occurrences diff lkparent1.root.occurrences ).isEmpty )
require( ( clause2.occurrences diff lkparent2.root.occurrences ).isEmpty )
val eqn: FormulaOccurrence = findAuxByFormulaAndLabel( p1occ, clause1.l_succedent, Nil )
val modulant: FormulaOccurrence = findAuxByFormulaAndLabel( p2occ.asInstanceOf[LabelledFormulaOccurrence], clause2.l_succedent, Nil )
val rule = EquationRightMacroRule( lkparent1, lkparent2, eqn, modulant, principial.formula )
val crule = contractEndsequent( rule, es )
val nclauses = filterByAncestor( crule.root, clause1 compose clause2 )
require( nclauses.toFSequent multiSetEquals root.toFSequent, "We tracked the clauses wrong:\\n calculated clause: " + f( nclauses ) + "\\n real clause: " + f( root ) )
require( crule.root.occurrences.size == es.occurrences.size + nclauses.occurrences.size, "The size of the generated end-sequent " + rule.root + " is not the size of the end-sequent " + es + " + the size of the clause " + nclauses )
( crule, nclauses )
case Sub( root, parent, sub ) =>
val ( lkparent, clauses ) = ceres_omega( projections, parent, es, struct )
require( ( clauses.occurrences diff lkparent.root.occurrences ).isEmpty )
val ( rule, mapping ) = applySubstitution( lkparent, sub )
//val axiomformulas = rule.leaves.flatMap( _.vertex.occurrences )
val lruleroot = sequentToLabelledSequent( rule.root )
/* find the sub-sequent of the substituted proof which was introduced only by axioms */
//val axiomancestoroccs_a = lruleroot.l_antecedent.filter( x => firstAncestors( x ).forall( y => axiomformulas.contains( y ) ) )
//val axiomancestoroccs_s = lruleroot.l_succedent.filter( x => firstAncestors( x ).forall( y => axiomformulas.contains( y ) ) )
/* for each element in the root, find a matching literal with axiom ancestor in the derived end-sequent */
//val nclauses_a = root.l_antecedent.foldLeft[List[LabelledFormulaOccurrence]]( List() )( ( list, fo ) => {
// findAuxByFormulaAndLabel( fo.formula, fo.skolem_label, axiomancestoroccs_a, list ) :: list
//} )
//val nclauses_s = root.l_succedent.foldLeft[List[LabelledFormulaOccurrence]]( List() )( ( list, fo ) => {
// findAuxByFormulaAndLabel( fo.formula, fo.skolem_label, axiomancestoroccs_s, list ) :: list
//} )
require( clauses.l_antecedent.filterNot( mapping.contains ).isEmpty, "Could not find a mapping for: " + clauses.l_antecedent.filterNot( mapping.contains ) + " in " + lruleroot.l_antecedent )
require( clauses.l_succedent.filterNot( mapping.contains ).isEmpty, "Could not find a mapping for: " + clauses.l_succedent.filterNot( mapping.contains ) + " in " + lruleroot.l_succedent )
val nclauses = LabelledSequent( clauses.l_antecedent.map( mapping ), clauses.l_succedent.map( mapping ) )
//val nclauses = LabelledSequent( nclauses_a.reverse, nclauses_s.reverse )
require( nclauses.toFSequent multiSetEquals root.toFSequent, "We tracked the clauses wrong:\\n calculated clause: " + f( nclauses ) + "\\n real clause: " + f( root ) + " parent rule " + parent.rule )
val rootsize = rule.root.occurrences.size
val essize = es.occurrences.size
val nclausessize = nclauses.occurrences.size
require( rootsize == essize + nclausessize, "The size of the generated end-sequent " + root + " is not the size of the end-sequent " +
es + " + the size of the clause " + nclauses + "(" + rootsize + " != " + essize + "+" + nclausessize + ")" )
( rule, nclauses )
case _ => throw new Exception( "Unhandled case: " + ralproof )
}
def filterByAncestor( sequent: Sequent, anc: LabelledSequent ): LabelledSequent = {
try {
val root = sequentToLabelledSequent( sequent )
LabelledSequent(
root.l_antecedent.filter( x => anc.l_antecedent.exists( y => tranAncestors( x ).contains( y ) ) ),
root.l_succedent.filter( x => anc.l_succedent.exists( y => tranAncestors( x ).contains( y ) ) ) )
} catch {
case e: Exception =>
throw new Exception( "Could not filter " + sequent + " by ancestors " + anc + ": " + e.getMessage, e )
}
}
/**
* Finds an occurrence in candidates - exclusion_list, which has the same formula and label as aux.
* @return the first occurrence in candidates which matches
*/
def findAuxByFormulaAndLabel( aux: LabelledFormulaOccurrence,
candidates: Seq[LabelledFormulaOccurrence],
exclusion_list: Seq[LabelledFormulaOccurrence] ): LabelledFormulaOccurrence = try {
findAux( candidates, x => x.skolem_label == aux.skolem_label && x.formula == aux.formula, exclusion_list )
} catch {
case e: IllegalArgumentException =>
throw new Exception( "Could not find a candidate for " + aux + " in " + candidates.mkString( "[", ", ", "]" ) + exclusion_list.mkString( " ignoring: ", ", ", "." ) )
}
/**
* Finds an occurrence in candidates - exclusion_list which has the given formula and skolem label.
* @return the first occurrence in candidates which matches
*/
def findAuxByFormulaAndLabel( formula: HOLFormula,
skolem_label: Label,
candidates: Seq[LabelledFormulaOccurrence],
exclusion_list: Seq[LabelledFormulaOccurrence] ): LabelledFormulaOccurrence = try {
findAux( candidates, x => x.skolem_label == skolem_label && x.formula == formula, exclusion_list )
} catch {
case e: IllegalArgumentException =>
throw new Exception( "Could not find a candidate for " + formula + " with label " + skolem_label + " in " + candidates.mkString( "[", ", ", "]" ) + exclusion_list.mkString( " ignoring: ", ", ", "." ) )
}
/**
* Finds the first element in candidates - exclusion_list fulfilling the predicate pred. Throws an IllegalArgumentException,
* if none is found.
* @param candidates the list of candidates to choose from
* @param pred a predicate on formula occurrences
* @param exclusion_list no candidate in exclusion_list will match
* @throws IllegalArgumentException if no candidate fulfills pred.
* @return the first element of candidates which fulfills the criteria
*/
def findAux( candidates: Seq[LabelledFormulaOccurrence],
pred: LabelledFormulaOccurrence => Boolean,
exclusion_list: Seq[LabelledFormulaOccurrence] ): LabelledFormulaOccurrence =
candidates.diff( exclusion_list ).filter( pred ).toList match {
case List( fo ) => fo
case l @ ( fo :: _ ) =>
//println("warning: multiple matching formulas"+ l.mkString(": ",", ","." ))
fo
case Nil => throw new IllegalArgumentException( "Could not find matching aux formula!" )
}
/**
* After an application of a binary rule, end-sequent material might be duplicated. This method adds contractions
* for every end-sequent formula.
* @param p a proof with an end-sequent of the form: es x es x C (where C is some additional content)
* @param es the end-sequent material which occurs twice
* @return a proof with an end-sequent of the form: es x C
*/
def contractEndsequent( p: LKProof, es: LabelledSequent ): LKProof = {
val contr_left = es.l_antecedent.foldLeft( p )( ( rp, fo ) => {
sequentToLabelledSequent( rp.root ).l_antecedent.find( x =>
x.formula == fo.formula && x.skolem_label == fo.skolem_label ) match {
case Some( occ1 ) =>
sequentToLabelledSequent( rp.root ).l_antecedent.find( x =>
occ1 != x && x.formula == fo.formula && x.skolem_label == fo.skolem_label ) match {
case Some( occ2 ) =>
ContractionLeftRule( rp, occ1, occ2 )
case None =>
println( "Warning: During contraction of the end-sequent, could not find a second antecedent occurrence of " + fo + " in " + rp.root )
rp
}
case None =>
throw new Exception( "During contraction of the end-sequent, could not find an antecedent occurrence of " + fo + " in " + rp.root )
}
} )
val contr_right = es.l_succedent.foldLeft( contr_left )( ( rp, fo ) => {
sequentToLabelledSequent( rp.root ).l_succedent.find( x =>
x.formula == fo.formula && x.skolem_label == fo.skolem_label ) match {
case Some( occ1 ) =>
sequentToLabelledSequent( rp.root ).l_succedent.find( x =>
occ1 != x && x.formula == fo.formula && x.skolem_label == fo.skolem_label ) match {
case Some( occ2 ) =>
ContractionRightRule( rp, occ1, occ2 )
case None =>
println( "Warning: During contraction of the end-sequent, could not find a second succeedent occurrence of " + fo + " in " + rp.root )
rp
}
case None =>
throw new Exception( "During contraction of the end-sequent, could not find a succedent occurrence of " + fo + " in " + rp.root )
}
} )
contr_right
}
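// Editor's note: an illustrative sketch added for this document, not part of the
// original file. contractEndsequent above removes one duplicate of every end-sequent
// formula; viewed as a plain multiset operation it is "subtract one copy of each
// element of es from the root":
private def removeOneCopyOfEach[A]( all: List[A], es: List[A] ): List[A] =
  es.foldLeft( all ) { ( acc, x ) =>
    val i = acc.indexOf( x )
    if ( i < 0 ) acc else acc.patch( i, Nil, 1 ) // drop exactly one occurrence
  }
// e.g. removeOneCopyOfEach(List("A", "A", "B", "C"), List("A", "B")) == List("A", "C")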
/* TODO: this might not work if we have atomic formulas in the end-sequent. Then a formula which comes from a weakening
might remain and, in case of subsequent substitutions, the formula decomposition steps could fail, since they expect
an introduction rule A :- A */
def filterEndsequent( root: LabelledSequent, es: LabelledSequent, struct: Struct ) = LabelledSequent(
es.antecedent.foldLeft( root.l_antecedent.toList )( ( list, fo ) =>
removeFirstWhere( list, ( x: LabelledFormulaOccurrence ) => fo.formula == x.formula ) ),
es.succedent.foldLeft( root.l_succedent.toList )( ( list, fo ) =>
removeFirstWhere( list, ( x: LabelledFormulaOccurrence ) => fo.formula == x.formula ) ) )
/**
* Computes the reflexive, transitive closure of the ancestor relation, i.e. all ancestors.
* @param fo a formula occurrence
* @return the list of all ancestors
*/
def tranAncestors( fo: FormulaOccurrence ): List[FormulaOccurrence] = {
fo :: fo.parents.toList.flatMap( x => tranAncestors( x ) )
}
/**
* Computes the reflexive, transitive closure of the ancestor relation, i.e. all ancestors.
* @param fo a formula occurrence
* @return the list of all ancestors
*/
def tranAncestors( fo: LabelledFormulaOccurrence ): List[LabelledFormulaOccurrence] = {
fo :: fo.parents.flatMap( x => tranAncestors( x ) )
}
/**
* Computes the list of earliest ancestors of the formula occurrence. I.e. we calculate
* the least elements of all ancestors of the occurrence with regard to the ancestorship relation.
* @param fo a formula occurrence
* @return the list of first ancestors
*/
def firstAncestors( fo: FormulaOccurrence ): List[FormulaOccurrence] = {
if ( fo.parents.isEmpty )
List( fo )
else
fo.parents.toList.flatMap( x => firstAncestors( x ) )
}
/**
* Computes the list of earliest ancestors of the formula occurrence. I.e. we calculate
* the least elements of all ancestors of the occurrence with regard to the ancestorship relation.
* @param fo a formula occurrence
* @return the list of first ancestors
*/
def firstAncestors( fo: LabelledFormulaOccurrence ): List[LabelledFormulaOccurrence] = {
if ( fo.parents.isEmpty )
List( fo )
else
fo.parents.toList.flatMap( x => firstAncestors( x ) )
}
def pickFOWithAncestor( l: Seq[FormulaOccurrence], anc: FormulaOccurrence ) = l.filter( x => tranAncestors( x ).contains( anc ) ).toList match {
case List( a ) => a
case l @ ( a :: _ ) =>
//println("warning: multiple matching formulas for "+anc+ l.mkString(": ",", ","." ))
a
case Nil => throw new Exception( "Could not find any occurrence with ancestor " + anc + " in " + l )
}
def pickFOWithAncestor( l: Seq[LabelledFormulaOccurrence], anc: LabelledFormulaOccurrence ) = l.filter( x => tranAncestors( x ).contains( anc ) ).toList match {
case List( a ) => a
case l @ ( a :: _ ) =>
//println("warning: multiple matching formulas for "+anc+ l.mkString(": ",", ","." ))
a
case Nil => throw new Exception( "Could not find any occurrence with ancestor " + anc + " in " + l )
}
//def pickFOwhere( l : Seq[LabelledFormulaOccurrence], prop : LabelledFormulaOccurrence => Boolean, blacklist : List[LabelledFormulaOccurrence]) =
def diffModuloOccurrence( from: Seq[LabelledFormulaOccurrence], what: Seq[LabelledFormulaOccurrence] ) = {
what.foldLeft( from.toList )( ( l, occ ) => removeFirstWhere( l, ( x: LabelledFormulaOccurrence ) => x.formula == occ.formula && x.skolem_label == occ.skolem_label ) )
}
}
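// Editor's note: the object below is an illustrative sketch added for this document,
// not part of the original ceres_omega.scala. tranAncestors above is the reflexive,
// transitive closure of the parent relation on formula occurrences; the same
// recursion on an arbitrary node type:
object AncestorClosureSketch {
  def ancestors[A]( x: A )( parents: A => List[A] ): List[A] =
    x :: parents( x ).flatMap( p => ancestors( p )( parents ) )
  // ancestors(4)(n => if (n > 1) List(n / 2) else Nil) == List(4, 2, 1)
}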
|
gisellemnr/gapt
|
src/main/scala/at/logic/gapt/proofs/algorithms/ceres/ceres_omega.scala
|
Scala
|
gpl-3.0
| 25,199
|
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package types
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
/**
* @author Alexander Podkhalyuzin
* Date: 28.02.2008
*/
object RefineStatSeq {
def parse(builder: ScalaPsiBuilder) {
while (true) {
builder.getTokenType match {
//end of parsing when we find } or builder.eof
case ScalaTokenTypes.tRBRACE | null => return
case ScalaTokenTypes.tSEMICOLON => builder.advanceLexer() //not interesting case
//otherwise parse TopStat
case _ =>
if (!RefineStat.parse(builder)) {
builder error ScalaBundle.message("wrong.top.statment.declaration")
return
}
else {
builder.getTokenType match {
case ScalaTokenTypes.tSEMICOLON => builder.advanceLexer() //it is good
case null | ScalaTokenTypes.tRBRACE => return
case _ if !builder.newlineBeforeCurrentToken => builder error ScalaBundle.message("semi.expected")
case _ =>
}
}
}
}
}
}
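// Editor's note: the object below is an illustrative sketch added for this document,
// not part of the original RefineStatSeq.scala. It shows the kind of source construct
// the parser above accepts: the statements of a refinement, i.e. the declarations
// between the braces of a structural type, separated by semicolons or newlines (a
// missing separator triggers the "semi.expected" error above).
object RefineStatSeqExample {
  type Closer = AnyRef { def close(): Unit; def isClosed: Boolean }
}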
|
jastice/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/parser/parsing/types/RefineStatSeq.scala
|
Scala
|
apache-2.0
| 1,201
|
package suggestions
package gui
import scala.collection.mutable.ListBuffer
import scala.collection.JavaConverters._
import scala.concurrent._
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.swing._
import scala.util.{ Try, Success, Failure }
import scala.swing.event._
import swing.Swing._
import javax.swing.UIManager
import Orientation._
import rx.subscriptions.CompositeSubscription
import rx.lang.scala.Observable
import rx.lang.scala.Subscription
import observablex._
import search._
object WikipediaSuggest extends SimpleSwingApplication with ConcreteSwingApi with ConcreteWikipediaApi {
{
try {
UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName())
} catch {
case t: Throwable =>
}
}
def top = new MainFrame {
/* gui setup */
title = "Query Wikipedia"
minimumSize = new Dimension(900, 600)
val button = new Button("Get") {
icon = new javax.swing.ImageIcon(javax.imageio.ImageIO.read(this.getClass.getResourceAsStream("/suggestions/wiki-icon.png")))
}
val searchTermField = new TextField
val suggestionList = new ListView(ListBuffer[String]())
val status = new Label(" ")
val editorpane = new EditorPane {
import javax.swing.border._
border = new EtchedBorder(EtchedBorder.LOWERED)
editable = false
peer.setContentType("text/html")
}
contents = new BoxPanel(orientation = Vertical) {
border = EmptyBorder(top = 5, left = 5, bottom = 5, right = 5)
contents += new BoxPanel(orientation = Horizontal) {
contents += new BoxPanel(orientation = Vertical) {
maximumSize = new Dimension(240, 900)
border = EmptyBorder(top = 10, left = 10, bottom = 10, right = 10)
contents += new BoxPanel(orientation = Horizontal) {
maximumSize = new Dimension(640, 30)
border = EmptyBorder(top = 5, left = 0, bottom = 5, right = 0)
contents += searchTermField
}
contents += new ScrollPane(suggestionList)
contents += new BorderPanel {
maximumSize = new Dimension(640, 30)
add(button, BorderPanel.Position.Center)
}
}
contents += new ScrollPane(editorpane)
}
contents += status
}
val eventScheduler = SchedulerEx.SwingEventThreadScheduler
/**
* Observables
* You may find the following methods useful when manipulating GUI elements:
* `myListView.listData = aList` : sets the content of `myListView` to `aList`
* `myTextField.text = "react"` : sets the content of `myTextField` to "react"
* `myListView.selection.items` returns a list of selected items from `myListView`
* `myEditorPane.text = "act"` : sets the content of `myEditorPane` to "act"
*/
// TO IMPLEMENT
val searchTerms: Observable[String] = searchTermField.textValues.filter(t => !t.trim.isEmpty)
// TO IMPLEMENT
val suggestions: Observable[Try[List[String]]] = searchTerms.map{
s => {
val response = wikipediaSuggestion(s)
.map(rs =>Success(rs))
.recover{
case t: Throwable => Failure(t)
}
Await.result(response, 3 seconds)
}
}
// TO IMPLEMENT
val suggestionSubscription: Subscription = suggestions.observeOn(eventScheduler) subscribe {
x => x match {
case Success(l) => suggestionList.listData_=(l)
case Failure(e) => e.printStackTrace()
}
}
// TO IMPLEMENT
val selections: Observable[String] =
button.clicks
.filter(b => suggestionList.selection.items.length > 0)
.map(b => suggestionList.selection.items.take(1)(0))
// TO IMPLEMENT
val pages: Observable[Try[String]] = selections.sanitized.map {
term => {
val response = wikipediaPage(term)
.map(rs => Success(rs))
.recover{
case t: Throwable => Failure(t)
}
Await.result(response, 3 seconds)
}
}
// TO IMPLEMENT
val pageSubscription: Subscription = pages.observeOn(eventScheduler) subscribe {
p => p match {
case Success(p) => editorpane.text_=(p)
case Failure(e) => e.printStackTrace()
}
}
}
}
trait ConcreteWikipediaApi extends WikipediaApi {
def wikipediaSuggestion(term: String) = Search.wikipediaSuggestion(term)
def wikipediaPage(term: String) = Search.wikipediaPage(term)
}
trait ConcreteSwingApi extends SwingApi {
type ValueChanged = scala.swing.event.ValueChanged
object ValueChanged {
def unapply(x: Event) = x match {
case vc: ValueChanged => Some(vc.source.asInstanceOf[TextField])
case _ => None
}
}
type ButtonClicked = scala.swing.event.ButtonClicked
object ButtonClicked {
def unapply(x: Event) = x match {
case bc: ButtonClicked => Some(bc.source.asInstanceOf[Button])
case _ => None
}
}
type TextField = scala.swing.TextField
type Button = scala.swing.Button
}
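// Editor's note: the object below is an illustrative sketch added for this document,
// not part of the original WikipediaSuggest.scala. Both `suggestions` and `pages`
// above turn a Future result into a Try by mapping to Success and recovering to
// Failure before blocking on it. That conversion in isolation, without the blocking
// Await:
object FutureToTrySketch {
  import scala.concurrent.{ExecutionContext, Future}
  import scala.util.{Failure, Success, Try}
  def toTry[T](f: Future[T])(implicit ec: ExecutionContext): Future[Try[T]] =
    f.map(Success(_)).recover { case t: Throwable => Failure(t) }
}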
|
thiagosqr/coursera-reactive
|
suggestions/src/main/scala/suggestions/gui/WikipediaSuggest.scala
|
Scala
|
apache-2.0
| 5,061
|
package io.youi.image
import com.outr.{CanvgOptions, canvg}
import io.youi._
import io.youi.dom._
import io.youi.image.resize.ImageResizer
import io.youi.net.URL
import io.youi.path.Path
import io.youi.spatial.{BoundingBox, Size}
import io.youi.stream.StreamURL
import io.youi.util.{CanvasPool, LazyFuture}
import org.scalajs.dom.html
import org.scalajs.dom.raw._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{Future, Promise}
import scala.scalajs._
class SVGImage private(private val svg: SVGSVGElement, override protected val canvas: html.Canvas, measured: Size) extends CanvasImage {
private val reDrawer = LazyFuture {
SVGImage.drawToCanvas(canvas, svg, 0.0, 0.0, canvas.width, canvas.height).map { _ =>
modified @= System.currentTimeMillis()
}
}
def modify[R](f: SVGSVGElement => R): Future[R] = {
val result = f(svg)
reDrawer.flag().map(_ => result)
}
override def resize(width: Double, height: Double): Future[SVGImage] = if (this.width == width && this.height == height) {
Future.successful(this)
} else {
SVGImage(svg, width, height)
}
override def resizeTo(canvas: html.Canvas, width: Double, height: Double, resizer: ImageResizer): Future[html.Canvas] = {
SVGImage.drawToCanvas(canvas, svg, 0.0, 0.0, width, height).map(_ => canvas)
}
override def isVector: Boolean = true
def toXML: String = (new XMLSerializer).serializeToString(svg)
override def toString: String = s"SVGImage($width x $height)"
}
object SVGImage {
case class ViewBox(width: Double = 0.0, height: Double = 0.0)
def apply(url: URL): Future[SVGImage] = {
val stream = StreamURL.stream(url)
stream.flatMap(apply)
}
def apply(svgString: String): Future[SVGImage] = try {
val div = dom.create[html.Div]("div")
div.innerHTML = svgString
val svg = div.oneByTag[SVGSVGElement]("svg")
apply(svg)
} catch {
case t: Throwable => {
scribe.error(t)
throw t
}
}
def apply(svg: SVGSVGElement): Future[SVGImage] = {
val size = measure(svg).toSize
apply(svg, size.width, size.height)
}
def apply(svg: SVGSVGElement, width: Double, height: Double): Future[SVGImage] = {
val size = measure(svg).toSize
val canvas = CanvasPool(width, height)
drawToCanvas(canvas, svg, 0.0, 0.0, width, height).map { _ =>
new SVGImage(svg, canvas, size)
}
}
def drawToCanvas(canvas: html.Canvas, svg: SVGSVGElement, x: Double, y: Double, width: Double, height: Double): Future[Unit] = {
val promise = Promise[Unit]()
val callback: js.Function = () => {
promise.success(())
}
canvg(canvas, svg.outerHTML, new CanvgOptions {
ignoreMouse = true
ignoreAnimation = true
ignoreDimensions = true
ignoreClear = true
offsetX = math.round(x).toInt
offsetY = math.round(y).toInt
scaleWidth = math.ceil(width).toInt
scaleHeight = math.ceil(height).toInt
renderCallback = callback
})
promise.future
}
def measure(svg: SVGSVGElement, applyDimension: Boolean = true, force: Boolean = false): BoundingBox = {
val viewBox: ViewBox = if (svg.viewBox != null && svg.viewBox.animVal != null) {
ViewBox(svg.viewBox.animVal.width, svg.viewBox.animVal.height)
} else {
ViewBox()
}
val definedWidth = if (svg.width.animVal.unitType == SVGLength.SVG_LENGTHTYPE_NUMBER) {
Some(svg.width.animVal.value)
} else if (viewBox.width > 0.0) {
Some(viewBox.width)
} else {
None
}
val definedHeight = if (svg.height.animVal.unitType == SVGLength.SVG_LENGTHTYPE_NUMBER) {
Some(svg.height.animVal.value)
} else if (viewBox.height > 0.0) {
Some(viewBox.height)
} else {
None
}
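// When no usable width/height is declared (or a re-measure is forced), walk the SVG's
// children and accumulate a bounding box from the supported shape elements; otherwise the
// declared dimensions are used directly.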
val bb = if (definedWidth.isEmpty || definedHeight.isEmpty || force) {
var minX = 0.0
var minY = 0.0
var maxX = 0.0
var maxY = 0.0
def measureInternal(e: Element, offsetX: Double, offsetY: Double): Unit = e match {
case g: SVGGElement => {
var ox = offsetX
var oy = offsetY
(0 until g.transform.baseVal.numberOfItems).foreach { index =>
val transform = g.transform.baseVal.getItem(index)
if (transform.`type` == SVGTransform.SVG_TRANSFORM_TRANSLATE) {
ox += transform.matrix.e
oy += transform.matrix.f
}
}
g.children.foreach(child => measureInternal(child, ox, oy))
}
case c: SVGCircleElement => {
minX = math.min(minX, offsetX + (c.cx.baseVal.value - c.r.baseVal.value))
minY = math.min(minY, offsetY + (c.cy.baseVal.value - c.r.baseVal.value))
maxX = math.max(maxX, offsetX + (c.cx.baseVal.value + c.r.baseVal.value))
maxY = math.max(maxY, offsetY + (c.cy.baseVal.value + c.r.baseVal.value))
}
case e: SVGEllipseElement => {
minX = math.min(minX, offsetX + (e.cx.baseVal.value - e.rx.baseVal.value))
minY = math.min(minY, offsetY + (e.cy.baseVal.value - e.ry.baseVal.value))
maxX = math.max(maxX, offsetX + (e.cx.baseVal.value + e.rx.baseVal.value))
maxY = math.max(maxY, offsetY + (e.cy.baseVal.value + e.ry.baseVal.value))
}
case r: SVGRectElement => {
minX = math.min(minX, offsetX + r.x.baseVal.value)
minY = math.min(minY, offsetY + r.y.baseVal.value)
maxX = math.max(maxX, offsetX + r.x.baseVal.value + r.width.baseVal.value)
maxY = math.max(maxY, offsetY + r.y.baseVal.value + r.height.baseVal.value)
}
case i: SVGImageElement => {
minX = math.min(minX, offsetX + i.x.baseVal.value)
minY = math.min(minY, offsetY + i.y.baseVal.value)
maxX = math.max(maxX, offsetX + i.x.baseVal.value + i.width.baseVal.value)
maxY = math.max(maxY, offsetY + i.y.baseVal.value + i.height.baseVal.value)
}
case g: SVGLinearGradientElement => {
minX = math.min(minX, offsetX + g.x1.baseVal.value)
minY = math.min(minY, offsetY + g.y1.baseVal.value)
maxX = math.max(maxX, offsetX + g.x2.baseVal.value)
maxY = math.max(maxY, offsetY + g.y2.baseVal.value)
}
case p: SVGPolygonElement => {
(0 until p.points.numberOfItems).foreach { index =>
val point = p.points.getItem(index)
minX = math.min(minX, offsetX + point.x)
minY = math.min(minY, offsetY + point.y)
maxX = math.max(maxX, offsetX + point.x)
maxY = math.max(maxY, offsetY + point.y)
}
}
case p: SVGPathElement => {
val path = Path(p.getAttribute("d"))
minX = math.min(minX, offsetX + path.boundingBox.x1)
minY = math.min(minY, offsetY + path.boundingBox.y1)
maxX = math.max(maxX, offsetX + path.boundingBox.x2)
maxY = math.max(maxY, offsetY + path.boundingBox.y2)
}
case _: SVGStyleElement => // Nothing to do here
case _ => scribe.warn(s"Unsupported SVG node: $e.")
}
svg.children.foreach(child => measureInternal(child, 0.0, 0.0))
BoundingBox(minX, minY, maxX, maxY)
} else {
BoundingBox(0.0, 0.0, definedWidth.get, definedHeight.get)
}
if (applyDimension) {
svg.setAttribute("width", bb.width.toString)
svg.setAttribute("height", bb.height.toString)
}
bb
}
}
|
outr/youi
|
gui/src/main/scala/io/youi/image/SVGImage.scala
|
Scala
|
mit
| 7,455
|
/*
Author:
Kristal Curtis
*/
package siren
import it.unimi.dsi.fastutil.longs.LongList
import it.unimi.dsi.fastutil.longs.LongArrayList
import org.apache.spark._
abstract class UnionFindAbstract extends Serializable {
def size: Long
def find(v: Long): Long
def union(v: Long, w: Long)
def findClusters(readLen: Int, containsN: (Array[Byte] => Boolean))
def printClusterStats(validPositions: Long)
def getNonTrivialMembers: scala.collection.mutable.Map[Long, LongArrayList]
def getStats(validPositions: Long): String
}
class UnionFindEmpty extends UnionFindAbstract {
override def size = 0L
override def find(v: Long): Long = -1L
override def union(v: Long, w: Long) { }
override def findClusters(readLen: Int, containsN: (Array[Byte] => Boolean)) { }
override def printClusterStats(validPositions: Long) { }
override def getNonTrivialMembers: scala.collection.mutable.Map[Long, LongArrayList] = null
override def getStats(validPositions: Long): String = ""
}
// could put inside object UnionFindAbstract
// then import UnionFindAbstract._
//object Wrapper {
/*implicit*/ object UnionFindAP extends AccumulatorParam[UnionFindAbstract] {
def zero(uf: UnionFindAbstract) = new UnionFindEmpty //new UnionFindL(uf.size) // make this union find empty; just call size
// if first is empty, just return 2nd
// if first is nonempty, do this merge
def addInPlace(uf1: UnionFindAbstract /* aggregate, ie, UnionFindL */, uf2: UnionFindAbstract /* incremental, ie, UnionFindGrid(Diagonal) */) = {
/*
if (uf1.size == 0L) {
//uf2
val tmp1 = new UnionFindL(GenomeLoader.genome.totalSize) // wrong value for totalSize?
val clusters = uf2.getNonTrivialMembers
clusters.keySet.foreach(k => {
val members = clusters.get(k).get
val firstMember = members.getLong(0) // convert to pos; requires knowing whether this was grid or grid diagonal
var m = 1
while (m < members.size) {
val currentMember = members.getLong(m) // convert to pos; requires knowing whether this was grid or grid diagonal
if (tmp1.find(firstMember) != tmp1.find(currentMember))
tmp1.union(firstMember, currentMember)
m += 1
}
})
tmp1
}
*/
if (uf1.size == 0L) {
//println("in case 1 of addInPlace; uf1 type: " + uf1.getClass + ", uf2 type: " + uf2.getClass)
uf2
} else {
//println("in case 2 of addInPlace; uf1 type: " + uf1.getClass + ", uf2 type: " + uf2.getClass)
val clusters = uf2.getNonTrivialMembers
clusters.keySet.foreach(k => {
val members = clusters.get(k).get
val firstMember = members.getLong(0) // convert to pos; requires knowing whether this was grid or grid diagonal
var m = 1
while (m < members.size) {
val currentMember = members.getLong(m) // convert to pos; requires knowing whether this was grid or grid diagonal
if (uf1.find(firstMember) != uf1.find(currentMember))
uf1.union(firstMember, currentMember)
m += 1
}
})
uf1
}
}
}
//}
|
fnothaft/siren-release
|
src/main/scala/siren/UnionFindAbstract.scala
|
Scala
|
bsd-2-clause
| 3,242
|
/*§
===========================================================================
KnapScal - Core
===========================================================================
Copyright (C) 2015-2016 Gianluca Costa
===========================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===========================================================================
*/
package info.gianlucacosta.knapscal.knapsack.branchbound
import info.gianlucacosta.knapscal.knapsack.ItemsFormatter
case class Solution private[branchbound](rootNode: RootNode, bestNode: Node, nodes: List[Node]) {
require(bestNode.isSolution)
require(bestNode.upperBound == 0)
require(!nodes.isEmpty)
override def toString(): String = s"""Solution value: ${bestNode.totalProfit}
|
|Solution items: ${ItemsFormatter.format(bestNode.takenItems)}
|
|Best node: ${bestNode.index}""".stripMargin
}
|
giancosta86/KnapScal-core
|
src/main/scala/info/gianlucacosta/knapscal/knapsack/branchbound/Solution.scala
|
Scala
|
apache-2.0
| 1,452
|
package breeze.math
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import breeze.storage.DefaultArrayValue
import scala.reflect.ClassTag
/**
* Marker trait for scalar values. Scalars must be immutable.
* TODO: maybe use spire for the basis of this?
*
* @author dlwh
*/
trait Field[@specialized(Int,Short,Long,Float,Double) V] extends Ring[V] {
def /(a : V, b : V) : V
def inverse(a: V) = /(one, a)
}
object Field {
/** Not a field, but whatever. */
implicit object fieldInt extends Field[Int] {
def zero = 0
def one = 1
def nan = throw new ArithmeticException("Operation resulted in integer-valued NaN")
def ==(a : Int, b : Int) = a == b
def !=(a : Int, b : Int) = a != b
def +(a : Int, b : Int) = a + b
def -(a : Int, b : Int) = a - b
def *(a : Int, b : Int) = a * b
def /(a : Int, b : Int) = a / b
def norm(a : Int) = if (a < 0) -a else a
def toDouble(a : Int) = a
def isNaN(a : Int) = false
val manifest = implicitly[ClassTag[Int]]
val defaultArrayValue = implicitly[DefaultArrayValue[Int]]
}
/** Not a field, but whatever. */
implicit object fieldShort extends Field[Short] {
def zero = 0.asInstanceOf[Short]
def one = 1.asInstanceOf[Short]
def nan = throw new ArithmeticException("Operation resulted in short-valued NaN")
def ==(a : Short, b : Short) = a == b
def !=(a : Short, b : Short) = a != b
def +(a : Short, b : Short) = (a + b).asInstanceOf[Short]
def -(a : Short, b : Short) = (a - b).asInstanceOf[Short]
def *(a : Short, b : Short) = (a * b).asInstanceOf[Short]
def /(a : Short, b : Short) = (a / b).asInstanceOf[Short]
def norm(a : Short) = if (a < 0) -a else a
def toDouble(a : Short) = a
def isNaN(a : Short) = false
val manifest = implicitly[ClassTag[Short]]
val defaultArrayValue = implicitly[DefaultArrayValue[Short]]
}
/** Not a field, but whatever. */
implicit object fieldLong extends Field[Long] {
def zero = 0l
def one = 1l
def nan = throw new ArithmeticException("Operation resulted in long-valued NaN")
def ==(a : Long, b : Long) = a == b
def !=(a : Long, b : Long) = a != b
def +(a : Long, b : Long) = a + b
def -(a : Long, b : Long) = a - b
def *(a : Long, b : Long) = a * b
def /(a : Long, b : Long) = a / b
def norm(a : Long) = if (a < 0) -a else a
def toDouble(a : Long) = a
def isNaN(a : Long) = false
val manifest = implicitly[ClassTag[Long]]
val defaultArrayValue = implicitly[DefaultArrayValue[Long]]
}
/** Not a field, but whatever. */
implicit object fieldBigInt extends Field[BigInt] {
def zero = BigInt(0)
def one = BigInt(1)
def nan = throw new ArithmeticException("Operation resulted in BigInt-valued NaN")
def ==(a : BigInt, b : BigInt) = a == b
def !=(a : BigInt, b : BigInt) = a != b
def +(a : BigInt, b : BigInt) = a + b
def -(a : BigInt, b : BigInt) = a - b
def *(a : BigInt, b : BigInt) = a * b
def /(a : BigInt, b : BigInt) = a / b
def norm(a : BigInt) = if (a < 0) (-a).toDouble else a.toDouble
def toDouble(a : BigInt) = a.toDouble
def isNaN(a : BigInt) = false
val manifest = implicitly[ClassTag[BigInt]]
val defaultArrayValue = implicitly[DefaultArrayValue[BigInt]]
}
implicit object fieldFloat extends Field[Float] {
def zero = 0.0f
def one = 1.0f
def nan = Float.NaN
def ==(a : Float, b : Float) = a == b
def !=(a : Float, b : Float) = a != b
def +(a : Float, b : Float) = a + b
def -(a : Float, b : Float) = a - b
def *(a : Float, b : Float) = a * b
def /(a : Float, b : Float) = a / b
def norm(a : Float) = if (a < 0) -a else a
def toDouble(a : Float) = a
def isNaN(a : Float) = java.lang.Float.isNaN(a)
val manifest = implicitly[ClassTag[Float]]
val defaultArrayValue = implicitly[DefaultArrayValue[Float]]
override def close(a: Float, b: Float, tolerance: Double) = (a-b).abs <= math.max(a.abs, b.abs) * tolerance
}
implicit object fieldD extends Field[Double] {
def zero = 0.0
def one = 1.0
def nan = Double.NaN
def ==(a : Double, b : Double) = a == b
def !=(a : Double, b : Double) = a != b
def +(a : Double, b : Double) = a + b
def -(a : Double, b : Double) = a - b
def *(a : Double, b : Double) = a * b
def /(a : Double, b : Double) = a / b
def norm(a : Double) = if (a < 0) -a else a
def toDouble(a : Double) = a
def isNaN(a : Double) = java.lang.Double.isNaN(a)
val manifest = implicitly[ClassTag[Double]]
val defaultArrayValue = implicitly[DefaultArrayValue[Double]]
override def close(a: Double, b: Double, tolerance: Double) = (a-b).abs <= math.max(a.abs, b.abs) * tolerance
}
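// Usage sketch (hypothetical helper, not part of this file): fold with whichever Field
// instance is in implicit scope, e.g. fieldD for Double.
//   def sumAll[V](xs: Seq[V])(implicit f: Field[V]): V = xs.foldLeft(f.zero)(f.+(_, _))
//   sumAll(Seq(1.0, 2.0, 3.0))  // == 6.0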
}
|
ktakagaki/breeze
|
src/main/scala/breeze/math/Field.scala
|
Scala
|
apache-2.0
| 5,238
|
package com.rasterfoundry.database
import java.util.UUID
import cats.Applicative
import cats.implicits._
import com.rasterfoundry.database.util._
import com.rasterfoundry.datamodel._
import com.rasterfoundry.datamodel.PageRequest
import doobie._
import doobie.implicits._
import doobie.postgres.implicits._
import doobie.postgres.circe.jsonb.implicits._
object AoiDao extends Dao[AOI] {
val tableName = "aois"
val selectF: Fragment =
sql"""
SELECT
id, created_at, modified_at,
created_by, owner, shape, filters, is_active, start_time,
approval_required, project_id
FROM
""" ++ tableF
def unsafeGetAoiById(id: UUID): ConnectionIO[AOI] =
query.filter(id).select
def getAoiById(id: UUID): ConnectionIO[Option[AOI]] =
query.filter(id).selectOption
def updateAOI(aoi: AOI): ConnectionIO[Int] = {
(fr"UPDATE" ++ tableF ++ fr"SET" ++ fr"""
modified_at = NOW(),
shape = ${aoi.shape},
filters = ${aoi.filters},
is_active = ${aoi.isActive},
start_time = ${aoi.startTime},
approval_required = ${aoi.approvalRequired}
WHERE
id = ${aoi.id}
""").update.run
}
def createAOI(aoi: AOI, user: User): ConnectionIO[AOI] = {
val ownerId = Ownership.checkOwner(user, Some(aoi.owner))
val aoiCreate: ConnectionIO[AOI] = (fr"INSERT INTO" ++ tableF ++ fr"""
(id, created_at, modified_at,
created_by, owner, shape, filters, is_active,
approval_required, start_time, project_id)
VALUES
(${aoi.id}, NOW(), NOW(),
${user.id}, ${ownerId}, ${aoi.shape}, ${aoi.filters}, ${aoi.isActive},
${aoi.approvalRequired}, ${aoi.startTime}, ${aoi.projectId})
""").update.withUniqueGeneratedKeys[AOI](
"id",
"created_at",
"modified_at",
"created_by",
"owner",
"shape",
"filters",
"is_active",
"start_time",
"approval_required",
"project_id"
)
aoiCreate
}
// TODO embed shape into aoi
def listAOIs(
projectId: UUID,
page: PageRequest
): ConnectionIO[PaginatedResponse[AOI]] =
query.filter(fr"project_id = ${projectId}").page(page)
def listAuthorizedAois(
user: User,
aoiQueryParams: AoiQueryParameters,
page: PageRequest
): ConnectionIO[PaginatedResponse[AOI]] = {
val authedProjectsIO = ProjectDao.authQuery(user, ObjectType.Project).list
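// An AOI is listed if its project is authorized for the user or the user owns it directly.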
for {
authedProjects <- authedProjectsIO
authedProjectIdsF = (authedProjects map { _.id }).toNel map {
Fragments.in(fr"project_id", _)
}
authFilterF = Fragments.orOpt(
authedProjectIdsF,
Some(fr"owner = ${user.id}")
)
aois <- {
AoiDao.query
.filter(aoiQueryParams)
.filter(authFilterF)
.page(page)
}
} yield { aois }
}
def deleteAOI(id: UUID): ConnectionIO[Int] = {
(
fr"DELETE FROM" ++ tableF ++ Fragments.whereAndOpt(Some(fr"id = ${id}"))
).update.run
}
def authorize(
aoiId: UUID,
user: User,
actionType: ActionType
): ConnectionIO[Boolean] =
for {
aoiO <- AoiDao.query.filter(aoiId).selectOption
projectAuthed <- aoiO map { _.projectId } match {
case Some(projectId) =>
ProjectDao.authorized(user, ObjectType.Project, projectId, actionType)
case _ => Applicative[ConnectionIO].pure(AuthFailure[Project]())
}
} yield { projectAuthed.toBoolean }
}
|
aaronxsu/raster-foundry
|
app-backend/db/src/main/scala/AoiDao.scala
|
Scala
|
apache-2.0
| 3,492
|
package org.jetbrains.plugins.scala.codeInspection.collections
import org.jetbrains.plugins.scala.codeInspection.InspectionBundle
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScExpression
/**
* @author t-kameyama
*/
class CollectHeadOptionInspection extends OperationOnCollectionInspection {
override def possibleSimplificationTypes: Array[SimplificationType] = Array(CollectHeadOption)
}
object CollectHeadOption extends SimplificationType {
override def hint: String = InspectionBundle.message("replace.collect.headOption.with.collectFirst")
override def getSimplification(expr: ScExpression): Option[Simplification] = {
expr match {
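// Matches `qual.collect(f).headOption` and offers `qual.collectFirst(f)` instead.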
case qual `.collect` (f) `.headOption` () =>
Some(replace(expr).withText(invocationText(qual, "collectFirst", f)).highlightFrom(qual))
case _ => None
}
}
}
|
triplequote/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/codeInspection/collections/CollectHeadOptionInspection.scala
|
Scala
|
apache-2.0
| 845
|
package adtoyou.spark.analysis
import java.text.SimpleDateFormat
import java.util.{Calendar, Date}
import aiouniya.spark.MyRDDFunctions._
import aiouniya.spark.util.MD5Util
import com.redislabs.provider.redis._
import org.apache.spark.{SparkConf, SparkContext}
import redis.clients.util.JedisClusterCRC16
/**
* Created by wangqy on 2017/5/19.
*/
object UMLRefresh {
def main(args: Array[String]): Unit = {
val conf = new SparkConf()
.set("spark.shuffle.file.buffer", "128k")
.set("spark.reducer.maxSizeInFlight", "96m")
.set("redis.host", "192.168.3.156")
.set("redis.port", "9801")
.set("redis.timeout", "30000")
val sc = new SparkContext(conf)
rmFromRedis(sc)
val dtFormat = new SimpleDateFormat("yyyyMM")
val cal: Calendar = Calendar.getInstance
val now = new Date
cal.setTime(now)
cal.add(Calendar.MONTH, -2)
val prev2Month = dtFormat.format(cal.getTime)
cal.add(Calendar.MONTH, -1)
val prev3Month = dtFormat.format(cal.getTime)
cal.add(Calendar.MONTH, -1)
val prev4Month = dtFormat.format(cal.getTime)
val userFile: String = s"/drp/tyv2/data/user_data/{$prev2Month*,$prev3Month*,$prev4Month*}"
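// Build ("UML|" + 16-char MD5, raw user id) pairs from the last three months of user data,
// skipping empty ids and values that already look like hashes (length 32 or 40), and
// keeping one value per key before writing to Redis.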
val userRDD = sc.textFile(userFile)
.map(_.split('\t')(1).trim)
.filter(u => u.length != 0 && u.length != 32 && u.length != 40)
.map(u => (MD5Util.toMD516(u), u))
.filter(_._1 != null)
.map(x => ("UML|" + x._1, x._2))
.reduceByKey((v1, _) => v1)
sc.toRedisKV(userRDD, 10, 250)
sc.stop
}
def rmFromRedis(sc: SparkContext)
(implicit redisConfig: RedisConfig = new RedisConfig(new RedisEndpoint(sc.getConf))) {
// val hosts = testScaleHosts(redisConfig, 250)
// println(hosts.size)
// hosts.foreach(x => println(x.productIterator.mkString(",")))
sc.fromRedisKeyPattern("UML|*", 250)
.foreachPartition { keys =>
if (keys.hasNext) {
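// Keys in this partition are expected to map to one node (spark-redis partitions by slot
// range), so a single connection taken from the first key is reused: group by slot and
// pipeline the DEL commands.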
val keyArr = keys.map(k => (JedisClusterCRC16.getSlot(k), k)).toArray
val k = keyArr(0)._2
val conn = redisConfig.connectionForKey(k)
val pipeline = conn.pipelined()
keyArr.groupBy(_._1)
.foreach(x =>
pipeline.del(x._2.map(_._2): _*)
)
conn.close()
}
}
// redisConfig.hosts.foreach { host =>
// println("clear host=" + host.endpoint.host + ":" + host.endpoint.port)
// val jedis = host.connect()
// try {
// val pipeline = jedis.pipelined
// for (i <- '0' to 'f') {
// val response = pipeline.keys(s"UML|$i*")
// pipeline.sync
// val keySet = response.get
// val len = keySet.size
// val strArr = new Array[String](len)
// val keyArr = keySet.toArray(strArr)
// .map(k => (JedisClusterCRC16.getSlot(k), k))
// keyArr.groupBy(_._1)
// .foreach(x =>
// pipeline.del(x._2.map(_._2): _*)
// )
// pipeline.sync
// }
// } catch {
// case e: Throwable =>
// System.out.println(ExceptionUtils.getFullStackTrace(e))
// } finally {
// if (jedis != null) jedis.close()
// }
// }
}
// for test only
def testScaleHosts(redisConfig: RedisConfig, partitionNum: Int): Seq[(String, Int, Int, Int)] = {
def split(host: RedisNode, cnt: Int) = {
val endpoint = host.endpoint
val start = host.startSlot
val end = host.endSlot
val range = (end - start) / cnt
println(endpoint.host + ":" + endpoint.port)
println(start + "~" + end)
println("cnt=" + cnt)
(0 until cnt).map(i => {
(endpoint.host,
endpoint.port,
if (i == 0) start else (start + range * i + 1),
if (i != cnt - 1) (start + range * (i + 1)) else end)
})
}
val hosts = redisConfig.hosts.sortBy(_.startSlot)
println("hosts size=" + hosts.size)
if (hosts.size == partitionNum) {
hosts.map(x => (x.endpoint.host, x.endpoint.port, x.startSlot, x.endSlot))
} else if (hosts.size < partitionNum) {
val presExtCnt = partitionNum / hosts.size
val lastExtCnt = if (presExtCnt * hosts.size < partitionNum)
(partitionNum - presExtCnt * (hosts.size - 1)) else presExtCnt
println("presExtCnt=" + presExtCnt)
println("lastExtCnt=" + lastExtCnt)
hosts.zipWithIndex.flatMap {
case (host, idx) => {
split(host, if (idx == hosts.size - 1) lastExtCnt else presExtCnt)
}
}
} else {
val presExtCnt = hosts.size / partitionNum
(0 until partitionNum).map {
idx => {
val ip = hosts(idx * presExtCnt).endpoint.host
val port = hosts(idx * presExtCnt).endpoint.port
val start = hosts(idx * presExtCnt).startSlot
val end = hosts(if (idx == partitionNum - 1) {
(hosts.size - 1)
} else {
((idx + 1) * presExtCnt - 1)
}).endSlot
(ip, port, start, end)
}
}
}
}
}
|
7u/spark-learning
|
spark.learning/spark_test/src/main/scala/adtoyou/spark/analysis/UMLRefresh.scala
|
Scala
|
apache-2.0
| 5,356