code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.sbt
import java.security.SecureRandom
import com.typesafe.config.ConfigValue
import com.typesafe.config.ConfigOrigin
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import sbt._
/**
* Provides tasks for generating and updating application secrets
*/
object ApplicationSecretGenerator {
  // Configuration keys for the current secret and the legacy (pre-2.6) secret.
  private val playHttpSecretKey = "play.http.secret.key"
  private val playCryptoSecret = "play.crypto.secret"

  /**
   * Generates a random 64-character secret. Characters are drawn from the
   * code-point range 48..122 (digits through lowercase letters plus some
   * punctuation); backslash sequences are then replaced with '/' so the
   * value stays safe to embed in a configuration file.
   */
  def generateSecret = {
    val random = new SecureRandom()
    (1 to 64)
      .map { _ =>
        (random.nextInt(75) + 48).toChar
      }
      .mkString
      .replaceAll("\\\\\\\\+", "/")
  }

  /** sbt task that generates a fresh secret and echoes it to stdout. */
  def generateSecretTask = Def.task[String] {
    val secret = generateSecret
    println("Generated new secret: " + secret)
    secret
  }

  /**
   * sbt task that writes a freshly generated secret into the application
   * configuration file. The file is taken from the `config.file` system
   * property when set (resolved against the project base directory),
   * otherwise `application.conf` in the compile resource directory.
   * Fails the build when the file does not exist.
   */
  def updateSecretTask = Def.task[File] {
    val secret: String = play.sbt.PlayImport.PlayKeys.generateSecret.value
    val baseDir: File = Keys.baseDirectory.value
    val log = Keys.streams.value.log
    val appConfFile = sys.props.get("config.file") match {
      case Some(applicationConf) => new File(baseDir, applicationConf)
      case None => (Keys.resourceDirectory in Compile).value / "application.conf"
    }
    if (appConfFile.exists()) {
      log.info("Updating application secret in " + appConfFile.getCanonicalPath)
      val lines = IO.readLines(appConfFile)
      val config: Config = ConfigFactory.parseString(lines.mkString("\\n"))
      // Replace an existing secret in place; otherwise prepend a new entry.
      val newLines = if (config.hasPath(playHttpSecretKey)) {
        log.info("Replacing old application secret: " + config.getString(playHttpSecretKey))
        getUpdatedSecretLines(secret, lines, config)
      } else {
        log.warn("Did not find application secret in " + appConfFile.getCanonicalPath)
        log.warn("Adding application secret to start of file")
        val secretConfig = s"""$playHttpSecretKey="$secret""""
        secretConfig :: lines
      }
      IO.writeLines(appConfFile, newLines)
      appConfFile
    } else {
      log.error("Could not find configuration file at " + appConfFile.getCanonicalPath)
      throw new FeedbackProvidedException {}
    }
  }

  /**
   * Replaces the secret value on the exact line where the parsed config
   * reports it was defined, and removes a legacy `play.crypto.secret`
   * entry if one is present. Fails when the config origin carries no
   * usable line number (e.g. the value came from a substitution).
   */
  def getUpdatedSecretLines(newSecret: String, lines: List[String], config: Config): List[String] = {
    val secretConfigValue: ConfigValue = config.getValue(playHttpSecretKey)
    val secretConfigOrigin: ConfigOrigin = secretConfigValue.origin()
    if (secretConfigOrigin.lineNumber == -1) {
      throw new MessageOnlyException(s"Could not change $playHttpSecretKey")
    } else {
      // Config line numbers are 1-based; the lines list is 0-based.
      val lineNumber: Int = secretConfigOrigin.lineNumber - 1
      val newLines: List[String] = lines.updated(
        lineNumber,
        lines(lineNumber).replace(secretConfigValue.unwrapped().asInstanceOf[String], newSecret)
      )
      // removes existing play.crypto.secret key
      if (config.hasPath(playCryptoSecret)) {
        val applicationSecretValue = config.getValue(playCryptoSecret)
        val applicationSecretOrigin = applicationSecretValue.origin()
        if (applicationSecretOrigin.lineNumber == -1) {
          newLines
        } else {
          newLines.patch(applicationSecretOrigin.lineNumber() - 1, Nil, 1)
        }
      } else {
        newLines
      }
    }
  }
}
| marcospereira/playframework | dev-mode/sbt-plugin/src/main/scala/play/sbt/ApplicationSecretGenerator.scala | Scala | apache-2.0 | 3,332 |
package plp.expressions2.util
import plp.expressions1.util.{Visitor => VisitorE1}
import plp.expressions2.expression.{ExpDeclaracao, Id}
// Visitor over the expressions2 AST: extends the expressions1 visitor with
// the two node types introduced in this package (ExpDeclaracao and Id).
trait Visitor[T] extends VisitorE1[T] {
  def visit(expression: ExpDeclaracao): T
  def visit(expression: Id): T
  // Extends the inherited dispatch partial function with cases for the new
  // node types; unmatched nodes still fall through to super.v's behavior.
  override def v = super.v.orElse {
    case a: ExpDeclaracao => visit(a)
    case a: Id => visit(a)
  }
}
| lrlucena/PLP-Scala | src/plp/expressions2/util/Visitor.scala | Scala | gpl-3.0 | 370 |
package org.openapitools.models
import io.circe._
import io.finch.circe._
import io.circe.generic.semiauto._
import io.circe.java8.time._
import org.openapitools._
import org.openapitools.models.GithubRepositorylinks
import org.openapitools.models.GithubRepositorypermissions
/**
*
* @param Underscoreclass
* @param Underscorelinks
* @param defaultBranch
* @param description
* @param name
* @param permissions
* @param _private
* @param fullName
*/
// Generated model for a GitHub repository as exposed by the Jenkins API.
// All fields are optional because the upstream JSON may omit any of them.
case class GithubRepository(Underscoreclass: Option[String],
                            Underscorelinks: Option[GithubRepositorylinks],
                            defaultBranch: Option[String],
                            description: Option[String],
                            name: Option[String],
                            permissions: Option[GithubRepositorypermissions],
                            _private: Option[Boolean],
                            fullName: Option[String]
                            )

object GithubRepository {
  /**
   * Creates the codec for converting GithubRepository from and to JSON,
   * derived automatically from the case class shape via circe semiauto.
   */
  implicit val decoder: Decoder[GithubRepository] = deriveDecoder
  implicit val encoder: ObjectEncoder[GithubRepository] = deriveEncoder
}
| cliffano/swaggy-jenkins | clients/scala-finch/generated/src/main/scala/org/openapitools/models/GithubRepository.scala | Scala | mit | 1,156 |
package benchmarks.lattices
import kofre.causality.{CausalContext, Dot}
import java.util.concurrent.TimeUnit
import org.openjdk.jmh.annotations._
import kofre.{IdUtil, Lattice}
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@Warmup(iterations = 3, time = 1000, timeUnit = TimeUnit.MILLISECONDS)
@Measurement(iterations = 3, time = 1000, timeUnit = TimeUnit.MILLISECONDS)
@Fork(3)
@Threads(1)
@State(Scope.Thread)
class ContextBench {

  // Number of dot ranges per replica; each benchmark runs once per value.
  @Param(Array("1", "1000"))
  var size: Long = _

  // Fixtures populated in setup(); JMH requires vars initialized there.
  var rep1Set: CausalContext = _
  var rep1SetPlusOne: CausalContext = _
  var rep2Set: CausalContext = _
  val rep1id = IdUtil.genId()
  val rep2id = IdUtil.genId()
  var rep1single: CausalContext = _

  // Builds a context of `size` runs of `len` consecutive dots, spaced `mul`
  // apart and shifted by `off`, all owned by the given replica id.
  private def makeRep(rep: IdUtil.Id, mul: Long, off: Long, len: Long): CausalContext = {
    val ranges = Range.Long(0L, size, 1).map(i => Range.Long(i * mul + off, i * mul + len + off, 1))
    CausalContext.fromSet(ranges.flatten.iterator.map(Dot(rep, _)).toSet)
  }

  @Setup
  def setup(): Unit = {
    // rep1Set and rep2Set partially overlap in their range layout (offsets 0 and 5).
    rep1Set = makeRep(rep1id, 10, 0, 7)
    rep2Set = makeRep(rep2id, 10, 5, 7)
    rep1SetPlusOne = rep1Set.add(rep2id, 5)
    rep1single = CausalContext.empty.add(rep1id, size + 10)
  }

  @Benchmark
  def merge() = Lattice.merge(rep1Set, rep2Set)

  @Benchmark
  def mergeSelf() = Lattice.merge(rep1Set, rep1Set)

  @Benchmark
  def mergeSelfPlusOne() = Lattice.merge(rep1Set, rep1SetPlusOne)

  @Benchmark
  def diffSelf() = rep1Set.diff(rep1Set)

  @Benchmark
  def diffOther() = rep1Set.diff(rep2Set)

  @Benchmark
  def diffSingle() = rep1SetPlusOne.diff(rep1Set)

  @Benchmark
  def intersectSelf() = rep1Set.intersect(rep1Set)

  @Benchmark
  def intersectOther() = rep1Set.intersect(rep2Set)
}
| guidosalva/REScala | Code/Microbenchmarks/src/main/scala/benchmarks/lattices/ContextBench.scala | Scala | apache-2.0 | 1,795 |
// Calling inherited methods from subclass constructor
package localhost
// Negative example for the initialization checker: the subclass constructor
// calls the inherited method m1(), which reads the overridable val v1.
// Calling inherited/overridable methods from a constructor is the pattern
// this example is meant to get flagged for (a further subclass could observe
// uninitialized state) — do not "fix" it.
class inheritance extends base {
  val v1 = 4
  m1()
}
// Base class whose m1() depends on the abstract val v1 having been
// initialized by a subclass at the time of the call.
abstract class base {
  val v1: Int
  def m1() {
    println(v1)
  }
}
| pabzdzdzwiagief/initialization | src/test/resources/examples/negatives/inheritance.scala | Scala | bsd-2-clause | 204 |
/*
* Copyright (C) 2014 - 2017 Contributors as noted in the AUTHORS.md file
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.wegtam.tensei.agent.processor
import akka.testkit.{ EventFilter, TestActorRef }
import com.wegtam.tensei.adt.ElementReference
import com.wegtam.tensei.agent.ActorSpec
import com.wegtam.tensei.agent.adt.TenseiForeignKeyValueType
import com.wegtam.tensei.agent.processor.AutoIncrementValueBuffer.{
AutoIncrementValueBufferMessages,
AutoIncrementValuePair
}
/**
 * Tests for the [[AutoIncrementValueBufferWorker]] actor, covering the
 * store and return message flows for a single element reference.
 */
class AutoIncrementValueBufferWorkerTest extends ActorSpec {
  describe("AutoIncrementValueBufferWorker") {
    val agentRunIdentifier = Option("TEST")

    // The element reference every worker in this suite is bound to.
    val elementRef = ElementReference(dfasdlId = "MY-DFASDL", elementId = "MY-ELEMENT")

    // Shared fixture: old -> new auto-increment value pairs. Previously this
    // vector was duplicated verbatim in three test cases.
    def samplePairs: Vector[AutoIncrementValuePair] = Vector(
      AutoIncrementValuePair(TenseiForeignKeyValueType.FkLong(Option(1L)),
                             TenseiForeignKeyValueType.FkLong(Option(5L))),
      AutoIncrementValuePair(TenseiForeignKeyValueType.FkString(Option("FOO")),
                             TenseiForeignKeyValueType.FkLong(Option(6L))),
      AutoIncrementValuePair(TenseiForeignKeyValueType.FkLong(Option(3L)),
                             TenseiForeignKeyValueType.FkLong(Option(7L))),
      AutoIncrementValuePair(TenseiForeignKeyValueType.FkString(Option("BAR")),
                             TenseiForeignKeyValueType.FkLong(Option(8L)))
    )

    describe("returning values") {
      describe("for unknown values") {
        it("should return a not found message") {
          val worker = TestActorRef(AutoIncrementValueBufferWorker.props(agentRunIdentifier, elementRef))
          worker ! AutoIncrementValueBufferMessages.Store(elementRef, samplePairs)
          // Query a value that was never stored.
          val unknownValue = TenseiForeignKeyValueType.FkLong(Option(-1L))
          worker ! AutoIncrementValueBufferMessages.Return(elementRef, unknownValue)
          expectMsg(AutoIncrementValueBufferMessages.ValueNotFound(elementRef, unknownValue))
        }
      }

      describe("for known values") {
        it("should return the values") {
          val worker = TestActorRef(AutoIncrementValueBufferWorker.props(agentRunIdentifier, elementRef))
          worker ! AutoIncrementValueBufferMessages.Store(elementRef, samplePairs)
          // Every stored old value must resolve to its mapped new value.
          samplePairs.foreach { p =>
            worker ! AutoIncrementValueBufferMessages.Return(elementRef, p.oldValue)
            expectMsg(AutoIncrementValueBufferMessages.ChangedValue(elementRef, p.oldValue, p.newValue))
          }
        }
      }
    }

    describe("storing values") {
      describe("for unknown element references") {
        it("should fail") {
          val worker = TestActorRef(AutoIncrementValueBufferWorker.props(agentRunIdentifier, elementRef))
          val wrongRef = ElementReference(dfasdlId = "SOME-DFASDL", elementId = "SOME-ELEMENT")
          // Storing under a reference the worker is not responsible for must
          // be logged as an error exactly once.
          EventFilter.error(
            occurrences = 1,
            start = "Received store foreign key value message for wrong element"
          ) intercept (worker ! AutoIncrementValueBufferMessages.Store(
            wrongRef,
            Vector.empty[AutoIncrementValuePair]
          ))
        }
      }

      describe("for the correct element reference") {
        it("should work") {
          val worker = TestActorRef(AutoIncrementValueBufferWorker.props(agentRunIdentifier, elementRef))
          worker ! AutoIncrementValueBufferMessages.Store(elementRef, samplePairs)
          // Verify the worker's internal buffer directly via the underlying actor.
          samplePairs.foreach(
            p =>
              worker.underlyingActor
                .asInstanceOf[AutoIncrementValueBufferWorker]
                .buffer(p.oldValue) should be(p.newValue)
          )
        }
      }
    }
  }
}
| Tensei-Data/tensei-agent | src/test/scala/com/wegtam/tensei/agent/processor/AutoIncrementValueBufferWorkerTest.scala | Scala | agpl-3.0 | 5,792 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.rethink.config
import org.scalatest.wordspec.AnyWordSpec
/**
* The point of this test is to check that constants keys are not changed after the refactor of the code.
*/
/**
 * Guards against accidental renaming of the public configuration keys:
 * each expected key literal must match the constant exposed by
 * [[ReThinkConfigConstants]].
 */
class TestReThinkSourceConstants extends AnyWordSpec {
  // Expected configuration key literals (the public contract).
  val RETHINK_HOST = "connect.rethink.host"
  val RETHINK_DB = "connect.rethink.db"
  val RETHINK_PORT = "connect.rethink.port"
  val ROUTE_QUERY = "connect.rethink.kcql"

  "RETHINK_HOST should have the same key in ReThinkSinkConfigConstants" in {
    // `==` is the idiomatic (null-safe) equality check in Scala.
    assert(RETHINK_HOST == ReThinkConfigConstants.RETHINK_HOST)
  }

  "RETHINK_DB should have the same key in ReThinkSinkConfigConstants" in {
    assert(RETHINK_DB == ReThinkConfigConstants.RETHINK_DB)
  }

  "RETHINK_PORT should have the same key in ReThinkSinkConfigConstants" in {
    assert(RETHINK_PORT == ReThinkConfigConstants.RETHINK_PORT)
  }

  "ROUTE_QUERY should have the same key in ReThinkSinkConfigConstants" in {
    assert(ROUTE_QUERY == ReThinkConfigConstants.KCQL)
  }
}
| datamountaineer/stream-reactor | kafka-connect-rethink/src/test/scala/com/datamountaineer/streamreactor/connect/rethink/config/TestReThinkSourceConstants.scala | Scala | apache-2.0 | 1,650 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.builders
import monix.execution.Cancelable
import monix.reactive.Observable
import monix.reactive.observers.Subscriber
/** An observable that only signals `onComplete` */
private[reactive] object EmptyObservable extends Observable[Nothing] {
  /**
   * Immediately completes the subscriber. Nothing is ever emitted, so there
   * is no running computation to cancel and an empty cancelable is returned.
   */
  def unsafeSubscribeFn(subscriber: Subscriber[Nothing]): Cancelable = {
    subscriber.onComplete()
    Cancelable.empty
  }
}
| monixio/monix | monix-reactive/shared/src/main/scala/monix/reactive/internal/builders/EmptyObservable.scala | Scala | apache-2.0 | 1,079 |
package org.jetbrains.plugins.scala
package lang
package folding
import com.intellij.psi.impl.source.tree.LeafPsiElement
import scaladoc.parser.ScalaDocElementTypes
import _root_.scala.collection.mutable._
import psi.api.toplevel.packaging.ScPackaging
import java.lang.String
import psi.impl.statements.ScTypeAliasDefinitionImpl
import psi.api.base.types.{ScTypeElement, ScCompoundTypeElement, ScParenthesisedTypeElement, ScTypeProjection}
import com.intellij.lang.ASTNode
import com.intellij.lang.folding.FoldingBuilder
import com.intellij.lang.folding.FoldingDescriptor
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import com.intellij.openapi.util._
import params.ScTypeParamClause
import psi.api.base.patterns.ScCaseClause
import lexer.ScalaTokenTypes
import psi.api.base.ScLiteral
import com.intellij.psi.tree.IElementType
import com.intellij.openapi.editor.{Document, FoldingGroup}
import com.intellij.psi._
import parser.ScalaElementTypes
import settings.ScalaCodeFoldingSettings
import scala.Boolean
import worksheet.WorksheetFoldingBuilder
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
/**
 * @author Ilya Sergey
 */
class ScalaFoldingBuilder extends FoldingBuilder {
import ScalaFoldingUtil._
  /**
   * Recursively walks the AST and appends a folding descriptor for every
   * foldable region found under `node`: comments, template/block bodies,
   * import groups, match statements, function bodies, multiline strings,
   * argument lists, type lambdas and custom `region`/`editor-fold` markers.
   *
   * @param processedComments mutable set of line comments already folded into a group
   * @param processedRegions  mutable set of custom-region markers already consumed
   */
  private def appendDescriptors(node: ASTNode,
                                document: Document,
                                descriptors: ArrayBuffer[FoldingDescriptor],
                                processedComments: HashSet[PsiElement],
                                processedRegions: HashSet[PsiElement]) {
    val psi = node.getPsi
    if (isMultiline(node) || isMultilineImport(node)) {
      node.getElementType match {
        case ScalaTokenTypes.tBLOCK_COMMENT | ScalaTokenTypes.tSH_COMMENT | ScalaElementTypes.TEMPLATE_BODY |
             ScalaDocElementTypes.SCALA_DOC_COMMENT => if (!isWorksheetResults(node)) descriptors += new FoldingDescriptor(node, node.getTextRange)
        case ScalaElementTypes.IMPORT_STMT if isGoodImport(node) => {
          // Fold the whole import group, keeping the leading "import " visible.
          descriptors += (new FoldingDescriptor(node,
            new TextRange(node.getTextRange.getStartOffset + IMPORT_KEYWORD.length + 1, getImportEnd(node))))
        }
        case ScalaElementTypes.MATCH_STMT if isMultilineBodyInMatchStmt(node)=>
          // Fold the match body, keeping "<expr> match" visible.
          descriptors += (new FoldingDescriptor(node,
            new TextRange(node.getTextRange.getStartOffset + startOffsetForMatchStmt(node),
              node.getTextRange.getEndOffset)))
        case ScalaElementTypes.FUNCTION_DEFINITION =>
          psi match {
            case f: ScFunctionDefinition => {
              val (isMultilineBody, textRange, _) = isMultilineFuncBody(f)
              if (isMultilineBody) descriptors += new FoldingDescriptor(node, textRange)
            }
            case _ =>
          }
        case _ =>
      }
      psi match {
        case p: ScPackaging if p.isExplicit => {
          // Fold an explicit packaging block, keeping "package " visible.
          descriptors += (new FoldingDescriptor(node,
            new TextRange(node.getTextRange.getStartOffset + PACKAGE_KEYWORD.length + 1, node.getTextRange.getEndOffset)))
        }
        case p: ScLiteral if p.isMultiLineString =>
          descriptors += new FoldingDescriptor(node, node.getTextRange)
        case p: ScArgumentExprList =>
          descriptors += new FoldingDescriptor(node, node.getTextRange)
        case _: ScBlockExpr
          if ScalaCodeFoldingSettings.getInstance().isFoldingForAllBlocks =>
          descriptors += new FoldingDescriptor(node, node.getTextRange)
        case _ =>
      }
      val treeParent: ASTNode = node.getTreeParent
      // When "fold all blocks" is off, still fold block expressions that
      // appear in argument lists, definitions, for- and if-statements.
      if (!ScalaCodeFoldingSettings.getInstance().isFoldingForAllBlocks &&
        treeParent != null && (treeParent.getPsi.isInstanceOf[ScArgumentExprList] ||
        treeParent.getPsi.isInstanceOf[ScPatternDefinition] ||
        treeParent.getPsi.isInstanceOf[ScVariableDefinition] ||
        treeParent.getPsi.isInstanceOf[ScForStatement] ||
        treeParent.getPsi.isInstanceOf[ScIfStmt])) {
        psi match {
          case _: ScBlockExpr => descriptors += new FoldingDescriptor(node, node.getTextRange)
          case _ =>
        }
      }
      if (treeParent != null) {
        treeParent.getPsi match {
          // Fold a block expression used as the right operand of an infix expression.
          case inf: ScInfixExpr if inf.rOp == node.getPsi =>
            psi match {
              case _: ScBlockExpr => descriptors += new FoldingDescriptor(node, node.getTextRange)
              case _ =>
            }
          case _ =>
        }
      }
      if (treeParent != null && treeParent.getPsi.isInstanceOf[ScCaseClause]) {
        psi match {
          case _: ScBlock => descriptors += new FoldingDescriptor(node, node.getTextRange)
          case _ =>
        }
      }
    } else if (node.getElementType == ScalaElementTypes.TYPE_PROJECTION) {
      node.getPsi match {
        // Type lambdas fold into a compact "name[params] = aliased" form using
        // two grouped descriptors (prefix and suffix of the projection).
        case TypeLambda(typeName, typeParamClause, aliasedType) =>
          val group = FoldingGroup.newGroup("typelambda")
          val range1 = new TextRange(node.getTextRange.getStartOffset, typeParamClause.getTextRange.getStartOffset)
          val d1 = new FoldingDescriptor(node, range1, group) {
            override def getPlaceholderText = typeName
          }
          val range2 = new TextRange(aliasedType.getTextRange.getEndOffset, node.getTextRange.getEndOffset)
          val d2 = new FoldingDescriptor(aliasedType.getNode, range2, group) {
            override def getPlaceholderText = ""
          }
          descriptors ++= Seq(d1, d2)
        case _ =>
      }
    } else if (node.getElementType == ScalaTokenTypes.tLINE_COMMENT && !isWorksheetResults(node)) {
      val stack = new Stack[PsiElement]
      if (!isCustomRegionStart(node.getText) && !isCustomRegionEnd(node.getText)) {
        // Plain line comment: try to fold a run of consecutive line comments.
        addCommentFolds(node.getPsi.asInstanceOf[PsiComment], processedComments, descriptors)
      } else if (isCustomRegionStart(node.getText)) {
        if (isTagRegionStart(node.getText)) {
          addCustomRegionFolds(node.getPsi, processedRegions, descriptors, isTagRegion = true, stack)
        } else if (isSimpleRegionStart(node.getText)) {
          addCustomRegionFolds(node.getPsi, processedRegions, descriptors, isTagRegion = false, stack)
        }
      }
    }
    // Recurse into all children.
    for (child <- node.getChildren(null)) {
      appendDescriptors(child, document, descriptors, processedComments, processedRegions)
    }
  }
def buildFoldRegions(astNode: ASTNode, document: Document): Array[FoldingDescriptor] = {
val descriptors = new ArrayBuffer[FoldingDescriptor]
val processedComments = new HashSet[PsiElement]
val processedRegions = new HashSet[PsiElement]
appendDescriptors(astNode, document, descriptors, processedComments, processedRegions)
descriptors.toArray
}
  /**
   * Returns the placeholder text shown for a folded node, or null when the
   * platform default should be used. The text depends on the node's element
   * type and, for custom regions, on the text of the region marker itself.
   */
  def getPlaceholderText(node: ASTNode): String = {
    if (isMultiline(node) || isMultilineImport(node) && !isWorksheetResults(node)) {
      node.getElementType match {
        case ScalaElementTypes.BLOCK_EXPR => return "{...}"
        case ScalaTokenTypes.tBLOCK_COMMENT => return "/.../"
        case ScalaDocElementTypes.SCALA_DOC_COMMENT => return "/**...*/"
        case ScalaElementTypes.TEMPLATE_BODY => return "{...}"
        case ScalaElementTypes.PACKAGING => return "{...}"
        case ScalaElementTypes.IMPORT_STMT => return "..."
        case ScalaElementTypes.MATCH_STMT => return "{...}"
        // Shell-style comments: ":" starts the "::#!"-delimited variant.
        case ScalaTokenTypes.tSH_COMMENT if node.getText.charAt(0) == ':' => return "::#!...::!#"
        case ScalaTokenTypes.tSH_COMMENT => return "#!...!#"
        case ScalaElementTypes.FUNCTION_DEFINITION =>
          val (isMultilineBody, _, sign) = isMultilineFuncBody(node.getPsi.asInstanceOf[ScFunctionDefinition])
          if (isMultilineBody) return sign
        case _ =>
      }
      if (node.getPsi != null) {
        node.getPsi match {
          case literal: ScLiteral if literal.isMultiLineString => return "\"\"\"...\"\"\""
          case _ =>
        }
        if (node.getPsi.isInstanceOf[ScArgumentExprList])
          return "(...)"
      }
    }
    // Block expressions folded because of their parent context.
    if (node.getTreeParent != null && (ScalaElementTypes.ARG_EXPRS == node.getTreeParent.getElementType
      || ScalaElementTypes.INFIX_EXPR == node.getTreeParent.getElementType
      || ScalaElementTypes.PATTERN_DEFINITION == node.getTreeParent.getElementType
      || ScalaElementTypes.VARIABLE_DEFINITION == node.getTreeParent.getElementType)) {
      node.getPsi match {
        case _: ScBlockExpr => return "{...}"
        case _ => return null
      }
    }
    node.getElementType match {
      case ScalaTokenTypes.tLINE_COMMENT =>
        if (!isWorksheetResults(node)) {
          if (!isCustomRegionStart(node.getText))
            return "/.../"
          else {
            // Custom regions display their description text when present.
            if (isTagRegionStart(node.getText)) {
              val customText: String = node.getText.replaceFirst(".*desc\\s*=\\s*\"(.*)\".*", "$1").trim
              return if (customText.isEmpty) "..." else customText
            } else if (isSimpleRegionStart(node.getText)) {
              val customText: String = node.getText.replaceFirst("..?\\s*region(.*)", "$1").trim
              return if (customText.isEmpty) "..." else customText
            }
          }
        }
      case _ => return null
    }
    null
  }
  /**
   * Decides whether a folding region starts collapsed, based on the user's
   * code-folding settings. Worksheet files are never collapsed by default.
   */
  def isCollapsedByDefault(node: ASTNode): Boolean = {
    node.getPsi.getContainingFile match {
      case sc: ScalaFile if sc.isWorksheetFile => return false
      case _ =>
    }
    // File header: the very first element of a file that is not a packaging.
    if (node.getTreeParent.getElementType == ScalaElementTypes.FILE &&
      node.getTreePrev == null && node.getElementType != ScalaElementTypes.PACKAGING &&
      ScalaCodeFoldingSettings.getInstance().isCollapseFileHeaders) true
    else if (node.getTreeParent.getElementType == ScalaElementTypes.FILE &&
      node.getElementType == ScalaElementTypes.IMPORT_STMT &&
      ScalaCodeFoldingSettings.getInstance().isCollapseImports) true
    else if (node.getTreeParent != null &&
      ScalaElementTypes.PATTERN_DEFINITION == node.getTreeParent.getElementType &&
      ScalaCodeFoldingSettings.getInstance().isCollapseMultilineBlocks) true
    else if (node.getTreeParent != null &&
      ScalaElementTypes.VARIABLE_DEFINITION == node.getTreeParent.getElementType &&
      ScalaCodeFoldingSettings.getInstance().isCollapseMultilineBlocks) true
    else {
      // Fall back to element-type specific settings.
      node.getElementType match {
        case ScalaTokenTypes.tBLOCK_COMMENT
          if ScalaCodeFoldingSettings.getInstance().isCollapseBlockComments && !isWorksheetResults(node) => true
        case ScalaTokenTypes.tLINE_COMMENT
          if !isCustomRegionStart(node.getText) &&
            ScalaCodeFoldingSettings.getInstance().isCollapseLineComments && !isWorksheetResults(node) => true
        case ScalaTokenTypes.tLINE_COMMENT
          if isCustomRegionStart(node.getText) &&
            ScalaCodeFoldingSettings.getInstance().isCollapseCustomRegions => true
        case ScalaDocElementTypes.SCALA_DOC_COMMENT
          if ScalaCodeFoldingSettings.getInstance().isCollapseScalaDocComments && !isWorksheetResults(node) => true
        case ScalaElementTypes.TEMPLATE_BODY
          if ScalaCodeFoldingSettings.getInstance().isCollapseTemplateBodies => true
        case ScalaElementTypes.PACKAGING
          if ScalaCodeFoldingSettings.getInstance().isCollapsePackagings => true
        case ScalaElementTypes.IMPORT_STMT
          if ScalaCodeFoldingSettings.getInstance().isCollapseImports => true
        case ScalaTokenTypes.tSH_COMMENT
          if ScalaCodeFoldingSettings.getInstance().isCollapseShellComments && !isWorksheetResults(node) => true
        case ScalaElementTypes.MATCH_STMT
          if ScalaCodeFoldingSettings.getInstance().isCollapseMultilineBlocks => true
        case ScalaElementTypes.BLOCK_EXPR
          if ScalaCodeFoldingSettings.getInstance().isCollapseMultilineBlocks => true
        case _ if node.getPsi.isInstanceOf[ScBlockExpr] &&
          node.getTreeParent.getElementType == ScalaElementTypes.ARG_EXPRS &&
          ScalaCodeFoldingSettings.getInstance().isCollapseMethodCallBodies => true
        case _ if node.getTreeParent.getElementType == ScalaElementTypes.FUNCTION_DEFINITION &&
          ScalaCodeFoldingSettings.getInstance().isCollapseMethodCallBodies &&
          isMultilineFuncBody(node.getTreeParent.getPsi.asInstanceOf[ScFunctionDefinition])._1 => true
        case _ if node.getPsi.isInstanceOf[ScTypeProjection] &&
          ScalaCodeFoldingSettings.getInstance().isCollapseTypeLambdas => true
        case _ if node.getPsi.isInstanceOf[ScTypeElement] &&
          ScalaCodeFoldingSettings.getInstance().isCollapseTypeLambdas => true
        case _ if node.getPsi.isInstanceOf[ScLiteral] &&
          node.getPsi.asInstanceOf[ScLiteral].isMultiLineString &&
          ScalaCodeFoldingSettings.getInstance().isCollapseMultilineStrings => true
        case _ if node.getPsi.isInstanceOf[ScArgumentExprList] &&
          ScalaCodeFoldingSettings.getInstance().isCollapseMultilineBlocks => true
        case _ => false
      }
    }
  }
private def isMultiline(node: ASTNode): Boolean = {
node.getText.indexOf("\n") != -1
}
  /**
   * True when the part of a match statement after the `match` keyword spans
   * multiple lines (i.e. the case clauses are worth folding).
   */
  private def isMultilineBodyInMatchStmt(node: ASTNode): Boolean = {
    val children = node.getPsi.asInstanceOf[ScMatchStmt].children
    var index = 0
    for (ch <- children) {
      if (ch.isInstanceOf[PsiElement] && ch.getNode.getElementType == ScalaTokenTypes.kMATCH) {
        // Everything after the `match` keyword is the body.
        val result = node.getText.substring(index + MATCH_KEYWORD.length)
        return result.indexOf("\n") != -1
      } else {
        index += ch.getTextLength
      }
    }
    false
  }

  /**
   * Offset (relative to the match statement's start) where the fold should
   * begin: just after the `match` keyword and the whitespace following it.
   * Returns 0 when no `match` keyword child is found.
   */
  private def startOffsetForMatchStmt(node: ASTNode): Int = {
    val children = node.getPsi.asInstanceOf[ScMatchStmt].children
    var offset = 0
    var passedMatch = false
    for (ch <- children) {
      if (ch.isInstanceOf[PsiElement] && ch.getNode.getElementType == ScalaTokenTypes.kMATCH) {
        offset += MATCH_KEYWORD.length
        passedMatch = true
      } else if (passedMatch) {
        // Include the single whitespace element directly after `match`.
        if (ch.isInstanceOf[PsiElement] && ch.getNode.getElementType == TokenType.WHITE_SPACE) offset += ch.getTextLength
        return offset
      } else {
        offset += ch.getTextLength
      }
    }
    0
  }
  /**
   * True when this import statement is followed (possibly across leaf
   * elements such as whitespace) by at least one more import statement,
   * i.e. it starts a foldable import group.
   */
  private def isMultilineImport(node: ASTNode): Boolean = {
    if (node.getElementType != ScalaElementTypes.IMPORT_STMT) return false
    var next = node.getTreeNext
    var flag = false
    while (next != null && (next.getPsi.isInstanceOf[LeafPsiElement] || next.getElementType == ScalaElementTypes.IMPORT_STMT)) {
      if (next.getElementType == ScalaElementTypes.IMPORT_STMT) flag = true
      next = next.getTreeNext
    }
    flag
  }
private def isMultilineFuncBody(func: ScFunctionDefinition): (Boolean, TextRange, String) = {
val body = func.body.getOrElse(null)
if (body == null) return (false, null, "")
val range = body.getTextRange
body match {
case _: ScBlockExpr =>
val isCorrectRange = (range.getStartOffset + 1 < range.getEndOffset)
return (isCorrectRange, range, "{...}")
case _ =>
val isMultilineBody = (body.getText.indexOf("\n") != -1) && (range.getStartOffset + 1 < range.getEndOffset)
val textRange = if (isMultilineBody) range else null
return (isMultilineBody, textRange, "...")
}
(false, null, "")
}
private def isGoodImport(node: ASTNode): Boolean = {
var prev = node.getTreePrev
while (prev != null && prev.getPsi.isInstanceOf[LeafPsiElement]) prev = prev.getTreePrev
if (prev == null || prev.getElementType != ScalaElementTypes.IMPORT_STMT) true
else false
}
  /**
   * End offset of the import group starting at `node`: the end of the last
   * consecutive import statement (or trailing semicolon), scanning forward
   * over leaf elements.
   */
  private def getImportEnd(node: ASTNode): Int = {
    var next = node
    var last = next.getTextRange.getEndOffset
    while (next != null && (next.getPsi.isInstanceOf[LeafPsiElement] || next.getElementType == ScalaElementTypes.IMPORT_STMT)) {
      if (next.getElementType == ScalaElementTypes.IMPORT_STMT || next.getElementType == ScalaTokenTypes.tSEMICOLON) last = next.getTextRange.getEndOffset
      next = next.getTreeNext
    }
    last
  }
  /**
   * Folds a run of consecutive line comments (separated only by whitespace)
   * into a single region starting at `comment`. Comments already consumed by
   * an earlier run, and custom-region markers, break the run.
   *
   * @param processedComments mutable set tracking comments already folded
   */
  private def addCommentFolds(comment: PsiComment, processedComments: Set[PsiElement],
                              descriptors: ArrayBuffer[FoldingDescriptor]) {
    if (processedComments.contains(comment) || comment.getTokenType != ScalaTokenTypes.tLINE_COMMENT) {
      return
    }
    var end: PsiElement = null
    var current: PsiElement = comment.getNextSibling
    var flag = true
    while (current != null && flag) {
      val node: ASTNode = current.getNode
      if (node != null) {
        val elementType: IElementType = node.getElementType
        if (elementType == ScalaTokenTypes.tLINE_COMMENT && !isWorksheetResults(node)) {
          end = current
          processedComments.add(current)
        }
        // Anything other than a line comment or whitespace ends the run.
        if (elementType != ScalaTokenTypes.tLINE_COMMENT && elementType != TokenType.WHITE_SPACE) {
          flag = false
        }
      }
      current = current.getNextSibling
      if (current != null && (isCustomRegionStart(current.getText) || isCustomRegionEnd(current.getText))) {
        flag = false
      }
    }
    if (end != null) {
      descriptors += (new FoldingDescriptor(comment,
        new TextRange(comment.getTextRange.getStartOffset, end.getTextRange.getEndOffset)))
    }
  }
  /**
   * Folds a custom region that starts at `element` and ends at the matching
   * end marker of the same flavor (`editor-fold` tag regions vs. simple
   * `region`/`endregion` markers). The stack tracks nested region starts so
   * only the matching (same-depth) end marker closes this region.
   *
   * @param processedRegions mutable set tracking end markers already consumed
   */
  private def addCustomRegionFolds(element: PsiElement, processedRegions: Set[PsiElement],
                                   descriptors: ArrayBuffer[FoldingDescriptor], isTagRegion: Boolean,
                                   stack: Stack[PsiElement]) {
    var end: PsiElement = null
    var current: PsiElement = element.getNextSibling
    var flag = true
    while (current != null && flag) {
      val node: ASTNode = current.getNode
      if (node != null) {
        val elementType: IElementType = node.getElementType
        if (elementType == ScalaTokenTypes.tLINE_COMMENT && isCustomRegionEnd(node.getText) && !isWorksheetResults(node)) {
          if ((isTagRegion && isTagRegionEnd(node.getText)) || (!isTagRegion && isSimpleRegionEnd(node.getText))) {
            // Only an unconsumed end marker at the same nesting depth closes this region.
            if (!processedRegions.contains(current) && stack.isEmpty) {
              end = current
              processedRegions.add(current)
              flag = false
            }
          }
          if (!stack.isEmpty) stack.pop()
        }
        // A nested region start must be matched before this region can close.
        if (elementType == ScalaTokenTypes.tLINE_COMMENT && isCustomRegionStart(node.getText) && !isWorksheetResults(node)) {
          stack.push(node.getPsi)
        }
      }
      current = current.getNextSibling
    }
    if (end != null) {
      descriptors += (new FoldingDescriptor(element,
        new TextRange(element.getTextRange.getStartOffset, end.getTextRange.getEndOffset)))
    }
  }
  // Predicates recognizing custom folding-region markers in line comments.
  // Two flavors are supported: IDE-style `<editor-fold ...>` / `</editor-fold>`
  // tags and simple `region` / `endregion` markers.

  private def isCustomRegionStart(elementText: String): Boolean = {
    isTagRegionStart(elementText) || isSimpleRegionStart(elementText)
  }

  private def isTagRegionStart(elementText: String): Boolean = {
    elementText.contains("<editor-fold")
  }

  private def isSimpleRegionStart(elementText: String): Boolean = {
    // "..?" allows both "//region" and "// region"-style prefixes.
    elementText.contains("region") && elementText.matches("..?\\s*region.*")
  }

  private def isCustomRegionEnd(elementText: String): Boolean = {
    isTagRegionEnd(elementText) || isSimpleRegionEnd(elementText)
  }

  private def isTagRegionEnd(elementText: String): Boolean = {
    elementText.contains("</editor-fold")
  }

  private def isSimpleRegionEnd(elementText: String): Boolean = {
    elementText.contains("endregion")
  }

  // Worksheet evaluation output is rendered as specially prefixed comments;
  // those must never be folded as ordinary comments.
  private def isWorksheetResults(node: ASTNode): Boolean = {
    node.getPsi.isInstanceOf[PsiComment] && (node.getText.startsWith(WorksheetFoldingBuilder.FIRST_LINE_PREFIX) ||
      node.getText.startsWith(WorksheetFoldingBuilder.LINE_PREFIX))
  }
}
// Keywords whose length is used to compute fold-range start offsets.
private[folding] object ScalaFoldingUtil {
  val IMPORT_KEYWORD = "import"
  val PACKAGE_KEYWORD = "package"
  val MATCH_KEYWORD = "match"
}
/**
* Extractor for:
*
* ({type λ[α] = Either.LeftProjection[α, Int]})#λ
*
* Which can be folded to:
*
* λ[α] = Either.LeftProjection[α, Int]
*/
object TypeLambda {
  /**
   * Matches type projections of the shape `({type λ[α] = ...})#λ` and extracts the
   * lambda's name, its type-parameter clause and the aliased type element, so the
   * folding builder can render the compact `λ[α] = ...` form.
   *
   * Returns `None` for any element that does not have exactly this shape.
   */
  def unapply(psi: PsiElement): Option[(String, ScTypeParamClause, ScTypeElement)] = psi match {
    case tp: ScTypeProjection =>
      val element = tp.typeElement
      val nameId = tp.nameId
      element match {
        case pte: ScParenthesisedTypeElement =>
          pte.typeElement match {
            case Some(cte: ScCompoundTypeElement) if cte.components.isEmpty =>
              cte.refinement match {
                case Some(ref) =>
                  (ref.holders, ref.types) match {
                    // The refinement must be a single type alias whose name matches the
                    // projected name (the `λ` in `({type λ[α] = ...})#λ`).
                    case (scala.Seq(), scala.Seq(tad: ScTypeAliasDefinitionImpl)) if tad.name == nameId.getText =>
                      (tad.typeParametersClause, Option(tad.aliasedTypeElement)) match {
                        case (Some(tpc), Some(ate)) =>
                          return Some((nameId.getText, tpc, ate))
                        case _ =>
                      }
                    case _ =>
                  }
                case None =>
              }
            case _ =>
          }
        case _ =>
      }
      None
    // Fix: the match previously had no wildcard case, so calling this extractor on any
    // PsiElement other than a ScTypeProjection raised a MatchError instead of failing
    // to match.  An extractor must be total and simply return None here.
    case _ => None
  }
}
package spinoco.protocol.mgcp
import scodec.{Attempt, Codec, DecodeResult, Err, SizeBound}
import scodec.bits.{BitVector, ByteVector}
import scodec.codecs._
import shapeless.tag.@@
import spinoco.protocol.common.codec._
import spinoco.protocol.mgcp.MGCPParameter.ConnectionId
import scala.annotation.tailrec
package object codec {
// Codec for the protocol's CR-LF line terminator.
val `\\r\\n`: Codec[Unit] = constantString1("\\r\\n")
// Codec for a bare LF line terminator.
val `\\n`: Codec[Unit] = constantString1("\\n")
// Consumes a run of whitespace; encodes as a single space byte.
val WS: Codec[Unit] = dropWhile(BitVector(Array[Byte](' ')))(_.toChar.isWhitespace)
// Consumes any leading whitespace; encodes to nothing (empty byte array).
val dropWS: Codec[Unit] = dropWhile(BitVector(Array[Byte]()))(_.toChar.isWhitespace)

// MGCP transaction identifier encoded as decimal digits.  The guard rejects ids
// outside [0, 999999999], i.e. anything longer than nine digits or negative.
val transactionId: Codec[Int @@ MGCPTxId] =
  tagged(guard(intAsString) { i =>
    if (i < 0 || i > 999999999) Some(Err("Transaction id must be within [0, 999 999 999] range"))
    else None
  })
// Codec for a local endpoint name: one or more parts separated by '/'.
// A part is the wildcard '$' (any), the wildcard '*' (all), or a literal name string.
val localEndpointNameCodec: Codec[LocalEndpointName] = {
  val partCodec: Codec[LocalEndpointPart] = {
    import LocalEndpointPart._
    // Map the two single-character wildcards onto their case objects; everything
    // else becomes a NameString.  The reverse mapping mirrors this exactly.
    ascii.xmap(
      {
        case "$" => `$`
        case "*" => `*`
        case other => NameString(other)
      }
      , {
        case `$` => "$"
        case `*` => "*"
        case NameString(s) => s
      }
    )
  }

  // Split on '/'; the first part is mandatory (the "start"), the rest are "parts".
  listDelimited(BitVector.view("/".getBytes), partCodec)
  .exmap(
    { ls =>
      ls.headOption match {
        case None => Attempt.failure(Err("At least one part of name is required"))
        case Some(h) => Attempt.successful(LocalEndpointName(h, ls.tail))
      }
    }
    , n => Attempt.successful(n.start +: n.parts)
  )
}
/** True when `ch` is a hexadecimal digit: a decimal digit, `a`-`f`, or `A`-`F`. */
def isHex(ch: Char): Boolean = {
  val lowerRange = ch >= 'a' && ch <= 'f'
  val upperRange = ch >= 'A' && ch <= 'F'
  ch.isDigit || lowerRange || upperRange
}
/**
 * Validator for values that must consist of 1 to 32 hexadecimal characters.
 *
 * @param field human-readable field name, used only in error messages
 * @param s     the candidate value
 * @return `None` when valid, otherwise an [[Err]] describing the violation
 */
def is32Hex(field: String)(s: String): Option[Err] = {
  if (s.length > 32 || s.isEmpty) Some(Err(s"$field must have 1 - 32 hex characters, size is ${s.size}"))
  // Fix: the message previously ended with a stray '}' after the closing quote
  // ("... but is '$s'}"), a typo in the error text.
  else if (s.exists(ch => !isHex(ch))) Some(Err(s"$field must have 1 - 32 hex characters, but is '$s'"))
  else None
}
// Connection identifier: an ASCII string validated as 1-32 hex characters.
val connectionIdCodec: Codec[String @@ ConnectionId] =
  tagged[String, ConnectionId](guard(ascii)(is32Hex("ConnectionId")))

// Endpoint domain portion, decoded as UTF-8 text (no further validation here).
val domainCodec: Codec[String] = utf8

// Package name: any non-empty ASCII string.
val packageNameCodec: Codec[String] =
  guard(ascii)(s => if (s.nonEmpty) None else Some(Err(s"Package name must have at least one character: $s")))
// Codec for an event specification: package/event, optionally followed by
// '@' and an owner ("$" wildcard, "*" wildcard, or a concrete connection id).
val eventSpecificationCodec: Codec[EventSpecification] = "Event Specification" | {
  val eventOwnerCodec: Codec[EventOwner] = {
    // Try the two single-character wildcards first, then fall back to a connection id.
    choice(
      constantString1("$").decodeAs(EventOwner.`$`).upcast
      , constantString1("*").decodeAs(EventOwner.`*`).upcast
      , connectionIdCodec.as[EventOwner.Connection].upcast
    )
  }

  (
    // Everything up to (but excluding) the '@' is the package/event pair.
    ("Package And Event" | takeWhileChar(PackageEventCodec.codec)('@')) ::
    // The owner is present only when the '@' separator is present.
    ("Event Owner" | optional(recover2(constantString1("@")), eventOwnerCodec) )
  ).as[EventSpecification]
}
/**
 * Runs `codec` over the header portion of the input only.  The header ends at the
 * first blank line (CRLF CRLF, or LF LF); decoding consumes the header plus the
 * first half of that separator, and the remainder starts at the blank line that
 * terminated the header.  Encoding simply delegates to `codec`.
 */
def header[A](codec: Codec[A]): Codec[A] = {
  val cr = '\\r'.toByte
  val lf = '\\n'.toByte
  val crlfDouble = ByteVector.view("\\r\\n\\r\\n".getBytes)
  val lfDouble = ByteVector.view("\\n\\n".getBytes)

  new Codec[A] {
    def sizeBound: SizeBound = SizeBound.unknown
    def encode(value: A): Attempt[BitVector] = codec.encode(value)
    def decode(bits: BitVector): Attempt[DecodeResult[A]] = {
      // Scans forward one byte at a time looking for the header/body separator.
      @tailrec
      def go(bytes: ByteVector): Attempt[DecodeResult[A]] = {
        // Prefix of the original input up to the current scan position, plus
        // `sepSize` bytes of the separator itself.
        def gethead(sepSize: Int) = {
          val orig = bits.bytes
          val toTake = orig.size - bytes.size
          orig.take(toTake + sepSize)
        }
        // No separator found: the whole input is the header.
        if (bytes.isEmpty) codec.decode(bits)
        else if (bytes.head == cr && bytes.startsWith(crlfDouble)) {
          // CRLF CRLF: keep one CRLF with the header, leave the second as remainder.
          codec.decode(gethead(2).bits).map { _.mapRemainder { _ => bytes.drop(2).bits } }
        } else if (bytes.head == lf && bytes.startsWith(lfDouble)) {
          // LF LF: keep one LF with the header, leave the second as remainder.
          codec.decode(gethead(1).bits).map { _.mapRemainder { _ => bytes.drop(1).bits } }
        } else go(bytes.tail)
      }
      go(bits.bytes)
    }
  }
}
}
| Spinoco/protocol | mgcp/src/main/scala/spinoco/protocol/mgcp/codec/codec.scala | Scala | mit | 4,128 |
package vegas.fixtures
import vegas._
import vegas.data.External._
import vegas.DSL.SpecBuilder
/**
 * A catalogue of example Vegas plot specifications used as test fixtures.  Each val
 * builds (but does not render) one plot; `plotsWithNames` pairs every plot with the
 * basename of its expected Vega-Lite spec under
 * `/core/src/test/resources/example-specs/`.
 */
object BasicPlots {

  // Bar chart over inline (embedded) data.
  val SimpleBarChart =
    Vegas("A simple bar chart with embedded data.").
      withData(Seq(
        Map("a" -> "A", "b" -> 28), Map("a" -> "B", "b" -> 55), Map("a" -> "C", "b" -> 43),
        Map("a" -> "D", "b" -> 91), Map("a" -> "E", "b" -> 81), Map("a" -> "F", "b" -> 53),
        Map("a" -> "G", "b" -> 19), Map("a" -> "H", "b" -> 87), Map("a" -> "I", "b" -> 52)
      )).
      encodeX("a", Ordinal).
      encodeY("b", Quantitative).
      mark(Bar)

  // Bar chart with a filter transform and a sum aggregation.
  val AggregateBarChart =
    Vegas("A bar chart showing the US population distribution of age groups in 2000.").
      withURL(Population).
      mark(Bar).
      filter("datum.year == 2000").
      encodeY("age", Ordinal, scale=Scale(bandSize=17)).
      encodeX("people", Quantitative, aggregate=AggOps.Sum, axis=Axis(title="population"))

  // Grouped (faceted-by-column) bar chart with a calculated field.
  val GroupedBarChart =
    Vegas().
      withURL(Population).
      mark(Bar).
      addTransformCalculation("gender", """datum.sex == 2 ? "Female" : "Male"""").
      filter("datum.year == 2000").
      encodeColumn("age", Ord, scale=Scale(padding=4.0), axis=Axis(orient=Orient.Bottom, axisWidth=1.0, offset= -8.0)).
      encodeY("people", Quantitative, aggregate=AggOps.Sum, axis=Axis(title="population", grid=false)).
      encodeX("gender", Nominal, scale=Scale(bandSize = 6.0), hideAxis=true).
      encodeColor("gender", Nominal, scale=Scale(rangeNominals=List("#EA98D2", "#659CCA"))).
      configFacet(cell=CellConfig(strokeWidth = 0))

  // Simple area chart over time-binned data.
  val AreaChart =
    Vegas().
      withURL(Unemployment).
      mark(Area).
      encodeX("date", Temp, timeUnit=TimeUnit.Yearmonth, scale=Scale(nice=Nice.Month),
        axis=Axis(axisWidth=0, format="%Y", labelAngle=0)).
      encodeY("count", Quantitative, aggregate=AggOps.Sum).
      configCell(width=300, height=200)

  // Stacked bar chart normalized to 100%.
  val NormalizedStackedBarChart =
    Vegas().
      withURL(Population).
      filter("datum.year == 2000").
      addTransform("gender", "datum.sex == 2 ? \"Female\" : \"Male\"").
      mark(Bar).
      encodeY("people", Quant, AggOps.Sum, axis=Axis(title="population")).
      encodeX("age", Ord, scale=Scale(bandSize= 17)).
      encodeColor("gender", Nominal, scale=Scale(rangeNominals=List("#EA98D2", "#659CCA"))).
      configMark(stacked=StackOffset.Normalize)

  // Trellis scatterplot faceted by a binned row encoding.
  val BinnedChart =
    Vegas("A trellis scatterplot showing Horsepower and Miles per gallons, faceted by binned values of Acceleration.").
      withURL(Cars).
      mark(Point).
      encodeX("Horsepower", Quantitative).
      encodeY("Miles_per_Gallon", Quantitative).
      encodeRow("Acceleration", Quantitative, enableBin=true)

  // 2D-binned scatterplot with point size showing the count per bin.
  val ScatterBinnedPlot =
    Vegas().
      withURL(Movies).
      mark(Point).
      encodeX("IMDB_Rating", Quantitative, bin=Bin(maxbins=10.0)).
      encodeY("Rotten_Tomatoes_Rating", Quantitative, bin=Bin(maxbins=10.0)).
      encodeSize(aggregate=AggOps.Count, field="*", dataType=Quantitative)

  // Scatterplot colored by a nominal field.
  val ScatterColorPlot =
    Vegas().
      withURL(Cars).
      mark(Point).
      encodeX("Horsepower", Quantitative).
      encodeY("Miles_per_Gallon", Quantitative).
      encodeColor(field="Origin", dataType=Nominal)

  // Scatterplot colored by a binned quantitative field.
  val ScatterBinnedColorPlot =
    Vegas("A scatterplot showing horsepower and miles per gallons with binned acceleration on color.").
      withURL(Cars).
      mark(Point).
      encodeX("Horsepower", Quantitative).
      encodeY("Miles_per_Gallon", Quantitative).
      encodeColor(field="Acceleration", dataType=Quantitative, bin=Bin(maxbins=5.0))

  // Stacked area chart with a binned x axis.
  val StackedAreaBinnedPlot =
    Vegas().
      withURL(Cars).
      mark(Area).
      encodeX("Acceleration", Quantitative, bin=Bin()).
      encodeY("Horsepower", Quantitative, AggOps.Mean, enableBin=false).
      encodeColor(field="Cylinders", dataType=Nominal)

  // Trellis plot with main-effects sort ordering on the y axis.
  val SortColorPlot =
    Vegas("The Trellis display by Becker et al. helped establish small multiples as a “powerful mechanism for understanding interactions in studies of how a response depends on explanatory variables”. Here we reproduce a trellis of Barley yields from the 1930s, complete with main-effects ordering to facilitate comparison.").
      withURL(Barley).
      mark(Point).
      encodeRow("site", Ordinal).
      encodeX("yield", Quantitative, aggregate=AggOps.Mean).
      encodeY("variety", Ordinal, sortField=Sort("yield", AggOps.Mean), scale=Scale(bandSize = 12.0)).
      encodeColor(field="year", dataType=Nominal)

  // Scatterplot with a custom SVG path as the point shape.
  val CustomShapePlot =
    Vegas("A scatterplot with custom star shapes.").
      withURL(Cars).
      mark(Point).
      encodeX("Horsepower", Quant).
      encodeY("Miles_per_Gallon", Quant).
      encodeColor("Cylinders", Nom).
      encodeSize("Weight_in_lbs", Quant).
      configMark(customShape="M0,0.2L0.2351,0.3236 0.1902,0.0618 0.3804,-0.1236 0.1175,-0.1618 0,-0.4 -0.1175,-0.1618 -0.3804,-0.1236 -0.1902,0.0618 -0.2351,0.3236 0,0.2Z")

  // Aggregated scatterplot grouped by a detail channel.
  val ScatterAggregateDetail =
    Vegas("A scatterplot showing average horsepower and displacement for cars from different origins.").
      withURL(Cars).
      mark(Point).
      encodeX("Horsepower", Quant, AggOps.Mean).
      encodeY("Displacement", Quant, AggOps.Mean).
      encodeDetail("Origin")

  // Multi-series line chart split by a detail field.
  val LineDetail =
    Vegas("Stock prices of 5 Tech Companies Over Time.").
      withURL(Stocks, formatType = DataFormat.Csv).
      mark(Line).
      encodeX("date", Temp).
      encodeY("price", Quant).
      encodeDetailFields(Field(field="symbol", dataType=Nominal))

  // Punch-card chart: hour-of-day vs day-of-week, sized by count.
  val GithubPunchCard =
    Vegas().
      withURL(Github, formatType = DataFormat.Csv).
      mark(Circle).
      encodeX("time", Temporal, timeUnit = TimeUnit.Hours).
      encodeY("time", Temporal, timeUnit = TimeUnit.Day).
      encodeSize("count", Quantitative, aggregate = AggOps.Sum)

  // The classic Anscombe quartet as a column-faceted trellis.
  val AnscombesQuartet =
    Vegas("Anscombe's Quartet").
      withURL(Anscombe).
      mark(Circle).
      encodeX("X", Quantitative, scale = Scale(zero = false)).
      encodeY("Y", Quantitative, scale = Scale(zero = false)).
      encodeColumn("Series", Nominal).
      configMark(opacity = 1)

  // Stacked area chart with a categorical color palette.
  val StackedAreaChart =
    Vegas("Area chart showing weight of cars over time.").
      withURL(Unemployment).
      mark(Area).
      encodeX(
        "date", Temporal, timeUnit = TimeUnit.Yearmonth,
        axis = Axis(axisWidth = 0, format = "%Y", labelAngle = 0),
        scale = Scale(nice = spec.Spec.NiceTimeEnums.Month)
      ).
      encodeY("count", Quantitative, aggregate = AggOps.Sum).
      encodeColor("series", Nominal, scale = Scale(rangePreset = Category20b)).
      configCell(width = 300, height = 200)

  // Same as StackedAreaChart but normalized to 100%.
  val NormalizedStackedAreaChart =
    Vegas().
      withURL(Unemployment).
      mark(Area).
      encodeX(
        "date", Temporal, timeUnit = TimeUnit.Yearmonth,
        axis = Axis(axisWidth=0, format="%Y", labelAngle=0),
        scale = Scale(nice = spec.Spec.NiceTimeEnums.Month)
      ).
      encodeY("count", Quantitative, aggregate = AggOps.Sum, hideAxis = Some(true)).
      encodeColor("series", Nominal, scale = Scale(rangePreset = Category20b)).
      configCell(width = 300, height = 200).
      configMark(stacked = StackOffset.Normalize)

  // Streamgraph: center-stacked area chart.
  val Streamgraph =
    Vegas().
      withURL(Unemployment).
      mark(Area).
      encodeX(
        "date", Temporal, timeUnit = TimeUnit.Yearmonth,
        axis = Axis(axisWidth = 0, format = "%Y", labelAngle = 0, tickSize = Some(0.0)),
        scale = Scale(nice = spec.Spec.NiceTimeEnums.Month)
      ).
      encodeY("count", Quantitative, aggregate = AggOps.Sum, hideAxis = Some(true)).
      encodeColor("series", Nominal, scale = Scale(rangePreset = Category20b)).
      configCell(width = 300, height = 200).
      configMark(stacked = StackOffset.Center)

  // Stacked bar chart with an explicit domain/range color mapping and legend title.
  val StackedBarChart =
    Vegas().
      withURL(SeattleWeather, formatType = DataFormat.Csv).
      mark(Bar).
      encodeX("date", Temporal, timeUnit = TimeUnit.Month, axis = Axis(title = "Month of the year")).
      encodeY("*", Quantitative, aggregate = AggOps.Count).
      encodeColor("weather", Nominal, scale = Scale(
        domainNominals = List("sun", "fog", "drizzle", "rain", "snow"),
        rangeNominals = List("#e7ba52", "#c7c7c7", "#aec7e8", "#1f77b4", "#9467bd")),
        legend = Legend(title = "Weather type"))

  // Strip plot using tick marks.
  val StripPlot =
    Vegas("Shows the relationship between horsepower and the numbver of cylinders using tick marks.").
      withURL(Cars).
      mark(Tick).
      encodeX("Horsepower", Quantitative).
      encodeY("Cylinders", Ordinal)

  // Names (ex. bar, bar_aggregate, etc.) are corresponding to filenames
  // of `/core/src/test/resources/example-specs/*.vl.json`
  val plotsWithNames: List[(String, SpecBuilder)] = List(
    "bar" -> SimpleBarChart,
    "bar_aggregate" -> AggregateBarChart,
    "bar_grouped" -> GroupedBarChart,
    "area" -> AreaChart,
    "stacked_bar_normalize" -> NormalizedStackedBarChart,
    "scatter_binned" -> ScatterBinnedPlot,
    "scatter_color" -> ScatterColorPlot,
    "scatter_binned_color" -> ScatterBinnedColorPlot,
    "stacked_area_binned" -> StackedAreaBinnedPlot,
    "trellis_barley" -> SortColorPlot,
    "trellis_scatter_binned_row" -> BinnedChart,
    "scatter_shape_custom" -> CustomShapePlot,
    "line_detail" -> LineDetail,
    "github_punchcard" -> GithubPunchCard,
    "trellis_anscombe" -> AnscombesQuartet,
    "stacked_area" -> StackedAreaChart,
    "stacked_area_normalize" -> NormalizedStackedAreaChart,
    "stacked_area_stream" -> Streamgraph,
    "stacked_bar_weather" -> StackedBarChart,
    "tick_strip" -> StripPlot
  ).sortBy(_._1)

  // All example plots, in spec-name order.
  val plots: List[SpecBuilder] = plotsWithNames.map(_._2)
}
| aishfenton/Vegas | core/src/test/scala/vegas/fixtures/BasicPlots.scala | Scala | mit | 9,663 |
package models.daos
import play.api.db.slick.Config.driver.simple._
import java.util.UUID
/**
 * Slick table definitions for the persistence layer: case classes describe the row
 * shapes, the matching `Table` classes describe the schema, and the `TableQuery`
 * vals at the bottom are the query entry points.
 */
object DBTableDefinitions {

  // Row shape for the "user" table.
  case class DBUser (
    userID: String,
    firstName: Option[String],
    lastName: Option[String],
    fullName: Option[String],
    email: Option[String],
    avatarURL: Option[String]
  )

  class Users(tag: Tag) extends Table[DBUser](tag, "user") {
    def id = column[String]("userID", O.PrimaryKey)
    def firstName = column[Option[String]]("firstName")
    def lastName = column[Option[String]]("lastName")
    def fullName = column[Option[String]]("fullName")
    def email = column[Option[String]]("email")
    def avatarURL = column[Option[String]]("avatarURL")
    def * = (id, firstName, lastName, fullName, email, avatarURL) <> (DBUser.tupled, DBUser.unapply)
  }

  // Silhouette LoginInfo: (providerID, providerKey) identifies one login method.
  case class DBLoginInfo (
    id: Option[Long],
    providerID: String,
    providerKey: String
  )

  class LoginInfos(tag: Tag) extends Table[DBLoginInfo](tag, "logininfo") {
    def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
    def providerID = column[String]("providerID")
    def providerKey = column[String]("providerKey")
    def * = (id.?, providerID, providerKey) <> (DBLoginInfo.tupled, DBLoginInfo.unapply)
  }

  // Join table linking users to their login infos.
  case class DBUserLoginInfo (
    userID: String,
    loginInfoId: Long
  )

  class UserLoginInfos(tag: Tag) extends Table[DBUserLoginInfo](tag, "userlogininfo") {
    def userID = column[String]("userID", O.NotNull)
    def loginInfoId = column[Long]("loginInfoId", O.NotNull)
    def * = (userID, loginInfoId) <> (DBUserLoginInfo.tupled, DBUserLoginInfo.unapply)
  }

  // Hashed-password credentials for a login info.
  case class DBPasswordInfo (
    hasher: String,
    password: String,
    salt: Option[String],
    loginInfoId: Long
  )

  class PasswordInfos(tag: Tag) extends Table[DBPasswordInfo](tag, "passwordinfo") {
    def hasher = column[String]("hasher")
    def password = column[String]("password")
    def salt = column[Option[String]]("salt")
    def loginInfoId = column[Long]("loginInfoId")
    def * = (hasher, password, salt, loginInfoId) <> (DBPasswordInfo.tupled, DBPasswordInfo.unapply)
  }

  // OAuth 1 token/secret pair for a login info.
  case class DBOAuth1Info (
    id: Option[Long],
    token: String,
    secret: String,
    loginInfoId: Long
  )

  class OAuth1Infos(tag: Tag) extends Table[DBOAuth1Info](tag, "oauth1info") {
    def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
    def token = column[String]("token")
    def secret = column[String]("secret")
    def loginInfoId = column[Long]("loginInfoId")
    def * = (id.?, token, secret, loginInfoId) <> (DBOAuth1Info.tupled, DBOAuth1Info.unapply)
  }

  // OAuth 2 access/refresh token data for a login info.
  case class DBOAuth2Info (
    id: Option[Long],
    accessToken: String,
    tokenType: Option[String],
    expiresIn: Option[Int],
    refreshToken: Option[String],
    loginInfoId: Long
  )

  class OAuth2Infos(tag: Tag) extends Table[DBOAuth2Info](tag, "oauth2info") {
    def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
    def accessToken = column[String]("accesstoken")
    def tokenType = column[Option[String]]("tokentype")
    def expiresIn = column[Option[Int]]("expiresin")
    def refreshToken = column[Option[String]]("refreshtoken")
    def loginInfoId = column[Long]("logininfoid")
    def * = (id.?, accessToken, tokenType, expiresIn, refreshToken, loginInfoId) <> (DBOAuth2Info.tupled, DBOAuth2Info.unapply)
  }

  // Blog post; `created`/`edited` are epoch-millis style Long timestamps.
  // NOTE(review): timestamp unit is not visible here — confirm against writers.
  case class DBPost(
    id: UUID,
    title: String,
    body: String,
    created: Long,
    edited: Long,
    published: Boolean,
    author: String
  )

  class Posts(tag: Tag) extends Table[DBPost](tag, "post") {
    def id = column[UUID]("id", O.PrimaryKey, O.NotNull)
    def title = column[String]("title", O.DBType("LONGTEXT"))
    def body = column[String]("body", O.DBType("LONGTEXT"))
    def created = column[Long]("date")
    def edited = column[Long]("edited")
    def published = column[Boolean]("published")
    def authorId = column[String]("authorId", O.NotNull)
    def * = (id, title, body, created, edited, published, authorId) <> (DBPost.tupled, DBPost.unapply)
  }

  // Comment attached to a post, referencing the post's UUID and the author's id.
  case class DBComment(
    id: Option[Long],
    body: String,
    created: Long,
    edited: Long,
    author: String,
    post: UUID
  )

  class Comments(tag: Tag) extends Table[DBComment](tag, "comment") {
    def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
    def body = column[String]("body", O.DBType("text"))
    def created = column[Long]("date")
    def edited = column[Long]("edited")
    def authorId = column[String]("authorId", O.NotNull)
    def postId = column[UUID]("postId", O.NotNull)
    def * = (id.?, body, created, edited, authorId, postId) <> (DBComment.tupled, DBComment.unapply)
  }

  // Subscriber record for "Yo" notifications.
  case class DBYoSubscriber(
    id: UUID,
    username: String
  )

  class YoSubscribers(tag: Tag) extends Table[DBYoSubscriber](tag, "yosubscriber") {
    def id = column[UUID]("id", O.PrimaryKey)
    def username = column[String]("username", O.NotNull)
    def * = (id, username) <> (DBYoSubscriber.tupled, DBYoSubscriber.unapply)
  }

  // Query entry points, one per table.
  val slickUsers = TableQuery[Users]
  val slickLoginInfos = TableQuery[LoginInfos]
  val slickUserLoginInfos = TableQuery[UserLoginInfos]
  val slickPasswordInfos = TableQuery[PasswordInfos]
  val slickOAuth1Infos = TableQuery[OAuth1Infos]
  val slickOAuth2Infos = TableQuery[OAuth2Infos]
  val slickPosts = TableQuery[Posts]
  val slickComments = TableQuery[Comments]
  val slickYoSubscribers = TableQuery[YoSubscribers]
}
| sne11ius/playlog | app/models/daos/DBTableDefinitions.scala | Scala | gpl-3.0 | 5,411 |
package de.frosner.broccoli.instances.storage.filesystem
import java.io._
import java.nio.file._
import de.frosner.broccoli.instances.storage.InstanceStorage
import de.frosner.broccoli.models.Instance
import play.api.libs.json.Json
import scala.util.{Failure, Success, Try}
/**
 * An [[InstanceStorage]] that persists each instance as one JSON file
 * (`<id>.json`) inside `storageDirectory`.
 *
 * On construction a `.lock` marker file is created so that two Broccoli processes
 * cannot operate on the same directory at once; the lock is removed on close.
 *
 * Fix: the original annotated the class and several methods with `@volatile`.
 * `scala.volatile` applies only to mutable fields, so those annotations were
 * meaningless (or rejected by the compiler) and have been removed.
 */
class FileSystemInstanceStorage(storageDirectory: File) extends InstanceStorage {

  import Instance.{instancePersistenceReads, instancePersistenceWrites}

  protected val log = play.api.Logger(getClass)

  log.info(s"Starting $this")

  require(storageDirectory.isDirectory && storageDirectory.canWrite,
          s"'$storageDirectory' needs to be a writable directory")

  // Marker file guarding the directory against concurrent Broccoli instances.
  private val lock = new File(storageDirectory, ".lock")
  log.info(s"Locking $storageDirectory ($lock)")
  if (!lock.createNewFile()) {
    throw new IllegalStateException(s"Cannot lock $storageDirectory. Is there another Broccoli instance running?")
  }

  /** Scratch file for an instance id; written first, then atomically moved into place. */
  def idToTempFile(id: String): File = new File(storageDirectory, id + ".json_tmp")

  /** Final storage location for the instance with the given id. */
  def idToFile(id: String): File = new File(storageDirectory, id + ".json")

  /** Releases the directory lock; `closed` is set only if the lock file was deleted. */
  override def closeImpl(): Unit =
    if (lock.delete()) {
      log.info(s"Releasing lock on '$storageDirectory' ('$lock')")
      closed = true
    } else {
      log.error(s"Could not release lock on '$storageDirectory' ('$lock')")
    }

  /**
   * Reads and parses a single instance file, failing if the id stored inside the
   * JSON does not match the file name it was read from.
   */
  override def readInstanceImpl(id: String): Try[Instance] = {
    val input = Try(new FileInputStream(idToFile(id)))
    val instance = input.map(i => Json.parse(i).as[Instance])
    // Close the stream regardless of whether parsing succeeded.
    input.foreach(_.close())
    instance.flatMap { instance =>
      if (instance.id != id) {
        val error = s"Instance id (${instance.id}) does not match file name ($id)"
        log.error(error)
        Failure(new IllegalStateException(error))
      } else {
        Success(instance)
      }
    }
  }

  protected override def readInstancesImpl: Try[Set[Instance]] =
    readInstances(_ => true)

  /** Reads all instances whose id passes `idFilter`; fails fast on the first broken file. */
  override def readInstancesImpl(idFilter: String => Boolean): Try[Set[Instance]] = {
    val instanceIds = Try {
      val instanceFiles = storageDirectory.listFiles(new FileFilter {
        override def accept(pathname: File): Boolean = {
          val fileName = pathname.getName
          val id = fileName.stripSuffix(".json")
          fileName.endsWith(".json") && idFilter(id)
        }
      })
      instanceFiles.map(_.getName.stripSuffix(".json"))
    }
    instanceIds.map(_.map { id =>
      val tryInstance = readInstanceImpl(id)
      tryInstance match {
        case Success(instance) => instance
        case Failure(throwable) =>
          log.error(s"Error while reading instance: $id")
          throw throwable
      }
    }.toSet)
  }

  /**
   * Persists an instance by writing to a temp file and atomically moving it over
   * the final file, so readers never observe a partially written instance.
   */
  override def writeInstanceImpl(instance: Instance): Try[Instance] = {
    val id = instance.id
    val tempFile = idToTempFile(id)
    val printStream = Try(new PrintStream(new FileOutputStream(tempFile)))
    val afterWrite = printStream.map(_.append(Json.toJson(instance).toString()))
    printStream.map(_.close())
    val finishMove =
      afterWrite.flatMap(_ => Try(Files.move(tempFile.toPath, idToFile(id).toPath, StandardCopyOption.ATOMIC_MOVE)))
    finishMove.map(_ => instance)
  }

  /** Deletes the instance's file, failing if the file could not be removed. */
  override def deleteInstanceImpl(toDelete: Instance): Try[Instance] = {
    val id = toDelete.id
    val file = idToFile(id)
    val deleted = Try(file.delete())
    deleted.flatMap { success =>
      if (success) Success(toDelete) else Failure(new FileNotFoundException(s"Could not delete $file"))
    }
  }
}
| FRosner/cluster-broccoli | server/src/main/scala/de/frosner/broccoli/instances/storage/filesystem/FileSystemInstanceStorage.scala | Scala | apache-2.0 | 3,528 |
/*
* Copyright 2014 Treode, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.treode.async
import com.treode.async.stubs.StubScheduler
import com.treode.async.stubs.implicits._
import org.scalatest.FlatSpec
import Async.supply
import Callback.{ignore => disregard}
/**
 * Tests for the `whilst` looping combinator as exposed on the scheduler, driven by a
 * deterministic [[StubScheduler]] so each scenario runs to completion synchronously.
 */
class RichExecutorSpec extends FlatSpec {

  // Marker exception type so tests can assert that exactly this failure propagates.
  class DistinguishedException extends Exception

  "Async.whilst" should "handle zero iterations" in {
    implicit val s = StubScheduler.random()
    var count = 0
    s.whilst (false) (supply (count += 1)) .expectPass()
    assertResult (0) (count)
  }

  it should "handle one iteration" in {
    implicit val s = StubScheduler.random()
    var count = 0
    s.whilst (count < 1) (supply (count += 1)) .expectPass()
    assertResult (1) (count)
  }

  it should "handle multiple iterations" in {
    implicit val s = StubScheduler.random()
    var count = 0
    s.whilst (count < 3) (supply (count += 1)) .expectPass()
    assertResult (3) (count)
  }

  // The condition itself throws on the fourth evaluation; the loop must surface it.
  it should "pass an exception from the condition to the callback" in {
    implicit val s = StubScheduler.random()
    var count = 0
    s.whilst {
      if (count == 3)
        throw new DistinguishedException
      true
    } (supply (count += 1)) .expectFail [DistinguishedException]
    assertResult (3) (count)
  }

  // The body's async result fails on the third iteration.
  it should "pass an exception returned from the body to the callback" in {
    implicit val s = StubScheduler.random()
    var count = 0
    s.whilst (true) {
      supply {
        count += 1
        if (count == 3)
          throw new DistinguishedException
      }
    } .expectFail [DistinguishedException]
    assertResult (3) (count)
  }

  // The body throws synchronously (before producing an Async) on the fourth call.
  it should "pass an exception thrown from the body to the callback" in {
    implicit val s = StubScheduler.random()
    var count = 0
    s.whilst (true) {
      if (count == 3)
        throw new DistinguishedException
      supply (count += 1)
    } .expectFail [DistinguishedException]
    assertResult (3) (count)
  }}
| Treode/store | core/test/com/treode/async/RichExecutorSpec.scala | Scala | apache-2.0 | 2,493 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.bridge
import org.apache.flink.api.scala.{DataSet, _}
import org.apache.flink.streaming.api.scala.DataStream
import org.apache.flink.table.api.internal.TableImpl
import org.apache.flink.table.api.{ImplicitExpressionConversions, ImplicitExpressionOperations, Table, ValidationException}
import org.apache.flink.types.Row
import _root_.scala.language.implicitConversions
/**
* == Table & SQL API with Flink's DataStream API ==
*
* This package contains the API of the Table & SQL API that bridges to Flink's [[DataStream]] API
* for the Scala programming language. Users can create [[Table]]s from [[DataStream]]s on which
* relational operations can be performed. Tables can also be converted back to [[DataStream]]s for
* further processing.
*
* For accessing all API classes and implicit conversions, use the following imports:
*
* {{{
* import org.apache.flink.table.api._
* import org.apache.flink.table.api.bridge.scala._
* }}}
*
* More information about the entry points of the API can be found in [[StreamTableEnvironment]].
*
* Available implicit expressions are listed in [[ImplicitExpressionConversions]] and
* [[ImplicitExpressionOperations]].
*
* Available implicit table-to-stream conversions are listed in this package object.
*
* Please refer to the website documentation about how to construct and run table programs that are
* connected to the DataStream API.
*/
package object scala {

  // Enriches a Table with the bridge's conversion methods (e.g. to DataStream/DataSet).
  implicit def tableConversions(table: Table): TableConversions = {
    new TableConversions(table.asInstanceOf[TableImpl])
  }

  // Enriches a DataSet so it can be registered/converted as a Table.
  implicit def dataSetConversions[T](set: DataSet[T]): DataSetConversions[T] = {
    new DataSetConversions[T](set, set.getType())
  }

  // Enriches a DataStream so it can be registered/converted as a Table.
  implicit def dataStreamConversions[T](set: DataStream[T]): DataStreamConversions[T] = {
    new DataStreamConversions[T](set, set.dataType)
  }

  // Implicitly converts a batch Table to a DataSet[Row]; fails with a
  // ValidationException if the table belongs to a streaming environment.
  implicit def table2RowDataSet(table: Table): DataSet[Row] = {
    val tableEnv = table.asInstanceOf[TableImpl].getTableEnvironment
    if (!tableEnv.isInstanceOf[BatchTableEnvironment]) {
      throw new ValidationException("Table cannot be converted into a DataSet. " +
        "It is not part of a batch table environment.")
    }
    tableEnv.asInstanceOf[BatchTableEnvironment].toDataSet[Row](table)
  }

  // Implicitly converts a streaming Table to an append-only DataStream[Row]; fails
  // with a ValidationException if the table belongs to a batch environment.
  implicit def table2RowDataStream(table: Table): DataStream[Row] = {
    val tableEnv = table.asInstanceOf[TableImpl].getTableEnvironment
    if (!tableEnv.isInstanceOf[StreamTableEnvironment]) {
      throw new ValidationException("Table cannot be converted into a DataStream. " +
        "It is not part of a stream table environment.")
    }
    tableEnv.asInstanceOf[StreamTableEnvironment].toAppendStream[Row](table)
  }
}
| tzulitai/flink | flink-table/flink-table-api-scala-bridge/src/main/scala/org/apache/flink/table/api/bridge/scala/package.scala | Scala | apache-2.0 | 3,551 |
package org.tearne.crosser.util
import org.junit.runner.RunWith
import org.scalatest.FreeSpec
/** Unit tests for AlleleCount: field access, addition, proportion, and value equality. */
class AlleleCountTest extends FreeSpec{
  "AlleleCount should" - {
    "contain both success and total counts" in {
      val instance = AlleleCount(3,4)
      assertResult(3)(instance.success)
      assertResult(4)(instance.total)
    }
    // '+' is expected to add success and total counts component-wise.
    "be addable" in {
      val instance1 = AlleleCount(3,4)
      val instance2 = AlleleCount(7,96)
      val instanceSum = instance1 + instance2
      assertResult(10)(instanceSum.success)
      assertResult(100)(instanceSum.total)
    }
    "calculate proportion" in {
      val instance = AlleleCount(3,4)
      assertResult(3.0/4.0)(instance.proportion)
    }
    // Equal fields imply equal instances and equal hash codes; any differing
    // field must break equality (and, for these cases, the hash as well).
    "have value based hashcode and equals" in {
      val instance1a = AlleleCount(3,4)
      val instance1b = AlleleCount(3,4)
      val instance2 = AlleleCount(3,5)
      val instance3 = AlleleCount(2,4)
      assertResult(instance1b)(instance1a)
      assertResult(instance1b.hashCode)(instance1a.hashCode)
      assert(instance1a != instance2)
      assert(instance1a.hashCode != instance2.hashCode)
      assert(instance1a != instance3)
      assert(instance1a.hashCode != instance3.hashCode)
    }
  }
}
/*
* Copyright 2006-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package mapper
import net.liftweb.util._
import net.liftweb.common._
import Helpers._
/**
 * Meta-mapper for [[ProtoTag]] records.  Keeps two in-process LRU caches — one keyed
 * by primary key, one by (capified) tag name — so hot tags avoid repeated database
 * lookups.  Cache-touching methods synchronize on this meta object.
 */
trait MetaProtoTag[ModelType <: ProtoTag[ModelType]] extends KeyedMetaMapper[Long, ModelType] {
  self: ModelType =>

  /** Name of the backing table, e.g. "tags". */
  override def dbTableName: String // = "tags"

  /** Maximum number of entries held in each of the two LRU caches. */
  def cacheSize: Int

  // Cache keyed by database primary key.
  private val idCache = new LRU[Long, ModelType](cacheSize)
  // Cache keyed by capified tag name.
  private val tagCache = new LRU[String, ModelType](cacheSize)

  /**
   * Returns the tag named `ntag` (after capification), checking the name cache, then
   * the database, and finally creating and persisting a new tag if none exists.
   */
  def findOrCreate(ntag: String): ModelType = synchronized {
    val tag = capify(ntag)
    if (tagCache.contains(tag)) tagCache(tag)
    else {
      find(By(name, tag)) match {
        case Full(t) => tagCache(tag) = t; t
        case _ => val ret: ModelType = (createInstance).name(tag).saveMe
          tagCache(tag) = ret
          ret
      }
    }
  }

  /** Primary-key lookup that consults the id cache before delegating to the database. */
  override def findDbByKey(dbId: ConnectionIdentifier, key: Long): Box[ModelType] = synchronized {
    if (idCache.contains(key)) Full(idCache(key))
    else {
      val ret = super.findDbByKey(dbId,key)
      ret.foreach(v => idCache(key) = v)
      ret
    }
  }

  /**
   * Split the String into tags
   */
  def split(in: String): List[String] = in.roboSplit(",").map(capify)

  /**
   * Split the String into tags and find all the tags
   */
  def splitAndFind(in: String): List[ModelType] = split(in).map(findOrCreate)

  // Normalisation applied to tag names on both storage and lookup.
  def capify: String => String = Helpers.capify _
}
/**
 * Base record type for tags: a Long primary key plus an indexed, length-limited,
 * capified name.  Tags are ordered alphabetically by name.
 */
abstract class ProtoTag[MyType <: ProtoTag[MyType]] extends KeyedMapper[Long, MyType] with Ordered[MyType] {
  self: MyType =>

  def getSingleton: MetaProtoTag[MyType]

  // the primary key for the database
  object id extends MappedLongIndex(this)

  def primaryKeyField: MappedLongIndex[MyType] = id

  object name extends MappedPoliteString(this, 256) {
    // Apply the same capify normalisation the meta-object uses for lookups.
    override def setFilter = getSingleton.capify :: super.setFilter
    override def dbIndexed_? = true
  }

  // Alphabetical ordering by tag name.
  def compare(other: MyType): Int = name.get.compare(other.name.get)
}
| lzpfmh/framework-2 | persistence/mapper/src/main/scala/net/liftweb/mapper/ProtoTag.scala | Scala | apache-2.0 | 2,510 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst
import java.beans.{Introspector, PropertyDescriptor}
import java.lang.{Iterable => JIterable}
import java.lang.reflect.Type
import java.util.{Iterator => JIterator, List => JList, Map => JMap}
import scala.language.existentials
import com.google.common.reflect.TypeToken
import org.apache.spark.sql.catalyst.analysis.{GetColumnByOrdinal, UnresolvedAttribute, UnresolvedExtractValue}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.objects._
import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, DateTimeUtils, GenericArrayData}
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.Utils
/**
* Type-inference utilities for POJOs and Java collections.
*/
object JavaTypeInference {
  // Guava type tokens for the Java collection interfaces that get special-cased below.
  private val iterableType = TypeToken.of(classOf[JIterable[_]])
  private val mapType = TypeToken.of(classOf[JMap[_, _]])
  private val listType = TypeToken.of(classOf[JList[_]])
  // Generic return types used with TypeToken.resolveType to recover the concrete
  // element/key/value types of parameterized collections.
  private val iteratorReturnType = classOf[JIterable[_]].getMethod("iterator").getGenericReturnType
  private val nextReturnType = classOf[JIterator[_]].getMethod("next").getGenericReturnType
  private val keySetReturnType = classOf[JMap[_, _]].getMethod("keySet").getGenericReturnType
  private val valuesReturnType = classOf[JMap[_, _]].getMethod("values").getGenericReturnType
  /**
   * Infers the corresponding SQL data type of a JavaBean class.
   * Delegates to the TypeToken-based overload.
   * @param beanClass Java type
   * @return (SQL data type, nullable)
   */
  def inferDataType(beanClass: Class[_]): (DataType, Boolean) = {
    inferDataType(TypeToken.of(beanClass))
  }
  /**
   * Infers the corresponding SQL data type of a Java type.
   * Delegates to the TypeToken-based overload.
   * @param beanType Java type
   * @return (SQL data type, nullable)
   */
  private[sql] def inferDataType(beanType: Type): (DataType, Boolean) = {
    inferDataType(TypeToken.of(beanType))
  }
  /**
   * Infers the corresponding SQL data type of a Java type.
   * @param typeToken Java type
   * @param seenTypeSet JavaBean classes already visited on the current inference
   *                    path; used to detect and reject circular bean references
   * @return (SQL data type, nullable)
   */
  private def inferDataType(typeToken: TypeToken[_], seenTypeSet: Set[Class[_]] = Set.empty)
    : (DataType, Boolean) = {
    typeToken.getRawType match {
      // User-defined types: either annotated on the class or registered globally.
      case c: Class[_] if c.isAnnotationPresent(classOf[SQLUserDefinedType]) =>
        (c.getAnnotation(classOf[SQLUserDefinedType]).udt().newInstance(), true)
      case c: Class[_] if UDTRegistration.exists(c.getName) =>
        val udt = UDTRegistration.getUDTFor(c.getName).get.newInstance()
          .asInstanceOf[UserDefinedType[_ >: Null]]
        (udt, true)
      case c: Class[_] if c == classOf[java.lang.String] => (StringType, true)
      case c: Class[_] if c == classOf[Array[Byte]] => (BinaryType, true)
      // JVM primitives cannot hold null, hence non-nullable ...
      case c: Class[_] if c == java.lang.Short.TYPE => (ShortType, false)
      case c: Class[_] if c == java.lang.Integer.TYPE => (IntegerType, false)
      case c: Class[_] if c == java.lang.Long.TYPE => (LongType, false)
      case c: Class[_] if c == java.lang.Double.TYPE => (DoubleType, false)
      case c: Class[_] if c == java.lang.Byte.TYPE => (ByteType, false)
      case c: Class[_] if c == java.lang.Float.TYPE => (FloatType, false)
      case c: Class[_] if c == java.lang.Boolean.TYPE => (BooleanType, false)
      // ... while their boxed counterparts are nullable.
      case c: Class[_] if c == classOf[java.lang.Short] => (ShortType, true)
      case c: Class[_] if c == classOf[java.lang.Integer] => (IntegerType, true)
      case c: Class[_] if c == classOf[java.lang.Long] => (LongType, true)
      case c: Class[_] if c == classOf[java.lang.Double] => (DoubleType, true)
      case c: Class[_] if c == classOf[java.lang.Byte] => (ByteType, true)
      case c: Class[_] if c == classOf[java.lang.Float] => (FloatType, true)
      case c: Class[_] if c == classOf[java.lang.Boolean] => (BooleanType, true)
      case c: Class[_] if c == classOf[java.math.BigDecimal] => (DecimalType.SYSTEM_DEFAULT, true)
      case c: Class[_] if c == classOf[java.math.BigInteger] => (DecimalType.BigIntDecimal, true)
      case c: Class[_] if c == classOf[java.sql.Date] => (DateType, true)
      case c: Class[_] if c == classOf[java.sql.Timestamp] => (TimestampType, true)
      case _ if typeToken.isArray =>
        val (dataType, nullable) = inferDataType(typeToken.getComponentType, seenTypeSet)
        (ArrayType(dataType, nullable), true)
      case _ if iterableType.isAssignableFrom(typeToken) =>
        val (dataType, nullable) = inferDataType(elementType(typeToken), seenTypeSet)
        (ArrayType(dataType, nullable), true)
      case _ if mapType.isAssignableFrom(typeToken) =>
        val (keyType, valueType) = mapKeyValueType(typeToken)
        val (keyDataType, _) = inferDataType(keyType, seenTypeSet)
        val (valueDataType, nullable) = inferDataType(valueType, seenTypeSet)
        (MapType(keyDataType, valueDataType, nullable), true)
      // Enums are represented as a single non-null string field holding the constant name.
      case other if other.isEnum =>
        (StructType(Seq(StructField(typeToken.getRawType.getSimpleName,
          StringType, nullable = false))), true)
      case other =>
        if (seenTypeSet.contains(other)) {
          throw new UnsupportedOperationException(
            "Cannot have circular references in bean class, but got the circular reference " +
              s"of class $other")
        }
        // TODO: we should only collect properties that have getter and setter. However, some tests
        // pass in scala case class as java bean class which doesn't have getter and setter.
        val properties = getJavaBeanReadableProperties(other)
        val fields = properties.map { property =>
          val returnType = typeToken.method(property.getReadMethod).getReturnType
          val (dataType, nullable) = inferDataType(returnType, seenTypeSet + other)
          new StructField(property.getName, dataType, nullable)
        }
        (new StructType(fields), true)
    }
  }
def getJavaBeanReadableProperties(beanClass: Class[_]): Array[PropertyDescriptor] = {
val beanInfo = Introspector.getBeanInfo(beanClass)
beanInfo.getPropertyDescriptors.filterNot(_.getName == "class")
.filterNot(_.getName == "declaringClass")
.filter(_.getReadMethod != null)
}
private def getJavaBeanReadableAndWritableProperties(
beanClass: Class[_]): Array[PropertyDescriptor] = {
getJavaBeanReadableProperties(beanClass).filter(_.getWriteMethod != null)
}
  /**
   * Resolves the element type of an `Iterable` subtype (e.g. `String` for `List<String>`)
   * by resolving the generic return type of `iterator()` and then of `next()` against
   * the concrete type arguments carried by `typeToken`.
   */
  private def elementType(typeToken: TypeToken[_]): TypeToken[_] = {
    val typeToken2 = typeToken.asInstanceOf[TypeToken[_ <: JIterable[_]]]
    val iterableSuperType = typeToken2.getSupertype(classOf[JIterable[_]])
    val iteratorType = iterableSuperType.resolveType(iteratorReturnType)
    iteratorType.resolveType(nextReturnType)
  }
private def mapKeyValueType(typeToken: TypeToken[_]): (TypeToken[_], TypeToken[_]) = {
val typeToken2 = typeToken.asInstanceOf[TypeToken[_ <: JMap[_, _]]]
val mapSuperType = typeToken2.getSupertype(classOf[JMap[_, _]])
val keyType = elementType(mapSuperType.resolveType(keySetReturnType))
val valueType = elementType(mapSuperType.resolveType(valuesReturnType))
keyType -> valueType
}
/**
* Returns the Spark SQL DataType for a given java class. Where this is not an exact mapping
* to a native type, an ObjectType is returned.
*
* Unlike `inferDataType`, this function doesn't do any massaging of types into the Spark SQL type
* system. As a result, ObjectType will be returned for things like boxed Integers.
*/
private def inferExternalType(cls: Class[_]): DataType = cls match {
case c if c == java.lang.Boolean.TYPE => BooleanType
case c if c == java.lang.Byte.TYPE => ByteType
case c if c == java.lang.Short.TYPE => ShortType
case c if c == java.lang.Integer.TYPE => IntegerType
case c if c == java.lang.Long.TYPE => LongType
case c if c == java.lang.Float.TYPE => FloatType
case c if c == java.lang.Double.TYPE => DoubleType
case c if c == classOf[Array[Byte]] => BinaryType
case _ => ObjectType(cls)
}
  /**
   * Returns an expression that can be used to deserialize an internal row to an object of java bean
   * `T` with a compatible schema. Fields of the row will be extracted using UnresolvedAttributes
   * of the same name as the constructor arguments. Nested classes will have their fields accessed
   * using UnresolvedExtractValue.
   */
  def deserializerFor(beanClass: Class[_]): Expression = {
    // No path yet: top-level fields resolve by attribute name / column ordinal.
    deserializerFor(TypeToken.of(beanClass), None)
  }
  /**
   * Recursively builds a deserializer expression for `typeToken`.
   *
   * @param path expression locating the current value inside the row;
   *             `None` when deserializing the top-level object
   */
  private def deserializerFor(typeToken: TypeToken[_], path: Option[Expression]): Expression = {
    /** Returns the current path with a sub-field extracted. */
    def addToPath(part: String): Expression = path
      .map(p => UnresolvedExtractValue(p, expressions.Literal(part)))
      .getOrElse(UnresolvedAttribute(part))
    /** Returns the current path or `GetColumnByOrdinal`. */
    def getPath: Expression = path.getOrElse(GetColumnByOrdinal(0, inferDataType(typeToken)._1))
    typeToken.getRawType match {
      // Values already in Spark's native representation need no conversion.
      case c if !inferExternalType(c).isInstanceOf[ObjectType] => getPath
      // Boxed primitives: box the native value via the wrapper's valueOf factory.
      case c if c == classOf[java.lang.Short] ||
        c == classOf[java.lang.Integer] ||
        c == classOf[java.lang.Long] ||
        c == classOf[java.lang.Double] ||
        c == classOf[java.lang.Float] ||
        c == classOf[java.lang.Byte] ||
        c == classOf[java.lang.Boolean] =>
        StaticInvoke(
          c,
          ObjectType(c),
          "valueOf",
          getPath :: Nil,
          returnNullable = false)
      case c if c == classOf[java.sql.Date] =>
        StaticInvoke(
          DateTimeUtils.getClass,
          ObjectType(c),
          "toJavaDate",
          getPath :: Nil,
          returnNullable = false)
      case c if c == classOf[java.sql.Timestamp] =>
        StaticInvoke(
          DateTimeUtils.getClass,
          ObjectType(c),
          "toJavaTimestamp",
          getPath :: Nil,
          returnNullable = false)
      case c if c == classOf[java.lang.String] =>
        Invoke(getPath, "toString", ObjectType(classOf[String]))
      case c if c == classOf[java.math.BigDecimal] =>
        Invoke(getPath, "toJavaBigDecimal", ObjectType(classOf[java.math.BigDecimal]))
      case c if c.isArray =>
        val elementType = c.getComponentType
        // Primitive component types have dedicated unboxed conversions on ArrayData.
        val primitiveMethod = elementType match {
          case c if c == java.lang.Boolean.TYPE => Some("toBooleanArray")
          case c if c == java.lang.Byte.TYPE => Some("toByteArray")
          case c if c == java.lang.Short.TYPE => Some("toShortArray")
          case c if c == java.lang.Integer.TYPE => Some("toIntArray")
          case c if c == java.lang.Long.TYPE => Some("toLongArray")
          case c if c == java.lang.Float.TYPE => Some("toFloatArray")
          case c if c == java.lang.Double.TYPE => Some("toDoubleArray")
          case _ => None
        }
        primitiveMethod.map { method =>
          Invoke(getPath, method, ObjectType(c))
        }.getOrElse {
          // Object arrays: deserialize each element, then extract the backing array.
          Invoke(
            MapObjects(
              p => deserializerFor(typeToken.getComponentType, Some(p)),
              getPath,
              inferDataType(elementType)._1),
            "array",
            ObjectType(c))
        }
      case c if listType.isAssignableFrom(typeToken) =>
        val et = elementType(typeToken)
        MapObjects(
          p => deserializerFor(et, Some(p)),
          getPath,
          inferDataType(et)._1,
          customCollectionCls = Some(c))
      case _ if mapType.isAssignableFrom(typeToken) =>
        // Deserialize key and value arrays independently, then zip them into a Java map.
        val (keyType, valueType) = mapKeyValueType(typeToken)
        val keyDataType = inferDataType(keyType)._1
        val valueDataType = inferDataType(valueType)._1
        val keyData =
          Invoke(
            MapObjects(
              p => deserializerFor(keyType, Some(p)),
              Invoke(getPath, "keyArray", ArrayType(keyDataType)),
              keyDataType),
            "array",
            ObjectType(classOf[Array[Any]]))
        val valueData =
          Invoke(
            MapObjects(
              p => deserializerFor(valueType, Some(p)),
              Invoke(getPath, "valueArray", ArrayType(valueDataType)),
              valueDataType),
            "array",
            ObjectType(classOf[Array[Any]]))
        StaticInvoke(
          ArrayBasedMapData.getClass,
          ObjectType(classOf[JMap[_, _]]),
          "toJavaMap",
          keyData :: valueData :: Nil,
          returnNullable = false)
      case other if other.isEnum =>
        // The literal constant only conveys the runtime enum class; the actual
        // name to resolve comes from the row via getPath.
        StaticInvoke(JavaTypeInference.getClass, ObjectType(other), "deserializeEnumName",
          expressions.Literal.create(other.getEnumConstants.apply(0), ObjectType(other))
            :: getPath :: Nil)
      case other =>
        // JavaBean: instantiate via the no-arg constructor, then invoke each setter.
        val properties = getJavaBeanReadableAndWritableProperties(other)
        val setters = properties.map { p =>
          val fieldName = p.getName
          val fieldType = typeToken.method(p.getReadMethod).getReturnType
          val (_, nullable) = inferDataType(fieldType)
          val constructor = deserializerFor(fieldType, Some(addToPath(fieldName)))
          val setter = if (nullable) {
            constructor
          } else {
            AssertNotNull(constructor, Seq("currently no type path record in java"))
          }
          p.getWriteMethod.getName -> setter
        }.toMap
        val newInstance = NewInstance(other, Nil, ObjectType(other), propagateNull = false)
        val result = InitializeJavaBean(newInstance, setters)
        if (path.nonEmpty) {
          // Nested bean: a null struct deserializes to null, not to a bean of nulls.
          expressions.If(
            IsNull(getPath),
            expressions.Literal.create(null, ObjectType(other)),
            result
          )
        } else {
          result
        }
    }
  }
  /**
   * Returns an expression for serializing an object of the given type to an internal row.
   * The input bean is bound at ordinal 0 and asserted non-null; when the serializer is a
   * null-guarded struct, the guard is stripped, otherwise the result is wrapped in a
   * single "value" column.
   */
  def serializerFor(beanClass: Class[_]): CreateNamedStruct = {
    val inputObject = BoundReference(0, ObjectType(beanClass), nullable = true)
    val nullSafeInput = AssertNotNull(inputObject, Seq("top level input bean"))
    serializerFor(nullSafeInput, TypeToken.of(beanClass)) match {
      case expressions.If(_, _, s: CreateNamedStruct) => s
      case other => CreateNamedStruct(expressions.Literal("value") :: other :: Nil)
    }
  }
  /** Returns a serializer mapping a value of the given enum type to the
   * UTF8String of its constant name. */
  def enumSerializer[T <: Enum[T]](enum: Class[T]): T => UTF8String = {
    assert(enum.isEnum)
    inputObject: T =>
      UTF8String.fromString(inputObject.name())
  }
  /** Serializes `inputObject` to the UTF8String of its enum-constant name.
   * `enum` carries the fully-qualified class name of the enum type, used to
   * look the class up at runtime. */
  def serializeEnumName[T <: Enum[T]](enum: UTF8String, inputObject: T): UTF8String = {
    enumSerializer(Utils.classForName(enum.toString).asInstanceOf[Class[T]])(inputObject)
  }
  /** Returns a deserializer mapping an InternalRow, whose first field holds an
   * enum-constant name, to the corresponding value of the given enum type. */
  def enumDeserializer[T <: Enum[T]](enum: Class[T]): InternalRow => T = {
    assert(enum.isEnum)
    value: InternalRow =>
      Enum.valueOf(enum, value.getUTF8String(0).toString)
  }
  /** Deserializes `inputObject` (a row holding an enum-constant name) to an enum
   * value; `typeDummy` only supplies the runtime enum class.
   * NOTE(review): for enum constants declared with bodies, `getClass` returns an
   * anonymous subclass of the enum class — confirm `Enum.valueOf` still resolves
   * correctly in that case. */
  def deserializeEnumName[T <: Enum[T]](typeDummy: T, inputObject: InternalRow): T = {
    enumDeserializer(typeDummy.getClass.asInstanceOf[Class[T]])(inputObject)
  }
  /**
   * Recursively builds a serializer expression converting `inputObject`
   * (of the Java type described by `typeToken`) into Spark's internal representation.
   */
  private def serializerFor(inputObject: Expression, typeToken: TypeToken[_]): Expression = {
    /** Serializes a sequence-like input into a Catalyst array. */
    def toCatalystArray(input: Expression, elementType: TypeToken[_]): Expression = {
      val (dataType, nullable) = inferDataType(elementType)
      if (ScalaReflection.isNativeType(dataType)) {
        // Native element types can be wrapped directly without per-element conversion.
        NewInstance(
          classOf[GenericArrayData],
          input :: Nil,
          dataType = ArrayType(dataType, nullable))
      } else {
        MapObjects(serializerFor(_, elementType), input, ObjectType(elementType.getRawType))
      }
    }
    if (!inputObject.dataType.isInstanceOf[ObjectType]) {
      // Already a native Catalyst value; pass through unchanged.
      inputObject
    } else {
      typeToken.getRawType match {
        case c if c == classOf[String] =>
          StaticInvoke(
            classOf[UTF8String],
            StringType,
            "fromString",
            inputObject :: Nil,
            returnNullable = false)
        case c if c == classOf[java.sql.Timestamp] =>
          StaticInvoke(
            DateTimeUtils.getClass,
            TimestampType,
            "fromJavaTimestamp",
            inputObject :: Nil,
            returnNullable = false)
        case c if c == classOf[java.sql.Date] =>
          StaticInvoke(
            DateTimeUtils.getClass,
            DateType,
            "fromJavaDate",
            inputObject :: Nil,
            returnNullable = false)
        case c if c == classOf[java.math.BigDecimal] =>
          StaticInvoke(
            Decimal.getClass,
            DecimalType.SYSTEM_DEFAULT,
            "apply",
            inputObject :: Nil,
            returnNullable = false)
        // Boxed primitives: unbox via the corresponding xxxValue() call.
        case c if c == classOf[java.lang.Boolean] =>
          Invoke(inputObject, "booleanValue", BooleanType)
        case c if c == classOf[java.lang.Byte] =>
          Invoke(inputObject, "byteValue", ByteType)
        case c if c == classOf[java.lang.Short] =>
          Invoke(inputObject, "shortValue", ShortType)
        case c if c == classOf[java.lang.Integer] =>
          Invoke(inputObject, "intValue", IntegerType)
        case c if c == classOf[java.lang.Long] =>
          Invoke(inputObject, "longValue", LongType)
        case c if c == classOf[java.lang.Float] =>
          Invoke(inputObject, "floatValue", FloatType)
        case c if c == classOf[java.lang.Double] =>
          Invoke(inputObject, "doubleValue", DoubleType)
        case _ if typeToken.isArray =>
          toCatalystArray(inputObject, typeToken.getComponentType)
        case _ if listType.isAssignableFrom(typeToken) =>
          toCatalystArray(inputObject, elementType(typeToken))
        case _ if mapType.isAssignableFrom(typeToken) =>
          val (keyType, valueType) = mapKeyValueType(typeToken)
          ExternalMapToCatalyst(
            inputObject,
            ObjectType(keyType.getRawType),
            serializerFor(_, keyType),
            keyNullable = true,
            ObjectType(valueType.getRawType),
            serializerFor(_, valueType),
            valueNullable = true
          )
        case other if other.isEnum =>
          // Enums serialize as a one-field struct containing the constant's name.
          CreateNamedStruct(expressions.Literal("enum") ::
            StaticInvoke(JavaTypeInference.getClass, StringType, "serializeEnumName",
              expressions.Literal.create(other.getName, StringType) :: inputObject :: Nil) :: Nil)
        case other =>
          // JavaBean: serialize each readable+writable property into a struct field.
          val properties = getJavaBeanReadableAndWritableProperties(other)
          val nonNullOutput = CreateNamedStruct(properties.flatMap { p =>
            val fieldName = p.getName
            val fieldType = typeToken.method(p.getReadMethod).getReturnType
            val fieldValue = Invoke(
              inputObject,
              p.getReadMethod.getName,
              inferExternalType(fieldType.getRawType))
            expressions.Literal(fieldName) :: serializerFor(fieldValue, fieldType) :: Nil
          })
          // A null bean maps to a null struct rather than a struct of nulls.
          val nullOutput = expressions.Literal.create(null, nonNullOutput.dataType)
          expressions.If(IsNull(inputObject), nullOutput, nonNullOutput)
      }
    }
  }
}
| stanzhai/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/JavaTypeInference.scala | Scala | apache-2.0 | 20,110 |
package smc.nbt
import java.io.{DataInput, DataOutput}
import smc.binary._
import scala.reflect.ClassTag
import scala.reflect.runtime.universe._
/** Implements TAG_LIST specified in Named Binary Tag v19132.
 */
trait TagArrays {
  this: Tags =>

  /** Sticks an array with a [[TypeDef]] of the data type
   * to recover the data type without exception management.
   */
  case class TagArray[A](arr: Array[A])(implicit val tdef: TypeDef[A]) {

    /** Extracts the type-erased array if the specified type matches, otherwise [[None]].
     *
     * @throws NullPointerException if the specified [[TypeDef]] does not exist.
     */
    final def castSafe[B: TypeDef]: Option[Array[B]] = {
      val tA = tdef.typ.tpe
      val tB = implicitly[TypeDef[B]].typ.tpe
      // Subtype check on the reflected types guards the unchecked cast below.
      if (tA <:< tB) Some(castUnsafe[B]) else None
    }

    /** Extracts the type-erased array by casting the tag without type-checking.
     *
     * @throws ClassCastException if the specified type is not matched.
     * @throws NullPointerException if the specified [[TypeDef]] does not exist.
     */
    final def castUnsafe[B: TypeDef]: Array[B] = {
      if (implicitly[TypeDef[B]] == null)
        throw new NullPointerException
      else arr.asInstanceOf[Array[B]]
    }
  }

  /** Helps extract an [[Array]] from a [[Typed]] instance.
   *
   * It takes two processes to natively extract a typed array from a type-erased tag:
   * extract a type-erased tagged array from the type-erased tag,
   * and then extract a typed array from the type-erased tagged array.
   * This class's methods make it by one function call.
   */
  implicit class TagArrayOps(tag: Typed[_])(implicit tdef: TypeDef[TagArray[_]]) {

    /** Extracts the type-erased array by casting the tag without type-checking.
     *
     * @throws ClassCastException if the specified type is not matched.
     * @throws NullPointerException if the specified [[TypeDef]] does not exist.
     */
    def castArrayUnsafe[A: TypeDef]: Array[A] = {
      tag.castUnsafe(tdef).castUnsafe[A]
    }

    /** Extracts the type-erased array if the specified type matches, otherwise [[None]].
     *
     * @throws NullPointerException if the specified [[TypeDef]] does not exist.
     */
    def castArraySafe[A: TypeDef]: Option[Array[A]] = {
      tag.castSafe(tdef).flatMap(_.castSafe[A])
    }
  }

  /** Defines (de)serialization of [[TagArray]].
   */
  protected object GetPutTagArray extends GetPutAbs[TagArray[_]] {

    // Reads the element TypeDef first, then delegates so the element type
    // parameter A can be bound.  (IntelliJ may flag this line, but it compiles.)
    override def get(i: DataInput) = _read(i, i.reads(GetPutTypeDef))

    // Reads the element count, then `size` elements using the TypeDef's codec.
    private def _read[A](in: DataInput, tdef: TypeDef[A]): TagArray[A] = {
      val size = in.readInt()
      def data = in.reads(tdef.getput)
      val ctag = toCtag(tdef.typ)
      TagArray(Array.fill(size)(data)(ctag))(tdef)
    }

    // Converts a runtime TypeTag into the ClassTag that Array.fill requires.
    private def toCtag[A](ttag: TypeTag[A]): ClassTag[A] = {
      ClassTag[A](ttag.mirror.runtimeClass(ttag.tpe))
    }

    override def put(o: DataOutput, s: TagArray[_]) = _write(o, s)

    // Writes the element TypeDef, the length, then each element in order.
    private def _write[A](out: DataOutput, tarr: TagArray[A]): Unit = {
      out.writes(tarr.tdef)(GetPutTypeDef)  // (IntelliJ may flag this line, but it compiles.)
      out.writeInt(tarr.arr.length)
      tarr.arr.foreach(out.writes(_)(tarr.tdef.getput))
    }
  }
}
| ryo0ka/smc | src/main/scala/smc/nbt/TagArrays.scala | Scala | gpl-2.0 | 3,288 |
package com.stackmob.customcode.dev
package test
package server
package sdk
import org.specs2.Specification
import com.stackmob.customcode.dev.server.sdk.cache.CachingServiceImpl
/** Specification for [[CachingServiceImpl]]: a value stored under a key must be
 * retrievable unchanged.
 */
class CachingServiceImplSpecs extends Specification { def is =
  "CachingServiceImpl".title ^ end ^
  "CachingService is responsible for fast storage and retrieval of key/value data" ^ end ^
  "round trips should work" ! roundTrip ^ end ^
  end
  // Service under test with every failure-injection frequency disabled.
  private val svc = new CachingServiceImpl(throwableFreq0, throwableFreq0, throwableFreq0, throwableFreq0)
  private val key = "testKey"
  private val value = "testValue".getBytesUTF8
  // Time-to-live passed to setBytes (units per CachingServiceImpl's contract — confirm).
  private val ttl = 1L
  private def roundTrip = {
    svc.setBytes(key, value, ttl)
    svc.getBytes(key) must beEqualTo(value)
  }
}
| matthewfarwell/stackmob-customcode-dev | src/test/scala/com/stackmob/customcode/dev/test/server/sdk/CachingServiceImplSpecs.scala | Scala | apache-2.0 | 966 |
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.nlp.lexicon
import java.net.URL
import java.nio.file.{Paths, Files, Path}
import cc.factorie.app.nlp.lexicon.{iesl => Iesl, uscensus => Uscensus, wikipedia => Wikipedia, ssdi => Ssdi, mandarin => Mandarin}
import cc.factorie.app.strings.StringSegmenter
import cc.factorie.app.nlp.lemma.{Lemmatizer,LowercaseLemmatizer}
import java.io.{InputStream, File}
import cc.factorie.util.{ModelProvider, ClasspathURL}
import scala.reflect.{ClassTag, classTag}
import scala.language.implicitConversions
import scala.util.Try
/**
 * Supplies a [[ModelProvider]] for any lexicon class, resolving each lexicon's
 * data relative to `lexiconRoot`.
 */
trait LexiconsProvider {
  // Root location (directory path, URL, or the literal "classpath") for lexicon data.
  def lexiconRoot:String
  implicit def provide[L : ClassTag]:ModelProvider[L]
}
/**
 * Factory methods for [[LexiconsProvider]] instances backed by the filesystem,
 * a URL, or the classpath.
 */
object LexiconsProvider {
  import cc.factorie.util.ISAble._

  /**
   * Path pieces for lexicon class `L`: its package segments, lowercased, followed
   * by a hyphenated file name derived from the simple class name
   * (e.g. `PersonFirstHigh` -> `person-first-high.txt`).
   */
  private def lexiconNamePieces[L:ClassTag]:Seq[String] = {
    // Split the fully-qualified class name on literal dots ("""\.""" is a
    // regex-escaped dot; an unescaped "." would match any character), and strip
    // the '$' suffix that Scala appends to object class names.
    val arr = classTag[L].runtimeClass.getName.split("""\.""").map(_.stripSuffix("$"))
    // CamelCase -> hyphenated lowercase file name.
    val fileName = arr.last.zipWithIndex.flatMap {
      case (u, 0) => u.toLower.toString
      case (u, _) if u.isUpper => "-" + u.toLower
      case (l, _) => l.toString
    }.mkString("") + ".txt"
    arr.init.map(_.toLowerCase) ++ Seq(fileName)
  }

  private def fullLexiconName[L:ClassTag] = lexiconNamePieces[L].mkString("/")
  // Drops the first five path segments — assumes lexicons live under a
  // five-segment package such as cc.factorie.app.nlp.lexicon (confirm for new packages).
  private def shortLexiconName[L:ClassTag] = lexiconNamePieces[L].drop(5).mkString("/")

  /** Provider resolving lexicon data beneath directory `f`. */
  def fromFile(f:File, useFullPath:Boolean = false):LexiconsProvider = new LexiconsProvider {
    lazy val lexiconRoot = f.getAbsolutePath
    override implicit def provide[L : ClassTag]: ModelProvider[L] = new ModelProvider[L] {
      private val path = f.toPath.resolve(if(useFullPath) fullLexiconName[L] else shortLexiconName[L])
      val coordinates = path.toString
      val provide:InputStream = buffered(path)
    }
  }

  /** Provider resolving lexicon data beneath URL `u`. */
  def fromUrl(u:URL, useFullPath:Boolean = false):LexiconsProvider = new LexiconsProvider {
    lazy val lexiconRoot = u.toString
    implicit def provide[L:ClassTag]: ModelProvider[L] = new ModelProvider[L] {
      private val modelUrl = new URL(u, if(useFullPath) fullLexiconName[L] else shortLexiconName[L])
      val provide: InputStream = buffered(modelUrl)
      val coordinates: String = modelUrl.toString
    }
  }

  implicit def providePath(p:Path):LexiconsProvider = fromFile(p.toFile, false)
  implicit def provideFile(f:File):LexiconsProvider = fromFile(f,false)
  implicit def provideURL(u:URL):LexiconsProvider = fromUrl(u, false)

  /** Parses `s` as "classpath", then as a URL, then as a filesystem path. */
  def fromString(s:String, useFullPath:Boolean=false):LexiconsProvider = s match {
    case cp if cp.toLowerCase == "classpath" => classpath(useFullPath)
    case urlS if Try(new URL(urlS)).isSuccess => fromUrl(new URL(urlS), useFullPath)
    case p => fromFile(new File(p), useFullPath)
  }

  @deprecated("This exists to preserve legacy functionality", "10/27/15")
  def classpath(useFullPath:Boolean=true):LexiconsProvider = new LexiconsProvider {
    def lexiconRoot = "classpath"
    implicit def provide[L: ClassTag]: ModelProvider[L] = new ModelProvider[L] {
      private def url = if(useFullPath) ClasspathURL.fromDirectory[Lexicon](shortLexiconName[L]) else this.getClass.getResource("/" + shortLexiconName[L])
      def coordinates: String = url.toString
      def provide: InputStream = url
    }
  }
}
/**
 * Mixin that eagerly loads a lexicon's contents from its [[ModelProvider]]
 * when the lexicon is constructed.
 */
trait ProvidedLexicon[L] {
  this: MutableLexicon =>
  // Source of the serialized lexicon data.
  def provider:ModelProvider[L]
  // Load during construction; synchronized around the mutation of the lexicon.
  synchronized {
    this.++=(provider.provide)
  }
}
/** A [[TriePhraseLexicon]] named after class `L` and populated from `L`'s implicit [[ModelProvider]]. */
class ProvidedTriePhraseLexicon[L]()(implicit val provider:ModelProvider[L], ct:ClassTag[L]) extends TriePhraseLexicon(ct.runtimeClass.getName) with ProvidedLexicon[L]
/** A [[TriePhraseLexicon]] with an explicit name and an explicit data provider. */
class GenericLexicon(name:String, val provider:ModelProvider[GenericLexicon]) extends TriePhraseLexicon(name) with ProvidedLexicon[GenericLexicon]
/**
 * Aggregates every statically-known lexicon, grouped by source.  Each inner
 * `object` is initialized lazily (standard Scala object semantics), so a
 * lexicon's data is only loaded from `lp` when first referenced.
 */
class StaticLexicons()(implicit lp:LexiconsProvider) {
  import lp._
  // Lexicons curated at IESL: places, organizations, dates, and person names.
  object iesl {
    object Continents extends Iesl.Continents()(lp.provide[Iesl.Continents])
    object Country extends Iesl.Country()(lp.provide[Iesl.Country])
    object City extends Iesl.City()(lp.provide[Iesl.City])
    object UsState extends Iesl.UsState()(lp.provide[Iesl.UsState])
    object PlaceSuffix extends Iesl.PlaceSuffix()(lp.provide[Iesl.PlaceSuffix])
    object JobTitle extends Iesl.JobTitle()(lp.provide[Iesl.JobTitle])
    object Money extends Iesl.Money()(lp.provide[Iesl.Money])
    object Company extends Iesl.Company()(lp.provide[Iesl.Company])
    object OrgSuffix extends Iesl.OrgSuffix()(lp.provide[Iesl.OrgSuffix])
    object Month extends Iesl.Month()(lp.provide[Iesl.Month])
    object Day extends Iesl.Day()(lp.provide[Iesl.Day])
    object PersonHonorific extends Iesl.PersonHonorific()(lp.provide[Iesl.PersonHonorific])
    object PersonFirstHighest extends Iesl.PersonFirstHighest()(lp.provide[Iesl.PersonFirstHighest])
    object PersonFirstHigh extends Iesl.PersonFirstHigh()(lp.provide[Iesl.PersonFirstHigh])
    object PersonFirstMedium extends Iesl.PersonFirstMedium()(lp.provide[Iesl.PersonFirstMedium])
    object PersonLastHighest extends Iesl.PersonLastHighest()(lp.provide[Iesl.PersonLastHighest])
    object PersonLastHigh extends Iesl.PersonLastHigh()(lp.provide[Iesl.PersonLastHigh])
    object PersonLastMedium extends Iesl.PersonLastMedium()(lp.provide[Iesl.PersonLastMedium])
    object Say extends Iesl.Say()(lp.provide[Iesl.Say])
    object Demonym extends Iesl.Demonym()(lp.provide[Iesl.Demonym])
    // Note: DemonymMap deliberately reuses the Demonym resource.
    object DemonymMap extends Iesl.DemonymMap()(lp.provide[Iesl.Demonym])
    // Convenience unions over the individual lexicons above.
    object AllPlaces extends TrieUnionLexicon("places", Continents, Country, City, UsState)
    object PersonFirst extends TrieUnionLexicon("person-first", PersonFirstHighest, PersonFirstHigh, PersonFirstMedium)
    object PersonLast extends TrieUnionLexicon("person-last", PersonLastHighest, PersonLastHigh, PersonLastMedium)
  }
  // Person names from the Social Security Death Index.
  object ssdi {
    object PersonFirstHighest extends Ssdi.PersonFirstHighest()(lp.provide[Ssdi.PersonFirstHighest])
    object PersonFirstHigh extends Ssdi.PersonFirstHigh()(lp.provide[Ssdi.PersonFirstHigh])
    object PersonFirstMedium extends Ssdi.PersonFirstMedium()(lp.provide[Ssdi.PersonFirstMedium])
    object PersonLastHighest extends Ssdi.PersonLastHighest()(lp.provide[Ssdi.PersonLastHighest])
    object PersonLastHigh extends Ssdi.PersonLastHigh()(lp.provide[Ssdi.PersonLastHigh])
    object PersonLastMedium extends Ssdi.PersonLastMedium()(lp.provide[Ssdi.PersonLastMedium])
    object PersonFirst extends TrieUnionLexicon("person-first", PersonFirstHighest, PersonFirstHigh, PersonFirstMedium)
    object PersonLast extends TrieUnionLexicon("person-last", PersonLastHighest, PersonLastHigh, PersonLastMedium)
  }
  // Person names from the US census.
  object uscensus {
    object PersonFirstFemale extends Uscensus.PersonFirstFemale()(lp.provide[Uscensus.PersonFirstFemale])
    object PersonFirstMale extends Uscensus.PersonFirstMale()(lp.provide[Uscensus.PersonFirstMale])
    object PersonLast extends Uscensus.PersonLast()(lp.provide[Uscensus.PersonLast])
  }
  // Entity lexicons mined from Wikipedia; each category typically pairs a base
  // lexicon with its redirects, their union, and a disambiguation lexicon.
  object wikipedia {
    object Battle extends Wikipedia.Battle()(lp.provide[Wikipedia.Battle])
    object BattleRedirect extends Wikipedia.BattleRedirect()(lp.provide[Wikipedia.BattleRedirect])
    object BattleAndRedirect extends TrieUnionLexicon("battle-and-redirect", Battle, BattleRedirect)
    object BattleDisambiguation extends Wikipedia.BattleDisambiguation()(lp.provide[Wikipedia.BattleDisambiguation])
    object Book extends Wikipedia.Book()(lp.provide[Wikipedia.Book])
    object BookRedirect extends Wikipedia.BookRedirect()(lp.provide[Wikipedia.BookRedirect])
    object BookAndRedirect extends TrieUnionLexicon("book-and-redirect", Book, BookRedirect)
    object BookDisambiguation extends Wikipedia.BookDisambiguation()(lp.provide[Wikipedia.BookDisambiguation])
    object Business extends Wikipedia.Business()(lp.provide[Wikipedia.Business])
    object BusinessRedirect extends Wikipedia.BusinessRedirect()(lp.provide[Wikipedia.BusinessRedirect])
    object BusinessAndRedirect extends TrieUnionLexicon("business-and-redirect", Business, BusinessRedirect)
    object BusinessDisambiguation extends Wikipedia.BusinessDisambiguation()(lp.provide[Wikipedia.BusinessDisambiguation])
    object Competition extends Wikipedia.Competition()(lp.provide[Wikipedia.Competition])
    object CompetitionRedirect extends Wikipedia.CompetitionRedirect()(lp.provide[Wikipedia.CompetitionRedirect])
    object CompetitionAndRedirect extends TrieUnionLexicon("competition-and-redirect", Competition, CompetitionRedirect)
    object CompetitionDisambiguation extends Wikipedia.CompetitionDisambiguation()(lp.provide[Wikipedia.CompetitionDisambiguation])
    object Event extends Wikipedia.Event()(lp.provide[Wikipedia.Event])
    object EventRedirect extends Wikipedia.EventRedirect()(lp.provide[Wikipedia.EventRedirect])
    object EventAndRedirect extends TrieUnionLexicon("event-and-redirect", Event, EventRedirect)
    object EventDisambiguation extends Wikipedia.EventDisambiguation()(lp.provide[Wikipedia.EventDisambiguation])
    object Film extends Wikipedia.Film()(lp.provide[Wikipedia.Film])
    object FilmRedirect extends Wikipedia.FilmRedirect()(lp.provide[Wikipedia.FilmRedirect])
    object FilmAndRedirect extends TrieUnionLexicon("film-and-redirect", Film, FilmRedirect)
    object FilmDisambiguation extends Wikipedia.FilmDisambiguation()(lp.provide[Wikipedia.FilmDisambiguation])
    object Location extends Wikipedia.Location()(lp.provide[Wikipedia.Location])
    object LocationRedirect extends Wikipedia.LocationRedirect()(lp.provide[Wikipedia.LocationRedirect])
    object LocationAndRedirect extends TrieUnionLexicon("location-and-redirect", Location, LocationRedirect)
    object LocationDisambiguation extends Wikipedia.LocationDisambiguation()(lp.provide[Wikipedia.LocationDisambiguation])
    object ManMadeThing extends Wikipedia.ManMadeThing()(lp.provide[Wikipedia.ManMadeThing])
    object ManMadeThingRedirect extends Wikipedia.ManMadeThingRedirect()(lp.provide[Wikipedia.ManMadeThingRedirect])
    object ManMadeThingAndRedirect extends TrieUnionLexicon("man-made-thing-and-redirect", ManMadeThing, ManMadeThingRedirect)
    object ManMadeThingDisambiguation extends Wikipedia.ManMadeThingDisambiguation()(lp.provide[Wikipedia.ManMadeThingDisambiguation])
    object Organization extends Wikipedia.Organization()(lp.provide[Wikipedia.Organization])
    object OrganizationRedirect extends Wikipedia.OrganizationRedirect()(lp.provide[Wikipedia.OrganizationRedirect])
    object OrganizationAndRedirect extends TrieUnionLexicon("organization-and-redirect", Organization, OrganizationRedirect)
    object OrganizationDisambiguation extends Wikipedia.OrganizationDisambiguation()(lp.provide[Wikipedia.OrganizationDisambiguation])
    object Person extends Wikipedia.Person()(lp.provide[Wikipedia.Person])
    object PersonRedirect extends Wikipedia.PersonRedirect()(lp.provide[Wikipedia.PersonRedirect])
    object PersonAndRedirect extends TrieUnionLexicon("person-and-redirect", Person, PersonRedirect)
    object PersonDisambiguation extends Wikipedia.PersonDisambiguation()(lp.provide[Wikipedia.PersonDisambiguation])
    object Song extends Wikipedia.Song()(lp.provide[Wikipedia.Song])
    object SongRedirect extends Wikipedia.SongRedirect()(lp.provide[Wikipedia.SongRedirect])
    object SongAndRedirect extends TrieUnionLexicon("song-and-redirect", Song, SongRedirect)
    object SongDisambiguation extends Wikipedia.SongDisambiguation()(lp.provide[Wikipedia.SongDisambiguation])
  }
  // Pinyin transliterations of Mandarin names.
  object mandarin {
    object SurnamePinyin extends Mandarin.SurnamePinyin()(lp.provide[Mandarin.SurnamePinyin])
    object GivenNamePinyin extends Mandarin.GivenNamePinyin()(lp.provide[Mandarin.GivenNamePinyin])
  }
  // Spanish-language lexicons (IESL plus Spanish Wikipedia).
  object spanish {
    object Continents extends Iesl.es.Continents()(lp.provide[Iesl.es.Continents])
    object Day extends Iesl.es.Day()(lp.provide[Iesl.es.Day])
    object Month extends Iesl.es.Month()(lp.provide[Iesl.es.Month])
    object PersonFirst extends Iesl.es.PersonFirst()(lp.provide[Iesl.es.PersonFirst])
    object PersonLast extends Iesl.es.PersonLast()(lp.provide[Iesl.es.PersonLast])
    object Location extends Iesl.es.Location()(lp.provide[Iesl.es.Location])
    object Miscellaneous extends Iesl.es.Miscellaneous()(lp.provide[Iesl.es.Miscellaneous])
    object Person extends Iesl.es.Person()(lp.provide[Iesl.es.Person])
    object Organization extends Iesl.es.Organization()(lp.provide[Iesl.es.Organization])
    object PersonHonorific extends Iesl.es.PersonHonorific()(lp.provide[Iesl.es.PersonHonorific])
    object OrgSuffix extends Iesl.es.OrgSuffix()(lp.provide[Iesl.es.OrgSuffix])
    object Demonym extends Iesl.es.Demonym()(lp.provide[Iesl.es.Demonym])
    object WikiBook extends Wikipedia.es.Book()(lp.provide[Wikipedia.es.Book])
    object WikiFilm extends Wikipedia.es.Film()(lp.provide[Wikipedia.es.Film])
    object WikiEvent extends Wikipedia.es.Event()(lp.provide[Wikipedia.es.Event])
    object WikiBusiness extends Wikipedia.es.Business()(lp.provide[Wikipedia.es.Business])
    object WikiLocation extends Wikipedia.es.Location()(lp.provide[Wikipedia.es.Location])
    object WikiLocationRedirect extends Wikipedia.es.LocationRedirect()(lp.provide[Wikipedia.es.LocationRedirect])
    object WikiLocationAndRedirect extends TrieUnionLexicon("es-location-and-redirect", WikiLocation, WikiLocationRedirect)
    object WikiPerson extends Wikipedia.es.Person()(lp.provide[Wikipedia.es.Person])
    object WikiPersonRedirect extends Wikipedia.es.PersonRedirect()(lp.provide[Wikipedia.es.PersonRedirect])
    object WikiPersonAndRedirect extends TrieUnionLexicon("es-person-and-redirect", WikiPerson, WikiPersonRedirect)
    object WikiOrganization extends Wikipedia.es.Organization()(lp.provide[Wikipedia.es.Organization])
    object WikiOrganizationRedirect extends Wikipedia.es.OrganizationRedirect()(lp.provide[Wikipedia.es.OrganizationRedirect])
    object WikiOrganizationAndRedirect extends TrieUnionLexicon("es-organization-and-redirect", WikiOrganization, WikiOrganizationRedirect)
  }
}
| strubell/factorie | src/main/scala/cc/factorie/app/nlp/lexicon/StaticLexicons.scala | Scala | apache-2.0 | 15,358 |
package com.yourtion.TinyWeb
import scala.util.Random
/**
* Created by Yourtion on 9/5/16.
*/
// NOTE: this object previously mixed in `App` *and* overrode `main`.
// Overriding `App.main` is deprecated (@deprecatedOverriding since 2.11) and
// bypasses App's delayed-init machinery entirely, so the explicit `main`
// method is kept and the `App` mixin dropped; the JVM entry point is unchanged.
object Example {

  /** Renders the model's "greetings" entries as an HTML fragment. */
  def greetingViewRenderer(model: Map[String, List[String]]) =
    "<h1>Friendly Greetings:</h1>%s".format(
      model
        .getOrElse("greetings", List[String]())
        .map(renderGreeting)
        .mkString(", "))

  /** Wraps a single greeting in an h2 element. */
  private def renderGreeting(greeting: String) =
    "<h2>%s</h2>".format(greeting)

  def greetingView = new FunctionView(greetingViewRenderer)

  /** Splits the comma-separated request body into names and greets each one. */
  def handleGreetingRequest(request: HttpRequest) =
    Map("greetings" -> request.body.split(",").toList.map(makeGreeting))

  // vals, not defs: the original re-created a Random (re-seeded) and a Vector
  // on every call; a single shared instance of each is sufficient.
  private val random = new Random()
  private val greetings = Vector("Hello", "Greetings", "Salutations", "Hola")

  /** Prefixes `name` with a randomly chosen greeting word. */
  private def makeGreeting(name: String) =
    "%s, %s".format(greetings(random.nextInt(greetings.size)), name)

  def greetingController = new FunctionController(greetingView, handleGreetingRequest)

  /** Pass-through filter that logs the request path. */
  private def loggingFilter(request: HttpRequest) = {
    println("In Logging Filter - request for path: %s".format(request.path))
    request
  }

  /** The assembled application: one route plus the logging filter. */
  def tinyweb = new TinyWeb(
    Map("/greeting" -> greetingController),
    List(loggingFilter))

  def testHttpRequest = HttpRequest(body = "Mike,Joe,John,Steve", path = "/greeting")

  def main(args: Array[String]): Unit = {
    val testResponse = tinyweb.handleRequest(testHttpRequest)
    // Option.foreach replaces the isDefined/get pattern.
    testResponse.foreach { response =>
      println("responseCode: " + response.responseCode)
      println("responseBody: ")
      println(response.body)
    }
  }
}
| yourtion/LearningFunctionalProgramming | Scala/src/com/yourtion/TinyWeb/Example.scala | Scala | mit | 1,562 |
package net.cladophora.srt
/** Request to shift every cue in `subs` by a fixed offset of `ms`
 *  (milliseconds, per the field name — confirm against the shifting code). */
case class SimpleShiftRequest (subs: Subtitles, ms: Int)
| jasonmar/scala-subtitle-utility | src/main/scala/net/cladophora/srt/SimpleShiftRequest.scala | Scala | apache-2.0 | 85 |
/**
* User: Nuno Alves
* Date: 23-07-2013
* Time: 21:56
*/
/**
* Find the last element of a list.
* Example:
* scala> last(List(1, 1, 2, 3, 5, 8))
* res0: Int = 8
*/
/** S-99 problem P01: return the final element of a list.
 *  Mirrors `List.last`, including the NoSuchElementException on empty input. */
object P01 {
  @annotation.tailrec
  def last[A](list: List[A]): A = list match {
    case single :: Nil => single
    case _ :: rest     => last(rest)
    case Nil           => throw new NoSuchElementException
  }
}
| nunodpalves/playground | S-99/src/P01.scala | Scala | gpl-2.0 | 237 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.optim
import org.apache.spark.Logging
import org.apache.spark.mllib.linalg._
import org.apache.spark.rdd.RDD
/**
* Model fitted by [[WeightedLeastSquares]].
* @param coefficients model coefficients
* @param intercept model intercept
*/
private[ml] class WeightedLeastSquaresModel(
    val coefficients: DenseVector, // one coefficient per feature column
    val intercept: Double) extends Serializable
/**
* Weighted least squares solver via normal equation.
* Given weighted observations (w,,i,,, a,,i,,, b,,i,,), we use the following weighted least squares
* formulation:
*
* min,,x,z,, 1/2 sum,,i,, w,,i,, (a,,i,,^T^ x + z - b,,i,,)^2^ / sum,,i,, w_i
* + 1/2 lambda / delta sum,,j,, (sigma,,j,, x,,j,,)^2^,
*
* where lambda is the regularization parameter, and delta and sigma,,j,, are controlled by
* [[standardizeLabel]] and [[standardizeFeatures]], respectively.
*
* Set [[regParam]] to 0.0 and turn off both [[standardizeFeatures]] and [[standardizeLabel]] to
* match R's `lm`.
* Turn on [[standardizeLabel]] to match R's `glmnet`.
*
* @param fitIntercept whether to fit intercept. If false, z is 0.0.
* @param regParam L2 regularization parameter (lambda)
* @param standardizeFeatures whether to standardize features. If true, sigma_,,j,, is the
* population standard deviation of the j-th column of A. Otherwise,
* sigma,,j,, is 1.0.
* @param standardizeLabel whether to standardize label. If true, delta is the population standard
* deviation of the label column b. Otherwise, delta is 1.0.
*/
private[ml] class WeightedLeastSquares(
    val fitIntercept: Boolean,
    val regParam: Double,
    val standardizeFeatures: Boolean,
    val standardizeLabel: Boolean) extends Logging with Serializable {
  import WeightedLeastSquares._

  require(regParam >= 0.0, s"regParam cannot be negative: $regParam")
  if (regParam == 0.0) {
    logWarning("regParam is zero, which might cause numerical instability and overfitting.")
  }

  /**
   * Creates a [[WeightedLeastSquaresModel]] from an RDD of [[Instance]]s.
   */
  def fit(instances: RDD[Instance]): WeightedLeastSquaresModel = {
    // Single distributed pass: gather all sufficient statistics for the
    // normal equation (weighted sums of a, b, a*b, and the packed a a^T).
    val summary = instances.treeAggregate(new Aggregator)(_.add(_), _.merge(_))
    summary.validate()
    logInfo(s"Number of instances: ${summary.count}.")
    val triK = summary.triK   // length of the packed upper triangle of A^T A
    val bBar = summary.bBar
    val bStd = summary.bStd
    val aBar = summary.aBar
    val aVar = summary.aVar
    val abBar = summary.abBar
    val aaBar = summary.aaBar
    val aaValues = aaBar.values

    if (fitIntercept) {
      // shift centers
      // A^T A - aBar aBar^T
      BLAS.spr(-1.0, aBar, aaValues)
      // A^T b - bBar aBar
      BLAS.axpy(-bBar, aBar, abBar)
    }

    // add regularization to diagonals
    // aaBar stores the upper triangle in packed order, so diagonal entries
    // sit at indices 0, 2, 5, 9, ... — hence the i += j / j += 1 walk below.
    var i = 0
    var j = 2
    while (i < triK) {
      var lambda = regParam
      if (standardizeFeatures) {
        lambda *= aVar(j - 2) // scale penalty by variance of feature (j - 2)
      }
      if (standardizeLabel) {
        // TODO: handle the case when bStd = 0
        lambda /= bStd
      }
      aaValues(i) += lambda
      i += j
      j += 1
    }

    // Solve the (regularized, possibly centered) normal equation in place.
    val x = new DenseVector(CholeskyDecomposition.solve(aaBar.values, abBar.values))

    // compute intercept
    val intercept = if (fitIntercept) {
      bBar - BLAS.dot(aBar, x)
    } else {
      0.0
    }

    new WeightedLeastSquaresModel(x, intercept)
  }
}
private[ml] object WeightedLeastSquares {

  /**
   * Case class for weighted observations.
   * @param w weight, must be positive
   * @param a features
   * @param b label
   */
  case class Instance(w: Double, a: Vector, b: Double) {
    // NOTE(review): the scaladoc says "positive" but the check permits
    // w == 0 — confirm which is intended.
    require(w >= 0.0, s"Weight cannot be negative: $w.")
  }

  /**
   * Aggregator to provide necessary summary statistics for solving [[WeightedLeastSquares]].
   */
  // TODO: consolidate aggregates for summary statistics
  private class Aggregator extends Serializable {
    var initialized: Boolean = false
    var k: Int = _     // number of features, fixed by the first instance seen
    var count: Long = _ // number of instances aggregated
    var triK: Int = _   // k * (k + 1) / 2, size of the packed upper triangle

    // All sums below are weighted by each instance's w.
    private var wSum: Double = _       // sum of weights
    private var wwSum: Double = _      // sum of squared weights
    private var bSum: Double = _       // weighted sum of labels
    private var bbSum: Double = _      // weighted sum of squared labels
    private var aSum: DenseVector = _  // weighted sum of feature vectors
    private var abSum: DenseVector = _ // weighted sum of (label * features)
    private var aaSum: DenseVector = _ // weighted sum of a a^T, packed triangle

    // Lazily allocates the accumulators once the feature dimension is known.
    private def init(k: Int): Unit = {
      require(k <= 4096, "In order to take the normal equation approach efficiently, " +
        s"we set the max number of features to 4096 but got $k.")
      this.k = k
      triK = k * (k + 1) / 2
      count = 0L
      wSum = 0.0
      wwSum = 0.0
      bSum = 0.0
      bbSum = 0.0
      aSum = new DenseVector(Array.ofDim(k))
      abSum = new DenseVector(Array.ofDim(k))
      aaSum = new DenseVector(Array.ofDim(triK))
      initialized = true
    }

    /**
     * Adds an instance.
     */
    def add(instance: Instance): this.type = {
      val Instance(w, a, b) = instance
      val ak = a.size
      if (!initialized) {
        init(ak) // dimension taken from the first instance
      }
      assert(ak == k, s"Dimension mismatch. Expect vectors of size $k but got $ak.")
      count += 1L
      wSum += w
      wwSum += w * w
      bSum += w * b
      bbSum += w * b * b
      BLAS.axpy(w, a, aSum)
      BLAS.axpy(w * b, a, abSum)
      BLAS.spr(w, a, aaSum) // weighted rank-one update of the packed triangle
      this
    }

    /**
     * Merges another [[Aggregator]].
     */
    def merge(other: Aggregator): this.type = {
      if (!other.initialized) {
        this // nothing to merge from an empty partition
      } else {
        if (!initialized) {
          init(other.k)
        }
        assert(k == other.k, s"dimension mismatch: this.k = $k but other.k = ${other.k}")
        count += other.count
        wSum += other.wSum
        wwSum += other.wwSum
        bSum += other.bSum
        bbSum += other.bbSum
        BLAS.axpy(1.0, other.aSum, aSum)
        BLAS.axpy(1.0, other.abSum, abSum)
        BLAS.axpy(1.0, other.aaSum, aaSum)
        this
      }
    }

    /**
     * Validates that we have seen observations.
     */
    def validate(): Unit = {
      assert(initialized, "Training dataset is empty.")
      assert(wSum > 0.0, "Sum of weights cannot be zero.")
    }

    /**
     * Weighted mean of features.
     */
    def aBar: DenseVector = {
      val output = aSum.copy
      BLAS.scal(1.0 / wSum, output)
      output
    }

    /**
     * Weighted mean of labels.
     */
    def bBar: Double = bSum / wSum

    /**
     * Weighted population standard deviation of labels.
     */
    def bStd: Double = math.sqrt(bbSum / wSum - bBar * bBar)

    /**
     * Weighted mean of (label * features).
     */
    def abBar: DenseVector = {
      val output = abSum.copy
      BLAS.scal(1.0 / wSum, output)
      output
    }

    /**
     * Weighted mean of (features * features^T^).
     */
    def aaBar: DenseVector = {
      val output = aaSum.copy
      BLAS.scal(1.0 / wSum, output)
      output
    }

    /**
     * Weighted population variance of features.
     */
    def aVar: DenseVector = {
      val variance = Array.ofDim[Double](k)
      var i = 0
      var j = 2
      val aaValues = aaSum.values
      // Walk the diagonal of the packed triangle (indices 0, 2, 5, 9, ...);
      // l = j - 2 is the feature index for the diagonal entry at i.
      while (i < triK) {
        val l = j - 2
        val aw = aSum(l) / wSum
        variance(l) = aaValues(i) / wSum - aw * aw
        i += j
        j += 1
      }
      new DenseVector(variance)
    }
  }
}
| pronix/spark | mllib/src/main/scala/org/apache/spark/ml/optim/WeightedLeastSquares.scala | Scala | apache-2.0 | 8,033 |
package views.html
import play.twirl.api._
import play.twirl.api.TemplateMagic._
import play.api.templates.PlayMagic._
import models._
import controllers._
import java.lang._
import java.util._
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import play.api.i18n._
import play.core.j.PlayMagicForJava._
import play.mvc._
import play.data._
import play.api.data.Field
import play.mvc.Http.Context.Implicit._
import views.html._
/**/
// Twirl-generated template object (see the "-- GENERATED --" footer below).
// Do not hand-edit: regenerate by editing app/views/PythonPage.scala.html.
object PythonPage extends BaseScalaTemplate[play.twirl.api.HtmlFormat.Appendable,Format[play.twirl.api.HtmlFormat.Appendable]](play.twirl.api.HtmlFormat) with play.twirl.api.Template0[play.twirl.api.HtmlFormat.Appendable] {

  /**/
  // Renders the static "Python" page inside the main layout.
  def apply():play.twirl.api.HtmlFormat.Appendable = {
    _display_ {

      Seq[Any](_display_(/*1.2*/main("Python Programming Language")/*1.37*/{_display_(Seq[Any](format.raw/*1.38*/("""
"""),format.raw/*2.3*/("""<div class="row">
<div class="col-sm-8" style="padding-top: 7%;">
<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi varius, libero molestie accumsan tempor, tellus ipsum pretium nulla, sit amet faucibus dolor nunc at velit.
Maecenas ornare ut magna id pretium. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent a nisi ante. Proin aliquet erat nisi. Aenean mauris arcu, commodo eget erat sed, lobortis rhoncus tortor.
Nulla dictum arcu sit amet facilisis varius. Donec venenatis maximus velit rhoncus euismod. Maecenas blandit facilisis sem, nec lobortis nunc dignissim nec.
</p>
<br>
<p>
Sed elementum varius nibh fermentum iaculis. Ut sit amet quam a tortor tempor ullamcorper et in nisl. Aenean sodales est eros, id dictum mi bibendum ac. Integer non ex a elit tempus interdum a eget massa. Etiam sagittis diam aliquet facilisis aliquam. Nunc a malesuada leo.
Maecenas eu est iaculis, luctus mi in, fringilla nunc. Sed tincidunt vitae lacus quis ornare. Sed convallis tortor vel tellus tincidunt, vel pulvinar tellus elementum. Nunc sit amet libero at nibh sollicitudin vehicula. Nam accumsan volutpat magna non ornare. Maecenas mattis, leo id pretium posuere, orci lacus tristique diam, id fringilla mauris urna non turpis. Morbi tincidunt augue id scelerisque sagittis.
Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Lorem ipsum dolor sit amet, consectetur adipiscing elit.
</p>
<br>
<p>
Aliquam ante ipsum, malesuada et risus quis, ultrices euismod tellus. Nam vel aliquet mauris, aliquet aliquet mauris. Ut eget diam eget justo lacinia rhoncus id eget ex.
Sed dapibus eros sed tempor convallis. Praesent in turpis sed felis luctus eleifend eget egestas purus. Aenean nec libero et sapien rutrum ultrices eu vel mauris. Suspendisse a bibendum eros.
Vivamus sed purus enim. Morbi varius porta sem in pretium. Quisque non porta nibh. Cras ac nisi eros. Fusce nibh dolor, mollis in sollicitudin finibus, ultricies quis tortor.
Vivamus sollicitudin, leo id consectetur scelerisque, justo magna sollicitudin turpis, vel tristique justo lectus sed risus. Integer scelerisque orci nec ex euismod, a cursus elit consequat.
</p>
</div>
<div class="col-sm-4"><img class="img-responsive" src=""""),_display_(/*23.61*/routes/*23.67*/.Assets.versioned("img/Python.png")),format.raw/*23.102*/("""" style="padding-top: 30%;" class="img-rounded" alt="Cinque Terre" width="304" height="236"></div>
</div>
""")))}))}
  }

  // Standard Twirl template entry points.
  def render(): play.twirl.api.HtmlFormat.Appendable = apply()

  def f:(() => play.twirl.api.HtmlFormat.Appendable) = () => apply()

  def ref: this.type = this

}
/*
-- GENERATED --
DATE: Wed Apr 06 14:20:47 BST 2016
SOURCE: C:/Users/A587853/Documents/GitHub/WebApp/app/views/PythonPage.scala.html
HASH: ad2c7dcce660962024d8cae9fb1a9e720381a62e
MATRIX: 803->1|846->36|884->37|914->41|3368->2468|3383->2474|3440->2509
LINES: 29->1|29->1|29->1|30->2|51->23|51->23|51->23
-- GENERATED --
*/
| cwrobertson/WebApp | target/scala-2.10/twirl/main/views/html/PythonPage.template.scala | Scala | mit | 4,202 |
package docs.home.scaladsl.persistence
import scala.concurrent.Future
import akka.Done
import akka.NotUsed
import akka.persistence.query.Offset
import akka.stream.scaladsl.Flow
import com.lightbend.lagom.scaladsl.persistence.AggregateEventTag
import com.lightbend.lagom.scaladsl.persistence.EventStreamElement
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor.ReadSideHandler
// NOTE: the region between the //#my-database markers is included verbatim in
// the Lagom manual; keep edits inside the markers snippet-friendly.
//#my-database
trait MyDatabase {
  /**
   * Create the tables needed for this read side if not already created.
   */
  def createTables(): Future[Done]

  /**
   * Load the offset of the last event processed.
   */
  def loadOffset(tag: AggregateEventTag[BlogEvent]): Future[Offset]

  /**
   * Handle the post added event.
   */
  def handleEvent(event: BlogEvent, offset: Offset): Future[Done]
}
//#my-database
/**
 * Read-side processor that consumes the tagged [[BlogEvent]] streams and
 * forwards each event, together with its offset, to [[MyDatabase]].
 * The //#tag and //#build-handler markers delimit doc snippets for the
 * Lagom manual — do not remove them.
 */
class BlogEventProcessor(myDatabase: MyDatabase) extends ReadSideProcessor[BlogEvent] {

  //#tag
  override def aggregateTags: Set[AggregateEventTag[BlogEvent]] =
    BlogEvent.Tag.allTags
  //#tag

  //#build-handler
  override def buildHandler(): ReadSideProcessor.ReadSideHandler[BlogEvent] = {
    new ReadSideHandler[BlogEvent] {

      override def globalPrepare(): Future[Done] =
        myDatabase.createTables()

      override def prepare(tag: AggregateEventTag[BlogEvent]): Future[Offset] =
        myDatabase.loadOffset(tag)

      override def handle(): Flow[EventStreamElement[BlogEvent], Done, NotUsed] = {
        Flow[EventStreamElement[BlogEvent]]
          .mapAsync(1) { eventElement =>
            myDatabase.handleEvent(eventElement.event, eventElement.offset)
          }
      }
    }
  }
  //#build-handler
}
| edouardKaiser/lagom | docs/manual/scala/guide/cluster/code/docs/home/scaladsl/persistence/BlogEventProcessor.scala | Scala | apache-2.0 | 1,713 |
package com.markfeeney.circlet
import java.net.URI
import java.util.concurrent.{ArrayBlockingQueue, CountDownLatch, TimeUnit}
import org.eclipse.jetty.websocket.api.WebSocketAdapter
/** WebSocket that queues any string messages received. */
/** Test helper socket: buffers incoming text frames and exposes blocking
 *  accessors for the next message and for socket closure. */
class TestWebSocket(wsUrlString: String) extends WebSocketAdapter {

  val uri = new URI(wsUrlString)

  // Holds received text frames until a test consumes them (capacity 32).
  private val received = new ArrayBlockingQueue[String](32)
  private val closedSignal = new CountDownLatch(1)

  /** Returns the next string message on the socket, waiting a few seconds
   * if necessary. */
  def nextStringMessage: String =
    // Brief poll; assumes most tests don't have complicated timing situations.
    Option(received.poll(2, TimeUnit.SECONDS)) match {
      case Some(message) => message
      case None => sys.error(s"$uri - Timed out waiting for message from websocket")
    }

  override def onWebSocketText(message: String): Unit =
    received.put(message)

  override def onWebSocketClose(statusCode: Int, reason: String): Unit =
    closedSignal.countDown()

  /** Blocks until the socket closes, or for at most two seconds. */
  def awaitClose(): Unit = {
    closedSignal.await(2, TimeUnit.SECONDS)
    ()
  }
}
| overthink/circlet | src/test/scala/com/markfeeney/circlet/TestWebSocket.scala | Scala | mit | 1,117 |
/* StandardDialogs.scala
*
* Jim McBeath, July 11, 2008
*/
package net.jimmc.swing
import net.jimmc.util.StandardQueries
/** Convenience mixin bundling the basic, file, and standard-query dialog traits. */
trait StandardDialogs extends BasicDialogs with FileDialogs with StandardQueries
| jimmc/mimprint | src/net/jimmc/swing/StandardDialogs.scala | Scala | gpl-2.0 | 208 |
/**
* Copyright 2013, 2014, 2015 Gianluca Amato <gamato@unich.it>
*
* This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains
* JANDOM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* JANDOM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty ofa
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with JANDOM. If not, see <http://www.gnu.org/licenses/>.
*/
package it.unich.jandom.parsers
import scala.util.parsing.combinator.JavaTokenParsers
import it.unich.jandom.domains.numerical.NumericalDomain
import it.unich.jandom.targets.Environment
/**
* A parser for numerical properties.
* @param env environment to use for parsing the property.
* @author Gianluca Amato <gamato@unich.it>
*
*/
class NumericalPropertyParser(val env: Environment) extends JavaTokenParsers with NumericExpressionParser with NumericConditionParser with VariableParser {
  /**
   * Parser for properties.
   * The parsed condition is analyzed against the domain's top element over
   * `env.size` variables, yielding the abstract property it describes.
   */
  protected def property(domain: NumericalDomain) = numcondition ^^ { _.analyze(domain.top(env.size)) }

  /**
   * Parsing function.
   * @param s string to parse
   * @param domain the numerical domain corresponding to the type Property
   * @return a ParseResult[dom.Property] with the parsed property, or corresponding error condition
   */
  def parseProperty(s: String, domain: NumericalDomain) = parseAll(property(domain),s)
}
| rubino22/JDBeta | core/src/main/scala/it/unich/jandom/parsers/NumericalPropertyParser.scala | Scala | lgpl-3.0 | 1,749 |
/**
 * Immutable complex number.
 *
 * @param real      the real component
 * @param imaginary the imaginary component
 */
class Complex(real: Double, imaginary: Double) {
  /** Real component. Explicit result type, as recommended for public members. */
  def re: Double = real
  /** Imaginary component. */
  def im: Double = imaginary
}
} | P1erreGaultier/workspace | GenieLog/src/Complex.scala | Scala | unlicense | 87 |
package services
import akka.actor.ActorSystem
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.kafka.scaladsl.Consumer
import akka.stream.Materializer
import akka.stream.scaladsl.Sink
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.StringDeserializer
import play.api.Configuration
import scala.concurrent.Future
/**
* ServiceKafkaConsumer class
*/
// Subscribes to `topicNames` as consumer group `groupName`; every record's
// string value is handed to `handleEvent`. The stream is started as a side
// effect of constructing this class.
class ServiceKafkaConsumer(topicNames: Set[String], groupName: String, implicit val mat: Materializer,
actorSystem: ActorSystem, configuration: Configuration, handleEvent: String => Unit) {

  // The "kafka" config section is mandatory; fail fast at construction time.
  val config = configuration.getConfig("kafka")
    .getOrElse(throw new Exception("No config element for kafka!"))
    .underlying

  val consumerSettings = ConsumerSettings(actorSystem, new StringDeserializer, new StringDeserializer)
    .withBootstrapServers(config.getString("bootstrap.servers"))
    .withGroupId(groupName)
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, config.getString("auto.offset.reset"))

  // Each message is processed first, then its offset committed, giving
  // at-least-once delivery (a crash between the two re-delivers the message).
  // NOTE(review): the materialized Future from runWith(Sink.ignore) is
  // discarded, so a stream failure terminates silently — confirm whether
  // supervision/logging is intended here.
  Consumer.committableSource(consumerSettings,
    Subscriptions.topics(topicNames)).mapAsync(1) {
    msg=>
      val event = msg.record.value()
      handleEvent(event)
      Future.successful(msg)
  }.mapAsync(1) { msg =>
    msg.committableOffset.commitScaladsl()
  }.runWith(Sink.ignore)
}
| getArtemUsername/play-and-events | app/services/ServiceKafkaConsumer.scala | Scala | mit | 1,369 |
package algebra.laws
import algebra._
import algebra.lattice._
import cats.kernel.laws._
import org.typelevel.discipline.Laws
import org.scalacheck.{Arbitrary, Prop}
import org.scalacheck.Prop._
import algebra.instances.boolean._
object LatticePartialOrderLaws {
  /** Builds a law set for `A`, capturing its `Eq` and `Arbitrary` instances. */
  def apply[A: Eq: Arbitrary] = new LatticePartialOrderLaws[A] {
    def Equ = Eq[A]
    def Arb = implicitly[Arbitrary[A]]
  }
}
/**
 * Discipline rule sets checking that (semi)lattice operations are compatible
 * with a partial order on the same type: joins/meets agree with `lteqv`, and
 * bounds behave as least/greatest elements.
 */
trait LatticePartialOrderLaws[A] extends Laws {

  implicit def Equ: Eq[A]
  implicit def Arb: Arbitrary[A]

  // x <= y iff y is the join of x and y.
  def joinSemilatticePartialOrder(implicit A: JoinSemilattice[A], P: PartialOrder[A]) = new LatticePartialOrderProperties(
    name = "joinSemilatticePartialOrder",
    parents = Seq.empty,
    "join+lteqv" -> forAll { (x: A, y: A) =>
      P.lteqv(x, y) ?== P.eqv(y, A.join(x, y))
    }
  )

  // x <= y iff x is the meet of x and y.
  def meetSemilatticePartialOrder(implicit A: MeetSemilattice[A], P: PartialOrder[A]) = new LatticePartialOrderProperties(
    name = "meetSemilatticePartialOrder",
    parents = Seq.empty,
    "meet+lteqv" -> forAll { (x: A, y: A) =>
      P.lteqv(x, y) ?== P.eqv(x, A.meet(x, y))
    }
  )

  def latticePartialOrder(implicit A: Lattice[A], P: PartialOrder[A]) = new LatticePartialOrderProperties(
    name = "latticePartialOrder",
    parents = Seq(joinSemilatticePartialOrder, meetSemilatticePartialOrder)
  )

  // `zero` must be the least element.
  def boundedJoinSemilatticePartialOrder(implicit A: BoundedJoinSemilattice[A], P: PartialOrder[A]) = new LatticePartialOrderProperties(
    name = "boundedJoinSemilatticePartialOrder",
    parents = Seq(joinSemilatticePartialOrder),
    "lteqv+zero" -> forAll { (x: A) => A.zero ?<= x }
  )

  // `one` must be the greatest element.
  def boundedMeetSemilatticePartialOrder(implicit A: BoundedMeetSemilattice[A], P: PartialOrder[A]) = new LatticePartialOrderProperties(
    name = "boundedMeetSemilatticePartialOrder",
    parents = Seq(meetSemilatticePartialOrder),
    "lteqv+one" -> forAll { (x: A) => x ?<= A.one }
  )

  def boundedBelowLatticePartialOrder(implicit A: Lattice[A] with BoundedJoinSemilattice[A], P: PartialOrder[A]) = new LatticePartialOrderProperties(
    name = "boundedBelowLatticePartialOrder",
    parents = Seq(boundedJoinSemilatticePartialOrder, latticePartialOrder)
  )

  def boundedAboveLatticePartialOrder(implicit A: Lattice[A] with BoundedMeetSemilattice[A], P: PartialOrder[A]) = new LatticePartialOrderProperties(
    name = "boundedAboveLatticePartialOrder",
    parents = Seq(boundedMeetSemilatticePartialOrder, latticePartialOrder)
  )

  def boundedLatticePartialOrder(implicit A: BoundedLattice[A], P: PartialOrder[A]) = new LatticePartialOrderProperties(
    name = "boundedLatticePartialOrder",
    parents = Seq(boundedJoinSemilatticePartialOrder, boundedMeetSemilatticePartialOrder)
  )

  // Simple rule-set container with no extra bases.
  class LatticePartialOrderProperties(
    val name: String,
    val parents: Seq[LatticePartialOrderProperties],
    val props: (String, Prop)*
  ) extends RuleSet {
    def bases = Nil
  }

}
| sritchie/algebra | laws/src/main/scala/algebra/laws/LatticePartialOrderLaws.scala | Scala | mit | 2,908 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.apollo.broker;
import _root_.scala.collection.JavaConversions._
import org.fusesource.hawtdispatch._
import org.apache.activemq.apollo.util._
import org.apache.activemq.apollo.util.OptionSupport._
import org.apache.activemq.apollo.dto._
import security._
import security.SecuredResource.VirtualHostKind
import store._
import java.lang.{Throwable, String}
import java.util.concurrent.ConcurrentHashMap
/** Pluggable factory contract: builds a [[VirtualHost]] from its DTO config. */
trait VirtualHostFactory {
  /** Returns a host for `dto`, or null when this factory does not handle it. */
  def create(broker:Broker, dto:VirtualHostDTO):VirtualHost
}
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
object VirtualHostFactory {

  /** Discovers [[VirtualHostFactory]] implementations registered on the class path. */
  val finder = new ClassFinder[VirtualHostFactory]("META-INF/services/org.apache.activemq.apollo/virtual-host-factory.index",classOf[VirtualHostFactory])

  /**
   * Asks each discovered factory in turn to build a host for `dto`.
   *
   * @return the first non-null host produced, or null when `dto` is null or
   *         no registered factory accepts it.
   */
  def create(broker: Broker, dto: VirtualHostDTO): VirtualHost = {
    if (dto == null) {
      null
    } else {
      // Lazily probe the factories: `iterator` + `find` stops at the first
      // factory that produces a host. This replaces the original
      // foreach-with-`return` loop, whose non-local returns were implemented
      // via control-flow exceptions thrown through the lambda.
      finder.singletons.iterator
        .map(_.create(broker, dto))
        .find(_ != null)
        .orNull
    }
  }
}
object DefaultVirtualHostFactory extends VirtualHostFactory with Log {

  /**
   * Builds a plain [[VirtualHost]] for configs whose runtime class is exactly
   * `VirtualHostDTO`; null input and DTO subclasses (handled by other
   * factories) yield null, matching the original type-test match where the
   * wildcard case only ever caught null.
   */
  def create(broker: Broker, dto: VirtualHostDTO): VirtualHost =
    if (dto == null || dto.getClass != classOf[VirtualHostDTO]) {
      // ignore sub classes of AcceptingVirtualHostDTO
      null
    } else {
      val host = new VirtualHost(broker, dto.id)
      host.config = dto
      host
    }
}
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
// Companion logger for the VirtualHost class; imported into the class body.
object VirtualHost extends Log {
}
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class VirtualHost(val broker: Broker, val id:String) extends BaseService with SecuredResource with PluginStateSupport {
  import VirtualHost._

  // All lifecycle work for this host is serialized on its own dispatch queue.
  override val dispatch_queue:DispatchQueue = createQueue("virtual-host")

  var config:VirtualHostDTO = _
  val router:Router = new LocalRouter(this)

  def names:List[String] = config.host_names.toList;

  // Persistence store; stays null when the host is configured storeless.
  var store:Store = null
  val queue_id_counter = new LongCounter()

  // Persisted so connection/session ids survive broker restarts.
  val session_counter = new PersistentLongCounter("session_counter")

  // Accumulated metrics of destinations that have since been removed.
  var dead_topic_metrics = new DestMetricsDTO
  var dead_queue_metrics = new DestMetricsDTO
  var dead_dsub_metrics = new DestMetricsDTO

  var authenticator:Authenticator = _
  var authorizer = Authorizer()

  // Per-host logging categories, resolved in apply_update (fall back to broker's).
  var audit_log:Log = _
  var security_log:Log = _
  var connection_log:Log = _
  var console_log:Log = _

  // File-backed buffer allocator, only created when heap_bypass is configured.
  var direct_buffer_allocator:DirectBufferAllocator = null

  def resource_kind = VirtualHostKind

  @volatile
  var client_redirect:Option[String] = None

  override def toString = if (config==null) "virtual-host" else "virtual-host: "+config.id

  /**
   * Validates and then applies the configuration.
   */
  def update(config: VirtualHostDTO, on_completed:Task) = dispatch_queue {
    if ( !service_state.is_started ) {
      this.config = config
      on_completed.run
    } else {
      // in some cases we have to restart the virtual host..
      if( config.store != this.config.store ) {
        stop(^{
          this.config = config
          start(on_completed)
        })
      } else {
        this.config = config
        apply_update
        this.router.apply_update(on_completed)
      }
    }
  }

  // Re-resolves logging, authentication and authorization from `config`.
  def apply_update:Unit = {
    // Configure the logging categories...
    val log_category = config.log_category.getOrElse(new LogCategoryDTO)
    security_log = Option(log_category.security).map(Log(_)).getOrElse(broker.security_log)
    audit_log = Option(log_category.audit).map(Log(_)).getOrElse(broker.audit_log)
    connection_log = Option(log_category.connection).map(Log(_)).getOrElse(broker.connection_log)
    console_log = Option(log_category.console).map(Log(_)).getOrElse(broker.console_log)

    if (config.authentication != null) {
      if (config.authentication.enabled.getOrElse(true)) {
        // Virtual host has it's own settings.
        authenticator = new JaasAuthenticator(config.authentication, security_log)
      } else {
        // Don't use security on this host.
        authenticator = null
      }
    } else {
      // use the broker's settings..
      authenticator = broker.authenticator
    }
    if( authenticator!=null ) {
      // NOTE(review): `rules` is computed but never used — confirm whether
      // Authorizer(broker, this) is meant to consume it.
      val rules = config.access_rules.toList ::: broker.config.access_rules.toList
      authorizer = Authorizer(broker, this)
    } else {
      authorizer = Authorizer()
    }
  }

  // Async startup: optional heap-bypass allocator, then store, then (in the
  // tracker callback) session counter and router.
  override protected def _start(on_completed:Task):Unit = {
    apply_update
    if ( Option(config.heap_bypass).map(MemoryPropertyEditor.parse(_).toInt).getOrElse(0) > 0 ) {
      import org.apache.activemq.apollo.util.FileSupport._
      val tmp_dir = broker.tmp / "heapbypass" / id
      tmp_dir.recursive_delete
      direct_buffer_allocator = new ConcurrentFileDirectBufferAllocator(tmp_dir)
    }
    store = StoreFactory.create(config.store)
    val tracker = new LoggingTracker("virtual host startup", console_log)
    if( store!=null ) {
      val task = tracker.task("store startup")
      console_log.info("Starting store: "+store)
      store.start {
        if( store.service_failure ==null) {
          val task = tracker.task("store get last queue key")
          store.get_last_queue_key{ key=>
            key match {
              case Some(x)=>
                queue_id_counter.set(key.get)
              case None =>
                warn("Could not get last queue key")
            }
            task.run
          }
          if( config.purge_on_startup.getOrElse(false) ) {
            val task = tracker.task("store purge")
            store.purge {
              task.run
            }
          }
        } else {
          // Store failed to start: record the failure and drop the store.
          _service_failure = store.service_failure
          store = null
        }
        task.run
      }
    }
    tracker.callback {
      // Second phase, runs once the store (if any) has started.
      val tracker = new LoggingTracker("virtual host startup", console_log)
      if( _service_failure==null ) {
        // The default host handles persisting the connection id counter.
        if(store!=null) {
          if(session_counter.get == 0) {
            val task = tracker.task("load session counter")
            session_counter.init(store) {
              task.run()
            }
          } else {
            session_counter.connect(store)
          }
        }
        tracker.start(router)
      }
      tracker.callback(on_completed)
      if( _service_failure!=null ) {
        stop(NOOP)
      }
    }
  }

  // Async shutdown: router first, then session counter + store, then the
  // direct buffer allocator on the dispatch queue.
  override protected def _stop(on_completed:Task):Unit = {
    val tracker = new LoggingTracker("virtual host shutdown", console_log)
    tracker.stop(router);
    tracker.callback(^{
      val tracker = new LoggingTracker("virtual host shutdown", console_log)
      if( store!=null ) {
        val task = tracker.task("store session counter")
        session_counter.disconnect{
          tracker.stop(store);
          task.run()
        }
      }
      tracker.callback(dispatch_queue.runnable {
        if( direct_buffer_allocator !=null ) {
          direct_buffer_allocator.close
          // NOTE(review): the bare expression below is a no-op; it looks like
          // `direct_buffer_allocator = null` was intended — confirm.
          direct_buffer_allocator
        }
        on_completed.run()
      })
    })
  }

  def local_router = router.asInstanceOf[LocalRouter]

  // NOTE(review): dead_dsub_metrics is not reset here — confirm intended.
  def reset_metrics = {
    dead_queue_metrics = new DestMetricsDTO
    dead_topic_metrics = new DestMetricsDTO
  }

  // Sums a collection of destination metrics; `objects` counts destinations,
  // taking nested aggregate counts into account.
  def aggregate_dest_metrics(metrics:Iterable[DestMetricsDTO]):AggregateDestMetricsDTO = {
    metrics.foldLeft(new AggregateDestMetricsDTO) { (to, from) =>
      DestinationMetricsSupport.add_destination_metrics(to, from)
      from match {
        case from:AggregateDestMetricsDTO =>
          to.objects += from.objects
        case _ =>
          to.objects += 1
      }
      to
    }
  }

  // Aggregates live topic metrics plus metrics of removed topics.
  def get_topic_metrics:FutureResult[AggregateDestMetricsDTO] = sync(this) {
    val topics:Iterable[Topic] = local_router.local_topic_domain.destinations
    val metrics: Future[Iterable[Result[DestMetricsDTO, Throwable]]] = Future.all {
      topics.map(_.status(false, false).map(_.map_success(_.metrics)))
    }
    metrics.map( x => Success {
      val rc = aggregate_dest_metrics(x.flatMap(_.success_option))
      DestinationMetricsSupport.add_destination_metrics(rc, dead_topic_metrics)
      rc
    })
  }

  import FutureResult._

  // Aggregates live queue metrics plus metrics of removed queues.
  def get_queue_metrics:FutureResult[AggregateDestMetricsDTO] = sync(this) {
    val queues:Iterable[Queue] = local_router.local_queue_domain.destinations
    val metrics = sync_all (queues) { queue =>
      queue.get_queue_metrics
    }
    metrics.map( x => Success {
      val rc = aggregate_dest_metrics(x.flatMap(_.success_option))
      DestinationMetricsSupport.add_destination_metrics(rc, dead_queue_metrics)
      rc
    })
  }

  // Aggregates durable subscription metrics plus metrics of removed dsubs.
  def get_dsub_metrics:FutureResult[AggregateDestMetricsDTO] = sync(this) {
    val dsubs:Iterable[Queue] = local_router.local_dsub_domain.destination_by_id.values
    val metrics = sync_all (dsubs) { dsub =>
      dsub.get_queue_metrics
    }
    metrics.map( x => Success {
      val rc = aggregate_dest_metrics(x.flatMap(_.success_option))
      DestinationMetricsSupport.add_destination_metrics(rc, dead_dsub_metrics)
      rc
    })
  }

  def get_dest_metrics:FutureResult[AggregateDestMetricsDTO] = {
    // zero out the enqueue stats on the dsubs since they will already be accounted for in the topic
    // stats.
    Future.all(List(get_queue_metrics, get_topic_metrics, get_dsub_metrics)).map { x =>
      val y = x.toArray
      val (queue, topic, dsub) = (y(0), y(1), y(2))
      var rc = new AggregateDestMetricsDTO
      for( queue <- queue.success_option; topic <- topic.success_option; dsub <- dsub.success_option ) {
        dsub.enqueue_item_counter = 0L
        dsub.enqueue_size_counter = 0L
        dsub.enqueue_ts = 0L
        rc = aggregate_dest_metrics(List(queue, dsub))
        DestinationMetricsSupport.add_destination_metrics(rc, topic)
        rc.objects += topic.objects
        rc.current_time = broker.now
      }
      Success(rc)
    }
  }

}
| chirino/activemq-apollo | apollo-broker/src/main/scala/org/apache/activemq/apollo/broker/VirtualHost.scala | Scala | apache-2.0 | 10,816 |
package com.twitter.finagle.loadbalancer.aperture
import com.twitter.finagle._
import com.twitter.finagle.loadbalancer.p2c.P2CPick
import com.twitter.finagle.loadbalancer.{Balancer, NodeT, DistributorT}
import com.twitter.finagle.util.Rng
import com.twitter.util.{Future, Time}
import scala.collection.immutable.VectorBuilder
import scala.collection.mutable.ListBuffer
/**
* The aperture distributor balances load onto a window, the aperture, of
* underlying capacity. The distributor exposes a control mechanism so that a
* controller can adjust the aperture according to load conditions.
*
* The window contains a number of discrete serving units, one for each
* node. No load metric is prescribed: this can be mixed in separately.
*
* The underlying nodes are arranged in a consistent fashion: an
* aperture of a given size always refers to the same set of nodes; a
* smaller aperture to a subset of those nodes so long as the nodes are of
* equal `status` (i.e. unhealthy nodes are de-prioritized). Thus, it is
* relatively harmless to adjust apertures frequently, since underlying nodes
* are typically backed by pools, and will be warm on average.
*/
private[loadbalancer] trait Aperture[Req, Rep] extends Balancer[Req, Rep] { self =>
  import DeterministicOrdering._
  protected type Node <: ApertureNode
  protected trait ApertureNode extends NodeT[Req, Rep] {
    /**
     * A token is a random integer associated with an Aperture node.
     * It persists through node updates, but is not necessarily
     * unique. Aperture uses this token to order the nodes when
     * deterministic ordering is not enabled or available. Since
     * the token is assigned at Node creation, this guarantees
     * a stable order across distributor rebuilds.
     */
    val token: Int = rng.nextInt()
  }
  /**
   * The random number generator used to pick two nodes for
   * comparison – since aperture uses p2c for selection.
   */
  protected def rng: Rng
  /**
   * The minimum aperture as specified by the user config. Note this value is advisory
   * and the distributor may actually derive a new min based on this. See `minUnits`
   * for more details.
   */
  protected def minAperture: Int
  /**
   * Enables [[Aperture]] to read coordinate data from [[DeterministicOrdering]]
   * to derive an ordering for the endpoints used by this [[Balancer]] instance.
   */
  protected def useDeterministicOrdering: Boolean
  /**
   * Adjust the aperture by `n` serving units.
   * Delegates to the current distributor via the balancer's serial `updater`,
   * so concurrent adjustments are serialized.
   */
  protected def adjust(n: Int): Unit = invoke(_.adjust(n))
  /**
   * Widen the aperture by one serving unit.
   */
  protected def widen(): Unit = adjust(1)
  /**
   * Narrow the aperture by one serving unit.
   */
  protected def narrow(): Unit = adjust(-1)
  /**
   * The current aperture. This is never less than 1, or more
   * than `units`.
   */
  protected def aperture: Int = dist.aperture
  /**
   * The maximum aperture serving units.
   */
  protected def maxUnits: Int = dist.max
  /**
   * The minimum aperture serving units.
   */
  protected def minUnits: Int = dist.min
  // Exported gauges: current aperture size and whether deterministic ordering
  // is enabled (reported as 1/0 since gauges are floats).
  private[this] val gauges = Seq(
    statsReceiver.addGauge("aperture") { aperture },
    statsReceiver.addGauge("use_deterministic_ordering") {
      if (useDeterministicOrdering) 1F else 0F
    }
  )
  private[this] val coordinateUpdates = statsReceiver.counter("coordinate_updates")
  // Rebuild the distributor whenever the process-wide coordinate changes so
  // the ring ordering tracks the latest coordinate sample.
  private[this] val coordObservation = DeterministicOrdering.changes.respond { _ =>
    // One nice side-effect of deferring to the balancers `updater` is
    // that we serialize and collapse concurrent updates. So if we have a volatile
    // source that is updating the coord, we are resilient to that. We could
    // go even further by rate limiting the changes if we need to.
    coordinateUpdates.incr()
    self.rebuild()
  }
  // Cached extractor functions to avoid re-allocating lambdas on every sort /
  // scan over the node vector.
  private[this] val nodeToken: ApertureNode => Int = _.token
  private[this] val nodeOpen: ApertureNode => Boolean = _.status == Status.Open
  /**
   * A distributor that uses P2C to select nodes from within a window ("aperture").
   *
   * @param vector The ordered collection over which the aperture is applied
   * and p2c selects over.
   *
   * @param original The original vector before any ordering is applied.
   * This is necessary to keep intact since the updates we receive from the
   * Balancer apply a specific ordering to the collection of nodes.
   *
   * @param busy The nodes which have been shuffled to the back of the collection
   * because they are considered busy as per their `status`.
   *
   * @param coordinate The last sample read from [[DeterministicOrdering]] that
   * the distributor used.
   *
   * @param initAperture The initial aperture to use.
   */
  protected class Distributor(
    vector: Vector[Node],
    original: Vector[Node],
    busy: Vector[Node],
    coordinate: Option[Coord],
    initAperture: Int)
      extends DistributorT[Node](vector)
      with P2CPick[Node] {
    type This = Distributor
    val max: Int = vector.size
    val min: Int = {
      // Advisory minimum, clamped to the number of available nodes.
      val default = math.min(minAperture, vector.size)
      if (!useDeterministicOrdering) default else {
        coordinate match {
          // We want to additionally ensure that we get full ring coverage
          // when there are fewer clients than servers. For example, imagine the
          // degenerate case where we have a min aperture of size 1 and fewer
          // clients than servers – we know that we will at most cover `size` of
          // of the `vector.size` server ring. Thus, we need an aperture size
          // of `vector.size` / `size`.
          case Some(FromInstanceId(_, _, size)) if size < vector.size =>
            val minSize: Double = vector.size / size.toDouble
            // Since `minSize` can be fractional we do our best to approximate
            // the size of the min size needed to cover the entire server ring.
            // Technically, we could make this "perfect" by taking the fractional
            // bit and translating that to a percentage of the peer group ceiling
            // the value and the remainder flooring it, but we avoid the added
            // complexity by just ceiling for everyone.
            math.max(default, math.ceil(minSize).toInt)
          case _ => default
        }
      }
    }
    // We are guaranteed that writes to aperture are serialized since
    // we only expose them via the narrow, widen, etc. methods above. Those
    // defer to the balancers `updater` which is serial. Therefore, we only
    // need to guarantee visibility across threads and don't need to
    // provide other synchronization between threads.
    @volatile private[this] var _aperture: Int = initAperture
    // Make sure the aperture is within bounds [minAperture, maxAperture].
    // (adjust(0) clamps the inherited `initAperture`, which may be stale
    // relative to this distributor's freshly computed min/max.)
    adjust(0)
    /**
     * Returns the current aperture.
     */
    def aperture: Int = _aperture
    /**
     * Adjusts the aperture by `n`.
     */
    def adjust(n: Int): Unit = {
      _aperture = math.max(min, math.min(max, _aperture + n))
    }
    protected def rng: Rng = self.rng
    // P2C picks uniformly from the first `aperture` nodes of the ordered vector.
    protected def bound: Int = aperture
    protected def emptyNode: Node = failingNode(emptyException)
    def rebuild(): This = rebuild(original)
    /**
     * Returns a new vector with the nodes sorted by `token` which is
     * deterministic across rebuilds but random globally, since `token`
     * is assigned randomly per process when the node is created.
     */
    private[this] def tokenOrder(vec: Vector[Node]): Vector[Node] =
      vec.sortBy(nodeToken)
    /**
     * Returns a new vector with the nodes ordered relative to the coordinate in
     * `coord`. This gives the distributor a deterministic order across process
     * boundaries.
     */
    private[this] def ringOrder(vec: Vector[Node], coord: Double): Vector[Node] = {
      val order = new Ring(vec.size).alternatingIter(coord)
      val builder = new VectorBuilder[Node]
      while (order.hasNext) { builder += vec(order.next()) }
      builder.result
    }
    /**
     * Returns a new vector which is ordered by the node's status. Note, it is
     * important that this is a stable sort since we care about the source
     * order when using deterministic ordering.
     * Open nodes come first, then Busy, then Closed; the Busy nodes are also
     * accumulated into `busyBuilder` so the caller can track them for
     * `needsRebuild`.
     */
    private[this] def statusOrder(
      vec: Vector[Node],
      busyBuilder: VectorBuilder[Node]
    ): Vector[Node] = {
      val resultNodes = new VectorBuilder[Node]
      val busyNodes = new ListBuffer[Node]
      val closedNodes = new ListBuffer[Node]
      val iter = vec.iterator
      while (iter.hasNext) {
        val node = iter.next()
        node.status match {
          case Status.Open => resultNodes += node
          case Status.Busy => busyNodes += node
          case Status.Closed => closedNodes += node
        }
      }
      busyBuilder ++= busyNodes
      resultNodes ++= busyNodes ++= closedNodes
      resultNodes.result
    }
    /**
     * Rebuilds the distributor and sorts the vector in two possible ways:
     *
     * 1. If `useDeterministicOrdering` is set to true and [[DeterministicOrdering]]
     * has a coordinate set, then the coordinate is used which gives the
     * distributor a well-defined, deterministic order across process boundaries.
     *
     * 2. Otherwise, the vector is sorted by a node's token field.
     */
    def rebuild(vec: Vector[Node]): This = {
      if (vec.isEmpty) {
        new Distributor(vec, vec, busy, coordinate, aperture)
      } else {
        DeterministicOrdering() match {
          case someCoord@Some(coord) if useDeterministicOrdering =>
            val busyBuilder = new VectorBuilder[Node]
            val newVec = statusOrder(ringOrder(vec, coord.value), busyBuilder)
            new Distributor(newVec, vec, busyBuilder.result, someCoord, aperture)
          case _ =>
            val busyBuilder = new VectorBuilder[Node]
            val newVec = statusOrder(tokenOrder(vec), busyBuilder)
            new Distributor(newVec, vec, busyBuilder.result, None, aperture)
        }
      }
    }
    // To reduce the amount of rebuilds needed, we rely on the probabilistic
    // nature of the p2c pick. That is, we know that only when a significant
    // portion of the underlying vector is unavailable will we return an
    // unavailable node to the layer above and trigger a rebuild. We do however
    // want to return to our "stable" ordering as soon as we notice that a
    // previously busy node is now available.
    def needsRebuild: Boolean = busy.exists(nodeOpen)
  }
  protected def initDistributor(): Distributor =
    new Distributor(Vector.empty, Vector.empty, Vector.empty, None, 1)
  override def close(deadline: Time): Future[Unit] = {
    // Remove the exported gauges and stop observing coordinate changes before
    // closing the underlying balancer.
    gauges.foreach(_.remove())
    coordObservation.close(deadline).before { super.close(deadline) }
  }
}
| koshelev/finagle | finagle-core/src/main/scala/com/twitter/finagle/loadbalancer/aperture/Aperture.scala | Scala | apache-2.0 | 10,795 |
def fmap[A, B](f: A => B)(fa: Option[A]): Option[B] | hmemcpy/milewski-ctfp-pdf | src/content/1.7/code/scala/snippet05.scala | Scala | gpl-3.0 | 51 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.schema
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.types.Row
import scala.collection.JavaConversions._
/**
* Schema that describes both a logical and physical row.
*/
/**
 * Schema that describes both a logical (Calcite [[RelDataType]]) and
 * physical (Flink [[TypeInformation]]) row.
 *
 * @param logicalRowType the logical Calcite row type backing this schema
 */
class RowSchema(private val logicalRowType: RelDataType) {
  // Physical field types, derived lazily by translating each logical
  // Calcite field type into its Flink TypeInformation counterpart.
  private lazy val physicalRowFieldTypes: Seq[TypeInformation[_]] =
    logicalRowType.getFieldList.map(field => FlinkTypeFactory.toTypeInfo(field.getType))
  // Physical row type combining the field types with the field names.
  private lazy val physicalRowTypeInfo: TypeInformation[Row] =
    new RowTypeInfo(physicalRowFieldTypes.toArray, fieldNames.toArray)
  /** Returns the arity of the schema. */
  def arity: Int = logicalRowType.getFieldCount
  /** Returns the [[RelDataType]] of the schema. */
  def relDataType: RelDataType = logicalRowType
  /** Returns the [[TypeInformation]] of the schema. */
  def typeInfo: TypeInformation[Row] = physicalRowTypeInfo
  /** Returns the [[TypeInformation]] of the fields of the schema. */
  def fieldTypeInfos: Seq[TypeInformation[_]] = physicalRowFieldTypes
  /** Returns the field names. */
  def fieldNames: Seq[String] = logicalRowType.getFieldNames
}
| zohar-mizrahi/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/schema/RowSchema.scala | Scala | apache-2.0 | 2,158 |
/*
* Copyright 2016-2017 original author or authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tap.pipelines
import akka.NotUsed
import akka.stream.scaladsl.{Flow, Zip}
import tap.analysis.textshapes.TfIdfShape
import tap.data.{OldTapDocument, OldTapSection}
import io.nlytx.commons.ranking.TfIdf
import scala.collection.immutable.ListMap
import scala.concurrent.Future
/**
* Created by andrew@andrewresearch.net on 27/2/17.
*/
object Tfidf {
  /**
   * Akka Streams flow that maps each document (represented as a list of
   * terms) to its non-weighted tf-idf score map via [[TfIdf]].
   */
  val pipeline: Flow[List[String], List[Map[String, Double]], NotUsed] =
    Flow[List[String]].map(terms => TfIdf.calculateNonWeighted(terms))
  /*
  val termsInDocs = Flow[List[String]].map(_.map( d => TfIdf.rawTermFrequency(d)))
  val idf = Flow[List[Map[String,Long]]].map(_.map( m => TfIdf.inverseDocFrequency(m ,m.size)))
  val corpusPipeline = termsInDocs.via(idf)
  val wtf = Flow[String].map(TfIdf.weightedTermFrequency(_))
  val tfIdf = Zip[Map[String,Double],Map[String,Double]] //.zipper((v1:Map[String,Double],v2:Map[String,Double]) => TfIdf.tfIdf(v1,v2))
  */
}
| uts-cic/tap | src/main/scala/tap/pipelines/Tfidf.scala | Scala | apache-2.0 | 1,539 |
package logreceiver.processor.kinesis
import java.nio.ByteBuffer
import java.util.concurrent.TimeUnit
import com.github.vonnagy.service.container.health.{HealthInfo, HealthState}
import com.github.vonnagy.service.container.metrics.{Counter, Meter}
import io.github.cloudify.scala.aws.kinesis.Client
import logreceiver.processor.{LogBatch, Processor, ProcessorReady}
import scala.collection.JavaConversions._
import scala.util.{Failure, Success}
/**
* Created by ivannagy on 4/10/15.
*/
/**
 * A log processor that forwards received log batches to an AWS Kinesis
 * stream ("log-stream"). Health is reported based on whether the stream
 * could be verified at startup.
 */
class KinesisProcessor extends Processor {
  import context.{dispatcher, system}
  // Kinesis connection settings, read from the service configuration.
  val endpoint = context.system.settings.config.getString("log.processors.kinesis.endpoint")
  val accessKey = context.system.settings.config.getString("log.processors.kinesis.access-key")
  val accessSecret = context.system.settings.config.getString("log.processors.kinesis.access-secret")
  val timeout = context.system.settings.config.getDuration("log.processors.kinesis.timeout", TimeUnit.MILLISECONDS).toInt
  // Whether this processor believes it is attached to Kinesis; drives `getHealth`.
  var connected = false
  implicit val kinesisClient = Client.fromCredentials(accessKey, accessSecret, endpoint)
  // Lazily verified stream handles; forced in `preStart`.
  lazy val streams = verifyStreams()
  def lineMetricPrefix = "processors.kinesis"
  val batchReceivedCount = Counter("processors.kinesis.batch.receive")
  val batchReceivedMeter = Meter("processors.kinesis.batch.receive.meter")
  val failedPutCount = Counter("processors.kinesis.put-failure")
  override def preStart() {
    super.preStart
    // Force stream verification now; if it throws, the actor fails to start.
    streams
    // BUG FIX: `connected` was never set to true anywhere, so `getHealth`
    // always reported DEGRADED. Mark the processor attached once the
    // streams have been located.
    connected = true
    self ! ProcessorReady
  }
  override def postStop() {
    log.info("Kinesis processor stopping: {}", context.self.path)
    connected = false
    super.postStop
  }
  def running: Receive = {
    // Handle the batch
    case b@LogBatch(token, frameId, count, payload) => processBatch(b)
  }
  /** Reports OK when attached to Kinesis, DEGRADED otherwise. */
  def getHealth: HealthInfo = connected match {
    case true =>
      new HealthInfo("kinesis", HealthState.OK, s"The processor running and attached to kinesis")
    case false =>
      new HealthInfo("kinesis", HealthState.DEGRADED, s"The processor is running, but can't attach to kinesis")
  }
  /**
   * Make sure the the proper streams are up and running before registering or accepting any log work
   */
  def verifyStreams(): Map[String, StreamManager] = {
    // BUG FIX: the message had two "{}" placeholders but only one argument.
    log.info("Locating the stream {}", "log-stream")
    Map(("log-stream", new StreamManager("log-stream")))
  }
  /**
   * Converts the batch payload into Kinesis records and performs a multi-put
   * against the "log-stream" stream, counting any per-record failures.
   */
  def processBatch(batch: LogBatch): Unit = {
    batchReceivedCount.incr
    batchReceivedMeter.meter {
      val data = processPayload(batch.payload, Seq[Tuple2[ByteBuffer, String]]())
      val stream = streams.get("log-stream").get.stream
      if (stream.isDefined) {
        val putData = stream.get.multiPut(data.toList)
        kinesisClient.execute(putData) onComplete {
          case Failure(f) =>
            log.error("Error trying to write records to the log-stream stream", f)
          case Success(putResult) =>
            putResult.result.getFailedRecordCount.toInt match {
              case 0 =>
                log.debug(s"Wrote ${data.size} records to log-stream")
              case count =>
                failedPutCount.incr(count.toLong)
                log.warning(s"Failed to write $count records to log-stream")
                // Summarize failures grouped by Kinesis error code.
                putResult.result.getRecords.filter(r => r.getErrorCode != null && r.getErrorCode.length > 0).groupBy(_.getErrorCode).foreach { r =>
                  log.warning(s"${r._1}: ${r._2.size}")
                }
              // TODO What do we do here
            }
        }
      }
      else {
        log.error(s"Unable to utilize the stream: log-stream")
      }
    }
  }
}
| vonnagy/log-receiver | src/main/scala/logreceiver/processor/kinesis/KinesisProcessor.scala | Scala | apache-2.0 | 3,587 |
package com.gilt.pickling.avro
import org.apache.avro.Schema
import com.gilt.pickling.TestUtils._
import com.gilt.pickling.TestObjs.{InnerObject, ListOfObjects}
import org.apache.avro.generic.GenericData
import org.scalatest.{Assertions, FunSuite}
import scala.pickling._
import scala.collection.JavaConverters._
object ListOfObjectsTest {
  // Shared fixture: a wrapper around a list of three inner objects with ids 1..3.
  val obj = ListOfObjects((1 to 3).map(i => InnerObject(i)).toList)
}
class ListOfObjectsTest extends FunSuite with Assertions {
  import ListOfObjectsTest.obj
  test("Pickle a case class with a list of objects") {
    val pickled = obj.pickle
    assert(generateBytesFromAvro(obj) === pickled.value)
  }
  test("Unpickle a case class with a list of objects") {
    val avroBytes = generateBytesFromAvro(obj)
    val restored: ListOfObjects = avroBytes.unpickle[ListOfObjects]
    assert(obj === restored)
  }
  test("Round trip a case class with a list of objects") {
    val pickled = obj.pickle
    val roundTripped: ListOfObjects = pickled.unpickle[ListOfObjects]
    assert(roundTripped === obj)
  }
  /**
   * Builds the expected Avro byte payload for `toSerialize` directly through
   * the generic Avro API, for comparison against the pickled output.
   */
  private def generateBytesFromAvro(toSerialize: ListOfObjects) = {
    val schema: Schema = retrieveAvroSchemaFromFile("/avro/object/ListOfObjects.avsc")
    val innerSchema = schema.getField("list").schema().getElementType
    val innerRecords = toSerialize.list.map { inner =>
      val innerRecord = new GenericData.Record(innerSchema)
      innerRecord.put("id", inner.id)
      innerRecord
    }
    val outerRecord = new GenericData.Record(schema)
    outerRecord.put("list", innerRecords.asJava)
    convertToBytes(schema, outerRecord)
  }
}
| gilt/gfc-avro | src/test/scala/com/gilt/pickling/avro/ListOfObjectsTest.scala | Scala | apache-2.0 | 1,568 |
package playing
import scala.language.experimental.macros
import scala.reflect.macros.whitebox.Context
/**
* An plying with Scala macro
* Created by CAB on 01.03.2015.
*/
/** Compile-time `print` macro that prefixes messages with "Macro: ". */
object Macro {
  // Macro entry point: every call site of `print` is expanded at compile
  // time by `impl` below.
  def print(msg: String): Unit = macro impl
  // Macro implementation: `reify` quotes the println call as an AST and
  // `splice` inlines the caller's `msg` expression into it.
  def impl(c: Context)(msg: c.Expr[String]): c.Expr[Unit] = {
    import c.universe._
    reify{
      println("Macro: " + msg.splice)}}}
package models.attribute
import models.region.{Region, RegionTable}
import models.utils.MyPostgresDriver.simple._
import play.api.Play.current
import play.api.db.slick
import scala.slick.lifted.{ForeignKeyQuery, ProvenShape}
import scala.slick.jdbc.{StaticQuery => Q}
import scala.language.postfixOps
case class GlobalClusteringSession(globalClusteringSessionId: Int, regionId: Int, timeCreated: java.sql.Timestamp)
/** Slick mapping for the sidewalk.global_clustering_session table. */
class GlobalClusteringSessionTable(tag: Tag) extends Table[GlobalClusteringSession](tag, Some("sidewalk"), "global_clustering_session") {
  def globalClusteringSessionId: Column[Int] = column[Int]("global_clustering_session_id", O.NotNull, O.PrimaryKey, O.AutoInc)
  def regionId: Column[Int] = column[Int]("region_id", O.NotNull)
  def timeCreated: Column[java.sql.Timestamp] = column[java.sql.Timestamp]("time_created", O.NotNull)
  // Bidirectional mapping between table rows and the case class.
  def * : ProvenShape[GlobalClusteringSession] = (globalClusteringSessionId, regionId, timeCreated) <>
    ((GlobalClusteringSession.apply _).tupled, GlobalClusteringSession.unapply)
  // FK to the region this clustering session covers.
  def region: ForeignKeyQuery[RegionTable, Region] =
    foreignKey("global_clustering_session_region_id_fkey", regionId, TableQuery[RegionTable])(_.regionId)
}
/**
* Data access object for the GlobalClusteringSessionTable table.
*/
/**
 * Data access object for the global_clustering_session table.
 */
object GlobalClusteringSessionTable {
  val db: slick.Database = play.api.db.slick.DB
  val globalClusteringSessions: TableQuery[GlobalClusteringSessionTable] = TableQuery[GlobalClusteringSessionTable]
  val globalAttributeUserAttributes: TableQuery[GlobalAttributeUserAttributeTable] = TableQuery[GlobalAttributeUserAttributeTable]
  /**
   * Gets the list of region_ids whose underlying data changed during single-user clustering.
   *
   * A `global_attribute` row with no matching `global_attribute_user_attribute` row means the
   * contributing user added data or was marked low quality. A `user_attribute` row with no
   * matching `global_attribute_user_attribute` row means a new user or newly added data.
   * The union of both region_id sets yields every region that needs re-clustering.
   */
  def getNeighborhoodsToReCluster: List[Int] = db.withSession { implicit session =>
    // global_attribute LEFT JOIN global_attribute_user_attribute; nulls flag
    // low-quality or updated users.
    val staleRegionIds = GlobalAttributeTable.globalAttributes
      .leftJoin(globalAttributeUserAttributes).on(_.globalAttributeId === _.globalAttributeId)
      .filter(_._2.globalAttributeId.?.isEmpty)
      .map(_._1.regionId)
    // global_attribute_user_attribute RIGHT JOIN user_attribute; nulls flag
    // new or updated users.
    val freshRegionIds = globalAttributeUserAttributes
      .rightJoin(UserAttributeTable.userAttributes).on(_.userAttributeId === _.userAttributeId)
      .filter(_._1.userAttributeId.?.isEmpty)
      .map(_._2.regionId)
    // `union` (as opposed to unionAll) removes duplicate region_ids.
    (staleRegionIds union freshRegionIds).list
  }
  /**
   * Truncates global_clustering_session; the CASCADE clears global_attribute and
   * global_attribute_user_attribute as well.
   */
  def truncateTables(): Unit = db.withTransaction { implicit session =>
    Q.updateNA("TRUNCATE TABLE global_clustering_session CASCADE").execute
  }
  /**
   * Deletes the clustering sessions (and, via FK cascade, the global attributes)
   * for the given region_ids.
   */
  def deleteGlobalClusteringSessions(regionIds: List[Int]): Int = db.withTransaction { implicit session =>
    globalClusteringSessions.filter(_.regionId inSet regionIds).delete
  }
  /** Inserts a new clustering session and returns its generated id. */
  def save(newSess: GlobalClusteringSession): Int = db.withTransaction { implicit session =>
    (globalClusteringSessions returning globalClusteringSessions.map(_.globalClusteringSessionId)) += newSess
  }
}
| ProjectSidewalk/SidewalkWebpage | app/models/attribute/GlobalClusteringSessionTable.scala | Scala | mit | 3,979 |
package peschke.console.progressbar.cli
import scopt.OptionParser
case class Settings(initial: Int = 0, total: Int = 100, port: Int = 8091)
object Settings {
  /** scopt parser wiring command-line flags into a [[Settings]] instance. */
  implicit val optionParser: OptionParser[Settings] = new OptionParser[Settings]("progress-bar") {
    note("Starts a progress bar and listens for commands on the specified port.")
    // -i / --initial: starting value of the bar.
    opt[Int]('i', "initial")
      .text("Initial value for progress bar (defaults to 0)")
      .action { (value, settings) => settings.copy(initial = value) }
    // -t / --total: value at which the bar is complete.
    opt[Int]('t', "total")
      .text("Maximum value for the progress bar (defaults to 100)")
      .action { (value, settings) => settings.copy(total = value) }
    // -p / --port: TCP port on which update commands are received.
    opt[Int]('p', "port")
      .text("Port used to listen for updates (defaults to 8091)")
      .action { (value, settings) => settings.copy(port = value) }
    help("help")
    note {
      s"""|
          |Commands
          |========
          |
          |Any integer (positive or negative) will update the current count by that amount.
          |The string "close" closes the connection without closing the progress bar.
          |The string "quit" exits the progress bar.
          |The string "finish" completes the progress bar, then exits.""".stripMargin
    }
  }
}
| morgen-peschke/scala-progress-bar | example/src/main/scala/peschke/console/progressbar/cli/Settings.scala | Scala | mit | 1,175 |
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scray.querying
import scray.querying.description.Clause
import java.util.UUID
import scray.querying.description.ColumnGrouping
import scray.querying.description.TableIdentifier
import scray.querying.description.QueryRange
import scray.querying.description.Columns
import scray.querying.description.ColumnOrdering
/**
* represents all possible queries
*/
/**
 * Represents all possible queries. Implementations describe what to fetch
 * (columns, table), how to restrict it (where AST), and how to shape the
 * result (grouping, ordering, range).
 */
trait Query extends Serializable {
  /**
   * query id, used to debug the query and query maintenance (e.g. status or kill)
   */
  def getQueryID: UUID
  /**
   * the name of the query space this query should be executed reside in
   */
  def getQueryspace: String
  /**
   * columns in the result set
   */
  def getResultSetColumns: Columns
  /**
   * table description in which to look for data
   */
  def getTableIdentifier: TableIdentifier
  /**
   * returns an AST in prefix-notation of the where condition of this query;
   * None when the query has no where clause
   */
  def getWhereAST: Option[Clause]
  /**
   * whether or not the results should be grouped after a column
   */
  def getGrouping: Option[ColumnGrouping]
  /**
   * whether or not the results should be ordered according to a single column
   */
  def getOrdering: Option[ColumnOrdering[_]]
  /**
   * whether or not this query returns an interval over the result set
   */
  def getQueryRange: Option[QueryRange]
  /**
   * returns a modified copy of this query with its where AST replaced by `ast`
   */
  def transformedAstCopy(ast: Option[Clause]): Query
}
| scray/scray | scray-querying/modules/scray-querying/src/main/scala/scray/querying/Query.scala | Scala | apache-2.0 | 2,106 |
package org.jetbrains.plugins.scala.lang.scaladoc.psi.impl
import com.intellij.lang.ASTNode
import com.intellij.psi.PsiElement
import com.intellij.psi.javadoc.{PsiDocComment, PsiDocTagValue}
import org.jetbrains.plugins.scala.lang.psi.api.ScalaElementVisitor
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.createScalaDocTagName
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementImpl
import org.jetbrains.plugins.scala.lang.scaladoc.lexer.ScalaDocTokenType
import org.jetbrains.plugins.scala.lang.scaladoc.psi.api.ScDocTag
/** PSI implementation of a Scaladoc tag (e.g. `@param`, `@return`). */
class ScDocTagImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScDocTag {
  override def toString: String = "DocTag"
  override protected def acceptScala(visitor: ScalaElementVisitor): Unit =
    visitor.visitTag(this)
  /** The enclosing doc comment, or null when the parent is not a doc comment. */
  override def getContainingComment: PsiDocComment =
    getParent match {
      case comment: PsiDocComment => comment
      case _ => null
    }
  override def getNameElement: PsiElement = findChildByType[PsiElement](ScalaDocTokenType.DOC_TAG_NAME)
  override def getDataElements: Array[PsiElement] = getChildren
  override def getValueElement: PsiDocTagValue = findChildByClass(classOf[PsiDocTagValue])
  /** The tag name text (e.g. "@param"), or null when absent. */
  override def getName: String = {
    val nameElement = getNameElement
    if (nameElement == null) null
    else nameElement.getText
  }
  /** Replaces the tag-name token with `name`, when one exists; returns this element. */
  override def setName(name: String): PsiElement = {
    val nameNode = findChildByType[PsiElement](ScalaDocTokenType.DOC_TAG_NAME)
    if (nameNode != null) {
      nameNode.replace(createScalaDocTagName(name))
    }
    this
  }
}
} | JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/scaladoc/psi/impl/ScDocTagImpl.scala | Scala | apache-2.0 | 1,566 |
/*
* Copyright 2011-2021 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.extensions.iterativebatch.compiler
package graph
import scala.collection.JavaConversions._
import org.objectweb.asm.Type
import com.asakusafw.lang.compiler.extension.directio.{ DirectFileIoModels, OutputPattern }
import com.asakusafw.lang.compiler.model.graph.ExternalOutput
import com.asakusafw.lang.compiler.planning.SubPlan
import com.asakusafw.spark.compiler._
import com.asakusafw.spark.compiler.graph.{ CacheOnce, Instantiator }
import com.asakusafw.spark.compiler.planning.{ IterativeInfo, SubPlanInfo }
import com.asakusafw.spark.compiler.spi.NodeCompiler
import com.asakusafw.spark.extensions.iterativebatch.compiler.spi.RoundAwareNodeCompiler
/**
 * Compiles the "prepare" stage of an iterative direct-file output subplan.
 * Chooses a gathered (sorted/grouped) or flat output class builder based on
 * the output pattern, and mixes in a caching strategy matching the subplan's
 * iterative recompute kind.
 */
class DirectOutputPrepareEachForIterativeCompiler extends RoundAwareNodeCompiler {
  // Supported only when direct output is enabled and the subplan's primary
  // operator is an ExternalOutput backed by a direct-file I/O model.
  override def support(
    subplan: SubPlan)(
    implicit context: NodeCompiler.Context): Boolean = {
    if (context.options.useOutputDirect) {
      val subPlanInfo = subplan.getAttribute(classOf[SubPlanInfo])
      val primaryOperator = subPlanInfo.getPrimaryOperator
      if (primaryOperator.isInstanceOf[ExternalOutput]) {
        val operator = primaryOperator.asInstanceOf[ExternalOutput]
        Option(operator.getInfo).map { info =>
          DirectFileIoModels.isSupported(info)
        }.getOrElse(false)
      } else {
        false
      }
    } else {
      false
    }
  }
  override def instantiator: Instantiator = DirectOutputPrepareEachForIterativeInstantiator
  // Builds and registers the generated class for the subplan, returning its ASM type.
  override def compile(
    subplan: SubPlan)(
    implicit context: NodeCompiler.Context): Type = {
    assert(support(subplan), s"The subplan is not supported: ${subplan}")
    val subPlanInfo = subplan.getAttribute(classOf[SubPlanInfo])
    val primaryOperator = subPlanInfo.getPrimaryOperator
    assert(primaryOperator.isInstanceOf[ExternalOutput],
      s"The primary operator should be external output: ${primaryOperator} [${subplan}]")
    val operator = primaryOperator.asInstanceOf[ExternalOutput]
    val model = DirectFileIoModels.resolve(operator.getInfo)
    val dataModelRef = operator.getOperatorPort.dataModelRef
    val pattern = OutputPattern.compile(dataModelRef, model.getResourcePattern, model.getOrder)
    val iterativeInfo = IterativeInfo.get(subplan)
    // Gather-required patterns need the grouped builder; otherwise use the
    // flat builder. In both cases the cache mixin follows the recompute kind:
    // ALWAYS -> CacheAlways, PARAMETER -> CacheByParameter (keyed on the
    // iterative parameters), NEVER -> CacheOnce.
    val builder = if (pattern.isGatherRequired) {
      iterativeInfo.getRecomputeKind match {
        case IterativeInfo.RecomputeKind.ALWAYS =>
          new DirectOutputPrepareGroupEachForIterativeClassBuilder(
            operator)(
            pattern,
            model)(
            subplan.label) with CacheAlways
        case IterativeInfo.RecomputeKind.PARAMETER =>
          new DirectOutputPrepareGroupEachForIterativeClassBuilder(
            operator)(
            pattern,
            model)(
            subplan.label) with CacheByParameter {
            override val parameters: Set[String] = iterativeInfo.getParameters.toSet
          }
        case IterativeInfo.RecomputeKind.NEVER =>
          new DirectOutputPrepareGroupEachForIterativeClassBuilder(
            operator)(
            pattern,
            model)(
            subplan.label) with CacheOnce
      }
    } else {
      iterativeInfo.getRecomputeKind match {
        case IterativeInfo.RecomputeKind.ALWAYS =>
          new DirectOutputPrepareEachFlatForIterativeClassBuilder(
            operator)(
            model)(
            subplan.label) with CacheAlways
        case IterativeInfo.RecomputeKind.PARAMETER =>
          new DirectOutputPrepareEachFlatForIterativeClassBuilder(
            operator)(
            model)(
            subplan.label) with CacheByParameter {
            override val parameters: Set[String] = iterativeInfo.getParameters.toSet
          }
        case IterativeInfo.RecomputeKind.NEVER =>
          new DirectOutputPrepareEachFlatForIterativeClassBuilder(
            operator)(
            model)(
            subplan.label) with CacheOnce
      }
    }
    context.addClass(builder)
  }
}
| asakusafw/asakusafw-spark | extensions/iterativebatch/compiler/core/src/main/scala/com/asakusafw/spark/extensions/iterativebatch/compiler/graph/DirectOutputPrepareEachForIterativeCompiler.scala | Scala | apache-2.0 | 4,571 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.2
* @date Wed Oct 24 16:25:29 EDT 2012
* @see LICENSE (MIT style license file).
*/
package scalation.minima
import scalation.linalgebra.VectorD
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** This trait sets the pattern for optimization algorithms for solving Non-Linear
 * Programming (NLP) problems of the form:
 *
 * minimize f(x)
 * subject to g(x) <= 0 [ optionally g(x) == 0 ]
 *
 * where f is the objective function to be minimized
 * g is the constraint function to be satisfied, if any
 *
 * Classes mixing in this trait must implement a function (fg) that rolls the
 * constraints into the objective functions as penalties for constraint violation,
 * a one-dimensional Line Search (LS) algorithm (lineSearch) and an iterative
 * method (solve) that searches for improved solutions (x-vectors with lower
 * objective function values (f(x)).
 */
trait Minimizer
{
    protected val EPSILON = 1E-8 // number close to zero - roughly root of machine epsilon
    protected val TOL = 100.0 * EPSILON // default tolerance level more relaxed
    protected val STEP = 1.0 // default initial step size
    protected val MAX_ITER = 500 // maximum number of major steps/iterations
    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** The objective function f plus a weighted penalty based on the constraint
     * function g. Override for constrained optimization and ignore for
     * unconstrained optimization.
     * NOTE(review): the default body returns 0.0, not f(x); implementations are
     * expected to override this with the actual (penalized) objective.
     * @param x the coordinate values of the current point
     */
    def fg (x: VectorD): Double = 0.0
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Perform an exact (e.g., GoldenSectionLS) or inexact (e.g., WolfeLS) line search.
     * Search in direction 'dir', returning the distance 'z' to move in that direction.
     * @param x the current point
     * @param dir the direction to move in
     * @param step the initial step size
     */
    def lineSearch (x: VectorD, dir: VectorD, step: Double = STEP): Double
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Solve the Non-Linear Programming (NLP) problem by starting at 'x0' and
     * iteratively moving down in the search space to a minimal point.
     * @param x0 the starting point
     * @param step the initial step size
     * @param toler the tolerance
     */
    def solve (x0: VectorD, step: Double = STEP, toler: Double = EPSILON): VectorD
} // Minimizer trait
| mvnural/scalation | src/main/scala/scalation/minima/Minimizer.scala | Scala | mit | 2,755 |
package models
import models.user.User
/** Marker trait for models that carry per-user access control.
  * The intended `access` member is still commented out below — presumably a
  * placeholder for a future API; confirm before relying on this trait.
  */
trait HasAccess {
  // def access(implicit user: User, session: Session) : Access
}
| kristiankime/calc-tutor | app/models/HasAccess.scala | Scala | mit | 126 |
package com.soundcloud.lsh
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.linalg.distributed._
import org.apache.spark.sql.{SQLContext, SparkSession}
import org.scalatest.{FunSuite, Matchers}
/** Unit tests for [[QueryLsh]] and the package-level helpers it relies on. */
class QueryLshTest extends FunSuite with SparkLocalContext with Matchers {

  // System under test; lazy so the SparkContext supplied by SparkLocalContext
  // is initialised before the SparkSession is created from it.
  lazy val lsh = new QueryLsh(
    minCosineSimilarity = -1.0,
    dimensions = 100,
    numNeighbours = 10,
    maxMatches = 1000,
    rounds = 10)(new SQLContext(sc).sparkSession)

  test("join bitmap") {
    val first = new BitSet(2)
    val second = new BitSet(2)
    first.set(0)
    second.set(1)
    val signatures = Seq(first, second)
    // Key each signature by its string form; a self-join on equal strings
    // should therefore only ever pair an index with itself.
    val keyed = sc.parallelize(signatures.map(bitSetToString(_)).zipWithIndex)
    val selfJoined = keyed.join(keyed).values.collect
    selfJoined shouldBe Seq((0, 0), (1, 1))
  }

  test("join") {
    val indexedRows = Seq(
      IndexedRow(0, Vectors.dense(1, 1, 0, 0)),
      IndexedRow(1, Vectors.dense(1, 2, 0, 0)),
      IndexedRow(2, Vectors.dense(0, 1, 4, 2))
    )
    val matrix = new IndexedRowMatrix(sc.parallelize(indexedRows))

    val result = lsh.join(matrix, matrix)

    // Joining a matrix with itself must at least recover the identity pairs.
    val expectedPairs = Seq(
      (0, 0),
      (1, 1),
      (2, 2)
    )
    val actualPairs = result.entries.collect.map {
      entry: MatrixEntry =>
        (entry.i, entry.j)
    }
    actualPairs.sorted should be(expectedPairs.sorted)
  }

  test("distinct") {
    val entries = Seq(
      new MatrixEntry(1, 2, 3.4),
      new MatrixEntry(1, 2, 3.5),
      new MatrixEntry(1, 3, 3.4)
    )
    val deduplicated = distinct(sc.parallelize(entries)).collect
    // Only the first entry per (row, column) coordinate pair survives.
    val expected = Seq(
      entries(0), entries(2)
    )
    deduplicated should be(expected)
  }
}
| soundcloud/cosine-lsh-join-spark | src/test/scala/com/soundcloud/lsh/QueryLshTest.scala | Scala | mit | 1,604 |
/*
* Copyright © 2016 Schlichtherle IT Services
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package global.namespace.neuron.di.scala.sample
import global.namespace.neuron.di.scala.Neuron
/** Neuron-DI component: exposes a caged [[Counter]] and an operation on it. */
@Neuron
trait Metric extends HasCounter {

  // Synapse: resolved by Neuron DI at wiring time.
  val counter: Counter

  /** Returns the counter advanced by one step (delegates to Counter.increment). */
  def incrementCounter: Counter = counter.increment
}

/** Neuron-DI module wiring a single [[Metric]] instance. */
@Neuron
trait MetricModule {

  // Synapse: resolved by Neuron DI at wiring time.
  val metric: Metric
}
| christian-schlichtherle/neuron-di | core-scala/src/test/scala/global/namespace/neuron/di/scala/sample/MetricModule.scala | Scala | apache-2.0 | 885 |
package com.monovore
package object coast {

  // IMPLEMENTATION

  // always-visible utilities; should be hidden within the coast package

  /** Shared unit value, kept so call sites can be explicit about discarding results. */
  private[coast] val unit: Unit = ()

  /** Infix type alias for pairs, so `A -> B` can be written in type position. */
  private[coast] type ->[A, B] = (A, B)

  /** Extractor companion for the `->` alias, enabling `case a -> b => ...` patterns. */
  private[coast] object -> {
    def unapply[A, B](pair: (A, B)) = Some(pair)
  }

  private[coast] implicit class SeqOps[A](underlying: Seq[A]) {

    /** Groups a sequence of pairs by the first component, collecting the
      * second components per key.
      *
      * Uses a strict `map` instead of `mapValues`: `mapValues` returns a lazy
      * view that would re-run the value transformation on every lookup (and it
      * is deprecated as of Scala 2.13).
      */
    def groupByKey[B, C](implicit proof: A <:< (B, C)): Map[B, Seq[C]] =
      underlying.groupBy { _._1 }.map { case (key, pairs) => key -> pairs.map(_._2) }
  }

  /** Evaluates `action` only when `cond` holds, wrapping the result in `Some`. */
  private[coast] def assuming[A](cond: Boolean)(action: => A): Option[A] =
    if (cond) Some(action) else None
}
| bkirwi/coast | core/src/main/scala/com/monovore/coast/package.scala | Scala | apache-2.0 | 616 |
package test
import java.io._
import java.util
import java.util.Map.Entry
import com.google.gson.{JsonArray, JsonElement, JsonObject, JsonPrimitive}
import com.temportalist.origin.library.common.utility.Json
import com.temportalist.origin.library.common.utility.Json.Config
import net.minecraftforge.common.config.{ConfigCategory, Configuration, Property}
import net.minecraftforge.fml.common.ObfuscationReflectionHelper
/**
 * A Forge [[Configuration]] variant that persists its categories and
 * properties as JSON (via the Origin `Json`/`Config` utilities) instead of
 * the default Forge config format.
 *
 * NOTE(review): this class reaches into Configuration's private fields by
 * reflection index (10, 12, 13 below) — fragile across Forge versions;
 * confirm the indices against the targeted Forge build.
 *
 * @author TheTemportalist 1/31/15
 */
class ConfigJson(file: File) extends Configuration(file) {

	/** Convenience constructor: resolves the config file under `parentFile`. */
	def this(parentFile: File, name: String) {
		this(new File(parentFile, name))
	}

	// Private Configuration fields exposed by reflection:
	// index 10: the parent Configuration (null when this is the root config).
	val PARENT: Configuration = ObfuscationReflectionHelper.getPrivateValue(
		classOf[Configuration], null, 10)
	// index 12: category name -> ConfigCategory map of this configuration.
	val categories: util.Map[String, ConfigCategory] = ObfuscationReflectionHelper.getPrivateValue(
		classOf[Configuration], this, 12)
	// index 13: child configurations keyed by name.
	val children: util.Map[String, Configuration] = ObfuscationReflectionHelper.getPrivateValue(
		classOf[Configuration], this, 13)

	/**
	 * Serializes all categories/properties to the backing file as JSON.
	 * Delegates to the parent configuration when one exists. All I/O errors
	 * are caught and printed (best-effort save, matching Forge behavior).
	 */
	override def save(): Unit = {
		if (PARENT != null && PARENT != this) {
			PARENT.save()
			return
		}
		try {
			if (file.getParentFile != null) {
				file.getParentFile.mkdirs
			}
			if (!file.exists && !file.createNewFile) {
				return
			}
			if (file.canWrite) {
				// NOTE(review): this local `categories` shadows the reflective
				// field of the same name above — intentional? worth renaming.
				val categories: util.HashMap[String, util.HashMap[String, JsonElement]] =
					new util.HashMap[String, util.HashMap[String, JsonElement]]
				// Comments are collected separately, keyed by category or property name.
				val comments: util.HashMap[String, String] = new util.HashMap[String, String]
				Scala.foreach(this.getCategoryNames, (categoryName: String) => {
					val cate: ConfigCategory = this.getCategory(categoryName)
					val options: util.HashMap[String, JsonElement] =
						new util.HashMap[String, JsonElement]
					if (cate.getComment != null && !cate.getComment.isEmpty)
						comments.put(categoryName, cate.getComment)
					Scala.foreach(cate.getValues.entrySet(), (entry: Entry[String, Property]) => {
						val prop: Property = entry.getValue
						// Config.toJson may return null for unsupported property types;
						// those properties are silently skipped.
						val element: JsonElement = Config.toJson(prop)
						if (element != null) {
							options.put(entry.getKey, element)
							if (prop.comment != null && !prop.comment.isEmpty)
								comments.put(entry.getKey, prop.comment)
						}
					})
					categories.put(categoryName, options)
				})
				val fos: FileOutputStream = new FileOutputStream(file)
				val buffer: BufferedWriter = new BufferedWriter(
					new OutputStreamWriter(fos, defaultEncoding)
				)
				buffer.write(Config.toString(Config.toJson(categories), comments, 0))
				buffer.close()
				fos.close()
			}
		}
		catch {
			case e: Exception => e.printStackTrace()
		}
	}

	/**
	 * Parses the backing file as JSON and populates the categories map.
	 * No-op when a distinct parent configuration exists (the parent loads).
	 */
	override def load(): Unit = {
		if (PARENT != null && PARENT != this) {
			return
		}
		try {
			if (file.getParentFile != null) {
				file.getParentFile.mkdirs()
			}
			if (!file.exists()) {
				// Either a previous load attempt failed or the file is new; clear maps
				this.categories.clear()
				this.children.clear()
				if (!file.createNewFile()) return
			}
			if (file.canRead) {
				// Top level of the JSON document: category name -> object of properties.
				val json: JsonObject = Json.getJson(this.file).getAsJsonObject
				Scala.foreach(json.entrySet(), (entry: Entry[String, JsonElement]) => {
					val cateName: String = entry.getKey
					val cate: ConfigCategory = this.getCategory(cateName)
					Scala.foreach(entry.getValue.getAsJsonObject.entrySet(),
						(propEntry: Entry[String, JsonElement]) => {
							val name: String = propEntry.getKey
							val jsonElement: JsonElement = propEntry.getValue
							cate.put(name, this.getProperty(name, jsonElement))
						}
					)
				})
			}
		}
		catch {
			case e: Exception => e.printStackTrace()
		}
	}

	/**
	 * Converts a JSON element into a plain Scala value
	 * (Boolean/Number/String or Array[String]); null for objects.
	 * NOTE(review): appears to be unused within this class — dead code?
	 */
	private def fromJson(element: JsonElement): Any = {
		element match {
			case prim: JsonPrimitive =>
				if (prim.isBoolean) prim.getAsBoolean
				else if (prim.isNumber) prim.getAsNumber
				else prim.getAsString
			case array: JsonArray =>
				val str: Array[String] = new Array[String](array.size())
				for (i <- 0 until array.size()) {
					str(i) = array.get(i).getAsString
				}
				str
			case _ =>
				null
		}
	}

	/**
	 * Builds a Forge Property from a JSON element.
	 * NOTE(review): the array branch looks unfinished — it obtains a
	 * Constructor but never invokes it, then falls through and returns null.
	 * JsonObject values also yield null. Only primitives are fully handled.
	 */
	private def getProperty(name: String, element: JsonElement): Property = {
		val datatype: Property.Type = this.getType(element)
		element match {
			case array: JsonArray =>
				classOf[Property].getDeclaredConstructor(
					classOf[String], classOf[Array[String]], classOf[Property.Type],
					classOf[Boolean]
				)
				null
			case obj: JsonObject =>
				null
			case _ =>
				new Property(name, element.getAsString, datatype, true)
		}
	}

	/**
	 * Infers a Forge Property.Type for a JSON element.
	 * Arrays use the type of their first element (STRING when empty);
	 * numbers with a '.' become DOUBLE, otherwise INTEGER.
	 * NOTE(review): JsonObject yields null, which callers do not guard against.
	 */
	private def getType(prop: JsonElement): Property.Type = {
		prop match {
			case prim: JsonPrimitive =>
				if (prim.isBoolean)
					Property.Type.BOOLEAN
				else if (prim.isNumber) {
					if (prim.getAsString.contains('.'))
						Property.Type.DOUBLE
					else
						Property.Type.INTEGER
				}
				else Property.Type.STRING
			case array: JsonArray =>
				if (array.size() > 0)
					this.getType(array.get(0))
				else
					Property.Type.STRING
			case obj: JsonObject =>
				null
			case _ =>
				Property.Type.STRING
		}
	}
}
| TheTemportalist/Test | src/main/scala/test/ConfigJson.scala | Scala | apache-2.0 | 5,008 |
/*
* Copyright 2014 Joshua R. Rodgers
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ========================================================================
*/
package com.theenginerd.core.common.network.synchronization.data
import io.netty.buffer.ByteBuf
import cpw.mods.fml.common.FMLLog
/**
 * Base class for network messages that carry a set of synchronized
 * properties. Each property is serialized as (id: byte, typeId: byte, value),
 * where the value encoding is selected by the runtime type / type id.
 * Subclasses contribute a message header via the two abstract hooks.
 */
abstract class SynchronizedMessage
{
    // Properties carried by this message; replaced wholesale on deserialization.
    protected var properties: Iterable[Property]

    def getProperties = properties

    /** Writes the subclass-specific header (e.g. target identity) to the buffer. */
    protected def writeHeaderToBuffer(buffer: ByteBuf)

    /** Reads the subclass-specific header back from the buffer. */
    protected def readHeaderFromBuffer(buffer: ByteBuf)

    /** Serializes header followed by every property (id, typeId, value). */
    def writeToBuffer(buffer: ByteBuf) =
    {
        writeHeaderToBuffer(buffer)

        for(property <- properties)
        {
            buffer.writeByte(property.id)
            buffer.writeByte(property.typeId)
            writePropertyToBuffer(property, buffer)
        }
    }

    /**
     * Writes a single property value using the encoder matching its runtime type.
     * Unsupported types are only logged; nothing is written for them, which
     * desynchronizes the stream from the already-written typeId.
     * NOTE(review): return type Any is an artifact of the match expression.
     */
    private def writePropertyToBuffer(property: Property, buffer: ByteBuf): Any =
    {
        property.value match
        {
            case value: Boolean => buffer.writeBoolean(value)
            case value: Byte => buffer.writeByte(value)
            case value: Short => buffer.writeShort(value)
            case value: Int => buffer.writeInt(value)
            case value: Float => buffer.writeFloat(value)

            case unexpected =>
                val typ = unexpected.getClass
                FMLLog severe s"Unexpected serialization type found: $typ."
        }
    }

    /** Deserializes the header, then reads (id, typeId, value) triples until exhausted. */
    def readFromBuffer(buffer: ByteBuf) =
    {
        readHeaderFromBuffer(buffer)

        var parsedProperties: List[Property] = List()

        while(buffer.readableBytes() > 0)
        {
            val propertyId = buffer.readByte()
            val propertyType = buffer.readByte()
            val propertyValue = readPropertyFromBuffer(propertyType, buffer)

            parsedProperties :+= new Property(propertyId, propertyType, propertyValue)
        }

        properties = parsedProperties
    }

    /**
     * Reads one property value according to its type id.
     * NOTE(review): the fallback branch evaluates to Unit (the result of
     * FMLLog.severe), which is then boxed into the Property value — likely a
     * bug; confirm whether unknown type ids should abort instead.
     */
    private def readPropertyFromBuffer(propertyType: Byte, buffer: ByteBuf): AnyVal =
    {
        import com.theenginerd.core.common.synchronization.PropertyTypeIds._

        propertyType match
        {
            case BOOLEAN_ID => buffer.readBoolean()
            case BYTE_ID => buffer.readByte()
            case SHORT_ID => buffer.readShort()
            case INT_ID => buffer.readInt()
            case FLOAT_ID => buffer.readFloat()

            case unexpected =>
                FMLLog severe s"Unexpected property type id of $unexpected encountered."
        }
    }
}
| Mr-Byte/Random-Redstone | core/src/main/scala/com/theenginerd/core/common/network/synchronization/data/SynchronizedMessage.scala | Scala | apache-2.0 | 3,010 |
// Compiler regression test (Dotty issue #7044): GADT-style pattern matching.
// Matching on Pat.Expr() must refine T to Int (so `body + 1` typechecks), and
// Pat.Opt(Pat.Expr()) must refine T to Option[Int] (so `body.get` typechecks).
// Code intentionally left byte-identical — it exists to exercise the compiler.
object i7044 {
  case class Seg[T](pat:Pat[T], body:T)

  trait Pat[T]
  object Pat {
    case class Expr() extends Pat[Int]
    case class Opt[S](el:Pat[S]) extends Pat[Option[S]]
  }

  def test[T](s:Seg[T]):Int = s match {
    case Seg(Pat.Expr(),body) => body + 1
    case Seg(Pat.Opt(Pat.Expr()),body) => body.get
  }
}
| dotty-staging/dotty | tests/pos/i7044.scala | Scala | apache-2.0 | 345 |
package org.bitcoins.commons.jsonmodels.bitcoind
import org.bitcoins.core.currency.Bitcoins
import org.bitcoins.core.number.UInt32
import org.bitcoins.core.protocol.BitcoinAddress
import org.bitcoins.core.protocol.script.{ScriptPubKey, WitnessScriptPubKey}
import org.bitcoins.core.protocol.transaction.{
TransactionInput,
TransactionOutPoint
}
import org.bitcoins.commons.serializers.JsonWriters._
import org.bitcoins.crypto.{DoubleSha256DigestBE, ECPrivateKeyBytes}
import play.api.libs.json.{Json, Writes}
import ujson.{Num, Str, Value}
import scala.collection.mutable
/** Option/argument types for bitcoind JSON-RPC calls, plus their JSON writers. */
object RpcOpts {

  /** Options for the `walletcreatefundedpsbt` RPC; defaults mirror bitcoind's. */
  case class WalletCreateFundedPsbtOptions(
      changeAddress: Option[BitcoinAddress] = None,
      changePosition: Option[Int] = None,
      changeType: Option[AddressType] = None,
      includeWatching: Boolean = false,
      lockUnspents: Boolean = false,
      feeRate: Option[Bitcoins] = None,
      subtractFeeFromOutputs: Option[Vector[Int]] = None,
      replaceable: Boolean = false,
      confTarget: Option[Int] = None,
      estimateMode: FeeEstimationMode = FeeEstimationMode.Unset
  )

  /** Options for the `fundrawtransaction` RPC. */
  case class FundRawTransactionOptions(
      changeAddress: Option[BitcoinAddress] = None,
      changePosition: Option[Int] = None,
      includeWatching: Boolean = false,
      lockUnspents: Boolean = false,
      reverseChangeKey: Boolean = true,
      feeRate: Option[Bitcoins] = None,
      subtractFeeFromOutputs: Option[Vector[Int]])

  /** Fee estimation mode strings accepted by bitcoind (`estimate_mode`). */
  sealed abstract class FeeEstimationMode

  object FeeEstimationMode {

    case object Unset extends FeeEstimationMode {
      override def toString: String = "UNSET"
    }

    // NOTE(review): name is misspelled ("Economical") but is public API;
    // renaming would break callers. The wire string below is correct.
    case object Ecnomical extends FeeEstimationMode {
      override def toString: String = "ECONOMICAL"
    }

    case object Conservative extends FeeEstimationMode {
      override def toString: String = "CONSERVATIVE"
    }
  }

  /** Commands for the `setban` RPC. */
  sealed abstract class SetBanCommand

  object SetBanCommand {

    case object Add extends SetBanCommand {
      override def toString: String = "add"
    }

    case object Remove extends SetBanCommand {
      override def toString: String = "remove"
    }
  }

  implicit val fundRawTransactionOptionsWrites: Writes[
    FundRawTransactionOptions] = Json.writes[FundRawTransactionOptions]

  /** One previous output descriptor for `signrawtransactionwithkey`/`...withwallet`. */
  case class SignRawTransactionOutputParameter(
      txid: DoubleSha256DigestBE,
      vout: Int,
      scriptPubKey: ScriptPubKey,
      redeemScript: Option[ScriptPubKey] = None,
      witnessScript: Option[WitnessScriptPubKey] = None,
      amount: Option[Bitcoins] = None)

  implicit val signRawTransactionOutputParameterWrites: Writes[
    SignRawTransactionOutputParameter] =
    Json.writes[SignRawTransactionOutputParameter]

  object SignRawTransactionOutputParameter {

    /** Builds the parameter from a [[TransactionInput]], taking txid/vout
      * from its previous outpoint; script/amount details must be supplied. */
    def fromTransactionInput(
        transactionInput: TransactionInput,
        scriptPubKey: ScriptPubKey,
        redeemScript: Option[ScriptPubKey] = None,
        witnessScript: Option[WitnessScriptPubKey] = None,
        amount: Option[Bitcoins] = None): SignRawTransactionOutputParameter = {
      SignRawTransactionOutputParameter(
        txid = transactionInput.previousOutput.txIdBE,
        vout = transactionInput.previousOutput.vout.toInt,
        scriptPubKey = scriptPubKey,
        redeemScript = redeemScript,
        witnessScript = witnessScript,
        amount = amount
      )
    }
  }

  /** One request entry for the `importmulti` RPC. */
  case class ImportMultiRequest(
      scriptPubKey: ImportMultiAddress,
      timestamp: UInt32,
      redeemscript: Option[ScriptPubKey] = None,
      pubkeys: Option[Vector[ScriptPubKey]] = None,
      keys: Option[Vector[ECPrivateKeyBytes]] = None,
      internal: Option[Boolean] = None,
      watchonly: Option[Boolean] = None,
      label: Option[String] = None)

  /** Address wrapper used in [[ImportMultiRequest.scriptPubKey]]. */
  case class ImportMultiAddress(address: BitcoinAddress)

  /** (txid, vout) pair for the `lockunspent` RPC. */
  case class LockUnspentOutputParameter(txid: DoubleSha256DigestBE, vout: Int) {

    // Core-type view of this parameter.
    lazy val outPoint: TransactionOutPoint =
      TransactionOutPoint(txid, UInt32(vout))

    // JSON object as bitcoind expects it; LinkedHashMap keeps key order stable.
    lazy val toJson: Value = {
      mutable.LinkedHashMap(
        "txid" -> Str(txid.hex),
        "vout" -> Num(vout)
      )
    }
  }

  implicit val lockUnspentParameterWrites: Writes[LockUnspentOutputParameter] =
    Json.writes[LockUnspentOutputParameter]

  object LockUnspentOutputParameter {

    /** Converts a core [[TransactionOutPoint]] into the RPC parameter form. */
    def fromOutPoint(
        outPoint: TransactionOutPoint): LockUnspentOutputParameter = {
      LockUnspentOutputParameter(outPoint.txIdBE, outPoint.vout.toInt)
    }

    /** Parses a JSON string of the form {"txid": ..., "vout": ...}. */
    def fromJsonString(str: String): LockUnspentOutputParameter = {
      val json = ujson.read(str)
      fromJson(json)
    }

    /** Parses an already-decoded JSON value; throws on missing/invalid fields. */
    def fromJson(json: Value): LockUnspentOutputParameter = {
      val obj = json.obj
      val txId = DoubleSha256DigestBE(obj("txid").str)
      val vout = obj("vout").num.toInt

      LockUnspentOutputParameter(txId, vout)
    }
  }

  /** Commands for the `addnode` RPC. */
  sealed trait AddNodeArgument

  object AddNodeArgument {

    case object Add extends AddNodeArgument {
      override def toString: String = "add"
    }

    case object Remove extends AddNodeArgument {
      override def toString: String = "remove"
    }

    case object OneTry extends AddNodeArgument {
      override def toString: String = "onetry"
    }
  }

  /** Wallet flags for the `setwalletflag` RPC. */
  sealed trait WalletFlag

  object WalletFlag {

    case object AvoidReuse extends WalletFlag {
      override def toString: String = "avoid_reuse"
    }
  }

  /** Address types accepted by address-generating RPCs (`address_type`). */
  sealed trait AddressType

  object AddressType {

    case object Legacy extends AddressType {
      override def toString: String = "legacy"
    }

    case object P2SHSegwit extends AddressType {
      override def toString: String = "p2sh-segwit"
    }

    case object Bech32 extends AddressType {
      override def toString: String = "bech32"
    }
  }

  /** Label purposes used by `getaddressesbylabel`/`listlabels`. */
  sealed trait LabelPurpose

  object LabelPurpose {

    case object Send extends LabelPurpose {
      override def toString: String = "send"
    }

    case object Receive extends LabelPurpose {
      override def toString: String = "receive"
    }
  }

  /** Template request object for the `getblocktemplate` RPC. */
  case class BlockTemplateRequest(
      mode: String,
      capabilities: Vector[String],
      rules: Vector[String])

  implicit val blockTemplateRequest: Writes[BlockTemplateRequest] =
    Json.writes[BlockTemplateRequest]
}
| bitcoin-s/bitcoin-s | app-commons/src/main/scala/org/bitcoins/commons/jsonmodels/bitcoind/RpcOpts.scala | Scala | mit | 6,159 |
package org.jetbrains.plugins.scala.annotator.intention.sbt
import com.intellij.openapi.command.WriteCommandAction
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.text.StringUtil
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.psi.{PsiElement, PsiFile}
import org.jetbrains.plugins.scala.annotator.intention.sbt.SbtDependenciesVisitor._
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScPatternDefinition
import org.jetbrains.plugins.scala.lang.psi.api.{ScalaElementVisitor, ScalaFile}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.types.api.ParameterizedType
import org.jetbrains.plugins.scala.lang.psi.types.result._
import org.jetbrains.plugins.scala.project.ProjectContext
import org.jetbrains.sbt.resolvers.ArtifactInfo
/**
 * Helpers for locating insertion points in a build.sbt PSI tree and inserting
 * `libraryDependencies` entries there. All PSI mutations are wrapped in write
 * command actions.
 *
 * Created by afonichkin on 7/21/17.
 */
object AddSbtDependencyUtils {
  val LIBRARY_DEPENDENCIES: String = "libraryDependencies"
  val SETTINGS: String = "settings"
  val SEQ: String = "Seq"

  // Canonical type texts used to recognize sbt constructs in inferred types.
  val SBT_PROJECT_TYPE = "_root_.sbt.Project"
  val SBT_SEQ_TYPE = "_root_.scala.collection.Seq"
  val SBT_SETTING_TYPE = "_root_.sbt.Def.Setting"

  // Infix operators that can attach settings/dependencies to a key.
  private val InfixOpsSet = Set(":=", "+=", "++=")

  /**
   * Collects PSI elements inside a `val proj = project...` definition where a
   * dependency could be inserted: `libraryDependencies +=/++=` infix
   * expressions, `Seq(...)` calls, `Seq(...): _*` typed args, and addable
   * `.settings(...)` calls.
   */
  def getPossiblePlacesToAddFromProjectDefinition(proj: ScPatternDefinition): Seq[PsiElement] = {
    var res: Seq[PsiElement] = List()

    def action(psiElement: PsiElement): Unit = {
      psiElement match {
        case e: ScInfixExpr if e.lOp.getText == LIBRARY_DEPENDENCIES && isAddableLibraryDependencies(e) => res ++= Seq(e)
        case call: ScMethodCall if call.deepestInvokedExpr.getText == SEQ => res ++= Seq(call)
        case typedSeq: ScTypedStmt if typedSeq.isSequenceArg =>
          typedSeq.expr match {
            case call: ScMethodCall if call.deepestInvokedExpr.getText == SEQ => res ++= Seq(typedSeq)
            case _ =>
          }
        case settings: ScMethodCall if isAddableSettings(settings) =>
          settings.getEffectiveInvokedExpr match {
            case expr: ScReferenceExpression if expr.refName == SETTINGS => res ++= Seq(settings)
            case _ =>
          }
        case _ =>
      }
    }

    processPatternDefinition(proj)(action)

    res
  }

  /** Finds top-level `val x = project...` definitions (type `sbt.Project`). */
  def getTopLevelSbtProjects(psiSbtFile: ScalaFile): Seq[ScPatternDefinition] = {
    var res: Seq[ScPatternDefinition] = List()

    psiSbtFile.acceptChildren(new ScalaElementVisitor {
      override def visitPatternDefinition(pat: ScPatternDefinition): Unit = {
        if (pat.expr.isEmpty)
          return

        if (pat.expr.get.`type`().getOrAny.canonicalText != SBT_PROJECT_TYPE)
          return

        res = res ++ Seq(pat)
        super.visitPatternDefinition(pat)
      }
    })

    res
  }

  /** Finds file-level `libraryDependencies ...` infix expressions. */
  def getTopLevelLibraryDependencies(psiSbtFile: ScalaFile): Seq[ScInfixExpr] = {
    var res: Seq[ScInfixExpr] = List()

    psiSbtFile.acceptChildren(new ScalaElementVisitor {
      override def visitInfixExpression(infix: ScInfixExpr): Unit = {
        if (infix.lOp.getText == LIBRARY_DEPENDENCIES && infix.getParent.isInstanceOf[PsiFile]) {
          res = res ++ Seq(infix)
        }
      }
    })

    res
  }

  /** Describes the end-of-file position as a fallback insertion place. */
  def getTopLevelPlaceToAdd(psiFile: ScalaFile)(implicit project: Project): Option[DependencyPlaceInfo] = {
    val line: Int = StringUtil.offsetToLineNumber(psiFile.getText, psiFile.getTextLength) + 1
    getRelativePath(psiFile).map { relpath =>
      DependencyPlaceInfo(relpath, psiFile.getTextLength, line, psiFile, Seq())
    }
  }

  /**
   * Dispatches the insertion of `info` to the handler matching the chosen
   * insertion point. Returns the PSI element that was added, if any.
   */
  def addDependency(expr: PsiElement, info: ArtifactInfo)(implicit project: Project): Option[PsiElement] = {
    expr match {
      case e: ScInfixExpr if e.lOp.getText == LIBRARY_DEPENDENCIES => addDependencyToLibraryDependencies(e, info)
      case call: ScMethodCall if call.deepestInvokedExpr.getText == SEQ => addDependencyToSeq(call, info)
      case typedSeq: ScTypedStmt if typedSeq.isSequenceArg => addDependencyToTypedSeq(typedSeq, info)
      case settings: ScMethodCall if isAddableSettings(settings) =>
        settings.getEffectiveInvokedExpr match {
          case expr: ScReferenceExpression if expr.refName == SETTINGS =>
            Option(addDependencyToSettings(settings, info)(project))
          case _ => None
        }
      case file: PsiFile =>
        Option(addDependencyToFile(file, info)(project))
      case _ => None
    }
  }

  /**
   * Inserts into `libraryDependencies += x` (rewriting it to `++= Seq(x, new)`)
   * or into an existing `libraryDependencies ++= Seq(...)`.
   */
  def addDependencyToLibraryDependencies(infix: ScInfixExpr, info: ArtifactInfo)(implicit project: Project): Option[PsiElement] = {
    val psiFile = infix.getContainingFile

    infix.operation.refName match {
      case "+=" =>
        val dependency: ScExpression = infix.rOp
        val seqCall: ScMethodCall = generateSeqPsiMethodCall(info)(project)

        doInSbtWriteCommandAction({
          // Wrap the existing single dependency and the new one into a Seq,
          // then upgrade the operator from += to ++=.
          seqCall.args.addExpr(dependency.copy().asInstanceOf[ScExpression])
          seqCall.args.addExpr(generateArtifactPsiExpression(info)(project))
          infix.operation.replace(ScalaPsiElementFactory.createElementFromText("++=")(project))
          dependency.replace(seqCall)
        }, psiFile)(project)

        Option(infix.rOp)
      case "++=" =>
        val dependencies: ScExpression = infix.rOp
        dependencies match {
          case call: ScMethodCall if call.deepestInvokedExpr.getText == SEQ=>
            val addedExpr = generateArtifactPsiExpression(info)(project)
            doInSbtWriteCommandAction(call.args.addExpr(addedExpr), psiFile)(project)
            Option(addedExpr)
          case _ => None
        }

      case _ => None
    }
  }

  /**
   * Appends to a `Seq(...)` call. When the Seq already belongs to a
   * `libraryDependencies` infix, a bare artifact is added; otherwise a full
   * `libraryDependencies += artifact` entry is inserted.
   */
  def addDependencyToSeq(seqCall: ScMethodCall, info: ArtifactInfo)(implicit project: Project): Option[PsiElement] = {
    def isValid(expr: ScInfixExpr) = InfixOpsSet.contains(expr.operation.refName)

    val parentDef = Option(PsiTreeUtil.getParentOfType(seqCall, classOf[ScInfixExpr]))
    val addedExpr = parentDef match {
      case Some(expr) if isValid(expr) && expr.lOp.textMatches(LIBRARY_DEPENDENCIES) =>
        generateArtifactPsiExpression(info)
      case _ => generateLibraryDependency(info)
    }

    doInSbtWriteCommandAction(seqCall.args.addExpr(addedExpr), seqCall.getContainingFile)
    Some(addedExpr)
  }

  /** Appends a `libraryDependencies += ...` entry inside `Seq(...): _*`. */
  def addDependencyToTypedSeq(typedSeq: ScTypedStmt, info: ArtifactInfo)(implicit project: Project): Option[PsiElement] =
    typedSeq.expr match {
      case seqCall: ScMethodCall =>
        val addedExpr = generateLibraryDependency(info)(project)
        doInSbtWriteCommandAction({
          seqCall.args.addExpr(addedExpr)
        }, seqCall.getContainingFile)
        Option(addedExpr)
      case _ => None
    }

  /** Appends a `libraryDependencies += ...` line at the end of the file. */
  def addDependencyToFile(file: PsiFile, info: ArtifactInfo)(implicit project: Project): PsiElement = {
    var addedExpr: PsiElement = null
    doInSbtWriteCommandAction({
      file.addAfter(generateNewLine(project), file.getLastChild)
      addedExpr = file.addAfter(generateLibraryDependency(info), file.getLastChild)
    }, file)
    addedExpr
  }

  /** Appends a `libraryDependencies += ...` argument to a `.settings(...)` call. */
  def addDependencyToSettings(settings: ScMethodCall, info: ArtifactInfo)(implicit project: Project): PsiElement = {
    val addedExpr = generateLibraryDependency(info)(project)
    doInSbtWriteCommandAction({
      settings.args.addExpr(addedExpr)
    }, settings.getContainingFile)(project)
    addedExpr
  }

  /**
   * A `.settings(...)` call accepts insertion unless its single argument is a
   * `Seq(...): _*` / `ref: _*` splat (which would need different handling).
   */
  def isAddableSettings(settings: ScMethodCall): Boolean = {
    val args = settings.args.exprsArray
    if (args.length == 1) {
      args(0) match {
        case typedStmt: ScTypedStmt if typedStmt.isSequenceArg =>
          typedStmt.expr match {
            case _: ScMethodCall => false
            case _: ScReferenceExpression => false
            case _ => true
          }
        case _ => true
      }
    } else true
  }

  /**
   * A `libraryDependencies` infix is addable for `+=` always and for `++=`
   * only when the right side is not already a `Seq(...)` (that Seq is offered
   * separately, so returning true here would list it twice).
   */
  def isAddableLibraryDependencies(libDeps: ScInfixExpr): Boolean =
    libDeps.operation.refName match {
      case "+=" => true
      case "++=" => libDeps.rOp match {
        // In this case we return false to not repeat it several times
        case call: ScMethodCall if call.deepestInvokedExpr.getText == SEQ => false
        case _ => true
      }
      case _ => false
    }

  // Runs `f` inside an IntelliJ write command action scoped to the sbt file.
  private def doInSbtWriteCommandAction[T](f: => T, psiSbtFile: PsiFile)(implicit project: ProjectContext): T =
    WriteCommandAction
      .writeCommandAction(psiSbtFile)
      .compute(() => f)

  // NOTE(review): `info` is not used in the body — the call is generated empty
  // and filled by the caller; consider dropping the parameter.
  private def generateSeqPsiMethodCall(info: ArtifactInfo)(implicit ctx: ProjectContext): ScMethodCall =
    ScalaPsiElementFactory.createElementFromText(s"$SEQ()").asInstanceOf[ScMethodCall]

  // Builds a `libraryDependencies += "g" %(%) "a" % "v"` infix expression.
  private def generateLibraryDependency(info: ArtifactInfo)(implicit ctx: ProjectContext): ScInfixExpr =
    ScalaPsiElementFactory.createElementFromText(s"$LIBRARY_DEPENDENCIES += ${generateArtifactText(info)}").asInstanceOf[ScInfixExpr]

  // Builds just the artifact expression (no libraryDependencies prefix).
  private def generateArtifactPsiExpression(info: ArtifactInfo)(implicit ctx: ProjectContext): ScExpression =
    ScalaPsiElementFactory.createElementFromText(generateArtifactText(info))(ctx).asInstanceOf[ScExpression]

  private def generateNewLine(implicit ctx: ProjectContext): PsiElement = ScalaPsiElementFactory.createElementFromText("\n")

  // Uses %% (dropping the _2.x suffix) when the artifactId carries a Scala
  // binary-version suffix; plain % otherwise.
  private def generateArtifactText(info: ArtifactInfo): String = {
    if (info.artifactId.matches("^.+_\\d+\\.\\d+$"))
      s""""${info.groupId}" %% "${info.artifactId.replaceAll("_\\d+\\.\\d+$", "")}" % "${info.version}""""
    else
      s""""${info.groupId}" % "${info.artifactId}" % "${info.version}""""
  }

  /** Path of the element's file relative to the project base, if underneath it. */
  def getRelativePath(elem: PsiElement)(implicit project: ProjectContext): Option[String] = {
    for {
      path <- Option(elem.getContainingFile.getVirtualFile.getCanonicalPath)
      if path.startsWith(project.getBasePath)
    } yield
      path.substring(project.getBasePath.length + 1)
  }

  /** Wraps a PSI insertion point into a [[DependencyPlaceInfo]] (path/offset/line). */
  def toDependencyPlaceInfo(elem: PsiElement, affectedProjects: Seq[String])(implicit ctx: ProjectContext): Option[DependencyPlaceInfo] = {
    val offset =
      elem match {
        case call: ScMethodCall =>
          call.getEffectiveInvokedExpr match {
            case expr: ScReferenceExpression => expr.nameId.getTextOffset
            case _ => elem.getTextOffset
          }
        case _ => elem.getTextOffset
      }

    val line: Int = StringUtil.offsetToLineNumber(elem.getContainingFile.getText, offset) + 1

    getRelativePath(elem).map { relpath =>
      DependencyPlaceInfo(relpath, offset, line, elem, affectedProjects)
    }
  }
}
| triplequote/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/annotator/intention/sbt/AddSbtDependencyUtils.scala | Scala | apache-2.0 | 10,385 |
package jp.ne.opt.chronoscala
import java.time.{LocalDate, Period}
import jp.ne.opt.chronoscala.Imports._
/**
 * An immutable, inclusive interval of dates, exposed as a `Seq[LocalDate]`
 * that steps from `startDate` to `endDate` in increments of `step`.
 */
case class DateInterval(startDate: LocalDate, endDate: LocalDate, step: Period)
  extends Seq[LocalDate] {

  /** Returns the idx-th date of the interval; guard clause rejects bad indices. */
  def apply(idx: Int): LocalDate = {
    if (idx < 0 || length <= idx) throw new IndexOutOfBoundsException(idx.toString)
    iterator.drop(idx).next()
  }

  /** Lazily walks forward from startDate by `step`, keeping dates up to and
    * including endDate. */
  def iterator: Iterator[LocalDate] =
    Iterator.iterate(startDate)(date => date + step).takeWhile(date => date <= endDate)

  /** Number of dates in the interval; O(n), as it consumes the iterator. */
  def length: Int = iterator.length

  /** Same bounds, different stride. */
  def by(step: Period): DateInterval = copy(step = step)
}
| opt-tech/chronoscala | shared/src/main/scala/jp/ne/opt/chronoscala/DateInterval.scala | Scala | mit | 645 |
package com.robocubs4205.cubscout
import play.api.libs.json._
import play.api.libs.functional.syntax._
import play.api.http.Status._
// Root of the request/response envelope hierarchy.
sealed trait JsonWrapper[T]

// Per-response metadata: optional client-echoed context plus a response id.
final case class ResponseCtx(context: Option[String], id: Long)

// A response envelope: carries the ResponseCtx fields alongside the payload.
sealed trait JsonResponseWrapper[T] extends JsonWrapper[T] {
  def context: Option[String]

  def responseId: Long
}
object JsonResponseWrapper {
  /** Wraps a single payload, taking context/id from the implicit ResponseCtx. */
  def apply[T](data: T)(implicit ctx: ResponseCtx, ew: EtagWriter[T]) = JsonSingleResponseWrapper(data, ctx.context, ctx.id)

  /** Wraps a collection payload, taking context/id from the implicit ResponseCtx. */
  def apply[T](items: Iterable[T])(implicit ctx: ResponseCtx, ew: EtagWriter[T]) =
    JsonArrayResponseWrapper(items, ctx.context, ctx.id)

  // Dispatches serialization to the concrete wrapper's Writes instance.
  implicit def JsonResponseWrapperWrites[T](implicit wt: Writes[T]): Writes[JsonResponseWrapper[T]] = {
    case e: JsonErrorResponseWrapper => JsonErrorResponseWrapper.jsonErrorResponseWrapperWrites.writes(e)
    case r: JsonSingleResponseWrapper[T] => JsonSingleResponseWrapper.jsonSingleResponseWrapperWrites[T].writes(r)
    case r: JsonArrayResponseWrapper[T] => JsonArrayResponseWrapper.jsonArrayResponseWrapperWrites[T].writes(r)
  }
}
// A request envelope (no context/id — those are response-only).
sealed trait JsonRequestWrapper[T] extends JsonWrapper[T]

object JsonRequestWrapper {
  // Tries the single-payload shape first, then falls back to the array shape.
  implicit def JsonRequestWrapperReads[T](implicit rt: Reads[T]): Reads[JsonRequestWrapper[T]] =
    JsonSingleRequestWrapper.jsonSingleRequestWrapperReads[T].map[JsonRequestWrapper[T]](x => x) orElse
      JsonArrayRequestWrapper.jsonArrayRequestWrapperReads[T].map[JsonRequestWrapper[T]](x => x)
}
// Envelope carrying exactly one payload value.
sealed trait JsonSingleWrapper[T] extends JsonWrapper[T] {
  def data: T
}

// Envelope carrying a collection of payload values.
sealed trait JsonArrayWrapper[T] extends JsonWrapper[T] {
  def items: Iterable[T]
}
// Single-payload response; the implicit EtagWriter is captured so serialization
// can embed the payload's etag.
final case class JsonSingleResponseWrapper[T](data: T, context: Option[String], responseId: Long)
                                             (implicit val ew: EtagWriter[T])
  extends JsonResponseWrapper[T] with JsonSingleWrapper[T]

object JsonSingleResponseWrapper {
  // Serializes as {"data": {...payload, "etag": ...}, "context"?, "responseId"}.
  def jsonSingleResponseWrapperWrites[T](implicit wt: Writes[T]): Writes[JsonSingleResponseWrapper[T]] = (
    (JsPath \ "data").write[T] and
      (JsPath \ "context").writeNullable[String] and
      (JsPath \ "responseId").write[Long] and
      (JsPath \ "data" \ "etag").write[String]
    ) (v => (v.data, v.context, v.responseId, v.ew.etag(v.data)))
}
// Collection response; the implicit EtagWriter is captured so each item's etag
// can be embedded during serialization.
final case class JsonArrayResponseWrapper[T](items: Iterable[T], context: Option[String],
                                             responseId: Long)(implicit val ew: EtagWriter[T])
  extends JsonResponseWrapper[T] with JsonArrayWrapper[T]

object JsonArrayResponseWrapper {
  // Internal pairing of an item with its computed etag, used only for writing.
  private case class JsonDataWithEtagWrapper[T](t: T, etag: String)

  // Writes the item inline with an added "etag" field.
  implicit private def JsonDataWithEtagWrapperWrites[T](implicit wt: Writes[T]): Writes[JsonDataWithEtagWrapper[T]] = (
    JsPath.write[T] and
      (JsPath \ "etag").write[String]
    ) (unlift(JsonDataWithEtagWrapper.unapply[T]))

  // Serializes as {"data": {"items": [...]}, "context"?, "responseId"}.
  def jsonArrayResponseWrapperWrites[T](implicit wt: Writes[T]): Writes[JsonArrayResponseWrapper[T]] = (
    (JsPath \ "data" \ "items").write[Iterable[JsonDataWithEtagWrapper[T]]] and
      (JsPath \ "context").writeNullable[String] and
      (JsPath \ "responseId").write[Long]
    ) (v => (v.items.map(i => JsonDataWithEtagWrapper(i, v.ew.etag(i))), v.context, v.responseId))
}
/** Request wrapper for a single payload value, read from the top-level "data" field. */
final case class JsonSingleRequestWrapper[T](data: T)
  extends JsonRequestWrapper[T] with JsonSingleWrapper[T]

object JsonSingleRequestWrapper {
  /** Reads `{"data": <payload>}` into a [[JsonSingleRequestWrapper]]. */
  implicit def jsonSingleRequestWrapperReads[T](implicit rt: Reads[T]): Reads[JsonSingleRequestWrapper[T]] = {
    val dataReads: Reads[T] = (JsPath \ "data").read[T]
    dataReads.map(payload => JsonSingleRequestWrapper(payload))
  }
}
/** Request wrapper for a collection of payload values, read from "data.items". */
final case class JsonArrayRequestWrapper[T](items: Iterable[T])
  extends JsonRequestWrapper[T] with JsonArrayWrapper[T]

object JsonArrayRequestWrapper {
  /** Reads `{"data": {"items": [<payload>, ...]}}` into a [[JsonArrayRequestWrapper]]. */
  implicit def jsonArrayRequestWrapperReads[T](implicit rt: Reads[T]): Reads[JsonArrayRequestWrapper[T]] = {
    val itemsReads: Reads[Iterable[T]] = (JsPath \ "data" \ "items").read[Iterable[T]]
    itemsReads.map(payloads => JsonArrayRequestWrapper(payloads))
  }
}
/**
 * Response wrapper describing a failure: a collection of structured errors, a numeric
 * error code and a human-readable message, plus the optional context and response id
 * shared by all responses. Parameterized with `Nothing` because an error response
 * carries no payload.
 */
final case class JsonErrorResponseWrapper(errors: Iterable[JsonErrorWrapper], code: Long, message: String,
                                          context: Option[String], responseId: Long)
  extends JsonResponseWrapper[Nothing]

/** A single structured error that knows how to render itself as JSON. */
trait JsonErrorWrapper {
  // JSON rendering of this error, embedded verbatim in the error response.
  def json: JsValue
}
object JsonErrorResponseWrapper {
  /** Builds an error response, pulling context string and response id from the implicit [[ResponseCtx]]. */
  def apply(errors: Iterable[JsonErrorWrapper], code: Long, message: String)
           (implicit ctx: ResponseCtx): JsonErrorResponseWrapper =
    JsonErrorResponseWrapper(errors, code, message, ctx.context, ctx.id)

  /** Convenience overload for a single error. */
  def apply(error: JsonErrorWrapper, code: Long, message: String)
           (implicit ctx: ResponseCtx): JsonErrorResponseWrapper = apply(Seq(error), code, message)

  /** Convenience overload with no structured errors, just a code and message. */
  def apply(code: Long, message: String)(implicit ctx: ResponseCtx): JsonErrorResponseWrapper =
    apply(Seq(), code, message)

  /**
   * Converts play-json validation failures into a 422 error response, flattening each
   * (path, errors) pair into one [[ParseErrorWrapper]] per individual validation error.
   */
  def apply(errors: JsError)(implicit ctx: ResponseCtx): JsonErrorResponseWrapper = apply(
    errors.errors.flatMap(e => e._2.map(f => (e._1, f))).map(e => ParseErrorWrapper(e._1, e._2)),
    UNPROCESSABLE_ENTITY, "There were parse errors when processing the request")

  /** Convenience overload taking the message from an exception. */
  def apply(code: Long, exception: Exception)(implicit ctx: ResponseCtx): JsonErrorResponseWrapper =
    apply(code, exception.getMessage)

  /** One parse failure: the JSON path that failed plus the validation error at that path. */
  final case class ParseErrorWrapper(path: JsPath, error: JsonValidationError) extends JsonErrorWrapper {
    override def json: JsValue = w.writes(this)
    // Kept as a member for backward compatibility; now references the shared writer
    // instead of building a fresh Writes per instance.
    val w: Writes[ParseErrorWrapper] = parseErrorWrapperWrites
  }

  // Built once and shared by all ParseErrorWrapper instances.
  private val parseErrorWrapperWrites: Writes[ParseErrorWrapper] = (
    (JsPath \ "path").write[String] and
      (JsPath \ "message").write[String] and
      (JsPath \ "reason").write[String]
  ) (e => (e.path.toJsonString, e.error.message, "parse error"))

  // Built once; the implicit def below retains the original signature (including its
  // unused type parameter T) for source compatibility, but no longer allocates a new
  // Writes on every implicit resolution.
  private val errorResponseWrites: Writes[JsonErrorResponseWrapper] = (
    (JsPath \ "error" \ "errors").writeNullable[Iterable[JsValue]] and
      (JsPath \ "error" \ "code").write[Long] and
      (JsPath \ "error" \ "message").write[String] and
      (JsPath \ "context").writeNullable[String] and
      (JsPath \ "responseId").write[Long]
  ) { e =>
    // Omit the "errors" array entirely when there are no structured errors.
    val es = e.errors.map(_.json).toSeq
    (if (es.isEmpty) None else Some(es), e.code, e.message, e.context, e.responseId)
  }

  implicit def jsonErrorResponseWrapperWrites[T]: Writes[JsonErrorResponseWrapper] = errorResponseWrites
}
| robocubs4205/cubscout-server | common/src/main/scala/com/robocubs4205/cubscout/JsonWrappers.scala | Scala | mit | 6,057 |
package scala.pickling.non.public.joint
import org.scalatest.FunSuite
import scala.pickling._, scala.pickling.Defaults._, json._
// Test fixture: mixes a private val, a plain (non-field) constructor param and a public
// val to exercise pickling of non-public state.
class Person(private val name: String, age: Int, val hobby: Hobby) {
  // NOTE: be careful not to reference age anywhere, so that it's elided by the "constructors" phase
  override def toString = s"Person(name = $name, hobby = $hobby)"
}
// Test fixture: exercises pickling of a public var, a private var and a private val.
class Hobby(var name: String, private var notes: String, private val attitude: String) {
  override def toString = s"Hobby(name = $name, notes = $notes, attitude = $attitude)"
}
/**
 * Round-trip test: pickling a Person (with non-public fields) must produce the expected
 * JSON — note the elided `age` does not appear — and unpickling must restore an
 * equivalent object (compared via toString, since the classes define no equals).
 */
class NonPublicJointTest extends FunSuite {
  test("main") {
    val e = new Person("Eugene", 25, new Hobby("hacking", "mostly Scala", "loving it"))
    val pickle = e.pickle
    // Expected JSON: exact textual match, so any change to field ordering or
    // formatting in the pickler will fail this assertion.
    assert(pickle.toString === """
      |JSONPickle({
      |  "$type": "scala.pickling.non.public.joint.Person",
      |  "name": "Eugene",
      |  "hobby": {
      |    "name": "hacking",
      |    "notes": "mostly Scala",
      |    "attitude": "loving it"
      |  }
      |})
    """.stripMargin.trim)
    assert(pickle.unpickle[Person].toString === e.toString)
  }
}
| phaller/pickling | core/src/test/scala/pickling/run/non-public-joint.scala | Scala | bsd-3-clause | 1,099 |
package org.scalaide.core.internal.builder.zinc
import java.io.File
import org.eclipse.core.runtime.SubMonitor
import org.eclipse.core.resources.IncrementalProjectBuilder
import org.eclipse.core.resources.IProject
import org.scalaide.util.eclipse.FileUtils
import scala.tools.eclipse.contribution.weaving.jdt.jcompiler.BuildManagerStore
import org.eclipse.jdt.internal.core.JavaModelManager
import org.eclipse.jdt.internal.core.builder.JavaBuilder
import org.eclipse.jdt.internal.core.builder.State
import org.eclipse.core.resources.IncrementalProjectBuilder.INCREMENTAL_BUILD
import org.eclipse.core.resources.ResourcesPlugin
import org.eclipse.core.resources.IResource
import xsbti.compile.JavaCompiler
import xsbti.compile.Output
import xsbti.Logger
import org.scalaide.core.internal.builder.JDTBuilderFacade
import org.scalaide.core.IScalaPlugin
/** Eclipse Java compiler interface, used by the SBT builder.
 * This class forwards to the internal Eclipse Java compiler, using
 * reflection to circumvent private/protected modifiers.
 */
class JavaEclipseCompiler(p: IProject, monitor: SubMonitor) extends JavaCompiler with JDTBuilderFacade {

  override def project = p

  /**
   * Compiles the given Java sources by delegating to the Eclipse Java builder, after
   * refreshing output folders (SBT may have deleted classfiles the Eclipse compiler
   * expects). Skips compilation entirely if any source file already has build errors.
   * The `classpath`, `output`, `options` and `log` parameters are not used here; the
   * Eclipse builder derives those from the project configuration itself.
   */
  def compile(sources: Array[File], classpath: Array[File], output: Output, options: Array[String], log: Logger): Unit = {
    val scalaProject = IScalaPlugin().getScalaProject(project)

    val allSourceFiles = scalaProject.allSourceFiles()
    val depends = scalaProject.directDependencies
    if (allSourceFiles.exists(FileUtils.hasBuildErrors(_)))
      // NOTE(review): this expression's value is discarded (compile returns Unit);
      // it looks like a leftover from an API that returned the dependent projects.
      depends.toArray
    else {
      ensureProject

      // refresh output directories, since SBT removes classfiles that the Eclipse
      // Java compiler expects to find
      for (folder <- scalaProject.outputFolders) {
        val container = ResourcesPlugin.getWorkspace().getRoot().getFolder(folder)
        container.refreshLocal(IResource.DEPTH_INFINITE, null)
      }

      // Hand the exact set of Java sources to the weaving-aware build manager for the
      // duration of the build, clearing it afterwards even if the build throws.
      BuildManagerStore.INSTANCE.setJavaSourceFilesToCompile(sources, project)
      try
        scalaJavaBuilder.build(INCREMENTAL_BUILD, new java.util.HashMap(), monitor)
      finally
        BuildManagerStore.INSTANCE.setJavaSourceFilesToCompile(null, project)

      refresh()
    }
  }
}
| Kwestor/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/core/internal/builder/zinc/JavaEclipseCompiler.scala | Scala | bsd-3-clause | 2,207 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import java.text.SimpleDateFormat
import java.util.Date
import org.apache.hadoop.conf.{Configurable, Configuration}
import org.apache.hadoop.io.Writable
import org.apache.hadoop.mapreduce._
import org.apache.spark.{InterruptibleIterator, Logging, Partition, SerializableWritable, SparkContext, TaskContext}
private[spark]
class NewHadoopPartition(rddId: Int, val index: Int, @transient rawSplit: InputSplit with Writable)
  extends Partition {

  // Wrap the raw Hadoop split so the partition can be serialized to executors.
  val serializableHadoopSplit = new SerializableWritable(rawSplit)

  // Same value as 41 * (41 + rddId) + index, written out term by term.
  override def hashCode(): Int = {
    val prime = 41
    prime * prime + prime * rddId + index
  }
}
/**
 * An RDD that provides core functionality for reading data stored in Hadoop (e.g., files in HDFS,
 * sources in HBase, or S3), using the new MapReduce API (`org.apache.hadoop.mapreduce`).
 *
 * @param sc The SparkContext to associate the RDD with.
 * @param inputFormatClass Storage format of the data to be read.
 * @param keyClass Class of the key associated with the inputFormatClass.
 * @param valueClass Class of the value associated with the inputFormatClass.
 * @param conf The Hadoop configuration.
 */
class NewHadoopRDD[K, V](
    sc : SparkContext,
    inputFormatClass: Class[_ <: InputFormat[K, V]],
    keyClass: Class[K],
    valueClass: Class[V],
    @transient conf: Configuration)
  extends RDD[(K, V)](sc, Nil)
  with SparkHadoopMapReduceUtil
  with Logging {

  // A Hadoop Configuration can be about 10 KB, which is pretty big, so broadcast it
  private val confBroadcast = sc.broadcast(new SerializableWritable(conf))
  // private val serializableConf = new SerializableWritable(conf)

  // Timestamp-based id used to build a unique (per-construction) Hadoop JobID.
  private val jobtrackerId: String = {
    val formatter = new SimpleDateFormat("yyyyMMddHHmm")
    formatter.format(new Date())
  }

  @transient private val jobId = new JobID(jobtrackerId, id)

  /**
   * Asks the InputFormat for its splits (driver side) and wraps each one in a
   * [[NewHadoopPartition]]. Configurable formats get the Hadoop conf injected first.
   */
  override def getPartitions: Array[Partition] = {
    val inputFormat = inputFormatClass.newInstance
    if (inputFormat.isInstanceOf[Configurable]) {
      inputFormat.asInstanceOf[Configurable].setConf(conf)
    }
    val jobContext = newJobContext(conf, jobId)
    val rawSplits = inputFormat.getSplits(jobContext).toArray
    val result = new Array[Partition](rawSplits.size)
    for (i <- 0 until rawSplits.size) {
      result(i) = new NewHadoopPartition(id, i, rawSplits(i).asInstanceOf[InputSplit with Writable])
    }
    result
  }

  /**
   * Executor-side read of one split: creates a fresh InputFormat + RecordReader and
   * exposes it as an iterator of (key, value) pairs. The reader is closed via an
   * on-task-completion callback, and the whole iterator honors task interruption
   * through [[InterruptibleIterator]].
   */
  override def compute(theSplit: Partition, context: TaskContext) = {
    val iter = new Iterator[(K, V)] {
      val split = theSplit.asInstanceOf[NewHadoopPartition]
      logInfo("Input split: " + split.serializableHadoopSplit)
      val conf = confBroadcast.value.value
      val attemptId = newTaskAttemptID(jobtrackerId, id, isMap = true, split.index, 0)
      val hadoopAttemptContext = newTaskAttemptContext(conf, attemptId)
      val format = inputFormatClass.newInstance
      if (format.isInstanceOf[Configurable]) {
        format.asInstanceOf[Configurable].setConf(conf)
      }
      val reader = format.createRecordReader(
        split.serializableHadoopSplit.value, hadoopAttemptContext)
      reader.initialize(split.serializableHadoopSplit.value, hadoopAttemptContext)

      // Register an on-task-completion callback to close the input stream.
      context.addOnCompleteCallback(() => close())

      // State machine: `havePair` means a key/value has been fetched by hasNext but
      // not yet consumed by next(); `finished` latches once the reader is exhausted.
      var havePair = false
      var finished = false

      override def hasNext: Boolean = {
        if (!finished && !havePair) {
          finished = !reader.nextKeyValue
          havePair = !finished
        }
        !finished
      }

      override def next(): (K, V) = {
        if (!hasNext) {
          throw new java.util.NoSuchElementException("End of stream")
        }
        havePair = false
        (reader.getCurrentKey, reader.getCurrentValue)
      }

      private def close() {
        try {
          reader.close()
        } catch {
          case e: Exception => logWarning("Exception in RecordReader.close()", e)
        }
      }
    }
    new InterruptibleIterator(context, iter)
  }

  // Preferred executor locations come from the split itself; "localhost" is filtered
  // out because it is not a useful locality hint.
  override def getPreferredLocations(split: Partition): Seq[String] = {
    val theSplit = split.asInstanceOf[NewHadoopPartition]
    theSplit.serializableHadoopSplit.value.getLocations.filter(_ != "localhost")
  }

  // The broadcast Hadoop configuration backing this RDD.
  def getConf: Configuration = confBroadcast.value.value
}
| dotunolafunmiloye/spark | core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala | Scala | apache-2.0 | 5,070 |
// Copyright 2017 EPFL DATA Lab (data.epfl.ch)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package squid
package quasi
import utils._
import squid.lang.Base
/* TODO: make it work with intermediate bases (eg: `ViaASTQuasiConfig`) */
/**
 * Macro-time helper that lifts Scala reflection `Type`s into the quasiquote base's
 * `TypeRep`s, preferring implicit `CodeType` evidence available at the macro call site
 * over structural decomposition of the type.
 */
abstract class QuasiTypeEmbedder[C <: scala.reflect.macros.blackbox.Context, B <: Base](val c: C, val base: B, debug: String => Unit) {
  import c.universe._

  val helper: meta.UniverseHelpers[c.universe.type]
  import helper._

  // Tree referring to the concrete base object at the macro expansion site.
  val baseTree: Tree

  class Impl extends ModularEmbedding[c.universe.type, B](c.universe, base, debug) {

    // Hook for subclasses to post-process type evidence found in scope; identity here.
    def insertTypeEvidence(ev: base.TypeRep): base.TypeRep = ev

    /* We override this method to make sure to try and find an implicit for a given abstract type, before decomposing it;
     * this is to allow the common metpaprogramming pattern where one carries bundles of abstract types and their
     * implicit representations with one's code values. */
    override def liftTypeUncached(tp: Type, wide: Boolean): base.TypeRep = tp match {
      case TypeRef(prefix, sym, targs)
        if prefix != NoPrefix
          && !sym.isClass
          && (ExtractedType unapply tp isEmpty)
      =>
        debug(s"$sym is not a class, so we should look for an implicit in scope first")
        lookForTypeImplicit(tp) getOrElse super.liftTypeUncached(tp, wide)
      case _ => super.liftTypeUncached(tp, wide)
    }

    val QTSym = symbolOf[QuasiBase#CodeType[_]]

    /**
     * Searches the macro call site for `CodeType[tp]` evidence: first via a normal
     * implicit search (macros disabled to avoid recursive expansion), then by manually
     * walking the enclosing typer contexts' scopes for a matching `CodeType` val.
     * Returns the evidence's `.rep` tree wrapped as a TypeRep, or None.
     */
    def lookForTypeImplicit(tp: Type): Option[base.TypeRep] = {
      val irType = c.typecheck(tq"$baseTree.CodeType[$tp]", c.TYPEmode) // TODO use internal.typeRef
      debug(s"Searching for implicit of type: $irType")
      c.inferImplicitValue(irType.tpe, withMacrosDisabled = true) match {
        case EmptyTree =>
        case impt =>
          debug(s"Found: "+showCode(impt))
          return Some(q"$impt.rep".asInstanceOf[base.TypeRep] // FIXME
            |> insertTypeEvidence)
      }
      // Manual fallback: inspect vals in all enclosing scopes (uses compiler internals).
      val vals = c.asInstanceOf[reflect.macros.runtime.Context].callsiteTyper.context.enclosingContextChain.flatMap {
        _.scope collect {
          case sym if sym.isVal
            && sym.isInitialized // If we look into the type of value being constructed (eg `val x = exp"42"`),
            // it will trigger a 'recursive value needs type' error
            //&& sym.name.toString == tp.typeSymbol.name.toString // used to compare names directly, but sometimes failed because they came from different cakes...
          =>
            sym -> sym.tpe.dealias // dealiasing so for example base.Predef.IR[_,_] ~> base.IR[_,_]
        }
      }.asInstanceOf[List[(TermSymbol, Type)]]
      //val PredefQTSym = symbolOf[QuasiBase#Predef[_ <: QuasiConfig]#CodeType[_]]
      //val PredefQTSym = typeOf[QuasiBase#Predef[_ <: QuasiConfig]#CodeType[_]].typeSymbol
      // ^ For some reson, these always return a symbol s where s.fullName == "squid.quasi.QuasiBase.CodeType"
      vals foreach {
        case (sym, TypeRef(tpbase, QTSym /*| PredefQTSym*/, tp0::Nil))
          if tpbase =:= baseTree.tpe
            && tp0 <:< tp && tp <:< tp0 // NOTE: for some godforsaken reason, sometimes in Scala this is not the same as `tp0 =:= tp`
          // For example in {{{ (typs:List[CodeType[_]]) map { case typ: CodeType[t] => dbg.implicitType[t] } }}}
        =>
          debug("FOUND QUOTED TYPE "+sym)
          return Some(q"$sym.rep".asInstanceOf[base.TypeRep] // FIXME
            |> insertTypeEvidence)
        //case (sym, TypeRef(tpbase, QTSym, tp::Nil)) =>
        //  debug(s"Note: $tpbase =/= ${baseTree.tpe}")
        //case (sym, TypeRef(tpbase, qtsym, tp::Nil)) =>
        //  debug(s"$qtsym ${qtsym.fullName} ${PredefQTSym.fullName} ${QTSym.fullName} ${qtsym == PredefQTSym}")
        case _ =>
      }
      debug(s"No implicit `$irType` found")
      None
    }

    /* This method is overridden to handle extracted types; for the record, this is an example log of how they get here:
        > Matching type squid.FoldTupleVarOptim.ta.Typ
        > !not a class: squid.FoldTupleVarOptim.ta.Typ
        > (squid.FoldTupleVarOptim.ta.Typ,class scala.reflect.internal.Types$AbstractNoArgsTypeRef,squid.FoldTupleVarOptim.ta.Typ)
        > (squid.FoldTupleVarOptim.ta.type,type Typ,List())
        > ( <: squid.quasi.QuasiBase.<extruded type>,List())
        > Matching type squid.FoldTupleVarOptim.ta.Typ
        > !not a class: squid.FoldTupleVarOptim.ta.Typ
        > (squid.FoldTupleVarOptim.ta.Typ,class scala.reflect.internal.Types$AbstractNoArgsTypeRef,squid.FoldTupleVarOptim.ta.Typ)
        > (squid.FoldTupleVarOptim.ta.type,type Typ,List())
        > ( <: squid.quasi.QuasiBase.<extruded type>,List())
        > Unknown type, falling back: squid.FoldTupleVarOptim.ta.Typ
        > Lifting unknown type squid.FoldTupleVarOptim.ta.Typ (squid.FoldTupleVarOptim.ta.Typ)
        > Searching for an `FoldTupleVarOptim.this.base.CodeType[squid.FoldTupleVarOptim.ta.Typ]` implicit
        > FOUND QUOTED TYPE value ta
    */
    override def unknownTypefallBack(tp: Type): base.TypeRep = {
      debug(s"Lifting unknown type $tp (${tp.widen.dealias})")
      // Reject types that leaked an extruded (scope-escaped) extracted type.
      if (tp.widen =:= ExtrudedType || tp.widen.contains(ExtrudedType.typeSymbol)) { // was: contains(symbolOf[QuasiBase.`<extruded type>`])
        debug(s"Detected widened type hole: ${tp.widen}")
        val purged = tp.toString.replaceAll(ExtrudedType.toString, "<extruded type>")
        throw EmbeddingException(s"Precise info for extracted type was lost, " +
          s"possibly because it was extruded from its defining scope " +
          s"or because the least upper bound was obtained from two extracted types, in: $purged")
      }
      lookForTypeImplicit(tp) foreach (return _)
      if (tp <:< ExtrudedType && !(tp <:< Null) // Note that: tp <:< Nothing ==> tp <:< Null so no need for the test
        || ExtractedType.unapply(tp).nonEmpty) {
        throw EmbeddingException(s"Could not find type evidence associated with extracted type `$tp`.")
      } else {
        // Last resort: embed the type as uninterpreted via a runtime TypeTag, if one
        // can be summoned at the call site.
        val tagType = c.typecheck(tq"_root_.scala.reflect.runtime.universe.TypeTag[$tp]", c.TYPEmode)
        c.inferImplicitValue(tagType.tpe) match {
          case EmptyTree =>
            if (tp.typeSymbol == symbolOf[Array[_]]) {
              // Note: srum.runtimeClass(sru.typeOf[Array[Int]]).toString == [I
              throw EmbeddingException.Unsupported("Arrays of unresolved type.")
            }
            throw EmbeddingException(s"Could not find type representation for: $tp\\n\\t" +
              s"consider providing a scala.reflect.runtime.universe.TypeTag implicit to embed it as uninterpreted.")
          case impt =>
            // We used to do the following, which helped to let-bind (using the caching mechanism of `ModularEmbedding`,
            // since this would call the base's `uninterpretedType`). However, now `uninterpretedType` is overridden and
            // inserted types are aggregated and named, so that caching can index on them, so this is no more necessary:
            /*
            // Note: not using `impt`! cf. below... (we just ensure it exists to give the better error message above)
            super.unknownTypefallBack(tp) // unnecessary, as it just generates a call to uninterpretedType without the implicit resolved
            // ^ actually useful to cache the tag!
            */
            q"$baseTree.uninterpretedType($impt)".asInstanceOf[base.TypeRep] |> insertTypeEvidence
            // Older:
            /* Does not help, as it bypasses the underlying base:
             * although the tree creation is cached, it will NOT get let-bound, and will be duplicated all over the place */
            //return typeCache.getOrElseUpdate(tp, q"$baseTree.uninterpretedType[$tp]($impt)".asInstanceOf[base.TypeRep])
        }
      }
    }
  }
}
| epfldata/squid | core/src/main/scala/squid/quasi/QuasiTypeEmbedder.scala | Scala | apache-2.0 | 8,541 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.docs
import java.util.concurrent.CompletionStage
import com.google.inject.AbstractModule
import com.lightbend.lagom.javadsl.api.transport.RequestHeader
import com.lightbend.lagom.javadsl.api.transport.ResponseHeader
import com.lightbend.lagom.javadsl.api.Service
import com.lightbend.lagom.javadsl.api.ServiceCall
import com.lightbend.lagom.javadsl.api.ServiceLocator
import com.lightbend.lagom.javadsl.server.HeaderServiceCall
import com.lightbend.lagom.javadsl.server.ServiceGuiceSupport
import org.scalatest.Matchers
import org.scalatest.WordSpecLike
import play.api.Application
import play.api.Configuration
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.inject.bind
import play.core.server.Server
import scala.concurrent.Future
import scala.concurrent.Promise
import scala.concurrent.ExecutionContext.Implicits.global
import scala.reflect.ClassTag
import scala.compat.java8.FutureConverters._
import com.lightbend.lagom.internal.testkit.TestServiceLocator
import docs.services.test.ServiceTestModule
import com.lightbend.lagom.internal.testkit.TestServiceLocatorPort
import com.lightbend.lagom.javadsl.persistence.jdbc.JdbcPersistenceModule
import com.typesafe.config.ConfigFactory
import play.api.db.DBModule
import play.api.db.HikariCPModule
/**
 * Test scaffolding for running a Lagom Java-DSL service inside a Play test server,
 * with a [[TestServiceLocator]] wired to the server's (late-bound) port.
 */
trait ServiceSupport extends WordSpecLike with Matchers {

  /**
   * Starts a test server around the given application and runs `block` against it.
   * The assigned port is fed to the TestServiceLocator via a Promise, since the port
   * is only known after the server starts.
   */
  def withServer[T](
      applicationBuilder: GuiceApplicationBuilder = new GuiceApplicationBuilder()
  )(block: Application => T): T = {
    val port = Promise[Int]()
    val testServiceLocatorPort = TestServiceLocatorPort(port.future)

    val application =
      applicationBuilder
        .configure("lagom.cluster.bootstrap.enabled" -> "off", "lagom.cluster.exit-jvm-when-system-terminated" -> "off")
        .bindings(bind[TestServiceLocatorPort].to(testServiceLocatorPort))
        .overrides(bind[ServiceLocator].to(classOf[TestServiceLocator]))
        .disable(classOf[ServiceTestModule]) // enabled in application.conf
        .build()

    Server.withApplication(application) { assignedPort =>
      port.success(assignedPort.value)
      block(application)
    }
  }

  // Continuation-style helper: captures the service type so the caller can supply a
  // block receiving both the Application and the resolved service instance.
  trait WithService[S] {
    def apply[T](block: Application => S => T): T
  }

  /** Binds service interface S to implementation I via Guice, then runs the server. */
  def withService[S <: Service: ClassTag, I <: S: ClassTag](
      applicationBuilder: GuiceApplicationBuilder = new GuiceApplicationBuilder()
  ): WithService[S] =
    withServiceImpl(applicationBuilder.bindings(new AbstractModule with ServiceGuiceSupport {
      override def configure(): Unit = {
        val serviceClass: Class[S] = implicitly[ClassTag[S]].runtimeClass.asInstanceOf[Class[S]]
        val implClass: Class[I] = implicitly[ClassTag[I]].runtimeClass.asInstanceOf[Class[I]]
        bindService(serviceClass, implClass)
      }
    }))

  /** Binds service interface S to a pre-built instance, then runs the server. */
  def withServiceInstance[S <: Service: ClassTag](
      impl: S,
      applicationBuilder: GuiceApplicationBuilder = new GuiceApplicationBuilder()
  ): WithService[S] =
    withServiceImpl(
      applicationBuilder
        .bindings(new AbstractModule with ServiceGuiceSupport {
          override def configure(): Unit = {
            bindService(implicitly[ClassTag[S]].runtimeClass.asInstanceOf[Class[S]], impl)
          }
        })
    )

  // Shared tail of withService/withServiceInstance: persistence/DB modules are
  // disabled because these tests do not need a database.
  private def withServiceImpl[S: ClassTag](
      applicationBuilder: GuiceApplicationBuilder = new GuiceApplicationBuilder()
  ): WithService[S] = new WithService[S] {
    def apply[T](block: Application => S => T) = {
      withServer(
        applicationBuilder
          .disable(classOf[JdbcPersistenceModule], classOf[HikariCPModule], classOf[DBModule])
      ) { app =>
        block(app)(app.injector.instanceOf[S])
      }
    }
  }

  /** Adapts a Scala `Request => Future[Response]` function to a Java-DSL ServiceCall. */
  def serviceCall[Request, Response](function: Request => Future[Response]): ServiceCall[Request, Response] = {
    new ServiceCall[Request, Response] {
      override def invoke(request: Request): CompletionStage[Response] = function(request).toJava
    }
  }

  /** Adapts a header-aware Scala function to a Java-DSL HeaderServiceCall. */
  def serviceCall[Request, Response](
      function: (RequestHeader, Request) => Future[(ResponseHeader, Response)]
  ): ServiceCall[Request, Response] = {
    new HeaderServiceCall[Request, Response] {
      override def invokeWithHeaders(
          header: RequestHeader,
          request: Request
      ): CompletionStage[akka.japi.Pair[ResponseHeader, Response]] =
        function(header, request).map(r => akka.japi.Pair(r._1, r._2)).toJava
    }
  }
}
| ignasi35/lagom | docs/src/test/scala/com/lightbend/lagom/docs/ServiceSupport.scala | Scala | apache-2.0 | 4,487 |
package com.greencatsoft.angularjs.core
import com.greencatsoft.angularjs.injectable
import scala.scalajs.js
/**
 * Use the \\$locationProvider to configure how the application deep linking paths are stored.
 *
 * This is a Scala.js facade over AngularJS's provider: all methods are `js.native`
 * stubs dispatched to the underlying JavaScript object.
 *
 * @see https://docs.angularjs.org/api/ng/provider/\\$locationProvider
 */
@js.native
@injectable("$locationProvider")
trait LocationProvider extends js.Object {

  /**
   * @param prefix Prefix for hash part (containing path and search)
   * @return current value if used as getter or itself (chaining) if used as setter
   *         (typed as String here; the chaining return value is the JS provider itself)
   */
  def hashPrefix(prefix: String = null): String = js.native

  /**
   * @param mode If boolean, sets html5Mode.enabled to value. If object, sets enabled, requireBase and rewriteLinks to
   *             respective values. Supported properties:
   * @return html5Mode object if used as getter or itself (chaining) if used as setter
   */
  def html5Mode(mode: Boolean): Html5ModeInfo = js.native

  /**
   * @param mode If boolean, sets html5Mode.enabled to value. If object, sets enabled, requireBase and rewriteLinks to
   *             respective values. Supported properties:
   * @return html5Mode object if used as getter or itself (chaining) if used as setter
   */
  def html5Mode(mode: Html5ModeInfo): Html5ModeInfo = js.native
}
/**
 * Settings object accepted/returned by `LocationProvider.html5Mode`. Scala.js facade:
 * properties map directly onto the underlying JavaScript object.
 */
@js.native
trait Html5ModeInfo extends js.Object {

  /**
   * (default: false) If true, will rely on history.pushState to change urls where supported. Will fall back to
   * hash-prefixed paths in browsers that do not support pushState.
   */
  var enabled: Boolean = js.native

  /**
   * (default: true) When html5Mode is enabled, specifies whether or not a tag is required to be present. If enabled
   * and requireBase are true, and a base tag is not present, an error will be thrown when \\$location is injected. See
   * the \\$location guide for more information
   */
  var requireBase: Boolean = js.native

  /**
   * (default: true) When html5Mode is enabled, enables/disables url rewriting for relative links.
   */
  var rewriteLinks: Boolean = js.native
}
object Html5ModeInfo {
  /**
   * Creates a plain JavaScript object carrying html5Mode settings. Defaults mirror
   * AngularJS's own defaults. Property assignment order is kept stable so the
   * resulting JS object's key order is deterministic.
   */
  def apply(enabled: Boolean = false, requireBase: Boolean = true, rewriteLinks: Boolean = true): Html5ModeInfo = {
    val settings = (new js.Object).asInstanceOf[Html5ModeInfo]
    settings.enabled = enabled
    settings.requireBase = requireBase
    settings.rewriteLinks = rewriteLinks
    settings
  }
}
/**
 * Scala.js facade for AngularJS's \\$location service: parses the browser URL and makes
 * it available (and mutable, for the setter variants) to the application.
 */
@js.native
@injectable("$location")
trait Location extends js.Object {

  // Full url, e.g. "http://host/path?query#hash".
  def absUrl(): String = js.native

  // Getter for the url path+search+hash portion.
  def url(): String = js.native

  // Setter variant; returns itself for chaining.
  def url(url: String, replace: String = null): Location = js.native

  def protocol(): String = js.native
  def host(): String = js.native
  def port(): Int = js.native

  // Getter / chaining setter for the path portion.
  def path(): String = js.native
  def path(path: String): Location = js.native

  // Search (query string) as an object of parameters.
  def search(): js.Object = js.native

  // TODO: refine argument types?
  def search(search: js.Any, paramValue: js.Any = null): js.Object = js.native

  // Getter (or setter when an argument is given) for the hash fragment.
  def hash(hash: String = null): String = js.native

  // Replaces the current history record instead of pushing a new one on next sync.
  def replace(): Unit = js.native
}
| greencatsoft/scalajs-angular | src/main/scala/com/greencatsoft/angularjs/core/Location.scala | Scala | apache-2.0 | 3,002 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.eval
package internal
import cats.effect.CancelToken
import monix.catnap.CancelableF
import monix.execution.{Cancelable, Scheduler}
import monix.execution.atomic.Atomic
import scala.annotation.tailrec
/**
 * A forward reference to a cancelation token: the token can be assigned exactly once
 * (via `:=`), possibly after `cancel` has already been requested — in which case the
 * assigned token is canceled immediately. Implemented as a lock-free state machine
 * over an atomic reference (states defined in the companion object).
 */
private[eval] final class TaskConnectionRef extends CancelableF[Task] {
  import TaskConnectionRef._

  @throws(classOf[IllegalStateException])
  def `:=`(token: CancelToken[Task])(implicit s: Scheduler): Unit =
    unsafeSet(token)

  @throws(classOf[IllegalStateException])
  def `:=`(cancelable: Cancelable)(implicit s: Scheduler): Unit =
    unsafeSet(cancelable)

  @throws(classOf[IllegalStateException])
  def `:=`(conn: CancelableF[Task])(implicit s: Scheduler): Unit =
    unsafeSet(conn.cancel)

  // Single-assignment logic:
  //  - Empty -> IsActive(ref) on successful CAS (the normal path);
  //  - IsEmptyCanceled (cancel arrived first): transition to IsCanceled and cancel
  //    `ref` right away; if another thread raced us out of IsEmptyCanceled, this is a
  //    second assignment, so cancel `ref` and raise;
  //  - IsCanceled / IsActive: a second assignment — cancel `ref` and raise;
  //  - Empty (CAS lost to a concurrent transition that reverted?): retry.
  @tailrec
  private def unsafeSet(ref: AnyRef /* CancelToken[Task] | CancelableF[Task] | Cancelable */ )(
      implicit s: Scheduler): Unit = {
    if (!state.compareAndSet(Empty, IsActive(ref))) {
      state.get() match {
        case IsEmptyCanceled =>
          state.getAndSet(IsCanceled) match {
            case IsEmptyCanceled =>
              UnsafeCancelUtils.triggerCancel(ref)
            case _ =>
              UnsafeCancelUtils.triggerCancel(ref)
              raiseError()
          }
        case IsCanceled | IsActive(_) =>
          UnsafeCancelUtils.triggerCancel(ref)
          raiseError()
        case Empty =>
          // $COVERAGE-OFF$
          unsafeSet(ref)
          // $COVERAGE-ON$
      }
    }
  }

  // Lazily-evaluated cancel token: on evaluation, either cancels the assigned
  // reference (IsActive), records that cancelation happened before assignment
  // (Empty -> IsEmptyCanceled), or is a no-op if already canceled. Idempotent.
  val cancel: CancelToken[Task] = {
    @tailrec def loop(): CancelToken[Task] =
      state.get() match {
        case IsCanceled | IsEmptyCanceled =>
          Task.unit
        case IsActive(task) =>
          state.set(IsCanceled)
          UnsafeCancelUtils.unsafeCancel(task)
        case Empty =>
          if (state.compareAndSet(Empty, IsEmptyCanceled)) {
            Task.unit
          } else {
            // $COVERAGE-OFF$
            loop() // retry
            // $COVERAGE-ON$
          }
      }
    Task.suspend(loop())
  }

  private def raiseError(): Nothing = {
    // NOTE(review): message mentions "SingleAssignmentCancelable" but this class is
    // TaskConnectionRef — looks copied from a sibling implementation.
    throw new IllegalStateException(
      "Cannot assign to SingleAssignmentCancelable, " +
        "as it was already assigned once")
  }

  // Current state of the single-assignment machine; starts Empty.
  private[this] val state = Atomic(Empty: State)
}
private[eval] object TaskConnectionRef {
  /**
   * Returns a new `TaskForwardConnection` reference.
   */
  def apply(): TaskConnectionRef = new TaskConnectionRef()

  // States of the single-assignment machine.
  private sealed trait State
  // No token assigned yet, not canceled.
  private case object Empty extends State
  // A token has been assigned and not yet canceled.
  private final case class IsActive(token: AnyRef /* CancelToken[Task] | CancelableF[Task] | Cancelable */ )
    extends State
  // Canceled after a token was assigned (or after a late assignment was rejected).
  private case object IsCanceled extends State
  // Canceled before any token was assigned; a later assignment cancels immediately.
  private case object IsEmptyCanceled extends State
}
| alexandru/monifu | monix-eval/shared/src/main/scala/monix/eval/internal/TaskConnectionRef.scala | Scala | apache-2.0 | 3,374 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.orc
import java.net.URI
import java.util.Properties
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.hadoop.hive.ql.io.orc._
import org.apache.hadoop.hive.serde2.objectinspector.{SettableStructObjectInspector, StructObjectInspector}
import org.apache.hadoop.hive.serde2.typeinfo.{StructTypeInfo, TypeInfoUtils}
import org.apache.hadoop.io.{NullWritable, Writable}
import org.apache.hadoop.mapred.{InputFormat => MapRedInputFormat, JobConf, OutputFormat => MapRedOutputFormat, RecordWriter, Reporter}
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat, FileSplit}
import org.apache.spark.TaskContext
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.{HadoopRDD, RDD}
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.execution.command.CreateDataSourceTableUtils
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.hive.{HiveInspectors, HiveShim}
import org.apache.spark.sql.sources.{Filter, _}
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.SerializableConfiguration
/**
 * [[FileFormat]] for reading ORC files. If this is moved or renamed, please update
 * [[DataSource]]'s backwardCompatibilityMap.
 */
class OrcFileFormat
  extends FileFormat with DataSourceRegister with Serializable {

  override def shortName(): String = "orc"

  override def toString: String = "ORC"

  // Schema is read from the ORC footers of the given files; returns None if no file
  // carries a usable schema (e.g. all files are empty).
  override def inferSchema(
      sparkSession: SparkSession,
      options: Map[String, String],
      files: Seq[FileStatus]): Option[StructType] = {
    OrcFileOperator.readSchema(
      files.map(_.getPath.toUri.toString),
      Some(sparkSession.sessionState.newHadoopConf())
    )
  }

  // Configures the Hadoop job for writing ORC (compression codec + output format)
  // and returns a factory producing per-task OrcOutputWriters.
  override def prepareWrite(
      sparkSession: SparkSession,
      job: Job,
      options: Map[String, String],
      dataSchema: StructType): OutputWriterFactory = {
    val orcOptions = new OrcOptions(options)

    val configuration = job.getConfiguration

    configuration.set(OrcRelation.ORC_COMPRESSION, orcOptions.compressionCodec)
    configuration match {
      case conf: JobConf =>
        conf.setOutputFormat(classOf[OrcOutputFormat])
      case conf =>
        conf.setClass(
          "mapred.output.format.class",
          classOf[OrcOutputFormat],
          classOf[MapRedOutputFormat[_, _]])
    }

    new OutputWriterFactory {
      override def newInstance(
          path: String,
          bucketId: Option[Int],
          dataSchema: StructType,
          context: TaskAttemptContext): OutputWriter = {
        new OrcOutputWriter(path, bucketId, dataSchema, context)
      }
    }
  }

  // ORC files can always be split for parallel reads.
  override def isSplitable(
      sparkSession: SparkSession,
      options: Map[String, String],
      path: Path): Boolean = {
    true
  }

  /**
   * Builds the per-file reader function. Pushes supported filters down into ORC's
   * SearchArgument (when orcFilterPushDown is enabled), prunes columns to
   * `requiredSchema`, and unwraps Hive `OrcStruct`s into Spark internal rows.
   */
  override def buildReader(
      sparkSession: SparkSession,
      dataSchema: StructType,
      partitionSchema: StructType,
      requiredSchema: StructType,
      filters: Seq[Filter],
      options: Map[String, String],
      hadoopConf: Configuration): (PartitionedFile) => Iterator[InternalRow] = {
    if (sparkSession.sessionState.conf.orcFilterPushDown) {
      // Sets pushed predicates
      OrcFilters.createFilter(requiredSchema, filters.toArray).foreach { f =>
        hadoopConf.set(OrcTableScan.SARG_PUSHDOWN, f.toKryo)
        hadoopConf.setBoolean(ConfVars.HIVEOPTINDEXFILTER.varname, true)
      }
    }

    val broadcastedHadoopConf =
      sparkSession.sparkContext.broadcast(new SerializableConfiguration(hadoopConf))

    (file: PartitionedFile) => {
      val conf = broadcastedHadoopConf.value.value

      // SPARK-8501: Empty ORC files always have an empty schema stored in their footer. In this
      // case, `OrcFileOperator.readSchema` returns `None`, and we can't read the underlying file
      // using the given physical schema. Instead, we simply return an empty iterator.
      val maybePhysicalSchema = OrcFileOperator.readSchema(Seq(file.filePath), Some(conf))
      if (maybePhysicalSchema.isEmpty) {
        Iterator.empty
      } else {
        val physicalSchema = maybePhysicalSchema.get
        OrcRelation.setRequiredColumns(conf, physicalSchema, requiredSchema)

        val orcRecordReader = {
          val job = Job.getInstance(conf)
          FileInputFormat.setInputPaths(job, file.filePath)

          val fileSplit = new FileSplit(
            new Path(new URI(file.filePath)), file.start, file.length, Array.empty
          )
          // Custom OrcRecordReader is used to get
          // ObjectInspector during recordReader creation itself and can
          // avoid NameNode call in unwrapOrcStructs per file.
          // Specifically would be helpful for partitioned datasets.
          val orcReader = OrcFile.createReader(
            new Path(new URI(file.filePath)), OrcFile.readerOptions(conf))
          new SparkOrcNewRecordReader(orcReader, conf, fileSplit.getStart, fileSplit.getLength)
        }

        val recordsIterator = new RecordReaderIterator[OrcStruct](orcRecordReader)
        // Close the reader when the task finishes, even if the iterator is not drained.
        Option(TaskContext.get()).foreach(_.addTaskCompletionListener(_ => recordsIterator.close()))

        // Unwraps `OrcStruct`s to `UnsafeRow`s
        OrcRelation.unwrapOrcStructs(
          conf,
          requiredSchema,
          Some(orcRecordReader.getObjectInspector.asInstanceOf[StructObjectInspector]),
          recordsIterator)
      }
    }
  }
}
private[orc] class OrcSerializer(dataSchema: StructType, conf: Configuration)
  extends HiveInspectors {

  // Settable object inspector derived from the relation schema; settable so a
  // single struct instance can be refilled for every row.
  private[this] val structOI = {
    val typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(dataSchema.catalogString)
    OrcStruct.createObjectInspector(typeInfo.asInstanceOf[StructTypeInfo])
      .asInstanceOf[SettableStructObjectInspector]
  }

  // Reused for every record to avoid allocating a fresh OrcStruct per row.
  private[this] val cachedOrcStruct = structOI.create().asInstanceOf[OrcStruct]

  // Hive's ORC serde, initialized with the column names and types of the schema.
  private[this] val serializer = {
    val table = new Properties()
    table.setProperty("columns", dataSchema.fieldNames.mkString(","))
    table.setProperty("columns.types", dataSchema.map(_.dataType.catalogString).mkString(":"))
    val serde = new OrcSerde
    serde.initialize(conf, table)
    serde
  }

  /** Serializes one Catalyst row into a Hadoop `Writable` via the cached struct. */
  def serialize(row: InternalRow): Writable = {
    wrapOrcStruct(cachedOrcStruct, structOI, row)
    serializer.serialize(cachedOrcStruct, structOI)
  }

  // Copies every field of `row` into `struct`, converting each Catalyst value
  // to the Hive representation expected by the field's object inspector.
  private[this] def wrapOrcStruct(
      struct: OrcStruct,
      oi: SettableStructObjectInspector,
      row: InternalRow): Unit = {
    val fieldRefs = oi.getAllStructFieldRefs
    val fieldCount = fieldRefs.size
    var idx = 0
    while (idx < fieldCount) {
      val fieldRef = fieldRefs.get(idx)
      val fieldType = dataSchema(idx).dataType
      oi.setStructFieldData(
        struct,
        fieldRef,
        wrap(row.get(idx, fieldType), fieldRef.getFieldObjectInspector, fieldType))
      idx += 1
    }
  }
}
/**
 * [[OutputWriter]] that writes Catalyst rows to a single ORC file through
 * Hive's `OrcOutputFormat`. The underlying record writer is created lazily so
 * that no output file is materialized for tasks that write zero rows.
 */
private[orc] class OrcOutputWriter(
    path: String,
    bucketId: Option[Int],
    dataSchema: StructType,
    context: TaskAttemptContext)
  extends OutputWriter {

  private[this] val conf = context.getConfiguration

  private[this] val serializer = new OrcSerializer(dataSchema, conf)

  // `OrcRecordWriter.close()` creates an empty file if no rows are written at all. We use this
  // flag to decide whether `OrcRecordWriter.close()` needs to be called.
  private var recordWriterInstantiated = false

  // Lazily created on first write; see `recordWriterInstantiated` above.
  private lazy val recordWriter: RecordWriter[NullWritable, Writable] = {
    recordWriterInstantiated = true
    val uniqueWriteJobId = conf.get(CreateDataSourceTableUtils.DATASOURCE_WRITEJOBUUID)
    val taskAttemptId = context.getTaskAttemptID
    val partition = taskAttemptId.getTaskID.getId
    val bucketString = bucketId.map(BucketingUtils.bucketIdToString).getOrElse("")
    val compressionExtension = {
      val name = conf.get(OrcRelation.ORC_COMPRESSION)
      OrcRelation.extensionsForCompressionCodecNames.getOrElse(name, "")
    }
    // It has the `.orc` extension at the end because (de)compression tools
    // such as gunzip would not be able to decompress this as the compression
    // is not applied on this whole file but on each "stream" in ORC format.
    val filename = f"part-r-$partition%05d-$uniqueWriteJobId$bucketString$compressionExtension.orc"
    new OrcOutputFormat().getRecordWriter(
      new Path(path, filename).getFileSystem(conf),
      conf.asInstanceOf[JobConf],
      new Path(path, filename).toString,
      Reporter.NULL
    ).asInstanceOf[RecordWriter[NullWritable, Writable]]
  }

  // Rows must go through `writeInternal`; the external Row API is unsupported here.
  override def write(row: Row): Unit =
    throw new UnsupportedOperationException("call writeInternal")

  override protected[sql] def writeInternal(row: InternalRow): Unit = {
    recordWriter.write(NullWritable.get(), serializer.serialize(row))
  }

  // Only close the writer if it was actually created, so that tasks which
  // wrote no rows do not leave behind empty ORC files.
  override def close(): Unit = {
    if (recordWriterInstantiated) {
      recordWriter.close(Reporter.NULL)
    }
  }
}
/**
 * Scans the given ORC input paths and produces the requested `attributes` as
 * an `RDD[InternalRow]`, optionally pushing filters down to the ORC reader.
 */
private[orc] case class OrcTableScan(
    @transient sparkSession: SparkSession,
    attributes: Seq[Attribute],
    filters: Array[Filter],
    @transient inputPaths: Seq[FileStatus])
  extends Logging
  with HiveInspectors {

  /**
   * Runs the scan. Configures predicate push-down and required columns on the
   * job configuration, then reads the files with Hive's `OrcInputFormat` and
   * unwraps the resulting `OrcStruct`s into Catalyst rows.
   */
  def execute(): RDD[InternalRow] = {
    val job = Job.getInstance(sparkSession.sessionState.newHadoopConf())
    val conf = job.getConfiguration
    // Figure out the actual schema from the ORC source (without partition columns) so that we
    // can pick the correct ordinals. Note that this assumes that all files have the same schema.
    val orcFormat = new OrcFileFormat
    val dataSchema =
      orcFormat
        .inferSchema(sparkSession, Map.empty, inputPaths)
        .getOrElse(sys.error("Failed to read schema from target ORC files."))
    // Tries to push down filters if ORC filter push-down is enabled
    if (sparkSession.sessionState.conf.orcFilterPushDown) {
      OrcFilters.createFilter(dataSchema, filters).foreach { f =>
        conf.set(OrcTableScan.SARG_PUSHDOWN, f.toKryo)
        conf.setBoolean(ConfVars.HIVEOPTINDEXFILTER.varname, true)
      }
    }
    // Sets requested columns
    OrcRelation.setRequiredColumns(conf, dataSchema, StructType.fromAttributes(attributes))
    if (inputPaths.isEmpty) {
      // The input paths were probably all pruned away, so return an empty RDD.
      sparkSession.sparkContext.emptyRDD[InternalRow]
    } else {
      FileInputFormat.setInputPaths(job, inputPaths.map(_.getPath): _*)
      val inputFormatClass =
        classOf[OrcInputFormat]
          .asInstanceOf[Class[_ <: MapRedInputFormat[NullWritable, Writable]]]
      val rdd = sparkSession.sparkContext.hadoopRDD(
        conf.asInstanceOf[JobConf],
        inputFormatClass,
        classOf[NullWritable],
        classOf[Writable]
      ).asInstanceOf[HadoopRDD[NullWritable, Writable]]
      // Hadoop `Configuration` is not serializable; wrap it so the closure
      // below can be shipped to executors.
      val wrappedConf = new SerializableConfiguration(conf)
      rdd.mapPartitionsWithInputSplit { case (split: OrcSplit, iterator) =>
        val writableIterator = iterator.map(_._2)
        // Fixed: use the serializable wrapper instead of capturing the
        // non-serializable `conf` from the driver inside this executor-side
        // closure, which would fail task serialization.
        val maybeStructOI =
          OrcFileOperator.getObjectInspector(split.getPath.toString, Some(wrappedConf.value))
        OrcRelation.unwrapOrcStructs(
          wrappedConf.value,
          StructType.fromAttributes(attributes),
          maybeStructOI,
          writableIterator
        )
      }
    }
  }
}
private[orc] object OrcTableScan {
  // This constant duplicates `OrcInputFormat.SARG_PUSHDOWN`, which is unfortunately not public.
  // The Kryo-serialized search argument (pushed predicate) is stored in the
  // Hadoop configuration under this key.
  private[orc] val SARG_PUSHDOWN = "sarg.pushdown"
}
private[orc] object OrcRelation extends HiveInspectors {
  // The references of Hive's classes will be minimized.
  val ORC_COMPRESSION = "orc.compress"

  // The extensions for ORC compression codecs
  val extensionsForCompressionCodecNames = Map(
    "NONE" -> "",
    "SNAPPY" -> ".snappy",
    "ZLIB" -> ".zlib",
    "LZO" -> ".lzo")

  /**
   * Converts an iterator of Hive `Writable`s into Catalyst `InternalRow`s for
   * the given `dataSchema`. Returns an empty iterator when no object
   * inspector is available (e.g. the file was empty).
   */
  def unwrapOrcStructs(
      conf: Configuration,
      dataSchema: StructType,
      maybeStructOI: Option[StructObjectInspector],
      iterator: Iterator[Writable]): Iterator[InternalRow] = {
    val deserializer = new OrcSerde
    // A single mutable row is reused across records; `unsafeProjection` copies
    // its contents into the UnsafeRow that is actually handed to the caller.
    val mutableRow = new SpecificMutableRow(dataSchema.map(_.dataType))
    val unsafeProjection = UnsafeProjection.create(dataSchema)
    def unwrap(oi: StructObjectInspector): Iterator[InternalRow] = {
      // Resolve each schema field to its struct field reference and target ordinal.
      val (fieldRefs, fieldOrdinals) = dataSchema.zipWithIndex.map {
        case (field, ordinal) => oi.getStructFieldRef(field.name) -> ordinal
      }.unzip
      val unwrappers = fieldRefs.map(unwrapperFor)
      iterator.map { value =>
        val raw = deserializer.deserialize(value)
        var i = 0
        val length = fieldRefs.length
        while (i < length) {
          val fieldValue = oi.getStructFieldData(raw, fieldRefs(i))
          if (fieldValue == null) {
            mutableRow.setNullAt(fieldOrdinals(i))
          } else {
            unwrappers(i)(fieldValue, mutableRow, fieldOrdinals(i))
          }
          i += 1
        }
        unsafeProjection(mutableRow)
      }
    }
    maybeStructOI.map(unwrap).getOrElse(Iterator.empty)
  }

  /**
   * Tells the ORC reader which columns to materialize, passing the requested
   * column ids and names sorted by their position in the physical schema.
   */
  def setRequiredColumns(
      conf: Configuration, physicalSchema: StructType, requestedSchema: StructType): Unit = {
    val ids = requestedSchema.map(a => physicalSchema.fieldIndex(a.name): Integer)
    val (sortedIDs, sortedNames) = ids.zip(requestedSchema.fieldNames).sorted.unzip
    HiveShim.appendReadColumns(conf, sortedIDs, sortedNames)
  }
}
| gioenn/xSpark | sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala | Scala | apache-2.0 | 14,303 |
package processes.freeMonads.scalaz
import scala.language.higherKinds
import scala.language.implicitConversions
import processes.freeMonads.HttpResultImplementation
import scalaz.Coyoneda
import scalaz.Free
import scalaz.Free.FreeC
import scalaz.Monad
/**
 * Wires up the scalaz machinery needed to run programs expressed as a free
 * monad over a single algebra: lifting, the free Monad instance, and a Monad
 * instance for `HttpResult`.
 */
trait SingleMachinery extends HttpResultImplementation {

  /** Lifts any `F[A]` into the free monad over `F` (via Coyoneda). */
  implicit def toFree[F[_], A](fa: F[A]): FreeC[F, A] =
    Free.liftFC(fa)

  // Type-lambda encoding: `Partial[F]#Free` is `FreeC[F, _]` with `F` fixed,
  // usable wherever a unary type constructor is expected.
  type Partial[F[_]] = {
    type Free[A] = scalaz.Free.FreeC[F, A]
  }

  /**
   * Monad instance for the free monad over an arbitrary `F`.
   * The explicit result type was added: implicit definitions should always
   * have explicit types so implicit resolution stays predictable (newer Scala
   * versions require it).
   */
  implicit def freeMonad[F[_]]: Monad[Partial[F]#Free] = new Monad[Partial[F]#Free] {
    def point[A](a: => A): Free.FreeC[F, A] =
      Free.point[Coyoneda.CoyonedaF[F]#A, A](a)
    def bind[A, B](fa: Free.FreeC[F, A])(f: A => Free.FreeC[F, B]) =
      fa.flatMap(f)
  }

  /** Monad instance for `HttpResult`, delegating to its own combinators. */
  implicit val httpResultMonad: Monad[HttpResult] = new Monad[HttpResult] {
    def point[A](a: => A) = HttpResult(a)
    def bind[A, B](fa: HttpResult[A])(f: A => HttpResult[B]) =
      HttpResult.flatMap(fa)(f)
  }
}
package net.atos.cis.web.endpoint
import scala.xml.Elem
import org.eclipse.jetty.server.Request
import org.eclipse.jetty.server.handler.AbstractHandler
import javax.servlet.http.HttpServletRequest
import javax.servlet.http.HttpServletResponse
import net.atos.cis.Cis
/**
 * Jetty handler exposing CIS person-details lookups: POST submits a customer
 * id from the HTML form, GET "/" serves the form, and any other GET path is
 * treated as a service call with the NINO embedded in the path.
 */
class CISHandler extends AbstractHandler {

  private val MethodPost: String = "POST"
  private val MethodGet: String = "GET"
  private val PageTitle: String = "CIS Details"

  /** Dispatches the request by HTTP method and (for GET) by request path. */
  def handle(target: String, baseRequest: Request, request: HttpServletRequest, response: HttpServletResponse) {
    request.getMethod match {
      case MethodPost =>
        response.setContentType("application/xml;charset=utf-8")
        response.setStatus(HttpServletResponse.SC_OK)
        response.getWriter().println(handlePost(request.getParameter("custId")))
      case MethodGet =>
        response.setStatus(HttpServletResponse.SC_OK)
        val uri = request.getRequestURI()
        if (uri == "/") {
          // Root path: serve the HTML lookup form.
          response.setContentType("text/html;charset=utf-8")
          response.getWriter().println(handlePageGet)
        } else {
          // Any other path: strip the leading slash and treat it as a NINO.
          response.setContentType("application/xml;charset=utf-8")
          response.getWriter().println(handleServiceGet(uri.substring(1)))
        }
      case _ =>
        response.setContentType("text/html;charset=utf-8")
        response.getWriter().println(handleInvalidRequest)
    }
    baseRequest.setHandled(true)
  }

  /** Renders the form page with an error message for unsupported HTTP methods. */
  def handleInvalidRequest = getCisView("Invalid Http Request Type")

  /** Renders the plain form page. */
  def handlePageGet = getCisView("")

  /** Looks up person details for the NINO taken from the request path. */
  def handleServiceGet(nino: String) = Cis().ni2PersonDetails(nino)

  /** Looks up person details for the NINO submitted via the form. */
  def handlePost(nino: String) = Cis().ni2PersonDetails(nino)

  /** Builds the HTML page, appending `message` in bold when it is non-empty. */
  def getCisView(message: String): Elem =
    <html>
      <head>
        <title>{ PageTitle }</title>
      </head>
      <body>
        <h1>{ PageTitle }</h1>
        <form action="/" method="POST">
          <table>
            <tr>
              <td>
                <input type="text" name="custId"></input>
              </td>
              <td>
                <input type="submit" value="Submit"/>
              </td>
            </tr>
          </table>
        </form>
        { if (message.length() > 0) <b>{ message }</b> }
      </body>
    </html>
}
package scala.meta.interactive
import java.io.File
import java.net.URLClassLoader
import scala.meta.internal.SemanticdbPlugin
import scala.meta.internal.semanticdb.DatabaseOps
import scala.reflect.io.VirtualDirectory
import scala.tools.nsc.Settings
import scala.tools.nsc.interactive.Global
import scala.tools.nsc.interactive.Response
import scala.tools.nsc.reporters.StoreReporter
import scala.meta.semanticdb.Document
/**
 * Helpers for building semanticdb [[Document]]s from code snippets using the
 * scalac interactive (presentation) compiler.
 */
object InteractiveSemanticdb {

  /** Construct a presentation compiler on this class's classpath with no extra flags. */
  def newCompiler(): Global =
    newCompiler(thisClasspath, Nil)

  /** Construct a presentation compiler on this class's classpath with the given flags. */
  def newCompiler(scalacOptions: List[String]): Global =
    newCompiler(thisClasspath, scalacOptions)

  /** Construct new presentation compiler with given classpath and scalac flags. */
  def newCompiler(classpath: String, scalacOptions: List[String]): Global = {
    // Compile into an in-memory directory so nothing is written to disk.
    val vd = new VirtualDirectory("(memory)", None)
    val settings = new Settings
    settings.outputDirs.setSingleOutput(vd)
    settings.classpath.value = classpath
    if (classpath.isEmpty) {
      // Fall back to the JVM classpath when none is supplied.
      settings.usejavacp.value = true
    }
    // Allow presentation-compiler entry points to be called from any thread.
    settings.processArgumentString(
      ("-Ypresentation-any-thread" :: scalacOptions).mkString(" ")
    )
    val compiler = new Global(settings, new StoreReporter)
    new SemanticdbPlugin(compiler) // hijack reporter/analyzer
    compiler
  }

  /** Build a semanticdb document with a default filename and a 10s typechecking timeout. */
  def toDocument(compiler: Global, code: String): Document =
    toDocument(compiler, code, "interactive.scala", 10000)

  /**
   * Build semanticdb document from this snippet of code.
   *
   * @param compiler an instance of scalac interactive global.
   * @param code the code to be compiled.
   * @param filename the name of the source file.
   * @param timeout max number of milliseconds to allow the presentation compiler
   *                to typecheck this file.
   * @throws Exception note that this method can fail in many different ways
   *                   with exceptions, including but not limited to tokenize/parse/type
   *                   errors.
   */
  def toDocument(compiler: Global, code: String, filename: String, timeout: Long): Document = {
    val unit = addCompilationUnit(compiler, code, filename)
    // reload seems to be necessary before askLoadedType.
    ask[Unit](r => compiler.askReload(unit.source :: Nil, r)).get
    val compiledTree =
      ask[compiler.Tree](r => compiler.askLoadedTyped(unit.source, r))
        .get(timeout)
    val tree = compiledTree match {
      case Some(Left(t)) => t
      case Some(Right(ex)) => throw ex
      case None => throw new IllegalArgumentException("Presentation compiler timed out")
    }
    // Path-dependent wiring: DatabaseOps must be bound to this exact compiler
    // instance (`compiler.type`) for the cast below to be sound.
    lazy val databaseOps: DatabaseOps {
      val global: compiler.type
    } = new DatabaseOps {
      val global: compiler.type = compiler
    }
    import databaseOps._
    unit.body = tree
    val document = unit.asInstanceOf[databaseOps.global.CompilationUnit].toDocument
    document
  }

  /**
   * Inserts "_CURSOR_" at given offset.
   *
   * _CURSOR_ hints to the presentation compiler that this file is being edited
   * with the cursor at that offset. This hint helps completions amongst
   * other things.
   */
  def addCursor(code: String, offset: Int): String = {
    new StringBuilder(code.length + "_CURSOR_".length)
      .append(code.substring(0, offset))
      .append("_CURSOR_")
      .append(code.substring(offset))
      .toString()
  }

  /** Create new compilation unit from given code. */
  def addCompilationUnit(
      global: Global,
      code: String,
      filename: String
  ): global.RichCompilationUnit = {
    val unit = global.newCompilationUnit(code, filename)
    val richUnit = new global.RichCompilationUnit(unit.source)
    // Register the unit so subsequent ask* calls can find it by source file.
    global.unitOfFile(richUnit.source.file) = richUnit
    richUnit
  }

  // Classpath of the classloader that loaded this class, joined with the
  // platform path separator. Fails when not loaded by a URLClassLoader
  // (e.g. on JDK 9+ application classloaders).
  private def thisClasspath: String = this.getClass.getClassLoader match {
    case url: URLClassLoader =>
      url.getURLs.map(_.toURI.getPath).mkString(File.pathSeparator)
    case els =>
      throw new IllegalStateException(s"Expected URLClassloader, got $els")
  }

  // Small helper: run a presentation-compiler callback and return its Response.
  private def ask[A](f: Response[A] => Unit): Response[A] = {
    val r = new Response[A]
    f(r)
    r
  }
}
| DavidDudson/scalameta | scalameta/semanticdb-scalac-core/src/main/scala/scala/meta/interactive/InteractiveSemanticdb.scala | Scala | bsd-3-clause | 4,088 |
package lila
package object explorer extends PackageObject {
  // Maximum number of plies (half-moves) of a game considered by the explorer.
  // NOTE(review): the consumers of this constant are outside this file —
  // confirm against usage.
  val maxPlies = 50
}
| luanlv/lila | modules/explorer/src/main/package.scala | Scala | mit | 85 |
/*
* Copyright 2020 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.pubsub.syntax
import com.spotify.scio.values.SCollection
import com.spotify.scio.coders.BeamCoders
import com.spotify.scio.pubsub.PubsubIO
import com.spotify.scio.io.ClosedTap
import scala.reflect.ClassTag
/** Deprecated Pub/Sub write syntax for [[SCollection]]. */
trait SCollectionSyntax {

  implicit class SCollectionPubsubOps[T](private val coll: SCollection[T]) {
    // Bring the SCollection's implicit Coder[T] into scope for the writes below.
    import coll.coder

    /**
     * Save this SCollection as a Pub/Sub topic.
     * @group output
     */
    @deprecated(
      "This method has been deprecated. Use one of the following IOs instead:\\n" +
        " - PubsubIO.string\\n" +
        " - PubsubIO.avro\\n" +
        " - PubsubIO.proto\\n" +
        " - PubsubIO.pubsub\\n" +
        " - PubsubIO.coder\\n" +
        "\\n" +
        "For example:\\n" +
        " coll.write(PubsubIO.string(sub, idAttribute, timestampAttribute))(\\n" +
        " PubsubIO.WriteParam()\\n" +
        " )",
      since = "0.10.0"
    )
    def saveAsPubsub(
      topic: String,
      idAttribute: String = null,
      timestampAttribute: String = null,
      maxBatchSize: Option[Int] = None,
      maxBatchBytesSize: Option[Int] = None
    )(implicit ct: ClassTag[T]): ClosedTap[Nothing] = {
      val io = PubsubIO[T](topic, idAttribute, timestampAttribute)
      coll.write(io)(PubsubIO.WriteParam(maxBatchSize, maxBatchBytesSize))
    }

    /**
     * Save this SCollection as a Pub/Sub topic using the given map as message attributes.
     * @group output
     */
    @deprecated(
      "This method has been deprecated. Use PubsubIO.withAttributes instead\\n" +
        "\\n" +
        "For example:\\n" +
        " coll.write(PubsubIO.withAttributes(sub, idAttribute, timestampAttribute))(\\n" +
        " PubsubIO.WriteParam()\\n" +
        " )",
      since = "0.10.0"
    )
    def saveAsPubsubWithAttributes[V: ClassTag](
      topic: String,
      idAttribute: String = null,
      timestampAttribute: String = null,
      maxBatchSize: Option[Int] = None,
      maxBatchBytesSize: Option[Int] = None
    )(implicit ev: T <:< (V, Map[String, String])): ClosedTap[Nothing] = {
      // Derive the value coder from the (V, attributes) tuple coder of `coll`.
      implicit val vCoder =
        BeamCoders.getTupleCoders(coll.covary_[(V, Map[String, String])])._1
      val io = PubsubIO.withAttributes[V](topic, idAttribute, timestampAttribute)
      coll
        .covary_[(V, Map[String, String])]
        .write(io)(PubsubIO.WriteParam(maxBatchSize, maxBatchBytesSize))
    }
  }
}
| spotify/scio | scio-google-cloud-platform/src/main/scala/com/spotify/scio/pubsub/syntax/SCollectionSyntax.scala | Scala | apache-2.0 | 3,007 |
package services
import java.util.UUID
import javax.inject.Singleton
import com.foomoo.abc.service.SubsequenceMatchService
import com.foomoo.abc.service.SubsequenceMatchService.NoteSequence
import com.foomoo.abc.tune.AbcTune
import play.api.Logger
import scala.collection.mutable
/**
 * Storage for note sequences in tunes
 */
@Singleton
class AbcTuneSequenceService extends AbcTuneProcessor {

  // Forward index: each note sequence -> ids of the tune records containing it.
  // NOTE(review): these mutable maps are not synchronized — confirm this
  // service is only updated from a single thread.
  val sequenceTunesMap = new collection.mutable.HashMap[NoteSequence, collection.mutable.Set[UUID]]
    with mutable.MultiMap[NoteSequence, UUID]

  // Reverse index: each tune record id -> note sequences found in that tune.
  val tuneIdSequencesMap = new collection.mutable.HashMap[UUID, collection.mutable.Set[NoteSequence]]
    with mutable.MultiMap[UUID, NoteSequence]

  /**
   * Process the given tuples of tune record ids and tunes to extract note sequences and maintain a mapping between
   * note sequences and tune record ids.
   *
   * @param abcIdTunes The tuples of abc tune record ids and tunes.
   */
  def addFromAbcTunes(abcIdTunes: Seq[(UUID, AbcTune)]): Unit = {
    Logger.debug(s"addFromAbcTunes called with ${abcIdTunes.size} tunes")
    // Build map of tune ids to tunes.
    val tuneIdMap: Map[AbcTune, UUID] = abcIdTunes.map(_.swap).toMap
    val sequences: Map[NoteSequence, Set[AbcTune]] =
      SubsequenceMatchService.getSubSequenceTunes(8, tuneIdMap.keys.toSeq)
    Logger.debug(s"Extracted ${sequences.size} sequences")
    sequences.foreach {
      case (noteSequence, tuneSet) =>
        // Keep both indexes in sync: sequence -> tunes and tune -> sequences.
        tuneSet.flatMap(tune => tuneIdMap.get(tune)).foreach { tuneId =>
          sequenceTunesMap.addBinding(noteSequence, tuneId)
          tuneIdSequencesMap.addBinding(tuneId, noteSequence)
        }
    }
  }

  /** Consume ABC Tunes.
   *
   * @param tunes The ABC Tunes to console.
   */
  override def process(tunes: Seq[(UUID, AbcTune)]): Unit = addFromAbcTunes(tunes)

  /**
   * Returns the note sequences which have the given minimum number of tunes containing them.
   *
   * @param tuneCount The minimum number of tunes which should contain each of the returned note sequences.
   */
  def getSequences(tuneCount: Int): Map[NoteSequence, Set[UUID]] = {
    sequenceTunesMap.filter(_._2.size >= tuneCount).map(entry => entry._1 -> entry._2.toSet).toMap
  }

  /**
   * For the given tune record id, returns a map of NoteSequences found in the tune along with the
   * tune record ids of the tunes that also contain those NoteSequences.
   *
   * @param tuneId The tune record id to find sequences for.
   * @return A Map of NoteSequence to tune record ids.
   */
  def getSequencesByTuneId(tuneId: UUID): Map[NoteSequence, Set[UUID]] = {
    // Improved: use the reverse index instead of scanning every entry of
    // sequenceTunesMap — O(sequences in this tune) instead of O(all sequences).
    // Both maps are updated together in addFromAbcTunes, so the result is the
    // same as filtering the forward index for entries containing tuneId.
    tuneIdSequencesMap.get(tuneId) match {
      case Some(sequencesForTune) =>
        sequencesForTune.iterator.map { sequence =>
          sequence -> sequenceTunesMap.get(sequence).map(_.toSet).getOrElse(Set.empty[UUID])
        }.toMap
      case None => Map.empty
    }
  }
}
| danwatford/abc-site-play-framework | app/services/AbcTuneSequenceService.scala | Scala | apache-2.0 | 2,731 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.manager.utils.zero90
import java.nio.ByteBuffer
import kafka.common.{KafkaException, OffsetAndMetadata}
import kafka.coordinator.{GroupMetadataKey, GroupTopicPartition, OffsetKey, BaseKey}
import org.apache.kafka.clients.consumer.internals.ConsumerProtocol
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.protocol.types.Type._
import org.apache.kafka.common.protocol.types.{ArrayOf, Field, Schema, Struct}
import scala.collection.Map
/*
Borrowed from kafka 0.9.0.0 GroupMetadataManager
*/
object GroupMetadataManager {
private val CURRENT_OFFSET_KEY_SCHEMA_VERSION = 1.toShort
private val CURRENT_GROUP_KEY_SCHEMA_VERSION = 2.toShort
private val OFFSET_COMMIT_KEY_SCHEMA = new Schema(new Field("group", STRING),
new Field("topic", STRING),
new Field("partition", INT32))
private val OFFSET_KEY_GROUP_FIELD = OFFSET_COMMIT_KEY_SCHEMA.get("group")
private val OFFSET_KEY_TOPIC_FIELD = OFFSET_COMMIT_KEY_SCHEMA.get("topic")
private val OFFSET_KEY_PARTITION_FIELD = OFFSET_COMMIT_KEY_SCHEMA.get("partition")
private val OFFSET_COMMIT_VALUE_SCHEMA_V0 = new Schema(new Field("offset", INT64),
new Field("metadata", STRING, "Associated metadata.", ""),
new Field("timestamp", INT64))
private val OFFSET_VALUE_OFFSET_FIELD_V0 = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("offset")
private val OFFSET_VALUE_METADATA_FIELD_V0 = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("metadata")
private val OFFSET_VALUE_TIMESTAMP_FIELD_V0 = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("timestamp")
private val OFFSET_COMMIT_VALUE_SCHEMA_V1 = new Schema(new Field("offset", INT64),
new Field("metadata", STRING, "Associated metadata.", ""),
new Field("commit_timestamp", INT64),
new Field("expire_timestamp", INT64))
private val OFFSET_VALUE_OFFSET_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("offset")
private val OFFSET_VALUE_METADATA_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("metadata")
private val OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("commit_timestamp")
private val OFFSET_VALUE_EXPIRE_TIMESTAMP_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("expire_timestamp")
private val GROUP_METADATA_KEY_SCHEMA = new Schema(new Field("group", STRING))
private val GROUP_KEY_GROUP_FIELD = GROUP_METADATA_KEY_SCHEMA.get("group")
private val MEMBER_METADATA_V0 = new Schema(
new Field("member_id", STRING),
new Field("client_id", STRING),
new Field("client_host", STRING),
new Field("session_timeout", INT32),
new Field("subscription", BYTES),
new Field("assignment", BYTES))
private val MEMBER_METADATA_V1 = new Schema(
new Field("member_id", STRING),
new Field("client_id", STRING),
new Field("client_host", STRING),
new Field("session_timeout", INT32),
new Field("rebalance_timeout", INT32),
new Field("subscription", BYTES),
new Field("assignment", BYTES))
private val MEMBER_METADATA_MEMBER_ID_V0 = MEMBER_METADATA_V0.get("member_id")
private val MEMBER_METADATA_CLIENT_ID_V0 = MEMBER_METADATA_V0.get("client_id")
private val MEMBER_METADATA_CLIENT_HOST_V0 = MEMBER_METADATA_V0.get("client_host")
private val MEMBER_METADATA_SESSION_TIMEOUT_V0 = MEMBER_METADATA_V0.get("session_timeout")
private val MEMBER_METADATA_SUBSCRIPTION_V0 = MEMBER_METADATA_V0.get("subscription")
private val MEMBER_METADATA_ASSIGNMENT_V0 = MEMBER_METADATA_V0.get("assignment")
private val MEMBER_METADATA_MEMBER_ID_V1 = MEMBER_METADATA_V1.get("member_id")
private val MEMBER_METADATA_CLIENT_ID_V1 = MEMBER_METADATA_V1.get("client_id")
private val MEMBER_METADATA_CLIENT_HOST_V1 = MEMBER_METADATA_V1.get("client_host")
private val MEMBER_METADATA_SESSION_TIMEOUT_V1 = MEMBER_METADATA_V1.get("session_timeout")
private val MEMBER_METADATA_REBALANCE_TIMEOUT_V1 = MEMBER_METADATA_V1.get("rebalance_timeout")
private val MEMBER_METADATA_SUBSCRIPTION_V1 = MEMBER_METADATA_V1.get("subscription")
private val MEMBER_METADATA_ASSIGNMENT_V1 = MEMBER_METADATA_V1.get("assignment")
private val GROUP_METADATA_VALUE_SCHEMA_V0 = new Schema(
new Field("protocol_type", STRING),
new Field("generation", INT32),
new Field("protocol", STRING),
new Field("leader", STRING),
new Field("members", new ArrayOf(MEMBER_METADATA_V0)))
private val GROUP_METADATA_VALUE_SCHEMA_V1 = new Schema(
new Field("protocol_type", STRING),
new Field("generation", INT32),
new Field("protocol", NULLABLE_STRING),
new Field("leader", NULLABLE_STRING),
new Field("members", new ArrayOf(MEMBER_METADATA_V1)))
private val GROUP_METADATA_PROTOCOL_TYPE_V0 = GROUP_METADATA_VALUE_SCHEMA_V0.get("protocol_type")
private val GROUP_METADATA_GENERATION_V0 = GROUP_METADATA_VALUE_SCHEMA_V0.get("generation")
private val GROUP_METADATA_PROTOCOL_V0 = GROUP_METADATA_VALUE_SCHEMA_V0.get("protocol")
private val GROUP_METADATA_LEADER_V0 = GROUP_METADATA_VALUE_SCHEMA_V0.get("leader")
private val GROUP_METADATA_MEMBERS_V0 = GROUP_METADATA_VALUE_SCHEMA_V0.get("members")
private val GROUP_METADATA_PROTOCOL_TYPE_V1 = GROUP_METADATA_VALUE_SCHEMA_V1.get("protocol_type")
private val GROUP_METADATA_GENERATION_V1 = GROUP_METADATA_VALUE_SCHEMA_V1.get("generation")
private val GROUP_METADATA_PROTOCOL_V1 = GROUP_METADATA_VALUE_SCHEMA_V1.get("protocol")
private val GROUP_METADATA_LEADER_V1 = GROUP_METADATA_VALUE_SCHEMA_V1.get("leader")
private val GROUP_METADATA_MEMBERS_V1 = GROUP_METADATA_VALUE_SCHEMA_V1.get("members")
// map of versions to key schemas as data types
private val MESSAGE_TYPE_SCHEMAS = Map(
0 -> OFFSET_COMMIT_KEY_SCHEMA,
1 -> OFFSET_COMMIT_KEY_SCHEMA,
2 -> GROUP_METADATA_KEY_SCHEMA)
// map of version of offset value schemas
private val OFFSET_VALUE_SCHEMAS = Map(
0 -> OFFSET_COMMIT_VALUE_SCHEMA_V0,
1 -> OFFSET_COMMIT_VALUE_SCHEMA_V1)
private val CURRENT_OFFSET_VALUE_SCHEMA_VERSION = 1.toShort
// map of version of group metadata value schemas
private val GROUP_VALUE_SCHEMAS = Map(0 -> GROUP_METADATA_VALUE_SCHEMA_V0,1 -> GROUP_METADATA_VALUE_SCHEMA_V1)
private val CURRENT_GROUP_VALUE_SCHEMA_VERSION = 0.toShort
private val CURRENT_OFFSET_KEY_SCHEMA = schemaForKey(CURRENT_OFFSET_KEY_SCHEMA_VERSION)
private val CURRENT_GROUP_KEY_SCHEMA = schemaForKey(CURRENT_GROUP_KEY_SCHEMA_VERSION)
private val CURRENT_OFFSET_VALUE_SCHEMA = schemaForOffset(CURRENT_OFFSET_VALUE_SCHEMA_VERSION)
private val CURRENT_GROUP_VALUE_SCHEMA = schemaForGroup(CURRENT_GROUP_VALUE_SCHEMA_VERSION)
private def schemaForKey(version: Int) = {
val schemaOpt = MESSAGE_TYPE_SCHEMAS.get(version)
schemaOpt match {
case Some(schema) => schema
case _ => throw new KafkaException("Unknown offset schema version " + version)
}
}
private def schemaForOffset(version: Int) = {
val schemaOpt = OFFSET_VALUE_SCHEMAS.get(version)
schemaOpt match {
case Some(schema) => schema
case _ => throw new KafkaException("Unknown offset schema version " + version)
}
}
private def schemaForGroup(version: Int) = {
val schemaOpt = GROUP_VALUE_SCHEMAS.get(version)
schemaOpt match {
case Some(schema) => schema
case _ => throw new KafkaException("Unknown group metadata version " + version)
}
}
  /**
   * Generates the key for offset commit message for given (group, topic, partition)
   *
   * Layout: a 2-byte schema version followed by the serialized key struct.
   *
   * @return key for offset commit message
   */
  private def offsetCommitKey(group: String, topic: String, partition: Int, versionId: Short = 0): Array[Byte] = {
    val key = new Struct(CURRENT_OFFSET_KEY_SCHEMA)
    key.set(OFFSET_KEY_GROUP_FIELD, group)
    key.set(OFFSET_KEY_TOPIC_FIELD, topic)
    key.set(OFFSET_KEY_PARTITION_FIELD, partition)
    // NOTE(review): the `versionId` parameter is unused — the key is always
    // written with CURRENT_OFFSET_KEY_SCHEMA_VERSION.
    val byteBuffer = ByteBuffer.allocate(2 /* version */ + key.sizeOf)
    byteBuffer.putShort(CURRENT_OFFSET_KEY_SCHEMA_VERSION)
    key.writeTo(byteBuffer)
    byteBuffer.array()
  }
/**
* Generates the key for group metadata message for given group
*
* @return key bytes for group metadata message
*/
private def groupMetadataKey(group: String): Array[Byte] = {
val key = new Struct(CURRENT_GROUP_KEY_SCHEMA)
key.set(GROUP_KEY_GROUP_FIELD, group)
val byteBuffer = ByteBuffer.allocate(2 /* version */ + key.sizeOf)
byteBuffer.putShort(CURRENT_GROUP_KEY_SCHEMA_VERSION)
key.writeTo(byteBuffer)
byteBuffer.array()
}
  /**
   * Generates the payload for offset commit message from given offset and metadata
   *
   * The value is always written with schema version 1 (separate commit and
   * expiration timestamps).
   *
   * @param offsetAndMetadata consumer's current offset and metadata
   * @return payload for offset commit message
   */
  private def offsetCommitValue(offsetAndMetadata: OffsetAndMetadata): Array[Byte] = {
    // generate commit value with schema version 1
    val value = new Struct(CURRENT_OFFSET_VALUE_SCHEMA)
    value.set(OFFSET_VALUE_OFFSET_FIELD_V1, offsetAndMetadata.offset)
    value.set(OFFSET_VALUE_METADATA_FIELD_V1, offsetAndMetadata.metadata)
    value.set(OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V1, offsetAndMetadata.commitTimestamp)
    value.set(OFFSET_VALUE_EXPIRE_TIMESTAMP_FIELD_V1, offsetAndMetadata.expireTimestamp)
    // 2-byte version prefix followed by the serialized struct.
    val byteBuffer = ByteBuffer.allocate(2 /* version */ + value.sizeOf)
    byteBuffer.putShort(CURRENT_OFFSET_VALUE_SCHEMA_VERSION)
    value.writeTo(byteBuffer)
    byteBuffer.array()
  }
  /**
   * Decodes the offset messages' key
   *
   * The first two bytes carry the schema version, which determines whether the
   * key is an offset commit key (versions 0 and 1) or a group metadata key
   * (version 2).
   *
   * @param buffer input byte-buffer
   * @return an GroupTopicPartition object
   */
  def readMessageKey(buffer: ByteBuffer): BaseKey = {
    val version = buffer.getShort
    val keySchema = schemaForKey(version)
    val key = keySchema.read(buffer).asInstanceOf[Struct]
    if (version <= CURRENT_OFFSET_KEY_SCHEMA_VERSION) {
      // version 0 and 1 refer to offset
      val group = key.get(OFFSET_KEY_GROUP_FIELD).asInstanceOf[String]
      val topic = key.get(OFFSET_KEY_TOPIC_FIELD).asInstanceOf[String]
      val partition = key.get(OFFSET_KEY_PARTITION_FIELD).asInstanceOf[Int]
      OffsetKey(version, GroupTopicPartition(group, new TopicPartition(topic, partition)))
    } else if (version == CURRENT_GROUP_KEY_SCHEMA_VERSION) {
      // version 2 refers to group metadata (original comment wrongly said "offset")
      val group = key.get(GROUP_KEY_GROUP_FIELD).asInstanceOf[String]
      GroupMetadataKey(version, group)
    } else {
      throw new IllegalStateException("Unknown version " + version + " for group metadata message")
    }
  }
  /**
   * Decodes the offset messages' payload and retrieves offset and metadata from it
   *
   * A null buffer is a tombstone and yields null.
   *
   * @param buffer input byte-buffer
   * @return an offset-metadata object from the message
   */
  def readOffsetMessageValue(buffer: ByteBuffer): OffsetAndMetadata = {
    if (buffer == null) { // tombstone
      null
    } else {
      val version = buffer.getShort
      val valueSchema = schemaForOffset(version)
      val value = valueSchema.read(buffer).asInstanceOf[Struct]
      if (version == 0) {
        // Version 0: single timestamp field.
        val offset = value.get(OFFSET_VALUE_OFFSET_FIELD_V0).asInstanceOf[Long]
        val metadata = value.get(OFFSET_VALUE_METADATA_FIELD_V0).asInstanceOf[String]
        val timestamp = value.get(OFFSET_VALUE_TIMESTAMP_FIELD_V0).asInstanceOf[Long]
        OffsetAndMetadata(offset, metadata, timestamp)
      } else if (version == 1) {
        // Version 1: separate commit and expiration timestamps.
        val offset = value.get(OFFSET_VALUE_OFFSET_FIELD_V1).asInstanceOf[Long]
        val metadata = value.get(OFFSET_VALUE_METADATA_FIELD_V1).asInstanceOf[String]
        val commitTimestamp = value.get(OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V1).asInstanceOf[Long]
        val expireTimestamp = value.get(OFFSET_VALUE_EXPIRE_TIMESTAMP_FIELD_V1).asInstanceOf[Long]
        OffsetAndMetadata(offset, metadata, commitTimestamp, expireTimestamp)
      } else {
        throw new IllegalStateException("Unknown offset message version")
      }
    }
  }
/**
 * Decodes the payload of a group-metadata message and rebuilds the group's
 * member metadata from it.
 *
 * A null buffer marks a tombstone (the group entry was deleted) and yields
 * null. Versions 0 and 1 are handled; both branches read the same logical
 * fields through their version-specific field constants.
 *
 * @param groupId id of the group this message belongs to
 * @param buffer input byte-buffer, or null for a tombstone
 * @return a group metadata object rebuilt from the message, or null for tombstones
 */
def readGroupMessageValue(groupId: String, buffer: ByteBuffer): GroupMetadata = {
if(buffer == null) { // tombstone
null
} else {
// The first two bytes pick the value schema version.
val version = buffer.getShort
val valueSchema = schemaForGroup(version)
val value = valueSchema.read(buffer).asInstanceOf[Struct]
if (version == 0) {
val protocolType = value.get(GROUP_METADATA_PROTOCOL_TYPE_V0).asInstanceOf[String]
val generationId = value.get(GROUP_METADATA_GENERATION_V0).asInstanceOf[Int]
val leaderId = value.get(GROUP_METADATA_LEADER_V0).asInstanceOf[String]
val protocol = value.get(GROUP_METADATA_PROTOCOL_V0).asInstanceOf[String]
val group = new GroupMetadata(groupId, protocolType, generationId, leaderId, protocol)
// Each array element is a Struct describing one group member.
value.getArray(GROUP_METADATA_MEMBERS_V0).foreach {
case memberMetadataObj =>
val memberMetadata = memberMetadataObj.asInstanceOf[Struct]
val memberId = memberMetadata.get(MEMBER_METADATA_MEMBER_ID_V0).asInstanceOf[String]
val clientId = memberMetadata.get(MEMBER_METADATA_CLIENT_ID_V0).asInstanceOf[String]
val clientHost = memberMetadata.get(MEMBER_METADATA_CLIENT_HOST_V0).asInstanceOf[String]
//val sessionTimeout = memberMetadata.get(MEMBER_METADATA_SESSION_TIMEOUT_V0).asInstanceOf[Int]
val subscription = ConsumerProtocol.deserializeSubscription(memberMetadata.get(MEMBER_METADATA_SUBSCRIPTION_V0).asInstanceOf[ByteBuffer])
val assignment = ConsumerProtocol.deserializeAssignment(memberMetadata.get(MEMBER_METADATA_ASSIGNMENT_V0).asInstanceOf[ByteBuffer])
import collection.JavaConverters._
val member = new MemberMetadata(
memberId
, groupId
, clientId
, clientHost
//, sessionTimeout
, List((group.protocol, subscription.topics().asScala.toSet))
, assignment.partitions().asScala.map(tp => tp.topic() -> tp.partition()).toSet
)
group.add(memberId, member)
}
group
} else if (version == 1){
val protocolType = value.get(GROUP_METADATA_PROTOCOL_TYPE_V1).asInstanceOf[String]
val generationId = value.get(GROUP_METADATA_GENERATION_V1).asInstanceOf[Int]
val leaderId = value.get(GROUP_METADATA_LEADER_V1).asInstanceOf[String]
val protocol = value.get(GROUP_METADATA_PROTOCOL_V1).asInstanceOf[String]
val group = new GroupMetadata(groupId, protocolType, generationId, leaderId, protocol)
value.getArray(GROUP_METADATA_MEMBERS_V1).foreach {
case memberMetadataObj =>
val memberMetadata = memberMetadataObj.asInstanceOf[Struct]
val memberId = memberMetadata.get(MEMBER_METADATA_MEMBER_ID_V1).asInstanceOf[String]
val clientId = memberMetadata.get(MEMBER_METADATA_CLIENT_ID_V1).asInstanceOf[String]
val clientHost = memberMetadata.get(MEMBER_METADATA_CLIENT_HOST_V1).asInstanceOf[String]
// NOTE(review): the commented-out line below references the V0 session-timeout
// constant inside the V1 branch — verify the intended constant before re-enabling.
//val sessionTimeout = memberMetadata.get(MEMBER_METADATA_SESSION_TIMEOUT_V0).asInstanceOf[Int]
val subscription = ConsumerProtocol.deserializeSubscription(memberMetadata.get(MEMBER_METADATA_SUBSCRIPTION_V1).asInstanceOf[ByteBuffer])
val assignment = ConsumerProtocol.deserializeAssignment(memberMetadata.get(MEMBER_METADATA_ASSIGNMENT_V1).asInstanceOf[ByteBuffer])
import collection.JavaConverters._
val member = new MemberMetadata(
memberId
, groupId
, clientId
, clientHost
//, sessionTimeout
, List((group.protocol, subscription.topics().asScala.toSet))
, assignment.partitions().asScala.map(tp => tp.topic() -> tp.partition()).toSet
)
group.add(memberId, member)
}
group
} else {
throw new IllegalStateException("Unknown group metadata message version")
}
}
}
}
/**
 * Snapshot of a consumer group's state: identity, generation, leader and the
 * selected partition-assignment protocol, plus the registered members.
 *
 * Members are held in an internal mutable map keyed by member id.
 */
case class GroupMetadata(groupId: String
  , protocolType: String
  , generationId: Int
  , leaderId: String
  , protocol: String
) {
  // memberId -> metadata for every member currently registered in this group
  private val memberById = new collection.mutable.HashMap[String, MemberMetadata]

  def isEmpty: Boolean = memberById.isEmpty

  def allMemberMetadata: List[MemberMetadata] = memberById.values.toList

  /** Registers a member; it must share at least one protocol with the group. */
  def add(memberId: String, member: MemberMetadata): Unit = {
    assert(supportsProtocols(member.protocols))
    memberById.put(memberId, member)
    ()
  }

  // Protocols supported by every registered member (intersection across members).
  // Only called when the group is non-empty, see supportsProtocols.
  private def candidateProtocols: Set[String] =
    allMemberMetadata.map(_.protocols).reduceLeft(_ & _)

  /** True when the group is empty or the given protocols overlap the common set. */
  def supportsProtocols(memberProtocols: Set[String]): Boolean =
    isEmpty || memberProtocols.intersect(candidateProtocols).nonEmpty
}
object MemberMetadata {
  import collection.JavaConverters._

  /**
   * Builds a [[MemberMetadata]] from the coordinator's group and member
   * summaries by deserializing the member's subscription and assignment blobs.
   */
  def from(groupId: String, groupSummary: kafka.coordinator.GroupSummary, memberSummary: kafka.coordinator.MemberSummary) : MemberMetadata = {
    val subscription = ConsumerProtocol.deserializeSubscription(ByteBuffer.wrap(memberSummary.metadata))
    val assignment = ConsumerProtocol.deserializeAssignment(ByteBuffer.wrap(memberSummary.assignment))
    val subscribedTopics = subscription.topics().asScala.toSet
    val ownedPartitions = assignment.partitions().asScala.map(tp => tp.topic() -> tp.partition()).toSet
    MemberMetadata(
      memberSummary.memberId,
      groupId,
      memberSummary.clientId,
      memberSummary.clientHost,
      List((groupSummary.protocol, subscribedTopics)),
      ownedPartitions
    )
  }
}
/**
 * Immutable description of a single consumer-group member: its identity,
 * the assignment protocols it supports (protocol name -> subscribed topics),
 * and its current partition assignment as (topic, partition) pairs.
 */
case class MemberMetadata(memberId: String
  , groupId: String
  , clientId: String
  , clientHost: String
  , supportedProtocols: List[(String, Set[String])]
  , assignment: Set[(String, Int)]
) {
  /** Names of every assignment protocol this member supports. */
  def protocols: Set[String] = supportedProtocols.iterator.map(_._1).toSet
}
| radicalbit/kafka-manager | app/kafka/manager/utils/zero90/GroupMetadataManager.scala | Scala | apache-2.0 | 18,937 |
package com.rasterfoundry.lambda.overviews
import com.amazonaws.services.s3.AmazonS3URI
import geotrellis.contrib.vlm.MosaicRasterSource
import geotrellis.contrib.vlm.geotiff.GeoTiffRasterSource
import geotrellis.proj4.WebMercator
import geotrellis.raster.io.geotiff.MultibandGeoTiff
import geotrellis.raster.resample.NearestNeighbor
import geotrellis.raster.{CellSize, GridExtent}
import geotrellis.spark.io.s3.S3Client
import cats.syntax.list._
import com.typesafe.scalalogging.LazyLogging
import com.rasterfoundry.datamodel.OverviewInput
/**
 * Builds a project overview GeoTiff by mosaicking the project's scenes and
 * writes it to S3, then notifies the API.
 */
object OverviewGenerator extends LazyLogging {

  /**
   * Mosaics the given scene tiffs (reprojected to WebMercator) and resamples
   * the result to the requested pixel size.
   *
   * @param sceneURIs URIs of the scene GeoTiffs to mosaic
   * @param pixelSize target cell size (meters) of the overview
   * @return the overview tiff, or None when `sceneURIs` is empty or the
   *         mosaic could not be read
   */
  def createProjectOverview(sceneURIs: List[String],
                            pixelSize: Double): Option[MultibandGeoTiff] = {
    val rasterSources = sceneURIs.map { uri =>
      GeoTiffRasterSource(uri).reproject(WebMercator)
    }
    logger.info(s"Using ${rasterSources.length} rastersources")
    // Guard before any reduce/average: an empty source list previously crashed
    // on `reduce` over an empty collection instead of returning None.
    rasterSources.toNel match {
      case Some(rasterSourcesList) =>
        val reprojectedExtents = rasterSources.map(_.gridExtent)
        // Use the average cell height of all sources as the working cell size.
        val heights = reprojectedExtents.map(_.cellheight)
        val avgHeight = heights.sum / heights.length
        val cs: CellSize = CellSize(avgHeight, avgHeight)
        // Union of all source extents at the averaged cell size.
        val sourceCombinedGrid = reprojectedExtents
          .reduce((a: GridExtent[Long], b: GridExtent[Long]) => {
            GridExtent[Long](a.extent.combine(b.extent), cs)
          })
        logger.info("Generating mosaic raster source")
        val mosaicRasterSource =
          MosaicRasterSource(rasterSourcesList, WebMercator, sourceCombinedGrid)
        val gridExtent =
          GridExtent[Long](mosaicRasterSource.extent,
                           CellSize(pixelSize, pixelSize))
        mosaicRasterSource
          .resampleToGrid(gridExtent)
          .read
          .map { raster =>
            MultibandGeoTiff(raster, WebMercator).withOverviews(NearestNeighbor)
          }
      case None => None
    }
  }

  /** Writes the tiff as a cloud-optimized GeoTiff to the given S3 URI. */
  def writeOverviewToS3(tiff: MultibandGeoTiff, uri: String): Unit = {
    logger.info(s"Writing tiff to $uri")
    val s3Uri = new AmazonS3URI(uri)
    S3Client.DEFAULT.putObject(s3Uri.getBucket,
                               s3Uri.getKey,
                               tiff.toCloudOptimizedByteArray)
    ()
  }

  /**
   * End-to-end overview job: fetch the layer's scenes, build the overview,
   * and — only if the scene list is unchanged afterwards — upload it and
   * update the project layer in the API.
   *
   * @return the API update status, or None when the overview was skipped
   */
  def createOverview(overviewInput: OverviewInput): Option[Int] = {
    logger.info("Retrieving JWT with Refresh Token")
    val authToken = HttpClient.getSystemToken(overviewInput.refreshToken)
    logger.info("Getting project layer scenes")
    val initialProjectScenes = HttpClient.getProjectLayerScenes(
      authToken,
      overviewInput.projectId,
      overviewInput.projectLayerId
    )
    logger.info("Creating project overview")
    // James figured out the formula by fitting the plot of
    // zoom level versus pixel size in meters
    // 156412 is the pixel size on zoom level 0
    val projectOverviewOption =
      OverviewGenerator.createProjectOverview(
        initialProjectScenes,
        156412 / math.pow(2, overviewInput.minZoomLevel))
    logger.info(
      "Checking if scenes have been updated or removed from project layer")
    val currentProjectScenes = HttpClient.getProjectLayerScenes(
      authToken,
      overviewInput.projectId,
      overviewInput.projectLayerId
    )
    (currentProjectScenes == initialProjectScenes, projectOverviewOption) match {
      case (true, Some(projectOverview)) =>
        writeOverviewToS3(projectOverview, overviewInput.outputLocation)
        logger.info("Updating project layer in API with overview")
        val layerUpdateStatus =
          HttpClient.updateProjectWithOverview(authToken, overviewInput)
        Some(layerUpdateStatus)
      case _ =>
        // Scene list changed while we worked (or no overview was produced);
        // uploading would race the newer state, so skip.
        logger.info(
          "Skipping adding project overview, project layer scenes have changed since overview generated")
        None
    }
  }
}
| azavea/raster-foundry | app-backend/lambda-overviews/src/main/scala/com/rasterfoundry/lambda/overviews/OverviewGenerator.scala | Scala | apache-2.0 | 3,928 |
package test
import org.specs2.mutable.Specification
/** Round-trip (de)serialization checks for case classes with complex Avro field types. */
class AvroRecordComplexTest extends Specification {

  "A case class with an empty `Option[String]` field" should {
    "serialize and deserialize correctly" in {
      TestUtil.verifyWriteAndRead(List(AvroRecordTest07(None), AvroRecordTest07(None)))
    }
  }

  "A case class with an empty `Option[Int]` field" should {
    "serialize and deserialize correctly" in {
      TestUtil.verifyWriteAndRead(List(AvroRecordTest08(None), AvroRecordTest08(None)))
    }
  }

  "A case class with an `List[String]` field" should {
    "serialize and deserialize correctly" in {
      TestUtil.verifyWriteAndRead(
        List(AvroRecordTest10(List("head", "tail")), AvroRecordTest10(List("top", "bottom"))))
    }
  }

  "A case class with an `List[Int]` field" should {
    "serialize and deserialize correctly" in {
      TestUtil.verifyWriteAndRead(
        List(AvroRecordTest11(List(1, 2)), AvroRecordTest11(List(3, 4))))
    }
  }

  "A case class with an `Option[String]` field" should {
    "serialize and deserialize correctly" in {
      TestUtil.verifyWriteAndRead(
        List(AvroRecordTest12(Some("I'm here")), AvroRecordTest12(Some("I'm there"))))
    }
  }

  "A case class with an `Option[Int]` field" should {
    "serialize and deserialize correctly" in {
      TestUtil.verifyWriteAndRead(
        List(AvroRecordTest13(Some(1)), AvroRecordTest13(Some(2))))
    }
  }

  "A case class with a `Map[String, Int]` field" should {
    "serialize and deserialize correctly" in {
      TestUtil.verifyWriteAndRead(
        List(AvroRecordTestMap01(Map("bongo"->2)), AvroRecordTestMap01(Map("mongo"->3))))
    }
  }

  "A case class with a `Map[String, String]` field" should {
    "serialize and deserialize correctly" in {
      TestUtil.verifyWriteAndRead(
        List(AvroRecordTestMap02(Map("4"->"four")), AvroRecordTestMap02(Map("5"->"five"))))
    }
  }

  "A case class with a `Map[String, List[Int]]` field" should {
    "serialize and deserialize correctly" in {
      TestUtil.verifyWriteAndRead(
        List(AvroRecordTestMap03(Map("sherpa"->Some(List(5,6)))), AvroRecordTestMap03(Map("autobus"->Some(List(8,9))))))
    }
  }
}
| julianpeeters/avro-scala-macro-annotations | tests/src/test/scala/AvroRecordTests/datatypetests/AvroRecordComplexTest.scala | Scala | apache-2.0 | 2,896 |
package io.vamp.container_driver.kubernetes
import com.typesafe.scalalogging.Logger
import io.kubernetes.client.ApiException
import io.vamp.common.{ CacheStore, Namespace }
import org.slf4j.LoggerFactory
import scala.util.Try
/** Shared cache-key constants. */
object K8sCache {
// Kubernetes resource kinds used as cache-key segments
val jobs = "jobs"
val pods = "pods"
val nodes = "nodes"
val services = "services"
val daemonSets = "daemon-sets"
val deployments = "deployments"
val replicaSets = "replica-sets"
// write-operation names passed to writeWithCache
val create = "create"
val update = "update"
val delete = "delete"
}
/**
 * Per-namespace cache in front of the Kubernetes API.
 *
 * Both successful responses and failures are cached (with separate TTLs from
 * `config`), so repeated reads/writes within a TTL window do not hit the API
 * server again.
 */
class K8sCache(config: K8sCacheConfig, val namespace: Namespace) {

  private val logger = Logger(LoggerFactory.getLogger(getClass))

  // Entries map a cache id to (operation, Left(response) | Right(failure)).
  private val cache = new CacheStore()

  logger.debug(s"starting Kubernetes cache: ${namespace.name}")

  /** Caches a "list" request for all resources of `kind` matching `selector`. */
  def readAllWithCache[T](kind: String, selector: String, request: () ⇒ T): T = {
    requestWithCache[T](read = true, "list", id(kind, selector = Option(selector).getOrElse("")), request)._2
  }

  /** Caches a "read" request for a single named resource; None when it fails. */
  def readWithCache[T](kind: String, name: String, request: () ⇒ T): Option[T] = {
    Try(requestWithCache[T](read = true, "read", id(kind, name), request)).toOption.map(_._2)
  }

  /** Caches a write (create/update/delete); failures are logged, not rethrown. */
  def writeWithCache(operation: String, kind: String, name: String, request: () ⇒ Any): Unit =
    Try(requestWithCache[Any](read = false, operation, id(kind, name, read = false), request)).recover {
      case e: ApiException if e.getCode == 409 ⇒
        logger.warn("K8sCache - Conflict, previous operation still in progress - {}", e.getMessage)
        // conflict, previous operation still in progress
      case e ⇒
        logger.error("K8sCache - error while writing with cache", e)
    }

  /**
   * Serves `id` from the cache when a matching operation is cached; otherwise
   * runs `request` and caches its result (or its failure) with the
   * appropriate TTL. Cached failures are rethrown.
   */
  private def requestWithCache[T](read: Boolean, operation: String, id: String, request: () ⇒ T): (String, T) = {
    cache.get[(String, Either[T, Exception])](id) match {
      case Some((op, response)) if op == operation ⇒
        logger.debug(s"K8sCache - Getting response from cache for $id on operation $operation")
        response match {
          case Left(r) ⇒
            logger.debug("K8sCache - Returning from cache for {} on operation {}", id, operation)
            op → r
          case Right(e) ⇒
            // Failures are cached too, so a failing API server is not hammered.
            logger.error(s"K8sCache - Error while retrieving from cache for $id on operation $operation", e)
            throw e
        }
      case _ ⇒
        try {
          logger.debug("K8sCache - Sending request for {} on operation {}", id, operation)
          val response = request()
          logger.debug("K8sCache - Request sent for {} on operation {}", id, operation)
          val ttl = if (read) config.readTimeToLivePeriod else config.writeTimeToLivePeriod
          // fixed log-message typo: was "cK8sCache - cache put ..."
          logger.debug(s"K8sCache - cache put [${ttl.toSeconds}s]: $id on operation $operation")
          cache.put[(String, Either[T, Exception])](id, operation → Left(response), ttl)
          logger.debug("K8sCache - Returning from request for {} on operation {}", id, operation)
          operation → response
        }
        catch {
          case e: Exception ⇒
            logger.error(s"K8sCache - Error while running request for $id on operation $operation", e)
            val ttl = config.failureTimeToLivePeriod
            logger.debug(s"K8sCache - cache put [${ttl.toSeconds}s]: $id")
            cache.put[(String, Either[T, Exception])](id, operation → Right(e), ttl)
            throw e
        }
    }
  }

  /** Drops every cached read of `kind` plus the read/write entries for `name`. */
  def invalidate(kind: String, name: String): Unit = {
    val ofKind = all(kind)
    cache.keys.filter(_.startsWith(ofKind)).foreach { key ⇒
      logger.debug(s"K8sCache - invalidate cache: $key")
      cache.remove(key)
    }
    (id(kind, name) :: id(kind, name, read = false) :: Nil).foreach { key ⇒
      logger.debug(s"K8sCache - invalidate cache: $key")
      cache.remove(key)
    }
  }

  def close(): Unit = {
    logger.debug(s"closing Kubernetes cache: ${namespace.name}")
    cache.close()
  }

  // Prefix matching every cached read of this kind in this namespace.
  private def all(kind: String) = s"r/${namespace.name}/$kind/?"

  // Cache id: "r/" for reads, "w/" for writes, then namespace/kind/name?selector.
  private def id(kind: String, name: String = "", selector: String = "", read: Boolean = true) = s"${if (read) "r/" else "w/"}${namespace.name}/$kind/$name?$selector"
}
| magneticio/vamp | kubernetes/src/main/scala/io/vamp/container_driver/kubernetes/K8sCache.scala | Scala | apache-2.0 | 4,119 |
package com.azavea.maml.eval
import com.azavea.maml.util._
import com.azavea.maml.ast._
import com.azavea.maml.dsl._
import com.azavea.maml.error._
import com.azavea.maml.eval._
import com.azavea.maml.eval.tile._
import com.azavea.maml.ast.codec.tree._
import io.circe._
import io.circe.syntax._
import io.circe.generic.extras.auto._
import geotrellis.raster._
import geotrellis.vector._
import cats._
import cats.data.{NonEmptyList => NEL, _}
import Validated._
import org.scalatest._
import scala.reflect._
/**
 * Specs for `Vars`: extracting variable-name -> kind maps (optionally with
 * focal-buffer sizes) from MAML expression trees.
 */
class VariableSpec extends FunSpec with Matchers {
// Narrows an Interpreted[Result] to a concrete result type, preserving failures.
implicit class TypeRefinement(self: Interpreted[Result]) {
def as[T: ClassTag]: Interpreted[T] = self match {
case Valid(r) => r.as[T]
case i@Invalid(_) => i
}
}
// NOTE(review): not referenced by any test below — kept for parity with sibling specs?
val interpreter = NaiveInterpreter.DEFAULT
it("should produce an accurate variable map in a simple case") {
Vars.vars(BoolVar("predicate1")) should be (Map("predicate1" -> MamlKind.Bool))
}
it("should produce an accurate variable map in a complex case") {
Vars.vars(Addition(List(IntVar("arg1"), IntVar("arg2")))) should be (Map("arg1" -> MamlKind.Int, "arg2" -> MamlKind.Int))
}
it("should produce an accurate variable map with buffer in a simple case") {
Vars.varsWithBuffer(FocalMax(List(RasterVar("someRaster")), Square(1))) should be (Map("someRaster" -> (MamlKind.Image, 1)))
}
it("should produce an accurate variable map with buffer in an ambiguous case") {
// Two nested FocalMax(Square(1)) around the same variable: the buffer must
// accumulate to 2, and the bare reference must not shrink it.
val ast = Addition(List(
FocalMax(List(
FocalMax(List(RasterVar("someRaster")), Square(1))
), Square(1), TargetCell.All),
RasterVar("someRaster")
))
Vars.varsWithBuffer(ast) should be (Map("someRaster" -> (MamlKind.Image, 2)))
}
}
| geotrellis/maml | jvm/src/test/scala/eval/VariableSpec.scala | Scala | apache-2.0 | 1,730 |
package mypipe.api.data
import java.lang.{Long ⇒ JLong}
/** Ordered list of the columns that make up a table's primary key. */
case class PrimaryKey(columns: List[ColumnMetadata])
/** Schema information for a single column. */
case class ColumnMetadata(name: String, colType: ColumnType.EnumVal, isPrimaryKey: Boolean)
/** A single row: its table plus the column values keyed by column name. */
case class Row(table: Table, columns: Map[String, Column])
/** Table schema: id, name, owning database, columns and optional primary key. */
case class Table(id: JLong, name: String, db: String, columns: List[ColumnMetadata], primaryKey: Option[PrimaryKey])
// Placeholder for a table whose schema is not known: no columns, no primary key.
// NOTE(review): extending a case class with a plain class is discouraged in Scala —
// the inherited equals/hashCode/copy ignore the subclass; consider a sealed ADT.
class UnknownTable(override val id: JLong, override val name: String, override val db: String) extends Table(id, name, db, columns = List.empty, primaryKey = None)
/**
 * A column value paired with its metadata. The raw value is stored as a
 * java.io.Serializable and may be null (e.g. for SQL NULLs).
 */
case class Column(metadata: ColumnMetadata, value: java.io.Serializable = null) {

  /** Unchecked cast of the raw value to T; a null value is returned as null-of-T. */
  def value[T]: T =
    if (value == null) null.asInstanceOf[T]
    else value.asInstanceOf[T]

  /** The raw value cast to T, or None when it is null. */
  def valueOption[T]: Option[T] = Option(value[T])
}
| mardambey/mypipe | mypipe-api/src/main/scala/mypipe/api/data/package.scala | Scala | apache-2.0 | 815 |
package org.shapelogic.sc.imageprocessing
import org.shapelogic.sc.util.Constants
import spire.implicits._
import org.shapelogic.sc.image.BufferImage
/**
* Neighbor Checker.
*
* Runs around a point and find what type all the neighbor points have
*
* @author Sami Badawi
*
*/
class NeighborChecker(
val image: BufferImage[Byte],
currentPixelIndex: Int) extends IPixelTypeFinder {
//Find and set the type of all the neighbor points
// Accumulators: one per neighbor category, each recording the first direction
// in which that category was seen and how many neighbors fell into it.
val extraNeighborPoint: FirstDirectionForType = new FirstDirectionForType()
val junction: FirstDirectionForType = new FirstDirectionForType()
val other: FirstDirectionForType = new FirstDirectionForType()
val used: FirstDirectionForType = new FirstDirectionForType()
val vCornerPoint: FirstDirectionForType = new FirstDirectionForType()
// Scratch calculator, reset and reused for each unknown neighbor pixel.
val localPixelTypeCalculator: PixelTypeCalculator = new PixelTypeCalculator()
val _pixels: Array[Byte] = image.data
val bufferLenght: Int = image.bufferLenght
// Index offsets of the neighbors around a pixel, one per direction.
val cyclePoints: Array[Int] = image.cyclePoints
/** Run over the neighbors points and put them in categories. */
def checkNeighbors(): Unit = {
cfor(0)(_ < Constants.DIRECTIONS_AROUND_POINT, _ + 1) { iInt =>
val i = iInt.toByte
var pixelIndexI: Int = currentPixelIndex + cyclePoints(i)
// Out-of-buffer neighbors are treated as background.
var pixel: Byte = if (0 <= pixelIndexI && pixelIndexI < bufferLenght)
_pixels(pixelIndexI)
else
PixelType.BACKGROUND_POINT.color
// Lazily classify still-unknown neighbors and write the result back.
if (pixel == PixelType.PIXEL_FOREGROUND_UNKNOWN.color) {
localPixelTypeCalculator.setup()
findPointType(pixelIndexI, localPixelTypeCalculator)
pixel = localPixelTypeCalculator.getPixelType().color
_pixels(pixelIndexI) = pixel
}
var isUsed: Boolean = PixelType.isUsed(pixel)
if (isUsed) {
used.addDirection(i, isUsed)
}
// Bucket the neighbor by its pixel type; background is ignored.
if (PixelType.BACKGROUND_POINT.color == pixel) {
// continue
} else if (PixelType.PIXEL_JUNCTION.equalsIgnore(pixel)) {
junction.addDirection(i, isUsed)
} else if (PixelType.PIXEL_EXTRA_NEIGHBOR.equalsIgnore(pixel)) {
extraNeighborPoint.addDirection(i, isUsed)
} else if (PixelType.PIXEL_V_CORNER.equalsIgnore(pixel)) {
vCornerPoint.addDirection(i, isUsed)
} else {
other.addDirection(i, isUsed)
}
}
}
/** Total number of non-background neighbors seen so far (the `used` bucket overlaps the others). */
def allNeighbors(): Int = {
return extraNeighborPoint.count +
junction.count +
other.count +
used.count +
vCornerPoint.count
}
// True when at least one V-corner neighbor exists and at most two neighbors
// remain after discounting V-corners — presumably a junction that is not real;
// TODO(review) confirm intended semantics.
def falseJunction(): Boolean = {
return 0 < vCornerPoint.count && allNeighbors() - vCornerPoint.count <= 2
}
// Working area of the image, shrunk by `margin` on each side (margin is 0 here).
def margin: Int = 0
lazy val xMin: Int = image.xMin + margin
lazy val xMax: Int = image.xMax - margin
lazy val yMin: Int = image.yMin + margin
lazy val yMax: Int = image.yMax - margin
lazy val priorityBasedPixelTypeFinder = new PriorityBasedPixelTypeFinder(image)
/**
 * XXX Not sure if I am using the right PixelTypeFinder
 */
override def findPointType(pixelIndex: Int,
reusedPixelTypeCalculator: PixelTypeCalculator): PixelTypeCalculator = {
return priorityBasedPixelTypeFinder.findPointType(pixelIndex, reusedPixelTypeCalculator)
}
}
| sami-badawi/shapelogic-scala | src/main/scala/org/shapelogic/sc/imageprocessing/NeighborChecker.scala | Scala | mit | 3,154 |
package org.unixuser.haruyama.lein.scalac
import org.scalatest._
/** Minimal smoke test: Example#add sums two integers. */
class ExampleTest extends FlatSpec with ShouldMatchers {
  "Example" should "add integers" in {
    val sut = new Example
    sut.add(1, 2) should equal (3)
  }
}
| haruyama/lein-scalac-example | test/scala/org/unixuser/haruyama/lein/scalac/ExampleTest.scala | Scala | epl-1.0 | 236 |
package com.kakao.cuesheet.convert
import org.apache.avro.Schema
import org.apache.avro.Schema.Parser
import org.apache.avro.generic.{GenericDatumReader, GenericRecord}
import org.apache.spark.rdd.RDD
/**
 * Avro-decoding helpers over an RDD of raw Avro record bytes: decode to
 * GenericRecord, or further to Map / JSON representations.
 */
class ByteArrayRDD(rdd: RDD[Array[Byte]]) {

  /** [[Schema]] is not Serializable, so ship it to executors in JSON form. */
  def parseAvro(schema: Schema): RDD[GenericRecord] = parseAvro(schema.toString)

  /** Decodes each byte array into a GenericRecord using the given JSON schema. */
  def parseAvro(schemaJson: String): RDD[GenericRecord] =
    rdd.mapPartitions { partition =>
      // Parse the schema and build the reader once per partition, not per record.
      val reader = new GenericDatumReader[GenericRecord](new Parser().parse(schemaJson))
      partition.map(Avro.recordDecoder(reader))
    }

  def parseAvroToMap(schema: Schema): RDD[Map[String, Any]] = parseAvro(schema).map(Avro.toMap)
  def parseAvroToMap(schema: String): RDD[Map[String, Any]] = parseAvro(schema).map(Avro.toMap)

  def parseAvroToJson(schema: Schema): RDD[String] = parseAvro(schema).map(Avro.toJson)
  def parseAvroToJson(schema: String): RDD[String] = parseAvro(schema).map(Avro.toJson)
}
| kakao/cuesheet | src/main/scala/com/kakao/cuesheet/convert/ByteArrayRDD.scala | Scala | apache-2.0 | 1,041 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command
import java.io.{File, PrintWriter}
import java.net.URI
import java.util.Locale
import org.apache.hadoop.fs.{Path, RawLocalFileSystem}
import org.apache.hadoop.fs.permission.{AclEntry, AclEntryScope, AclEntryType, AclStatus, FsAction, FsPermission}
import org.apache.spark.{SparkException, SparkFiles}
import org.apache.spark.internal.config
import org.apache.spark.internal.config.RDD_PARALLEL_LISTING_THRESHOLD
import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SaveMode}
import org.apache.spark.sql.catalyst.{FunctionIdentifier, QualifiedTableName, TableIdentifier}
import org.apache.spark.sql.catalyst.analysis.{FunctionRegistry, NoSuchDatabaseException, NoSuchFunctionException, NoSuchPartitionException, NoSuchTableException, TempTableAlreadyExistsException}
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.connector.catalog.SupportsNamespaces.PROP_OWNER
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATION
import org.apache.spark.sql.test.{SharedSparkSession, SQLTestUtils}
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
/**
 * DDL tests run against the in-memory (non-Hive) catalog implementation.
 * Several tests assert that Hive-only DDL is rejected with a clear error.
 */
class InMemoryCatalogedDDLSuite extends DDLSuite with SharedSparkSession {
import testImplicits._
override def afterEach(): Unit = {
try {
// drop all databases, tables and functions after each test
spark.sessionState.catalog.reset()
} finally {
Utils.deleteRecursively(new File(spark.sessionState.conf.warehousePath))
super.afterEach()
}
}
// Builds the external parquet test table (2 data columns + partition columns)
// used by the shared DDLSuite tests.
protected override def generateTable(
catalog: SessionCatalog,
name: TableIdentifier,
isDataSource: Boolean = true,
partitionCols: Seq[String] = Seq("a", "b")): CatalogTable = {
val storage =
CatalogStorageFormat.empty.copy(locationUri = Some(catalog.defaultTablePath(name)))
val metadata = new MetadataBuilder()
.putString("key", "value")
.build()
val schema = new StructType()
.add("col1", "int", nullable = true, metadata = metadata)
.add("col2", "string")
CatalogTable(
identifier = name,
tableType = CatalogTableType.EXTERNAL,
storage = storage,
schema = schema.copy(
fields = schema.fields ++ partitionCols.map(StructField(_, IntegerType))),
provider = Some("parquet"),
partitionColumnNames = partitionCols,
createTime = 0L,
createVersion = org.apache.spark.SPARK_VERSION,
tracksPartitionsInCatalog = true)
}
test("create a managed Hive source table") {
assume(spark.sparkContext.conf.get(CATALOG_IMPLEMENTATION) == "in-memory")
val tabName = "tbl"
withTable(tabName) {
val e = intercept[AnalysisException] {
sql(s"CREATE TABLE $tabName (i INT, j STRING) STORED AS parquet")
}.getMessage
assert(e.contains("Hive support is required to CREATE Hive TABLE"))
}
}
test("create an external Hive source table") {
assume(spark.sparkContext.conf.get(CATALOG_IMPLEMENTATION) == "in-memory")
withTempDir { tempDir =>
val tabName = "tbl"
withTable(tabName) {
val e = intercept[AnalysisException] {
sql(
s"""
|CREATE EXTERNAL TABLE $tabName (i INT, j STRING)
|ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
|LOCATION '${tempDir.toURI}'
""".stripMargin)
}.getMessage
assert(e.contains("Hive support is required to CREATE Hive TABLE"))
}
}
}
test("Create Hive Table As Select") {
import testImplicits._
withTable("t", "t1") {
var e = intercept[AnalysisException] {
sql("CREATE TABLE t STORED AS parquet SELECT 1 as a, 1 as b")
}.getMessage
assert(e.contains("Hive support is required to CREATE Hive TABLE (AS SELECT)"))
spark.range(1).select('id as 'a, 'id as 'b).write.saveAsTable("t1")
e = intercept[AnalysisException] {
sql("CREATE TABLE t STORED AS parquet SELECT a, b from t1")
}.getMessage
assert(e.contains("Hive support is required to CREATE Hive TABLE (AS SELECT)"))
}
}
test("SPARK-22431: table with nested type col with special char") {
withTable("t") {
spark.sql("CREATE TABLE t(q STRUCT<`$a`:INT, col2:STRING>, i1 INT) USING PARQUET")
checkAnswer(spark.table("t"), Nil)
}
}
test("SPARK-22431: view with nested type") {
withView("t", "v") {
spark.sql("CREATE VIEW t AS SELECT STRUCT('a' AS `$a`, 1 AS b) q")
checkAnswer(spark.table("t"), Row(Row("a", 1)) :: Nil)
spark.sql("CREATE VIEW v AS SELECT STRUCT('a' AS `a`, 1 AS b) q")
checkAnswer(spark.table("t"), Row(Row("a", 1)) :: Nil)
}
}
// TODO: This test is copied from HiveDDLSuite, unify it later.
test("SPARK-23348: append data to data source table with saveAsTable") {
withTable("t", "t1") {
Seq(1 -> "a").toDF("i", "j").write.saveAsTable("t")
checkAnswer(spark.table("t"), Row(1, "a"))
sql("INSERT INTO t SELECT 2, 'b'")
checkAnswer(spark.table("t"), Row(1, "a") :: Row(2, "b") :: Nil)
Seq(3 -> "c").toDF("i", "j").write.mode("append").saveAsTable("t")
checkAnswer(spark.table("t"), Row(1, "a") :: Row(2, "b") :: Row(3, "c") :: Nil)
// Mismatched column types are coerced on append (3.5 -> 3, 3 -> "3").
Seq(3.5 -> 3).toDF("i", "j").write.mode("append").saveAsTable("t")
checkAnswer(spark.table("t"), Row(1, "a") :: Row(2, "b") :: Row(3, "c")
:: Row(3, "3") :: Nil)
Seq(4 -> "d").toDF("i", "j").write.saveAsTable("t1")
val e = intercept[AnalysisException] {
// Pick a format that is guaranteed to differ from the session default.
val format = if (spark.sessionState.conf.defaultDataSourceName.equalsIgnoreCase("json")) {
"orc"
} else {
"json"
}
Seq(5 -> "e").toDF("i", "j").write.mode("append").format(format).saveAsTable("t1")
}
assert(e.message.contains("The format of the existing table default.t1 is "))
assert(e.message.contains("It doesn't match the specified format"))
}
}
test("throw exception if Create Table LIKE USING Hive built-in ORC in in-memory catalog") {
val catalog = spark.sessionState.catalog
withTable("s", "t") {
sql("CREATE TABLE s(a INT, b INT) USING parquet")
val source = catalog.getTableMetadata(TableIdentifier("s"))
assert(source.provider == Some("parquet"))
val e = intercept[AnalysisException] {
sql("CREATE TABLE t LIKE s USING org.apache.spark.sql.hive.orc")
}.getMessage
assert(e.contains("Hive built-in ORC data source must be used with Hive support enabled"))
}
}
test("ALTER TABLE ALTER COLUMN with position is not supported") {
withTable("t") {
sql("CREATE TABLE t(i INT) USING parquet")
val e = intercept[AnalysisException] {
sql("ALTER TABLE t ALTER COLUMN i FIRST")
}
assert(e.message.contains("ALTER COLUMN ... FIRST | ALTER is only supported with v2 tables"))
}
}
test("SPARK-25403 refresh the table after inserting data") {
withTable("t") {
val catalog = spark.sessionState.catalog
val table = QualifiedTableName(catalog.getCurrentDatabase, "t")
sql("CREATE TABLE t (a INT) USING parquet")
sql("INSERT INTO TABLE t VALUES (1)")
assert(catalog.getCachedTable(table) === null, "Table relation should be invalidated.")
assert(spark.table("t").count() === 1)
assert(catalog.getCachedTable(table) !== null, "Table relation should be cached.")
}
}
test("SPARK-19784 refresh the table after altering the table location") {
withTable("t") {
withTempDir { dir =>
val catalog = spark.sessionState.catalog
val table = QualifiedTableName(catalog.getCurrentDatabase, "t")
val p1 = s"${dir.getCanonicalPath}/p1"
val p2 = s"${dir.getCanonicalPath}/p2"
sql(s"CREATE TABLE t (a INT) USING parquet LOCATION '$p1'")
sql("INSERT INTO TABLE t VALUES (1)")
assert(catalog.getCachedTable(table) === null, "Table relation should be invalidated.")
spark.range(5).toDF("a").write.parquet(p2)
spark.sql(s"ALTER TABLE t SET LOCATION '$p2'")
assert(catalog.getCachedTable(table) === null, "Table relation should be invalidated.")
assert(spark.table("t").count() === 5)
assert(catalog.getCachedTable(table) !== null, "Table relation should be cached.")
}
}
}
}
abstract class DDLSuite extends QueryTest with SQLTestUtils {
protected val reversedProperties = Seq(PROP_OWNER)
protected def isUsingHiveMetastore: Boolean = {
spark.sparkContext.conf.get(CATALOG_IMPLEMENTATION) == "hive"
}
protected def generateTable(
catalog: SessionCatalog,
name: TableIdentifier,
isDataSource: Boolean = true,
partitionCols: Seq[String] = Seq("a", "b")): CatalogTable
private val escapedIdentifier = "`(.+)`".r
/** Default provider used in CREATE TABLE statements: HIVE against the Hive metastore, PARQUET otherwise. */
private def dataSource: String =
  if (isUsingHiveMetastore) "HIVE" else "PARQUET"
protected def normalizeCatalogTable(table: CatalogTable): CatalogTable = table
/** Drops serde properties that vary by environment ("serialization.format", "path") before comparison. */
private def normalizeSerdeProp(props: Map[String, String]): Map[String, String] =
  props -- Seq("serialization.format", "path")
/** Asserts two catalog tables are equal after subclass-specific normalization. */
private def checkCatalogTables(expected: CatalogTable, actual: CatalogTable): Unit = {
  val normalizedActual = normalizeCatalogTable(actual)
  val normalizedExpected = normalizeCatalogTable(expected)
  assert(normalizedActual == normalizedExpected)
}
/**
 * Strip backticks, if any, from the string.
 *
 * `Regex.unapplySeq` performs a whole-string match, so only fully quoted
 * identifiers (e.g. `` `database` ``) are unwrapped; anything else is returned unchanged.
 */
private def cleanIdentifier(ident: String): String =
  escapedIdentifier
    .unapplySeq(ident)
    .flatMap(_.headOption)
    .getOrElse(ident)
/** Runs `query` and asserts it fails with an "operation not allowed" analysis error. */
private def assertUnsupported(query: String): Unit = {
  val err = intercept[AnalysisException](sql(query))
  assert(err.getMessage.toLowerCase(Locale.ROOT).contains("operation not allowed"))
}
/** Evaluates `body`, expecting an AnalysisException iff `expectException` is true. */
private def maybeWrapException[T](expectException: Boolean)(body: => T): Unit = {
  if (!expectException) {
    body
  } else {
    intercept[AnalysisException](body)
  }
}
/** Creates a database rooted at the session warehouse path directly through the catalog API. */
private def createDatabase(catalog: SessionCatalog, name: String): Unit = {
  val warehouseUri = CatalogUtils.stringToURI(spark.sessionState.conf.warehousePath)
  val db = CatalogDatabase(name, "", warehouseUri, Map())
  catalog.createDatabase(db, ignoreIfExists = false)
}
/** Creates a test table via the catalog API, using [[generateTable]] for its metadata. */
private def createTable(
    catalog: SessionCatalog,
    name: TableIdentifier,
    isDataSource: Boolean = true,
    partitionCols: Seq[String] = Seq("a", "b")): Unit = {
  val tableDefinition = generateTable(catalog, name, isDataSource, partitionCols)
  catalog.createTable(tableDefinition, ignoreIfExists = false)
}
/** Adds a single partition with an empty storage format to `tableName`. */
private def createTablePartition(
    catalog: SessionCatalog,
    spec: TablePartitionSpec,
    tableName: TableIdentifier): Unit = {
  // Minimal storage descriptor: no location, no serde, uncompressed, no properties.
  val storage = CatalogStorageFormat(None, None, None, None, false, Map())
  catalog.createPartitions(tableName, Seq(CatalogTablePartition(spec, storage)), ignoreIfExists = false)
}
/** Expected location of database `dbName`: `<qualified warehouse path>/<dbName>.db`. */
private def getDBPath(dbName: String): URI = {
  val warehouse = makeQualifiedPath(spark.sessionState.conf.warehousePath)
  val dbDir = new Path(CatalogUtils.URIToString(warehouse), s"$dbName.db")
  dbDir.toUri
}
// The following ten tests exercise the shared ALTER/DROP helpers (defined elsewhere
// in this suite) against data source tables.
test("alter table: set location (datasource table)") {
testSetLocation(isDatasourceTable = true)
}
test("alter table: set properties (datasource table)") {
testSetProperties(isDatasourceTable = true)
}
test("alter table: unset properties (datasource table)") {
testUnsetProperties(isDatasourceTable = true)
}
test("alter table: set serde (datasource table)") {
testSetSerde(isDatasourceTable = true)
}
test("alter table: set serde partition (datasource table)") {
testSetSerdePartition(isDatasourceTable = true)
}
test("alter table: change column (datasource table)") {
testChangeColumn(isDatasourceTable = true)
}
test("alter table: add partition (datasource table)") {
testAddPartitions(isDatasourceTable = true)
}
test("alter table: drop partition (datasource table)") {
testDropPartitions(isDatasourceTable = true)
}
test("alter table: rename partition (datasource table)") {
testRenamePartitions(isDatasourceTable = true)
}
test("drop table - data source table") {
testDropTable(isDatasourceTable = true)
}
// The catalog must store a fully qualified (scheme-prefixed) location even though
// the user-provided path is unqualified.
test("the qualified path of a database is stored in the catalog") {
val catalog = spark.sessionState.catalog
withTempDir { tmpDir =>
val path = tmpDir.getCanonicalPath
// The generated temp path is not qualified.
assert(!path.startsWith("file:/"))
val uri = tmpDir.toURI
sql(s"CREATE DATABASE db1 LOCATION '$uri'")
val pathInCatalog = new Path(catalog.getDatabaseMetadata("db1").locationUri).toUri
assert("file" === pathInCatalog.getScheme)
val expectedPath = new Path(path).toUri
assert(expectedPath.getPath === pathInCatalog.getPath)
sql("DROP DATABASE db1")
}
}
// Without an explicit LOCATION, the database directory defaults to
// <warehouse>/<dbName>.db (see getDBPath).
test("Create Database using Default Warehouse Path") {
val catalog = spark.sessionState.catalog
val dbName = "db1"
try {
sql(s"CREATE DATABASE $dbName")
val db1 = catalog.getDatabaseMetadata(dbName)
assert(db1.copy(properties = db1.properties -- reversedProperties) == CatalogDatabase(
dbName,
"",
getDBPath(dbName),
Map.empty))
sql(s"DROP DATABASE $dbName CASCADE")
assert(!catalog.databaseExists(dbName))
} finally {
catalog.reset()
}
}
// Covers both plain and backtick-quoted database names with an explicit LOCATION.
test("Create/Drop Database - location") {
val catalog = spark.sessionState.catalog
val databaseNames = Seq("db1", "`database`")
withTempDir { tmpDir =>
val path = new Path(tmpDir.getCanonicalPath).toUri
databaseNames.foreach { dbName =>
try {
val dbNameWithoutBackTicks = cleanIdentifier(dbName)
sql(s"CREATE DATABASE $dbName Location '$path'")
val db1 = catalog.getDatabaseMetadata(dbNameWithoutBackTicks)
val expPath = makeQualifiedPath(tmpDir.toString)
assert(db1.copy(properties = db1.properties -- reversedProperties) == CatalogDatabase(
dbNameWithoutBackTicks,
"",
expPath,
Map.empty))
sql(s"DROP DATABASE $dbName CASCADE")
assert(!catalog.databaseExists(dbNameWithoutBackTicks))
} finally {
catalog.reset()
}
}
}
}
// A second CREATE DATABASE with the same name must fail; the exact exception type
// differs between catalog implementations, so only the message is checked.
test("Create Database - database already exists") {
val catalog = spark.sessionState.catalog
val databaseNames = Seq("db1", "`database`")
databaseNames.foreach { dbName =>
try {
val dbNameWithoutBackTicks = cleanIdentifier(dbName)
sql(s"CREATE DATABASE $dbName")
val db1 = catalog.getDatabaseMetadata(dbNameWithoutBackTicks)
assert(db1.copy(properties = db1.properties -- reversedProperties) == CatalogDatabase(
dbNameWithoutBackTicks,
"",
getDBPath(dbNameWithoutBackTicks),
Map.empty))
// TODO: HiveExternalCatalog should throw DatabaseAlreadyExistsException
val e = intercept[AnalysisException] {
sql(s"CREATE DATABASE $dbName")
}.getMessage
assert(e.contains(s"already exists"))
} finally {
catalog.reset()
}
}
}
/**
 * Creates the default table location directory for `dirName`, runs `f` with it,
 * and always cleans up (after waiting for in-flight tasks, so files are not
 * deleted underneath running jobs).
 */
private def withEmptyDirInTablePath(dirName: String)(f: File => Unit): Unit = {
  val dir = new File(spark.sessionState.catalog.defaultTablePath(TableIdentifier(dirName)))
  try {
    dir.mkdir()
    f(dir)
  } finally {
    waitForTasksToFinish()
    Utils.deleteRecursively(dir)
  }
}
// An *empty* pre-existing directory at the default location is tolerated for
// managed tables (both CTAS and plain CREATE + INSERT).
test("CTAS a managed table with the existing empty directory") {
withEmptyDirInTablePath("tab1") { tableLoc =>
withTable("tab1") {
sql(s"CREATE TABLE tab1 USING ${dataSource} AS SELECT 1, 'a'")
checkAnswer(spark.table("tab1"), Row(1, "a"))
}
}
}
test("create a managed table with the existing empty directory") {
withEmptyDirInTablePath("tab1") { tableLoc =>
withTable("tab1") {
sql(s"CREATE TABLE tab1 (col1 int, col2 string) USING ${dataSource}")
sql("INSERT INTO tab1 VALUES (1, 'a')")
checkAnswer(spark.table("tab1"), Row(1, "a"))
}
}
}
// A *non-empty* directory (even one containing only a hidden file) must make
// CREATE TABLE fail, with or without IF NOT EXISTS, to avoid clobbering data.
test("create a managed table with the existing non-empty directory") {
withTable("tab1") {
withEmptyDirInTablePath("tab1") { tableLoc =>
val hiddenGarbageFile = new File(tableLoc.getCanonicalPath, ".garbage")
hiddenGarbageFile.createNewFile()
val exMsgWithDefaultDB =
"Can not create the managed table('`default`.`tab1`'). The associated location"
var ex = intercept[AnalysisException] {
sql(s"CREATE TABLE tab1 USING ${dataSource} AS SELECT 1, 'a'")
}.getMessage
assert(ex.contains(exMsgWithDefaultDB))
ex = intercept[AnalysisException] {
sql(s"CREATE TABLE tab1 (col1 int, col2 string) USING ${dataSource}")
}.getMessage
assert(ex.contains(exMsgWithDefaultDB))
// Always check location of managed table, with or without (IF NOT EXISTS)
withTable("tab2") {
sql(s"CREATE TABLE tab2 (col1 int, col2 string) USING ${dataSource}")
ex = intercept[AnalysisException] {
sql(s"CREATE TABLE IF NOT EXISTS tab1 LIKE tab2")
}.getMessage
assert(ex.contains(exMsgWithDefaultDB))
}
}
}
}
// Renaming must also refuse to move a managed table onto an existing directory.
test("rename a managed table with existing empty directory") {
withEmptyDirInTablePath("tab2") { tableLoc =>
withTable("tab1") {
sql(s"CREATE TABLE tab1 USING $dataSource AS SELECT 1, 'a'")
val ex = intercept[AnalysisException] {
sql("ALTER TABLE tab1 RENAME TO tab2")
}.getMessage
assert(ex.contains(
"Can not rename the managed table('`default`.`tab1`'). The associated location"))
}
}
}
/**
 * Creates a data source table over `path` with optional user-specified schema and
 * partition columns, then verifies the resulting catalog schema/partitioning.
 *
 * When partition columns are given without a schema, table creation itself is
 * expected to fail with an analysis error.
 *
 * @param path                       existing parquet data directory backing the table
 * @param userSpecifiedSchema        optional column list, e.g. "num int, str string"
 * @param userSpecifiedPartitionCols optional PARTITIONED BY column list
 * @param expectedSchema             schema the catalog should report
 * @param expectedPartitionCols      partition column names the catalog should report
 */
private def checkSchemaInCreatedDataSourceTable(
path: File,
userSpecifiedSchema: Option[String],
userSpecifiedPartitionCols: Option[String],
expectedSchema: StructType,
expectedPartitionCols: Seq[String]): Unit = {
val tabName = "tab1"
withTable(tabName) {
val partitionClause =
userSpecifiedPartitionCols.map(p => s"PARTITIONED BY ($p)").getOrElse("")
val schemaClause = userSpecifiedSchema.map(s => s"($s)").getOrElse("")
val uri = path.toURI
val sqlCreateTable =
s"""
|CREATE TABLE $tabName $schemaClause
|USING parquet
|OPTIONS (
| path '$uri'
|)
|$partitionClause
""".stripMargin
if (userSpecifiedSchema.isEmpty && userSpecifiedPartitionCols.nonEmpty) {
val e = intercept[AnalysisException](sql(sqlCreateTable)).getMessage
assert(e.contains(
"not allowed to specify partition columns when the table schema is not defined"))
} else {
sql(sqlCreateTable)
val tableMetadata = spark.sessionState.catalog.getTableMetadata(TableIdentifier(tabName))
assert(expectedSchema == tableMetadata.schema)
assert(expectedPartitionCols == tableMetadata.partitionColumnNames)
}
}
}
// Four combinations of {partitioned, non-partitioned} x {with, without} a
// user-specified schema, all driven through checkSchemaInCreatedDataSourceTable.
// Note: with schema inference, partition columns are appended at the end of the schema.
test("Create partitioned data source table without user specified schema") {
import testImplicits._
val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("num", "str")
// Case 1: with partitioning columns but no schema: Option("inexistentColumns")
// Case 2: without schema and partitioning columns: None
Seq(Option("inexistentColumns"), None).foreach { partitionCols =>
withTempPath { pathToPartitionedTable =>
df.write.format("parquet").partitionBy("num")
.save(pathToPartitionedTable.getCanonicalPath)
checkSchemaInCreatedDataSourceTable(
pathToPartitionedTable,
userSpecifiedSchema = None,
userSpecifiedPartitionCols = partitionCols,
expectedSchema = new StructType().add("str", StringType).add("num", IntegerType),
expectedPartitionCols = Seq("num"))
}
}
}
test("Create partitioned data source table with user specified schema") {
import testImplicits._
val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("num", "str")
// Case 1: with partitioning columns but no schema: Option("num")
// Case 2: without schema and partitioning columns: None
Seq(Option("num"), None).foreach { partitionCols =>
withTempPath { pathToPartitionedTable =>
df.write.format("parquet").partitionBy("num")
.save(pathToPartitionedTable.getCanonicalPath)
checkSchemaInCreatedDataSourceTable(
pathToPartitionedTable,
userSpecifiedSchema = Option("num int, str string"),
userSpecifiedPartitionCols = partitionCols,
expectedSchema = new StructType().add("str", StringType).add("num", IntegerType),
expectedPartitionCols = partitionCols.map(Seq(_)).getOrElse(Seq.empty[String]))
}
}
}
test("Create non-partitioned data source table without user specified schema") {
import testImplicits._
val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("num", "str")
// Case 1: with partitioning columns but no schema: Option("inexistentColumns")
// Case 2: without schema and partitioning columns: None
Seq(Option("inexistentColumns"), None).foreach { partitionCols =>
withTempPath { pathToNonPartitionedTable =>
df.write.format("parquet").save(pathToNonPartitionedTable.getCanonicalPath)
checkSchemaInCreatedDataSourceTable(
pathToNonPartitionedTable,
userSpecifiedSchema = None,
userSpecifiedPartitionCols = partitionCols,
expectedSchema = new StructType().add("num", IntegerType).add("str", StringType),
expectedPartitionCols = Seq.empty[String])
}
}
}
test("Create non-partitioned data source table with user specified schema") {
import testImplicits._
val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("num", "str")
// Case 1: with partitioning columns but no schema: Option("inexistentColumns")
// Case 2: without schema and partitioning columns: None
Seq(Option("num"), None).foreach { partitionCols =>
withTempPath { pathToNonPartitionedTable =>
df.write.format("parquet").save(pathToNonPartitionedTable.getCanonicalPath)
checkSchemaInCreatedDataSourceTable(
pathToNonPartitionedTable,
userSpecifiedSchema = Option("num int, str string"),
userSpecifiedPartitionCols = partitionCols,
expectedSchema = if (partitionCols.isDefined) {
// we skipped inference, so the partition col is ordered at the end
new StructType().add("str", StringType).add("num", IntegerType)
} else {
// no inferred partitioning, so schema is in original order
new StructType().add("num", IntegerType).add("str", StringType)
},
expectedPartitionCols = partitionCols.map(Seq(_)).getOrElse(Seq.empty[String]))
}
}
}
// Duplicate detection must honor spark.sql.caseSensitive: ("a", "a") collides
// always; ("aA", "Aa") collides only when case-insensitive.
test("create table - duplicate column names in the table definition") {
Seq((true, ("a", "a")), (false, ("aA", "Aa"))).foreach { case (caseSensitive, (c0, c1)) =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
val errMsg = intercept[AnalysisException] {
sql(s"CREATE TABLE t($c0 INT, $c1 INT) USING parquet")
}.getMessage
assert(errMsg.contains(
"Found duplicate column(s) in the table definition of `default`.`t`"))
}
}
}
test("create table - partition column names not in table definition") {
val e = intercept[AnalysisException] {
sql("CREATE TABLE tbl(a int, b string) USING json PARTITIONED BY (c)")
}
assert(e.message == "partition column c is not defined in table default.tbl, " +
"defined table columns are: a, b")
}
test("create table - bucket column names not in table definition") {
val e = intercept[AnalysisException] {
sql("CREATE TABLE tbl(a int, b string) USING json CLUSTERED BY (c) INTO 4 BUCKETS")
}
assert(e.message == "bucket column c is not defined in table default.tbl, " +
"defined table columns are: a, b")
}
test("create table - column repeated in partition columns") {
Seq((true, ("a", "a")), (false, ("aA", "Aa"))).foreach { case (caseSensitive, (c0, c1)) =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
val errMsg = intercept[AnalysisException] {
sql(s"CREATE TABLE t($c0 INT) USING parquet PARTITIONED BY ($c0, $c1)")
}.getMessage
assert(errMsg.contains("Found duplicate column(s) in the partition schema"))
}
}
}
test("create table - column repeated in bucket/sort columns") {
Seq((true, ("a", "a")), (false, ("aA", "Aa"))).foreach { case (caseSensitive, (c0, c1)) =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
var errMsg = intercept[AnalysisException] {
sql(s"CREATE TABLE t($c0 INT) USING parquet CLUSTERED BY ($c0, $c1) INTO 2 BUCKETS")
}.getMessage
assert(errMsg.contains("Found duplicate column(s) in the bucket definition"))
errMsg = intercept[AnalysisException] {
sql(s"""
|CREATE TABLE t($c0 INT, col INT) USING parquet CLUSTERED BY (col)
| SORTED BY ($c0, $c1) INTO 2 BUCKETS
""".stripMargin)
}.getMessage
assert(errMsg.contains("Found duplicate column(s) in the sort definition"))
}
}
}
// Appending with a different explicit path than the one the table was created
// with must fail rather than silently write elsewhere; original data survives.
test("create table - append to a non-partitioned table created with different paths") {
import testImplicits._
withTempDir { dir1 =>
withTempDir { dir2 =>
withTable("path_test") {
Seq(1L -> "a").toDF("v1", "v2")
.write
.mode(SaveMode.Append)
.format("json")
.option("path", dir1.getCanonicalPath)
.saveAsTable("path_test")
val ex = intercept[AnalysisException] {
Seq((3L, "c")).toDF("v1", "v2")
.write
.mode(SaveMode.Append)
.format("json")
.option("path", dir2.getCanonicalPath)
.saveAsTable("path_test")
}.getMessage
assert(ex.contains("The location of the existing table `default`.`path_test`"))
checkAnswer(
spark.table("path_test"), Row(1L, "a") :: Nil)
}
}
}
}
// The catalog schema is fixed at CREATE time: rewriting the underlying files with
// different columns/partitioning must not change it, not even after REFRESH.
test("Refresh table after changing the data source table partitioning") {
import testImplicits._
val tabName = "tab1"
val catalog = spark.sessionState.catalog
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString, i, i))
.toDF("col1", "col2", "col3", "col4")
df.write.format("json").partitionBy("col1", "col3").save(path)
val schema = new StructType()
.add("col2", StringType).add("col4", LongType)
.add("col1", IntegerType).add("col3", IntegerType)
val partitionCols = Seq("col1", "col3")
val uri = dir.toURI
withTable(tabName) {
spark.sql(
s"""
|CREATE TABLE $tabName
|USING json
|OPTIONS (
| path '$uri'
|)
""".stripMargin)
val tableMetadata = catalog.getTableMetadata(TableIdentifier(tabName))
assert(tableMetadata.schema == schema)
assert(tableMetadata.partitionColumnNames == partitionCols)
// Change the schema
val newDF = sparkContext.parallelize(1 to 10).map(i => (i, i.toString))
.toDF("newCol1", "newCol2")
newDF.write.format("json").partitionBy("newCol1").mode(SaveMode.Overwrite).save(path)
// No change on the schema
val tableMetadataBeforeRefresh = catalog.getTableMetadata(TableIdentifier(tabName))
assert(tableMetadataBeforeRefresh.schema == schema)
assert(tableMetadataBeforeRefresh.partitionColumnNames == partitionCols)
// Refresh does not affect the schema
spark.catalog.refreshTable(tabName)
val tableMetadataAfterRefresh = catalog.getTableMetadata(TableIdentifier(tabName))
assert(tableMetadataAfterRefresh.schema == schema)
assert(tableMetadataAfterRefresh.partitionColumnNames == partitionCols)
}
}
}
// Same case-sensitivity-aware duplicate check as for tables, applied to views.
test("create view - duplicate column names in the view definition") {
Seq((true, ("a", "a")), (false, ("aA", "Aa"))).foreach { case (caseSensitive, (c0, c1)) =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
val errMsg = intercept[AnalysisException] {
sql(s"CREATE VIEW t AS SELECT * FROM VALUES (1, 1) AS t($c0, $c1)")
}.getMessage
assert(errMsg.contains("Found duplicate column(s) in the view definition"))
}
}
}
// DESCRIBE DATABASE EXTENDED output is checked after each SET DBPROPERTIES, and
// SET LOCATION is verified (supported in-memory, rejected by the Hive catalog).
test("Alter/Describe Database") {
val catalog = spark.sessionState.catalog
val databaseNames = Seq("db1", "`database`")
databaseNames.foreach { dbName =>
try {
val dbNameWithoutBackTicks = cleanIdentifier(dbName)
val location = getDBPath(dbNameWithoutBackTicks)
sql(s"CREATE DATABASE $dbName")
checkAnswer(
sql(s"DESCRIBE DATABASE EXTENDED $dbName").toDF("key", "value")
.where("key not like 'Owner%'"), // filter for consistency with in-memory catalog
Row("Database Name", dbNameWithoutBackTicks) ::
Row("Comment", "") ::
Row("Location", CatalogUtils.URIToString(location)) ::
Row("Properties", "") :: Nil)
sql(s"ALTER DATABASE $dbName SET DBPROPERTIES ('a'='a', 'b'='b', 'c'='c')")
checkAnswer(
sql(s"DESCRIBE DATABASE EXTENDED $dbName").toDF("key", "value")
.where("key not like 'Owner%'"), // filter for consistency with in-memory catalog
Row("Database Name", dbNameWithoutBackTicks) ::
Row("Comment", "") ::
Row("Location", CatalogUtils.URIToString(location)) ::
Row("Properties", "((a,a), (b,b), (c,c))") :: Nil)
sql(s"ALTER DATABASE $dbName SET DBPROPERTIES ('d'='d')")
checkAnswer(
sql(s"DESCRIBE DATABASE EXTENDED $dbName").toDF("key", "value")
.where("key not like 'Owner%'"), // filter for consistency with in-memory catalog
Row("Database Name", dbNameWithoutBackTicks) ::
Row("Comment", "") ::
Row("Location", CatalogUtils.URIToString(location)) ::
// Properties accumulate across ALTER statements.
Row("Properties", "((a,a), (b,b), (c,c), (d,d))") :: Nil)
withTempDir { tmpDir =>
if (isUsingHiveMetastore) {
val e1 = intercept[AnalysisException] {
sql(s"ALTER DATABASE $dbName SET LOCATION '${tmpDir.toURI}'")
}
assert(e1.getMessage.contains("does not support altering database location"))
} else {
sql(s"ALTER DATABASE $dbName SET LOCATION '${tmpDir.toURI}'")
val uriInCatalog = catalog.getDatabaseMetadata(dbNameWithoutBackTicks).locationUri
assert("file" === uriInCatalog.getScheme)
assert(new Path(tmpDir.getPath).toUri.getPath === uriInCatalog.getPath)
}
intercept[NoSuchDatabaseException] {
sql(s"ALTER DATABASE `db-not-exist` SET LOCATION '${tmpDir.toURI}'")
}
val e3 = intercept[IllegalArgumentException] {
sql(s"ALTER DATABASE $dbName SET LOCATION ''")
}
assert(e3.getMessage.contains("Can not create a Path from an empty string"))
}
} finally {
catalog.reset()
}
}
}
// DROP/ALTER/DESCRIBE on a missing database must all fail, while
// DROP DATABASE IF EXISTS is a no-op.
test("Drop/Alter/Describe Database - database does not exists") {
val databaseNames = Seq("db1", "`database`")
databaseNames.foreach { dbName =>
val dbNameWithoutBackTicks = cleanIdentifier(dbName)
assert(!spark.sessionState.catalog.databaseExists(dbNameWithoutBackTicks))
var message = intercept[AnalysisException] {
sql(s"DROP DATABASE $dbName")
}.getMessage
// TODO: Unify the exception.
if (isUsingHiveMetastore) {
assert(message.contains(s"NoSuchObjectException: $dbNameWithoutBackTicks"))
} else {
assert(message.contains(s"Database '$dbNameWithoutBackTicks' not found"))
}
message = intercept[AnalysisException] {
sql(s"ALTER DATABASE $dbName SET DBPROPERTIES ('d'='d')")
}.getMessage
assert(message.contains(s"Database '$dbNameWithoutBackTicks' not found"))
message = intercept[AnalysisException] {
sql(s"DESCRIBE DATABASE EXTENDED $dbName")
}.getMessage
assert(message.contains(s"Database '$dbNameWithoutBackTicks' not found"))
sql(s"DROP DATABASE IF EXISTS $dbName")
}
}
// RESTRICT refuses to drop a database that still contains tables; after the
// table is removed, the drop succeeds.
test("drop non-empty database in restrict mode") {
val catalog = spark.sessionState.catalog
val dbName = "db1"
sql(s"CREATE DATABASE $dbName")
// create a table in database
val tableIdent1 = TableIdentifier("tab1", Some(dbName))
createTable(catalog, tableIdent1)
// drop a non-empty database in Restrict mode
val message = intercept[AnalysisException] {
sql(s"DROP DATABASE $dbName RESTRICT")
}.getMessage
assert(message.contains(s"Database $dbName is not empty. One or more tables exist"))
catalog.dropTable(tableIdent1, ignoreIfNotExists = false, purge = false)
assert(catalog.listDatabases().contains(dbName))
sql(s"DROP DATABASE $dbName RESTRICT")
assert(!catalog.listDatabases().contains(dbName))
}
// CASCADE drops the database together with its tables.
test("drop non-empty database in cascade mode") {
val catalog = spark.sessionState.catalog
val dbName = "db1"
sql(s"CREATE DATABASE $dbName")
// create a table in database
val tableIdent1 = TableIdentifier("tab1", Some(dbName))
createTable(catalog, tableIdent1)
// drop a non-empty database in CASCADE mode
assert(catalog.listTables(dbName).contains(tableIdent1))
assert(catalog.listDatabases().contains(dbName))
sql(s"DROP DATABASE $dbName CASCADE")
assert(!catalog.listDatabases().contains(dbName))
}
// Tables created without a database qualifier land in `default`.
test("create table in default db") {
val catalog = spark.sessionState.catalog
val tableIdent1 = TableIdentifier("tab1", None)
createTable(catalog, tableIdent1)
val expectedTableIdent = tableIdent1.copy(database = Some("default"))
val expectedTable = generateTable(catalog, expectedTableIdent)
checkCatalogTables(expectedTable, catalog.getTableMetadata(tableIdent1))
}
test("create table in a specific db") {
val catalog = spark.sessionState.catalog
createDatabase(catalog, "dbx")
val tableIdent1 = TableIdentifier("tab1", Some("dbx"))
createTable(catalog, tableIdent1)
val expectedTable = generateTable(catalog, tableIdent1)
checkCatalogTables(expectedTable, catalog.getTableMetadata(tableIdent1))
}
// CREATE TABLE ... USING must record type, schema, provider, partitioning and
// bucketing correctly in the catalog.
test("create table using") {
val catalog = spark.sessionState.catalog
withTable("tbl") {
sql("CREATE TABLE tbl(a INT, b INT) USING parquet")
val table = catalog.getTableMetadata(TableIdentifier("tbl"))
assert(table.tableType == CatalogTableType.MANAGED)
assert(table.schema == new StructType().add("a", "int").add("b", "int"))
assert(table.provider == Some("parquet"))
}
}
test("create table using - with partitioned by") {
val catalog = spark.sessionState.catalog
withTable("tbl") {
sql("CREATE TABLE tbl(a INT, b INT) USING parquet PARTITIONED BY (a)")
val table = catalog.getTableMetadata(TableIdentifier("tbl"))
assert(table.tableType == CatalogTableType.MANAGED)
assert(table.provider == Some("parquet"))
// a is ordered last since it is a user-specified partitioning column
assert(table.schema == new StructType().add("b", IntegerType).add("a", IntegerType))
assert(table.partitionColumnNames == Seq("a"))
}
}
test("create table using - with bucket") {
val catalog = spark.sessionState.catalog
withTable("tbl") {
sql("CREATE TABLE tbl(a INT, b INT) USING parquet " +
"CLUSTERED BY (a) SORTED BY (b) INTO 5 BUCKETS")
val table = catalog.getTableMetadata(TableIdentifier("tbl"))
assert(table.tableType == CatalogTableType.MANAGED)
assert(table.provider == Some("parquet"))
assert(table.schema == new StructType().add("a", IntegerType).add("b", IntegerType))
assert(table.bucketSpec == Some(BucketSpec(5, Seq("a"), Seq("b"))))
}
}
test("create temporary view using") {
// when we test the HiveCatalogedDDLSuite, it would fail because the csvFile path above
// starts with 'jar:', and it is an illegal parameter for Path, so here we copy it
// to a temp file by withResourceTempPath
withResourceTempPath("test-data/cars.csv") { tmpFile =>
withTempView("testview") {
sql(s"CREATE OR REPLACE TEMPORARY VIEW testview (c1 String, c2 String) USING " +
"org.apache.spark.sql.execution.datasources.csv.CSVFileFormat " +
s"OPTIONS (PATH '${tmpFile.toURI}')")
checkAnswer(
sql("select c1, c2 from testview order by c1 limit 1"),
Row("1997", "Ford") :: Nil)
// Fails if creating a new view with the same name
intercept[TempTableAlreadyExistsException] {
sql(
s"""
|CREATE TEMPORARY VIEW testview
|USING org.apache.spark.sql.execution.datasources.csv.CSVFileFormat
|OPTIONS (PATH '${tmpFile.toURI}')
""".stripMargin)
}
}
}
}
// RENAME: qualified and unqualified destinations, the current-database default,
// and the two error cases (missing source, cross-database destination).
test("alter table: rename") {
val catalog = spark.sessionState.catalog
val tableIdent1 = TableIdentifier("tab1", Some("dbx"))
val tableIdent2 = TableIdentifier("tab2", Some("dbx"))
createDatabase(catalog, "dbx")
createDatabase(catalog, "dby")
createTable(catalog, tableIdent1)
assert(catalog.listTables("dbx") == Seq(tableIdent1))
sql("ALTER TABLE dbx.tab1 RENAME TO dbx.tab2")
assert(catalog.listTables("dbx") == Seq(tableIdent2))
// The database in destination table name can be omitted, and we will use the database of source
// table for it.
sql("ALTER TABLE dbx.tab2 RENAME TO tab1")
assert(catalog.listTables("dbx") == Seq(tableIdent1))
catalog.setCurrentDatabase("dbx")
// rename without explicitly specifying database
sql("ALTER TABLE tab1 RENAME TO tab2")
assert(catalog.listTables("dbx") == Seq(tableIdent2))
// table to rename does not exist
intercept[AnalysisException] {
sql("ALTER TABLE dbx.does_not_exist RENAME TO dbx.tab2")
}
// destination database is different
intercept[AnalysisException] {
sql("ALTER TABLE dbx.tab1 RENAME TO dby.tab2")
}
}
// The cache entry must follow the rename: a new table reusing the old name must
// not see the renamed table's cached data.
test("alter table: rename cached table") {
import testImplicits._
sql("CREATE TABLE students (age INT, name STRING) USING parquet")
val df = (1 to 2).map { i => (i, i.toString) }.toDF("age", "name")
df.write.insertInto("students")
spark.catalog.cacheTable("students")
checkAnswer(spark.table("students"), df)
assert(spark.catalog.isCached("students"), "bad test: table was not cached in the first place")
sql("ALTER TABLE students RENAME TO teachers")
sql("CREATE TABLE students (age INT, name STRING) USING parquet")
// Now we have both students and teachers.
// The cached data for the old students table should not be read by the new students table.
assert(!spark.catalog.isCached("students"))
assert(spark.catalog.isCached("teachers"))
assert(spark.table("students").collect().isEmpty)
checkAnswer(spark.table("teachers"), df)
}
// Renaming a temporary view to a database-qualified name is rejected: temp views
// do not belong to any database. Covered for both TEMPORARY TABLE and VIEW syntax.
test("rename temporary view - destination table with database name") {
withTempView("tab1") {
sql(
"""
|CREATE TEMPORARY TABLE tab1
|USING org.apache.spark.sql.sources.DDLScanSource
|OPTIONS (
| From '1',
| To '10',
| Table 'test1'
|)
""".stripMargin)
val e = intercept[AnalysisException] {
sql("ALTER TABLE tab1 RENAME TO default.tab2")
}
assert(e.getMessage.contains(
"RENAME TEMPORARY VIEW from '`tab1`' to '`default`.`tab2`': " +
"cannot specify database name 'default' in the destination table"))
val catalog = spark.sessionState.catalog
assert(catalog.listTables("default") == Seq(TableIdentifier("tab1")))
}
}
test("rename temporary view - destination table with database name,with:CREATE TEMPORARY view") {
withTempView("view1") {
sql(
"""
|CREATE TEMPORARY VIEW view1
|USING org.apache.spark.sql.sources.DDLScanSource
|OPTIONS (
| From '1',
| To '10',
| Table 'test1'
|)
""".stripMargin)
val e = intercept[AnalysisException] {
sql("ALTER TABLE view1 RENAME TO default.tab2")
}
assert(e.getMessage.contains(
"RENAME TEMPORARY VIEW from '`view1`' to '`default`.`tab2`': " +
"cannot specify database name 'default' in the destination table"))
val catalog = spark.sessionState.catalog
assert(catalog.listTables("default") == Seq(TableIdentifier("view1")))
}
}
// Both ALTER TABLE and ALTER VIEW can rename a temporary view; the old name
// becomes unresolvable after the rename.
test("rename temporary view") {
withTempView("tab1", "tab2") {
spark.range(10).createOrReplaceTempView("tab1")
sql("ALTER TABLE tab1 RENAME TO tab2")
checkAnswer(spark.table("tab2"), spark.range(10).toDF())
val e = intercept[AnalysisException](spark.table("tab1")).getMessage
assert(e.contains("Table or view not found"))
sql("ALTER VIEW tab2 RENAME TO tab1")
checkAnswer(spark.table("tab1"), spark.range(10).toDF())
intercept[AnalysisException] { spark.table("tab2") }
}
}
// Renaming onto an existing temporary view must fail and leave both views intact.
// Covered for both TEMPORARY TABLE and TEMPORARY VIEW creation syntax.
test("rename temporary view - destination table already exists") {
withTempView("tab1", "tab2") {
sql(
"""
|CREATE TEMPORARY TABLE tab1
|USING org.apache.spark.sql.sources.DDLScanSource
|OPTIONS (
| From '1',
| To '10',
| Table 'test1'
|)
""".stripMargin)
sql(
"""
|CREATE TEMPORARY TABLE tab2
|USING org.apache.spark.sql.sources.DDLScanSource
|OPTIONS (
| From '1',
| To '10',
| Table 'test1'
|)
""".stripMargin)
val e = intercept[AnalysisException] {
sql("ALTER TABLE tab1 RENAME TO tab2")
}
assert(e.getMessage.contains(
"RENAME TEMPORARY VIEW from '`tab1`' to '`tab2`': destination table already exists"))
val catalog = spark.sessionState.catalog
assert(catalog.listTables("default") == Seq(TableIdentifier("tab1"), TableIdentifier("tab2")))
}
}
test("rename temporary view - destination table already exists, with: CREATE TEMPORARY view") {
withTempView("view1", "view2") {
sql(
"""
|CREATE TEMPORARY VIEW view1
|USING org.apache.spark.sql.sources.DDLScanSource
|OPTIONS (
| From '1',
| To '10',
| Table 'test1'
|)
""".stripMargin)
sql(
"""
|CREATE TEMPORARY VIEW view2
|USING org.apache.spark.sql.sources.DDLScanSource
|OPTIONS (
| From '1',
| To '10',
| Table 'test1'
|)
""".stripMargin)
val e = intercept[AnalysisException] {
sql("ALTER TABLE view1 RENAME TO view2")
}
assert(e.getMessage.contains(
"RENAME TEMPORARY VIEW from '`view1`' to '`view2`': destination table already exists"))
val catalog = spark.sessionState.catalog
assert(catalog.listTables("default") ==
Seq(TableIdentifier("view1"), TableIdentifier("view2")))
}
}
// Hive-style ALTER TABLE clauses for bucketing and skew are parsed but rejected
// with "operation not allowed" (see assertUnsupported).
test("alter table: bucketing is not supported") {
val catalog = spark.sessionState.catalog
val tableIdent = TableIdentifier("tab1", Some("dbx"))
createDatabase(catalog, "dbx")
createTable(catalog, tableIdent)
assertUnsupported("ALTER TABLE dbx.tab1 CLUSTERED BY (blood, lemon, grape) INTO 11 BUCKETS")
assertUnsupported("ALTER TABLE dbx.tab1 CLUSTERED BY (fuji) SORTED BY (grape) INTO 5 BUCKETS")
assertUnsupported("ALTER TABLE dbx.tab1 NOT CLUSTERED")
assertUnsupported("ALTER TABLE dbx.tab1 NOT SORTED")
}
test("alter table: skew is not supported") {
val catalog = spark.sessionState.catalog
val tableIdent = TableIdentifier("tab1", Some("dbx"))
createDatabase(catalog, "dbx")
createTable(catalog, tableIdent)
assertUnsupported("ALTER TABLE dbx.tab1 SKEWED BY (dt, country) ON " +
"(('2008-08-08', 'us'), ('2009-09-09', 'uk'), ('2010-10-10', 'cn'))")
assertUnsupported("ALTER TABLE dbx.tab1 SKEWED BY (dt, country) ON " +
"(('2008-08-08', 'us'), ('2009-09-09', 'uk')) STORED AS DIRECTORIES")
assertUnsupported("ALTER TABLE dbx.tab1 NOT SKEWED")
assertUnsupported("ALTER TABLE dbx.tab1 NOT STORED AS DIRECTORIES")
}
// Runs testRecoverPartitions under both listing strategies: a high parallel-listing
// threshold forces sequential listing; a threshold of 0 forces parallel listing.
// The original conf value is restored in a finally block either way.
test("alter table: recover partitions (sequential)") {
val oldRddParallelListingThreshold = spark.sparkContext.conf.get(
RDD_PARALLEL_LISTING_THRESHOLD)
try {
spark.sparkContext.conf.set(RDD_PARALLEL_LISTING_THRESHOLD.key, "10")
testRecoverPartitions()
} finally {
spark.sparkContext.conf.set(RDD_PARALLEL_LISTING_THRESHOLD, oldRddParallelListingThreshold)
}
}
test("alter table: recover partition (parallel)") {
val oldRddParallelListingThreshold = spark.sparkContext.conf.get(
RDD_PARALLEL_LISTING_THRESHOLD)
try {
spark.sparkContext.conf.set(RDD_PARALLEL_LISTING_THRESHOLD.key, "0")
testRecoverPartitions()
} finally {
spark.sparkContext.conf.set(RDD_PARALLEL_LISTING_THRESHOLD, oldRddParallelListingThreshold)
}
}
/**
 * Builds a partition directory tree on the table's file system — valid partition
 * dirs (including upper-cased names), data files, hidden/temporary entries, and
 * malformed layouts — then verifies ALTER TABLE ... RECOVER PARTITIONS registers
 * exactly the valid partitions and (for the in-memory catalog) their file counts.
 */
protected def testRecoverPartitions(): Unit = {
val catalog = spark.sessionState.catalog
// table to alter does not exist
intercept[AnalysisException] {
sql("ALTER TABLE does_not_exist RECOVER PARTITIONS")
}
val tableIdent = TableIdentifier("tab1")
createTable(catalog, tableIdent, partitionCols = Seq("a", "b", "c"))
val part1 = Map("a" -> "1", "b" -> "5", "c" -> "19")
createTablePartition(catalog, part1, tableIdent)
assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1))
val part2 = Map("a" -> "2", "b" -> "6", "c" -> "31")
val root = new Path(catalog.getTableMetadata(tableIdent).location)
val fs = root.getFileSystem(spark.sessionState.newHadoopConf())
// valid
fs.mkdirs(new Path(new Path(new Path(root, "a=1"), "b=5"), "c=19"))
fs.createNewFile(new Path(new Path(root, "a=1/b=5/c=19"), "a.csv")) // file
fs.createNewFile(new Path(new Path(root, "a=1/b=5/c=19"), "_SUCCESS")) // file
// upper-cased dir names must still be recovered as partition part2
fs.mkdirs(new Path(new Path(new Path(root, "A=2"), "B=6"), "C=31"))
fs.createNewFile(new Path(new Path(root, "A=2/B=6/C=31"), "b.csv")) // file
fs.createNewFile(new Path(new Path(root, "A=2/B=6/C=31"), "c.csv")) // file
fs.createNewFile(new Path(new Path(root, "A=2/B=6/C=31"), ".hiddenFile")) // file
fs.mkdirs(new Path(new Path(root, "A=2/B=6/C=31"), "_temporary"))
// 91 additional valid partitions, pre-registered in the catalog
val parts = (10 to 100).map { a =>
val part = Map("a" -> a.toString, "b" -> "5", "c" -> "42")
fs.mkdirs(new Path(new Path(new Path(root, s"a=$a"), "b=5"), "c=42"))
fs.createNewFile(new Path(new Path(root, s"a=$a/b=5/c=42"), "a.csv")) // file
createTablePartition(catalog, part, tableIdent)
part
}
// invalid
fs.mkdirs(new Path(new Path(root, "a"), "b")) // bad name
fs.mkdirs(new Path(new Path(root, "b=1"), "a=1")) // wrong order
fs.mkdirs(new Path(root, "a=4")) // not enough columns
fs.createNewFile(new Path(new Path(root, "a=1"), "b=4")) // file
fs.createNewFile(new Path(new Path(root, "a=1"), "_SUCCESS")) // _SUCCESS
fs.mkdirs(new Path(new Path(root, "a=1"), "_temporary")) // _temporary
fs.mkdirs(new Path(new Path(root, "a=1"), ".b=4")) // start with .
try {
sql("ALTER TABLE tab1 RECOVER PARTITIONS")
assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
Set(part1, part2) ++ parts)
if (!isUsingHiveMetastore) {
assert(catalog.getPartition(tableIdent, part1).parameters("numFiles") == "1")
assert(catalog.getPartition(tableIdent, part2).parameters("numFiles") == "2")
} else {
// After ALTER TABLE, the statistics of the first partition is removed by Hive metastore
assert(catalog.getPartition(tableIdent, part1).parameters.get("numFiles").isEmpty)
assert(catalog.getPartition(tableIdent, part2).parameters("numFiles") == "2")
}
} finally {
fs.delete(root, true)
}
}
test("alter table: add partition is not supported for views") {
  // ALTER VIEW ... ADD PARTITION is only meaningful for tables and must be rejected.
  val addPartitionToView = "ALTER VIEW dbx.tab1 ADD IF NOT EXISTS PARTITION (b='2')"
  assertUnsupported(addPartitionToView)
}
test("alter table: drop partition is not supported for views") {
  // ALTER VIEW ... DROP PARTITION is only meaningful for tables and must be rejected.
  val dropPartitionFromView = "ALTER VIEW dbx.tab1 DROP IF EXISTS PARTITION (b='2')"
  assertUnsupported(dropPartitionFromView)
}
test("show databases") {
  sql("CREATE DATABASE showdb2B")
  sql("CREATE DATABASE showdb1A")
  // Database names are returned lower-cased; checkDataset also verifies their order.
  checkDataset(sql("SHOW DATABASES"), Row("default"), Row("showdb1a"), Row("showdb2b"))
  // Pattern filtering, with and without the optional LIKE keyword, including
  // alternation ('|') and a pattern that matches nothing.
  Seq(
    "SHOW DATABASES LIKE '*db1A'" -> (Row("showdb1a") :: Nil),
    "SHOW DATABASES '*db1A'" -> (Row("showdb1a") :: Nil),
    "SHOW DATABASES LIKE 'showdb1A'" -> (Row("showdb1a") :: Nil),
    "SHOW DATABASES LIKE '*db1A|*db2B'" -> (Row("showdb1a") :: Row("showdb2b") :: Nil),
    "SHOW DATABASES LIKE 'non-existentdb'" -> Nil
  ).foreach { case (query, expectedRows) =>
    checkAnswer(sql(query), expectedRows)
  }
}
test("drop view - temporary view") {
  val sessionCatalog = spark.sessionState.catalog
  // Register a temporary view backed by the DDLScanSource test relation.
  sql(
    """
      |CREATE TEMPORARY VIEW tab1
      |USING org.apache.spark.sql.sources.DDLScanSource
      |OPTIONS (
      |  From '1',
      |  To '10',
      |  Table 'test1'
      |)
    """.stripMargin)
  assert(sessionCatalog.listTables("default") == Seq(TableIdentifier("tab1")))
  // DROP VIEW must remove the temporary view from the session catalog.
  sql("DROP VIEW tab1")
  assert(sessionCatalog.listTables("default") == Nil)
}
/** Verifies DROP TABLE semantics, with and without IF EXISTS. */
protected def testDropTable(isDatasourceTable: Boolean): Unit = {
  // InMemoryCatalog can only represent data source tables.
  if (!isUsingHiveMetastore) {
    assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
  }
  val sessionCatalog = spark.sessionState.catalog
  val ident = TableIdentifier("tab1", Some("dbx"))
  createDatabase(sessionCatalog, "dbx")
  createTable(sessionCatalog, ident, isDatasourceTable)
  assert(sessionCatalog.listTables("dbx") == Seq(ident))
  // Dropping an existing table removes it from the catalog.
  sql("DROP TABLE dbx.tab1")
  assert(sessionCatalog.listTables("dbx") == Nil)
  // IF EXISTS makes dropping a missing table a no-op ...
  sql("DROP TABLE IF EXISTS dbx.tab1")
  // ... while a plain DROP on a missing table fails.
  intercept[AnalysisException] {
    sql("DROP TABLE dbx.tab1")
  }
}
test("drop view") {
  val sessionCatalog = spark.sessionState.catalog
  val ident = TableIdentifier("tab1", Some("dbx"))
  createDatabase(sessionCatalog, "dbx")
  createTable(sessionCatalog, ident)
  assert(sessionCatalog.listTables("dbx") == Seq(ident))
  // DROP VIEW on a table must be rejected with a message pointing at DROP TABLE.
  val err = intercept[AnalysisException] {
    sql("DROP VIEW dbx.tab1")
  }
  assert(
    err.getMessage.contains("Cannot drop a table with DROP VIEW. Please use DROP TABLE instead"))
}
/** Verifies ALTER TABLE ... SET TBLPROPERTIES, qualified and unqualified. */
protected def testSetProperties(isDatasourceTable: Boolean): Unit = {
  if (!isUsingHiveMetastore) {
    assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
  }
  val sessionCatalog = spark.sessionState.catalog
  val ident = TableIdentifier("tab1", Some("dbx"))
  createDatabase(sessionCatalog, "dbx")
  createTable(sessionCatalog, ident, isDatasourceTable)
  // Current table properties, normalized when the Hive metastore injects extras.
  def currentProps: Map[String, String] = {
    val metadata = sessionCatalog.getTableMetadata(ident)
    if (isUsingHiveMetastore) normalizeCatalogTable(metadata).properties else metadata.properties
  }
  assert(currentProps.isEmpty)
  // set table properties
  sql("ALTER TABLE dbx.tab1 SET TBLPROPERTIES ('andrew' = 'or14', 'kor' = 'bel')")
  assert(currentProps == Map("andrew" -> "or14", "kor" -> "bel"))
  // set table properties without explicitly specifying database
  sessionCatalog.setCurrentDatabase("dbx")
  sql("ALTER TABLE tab1 SET TBLPROPERTIES ('kor' = 'belle', 'kar' = 'bol')")
  assert(currentProps == Map("andrew" -> "or14", "kor" -> "belle", "kar" -> "bol"))
  // table to alter does not exist
  intercept[AnalysisException] {
    sql("ALTER TABLE does_not_exist SET TBLPROPERTIES ('winner' = 'loser')")
  }
}
/** Verifies ALTER TABLE ... UNSET TBLPROPERTIES, including IF EXISTS handling. */
protected def testUnsetProperties(isDatasourceTable: Boolean): Unit = {
  if (!isUsingHiveMetastore) {
    assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
  }
  val sessionCatalog = spark.sessionState.catalog
  val ident = TableIdentifier("tab1", Some("dbx"))
  createDatabase(sessionCatalog, "dbx")
  createTable(sessionCatalog, ident, isDatasourceTable)
  // Current table properties, normalized when the Hive metastore injects extras.
  def currentProps: Map[String, String] = {
    val metadata = sessionCatalog.getTableMetadata(ident)
    if (isUsingHiveMetastore) normalizeCatalogTable(metadata).properties else metadata.properties
  }
  // unset table properties
  sql("ALTER TABLE dbx.tab1 SET TBLPROPERTIES ('j' = 'am', 'p' = 'an', 'c' = 'lan', 'x' = 'y')")
  sql("ALTER TABLE dbx.tab1 UNSET TBLPROPERTIES ('j')")
  assert(currentProps == Map("p" -> "an", "c" -> "lan", "x" -> "y"))
  // unset table properties without explicitly specifying database
  sessionCatalog.setCurrentDatabase("dbx")
  sql("ALTER TABLE tab1 UNSET TBLPROPERTIES ('p')")
  assert(currentProps == Map("c" -> "lan", "x" -> "y"))
  // table to alter does not exist
  intercept[AnalysisException] {
    sql("ALTER TABLE does_not_exist UNSET TBLPROPERTIES ('c' = 'lan')")
  }
  // unsetting a missing property fails and names the offending key ...
  val err = intercept[AnalysisException] {
    sql("ALTER TABLE tab1 UNSET TBLPROPERTIES ('c', 'xyz')")
  }
  assert(err.getMessage.contains("xyz"))
  // ... unless IF EXISTS is specified, in which case existing keys are still removed.
  sql("ALTER TABLE tab1 UNSET TBLPROPERTIES IF EXISTS ('c', 'xyz')")
  assert(currentProps == Map("x" -> "y"))
}
/**
 * Verifies ALTER TABLE ... SET LOCATION for tables and partitions, including
 * relative locations (resolved against the table location) and error cases.
 */
protected def testSetLocation(isDatasourceTable: Boolean): Unit = {
  if (!isUsingHiveMetastore) {
    assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
  }
  val sessionCatalog = spark.sessionState.catalog
  val ident = TableIdentifier("tab1", Some("dbx"))
  val partSpec = Map("a" -> "1", "b" -> "2")
  createDatabase(sessionCatalog, "dbx")
  createTable(sessionCatalog, ident, isDatasourceTable)
  createTablePartition(sessionCatalog, partSpec, ident)
  // Table and partition both start with a defined location and no serde properties.
  assert(sessionCatalog.getTableMetadata(ident).storage.locationUri.isDefined)
  assert(normalizeSerdeProp(sessionCatalog.getTableMetadata(ident).storage.properties).isEmpty)
  assert(sessionCatalog.getPartition(ident, partSpec).storage.locationUri.isDefined)
  assert(
    normalizeSerdeProp(sessionCatalog.getPartition(ident, partSpec).storage.properties).isEmpty)
  // Asserts that the table (or, when a spec is given, partition) location equals `expected`.
  def verifyLocation(expected: URI, spec: Option[TablePartitionSpec] = None): Unit = {
    val storageFormat = spec match {
      case Some(s) => sessionCatalog.getPartition(ident, s).storage
      case None => sessionCatalog.getTableMetadata(ident).storage
    }
    // TODO(gatorsmile): fix the bug in alter table set location.
    // if (isUsingHiveMetastore) {
    //  assert(storageFormat.properties.get("path") === expected)
    // }
    assert(storageFormat.locationUri ===
      Some(makeQualifiedPath(CatalogUtils.URIToString(expected))))
  }
  // set table location
  sql("ALTER TABLE dbx.tab1 SET LOCATION '/path/to/your/lovely/heart'")
  verifyLocation(new URI("/path/to/your/lovely/heart"))
  // set table partition location
  sql("ALTER TABLE dbx.tab1 PARTITION (a='1', b='2') SET LOCATION '/path/to/part/ways'")
  verifyLocation(new URI("/path/to/part/ways"), Some(partSpec))
  // set table location without explicitly specifying database
  sessionCatalog.setCurrentDatabase("dbx")
  sql("ALTER TABLE tab1 SET LOCATION '/swanky/steak/place'")
  verifyLocation(new URI("/swanky/steak/place"))
  // a relative partition location is resolved against the table's location
  sql("ALTER TABLE tab1 PARTITION (a='1', b='2') SET LOCATION 'vienna'")
  val tableMeta = spark.sessionState.catalog.getTableMetadata(TableIdentifier("tab1"))
  val viennaPartPath = new Path(new Path(tableMeta.location), "vienna")
  verifyLocation(CatalogUtils.stringToURI(viennaPartPath.toString), Some(partSpec))
  // table to alter does not exist
  intercept[AnalysisException] {
    sql("ALTER TABLE dbx.does_not_exist SET LOCATION '/mister/spark'")
  }
  // partition to alter does not exist
  intercept[AnalysisException] {
    sql("ALTER TABLE dbx.tab1 PARTITION (b='2') SET LOCATION '/mister/spark'")
  }
}
/**
 * Exercises ALTER TABLE ... SET SERDE / SET SERDEPROPERTIES at the table level.
 * Changing the serde class is rejected for data source tables; serde properties
 * can be updated for either table flavor.
 */
protected def testSetSerde(isDatasourceTable: Boolean): Unit = {
  if (!isUsingHiveMetastore) {
    assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
  }
  val catalog = spark.sessionState.catalog
  val tableIdent = TableIdentifier("tab1", Some("dbx"))
  createDatabase(catalog, "dbx")
  createTable(catalog, tableIdent, isDatasourceTable)
  // Compares the table's serde properties, normalizing Hive-injected entries when needed.
  def checkSerdeProps(expectedSerdeProps: Map[String, String]): Unit = {
    val serdeProp = catalog.getTableMetadata(tableIdent).storage.properties
    if (isUsingHiveMetastore) {
      assert(normalizeSerdeProp(serdeProp) == expectedSerdeProps)
    } else {
      assert(serdeProp == expectedSerdeProps)
    }
  }
  // Initial serde: the Hive metastore assigns a default serde class,
  // while InMemoryCatalog leaves it unset.
  if (isUsingHiveMetastore) {
    val expectedSerde = if (isDatasourceTable) {
      "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"
    } else {
      "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"
    }
    assert(catalog.getTableMetadata(tableIdent).storage.serde == Some(expectedSerde))
  } else {
    assert(catalog.getTableMetadata(tableIdent).storage.serde.isEmpty)
  }
  checkSerdeProps(Map.empty[String, String])
  // set table serde and/or properties (should fail on datasource tables)
  if (isDatasourceTable) {
    val e1 = intercept[AnalysisException] {
      sql("ALTER TABLE dbx.tab1 SET SERDE 'whatever'")
    }
    val e2 = intercept[AnalysisException] {
      sql("ALTER TABLE dbx.tab1 SET SERDE 'org.apache.madoop' " +
        "WITH SERDEPROPERTIES ('k' = 'v', 'kay' = 'vee')")
    }
    assert(e1.getMessage.contains("datasource"))
    assert(e2.getMessage.contains("datasource"))
  } else {
    val newSerde = "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"
    sql(s"ALTER TABLE dbx.tab1 SET SERDE '$newSerde'")
    assert(catalog.getTableMetadata(tableIdent).storage.serde == Some(newSerde))
    checkSerdeProps(Map.empty[String, String])
    val serde2 = "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe"
    sql(s"ALTER TABLE dbx.tab1 SET SERDE '$serde2' " +
      "WITH SERDEPROPERTIES ('k' = 'v', 'kay' = 'vee')")
    assert(catalog.getTableMetadata(tableIdent).storage.serde == Some(serde2))
    checkSerdeProps(Map("k" -> "v", "kay" -> "vee"))
  }
  // set serde properties only
  sql("ALTER TABLE dbx.tab1 SET SERDEPROPERTIES ('k' = 'vvv', 'kay' = 'vee')")
  checkSerdeProps(Map("k" -> "vvv", "kay" -> "vee"))
  // set things without explicitly specifying database
  catalog.setCurrentDatabase("dbx")
  sql("ALTER TABLE tab1 SET SERDEPROPERTIES ('kay' = 'veee')")
  checkSerdeProps(Map("k" -> "vvv", "kay" -> "veee"))
  // table to alter does not exist
  intercept[AnalysisException] {
    sql("ALTER TABLE does_not_exist SET SERDEPROPERTIES ('x' = 'y')")
  }
}
/**
 * Exercises ALTER TABLE ... PARTITION (...) SET SERDE / SET SERDEPROPERTIES.
 * Serde class changes are rejected for data source tables; partition-level
 * serde property updates may be wrapped in an expected exception via
 * maybeWrapException depending on the catalog implementation.
 */
protected def testSetSerdePartition(isDatasourceTable: Boolean): Unit = {
  if (!isUsingHiveMetastore) {
    assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
  }
  val catalog = spark.sessionState.catalog
  val tableIdent = TableIdentifier("tab1", Some("dbx"))
  val spec = Map("a" -> "1", "b" -> "2")
  createDatabase(catalog, "dbx")
  createTable(catalog, tableIdent, isDatasourceTable)
  createTablePartition(catalog, spec, tableIdent)
  createTablePartition(catalog, Map("a" -> "1", "b" -> "3"), tableIdent)
  createTablePartition(catalog, Map("a" -> "2", "b" -> "2"), tableIdent)
  createTablePartition(catalog, Map("a" -> "2", "b" -> "3"), tableIdent)
  // Compares the serde properties of partition `spec`, normalizing Hive-injected entries.
  def checkPartitionSerdeProps(expectedSerdeProps: Map[String, String]): Unit = {
    val serdeProp = catalog.getPartition(tableIdent, spec).storage.properties
    if (isUsingHiveMetastore) {
      assert(normalizeSerdeProp(serdeProp) == expectedSerdeProps)
    } else {
      assert(serdeProp == expectedSerdeProps)
    }
  }
  // Initial serde: Hive assigns a default serde class; InMemoryCatalog leaves it unset.
  if (isUsingHiveMetastore) {
    val expectedSerde = if (isDatasourceTable) {
      "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"
    } else {
      "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"
    }
    assert(catalog.getPartition(tableIdent, spec).storage.serde == Some(expectedSerde))
  } else {
    assert(catalog.getPartition(tableIdent, spec).storage.serde.isEmpty)
  }
  checkPartitionSerdeProps(Map.empty[String, String])
  // set table serde and/or properties (should fail on datasource tables)
  if (isDatasourceTable) {
    val e1 = intercept[AnalysisException] {
      sql("ALTER TABLE dbx.tab1 PARTITION (a=1, b=2) SET SERDE 'whatever'")
    }
    val e2 = intercept[AnalysisException] {
      sql("ALTER TABLE dbx.tab1 PARTITION (a=1, b=2) SET SERDE 'org.apache.madoop' " +
        "WITH SERDEPROPERTIES ('k' = 'v', 'kay' = 'vee')")
    }
    assert(e1.getMessage.contains("datasource"))
    assert(e2.getMessage.contains("datasource"))
  } else {
    sql("ALTER TABLE dbx.tab1 PARTITION (a=1, b=2) SET SERDE 'org.apache.jadoop'")
    assert(catalog.getPartition(tableIdent, spec).storage.serde == Some("org.apache.jadoop"))
    checkPartitionSerdeProps(Map.empty[String, String])
    sql("ALTER TABLE dbx.tab1 PARTITION (a=1, b=2) SET SERDE 'org.apache.madoop' " +
      "WITH SERDEPROPERTIES ('k' = 'v', 'kay' = 'vee')")
    assert(catalog.getPartition(tableIdent, spec).storage.serde == Some("org.apache.madoop"))
    checkPartitionSerdeProps(Map("k" -> "v", "kay" -> "vee"))
  }
  // set serde properties only
  maybeWrapException(isDatasourceTable) {
    sql("ALTER TABLE dbx.tab1 PARTITION (a=1, b=2) " +
      "SET SERDEPROPERTIES ('k' = 'vvv', 'kay' = 'vee')")
    checkPartitionSerdeProps(Map("k" -> "vvv", "kay" -> "vee"))
  }
  // set things without explicitly specifying database
  catalog.setCurrentDatabase("dbx")
  maybeWrapException(isDatasourceTable) {
    sql("ALTER TABLE tab1 PARTITION (a=1, b=2) SET SERDEPROPERTIES ('kay' = 'veee')")
    checkPartitionSerdeProps(Map("k" -> "vvv", "kay" -> "veee"))
  }
  // table to alter does not exist
  intercept[AnalysisException] {
    sql("ALTER TABLE does_not_exist PARTITION (a=1, b=2) SET SERDEPROPERTIES ('x' = 'y')")
  }
}
/**
 * Exercises ALTER TABLE ... ADD PARTITION: explicit (relative) locations,
 * IF NOT EXISTS semantics, resolution against the current database, and
 * case-insensitive partition column names.
 */
protected def testAddPartitions(isDatasourceTable: Boolean): Unit = {
  if (!isUsingHiveMetastore) {
    assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
  }
  val catalog = spark.sessionState.catalog
  val tableIdent = TableIdentifier("tab1", Some("dbx"))
  val part1 = Map("a" -> "1", "b" -> "5")
  val part2 = Map("a" -> "2", "b" -> "6")
  val part3 = Map("a" -> "3", "b" -> "7")
  val part4 = Map("a" -> "4", "b" -> "8")
  val part5 = Map("a" -> "9", "b" -> "9")
  createDatabase(catalog, "dbx")
  createTable(catalog, tableIdent, isDatasourceTable)
  createTablePartition(catalog, part1, tableIdent)
  assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1))
  // basic add partition: one partition with an explicit relative location, one without
  sql("ALTER TABLE dbx.tab1 ADD IF NOT EXISTS " +
    "PARTITION (a='2', b='6') LOCATION 'paris' PARTITION (a='3', b='7')")
  assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1, part2, part3))
  assert(catalog.getPartition(tableIdent, part1).storage.locationUri.isDefined)
  val tableLocation = catalog.getTableMetadata(tableIdent).storage.locationUri
  assert(tableLocation.isDefined)
  // the relative LOCATION 'paris' must be resolved against the table location
  val partitionLocation = makeQualifiedPath(
    new Path(tableLocation.get.toString, "paris").toString)
  assert(catalog.getPartition(tableIdent, part2).storage.locationUri == Option(partitionLocation))
  assert(catalog.getPartition(tableIdent, part3).storage.locationUri.isDefined)
  // add partitions without explicitly specifying database
  catalog.setCurrentDatabase("dbx")
  sql("ALTER TABLE tab1 ADD IF NOT EXISTS PARTITION (a='4', b='8')")
  assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
    Set(part1, part2, part3, part4))
  // table to alter does not exist
  intercept[AnalysisException] {
    sql("ALTER TABLE does_not_exist ADD IF NOT EXISTS PARTITION (a='4', b='9')")
  }
  // partition to add already exists
  intercept[AnalysisException] {
    sql("ALTER TABLE tab1 ADD PARTITION (a='4', b='8')")
  }
  // partition to add already exists when using IF NOT EXISTS: no-op
  sql("ALTER TABLE tab1 ADD IF NOT EXISTS PARTITION (a='4', b='8')")
  assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
    Set(part1, part2, part3, part4))
  // partition spec in ADD PARTITION should be case insensitive by default
  sql("ALTER TABLE tab1 ADD PARTITION (A='9', B='9')")
  assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
    Set(part1, part2, part3, part4, part5))
}
/**
 * Exercises ALTER TABLE ... DROP PARTITION: multiple specs in one statement,
 * IF EXISTS semantics, case-insensitive column names, and unquoted (typed)
 * partition values.
 */
protected def testDropPartitions(isDatasourceTable: Boolean): Unit = {
  if (!isUsingHiveMetastore) {
    assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
  }
  val catalog = spark.sessionState.catalog
  val tableIdent = TableIdentifier("tab1", Some("dbx"))
  val part1 = Map("a" -> "1", "b" -> "5")
  val part2 = Map("a" -> "2", "b" -> "6")
  val part3 = Map("a" -> "3", "b" -> "7")
  val part4 = Map("a" -> "4", "b" -> "8")
  val part5 = Map("a" -> "9", "b" -> "9")
  createDatabase(catalog, "dbx")
  createTable(catalog, tableIdent, isDatasourceTable)
  createTablePartition(catalog, part1, tableIdent)
  createTablePartition(catalog, part2, tableIdent)
  createTablePartition(catalog, part3, tableIdent)
  createTablePartition(catalog, part4, tableIdent)
  createTablePartition(catalog, part5, tableIdent)
  assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
    Set(part1, part2, part3, part4, part5))
  // basic drop partition: two specs dropped in one statement
  sql("ALTER TABLE dbx.tab1 DROP IF EXISTS PARTITION (a='4', b='8'), PARTITION (a='3', b='7')")
  assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1, part2, part5))
  // drop partitions without explicitly specifying database
  catalog.setCurrentDatabase("dbx")
  sql("ALTER TABLE tab1 DROP IF EXISTS PARTITION (a='2', b ='6')")
  assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1, part5))
  // table to alter does not exist
  intercept[AnalysisException] {
    sql("ALTER TABLE does_not_exist DROP IF EXISTS PARTITION (a='2')")
  }
  // partition to drop does not exist
  intercept[AnalysisException] {
    sql("ALTER TABLE tab1 DROP PARTITION (a='300')")
  }
  // partition to drop does not exist when using IF EXISTS: no-op
  sql("ALTER TABLE tab1 DROP IF EXISTS PARTITION (a='300')")
  assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1, part5))
  // partition spec in DROP PARTITION should be case insensitive by default
  sql("ALTER TABLE tab1 DROP PARTITION (A='1', B='5')")
  assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part5))
  // use int literal as partition value for int type partition column
  sql("ALTER TABLE tab1 DROP PARTITION (a=9, b=9)")
  assert(catalog.listPartitions(tableIdent).isEmpty)
}
/**
 * Exercises ALTER TABLE ... PARTITION ... RENAME TO PARTITION: basic renames,
 * resolution against the current database, error types for missing table or
 * partition, and case-insensitive partition column names.
 */
protected def testRenamePartitions(isDatasourceTable: Boolean): Unit = {
  if (!isUsingHiveMetastore) {
    assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
  }
  val catalog = spark.sessionState.catalog
  val tableIdent = TableIdentifier("tab1", Some("dbx"))
  val part1 = Map("a" -> "1", "b" -> "q")
  val part2 = Map("a" -> "2", "b" -> "c")
  val part3 = Map("a" -> "3", "b" -> "p")
  createDatabase(catalog, "dbx")
  createTable(catalog, tableIdent, isDatasourceTable)
  createTablePartition(catalog, part1, tableIdent)
  createTablePartition(catalog, part2, tableIdent)
  createTablePartition(catalog, part3, tableIdent)
  assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1, part2, part3))
  // basic rename partition
  sql("ALTER TABLE dbx.tab1 PARTITION (a='1', b='q') RENAME TO PARTITION (a='100', b='p')")
  sql("ALTER TABLE dbx.tab1 PARTITION (a='2', b='c') RENAME TO PARTITION (a='20', b='c')")
  assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
    Set(Map("a" -> "100", "b" -> "p"), Map("a" -> "20", "b" -> "c"), Map("a" -> "3", "b" -> "p")))
  // rename without explicitly specifying database
  catalog.setCurrentDatabase("dbx")
  sql("ALTER TABLE tab1 PARTITION (a='100', b='p') RENAME TO PARTITION (a='10', b='p')")
  assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
    Set(Map("a" -> "10", "b" -> "p"), Map("a" -> "20", "b" -> "c"), Map("a" -> "3", "b" -> "p")))
  // table to alter does not exist
  intercept[NoSuchTableException] {
    sql("ALTER TABLE does_not_exist PARTITION (c='3') RENAME TO PARTITION (c='333')")
  }
  // partition to rename does not exist
  intercept[NoSuchPartitionException] {
    sql("ALTER TABLE tab1 PARTITION (a='not_found', b='1') RENAME TO PARTITION (a='1', b='2')")
  }
  // partition spec in RENAME PARTITION should be case insensitive by default
  sql("ALTER TABLE tab1 PARTITION (A='10', B='p') RENAME TO PARTITION (A='1', B='p')")
  assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
    Set(Map("a" -> "1", "b" -> "p"), Map("a" -> "20", "b" -> "c"), Map("a" -> "3", "b" -> "p")))
}
/** Verifies that ALTER TABLE ... CHANGE COLUMN preserves unrelated column metadata. */
protected def testChangeColumn(isDatasourceTable: Boolean): Unit = {
  if (!isUsingHiveMetastore) {
    assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
  }
  val sessionCatalog = spark.sessionState.catalog
  val resolver = spark.sessionState.conf.resolver
  val ident = TableIdentifier("tab1", Some("dbx"))
  createDatabase(sessionCatalog, "dbx")
  createTable(sessionCatalog, ident, isDatasourceTable)
  // Looks up a column's metadata by resolver-aware name; empty metadata if absent.
  def metadataOf(colName: String): Metadata = {
    sessionCatalog.getTableMetadata(ident).schema.fields
      .find(field => resolver(field.name, colName))
      .map(_.metadata)
      .getOrElse(Metadata.empty)
  }
  // Ensure that change column will preserve other metadata fields.
  // (The "key" -> "value" metadata entry is presumably planted by createTable — it must
  // survive both the type change and the comment change.)
  sql("ALTER TABLE dbx.tab1 CHANGE COLUMN col1 TYPE INT")
  sql("ALTER TABLE dbx.tab1 CHANGE COLUMN col1 COMMENT 'this is col1'")
  assert(metadataOf("col1").getString("key") == "value")
  assert(metadataOf("col1").getString("comment") == "this is col1")
}
// NOTE(review): "build-in" in the test name looks like a typo for "built-in";
// kept as-is since the string identifies the test to the runner.
test("drop build-in function") {
  Seq("true", "false").foreach { caseSensitive =>
    withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive) {
      // Native (built-in) functions can never be dropped, regardless of case
      // sensitivity or identifier quoting.
      var e = intercept[AnalysisException] {
        sql("DROP TEMPORARY FUNCTION year")
      }
      assert(e.getMessage.contains("Cannot drop native function 'year'"))
      // mixed-case name
      e = intercept[AnalysisException] {
        sql("DROP TEMPORARY FUNCTION YeAr")
      }
      assert(e.getMessage.contains("Cannot drop native function 'YeAr'"))
      // back-quoted name
      e = intercept[AnalysisException] {
        sql("DROP TEMPORARY FUNCTION `YeAr`")
      }
      assert(e.getMessage.contains("Cannot drop native function 'YeAr'"))
    }
  }
}
// Verifies DESCRIBE FUNCTION output for a named function, symbolic operators,
// a string-quoted function name, and EXTENDED mode.
test("describe function") {
  // named built-in function
  checkAnswer(
    sql("DESCRIBE FUNCTION log"),
    Row("Class: org.apache.spark.sql.catalyst.expressions.Logarithm") ::
      Row("Function: log") ::
      Row("Usage: log(base, expr) - Returns the logarithm of `expr` with `base`.") :: Nil
  )
  // predicate operator
  checkAnswer(
    sql("DESCRIBE FUNCTION or"),
    Row("Class: org.apache.spark.sql.catalyst.expressions.Or") ::
      Row("Function: or") ::
      Row("Usage: expr1 or expr2 - Logical OR.") :: Nil
  )
  checkAnswer(
    sql("DESCRIBE FUNCTION !"),
    Row("Class: org.apache.spark.sql.catalyst.expressions.Not") ::
      Row("Function: !") ::
      Row("Usage: ! expr - Logical not.") :: Nil
  )
  // arithmetic operators
  checkAnswer(
    sql("DESCRIBE FUNCTION +"),
    Row("Class: org.apache.spark.sql.catalyst.expressions.Add") ::
      Row("Function: +") ::
      Row("Usage: expr1 + expr2 - Returns `expr1`+`expr2`.") :: Nil
  )
  // comparison operators
  checkAnswer(
    sql("DESCRIBE FUNCTION <"),
    Row("Class: org.apache.spark.sql.catalyst.expressions.LessThan") ::
      Row("Function: <") ::
      Row("Usage: expr1 < expr2 - Returns true if `expr1` is less than `expr2`.") :: Nil
  )
  // STRING (function name given as a string literal)
  checkAnswer(
    sql("DESCRIBE FUNCTION 'concat'"),
    Row("Class: org.apache.spark.sql.catalyst.expressions.Concat") ::
      Row("Function: concat") ::
      Row("Usage: concat(col1, col2, ..., colN) - " +
        "Returns the concatenation of col1, col2, ..., colN.") :: Nil
  )
  // extended mode: adds the Extended Usage section with examples
  checkAnswer(
    sql("DESCRIBE FUNCTION EXTENDED ^"),
    Row("Class: org.apache.spark.sql.catalyst.expressions.BitwiseXor") ::
      Row(
        """Extended Usage:
          | Examples:
          | > SELECT 3 ^ 5;
          | 6
          | """.stripMargin) ::
      Row("Function: ^") ::
      Row("Usage: expr1 ^ expr2 - Returns the result of " +
        "bitwise exclusive OR of `expr1` and `expr2`.") :: Nil
  )
}
test("create a data source table without schema") {
  import testImplicits._
  withTempPath { dir =>
    withTable("tab1", "tab2") {
      Seq(("a", "b")).toDF().write.json(dir.getCanonicalPath)
      // Without a LOCATION there is no data to infer a schema from, so CREATE fails.
      val msg = intercept[AnalysisException] { sql("CREATE TABLE tab1 USING json") }.getMessage
      assert(msg.contains("Unable to infer schema for JSON. It must be specified manually"))
      // With a LOCATION the schema is inferred from the existing JSON files.
      sql(s"CREATE TABLE tab2 using json location '${dir.toURI}'")
      checkAnswer(spark.table("tab2"), Row("a", "b"))
    }
  }
}
// Bucketing columns cannot be validated when the schema will only be inferred at
// runtime, so CREATE TABLE ... CLUSTERED BY without a schema must fail.
test("create table using CLUSTERED BY without schema specification") {
  import testImplicits._
  withTempPath { tempDir =>
    withTable("jsonTable") {
      (("a", "b") :: Nil).toDF().write.json(tempDir.getCanonicalPath)
      val e = intercept[AnalysisException] {
        sql(
          s"""
            |CREATE TABLE jsonTable
            |USING org.apache.spark.sql.json
            |OPTIONS (
            | path '${tempDir.getCanonicalPath}'
            |)
            |CLUSTERED BY (inexistentColumnA) SORTED BY (inexistentColumnB) INTO 2 BUCKETS
          """.stripMargin)
      }
      // exact error message, not just a substring
      assert(e.message == "Cannot specify bucketing information if the table schema is not " +
        "specified when creating and will be inferred at runtime")
    }
  }
}
test("Create Data Source Table As Select") {
  import testImplicits._
  withTable("t", "t1", "t2") {
    // CTAS from a literal projection.
    sql("CREATE TABLE t USING parquet SELECT 1 as a, 1 as b")
    checkAnswer(spark.table("t"), Seq(Row(1, 1)))
    // CTAS selecting from an existing table.
    spark.range(1).select('id.as("a"), 'id.as("b")).write.saveAsTable("t1")
    sql("CREATE TABLE t2 USING parquet SELECT a, b from t1")
    checkAnswer(spark.table("t2"), spark.table("t1"))
  }
}
test("drop current database") {
  withDatabase("temp") {
    sql("CREATE DATABASE temp")
    sql("USE temp")
    // Drop the database we are currently in ...
    sql("DROP DATABASE temp")
    // ... then any command that needs the current database must fail.
    val message = intercept[AnalysisException] {
      sql("CREATE TABLE t (a INT, b INT) USING parquet")
    }.getMessage
    assert(message.contains("Database 'temp' not found"))
  }
}
test("drop default database") {
  // With the Hive metastore, database names are always case-insensitive.
  val caseSensitiveOptions = if (isUsingHiveMetastore) Seq("false") else Seq("true", "false")
  caseSensitiveOptions.foreach { caseSensitive =>
    withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive) {
      // Attempts to drop the given database and returns the failure message.
      def dropFailureMessage(dbName: String): String = {
        intercept[AnalysisException](sql(s"DROP DATABASE $dbName")).getMessage
      }
      // The default database can never be dropped.
      assert(dropFailureMessage("default").contains("Can not drop default database"))
      // A mixed-case name either misses (case-sensitive) or still hits the default db.
      val mixedCaseMessage = dropFailureMessage("DeFault")
      if (caseSensitive == "true") {
        assert(mixedCaseMessage.contains("Database 'DeFault' not found"))
      } else {
        assert(mixedCaseMessage.contains("Can not drop default database"))
      }
    }
  }
}
test("truncate table - datasource table") {
  import testImplicits._
  val data = (1 to 10).map(i => (i, i)).toDF("width", "length")
  // Test both a Hive compatible and incompatible code path.
  for (format <- Seq("json", "parquet")) {
    withTable("rectangles") {
      data.write.format(format).saveAsTable("rectangles")
      assert(spark.table("rectangles").collect().nonEmpty,
        "bad test; table was empty to begin with")
      // TRUNCATE TABLE empties the table ...
      sql("TRUNCATE TABLE rectangles")
      assert(spark.table("rectangles").collect().isEmpty)
      // ... but a PARTITION clause is rejected since the table is not partitioned.
      assertUnsupported("TRUNCATE TABLE rectangles PARTITION (width=1)")
    }
  }
}
// TRUNCATE TABLE ... PARTITION on a partitioned data source table: full specs,
// partial specs, non-matching specs, and invalid partition columns.
test("truncate partitioned table - datasource table") {
  import testImplicits._
  val data = (1 to 10).map { i => (i % 3, i % 5, i) }.toDF("width", "length", "height")
  withTable("partTable") {
    data.write.partitionBy("width", "length").saveAsTable("partTable")
    // supported since partitions are stored in the metastore
    sql("TRUNCATE TABLE partTable PARTITION (width=1, length=1)")
    assert(spark.table("partTable").filter($"width" === 1).collect().nonEmpty)
    assert(spark.table("partTable").filter($"width" === 1 && $"length" === 1).collect().isEmpty)
  }
  withTable("partTable") {
    data.write.partitionBy("width", "length").saveAsTable("partTable")
    // support partial partition spec
    sql("TRUNCATE TABLE partTable PARTITION (width=1)")
    assert(spark.table("partTable").collect().nonEmpty)
    assert(spark.table("partTable").filter($"width" === 1).collect().isEmpty)
  }
  withTable("partTable") {
    data.write.partitionBy("width", "length").saveAsTable("partTable")
    // do nothing if no partition is matched for the given partial partition spec
    sql("TRUNCATE TABLE partTable PARTITION (width=100)")
    assert(spark.table("partTable").count() == data.count())
    // throw exception if no partition is matched for the given non-partial partition spec.
    intercept[NoSuchPartitionException] {
      sql("TRUNCATE TABLE partTable PARTITION (width=100, length=100)")
    }
    // throw exception if the column in partition spec is not a partition column.
    val e = intercept[AnalysisException] {
      sql("TRUNCATE TABLE partTable PARTITION (unknown=1)")
    }
    assert(e.message.contains("unknown is not a valid partition column"))
  }
}
// TRUNCATE TABLE re-creates the table directory. When
// TRUNCATE_TABLE_IGNORE_PERMISSION_ACL is false the original permission and
// ACL entries must be restored on the new directory; when true they are not.
// (Fix: removed a stray trailing semicolon, inconsistent with the file's style.)
test("SPARK-30312: truncate table - keep acl/permission") {
  import testImplicits._
  val ignorePermissionAcl = Seq(true, false)
  ignorePermissionAcl.foreach { ignore =>
    withSQLConf(
      "fs.file.impl" -> classOf[FakeLocalFsFileSystem].getName,
      "fs.file.impl.disable.cache" -> "true",
      SQLConf.TRUNCATE_TABLE_IGNORE_PERMISSION_ACL.key -> ignore.toString) {
      withTable("tab1") {
        sql("CREATE TABLE tab1 (col INT) USING parquet")
        sql("INSERT INTO tab1 SELECT 1")
        checkAnswer(spark.table("tab1"), Row(1))
        val tablePath = new Path(spark.sessionState.catalog
          .getTableMetadata(TableIdentifier("tab1")).storage.locationUri.get)
        val hadoopConf = spark.sessionState.newHadoopConf()
        val fs = tablePath.getFileSystem(hadoopConf)
        val fileStatus = fs.getFileStatus(tablePath)
        fs.setPermission(tablePath, new FsPermission("777"))
        // NOTE(review): fileStatus was obtained before setPermission; this assertion
        // presumably relies on the local FileStatus loading permissions lazily — confirm.
        assert(fileStatus.getPermission().toString() == "rwxrwxrwx")
        // Set ACL to table path.
        val customAcl = new java.util.ArrayList[AclEntry]()
        customAcl.add(new AclEntry.Builder()
          .setName("test")
          .setType(AclEntryType.USER)
          .setScope(AclEntryScope.ACCESS)
          .setPermission(FsAction.READ).build())
        fs.setAcl(tablePath, customAcl)
        assert(fs.getAclStatus(tablePath).getEntries().get(0) == customAcl.get(0))
        sql("TRUNCATE TABLE tab1")
        assert(spark.table("tab1").collect().isEmpty)
        // Permission on the re-created directory: restored only when not ignored.
        val fileStatus2 = fs.getFileStatus(tablePath)
        if (ignore) {
          assert(fileStatus2.getPermission().toString() != "rwxrwxrwx")
        } else {
          assert(fileStatus2.getPermission().toString() == "rwxrwxrwx")
        }
        // ACL entries on the re-created directory: restored only when not ignored.
        val aclEntries = fs.getAclStatus(tablePath).getEntries()
        if (ignore) {
          assert(aclEntries.size() == 0)
        } else {
          assert(aclEntries.size() == 4)
          assert(aclEntries.get(0) == customAcl.get(0))
          // Setting ACLs will also set user/group/other permissions
          // as ACL entries.
          val user = new AclEntry.Builder()
            .setType(AclEntryType.USER)
            .setScope(AclEntryScope.ACCESS)
            .setPermission(FsAction.ALL).build()
          val group = new AclEntry.Builder()
            .setType(AclEntryType.GROUP)
            .setScope(AclEntryScope.ACCESS)
            .setPermission(FsAction.ALL).build()
          val other = new AclEntry.Builder()
            .setType(AclEntryType.OTHER)
            .setScope(AclEntryScope.ACCESS)
            .setPermission(FsAction.ALL).build()
          assert(aclEntries.get(1) == user)
          assert(aclEntries.get(2) == group)
          assert(aclEntries.get(3) == other)
        }
      }
    }
  }
}
test("SPARK-31163: acl/permission should handle non-existed path when truncating table") {
  withSQLConf(SQLConf.TRUNCATE_TABLE_IGNORE_PERMISSION_ACL.key -> "false") {
    withTable("tab1") {
      sql("CREATE TABLE tab1 (col1 STRING, col2 INT) USING parquet PARTITIONED BY (col2)")
      sql("INSERT INTO tab1 SELECT 'one', 1")
      checkAnswer(spark.table("tab1"), Row("one", 1))
      val firstPartition = spark.sessionState.catalog.listPartitions(TableIdentifier("tab1")).head
      val partitionDir = new File(firstPartition.location.getPath)
      sql("TRUNCATE TABLE tab1")
      // simulate incomplete/unsuccessful truncate by deleting the partition directory
      assert(partitionDir.exists())
      partitionDir.delete()
      assert(!partitionDir.exists())
      // truncating again must not throw java.io.FileNotFoundException ...
      sql("TRUNCATE TABLE tab1")
      // ... and must re-create the partition path
      assert(partitionDir.exists())
    }
  }
}
test("create temporary view with mismatched schema") {
withTable("tab1") {
spark.range(10).write.saveAsTable("tab1")
withView("view1") {
val e = intercept[AnalysisException] {
sql("CREATE TEMPORARY VIEW view1 (col1, col3) AS SELECT * FROM tab1")
}.getMessage
assert(e.contains("the SELECT clause (num: `1`) does not match")
&& e.contains("CREATE VIEW (num: `2`)"))
}
}
}
test("create temporary view with specified schema") {
withView("view1") {
sql("CREATE TEMPORARY VIEW view1 (col1, col2) AS SELECT 1, 2")
checkAnswer(
sql("SELECT * FROM view1"),
Row(1, 2) :: Nil
)
}
}
test("block creating duplicate temp table") {
withTempView("t_temp") {
sql("CREATE TEMPORARY VIEW t_temp AS SELECT 1, 2")
val e = intercept[TempTableAlreadyExistsException] {
sql("CREATE TEMPORARY TABLE t_temp (c3 int, c4 string) USING JSON")
}.getMessage
assert(e.contains("Temporary view 't_temp' already exists"))
}
}
test("block creating duplicate temp view") {
withTempView("t_temp") {
sql("CREATE TEMPORARY VIEW t_temp AS SELECT 1, 2")
val e = intercept[TempTableAlreadyExistsException] {
sql("CREATE TEMPORARY VIEW t_temp (c3 int, c4 string) USING JSON")
}.getMessage
assert(e.contains("Temporary view 't_temp' already exists"))
}
}
// TRUNCATE TABLE is rejected for external tables and views, and a temp view
// resolves to "no such table".
// Fix: the original created `my_temp_tab` (session temp view) and `my_view`
// (permanent view) without ever dropping them — only `my_ext_tab` was covered
// by withTable — so they leaked into subsequent tests. Wrap them in
// withTempView/withView so all three objects are cleaned up.
test("truncate table - external table, temporary table, view (not allowed)") {
  import testImplicits._
  withTempPath { tempDir =>
    withTable("my_ext_tab") {
      withTempView("my_temp_tab") {
        withView("my_view") {
          (("a", "b") :: Nil).toDF().write.parquet(tempDir.getCanonicalPath)
          (1 to 10).map { i => (i, i) }.toDF("a", "b").createTempView("my_temp_tab")
          sql(s"CREATE TABLE my_ext_tab using parquet LOCATION '${tempDir.toURI}'")
          sql(s"CREATE VIEW my_view AS SELECT 1")
          // A temp view is not a table, so TRUNCATE cannot resolve it.
          intercept[NoSuchTableException] {
            sql("TRUNCATE TABLE my_temp_tab")
          }
          // External tables and views are resolvable but unsupported targets.
          assertUnsupported("TRUNCATE TABLE my_ext_tab")
          assertUnsupported("TRUNCATE TABLE my_view")
        }
      }
    }
  }
}
// A PARTITION spec on TRUNCATE is invalid for a non-partitioned table.
test("truncate table - non-partitioned table (not allowed)") {
  withTable("my_tab") {
    sql("CREATE TABLE my_tab (age INT, name STRING) using parquet")
    sql("INSERT INTO my_tab values (10, 'a')")
    assertUnsupported("TRUNCATE TABLE my_tab PARTITION (age=10)")
  }
}

// Appends must declare the same partition columns, in the same order, as the
// existing table; mismatches are rejected and write nothing.
test("SPARK-16034 Partition columns should match when appending to existing data source tables") {
  import testImplicits._
  val df = Seq((1, 2, 3)).toDF("a", "b", "c")
  withTable("partitionedTable") {
    df.write.mode("overwrite").partitionBy("a", "b").saveAsTable("partitionedTable")
    // Misses some partition columns
    intercept[AnalysisException] {
      df.write.mode("append").partitionBy("a").saveAsTable("partitionedTable")
    }
    // Wrong order
    intercept[AnalysisException] {
      df.write.mode("append").partitionBy("b", "a").saveAsTable("partitionedTable")
    }
    // Partition columns not specified
    intercept[AnalysisException] {
      df.write.mode("append").saveAsTable("partitionedTable")
    }
    // None of the failed appends should have written any rows.
    assert(sql("select * from partitionedTable").collect().size == 1)
    // Inserts new data successfully when partition columns are correctly specified in
    // partitionBy(...).
    // TODO: Right now, partition columns are always treated in a case-insensitive way.
    // See the write method in DataSource.scala.
    Seq((4, 5, 6)).toDF("a", "B", "c")
      .write
      .mode("append")
      .partitionBy("a", "B")
      .saveAsTable("partitionedTable")
    Seq((7, 8, 9)).toDF("a", "b", "c")
      .write
      .mode("append")
      .partitionBy("a", "b")
      .saveAsTable("partitionedTable")
    checkAnswer(
      sql("select a, b, c from partitionedTable"),
      Row(1, 2, 3) :: Row(4, 5, 6) :: Row(7, 8, 9) :: Nil
    )
  }
}
// SHOW [SYSTEM|ALL|USER] FUNCTIONS: the system count stays fixed while a
// newly registered UDF appears only under the default/all/user variants.
test("show functions") {
  withUserDefinedFunction("add_one" -> true) {
    // Built-ins plus the "virtual" operators form the system function set.
    val numFunctions = FunctionRegistry.functionSet.size.toLong +
      FunctionsCommand.virtualOperators.size.toLong
    assert(sql("show functions").count() === numFunctions)
    assert(sql("show system functions").count() === numFunctions)
    assert(sql("show all functions").count() === numFunctions)
    assert(sql("show user functions").count() === 0L)
    spark.udf.register("add_one", (x: Long) => x + 1)
    assert(sql("show functions").count() === numFunctions + 1L)
    assert(sql("show system functions").count() === numFunctions)
    assert(sql("show all functions").count() === numFunctions + 1L)
    assert(sql("show user functions").count() === 1L)
  }
}

test("show columns - negative test") {
  // When case sensitivity is true, the user supplied database name in table identifier
  // should match the supplied database name in case sensitive way.
  withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
    withTempDatabase { db =>
      val tabName = s"$db.showcolumn"
      withTable(tabName) {
        sql(s"CREATE TABLE $tabName(col1 int, col2 string) USING parquet ")
        // The qualified table identifier and the FROM clause disagree (case).
        val message = intercept[AnalysisException] {
          sql(s"SHOW COLUMNS IN $db.showcolumn FROM ${db.toUpperCase(Locale.ROOT)}")
        }.getMessage
        assert(message.contains(
          s"SHOW COLUMNS with conflicting databases: " +
            s"'${db.toUpperCase(Locale.ROOT)}' != '$db'"))
      }
    }
  }
}

// Command results must be consumable via toLocalIterator, not only collect().
test("SPARK-18009 calling toLocalIterator on commands") {
  import scala.collection.JavaConverters._
  val df = sql("show databases")
  val rows: Seq[Row] = df.toLocalIterator().asScala.toSeq
  assert(rows.length > 0)
}
// ALTER TABLE ... SET LOCATION on a managed table repoints the table without
// moving existing data; DROP TABLE afterwards removes only the new location.
test("SET LOCATION for managed table") {
  withTable("tbl") {
    withTempDir { dir =>
      sql("CREATE TABLE tbl(i INT) USING parquet")
      sql("INSERT INTO tbl SELECT 1")
      checkAnswer(spark.table("tbl"), Row(1))
      val defaultTablePath = spark.sessionState.catalog
        .getTableMetadata(TableIdentifier("tbl")).storage.locationUri.get
      try {
        sql(s"ALTER TABLE tbl SET LOCATION '${dir.toURI}'")
        spark.catalog.refreshTable("tbl")
        // SET LOCATION won't move data from previous table path to new table path.
        assert(spark.table("tbl").count() == 0)
        // the previous table path should be still there.
        assert(new File(defaultTablePath).exists())
        sql("INSERT INTO tbl SELECT 2")
        checkAnswer(spark.table("tbl"), Row(2))
        // newly inserted data will go to the new table path.
        assert(dir.listFiles().nonEmpty)
        sql("DROP TABLE tbl")
        // the new table path will be removed after DROP TABLE.
        assert(!dir.exists())
      } finally {
        // Remove the orphaned original location regardless of outcome.
        Utils.deleteRecursively(new File(defaultTablePath))
      }
    }
  }
}

// INSERT (both append and overwrite) must recreate the table location when it
// no longer exists on disk, including after SET LOCATION to a new path.
test("insert data to a data source table which has a non-existing location should succeed") {
  withTable("t") {
    withTempDir { dir =>
      spark.sql(
        s"""
           |CREATE TABLE t(a string, b int)
           |USING parquet
           |OPTIONS(path "${dir.toURI}")
         """.stripMargin)
      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
      assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
      // Remove the table directory out-of-band; INSERT must recreate it.
      dir.delete
      assert(!dir.exists)
      spark.sql("INSERT INTO TABLE t SELECT 'c', 1")
      assert(dir.exists)
      checkAnswer(spark.table("t"), Row("c", 1) :: Nil)
      // Same behavior for INSERT OVERWRITE.
      Utils.deleteRecursively(dir)
      assert(!dir.exists)
      spark.sql("INSERT OVERWRITE TABLE t SELECT 'c', 1")
      assert(dir.exists)
      checkAnswer(spark.table("t"), Row("c", 1) :: Nil)
      // After SET LOCATION to a not-yet-existing dir, INSERT creates it too.
      val newDirFile = new File(dir, "x")
      val newDir = newDirFile.toURI
      spark.sql(s"ALTER TABLE t SET LOCATION '$newDir'")
      spark.sessionState.catalog.refreshTable(TableIdentifier("t"))
      val table1 = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
      assert(table1.location == makeQualifiedPath(newDir.toString))
      assert(!newDirFile.exists)
      spark.sql("INSERT INTO TABLE t SELECT 'c', 1")
      assert(newDirFile.exists)
      checkAnswer(spark.table("t"), Row("c", 1) :: Nil)
    }
  }
}
// Overwriting a partition whose directory was deleted out-of-band must
// recreate the partition location.
test("insert into a data source table with a non-existing partition location should succeed") {
  withTable("t") {
    withTempDir { dir =>
      spark.sql(
        s"""
           |CREATE TABLE t(a int, b int, c int, d int)
           |USING parquet
           |PARTITIONED BY(a, b)
           |LOCATION "${dir.toURI}"
         """.stripMargin)
      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
      assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
      spark.sql("INSERT INTO TABLE t PARTITION(a=1, b=2) SELECT 3, 4")
      // Partition columns come last in the output schema, hence (3, 4, 1, 2).
      checkAnswer(spark.table("t"), Row(3, 4, 1, 2) :: Nil)
      val partLoc = new File(s"${dir.getAbsolutePath}/a=1")
      Utils.deleteRecursively(partLoc)
      assert(!partLoc.exists())
      // insert overwrite into a partition which location has been deleted.
      spark.sql("INSERT OVERWRITE TABLE t PARTITION(a=1, b=2) SELECT 7, 8")
      assert(partLoc.exists())
      checkAnswer(spark.table("t"), Row(7, 8, 1, 2) :: Nil)
    }
  }
}

// Reading from a table whose location is missing yields an empty result
// rather than an error, both before and after SET LOCATION.
test("read data from a data source table which has a non-existing location should succeed") {
  withTable("t") {
    withTempDir { dir =>
      spark.sql(
        s"""
           |CREATE TABLE t(a string, b int)
           |USING parquet
           |OPTIONS(path "${dir.toURI}")
         """.stripMargin)
      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
      assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
      dir.delete()
      checkAnswer(spark.table("t"), Nil)
      val newDirFile = new File(dir, "x")
      val newDir = newDirFile.toURI
      spark.sql(s"ALTER TABLE t SET LOCATION '$newDir'")
      val table1 = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
      assert(table1.location == newDir)
      assert(!newDirFile.exists())
      checkAnswer(spark.table("t"), Nil)
    }
  }
}

// Same tolerance for a deleted partition directory: after REFRESH the
// partition scan returns no rows instead of failing.
test("read data from a data source table with non-existing partition location should succeed") {
  withTable("t") {
    withTempDir { dir =>
      spark.sql(
        s"""
           |CREATE TABLE t(a int, b int, c int, d int)
           |USING parquet
           |LOCATION "${dir.toURI}"
           |PARTITIONED BY(a, b)
         """.stripMargin)
      spark.sql("INSERT INTO TABLE t PARTITION(a=1, b=2) SELECT 3, 4")
      checkAnswer(spark.table("t"), Row(3, 4, 1, 2) :: Nil)
      // select from a partition which location has been deleted.
      Utils.deleteRecursively(dir)
      assert(!dir.exists())
      spark.sql("REFRESH TABLE t")
      checkAnswer(spark.sql("select * from t where a=1 and b=2"), Nil)
    }
  }
}
// Creating a table at a non-existing LOCATION is allowed; the directory is
// materialized by the first INSERT, for plain and partitioned tables alike.
test("create datasource table with a non-existing location") {
  withTable("t", "t1") {
    withTempPath { dir =>
      spark.sql(s"CREATE TABLE t(a int, b int) USING parquet LOCATION '${dir.toURI}'")
      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
      assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
      spark.sql("INSERT INTO TABLE t SELECT 1, 2")
      assert(dir.exists())
      checkAnswer(spark.table("t"), Row(1, 2))
    }
    // partition table
    withTempPath { dir =>
      spark.sql(
        s"CREATE TABLE t1(a int, b int) USING parquet PARTITIONED BY(a) LOCATION '${dir.toURI}'")
      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
      assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
      spark.sql("INSERT INTO TABLE t1 PARTITION(a=1) SELECT 2")
      val partDir = new File(dir, "a=1")
      assert(partDir.exists())
      checkAnswer(spark.table("t1"), Row(2, 1))
    }
  }
}

// A fully static INSERT that produces no rows still registers an empty
// partition (and its directory); a partially dynamic one does not.
test("Partition table should load empty static partitions") {
  // All static partitions
  withTable("t", "t1", "t2") {
    withTempPath { dir =>
      spark.sql("CREATE TABLE t(a int) USING parquet")
      spark.sql("CREATE TABLE t1(a int, c string, b string) " +
        s"USING parquet PARTITIONED BY(c, b) LOCATION '${dir.toURI}'")
      // datasource table
      validateStaticPartitionTable("t1")
      // hive table
      if (isUsingHiveMetastore) {
        spark.sql("CREATE TABLE t2(a int) " +
          s"PARTITIONED BY(c string, b string) LOCATION '${dir.toURI}'")
        validateStaticPartitionTable("t2")
      }
      // Forward reference is fine: this is a def, resolved lazily.
      def validateStaticPartitionTable(tableName: String): Unit = {
        val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier(tableName))
        assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
        assert(spark.sql(s"SHOW PARTITIONS $tableName").count() == 0)
        // WHERE 1 = 0 yields zero rows, yet the static partition is created.
        spark.sql(
          s"INSERT INTO TABLE $tableName PARTITION(b='b', c='c') SELECT * FROM t WHERE 1 = 0")
        assert(spark.sql(s"SHOW PARTITIONS $tableName").count() == 1)
        assert(new File(dir, "c=c/b=b").exists())
        checkAnswer(spark.table(tableName), Nil)
      }
    }
  }
  // Partial dynamic partitions
  withTable("t", "t1", "t2") {
    withTempPath { dir =>
      spark.sql("CREATE TABLE t(a int) USING parquet")
      spark.sql("CREATE TABLE t1(a int, b string, c string) " +
        s"USING parquet PARTITIONED BY(c, b) LOCATION '${dir.toURI}'")
      // datasource table
      validatePartialStaticPartitionTable("t1")
      // hive table
      if (isUsingHiveMetastore) {
        spark.sql("CREATE TABLE t2(a int) " +
          s"PARTITIONED BY(c string, b string) LOCATION '${dir.toURI}'")
        validatePartialStaticPartitionTable("t2")
      }
      // With a dynamic partition column, an empty insert creates nothing.
      def validatePartialStaticPartitionTable(tableName: String): Unit = {
        val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier(tableName))
        assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
        assert(spark.sql(s"SHOW PARTITIONS $tableName").count() == 0)
        spark.sql(
          s"INSERT INTO TABLE $tableName PARTITION(c='c', b) SELECT *, 'b' FROM t WHERE 1 = 0")
        assert(spark.sql(s"SHOW PARTITIONS $tableName").count() == 0)
        assert(!new File(dir, "c=c/b=b").exists())
        checkAnswer(spark.table(tableName), Nil)
      }
    }
  }
}
// CTAS into an external location, both when the target directory already
// exists and when it does not; run once per case.
Seq(true, false).foreach { shouldDelete =>
  val tcName = if (shouldDelete) "non-existing" else "existed"
  test(s"CTAS for external data source table with a $tcName location") {
    withTable("t", "t1") {
      withTempDir { dir =>
        if (shouldDelete) dir.delete()
        spark.sql(
          s"""
             |CREATE TABLE t
             |USING parquet
             |LOCATION '${dir.toURI}'
             |AS SELECT 3 as a, 4 as b, 1 as c, 2 as d
           """.stripMargin)
        val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
        assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
        checkAnswer(spark.table("t"), Row(3, 4, 1, 2))
      }
      // partition table
      withTempDir { dir =>
        if (shouldDelete) dir.delete()
        spark.sql(
          s"""
             |CREATE TABLE t1
             |USING parquet
             |PARTITIONED BY(a, b)
             |LOCATION '${dir.toURI}'
             |AS SELECT 3 as a, 4 as b, 1 as c, 2 as d
           """.stripMargin)
        val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
        assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
        val partDir = new File(dir, "a=3")
        assert(partDir.exists())
        // Partition columns (a, b) are moved to the end of the schema.
        checkAnswer(spark.table("t1"), Row(1, 2, 3, 4))
      }
    }
  }
}

// Partition column names containing special characters must be escaped in the
// on-disk partition directory name (see ExternalCatalogUtils.escapePathName).
Seq("a b", "a:b", "a%b", "a,b").foreach { specialChars =>
  test(s"data source table:partition column name containing $specialChars") {
    // On Windows, it looks colon in the file name is illegal by default. See
    // https://support.microsoft.com/en-us/help/289627
    assume(!Utils.isWindows || specialChars != "a:b")
    withTable("t") {
      withTempDir { dir =>
        spark.sql(
          s"""
             |CREATE TABLE t(a string, `$specialChars` string)
             |USING parquet
             |PARTITIONED BY(`$specialChars`)
             |LOCATION '${dir.toURI}'
           """.stripMargin)
        assert(dir.listFiles().isEmpty)
        spark.sql(s"INSERT INTO TABLE t PARTITION(`$specialChars`=2) SELECT 1")
        val partEscaped = s"${ExternalCatalogUtils.escapePathName(specialChars)}=2"
        val partFile = new File(dir, partEscaped)
        assert(partFile.listFiles().nonEmpty)
        checkAnswer(spark.table("t"), Row("1", "2") :: Nil)
      }
    }
  }
}
// Table LOCATION paths containing spaces/colons/percent signs must round-trip
// through the parser and catalog, for plain and partitioned tables.
Seq("a b", "a:b", "a%b").foreach { specialChars =>
  test(s"location uri contains $specialChars for datasource table") {
    // On Windows, it looks colon in the file name is illegal by default. See
    // https://support.microsoft.com/en-us/help/289627
    assume(!Utils.isWindows || specialChars != "a:b")
    withTable("t", "t1") {
      withTempDir { dir =>
        val loc = new File(dir, specialChars)
        loc.mkdir()
        // The parser does not recognize the backslashes on Windows as they are.
        // These currently should be escaped.
        val escapedLoc = loc.getAbsolutePath.replace("\\\\", "\\\\\\\\")
        spark.sql(
          s"""
             |CREATE TABLE t(a string)
             |USING parquet
             |LOCATION '$escapedLoc'
           """.stripMargin)
        val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
        assert(table.location == makeQualifiedPath(loc.getAbsolutePath))
        // The stored location keeps the special characters unescaped.
        assert(new Path(table.location).toString.contains(specialChars))
        assert(loc.listFiles().isEmpty)
        spark.sql("INSERT INTO TABLE t SELECT 1")
        assert(loc.listFiles().nonEmpty)
        checkAnswer(spark.table("t"), Row("1") :: Nil)
      }
      withTempDir { dir =>
        val loc = new File(dir, specialChars)
        loc.mkdir()
        // The parser does not recognize the backslashes on Windows as they are.
        // These currently should be escaped.
        val escapedLoc = loc.getAbsolutePath.replace("\\\\", "\\\\\\\\")
        spark.sql(
          s"""
             |CREATE TABLE t1(a string, b string)
             |USING parquet
             |PARTITIONED BY(b)
             |LOCATION '$escapedLoc'
           """.stripMargin)
        val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
        assert(table.location == makeQualifiedPath(loc.getAbsolutePath))
        assert(new Path(table.location).toString.contains(specialChars))
        assert(loc.listFiles().isEmpty)
        spark.sql("INSERT INTO TABLE t1 PARTITION(b=2) SELECT 1")
        val partFile = new File(loc, "b=2")
        assert(partFile.listFiles().nonEmpty)
        checkAnswer(spark.table("t1"), Row("1", "2") :: Nil)
        // Partition values themselves are escaped on disk, so the literal
        // directory name must NOT exist...
        spark.sql("INSERT INTO TABLE t1 PARTITION(b='2017-03-03 12:13%3A14') SELECT 1")
        val partFile1 = new File(loc, "b=2017-03-03 12:13%3A14")
        assert(!partFile1.exists())
        if (!Utils.isWindows) {
          // ...while the escaped one does.
          // Actual path becomes "b=2017-03-03%2012%3A13%253A14" on Windows.
          val partFile2 = new File(loc, "b=2017-03-03 12%3A13%253A14")
          assert(partFile2.listFiles().nonEmpty)
          checkAnswer(
            spark.table("t1"), Row("1", "2") :: Row("1", "2017-03-03 12:13%3A14") :: Nil)
        }
      }
    }
  }
}
// A database LOCATION with special characters must still let tables created
// in it resolve their default path underneath it.
Seq("a b", "a:b", "a%b").foreach { specialChars =>
  test(s"location uri contains $specialChars for database") {
    // On Windows, it looks colon in the file name is illegal by default. See
    // https://support.microsoft.com/en-us/help/289627
    assume(!Utils.isWindows || specialChars != "a:b")
    withDatabase ("tmpdb") {
      withTable("t") {
        withTempDir { dir =>
          val loc = new File(dir, specialChars)
          // The parser does not recognize the backslashes on Windows as they are.
          // These currently should be escaped.
          val escapedLoc = loc.getAbsolutePath.replace("\\\\", "\\\\\\\\")
          spark.sql(s"CREATE DATABASE tmpdb LOCATION '$escapedLoc'")
          spark.sql("USE tmpdb")
          import testImplicits._
          Seq(1).toDF("a").write.saveAsTable("t")
          // The managed table's path is <db location>/<table name>.
          val tblloc = new File(loc, "t")
          val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
          assert(table.location == makeQualifiedPath(tblloc.getAbsolutePath))
          assert(tblloc.listFiles().nonEmpty)
        }
      }
    }
  }
}

// Raw filesystem paths given as LOCATION (or SET LOCATION) must be stored in
// the catalog as fully qualified URIs (file: scheme).
test("the qualified path of a datasource table is stored in the catalog") {
  withTable("t", "t1") {
    withTempDir { dir =>
      assert(!dir.getAbsolutePath.startsWith("file:/"))
      // The parser does not recognize the backslashes on Windows as they are.
      // These currently should be escaped.
      val escapedDir = dir.getAbsolutePath.replace("\\\\", "\\\\\\\\")
      spark.sql(
        s"""
           |CREATE TABLE t(a string)
           |USING parquet
           |LOCATION '$escapedDir'
         """.stripMargin)
      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
      assert(table.location.toString.startsWith("file:/"))
    }
    withTempDir { dir =>
      assert(!dir.getAbsolutePath.startsWith("file:/"))
      spark.sql(s"ALTER TABLE t SET LOCATION '$dir'")
      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
      assert(table.location.toString.startsWith("file:/"))
    }
    withTempDir { dir =>
      assert(!dir.getAbsolutePath.startsWith("file:/"))
      // The parser does not recognize the backslashes on Windows as they are.
      // These currently should be escaped.
      val escapedDir = dir.getAbsolutePath.replace("\\\\", "\\\\\\\\")
      spark.sql(
        s"""
           |CREATE TABLE t1(a string, b string)
           |USING parquet
           |PARTITIONED BY(b)
           |LOCATION '$escapedDir'
         """.stripMargin)
      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
      assert(table.location.toString.startsWith("file:/"))
    }
  }
}
// Partition locations — whether implicit, via SET LOCATION, or via ADD
// PARTITION — are likewise stored as fully qualified file: URIs.
test("the qualified path of a partition is stored in the catalog") {
  withTable("t") {
    withTempDir { dir =>
      spark.sql(
        s"""
           |CREATE TABLE t(a STRING, b STRING)
           |USING ${dataSource} PARTITIONED BY(b) LOCATION '$dir'
         """.stripMargin)
      spark.sql("INSERT INTO TABLE t PARTITION(b=1) SELECT 2")
      val part = spark.sessionState.catalog.getPartition(TableIdentifier("t"), Map("b" -> "1"))
      assert(part.storage.locationUri.contains(
        makeQualifiedPath(new File(dir, "b=1").getAbsolutePath)))
      assert(part.storage.locationUri.get.toString.startsWith("file:/"))
    }
    withTempDir { dir =>
      spark.sql(s"ALTER TABLE t PARTITION(b=1) SET LOCATION '$dir'")
      val part = spark.sessionState.catalog.getPartition(TableIdentifier("t"), Map("b" -> "1"))
      assert(part.storage.locationUri.contains(makeQualifiedPath(dir.getAbsolutePath)))
      assert(part.storage.locationUri.get.toString.startsWith("file:/"))
    }
    withTempDir { dir =>
      spark.sql(s"ALTER TABLE t ADD PARTITION(b=2) LOCATION '$dir'")
      val part = spark.sessionState.catalog.getPartition(TableIdentifier("t"), Map("b" -> "2"))
      assert(part.storage.locationUri.contains(makeQualifiedPath(dir.getAbsolutePath)))
      assert(part.storage.locationUri.get.toString.startsWith("file:/"))
    }
  }
}
/**
 * Shared scenario for ALTER TABLE ... ADD COLUMNS on a non-partitioned table
 * of the given data source `provider`: pre-existing rows read back with a
 * null in the new column, and new inserts can populate and filter on it.
 */
protected def testAddColumn(provider: String): Unit = {
  withTable("t1") {
    // Seed a single-column table with one row, then widen the schema.
    sql(s"CREATE TABLE t1 (c1 int) USING $provider")
    sql("INSERT INTO t1 VALUES (1)")
    sql("ALTER TABLE t1 ADD COLUMNS (c2 int)")
    // The old row is backfilled with null, both on scan and when filtered.
    val backfilled = Seq(Row(1, null))
    checkAnswer(spark.table("t1"), backfilled)
    checkAnswer(sql("SELECT * FROM t1 WHERE c2 is null"), backfilled)
    // A fresh insert supplies the new column and is filterable on it.
    sql("INSERT INTO t1 VALUES (3, 2)")
    checkAnswer(sql("SELECT * FROM t1 WHERE c2 = 2"), Seq(Row(3, 2)))
  }
}
/**
 * Shared scenario for ALTER TABLE ... ADD COLUMNS on a partitioned table of
 * the given data source `provider`. The partition column (c2) stays last in
 * the output schema, so rows read back as (data cols..., partition col).
 */
protected def testAddColumnPartitioned(provider: String): Unit = {
  withTable("t1") {
    // One row in partition c2=2, then widen the schema with c3.
    sql(s"CREATE TABLE t1 (c1 int, c2 int) USING $provider PARTITIONED BY (c2)")
    sql("INSERT INTO t1 PARTITION(c2 = 2) VALUES (1)")
    sql("ALTER TABLE t1 ADD COLUMNS (c3 int)")
    // Pre-existing data is backfilled with null in the added column.
    val preExisting = Seq(Row(1, null, 2))
    checkAnswer(spark.table("t1"), preExisting)
    checkAnswer(sql("SELECT * FROM t1 WHERE c3 is null"), preExisting)
    // New inserts populate c3 and are filterable by the new column or by
    // the partition column.
    sql("INSERT INTO t1 PARTITION(c2 =1) VALUES (2, 3)")
    val inserted = Seq(Row(2, 3, 1))
    checkAnswer(sql("SELECT * FROM t1 WHERE c3 = 3"), inserted)
    checkAnswer(sql("SELECT * FROM t1 WHERE c2 = 1"), inserted)
  }
}
// File formats — by short name and by fully qualified class name — that
// support ALTER TABLE ... ADD COLUMNS. Each provider gets a plain and a
// partitioned test generated below.
val supportedNativeFileFormatsForAlterTableAddColumns = Seq("csv", "json", "parquet",
  "org.apache.spark.sql.execution.datasources.csv.CSVFileFormat",
  "org.apache.spark.sql.execution.datasources.json.JsonFileFormat",
  "org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat")

supportedNativeFileFormatsForAlterTableAddColumns.foreach { provider =>
  test(s"alter datasource table add columns - $provider") {
    testAddColumn(provider)
  }
}

supportedNativeFileFormatsForAlterTableAddColumns.foreach { provider =>
  test(s"alter datasource table add columns - partitioned - $provider") {
    testAddColumnPartitioned(provider)
  }
}
// ADD COLUMNS is rejected for data sources that do not support it (text).
test("alter datasource table add columns - text format not supported") {
  withTable("t1") {
    sql("CREATE TABLE t1 (c1 string) USING text")
    val e = intercept[AnalysisException] {
      sql("ALTER TABLE t1 ADD COLUMNS (c2 int)")
    }.getMessage
    assert(e.contains("ALTER ADD COLUMNS does not support datasource table with type"))
  }
}

// ALTER TABLE targets tables only; a temp view is reported as a view.
test("alter table add columns -- not support temp view") {
  withTempView("tmp_v") {
    sql("CREATE TEMPORARY VIEW tmp_v AS SELECT 1 AS c1, 2 AS c2")
    val e = intercept[AnalysisException] {
      sql("ALTER TABLE tmp_v ADD COLUMNS (c3 INT)")
    }
    assert(e.message.contains("'tmp_v' is a view not a table"))
  }
}

// Permanent views are likewise rejected.
test("alter table add columns -- not support view") {
  withView("v1") {
    sql("CREATE VIEW v1 AS SELECT 1 AS c1, 2 AS c2")
    val e = intercept[AnalysisException] {
      sql("ALTER TABLE v1 ADD COLUMNS (c3 INT)")
    }
    assert(e.message.contains("ALTER ADD COLUMNS does not support views"))
  }
}

// Adding a column whose name duplicates an existing one fails.
test("alter table add columns with existing column name") {
  withTable("t1") {
    sql("CREATE TABLE t1 (c1 int) USING PARQUET")
    val e = intercept[AnalysisException] {
      sql("ALTER TABLE t1 ADD COLUMNS (c1 string)")
    }.getMessage
    assert(e.contains("Found duplicate column(s)"))
  }
}
// IF NOT EXISTS is meaningless for session-scoped temporary functions and is
// therefore rejected.
test("create temporary function with if not exists") {
  withUserDefinedFunction("func1" -> true) {
    val sql1 =
      """
        |CREATE TEMPORARY FUNCTION IF NOT EXISTS func1 as
        |'com.matthewrathbone.example.SimpleUDFExample' USING JAR '/path/to/jar1',
        |JAR '/path/to/jar2'
      """.stripMargin
    val e = intercept[AnalysisException] {
      sql(sql1)
    }.getMessage
    assert(e.contains("It is not allowed to define a TEMPORARY function with IF NOT EXISTS"))
  }
}

// OR REPLACE and IF NOT EXISTS are contradictory and may not be combined.
test("create function with both if not exists and replace") {
  withUserDefinedFunction("func1" -> false) {
    val sql1 =
      """
        |CREATE OR REPLACE FUNCTION IF NOT EXISTS func1 as
        |'com.matthewrathbone.example.SimpleUDFExample' USING JAR '/path/to/jar1',
        |JAR '/path/to/jar2'
      """.stripMargin
    val e = intercept[AnalysisException] {
      sql(sql1)
    }.getMessage
    assert(e.contains("CREATE FUNCTION with both IF NOT EXISTS and REPLACE is not allowed"))
  }
}

// Temporary functions are session-scoped and cannot be database-qualified.
test("create temporary function by specifying a database") {
  val dbName = "mydb"
  withDatabase(dbName) {
    sql(s"CREATE DATABASE $dbName")
    sql(s"USE $dbName")
    withUserDefinedFunction("func1" -> true) {
      val sql1 =
        s"""
           |CREATE TEMPORARY FUNCTION $dbName.func1 as
           |'com.matthewrathbone.example.SimpleUDFExample' USING JAR '/path/to/jar1',
           |JAR '/path/to/jar2'
         """.stripMargin
      val e = intercept[AnalysisException] {
        sql(sql1)
      }.getMessage
      assert(e.contains(s"Specifying a database in CREATE TEMPORARY FUNCTION " +
        s"is not allowed: '$dbName'"))
    }
  }
}
// Behaviors that depend on spark.sql.caseSensitive, exercised in both modes.
Seq(true, false).foreach { caseSensitive =>
  // Case-insensitive: c1/C1 collide; case-sensitive: both columns coexist.
  test(s"alter table add columns with existing column name - caseSensitive $caseSensitive") {
    withSQLConf(SQLConf.CASE_SENSITIVE.key -> s"$caseSensitive") {
      withTable("t1") {
        sql("CREATE TABLE t1 (c1 int) USING PARQUET")
        if (!caseSensitive) {
          val e = intercept[AnalysisException] {
            sql("ALTER TABLE t1 ADD COLUMNS (C1 string)")
          }.getMessage
          assert(e.contains("Found duplicate column(s)"))
        } else {
          sql("ALTER TABLE t1 ADD COLUMNS (C1 string)")
          assert(spark.table("t1").schema ==
            new StructType().add("c1", IntegerType).add("C1", StringType))
        }
      }
    }
  }

  // Identifiers containing the letter I must survive the Turkish locale,
  // whose upper/lower-casing of I/i differs from English.
  test(s"basic DDL using locale tr - caseSensitive $caseSensitive") {
    withSQLConf(SQLConf.CASE_SENSITIVE.key -> s"$caseSensitive") {
      withLocale("tr") {
        val dbName = "DaTaBaSe_I"
        withDatabase(dbName) {
          sql(s"CREATE DATABASE $dbName")
          sql(s"USE $dbName")
          val tabName = "tAb_I"
          withTable(tabName) {
            sql(s"CREATE TABLE $tabName(col_I int) USING PARQUET")
            sql(s"INSERT OVERWRITE TABLE $tabName SELECT 1")
            checkAnswer(sql(s"SELECT col_I FROM $tabName"), Row(1) :: Nil)
          }
        }
      }
    }
  }
}
// SET may not modify core SparkConf entries (they are fixed at startup).
test("set command rejects SparkConf entries") {
  val ex = intercept[AnalysisException] {
    sql(s"SET ${config.CPUS_PER_TASK.key} = 4")
  }
  assert(ex.getMessage.contains("Spark config"))
}

// DROP DATABASE ... CASCADE must invalidate cached plans/files so that a
// recreated table with the same name but a different path reads fresh data.
test("Refresh table before drop database cascade") {
  withTempDir { tempDir =>
    val file1 = new File(tempDir + "/first.csv")
    Utils.tryWithResource(new PrintWriter(file1)) { writer =>
      writer.write("first")
    }
    val file2 = new File(tempDir + "/second.csv")
    Utils.tryWithResource(new PrintWriter(file2)) { writer =>
      writer.write("second")
    }
    withDatabase("foo") {
      withTable("foo.first") {
        sql("CREATE DATABASE foo")
        sql(
          s"""CREATE TABLE foo.first (id STRING)
             |USING csv OPTIONS (path='${file1.toURI}')
           """.stripMargin)
        sql("SELECT * FROM foo.first")
        checkAnswer(spark.table("foo.first"), Row("first"))
        // Dropping the database and again creating same table with different path
        sql("DROP DATABASE foo CASCADE")
        sql("CREATE DATABASE foo")
        sql(
          s"""CREATE TABLE foo.first (id STRING)
             |USING csv OPTIONS (path='${file2.toURI}')
           """.stripMargin)
        sql("SELECT * FROM foo.first")
        // Stale cache would return "first" here.
        checkAnswer(spark.table("foo.first"), Row("second"))
      }
    }
  }
}
// CREATE TABLE ... LIKE ... USING <provider> copies the schema but takes the
// provider from the USING clause; an unknown provider fails, and the source
// may be a (global temp) view.
test("Create Table LIKE USING provider") {
  val catalog = spark.sessionState.catalog
  withTable("s", "t1", "t2", "t3", "t4") {
    sql("CREATE TABLE s(a INT, b INT) USING parquet")
    val source = catalog.getTableMetadata(TableIdentifier("s"))
    assert(source.provider == Some("parquet"))
    sql("CREATE TABLE t1 LIKE s USING orc")
    val table1 = catalog.getTableMetadata(TableIdentifier("t1"))
    assert(table1.provider == Some("orc"))
    sql("CREATE TABLE t2 LIKE s USING hive")
    val table2 = catalog.getTableMetadata(TableIdentifier("t2"))
    assert(table2.provider == Some("hive"))
    // Unknown providers surface as a data source lookup failure.
    val e1 = intercept[ClassNotFoundException] {
      sql("CREATE TABLE t3 LIKE s USING unknown")
    }.getMessage
    assert(e1.contains("Failed to find data source"))
    withGlobalTempView("src") {
      val globalTempDB = spark.sharedState.globalTempViewManager.database
      sql("CREATE GLOBAL TEMP VIEW src AS SELECT 1 AS a, '2' AS b")
      sql(s"CREATE TABLE t4 LIKE $globalTempDB.src USING parquet")
      val table = catalog.getTableMetadata(TableIdentifier("t4"))
      assert(table.provider == Some("parquet"))
    }
  }
}
// With the legacy single-file flag off (default), ADD FILE on a directory
// distributes the directory recursively, so a file inside it is retrievable
// via SparkFiles.
// Fix: the original never removed `directoryToAdd`. Utils.createTempDir only
// schedules deletion at JVM shutdown, so the directory (and testFile)
// accumulated for the lifetime of the test JVM; delete it eagerly instead.
test(s"Add a directory when ${SQLConf.LEGACY_ADD_SINGLE_FILE_IN_ADD_FILE.key} set to false") {
  val directoryToAdd = Utils.createTempDir("/tmp/spark/addDirectory/")
  try {
    val testFile = File.createTempFile("testFile", "1", directoryToAdd)
    spark.sql(s"ADD FILE $directoryToAdd")
    // The added copy lives under the Spark files dir; the source can go away.
    assert(new File(SparkFiles.get(s"${directoryToAdd.getName}/${testFile.getName}")).exists())
  } finally {
    Utils.deleteRecursively(directoryToAdd)
  }
}
// With the legacy flag on, ADD FILE accepts single files only and rejects
// directories.
test(s"Add a directory when ${SQLConf.LEGACY_ADD_SINGLE_FILE_IN_ADD_FILE.key} set to true") {
  withTempDir { testDir =>
    withSQLConf(SQLConf.LEGACY_ADD_SINGLE_FILE_IN_ADD_FILE.key -> "true") {
      val msg = intercept[SparkException] {
        spark.sql(s"ADD FILE $testDir")
      }.getMessage
      assert(msg.contains("is a directory and recursive is not turned on"))
    }
  }
}

// REFRESH FUNCTION: rejected for built-ins and temporary functions; for
// permanent functions it (re)loads the function into the registry, drops it
// when the catalog entry is gone, and surfaces class-loading failures.
test("REFRESH FUNCTION") {
  val msg = intercept[AnalysisException] {
    sql("REFRESH FUNCTION md5")
  }.getMessage
  assert(msg.contains("Cannot refresh built-in function"))
  // Database-qualifying skips the built-in and goes to the catalog lookup.
  val msg2 = intercept[NoSuchFunctionException] {
    sql("REFRESH FUNCTION default.md5")
  }.getMessage
  assert(msg2.contains(s"Undefined function: 'md5'. This function is neither a registered " +
    s"temporary function nor a permanent function registered in the database 'default'."))

  withUserDefinedFunction("func1" -> true) {
    sql("CREATE TEMPORARY FUNCTION func1 AS 'test.org.apache.spark.sql.MyDoubleAvg'")
    val msg = intercept[AnalysisException] {
      sql("REFRESH FUNCTION func1")
    }.getMessage
    assert(msg.contains("Cannot refresh temporary function"))
  }
  withUserDefinedFunction("func1" -> false) {
    val func = FunctionIdentifier("func1", Some("default"))
    assert(!spark.sessionState.catalog.isRegisteredFunction(func))
    // Refreshing a non-existent function fails without registering anything.
    intercept[NoSuchFunctionException] {
      sql("REFRESH FUNCTION func1")
    }
    assert(!spark.sessionState.catalog.isRegisteredFunction(func))
    // CREATE FUNCTION is lazy; REFRESH forces registration.
    sql("CREATE FUNCTION func1 AS 'test.org.apache.spark.sql.MyDoubleAvg'")
    assert(!spark.sessionState.catalog.isRegisteredFunction(func))
    sql("REFRESH FUNCTION func1")
    assert(spark.sessionState.catalog.isRegisteredFunction(func))
    // A failed refresh of a *different* name leaves func1 registered.
    val msg = intercept[NoSuchFunctionException] {
      sql("REFRESH FUNCTION func2")
    }.getMessage
    assert(msg.contains(s"Undefined function: 'func2'. This function is neither a registered " +
      s"temporary function nor a permanent function registered in the database 'default'."))
    assert(spark.sessionState.catalog.isRegisteredFunction(func))
    // If the catalog entry was dropped behind our back, REFRESH unregisters it.
    spark.sessionState.catalog.externalCatalog.dropFunction("default", "func1")
    assert(spark.sessionState.catalog.isRegisteredFunction(func))
    intercept[NoSuchFunctionException] {
      sql("REFRESH FUNCTION func1")
    }
    assert(!spark.sessionState.catalog.isRegisteredFunction(func))
    // A function whose class cannot load fails to refresh and stays out of
    // the registry.
    val function = CatalogFunction(func, "test.non.exists.udf", Seq.empty)
    spark.sessionState.catalog.createFunction(function, false)
    assert(!spark.sessionState.catalog.isRegisteredFunction(func))
    val err = intercept[AnalysisException] {
      sql("REFRESH FUNCTION func1")
    }.getMessage
    assert(err.contains("Can not load class"))
    assert(!spark.sessionState.catalog.isRegisteredFunction(func))
  }
}
// An unqualified name that shadows a built-in ("rand") still resolves to the
// built-in on REFRESH; the persistent function must be database-qualified.
test("REFRESH FUNCTION persistent function with the same name as the built-in function") {
  withUserDefinedFunction("default.rand" -> false) {
    val rand = FunctionIdentifier("rand", Some("default"))
    sql("CREATE FUNCTION rand AS 'test.org.apache.spark.sql.MyDoubleAvg'")
    assert(!spark.sessionState.catalog.isRegisteredFunction(rand))
    // Unqualified: hits the built-in and is rejected.
    val msg = intercept[AnalysisException] {
      sql("REFRESH FUNCTION rand")
    }.getMessage
    assert(msg.contains("Cannot refresh built-in function"))
    assert(!spark.sessionState.catalog.isRegisteredFunction(rand))
    // Qualified: refreshes (and registers) the persistent function.
    sql("REFRESH FUNCTION default.rand")
    assert(spark.sessionState.catalog.isRegisteredFunction(rand))
  }
}
}
// Companion holding the single mutable ACL status shared by every
// FakeLocalFsFileSystem instance — one status for all paths (test-only).
object FakeLocalFsFileSystem {
  var aclStatus = new AclStatus.Builder().build()
}
// A fake local filesystem used to test ACL handling. It tracks a single
// shared AclStatus (see the companion object), so for test purposes there is
// effectively one ACL status for all paths; getAclStatus ignores its path
// argument. Deleting any path resets the tracked status to empty, mimicking
// a real filesystem dropping ACLs together with the path.
class FakeLocalFsFileSystem extends RawLocalFileSystem {
  import FakeLocalFsFileSystem._

  override def setAcl(path: Path, aclSpec: java.util.List[AclEntry]): Unit =
    aclStatus = new AclStatus.Builder().addEntries(aclSpec).build()

  override def getAclStatus(path: Path): AclStatus = aclStatus

  override def delete(f: Path, recursive: Boolean): Boolean = {
    // Reset the shared status before delegating the actual deletion.
    aclStatus = new AclStatus.Builder().build()
    super.delete(f, recursive)
  }
}
| wzhfy/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala | Scala | apache-2.0 | 122,450 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules
import org.apache.flink.table.planner.plan.nodes.logical._
import org.apache.flink.table.planner.plan.rules.logical.{RemoveUnreachableCoalesceArgumentsRule, _}
import org.apache.flink.table.planner.plan.rules.physical.FlinkExpandConversionRule
import org.apache.flink.table.planner.plan.rules.physical.batch._
import org.apache.calcite.rel.core.RelFactories
import org.apache.calcite.rel.logical.{LogicalIntersect, LogicalMinus, LogicalUnion}
import org.apache.calcite.rel.rules._
import org.apache.calcite.tools.{RuleSet, RuleSets}
import scala.collection.JavaConverters._
/**
 * Collects the [[RuleSet]]s used by the different phases of the batch planner:
 * sub-query rewriting, plan expansion, normalization/rewriting, predicate pushdown,
 * join reordering, logical optimization, logical rewriting, physical optimization
 * and physical rewriting.
 */
object FlinkBatchRuleSets {

  /** Rules used while rewriting sub-queries into semi/anti joins. */
  val SEMI_JOIN_RULES: RuleSet = RuleSets.ofList(
    SimplifyFilterConditionRule.EXTENDED,
    FlinkRewriteSubQueryRule.FILTER,
    FlinkSubQueryRemoveRule.FILTER,
    JoinConditionTypeCoerceRule.INSTANCE,
    FlinkJoinPushExpressionsRule.INSTANCE
  )

  /**
   * Convert sub-queries before query decorrelation.
   */
  val TABLE_SUBQUERY_RULES: RuleSet = RuleSets.ofList(
    CoreRules.FILTER_SUB_QUERY_TO_CORRELATE,
    CoreRules.PROJECT_SUB_QUERY_TO_CORRELATE,
    CoreRules.JOIN_SUB_QUERY_TO_CORRELATE
  )

  /**
   * Expand plan by replacing references to tables with proper plan sub-trees. Those rules
   * can create new plan nodes.
   */
  val EXPAND_PLAN_RULES: RuleSet = RuleSets.ofList(
    LogicalCorrelateToJoinFromTemporalTableRule.LOOKUP_JOIN_WITH_FILTER,
    LogicalCorrelateToJoinFromTemporalTableRule.LOOKUP_JOIN_WITHOUT_FILTER)

  /** Clean-up rules applied right after plan expansion. */
  val POST_EXPAND_CLEAN_UP_RULES: RuleSet = RuleSets.ofList(
    EnumerableToLogicalTableScan.INSTANCE)

  /**
   * Convert table references before query decorrelation.
   */
  val TABLE_REF_RULES: RuleSet = RuleSets.ofList(
    EnumerableToLogicalTableScan.INSTANCE
  )

  /**
   * RuleSet to reduce (constant-fold) expressions.
   */
  private val REDUCE_EXPRESSION_RULES: RuleSet = RuleSets.ofList(
    CoreRules.FILTER_REDUCE_EXPRESSIONS,
    CoreRules.PROJECT_REDUCE_EXPRESSIONS,
    CoreRules.CALC_REDUCE_EXPRESSIONS,
    CoreRules.JOIN_REDUCE_EXPRESSIONS
  )

  /**
   * RuleSet to simplify coalesce invocations.
   */
  private val SIMPLIFY_COALESCE_RULES: RuleSet = RuleSets.ofList(
    RemoveUnreachableCoalesceArgumentsRule.PROJECT_INSTANCE,
    RemoveUnreachableCoalesceArgumentsRule.FILTER_INSTANCE,
    RemoveUnreachableCoalesceArgumentsRule.JOIN_INSTANCE,
    RemoveUnreachableCoalesceArgumentsRule.CALC_INSTANCE
  )

  /** RuleSet to push limits into table sources. */
  private val LIMIT_RULES: RuleSet = RuleSets.ofList(
    // push down localLimit
    PushLimitIntoTableSourceScanRule.INSTANCE,
    PushLimitIntoLegacyTableSourceScanRule.INSTANCE)

  /**
   * RuleSet to simplify predicate expressions in filters and joins.
   */
  private val PREDICATE_SIMPLIFY_EXPRESSION_RULES: RuleSet = RuleSets.ofList(
    SimplifyFilterConditionRule.INSTANCE,
    SimplifyJoinConditionRule.INSTANCE,
    JoinConditionTypeCoerceRule.INSTANCE,
    CoreRules.JOIN_PUSH_EXPRESSIONS
  )

  /**
   * RuleSet to normalize plans for batch.
   */
  val DEFAULT_REWRITE_RULES: RuleSet = RuleSets.ofList((
    PREDICATE_SIMPLIFY_EXPRESSION_RULES.asScala ++
      SIMPLIFY_COALESCE_RULES.asScala ++
      REDUCE_EXPRESSION_RULES.asScala ++
      List(
        // Transform window to LogicalWindowAggregate
        BatchLogicalWindowAggregateRule.INSTANCE,
        // slices a project into sections which contain window agg functions
        // and sections which do not.
        CoreRules.PROJECT_TO_LOGICAL_PROJECT_AND_WINDOW,
        // adjust the sequence of window's groups.
        WindowGroupReorderRule.INSTANCE,
        WindowPropertiesRules.WINDOW_PROPERTIES_RULE,
        WindowPropertiesRules.WINDOW_PROPERTIES_HAVING_RULE,
        // let project transpose window operator.
        CoreRules.PROJECT_WINDOW_TRANSPOSE,
        // ensure the union set operator's inputs have the same row type
        new CoerceInputsRule(classOf[LogicalUnion], false),
        // ensure the intersect set operator's inputs have the same row type
        new CoerceInputsRule(classOf[LogicalIntersect], false),
        // ensure the except set operator's inputs have the same row type
        new CoerceInputsRule(classOf[LogicalMinus], false),
        ConvertToNotInOrInRule.INSTANCE,
        // optimize limit 0
        FlinkLimit0RemoveRule.INSTANCE,
        // unnest rule
        LogicalUnnestRule.INSTANCE,
        // Wrap arguments for JSON aggregate functions
        WrapJsonAggFunctionArgumentsRule.INSTANCE
      )).asJava)

  /**
   * RuleSet about filter pushdown and merging.
   */
  private val FILTER_RULES: RuleSet = RuleSets.ofList(
    // push a filter into a join
    CoreRules.FILTER_INTO_JOIN,
    // push filter into the children of a join
    CoreRules.JOIN_CONDITION_PUSH,
    // push filter through an aggregation
    CoreRules.FILTER_AGGREGATE_TRANSPOSE,
    // push a filter past a project
    CoreRules.FILTER_PROJECT_TRANSPOSE,
    CoreRules.FILTER_SET_OP_TRANSPOSE,
    CoreRules.FILTER_MERGE
  )

  /** RuleSet to derive IS NOT NULL filters from join conditions. */
  val JOIN_NULL_FILTER_RULES: RuleSet = RuleSets.ofList(
    JoinDeriveNullFilterRule.INSTANCE
  )

  /** RuleSet to rewrite join predicates (dependent-condition derivation plus null filters). */
  val JOIN_PREDICATE_REWRITE_RULES: RuleSet = RuleSets.ofList((
    RuleSets.ofList(JoinDependentConditionDerivationRule.INSTANCE).asScala ++
      JOIN_NULL_FILTER_RULES.asScala
    ).asJava)

  /**
   * RuleSet to do predicate pushdown.
   */
  val FILTER_PREPARE_RULES: RuleSet = RuleSets.ofList((
    FILTER_RULES.asScala
      // simplify predicate expressions in filters and joins
      ++ PREDICATE_SIMPLIFY_EXPRESSION_RULES.asScala
      // reduce expressions in filters and joins
      ++ REDUCE_EXPRESSION_RULES.asScala
    ).asJava
  )

  /**
   * RuleSet to push down partitions into table sources.
   */
  val PUSH_PARTITION_DOWN_RULES: RuleSet = RuleSets.ofList(
    // push partition into the table scan
    PushPartitionIntoLegacyTableSourceScanRule.INSTANCE,
    // push partition into the dynamic table scan
    PushPartitionIntoTableSourceScanRule.INSTANCE
  )

  /**
   * RuleSet to push down filters into table sources.
   */
  val PUSH_FILTER_DOWN_RULES: RuleSet = RuleSets.ofList(
    // push a filter down into the table scan
    PushFilterIntoTableSourceScanRule.INSTANCE,
    PushFilterIntoLegacyTableSourceScanRule.INSTANCE
  )

  /**
   * RuleSet to prune empty results.
   */
  val PRUNE_EMPTY_RULES: RuleSet = RuleSets.ofList(
    PruneEmptyRules.AGGREGATE_INSTANCE,
    PruneEmptyRules.FILTER_INSTANCE,
    PruneEmptyRules.JOIN_LEFT_INSTANCE,
    FlinkPruneEmptyRules.JOIN_RIGHT_INSTANCE,
    PruneEmptyRules.PROJECT_INSTANCE,
    PruneEmptyRules.SORT_INSTANCE,
    PruneEmptyRules.UNION_INSTANCE
  )

  /**
   * RuleSet about projection pushdown and merging.
   */
  val PROJECT_RULES: RuleSet = RuleSets.ofList(
    // push a projection past a filter
    CoreRules.PROJECT_FILTER_TRANSPOSE,
    // push a projection to the children of a non semi/anti join
    // push all expressions to handle the time indicator correctly
    new FlinkProjectJoinTransposeRule(
      PushProjector.ExprCondition.FALSE, RelFactories.LOGICAL_BUILDER),
    // push a projection to the children of a semi/anti Join
    ProjectSemiAntiJoinTransposeRule.INSTANCE,
    // merge projections
    CoreRules.PROJECT_MERGE,
    // remove identity project
    CoreRules.PROJECT_REMOVE,
    // removes constant keys from an Agg
    CoreRules.AGGREGATE_PROJECT_PULL_UP_CONSTANTS,
    // push project through a Union
    CoreRules.PROJECT_SET_OP_TRANSPOSE
  )

  /** RuleSet to transfer equality between join conditions, followed by simplification. */
  val JOIN_COND_EQUAL_TRANSFER_RULES: RuleSet = RuleSets.ofList((
    RuleSets.ofList(JoinConditionEqualityTransferRule.INSTANCE).asScala ++
      PREDICATE_SIMPLIFY_EXPRESSION_RULES.asScala ++
      FILTER_RULES.asScala
    ).asJava)

  /** RuleSet to merge joins, projects and filters into a MultiJoin before reordering. */
  val JOIN_REORDER_PREPARE_RULES: RuleSet = RuleSets.ofList(
    // merge join to MultiJoin
    CoreRules.JOIN_TO_MULTI_JOIN,
    // merge project to MultiJoin
    CoreRules.PROJECT_MULTI_JOIN_MERGE,
    // merge filter to MultiJoin
    CoreRules.FILTER_MULTI_JOIN_MERGE
  )

  /** RuleSet that performs the actual join reordering. */
  val JOIN_REORDER_RULES: RuleSet = RuleSets.ofList(
    // equi-join predicates transfer
    RewriteMultiJoinConditionRule.INSTANCE,
    // join reorder
    CoreRules.MULTI_JOIN_OPTIMIZE
  )

  /**
   * RuleSet to do logical optimize.
   * This RuleSet is a sub-set of [[LOGICAL_OPT_RULES]].
   */
  private val LOGICAL_RULES: RuleSet = RuleSets.ofList(
    // scan optimization
    PushProjectIntoTableSourceScanRule.INSTANCE,
    PushProjectIntoLegacyTableSourceScanRule.INSTANCE,
    PushFilterIntoTableSourceScanRule.INSTANCE,
    PushFilterIntoLegacyTableSourceScanRule.INSTANCE,
    // reorder sort and projection
    CoreRules.SORT_PROJECT_TRANSPOSE,
    // remove unnecessary sort rule
    CoreRules.SORT_REMOVE,
    // join rules
    FlinkJoinPushExpressionsRule.INSTANCE,
    SimplifyJoinConditionRule.INSTANCE,
    // remove union with only a single child
    CoreRules.UNION_REMOVE,
    // convert non-all union into all-union + distinct
    CoreRules.UNION_TO_DISTINCT,
    // aggregation and projection rules
    CoreRules.AGGREGATE_PROJECT_MERGE,
    CoreRules.AGGREGATE_PROJECT_PULL_UP_CONSTANTS,
    // remove aggregation if it does not aggregate and input is already distinct
    FlinkAggregateRemoveRule.INSTANCE,
    // push aggregate through join
    FlinkAggregateJoinTransposeRule.EXTENDED,
    // aggregate union rule
    CoreRules.AGGREGATE_UNION_AGGREGATE,
    // expand distinct aggregate to normal aggregate with groupby
    FlinkAggregateExpandDistinctAggregatesRule.INSTANCE,
    // reduce aggregate functions like AVG, STDDEV_POP etc.
    CoreRules.AGGREGATE_REDUCE_FUNCTIONS,
    WindowAggregateReduceFunctionsRule.INSTANCE,
    // reduce group by columns
    AggregateReduceGroupingRule.INSTANCE,
    // reduce useless aggCall
    PruneAggregateCallRule.PROJECT_ON_AGGREGATE,
    PruneAggregateCallRule.CALC_ON_AGGREGATE,
    // expand grouping sets
    DecomposeGroupingSetsRule.INSTANCE,
    // rank rules
    FlinkLogicalRankRule.CONSTANT_RANGE_INSTANCE,
    // transpose calc past rank to reduce rank input fields
    CalcRankTransposeRule.INSTANCE,
    // remove output of rank number when it is a constant
    ConstantRankNumberColumnRemoveRule.INSTANCE,
    // calc rules
    CoreRules.FILTER_CALC_MERGE,
    CoreRules.PROJECT_CALC_MERGE,
    CoreRules.FILTER_TO_CALC,
    CoreRules.PROJECT_TO_CALC,
    FlinkCalcMergeRule.INSTANCE,
    // semi/anti join transpose rule
    FlinkSemiAntiJoinJoinTransposeRule.INSTANCE,
    FlinkSemiAntiJoinProjectTransposeRule.INSTANCE,
    FlinkSemiAntiJoinFilterTransposeRule.INSTANCE,
    // set operators
    ReplaceIntersectWithSemiJoinRule.INSTANCE,
    RewriteIntersectAllRule.INSTANCE,
    ReplaceMinusWithAntiJoinRule.INSTANCE,
    RewriteMinusAllRule.INSTANCE
  )

  /**
   * RuleSet to translate calcite nodes to flink nodes.
   */
  private val LOGICAL_CONVERTERS: RuleSet = RuleSets.ofList(
    FlinkLogicalAggregate.BATCH_CONVERTER,
    FlinkLogicalOverAggregate.CONVERTER,
    FlinkLogicalCalc.CONVERTER,
    FlinkLogicalCorrelate.CONVERTER,
    FlinkLogicalJoin.CONVERTER,
    FlinkLogicalSort.BATCH_CONVERTER,
    FlinkLogicalUnion.CONVERTER,
    FlinkLogicalValues.CONVERTER,
    FlinkLogicalTableSourceScan.CONVERTER,
    FlinkLogicalLegacyTableSourceScan.CONVERTER,
    FlinkLogicalTableFunctionScan.CONVERTER,
    FlinkLogicalDataStreamTableScan.CONVERTER,
    FlinkLogicalIntermediateTableScan.CONVERTER,
    FlinkLogicalExpand.CONVERTER,
    FlinkLogicalRank.CONVERTER,
    FlinkLogicalWindowAggregate.CONVERTER,
    FlinkLogicalSnapshot.CONVERTER,
    FlinkLogicalSink.CONVERTER,
    FlinkLogicalLegacySink.CONVERTER,
    FlinkLogicalDistribution.BATCH_CONVERTER
  )

  /**
   * RuleSet to do logical optimize for batch.
   */
  val LOGICAL_OPT_RULES: RuleSet = RuleSets.ofList((
    LIMIT_RULES.asScala ++
      FILTER_RULES.asScala ++
      PROJECT_RULES.asScala ++
      PRUNE_EMPTY_RULES.asScala ++
      LOGICAL_RULES.asScala ++
      LOGICAL_CONVERTERS.asScala
    ).asJava)

  /**
   * RuleSet to do rewrite on FlinkLogicalRel for batch.
   */
  val LOGICAL_REWRITE: RuleSet = RuleSets.ofList(
    // transpose calc past snapshot
    CalcSnapshotTransposeRule.INSTANCE,
    // Rule that splits python ScalarFunctions from join conditions
    SplitPythonConditionFromJoinRule.INSTANCE,
    // Rule that splits python ScalarFunctions from
    // java/scala ScalarFunctions in correlate conditions
    SplitPythonConditionFromCorrelateRule.INSTANCE,
    // Rule that transpose the conditions after the Python correlate node.
    CalcPythonCorrelateTransposeRule.INSTANCE,
    // Rule that splits java calls from python TableFunction
    PythonCorrelateSplitRule.INSTANCE,
    // merge calc after calc transpose
    FlinkCalcMergeRule.INSTANCE,
    // Rule that splits python ScalarFunctions from java/scala ScalarFunctions
    PythonCalcSplitRule.SPLIT_CONDITION_REX_FIELD,
    PythonCalcSplitRule.SPLIT_PROJECTION_REX_FIELD,
    PythonCalcSplitRule.SPLIT_CONDITION,
    PythonCalcSplitRule.SPLIT_PROJECT,
    PythonCalcSplitRule.SPLIT_PANDAS_IN_PROJECT,
    PythonCalcSplitRule.EXPAND_PROJECT,
    PythonCalcSplitRule.PUSH_CONDITION,
    PythonCalcSplitRule.REWRITE_PROJECT,
    PythonMapMergeRule.INSTANCE,
    // remove output of rank number when it is not used by successor calc
    RedundantRankNumberColumnRemoveRule.INSTANCE
  )

  /**
   * RuleSet to do physical optimize for batch.
   */
  val PHYSICAL_OPT_RULES: RuleSet = RuleSets.ofList(
    FlinkExpandConversionRule.BATCH_INSTANCE,
    // source
    BatchPhysicalBoundedStreamScanRule.INSTANCE,
    BatchPhysicalTableSourceScanRule.INSTANCE,
    BatchPhysicalLegacyTableSourceScanRule.INSTANCE,
    BatchPhysicalIntermediateTableScanRule.INSTANCE,
    BatchPhysicalValuesRule.INSTANCE,
    // calc
    BatchPhysicalCalcRule.INSTANCE,
    BatchPhysicalPythonCalcRule.INSTANCE,
    // union
    BatchPhysicalUnionRule.INSTANCE,
    // sort
    BatchPhysicalSortRule.INSTANCE,
    BatchPhysicalLimitRule.INSTANCE,
    BatchPhysicalSortLimitRule.INSTANCE,
    // rank
    BatchPhysicalRankRule.INSTANCE,
    RemoveRedundantLocalRankRule.INSTANCE,
    // expand
    BatchPhysicalExpandRule.INSTANCE,
    // group agg
    BatchPhysicalHashAggRule.INSTANCE,
    BatchPhysicalSortAggRule.INSTANCE,
    RemoveRedundantLocalSortAggRule.WITHOUT_SORT,
    RemoveRedundantLocalSortAggRule.WITH_SORT,
    RemoveRedundantLocalHashAggRule.INSTANCE,
    BatchPhysicalPythonAggregateRule.INSTANCE,
    // over agg
    BatchPhysicalOverAggregateRule.INSTANCE,
    // window agg
    BatchPhysicalWindowAggregateRule.INSTANCE,
    BatchPhysicalPythonWindowAggregateRule.INSTANCE,
    // join
    BatchPhysicalHashJoinRule.INSTANCE,
    BatchPhysicalSortMergeJoinRule.INSTANCE,
    BatchPhysicalNestedLoopJoinRule.INSTANCE,
    BatchPhysicalSingleRowJoinRule.INSTANCE,
    BatchPhysicalLookupJoinRule.SNAPSHOT_ON_TABLESCAN,
    BatchPhysicalLookupJoinRule.SNAPSHOT_ON_CALC_TABLESCAN,
    // correlate
    BatchPhysicalConstantTableFunctionScanRule.INSTANCE,
    BatchPhysicalCorrelateRule.INSTANCE,
    BatchPhysicalPythonCorrelateRule.INSTANCE,
    // sink
    BatchPhysicalSinkRule.INSTANCE,
    BatchPhysicalLegacySinkRule.INSTANCE,
    // hive distribution
    BatchPhysicalDistributionRule.INSTANCE
  )

  /**
   * RuleSet to optimize plans after batch-exec physical optimization.
   */
  val PHYSICAL_REWRITE: RuleSet = RuleSets.ofList(
    EnforceLocalHashAggRule.INSTANCE,
    EnforceLocalSortAggRule.INSTANCE,
    PushLocalHashAggIntoScanRule.INSTANCE,
    PushLocalHashAggWithCalcIntoScanRule.INSTANCE,
    PushLocalSortAggIntoScanRule.INSTANCE,
    PushLocalSortAggWithSortIntoScanRule.INSTANCE,
    PushLocalSortAggWithCalcIntoScanRule.INSTANCE,
    PushLocalSortAggWithSortAndCalcIntoScanRule.INSTANCE
  )
}
| StephanEwen/incubator-flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/FlinkBatchRuleSets.scala | Scala | apache-2.0 | 16,437 |
package org.http4s.server.blaze
import cats.effect._
import cats.implicits._
import fs2.concurrent.SignallingRef
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets._
import java.util.concurrent.atomic.AtomicBoolean
import org.http4s._
import org.http4s.blaze.pipeline.LeafBuilder
import org.http4s.blazecore.websocket.Http4sWSStage
import org.http4s.headers._
import org.http4s.internal.unsafeRunAsync
import org.http4s.syntax.string._
import org.http4s.websocket.WebSocketHandshake
import scala.concurrent.Future
import scala.util.{Failure, Success}
private[blaze] trait WebSocketSupport[F[_]] extends Http1ServerStage[F] {
  protected implicit def F: ConcurrentEffect[F]

  /**
   * Renders the response, performing a websocket upgrade when applicable.
   *
   * If the response carries a websocket attribute and the request is a valid
   * websocket handshake, the 101 Switching Protocols response is written
   * directly and the pipeline tail is replaced with websocket stages.
   * Otherwise this delegates to the normal HTTP/1 rendering.
   */
  override protected def renderResponse(
      req: Request[F],
      resp: Response[F],
      cleanup: () => Future[ByteBuffer]): Unit = {
    val ws = resp.attributes.lookup(org.http4s.server.websocket.websocketKey[F])
    logger.debug(s"Websocket key: $ws\\nRequest headers: " + req.headers)

    ws match {
      case None => super.renderResponse(req, resp, cleanup)
      case Some(wsContext) =>
        val hdrs = req.headers.toList.map(h => (h.name.toString, h.value))
        if (WebSocketHandshake.isWebSocketRequest(hdrs)) {
          WebSocketHandshake.serverHandshake(hdrs) match {
            case Left((code, msg)) =>
              // Handshake failed: render the caller-supplied failure response,
              // asking the client to close and advertising WS protocol version 13.
              logger.info(s"Invalid handshake $code, $msg")
              unsafeRunAsync {
                wsContext.failureResponse
                  .map(
                    _.withHeaders(
                      Connection("close".ci),
                      Header.Raw(headers.`Sec-WebSocket-Version`.name, "13")
                    ))
              } {
                case Right(resp) =>
                  IO(super.renderResponse(req, resp, cleanup))
                case Left(_) =>
                  IO.unit
              }

            case Right(hdrs) => // Successful handshake
              // Manually build the raw "101 Switching Protocols" response,
              // including both the handshake headers and any user-provided ones.
              val sb = new StringBuilder
              sb.append("HTTP/1.1 101 Switching Protocols\\r\\n")
              hdrs.foreach {
                case (k, v) => sb.append(k).append(": ").append(v).append('\\r').append('\\n')
              }
              wsContext.headers.foreach { hdr =>
                sb.append(hdr.name).append(": ").append(hdr.value).append('\\r').append('\\n')
                ()
              }
              sb.append('\\r').append('\\n')

              // write the accept headers and reform the pipeline
              channelWrite(ByteBuffer.wrap(sb.result().getBytes(ISO_8859_1))).onComplete {
                case Success(_) =>
                  logger.debug("Switching pipeline segments for websocket")

                  // deadSignal terminates the websocket stream; sentClose tracks
                  // whether a close frame has already been sent.
                  val deadSignal = F.toIO(SignallingRef[F, Boolean](false)).unsafeRunSync()
                  val sentClose = new AtomicBoolean(false)
                  val segment =
                    LeafBuilder(new Http4sWSStage[F](wsContext.webSocket, sentClose, deadSignal))
                      .prepend(new WSFrameAggregator)
                      .prepend(new WebSocketDecoder)

                  this.replaceTail(segment, true)

                case Failure(t) => fatalError(t, "Error writing Websocket upgrade response")
              }(executionContext)
          }
        } else super.renderResponse(req, resp, cleanup)
    }
  }
}
| aeons/http4s | blaze-server/src/main/scala/org/http4s/server/blaze/WebSocketSupport.scala | Scala | apache-2.0 | 3,292 |
package cs220.queue2
/** A "faster" functional queue.
*
* This functional queue uses a functional [List] as its internal
* implementation of a queue.
*
* In this impementation we keep two lists: one for the leading
* elements that we will dequeue from and trailing elements which
* represent the end of the queue.
*/
/** A "faster" functional queue backed by a pair of immutable lists.
  *
  * Elements are dequeued from `leading` and enqueued onto `trailing`, which is
  * kept in reverse order so `enqueue` is a constant-time prepend. Whenever the
  * leading list runs dry, the reversed trailing list becomes the new leading
  * list, giving amortized constant-time `head`/`tail`/`enqueue`.
  */
class FasterQueue[T] private [queue2] (
  private val leading: List[T],
  private val trailing: List[T]
) extends Queue[T] {

  // Returns an equivalent queue whose leading list is non-empty whenever the
  // queue itself is non-empty. When elements are already available up front
  // this is a no-op; otherwise the trailing list is reversed into place.
  // (Note: repeated calls on an empty-leading queue re-reverse each time,
  // which is why head should not be called repeatedly without tail.)
  private def normalized: FasterQueue[T] =
    if (leading.nonEmpty) this
    else new FasterQueue(trailing.reverse, Nil)

  // First element of the queue: the head of the normalized leading list.
  def head = normalized.leading.head

  // Everything but the first element, as a new queue sharing the normalized lists.
  def tail = {
    val q = normalized
    new FasterQueue(q.leading.tail, q.trailing)
  }

  // Constant-time enqueue: prepend onto the (reversed) trailing list.
  def enqueue(x: T) = new FasterQueue(leading, x :: trailing)

  // Renders the queue front-to-back, e.g. Queue(1,2,3).
  override def toString = {
    val elems = leading ::: trailing.reverse
    "Queue(" + elems.mkString(",") + ")"
  }
}
| umass-cs-220/week-09-libraries | code/types/src/main/scala/cs220/queue2/FasterQueue.scala | Scala | apache-2.0 | 2,284 |
package com.basrikahveci
package cardgame.messaging.response
import cardgame.messaging.{Success, Response}
import cardgame.domain.UserIdentity
/** Response sent after a sign-in attempt, carrying the user's id, profile data,
  * current points and currently-online friends. `success` and `reason` default
  * to a successful outcome.
  */
class SignInResponse(val userId: Long, val firstName: String, val signInTime: Long, val points: Int, val onlineFriends: Seq[UserIdentity], val success: Boolean = true, val reason: Int = Success.ordinal) extends Response
| metanet/cardgame-server-scala | src/main/scala/com/basrikahveci/cardgame/messaging/response/SignInResponse.scala | Scala | mit | 365 |
package com.olvind.crud
package server
import slick.jdbc.JdbcBackend
/** Mixin that supplies the Slick JDBC database handle used by the integration layer. */
trait integrationDb extends integrationSlick {
  // The database against which queries are run; provided by the concrete integration.
  def db: JdbcBackend#Database
}
| elacin/slick-crud | crud/jvm/src/main/scala/com/olvind/crud/server/integrationDb.scala | Scala | apache-2.0 | 151 |
package edu.berkeley.nlp.entity.sem
import edu.berkeley.nlp.futile.fig.basic.IOUtils
import scala.collection.mutable.HashMap
import edu.berkeley.nlp.futile.util.Logger
object BrownClusterInterface {

  /** Reads Brown cluster definitions from the file at `path`, keeping only
    * entries whose count (third column) is at least `cutoff`.
    *
    * Each input line is expected to hold three whitespace-separated fields:
    * cluster bit-string, word, and count; malformed lines are skipped.
    *
    * @return immutable map from word to its cluster bit-string
    */
  def loadBrownClusters(path: String, cutoff: Int): Map[String,String] = {
    val clusterByWord = new HashMap[String,String];
    val lines = IOUtils.lineIterator(path);
    while (lines.hasNext) {
      val fields = lines.next.split("\\\\s+");
      // Keep only well-formed lines that meet the frequency cutoff.
      if (fields.size == 3 && fields(fields.size - 1).toInt >= cutoff) {
        clusterByWord.put(fields(1), fields(0));
      }
    }
    Logger.logss(clusterByWord.size + " Brown cluster definitions read in");
    clusterByWord.toMap;
  }
}
/*
* Copyright 2015 Tupleware
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.sql.Timestamp
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.sql
import org.apache.spark.sql._
object Tpch1 {

  /** Row type for the subset of TPC-H lineitem columns needed by query 1.
    * l_shipdate is stored as epoch seconds.
    */
  case class Lineitem(l_quantity: Float,
                      l_extendedprice: Float,
                      l_discount: Float,
                      l_tax: Float,
                      l_returnflag: Byte,
                      l_linestatus: Byte,
                      l_shipdate: Long)

  /** Runs TPC-H query 1 ten times against a cached in-memory table and prints
    * the wall-clock time of each run, followed by the query result rows.
    *
    * Expected arguments:
    *   args(0) - path of the pipe-delimited lineitem data file
    *   args(1) - "true"/"false": compress the in-memory columnar storage
    *   args(2) - number of shuffle partitions
    *   args(3) - "true"/"false": enable SQL code generation
    */
  def main(args: Array[String]) {
    val filename = args(0)
    val compress = args(1)
    val numParts = args(2)
    val codegen = args(3)

    val conf = new SparkConf()
      .setAppName("tpch1")
      .set("spark.sql.inMemoryColumnarStorage.compressed", compress)
      .set("spark.sql.shuffle.partitions", numParts)
      .set("spark.sql.codegen", codegen)
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)
    import sqlContext.createSchemaRDD

    // Parse the pipe-delimited file into Lineitem rows; dates become epoch seconds.
    val fmt = new java.text.SimpleDateFormat("yyyy-MM-dd")
    val lineitem = sc.textFile(filename)
      .map(line => line.split('|'))
      .map(t => Lineitem(t(4).toFloat,
                         t(5).toFloat,
                         t(6).toFloat,
                         t(7).toFloat,
                         t(8)(0).toByte,
                         t(9)(0).toByte,
                         fmt.parse(t(10)).getTime()/1000))
    lineitem.registerTempTable("lineitem")
    sqlContext.cacheTable("lineitem")

    // 904708800 is the epoch-second equivalent of the TPC-H Q1 date predicate.
    val result = sqlContext.sql("""
      select
        l_returnflag,
        l_linestatus,
        sum(l_quantity) as sum_qty,
        sum(l_extendedprice) as sum_base_price,
        sum(l_extendedprice * (1 - l_discount)) as sum_disc_price,
        sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge,
        sum(l_discount) as avg_disc,
        count(*) as count_order
      from
        lineitem
      where
        l_shipdate <= 904708800
      group by
        l_returnflag,
        l_linestatus""")

    // Time ten runs; the first run also populates the in-memory cache.
    for (i <- 1 to 10) {
      val start = System.nanoTime()
      result.collect()
      val stop = System.nanoTime()
      println("compress=" + compress + ",numparts=" + numParts + ",codegen=" + codegen + ",time: " + (stop - start))
    }

    // Bug fix: this used to be println(result.collect().foreach(println)), which
    // additionally printed the Unit value "()" returned by foreach after the rows.
    result.collect().foreach(println)
  }
}
| twareproj/tware | benchmarks/spark/src/main/scala/tpch/Tpch1.scala | Scala | apache-2.0 | 3,128 |
/**
 * @author Christoph Knabe on 2014-06-27
 */
package object crawl {

  // URIs the crawler is allowed to browse. Only browsable text resources are
  // listed; see the note at the end for resources deliberately left out.
  val browsableURIs = Seq( "spray.io"
    , "www.espero.com.cn/old/epch/Cel/", "www.esperanto.cl", "www.esperanto.org.ar", "www.epa.jp", "kleg.jp"
    , "www.jej.jp", "www.jei.or.jp"
    , "www.wikipedia.org", "stackoverflow.com/questions/tagged/scala", "scala-lang.org"
    , "doc.akka.io"
    , "www.beuth-hochschule.de/vrp", "public.beuth-hochschule.de/~knabe/", "fb6.beuth-hochschule.de"
    , "uea.org", "www.tejo.org"
    , "esperanto.de", "esperantoland.org", "www.eventoj.hu", "www.esperanto.mv.ru/KompLeks/KOVRILO.html"
    , "eo.wikibooks.org", "www.esperanto.de", "www.esperanto.org", "brila-aktivulo.com"
    , "kongresszentrum-fulda.com", "www.esperanto.net", "www.esperanto.hr", "www.usej.org"
    , "www.esperanto.org.za", "www.esperanto.es/hef/"
    , "www.esperanto-mexico.org", "www.esperantocolombia.org", "ameriko.org"
    , "esperanto.org.br/bel/", "www.esperanto-df.org.br", "aerj.org.br", "reu.ru"
    , "tatoeba.org"
    , "www.meetup.com/Scala-Berlin-Brandenburg/", "www.immobilienscout24.de/"
    , "www.assembla.com", "www.thefreedictionary.com", "junit.org", "frankwestphal.de"
    , "www.sevenforums.com/tutorials/"
    , "www.xprogramming.com", "typesafe.com", "web.de", "www.leo.org"
    , "www.gmx.net", "www.t-online.de", "www.gi.de", "www.tu-chemnitz.de/informatik/"
    , "www.dmoz.org", "www.in.tum.de", "www.inf.uni-hamburg.de", "www.ft-informatik.de"
    , "www.berlinerbaeder.de", "www.berlin.de", "www.staatsballett-berlin.de"
    // No-text resources should not be accepted:
    //, "ixquick.com/graphics/ixquick_res.gif", "partners.adobe.com/public/developer/en/pdf/PDFReference16.pdf"
  )
}
| ChristophKnabe/sprayreactivedemo | src/main/scala/crawl/package.scala | Scala | lgpl-3.0 | 1,707 |
/*
* StructuredVEBPChooserTest.scala
* Test of structured hybrid VE/BP algorithm.
*
* Created By: Avi Pfeffer (apfeffer@cra.com)
* Creation Date: March 1, 2015
*
* Copyright 2015 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email figaro@cra.com for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.test.algorithm.structured.strategy
import org.scalatest.{WordSpec, Matchers}
import com.cra.figaro.language._
import com.cra.figaro.library.compound.If
import com.cra.figaro.algorithm.structured.algorithm.hybrid.StructuredVEBPChooser
import com.cra.figaro.algorithm.lazyfactored.ValueSet._
import com.cra.figaro.language.Element.toBooleanElement
/**
 * Tests for the structured hybrid VE/BP algorithm: flat models, evidence
 * handling, nested models of varying depth, and the expansion/caching
 * behavior of arguments and shared subproblems.
 */
class StructuredVEBPChooserTest extends WordSpec with Matchers {
  "Executing a recursive structured VE solver strategy" when {
    "given a flat model with an atomic flip without evidence" should {
      "produce the correct answer" in {
        Universe.createNew()
        val e2 = Flip(0.6)
        val e3 = Apply(e2, (b: Boolean) => b)
        StructuredVEBPChooser.probability(e3, true) should equal (0.6)
      }
    }

    "given a flat model with a compound Flip without evidence" should {
      "produce the correct answer" in {
        Universe.createNew()
        // P(e2) = average of the four parameter values = 0.6
        val e1 = Select(0.25 -> 0.3, 0.25 -> 0.5, 0.25 -> 0.7, 0.25 -> 0.9)
        val e2 = Flip(e1)
        val e3 = Apply(e2, (b: Boolean) => b)
        StructuredVEBPChooser.probability(e3, true) should equal (0.6)
      }
    }

    "given a flat model with evidence" should {
      "produce the correct answer" in {
        Universe.createNew()
        val e1 = Select(0.25 -> 0.3, 0.25 -> 0.5, 0.25 -> 0.7, 0.25 -> 0.9)
        val e2 = Flip(e1)
        val e3 = Apply(e2, (b: Boolean) => b)
        e3.observe(true)
        // Posterior by Bayes' rule: 0.25 * 0.3 / 0.6 = 0.125
        StructuredVEBPChooser.probability(e1, 0.3) should be (0.125 +- 0.000000001)
      }
    }

    "given a model with multiple targets and no evidence" should {
      "produce the correct probability over both targets" in {
        Universe.createNew()
        val e1 = Select(0.25 -> 0.3, 0.25 -> 0.5, 0.25 -> 0.7, 0.25 -> 0.9)
        val e2 = Flip(e1)
        val e3 = Apply(e2, (b: Boolean) => b)
        val alg = StructuredVEBPChooser(0.0, 100, e2, e3)
        alg.start()
        alg.probability(e2, true) should equal (0.6)
        alg.probability(e3, true) should equal (0.6)
      }
    }

    "given a model with multiple targets with evidence" should {
      "produce the correct probability over both targets" in {
        Universe.createNew()
        val e1 = Select(0.25 -> 0.3, 0.25 -> 0.5, 0.25 -> 0.7, 0.25 -> 0.9)
        val e2 = Flip(e1)
        val e3 = Apply(e2, (b: Boolean) => b)
        e3.observe(true)
        val alg = StructuredVEBPChooser(0.0, 100, e2, e1)
        alg.start()
        alg.probability(e2, true) should equal (1.0)
        alg.probability(e1, 0.3) should be (0.125 +- 0.000000001)
      }
    }

    "given a one-level nested model without evidence" should {
      "produce the correct answer" in {
        Universe.createNew()
        val e1 = Select(0.25 -> 0.3, 0.25 -> 0.5, 0.25 -> 0.7, 0.25 -> 0.9)
        val e2 = Flip(e1)
        val e3 = If(e2, Constant(true), Constant(false))
        val alg = StructuredVEBPChooser(0.0, 100, e3)
        alg.start()
        alg.probability(e3, true) should equal (0.6)
      }
    }

    "given a one-level nested model with nested evidence" should {
      "produce the correct answer" in {
        Universe.createNew()
        val e1 = Select(0.25 -> 0.3, 0.25 -> 0.5, 0.25 -> 0.7, 0.25 -> 0.9)
        val e2 = Flip(e1)
        // The observation inside the then-branch makes it deterministic true.
        val e3 = If(e2, { val e = Flip(0.5); e.observe(true); e }, Constant(false))
        val alg = StructuredVEBPChooser(0.0, 100, e3)
        alg.start()
        alg.probability(e3, true) should equal (0.6)
      }
    }

    "given a two-level nested model" should {
      "produce the correct answer" in {
        Universe.createNew()
        val e1 = Select(0.25 -> 0.3, 0.25 -> 0.5, 0.25 -> 0.7, 0.25 -> 0.9)
        val e2 = Flip(e1)
        val e3 = If(e2, If(Flip(0.9), Constant(true), Constant(false)), Constant(false))
        val alg = StructuredVEBPChooser(0.0, 100, e3)
        alg.start()
        alg.probability(e3, true) should be ((0.6 * 0.9) +- 0.000000001)
      }
    }

    "expanding an element with two different arguments" should {
      "expand both the arguments" in {
        Universe.createNew()
        val e1 = Flip(0.4)
        val e2 = Flip(0.3)
        val e3 = e1 && e2
        StructuredVEBPChooser.probability(e3, true) should be (0.12 +- 0.000000001)
      }
    }

    "expanding an argument that is used more than once" should {
      "only expand the argument once" in {
        var count = 0
        Universe.createNew()
        val e1 = Apply(Constant(true), (b: Boolean) => { count += 1; 5 })
        val e2 = e1 === e1
        StructuredVEBPChooser.probability(e2, true) should equal (1.0)
        count should equal (1)
        // Note that this should now only expand once since Apply Maps have been added to Components
      }
    }

    "expanding an argument that needs another argument later expanded" should {
      "create values for the ancestor argument first" in {
        Universe.createNew()
        val e1 = Flip(0.4)
        val e2 = If(e1, Constant(1), Constant(2))
        val e3 = Apply(e2, e1, (i: Int, b: Boolean) => if (b) i + 1 else i + 2)
        // e3 is 2 iff e1 is true, because then e2 is 1
        StructuredVEBPChooser.probability(e3, 2) should be (0.4 +- 0.000000001)
      }
    }

    "solving a problem with a reused nested subproblem" should {
      "only process the nested subproblem once" in {
        var count = 0
        val f = (p: Boolean) => {
          count += 1
          Constant(p)
        }
        val e1 = Chain(Flip(0.5), f)
        val e2 = Chain(Flip(0.4), f)
        val e3 = e1 && e2
        StructuredVEBPChooser.probability(e3, true) should be ((0.5 * 0.4) +- 0.000000001)
        count should equal (2) // One each for p = true and p = false, but only expanded once
      }
    }

    "given a problem with unneeded elements in the universe" should {
      "not process the unneeded elements" in {
        var count = 0
        // e1 is never an ancestor of the query target, so its Apply must not run.
        val e1 = Apply(Constant(1), (i: Int) => { count += 1; 5 })
        val e2 = Flip(0.5)
        StructuredVEBPChooser.probability(e2, true) should equal (0.5)
        count should equal (0)
      }
    }
  }
}
| scottcb/figaro | Figaro/src/test/scala/com/cra/figaro/test/algorithm/structured/strategy/StructuredVEBPChooserTest.scala | Scala | bsd-3-clause | 6,518 |
/*******************************************************************************/
/* */
/* Copyright (C) 2017 by Max Lv <max.c.lv@gmail.com> */
/* Copyright (C) 2017 by Mygod Studio <contact-shadowsocks-android@mygod.be> */
/* */
/* This program is free software: you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation, either version 3 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* */
/*******************************************************************************/
package com.github.shadowsocks
import java.nio.charset.Charset

import android.app.{Activity, DialogFragment}
import android.nfc.{NdefMessage, NdefRecord, NfcAdapter}
import android.os.Bundle
import android.view.{LayoutInflater, View, ViewGroup}
import android.widget.{ImageView, LinearLayout}

import net.glxn.qrgen.android.QRCode
object QRCodeDialog {
  // Key under which the shareable URL is stored in the fragment's arguments Bundle.
  private final val KEY_URL = "com.github.shadowsocks.QRCodeDialog.KEY_URL"
}
/**
 * Dialog fragment that renders the profile's share URL as a QR code image and,
 * when NFC is available, also advertises the URL to nearby devices as an NDEF
 * push message.
 */
final class QRCodeDialog extends DialogFragment {
  import QRCodeDialog._

  /**
   * Convenience constructor: stores the URL in the arguments Bundle so that it
   * survives fragment re-creation (the framework requires a no-arg constructor).
   */
  def this(url: String) {
    this()
    val bundle = new Bundle()
    bundle.putString(KEY_URL, url)
    setArguments(bundle)
  }

  private def url = getArguments.getString(KEY_URL)
  // Raw UTF-8 bytes of the URL, used as both the NDEF record payload and record id.
  private lazy val nfcShareItem = url.getBytes(Charset.forName("UTF-8"))
  private var adapter: NfcAdapter = _

  override def onCreateView(inflater: LayoutInflater, container: ViewGroup, savedInstanceState: Bundle): View = {
    val image = new ImageView(getActivity)
    image.setLayoutParams(new LinearLayout.LayoutParams(-1, -1)) // -1 == MATCH_PARENT in both dimensions
    val size = getResources.getDimensionPixelSize(R.dimen.qr_code_size)
    val qrcode = QRCode.from(url)
      .withSize(size, size)
      .asInstanceOf[QRCode].bitmap()
    image.setImageBitmap(qrcode)
    image
  }

  override def onAttach(activity: Activity) {
    // Fixed: was garbled as "superOnAttach(activity)", which does not compile.
    super.onAttach(activity)
    adapter = NfcAdapter.getDefaultAdapter(getActivity)
    if (adapter != null) adapter.setNdefPushMessage(new NdefMessage(Array(
      new NdefRecord(NdefRecord.TNF_ABSOLUTE_URI, nfcShareItem, Array[Byte](), nfcShareItem))), activity)
  }

  override def onDetach() {
    // Stop advertising the NDEF message once the fragment leaves its activity.
    if (adapter != null) {
      adapter.setNdefPushMessage(null, getActivity)
      adapter = null
    }
    super.onDetach()
  }
}
| hangox/shadowsocks-android | mobile/src/main/scala/com/github/shadowsocks/QRCodeDialog.scala | Scala | gpl-3.0 | 3,263 |
/*
Copyright (c) 2009-2012, The Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the University of California nor the names of
its contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.cdlib.was.weari;
import org.xml.sax.{Attributes,ContentHandler,Locator};
/**
* A proxy ContentHandler that passes everything off to multiple
* child ContentHandlers.
*/
/**
 * A proxy ContentHandler that forwards every SAX event it receives to each
 * of the supplied child ContentHandlers, in the order they appear in `handlers`.
 */
class MultiContentHander(handlers: Seq[ContentHandler]) extends ContentHandler {

  /** Runs `f` once per child handler, preserving the sequence order. */
  def eachHandler(f: ContentHandler => Unit): Unit = handlers.foreach(f)

  def characters(ch: Array[Char], start: Int, length: Int): Unit =
    eachHandler(h => h.characters(ch, start, length))

  def endDocument: Unit = eachHandler(h => h.endDocument)

  def endElement(namespaceURI: String, localName: String, qName: String): Unit =
    eachHandler(h => h.endElement(namespaceURI, localName, qName))

  def endPrefixMapping(prefix: String): Unit =
    eachHandler(h => h.endPrefixMapping(prefix))

  def ignorableWhitespace(ch: Array[Char], start: Int, length: Int): Unit =
    eachHandler(h => h.ignorableWhitespace(ch, start, length))

  def processingInstruction(target: String, data: String): Unit =
    eachHandler(h => h.processingInstruction(target, data))

  def setDocumentLocator(locator: Locator): Unit =
    eachHandler(h => h.setDocumentLocator(locator))

  def skippedEntity(name: String): Unit =
    eachHandler(h => h.skippedEntity(name))

  def startDocument: Unit = eachHandler(h => h.startDocument)

  def startElement(namespaceURI: String, localName: String, qName: String, atts: Attributes): Unit =
    eachHandler(h => h.startElement(namespaceURI, localName, qName, atts))

  def startPrefixMapping(prefix: String, uri: String): Unit =
    eachHandler(h => h.startPrefixMapping(prefix, uri))
}
| cdlib/weari | src/main/scala/org/cdlib/was/weari/MultiContentHandler.scala | Scala | bsd-3-clause | 3,051 |
package akkaprototype.test
import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import akkaprototype.internal.actors.AggregateMessageDataActor
import akkaprototype.internal.messages.{ActionData, ProcessAction, ProcessActionTimeOut}
import akkaprototype.stubs.{FindDataOneStub, FindDataThreeStub, FindDataTwoStub, FindDataTwoStubTimeOut}
import org.scalatest.WordSpecLike
import org.scalatest.matchers.MustMatchers
import scala.concurrent.duration._
class TestService extends TestKit(ActorSystem("TestAS")) with ImplicitSender with WordSpecLike with MustMatchers {
"An Agregegate message" should {
"return a list of diferente action data" in {
val probe1 = TestProbe()
val probe2 = TestProbe()
val actionOneActor = system.actorOf(Props[FindDataOneStub], "find-data-one-stub")
val actionTwoActor = system.actorOf(Props[FindDataTwoStub], "find-data-two-stub")
val actionThreeActor = system.actorOf(Props[FindDataThreeStub], "find-data-three-stub")
val agregateDataActor = system.actorOf(
Props(new AggregateMessageDataActor(actionOneActor,actionTwoActor,actionThreeActor)), "agregate-data-actor")
within(4000 milliseconds) {
probe1.send(agregateDataActor, ProcessAction(1L))
val result = probe1.expectMsgType[ActionData]
result must equal(ActionData(Some(List((3l, 15000d))), Some(List((1l, 150000d), (2l, 29000d))),
Some(List())))
}
within(4000 milliseconds) {
probe2.send(agregateDataActor, ProcessAction(2L))
val result = probe2.expectMsgType[ActionData]
result must equal(
ActionData(Some(List((6l, 640000d), (7l, 1125000d), (8l, 40000d))), Some(List((5l, 80000d))),
Some(List((9l, 640000d), (10l, 1125000d), (11l, 40000d)))))
}
}
"return a TimeoutException when timeout is exceeded" in {
val actionOneActor = system.actorOf(Props[FindDataOneStub], "find-data-one-stub-timeout")
val actionTwoActorTimeOut = system.actorOf(Props[FindDataTwoStubTimeOut], "find-data-two-stub-timeout")
val actionThreeActor = system.actorOf(Props[FindDataThreeStub], "find-data-three-stub-timeout")
val agregateDataActor = system.actorOf(
Props(new AggregateMessageDataActor(actionOneActor, actionTwoActorTimeOut, actionThreeActor)),
"agregate-data-actor-timeout")
val probe = TestProbe()
within(250 milliseconds, 500 milliseconds) {
probe.send(agregateDataActor, ProcessAction(1L))
probe.expectMsg(ProcessActionTimeOut)
}
}
}
} | fenoloco/akka-prototype | akka-prototype/src/test/scala/akkaprototype/test/AgregateDataActor.scala | Scala | gpl-2.0 | 2,600 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.domain.policies
import scala.collection.mutable.Buffer
import scala.xml._
import com.normation.utils.Utils.nonEmpty
import com.normation.cfclerk.domain.TechniqueVersion
import com.normation.utils.HashcodeCaching
import com.normation.cfclerk.domain.SectionSpec
import com.normation.cfclerk.domain.Technique
case class DirectiveId(value:String) extends HashcodeCaching
/**
* Define a directive.
*
* From a business point of view, a directive is a general
* policy about your infrastructure, like "our password must be
* 10 chars, mixed symbol, case, number".
*
* In Rudder, a Directive is derived from a technique on which
* we are going to bind parameter to values matching the business
* directive. For example, in our example, it could be
* "Unix Password management with passwd"
*
* A directive also keep other information, like the priority
* of that directive compared to other directive derived from the
* the same technique.
*
*/
case class Directive(
  id:DirectiveId,
  //TODO: why not keeping techniqueName here ? data duplication ?
  /**
   * They reference one and only one Technique version
   */
  techniqueVersion:TechniqueVersion,
  /**
   * The list of parameters with their values.
   * TODO: I really would like to be able to not allow to set bad parameters here,
   * that is, parameters that are not in the technique.
   * For now, say it's done by construction.
   */
  parameters:Map[String, Seq[String]],
  /**
   * A human readable name for that directive,
   * typically used for CSV/grid header
   * i.e: "SEC-042 Debian Etch"
   * Can not be empty nor null.
   */
  name:String,
  /**
   * Short description, typically used as field description
   * Can not be empty nor null.
   */
  shortDescription:String,
  /**
   * A long, detailed description, typically used for
   * tooltip. It allows rich content.
   * Can be empty (and is by default).
   */
  longDescription:String = "",
  /**
   * For policies which allow only one configured instance at
   * a given time for a given node, priority allows to choose
   * the policy to deploy.
   * Higher priority is better, default is 5
   */
  priority:Int = 5,
  /**
   * Define if the policy is activated.
   * If it is not, configuration based on that policy should not be considered
   * for deployment on nodes.
   */
  isEnabled:Boolean = false,
  // NOTE(review): presumably marks directives managed internally by Rudder — TODO confirm
  isSystem:Boolean = false
) extends HashcodeCaching
/**
 * One iteration of a section's values: the nested sections (name -> one
 * SectionVal per iteration) and the variables directly held by this section.
 */
final case class SectionVal(
    sections : Map[String, Seq[SectionVal]] = Map() //name -> values
  , variables : Map[String, String] = Map() //name -> values
) extends HashcodeCaching
/**
 * Helpers for converting between the flat representation of directive values
 * (variable name -> sequence of values, one entry per section iteration) and
 * the tree-shaped [[SectionVal]] representation that mirrors the technique's
 * section specification.
 *
 * Note: the closing brace of this object was previously corrupted by trailing
 * non-code text, which broke compilation.
 */
object SectionVal {
  val ROOT_SECTION_NAME = "sections"

  /**
   * Render a SectionVal tree as XML: variables first (sorted by name), then
   * sub-sections (sorted by name, one <section> element per iteration).
   */
  def toXml(sv:SectionVal, sectionName:String = ROOT_SECTION_NAME): Node = {
    <section name={sectionName}>
      { //variables
        sv.variables.toSeq.sortBy(_._1).map { case (variable,value) =>
          <var name={variable}>{value}</var>
        } ++
        //section
        (for {
          (sectionName, sectionIterations) <- sv.sections.toSeq.sortBy(_._1)
          sectionValue <- sectionIterations
        } yield {
          this.toXml(sectionValue,sectionName)
        })
      }
    </section>
  }

  /**
   * Rebuild the SectionVal tree for `rootSection` from the flat variable map.
   * A multivalued section takes its cardinality from the size of the value
   * sequence of one of its variables. A multivalued section nested inside
   * another multivalued section is not supported and raises a RuntimeException.
   */
  def directiveValToSectionVal(rootSection:SectionSpec, allValues:Map[String,Seq[String]]) : SectionVal = {

    // Build iteration `index` of a section nested inside a multivalued parent:
    // each of its variables takes the index-th value of its flat sequence.
    def buildMonoSectionWithMultivaluedParent(spec:SectionSpec, index:Int) : SectionVal = {
      if(spec.isMultivalued) throw new RuntimeException("We found a multivalued subsection of a multivalued section: " + spec)

      //variable for that section: Map[String, String]
      val variables = spec.getDirectVariables.map { vspec =>
        (vspec.name, allValues(vspec.name)(index))
      }.toMap

      // Sub-sections are necessarily mono-valued here (checked above).
      val subsections = spec.getDirectSections.map { sspec =>
        (sspec.name, Seq(buildMonoSectionWithMultivaluedParent(sspec,index)))
      }.toMap

      SectionVal(subsections, variables)
    }

    // Build all iterations of a multivalued section whose parent is mono-valued.
    def buildMultiSectionWithoutMultiParent(spec:SectionSpec) : Seq[SectionVal] = {
      if(!spec.isMultivalued) throw new RuntimeException("We found a monovalued section where a multivalued section was asked for: " + spec)

      // Find the number of iterations for that multivalued section:
      // try a direct variable first, else the first direct sub-section that has one.
      val cardinal = {
        val name = spec.getDirectVariables.toList match {
          case v :: tail => v.name
          case _ => //look for the first section with a var
            spec.getDirectSections.find { s => s.getDirectVariables.nonEmpty }.map { s =>
              s.getDirectVariables.head.name
            }.getOrElse("NO VARIABLE !!!") //used name should not be a key
        }
        allValues.get(name).map( _.size ).getOrElse(0)
      }

      // Variables of iteration i take the i-th value of each flat sequence.
      val multiVariables : Seq[Map[String,String]] = {
        for {
          i <- 0 until cardinal
        } yield {
          spec.getDirectVariables.map { vspec => (vspec.name, allValues(vspec.name)(i)) }.toMap
        }
      }

      // Sub-sections of iteration i, built with the multivalued-parent rule.
      val multiSections : Seq[Map[String, SectionVal]] = {
        for {
          i <- 0 until cardinal
        } yield {
          spec.getDirectSections.map { sspec =>
            ( sspec.name, buildMonoSectionWithMultivaluedParent(sspec, i) )
          }.toMap
        }
      }

      for {
        i <- 0 until cardinal
      } yield {
        //here, children section must be with a cardinal of 1 (monovalued)
        val sections = multiSections(i).map { case(k,s) => (k,Seq(s)) }.toMap
        SectionVal(sections, multiVariables(i))
      }
    }

    // Build a mono-valued section whose parent is also mono-valued
    // (each variable takes index 0 of its sequence).
    def buildMonoSectionWithoutMultivaluedParent(spec:SectionSpec) : SectionVal = {
      val variables = spec.getDirectVariables.map { vspec =>
        //we can have a empty value for a variable, for non mandatory ones
        (vspec.name, allValues.getOrElse(vspec.name,Seq(""))(0))
      }.toMap

      val sections = spec.getDirectSections.map { vspec =>
        if(vspec.isMultivalued) {
          (vspec.name, buildMultiSectionWithoutMultiParent(vspec))
        } else {
          (vspec.name, Seq(buildMonoSectionWithoutMultivaluedParent(vspec)))
        }
      }.toMap

      SectionVal(sections,variables)
    }

    buildMonoSectionWithoutMultivaluedParent(rootSection)
  }

  /**
   * Flatten a SectionVal tree back into the variable name -> sequence of
   * values map, appending one value per occurrence in depth-first order.
   */
  def toMapVariables(sv:SectionVal) : Map[String,Seq[String]] = {
    import scala.collection.mutable.{Map, Buffer}
    val res = Map[String, Buffer[String]]()

    def recToMap(sec:SectionVal) : Unit = {
      sec.variables.foreach { case (name,value) =>
        res.getOrElseUpdate(name, Buffer()).append(value)
      }
      sec.sections.foreach { case (_, sections) =>
        sections.foreach { recToMap( _ ) }
      }
    }
    recToMap(sv)

    res.map { case (k,buf) => (k,buf.toSeq) }.toMap
  }
}
package org.crockeo.genericplatformer.launcher
import javax.swing._
/**
 * Top-level Swing window for the launcher: builds the frame around a
 * LauncherPanel, sizes it to the panel's preferred size, centers it on
 * screen, and makes it visible immediately on construction.
 *
 * Note: the closing brace of this class was previously corrupted by trailing
 * non-code text, which broke compilation.
 */
class LauncherWindow extends JFrame {
  setTitle("Generic Platformer")

  // The panel supplies both the content and (via pack) the window size.
  val panel: LauncherPanel = new LauncherPanel(this)
  add(panel)

  pack // size the frame to the panel's preferred size before positioning
  setResizable(false)
  setLocationRelativeTo(null) // a null parent centers the window on screen
  setVisible(true)
}
package com.twitter.inject.tests.thrift.utils
import com.twitter.inject.Test
import com.twitter.inject.thrift.utils.ThriftMethodUtils
import com.twitter.scrooge.{ThriftStructCodec3, ThriftMethod}
/**
 * Verifies that ThriftMethodUtils.prettyStr renders a ThriftMethod as
 * "<serviceName>.<name>", using a minimal anonymous stub whose unused
 * members are left unimplemented (???).
 */
class ThriftMethodUtilsTest extends Test {
  "ThriftMethodUtils" should {
    "return pretty string" in {
      val method = new ThriftMethod {
        override val name = "Foo"
        /** Convert a function implementation of this method into a service implementation */
        override def functionToService(f: FunctionType): ServiceType = ???
        /** Convert a service implementation of this method into a function implementation */
        override def serviceToFunction(svc: ServiceType): FunctionType = ???
        /** Thrift service name. A thrift service is a list of methods. */
        override val serviceName: String = "FooService"
        /** Codec for the request args */
        override def argsCodec: ThriftStructCodec3[Args] = ???
        /** Codec for the response */
        override def responseCodec: ThriftStructCodec3[Result] = ???
        /** True for oneway thrift methods */
        override val oneway: Boolean = false
      }
      // Expected format: serviceName "." methodName
      val prettyString = ThriftMethodUtils.prettyStr(method)
      prettyString should be("FooService.Foo")
    }
  }
}
| syamantm/finatra | inject/inject-thrift/src/test/scala/com/twitter/inject/tests/thrift/utils/ThriftMethodUtilsTest.scala | Scala | apache-2.0 | 1,288 |
/*
* Copyright (C) 2011, Mysema Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mysema.scalagen
import java.io.File
import java.io.FileInputStream
import java.io.IOException
import org.junit.Test
import japa.parser.JavaParser
import com.mysema.scala.CompileTestUtils
import org.junit.Assert._
/**
 * Converts every Java example under src/test/scala/com/mysema/examples to
 * Scala and verifies that the generated sources actually compile, reporting
 * all failures at once.
 *
 * Note: the closing brace of this class was previously corrupted by trailing
 * non-code text, which broke compilation.
 */
class ScalaCompilationTest extends AbstractParserTest with CompileTestUtils {
  @Test
  def Compile {
    val resources = List[File](new File("src/test/scala/com/mysema/examples").listFiles(): _*)
    // Convert each .java example and attempt to compile the generated Scala;
    // collect (fileName -> message) for every failure instead of stopping at the first.
    val failures = resources.filter(_.getName.endsWith(".java")).map { f =>
      val unit = JavaParser.parse(new FileInputStream(f)) // was a `var` that is never reassigned
      val source = toScala(unit)
      try {
        assertCompileSuccess(source)
        null // success marker, filtered out below
      } catch {
        case e: AssertionError => (f.getName, e.getMessage)
        //case e: Exception => (f.getName, e.getMessage)
      }
    }.toList.filter(_ != null).toMap
    failures.foreach { case (n, m) => System.err.println(n + " => " + m) }
    assertTrue(
      failures.size + " of " + resources.size + " failures : " + failures.keys.mkString(", "),
      failures.isEmpty)
  }
}
package org.lolhens.renderengine.vector
/**
 * Immutable 3-component vector over a numeric type T.
 *
 * Concrete subclasses fix `Self` to their own type and implement the abstract
 * factory `Vector3(x, y, z)` plus the primitive arithmetic, so that all the
 * derived operations below return the concrete subtype.
 */
abstract class Vector3[@specialized(Int, Long, Float, Double) T] protected(val x: T,
                                                                           val y: T,
                                                                           val z: T) {
  // The concrete subtype returned by every operation.
  type Self <: Vector3[T]
  // Abstract factory: builds a new Self from raw components.
  def Vector3(x: T, y: T, z: T): Self
  // Copies with a single component replaced.
  def withX(x: T): Self = Vector3(x, y, z)
  def withY(y: T): Self = Vector3(x, y, z)
  def withZ(z: T): Self = Vector3(x, y, z)
  // Copies with a single component transformed by f.
  def mapX(f: T => T): Self = withX(f(x))
  def mapY(f: T => T): Self = withY(f(y))
  def mapZ(f: T => T): Self = withZ(f(z))
  // NOTE(review): presumably true when all components are 0 / 1 respectively — TODO confirm in subclasses.
  def isZero: Boolean
  def isOne: Boolean
  def unary_- : Self
  // Component-wise arithmetic primitives, implemented by subclasses.
  def +(x: T, y: T, z: T): Self
  def -(x: T, y: T, z: T): Self
  def *(x: T, y: T, z: T): Self
  def /(x: T, y: T, z: T): Self
  // Vector-vector operations; isZero/isOne short-circuits return the other operand unchanged.
  def +(vec: Self): Self = if (isZero) vec else this + (vec.x, vec.y, vec.z)
  def -(vec: Self): Self = this - (vec.x, vec.y, vec.z)
  def *(vec: Self): Self = if (isOne) vec else this * (vec.x, vec.y, vec.z)
  def /(vec: Self): Self = this / (vec.x, vec.y, vec.z)
  // Scalar operations: applies `value` to every component.
  def +(value: T): Self = this + (value, value, value)
  def -(value: T): Self = this - (value, value, value)
  def *(value: T): Self = this * (value, value, value)
  def /(value: T): Self = this / (value, value, value)
  // Squared length (avoids the square root) and length, supplied by subclasses.
  def `length²`: T
  def length: T
  // Unit vector in the same direction; divides by zero length — caller beware.
  def normalized: Self = this / length
}
| LolHens/LibRenderEngine | src/main/scala/org/lolhens/renderengine/vector/Vector3.scala | Scala | gpl-2.0 | 1,380 |
package akka
import aims.routing.PatternMatcher
import aims.routing.Patterns._
import akka.http.model.Uri.Path
import akka.http.server.PathMatcher.{ Matched, _ }
import org.scalatest.FunSuite
/**
* Component:
* Description:
* Date: 15/1/13
* @author Andy Ai
*/
/**
 * Exploratory tests for PatternMatcher / akka-http PathMatcher combinators.
 * The helpers `m` and `mp` only print whether the expression matched; these
 * tests make no assertions.
 */
class HttpTest extends FunSuite {
  test("http test") {
    val matcher = "systems" / Segment / "applications" / IntNumber / "users"
    // Extends the matcher with a trailing IntNumber; the result is discarded.
    matcher / IntNumber
    m {
      matcher.apply(Path("systems/1234/applications/4321/users/789"))
    }
    m {
      ph("ping" / Segment) apply Path("/ping/name")
    }
  }
  test("Path context 1") {
    // String-based pattern: ":name" and "#name" are placeholder segments.
    mp {
      val pattern = "systems/:systemId/applications/#applicationId"
      val matcher = PatternMatcher(pattern)
      matcher.apply(Path("systems/system-2/applications/521"))
    }
  }
  test("Path context 2") {
    // Same match expressed with PathMatcher combinators instead of a string pattern.
    mp {
      val matcher = PatternMatcher("systems" / Segment / "applications" / LongNumber)
      matcher.apply(Path("systems/system-1/applications/520"))
    }
  }
  test("Question maker") {
    // Probes how a leading Slash affects matching with and without it in the path.
    val matcher = Slash ~ "system" / IntNumber
    m { matcher.apply(Path("/system")) }
    m { matcher.apply(Path("/system/123")) }
    m { matcher.apply(Path("system")) }
    m { matcher.apply(Path("system/123")) }
  }
  // Prints the extraction's class on a PathMatcher match, "unmatched" otherwise.
  // (The by-name parameter type `{}` is an empty structural refinement, effectively AnyRef.)
  def m(m: ⇒ {}) = {
    m match {
      case Matched(_, exts) ⇒ println(exts.getClass)
      case _ ⇒ println("unmatched")
    }
  }
  // Same, for Option-returning matchers (PatternMatcher results).
  def mp(m: ⇒ {}) = {
    m match {
      case Some(s) ⇒ println(s)
      case None ⇒ println("unmatched")
    }
  }
}
| aiyanbo/aims | aims-core/src/test/scala/akka/HttpTest.scala | Scala | mit | 1,533 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets matching specific criteria, giving a basic overview of the dataset's contents without deeper analysis.