code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package org.orbeon.oxf.xml
import java.{util => ju}
import javax.xml.transform.{Source, SourceLocator}
import org.orbeon.oxf.util.{IndentedLogger, Logging, StaticXPath}
import org.orbeon.saxon.Configuration
import org.orbeon.saxon.expr._
import org.orbeon.saxon.functions.{FunctionLibrary, FunctionLibraryList}
import org.orbeon.saxon.om.{NamespaceResolver, StructuredQName}
import org.orbeon.saxon.sxpath.{AbstractStaticContext, XPathStaticContext}
import org.orbeon.saxon.trans.XPathException
import org.orbeon.saxon.value.{QNameValue, SequenceType}
import org.orbeon.xml.NamespaceMapping
import scala.jdk.CollectionConverters._
// Similar to Saxon JAXPXPathStaticContext. JAXPXPathStaticContext holds a reference to an XPathVariableResolver, which
// is not desirable as variable resolution occurs at runtime. So here instead we create a fully shareable context.
class ShareableXPathStaticContext(
  config           : Configuration,
  namespaceMapping : NamespaceMapping,
  functionLibrary  : FunctionLibrary)(implicit
  logger           : IndentedLogger
) extends AbstractStaticContext
  with XPathStaticContext
  with Logging {

  // This also creates an Executable
  setConfiguration(config)

  // Add the custom function library at position 0 so it takes precedence over the defaults.
  // NOTE(review): the double cast assumes the default library is a `FunctionLibraryList`
  // holding `FunctionLibrary` entries — confirm against the Saxon version in use.
  setDefaultFunctionLibrary()
  getFunctionLibrary.asInstanceOf[FunctionLibraryList].libraryList.asInstanceOf[ju.List[FunctionLibrary]].add(0, functionLibrary)

  // This should be unused as we handle global variables differently
  private val stackFrameMap = config.makeSlotManager
  def getStackFrameMap = stackFrameMap

  // Return the names of global variables referenced by the expression after it has been parsed
  private var boundVariables = Set.empty[StructuredQName]
  def referencedVariables: Iterable[StructuredQName] = boundVariables

  def declareVariable(qname: QNameValue) = throw new IllegalStateException // never used in Saxon
  def declareVariable(namespaceURI: String, localName: String) = throw new IllegalStateException // shouldn't be called in our case

  // Record the referenced variable name and return a reference whose binding resolves lazily at runtime
  def bindVariable(qName: StructuredQName): VariableReference = {
    // Q: Can this be called multiple time with the same name, and if so should we return the same VariableReference?
    boundVariables += qName
    new VariableReference(new VariableBinding(qName))
  }

  // Per Saxon: "used to represent the run-time properties and methods associated with a variable: specifically, a
  // method to get the value of the variable".
  class VariableBinding(qName: StructuredQName) extends Binding {
    def isGlobal = true
    def isAssignable = false
    def getLocalSlotNumber = -1 // "If this is a local variable held on the local stack frame"
    def getVariableQName = qName
    def getRequiredType = SequenceType.ANY_SEQUENCE

    // Saxon does something similar but different in XPathVariable, where it stores variables in the the dynamic
    // context. That uses slots however, which means we cannot resolve variables fully dynamically. So I think
    // our approach below is ok.
    def evaluateVariable(context: XPathContext) = {
      if (context.getController eq null)
        throw new NullPointerException

      // The resolver is expected to have been stored as user data on the Controller,
      // keyed by this class's name — see the "variableResolver" key.
      val variableResolver =
        context.getController.getUserData(
          classOf[ShareableXPathStaticContext].getName,
          "variableResolver"
        ).asInstanceOf[StaticXPath.VariableResolver]

      variableResolver(qName, context)
    }
  }

  // Namespace resolver backed by the immutable `NamespaceMapping` passed at construction
  object NSResolver extends NamespaceResolver {
    def getURIForPrefix(prefix: String, useDefault: Boolean) =
      if (prefix == "") {
        if (useDefault)
          getDefaultElementNamespace
        else
          ""
      } else
        namespaceMapping.mapping.getOrElse(prefix, null) // null signals "not declared" to Saxon

    def iteratePrefixes =
      namespaceMapping.mapping.keySet.iterator.asJava
  }

  // Strict variant: unknown prefixes raise an XPathException instead of returning null
  def getURIForPrefix(prefix: String): String = {
    val uri = NSResolver.getURIForPrefix(prefix, useDefault = false)
    if (uri == null)
      throw new XPathException("Prefix " + prefix + " has not been declared")
    uri
  }

  def getNamespaceResolver = NSResolver
  def setNamespaceResolver(resolver: NamespaceResolver) = throw new IllegalStateException

  // Schema stuff which we don't support
  def importSchema(source: Source) = getConfiguration.addSchemaSource(source, getConfiguration.getErrorListener)
  def isImportedSchema(namespace: String) = getConfiguration.isSchemaAvailable(namespace)
  def getImportedSchemaNamespaces = getConfiguration.getImportedNamespaces

  // Route Saxon warnings through our logging abstraction (debug level)
  override def issueWarning(s: String, locator: SourceLocator) =
    if (logger ne null)
      debug(s)
}
| orbeon/orbeon-forms | core-cross-platform/jvm/src/main/scala/org/orbeon/oxf/xml/ShareableXPathStaticContext.scala | Scala | lgpl-2.1 | 4,625 |
package example
object Lists {

  /**
   * Computes the sum of all elements in the list `xs` by direct recursion:
   * an empty list sums to 0, otherwise the head is added to the sum of the tail.
   *
   * Rewritten to use pattern matching instead of `isEmpty`/`head`/`tail` plus
   * `return` statements — `return` in Scala is non-idiomatic and, inside a
   * lambda, would compile to a nonlocal return.
   *
   * @param xs A list of natural numbers
   * @return The sum of all elements in `xs`
   */
  def sum(xs: List[Int]): Int = xs match {
    case Nil          => 0
    case head :: tail => head + sum(tail)
  }

  /**
   * Returns the largest element in a list of integers. If the list `xs` is
   * empty it throws a `java.util.NoSuchElementException`.
   *
   * @param xs A list of natural numbers
   * @return The largest element in `xs`
   * @throws java.util.NoSuchElementException if `xs` is an empty list
   */
  def max(xs: List[Int]): Int = xs match {
    case Nil          => throw new java.util.NoSuchElementException("max of empty list")
    case head :: Nil  => head
    case head :: tail => head.max(max(tail))
  }
}
| Gerula/Books | Courses/Functional_Programming_in_Scala/Test/example/src/main/scala/example/Lists.scala | Scala | isc | 1,780 |
package com.ibm.watson.developer_cloud.visual_recognition.v1.model
import com.ibm.watson.developer_cloud.service.GenericModel
/**
* Created by Martin Harvan (martin.harvan@sk.ibm.com) on 20/03/16.
*/
/**
 * Immutable holder for a collection of labels and label groups.
 * Both lists may be null (e.g. when deserialized); the builders below
 * tolerate that by treating null as "no entries yet".
 */
case class LabelSet(labelGroups: List[String], labels: List[String]) extends GenericModel {

  /** Returns a copy of this set with `label` prepended to the labels; a null list becomes a singleton. */
  def withLabel(label: String) : LabelSet =
    Option(labels).fold(LabelSet(labelGroups, List(label)))(existing => LabelSet(labelGroups, label :: existing))

  /** Returns a copy of this set with `labelGroup` prepended to the label groups; a null list becomes a singleton. */
  def withLabelGroup(labelGroup: String) : LabelSet =
    Option(labelGroups).fold(LabelSet(List(labelGroup), labels))(existing => LabelSet(labelGroup :: existing, labels))
}
| kane77/watson-scala-wrapper | src/main/scala/com/ibm/watson/developer_cloud/visual_recognition/v1/model/LabelSet.scala | Scala | apache-2.0 | 712 |
package org.scaladebugger.api.lowlevel.requests.filters.processors
import com.sun.jdi.request._
import org.scalamock.scalatest.MockFactory
import org.scalatest.{FunSpec, Matchers, ParallelTestExecution}
import org.scaladebugger.api.lowlevel.requests.filters.CountFilter
import org.scaladebugger.test.helpers.ParallelMockFunSpec
// Verifies that CountFilterProcessor applies the configured count filter to a JDI event request
class CountFilterProcessorSpec extends ParallelMockFunSpec
{
  // Arbitrary count value; the test only checks it is forwarded unchanged
  private val testCount = 3
  private val countFilter = CountFilter(count = testCount)
  private val countProcessor = new CountFilterProcessor(countFilter)

  describe("CountFilterProcessor") {
    describe("#process") {
      it("should add the count for all requests") {
        val mockEventRequest = mock[EventRequest]

        // Expect exactly one addCountFilter call carrying the configured count
        (mockEventRequest.addCountFilter _).expects(testCount)

        countProcessor.process(mockEventRequest)
      }
    }
  }
}
| chipsenkbeil/scala-debugger | scala-debugger-api/src/test/scala/org/scaladebugger/api/lowlevel/requests/filters/processors/CountFilterProcessorSpec.scala | Scala | apache-2.0 | 848 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package javaguide.http
import javaguide.application.`def`.ErrorHandler
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.mvc.Action
import play.api.test._
import scala.reflect.ClassTag
// Integration spec exercising custom Java error handlers wired via the
// `play.http.errorHandler` configuration key.
class JavaErrorHandling extends PlaySpecification with WsTestClient {

  // Builds an application configured to use handler class `A`, with a single
  // `/error` route that always throws so the handler's output can be observed.
  def fakeApp[A](implicit ct: ClassTag[A]) = {
    GuiceApplicationBuilder()
      .configure("play.http.errorHandler" -> ct.runtimeClass.getName)
      .routes {
        case (_, "/error") => Action(_ => throw new RuntimeException("foo"))
      }
      .build()
  }

  "java error handling" should {
    // Root-package handler: produces the default "A server error occurred" prefix
    "allow providing a custom error handler" in new WithServer(fakeApp[javaguide.application.root.ErrorHandler]) {
      await(wsUrl("/error").get()).body must startWith("A server error occurred: ")
    }

    // NOTE(review): same example name as above, but this one exercises the imported
    // `javaguide.application.def.ErrorHandler` and asserts the opposite prefix behavior
    "allow providing a custom error handler" in new WithServer(fakeApp[ErrorHandler]) {
      await(wsUrl("/error").get()).body must not startWith("A server error occurred: ")
    }
  }
}
| wsargent/playframework | documentation/manual/working/javaGuide/main/http/code/javaguide/http/JavaErrorHandling.scala | Scala | apache-2.0 | 1,052 |
package tables.seed
import java.text.SimpleDateFormat
import models.EntityBalance
/**
*
* Created by cravefm on 10/2/15.
*/
/**
 * Static seed data: one EntityBalance per account type, all for entity id 1.
 */
object InitBalances {

  // NOTE(review): not referenced within this object — presumably used by callers
  // that parse seed dates (MM/dd/yyyy); confirm before removing.
  val dateFormat = new SimpleDateFormat("MM/dd/yyyy")

  // Same values as before, reformatted to one row per balance for readability.
  val list = List(
    EntityBalance(id = 1, account = "CHQ",  balance = 500000,    balanceDiff = -10000,  time = "5d9h"),
    EntityBalance(id = 1, account = "SAV",  balance = 132551045, balanceDiff = 1150000, time = "1d0h"),
    EntityBalance(id = 1, account = "VISA", balance = 499999,    balanceDiff = 29,      time = "15d0h"),
    EntityBalance(id = 1, account = "MC",   balance = 1500,      balanceDiff = -250000, time = "45d0h")
  )
}
| setrar/rbchackaton | backend/app/tables/seed/InitBalances.scala | Scala | apache-2.0 | 785 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.executionplan
import org.neo4j.cypher.internal.compiler.v2_3.{ExecutionMode, InternalNotificationLogger}
import org.neo4j.cypher.internal.compiler.v2_3.pipes._
import org.neo4j.cypher.internal.compiler.v2_3.spi.QueryContext
import org.neo4j.cypher.internal.frontend.v2_3.CypherException
// Mutable builder configured by the execution engine before producing a result.
// Setters install the runtime collaborators; `build` runs the query and yields the result.
trait ExecutionResultBuilder {
  def setQueryContext(context: QueryContext)
  def setLoadCsvPeriodicCommitObserver(batchRowCount: Long)
  def setPipeDecorator(newDecorator: PipeDecorator)
  def setExceptionDecorator(newDecorator: CypherException => CypherException)
  def build(queryId: AnyRef, planType: ExecutionMode, params: Map[String, Any], notificationLogger: InternalNotificationLogger): InternalExecutionResult
}
// Factory yielding a fresh, unconfigured builder for each query execution
trait ExecutionResultBuilderFactory {
  def create(): ExecutionResultBuilder
}
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/executionplan/ExecutionResultBuilder.scala | Scala | apache-2.0 | 1,634 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.fuberlin.wiwiss.silk.testutil
import org.scalatest.matchers.{MatchResult, BeMatcher}
/**
* Matcher to test if 2 values are approximately equal.
*/
/**
 * Matcher to test if 2 values are approximately equal.
 *
 * Generalized: the tolerance is now a parameter with a default equal to the
 * previously hard-coded value, so existing call sites are unaffected while
 * tests needing a different tolerance can supply their own.
 *
 * @param r       the expected value
 * @param epsilon maximum allowed absolute difference (default 0.001)
 */
case class approximatelyEqualTo(r: Double, epsilon: Double = 0.001) extends BeMatcher[Double] {

  def apply(l: Double) =
    MatchResult(
      compare(l, r),
      l + " is not approximately equal to " + r,
      l + " is approximately equal to " + r
    )

  // Absolute-difference comparison; strict inequality, as before
  private def compare(l: Double, r: Double): Boolean = {
    math.abs(l - r) < epsilon
  }
}
import tw.com.ehanlin.mde.MongoEmbedder
class RootAndParentTest extends ReadmeExampleTest { def is = s2"""
RootAndParentTest $test
"""

  // Exercises relative document references in the embedding DSL:
  //   @.    -> current document
  //   @../  -> parent document (repeated segments climb further up)
  //   @.../ -> root document
  // The expected output is the fully embedded JSON for the seed data.
  def test = {
    val dsl =
      """
        |<
        | @find (db=user coll=user query={ postal_code : @.code } projection={ _id : 0 , friends : 1, height : 1 })
        | players
        | [
        | @count (db=user coll=user query={ height : { $gte : @.height } , postal_code : @../../.code })
        | higher
        |
        | @count (db=user coll=user query={ height : { $gte : @.height } , postal_code : @.../.code })
        | higherFriend
        |
        | @findOneById [db=user coll=user, projection={ _id : 0 , height : 1 }]
        | friends
        | [
        | @count (db=user coll=user query={ height : { $gte : @.height } , postal_code : @../../../../.code })
        | higher
        |
        | @count (db=user coll=user query={ height : { $gte : @.height } , postal_code : @.../.code })
        | higherFriend
        | ]
        | ]
        |>
      """.stripMargin

    // Root document supplies only the postal code; everything else is embedded by the DSL
    val result = MongoEmbedder.instance.embed(MObj("code" -> Oid("557e56287a8ea2a9dfe2ef71")), dsl)

    mustString(result,
      """{
        | "code":{
        | "$oid":"557e56287a8ea2a9dfe2ef71"
        | },
        | "players":[
        | {
        | "height":201,
        | "friends":[
        | {"height":220,"higherFriend":1,"higher":1},
        | {"height":183,"higherFriend":4,"higher":4},
        | {"height":208,"higherFriend":2,"higher":2}],
        | "higherFriend":3,
        | "higher":3
        | },
        | {
        | "height":178,
        | "friends":[
        | {"height":183,"higherFriend":4,"higher":4}],
        | "higherFriend":5,
        | "higher":5
        | },
        | {
        | "height":220,
        | "friends":[
        | {"height":201,"higherFriend":3,"higher":3},
        | {"height":211,"higherFriend":2,"higher":2},
        | {"height":218,"higherFriend":1,"higher":1},
        | {"height":208,"higherFriend":2,"higher":2}],
        | "higherFriend":1,
        | "higher":1
        | },
        | {
        | "height":211,
        | "friends":[
        | {"height":220,"higherFriend":1,"higher":1}],
        | "higherFriend":2,
        | "higher":2
        | },
        | {
        | "height":183,
        | "friends":[
        | {"height":201,"higherFriend":3,"higher":3},
        | {"height":178,"higherFriend":5,"higher":5}],
        | "higherFriend":4,
        | "higher":4
        | }
        | ]
        |}""".stripMargin)
  }
}
| eHanlin/mongodb-dbobject-embedder | src/test/scala/RootAndParentTest.scala | Scala | mit | 2,772 |
package maven2sbt.core
import cats.syntax.all._
import hedgehog._
import hedgehog.runner._
/**
* @author Kevin Lee
* @since 2020-09-05
*/
// Property-based tests for BuildSbt.renderListOfFieldValue, covering the
// rendering of sbt `resolvers` (Repository) and `libraryDependencies`
// (Dependency) for empty, singleton, and multi-element inputs, including
// repositories with empty or missing ids/names.
object BuildSbtSpec extends Properties {

  override def tests: List[Test] = List(
    property(
      "[Render][Repository] test BuildSbt.renderListOfFieldValue(None, List.empty[Repository], n)",
      RenderRepositorySpec.testRenderToResolvers0
    ),
    property(
      "[Render][Repository] test BuildSbt.renderListOfFieldValue(None, List(repository), n)",
      RenderRepositorySpec.testRenderToResolvers1
    ),
    property(
      "[Render][Repository] test BuildSbt.renderListOfFieldValue(None, List(repository with empty name), n)",
      RenderRepositorySpec.testRenderToResolvers1WithEmptyName
    ),
    property(
      "[Render][Repository] test BuildSbt.renderListOfFieldValue(None, List(repository with no name (None)), n)",
      RenderRepositorySpec.testRenderToResolvers1WithNoName
    ),
    property(
      "[Render][Repository] test BuildSbt.renderListOfFieldValue(None, List(repository with empty id and empty name), n)",
      RenderRepositorySpec.testRenderToResolvers1WithEmptyIdEmptyName
    ),
    property(
      "[Render][Repository] test BuildSbt.renderListOfFieldValue(None, List(repository with no id (None) and no name (None)), n)",
      RenderRepositorySpec.testRenderToResolvers1WithNoIdNoName
    ),
    property(
      "[Render][Repository] test BuildSbt.renderListOfFieldValue(None, List(repository1, repository2, ...), n)",
      RenderRepositorySpec.testRenderToResolversMany
    ),
    property(
      "[Render][Repository] test BuildSbt.renderListOfFieldValue(None, List(repository1, repository2, ... which may have empty names), n)",
      RenderRepositorySpec.testRenderToResolversManyWithEmptyRepoNames
    ),
    property(
      "[Render][Repository] test BuildSbt.renderListOfFieldValue(None, List(repository1, repository2, ... which may have empty id and empty names), n)",
      RenderRepositorySpec.testRenderToResolversManyWithEmptyRepoIdEmptyRepoNames
    ),
    property(
      "[Render][Repository] test BuildSbt.renderListOfFieldValue(None, List(repository1, repository2, ... which may have no names), n)",
      RenderRepositorySpec.testRenderToResolversManyWithNoRepoNames
    ),
    property(
      "[Render][Repository] test BuildSbt.renderListOfFieldValue(None, List(repository1, repository2, ... which may have no id and no names), n)",
      RenderRepositorySpec.testRenderToResolversManyWithNoRepoIdNoRepoNames
    ),
    property(
      "[Render][Dependency] test BuildSbt.renderListOfFieldValue(None, List.empty[Dependency], n)",
      RenderDependencySpec.testRenderLibraryDependenciesEmpty
    ),
    property(
      "[Render][Dependency] test BuildSbt.renderListOfFieldValue(None, List(dependency), n)",
      RenderDependencySpec.testRenderLibraryDependencies1
    ),
    property(
      "[Render][Dependency] test BuildSbt.renderListOfFieldValue(None, List(dependency1, dependency2, ...), n)",
      RenderDependencySpec.testRenderLibraryDependenciesMany
    )
  )

  // Repository rendering: `n` is the indentation width; the failure branches
  // guard against bugs in the generators themselves.
  // (NOTE(review): "repoNmae" is a pre-existing typo kept intact in identifiers
  // and failure messages.)
  object RenderRepositorySpec {

    // Empty repository list renders to nothing (None)
    def testRenderToResolvers0: Property = for {
      n <- Gen.int(Range.linear(0, 10)).log("n")
    } yield {
      val expected = none[String]
      val propsName = Props.PropsName("testProps")
      val actual = BuildSbt.renderListOfFieldValue(
        none[String],
        List.empty[Repository],
        n
      )(repo => Render[Repository].render(propsName, repo))
      actual ==== expected
    }

    // Single repository with a name: rendered as `resolvers += "<name>" at "<url>"`
    def testRenderToResolvers1: Property = for {
      n <- Gen.int(Range.linear(0, 10)).log("n")
      repository <- Gens.genRepository.log("repository")
    } yield {
      (repository.id, repository.name) match {
        case (_, Some(repoName)) =>
          val propsName = Props.PropsName("testProps")
          val expected = s"""resolvers += "${repoName.value}" at "${repository.url.value}"""".some
          val actual = BuildSbt.renderListOfFieldValue(
            none[String],
            List(repository),
            n
          )(repo => Render[Repository].render(propsName, repo))
          actual ==== expected
        case (Some(_), None) =>
          Result.failure.log(
            s"""> Repository generated by Gens.genRepository has no name.
               |> If you see this message, it means there is a bug in Gens.genRepository.
               |> repository: ${repository.show}
               |""".stripMargin)
        case (None, None) =>
          Result.failure.log(
            s"""> Repository generated by Gens.genRepository has neither id nor name.
               |> If you see this message, it means there is a bug in Gens.genRepository.
               |> repository: ${repository.show}
               |""".stripMargin)
      }
    }

    // Empty name: falls back to the repository id for the resolver name
    def testRenderToResolvers1WithEmptyName: Property = for {
      n <- Gen.int(Range.linear(0, 10)).log("n")
      repository <- Gens.genRepositoryWithEmptyName.log("repository")
    } yield {
      (repository.id, repository.name) match {
        case (Some(repoId), Some(Repository.RepoName(""))) =>
          val propsName = Props.PropsName("testProps")
          val expected = s"""resolvers += "${repoId.value}" at "${repository.url.value}"""".some
          val actual = BuildSbt.renderListOfFieldValue(
            none[String],
            List(repository),
            n
          )(repo => Render[Repository].render(propsName, repo))
          actual ==== expected
        case (repoId, repoNmae) =>
          Result.failure.log(
            s"""> Repository generated by Gens.genRepositoryWithEmptyName is supposed to have id and an empty name
               |> but it has something else. repoId: ${repoId.map(_.show).show}, repoNmae: ${repoNmae.map(_.show).show}
               |> If you see this message, it means there is a bug in Gens.genRepositoryWithEmptyName or using a wrong Gen.
               |> repository: ${repository.show}
               |""".stripMargin)
      }
    }

    // Missing name (None): also falls back to the repository id
    def testRenderToResolvers1WithNoName: Property = for {
      n <- Gen.int(Range.linear(0, 10)).log("n")
      repository <- Gens.genRepositoryWithNoName.log("repository")
    } yield {
      (repository.id, repository.name) match {
        case (Some(repoId), None) =>
          val propsName = Props.PropsName("testProps")
          val expected = s"""resolvers += "${repoId.value}" at "${repository.url.value}"""".some
          val actual = BuildSbt.renderListOfFieldValue(
            none[String],
            List(repository),
            n
          )(repo => Render[Repository].render(propsName, repo))
          actual ==== expected
        case (repoId, repoNmae) =>
          Result.failure.log(
            s"""> Repository generated by Gens.genRepositoryWithNoName is supposed to have id and no name (None)
               |> but it has something else. repoId: ${repoId.map(_.show).show}, repoNmae: ${repoNmae.map(_.show).show}
               |> If you see this message, it means there is a bug in Gens.genRepositoryWithNoName or using a wrong Gen.
               |> repository: ${repository.show}
               |""".stripMargin)
      }
    }

    // Empty id AND empty name: falls back to the URL itself as the resolver name
    def testRenderToResolvers1WithEmptyIdEmptyName: Property = for {
      n <- Gen.int(Range.linear(0, 10)).log("n")
      repository <- Gens.genRepositoryWithEmptyIdEmptyName.log("repository")
    } yield {
      (repository.id, repository.name) match {
        case (Some(Repository.RepoId("")), Some(Repository.RepoName(""))) =>
          val propsName = Props.PropsName("testProps")
          val expected = s"""resolvers += "${repository.url.value}" at "${repository.url.value}"""".some
          val actual = BuildSbt.renderListOfFieldValue(
            none[String],
            List(repository),
            n
          )(repo => Render[Repository].render(propsName, repo))
          actual ==== expected
        case (repoId, repoNmae) =>
          Result.failure.log(
            s"""> Repository generated by Gens.genRepositoryWithEmptyIdEmptyName is supposed to have an empty id and an empty name
               |> but it has something else. repoId: ${repoId.map(_.show).show}, repoNmae: ${repoNmae.map(_.show).show}
               |> If you see this message, it means there is a bug in Gens.genRepositoryWithEmptyIdEmptyName or using a wrong Gen.
               |> repository: ${repository.show}
               |""".stripMargin)
      }
    }

    // No id and no name (both None): same URL fallback as the empty-string case
    def testRenderToResolvers1WithNoIdNoName: Property = for {
      n <- Gen.int(Range.linear(0, 10)).log("n")
      repository <- Gens.genRepositoryWithNoIdNoName.log("repository")
    } yield {
      (repository.id, repository.name) match {
        case (None, None) =>
          val propsName = Props.PropsName("testProps")
          val expected = s"""resolvers += "${repository.url.value}" at "${repository.url.value}"""".some
          val actual = BuildSbt.renderListOfFieldValue(
            none[String],
            List(repository),
            n
          )(repo => Render[Repository].render(propsName, repo))
          actual ==== expected
        case (repoId, repoNmae) =>
          Result.failure.log(
            s"""> Repository generated by Gens.genRepositoryWithNoIdNoName is supposed to have neither id nor name (None for both)
               |> but it has something else. repoId: ${repoId.map(_.show).show}, repoNmae: ${repoNmae.map(_.show).show}
               |> If you see this message, it means there is a bug in Gens.genRepositoryWithNoIdNoName or using a wrong Gen.
               |> repository: ${repository.show}
               |""".stripMargin)
      }
    }

    // Two or more repositories: rendered as a `resolvers ++= List(...)` block
    // with `n`-wide indentation
    def testRenderToResolversMany: Property = for {
      n <- Gen.int(Range.linear(0, 10)).log("n")
      repositories <- Gens.genRepository.list(Range.linear(2, 10)).log("repositories")
    } yield {
      val propsName = Props.PropsName("testProps")
      val idt = StringUtils.indent(n)
      val expected =
        s"""resolvers ++= List(
           |$idt${repositories.map(repo => Repository.render(propsName, repo).toQuotedString).stringsMkString(" ", s",\\n$idt ", "")}
           |$idt)""".stripMargin.some
      val actual = BuildSbt.renderListOfFieldValue(
        none[String],
        repositories,
        n
      )(repo => Render[Repository].render(propsName, repo))
      actual ==== expected
    }

    // Mixed list: well-formed repositories followed by empty-name ones
    def testRenderToResolversManyWithEmptyRepoNames: Property = for {
      n <- Gen.int(Range.linear(0, 10)).log("n")
      repositories <- Gens.genRepository.list(Range.linear(2, 10)).log("repositories")
      repositoriesWithEmptyNames <- Gens.genRepositoryWithEmptyName.list(Range.linear(2, 10)).log("repositoriesWithEmptyNames")
    } yield {
      val propsName = Props.PropsName("testProps")
      val idt = StringUtils.indent(n)
      val expected =
        s"""resolvers ++= List(
           |$idt${repositories.map(repo => Repository.render(propsName, repo).toQuotedString).stringsMkString(" ", s",\\n$idt ", ",")}
           |$idt${repositoriesWithEmptyNames.map(repo => Repository.render(propsName, repo).toQuotedString).stringsMkString(" ", s",\\n$idt ", "")}
           |$idt)""".stripMargin.some
      val input = repositories ++ repositoriesWithEmptyNames
      val actual = BuildSbt.renderListOfFieldValue(
        none[String],
        input,
        n
      )(repo => Render[Repository].render(propsName, repo))
      actual ==== expected
    }

    // Mixed list: well-formed repositories followed by empty-id/empty-name ones
    def testRenderToResolversManyWithEmptyRepoIdEmptyRepoNames: Property = for {
      n <- Gen.int(Range.linear(0, 10)).log("n")
      repositories <- Gens.genRepository.list(Range.linear(2, 10)).log("repositories")
      repositoriesWithEmptyNames <- Gens.genRepositoryWithEmptyIdEmptyName.list(Range.linear(2, 10)).log("repositoriesWithEmptyNames")
    } yield {
      val propsName = Props.PropsName("testProps")
      val idt = StringUtils.indent(n)
      val expected =
        s"""resolvers ++= List(
           |$idt${repositories.map(repo => Repository.render(propsName, repo).toQuotedString).stringsMkString(" ", s",\\n$idt ", ",")}
           |$idt${repositoriesWithEmptyNames.map(repo => Repository.render(propsName, repo).toQuotedString).stringsMkString(" ", s",\\n$idt ", "")}
           |$idt)""".stripMargin.some
      val input = repositories ++ repositoriesWithEmptyNames
      val actual = BuildSbt.renderListOfFieldValue(
        none[String],
        input,
        n
      )(repo => Render[Repository].render(propsName, repo))
      actual ==== expected
    }

    // Mixed list: well-formed repositories followed by no-name (None) ones
    def testRenderToResolversManyWithNoRepoNames: Property = for {
      n <- Gen.int(Range.linear(0, 10)).log("n")
      repositories <- Gens.genRepository.list(Range.linear(2, 10)).log("repositories")
      repositoriesWithEmptyNames <- Gens.genRepositoryWithNoName.list(Range.linear(2, 10)).log("repositoriesWithEmptyNames")
    } yield {
      val propsName = Props.PropsName("testProps")
      val idt = StringUtils.indent(n)
      val expected =
        s"""resolvers ++= List(
           |$idt${repositories.map(repo => Repository.render(propsName, repo).toQuotedString).stringsMkString(" ", s",\\n$idt ", ",")}
           |$idt${repositoriesWithEmptyNames.map(repo => Repository.render(propsName, repo).toQuotedString).stringsMkString(" ", s",\\n$idt ", "")}
           |$idt)""".stripMargin.some
      val input = repositories ++ repositoriesWithEmptyNames
      val actual = BuildSbt.renderListOfFieldValue(
        none[String],
        input,
        n
      )(repo => Render[Repository].render(propsName, repo))
      actual ==== expected
    }

    // Mixed list: well-formed repositories followed by no-id/no-name (None) ones
    def testRenderToResolversManyWithNoRepoIdNoRepoNames: Property = for {
      n <- Gen.int(Range.linear(0, 10)).log("n")
      repositories <- Gens.genRepository.list(Range.linear(2, 10)).log("repositories")
      repositoriesWithEmptyNames <- Gens.genRepositoryWithNoIdNoName.list(Range.linear(2, 10)).log("repositoriesWithEmptyNames")
    } yield {
      val propsName = Props.PropsName("testProps")
      val idt = StringUtils.indent(n)
      val expected =
        s"""resolvers ++= List(
           |$idt${repositories.map(repo => Repository.render(propsName, repo).toQuotedString).stringsMkString(" ", s",\\n$idt ", ",")}
           |$idt${repositoriesWithEmptyNames.map(repo => Repository.render(propsName, repo).toQuotedString).stringsMkString(" ", s",\\n$idt ", "")}
           |$idt)""".stripMargin.some
      val input = repositories ++ repositoriesWithEmptyNames
      val actual = BuildSbt.renderListOfFieldValue(
        none[String],
        input,
        n
      )(repo => Render[Repository].render(propsName, repo))
      actual ==== expected
    }
  }

  // Dependency rendering into `libraryDependencies +=` / `++= List(...)` forms
  object RenderDependencySpec {

    // Empty dependency list renders to nothing (None)
    def testRenderLibraryDependenciesEmpty: Property = for {
      n <- Gen.int(Range.linear(0, 10)).log("n")
    } yield {
      val propsName = Props.PropsName("testProps")
      val libsName = Libs.LibsName("testLibs")
      val libs = Libs(List.empty[(Libs.LibValName, Dependency)])
      val expected = none[String]
      val actual = BuildSbt.renderListOfFieldValue(
        none[String],
        List.empty[Dependency],
        n
      )(dep => ReferencedRender[Dependency].render(propsName, libsName, libs, dep))
      actual ==== expected
    }

    // Single dependency: rendered as `libraryDependencies += <dep>`
    def testRenderLibraryDependencies1: Property = for {
      n <- Gen.int(Range.linear(0, 10)).log("n")
      dependency <- Gens.genDependency.log("dependency")
    } yield {
      val propsName = Props.PropsName("testProps")
      val libsName = Libs.LibsName("testLibs")
      val libs = Libs(List.empty[(Libs.LibValName, Dependency)])
      val expected = s"libraryDependencies += ${Dependency.render(propsName, libsName, libs, dependency).toQuotedString}".some
      val actual = BuildSbt.renderListOfFieldValue(
        none[String],
        List(dependency),
        n
      )(dep => ReferencedRender[Dependency].render(propsName, libsName, libs, dep))
      actual ==== expected
    }

    // Two or more dependencies: rendered as a `libraryDependencies ++= List(...)` block
    def testRenderLibraryDependenciesMany: Property = for {
      n <- Gen.int(Range.linear(0, 10)).log("n")
      libraryDependencies <- Gens.genDependency.list(Range.linear(2, 10)).log("libraryDependencies")
    } yield {
      val propsName = Props.PropsName("testProps")
      val libsName = Libs.LibsName("testLibs")
      val libs = Libs(List.empty[(Libs.LibValName, Dependency)])
      val idt = StringUtils.indent(n)
      val expected =
        s"""libraryDependencies ++= List(
           |$idt${libraryDependencies
                    .map(dep => Dependency.render(propsName, libsName, libs, dep).toQuotedString)
                    .stringsMkString(" ", s",\\n$idt ", "")}
           |$idt)""".stripMargin.some
      val actual = BuildSbt.renderListOfFieldValue(
        none[String],
        libraryDependencies,
        n
      )(dep => ReferencedRender[Dependency].render(propsName, libsName, libs, dep))
      actual ==== expected
    }
  }
}
| Kevin-Lee/maven2sbt | core/src/test/scala/maven2sbt/core/BuildSbtSpec.scala | Scala | mit | 16,953 |
package org.juanitodread.pitayafinch.nlp.tools.chunking
import scala.concurrent.Await
import scala.concurrent.duration._
import opennlp.tools.chunker.ChunkerME
import org.juanitodread.pitayafinch.model.nlp.chunking.Chunk
import org.juanitodread.pitayafinch.nlp.tools.models.chunking.{ ChunkerModel, ChunkerModelAsync }
// Thin wrapper around OpenNLP's ChunkerME that groups parallel token/POS-tag
// lists into phrase chunks.
class Chunker(model: ChunkerModel) {

  // Runs the chunker over the tokens/tags and materializes each span as a Chunk:
  // the space-joined token text plus the span type reported by OpenNLP.
  // NOTE(review): assumes `tokens` and `tags` are the same length — confirm at call sites.
  def chunk(tokens: List[String], tags: List[String]): List[Chunk] = {
    val chunker: ChunkerME = new ChunkerME(model.getNlpModel)
    (for (span <- chunker.chunkAsSpans(tokens.toArray, tags.toArray)) yield {
      Chunk(tokens.slice(span.getStart, span.getEnd).mkString(" "), span.getType)
    }).toList
  }
}
// Companion providing a shared Chunker backed by the async-loaded model.
object Chunker {
  // NOTE(review): blocks up to 5 seconds for the model at object initialization;
  // a timeout surfaces as an exception the first time this object is touched.
  private final val chunker = new Chunker(Await.result(ChunkerModelAsync(), 5.seconds))

  // Convenience entry point delegating to the shared instance
  def apply(tokens: List[String], tags: List[String]): List[Chunk] = {
    chunker.chunk(tokens, tags)
  }
}
| juanitodread/pitaya-finch | src/main/scala/org/juanitodread/pitayafinch/nlp/tools/chunking/Chunker.scala | Scala | apache-2.0 | 888 |
package io.iteratee
import cats.Eval
import io.iteratee.testing.{ EnumerateeSuite, IterateeSuite, StackSafeEnumeratorSuite }
import io.iteratee.tests.EvalSuite
class EvalEnumerateeTests extends EnumerateeSuite[Eval] with EvalSuite
// Enumerator tests specific to `Eval`: laziness of `perform` and both
// generateM variants, verified by observing a mutable counter.
class EvalEnumeratorTests extends StackSafeEnumeratorSuite[Eval] with EvalSuite {
  "perform" should "perform an action" in forAll { (eav: EnumeratorAndValues[Int]) =>
    var counter = 0
    val action = perform[Int](Eval.always(counter += 1))
    val enumerator = action.append(eav.enumerator).append(action)

    // Nothing runs until the enumerator is evaluated; then both actions run once each
    assert(counter === 0)
    assert(enumerator.toVector === Eval.now(eav.values))
    assert(counter === 2)
  }

  "generateM" should "enumerate values generated by an effectful function" in forAll { (n: Short) =>
    val count = math.abs(n.toInt)
    var counter = 0

    // Emits 0..count, then None terminates the enumeration
    val enumerator = generateM(
      Eval.always(
        if (counter > count) None else Some {
          val result = counter
          counter += 1
          result
        }
      )
    )

    assert(enumerator.toVector === Eval.now((0 to count).toVector))
    assert(counter == count + 1)
  }

  "StackUnsafe.generateM" should "enumerate values generated by an effectful function" in forAll { (n: Short) =>
    val count = math.abs(n.toInt)
    var counter = 0

    // Same contract as generateM above, via the stack-unsafe constructor
    val enumerator = Enumerator.StackUnsafe.generateM(
      Eval.always(
        if (counter > count) None else Some {
          val result = counter
          counter += 1
          result
        }
      )
    )

    assert(enumerator.toVector === Eval.now((0 to count).toVector))
    assert(counter == count + 1)
  }
}
class EvalIterateeTests extends IterateeSuite[Eval] with EvalSuite
| flyingwalrusllc/iteratee | tests/shared/src/test/scala/io/iteratee/EvalTests.scala | Scala | apache-2.0 | 1,672 |
package br.unb.cic.sma.sade.fipa
/**
 * FIPA AGREE performative message.
 *
 * `getParameters` always reports the AGREE performative, regardless of what the
 * caller supplied in `parameters`.
 */
case class Agree(parameters: Map[ACLMessageParameter.ACLMessageParameter, Any]) extends ACLMessage(parameters: Map[ACLMessageParameter.ACLMessageParameter, Any]) {
  // Simplified: `+` on a Scala Map already replaces an existing binding for the key,
  // so the previous `(parameters - PERFORMATIVE) + (PERFORMATIVE -> AGREE)` removal
  // step was redundant. Behavior is unchanged.
  override def getParameters = parameters + (ACLMessageParameter.PERFORMATIVE -> Performative.AGREE)
  override def performative = Performative.AGREE
} | brenoxp/SADE | src/br/unb/cic/sma/sade/fipa/Agree.scala | Scala | mit | 388 |
package com.twitter.finatra.utils
import com.twitter.util.Memoize
object ClassUtils {
  /**
   * Memoized lookup of a class's simple name.
   * Caches results because `Class#getSimpleName` is computed on every call by the JVM.
   */
  val simpleName = Memoize((clazz: Class[_]) => clazz.getSimpleName)
}
| syamantm/finatra | utils/src/main/scala/com/twitter/finatra/utils/ClassUtils.scala | Scala | apache-2.0 | 167 |
package com.twitter.finagle
import com.twitter.conversions.time._
import com.twitter.finagle.client.StringClient
import com.twitter.finagle.server.StringServer
import com.twitter.util._
import java.net.{InetAddress, InetSocketAddress}
import org.scalatest.FunSuite
class ResolutionRaceTest extends FunSuite {
  // Echo service: replies with the request string unchanged.
  private[this] val Echoer = Service.mk[String, String](Future.value)

  /*
   * Tries to trigger a race condition related to inet resolution -- it has been observed that
   * the load balancer may throw NoBrokersAvailableException if resolution is asynchronous.
   *
   * If this test fails intermittently, IT IS NOT FLAKY, it's broken.
   * Or maybe it's flaky in terms of port allocations.
   */
  test("resolution raciness") {
    // Port 0: let the OS pick an ephemeral port; the bound address is read back below.
    val socketAddr = new InetSocketAddress(InetAddress.getLoopbackAddress, 0)
    val server = StringServer.server.serve(socketAddr, Echoer)
    val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
    // "asyncinet" scheme forces asynchronous name resolution, the condition under test.
    val dest = s"asyncinet!localhost:${addr.getPort}"
    try {
      val phrase = s"[$dest]"
      val echo = StringClient.client.newService(dest)
      try {
        val echoed = Await.result(echo(phrase), 5.seconds)
        assert(echoed == phrase)
      } finally Await.ready(echo.close(), 5.seconds)
    } finally {
      Await.result(server.close(), 5.seconds)
    }
  }
}
| mkhq/finagle | finagle-core/src/test/scala/com/twitter/finagle/ResolutionRaceTest.scala | Scala | apache-2.0 | 1,334 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sysml.api.ml
import org.apache.spark.rdd.RDD
import java.io.File
import org.apache.spark.SparkContext
import org.apache.spark.ml.{ Estimator, Model }
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types.StructType
import org.apache.spark.ml.param.{ DoubleParam, Param, ParamMap, Params }
import org.apache.sysml.runtime.matrix.MatrixCharacteristics
import org.apache.sysml.runtime.matrix.data.MatrixBlock
import org.apache.sysml.runtime.DMLRuntimeException
import org.apache.sysml.runtime.instructions.spark.utils.{ RDDConverterUtilsExt => RDDConverterUtils }
import org.apache.sysml.api.mlcontext._
import org.apache.sysml.api.mlcontext.ScriptFactory._
object LogisticRegression {
  /** Location of the MultiLogReg DML training script, relative to the SystemML root. */
  final val scriptPath = Seq("scripts", "algorithms", "MultiLogReg.dml").mkString(File.separator)
}
/**
 * Logistic Regression Scala API.
 *
 * Spark ML Estimator wrapping the SystemML MultiLogReg.dml script. Training runs
 * the DML script via MLContext; fitted coefficients are exposed as matrix "B_out".
 */
class LogisticRegression(override val uid: String, val sc: SparkContext)
  extends Estimator[LogisticRegressionModel]
  with HasIcpt
  with HasRegParam
  with HasTol
  with HasMaxOuterIter
  with HasMaxInnerIter
  with BaseSystemMLClassifier {

  // Fluent setters for the script hyper-parameters.
  def setIcpt(value: Int) = set(icpt, value)
  def setMaxOuterIter(value: Int) = set(maxOuterIter, value)
  def setMaxInnerIter(value: Int) = set(maxInnerIter, value)
  def setRegParam(value: Double) = set(regParam, value)
  def setTol(value: Double) = set(tol, value)

  override def copy(extra: ParamMap): LogisticRegression = {
    val that = new LogisticRegression(uid, sc)
    copyValues(that, extra)
  }

  // Note: will update the y_mb as this will be called by Python mllearn
  /** Fits on in-memory matrices; stores script output in `mloutput`. */
  def fit(X_mb: MatrixBlock, y_mb: MatrixBlock): LogisticRegressionModel = {
    mloutput = baseFit(X_mb, y_mb, sc)
    new LogisticRegressionModel(this)
  }

  /** Fits on a Spark DataFrame of features/labels. */
  def fit(df: ScriptsUtils.SparkDataType): LogisticRegressionModel = {
    mloutput = baseFit(df, sc)
    new LogisticRegressionModel(this)
  }

  /**
   * Builds the training Script with current hyper-parameters bound to DML inputs.
   * The "$X"/"$Y"/"$B" placeholders are bound to empty strings here; the base class
   * supplies the actual inputs under the returned variable names ("X", "Y_vec").
   */
  def getTrainingScript(isSingleNode: Boolean): (Script, String, String) = {
    val script = dml(ScriptsUtils.getDMLScript(LogisticRegression.scriptPath))
      .in("$X", " ")
      .in("$Y", " ")
      .in("$B", " ")
      .in("$icpt", toDouble(getIcpt))
      .in("$reg", toDouble(getRegParam))
      .in("$tol", toDouble(getTol))
      // NOTE(review): getter is spelled "getMaxOuterIte" — presumably defined with
      // that spelling in the HasMaxOuterIter trait; confirm before "fixing".
      .in("$moi", toDouble(getMaxOuterIte))
      .in("$mii", toDouble(getMaxInnerIter))
      .out("B_out")
    (script, "X", "Y_vec")
  }
}
object LogisticRegressionModel {
  /** Location of the GLM prediction DML script, relative to the SystemML root. */
  final val scriptPath = Seq("scripts", "algorithms", "GLM-predict.dml").mkString(File.separator)
}
/**
 * Logistic Regression Scala API — fitted model.
 *
 * Wraps the coefficients produced by [[LogisticRegression]] (matrix "B_out") and
 * scores new data through the GLM-predict DML script.
 */
class LogisticRegressionModel(override val uid: String)(estimator: LogisticRegression, val sc: SparkContext)
  extends Model[LogisticRegressionModel]
  with HasIcpt
  with HasRegParam
  with HasTol
  with HasMaxOuterIter
  with HasMaxInnerIter
  with BaseSystemMLClassifierModel {

  override def copy(extra: ParamMap): LogisticRegressionModel = {
    val that = new LogisticRegressionModel(uid)(estimator, sc)
    copyValues(that, extra)
  }

  // NOTE(review): this flag is not read anywhere in this class — presumably
  // consumed by a base class or by the Python wrapper; confirm before removal.
  var outputRawPredictions = true
  def setOutputRawPredictions(outRawPred: Boolean): Unit = outputRawPredictions = outRawPred

  /** Convenience constructor used right after fitting; uid fixed to "model". */
  def this(estimator: LogisticRegression) = {
    this("model")(estimator, estimator.sc)
  }

  // dfam=3 selects the multinomial-logit family in GLM-predict.
  def getPredictionScript(isSingleNode: Boolean): (Script, String) =
    PredictionUtils.getGLMPredictionScript(estimator.mloutput.getMatrix("B_out"), isSingleNode, 3)

  def baseEstimator(): BaseSystemMLEstimator = estimator
  def modelVariables(): List[String] = List[String]("B_out")

  // Scoring entry points; "means" is the DML output holding class probabilities.
  def transform(X: MatrixBlock): MatrixBlock = baseTransform(X, sc, "means")
  def transform_probability(X: MatrixBlock): MatrixBlock = baseTransformProbability(X, sc, "means")
  def transform(df: ScriptsUtils.SparkDataType): DataFrame = baseTransform(df, sc, "means")
}
/**
 * Example code for Logistic Regression: trains on a small hand-built dataset and
 * scores the same points. Intended as a runnable smoke test, not a benchmark.
 */
object LogisticRegressionExample {
  import org.apache.spark.{ SparkConf, SparkContext }
  import org.apache.spark.sql._
  import org.apache.spark.sql.types._
  import org.apache.spark.ml.linalg.Vectors
  import org.apache.spark.ml.feature.LabeledPoint

  def main(args: Array[String]) = {
    val sparkSession = SparkSession.builder().master("local").appName("TestLocal").getOrCreate();
    val sc: SparkContext = sparkSession.sparkContext;
    import sparkSession.implicits._
    // Six labeled points, two classes (1.0 / 2.0), three features each.
    val training = sc.parallelize(
      Seq(
        LabeledPoint(1.0, Vectors.dense(1.0, 0.0, 3.0)),
        LabeledPoint(1.0, Vectors.dense(1.0, 0.4, 2.1)),
        LabeledPoint(2.0, Vectors.dense(1.2, 0.0, 3.5)),
        LabeledPoint(1.0, Vectors.dense(1.0, 0.5, 2.2)),
        LabeledPoint(2.0, Vectors.dense(1.6, 0.8, 3.6)),
        LabeledPoint(1.0, Vectors.dense(1.0, 0.0, 2.3))
      )
    )
    val lr = new LogisticRegression("log", sc)
    val lrmodel = lr.fit(training.toDF)
    // lrmodel.mloutput.getDF(sparkSession, "B_out").show()
    // Same points reused as the "test" set — predictions should match the labels.
    val testing = sc.parallelize(
      Seq(
        LabeledPoint(1.0, Vectors.dense(1.0, 0.0, 3.0)),
        LabeledPoint(1.0, Vectors.dense(1.0, 0.4, 2.1)),
        LabeledPoint(2.0, Vectors.dense(1.2, 0.0, 3.5)),
        LabeledPoint(1.0, Vectors.dense(1.0, 0.5, 2.2)),
        LabeledPoint(2.0, Vectors.dense(1.6, 0.8, 3.6)),
        LabeledPoint(1.0, Vectors.dense(1.0, 0.0, 2.3))
      )
    )
    lrmodel.transform(testing.toDF).show
  }
}
| asurve/incubator-systemml | src/main/scala/org/apache/sysml/api/ml/LogisticRegression.scala | Scala | apache-2.0 | 6,227 |
package sampleclean.clean
import org.apache.spark.{SparkConf, SparkContext}
import sampleclean.api.SampleCleanContext
/**
 * Provides methods to run tests against a SampleCleanContext backed by a local
 * SparkContext that is reliably stopped after each test.
 *
 * Fix over the previous version: fixture setup (Hive table creation/loading and
 * sample initialization) now runs inside the try/finally, so a failure during
 * setup no longer leaks the SparkContext. Shared setup logic is factored into
 * private helpers; the public interface is unchanged.
 */
trait LocalSCContext extends Serializable{

  /** Creates a local SparkContext, runs `setup` then `f`, and always stops the context. */
  private def withContext[T](setup: SampleCleanContext => Unit)(f: SampleCleanContext => T): T = {
    val conf = new SparkConf()
      .set("spark.driver.allowMultipleContexts","true")
    val sc = new SparkContext("local[*]", "test", conf)
    try {
      val scc = new SampleCleanContext(sc)
      setup(scc)
      f(scc)
    } finally {
      sc.stop()
    }
  }

  /** Drops/recreates the `test` Hive table with the given columns and loads the CSV at `path`. */
  private def loadTestTable(scc: SampleCleanContext, columns: List[String], path: String): Unit = {
    val contextString = columns.mkString(" String,") + " String"
    // Force Hive context creation before closing the session, as the original setup did.
    val hiveContext = scc.getHiveContext()
    scc.closeHiveSession()
    scc.hql("DROP TABLE IF EXISTS test")
    scc.hql("CREATE TABLE IF NOT EXISTS test(%s) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\\\n'".format(contextString))
    scc.hql("LOAD DATA LOCAL INPATH '%s' OVERWRITE INTO TABLE test".format(path))
  }

  // Column list shared by the two full-record fixtures: "id" plus col0..col19.
  private def fullRecordColumns: List[String] =
    List("id") ++ (0 until 20).toList.map("col" + _.toString)

  /** Runs `f` on a new SparkContext and ensures that it is stopped afterwards. */
  def withSampleCleanContext[T](f: SampleCleanContext => T): T =
    withContext(_ => ())(f)

  /** Loads the single-attribute fixture and a consistent sample of size `sample`, then runs `f`. */
  def withSingleAttribute[T](sample:Int,f: SampleCleanContext => T): T =
    withContext { scc =>
      loadTestTable(scc, List("id", "col0"), "./src/test/resources/csvJaccard100dupsAttr")
      scc.initializeConsistent("test", "test_sample", "id", sample)
    }(f)

  /** Loads the 21-column fixture with 100 duplicates, sampled at rate `sample`, then runs `f`. */
  def withFullRecords[T](sample:Double, f: SampleCleanContext => T): T =
    withContext { scc =>
      loadTestTable(scc, fullRecordColumns, "./src/test/resources/csvJaccard100dups")
      scc.initializeConsistent("test", "test_sample", "id", sample)
    }(f)

  /** Loads the large fixture with 100000 duplicates, sampled at rate `sample`, then runs `f`. */
  def withFullRecordsLarge[T](sample:Double, f: SampleCleanContext => T): T =
    withContext { scc =>
      loadTestTable(scc, fullRecordColumns, "./src/test/resources/csvJaccard100000dups")
      scc.initializeConsistent("test", "test_sample", "id", sample)
    }(f)
}
| sjyk/sampleclean-async | src/test/scala/sampleclean/clean/LocalSCContext.scala | Scala | apache-2.0 | 3,352 |
package ar.edu.unq.tpi.qsim.model
/**
* Copyright 2014 Tatiana Molinari.
* Copyright 2014 Susana Rosito
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
import java.util.Calendar
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.Map
import org.uqbar.commons.utils.Observable
import ar.edu.unq.tpi.qsim.utils.Util
import ar.edu.unq.tpi.qsim.exeptions._
import ar.edu.unq.tpi.qsim.parser._
@Observable
object Ciclo {
  // Global flags indicating which instruction-cycle stage is currently enabled.
  var fetch = true
  var decode = false
  var execute = false
  var execute_complete = true

  /** Disables every stage (used while loading a program). */
  def ninguna_etapa() {
    fetch = false
    decode = false
    execute = false
    execute_complete = false
  }

  /** Returns to the fetch stage after an execute. */
  // NOTE(review): `decode` is not reset here — in the normal fetch→decode→execute
  // flow pasarAExecute already cleared it, but a direct decode→fetch transition
  // would leave decode=true; confirm whether that path can occur.
  def pasarAFetch() {
    fetch = true
    execute = false
    execute_complete = true
  }

  /** Moves from fetch to the decode stage. */
  def pasarADecode() {
    fetch = false
    decode = true
    execute_complete = false
  }

  /** Moves from decode to the execute stage. */
  def pasarAExecute() {
    decode = false
    execute = true
    execute_complete = false
  }
}
@Observable
case class Simulador() {
  var ciclo = Ciclo                 // shared instruction-cycle stage flags (global object)
  var mensaje_al_usuario = ""       // accumulated, timestamped log shown to the user
  var cpu: CPU = _
  var busIO: BusEntradaSalida = _
  var instruccionActual: Instruccion = _                                 // last fetched/assembled instruction
  var celdaInstruccionActual: ArrayBuffer[Celda] = ArrayBuffer[Celda]()  // memory cells of the current instruction

  /**
   * Initializes the simulator: creates the CPU and the I/O bus.
   */
  def inicializarSim() {
    println("--------INIT------")
    cpu = CPU()
    busIO = new BusEntradaSalida()
    busIO.initialize()
    agregarMensaje("******************INFORMACION*******************")
    agregarMensaje("El programa compilado ha sido cargado en la memoria con exito")
  }

  /**
   * Returns true if the program references any invalid (undeclared) label.
   * @param programa the program to validate
   * @return Boolean
   */
  def etiquetasInvalidas(programa: Programa): Boolean = {
    programa.instrucciones.exists(instr ⇒ instr match {
      case inst_dp: Instruccion_DosOperandos ⇒ verificarOperandoEtiqueta(inst_dp.origen, programa)
      case inst_up: Instruccion_UnOperando ⇒ verificarOperandoEtiqueta(inst_up.operando, programa)
      case inst_sc: JUMP_condicional ⇒ verificarOperandoEtiqueta(inst_sc.desplazamiento.asInstanceOf[SaltoEtiqueta].etiqueta, programa)
      case _ ⇒ false
    })
  }

  /**
   * Checks whether the given operand is a label (addressing-mode code "111111")
   * that is NOT declared in the program's label table.
   * @param operando the operand to check; programa the program providing labels
   * @return true if the operand is an undeclared label
   */
  def verificarOperandoEtiqueta(operando: ModoDireccionamiento, programa: Programa): Boolean = {
    var respuesta = false
    if (operando.codigo.equals("111111")) {
      respuesta = (!programa.etiquetas.contains(operando.representacionString()))
    }
    respuesta
  }

  /**
   * Assigns a memory position to each instruction, starting at `pc` and advancing
   * by each instruction's cell count.
   * @param pc starting address; programa the program to lay out
   * @return the same program, with positions assigned (mutated in place)
   */
  def asignarPosiciones(pc: W16, programa: Programa): Programa = {
    var pcAsignar: W16 = pc
    programa.instrucciones.foreach(inst ⇒ {
      inst.position = pcAsignar
      pcAsignar = pcAsignar.ss(inst.cantidadCeldas())
    })
    programa
  }

  /**
   * Resolves a label used as the source operand of a two-operand instruction
   * into an immediate holding the label's address; non-labels pass through.
   */
  def calcularValorOrigenEtiqueta(instruccion: Instruccion_DosOperandos, programa: Programa) = {
    var origen = instruccion.origen
    if (instruccion.origen.codigo.equals("111111")) {
      origen = new Inmediato(programa.etiquetas(instruccion.origen.representacionString).position)
    }
    origen
  }

  /**
   * Resolves a label used as the operand of a one-operand instruction into an
   * immediate holding the label's address; non-labels pass through.
   */
  def calcularValorOperandoEtiqueta(instruccion: Instruccion_UnOperando, programa: Programa) = {
    var operando = instruccion.operando
    if (instruccion.operando.codigo.equals("111111")) {
      operando = new Inmediato(programa.etiquetas(instruccion.operando.representacionString).position)
    }
    operando
  }

  /**
   * Computes the relative displacement for a conditional jump to a label and
   * encodes it in 8-bit two's complement. Throws if it does not fit in [-128, 127].
   */
  def calcularValorSaltoEtiqueta(instruccion: JUMP_condicional, programa: Programa) = {
    var resultado = instruccion.desplazamiento.salto
    if (instruccion.desplazamiento.asInstanceOf[SaltoEtiqueta].etiqueta.codigo.equals("111111")) {
      // NOTE(review): this mutates instruccion.position (increments it) while
      // computing the displacement — confirm callers expect that side effect.
      instruccion.position.++
      var posicionActual = instruccion.position
      var posicionASaltar = programa.etiquetas(instruccion.desplazamiento.asInstanceOf[SaltoEtiqueta].etiqueta.representacionString).position
      resultado = posicionASaltar.value - posicionActual.value
    }
    // `&` is the non-short-circuit boolean AND; behaviorally equivalent to && here.
    if (resultado >= -128 & resultado <= 127) {
      Util.binaryToInteger(Util.intToCa2_8B(resultado))
    } else {
      throw new DesplazamientoSaltoInvalidoException("Revisar los saltos utilizados, uno desplazamiento sobrepasa el limite permitido.")
    }
  }

  /** Replaces every label reference in the program with its resolved value. */
  def calcularEtiquetas(programa: Programa): Programa = {
    programa.instrucciones.foreach(inst ⇒ {
      inst match {
        case inst_dp: Instruccion_DosOperandos ⇒ inst_dp.origen = calcularValorOrigenEtiqueta(inst_dp, programa)
        case inst_up: Instruccion_UnOperando ⇒ inst_up.operando = calcularValorOperandoEtiqueta(inst_up, programa)
        case inst_sc: JUMP_condicional ⇒ inst_sc.desplazamiento.salto = calcularValorSaltoEtiqueta(inst_sc, programa)
        case inst ⇒
      }
    })
    programa
  }

  /**
   * Loads the program into memory at the hexadecimal start address `pc` and sets
   * the given registers. Throws EtiquetaInvalidaException on undeclared labels.
   * @param programa program to load; pc start address (hex string); registros register name -> value map
   */
  def cargarProgramaYRegistros(programa: Programa, pc: String, registros: Map[String, W16]) {
    var pcInicial = new W16(pc)
    cpu.cargarPc(pc)
    cpu.actualizarRegistros(registros)
    if (!(etiquetasInvalidas(programa))) {
      var programaConPosiciones = asignarPosiciones(pcInicial, programa)
      var programaSinEtiquetas = calcularEtiquetas(programa)
      busIO.memoria.cargarPrograma(programaSinEtiquetas, pc)
      ciclo.ninguna_etapa
      ciclo.pasarAFetch
    } else {
      throw new EtiquetaInvalidaException("Una de las etiquetas utilizadas es invalida")
      // NOTE(review): the println below is unreachable (it follows a throw).
      println("ERROR ------- ETIQUETAS INVALIDAS -----NO SE CARGA EN MEMORIA!! ")
    }
  }

  /**
   * Reads the next instruction, in binary, starting at the PC. It reads up to three
   * cells (the maximum instruction size); if fewer remain at the end of memory, the
   * CeldaFueraDeMemoriaException is swallowed and the cells read so far are returned.
   * @return the instruction bytes as a binary string
   */
  def obtenerProximaInstruccionBinario(): String =
    {
      var int_pc = cpu.pc.value
      var celdas_binario = busIO.memoria.getValor(int_pc).toBinary
      try { Util.rep(2) { celdas_binario += busIO.memoria.getValor(int_pc + 1).toBinary; int_pc = int_pc + 1 } }
      catch {
        case cfme: CeldaFueraDeMemoriaException ⇒ celdas_binario
      }
      celdas_binario
    }

  /**
   * Fetch stage: reads the instruction at the PC, assembles it, publishes it to the
   * IR, marks its memory cells, advances the PC, and moves the cycle to decode.
   */
  def fetch() {
    println("----------FETCH ---------")
    println("Valor del Pc: " + cpu.pc.toString())
    // Cells of the PREVIOUS instruction are marked as already executed.
    cambiarEstadoCeldasInstruccionActual(State.EXECUTED)
    val cadena_binaria = obtenerProximaInstruccionBinario()
    instruccionActual = Interprete.interpretarInstruccion(cadena_binaria)
    val instruccion_fech = instruccionActual.representacionHexadecimal()
    println("------Trajo la instruccion a Ejecutar que apunta pc :" + instruccion_fech)
    cpu.ir = instruccion_fech
    agregarMensaje("La intruccion actual ocupa: " + instruccionActual.cantidadCeldas().toString)
    // Must read the cells BEFORE incrementing the PC.
    celdaInstruccionActual = obtenerCeldasInstruccionActual()
    cambiarEstadoCeldasInstruccionActual(State.FECH_DECODE)
    cpu.incrementarPc(instruccionActual.cantidadCeldas())
    ciclo.pasarADecode
    println("Cual es el valor de Pc luego del Fetch: " + cpu.pc)
  }

  /** Returns the memory cells occupied by the instruction currently at the PC. */
  def obtenerCeldasInstruccionActual(): ArrayBuffer[Celda] =
    {
      busIO.memoria.getCeldas(cpu.pc.value, instruccionActual.cantidadCeldas())
    }

  /** Sets the display state of every cell of the current instruction. */
  def cambiarEstadoCeldasInstruccionActual(estado: State.Type) {
    celdaInstruccionActual.foreach(celda ⇒
      celda.state = estado)
  }

  /**
   * Decode stage: reports the assembled instruction to the user and moves the
   * cycle to execute. Returns the instruction's string form.
   */
  def decode() = {
    println("----------DECODE------------")
    agregarMensaje("Se decodifico la instruccion : " + (instruccionActual.toString))
    println(mensaje_al_usuario)
    ciclo.pasarAExecute
    (instruccionActual.toString)
  }

  /**
   * Resolves the value held at the given addressing mode (direct, indirect,
   * register-indirect read through the bus; other modes answer their own value).
   * @param modoDir the addressing mode
   * @return W16
   */
  def obtenerValor(modoDir: ModoDireccionamiento): W16 = modoDir match {
    case Directo(inmediato: Inmediato) ⇒ busIO.getValor(inmediato.getValorString())
    case Indirecto(directo: Directo) ⇒ busIO.getValor(obtenerValor(directo))
    case RegistroIndirecto(registro: Registro) ⇒ busIO.getValor(obtenerValor(registro))
    case _ ⇒ modoDir.getValor
  }

  /**
   * Delegates an arithmetic instruction to the ALU, updates the flags from the
   * result map, and returns the resulting word. MUL additionally stores the high
   * word of the product in R7.
   */
  def execute_instruccion_matematica(): W16 = {
    println("--------INSTRUCCION PARA ALU------")
    var resultado = Map[String, Any]()
    instruccionActual match {
      case ADD(op1, op2) ⇒ resultado = ALU.execute_add(obtenerValor(op1), obtenerValor(op2))
      case MUL(op1, op2) ⇒ {
        var result_mult = ALU.execute_mul(obtenerValor(op1), obtenerValor(op2))
        cpu.actualizarR7(result_mult)
        resultado = result_mult
      }
      case DIV(op1, op2) ⇒ resultado = ALU.execute_div(obtenerValor(op1), obtenerValor(op2))
      case SUB(op1, op2) ⇒ resultado = ALU.execute_sub(obtenerValor(op1), obtenerValor(op2))
    }
    cpu.actualizarFlags(resultado)
    resultado("resultado").asInstanceOf[W16]
  }

  /**
   * Runs a full fetch-decode-execute cycle on the instruction at the PC.
   */
  def execute_complete() {
    fetch()
    decode()
    execute()
  }

  /**
   * Execute stage: dispatches the previously assembled instruction. Conditional
   * jumps test the CPU flags through the ALU's bit helpers; arithmetic falls
   * through to execute_instruccion_matematica. Ends by returning the cycle to fetch.
   */
  def execute() {
    println("-------------EXECUTE---------")
    instruccionActual match {
      case RET() ⇒ executeRet()
      case CALL(op1) ⇒ executeCall(obtenerValor(op1))
      case JMP(op1) ⇒ executeJMP(obtenerValor(op1))
      case PUSH(op1) ⇒ executePUSH(obtenerValor(op1))
      case POP(op1) ⇒ executePOP(op1)
      case NOT(op1) ⇒ store(op1, ALU.NOT(obtenerValor(op1)))
      case JE(salto) ⇒ executeJMPCondicional(salto, ALU.interpretarBit(cpu.z))
      case JNE(salto) ⇒ executeJMPCondicional(salto, ALU.interpretarBit(ALU.NOT(cpu.z)))
      case JLE(salto) ⇒ executeJMPCondicional(salto, ALU.interpretarBit(ALU.OR(cpu.z, ALU.XOR(cpu.n, cpu.v))))
      case JG(salto) ⇒ executeJMPCondicional(salto, ALU.interpretarBit(ALU.NOT(ALU.OR(cpu.z, ALU.XOR(cpu.n, cpu.v)))))
      case JL(salto) ⇒ executeJMPCondicional(salto, ALU.interpretarBit(ALU.XOR(cpu.n, cpu.v)))
      case JGE(salto) ⇒ executeJMPCondicional(salto, ALU.interpretarBit(ALU.NOT(ALU.XOR(cpu.n, cpu.v))))
      case JLEU(salto) ⇒ executeJMPCondicional(salto, ALU.interpretarBit(ALU.OR(cpu.c, cpu.z)))
      case JGU(salto) ⇒ executeJMPCondicional(salto, ALU.interpretarBit(ALU.NOT(ALU.OR(cpu.c, cpu.z))))
      case JCS(salto) ⇒ executeJMPCondicional(salto, ALU.interpretarBit(cpu.c))
      case JNEG(salto) ⇒ executeJMPCondicional(salto, ALU.interpretarBit(cpu.n))
      case JVS(salto) ⇒ executeJMPCondicional(salto, ALU.interpretarBit(cpu.v))
      case CMP(op1, op2) ⇒ executeCmp(obtenerValor(op1), obtenerValor(op2))
      case MOV(op1, op2) ⇒ store(op1, obtenerValor(op2))
      case AND(op1, op2) ⇒ {
        var mapa = ALU.AND(obtenerValor(op1), obtenerValor(op2))
        cpu.actualizarFlags(mapa)
        store(op1, mapa("resultado").asInstanceOf[W16])
      }
      case OR(op1, op2) ⇒ {
        var mapa = ALU.OR(obtenerValor(op1), obtenerValor(op2))
        cpu.actualizarFlags(mapa)
        store(op1, mapa("resultado").asInstanceOf[W16])
      }
      case iOp2: Instruccion_DosOperandos ⇒ store(iOp2.destino, execute_instruccion_matematica())
    }
    ciclo.pasarAFetch
    println("Ejecuta la instruccion!!!")
  }

  /**
   * Store stage: writes `un_valor` into the destination addressing mode. Writing to
   * an immediate is invalid and throws (after returning the cycle to fetch).
   * NOTE(review): the two log lines belong to the `case r: Registro` branch, so
   * stores through memory modes are not logged — confirm whether that is intended.
   * @param modoDir destination addressing mode; un_valor value to store
   */
  def store(modoDir: ModoDireccionamiento, un_valor: W16) {
    var direccion: Int = 0
    modoDir match {
      case Inmediato(valor: W16) ⇒ { ciclo.pasarAFetch(); throw new ModoDeDireccionamientoInvalidoException("Un Inmediato no puede ser un operando destino."); }
      case Directo(inmediato: Inmediato) ⇒ { direccion = inmediato.getValor().value; busIO.setValorC(direccion, un_valor); busIO.setStateCelda(direccion, State.STORE); }
      case Indirecto(directo: Directo) ⇒ { direccion = obtenerValor(directo).value; busIO.setValorC(direccion, un_valor); busIO.setStateCelda(direccion, State.STORE); }
      case RegistroIndirecto(registro: Registro) ⇒ { direccion = obtenerValor(registro).value; busIO.setValorC(direccion, un_valor); busIO.setStateCelda(direccion, State.STORE); }
      case r: Registro ⇒
        r.valor = un_valor
        println(s"Se guarda el resutado $un_valor en " + modoDir.toString)
        agregarMensaje(s"Se guardado el resutado $un_valor en " + modoDir.toString)
    }
  }

  /**
   * Executes RET: pops the return address — increments the stack pointer (sp) and
   * restores the PC from the cell it points at. Throws on an empty stack.
   */
  def executeRet() {
    if (cpu.sp.value >= busIO.memoria.tamanioMemoria - 1) {
      throw new StackPointerExeption("Error, Estado de Pila Vacia.")
    }
    cpu.sp.++
    cpu.pc.:=(busIO.getValor(cpu.sp).toString)
  }

  /**
   * Delegates CMP to the ALU and updates the flags from its result.
   */
  def executeCmp(op1: W16, op2: W16) {
    var resultados = ALU.execute_cmp(op1, op2)
    cpu.actualizarFlags(resultados)
  }

  /**
   * Executes JMP: replaces the PC with the given target value.
   * @param valor jump target
   */
  def executeJMP(valor: W16) = cpu.pc.:=(valor.hex)

  /**
   * Executes a conditional jump: if the condition holds, advances the PC by the
   * (two's-complement-decoded) displacement.
   * @param salto the jump displacement; condicion the evaluated flag condition
   */
  def executeJMPCondicional(salto: Salto, condicion: Boolean) {
    if (condicion) {
      var desplazamiento = sacarSaltoCA2(salto.salto)
      cpu.incrementarPc(desplazamiento)
    }
  }

  /** Decodes an 8-bit two's-complement displacement (values > 127 are negative). */
  def sacarSaltoCA2(salto: Int) = {
    var saltoCa2 = salto
    if (salto > 127) {
      saltoCa2 = (-1) * Util.binaryToInteger(Util.representarNumeroEnCA2(salto))
    }
    saltoCa2
  }

  /**
   * Executes CALL: pushes the current PC at the cell the stack pointer (sp) points
   * to, decrements the sp (wrapping from 0xFFF0 to the top of memory), and jumps
   * to the subroutine address.
   * NOTE(review): 65520 (0xFFF0) appears to be the stack lower bound — confirm
   * against the memory map before changing.
   * @param valor subroutine address
   */
  def executeCall(valor: W16) {
    val prepararValorsp = new W16(cpu.sp.toString)
    val prepararValorpc = new W16(cpu.pc.toString)
    store(Directo(Inmediato(prepararValorsp)), prepararValorpc)
    if (cpu.sp.value == 65520) {
      val nuevo_sp = (busIO.memoria.tamanioMemoria - 1)
      cpu.sp.:=(Util.toHex4(nuevo_sp))
    } else { cpu.sp.-- }
    cpu.pc.:=(valor.hex)
  }

  /**
   * Executes PUSH: stores the value at the cell the sp points to, then decrements it.
   * @param valor value to push
   */
  def executePUSH(valor: W16) {
    busIO.memoria.setValor(cpu.sp.toString, valor)
    cpu.sp.--
  }

  /**
   * Executes POP: increments the stack pointer (sp) and stores the value found at
   * the cell it now points to into the given addressing mode.
   * @param modoDir destination addressing mode
   */
  def executePOP(modoDir: ModoDireccionamiento) {
    cpu.sp.++
    store(modoDir, busIO.memoria.getValor(cpu.sp.toString))
  }

  /** Builds a "[d/M h:m]" timestamp for user-log entries. */
  // NOTE(review): java.util.Date#getMonth is zero-based (January = 0), so the month
  // shown is off by one for humans; the getters are also deprecated — confirm intent.
  def obtenerHora(): String = {
    val hoy = Calendar.getInstance().getTime()
    "[" + hoy.getDate().toString() + "/" + hoy.getMonth().toString + " " + hoy.getHours().toString + ":" + hoy.getMinutes().toString + "]"
  }

  /** Appends a timestamped line to the user-visible log. */
  def agregarMensaje(mensaje: String) {
    mensaje_al_usuario = mensaje_al_usuario + obtenerHora + " " + mensaje + "\\n"
  }
}
// Scratch/debug entry point; the whole body is commented out and it does nothing
// when run. Kept as a usage sketch for loading a parsed program into the simulator.
object tt extends App {
  //var l = ArrayBuffer[Int]()
  //l.+=(1)
  //l.+=(2)
  //println(l)
  // var programa = Parser.parse("""
  //MOV R5, 0x0001
  //MOV R2, 0xFFE0
  //ADD R2, R5""", Parser.programQ5).get
  // var sim = Simulador()
  // sim.inicializarSim()
  // sim.busIO.memoria.cargarPrograma(programa,"0000")
} | molinarirosito/QSim | src/main/scala/ar/edu/unq/tpi/qsim/model/Simulador.scala | Scala | gpl-3.0 | 17,259 |
// Empty-check expressed via fold: the first argument is the result for the empty
// case, the function handles the non-empty case (any value => not empty).
// NOTE(review): relies on a `fold(ifEmpty, f)` defined on the enclosing type,
// which is not visible in this fragment.
def isEmptyF: Boolean =
  fold(true, _ => false)
| grzegorzbalcerek/scala-exercises | Optioon/stepOptioonIsEmptyF.scala | Scala | bsd-2-clause | 49 |
package daos.doobie
import daos.UserDao
import javax.inject.Inject
import models.{ Role, User }
import doobie.imports._
import play.api.db.Database
import play.api.Logger
import daos.doobie.DoobieImports._
import scala.util.Try
class UserDaoDoobie @Inject() (
  db: Database
) extends UserDao {
  import UserDaoDoobie._

  /**
   * Creates a transactor (per call). In theory `db` should be backed by Hikari,
   * so this should work fine.
   */
  // private[this] implicit def xa() = DataSourceTransactor[IOLite](db.dataSource)
  private[this] implicit def xa() = DoobieTransactor.transactor(db)

  /** Looks up a user by id; None when no row matches. */
  def byId(id: Long): Option[User] = {
    Logger.debug(s"Consulta de usuario por id: $id")
    userIdQuery(id).option.transact(xa()).unsafePerformIO
  }

  /** Looks up a user by login; None when no row matches. */
  def byLogin(login: String): Option[User] = {
    Logger.debug(s"Consulta de usuario por login: $login")
    userLoginQuery(login).option.transact(xa()).unsafePerformIO
  }

  /** Marks the user as connected and refreshes last_activity. */
  def updateConnected(login: String): Unit = {
    val updated = setConnected(login).run.transact(xa()).unsafePerformIO
    Logger.debug(s"$updated usuarios marcados como conectados")
  }

  /** Returns every user with the "interno" role. */
  def usuariosInternos(): List[User] = {
    val usuarios = qUsersByRole("interno").list.transact(xa()).unsafePerformIO
    Logger.debug(s"Consulta de usuarios internos: ${usuarios.size} usuarios")
    usuarios
  }

  /** Creates a new user with the "interno" role. `clave` is the (hashed) password. */
  def crearUsuario(login: String, clave: String, salt: Int): Unit = {
    qCrearUsuario(login, clave, salt, "interno").run.transact(xa()).unsafePerformIO
  }

  /** Replaces the user's password hash and salt. */
  def actualizarClave(idUsuario: Long, nuevaClave: String, nuevoSalt: Int): Unit = {
    val up = qCambiarClave(idUsuario, nuevaClave, nuevoSalt)
      .run.transact(xa()).unsafePerformIO
    Logger.debug(s"Cambio de clave de usuario $idUsuario (updated=$up)")
  }

  /** Looks up a user together with their role by login. */
  def byLoginWithRole(login: String): Option[(User, Role)] = {
    Logger.debug(s"Consulta de usuario y rol por login $login")
    qUserRole(login).option.transact(xa()).unsafePerformIO
  }
}
/** The queries, kept separate so they can be type-checked (e.g. with doobie's query checker). */
object UserDaoDoobie {
  // Shared SELECT column list; each query appends its own FROM/WHERE fragment.
  private lazy val userFrag = fr"Select id, login, password, salt, role_id, connected, last_activity, cambio_clave "

  /** Query for an Option[User] by id. */
  def userIdQuery(id: Long): Query0[User] =
    (userFrag ++ fr""" from users where id=$id """).query[User]

  /** Query for an Option[User] by login. */
  def userLoginQuery(login: String): Query0[User] =
    (userFrag ++ fr""" from users where login=$login """).query[User]

  /** Marks the user as connected and stamps last_activity with the DB clock. */
  def setConnected(login: String): Update0 =
    sql"""update users set connected=true, last_activity=now()
          where login = $login""".update

  /** All users holding the given role name. */
  def qUsersByRole(rolename: String) = sql"""select u.*
    from users u join roles r on u.role_id = r.id
    where r.name = $rolename""".query[User]

  /** Inserts a user, resolving the role id from its name in a subquery. */
  def qCrearUsuario(
    login: String,
    clave: String,
    salt: Int,
    rolename: String
  ) =
    sql"""INSERT INTO users(login, password, role_id, salt)
          VALUES($login, $clave, (select id from roles where "name" = $rolename), $salt);""".update

  /** Updates password hash and salt for the given user id. */
  def qCambiarClave(idUsuario: Long, nuevaClave: String, nuevoSalt: Int) =
    sql"""UPDATE users set password=$nuevaClave, salt=$nuevoSalt where id = $idUsuario""".update

  /** User joined with their role, by login. Column order must match (User, Role) decoding. */
  def qUserRole(login: String) = sql"""select u.id, u.login, u.password, u.salt, u.role_id, u.connected, u.last_activity, u.cambio_clave,
        r.id, r.name
    from users u join roles r on u.role_id = r.id
    where login = $login""".query[(User, Role)]
}
| kdoomsday/kaminalapp | app/daos/doobie/UserDaoDoobie.scala | Scala | mit | 3,603 |
/*
* Copyright (c) 2014 Paul Bernard
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Spectrum Finance is based in part on:
* QuantLib. http://quantlib.org/
*
*/
package org.quantintel.ql.termstructures
import org.quantintel.ql.time.{Frequency, Date}
import org.quantintel.ql.time.daycounters.DayCounter
import org.quantintel.ql.termstructures.Compounding._
import org.quantintel.ql.time.Frequency._
/**
* Encapsulates interest rate compounding algebra. Invoke day count convention,
* compounding conventions, conversions between different conventions, discount &
* compound factor calculations, and implied and/or equivalent rate calculations.
* @author Paul Bernard
*/
class InterestRate(var rate: Double) {
  // Day-count convention; null when constructed via the rate-only constructors.
  var dc: DayCounter = null
  // Compounding convention; null for the default (0.0) constructor.
  var compound: Compounding = null
  // True only for conventions where a frequency is meaningful (COMPOUNDED / SIMPLE_THEN_COMPOUNDED).
  var freqMakesSense : Boolean = false
  // Frequency id (periods per year); only meaningful when freqMakesSense is true.
  var freq: Int = 0

  /**
   * The default constructor returns a 0.0 interest rate
   */
  def this () {
    this(0.0)
  }

  /**
   * Constructor
   *
   * @param r the rate
   * @param dc the DayCounter
   * @param comp the Compounding method
   * @param freq the Frequency (recorded only for COMPOUNDED / SIMPLE_THEN_COMPOUNDED)
   */
  def this(r: Double, dc: DayCounter, comp: Compounding, freq: Frequency) {
    this(r)
    this.dc = dc
    this.compound = comp
    this.freqMakesSense = false
    if (this.compound == COMPOUNDED || this.compound == SIMPLE_THEN_COMPOUNDED) {
      freqMakesSense = true
      this.freq = freq.id
    }
  }

  /** Convenience constructor: defaults the frequency to ANNUAL. */
  def this(r: Double, dc: DayCounter, comp: Compounding) {
    this(r, dc, comp, ANNUAL)
  }

  /** Convenience constructor: defaults compounding to CONTINUOUS. */
  def this(r: Double, dc: DayCounter) {
    this(r, dc, CONTINUOUS)
  }
/**
*
* @param time Time must be measured using the InterestRate's own day counter
* @return the compound (also known as: capitalization) factor implied by the rate compound at time t.
*/
def compoundFactor(time: Double) : Double = {
val t: Double = time
val r: Double = rate
if(compound == SIMPLE){
1.0 + r * t
} else if (compound == COMPOUNDED){
math.pow(1 + r /freq, freq * t)
} else if (compound == CONTINUOUS){
math.exp(r * t)
} else if (compound == SIMPLE_THEN_COMPOUNDED){
if (t < 1 / freq.asInstanceOf[Double]){
1.0 + r * t
} else {
math.pow(1 + r / freq, freq * t)
}
}
else throw new Exception("unknown compounding convention")
}
def compoundFactor(d1: Date, d2: Date) : Double = {
compoundFactor(d1, d2, new Date, new Date)
}
def compoundFactor(d1: Date, d2: Date, refStart: Date, refEnd: Date) : Double = {
val t: Double = dc.yearFraction(d1, d2, refStart, refEnd)
compoundFactor(t)
}
def dayCounter : DayCounter = this.dc
def compounding: Compounding = this.compound
def frequency: Frequency = if (freqMakesSense) Frequency.valueOf(this.freq) else NO_FREQUENCY
/**
*
* @param t time must be measured using the InterestRate's own day Calendar
* @return discount factor implied by the reate compounded at time t
*/
def discountFactor (t: Double) : Double = {
val factor: Double = compoundFactor(t)
1.0d / factor
}
/**
* discount and or compound factor calculations
*
* @param d1 start date
* @param d2 end date
* @return discount factor implied by the reate compounded between two dates
*/
def discountFactor (d1: Date, d2: Date) : Double = discountFactor(d1, d2, new Date())
/**
* discount and or compound factor calculations
*
* @param d1 start date
* @param d2 end date
* @param refStart reference start date
* @return discount factor
*/
def discountFactor (d1: Date, d2: Date, refStart: Date) : Double = {
discountFactor(d1, d2, refStart, new Date())
}
def discountFactor(d1: Date, d2: Date, refStart: Date, refEnd: Date) : Double = {
val t: Double = this.dc.yearFraction(d1, d2, refStart, refEnd)
discountFactor(t)
}
def equivalentRate(t: Double, comp: Compounding): InterestRate = {
equivalentRate(t, comp, ANNUAL)
}
def equivalentRate(t: Double, comp: Compounding, freq: Frequency) : InterestRate = {
InterestRate.impliedRate(compoundFactor(t), t, this.dc, comp, freq)
}
def equivalentRate(d1: Date, d2: Date, resultDC: DayCounter, comp: Compounding) : InterestRate = {
equivalentRate(d1, d2, resultDC, comp, ANNUAL)
}
def equivalentRate(d1: Date, d2: Date, resultDC: DayCounter, comp: Compounding, freq: Frequency) : InterestRate = {
val t1: Double = this.dc.yearFraction(d1, d2)
val t2: Double = resultDC.yearFraction(d1, d2)
InterestRate.impliedRate(compoundFactor(t1), t2, resultDC, comp, freq)
}
override def toString : String = {
if (rate == 0.0) return "null interest rate"
val sb : java.lang.StringBuilder = new java.lang.StringBuilder()
sb.append(rate).append(" ").append(dc).append(" ")
if (compound == SIMPLE) {
sb.append("simple compounding")
} else if (compound == COMPOUNDED) {
if((freq == NO_FREQUENCY.id) || freq == ONCE.id) {
throw new Exception(" frequency not allowed for this interest rate")
} else {
sb.append(freq + " compounding")
}
} else if (compound == CONTINUOUS) {
sb.append("continuous compounding")
} else if (compound == SIMPLE_THEN_COMPOUNDED) {
if ((freq == NO_FREQUENCY.id) || (freq == ONCE.id)) {
throw new Exception(freq + " frequency not allowed for this interest rate")
} else {
sb.append("simple compounding up to " + (12 / freq) + " months, then " + freq + " compounding")
}
} else {
throw new Exception("unknown compounding convention")
}
sb.toString
}
}
object InterestRate {

  /**
   * Returns the rate that, under the given day counter, compounding
   * convention and frequency, produces the compound factor `c` over the
   * year-fraction `time`.
   *
   * @param c        compound factor to invert (must be > 0 for CONTINUOUS/COMPOUNDED)
   * @param time     period expressed as a year fraction under `resultDC`
   * @param resultDC day counter the resulting rate is expressed on
   * @param comp     compounding convention of the resulting rate
   * @param freq     compounding frequency of the resulting rate
   * @return the implied InterestRate
   */
  def impliedRate(c: Double, time: Double, resultDC: DayCounter, comp: Compounding, freq: Frequency) : InterestRate = {
    val t : Double = time
    // Use the enumeration id (periods per year), mirroring the primary
    // constructor (this.freq = freq.id); casting the enumeration value itself
    // to Int would fail at runtime.
    val f : Double = freq.id.toDouble
    val rate : Double = comp match {
      case SIMPLE => (c - 1) / t
      case COMPOUNDED => (math.pow(c, 1 / (f * t)) - 1) * f
      // Bug fix: the continuous-compounding result was previously computed
      // but never assigned to `rate`, so implied continuous rates were 0.0.
      case CONTINUOUS => math.log(c) / t
      case SIMPLE_THEN_COMPOUNDED =>
        // Simple within the first compounding period, compounded afterwards.
        if (t < (1 / f)) (c - 1) / t
        else (math.pow(c, 1 / (f * t)) - 1) * f
      case _ => throw new Exception("unknown compounding convention")
    }
    new InterestRate(rate, resultDC, comp, freq)
  }

  /** Implied rate with frequency defaulted to ANNUAL. */
  def impliedRate(compound: Double , t: Double, resultDC: DayCounter, comp: Compounding) : InterestRate = {
    impliedRate(compound, t, resultDC, comp, ANNUAL)
  }

  /** Implied rate between two dates with frequency defaulted to ANNUAL. */
  def impliedRate(compound: Double, d1: Date, d2: Date, resultDC: DayCounter, comp: Compounding) : InterestRate = {
    impliedRate(compound, d1, d2, resultDC, comp, ANNUAL)
  }

  /** Implied rate over [d1, d2], the period measured with `resultDC`. */
  def impliedRate(compound: Double, d1: Date, d2: Date, resultDC: DayCounter, comp: Compounding, freq: Frequency) :
  InterestRate = {
    val t: Double = resultDC.yearFraction(d1, d2)
    impliedRate(compound, t, resultDC, comp, freq)
  }
}
| quantintel/spectrum | financial/src/main/scala/org/quantintel/ql/termstructures/InterestRate.scala | Scala | apache-2.0 | 7,447 |
package org.openurp.edu.eams.teach.election.service.rule
// Empty marker trait (no members). Presumably tags built-in preparation
// steps of the course-election service — confirm against its implementors.
trait ElectBuildInPrepare
| openurp/edu-eams-webapp | election/src/main/scala/org/openurp/edu/eams/teach/election/service/rule/ElectBuildInPrepare.scala | Scala | gpl-3.0 | 87 |
package com.typesafe.sbt.packager.docker
import org.scalatest._
class DockerVersionSpec extends FlatSpec with DiagrammedAssertions {

  /** Parses `input` and asserts it yields exactly `expected`. */
  private def assertParsesTo(input: String, expected: DockerVersion): Unit = {
    val parsed = DockerVersion.parse(input)
    assert(parsed == Some(expected))
  }

  "DockerVersion" should "parse 18.09.2" in {
    assertParsesTo("18.09.2", DockerVersion(18, 9, 2, None))
  }

  it should "parse 18.06.1-ce" in {
    assertParsesTo("18.06.1-ce", DockerVersion(18, 6, 1, Some("ce")))
  }

  it should "parse 18.03.1-ee-8" in {
    assertParsesTo("18.03.1-ee-8", DockerVersion(18, 3, 1, Some("ee-8")))
  }

  it should "parse 18.09.ee.2-1.el7.rhel" in {
    assertParsesTo("18.09.ee.2-1.el7.rhel", DockerVersion(18, 9, 0, Some("ee.2-1.el7.rhel")))
  }

  it should "parse 17.05.0~ce-0ubuntu-xenial" in {
    assertParsesTo("17.05.0~ce-0ubuntu-xenial", DockerVersion(17, 5, 0, Some("ce-0ubuntu-xenial")))
  }
}
| sbt/sbt-native-packager | src/test/scala/com/typesafe/sbt/packager/docker/DockerVersionSpec.scala | Scala | bsd-2-clause | 953 |
package lila.simul
package actorApi
import lila.socket.SocketMember
import lila.user.User
import lila.game.Game
// A connected socket member of a simul: the underlying channel plus the
// optional authenticated user id and the user's troll flag.
private[simul] case class Member(
  channel: JsChannel,
  userId: Option[String],
  troll: Boolean) extends SocketMember

private[simul] object Member {

  // Builds a Member from an optional authenticated user; anonymous
  // connections get no userId.
  def apply(channel: JsChannel, user: Option[User]): Member = Member(
    channel = channel,
    userId = user map (_.id),
    // NOTE(review): `??` is assumed to fold the Option with false as the
    // default for anonymous users — confirm against the lila Option helpers.
    troll = user.??(_.troll))
}
// Chat-message metadata: whether the message came from a troll account.
private[simul] case class Messadata(trollish: Boolean = false)

// Socket join request: the socket uid plus the optional authenticated user.
private[simul] case class Join(
  uid: String,
  user: Option[User])

// Chat line sent to a simul room.
// NOTE(review): the field is named `tourId` although this module is simul —
// presumably a copy from the tournament module; the value is the simul id.
private[simul] case class Talk(tourId: String, u: String, t: String, troll: Boolean)

// A single simul game has started for the given host.
private[simul] case class StartGame(game: Game, hostId: String)

// The whole simul has started; carries the first game created.
private[simul] case class StartSimul(firstGame: Game, hostId: String)

// Tells clients which game the host is currently focused on.
private[simul] case class HostIsOn(gameId: String)

// Asks clients to reload the simul view.
private[simul] case object Reload

// The simul was aborted before completion.
private[simul] case object Aborted

// Reply to Join: the enumerator feeding the socket plus the created member.
private[simul] case class Connected(enumerator: JsEnumerator, member: Member)

// Internal tick asking the socket actor to publish spectator counts.
private[simul] case object NotifyCrowd

// Snapshot of simuls used to render the simul list.
case class SimulTable(simuls: List[Simul])
| JimmyMow/lila | modules/simul/src/main/actorApi.scala | Scala | mit | 1,057 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.lang.management.ManagementFactory
import java.nio.ByteBuffer
import java.util.Properties
import scala.language.existentials
import org.apache.spark._
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.shuffle.ShuffleWriter
/**
* A ShuffleMapTask divides the elements of an RDD into multiple buckets (based on a partitioner
* specified in the ShuffleDependency).
*
* See [[org.apache.spark.scheduler.Task]] for more information.
*
* @param stageId id of the stage this task belongs to
* @param stageAttemptId attempt id of the stage this task belongs to
* @param taskBinary broadcast version of the RDD and the ShuffleDependency. Once deserialized,
* the type should be (RDD[_], ShuffleDependency[_, _, _]).
* @param partition partition of the RDD this task is associated with
* @param locs preferred task execution locations for locality scheduling
* @param localProperties copy of thread-local properties set by the user on the driver side.
* @param serializedTaskMetrics a `TaskMetrics` that is created and serialized on the driver side
* and sent to executor side.
*
* The parameters below are optional:
* @param jobId id of the job this task belongs to
* @param appId id of the app this task belongs to
* @param appAttemptId attempt id of the app this task belongs to
* @param isBarrier whether this task belongs to a barrier stage. Spark must launch all the tasks
* at the same time for a barrier stage.
*/
private[spark] class ShuffleMapTask(
    stageId: Int,
    stageAttemptId: Int,
    taskBinary: Broadcast[Array[Byte]],
    partition: Partition,
    @transient private var locs: Seq[TaskLocation],
    localProperties: Properties,
    serializedTaskMetrics: Array[Byte],
    jobId: Option[Int] = None,
    appId: Option[String] = None,
    appAttemptId: Option[String] = None,
    isBarrier: Boolean = false)
  extends Task[MapStatus](stageId, stageAttemptId, partition.index, localProperties,
    serializedTaskMetrics, jobId, appId, appAttemptId, isBarrier)
  with Logging {

  /** A constructor used only in test suites. This does not require passing in an RDD. */
  def this(partitionId: Int) {
    this(0, 0, null, new Partition { override def index: Int = 0 }, null, new Properties, null)
  }

  // Dedupe the preferred locations; @transient because they are only needed
  // on the driver for scheduling and must not be shipped with the task.
  @transient private val preferredLocs: Seq[TaskLocation] = {
    if (locs == null) Nil else locs.toSet.toSeq
  }

  override def runTask(context: TaskContext): MapStatus = {
    // Deserialize the RDD using the broadcast variable.
    val threadMXBean = ManagementFactory.getThreadMXBean
    // Bracket the deserialization with wall-clock and (when supported)
    // CPU-time measurements so its cost can be reported separately.
    val deserializeStartTimeNs = System.nanoTime()
    val deserializeStartCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
      threadMXBean.getCurrentThreadCpuTime
    } else 0L
    val ser = SparkEnv.get.closureSerializer.newInstance()
    val (rdd, dep) = ser.deserialize[(RDD[_], ShuffleDependency[_, _, _])](
      ByteBuffer.wrap(taskBinary.value), Thread.currentThread.getContextClassLoader)
    _executorDeserializeTimeNs = System.nanoTime() - deserializeStartTimeNs
    _executorDeserializeCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
      threadMXBean.getCurrentThreadCpuTime - deserializeStartCpuTime
    } else 0L

    // Delegate the actual shuffle write to the dependency's writer processor.
    dep.shuffleWriterProcessor.write(rdd, dep, partitionId, context, partition)
  }

  override def preferredLocations: Seq[TaskLocation] = preferredLocs

  override def toString: String = "ShuffleMapTask(%d, %d)".format(stageId, partitionId)
}
| WindCanDie/spark | core/src/main/scala/org/apache/spark/scheduler/ShuffleMapTask.scala | Scala | apache-2.0 | 4,435 |
object SeeSaw {

  /**
   * Walks x up toward 100 while accumulating y; the rate at which y grows
   * depends on how x compares to the threshold z. The postcondition bounds
   * the remaining distance to 100 by twice the result.
   */
  def s(x: BigInt, y: BigInt, z: BigInt): BigInt = {
    require(y >= 0)
    if (x >= 100) y                          // reached the target: return accumulator
    else if (x <= z) s(x + 1, y + 2, z)      // at or below z: slow x, fast y
    else if (x <= z + 9) s(x + 1, y + 3, z)  // just above z: slow x, faster y
    else s(x + 2, y + 1, z)                  // far above z: fast x, slow y
  } ensuring (res => (100 - x <= 2 * res))
}
package org.jetbrains.plugins.scala
package lang
package psi
package types
import com.intellij.psi._
import org.jetbrains.plugins.scala.editor.documentationProvider.ScalaDocumentationProvider
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.base.ScFieldId
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.{ScBindingPattern, ScReferencePattern}
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScParameter, ScTypeParam}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypedDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScObject, ScTypeDefinition}
import org.jetbrains.plugins.scala.lang.psi.types.api._
import org.jetbrains.plugins.scala.lang.psi.types.api.designator.{ScDesignatorType, ScProjectionType, ScThisType}
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.{ScMethodType, ScTypePolymorphicType}
import org.jetbrains.plugins.scala.lang.refactoring.util.ScTypeUtil
import scala.annotation.tailrec
import scala.collection.mutable.ArrayBuffer
object ScTypePresentation extends api.ScTypePresentation {
  override implicit lazy val typeSystem = ScalaTypeSystem

  // Renders a type to text. `nameFun` renders a named element's simple name;
  // `nameWithPointFun` renders it including the trailing separator. All the
  // work happens in mutually recursive local helpers over `innerTypeText`.
  protected override def typeText(t: ScType, nameFun: PsiNamedElement => String, nameWithPointFun: PsiNamedElement => String): String = {

    // Joins the rendered types with `sep`, wrapped in `start`/`end`.
    def typeSeqText(ts: Seq[ScType], start: String, sep: String, end: String, checkWildcard: Boolean = false): String = {
      ts.map(innerTypeText(_, needDotType = true, checkWildcard = checkWildcard)).mkString(start, sep, end)
    }

    // Appends ".type" for singleton types when requested.
    def typeTail(need: Boolean) = if (need) ".type" else ""

    // Renders an existential argument, printing only non-trivial bounds.
    def existentialArgWithBounds(wildcard: ScExistentialArgument, argText: String): String = {
      val lowerBoundText = if (wildcard.lower != Nothing) " >: " + innerTypeText(wildcard.lower) else ""
      val upperBoundText = if (wildcard.upper != Any) " <: " + innerTypeText(wildcard.upper) else ""
      s"$argText$lowerBoundText$upperBoundText"
    }

    // Renders a type parameter: variance sign, name, then bounds, view
    // bounds and context bounds, each after applying the substitutor.
    def typeParamText(param: ScTypeParam, subst: ScSubstitutor): String = {
      def typeText0(tp: ScType) = typeText(subst.subst(tp), nameFun, nameWithPointFun)
      val buffer = new StringBuilder
      if (param.isContravariant) buffer ++= "-"
      else if (param.isCovariant) buffer ++= "+"
      buffer ++= param.name
      // Implicit Nothing/Any bounds are omitted from the output.
      param.lowerBound foreach {
        case Nothing =>
        case tp: ScType => buffer ++= s" >: ${typeText0(tp)}"
      }
      param.upperBound foreach {
        case Any =>
        case tp: ScType => buffer ++= s" <: ${typeText0(tp)}"
      }
      param.viewBound foreach {
        (tp: ScType) => buffer ++= s" <% ${typeText0(tp)}"
      }
      param.contextBound foreach {
        (tp: ScType) =>
          buffer ++= s" : ${typeText0(ScTypeUtil.stripTypeArgs(subst.subst(tp)))}"
      }
      buffer.toString()
    }

    // Renders a projection type, picking the "a.b" form for stable paths and
    // the "A#B" form for true type projections.
    def projectionTypeText(projType: ScProjectionType, needDotType: Boolean): String = {
      val ScProjectionType(p, _, _) = projType
      val e = projType.actualElement
      val refName = e.name
      // Stable (singleton) elements may be followed by ".type".
      def checkIfStable(e: PsiElement): Boolean = {
        e match {
          case _: ScObject | _: ScBindingPattern | _: ScParameter | _: ScFieldId => true
          case _ => false
        }
      }
      val typeTailForProjection = typeTail(checkIfStable(e) && needDotType)
      // Static inner Java classes are addressed through the outer class name.
      def isInnerStaticJavaClassForParent(clazz: PsiClass): Boolean = {
        clazz.getLanguage != ScalaFileType.SCALA_LANGUAGE &&
          e.isInstanceOf[PsiModifierListOwner] &&
          e.asInstanceOf[PsiModifierListOwner].getModifierList.hasModifierProperty("static")
      }
      p match {
        case ScDesignatorType(pack: PsiPackage) =>
          nameWithPointFun(pack) + refName
        case ScDesignatorType(named) if checkIfStable(named) =>
          nameWithPointFun(named) + refName + typeTailForProjection
        case ScThisType(obj: ScObject) =>
          nameWithPointFun(obj) + refName + typeTailForProjection
        case ScThisType(td: ScTypeDefinition) if checkIfStable(e) =>
          s"${innerTypeText(p, needDotType = false)}.$refName$typeTailForProjection"
        case p: ScProjectionType if checkIfStable(p.actualElement) =>
          s"${projectionTypeText(p, needDotType = false)}.$refName$typeTailForProjection"
        case ScDesignatorType(clazz: PsiClass) if isInnerStaticJavaClassForParent(clazz) =>
          nameWithPointFun(clazz) + refName
        case ParameterizedType(ScDesignatorType(clazz: PsiClass), _) if isInnerStaticJavaClassForParent(clazz) =>
          nameWithPointFun(clazz) + refName
        case _: ScCompoundType | _: ScExistentialType =>
          s"(${innerTypeText(p)})#$refName"
        case _ =>
          // Fall back to "#" projection unless the prefix rendered as a
          // singleton ("....type"), in which case reuse the dotted form.
          val innerText = innerTypeText(p)
          if (innerText.endsWith(".type")) innerText.stripSuffix("type") + refName
          else s"$innerText#$refName"
      }
    }

    // Renders a compound (refinement) type: components joined by "with",
    // followed by a "{...}" refinement listing defs/vals/vars/type aliases.
    def compoundTypeText(compType: ScCompoundType): String = {
      val ScCompoundType(comps, signatureMap, typeMap) = compType
      def typeText0(tp: ScType) = innerTypeText(tp)
      // Function-type components need parentheses to stay unambiguous.
      val componentsText = if (comps.isEmpty) Nil else Seq(comps.map {
        case tp@FunctionType(_, _) => "(" + innerTypeText(tp) + ")"
        case tp => innerTypeText(tp)
      }.mkString(" with "))
      val declsTexts = (signatureMap ++ typeMap).flatMap {
        case (s: Signature, rt: ScType) if s.namedElement.isInstanceOf[ScFunction] =>
          val fun = s.namedElement.asInstanceOf[ScFunction]
          val funCopy =
            ScFunction.getCompoundCopy(s.substitutedTypes.map(_.map(_()).toList), s.typeParams.toList, rt, fun)
          val paramClauses = funCopy.paramClauses.clauses.map(_.parameters.map(param =>
            ScalaDocumentationProvider.parseParameter(param)(typeText0)).mkString("(", ", ", ")")).mkString("")
          // A return type equivalent to the compound itself prints as "this.type".
          val retType = if (!compType.equiv(rt)) typeText0(rt) else "this.type"
          val typeParams = if (funCopy.typeParameters.nonEmpty)
            funCopy.typeParameters.map(typeParamText(_, ScSubstitutor.empty)).mkString("[", ", ", "]")
          else ""
          Seq(s"def ${s.name}$typeParams$paramClauses: $retType")
        case (s: Signature, rt: ScType) if s.namedElement.isInstanceOf[ScTypedDefinition] =>
          // Only parameterless members render as val/var declarations.
          if (s.paramLength.sum > 0) Seq.empty
          else {
            s.namedElement match {
              case bp: ScBindingPattern =>
                val b = ScBindingPattern.getCompoundCopy(rt, bp)
                Seq((if (b.isVar) "var " else "val ") + b.name + " : " + typeText0(rt))
              case fi: ScFieldId =>
                val f = ScFieldId.getCompoundCopy(rt, fi)
                Seq((if (f.isVar) "var " else "val ") + f.name + " : " + typeText0(rt))
              case _ => Seq.empty
            }
          }
        case (s: String, sign: TypeAliasSignature) =>
          val ta = ScTypeAlias.getCompoundCopy(sign, sign.ta)
          val paramsText = if (ta.typeParameters.nonEmpty)
            ta.typeParameters.map(typeParamText(_, ScSubstitutor.empty)).mkString("[", ", ", "]")
          else ""
          val decl = s"type ${ta.name}$paramsText"
          // Alias definitions print "= rhs"; declarations print their bounds.
          val defnText = ta match {
            case tad: ScTypeAliasDefinition =>
              tad.aliasedType.map {
                case Nothing => ""
                case tpe => s" = ${typeText0(tpe)}"
              }.getOrElse("")
            case _ =>
              val (lowerBound, upperBound) = (ta.lowerBound.getOrNothing, ta.upperBound.getOrAny)
              val lowerText = if (lowerBound == Nothing) "" else s" >: ${typeText0(lowerBound)}"
              val upperText = if (upperBound == Any) "" else s" <: ${typeText0(upperBound)}"
              lowerText + upperText
          }
          Seq(decl + defnText)
        case _ => Seq.empty
      }
      val refinementText = if (declsTexts.isEmpty) Nil else Seq(declsTexts.mkString("{", "; ", "}"))
      (componentsText ++ refinementText).mkString(" ")
    }

    // Renders an existential type, collapsing single-use wildcards into "_"
    // type arguments where possible; otherwise emits a "forSome" clause.
    @tailrec
    def existentialTypeText(existType: ScExistentialType, checkWildcard: Boolean, stable: Boolean): String = {
      existType match {
        case ScExistentialType(q, wilds) if checkWildcard && wilds.length == 1 =>
          // "T forSome { type T }" used directly as a type argument is just "_".
          q match {
            case ScExistentialArgument(name, _, _, _) if name == wilds.head.name =>
              existentialArgWithBounds(wilds.head, "_")
            case _ =>
              existentialTypeText(existType, checkWildcard = false, stable)
          }
        case ex@ScExistentialType(ParameterizedType(des, typeArgs), wilds) =>
          // Wildcards referenced exactly once as a direct type argument can
          // be inlined as "_"; the rest must stay in the forSome clause.
          val wildcardsMap = ex.wildcardsMap()
          val replacingArgs = new ArrayBuffer[(ScType, ScExistentialArgument)]()
          val left = wilds.filter {
            case arg: ScExistentialArgument =>
              val seq = wildcardsMap.getOrElse(arg, Seq.empty)
              if (seq.length == 1 && typeArgs.exists(_ eq seq.head)) {
                replacingArgs += ((seq.head, arg))
                false
              } else true
          }
          val designatorText = innerTypeText(des)
          val typeArgsText = typeArgs.map {t =>
            replacingArgs.find(_._1 eq t) match {
              case Some((_, wildcard)) => existentialArgWithBounds(wildcard, "_")
              case _ => innerTypeText(t, needDotType = true, checkWildcard)
            }
          }.mkString("[", ", ", "]")
          val existentialArgsText = left.map(arg => existentialArgWithBounds(arg, "type " + arg.name)).mkString("{", "; ", "}")
          if (left.isEmpty) s"$designatorText$typeArgsText"
          else s"($designatorText$typeArgsText) forSome $existentialArgsText"
        case ScExistentialType(q, wilds) =>
          val wildsWithBounds = wilds.map(w => existentialArgWithBounds(w, "type " + w.name))
          wildsWithBounds.mkString(s"(${innerTypeText(q)}) forSome {", "; ", "}")
      }
    }

    // Central dispatch: renders any ScType by case analysis. Case order
    // matters — e.g. FunctionType must be tried before ParameterizedType.
    def innerTypeText(t: ScType, needDotType: Boolean = true, checkWildcard: Boolean = false): String = {
      t match {
        case namedType: NamedType => namedType.name
        case ScAbstractType(tpt, lower, upper) => tpt.name.capitalize + api.ScTypePresentation.ABSTRACT_TYPE_POSTFIX
        case f@FunctionType(ret, params) if t.isAliasType.isEmpty =>
          val projectOption = f.extractClass().map(_.getProject)
          val arrow = projectOption.map(ScalaPsiUtil.functionArrow).getOrElse("=>")
          typeSeqText(params, "(", ", ", s") $arrow ") + innerTypeText(ret)
        case ScThisType(clazz: ScTypeDefinition) =>
          clazz.name + ".this" + typeTail(needDotType)
        case ScThisType(clazz) =>
          "this" + typeTail(needDotType)
        case TupleType(Seq(tpe)) =>
          // A 1-tuple cannot be written "(T)", so spell the class name out.
          s"Tuple1[${innerTypeText(tpe)}]"
        case TupleType(comps) =>
          typeSeqText(comps, "(",", ",")")
        case ScDesignatorType(e@(_: ScObject | _: ScReferencePattern | _: ScParameter)) =>
          nameFun(e) + typeTail(needDotType)
        case ScDesignatorType(e) =>
          nameFun(e)
        // NOTE(review): the "!= null" guards here and below are redundant —
        // a typed pattern never matches null.
        case proj: ScProjectionType if proj != null =>
          projectionTypeText(proj, needDotType)
        case ParameterizedType(des, typeArgs) =>
          innerTypeText(des) + typeSeqText(typeArgs, "[", ", ", "]", checkWildcard = true)
        case JavaArrayType(argument) => s"Array[${innerTypeText(argument)}]"
        case UndefinedType(tpt, _) => "NotInfered" + tpt.name
        case c: ScCompoundType if c != null =>
          compoundTypeText(c)
        case ex: ScExistentialType if ex != null =>
          existentialTypeText(ex, checkWildcard, needDotType)
        case ScTypePolymorphicType(internalType, typeParameters) =>
          typeParameters.map(tp => {
            val lowerBound = if (tp.lowerType.v.equiv(Nothing)) "" else " >: " + tp.lowerType.v.toString
            val upperBound = if (tp.upperType.v.equiv(Any)) "" else " <: " + tp.upperType.v.toString
            tp.name + lowerBound + upperBound
          }).mkString("[", ", ", "] ") + internalType.toString
        case mt@ScMethodType(retType, params, isImplicit) =>
          // Method types render through their equivalent function type.
          innerTypeText(FunctionType(retType, params.map(_.paramType))(mt.project, mt.scope), needDotType)
        case _ => ""//todo
      }
    }

    innerTypeText(t)
  }
}
| whorbowicz/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/types/ScTypePresentation.scala | Scala | apache-2.0 | 12,220 |
package com.xantoria.flippy.condition
import java.net.InetAddress
import com.xantoria.flippy.utils.Net
object Networking {

  /**
   * Condition matching string values that parse as an IP address falling
   * inside the CIDR range `addr`/`prefix`. Non-string values never match.
   */
  class IPRange(val addr: InetAddress, val prefix: Int) extends Condition {
    override def appliesTo(value: Any): Boolean = value match {
      case candidate: String =>
        Net.addressInRange(InetAddress.getByName(candidate), addr, prefix)
      case _ => false
    }
  }

  object IPRange {
    /** Parses CIDR notation (e.g. "10.0.0.0/8") into an IPRange condition. */
    def apply(s: String): IPRange = {
      val (address, prefixLength) = Net.interpretRange(s)
      new IPRange(address, prefixLength)
    }
  }
}
| giftig/flippy | core/src/main/scala/com/xantoria/flippy/condition/Networking.scala | Scala | mit | 541 |
package edu.gemini.pot.sp.validator
import edu.gemini.pot.sp.{SPNodeKey, ISPNode, ISPProgram, ISPContainerNode}
import java.awt.{AWTEvent, EventQueue}
import scala.util.Try
object EventCache {

  // The AWT event during which the cache was last populated; a new event
  // invalidates everything.
  private var previousEvent: Option[AWTEvent] = None

  // Cached type trees keyed by program node and optional context key. Only
  // valid within a single event dispatch.
  private val cache: collection.mutable.Map[(ISPContainerNode, Option[SPNodeKey]), TypeTree] = collection.mutable.Map()

  // Memoizes `a` for the duration of the current AWT event; outside of event
  // dispatch (or if the current event cannot be read) it recomputes every time.
  def tree(prog: ISPContainerNode, contextKey: Option[SPNodeKey])(a: => TypeTree): TypeTree = {
    // Try guards against exceptions; the inner Option drops a null event.
    val currentEvent = Try(EventQueue.getCurrentEvent).toOption.flatMap(Option(_))
    currentEvent match {
      case None => a
      case someEvent =>
        if (someEvent != previousEvent) {
          previousEvent = someEvent
          cache.clear()
        }
        cache.getOrElseUpdate((prog, contextKey), a)
    }
  }
}
object Validator {

//  def canAdd(prog: ISPProgram, newNode: ISPNode, parent: ISPNode): Boolean =
//    canAdd(prog, Array(newNode), parent)
//
//  def canAdd(prog: ISPProgram, newNodes: Array[ISPNode], parent: ISPNode): Boolean = {
//    def deepCheck: Boolean = {
//      val tt = EventCache.tree(prog)(TypeTree(prog))
//      val ins = (tt/:newNodes) { (typeTree, newNode) =>
//        typeTree.insert(parent.getNodeKey, TypeTree(newNode).withoutKeys)
//      }
//      validate(ins).isRight
//    }
//
//    val parentType = NodeType.forNode(parent)
//    val shallowCheck = newNodes.forall { nn =>
//      parentType.cardinalityOf(NodeType.forNode(nn)).toInt > 0
//    }
//
//    shallowCheck && deepCheck
//  }

  // The 'context' node, if provided, is used to trim the type tree down to the
  // minimum required for an accurate validation. In particular, irrelevant
  // observations and groups are removed. Irrelevant in the sense that they do
  // not contain the context node and are not in the path from the root down to
  // the context node.
  def canAdd(prog: ISPProgram, newNodes: Array[ISPNode], parent: ISPNode, context: Option[ISPNode]): Boolean = {
    // Full structural check: insert the candidates (stripped of keys) under
    // the parent in a possibly trimmed type tree, then revalidate the tree.
    def deepCheck: Boolean = {
      val nodeKey = context.flatMap(c => Option(c.getNodeKey))
      val tt = EventCache.tree(prog, nodeKey) {
        nodeKey.flatMap(k => TypeTree.validationTree(prog, k)).getOrElse(TypeTree(prog))
      }
      val ins = (tt/:newNodes) { (typeTree, newNode) =>
        typeTree.insert(parent.getNodeKey, TypeTree(newNode).withoutKeys)
      }
      validate(ins).isRight
    }
    // Cheap pre-check: the parent's cardinality must admit at least one
    // child of each new node's type.
    val parentType = NodeType.forNode(parent)
    val shallowCheck = newNodes.forall { nn =>
      parentType.cardinalityOf(NodeType.forNode(nn)).toInt > 0
    }
    // The expensive deepCheck only runs if shallowCheck passes (&& short-circuits).
    shallowCheck && deepCheck
  }

  /** Validate the given type tree, which must be of a container node type. */
  def validate(tree: TypeTree): Either[Violation, Constraint] =
    for {
    // RCN: this is very expensive and is just a sanity check, so we're turning it off for now
    // _ <- validateKeysAreUnique(tree).right
      c <- Constraint.forType(tree.node).toRight(CardinalityViolation(tree.node, tree.key, null)).right // TODO: FIX
      c <- validate(c, tree).right
    } yield c

  /** Validate the given container node. */
  def validate(n: ISPContainerNode): Either[Violation, Constraint] =
    validate(EventCache.tree(n, None)(TypeTree(n)))

//  /** Check for duplicate keys. We can do this in a single pass. */
//  private def validateKeysAreUnique(tree: TypeTree): Either[Violation, Set[SPNodeKey]] = {
//
//    @tailrec // breadth-first accumulation of keys
//    def accum(ks: Set[SPNodeKey], q: Queue[TypeTree]): Either[Violation, Set[SPNodeKey]] =
//      q.headOption match {
//        case None => Right(ks)
//        case Some(TypeTree(_, None, children)) => accum(ks, q.tail ++ children)
//        case Some(t@TypeTree(_, Some(k), children)) =>
//          if (ks.contains(k))
//            Left(DuplicateKeyViolation(k))
//          else
//            accum(ks + k, q.tail ++ children)
//      }
//
//    accum(Set(), Queue(tree))
//
//  }

  // Per-event memoization of constraint validation, mirroring EventCache.
  // TODO: bind to the event
  private var previousEvent: Option[AWTEvent] = None
  private val cache = collection.mutable.Map[(Constraint, TypeTree), Either[Violation, Constraint]]()

  // Caches validate1 results for the current AWT event; recomputes outside
  // of event dispatch. `a` is lazy so a cache hit skips the work entirely.
  private def validate(c: Constraint, tree: TypeTree): Either[Violation, Constraint] = {
    lazy val a = validate1(c, tree)
    Option(Try(EventQueue.getCurrentEvent).toOption.orNull) match {
      case None => a
      case o =>
        if (o != previousEvent) {
          previousEvent = o
          cache.clear()
        }
        cache.getOrElseUpdate((c, tree), a)
    }
  }

  /** Validate the given tree with the specified constraint. */
  private def validate1(c: Constraint, tree: TypeTree): Either[Violation, Constraint] = {

    // N.B. these differ from standard folds in subtle ways. Use caution if you want to
    // replace with something from scalaz, for example.

    // Flat validation across a single ply: thread the constraint through the
    // children left-to-right, stopping at the first violation.
    def flat(c: Constraint, ns: List[TypeTree]): Either[Violation, Constraint] =
      ((Right(c): Either[Violation, Constraint]) /: ns) {
        case (v@Left(_), _) => v
        case (Right(c0), n) => c0(n.node, n.key)
      }

    // Deep validation: recurse into children that themselves have children,
    // using the child constraint derived from each node's type.
    def deep(c: Constraint, ns: List[TypeTree]): Either[Violation, Constraint] =
      ns match {
        case n :: ns0 if n.children.nonEmpty =>
          c.childConstraint(n.node) match {
            case Some(ConflictConstraint) => deep(c, ns0)
            case Some(cc) => validate(cc, n) match {
              case v@Left(_) => v
              // Constraints that "return" propagate their updated state to
              // subsequent siblings; others keep the original constraint.
              case Right(c0) => deep(if (c.returns) c0 else c, ns0)
            }
            // There is no child constraint because this kind of node can't have children.
            case None => Left(CardinalityViolation(n.node, n.key, c))
          }
        case _ :: ns0 => deep(c, ns0)
        case Nil => Right(c)
      }

    // Existence check: every required child type must be present at this ply.
    def existence(c: Constraint, ns: List[TypeTree]): Either[Violation, Constraint] =
      if (c.requiredTypes.forall(t => ns.exists(_.node.ct == t))) Right(c)
      else Left(CardinalityViolation(tree.node, tree.key, c))

    // Validate the children in a shallow manner first, to generate the new
    // constraints that we use to validate the children
    val ns = tree.children
    for {
      c1 <- flat(c, ns).right
      _ <- deep(c1, ns).right
      _ <- existence(c1, ns).right
    } yield c1
  }
}
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package router.resources
import config.AppConfig
import javax.inject.Inject
import play.api.libs.json.JsValue
import play.api.mvc.{Action, AnyContent, ControllerComponents}
import router.services.CharitableGivingService
import uk.gov.hmrc.auth.core.AuthConnector
import scala.concurrent.ExecutionContext
class CharitableGivingResource @Inject()(service: CharitableGivingService,
                                         val cc: ControllerComponents,
                                         val authConnector: AuthConnector)
                                        (implicit ec: ExecutionContext, appConfig: AppConfig) extends BaseResource(cc, authConnector) {

  // Forwards the incoming JSON body to the downstream service and maps the
  // Left/Right outcome onto the corresponding HTTP response.
  def put(param: Any*): Action[JsValue] = AuthAction.async(parse.json) {
    implicit request =>
      withJsonBody[JsValue] { body =>
        service.put(body).map(_.fold(error => buildErrorResponse(error), ok => buildResponse(ok)))
      }
  }

  // Retrieves charitable giving data and maps the outcome onto an HTTP response.
  def get(param: Any*): Action[AnyContent] =
    AuthAction.async {
      implicit request =>
        service.get().map(_.fold(error => buildErrorResponse(error), ok => buildResponse(ok)))
    }
}
| hmrc/self-assessment-api | app/router/resources/CharitableGivingResource.scala | Scala | apache-2.0 | 1,826 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.cassandra.utils
import java.nio.ByteBuffer
import java.util.concurrent._
import com.datastax.driver.core._
import org.locationtech.geomesa.cassandra.data.CassandraQueryPlan
import org.locationtech.geomesa.index.utils.AbstractBatchScan
import org.locationtech.geomesa.index.utils.ThreadManagement.{LowLevelScanner, ManagedScan, Timeout}
import org.locationtech.geomesa.utils.collection.CloseableIterator
import org.opengis.filter.Filter
// Multi-threaded scan over a set of Cassandra range statements; results are
// funneled through the bounded queue managed by AbstractBatchScan.
private class CassandraBatchScan(session: Session, ranges: Seq[Statement], threads: Int, buffer: Int)
    extends AbstractBatchScan[Statement, Row](ranges, threads, buffer, CassandraBatchScan.Sentinel) {

  // Executes one range statement and pushes every resulting row onto the
  // shared queue; `put` blocks when the queue is full, giving back-pressure
  // against slow consumers.
  override protected def scan(range: Statement, out: BlockingQueue[Row]): Unit = {
    val results = session.execute(range).iterator()
    while (results.hasNext) {
      out.put(results.next)
    }
  }
}
object CassandraBatchScan {

  // Marker row passed to AbstractBatchScan as its sentinel value; presumably
  // used to signal completion on the queue (confirm in AbstractBatchScan).
  // It is never treated as data, so every accessor can return null/-1.
  private val Sentinel: Row = new AbstractGettableData(ProtocolVersion.NEWEST_SUPPORTED) with Row {
    override def getIndexOf(name: String): Int = -1
    override def getColumnDefinitions: ColumnDefinitions = null
    override def getToken(i: Int): Token = null
    override def getToken(name: String): Token = null
    override def getPartitionKeyToken: Token = null
    override def getType(i: Int): DataType = null
    override def getValue(i: Int): ByteBuffer = null
    override def getName(i: Int): String = null
    override def getCodecRegistry: CodecRegistry = null
  }

  // Starts a batch scan over the ranges with the given parallelism. When a
  // timeout is configured, the scan is wrapped in a ManagedScan so it can be
  // cancelled by the thread-management machinery.
  def apply(
      plan: CassandraQueryPlan,
      session: Session,
      ranges: Seq[Statement],
      threads: Int,
      timeout: Option[Timeout]): CloseableIterator[Row] = {
    // 100000 is the buffered-row capacity handed to AbstractBatchScan.
    val scanner = new CassandraBatchScan(session, ranges, threads, 100000)
    timeout match {
      case None => scanner.start()
      case Some(t) => new ManagedScanIterator(t, new CassandraScanner(scanner), plan)
    }
  }

  // Managed wrapper exposing the feature type name and filter used for
  // timeout bookkeeping and logging.
  private class ManagedScanIterator(
      override val timeout: Timeout,
      override protected val underlying: CassandraScanner,
      plan: CassandraQueryPlan
    ) extends ManagedScan[Row] {
    override protected def typeName: String = plan.filter.index.sft.getTypeName
    override protected def filter: Option[Filter] = plan.filter.filter
  }

  // Adapts the batch scan to the LowLevelScanner interface required by ManagedScan.
  private class CassandraScanner(scanner: CassandraBatchScan) extends LowLevelScanner[Row] {
    override def iterator: Iterator[Row] = scanner.start()
    override def close(): Unit = scanner.close()
  }
}
| ccri/geomesa | geomesa-cassandra/geomesa-cassandra-datastore/src/main/scala/org/locationtech/geomesa/cassandra/utils/CassandraBatchScan.scala | Scala | apache-2.0 | 2,925 |
/*
* Copyright (c) 2012-2013 SnowPlow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.hadoop.hive
// Specs2
import org.specs2.mutable.Specification
// SnowPlow Utils
import com.snowplowanalytics.util.Tap._
// Deserializer
import test.{SnowPlowDeserializer, SnowPlowEvent, SnowPlowTest}
// Verifies that a CloudFront access-log line carrying a page-ping event
// (&e=pp) is deserialized into the expected SnowPlowEvent fields.
class PagePingTest extends Specification {

  // Toggle if tests are failing and you want to inspect the struct contents
  implicit val _DEBUG = false

  // Raw CloudFront access-log line for a page-ping event (&e=pp with scroll offsets)
  val row = "2012-05-27 11:35:53 DFW3 3343 99.116.172.58 GET d3gs014xn8p70.cloudfront.net /ice.png 200 http://www.psychicbazaar.com/2-tarot-cards/genre/all/type/all?p=5 Mozilla/5.0%20(Windows%20NT%206.1;%20WOW64;%20rv:12.0)%20Gecko/20100101%20Firefox/12.0 &e=pp&page=Async%20Test&pp_mix=28&pp_max=109&pp_miy=12&pp_may=22&tid=405338&vp=479x161&ds=584x193&p=web&tv=js-0.10.0&fp=119997519&aid=CFe23a&lang=en-GB&cs=UTF-8&tz=Europe%2FLondon&f_pdf=0&f_qt=1&f_realp=0&f_wma=1&f_dir=0&f_fla=1&f_java=1&f_gears=0&f_ag=0&res=2560x1336&cd=24&cookie=1&url=file%3A%2F%2F%2Fhome%2Falex%2FDevelopment%2FSnowPlow%2Fsnowplow%2F1-trackers%2Fjavascript-tracker%2Fexamples%2Fweb%2Fasync.html"

  // Expected field values after deserializing `row`
  val expected = new SnowPlowEvent().tap { e =>
    e.dt = "2012-05-27"
    e.collector_tm = "11:35:53"
    e.event = "page_ping" // Page ping
    e.event_vendor = "com.snowplowanalytics"
    e.txn_id = "405338"
    e.page_url = "http://www.psychicbazaar.com/2-tarot-cards/genre/all/type/all?p=5"
    e.page_title = "Async Test"
    e.pp_xoffset_min = 28
    e.pp_xoffset_max = 109
    e.pp_yoffset_min = 12
    e.pp_yoffset_max = 22
  }

  "The SnowPlow page ping row \\"%s\\"".format(row) should {

    val actual = SnowPlowDeserializer.deserialize(row)

    // General fields
    "have dt (Legacy Hive Date) = %s".format(expected.dt) in {
      actual.dt must_== expected.dt
    }
    "have collector_tm (Collector Time) = %s".format(expected.collector_tm) in {
      actual.collector_tm must_== expected.collector_tm
    }
    "have event (Event Type) = %s".format(expected.event) in {
      actual.event must_== expected.event
    }
    "have event_vendor (Event Vendor) = %s".format(expected.event_vendor) in {
      actual.event_vendor must_== expected.event_vendor
    }
    "have a valid (stringly-typed UUID) event_id" in {
      SnowPlowTest.stringlyTypedUuid(actual.event_id) must_== actual.event_id
    }
    "have txn_id (Transaction ID) = %s".format(expected.txn_id) in {
      actual.txn_id must_== expected.txn_id
    }

    // Page fields
    "have page_url (Page URL) = %s".format(expected.page_url) in {
      actual.page_url must_== expected.page_url
    }
    "have page_title (Page Title) = %s".format(expected.page_title) in {
      actual.page_title must_== expected.page_title
    }

    // The page ping fields (scroll offsets in pixels, from pp_mix/pp_max/pp_miy/pp_may)
    "have pp_xoffset_min (Page Ping Minimum X Offset) = %s".format(expected.pp_xoffset_min) in {
      actual.pp_xoffset_min must_== expected.pp_xoffset_min
    }
    "have pp_xoffset_max (Page Ping Maximum X Offset) = %s".format(expected.pp_xoffset_max) in {
      actual.pp_xoffset_max must_== expected.pp_xoffset_max
    }
    "have pp_yoffset_min (Page Ping Minimum Y Offset) = %s".format(expected.pp_yoffset_min) in {
      actual.pp_yoffset_min must_== expected.pp_yoffset_min
    }
    "have pp_yoffset_max (Page Ping Maximum Y Offset) = %s".format(expected.pp_yoffset_max) in {
      actual.pp_yoffset_max must_== expected.pp_yoffset_max
    }
  }
}
package org.scalafmt.util
import scalatags.Text
import scalatags.Text.all._
import org.scalafmt.config.ScalafmtConfig
import org.scalafmt.stats.TestStats
/** Generates the HTML heatmap/comparison reports for scalafmt's test suite. */
object Report {

  // Number of heat buckets: the legend bar shows visit counts 2^1 .. 2^MaxVisits
  // (i.e. 2 .. 256), and red() normalizes log2(visits) against this value.
  // NOTE: the original comment here read "2 ** 6", which did not match the value 8.
  val MaxVisits = 8

  /** Renders the heatmap page: one section per test, sorted by hottest token first. */
  def heatmap(results: Seq[Result]): String =
    reportBody(
      div(
        h1(id := "title", "Heatmap"),
        explanation,
        for (
          result <- results.sortBy(-_.maxVisitsOnSingleToken)
          if result.test.name != "Warmup"
        ) yield {
          div(
            h2(result.title),
            pre(
              fontFamily := "monospace",
              background := "#fff",
              fontSize := "16px",
              width := testWidth(result),
              code(
                heatmapBar(result.test.style),
                raw(result.obtainedHtml),
                span("\n" + ("‾" * result.test.style.maxColumn))
              )
            )
          )
        }
      )
    ).render

  /** Builds the legend bar: one colored span per power-of-two bucket, plus a rule line. */
  def heatmapBar(scalaStyle: ScalafmtConfig): Seq[Text.Modifier] =
    (1 to MaxVisits).map { i =>
      val v = Math.pow(2, i).toInt
      val color = red(v)
      span(
        background := s"rgb(256, $color, $color)",
        s" $v "
      )
    } :+ span("\n" + ("_" * scalaStyle.maxColumn) + "\n")

  /**
   * Maps a visit count to a green/blue channel value: higher visit counts
   * yield lower values, i.e. a more intense red.
   * NOTE(review): the result can reach 256, one above the CSS rgb() maximum
   * of 255 - browsers clamp it, but worth confirming intent.
   */
  def red(visits: Int): Int = {
    val v = log(visits, 2)
    val ratio = v / MaxVisits.toDouble
    val result = Math.min(256, 20 + 256 - (ratio * 256)).toInt
    result
  }

  /** Logarithm of x in the given base (change-of-base via natural log). */
  def log(x: Int, base: Int): Double = {
    Math.log(x) / Math.log(base)
  }

  /** Static intro text shown at the top of every report. */
  def explanation =
    div(
      p("""Formatting output from scalafmt's test suite.
          |The formatter uses Dijkstra's shortest path to determine the
          |formatting with the "cheapest" cost. The red regions are
          |tokens the formatter visits often.
        """.stripMargin),
      ul(
        li("Declaration arguments: bin packed"),
        li("Callsite arguments: one arg per line if overflowing")
      )
    )

  // Pixel width of the <pre> block: maxColumn characters at ~9.625px per monospace char
  def testWidth(result: Result) = result.test.style.maxColumn.toDouble * 9.625

  /** Wraps report content in the shared html/body skeleton. */
  def reportBody(xs: Text.Modifier*) =
    html(
      body(background := "#f9f9f9", xs)
    )

  /**
   * Renders the before/after comparison page for two test runs, sorted by the
   * largest change in visited states.
   */
  def compare(before: TestStats, after: TestStats): String =
    reportBody(
      div(
        h1(
          id := "title",
          s"Compare ${after.gitInfo.branch} and" +
            s" ${before.gitInfo.branch}" +
            s" (${before.shortCommit}...${after.shortCommit})"
        ),
        explanation,
        after
          .intersectResults(before)
          .sortBy { case (aft, bef) =>
            -Math.abs(aft.visitedStates - bef.visitedStates)
          }
          .map { case (aft, bef) =>
            div(
              h2(aft.test.fullName),
              table(
                tr(
                  th(""),
                  th("Before"),
                  th("After"),
                  th("Diff")
                ),
                tr(
                  td("Time (ms)"),
                  td(bef.timeMs),
                  td(aft.timeMs),
                  td(bef.timeMs - aft.timeMs)
                ),
                tr(
                  td("States"),
                  td(bef.visitedStates),
                  td(aft.visitedStates),
                  td(aft.visitedStates - bef.visitedStates)
                )
              ),
              pre(
                fontFamily := "monospace",
                background := "#fff",
                fontSize := "16px",
                width := testWidth(aft),
                code(
                  heatmapBar(aft.test.style),
                  raw(mkHtml(mergeResults(aft, bef)))
                )
              )
            )
          }
      )
    ).render

  // Pairs up tokens from both runs; visits becomes the per-token delta (after - before)
  def mergeResults(after: Result, before: Result): Seq[FormatOutput] =
    after.tokens.zip(before.tokens).map { case (aft, bef) =>
      FormatOutput(aft.token, aft.visits - bef.visits)
    }

  /** Renders a token diff as colored spans: red = more visits than before, green = fewer. */
  def mkHtml(output: Seq[FormatOutput]): String = {
    val sb = new StringBuilder()
    output.foreach { x =>
      import scalatags.Text.all._
      val redness = red(Math.abs(x.visits))
      val isRed =
        if (x.visits > 0) true
        else false
      val styleBackground =
        if (isRed) s"rgb(256, $redness, $redness)"
        else s"rgb($redness, 256, $redness)" // green
      val html = span(background := styleBackground, x.token).render
      sb.append(html)
    }
    sb.toString()
  }
}
| scalameta/scalafmt | scalafmt-tests/src/test/scala/org/scalafmt/util/Report.scala | Scala | apache-2.0 | 4,353 |
package io.buoyant.linkerd
import io.buoyant.config.ConfigInitializer
abstract class RequestAuthorizerInitializer extends ConfigInitializer
| linkerd/linkerd | linkerd/core/src/main/scala/io/buoyant/linkerd/RequestAuthorizerInitializer.scala | Scala | apache-2.0 | 142 |
package org.bitcoins.core.script.stack
import org.bitcoins.core.script.ScriptOperationFactory
import org.bitcoins.core.script.constant.ScriptOperation
/**
 * Marker trait for Bitcoin Script stack-manipulation operations
 * (opcodes 107-125 / 0x6b-0x7d).
 * Created by chris on 1/6/16.
 */
sealed trait StackOperation extends ScriptOperation

/**
 * Puts the input onto the top of the alt stack. Removes it from the main stack.
 */
case object OP_TOALTSTACK extends StackOperation {
  override val opCode: Int = 107 // 0x6b
}

/**
 * Puts the input onto the top of the main stack. Removes it from the alt stack.
 */
case object OP_FROMALTSTACK extends StackOperation {
  override val opCode: Int = 108 // 0x6c
}

/**
 * If the top stack value is not 0, duplicate it.
 */
case object OP_IFDUP extends StackOperation {
  override val opCode: Int = 115 // 0x73
}

/**
 * Puts the number of stack items onto the stack.
 */
case object OP_DEPTH extends StackOperation {
  override val opCode: Int = 116 // 0x74
}

/**
 * Removes the top stack item
 */
case object OP_DROP extends StackOperation {
  override val opCode: Int = 117 // 0x75
}

/**
 * Duplicates the top stack item.
 */
case object OP_DUP extends StackOperation {
  override val opCode: Int = 118 // 0x76
}

/**
 * Removes the second-to-top stack item.
 */
case object OP_NIP extends StackOperation {
  override val opCode: Int = 119 // 0x77
}

/**
 * Copies the second-to-top stack item to the top.
 */
case object OP_OVER extends StackOperation {
  override val opCode: Int = 120 // 0x78
}

/**
 * The item n back in the stack is copied to the top.
 */
case object OP_PICK extends StackOperation {
  override val opCode: Int = 121 // 0x79
}

/**
 * The item n back in the stack is moved to the top.
 */
case object OP_ROLL extends StackOperation {
  override val opCode: Int = 122 // 0x7a
}

/**
 * The top three items on the stack are rotated to the left.
 */
case object OP_ROT extends StackOperation {
  override val opCode: Int = 123 // 0x7b
}

/**
 * The top two items on the stack are swapped.
 */
case object OP_SWAP extends StackOperation {
  override val opCode: Int = 124 // 0x7c
}

/**
 * The item at the top of the stack is copied and inserted before the second-to-top item.
 */
case object OP_TUCK extends StackOperation {
  override val opCode: Int = 125 // 0x7d
}

/**
 * Removes the top two stack items.
 */
case object OP_2DROP extends StackOperation {
  override val opCode: Int = 109 // 0x6d
}

/**
 * Duplicates the top two stack items
 */
case object OP_2DUP extends StackOperation {
  override val opCode: Int = 110 // 0x6e
}

/**
 * Duplicates the top 3 stack items
 */
case object OP_3DUP extends StackOperation {
  override val opCode: Int = 111 // 0x6f
}

/**
 * Copies the pair of items two spaces back in the stack to the front.
 */
case object OP_2OVER extends StackOperation {
  override val opCode: Int = 112 // 0x70
}

/**
 * The fifth and sixth items back are moved to the top of the stack.
 */
case object OP_2ROT extends StackOperation {
  override val opCode: Int = 113 // 0x71
}

/**
 * Swaps the top two pairs of items.
 */
case object OP_2SWAP extends StackOperation {
  override val opCode: Int = 114 // 0x72
}
/**
 * Factory for looking up stack operations by opcode (via ScriptOperationFactory).
 * Each of the 19 stack operations appears exactly once.
 */
object StackOperation extends ScriptOperationFactory[StackOperation] {

  override def operations =
    Seq(
      OP_TOALTSTACK,
      OP_FROMALTSTACK,
      OP_IFDUP,
      // fixed: OP_DEPTH was previously listed twice
      OP_DEPTH,
      OP_DROP,
      OP_DUP,
      OP_NIP,
      OP_OVER,
      OP_PICK,
      OP_ROLL,
      OP_ROT,
      OP_SWAP,
      OP_TUCK,
      OP_2DROP,
      OP_2DUP,
      OP_3DUP,
      OP_2OVER,
      OP_2ROT,
      OP_2SWAP
    )
}
| bitcoin-s/bitcoin-s-core | core/src/main/scala/org/bitcoins/core/script/stack/StackOperations.scala | Scala | mit | 3,439 |
package org.atnos.benchmark
import org.scalameter.api._
import org.atnos.eff._
import EvalEffect._
import Eff._
import cats.implicits._
import cats.Eval
import org.scalameter.picklers.Implicits._
/**
 * ScalaMeter benchmark comparing a generic `send` (via a Member constraint)
 * against the directly specialized `delay` for the Eval effect.
 */
object EffBenchmark extends Bench.OfflineReport {

  // Effect stack with a single Eval effect
  type E = Fx.fx1[Eval]

  // Input sizes for the benchmark axis
  val sizes = Gen.enumeration("size")(10, 100, 1000, 10000, 100000)

  // Lists of 0 until size, one per benchmark size
  val lists = for {
    size <- sizes
  } yield (0 until size).toList

  // Generic variant: goes through the Member[Eval, R] instance before delegating to delay
  def simpleSend[R, V](v: =>V)(implicit m: Member[Eval, R]) =
    delay(v)

  performance of "send" in {
    // traverse the list, suspending each element through the generic send
    measure method "simple send" in {
      using(lists) in { list =>
        run(runEval(list.traverse(a => simpleSend[E, Int](a))))
      }
    }
    // same traversal, calling delay directly
    measure method "optimised send" in {
      using(lists) in { list =>
        run(runEval(list.traverse(a => delay[E, Int](a))))
      }
    }
  }
}
| etorreborre/eff-cats | src/test/scala/org/atnos/benchmark/EffBenchmark.scala | Scala | mit | 821 |
package ipfix.ie
import ipfix.ByteIterCounter
import ipfix.protocol.Field
/**
 * Registry of IPFIX information elements, keyed by their numeric element id.
 */
trait IEMap {

  /** All known information elements indexed by element id. */
  val ieByID: Map[Int, IE]

  /**
   * Decodes one field from the byte iterator using the information element
   * registered under `id`, consuming `length` bytes' worth of data.
   * Throws NoSuchElementException if `id` is not registered.
   */
  def load(id: Int, length: Int, byteIter: ByteIterCounter): Field = {
    val element = ieByID(id)
    element.load(byteIter, length)
  }
}
package glasskey.model.validation
/**
* Created by loande on 6/8/15.
*/
/**
 * The OAuth token claims recognized by this library, each with its wire name.
 */
sealed trait OAuthClaim {
  def name : String
}

/** The "scope" claim. */
case object Scope extends OAuthClaim {
  override def name: String = "scope"
}

/** The "client_id" claim. */
case object Client_Id extends OAuthClaim {
  override def name: String = "client_id"
}

/** The "org" claim. */
case object Org extends OAuthClaim {
  override def name: String = "org"
}

/** The "username" claim. */
case object UserName extends OAuthClaim {
  override def name: String = "username"
}

object OAuthClaim {
  /** Every supported claim, in declaration order. */
  val values = Seq(Scope, Client_Id, Org, UserName)

  /** Resolves a claim from its wire name; None when the name is not recognized. */
  def toValue(name: String) : Option[OAuthClaim] =
    values.collectFirst { case claim if claim.name == name => claim }
}
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.support.requisite
import io.truthencode.ddo.support.requisite.RequirementImplicits.progressionToReq
import io.truthencode.ddo.support.tree.TreeLike
/**
* Represents a required amount of points spent (Action, Survival, Epic Destiny Points)
*/
/**
 * Represents a required amount of points spent (Action, Survival, Epic Destiny Points)
 */
sealed trait ProgressionRequisite {
  self: Requisite =>
}

/** Progression requirement scoped to trees: pairs of (tree, points spent in that tree). */
sealed trait ProgressionInTreeRequisite extends ProgressionRequisite {
  self: Requisite =>
  // Per-tree point requirements
  def pointsInTree: Seq[(TreeLike, Int)]
}

// /**
//  * Represents total points spent
//  */
// sealed trait PointsSpentRequisite extends ProgressionRequisite {
//   self: Requisite =>
//   def pointsSpent: Seq[(Points, Int)]
// }

/**
 * Base Stackable trait implementation used to initialize when no other has been used.
 * @note
 *   we should be able to create just one of these instead of a Race / Class / Feat etc specific one
 */
trait ProgressionRequisiteImpl
  extends MustContainImpl[Requirement] with ProgressionInTreeRequisite {
  self: Requisite with RequisiteType =>
  // Default: no per-tree point requirements
  override def pointsInTree: Seq[(TreeLike, Int)] = Nil
}

/**
 * Stackable modification: converts the per-tree point requirements into
 * Requirements (via progressionToReq) and appends them to `allOf`.
 * `abstract override` means this must be mixed in over a concrete `allOf`.
 */
trait RequiresTreeProgression
  extends ProgressionInTreeRequisite with RequiresAllOf[Requirement] with Requisite {
  abstract override def allOf: Seq[Requirement] = super.allOf ++ {
    pointsInTree.collect(progressionToReq)
  }
}
| adarro/ddo-calc | subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/support/requisite/ProgressionRequisite.scala | Scala | apache-2.0 | 1,940 |
package org.katis.capnproto.runtime
import java.io.IOException
import java.nio.{ByteBuffer, ByteOrder}
import java.nio.channels.{ReadableByteChannel, WritableByteChannel}
import java.util
/**
 * Reads and writes the Cap'n Proto stream framing: a little-endian segment
 * table (segment count - 1, then one 32-bit word count per segment, padded to
 * a word boundary) followed by the segment payloads.
 */
object Serialize {

  // Allocates a little-endian buffer of the given size, with its mark set at position 0
  def makeByteBuffer(bytes: Int): ByteBuffer = {
    val result = ByteBuffer.allocate(bytes)
    result.order(ByteOrder.LITTLE_ENDIAN)
    result.mark()
    result
  }

  // Reads from the channel until the buffer is full; throws on premature EOF
  def fillBuffer(buffer: ByteBuffer, bc: ReadableByteChannel) {
    while (buffer.hasRemaining) {
      val r = bc.read(buffer)
      if (r < 0) {
        throw new IOException("premature EOF")
      }
    }
  }

  /** Reads a message from the channel using default reader options. */
  def read(bc: ReadableByteChannel): MessageReader = {
    read(bc, ReaderOptions.DEFAULT_READER_OPTIONS)
  }

  /**
   * Reads a complete message from the channel: parses the segment table,
   * enforces the traversal limit, then slices one ByteBuffer per segment out
   * of a single contiguous allocation.
   */
  def read(bc: ReadableByteChannel, options: ReaderOptions): MessageReader = {
    // First word holds (segment count - 1) and the size of segment 0
    val firstWord = makeByteBuffer(Constants.BYTES_PER_WORD)
    fillBuffer(firstWord, bc)
    val segmentCount = 1 + firstWord.getInt(0)
    var segment0Size = 0
    if (segmentCount > 0) {
      segment0Size = firstWord.getInt(4)
    }
    var totalWords = segment0Size
    if (segmentCount > 512) {
      throw new IOException("too many segments")
    }
    // Remaining segment sizes; the table is padded to an even number of ints
    val moreSizes = new util.ArrayList[Integer]()
    if (segmentCount > 1) {
      val moreSizesRaw = makeByteBuffer(4 * (segmentCount & ~1))
      fillBuffer(moreSizesRaw, bc)
      for (ii <- 0 until segmentCount - 1) {
        val size = moreSizesRaw.getInt(ii * 4)
        moreSizes.add(size)
        totalWords += size
      }
    }
    if (totalWords > options.traversalLimitInWords) {
      throw new DecodeException("Message size exceeds traversal limit.")
    }
    // Read every segment into one buffer, then hand out per-segment slices
    val allSegments = makeByteBuffer(totalWords * Constants.BYTES_PER_WORD)
    fillBuffer(allSegments, bc)
    val segmentSlices = Array.ofDim[ByteBuffer](segmentCount)
    allSegments.rewind()
    segmentSlices(0) = allSegments.slice()
    segmentSlices(0).limit(segment0Size * Constants.BYTES_PER_WORD)
    segmentSlices(0).order(ByteOrder.LITTLE_ENDIAN)
    var offset = segment0Size
    for (ii <- 1 until segmentCount) {
      allSegments.position(offset * Constants.BYTES_PER_WORD)
      segmentSlices(ii) = allSegments.slice()
      segmentSlices(ii).limit(moreSizes.get(ii - 1) * Constants.BYTES_PER_WORD)
      segmentSlices(ii).order(ByteOrder.LITTLE_ENDIAN)
      offset += moreSizes.get(ii - 1)
    }
    new MessageReader(segmentSlices, options)
  }

  /** Reads a message from an in-memory buffer using default reader options. */
  def read(bb: ByteBuffer): MessageReader = {
    read(bb, ReaderOptions.DEFAULT_READER_OPTIONS)
  }

  /**
   * Reads a message from an in-memory buffer. Advances the buffer's position
   * past the end of the message; the slices share the buffer's storage.
   */
  def read(bb: ByteBuffer, options: ReaderOptions): MessageReader = {
    bb.order(ByteOrder.LITTLE_ENDIAN)
    val segmentCount = 1 + bb.getInt
    if (segmentCount > 512) {
      throw new IOException("too many segments")
    }
    val segmentSlices = Array.ofDim[ByteBuffer](segmentCount)
    val segmentSizesBase = bb.position()
    val segmentSizesSize = segmentCount * 4
    // Segment data starts at the next word boundary after the size table
    val align = Constants.BYTES_PER_WORD - 1
    val segmentBase = (segmentSizesBase + segmentSizesSize + align) & ~align
    var totalWords = 0
    for (ii <- 0 until segmentCount) {
      val segmentSize = bb.getInt(segmentSizesBase + ii * 4)
      bb.position(segmentBase + totalWords * Constants.BYTES_PER_WORD)
      segmentSlices(ii) = bb.slice()
      segmentSlices(ii).limit(segmentSize * Constants.BYTES_PER_WORD)
      segmentSlices(ii).order(ByteOrder.LITTLE_ENDIAN)
      totalWords += segmentSize
    }
    bb.position(segmentBase + totalWords * Constants.BYTES_PER_WORD)
    if (totalWords > options.traversalLimitInWords) {
      throw new DecodeException("Message size exceeds traversal limit.")
    }
    new MessageReader(segmentSlices, options)
  }

  /** Size in words of the framed message: 4-byte count + size table (word-padded) + segments. */
  def computeSerializedSizeInWords(message: MessageBuilder): Long = {
    val segments = message.getSegmentsForOutput()
    var bytes: Long = 0
    bytes += 4
    bytes += segments.length * 4
    if (bytes % 8 != 0) {
      bytes += 4
    }
    for (i <- segments.indices) {
      val s = segments(i)
      bytes += s.limit()
    }
    bytes / Constants.BYTES_PER_WORD
  }

  /** Serializes the message (segment table + segments) into a single direct buffer, flipped for reading. */
  def writeToByteBuffer(message: MessageBuilder): ByteBuffer = {
    val segments = message.getSegmentsForOutput()
    val segmentsSize = segments.map(_.remaining()).sum
    // Table entries: count word + one size per segment, rounded up to an even number of ints
    val tableSize = (segments.length + 2) & (~1)
    val table = ByteBuffer.allocateDirect(4 * tableSize + segmentsSize)
    table.order(ByteOrder.LITTLE_ENDIAN)
    table.putInt(0, segments.length - 1)
    for (i <- segments.indices) {
      table.putInt(4 * (i + 1), segments(i).limit() / 8)
    }
    table.position(tableSize * 4)
    for (buffer <- segments) {
      while (buffer.hasRemaining) {
        table.put(buffer)
      }
    }
    table.flip()
    table
  }

  /** Writes the framed message (segment table, then each segment) to the channel. */
  def write(outputChannel: WritableByteChannel, message: MessageBuilder) {
    val segments = message.getSegmentsForOutput()
    val tableSize = (segments.length + 2) & (~1)
    val table = ByteBuffer.allocate(4 * tableSize)
    table.order(ByteOrder.LITTLE_ENDIAN)
    table.putInt(0, segments.length - 1)
    for (i <- segments.indices) {
      table.putInt(4 * (i + 1), segments(i).limit() / 8)
    }
    while (table.hasRemaining) {
      outputChannel.write(table)
    }
    for (buffer <- segments) {
      while (buffer.hasRemaining) {
        outputChannel.write(buffer)
      }
    }
  }
}
| katis/capnp-scala | runtime/shared/src/main/scala-2.11/org/katis/capnproto/runtime/Serialize.scala | Scala | mit | 5,242 |
/*
* Copyright 2013 Akiyoshi Sugiki, University of Tsukuba
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kumoi.impl.vm.kvm
import kumoi.shell.aaa._
import kumoi.shell.vm._
import kumoi.shell.vm.net._
import kumoi.shell.vm.store._
import kumoi.shell.pm._
import kumoi.core._
import kumoi.core.log._
import java.util.UUID
//import java.rmi.server._
import kumoi.core.rmi._
import java.io._
import scala.xml._
import scala.xml.transform._
import kumoi.impl.vm._
/**
* A KVM ColdVM implementation.
*
* @author Akiyoshi SUGIKI
*/
class KVMColdVM(auth: AAA) extends LibvirtColdVM(auth) {
  // Logger factory for this VM type.
  // NOTE(review): `logging` below is initialized as createLog() - the extra ()
  // applies to the value returned by Logging("KVM"); confirm against
  // kumoi.core.log.Logging that this is intended.
  def createLog = Logging("KVM")
  // Copies this VM definition (architecture and HVM flag) under a new AAA context
  def createClone(implicit auth: AAA) = {
    val clone = new KVMColdVM(auth)
    clone.parch = parch
    clone.phvm = phvm
    clone
  }
  // Disk cache mode used in the generated libvirt XML
  override def dcache() = "none"
  private val logging = createLog()
  // Default device names/buses for the generated domain definition (configurable)
  val defaultCdDev = Config("impl.vm.kvm.cdromDevice", "hdc")
  val defaultFdDev = Config("impl.vm.kvm.floppyDevice", "fda")
  val defaultDiskDev = Config("impl.vm.kvm.diskDevice", "hd")
  val defaultDiskBus = Config("impl.vm.kvm.diskBus", "ide")
  // Emulator binary paths: KVM-accelerated vs. plain QEMU (32/64-bit)
  private val qemuKvm = Config("impl.vm.qemu.emuKvm", "/usr/libexec/qemu-kvm")
  private val qemu32 = Config("impl.vm.qemu.emu32", "/usr/bin/qemu")
  private val qemu64 = Config("impl.vm.qemu.emu64", "/usr/bin/qemu-system-x86_64")
  // Disk format name -> (driver name, driver type) for libvirt
  private val fmap = Map("raw" -> ("qemu", "raw"), "qcow2" -> ("qemu", "qcow2"))
  protected def diskFormat(fmt: String) = fmap(fmt)
  val defaultDiskDrvName = "qemu"
  val defaultDiskDrvType = "raw"
  val defaultBridge = "br0"
  val defaultNat = "default"
  val domainType = "kvm"
  // KVM needs no separate bootloader (unlike Xen PV)
  val bootLoader = null
  // HVM uses the KVM-accelerated binary; otherwise pick QEMU by architecture
  def emulator = if (phvm) qemuKvm else {
    parch match {
      case "x86_64" => qemu64
      case _ => qemu32
    }
  }
  // Guest architecture and hardware-virtualization flag (mutated via the setters below)
  private var parch = "i686"
  private var phvm = true
  //def arch_=(a: String) { arch = (a, Anonymous) }
  // Setters take (value, AAA) tuples; the AAA component is currently unused here
  def arch_=(aa: (String, AAA)) { parch = aa._1 }
  def arch(implicit auth: AAA) = { parch }
  //def hvm_=(h: Boolean) { hvm = (h, Anonymous) }
  def hvm_=(ha: (Boolean, AAA)) { phvm = ha._1 }
  def hvm(implicit auth: AAA) = { phvm }
  //def direct_=(b: Boolean) { logging.warn("unsupported operation") }
  // Direct (passthrough) mode is not supported for KVM in this implementation
  def direct_=(ba: (Boolean, AAA)) { logging.warn("unsupported operation") }
  def direct(implicit auth: AAA) = false
  // <os> element: always an HVM guest; "pc" machine only for non-accelerated QEMU
  def xmlOS =
    <os>
      <type arch={parch} machine={ if (!phvm) "pc" else null }>hvm</type>
    </os>
  // <features>: PAE only for 32-bit guests; ACPI/APIC always on
  def xmlFeatures = <features>
      { if (parch == "i686") <pae /> else null }
      <acpi />
      <apic />
    </features>
  //val xmlLifecycle = null
  //val xmlClock = <clock sync="localtime" />
  //val xmlGraphics = null // <graphics type="vnc" port="-1"></graphics>
  // Serial console attached to a pseudo-terminal
  def xmlConsole =
    <console type="pty">
      <target port="0" />
    </console>
}
| axi-sugiki/kumoi | src/kumoi/impl/vm/kvm/KVMColdVM.scala | Scala | apache-2.0 | 3,211 |
package org.akoshterek.backgammon.train
import org.akoshterek.backgammon.Constants
import org.akoshterek.backgammon.board.{Board, PositionClass}
import org.akoshterek.backgammon.data.TrainDataLoader
import org.encog.engine.network.activation.{ActivationFunction, ActivationLinear}
import org.encog.mathutil.randomize.RangeRandomizer
import org.encog.ml.data.basic.{BasicMLData, BasicMLDataPair, BasicMLDataSet}
import org.encog.ml.data.{MLData, MLDataPair, MLDataSet}
import org.encog.ml.train.strategy.StopTrainingStrategy
import org.encog.ml.train.strategy.end.SimpleEarlyStoppingStrategy
import org.encog.neural.networks.BasicNetwork
import org.encog.neural.networks.layers.BasicLayer
import org.encog.neural.networks.training.propagation.Propagation
import org.encog.neural.networks.training.propagation.resilient.{RPROPType, ResilientPropagation}
import scala.util.Random
/**
* @author Alex
* date 22.09.2015.
*/
/**
 * Helpers for constructing the Encog network and its training algorithm.
 * @author Alex
 * date 22.09.2015.
 */
object NetworkTrainer {

  // Builds a 3-layer feed-forward network: linear input layer, one hidden
  // layer with the supplied activation, linear output layer.
  private def createNetwork(inputNeurons: Int, hiddenNeurons: Int, outputNeurons: Int, activationFunction: ActivationFunction): BasicNetwork = {
    val network: BasicNetwork = new BasicNetwork
    network.addLayer(new BasicLayer(new ActivationLinear, false, inputNeurons))
    network.addLayer(new BasicLayer(activationFunction, false, hiddenNeurons))
    network.addLayer(new BasicLayer(new ActivationLinear, false, outputNeurons))
    network.getStructure.finalizeStructure()
    new RangeRandomizer(-0.5, 0.5).randomize(network)
    // NOTE(review): Encog's reset() typically re-randomizes weights, which may
    // override the RangeRandomizer call above - confirm intended behavior
    network.reset()
    network
  }

  // Builds iRPROP+ training with two stopping strategies: improvement-based
  // stop (min delta 0.00001 over 100 cycles) and simple early stopping
  // (validated against the training set itself, strip length 10).
  private def createPropagation(holder: NetworkHolder, trainingSet: MLDataSet): Propagation = {
    val train: ResilientPropagation = new ResilientPropagation(holder.network, trainingSet)
    train.setRPROPType(RPROPType.iRPROPp)
    val stop: StopTrainingStrategy = new StopTrainingStrategy(0.00001, 100)
    train.addStrategy(new SimpleEarlyStoppingStrategy(trainingSet, 10))
    train.addStrategy(stop)
    // Resume from a previously serialized training state, if any
    if (holder.continuation != null) {
      train.resume(holder.continuation)
    }
    train
  }
}
/** Trains one neural network (contact/crashed/race) for the backgammon agent. */
class NetworkTrainer(val settings: AgentSettings, val networkType: PositionClass) {

  // Loads (or creates) the network, then runs the training loop to completion.
  def trainNetwork: NetworkHolder = {
    // NOTE(review): the message says "Exiting." but nothing exits or returns
    // here - training proceeds even when a trained network already exists;
    // likely should return the deserialized holder instead
    if (NetworkHolder.deserializeTrainedNetwork(settings, networkType).isDefined) {
      System.out.println("The network is already trained. Exiting.")
    }
    val trainingSet: MLDataSet = loadTrainingSet(getResourceName)
    val holder: NetworkHolder = createLoadNetwork
    val train: Propagation = NetworkTrainer.createPropagation(holder, trainingSet)
    trainingLoop(holder, train)
    holder
  }

  // Iterates until the training strategies stop; checkpoints every 10 epochs
  private def trainingLoop(holder: NetworkHolder, train: Propagation) {
    do {
      train.iteration()
      System.out.println("Epoch #" + holder.epoch + " Error:" + train.getError)
      holder.incEpoch()
      if (holder.epoch % 10 == 0) {
        holder.continuation = train.pause
        holder.serialize(settings)
      }
    } while (!train.isTrainingDone)
    train.finishTraining()
    holder.serializeTrainedNetwork(settings)
  }

  // Returns a previously checkpointed network if one exists, otherwise a fresh one
  private def createLoadNetwork: NetworkHolder = {
    val holder: NetworkHolder = new NetworkHolder(
      NetworkTrainer.createNetwork(getInputNeuronsCount, settings.hiddenNeuronCount, Constants.NUM_OUTPUTS, settings.activationFunction),
      networkType)
    val loadedHolder: NetworkHolder = NetworkHolder.deserialize(holder, settings).orNull
    if (loadedHolder != null) loadedHolder else holder
  }

  // Classpath resource with gzipped training data for this network type
  private def getResourceName: String = {
    String.format("/org/akoshterek/backgammon/data/%s-train-data.gz", PositionClass.getNetworkType(networkType))
  }

  // Input layer size depends on the board representation and network type
  private def getInputNeuronsCount: Int = {
    networkType match {
      case PositionClass.CLASS_CONTACT => settings.representation.contactInputsCount
      case PositionClass.CLASS_CRASHED => settings.representation.crashedInputsCount
      case PositionClass.CLASS_RACE => settings.representation.raceInputsCount
      case _ => throw new IllegalArgumentException("Unknown network type " + networkType)
    }
  }

  // Loads, shuffles and converts the training data into an Encog data set.
  // NOTE(review): calculateContactInputs is used for every network type, even
  // though getInputNeuronsCount branches per type - confirm this is intended
  // for race/crashed networks
  private def loadTrainingSet(resource: String): MLDataSet = {
    val data = Random.shuffle(TrainDataLoader.loadGzipResourceData(resource)).toArray
    val trainingSet: MLDataSet = new BasicMLDataSet
    var i = 0
    while (i < data.length) {
      val input: MLData = new BasicMLData(
        settings.representation.calculateContactInputs(Board.positionFromID(data(i).positionId)).map(_.toDouble))
      val ideal: MLData = new BasicMLData(data(i).reward)
      val pair: MLDataPair = new BasicMLDataPair(input, ideal)
      trainingSet.add(pair)
      i += 1
      if (i % 100000 == 0) println(s"loaded $i entries")
    }
    trainingSet
  }
}
package spoiwo.model
//TODO conversion utility
import org.apache.poi.ss.util.CellReference
object CellRange {
  /** Degenerate single-cell range at the origin, used as an absent-value marker. */
  val None: CellRange = CellRange((0, 0), (0, 0))
}

/**
 * A rectangular region of cells, expressed as inclusive (first, last) pairs
 * of zero-based row and column indexes. Both pairs must be non-decreasing.
 */
case class CellRange(rowRange: (Int, Int), columnRange: (Int, Int)) {
  require(rowRange._1 <= rowRange._2, "First row can't be greater than the last row!")
  require(columnRange._1 <= columnRange._2, "First column can't be greater than the last column!")
}
object RowRange {
  /** Degenerate single-row range at index 0, used as an absent-value marker. */
  val None: RowRange = RowRange(0, 0)

  /** Builds a range from an inclusive (first, last) index pair. */
  def apply(rowRange: (Int, Int)): RowRange = {
    val (first, last) = rowRange
    RowRange(first, last)
  }
}

/** An inclusive range of zero-based row indexes; first must not exceed last. */
case class RowRange(firstRowIndex: Int, lastRowIndex: Int) {
  require(firstRowIndex <= lastRowIndex, "First row index can't be greater than the last row index!")
}
object ColumnRange {
  /** Degenerate single-column range at "A", used as an absent-value marker. */
  val None: ColumnRange = ColumnRange("A", "A")

  /** Builds a range from an inclusive (first, last) column-name pair. */
  def apply(columnRange: (String, String)): ColumnRange = ColumnRange(columnRange._1, columnRange._2)

  /**
   * Builds a range from inclusive zero-based column indexes, converting them
   * to Excel-style column names (0 -> "A") via POI's CellReference.
   */
  def apply(firstColumnIndex: Int, lastColumnIndex: Int): ColumnRange = {
    require(firstColumnIndex <= lastColumnIndex, "First column index can't be greater that the last column index!")
    ColumnRange(
      CellReference.convertNumToColString(firstColumnIndex),
      CellReference.convertNumToColString(lastColumnIndex)
    )
  }
}

/** An inclusive range of Excel-style column names, e.g. "A" to "D". */
case class ColumnRange(firstColumnName: String, lastColumnName: String)
| norbert-radyk/spoiwo | core/src/main/scala/spoiwo/model/Range.scala | Scala | mit | 1,321 |
/*
* Twitter Korean Text - Scala library to process Korean text
*
* Copyright 2014 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.penguin.korean.v1.tokenizer
import com.twitter.penguin.korean.v1.tokenizer.KoreanTokenizer._
import com.twitter.penguin.korean.v1.util.KoreanPos._
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
// Tests for the POS-pattern trie builder (buildTrie) and the ParsedChunk
// scoring helpers. Pattern strings use one char per POS ('p' = NounPrefix,
// 'N' = Noun, 's' = Suffix, 'A' = Adverb, 'V' = Verb) followed by a
// repetition marker: 0 = optional, 1 = exactly one, * = zero or more,
// + = one or more.
class KoreanTokenizerTest extends FunSuite {
  test("buildTrie should build Trie correctly for initial optionals with final non-optionals") {
    // 0 -> 1
    assert(
      buildTrie("p0N1") === List(
        KoreanPosTrie(NounPrefix, List(
          KoreanPosTrie(Noun, List(), ending = true)
        ), ending = false),
        KoreanPosTrie(Noun, List(), ending = true)
      )
    )

    // * -> +  (selfNode encodes the repetition back-edge)
    assert(
      buildTrie("p*N+") === List(
        KoreanPosTrie(NounPrefix, List(
          selfNode,
          KoreanPosTrie(Noun, List(selfNode), ending = true)
        ), ending = false),
        KoreanPosTrie(Noun, List(selfNode), ending = true)
      )
    )
  }

  test("buildTrie should build Trie correctly for initial optionals with multiple non-optionals") {
    // 0 -> 0 -> 1
    assert(
      buildTrie("p0N0s1") === List(
        KoreanPosTrie(NounPrefix, List(
          KoreanPosTrie(Noun, List(
            KoreanPosTrie(Suffix, List(), ending = true)
          ), ending = false),
          KoreanPosTrie(Suffix, List(), ending = true)
        ), ending = false),
        KoreanPosTrie(Noun, List(
          KoreanPosTrie(Suffix, List(), ending = true)
        ), ending = false),
        KoreanPosTrie(Suffix, List(), ending = true)
      )
    )
  }

  test("buildTrie should build Trie correctly for initial non-optionals with final non-optionals") {
    // 1 -> +
    assert(
      buildTrie("p1N+") === List(
        KoreanPosTrie(NounPrefix, List(
          KoreanPosTrie(Noun, List(
            selfNode
          ), ending = true)
        ), ending = false)
      )
    )

    // + -> 1
    assert(
      buildTrie("N+s1") === List(
        KoreanPosTrie(Noun, List(
          selfNode,
          KoreanPosTrie(Suffix, List(), ending = true)
        ), ending = false)
      )
    )
  }

  test("buildTrie should build Trie correctly for initial non-optionals with final optionals") {
    // 1 -> *
    assert(
      buildTrie("p1N*") === List(
        KoreanPosTrie(NounPrefix, List(
          KoreanPosTrie(Noun, List(
            selfNode
          ), ending = true)
        ), ending = true)
      )
    )

    // + -> 0
    assert(
      buildTrie("N+s0") === List(
        KoreanPosTrie(Noun, List(
          selfNode,
          KoreanPosTrie(Suffix, List(), ending = true)
        ), ending = true)
      )
    )
  }

  test("buildTrie should build Trie correctly for initial non-optionals with multiple non-optionals") {
    // + -> + -> 0
    assert(
      buildTrie("A+V+A0") === List(
        KoreanPosTrie(Adverb, List(
          selfNode,
          KoreanPosTrie(Verb, List(
            selfNode,
            KoreanPosTrie(Adverb, List(), ending = true)
          ), ending = true)
        ), ending = false)
      )
    )
  }

  // Fixtures for the ParsedChunk scoring tests below
  val parsedChunk = ParsedChunk(
    List(KoreanToken("하", Noun), KoreanToken("하", Noun), KoreanToken("하", Noun)), 1
  )

  val parsedChunkWithTwoTokens = ParsedChunk(
    List(KoreanToken("하", Noun), KoreanToken("하", Noun)), 1
  )

  val parsedChunkWithUnknowns = ParsedChunk(
    List(KoreanToken("하하", Noun, unknown = true), KoreanToken("하", Noun, unknown = true), KoreanToken("하", Noun)), 1
  )

  val parsedChunkWithCommonNouns = ParsedChunk(
    List(KoreanToken("사람", Noun), KoreanToken("강아지", Noun)), 1
  )

  val parsedChunkWithVerbs = ParsedChunk(
    List(KoreanToken("사람", Noun), KoreanToken("하다", Verb)), 1
  )

  val parsedChunkWithExactMatch = ParsedChunk(
    List(KoreanToken("강아지", Noun)), 1
  )

  test("ParsedChunk should correctly count unknowns") {
    assert(
      parsedChunkWithUnknowns.countUnknowns === 2
    )

    assert(
      parsedChunk.countUnknowns === 0
    )
  }

  test("ParsedChunk should correctly count tokens") {
    assert(
      parsedChunk.countTokens === 3
    )

    assert(
      parsedChunkWithTwoTokens.countTokens === 2
    )
  }
test("ParsedChunk should correctly return unknown coverage") {
assert(
parsedChunkWithUnknowns.getUnknownCoverage === 3
)
assert(
parsedChunkWithTwoTokens.getUnknownCoverage === 0
)
}
test("ParsedChunk should get correct frequency score") {
assert(
parsedChunkWithTwoTokens.getFreqScore === 1.0f
)
assert(
parsedChunkWithCommonNouns.getFreqScore === 0.4544f
)
}
test("ParsedChunk should correctly count POSes") {
assert(
parsedChunk.countPos(Noun) === 3
)
assert(
parsedChunkWithVerbs.countPos(Noun) === 1
)
assert(
parsedChunkWithVerbs.countPos(Verb) === 1
)
}
test("ParsedChunk should correctly determine if the chunk is an exact match") {
assert(
parsedChunk.isExactMatch === 1
)
assert(
parsedChunkWithExactMatch.isExactMatch === 0
)
}
test("ParsedChunk should correctly determine if the chunk is all noun") {
assert(
parsedChunk.isAllNouns === 0
)
assert(
parsedChunkWithVerbs.isAllNouns === 1
)
}
test("tokenize should return expected tokens") {
assert(
tokenize("개루루야") ===
List(KoreanToken("개", Noun), KoreanToken("루루", Noun), KoreanToken("야", Josa))
)
assert(
tokenize("쵸귀여운") ===
List(KoreanToken("쵸", VerbPrefix), KoreanToken("귀여운", Adjective))
)
assert(
tokenize("이사람의") ===
List(KoreanToken("이", Determiner), KoreanToken("사람", Noun), KoreanToken("의", Josa))
)
assert(
tokenize("엄청작아서귀엽다") ===
List(
KoreanToken("엄청", Adverb),
KoreanToken("작아", Adjective), KoreanToken("서", Eomi),
KoreanToken("귀엽", Adjective), KoreanToken("다", Eomi))
)
assert(
tokenize("안녕하셨어요") ===
List(
KoreanToken("안녕하셨", Adjective), KoreanToken("어요", Eomi)
)
)
assert(
tokenize("쵸귀여운개루루") ===
List(
KoreanToken("쵸", VerbPrefix), KoreanToken("귀여운", Adjective),
KoreanToken("개", Noun), KoreanToken("루루", Noun)
)
)
assert(
tokenize("그리고") ===
List(KoreanToken("그리고", Conjunction))
)
assert(
tokenize("안녕ㅋㅋ") ===
List(KoreanToken("안녕", Noun), KoreanToken("ㅋㅋ", KoreanParticle))
)
}
test("tokenize should handle unknown nouns") {
assert(
tokenize("개컁컁아") ===
List(KoreanToken("개컁컁", Noun, unknown = true), KoreanToken("아", Josa))
)
assert(
tokenize("안녕하세요쿛툐캬님") ===
List(KoreanToken("안녕하세", Adjective), KoreanToken("요", Eomi),
KoreanToken("쿛툐캬", Noun, unknown = true), KoreanToken("님", Suffix))
)
}
test("tokenize should handle edge cases") {
assert(
tokenize("이승기가") ===
List(KoreanToken("이승기", Noun), KoreanToken("가", Josa))
)
}
test("tokenize should be able to tokenize long non-space-correctable ones") {
assert(
tokenize("훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌")
.map(_.text).mkString(" ") ===
"훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 " +
"훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌"
)
}
} | NamHosung/SE | src/test/scala/com/twitter/penguin/korean/v1/tokenizer/KoreanTokenizerTest.scala | Scala | apache-2.0 | 8,476 |
package akkaviz.server
import akka.http.scaladsl.marshalling.Marshaller
import akka.http.scaladsl.marshalling.Marshalling.WithFixedContentType
import akka.http.scaladsl.model.MediaTypes
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.scaladsl.{Flow, Source}
import akkaviz.config.Config
import akkaviz.persistence.{PersistenceSources, ReceivedRecord}
import akkaviz.rest
import com.datastax.driver.core.utils.UUIDs
import org.reactivestreams.Publisher
import scala.concurrent.ExecutionContext.Implicits.global
/** Mixin that exposes the persisted message archive over HTTP.
  *
  * Concrete implementations supply the persistence queries; this trait
  * contributes the GET routes under `/messages` that stream the results
  * back as JSON.
  */
trait ArchiveSupport {

  /** Whether archive persistence is configured; when false all archive routes reject. */
  def isArchiveEnabled: Boolean

  /** Stream of records exchanged with the actor identified by `ref`. */
  def receivedOf(ref: String): Source[ReceivedRecord, _]

  /** Stream of records exchanged between the actors `ref` and `ref2`. */
  def receivedBetween(ref: String, ref2: String): Source[ReceivedRecord, _]

  /** GET routes under `/messages`; rejects when the archive is disabled. */
  def archiveRouting: Route = get {
    pathPrefix("messages") {
      if (isArchiveEnabled) enabledArchiveRoutes
      else reject
    }
  }

  // Routes that are only reachable when the archive is enabled.
  private[this] def enabledArchiveRoutes: Route =
    path("of" / Segment) { ref =>
      AkkaHttpHelpers.completeAsJson(receivedOf(ref).via(toRestReceived))
    } ~
      path("between" / Segment / Segment) { (ref, ref2) =>
        AkkaHttpHelpers.completeAsJson(receivedBetween(ref, ref2).via(toRestReceived))
      }

  // Serializes a single rest.Received element as a JSON chunk.
  private[this] implicit val receivedRecordMarshaller: Marshaller[rest.Received, String] =
    Marshaller.strict { received =>
      WithFixedContentType(MediaTypes.`application/json`, () => upickle.default.write(received))
    }

  // Converts persistence-layer records into their REST representation.
  private[this] def toRestReceived = Flow[ReceivedRecord].map { rr =>
    rest.Received(rr.millis, rr.direction, rr.first, rr.second, rr.data)
  }
}
| blstream/akka-viz | monitoring/src/main/scala/akkaviz/server/ArchiveSupport.scala | Scala | mit | 1,652 |
package net.tyler.sopwith.ingame
import com.badlogic.gdx.Gdx
import com.badlogic.gdx.Input.Keys
import com.badlogic.gdx.Input.Peripheral
import com.badlogic.gdx.InputProcessor
import com.badlogic.gdx.utils.TimeUtils
import net.tyler.math.CartesianVector2f
import net.tyler.messaging.MessagingComponent
/**
* Class is responsible for handling input device polling and converting the
* results into state change messages to pass back to the game model.
*/
// Translates raw libgdx input (keyboard, touch, and - eventually -
// accelerometer) into in-game state-change messages sent through the
// MessagingComponent. The querier supplies the current plane state needed
// to compute the new acceleration values.
class InGameInputProcessor(private val querier: InGameStateQuerier,
private val messagingComponent: MessagingComponent) extends InputProcessor {
/**
* Called once per render loop to process any new input and convert it into
* game state messages.
*
* Accelerometer input is only available via polling (rather than event
* based).
*/
def processInput {
/*
* Control of the plane is either done via the accelerometer (when
* available) or via the keyboard when the accelerometer is not available.
*/
if (Gdx.input.isPeripheralAvailable(Peripheral.Accelerometer)) {
processAccelerometerInput(TimeUtils.millis)
}
}
// No-op stub: accelerometer-based control is not implemented yet.
private def processAccelerometerInput(t: Long) {
}
// Keyboard control: SPACE flips the plane's orientation; the arrow keys set
// a unit acceleration on one axis while preserving the other axis of the
// plane's current acceleration. Note that this handler returns true for
// every key, including unmapped ones (the event is always reported as
// consumed).
override def keyDown(keyCode: Int) = {
val t = TimeUtils.millis
val plane = querier.planeState(t)
keyCode match {
case Keys.SPACE => {
messagingComponent.send(new PlaneOrientationFlip(t))
}
case Keys.UP => {
val newAcc = new CartesianVector2f(plane.acceleration.x, 1f)
messagingComponent.send(new PlaneAccelerationChange(newAcc, t))
}
case Keys.DOWN => {
val newAcc = new CartesianVector2f(plane.acceleration.x, -1f)
messagingComponent.send(new PlaneAccelerationChange(newAcc, t))
}
case Keys.LEFT => {
val newAcc = new CartesianVector2f(-1f, plane.acceleration.y)
messagingComponent.send(new PlaneAccelerationChange(newAcc, t))
}
case Keys.RIGHT => {
val newAcc = new CartesianVector2f(1f, plane.acceleration.y)
messagingComponent.send(new PlaneAccelerationChange(newAcc, t))
}
case _ => {}
}
true
}
override def keyTyped(char: Char) = { false }
override def keyUp(keyCode: Int) = { false }
override def scrolled(amount: Int) = { false }
// Claims touch-down events without acting on them; the action happens on
// the corresponding touchUp.
override def touchDown(x: Int, y: Int, pointer: Int, button: Int) = true
/**
* Touch based input (includes mouse pointer input) is used to interact with
* the planes weapons.
*/
override def touchUp(x: Int, y: Int, pointer: Int, button: Int) = {
val t = TimeUtils.millis
/*
* TODO - DAT - Should add in check to make sure that the touch is near
* the plane. Blocked on collision code probably.
*/
if (querier.bombsRemaining(t) > 0) {
messagingComponent.send(new BombReleased(querier.planeState(t).position, t))
}
/*
* Flag that we have processed this message (not really necessary).
*/
true
}
override def touchDragged(x: Int, y: Int, pointer: Int) = { false }
override def mouseMoved(x: Int, y: Int) = { false }
} | DaveTCode/SopwithLibgdx | src/net/tyler/sopwith/ingame/InGameInputProcessor.scala | Scala | mit | 3,165 |
package com.monsanto.arch.kamon.prometheus.converter
import com.monsanto.arch.kamon.prometheus.metric.MetricFamily
/** The default post-processor which adds some help text to certain predefined metric families. Currently, this
* supports `kamon-akka`’s actor and dispatcher metrics.
*
* @author Daniel Solano Gómez
*/
/** The default post-processor which adds some help text to certain predefined metric families.
  * Currently, this supports `kamon-akka`'s actor and dispatcher metrics.
  *
  * @author Daniel Solano Gómez
  */
class DefaultPostprocessor extends Postprocessor {
  import DefaultPostprocessor._

  // Lookup table from metric family name to its help text. Families that do
  // not appear here pass through unchanged.
  private[this] val helpTexts: Map[String, String] = Map(
    "akka_actor_time_in_mailbox_nanoseconds" -> AkkaActorTimeInMailboxHelp,
    "akka_actor_processing_time_nanoseconds" -> AkkaActorProcessingTimeHelp,
    "akka_actor_mailbox_size" -> AkkaActorMailboxSizeHelp,
    "akka_actor_errors" -> AkkaActorErrorsHelp,
    "akka_fork_join_pool_dispatcher_parallelism" -> AkkaForkJoinPoolDispatcherParallelismHelp,
    "akka_fork_join_pool_dispatcher_pool_size" -> AkkaForkJoinPoolDispatcherPoolSizeHelp,
    "akka_fork_join_pool_dispatcher_active_threads" -> AkkaForkJoinPoolDispatcherActiveThreadsHelp,
    "akka_fork_join_pool_dispatcher_running_threads" -> AkkaForkJoinPoolDispatcherRunningThreadsHelp,
    "akka_fork_join_pool_dispatcher_queued_task_count" -> AkkaForkJoinPoolDispatcherQueuedTaskCountHelp,
    "akka_thread_pool_executor_dispatcher_core_pool_size" -> AkkaThreadPoolExecutorDispatcherCorePoolSizeHelp,
    "akka_thread_pool_executor_dispatcher_max_pool_size" -> AkkaThreadPoolExecutorDispatcherMaxPoolSizeHelp,
    "akka_thread_pool_executor_dispatcher_pool_size" -> AkkaThreadPoolExecutorDispatcherPoolSizeHelp,
    "akka_thread_pool_executor_dispatcher_active_threads" -> AkkaThreadPoolExecutorDispatcherActiveThreadsHelp,
    "akka_thread_pool_executor_dispatcher_processed_tasks" -> AkkaThreadPoolExecutorDispatcherProcessedTasksHelp,
    "akka_router_routing_time_nanoseconds" -> AkkaRouterRoutingTimeHelp,
    "akka_router_time_in_mailbox_nanoseconds" -> AkkaRouterTimeInMailboxHelp,
    "akka_router_processing_time_nanoseconds" -> AkkaRouterProcessingTimeHelp,
    "akka_router_errors" -> AkkaRouterErrorsHelp
  )

  /** Post-processes a metric family, attaching help text when one is known for its name. */
  override def apply(metricFamily: MetricFamily): MetricFamily =
    helpTexts.get(metricFamily.name).fold(metricFamily)(metricFamily.withHelp)
}
/** Help texts attached by [[DefaultPostprocessor]] to the `kamon-akka` metric families. */
object DefaultPostprocessor {
/** Help text for time in mailbox metrics for Akka actors. */
val AkkaActorTimeInMailboxHelp = "Tracks the time measured from the moment a message was enqueued into an actor’s " +
"mailbox until the moment it was dequeued for processing"
/** Help text for processing time metrics for Akka actors. */
val AkkaActorProcessingTimeHelp = "Tracks how long did it take for the actor to process every message."
/** Help text for mailbox size metrics for Akka actors. */
val AkkaActorMailboxSizeHelp = "Tracks the size of the actor’s mailbox"
/** Help text for errors metrics for Akka actors. */
val AkkaActorErrorsHelp = "Counts the number of failures the actor has experienced"
/** Help text for parallelism metrics for fork join pool dispatchers. */
val AkkaForkJoinPoolDispatcherParallelismHelp = "The desired parallelism value for the fork join pool, remains " +
"steady over time"
/** Help text for pool size metrics for fork join pool dispatchers. */
val AkkaForkJoinPoolDispatcherPoolSizeHelp = "The number of worker threads that have started but not yet " +
"terminated. This value will differ from that of akka_fork_join_pool_dispatcher_parallelism if threads are " +
"created to maintain parallelism when others are cooperatively blocked."
/** Help text for active thread metrics for fork join pool dispatchers. */
val AkkaForkJoinPoolDispatcherActiveThreadsHelp = "An estimate of the number of worker threads that are currently " +
"stealing or executing tasks"
/** Help text for running thread metrics for fork join pool dispatchers. */
val AkkaForkJoinPoolDispatcherRunningThreadsHelp = "An estimate of the number of worker threads that are not " +
"blocked waiting to join tasks or for other managed synchronisation"
/** Help text for queued task count metrics for fork join pool dispatchers. */
val AkkaForkJoinPoolDispatcherQueuedTaskCountHelp = "An approximation of the total number of tasks currently held " +
"in queues by worker threads (but not including tasks submitted to the pool that have not begun executing)"
/** Help text for core pool size metrics for thread pool executor dispatchers. */
val AkkaThreadPoolExecutorDispatcherCorePoolSizeHelp = "The core pool size of the executor"
/** Help text for max pool size metrics for thread pool executor dispatchers. */
val AkkaThreadPoolExecutorDispatcherMaxPoolSizeHelp = "The maximum number of threads allowed by the executor"
/** Help text for pool size metrics for thread pool executor dispatchers. */
val AkkaThreadPoolExecutorDispatcherPoolSizeHelp = "The current number of threads in the pool"
/** Help text for active thread metrics for thread pool executor dispatchers. */
val AkkaThreadPoolExecutorDispatcherActiveThreadsHelp = "The number of threads actively executing tasks"
/** Help text for processed task metrics for thread pool executor dispatchers. */
val AkkaThreadPoolExecutorDispatcherProcessedTasksHelp = "The number of processed tasks for the executor"
/** Help text for routing time metrics for Akka routers. */
val AkkaRouterRoutingTimeHelp = "Tracks how long it took the router to decide which routee will process a message"
/** Help text for time in mailbox metrics for Akka routers. */
val AkkaRouterTimeInMailboxHelp = "Tracks the combined time measured from the moment a message was enqueued into a " +
"routee‘s mailbox until the moment it was dequeued for processing."
/** Help text for processing time metrics for Akka routers. */
val AkkaRouterProcessingTimeHelp = "Tracks how long it took for routees to process incoming messages"
/** Help text for error metrics for Akka routers.
  *
  * Previously this duplicated [[AkkaRouterProcessingTimeHelp]] verbatim (an apparent
  * copy-paste slip); the text now describes the error count, mirroring
  * [[AkkaActorErrorsHelp]].
  */
val AkkaRouterErrorsHelp = "Counts the number of failures the routees have experienced"
}
| MonsantoCo/kamon-prometheus | library/src/main/scala/com/monsanto/arch/kamon/prometheus/converter/DefaultPostprocessor.scala | Scala | bsd-3-clause | 6,681 |
/* __ *\
** ________ ___ / / ___ __ ____ Scala.js Test Suite **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013-2015, LAMP/EPFL **
** __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \ http://scala-js.org/ **
** /____/\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\* */
package org.scalajs.testsuite.niobuffer
import java.nio._
import org.scalajs.testsuite.niobuffer.ByteBufferFactories._
// Test scaffolding for java.nio.DoubleBuffer: defines the buffer factories
// (direct allocation, array wrapping, byte-buffer views) that the concrete
// test classes below plug into BaseBufferTest.
abstract class DoubleBufferTest extends BaseBufferTest {
type Factory = BufferFactory.DoubleBufferFactory
// Factory backed by DoubleBuffer.allocate.
class AllocDoubleBufferFactory extends Factory {
def allocBuffer(capacity: Int): DoubleBuffer =
DoubleBuffer.allocate(capacity)
}
// Factory backed by DoubleBuffer.wrap over a Double array.
class WrappedDoubleBufferFactory extends Factory with BufferFactory.WrappedBufferFactory {
def baseWrap(array: Array[Double]): DoubleBuffer =
DoubleBuffer.wrap(array)
def baseWrap(array: Array[Double], offset: Int, length: Int): DoubleBuffer =
DoubleBuffer.wrap(array, offset, length)
}
// Factory that views a ByteBuffer as a DoubleBuffer with the given byte
// order; allocates 8 bytes per double.
class ByteBufferDoubleViewFactory(
byteBufferFactory: BufferFactory.ByteBufferFactory,
order: ByteOrder)
extends Factory with BufferFactory.ByteBufferViewFactory {
require(!byteBufferFactory.createsReadOnly)
def baseAllocBuffer(capacity: Int): DoubleBuffer =
byteBufferFactory.allocBuffer(capacity * 8).order(order).asDoubleBuffer()
}
}
// Concrete suites: direct allocation, wrapped arrays, read-only wrappers,
// and sliced buffers.
class AllocDoubleBufferTest extends DoubleBufferTest {
val factory: Factory = new AllocDoubleBufferFactory
}
class WrappedDoubleBufferTest extends DoubleBufferTest {
val factory: Factory = new WrappedDoubleBufferFactory
}
class WrappedDoubleReadOnlyBufferTest extends DoubleBufferTest {
val factory: Factory =
new WrappedDoubleBufferFactory with BufferFactory.ReadOnlyBufferFactory
}
class AllocDoubleSlicedBufferTest extends DoubleBufferTest {
val factory: Factory =
new AllocDoubleBufferFactory with BufferFactory.SlicedBufferFactory
}
// Double views of byte buffers: one concrete class per
// (byte-buffer factory, endianness) combination.
abstract class DoubleViewOfByteBufferTest(
byteBufferFactory: BufferFactory.ByteBufferFactory, order: ByteOrder)
extends DoubleBufferTest {
val factory: BufferFactory.DoubleBufferFactory =
new ByteBufferDoubleViewFactory(byteBufferFactory, order)
}
class DoubleViewOfAllocByteBufferBigEndianTest
extends DoubleViewOfByteBufferTest(new AllocByteBufferFactory, ByteOrder.BIG_ENDIAN)
class DoubleViewOfWrappedByteBufferBigEndianTest
extends DoubleViewOfByteBufferTest(new WrappedByteBufferFactory, ByteOrder.BIG_ENDIAN)
class DoubleViewOfSlicedAllocByteBufferBigEndianTest
extends DoubleViewOfByteBufferTest(new SlicedAllocByteBufferFactory, ByteOrder.BIG_ENDIAN)
class DoubleViewOfAllocByteBufferLittleEndianTest
extends DoubleViewOfByteBufferTest(new AllocByteBufferFactory, ByteOrder.LITTLE_ENDIAN)
class DoubleViewOfWrappedByteBufferLittleEndianTest
extends DoubleViewOfByteBufferTest(new WrappedByteBufferFactory, ByteOrder.LITTLE_ENDIAN)
class DoubleViewOfSlicedAllocByteBufferLittleEndianTest
extends DoubleViewOfByteBufferTest(new SlicedAllocByteBufferFactory, ByteOrder.LITTLE_ENDIAN)
// Read only Double views of byte buffers: same matrix as above, with the
// read-only factory mixin applied.
abstract class ReadOnlyDoubleViewOfByteBufferTest(
byteBufferFactory: BufferFactory.ByteBufferFactory, order: ByteOrder)
extends DoubleBufferTest {
val factory: BufferFactory.DoubleBufferFactory = {
new ByteBufferDoubleViewFactory(byteBufferFactory, order)
with BufferFactory.ReadOnlyBufferFactory
}
}
class ReadOnlyDoubleViewOfAllocByteBufferBigEndianTest
extends ReadOnlyDoubleViewOfByteBufferTest(new AllocByteBufferFactory, ByteOrder.BIG_ENDIAN)
class ReadOnlyDoubleViewOfWrappedByteBufferBigEndianTest
extends ReadOnlyDoubleViewOfByteBufferTest(new WrappedByteBufferFactory, ByteOrder.BIG_ENDIAN)
class ReadOnlyDoubleViewOfSlicedAllocByteBufferBigEndianTest
extends ReadOnlyDoubleViewOfByteBufferTest(new SlicedAllocByteBufferFactory, ByteOrder.BIG_ENDIAN)
class ReadOnlyDoubleViewOfAllocByteBufferLittleEndianTest
extends ReadOnlyDoubleViewOfByteBufferTest(new AllocByteBufferFactory, ByteOrder.LITTLE_ENDIAN)
class ReadOnlyDoubleViewOfWrappedByteBufferLittleEndianTest
extends ReadOnlyDoubleViewOfByteBufferTest(new WrappedByteBufferFactory, ByteOrder.LITTLE_ENDIAN)
class ReadOnlyDoubleViewOfSlicedAllocByteBufferLittleEndianTest
extends ReadOnlyDoubleViewOfByteBufferTest(new SlicedAllocByteBufferFactory, ByteOrder.LITTLE_ENDIAN)
| mdedetrich/scala-js | test-suite/shared/src/test/scala/org/scalajs/testsuite/niobuffer/DoubleBufferTest.scala | Scala | bsd-3-clause | 4,656 |
// NOTE(review): appears to be a minimal incremental-compilation fixture -
// callInline transitively inlines inlinedInt, so a change to inlinedInt's
// body must propagate to callers of callInline. Code intentionally left
// untouched; altering it would change what the fixture exercises.
object A {
inline def callInline: Int = inlinedInt
inline def inlinedInt: Int = 47
}
| lampepfl/dotty | sbt-test/source-dependencies/inline-rec/changes/A1.scala | Scala | apache-2.0 | 90 |
package dregex
import dregex.impl.UnicodeChar
import dregex.impl.RegexTree.CharRange
import dregex.impl.RangeOps
import org.scalatest.funsuite.AnyFunSuite
import scala.collection.immutable.Seq
// Tests for RangeOps.union: merging a sorted sequence of CharRanges into
// disjoint, coalesced ranges.
class RangeOpsTest extends AnyFunSuite {
// Implicit conveniences so fixtures can write ranges as plain Int pairs.
implicit def intToUnicodeCharConversion(int: Int) = UnicodeChar(int)
implicit def pairToRange(pair: (Int, Int)): CharRange = {
pair match {
case (from, to) => CharRange(UnicodeChar(from), UnicodeChar(to))
}
}
test("union") {
// Overlapping ((10,20)/(10,11)/(9,10)), duplicate and adjacent
// ((100,100)/(101,101)) inputs should coalesce into four ranges.
val ranges = Seq[CharRange]((10, 20), (9, 9), (25, 28), (3, 3), (10, 11), (9, 10), (100, 100), (101, 101))
// [CROSS-BUILD] Comparing codepoints and not UnicodeChars to help Scala < 2.13
val union = RangeOps.union(ranges.sortBy(x => (x.from.codePoint, x.to.codePoint)))
val expected: Seq[CharRange] = Seq((3, 3), (9, 20), (25, 28), (100, 101))
assertResult(expected)(union)
}
}
| marianobarrios/dregex | src/test/scala/dregex/RangeOpsTest.scala | Scala | bsd-2-clause | 888 |
package es.weso.reconciliator
import java.io.FileNotFoundException
import scala.io.Source
import scala.util.parsing.json.JSON
import org.apache.lucene.document.Document
import org.apache.lucene.document.Field
import org.apache.lucene.document.TextField
import org.apache.lucene.index.DirectoryReader
import org.apache.lucene.index.IndexDeletionPolicy
import org.apache.lucene.index.IndexWriter
import org.apache.lucene.index.IndexWriterConfig
import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy
import org.apache.lucene.queryparser.classic.QueryParser
import org.apache.lucene.search.IndexSearcher
import org.apache.lucene.search.Query
import org.apache.lucene.search.ScoreDoc
import org.apache.lucene.search.TopScoreDocCollector
import org.apache.lucene.store.RAMDirectory
import org.apache.lucene.util.Version
import org.slf4j.Logger
import org.slf4j.LoggerFactory
import es.weso.reconciliator.results.CountryResult
import org.apache.lucene.search.ScoreDoc
import org.apache.lucene.search.TopScoreDocCollector
import org.apache.lucene.search.FuzzyQuery
import org.apache.lucene.index.Term
// Resolves free-text country names to their canonical WebIndex entries by
// fuzzy-matching against an in-memory (RAM) Lucene index built from a JSON
// country file at construction time.
//
// @param path         location of the JSON country file
// @param relativePath when true, `path` is resolved against the classpath;
//                     otherwise it is used as a filesystem path
class CountryReconciliator(path: String, relativePath: Boolean) {
import CountryReconciliator._
// In-memory index and analyzer shared by indexing and querying.
private val idx: RAMDirectory = new RAMDirectory
private val analyzer: CountryAnalyzer = new CountryAnalyzer
// NOTE: Scala initializes class-body statements in declaration order, so this
// populates the index BEFORE `indexSearcher` below opens a reader on it.
// Moving these declarations around would break construction.
load(getFilePath(path, relativePath))
// Resolves the configured path to a concrete filesystem path.
// Throws IllegalArgumentException on null input and FileNotFoundException
// when a classpath resource cannot be found.
def getFilePath(path: String, relativePath: Boolean): String = {
if (path == null) {
throw new IllegalArgumentException("Path cannot be null")
}
if (relativePath) {
val resource = getClass.getClassLoader().getResource(path)
if (resource == null)
throw new FileNotFoundException("File especifies does not exist")
resource.getPath()
} else
path
}
// Reader over the populated index; see the ordering note above `load(...)`.
private val indexSearcher: IndexSearcher = new IndexSearcher(
DirectoryReader.open(idx))
// Parses the JSON country file and indexes one Lucene document per country:
// ISO-2/ISO-3 codes, the WebIndex name, and a "; "-joined list of
// alternative names (the field that queries actually search).
// NOTE(review): the IndexWriter is not closed if parsing throws midway -
// acceptable for a RAMDirectory, but worth confirming.
private def load(path: String) = {
val deletionPolicy: IndexDeletionPolicy = new KeepOnlyLastCommitDeletionPolicy
val indexConfiguration: IndexWriterConfig = new IndexWriterConfig(Version.LUCENE_40, new CountryAnalyzer)
indexConfiguration.setIndexDeletionPolicy(deletionPolicy)
val indexWriter: IndexWriter = new IndexWriter(idx, indexConfiguration)
val textSource = Source.fromFile(path, "UTF-8")
val textContent = textSource.mkString("")
textSource.close
val json = JSON.parseFull(textContent).getOrElse(
throw new IllegalArgumentException("File specified is not a json file"))
val map = json.asInstanceOf[Map[Any, Any]]
val countries = map.get("countries").getOrElse(
throw new IllegalArgumentException("Invalid format to json file"))
.asInstanceOf[List[Map[Any, Any]]]
countries.foreach(country => {
val doc: Document = new Document
val wiName = country.get("webIndexName").getOrElse(
throw new IllegalArgumentException).asInstanceOf[String]
val iso2 = country.get("iso-2").getOrElse(
throw new IllegalArgumentException).asInstanceOf[String]
val iso3 = country.get("iso-3").getOrElse(
throw new IllegalArgumentException).asInstanceOf[String]
val names = country.get("names").getOrElse(
throw new IllegalArgumentException).asInstanceOf[List[String]]
val iso2Field: Field = new TextField(CountryIso2CodeField, iso2,
Field.Store.YES)
val iso3Field: Field = new TextField(CountryIso3CodeField, iso3,
Field.Store.YES)
val wiNameField: Field = new TextField(CountryWinameField, wiName,
Field.Store.YES)
doc.add(iso2Field)
doc.add(iso3Field)
doc.add(wiNameField)
val altNames = new StringBuilder()
names.foreach(name => {
altNames.append(name).append("; ")
})
val altNamesField: Field = new TextField(CountryAltName, altNames.toString, Field.Store.YES)
doc.add(altNamesField)
logger.debug("Indexing country with name " + wiName)
indexWriter.addDocument(doc)
})
indexWriter.close
}
// Returns only the canonical WebIndex name of the best match, if any.
def searchCountry(name: String): Option[String] = {
val document = search(name)
document match {
case Some(doc) => Some(doc.getField(CountryWinameField).stringValue())
case None => None
}
}
// Returns the best match as a CountryResult (name plus both ISO codes).
def searchCountryResult(name: String): Option[CountryResult] = {
val document = search(name)
document match {
case Some(doc) =>
val wiName = doc.getField(CountryWinameField).stringValue()
val iso2Code = doc.getField(CountryIso2CodeField).stringValue()
val iso3Code = doc.getField(CountryIso3CodeField).stringValue()
Some(CountryResult(wiName, iso2Code, iso3Code))
case None => None
}
}
// Builds a Lucene query over the alt-names field: the input is split on
// hyphens/spaces, the parts are OR-ed together, and characters that would
// confuse the query parser ('.', '(', ')') are stripped. The trailing
// " OR " appended by the loop is removed before parsing.
private[reconciliator] def createQueryFromString(query: String): Query = {
val parser: QueryParser = new QueryParser(Version.LUCENE_40,
CountryAltName, analyzer)
logger.debug("QUERY: " + query)
val tempQuery = new StringBuilder()
query.replace("-", " ").split(" ").foreach(part => {
logger.debug("PART: " + part)
tempQuery.append(part).append(" OR ")
logger.debug("PART QUERY: " + tempQuery)
})
val strQuery = tempQuery.toString.replace(".", "")
.replace("(", "").replace(")", "")
logger.debug("Fuzzy Query: " + strQuery)
parser.parse {
if (strQuery.contains("OR"))
strQuery.substring(0, strQuery.lastIndexOf("OR"))
else
strQuery
}
}
// Runs the query and returns the single top-scoring document (MaxResults
// is 1), or None when nothing matched.
private[reconciliator] def search(name: String): Option[Document] = {
val collector = TopScoreDocCollector.create(MaxResults, true)
val query = createQueryFromString(name)
logger.debug(query.toString())
indexSearcher.search(query, collector)
logger.debug("Searching country from given string " + name)
val scoreDocs: Array[ScoreDoc] = collector.topDocs().scoreDocs
if (scoreDocs.size == 0) {
None
} else {
val doc: Document = indexSearcher.doc(scoreDocs.head.doc)
Some(doc)
}
}
}
// Companion holding the Lucene field names, the result limit, and the logger
// shared by CountryReconciliator.
object CountryReconciliator {
// Document field: canonical WebIndex country name (json key "webIndexName").
private val CountryWinameField = "wiName"
// Document field: two-letter ISO country code (json key "iso-2").
private val CountryIso2CodeField = "iso2"
// Document field: three-letter ISO country code (json key "iso-3").
private val CountryIso3CodeField = "iso3"
// Document field: "; "-separated alternative names; the field queries search.
private val CountryAltName = "altName"
// Only the single best-scoring document is ever retrieved.
private val MaxResults = 1
private val logger: Logger = LoggerFactory.getLogger(this.getClass())
} | weso/CountryReconciliator | src/main/scala/es/weso/reconciliator/CountryReconciliator.scala | Scala | apache-2.0 | 6,235 |
package json
import container._
import container.HttpResponse
import scala.concurrent.{Await, Future}
import scala.reflect.runtime.universe._
import net.liftweb.json._
import java.text.SimpleDateFormat
import scala.concurrent.duration._
import rest.TsConnection
import scala.util.Failure
import rest.TsConnectionDetail
import scala.Some
import rest.TsDispatch
import scala.util.Success
import rest.TsAddress
import rest.TsDocument
import rest.TsAccountInfo
// HttpResponse implementation backed by lift-json. Blocks on the underlying
// (statusCode, body) future and, on a 2xx, deserializes the body into the
// type requested via T (String, TsAccountInfo, List[TsDocument],
// TsConnectionDetail or List[TsConnection]). Any other T yields (true, None).
class LiftHttpResponse[T](res: Future[(Int, String)])(implicit man: Manifest[T]) extends HttpResponse[T](res: Future[(Int, String)]) {
// lift-json formats with the ISO-8601 date layout used by the API payloads.
implicit val formats = new DefaultFormats {
override def dateFormatter = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'")
}
// Returns (success, parsedValue). NOTE(review): blocks the calling thread
// indefinitely (Await.ready with Duration.Inf) until the HTTP future
// completes; 4xx/5xx and transport failures are reported via println and
// mapped to (false, None).
override def create: (Boolean, Any) = {
Await.ready(res, Duration.Inf)
res.value match {
case Some(res) => {
res match {
case Success(pair) => {
// Dispatch on the status-code class (2xx/4xx/5xx).
pair._1.toInt / 100 match {
case 4 => println("Received a 4xx: " + HttpStatusCodes.description(pair._1.toInt)); (false, None)
case 5 => println("Received a 5xx: " + HttpStatusCodes.description(pair._1.toInt)); (false, None)
case 2 => {
if (typeOf[T] =:= typeOf[String]) {
(true, pair._2)
}
else if (typeOf[T] =:= typeOf[TsAccountInfo]) {
//JsonParser.parse(json._2).extract[TsAccountInfo]
(true, buildTsAccountInfo(pair._2))
}
else if (typeOf[T] =:= typeOf[List[TsDocument]]) {
(true, buildDocuments(pair._2))
}
else if (typeOf[T] =:= typeOf[TsConnectionDetail]) {
(true, buildConnectionDetail(pair._2))
}
else if (typeOf[T] =:= typeOf[List[TsConnection]]) {
(true, buildConnections(pair._2))
}
else
(true, None)
}
}
}
case Failure(f) => println(f); (false, None)
}
}
case None => println("Empty response"); (false, None)
}
}
// endpoint: network/connections
// Extracts every "Connection" object from the payload; the identifiers list
// is left empty here (it is only populated by the detail endpoint).
def buildConnections(connections: String): List[TsConnection] = {
for {JField("Connection", doc) <- parse(connections)
JObject(o) <- doc
JField("ConnectionId", JString(a)) <- o
JField("ConnectionType", JString(b)) <- o
JField("FromCompanyAccountId", JString(c)) <- o
JField("CompanyName", JString(d)) <- o
JField("Country", JString(e)) <- o
JField("Email", JString(g)) <- o
} yield TsConnection(a, b, c, d, e, List(), g)
}
// endpoint: network/conections/{connectionId}
// Renames the PascalCase payload keys to the camelCase field names expected
// by TsConnectionDetail, then extracts.
def buildConnectionDetail(json: String): TsConnectionDetail = {
(parse(json) transform {
case JField("ConnectionType", x) => JField("connectionType", x)
case JField("ConnectionId", x) => JField("connectionId", x)
case JField("CompanyName", x) => JField("companyName", x)
case JField("Country", x) => JField("country", x)
case JField("Identifiers", x) => JField("identifiers", x)
case JField("DispatchChannelID", x) => JField("dispatchChannelId", x)
case JField("Email", x) => JField("email", x)
}).extract[TsConnectionDetail]
}
// endpoint: documents
// Extracts each "Document" together with its "LatestDispatch" sub-object.
// The document-level description/date/state fields are filled with
// placeholders ("" / new Date) - only the dispatch carries real values here.
def buildDocuments(documents: String): List[TsDocument] = {
for {JField("Document", doc) <- parse(documents)
JObject(o) <- doc
JField("DocumentId", JString(documentId)) <- o
JField("ID", JString(id)) <- o
JField("URI", JString(uri)) <- o
JField("LatestDispatch", JObject(d)) <- o
JField("DispatchId", JString(dispatchId)) <- d
JField("ObjectId", JString(objectId)) <- d
JField("Created", created) <- d
JField("SenderUserId", JString(senderUserId)) <- d
JField("DispatchState", JString(dispatchState)) <- d
JField("LastStateChange", lastStateChange) <- d
JField("ReceiverConnectionId", JString(receiverConnectionId)) <- d
JField("DispatchChannel", JString(dispatchChannel)) <- d
}
yield TsDocument(documentId, id, uri, "", new java.util.Date(), "",
TsDispatch(dispatchId,
objectId,
created.extract[java.util.Date],
senderUserId,
dispatchState,
lastStateChange.extract[java.util.Date],
receiverConnectionId,
dispatchChannel
))
}
// endpoint : account/info
// Pulls the scalar account fields, then reassembles the flattened
// "AddressLines" values back into TsAddress records. NOTE(review): the
// grouping assumes exactly five address-line values per address - confirm
// against the API payload.
def buildTsAccountInfo(accountInfo: String): TsAccountInfo = {
val p = (for {
JObject(json) <- parse(accountInfo)
JField("CompanyName", JString(companyName)) <- json
JField("Country", JString(country)) <- json
JField("CompanyAccountId", JString(companyAccountId)) <- json
JField("PublicProfile", JBool(publicProfile)) <- json
JField("Created", created) <- json
} yield (companyName, country, companyAccountId, publicProfile, created.extract[java.util.Date])).head
val b = for {JField("AddressLines", addr) <- parse(accountInfo)
JField("value", JString(v)) <- addr
} yield v
val addresses = for (i <- 0 until b.length by 5)
yield TsAddress(b(i), b(i + 1), b(i + 2), b(i + 3), b(i + 4))
TsAccountInfo(p._1,
p._2,
p._3,
Map(),
addresses.toList,
List(),
List(),
List(),
p._4,
p._5)
}
}
| anderssonfilip/tradeshift-external-api-scala | json.lift/src/main/scala/json/LiftHttpResponse.scala | Scala | mit | 5,448 |
package com.twitter.finagle.stats
import com.twitter.common.metrics.Metrics
import com.twitter.finagle.httpx._
import com.twitter.finagle.loadbalancer.perHostStats
import com.twitter.io.Buf
import com.twitter.util.Future
// HostStatsReceiver that delegates to a MetricsStatsReceiver built over the
// given (per-host) Metrics registry.
class MetricsHostStatsReceiver(val registry: Metrics) extends HostStatsReceiver {
// Zero-arg constructor uses the process-wide default host registry.
def this() = this(MetricsStatsReceiver.defaultHostRegistry)
private[this] val _self = new MetricsStatsReceiver(registry)
def self = _self
}
/** Serves per-host metrics as JSON on the admin endpoint.
  *
  * When per-host stats collection is disabled, a static JSON hint explaining
  * how to enable it is returned instead of the metrics payload.
  */
class HostMetricsExporter(val registry: Metrics)
  extends JsonExporter(registry)
  with HttpMuxHandler
{
  def this() = this(MetricsStatsReceiver.defaultHostRegistry)

  // Admin URI this handler is mounted on.
  val pattern = "/admin/per_host_metrics.json"

  override def apply(request: Request): Future[Response] =
    if (!perHostStats()) disabledResponse
    else super.apply(request)

  // Static JSON served while per-host stats are turned off.
  private[this] def disabledResponse: Future[Response] = {
    val response = Response()
    response.contentType = MediaType.Json
    response.content = Buf.Utf8(
      s"""{
| "com.twitter.finagle.loadbalancer.perHostStats": {
| "enabled": "false",
| "to enable": "run with -${perHostStats.name} and configure LoadBalancerFactory.HostStats"
| }
|}""".stripMargin)
    Future.value(response)
  }
}
| travisbrown/finagle | finagle-stats/src/main/scala/com/twitter/finagle/stats/MetricsHostStatsReceiver.scala | Scala | apache-2.0 | 1,201 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle
import org.apache.spark.annotation.{Experimental, Since}
import org.apache.spark.network.buffer.ManagedBuffer
import org.apache.spark.network.client.StreamCallbackWithID
import org.apache.spark.serializer.SerializerManager
import org.apache.spark.storage.BlockId
/**
 * :: Experimental ::
 * An experimental trait to allow Spark to migrate shuffle blocks.
 *
 * Implementations expose which shuffles are stored locally, accept incoming
 * migrated blocks as streams, and enumerate the blocks to send for a given
 * shuffle/map.
 */
@Experimental
@Since("3.1.0")
trait MigratableResolver {
  /**
   * Get the shuffle ids that are stored locally. Used for block migrations.
   */
  def getStoredShuffles(): Seq[ShuffleBlockInfo]

  /**
   * Write a provided shuffle block as a stream. Used for block migrations.
   *
   * @return the callback the network layer uses to feed the block's bytes.
   */
  def putShuffleBlockAsStream(blockId: BlockId, serializerManager: SerializerManager):
    StreamCallbackWithID

  /**
   * Get the blocks for migration for a particular shuffle and map.
   */
  def getMigrationBlocks(shuffleBlockInfo: ShuffleBlockInfo): List[(BlockId, ManagedBuffer)]
}
| witgo/spark | core/src/main/scala/org/apache/spark/shuffle/MigratableResolver.scala | Scala | apache-2.0 | 1,771 |
package com.twitter.monoloco.tricks
import java.util.concurrent.TimeUnit.MINUTES
import com.twitter.monoloco.CommandTrick
/**
 * Trick that takes the autossh tunnel down via monit for two minutes and then
 * brings it back up.
 */
class KillTunnel extends CommandTrick {
  // How long the trick stays active: 2 minutes.
  def duration() = (2L, MINUTES)

  // Command executed when the trick starts.
  def start() = "monit stop autossh"

  // Command executed when the trick ends.
  def stop() = "monit start autossh"
}
| capotej/monoloco | src/main/scala/com/twitter/monoloco/tricks/KillTunnel.scala | Scala | apache-2.0 | 297 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ml.dmlc.mxnet.spark
import ml.dmlc.mxnet.spark.io.PointIter
import ml.dmlc.mxnet.{FeedForward, NDArray, Shape}
import org.apache.spark.SparkContext
import org.apache.spark.mllib.linalg.Vector
/**
 * Wrapper for [[ml.dmlc.mxnet.Model]] used in Spark applications.
 *
 * The underlying [[FeedForward]] is not serializable, so it is marked
 * `@transient` and shipped as `serializedModel` bytes instead; it is rebuilt
 * lazily on first use after deserialization.
 *
 * @author Yizhi Liu
 */
class MXNetModel private[mxnet](
    @transient private var model: FeedForward,
    private val dimension: Shape,
    private val batchSize: Int,
    private val dataName: String = "data",
    private val labelName: String = "label") extends Serializable {
  require(model != null, "try to serialize an empty FeedForward model")
  require(dimension != null, "unknown dimension")
  require(batchSize > 0, s"invalid batchSize: $batchSize")
  // Byte form of the model; since `model` is @transient, this is what actually
  // travels when the MXNetModel instance is serialized.
  val serializedModel = model.serialize()
  /**
   * Get inner model [[FeedForward]]
   * @return the underlying model used to train & predict
   */
  def innerModel: FeedForward = {
    // After deserialization `model` is null; rebuild it from the bytes.
    // NOTE(review): this rebuild is unsynchronized -- concurrent first calls
    // could each deserialize; confirm single-threaded use per instance.
    if (model == null) {
      model = FeedForward.deserialize(serializedModel)
    }
    model
  }
  /**
   * Predict a bunch of Vectors
   * @param dataset points
   * @return predicted results.
   */
  def predict(dataset: Iterator[Vector]): Array[MXNDArray] = {
    val dt = new PointIter(dataset, dimension, batchSize, dataName, labelName)
    val results = innerModel.predict(dt)
    results.map(arr => MXNDArray(arr))
  }
  /** Predict a single point by wrapping it in a one-element iterator. */
  def predict(data: Vector): Array[MXNDArray] = {
    predict(Iterator(data))
  }
  /**
   * Save [[MXNetModel]] as object file
   * @param sc SparkContext
   * @param path output path
   */
  def save(sc: SparkContext, path: String): Unit = {
    // A single-element, single-partition RDD keeps the output to one file.
    sc.parallelize(Seq(this), 1).saveAsObjectFile(path)
  }
}
object MXNetModel {
  /**
   * Load a previously saved [[MXNetModel]] from an object file.
   * @param sc SparkContext
   * @param path input path
   * @return the deserialized [[MXNetModel]]
   */
  def load(sc: SparkContext, path: String): MXNetModel = {
    val stored = sc.objectFile[MXNetModel](path, 1)
    stored.first()
  }
}
| Mega-DatA-Lab/mxnet | scala-package/spark/src/main/scala/ml/dmlc/mxnet/spark/MXNetModel.scala | Scala | apache-2.0 | 2,735 |
package com.edropple.scratchpad.basketball.heatmapper
import com.edropple.scratchpad.basketball.domain.Universe
import java.io.File
import com.edropple.scratchpad.basketball.heatmapper.core.Heatmapper
import javax.imageio.ImageIO
import java.awt.image.RenderedImage
/**
 * Command-line entry point: builds heatmaps for a fixed set of players from
 * the season files under /tmp/12-13 and writes them as PNGs under /tmp.
 */
object EntryPoint {
//    val players = Seq("Jeff Adrien", "Bismack Biyombo", "DeSagana Diop", "Ben Gordon", "Brendan Haywood",
//                      "Gerald Henderson", "Michael Kidd-Gilchrist", "Josh McRoberts", "Byron Mullens",
//                      "Jannero Pargo", "Ramon Sessions", "Tyrus Thomas", "Kemba Walker", "Reggie Williams");

  // Players to render heatmaps for.
  val players = Seq("Dwight Howard", "Kobe Bryant", "Steve Nash", "Paul Pierce", "Kevin Garnett");

  def main(args: Array[String]): Unit = {
    val directory = new File("/tmp/12-13");
    val universe = new Universe(directory.listFiles().toSeq);
    // Second argument presumably toggles eFG weighting -- confirm against Heatmapper.
    val heatmapper = new Heatmapper(universe, false);
    val heatmapperEFG = new Heatmapper(universe, true);

    players.foreach(p => {
      val result = heatmapper.buildHeatmap(p);
      val resultEFG = heatmapperEFG.buildHeatmap(p);

      result.image match {
        case None => println("No image created. Probably an error.");
        // Deconstruct the Option instead of the original erased type-pattern
        // `t: Some[RenderedImage]`, which was unchecked at runtime.
        case Some(image) => {
          val file = new File("/tmp/SCALEDBG5_" + result.player.name.replace(" ", "_") + ".png");
          ImageIO.write(image, "png", file);
          // FIX: Predef.println has no format-style overload, so the original
          // three-argument call was auto-tupled and printed a tuple like
          // (Wrote heatmap for '%s' to '%s'.,Kobe Bryant,/tmp/...).
          // Format the message explicitly instead.
          println("Wrote heatmap for '%s' to '%s'.".format(result.player.name, file.getAbsolutePath));
        }
      }

      resultEFG.image match {
        case None => println("No image created. Probably an error.");
        case Some(image) => {
          val file = new File("/tmp/SCALEDBG5_" + resultEFG.player.name.replace(" ", "_") + "_EFG.png");
          ImageIO.write(image, "png", file);
          println("Wrote heatmap for '%s' to '%s'.".format(resultEFG.player.name, file.getAbsolutePath));
        }
      }
    });
  }
}
| eropple/basketball | heatmapper-runner/src/main/scala/com/edropple/scratchpad/basketball/heatmapper/EntryPoint.scala | Scala | bsd-3-clause | 2,094 |
import java.lang.reflect.Modifier
class Bar[T]
class Foo[T] {
  // Inner object member that a subclass is allowed to override.
  object A extends Bar[T]
}
class Baz[S] extends Foo[S] {
  // Overriding object: conforms to the parent member's Bar[S] and adds a
  // member of its own. Exercises `override object` -- presumably the scalac
  // regression this test file (t5676) was written for.
  override object A extends Bar[S] {
    def foo(): String = "ok"
  }
}
object Test {
  def main(a: Array[String]) {
    val b = new Baz[Any]
    // The overriding object's extra member must be callable.
    println(b.A.foo())
    // Check which of the generated classes end up with the FINAL flag.
    println(Modifier.isFinal(classOf[Baz[Any]].getModifiers()))
    println(Modifier.isFinal(Test.getClass.getModifiers()))
  }
}
| felixmulder/scala | test/files/run/t5676.scala | Scala | bsd-3-clause | 421 |
package dotty.tools
package dotc
package ast
import core._
import Types._, Contexts._
import Symbols._, Annotations._, Trees._, Symbols._
import Decorators._
import dotty.tools.dotc.transform.SymUtils._
import core.tasty.TreePickler.Hole
/** A map that applies three functions and a substitution together to a tree and
* makes sure they are coordinated so that the result is well-typed. The functions are
* @param typeMap A function from Type to Type that gets applied to the
* type of every tree node and to all locally defined symbols,
* followed by the substitution [substFrom := substTo].
* @param treeMap A transformer that translates all encountered subtrees in
* prefix traversal orders
* @param oldOwners Previous owners. If a top-level local symbol in the mapped tree
* has one of these as an owner, the owner is replaced by the corresponding
* symbol in `newOwners`.
* @param newOwners New owners, replacing previous owners.
* @param substFrom The symbols that need to be substituted.
* @param substTo The substitution targets.
*
* The reason the substitution is broken out from the rest of the type map is
* that all symbols have to be substituted at the same time. If we do not do this,
* we risk data races on named types. Example: Say we have `outer#1.inner#2` and we
* have two substitutions S1 = [outer#1 := outer#3], S2 = [inner#2 := inner#4] where
* hashtags precede symbol ids. If we do S1 first, we get outer#2.inner#3. If we then
* do S2 we get outer#2.inner#4. But that means that the named type outer#2.inner
* gets two different denotations in the same period. Hence, if -Yno-double-bindings is
* set, we would get a data race assertion error.
*/
class TreeTypeMap(
    val typeMap: Type => Type = IdentityTypeMap,
    val treeMap: tpd.Tree => tpd.Tree = identity _,
    val oldOwners: List[Symbol] = Nil,
    val newOwners: List[Symbol] = Nil,
    val substFrom: List[Symbol] = Nil,
    val substTo: List[Symbol] = Nil)(implicit ctx: Context) extends tpd.TreeMap {
  import tpd._

  /** If `sym` is one of `oldOwners`, replace by corresponding symbol in `newOwners` */
  def mapOwner(sym: Symbol): Symbol = sym.subst(oldOwners, newOwners)

  /** Replace occurrences of `This(oldOwner)` in some prefix of a type
   *  by the corresponding `This(newOwner)`.
   */
  private val mapOwnerThis = new TypeMap {
    // Walk `from`/`to` in lockstep; only class symbols contribute a
    // This-prefix substitution, other owners are skipped.
    private def mapPrefix(from: List[Symbol], to: List[Symbol], tp: Type): Type = from match {
      case Nil => tp
      case (cls: ClassSymbol) :: from1 => mapPrefix(from1, to.tail, tp.substThis(cls, to.head.thisType))
      case _ :: from1 => mapPrefix(from1, to.tail, tp)
    }
    def apply(tp: Type): Type = tp match {
      case tp: NamedType => tp.derivedSelect(mapPrefix(oldOwners, newOwners, tp.prefix))
      case _ => mapOver(tp)
    }
  }

  /** Apply `typeMap`, then the symbol substitution, then the owner-This
   *  rewrite to `tp`. This is the full type transformation of this map.
   */
  def mapType(tp: Type): Type =
    mapOwnerThis(typeMap(tp).substSym(substFrom, substTo))

  // Replace each changed member symbol in its (new) owner's scope, walking the
  // old and new statement lists in lockstep.
  // NOTE(review): not referenced from the visible code in this class.
  private def updateDecls(prevStats: List[Tree], newStats: List[Tree]): Unit =
    if (prevStats.isEmpty) assert(newStats.isEmpty)
    else {
      prevStats.head match {
        case pdef: MemberDef =>
          val prevSym = pdef.symbol
          val newSym = newStats.head.symbol
          val newCls = newSym.owner.asClass
          if (prevSym != newSym) newCls.replace(prevSym, newSym)
        case _ =>
      }
      updateDecls(prevStats.tail, newStats.tail)
    }

  // Applies `treeMap` first, then re-types the result with `mapType`,
  // threading extended maps (withMappedSyms) through every construct that
  // introduces local symbols so the output stays well-typed.
  override def transform(tree: tpd.Tree)(implicit ctx: Context): tpd.Tree = treeMap(tree) match {
    case impl @ Template(constr, parents, self, _) =>
      val tmap = withMappedSyms(localSyms(impl :: self :: Nil))
      cpy.Template(impl)(
        constr = tmap.transformSub(constr),
        parents = parents.mapconserve(transform),
        self = tmap.transformSub(self),
        body = impl.body mapconserve
          (tmap.transform(_)(ctx.withOwner(mapOwner(impl.symbol.owner))))
      ).withType(tmap.mapType(impl.tpe))
    case tree1 =>
      tree1.withType(mapType(tree1.tpe)) match {
        case id: Ident if tpd.needsSelect(id.tpe) =>
          ref(id.tpe.asInstanceOf[TermRef]).withSpan(id.span)
        case ddef @ DefDef(name, tparams, vparamss, tpt, _) =>
          // Type params are mapped first; value params see the extended map.
          val (tmap1, tparams1) = transformDefs(ddef.tparams)
          val (tmap2, vparamss1) = tmap1.transformVParamss(vparamss)
          val res = cpy.DefDef(ddef)(name, tparams1, vparamss1, tmap2.transform(tpt), tmap2.transform(ddef.rhs))
          res.symbol.setParamssFromDefs(tparams1, vparamss1)
          res.symbol.transformAnnotations {
            case ann: BodyAnnotation => ann.derivedAnnotation(transform(ann.tree))
            case ann => ann
          }
          res
        case tdef @ LambdaTypeTree(tparams, body) =>
          val (tmap1, tparams1) = transformDefs(tparams)
          cpy.LambdaTypeTree(tdef)(tparams1, tmap1.transform(body))
        case blk @ Block(stats, expr) =>
          val (tmap1, stats1) = transformDefs(stats)
          val expr1 = tmap1.transform(expr)
          cpy.Block(blk)(stats1, expr1)
        case inlined @ Inlined(call, bindings, expanded) =>
          val (tmap1, bindings1) = transformDefs(bindings)
          val expanded1 = tmap1.transform(expanded)
          cpy.Inlined(inlined)(call, bindings1, expanded1)
        case cdef @ CaseDef(pat, guard, rhs) =>
          // Pattern-bound variables are in scope in the guard and the body.
          val tmap = withMappedSyms(patVars(pat))
          val pat1 = tmap.transform(pat)
          val guard1 = tmap.transform(guard)
          val rhs1 = tmap.transform(rhs)
          cpy.CaseDef(cdef)(pat1, guard1, rhs1)
        case labeled @ Labeled(bind, expr) =>
          val tmap = withMappedSyms(bind.symbol :: Nil)
          val bind1 = tmap.transformSub(bind)
          val expr1 = tmap.transform(expr)
          cpy.Labeled(labeled)(bind1, expr1)
        case Hole(isTermHole, n, args) =>
          Hole(isTermHole, n, args.mapConserve(transform)).withSpan(tree.span).withType(mapType(tree.tpe))
        case tree1 =>
          super.transform(tree1)
      }
  }

  override def transformStats(trees: List[tpd.Tree])(implicit ctx: Context): List[Tree] =
    transformDefs(trees)._2

  /** Transform a list of definitions, returning the map extended with the
   *  mapped local symbols together with the transformed trees.
   */
  def transformDefs[TT <: tpd.Tree](trees: List[TT])(implicit ctx: Context): (TreeTypeMap, List[TT]) = {
    val tmap = withMappedSyms(tpd.localSyms(trees))
    (tmap, tmap.transformSub(trees))
  }

  // Maps each value-parameter clause in turn, threading the extended map so
  // later clauses see earlier parameters' substitutions.
  private def transformVParamss(vparamss: List[List[ValDef]]): (TreeTypeMap, List[List[ValDef]]) = vparamss match {
    case vparams :: rest =>
      val (tmap1, vparams1) = transformDefs(vparams)
      val (tmap2, vparamss2) = tmap1.transformVParamss(rest)
      (tmap2, vparams1 :: vparamss2)
    case nil =>
      (this, vparamss)
  }

  /** Transform `tree`, preserving its static tree type. */
  def apply[ThisTree <: tpd.Tree](tree: ThisTree): ThisTree = transform(tree).asInstanceOf[ThisTree]

  /** Transform the tree inside an annotation. */
  def apply(annot: Annotation): Annotation = annot.derivedAnnotation(apply(annot.tree))

  /** The current tree map composed with a substitution [from -> to] */
  def withSubstitution(from: List[Symbol], to: List[Symbol]): TreeTypeMap =
    if (from eq to) this
    else {
      // assert that substitution stays idempotent, assuming its parts are
      // TODO: It might be better to cater for the asserted-away conditions, by
      // setting up a proper substitution abstraction with a compose operator that
      // guarantees idempotence. But this might be too inefficient in some cases.
      // We'll cross that bridge when we need to.
      assert(!from.exists(substTo contains _))
      assert(!to.exists(substFrom contains _))
      assert(!from.exists(newOwners contains _))
      assert(!to.exists(oldOwners contains _))
      new TreeTypeMap(
        typeMap,
        treeMap,
        from ++ oldOwners,
        to ++ newOwners,
        from ++ substFrom,
        to ++ substTo)
    }

  /** Apply `typeMap` and `ownerMap` to given symbols `syms`
   *  and return a treemap that contains the substitution
   *  between original and mapped symbols.
   */
  def withMappedSyms(syms: List[Symbol], mapAlways: Boolean = false): TreeTypeMap =
    withMappedSyms(syms, ctx.mapSymbols(syms, this, mapAlways))

  /** The tree map with the substitution between originals `syms`
   *  and mapped symbols `mapped`. Also goes into mapped classes
   *  and substitutes their declarations.
   */
  def withMappedSyms(syms: List[Symbol], mapped: List[Symbol]): TreeTypeMap = {
    val symsChanged = syms ne mapped
    val substMap = withSubstitution(syms, mapped)
    // For each mapped class, also map its declarations and record the
    // replacements in the class's scope.
    val fullMap = mapped.filter(_.isClass).foldLeft(substMap) { (tmap, cls) =>
      val origDcls = cls.info.decls.toList
      val mappedDcls = ctx.mapSymbols(origDcls, tmap)
      val tmap1 = tmap.withMappedSyms(origDcls, mappedDcls)
      if (symsChanged)
        origDcls.lazyZip(mappedDcls).foreach(cls.asClass.replace)
      tmap1
    }
    if (symsChanged || (fullMap eq substMap)) fullMap
    else withMappedSyms(syms, mapAlways = true)
  }
}
| som-snytt/dotty | compiler/src/dotty/tools/dotc/ast/TreeTypeMap.scala | Scala | apache-2.0 | 8,927 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.mllib
// $example on$
import org.apache.spark.mllib.tree.DecisionTree
import org.apache.spark.mllib.tree.model.DecisionTreeModel
import org.apache.spark.mllib.util.MLUtils
// $example off$
import org.apache.spark.{SparkConf, SparkContext}
/** Example: train, evaluate, save and re-load a decision-tree regressor with MLlib. */
object DecisionTreeRegressionExample {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("DecisionTreeRegressionExample")
    val sc = new SparkContext(conf)

    // $example on$
    // Load and parse the data file.
    val data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
    // Split the data into training and test sets (30% held out for testing).
    val Array(trainingData, testData) = data.randomSplit(Array(0.7, 0.3))

    // Train a DecisionTree model.
    // Empty categoricalFeaturesInfo indicates all features are continuous.
    val categoricalFeaturesInfo = Map[Int, Int]()
    val impurity = "variance"
    val maxDepth = 5
    val maxBins = 32

    val model = DecisionTree.trainRegressor(trainingData, categoricalFeaturesInfo, impurity,
      maxDepth, maxBins)

    // Evaluate model on test instances and compute test error.
    val labelsAndPredictions = testData.map { point =>
      (point.label, model.predict(point.features))
    }
    val testMSE = labelsAndPredictions.map { case (v, p) => math.pow(v - p, 2) }.mean()
    println("Test Mean Squared Error = " + testMSE)
    println("Learned regression tree model:\\n" + model.toDebugString)

    // Save and load model
    model.save(sc, "target/tmp/myDecisionTreeRegressionModel")
    val sameModel = DecisionTreeModel.load(sc, "target/tmp/myDecisionTreeRegressionModel")
    // $example off$
  }
}
// scalastyle:on println
| chenc10/Spark-PAF | examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRegressionExample.scala | Scala | apache-2.0 | 2,626 |
package enumeratum.values
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
import EnumFormats._
import play.api.libs.json.{JsNumber, JsString}
/** Created by Lloyd on 4/13/16.
*
* Copyright 2016
*/
class EnumFormatsSpec extends AnyFunSpec with Matchers with EnumJsonFormatHelpers {

  // Shared JsValue encoders for the value types that need an explicit lambda.
  private val charToJson: Char => JsString = c => JsString(s"$c")
  private val byteToJson: Byte => JsNumber = b => JsNumber(b.toInt)

  describe(".reads") {
    testNumericReads("IntEnum", LibraryItem)
    testNumericReads("LongEnum", ContentType)
    testNumericReads("ShortEnum", Drinks)
    testReads("StringEnum", OperatingSystem, JsString)
    testReads("CharEnum", Alphabet, charToJson)
    testReads("ByteEnum", Bites, byteToJson)
  }

  describe(".writes") {
    testNumericWrites("IntEnum", LibraryItem)
    testNumericWrites("LongEnum", ContentType)
    testNumericWrites("ShortEnum", Drinks)
    testWrites("StringEnum", OperatingSystem, JsString)
    testWrites("CharEnum", Alphabet, charToJson)
    testWrites("ByteEnum", Bites, byteToJson)
  }

  describe(".formats") {
    testNumericFormats("IntEnum", LibraryItem)
    testNumericFormats("LongEnum", ContentType)
    testNumericFormats("ShortEnum", Drinks)
    testFormats("StringEnum", OperatingSystem, JsString)
    testFormats("ByteEnum", Bites, byteToJson)
    testNumericFormats("PlayJsonValueEnum", JsonDrinks, Some(JsonDrinks.format))
  }
}
| lloydmeta/enumeratum | enumeratum-play-json/src/test/scala/enumeratum/values/EnumFormatsSpec.scala | Scala | mit | 1,611 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.kafka010
import java.io._
import java.nio.charset.StandardCharsets.UTF_8
import java.nio.file.{Files, Paths}
import java.util.Locale
import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.JavaConverters._
import scala.io.Source
import scala.util.Random
import org.apache.commons.io.FileUtils
import org.apache.kafka.clients.producer.{ProducerRecord, RecordMetadata}
import org.apache.kafka.common.TopicPartition
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.time.SpanSugar._
import org.apache.spark.sql.{Dataset, ForeachWriter, Row, SparkSession}
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
import org.apache.spark.sql.connector.read.streaming.SparkDataStream
import org.apache.spark.sql.execution.datasources.v2.StreamingDataSourceV2Relation
import org.apache.spark.sql.execution.exchange.ReusedExchangeExec
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution
import org.apache.spark.sql.functions.{count, window}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.kafka010.KafkaSourceProvider._
import org.apache.spark.sql.streaming.{StreamTest, Trigger}
import org.apache.spark.sql.streaming.util.StreamManualClock
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.Utils
/** Shared harness for Kafka streaming-source suites: manages an embedded
 *  Kafka cluster and provides stream-test actions for adding data and
 *  synchronizing on offsets.
 */
abstract class KafkaSourceTest extends StreamTest with SharedSparkSession with KafkaTest {

  // Embedded Kafka helper; created in beforeAll and torn down in afterAll.
  protected var testUtils: KafkaTestUtils = _

  override val streamingTimeout = 30.seconds

  // Extra broker configuration; subclasses may override.
  protected val brokerProps = Map[String, Object]()

  override def beforeAll(): Unit = {
    super.beforeAll()
    testUtils = new KafkaTestUtils(brokerProps)
    testUtils.setup()
  }

  override def afterAll(): Unit = {
    if (testUtils != null) {
      testUtils.teardown()
      testUtils = null
    }
    super.afterAll()
  }

  protected def makeSureGetOffsetCalled = AssertOnQuery { q =>
    // Because KafkaSource's initialPartitionOffsets is set lazily, we need to make sure
    // its "getOffset" is called before pushing any data. Otherwise, because of the race condition,
    // we don't know which data should be fetched when `startingOffsets` is latest.
    q match {
      case c: ContinuousExecution => c.awaitEpoch(0)
      case m: MicroBatchExecution => m.processAllAvailable()
    }
    true
  }

  // Grows `topic` to `newCount` partitions; `query` is unused here but kept
  // so subclasses can override with query-aware behavior.
  protected def setTopicPartitions(topic: String, newCount: Int, query: StreamExecution) : Unit = {
    testUtils.addPartitions(topic, newCount)
  }

  /**
   * Add data to Kafka.
   *
   * `topicAction` can be used to run actions for each topic before inserting data.
   */
  case class AddKafkaData(topics: Set[String], data: Int*)
    (implicit ensureDataInMultiplePartition: Boolean = false,
      concurrent: Boolean = false,
      message: String = "",
      topicAction: (String, Option[Int]) => Unit = (_, _) => {}) extends AddData {

    override def addData(query: Option[StreamExecution]): (SparkDataStream, Offset) = {
      query match {
        // Make sure no Spark job is running when deleting a topic
        case Some(m: MicroBatchExecution) => m.processAllAvailable()
        case _ =>
      }

      // Run topicAction once per new topic (None) and once per existing topic
      // (Some(partitionCount)) before producing any data.
      val existingTopics = testUtils.getAllTopicsAndPartitionSize().toMap
      val newTopics = topics.diff(existingTopics.keySet)
      for (newTopic <- newTopics) {
        topicAction(newTopic, None)
      }

      for (existingTopicPartitions <- existingTopics) {
        topicAction(existingTopicPartitions._1, Some(existingTopicPartitions._2))
      }

      require(
        query.nonEmpty,
        "Cannot add data when there is no query for finding the active kafka source")

      // Locate the (single) Kafka source in the running query's logical plan,
      // covering both the V1 source and the V2 micro-batch/continuous streams.
      val sources: Seq[SparkDataStream] = {
        query.get.logicalPlan.collect {
          case StreamingExecutionRelation(source: KafkaSource, _) => source
          case r: StreamingDataSourceV2Relation if r.stream.isInstanceOf[KafkaMicroBatchStream] ||
              r.stream.isInstanceOf[KafkaContinuousStream] =>
            r.stream
        }
      }.distinct

      if (sources.isEmpty) {
        throw new Exception(
          "Could not find Kafka source in the StreamExecution logical plan to add data to")
      } else if (sources.size > 1) {
        throw new Exception(
          "Could not select the Kafka source in the StreamExecution logical plan as there" +
            "are multiple Kafka sources:\\n\\t" + sources.mkString("\\n\\t"))
      }
      val kafkaSource = sources.head
      // Produce all values to one randomly chosen topic from `topics`.
      val topic = topics.toSeq(Random.nextInt(topics.size))
      val sentMetadata = testUtils.sendMessages(topic, data.map { _.toString }.toArray)

      def metadataToStr(m: (String, RecordMetadata)): String = {
        s"Sent ${m._1} to partition ${m._2.partition()}, offset ${m._2.offset()}"
      }
      // Verify that the test data gets inserted into multiple partitions
      if (ensureDataInMultiplePartition) {
        require(
          sentMetadata.groupBy(_._2.partition).size > 1,
          s"Added data does not test multiple partitions: ${sentMetadata.map(metadataToStr)}")
      }

      val offset = KafkaSourceOffset(testUtils.getLatestOffsets(topics))
      logInfo(s"Added data, expected offset $offset")
      (kafkaSource, offset)
    }

    override def toString: String =
      s"AddKafkaData(topics = $topics, data = $data, message = $message)"
  }

  object WithOffsetSync {
    /**
     * Run `func` to write some Kafka messages and wait until the latest offset of the given
     * `TopicPartition` is not less than `expectedOffset`.
     */
    def apply(
        topicPartition: TopicPartition,
        expectedOffset: Long)(func: () => Unit): StreamAction = {
      Execute("Run Kafka Producer")(_ => {
        func()
        // This is a hack for the race condition that the committed message may be not visible to
        // consumer for a short time.
        testUtils.waitUntilOffsetAppears(topicPartition, expectedOffset)
      })
    }
  }

  private val topicId = new AtomicInteger(0)

  // Produces a unique topic name per call within this suite.
  protected def newTopic(): String = s"topic-${topicId.getAndIncrement()}"
}
abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
import testImplicits._
test("(de)serialization of initial offsets") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 5)
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", topic)
testStream(reader.load)(
makeSureGetOffsetCalled,
StopStream,
StartStream(),
StopStream)
}
test("SPARK-26718 Rate limit set to Long.Max should not overflow integer " +
"during end offset calculation") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 1)
// fill in 5 messages to trigger potential integer overflow
testUtils.sendMessages(topic, (0 until 5).map(_.toString).toArray, Some(0))
val partitionOffsets = Map(
new TopicPartition(topic, 0) -> 5L
)
val startingOffsets = JsonUtils.partitionOffsets(partitionOffsets)
val kafka = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
// use latest to force begin to be 5
.option("startingOffsets", startingOffsets)
// use Long.Max to try to trigger overflow
.option("maxOffsetsPerTrigger", Long.MaxValue)
.option("subscribe", topic)
.option("kafka.metadata.max.age.ms", "1")
.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped: org.apache.spark.sql.Dataset[_] = kafka.map(kv => kv._2.toInt)
testStream(mapped)(
makeSureGetOffsetCalled,
AddKafkaData(Set(topic), 30, 31, 32, 33, 34),
CheckAnswer(30, 31, 32, 33, 34),
StopStream
)
}
test("maxOffsetsPerTrigger") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 3)
testUtils.sendMessages(topic, (100 to 200).map(_.toString).toArray, Some(0))
testUtils.sendMessages(topic, (10 to 20).map(_.toString).toArray, Some(1))
testUtils.sendMessages(topic, Array("1"), Some(2))
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("maxOffsetsPerTrigger", 10)
.option("subscribe", topic)
.option("startingOffsets", "earliest")
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped: org.apache.spark.sql.Dataset[_] = kafka.map(kv => kv._2.toInt)
val clock = new StreamManualClock
val waitUntilBatchProcessed = AssertOnQuery { q =>
eventually(Timeout(streamingTimeout)) {
if (!q.exception.isDefined) {
assert(clock.isStreamWaitingAt(clock.getTimeMillis()))
}
}
if (q.exception.isDefined) {
throw q.exception.get
}
true
}
testStream(mapped)(
StartStream(Trigger.ProcessingTime(100), clock),
waitUntilBatchProcessed,
// 1 from smallest, 1 from middle, 8 from biggest
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// smallest now empty, 1 more from middle, 9 more from biggest
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107,
11, 108, 109, 110, 111, 112, 113, 114, 115, 116
),
StopStream,
StartStream(Trigger.ProcessingTime(100), clock),
waitUntilBatchProcessed,
// smallest now empty, 1 more from middle, 9 more from biggest
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107,
11, 108, 109, 110, 111, 112, 113, 114, 115, 116,
12, 117, 118, 119, 120, 121, 122, 123, 124, 125
),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// smallest now empty, 1 more from middle, 9 more from biggest
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107,
11, 108, 109, 110, 111, 112, 113, 114, 115, 116,
12, 117, 118, 119, 120, 121, 122, 123, 124, 125,
13, 126, 127, 128, 129, 130, 131, 132, 133, 134
)
)
// When Trigger.Once() is used, the read limit should be ignored
val allData = Seq(1) ++ (10 to 20) ++ (100 to 200)
withTempDir { dir =>
testStream(mapped)(
StartStream(Trigger.Once(), checkpointLocation = dir.getCanonicalPath),
AssertOnQuery { q =>
q.processAllAvailable()
true
},
CheckAnswer(allData: _*),
StopStream,
AddKafkaData(Set(topic), 1000 to 1010: _*),
StartStream(Trigger.Once(), checkpointLocation = dir.getCanonicalPath),
AssertOnQuery { q =>
q.processAllAvailable()
true
},
CheckAnswer((allData ++ 1000.to(1010)): _*)
)
}
}
test("input row metrics") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 5)
testUtils.sendMessages(topic, Array("-1"))
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
val kafka = spark
.readStream
.format("kafka")
.option("subscribe", topic)
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped = kafka.map(kv => kv._2.toInt + 1)
testStream(mapped)(
StartStream(trigger = Trigger.ProcessingTime(1)),
makeSureGetOffsetCalled,
AddKafkaData(Set(topic), 1, 2, 3),
CheckAnswer(2, 3, 4),
AssertOnQuery { query =>
val recordsRead = query.recentProgress.map(_.numInputRows).sum
recordsRead == 3
}
)
}
test("subscribing topic by pattern with topic deletions") {
val topicPrefix = newTopic()
val topic = topicPrefix + "-seems"
val topic2 = topicPrefix + "-bad"
testUtils.createTopic(topic, partitions = 5)
testUtils.sendMessages(topic, Array("-1"))
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("kafka.request.timeout.ms", "3000")
.option("kafka.default.api.timeout.ms", "3000")
.option("subscribePattern", s"$topicPrefix-.*")
.option("failOnDataLoss", "false")
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped = kafka.map(kv => kv._2.toInt + 1)
testStream(mapped)(
makeSureGetOffsetCalled,
AddKafkaData(Set(topic), 1, 2, 3),
CheckAnswer(2, 3, 4),
Assert {
testUtils.deleteTopic(topic)
testUtils.createTopic(topic2, partitions = 5)
true
},
AddKafkaData(Set(topic2), 4, 5, 6),
CheckAnswer(2, 3, 4, 5, 6, 7)
)
}
  test("subscribe topic by pattern with topic recreation between batches") {
    // Verifies that deleting and recreating a matched topic between batches is
    // detected as an offset regression (latest offset goes from 2 back to 1)
    // and fails the query with an IllegalStateException.
    val topicPrefix = newTopic()
    val topic = topicPrefix + "-good"
    val topic2 = topicPrefix + "-bad"
    testUtils.createTopic(topic, partitions = 1)
    testUtils.sendMessages(topic, Array("1", "3"))
    testUtils.createTopic(topic2, partitions = 1)
    testUtils.sendMessages(topic2, Array("2", "4"))
    val reader = spark
      .readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", testUtils.brokerAddress)
      .option("kafka.metadata.max.age.ms", "1")
      .option("kafka.request.timeout.ms", "3000")
      .option("kafka.default.api.timeout.ms", "3000")
      .option("startingOffsets", "earliest")
      .option("subscribePattern", s"$topicPrefix-.*")
    val ds = reader.load()
      .selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
      .as[(String, String)]
      .map(kv => kv._2.toInt)
    testStream(ds)(
      StartStream(),
      AssertOnQuery { q =>
        // Drain everything available so both topics' initial data is consumed.
        q.processAllAvailable()
        true
      },
      CheckAnswer(1, 2, 3, 4),
      // Restart the stream in this test to make the test stable. When recreating a topic when a
      // consumer is alive, it may not be able to see the recreated topic even if a fresh consumer
      // has seen it.
      StopStream,
      // Recreate `topic2` and wait until it's available
      WithOffsetSync(new TopicPartition(topic2, 0), expectedOffset = 1) { () =>
        testUtils.deleteTopic(topic2)
        testUtils.createTopic(topic2)
        testUtils.sendMessages(topic2, Array("6"))
      },
      StartStream(),
      ExpectFailure[IllegalStateException](e => {
        // The offset of `topic2` should be changed from 2 to 1
        assert(e.getMessage.contains("was changed from 2 to 1"))
      })
    )
  }
test("ensure that initial offset are written with an extra byte in the beginning (SPARK-19517)") {
withTempDir { metadataPath =>
val topic = "kafka-initial-offset-current"
testUtils.createTopic(topic, partitions = 1)
val initialOffsetFile = Paths.get(s"${metadataPath.getAbsolutePath}/sources/0/0").toFile
val df = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", topic)
.option("startingOffsets", s"earliest")
.load()
// Test the written initial offset file has 0 byte in the beginning, so that
// Spark 2.1.0 can read the offsets (see SPARK-19517)
testStream(df)(
StartStream(checkpointLocation = metadataPath.getAbsolutePath),
makeSureGetOffsetCalled)
val binarySource = Source.fromFile(initialOffsetFile)
try {
assert(binarySource.next().toInt == 0) // first byte is binary 0
} finally {
binarySource.close()
}
}
}
  test("deserialization of initial offset written by Spark 2.1.0 (SPARK-19517)") {
    // Backward compatibility: an initial-offset file written by Spark 2.1.0
    // (checked-in binary fixture) must be readable, and the query must resume
    // from the offsets that file records rather than from the latest offsets.
    withTempDir { metadataPath =>
      val topic = "kafka-initial-offset-2-1-0"
      testUtils.createTopic(topic, partitions = 3)
      testUtils.sendMessages(topic, Array("0", "1", "2"), Some(0))
      testUtils.sendMessages(topic, Array("0", "10", "20"), Some(1))
      testUtils.sendMessages(topic, Array("0", "100", "200"), Some(2))
      // Copy the initial offset file into the right location inside the checkpoint root directory
      // such that the Kafka source can read it for initial offsets.
      val from = new File(
        getClass.getResource("/kafka-source-initial-offset-version-2.1.0.bin").toURI).toPath
      val to = Paths.get(s"${metadataPath.getAbsolutePath}/sources/0/0")
      Files.createDirectories(to.getParent)
      Files.copy(from, to)
      val df = spark
        .readStream
        .format("kafka")
        .option("kafka.bootstrap.servers", testUtils.brokerAddress)
        .option("subscribe", topic)
        .option("startingOffsets", s"earliest")
        .load()
        .selectExpr("CAST(value AS STRING)")
        .as[String]
        .map(_.toInt)
      // Test that the query starts from the expected initial offset (i.e. read older offsets,
      // even though startingOffsets is latest).
      testStream(df)(
        StartStream(checkpointLocation = metadataPath.getAbsolutePath),
        AddKafkaData(Set(topic), 1000),
        CheckAnswer(0, 1, 2, 10, 20, 200, 1000))
    }
  }
test("deserialization of initial offset written by future version") {
withTempDir { metadataPath =>
val topic = "kafka-initial-offset-future-version"
testUtils.createTopic(topic, partitions = 3)
// Copy the initial offset file into the right location inside the checkpoint root directory
// such that the Kafka source can read it for initial offsets.
val from = new File(
getClass.getResource("/kafka-source-initial-offset-future-version.bin").toURI).toPath
val to = Paths.get(s"${metadataPath.getAbsolutePath}/sources/0/0")
Files.createDirectories(to.getParent)
Files.copy(from, to)
val df = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", topic)
.load()
.selectExpr("CAST(value AS STRING)")
.as[String]
.map(_.toInt)
testStream(df)(
StartStream(checkpointLocation = metadataPath.getAbsolutePath),
ExpectFailure[IllegalStateException](e => {
Seq(
s"maximum supported log version is v1, but encountered v99999",
"produced by a newer version of Spark and cannot be read by this version"
).foreach { message =>
assert(e.toString.contains(message))
}
}))
}
}
test("KafkaSource with watermark") {
val now = System.currentTimeMillis()
val topic = newTopic()
testUtils.createTopic(newTopic(), partitions = 1)
testUtils.sendMessages(topic, Array(1).map(_.toString))
val kafka = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("startingOffsets", s"earliest")
.option("subscribe", topic)
.load()
val windowedAggregation = kafka
.withWatermark("timestamp", "10 seconds")
.groupBy(window($"timestamp", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start") as 'window, $"count")
val query = windowedAggregation
.writeStream
.format("memory")
.outputMode("complete")
.queryName("kafkaWatermark")
.start()
query.processAllAvailable()
val rows = spark.table("kafkaWatermark").collect()
assert(rows.length === 1, s"Unexpected results: ${rows.toList}")
val row = rows(0)
// We cannot check the exact window start time as it depends on the time that messages were
// inserted by the producer. So here we just use a low bound to make sure the internal
// conversion works.
assert(
row.getAs[java.sql.Timestamp]("window").getTime >= now - 5 * 1000,
s"Unexpected results: $row")
assert(row.getAs[Int]("count") === 1, s"Unexpected results: $row")
query.stop()
}
  test("delete a topic when a Spark job is running") {
    // With failOnDataLoss=false, deleting (and recreating) the topic while
    // executors are fetching must not fail the query. Shared state lives in
    // the KafkaSourceSuite companion so the ForeachWriter (which runs on
    // executors) can reach the test utils and collect results.
    KafkaSourceSuite.collectedData.clear()

    val topic = newTopic()
    testUtils.createTopic(topic, partitions = 1)
    testUtils.sendMessages(topic, (1 to 10).map(_.toString).toArray)

    val reader = spark
      .readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", testUtils.brokerAddress)
      .option("kafka.metadata.max.age.ms", "1")
      .option("kafka.request.timeout.ms", "3000")
      .option("kafka.default.api.timeout.ms", "3000")
      .option("subscribe", topic)
      // If a topic is deleted and we try to poll data starting from offset 0,
      // the Kafka consumer will just block until timeout and return an empty result.
      // So set the timeout to 1 second to make this test fast.
      .option("kafkaConsumer.pollTimeoutMs", "1000")
      .option("startingOffsets", "earliest")
      .option("failOnDataLoss", "false")
    val kafka = reader.load()
      .selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
      .as[(String, String)]
    KafkaSourceSuite.globalTestUtils = testUtils
    // The following ForeachWriter will delete the topic before fetching data from Kafka
    // in executors.
    val query = kafka.map(kv => kv._2.toInt).writeStream.foreach(new ForeachWriter[Int] {
      override def open(partitionId: Long, version: Long): Boolean = {
        // Re-create topic since Kafka auto topic creation is not supported by Spark
        KafkaSourceSuite.globalTestUtils.deleteTopic(topic)
        KafkaSourceSuite.globalTestUtils.createTopic(topic)
        true
      }
      override def process(value: Int): Unit = {
        KafkaSourceSuite.collectedData.add(value)
      }
      override def close(errorOrNull: Throwable): Unit = {}
    }).start()
    query.processAllAvailable()
    query.stop()
    // `failOnDataLoss` is `false`, we should not fail the query
    assert(query.exception.isEmpty)
  }
  test("SPARK-22956: currentPartitionOffsets should be set when no new data comes in") {
    // Unions two topics of different sizes so one side runs dry; with
    // maxOffsetsPerTrigger=5, each batch must keep taking exactly 5 records
    // from the non-empty side even after the smaller topic is exhausted,
    // including across a stop/restart.

    // Builds a one-partition topic pre-loaded with `range` and returns its
    // value stream parsed as Int.
    def getSpecificDF(range: Range.Inclusive): org.apache.spark.sql.Dataset[Int] = {
      val topic = newTopic()
      testUtils.createTopic(topic, partitions = 1)
      testUtils.sendMessages(topic, range.map(_.toString).toArray, Some(0))

      val reader = spark
        .readStream
        .format("kafka")
        .option("kafka.bootstrap.servers", testUtils.brokerAddress)
        .option("kafka.metadata.max.age.ms", "1")
        .option("maxOffsetsPerTrigger", 5)
        .option("subscribe", topic)
        .option("startingOffsets", "earliest")

      reader.load()
        .selectExpr("CAST(value AS STRING)")
        .as[String]
        .map(k => k.toInt)
    }

    val df1 = getSpecificDF(0 to 9)
    val df2 = getSpecificDF(100 to 199)
    val kafka = df1.union(df2)

    val clock = new StreamManualClock

    // Block until the stream is parked on the manual clock (i.e. the batch
    // finished), surfacing any query failure instead of hanging.
    val waitUntilBatchProcessed = AssertOnQuery { q =>
      eventually(Timeout(streamingTimeout)) {
        if (!q.exception.isDefined) {
          assert(clock.isStreamWaitingAt(clock.getTimeMillis()))
        }
      }
      if (q.exception.isDefined) {
        throw q.exception.get
      }
      true
    }

    testStream(kafka)(
      StartStream(Trigger.ProcessingTime(100), clock),
      waitUntilBatchProcessed,
      // 5 from smaller topic, 5 from bigger one
      CheckLastBatch((0 to 4) ++ (100 to 104): _*),
      AdvanceManualClock(100),
      waitUntilBatchProcessed,
      // 5 from smaller topic, 5 from bigger one
      CheckLastBatch((5 to 9) ++ (105 to 109): _*),
      AdvanceManualClock(100),
      waitUntilBatchProcessed,
      // smaller topic empty, 5 from bigger one
      CheckLastBatch(110 to 114: _*),
      StopStream,
      StartStream(Trigger.ProcessingTime(100), clock),
      waitUntilBatchProcessed,
      // smallest now empty, 5 from bigger one
      CheckLastBatch(115 to 119: _*),
      AdvanceManualClock(100),
      waitUntilBatchProcessed,
      // smallest now empty, 5 from bigger one
      CheckLastBatch(120 to 124: _*)
    )
  }
test("allow group.id prefix") {
// Group ID prefix is only supported by consumer based offset reader
if (spark.conf.get(SQLConf.USE_DEPRECATED_KAFKA_OFFSET_FETCHING)) {
testGroupId("groupIdPrefix", (expected, actual) => {
assert(actual.exists(_.startsWith(expected)) && !actual.exists(_ === expected),
"Valid consumer groups don't contain the expected group id - " +
s"Valid consumer groups: $actual / expected group id: $expected")
})
}
}
test("allow group.id override") {
// Group ID override is only supported by consumer based offset reader
if (spark.conf.get(SQLConf.USE_DEPRECATED_KAFKA_OFFSET_FETCHING)) {
testGroupId("kafka.group.id", (expected, actual) => {
assert(actual.exists(_ === expected), "Valid consumer groups don't " +
s"contain the expected group id - Valid consumer groups: $actual / " +
s"expected group id: $expected")
})
}
}
  // Runs a query with `groupIdKey` set to a random group id value, then lets
  // `validateGroupId(expected, actualGroupIds)` assert how that value shows up
  // in the broker's registered consumer groups.
  private def testGroupId(groupIdKey: String,
      validateGroupId: (String, Iterable[String]) => Unit): Unit = {
    // Tests code path KafkaSourceProvider.{sourceSchema(.), createSource(.)}
    // as well as KafkaOffsetReader.createConsumer(.)
    val topic = newTopic()
    testUtils.createTopic(topic, partitions = 3)
    testUtils.sendMessages(topic, (1 to 10).map(_.toString).toArray, Some(0))
    testUtils.sendMessages(topic, (11 to 20).map(_.toString).toArray, Some(1))
    testUtils.sendMessages(topic, (21 to 30).map(_.toString).toArray, Some(2))

    // Random suffix avoids collisions with groups left over from other tests.
    val customGroupId = "id-" + Random.nextInt()
    val dsKafka = spark
      .readStream
      .format("kafka")
      .option(groupIdKey, customGroupId)
      .option("kafka.bootstrap.servers", testUtils.brokerAddress)
      .option("subscribe", topic)
      .option("startingOffsets", "earliest")
      .load()
      .selectExpr("CAST(value AS STRING)")
      .as[String]
      .map(_.toInt)

    testStream(dsKafka)(
      makeSureGetOffsetCalled,
      CheckAnswer(1 to 30: _*),
      Execute { _ =>
        // Ask the broker for its consumer groups and delegate validation.
        val consumerGroups = testUtils.listConsumerGroups()
        val validGroups = consumerGroups.valid().get()
        val validGroupsId = validGroups.asScala.map(_.groupId())
        validateGroupId(customGroupId, validGroupsId)
      }
    )
  }
  test("ensure stream-stream self-join generates only one offset in log and correct metrics") {
    // A self-join reads the same Kafka source twice. Regardless of exchange
    // reuse, only ONE offset entry may appear in the log; input-row metrics
    // differ (8 = scanned twice without reuse, 4 = scanned once with reuse).
    val topic = newTopic()
    testUtils.createTopic(topic, partitions = 2)
    require(testUtils.getLatestOffsets(Set(topic)).size === 2)

    val kafka = spark
      .readStream
      .format("kafka")
      .option("subscribe", topic)
      .option("kafka.bootstrap.servers", testUtils.brokerAddress)
      .option("kafka.metadata.max.age.ms", "1")
      .load()

    // key = value % 5, so distinct values can share a join key.
    val values = kafka
      .selectExpr("CAST(CAST(value AS STRING) AS INT) AS value",
        "CAST(CAST(value AS STRING) AS INT) % 5 AS key")

    val join = values.join(values, "key")

    // Shared scenario; `check` carries the per-config assertion.
    def checkQuery(check: AssertOnQuery): Unit = {
      testStream(join)(
        makeSureGetOffsetCalled,
        AddKafkaData(Set(topic), 1, 2),
        CheckAnswer((1, 1, 1), (2, 2, 2)),
        AddKafkaData(Set(topic), 6, 3),
        // 1 and 6 share key 1 (1 % 5 == 6 % 5), producing cross matches.
        CheckAnswer((1, 1, 1), (2, 2, 2), (3, 3, 3), (1, 6, 1), (1, 1, 6), (1, 6, 6)),
        check
      )
    }

    withSQLConf(SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
      checkQuery(AssertOnQuery { q =>
        assert(q.availableOffsets.iterator.size == 1)
        // The kafka source is scanned twice because of self-join
        assert(q.recentProgress.map(_.numInputRows).sum == 8)
        true
      })
    }

    withSQLConf(SQLConf.EXCHANGE_REUSE_ENABLED.key -> "true") {
      checkQuery(AssertOnQuery { q =>
        assert(q.availableOffsets.iterator.size == 1)
        assert(q.lastExecution.executedPlan.collect {
          case r: ReusedExchangeExec => r
        }.length == 1)
        // The kafka source is scanned only once because of exchange reuse.
        assert(q.recentProgress.map(_.numInputRows).sum == 4)
        true
      })
    }
  }
  test("read Kafka transactional messages: read_committed") {
    // End-to-end check of read_committed isolation: records from open or
    // aborted transactions must never surface. Offsets annotated with * in
    // the comments below are transaction markers or aborted records, not
    // committed data, yet they still advance the partition offset.
    // This test will cover the following cases:
    // 1. the whole batch contains no data messages
    // 2. the first offset in a batch is not a committed data message
    // 3. the last offset in a batch is not a committed data message
    // 4. there is a gap in the middle of a batch
    val topic = newTopic()
    testUtils.createTopic(topic, partitions = 1)

    val reader = spark
      .readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", testUtils.brokerAddress)
      .option("kafka.metadata.max.age.ms", "1")
      .option("kafka.isolation.level", "read_committed")
      .option("maxOffsetsPerTrigger", 3)
      .option("subscribe", topic)
      .option("startingOffsets", "earliest")
      // Set a short timeout to make the test fast. When a batch doesn't contain any visible data
      // messages, "poll" will wait until timeout.
      .option("kafkaConsumer.pollTimeoutMs", 5000)
    val kafka = reader.load()
      .selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
      .as[(String, String)]
    val mapped: org.apache.spark.sql.Dataset[_] = kafka.map(kv => kv._2.toInt)

    val clock = new StreamManualClock

    // Wait until the manual clock is waiting on further instructions to move forward. Then we can
    // ensure all batches we are waiting for have been processed.
    val waitUntilBatchProcessed = Execute { q =>
      eventually(Timeout(streamingTimeout)) {
        if (!q.exception.isDefined) {
          assert(clock.isStreamWaitingAt(clock.getTimeMillis()))
        }
      }
      if (q.exception.isDefined) {
        throw q.exception.get
      }
    }

    val topicPartition = new TopicPartition(topic, 0)
    // The message values are the same as their offsets to make the test easy to follow
    testUtils.withTransactionalProducer { producer =>
      testStream(mapped)(
        StartStream(Trigger.ProcessingTime(100), clock),
        waitUntilBatchProcessed,
        CheckAnswer(),
        WithOffsetSync(topicPartition, expectedOffset = 5) { () =>
          // Send 5 messages. They should be visible only after being committed.
          producer.beginTransaction()
          (0 to 4).foreach { i =>
            producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
          }
        },
        AdvanceManualClock(100),
        waitUntilBatchProcessed,
        // Should not see any uncommitted messages
        CheckNewAnswer(),
        WithOffsetSync(topicPartition, expectedOffset = 6) { () =>
          // Committing bumps the offset by one (transaction commit marker).
          producer.commitTransaction()
        },
        AdvanceManualClock(100),
        waitUntilBatchProcessed,
        CheckNewAnswer(0, 1, 2), // offset 0, 1, 2
        AdvanceManualClock(100),
        waitUntilBatchProcessed,
        CheckNewAnswer(3, 4), // offset: 3, 4, 5* [* means it's not a committed data message]
        WithOffsetSync(topicPartition, expectedOffset = 12) { () =>
          // Send 5 messages and abort the transaction. They should not be read.
          producer.beginTransaction()
          (6 to 10).foreach { i =>
            producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
          }
          producer.abortTransaction()
        },
        AdvanceManualClock(100),
        waitUntilBatchProcessed,
        CheckNewAnswer(), // offset: 6*, 7*, 8*
        AdvanceManualClock(100),
        waitUntilBatchProcessed,
        CheckNewAnswer(), // offset: 9*, 10*, 11*
        WithOffsetSync(topicPartition, expectedOffset = 18) { () =>
          // Send 5 messages again. The consumer should skip the above aborted messages and read
          // them.
          producer.beginTransaction()
          (12 to 16).foreach { i =>
            producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
          }
          producer.commitTransaction()
        },
        AdvanceManualClock(100),
        waitUntilBatchProcessed,
        CheckNewAnswer(12, 13, 14), // offset: 12, 13, 14
        AdvanceManualClock(100),
        waitUntilBatchProcessed,
        CheckNewAnswer(15, 16), // offset: 15, 16, 17*
        WithOffsetSync(topicPartition, expectedOffset = 25) { () =>
          // Three back-to-back small transactions interleave commit markers
          // with data, creating gaps inside a single batch.
          producer.beginTransaction()
          producer.send(new ProducerRecord[String, String](topic, "18")).get()
          producer.commitTransaction()
          producer.beginTransaction()
          producer.send(new ProducerRecord[String, String](topic, "20")).get()
          producer.commitTransaction()
          producer.beginTransaction()
          producer.send(new ProducerRecord[String, String](topic, "22")).get()
          producer.send(new ProducerRecord[String, String](topic, "23")).get()
          producer.commitTransaction()
        },
        AdvanceManualClock(100),
        waitUntilBatchProcessed,
        CheckNewAnswer(18, 20), // offset: 18, 19*, 20
        AdvanceManualClock(100),
        waitUntilBatchProcessed,
        CheckNewAnswer(22, 23), // offset: 21*, 22, 23
        AdvanceManualClock(100),
        waitUntilBatchProcessed,
        CheckNewAnswer() // offset: 24*
      )
    }
  }
  test("read Kafka transactional messages: read_uncommitted") {
    // Counterpart to the read_committed test: with read_uncommitted isolation,
    // records are visible as soon as they are produced, including records from
    // transactions that are still open or later aborted. Offsets annotated
    // with * are transaction markers, not data.
    // This test will cover the following cases:
    // 1. the whole batch contains no data messages
    // 2. the first offset in a batch is not a committed data message
    // 3. the last offset in a batch is not a committed data message
    // 4. there is a gap in the middle of a batch
    val topic = newTopic()
    testUtils.createTopic(topic, partitions = 1)

    val reader = spark
      .readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", testUtils.brokerAddress)
      .option("kafka.metadata.max.age.ms", "1")
      .option("kafka.isolation.level", "read_uncommitted")
      .option("maxOffsetsPerTrigger", 3)
      .option("subscribe", topic)
      .option("startingOffsets", "earliest")
      // Set a short timeout to make the test fast. When a batch doesn't contain any visible data
      // messages, "poll" will wait until timeout.
      .option("kafkaConsumer.pollTimeoutMs", 5000)
    val kafka = reader.load()
      .selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
      .as[(String, String)]
    val mapped: org.apache.spark.sql.Dataset[_] = kafka.map(kv => kv._2.toInt)

    val clock = new StreamManualClock

    // Wait until the manual clock is waiting on further instructions to move forward. Then we can
    // ensure all batches we are waiting for have been processed.
    val waitUntilBatchProcessed = Execute { q =>
      eventually(Timeout(streamingTimeout)) {
        if (!q.exception.isDefined) {
          assert(clock.isStreamWaitingAt(clock.getTimeMillis()))
        }
      }
      if (q.exception.isDefined) {
        throw q.exception.get
      }
    }

    val topicPartition = new TopicPartition(topic, 0)
    // The message values are the same as their offsets to make the test easy to follow
    testUtils.withTransactionalProducer { producer =>
      testStream(mapped)(
        StartStream(Trigger.ProcessingTime(100), clock),
        waitUntilBatchProcessed,
        CheckNewAnswer(),
        WithOffsetSync(topicPartition, expectedOffset = 5) { () =>
          // Send 5 messages. They should be visible only after being committed.
          producer.beginTransaction()
          (0 to 4).foreach { i =>
            producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
          }
        },
        AdvanceManualClock(100),
        waitUntilBatchProcessed,
        // Unlike read_committed, uncommitted records are visible immediately.
        CheckNewAnswer(0, 1, 2), // offset 0, 1, 2
        WithOffsetSync(topicPartition, expectedOffset = 6) { () =>
          producer.commitTransaction()
        },
        AdvanceManualClock(100),
        waitUntilBatchProcessed,
        CheckNewAnswer(3, 4), // offset: 3, 4, 5* [* means it's not a committed data message]
        WithOffsetSync(topicPartition, expectedOffset = 12) { () =>
          // Send 5 messages and abort the transaction. They should not be read.
          producer.beginTransaction()
          (6 to 10).foreach { i =>
            producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
          }
          producer.abortTransaction()
        },
        AdvanceManualClock(100),
        waitUntilBatchProcessed,
        // Aborted records are also visible under read_uncommitted.
        CheckNewAnswer(6, 7, 8), // offset: 6, 7, 8
        AdvanceManualClock(100),
        waitUntilBatchProcessed,
        CheckNewAnswer(9, 10), // offset: 9, 10, 11*
        WithOffsetSync(topicPartition, expectedOffset = 18) { () =>
          // Send 5 messages again. The consumer should skip the above aborted messages and read
          // them.
          producer.beginTransaction()
          (12 to 16).foreach { i =>
            producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
          }
          producer.commitTransaction()
        },
        AdvanceManualClock(100),
        waitUntilBatchProcessed,
        CheckNewAnswer(12, 13, 14), // offset: 12, 13, 14
        AdvanceManualClock(100),
        waitUntilBatchProcessed,
        CheckNewAnswer(15, 16), // offset: 15, 16, 17*
        WithOffsetSync(topicPartition, expectedOffset = 25) { () =>
          // Three back-to-back small transactions interleave commit markers
          // with data, creating gaps inside a single batch.
          producer.beginTransaction()
          producer.send(new ProducerRecord[String, String](topic, "18")).get()
          producer.commitTransaction()
          producer.beginTransaction()
          producer.send(new ProducerRecord[String, String](topic, "20")).get()
          producer.commitTransaction()
          producer.beginTransaction()
          producer.send(new ProducerRecord[String, String](topic, "22")).get()
          producer.send(new ProducerRecord[String, String](topic, "23")).get()
          producer.commitTransaction()
        },
        AdvanceManualClock(100),
        waitUntilBatchProcessed,
        CheckNewAnswer(18, 20), // offset: 18, 19*, 20
        AdvanceManualClock(100),
        waitUntilBatchProcessed,
        CheckNewAnswer(22, 23), // offset: 21*, 22, 23
        AdvanceManualClock(100),
        waitUntilBatchProcessed,
        CheckNewAnswer() // offset: 24*
      )
    }
  }
  test("SPARK-25495: FetchedData.reset should reset all fields") {
    // Regression test: prefetched-but-dropped records must not leak state
    // (`_nextOffsetInFetchedData` / `_offsetAfterPoll`) into the next batch.
    val topic = newTopic()
    val topicPartition = new TopicPartition(topic, 0)
    testUtils.createTopic(topic, partitions = 1)

    val ds = spark
      .readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", testUtils.brokerAddress)
      .option("kafka.metadata.max.age.ms", "1")
      .option("kafka.isolation.level", "read_committed")
      .option("subscribe", topic)
      .option("startingOffsets", "earliest")
      .load()
      .select($"value".as[String])

    // First transactional batch: values 0..3 (plus a commit marker => offset 5).
    testUtils.withTransactionalProducer { producer =>
      producer.beginTransaction()
      (0 to 3).foreach { i =>
        producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
      }
      producer.commitTransaction()
    }
    testUtils.waitUntilOffsetAppears(topicPartition, 5)

    val q = ds.writeStream.foreachBatch { (ds: Dataset[String], epochId: Long) =>
      if (epochId == 0) {
        // Send more message before the tasks of the current batch start reading the current batch
        // data, so that the executors will prefetch messages in the next batch and drop them. In
        // this case, if we forget to reset `FetchedData._nextOffsetInFetchedData` or
        // `FetchedData._offsetAfterPoll` (See SPARK-25495), the next batch will see incorrect
        // values and return wrong results hence fail the test.
        testUtils.withTransactionalProducer { producer =>
          producer.beginTransaction()
          (4 to 7).foreach { i =>
            producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
          }
          producer.commitTransaction()
        }
        testUtils.waitUntilOffsetAppears(topicPartition, 10)
        checkDatasetUnorderly(ds, (0 to 3).map(_.toString): _*)
      } else {
        checkDatasetUnorderly(ds, (4 to 7).map(_.toString): _*)
      }
    }.start()
    try {
      q.processAllAvailable()
    } finally {
      q.stop()
    }
  }
  test("SPARK-27494: read kafka record containing null key/values.") {
    // Delegates to the shared helper, exercising it with a processing-time
    // trigger (the continuous-mode variant lives in a different suite).
    testNullableKeyValue(Trigger.ProcessingTime(100))
  }
test("SPARK-30656: minPartitions") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 3)
testUtils.sendMessages(topic, (0 to 9).map(_.toString).toArray, Some(0))
testUtils.sendMessages(topic, (10 to 19).map(_.toString).toArray, Some(1))
testUtils.sendMessages(topic, Array("20"), Some(2))
val ds = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("subscribe", topic)
.option("startingOffsets", "earliest")
.option("minPartitions", "6")
.load()
.select($"value".as[String])
val q = ds.writeStream.foreachBatch { (batch: Dataset[String], _: Long) =>
val partitions = batch.rdd.collectPartitions()
assert(partitions.length >= 6)
assert(partitions.flatten.toSet === (0 to 20).map(_.toString).toSet): Unit
}.start()
try {
q.processAllAvailable()
} finally {
q.stop()
}
}
}
// Re-runs the whole V1 source suite with the deprecated consumer-based offset
// fetching disabled (i.e. using the admin-client based offset reader).
class KafkaMicroBatchV1SourceWithAdminSuite extends KafkaMicroBatchV1SourceSuite {
  override def beforeAll(): Unit = {
    super.beforeAll()
    spark.conf.set(SQLConf.USE_DEPRECATED_KAFKA_OFFSET_FETCHING.key, "false")
  }
}
// Re-runs the whole V2 source suite with the deprecated consumer-based offset
// fetching disabled (i.e. using the admin-client based offset reader).
class KafkaMicroBatchV2SourceWithAdminSuite extends KafkaMicroBatchV2SourceSuite {
  override def beforeAll(): Unit = {
    super.beforeAll()
    spark.conf.set(SQLConf.USE_DEPRECATED_KAFKA_OFFSET_FETCHING.key, "false")
  }
}
// Runs the shared suite against the V1 (legacy `Source`) code path by listing
// the Kafka provider among the disabled V2 micro-batch readers.
class KafkaMicroBatchV1SourceSuite extends KafkaMicroBatchSourceSuiteBase {
  override def beforeAll(): Unit = {
    super.beforeAll()
    spark.conf.set(
      SQLConf.DISABLED_V2_STREAMING_MICROBATCH_READERS.key,
      classOf[KafkaSourceProvider].getCanonicalName)
  }

  test("V1 Source is used when disabled through SQLConf") {
    val topic = newTopic()
    testUtils.createTopic(topic, partitions = 5)

    val kafka = spark
      .readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", testUtils.brokerAddress)
      .option("kafka.metadata.max.age.ms", "1")
      .option("subscribePattern", s"$topic.*")
      .load()

    testStream(kafka)(
      makeSureGetOffsetCalled,
      AssertOnQuery { query =>
        // The logical plan must contain the V1 KafkaSource relation.
        query.logicalPlan.collect {
          case StreamingExecutionRelation(_: KafkaSource, _) => true
        }.nonEmpty
      }
    )
  }
}
// Runs the shared suite against the default V2 (DataSourceV2) code path, plus
// V2-only checks: plan-level source verification, minPartitions planning, and
// a checkpoint-compatibility test for the includeHeaders default.
class KafkaMicroBatchV2SourceSuite extends KafkaMicroBatchSourceSuiteBase {
  test("V2 Source is used by default") {
    val topic = newTopic()
    testUtils.createTopic(topic, partitions = 5)

    val kafka = spark
      .readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", testUtils.brokerAddress)
      .option("kafka.metadata.max.age.ms", "1")
      .option("subscribePattern", s"$topic.*")
      .load()

    testStream(kafka)(
      makeSureGetOffsetCalled,
      AssertOnQuery { query =>
        // Without any conf override, the plan must use KafkaMicroBatchStream.
        query.logicalPlan.find {
          case r: StreamingDataSourceV2Relation => r.stream.isInstanceOf[KafkaMicroBatchStream]
          case _ => false
        }.isDefined
      }
    )
  }

  testWithUninterruptibleThread("minPartitions is supported") {
    val topic = newTopic()
    val tp = new TopicPartition(topic, 0)
    testUtils.createTopic(topic, partitions = 1)

    // NOTE: this nested `test` helper shadows ScalaTest's `test` within this
    // body; it plans input partitions directly against the provider, without
    // running a query.
    def test(
        minPartitions: String,
        numPartitionsGenerated: Int,
        reusesConsumers: Boolean): Unit = {

      SparkSession.setActiveSession(spark)
      withTempDir { dir =>
        val provider = new KafkaSourceProvider()
        val options = Map(
          "kafka.bootstrap.servers" -> testUtils.brokerAddress,
          "subscribe" -> topic
        ) ++ Option(minPartitions).map { p => "minPartitions" -> p}
        val dsOptions = new CaseInsensitiveStringMap(options.asJava)
        val table = provider.getTable(dsOptions)
        val stream = table.newScanBuilder(dsOptions).build().toMicroBatchStream(dir.getAbsolutePath)
        val inputPartitions = stream.planInputPartitions(
          KafkaSourceOffset(Map(tp -> 0L)),
          KafkaSourceOffset(Map(tp -> 100L))).map(_.asInstanceOf[KafkaBatchInputPartition])
        withClue(s"minPartitions = $minPartitions generated factories $inputPartitions\\n\\t") {
          assert(inputPartitions.size == numPartitionsGenerated)
        }
      }
    }

    // Test cases when minPartitions is used and not used
    test(minPartitions = null, numPartitionsGenerated = 1, reusesConsumers = true)
    test(minPartitions = "1", numPartitionsGenerated = 1, reusesConsumers = true)
    test(minPartitions = "4", numPartitionsGenerated = 4, reusesConsumers = false)

    // Test illegal minPartitions values
    intercept[IllegalArgumentException] { test(minPartitions = "a", 1, true) }
    intercept[IllegalArgumentException] { test(minPartitions = "1.0", 1, true) }
    intercept[IllegalArgumentException] { test(minPartitions = "0", 1, true) }
    intercept[IllegalArgumentException] { test(minPartitions = "-1", 1, true) }
  }

  test("default config of includeHeader doesn't break existing query from Spark 2.4") {
    import testImplicits._

    // This topic name is migrated from Spark 2.4.3 test run
    val topic = "spark-test-topic-2b8619f5-d3c4-4c2d-b5d1-8d9d9458aa62"
    // create same topic and messages as test run
    testUtils.createTopic(topic, partitions = 5, overwrite = true)
    testUtils.sendMessages(topic, Array(-20, -21, -22).map(_.toString), Some(0))
    testUtils.sendMessages(topic, Array(-10, -11, -12).map(_.toString), Some(1))
    testUtils.sendMessages(topic, Array(0, 1, 2).map(_.toString), Some(2))
    testUtils.sendMessages(topic, Array(10, 11, 12).map(_.toString), Some(3))
    testUtils.sendMessages(topic, Array(20, 21, 22).map(_.toString), Some(4))
    require(testUtils.getLatestOffsets(Set(topic)).size === 5)

    // New records carry headers; the pre-2.4 checkpoint must still be usable
    // with the default includeHeaders setting.
    val headers = Seq(("a", "b".getBytes(UTF_8)), ("c", "d".getBytes(UTF_8)))
    (31 to 35).map { num =>
      new RecordBuilder(topic, num.toString).partition(num - 31).headers(headers).build()
    }.foreach { rec => testUtils.sendMessage(rec) }

    val kafka = spark
      .readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", testUtils.brokerAddress)
      .option("kafka.metadata.max.age.ms", "1")
      .option("subscribePattern", topic)
      .option("startingOffsets", "earliest")
      .load()

    val query = kafka.dropDuplicates()
      .selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
      .as[(String, String)]
      .map(kv => kv._2.toInt + 1)

    val resourceUri = this.getClass.getResource(
      "/structured-streaming/checkpoint-version-2.4.3-kafka-include-headers-default/").toURI

    val checkpointDir = Utils.createTempDir().getCanonicalFile
    // Copy the checkpoint to a temp dir to prevent changes to the original.
    // Not doing this will lead to the test passing on the first run, but fail subsequent runs.
    FileUtils.copyDirectory(new File(resourceUri), checkpointDir)

    testStream(query)(
      StartStream(checkpointLocation = checkpointDir.getAbsolutePath),
      /*
        Note: The checkpoint was generated using the following input in Spark version 2.4.3
        testUtils.createTopic(topic, partitions = 5, overwrite = true)

        testUtils.sendMessages(topic, Array(-20, -21, -22).map(_.toString), Some(0))
        testUtils.sendMessages(topic, Array(-10, -11, -12).map(_.toString), Some(1))
        testUtils.sendMessages(topic, Array(0, 1, 2).map(_.toString), Some(2))
        testUtils.sendMessages(topic, Array(10, 11, 12).map(_.toString), Some(3))
        testUtils.sendMessages(topic, Array(20, 21, 22).map(_.toString), Some(4))
      */
      makeSureGetOffsetCalled,
      CheckNewAnswer(32, 33, 34, 35, 36)
    )
  }
}
abstract class KafkaSourceSuiteBase extends KafkaSourceTest {
import testImplicits._
test("cannot stop Kafka stream") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 5)
testUtils.sendMessages(topic, (101 to 105).map { _.toString }.toArray)
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("subscribePattern", s"$topic.*")
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped = kafka.map(kv => kv._2.toInt + 1)
testStream(mapped)(
makeSureGetOffsetCalled,
StopStream
)
}
  // Registers the full matrix of subscription strategies (assign / subscribe /
  // subscribePattern) crossed with starting-position modes (latest / earliest /
  // specific offsets / specific timestamps), once per failOnDataLoss value.
  for (failOnDataLoss <- Seq(true, false)) {
    test(s"assign from latest offsets (failOnDataLoss: $failOnDataLoss)") {
      val topic = newTopic()
      testFromLatestOffsets(
        topic,
        addPartitions = false,
        failOnDataLoss = failOnDataLoss,
        "assign" -> assignString(topic, 0 to 4))
    }

    test(s"assign from earliest offsets (failOnDataLoss: $failOnDataLoss)") {
      val topic = newTopic()
      testFromEarliestOffsets(
        topic,
        addPartitions = false,
        failOnDataLoss = failOnDataLoss,
        "assign" -> assignString(topic, 0 to 4))
    }

    test(s"assign from specific offsets (failOnDataLoss: $failOnDataLoss)") {
      val topic = newTopic()
      testFromSpecificOffsets(
        topic,
        failOnDataLoss = failOnDataLoss,
        "assign" -> assignString(topic, 0 to 4),
        "failOnDataLoss" -> failOnDataLoss.toString)
    }

    test(s"assign from specific timestamps (failOnDataLoss: $failOnDataLoss)") {
      val topic = newTopic()
      testFromSpecificTimestamps(
        topic,
        failOnDataLoss = failOnDataLoss,
        addPartitions = false,
        "assign" -> assignString(topic, 0 to 4),
        "failOnDataLoss" -> failOnDataLoss.toString)
    }

    test(s"subscribing topic by name from latest offsets (failOnDataLoss: $failOnDataLoss)") {
      val topic = newTopic()
      testFromLatestOffsets(
        topic,
        addPartitions = true,
        failOnDataLoss = failOnDataLoss,
        "subscribe" -> topic)
    }

    test(s"subscribing topic by name from earliest offsets (failOnDataLoss: $failOnDataLoss)") {
      val topic = newTopic()
      testFromEarliestOffsets(
        topic,
        addPartitions = true,
        failOnDataLoss = failOnDataLoss,
        "subscribe" -> topic)
    }

    test(s"subscribing topic by name from specific offsets (failOnDataLoss: $failOnDataLoss)") {
      val topic = newTopic()
      testFromSpecificOffsets(topic, failOnDataLoss = failOnDataLoss, "subscribe" -> topic)
    }

    test(s"subscribing topic by name from specific timestamps (failOnDataLoss: $failOnDataLoss)") {
      val topic = newTopic()
      testFromSpecificTimestamps(topic, failOnDataLoss = failOnDataLoss, addPartitions = true,
        "subscribe" -> topic)
    }

    test(s"subscribing topic by pattern from latest offsets (failOnDataLoss: $failOnDataLoss)") {
      val topicPrefix = newTopic()
      val topic = topicPrefix + "-suffix"
      testFromLatestOffsets(
        topic,
        addPartitions = true,
        failOnDataLoss = failOnDataLoss,
        "subscribePattern" -> s"$topicPrefix-.*")
    }

    test(s"subscribing topic by pattern from earliest offsets (failOnDataLoss: $failOnDataLoss)") {
      val topicPrefix = newTopic()
      val topic = topicPrefix + "-suffix"
      testFromEarliestOffsets(
        topic,
        addPartitions = true,
        failOnDataLoss = failOnDataLoss,
        "subscribePattern" -> s"$topicPrefix-.*")
    }

    test(s"subscribing topic by pattern from specific offsets (failOnDataLoss: $failOnDataLoss)") {
      val topicPrefix = newTopic()
      val topic = topicPrefix + "-suffix"
      testFromSpecificOffsets(
        topic,
        failOnDataLoss = failOnDataLoss,
        "subscribePattern" -> s"$topicPrefix-.*")
    }

    test(s"subscribing topic by pattern from specific timestamps " +
      s"(failOnDataLoss: $failOnDataLoss)") {
      val topicPrefix = newTopic()
      val topic = topicPrefix + "-suffix"
      testFromSpecificTimestamps(
        topic,
        failOnDataLoss = failOnDataLoss,
        addPartitions = true,
        "subscribePattern" -> s"$topicPrefix-.*")
    }
  }
  test("bad source options") {
    // Each invalid option combination must raise IllegalArgumentException at
    // load() time, with a message containing all of `expectedMsgs`
    // (case-insensitively).
    def testBadOptions(options: (String, String)*)(expectedMsgs: String*): Unit = {
      val ex = intercept[IllegalArgumentException] {
        val reader = spark
          .readStream
          .format("kafka")
        options.foreach { case (k, v) => reader.option(k, v) }
        reader.load()
      }
      expectedMsgs.foreach { m =>
        assert(ex.getMessage.toLowerCase(Locale.ROOT).contains(m.toLowerCase(Locale.ROOT)))
      }
    }

    // Specifying an ending offset
    testBadOptions("endingOffsets" -> "latest")("Ending offset not valid in streaming queries")

    testBadOptions("subscribe" -> "t", "endingOffsetsByTimestamp" -> "{\\"t\\": {\\"0\\": 1000}}")(
      "Ending timestamp not valid in streaming queries")

    // No strategy specified
    testBadOptions()("options must be specified", "subscribe", "subscribePattern")

    // Multiple strategies specified
    testBadOptions("subscribe" -> "t", "subscribePattern" -> "t.*")(
      "only one", "options can be specified")

    testBadOptions("subscribe" -> "t", "assign" -> """{"a":[0]}""")(
      "only one", "options can be specified")

    testBadOptions("assign" -> "")("no topicpartitions to assign")
    testBadOptions("subscribe" -> "")("no topics to subscribe")
    testBadOptions("subscribePattern" -> "")("pattern to subscribe is empty")
  }
test("unsupported kafka configs") {
def testUnsupportedConfig(key: String, value: String = "someValue"): Unit = {
val ex = intercept[IllegalArgumentException] {
val reader = spark
.readStream
.format("kafka")
.option("subscribe", "topic")
.option("kafka.bootstrap.servers", "somehost")
.option(s"$key", value)
reader.load()
}
assert(ex.getMessage.toLowerCase(Locale.ROOT).contains("not supported"))
}
testUnsupportedConfig("kafka.auto.offset.reset")
testUnsupportedConfig("kafka.enable.auto.commit")
testUnsupportedConfig("kafka.interceptor.classes")
testUnsupportedConfig("kafka.key.deserializer")
testUnsupportedConfig("kafka.value.deserializer")
testUnsupportedConfig("kafka.auto.offset.reset", "none")
testUnsupportedConfig("kafka.auto.offset.reset", "someValue")
testUnsupportedConfig("kafka.auto.offset.reset", "earliest")
testUnsupportedConfig("kafka.auto.offset.reset", "latest")
}
test("get offsets from case insensitive parameters") {
for ((optionKey, optionValue, answer) <- Seq(
(STARTING_OFFSETS_OPTION_KEY, "earLiEst", EarliestOffsetRangeLimit),
(ENDING_OFFSETS_OPTION_KEY, "laTest", LatestOffsetRangeLimit),
(STARTING_OFFSETS_OPTION_KEY, """{"topic-A":{"0":23}}""",
SpecificOffsetRangeLimit(Map(new TopicPartition("topic-A", 0) -> 23))))) {
val offset = getKafkaOffsetRangeLimit(
CaseInsensitiveMap[String](Map(optionKey -> optionValue)), "dummy", optionKey,
answer)
assert(offset === answer)
}
for ((optionKey, answer) <- Seq(
(STARTING_OFFSETS_OPTION_KEY, EarliestOffsetRangeLimit),
(ENDING_OFFSETS_OPTION_KEY, LatestOffsetRangeLimit))) {
val offset = getKafkaOffsetRangeLimit(
CaseInsensitiveMap[String](Map.empty), "dummy", optionKey, answer)
assert(offset === answer)
}
}
/** Builds the JSON value for the "assign" source option from a topic and partition ids. */
private def assignString(topic: String, partitions: Iterable[Int]): String =
  JsonUtils.partitions(partitions.map(new TopicPartition(topic, _)))
// Runs a streaming query that starts from explicit per-partition offsets and checks that
// only records at or after each starting offset are delivered, across stop/restart.
// Offset sentinels (per the JSON offset spec): -2L = earliest, -1L = latest, >= 0 = exact.
private def testFromSpecificOffsets(
topic: String,
failOnDataLoss: Boolean,
options: (String, String)*): Unit = {
val partitionOffsets = Map(
new TopicPartition(topic, 0) -> -2L,
new TopicPartition(topic, 1) -> -1L,
new TopicPartition(topic, 2) -> 0L,
new TopicPartition(topic, 3) -> 1L,
new TopicPartition(topic, 4) -> 2L
)
val startingOffsets = JsonUtils.partitionOffsets(partitionOffsets)
testUtils.createTopic(topic, partitions = 5)
// part 0 starts at earliest, these should all be seen
testUtils.sendMessages(topic, Array(-20, -21, -22).map(_.toString), Some(0))
// part 1 starts at latest, these should all be skipped
testUtils.sendMessages(topic, Array(-10, -11, -12).map(_.toString), Some(1))
// part 2 starts at 0, these should all be seen
testUtils.sendMessages(topic, Array(0, 1, 2).map(_.toString), Some(2))
// part 3 starts at 1, first should be skipped
testUtils.sendMessages(topic, Array(10, 11, 12).map(_.toString), Some(3))
// part 4 starts at 2, first and second should be skipped
testUtils.sendMessages(topic, Array(20, 21, 22).map(_.toString), Some(4))
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
val reader = spark
.readStream
.format("kafka")
.option("startingOffsets", startingOffsets)
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("failOnDataLoss", failOnDataLoss.toString)
options.foreach { case (k, v) => reader.option(k, v) }
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
// Values were produced as stringified ints; map back to Int for easy CheckAnswer matching.
val mapped: org.apache.spark.sql.Dataset[_] = kafka.map(kv => kv._2.toInt)
testStream(mapped)(
makeSureGetOffsetCalled,
Execute { q =>
// wait to reach the last offset in every partition
q.awaitOffset(0,
KafkaSourceOffset(partitionOffsets.mapValues(_ => 3L).toMap), streamingTimeout.toMillis)
},
CheckAnswer(-20, -21, -22, 0, 1, 2, 11, 12, 22),
StopStream,
StartStream(),
CheckAnswer(-20, -21, -22, 0, 1, 2, 11, 12, 22), // Should get the data back on recovery
AddKafkaData(Set(topic), 30, 31, 32, 33, 34)(ensureDataInMultiplePartition = true),
CheckAnswer(-20, -21, -22, 0, 1, 2, 11, 12, 22, 30, 31, 32, 33, 34),
StopStream
)
}
// Runs a streaming query that starts from explicit per-partition timestamps
// ("startingOffsetsByTimestamp") and checks that only records with a timestamp at or
// after each partition's starting timestamp are delivered, across stop/restart and
// (optionally) after new partitions are added.
private def testFromSpecificTimestamps(
topic: String,
failOnDataLoss: Boolean,
addPartitions: Boolean,
options: (String, String)*): Unit = {
// Produces records into a single partition with an explicit record timestamp.
def sendMessages(topic: String, msgs: Seq[String], part: Int, ts: Long): Unit = {
val records = msgs.map { msg =>
new RecordBuilder(topic, msg).partition(part).timestamp(ts).build()
}
testUtils.sendMessages(records)
}
testUtils.createTopic(topic, partitions = 5)
// First batch: all five partitions get data stamped 5 seconds in the past.
val firstTimestamp = System.currentTimeMillis() - 5000
sendMessages(topic, Array(-20).map(_.toString), 0, firstTimestamp)
sendMessages(topic, Array(-10).map(_.toString), 1, firstTimestamp)
sendMessages(topic, Array(0, 1).map(_.toString), 2, firstTimestamp)
sendMessages(topic, Array(10, 11).map(_.toString), 3, firstTimestamp)
sendMessages(topic, Array(20, 21, 22).map(_.toString), 4, firstTimestamp)
// Second batch: partitions 0-3 get newer data; partition 4 gets nothing.
val secondTimestamp = firstTimestamp + 1000
sendMessages(topic, Array(-21, -22).map(_.toString), 0, secondTimestamp)
sendMessages(topic, Array(-11, -12).map(_.toString), 1, secondTimestamp)
sendMessages(topic, Array(2).map(_.toString), 2, secondTimestamp)
sendMessages(topic, Array(12).map(_.toString), 3, secondTimestamp)
// no data after second timestamp for partition 4
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
// we intentionally starts from second timestamp,
// except for partition 4 - it starts from first timestamp
val startPartitionTimestamps: Map[TopicPartition, Long] = Map(
(0 to 3).map(new TopicPartition(topic, _) -> secondTimestamp): _*
) ++ Map(new TopicPartition(topic, 4) -> firstTimestamp)
val startingTimestamps = JsonUtils.partitionTimestamps(startPartitionTimestamps)
val reader = spark
.readStream
.format("kafka")
.option("startingOffsetsByTimestamp", startingTimestamps)
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("failOnDataLoss", failOnDataLoss.toString)
options.foreach { case (k, v) => reader.option(k, v) }
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped: org.apache.spark.sql.Dataset[_] = kafka.map(kv => kv._2.toInt)
testStream(mapped)(
makeSureGetOffsetCalled,
Execute { q =>
val partitions = (0 to 4).map(new TopicPartition(topic, _))
// wait to reach the last offset in every partition
q.awaitOffset(
0, KafkaSourceOffset(partitions.map(tp => tp -> 3L).toMap), streamingTimeout.toMillis)
},
// Expect second-batch data from partitions 0-3 and everything from partition 4.
CheckAnswer(-21, -22, -11, -12, 2, 12, 20, 21, 22),
StopStream,
StartStream(),
CheckAnswer(-21, -22, -11, -12, 2, 12, 20, 21, 22), // Should get the data back on recovery
StopStream,
AddKafkaData(Set(topic), 30, 31, 32), // Add data when stream is stopped
StartStream(),
CheckAnswer(-21, -22, -11, -12, 2, 12, 20, 21, 22, 30, 31, 32), // Should get the added data
AssertOnQuery("Add partitions") { query: StreamExecution =>
if (addPartitions) setTopicPartitions(topic, 10, query)
true
},
AddKafkaData(Set(topic), 40, 41, 42, 43, 44)(ensureDataInMultiplePartition = true),
CheckAnswer(-21, -22, -11, -12, 2, 12, 20, 21, 22, 30, 31, 32, 40, 41, 42, 43, 44),
StopStream
)
}
// Verifies the schema and values of every column the Kafka source exposes (key, value,
// topic, partition, offset, timestamp, timestampType, headers) for a single record.
test("Kafka column types") {
  val now = System.currentTimeMillis()
  val topic = newTopic()
  // Fix: create the topic that is actually subscribed to below. The previous code called
  // `testUtils.createTopic(newTopic(), partitions = 1)`, which created a second, unused
  // topic and left `topic` to be implicitly auto-created by the broker on first send.
  testUtils.createTopic(topic, partitions = 1)
  // Single record with a null key, value "1", and two headers.
  testUtils.sendMessage(
    new RecordBuilder(topic, "1")
      .headers(Seq(("a", "b".getBytes(UTF_8)), ("c", "d".getBytes(UTF_8)))).build()
  )
  val kafka = spark
    .readStream
    .format("kafka")
    .option("kafka.bootstrap.servers", testUtils.brokerAddress)
    .option("kafka.metadata.max.age.ms", "1")
    .option("startingOffsets", "earliest")
    .option("subscribe", topic)
    .option("includeHeaders", "true")
    .load()
  val query = kafka
    .writeStream
    .format("memory")
    .queryName("kafkaColumnTypes")
    .trigger(defaultTrigger)
    .start()
  eventually(timeout(streamingTimeout)) {
    assert(spark.table("kafkaColumnTypes").count == 1,
      s"Unexpected results: ${spark.table("kafkaColumnTypes").collectAsList()}")
  }
  val row = spark.table("kafkaColumnTypes").head()
  // The record was produced without a key, so the key column must be null.
  assert(row.getAs[Array[Byte]]("key") === null, s"Unexpected results: $row")
  assert(row.getAs[Array[Byte]]("value") === "1".getBytes(UTF_8), s"Unexpected results: $row")
  assert(row.getAs[String]("topic") === topic, s"Unexpected results: $row")
  assert(row.getAs[Int]("partition") === 0, s"Unexpected results: $row")
  assert(row.getAs[Long]("offset") === 0L, s"Unexpected results: $row")
  // We cannot check the exact timestamp as it's the time that messages were inserted by the
  // producer. So here we just use a low bound to make sure the internal conversion works.
  assert(row.getAs[java.sql.Timestamp]("timestamp").getTime >= now, s"Unexpected results: $row")
  assert(row.getAs[Int]("timestampType") === 0, s"Unexpected results: $row")
  // Asserts the headers column (array<struct<key:string,value:binary>>) matches `expected`
  // element-by-element, preserving order.
  def checkHeader(row: Row, expected: Seq[(String, Array[Byte])]): Unit = {
    val headers = row.getList[Row](row.fieldIndex("headers")).asScala
    assert(headers.length === expected.length)
    (0 until expected.length).foreach { idx =>
      val key = headers(idx).getAs[String]("key")
      val value = headers(idx).getAs[Array[Byte]]("value")
      assert(key === expected(idx)._1)
      assert(value === expected(idx)._2)
    }
  }
  checkHeader(row, Seq(("a", "b".getBytes(UTF_8)), ("c", "d".getBytes(UTF_8))))
  query.stop()
}
// Runs a streaming query starting at "latest" offsets: the pre-existing "-1" record must
// be skipped; data added afterwards (each value + 1 by the map below) must all arrive,
// including across stop/restart and after new partitions are added.
private def testFromLatestOffsets(
topic: String,
addPartitions: Boolean,
failOnDataLoss: Boolean,
options: (String, String)*): Unit = {
testUtils.createTopic(topic, partitions = 5)
// Seed one record before the query starts; "latest" means it must never be read.
testUtils.sendMessages(topic, Array("-1"))
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
val reader = spark
.readStream
.format("kafka")
.option("startingOffsets", "latest")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("failOnDataLoss", failOnDataLoss.toString)
options.foreach { case (k, v) => reader.option(k, v) }
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
// +1 so expected answers are offset from the produced values.
val mapped = kafka.map(kv => kv._2.toInt + 1)
testStream(mapped)(
makeSureGetOffsetCalled,
AddKafkaData(Set(topic), 1, 2, 3),
CheckAnswer(2, 3, 4),
StopStream,
StartStream(),
CheckAnswer(2, 3, 4), // Should get the data back on recovery
StopStream,
AddKafkaData(Set(topic), 4, 5, 6), // Add data when stream is stopped
StartStream(),
CheckAnswer(2, 3, 4, 5, 6, 7), // Should get the added data
AddKafkaData(Set(topic), 7, 8),
CheckAnswer(2, 3, 4, 5, 6, 7, 8, 9),
AssertOnQuery("Add partitions") { query: StreamExecution =>
if (addPartitions) setTopicPartitions(topic, 10, query)
true
},
AddKafkaData(Set(topic), 9, 10, 11, 12, 13, 14, 15, 16),
CheckAnswer(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)
)
}
// Runs a streaming query starting at "earliest" offsets: the three pre-existing records
// (1..3) must be read along with everything added later, across stop/restart and after
// new partitions are added. Uses the provider class name instead of "kafka" to also
// exercise format resolution by fully-qualified class.
private def testFromEarliestOffsets(
topic: String,
addPartitions: Boolean,
failOnDataLoss: Boolean,
options: (String, String)*): Unit = {
testUtils.createTopic(topic, partitions = 5)
// Seed records before the query starts; "earliest" means they must all be read.
testUtils.sendMessages(topic, (1 to 3).map { _.toString }.toArray)
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
val reader = spark.readStream
reader
.format(classOf[KafkaSourceProvider].getCanonicalName.stripSuffix("$"))
.option("startingOffsets", s"earliest")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("failOnDataLoss", failOnDataLoss.toString)
options.foreach { case (k, v) => reader.option(k, v) }
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
// +1 so expected answers are offset from the produced values.
val mapped = kafka.map(kv => kv._2.toInt + 1)
testStream(mapped)(
AddKafkaData(Set(topic), 4, 5, 6), // Add data when stream is stopped
CheckAnswer(2, 3, 4, 5, 6, 7),
StopStream,
StartStream(),
CheckAnswer(2, 3, 4, 5, 6, 7),
StopStream,
AddKafkaData(Set(topic), 7, 8),
StartStream(),
CheckAnswer(2, 3, 4, 5, 6, 7, 8, 9),
AssertOnQuery("Add partitions") { query: StreamExecution =>
if (addPartitions) setTopicPartitions(topic, 10, query)
true
},
AddKafkaData(Set(topic), 9, 10, 11, 12, 13, 14, 15, 16),
CheckAnswer(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)
)
}
// Verifies that null keys and null values survive the round trip through a transactional
// producer and a read_committed streaming read: 5 records with (null, null), 5 with
// (key, null), 5 with (null, value). Results are collected into an in-memory table and
// compared once the transaction commits.
protected def testNullableKeyValue(trigger: Trigger): Unit = {
val table = "kafka_null_key_value_source_test"
withTable(table) {
val topic = newTopic()
testUtils.createTopic(topic)
testUtils.withTransactionalProducer { producer =>
val df = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
// read_committed: records are only visible after commitTransaction() below.
.option("kafka.isolation.level", "read_committed")
.option("startingOffsets", "earliest")
.option("subscribe", topic)
.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val q = df
.writeStream
.format("memory")
.queryName(table)
.trigger(trigger)
.start()
try {
var idx = 0
producer.beginTransaction()
// 5 records with neither key nor value.
val expected1 = Seq.tabulate(5) { _ =>
producer.send(new ProducerRecord[String, String](topic, null, null)).get()
(null, null)
}.asInstanceOf[Seq[(String, String)]]
// 5 records with a key but a null value.
val expected2 = Seq.tabulate(5) { _ =>
idx += 1
producer.send(new ProducerRecord[String, String](topic, idx.toString, null)).get()
(idx.toString, null)
}.asInstanceOf[Seq[(String, String)]]
// 5 records with a null key but a value.
val expected3 = Seq.tabulate(5) { _ =>
idx += 1
producer.send(new ProducerRecord[String, String](topic, null, idx.toString)).get()
(null, idx.toString)
}.asInstanceOf[Seq[(String, String)]]
producer.commitTransaction()
eventually(timeout(streamingTimeout)) {
checkAnswer(spark.table(table), (expected1 ++ expected2 ++ expected3).toDF())
}
} finally {
q.stop()
}
}
}
}
}
// Companion holding state shared with test code that runs on executors / in callbacks,
// where suite instance members are not reachable.
object KafkaSourceSuite {
// Test-wide handle to the Kafka test harness; @volatile so it is visible across threads.
@volatile var globalTestUtils: KafkaTestUtils = _
// Thread-safe sink for values collected during tests.
val collectedData = new ConcurrentLinkedQueue[Any]()
}
/**
 * Stress test for the Kafka streaming source: repeatedly adds data while randomly
 * creating topics, deleting topics, and adding partitions, checking that no data is
 * lost or duplicated across 50 iterations.
 */
class KafkaSourceStressSuite extends KafkaSourceTest {

  import testImplicits._

  // Monotonic counter used to generate unique stress topic names.
  val topicId = new AtomicInteger(1)

  // Current set of live stress topics; mutated from the stress-action callback below,
  // hence @volatile.
  @volatile var topics: Seq[String] = (1 to 5).map(_ => newStressTopic)

  def newStressTopic: String = s"stress${topicId.getAndIncrement()}"

  /**
   * Returns a uniformly random Int in the inclusive range [start, end].
   *
   * Fix: the previous formula `start + Random.nextInt(start + end - 1)` only yielded the
   * intended range when start == 1 (for any other start the bound is wrong). With
   * `end - start + 1` the distribution is identical for every existing call site
   * (all use start = 1, range 1..6) and correct for any start.
   */
  private def nextInt(start: Int, end: Int): Int = {
    start + Random.nextInt(end - start + 1)
  }

  test("stress test with multiple topics and partitions") {
    // Seed each initial topic with a random partition count and some data.
    topics.foreach { topic =>
      testUtils.createTopic(topic, partitions = nextInt(1, 6))
      testUtils.sendMessages(topic, (101 to 105).map { _.toString }.toArray)
    }

    // Create Kafka source that reads from latest offset
    val kafka =
      spark.readStream
        .format(classOf[KafkaSourceProvider].getCanonicalName.stripSuffix("$"))
        .option("kafka.bootstrap.servers", testUtils.brokerAddress)
        .option("kafka.metadata.max.age.ms", "1")
        .option("subscribePattern", "stress.*")
        .option("failOnDataLoss", "false")
        .option("kafka.request.timeout.ms", "3000")
        .option("kafka.default.api.timeout.ms", "3000")
        .load()
        .selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
        .as[(String, String)]

    val mapped = kafka.map(kv => kv._2.toInt + 1)

    runStressTest(
      mapped,
      Seq(makeSureGetOffsetCalled),
      (d, running) => {
        // Each iteration randomly picks a topology mutation to combine with adding data.
        Random.nextInt(5) match {
          case 0 => // Add a new topic
            topics = topics ++ Seq(newStressTopic)
            AddKafkaData(topics.toSet, d: _*)(message = s"Add topic $newStressTopic",
              topicAction = (topic, partition) => {
                if (partition.isEmpty) {
                  testUtils.createTopic(topic, partitions = nextInt(1, 6))
                }
              })
          case 1 if running =>
            // Only delete a topic when the query is running. Otherwise, we may lost data and
            // cannot check the correctness.
            val deletedTopic = topics(Random.nextInt(topics.size))
            if (deletedTopic != topics.head) {
              topics = topics.filterNot(_ == deletedTopic)
            }
            AddKafkaData(topics.toSet, d: _*)(message = s"Delete topic $deletedTopic",
              topicAction = (topic, partition) => {
                // Never remove the first topic to make sure we have at least one topic
                if (topic == deletedTopic && deletedTopic != topics.head) {
                  testUtils.deleteTopic(deletedTopic)
                }
              })
          case 2 => // Add new partitions
            AddKafkaData(topics.toSet, d: _*)(message = "Add partition",
              topicAction = (topic, partition) => {
                testUtils.addPartitions(topic, partition.get + nextInt(1, 6))
              })
          case _ => // Just add new data
            AddKafkaData(topics.toSet, d: _*)
        }
      },
      iterations = 50)
  }
}
| witgo/spark | external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala | Scala | apache-2.0 | 73,520 |
package com.arcusys.learn.scorm.tracking.storage.impl.liferay
import com.arcusys.learn.persistence.liferay.model.LFAttemptData
import com.arcusys.learn.persistence.liferay.service.LFAttemptDataLocalServiceUtil
import com.arcusys.valamis.lesson.scorm.model.tracking.AttemptData
import com.arcusys.valamis.lesson.scorm.storage.tracking.DataModelStorage
import scala.collection.JavaConverters._
/**
* Created by mminin on 16.10.14.
*/
/**
 * Liferay-backed implementation of [[DataModelStorage]]: persists SCORM attempt data
 * model elements as LFAttemptData rows keyed by (attemptID, activityID, dataKey).
 */
class DataModelStorageImpl extends DataModelStorage {
// Deletes all stored attempt data.
override def renew(): Unit = {
LFAttemptDataLocalServiceUtil.removeAll()
}
// Returns all (dataKey -> dataValue) pairs for one attempt/activity.
override def getKeyedValues(attemptID: Int, activityID: String): Map[String, Option[String]] = {
LFAttemptDataLocalServiceUtil.findByAttemptIDWithActivityID(attemptID, activityID).asScala
.map(extract)
.map(data => (data.dataKey -> data.dataValue)).toMap
}
// Returns (activityID -> dataValue) for every activity of the attempt that has `key`.
override def getValuesByKey(attemptID: Int, key: String): Map[String, Option[String]] = {
LFAttemptDataLocalServiceUtil.findByAttemptIDWithDataKey(attemptID, key).asScala
.map(extract)
.map(data => (data.activityID -> data.dataValue)).toMap
}
// Returns the single value for (attemptID, activityID, key), or None when absent
// (findBySingleKey is paged 0..1, so at most one row is fetched).
override def getValue(attemptID: Int, activityID: String, key: String): Option[String] = {
LFAttemptDataLocalServiceUtil.findBySingleKey(attemptID, activityID, key, 0, 1).asScala.headOption
.map(extract)
.flatMap(_.dataValue)
}
// Upsert: inserts a new row when no value exists, otherwise re-fetches the entity and
// updates it in place. Note the read-then-write is not atomic; concurrent writers could
// race, matching the original behavior.
override def setValue(attemptID: Int, activityID: String, key: String, value: String) {
getValue(attemptID, activityID, key) match {
case None =>
val newEntity: LFAttemptData = LFAttemptDataLocalServiceUtil.createLFAttemptData()
newEntity.setAttemptID(attemptID)
newEntity.setActivityID(activityID)
newEntity.setDataKey(key)
newEntity.setDataValue(value)
LFAttemptDataLocalServiceUtil.addLFAttemptData(newEntity)
case Some(_) =>
val found = LFAttemptDataLocalServiceUtil.findBySingleKey(attemptID, activityID, key, 0, 1)
if (!found.isEmpty) {
val entity = found.get(0)
entity.setDataValue(value)
LFAttemptDataLocalServiceUtil.updateLFAttemptData(entity)
}
}
}
// Returns all (dataKey -> dataValue) pairs whose key starts with `key`, using a SQL LIKE
// prefix match.
override def getCollectionValues(attemptID: Int, activityID: String, key: String): Map[String, Option[String]] = {
// add matcher sign "%"
val flatData = LFAttemptDataLocalServiceUtil.findByCollectionValues(attemptID, activityID, key + "%").asScala
.map(extract)
.map(data => data.dataKey -> data.dataValue)
Map(flatData: _*)
}
// Converts a Liferay entity to the domain AttemptData; toOption maps a null dataValue
// to None (via the implicit in LiferayCommon).
private def extract(entity: LFAttemptData) = {
import com.arcusys.learn.storage.impl.liferay.LiferayCommon._
new AttemptData(entity.getDataKey, entity.getDataValue.toOption, entity.getAttemptID, entity.getActivityID)
}
}
| ViLPy/Valamis | learn-persistence-liferay-wrapper/src/main/scala/com/arcusys/learn/scorm/tracking/storage/impl/liferay/DataModelStorageImpl.scala | Scala | lgpl-3.0 | 2,743 |
/*
Copyright (c) 2016, Rice University
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Rice University
nor the names of its contributors may be used to endorse or
promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.apache.spark.rdd.cl
import scala.reflect.ClassTag
import java.nio.ByteBuffer
import java.nio.ByteOrder
import java.lang.reflect.Constructor
import java.lang.reflect.Field
import com.amd.aparapi.internal.model.ClassModel
import com.amd.aparapi.internal.model.Entrypoint
import com.amd.aparapi.internal.model.ClassModel.NameMatcher
import com.amd.aparapi.internal.model.ClassModel.FieldDescriptor
import com.amd.aparapi.internal.util.UnsafeWrapper
/**
 * Output-buffer wrapper for OpenCL kernels whose per-element output is an array of boxed
 * primitives (Integer, Float, or Double). Iterates over up to `nLoaded` result slots,
 * materializing each slot's array through the native bridge.
 */
class PrimitiveArrayOutputBufferWrapper[T](val N : Int, val devicePointerSize : Int,
    val heapSize : Int, val sample : T) extends OutputBufferWrapper[T] {
  // Upper bound on the number of device heap buffers tracked at once.
  val maxBuffers : Int = 5
  val buffers : Array[Long] = new Array[Long](maxBuffers)
  // Index of the next slot to hand out, and the number of slots actually filled.
  var currSlot : Int = 0
  var nLoaded : Int = -1

  // Element type, discovered from the first element of the sample array.
  val primitiveClass = sample.asInstanceOf[Array[_]](0).getClass

  // Type tag understood by the native bridge (1337 = int, 1338 = float, 1339 = double).
  val primitiveTypeId : Int = primitiveClass match {
    case c if c.equals(classOf[java.lang.Integer]) => 1337
    case c if c.equals(classOf[java.lang.Float])   => 1338
    case c if c.equals(classOf[java.lang.Double])  => 1339
    case c => throw new RuntimeException("Unsupported type " + c.getName)
  }

  // Native pinned-buffer handles, captured from fillFrom.
  var outArgBuffer : Long = 0L
  var iterBuffer : Long = 0L

  override def next() : T = {
    val result = OpenCLBridge.getArrayValuesFromOutputBuffers(buffers, outArgBuffer,
        iterBuffer, currSlot, primitiveTypeId).asInstanceOf[T]
    currSlot += 1
    result
  }

  override def hasNext() : Boolean = currSlot < nLoaded

  // This wrapper consumes two kernel arguments (out arg + iterator buffer).
  override def countArgumentsUsed() : Int = 2

  // Resets iteration state and captures the native buffers for the finished kernel run.
  override def fillFrom(kernel_ctx : Long,
      nativeOutputBuffers : NativeOutputBuffers[T]) {
    currSlot = 0
    nLoaded = OpenCLBridge.getNLoaded(kernel_ctx)
    assert(nLoaded <= N)

    val natives = nativeOutputBuffers.asInstanceOf[PrimitiveArrayNativeOutputBuffers[T]]
    outArgBuffer = natives.pinnedOutBuffer
    iterBuffer = natives.pinnedIterBuffer

    OpenCLBridge.fillHeapBuffersFromKernelContext(kernel_ctx, buffers, maxBuffers)
  }

  override def generateNativeOutputBuffer(N : Int, outArgNum : Int, dev_ctx : Long,
      ctx : Long, sampleOutput : T, entryPoint : Entrypoint) : NativeOutputBuffers[T] =
    new PrimitiveArrayNativeOutputBuffers(N, outArgNum, dev_ctx, ctx, entryPoint)
}
| agrippa/spark-swat | swat/src/main/scala/org/apache/spark/rdd/cl/PrimitiveArrayOutputBufferWrapper.scala | Scala | bsd-3-clause | 3,929 |
package dsmoq.maintenance.services
import java.util.UUID
import org.joda.time.DateTime
import org.scalatest.BeforeAndAfter
import org.scalatest.FreeSpec
import org.scalatest.Matchers._
import scalikejdbc.config.DBsWithEnv
import dsmoq.maintenance.data.user
import dsmoq.maintenance.data.apikey.AddParameter
import dsmoq.maintenance.data.apikey.DisableParameter
import dsmoq.maintenance.data.apikey.SearchResultApiKey
/**
 * Spec for ApiKeyService: listing order, key creation, and key disabling, including
 * interaction with disabled users (a disabled user's keys disappear from list()).
 * Seed data (SpecCommonLogic.insertDummyData) provides 2 active keys and user
 * 023bfa40-e897-4dad-96db-9fd3cf001e79.
 */
class ApiKeyServiceSpec extends FreeSpec with BeforeAndAfter {
DBsWithEnv("test").setup()
SpecCommonLogic.deleteAllCreateData()
// Reset and reseed the database around every test for isolation.
before {
SpecCommonLogic.deleteAllCreateData()
SpecCommonLogic.insertDummyData()
}
after {
SpecCommonLogic.deleteAllCreateData()
}
"order by" - {
// list() must be sorted ascending by key creation time.
"create datetime" in {
SpecCommonLogic.deleteAllCreateData()
for (i <- 1 to 5) {
val dt = new DateTime(2016, 9, 13, 0, 0, i)
SpecCommonLogic.insertUser(SpecCommonLogic.UserDetail(name = s"test${i}", ts = dt))
}
val raw = ApiKeyService.list()
val sorted = raw.sortWith((x1, x2) => x1.createdAt.isBefore(x2.createdAt))
raw should be(sorted)
}
}
"create for" - {
// Unknown user name: add() must fail and leave the key list untouched.
"invalid name" in {
val orgs = ApiKeyService.list()
val thrown = the[ServiceException] thrownBy {
val param = AddParameter.fromMap(Map("name" -> "hoge"))
ApiKeyService.add(param).get
}
thrown.getMessage should be("無効なユーザーが指定されました。")
ApiKeyService.list() should be(orgs)
}
// Disabled user: add() must fail the same way.
"disabled user" in {
UserService.updateDisabled(user.UpdateParameter(Seq.empty, Seq("023bfa40-e897-4dad-96db-9fd3cf001e79")))
val orgs = ApiKeyService.list()
val thrown = the[ServiceException] thrownBy {
val param = AddParameter.fromMap(Map("name" -> "dummy"))
ApiKeyService.add(param).get
}
thrown.getMessage should be("無効なユーザーが指定されました。")
ApiKeyService.list() should be(orgs)
}
// A user may hold any number of keys; adding one always grows list() by one.
"valid user with" - {
for {
n <- 0 to 2
} {
s"${n} key" in {
// Disable the two seeded keys so we start from an empty list.
ApiKeyService.disable(DisableParameter.fromMap(Map("id" -> "0cebc943-a0b9-4aa5-927d-65fa374bf0ec")))
ApiKeyService.disable(DisableParameter.fromMap(Map("id" -> "0cebc943-a0b9-4aa5-927d-65fa374bf0ed")))
ApiKeyService.list().size should be(0)
val addParam = AddParameter.fromMap(Map("name" -> "dummy1"))
for {
_ <- 1 to n
} {
ApiKeyService.add(addParam).get
}
val orgs = ApiKeyService.list()
orgs.size should be(n)
ApiKeyService.add(addParam).get
ApiKeyService.list().size should be(orgs.size + 1)
}
}
}
}
"disable to" - {
// Missing id parameter must raise, leaving both seeded keys intact.
"none key" in {
ApiKeyService.list().size should be(2)
val thrown = the[ServiceException] thrownBy {
val param = DisableParameter.fromMap(Map.empty)
ApiKeyService.disable(param).get
}
thrown.getMessage should be("キーが未選択です。")
ApiKeyService.list().size should be(2)
}
// Unknown id: disable() is a silent no-op.
"invalid key id" in {
ApiKeyService.list().size should be(2)
val param = DisableParameter.fromMap(Map("id" -> UUID.randomUUID.toString))
ApiKeyService.disable(param).get
ApiKeyService.list().size should be(2)
}
// Disabling an already-disabled key is idempotent.
"disabled key" in {
ApiKeyService.list().size should be(2)
val id = ApiKeyService.add(AddParameter.fromMap(Map("name" -> "dummy1"))).get
ApiKeyService.list().size should be(3)
val param = DisableParameter.fromMap(Map("id" -> id))
ApiKeyService.disable(param).get
ApiKeyService.list().size should be(2)
ApiKeyService.disable(param).get
ApiKeyService.list().size should be(2)
}
// Disabling a key of a disabled user takes effect once the user is re-enabled:
// re-enabling brings back 2 keys (the seeded ones), not 3.
"disabled user's" in {
ApiKeyService.list().size should be(2)
val id = ApiKeyService.add(AddParameter.fromMap(Map("name" -> "dummy1"))).get
ApiKeyService.list().size should be(3)
UserService.updateDisabled(user.UpdateParameter(Seq.empty, Seq("023bfa40-e897-4dad-96db-9fd3cf001e79")))
ApiKeyService.list().size should be(1)
val param = DisableParameter.fromMap(Map("id" -> id))
ApiKeyService.disable(param).get
ApiKeyService.list().size should be(1)
UserService.updateDisabled(user.UpdateParameter(Seq("023bfa40-e897-4dad-96db-9fd3cf001e79"), Seq.empty))
ApiKeyService.list().size should be(2)
}
// Key already disabled before the user is disabled: stays disabled after re-enable.
"disabled user's disabled key" in {
ApiKeyService.list().size should be(2)
val id = ApiKeyService.add(AddParameter.fromMap(Map("name" -> "dummy1"))).get
ApiKeyService.list().size should be(3)
val param = DisableParameter.fromMap(Map("id" -> id))
ApiKeyService.disable(param).get
ApiKeyService.list().size should be(2)
UserService.updateDisabled(user.UpdateParameter(Seq.empty, Seq("023bfa40-e897-4dad-96db-9fd3cf001e79")))
ApiKeyService.list().size should be(1)
ApiKeyService.disable(param).get
ApiKeyService.list().size should be(1)
UserService.updateDisabled(user.UpdateParameter(Seq("023bfa40-e897-4dad-96db-9fd3cf001e79"), Seq.empty))
ApiKeyService.list().size should be(2)
}
// Disabling a live seeded key shrinks the list by one.
"valid key" in {
ApiKeyService.list().size should be(2)
val param = DisableParameter.fromMap(Map("id" -> "0cebc943-a0b9-4aa5-927d-65fa374bf0ec"))
ApiKeyService.disable(param)
ApiKeyService.list().size should be(1)
}
}
}
| nkawa/dsmoq | server/maintenance/src/test/scala/dsmoq/maintenance/services/ApiKeyServiceSpec.scala | Scala | apache-2.0 | 5,441 |
package sample.persistence
//#persistent-actor-example
import akka.actor._
import akka.persistence._
/** Command sent to the persistent actor; carries the raw payload to persist. */
case class Cmd(data: String)

/** Event written to (and replayed from) the journal. */
case class Evt(data: String)

/** Immutable actor state: event payloads accumulated newest-first. */
case class ExampleState(events: List[String] = Nil) {
  /** Returns a new state with the event's payload prepended. */
  def updated(evt: Evt): ExampleState = copy(events = evt.data :: events)

  /** Number of events applied so far. */
  def size: Int = events.size

  /** Renders the events in arrival (oldest-first) order. */
  override def toString: String = events.reverse.toString
}
/**
 * Event-sourced actor: commands are turned into events, persisted, then applied to
 * in-memory state; on restart the state is rebuilt from the journal and/or a snapshot.
 */
class ExamplePersistentActor extends PersistentActor {
// Stable id under which events and snapshots are stored.
override def persistenceId = "sample-id-1"
var state = ExampleState()
def updateState(event: Evt): Unit =
state = state.updated(event)
def numEvents =
state.size
// Recovery path: replay journaled events, or start from an offered snapshot.
val receiveRecover: Receive = {
case evt: Evt => updateState(evt)
case SnapshotOffer(_, snapshot: ExampleState) => state = snapshot
}
// Command path: each Cmd persists two events (payload tagged with the current event
// count); the second persist handler also publishes the event to the event stream.
val receiveCommand: Receive = {
case Cmd(data) =>
persist(Evt(s"${data}-${numEvents}"))(updateState)
persist(Evt(s"${data}-${numEvents + 1}")) { event =>
updateState(event)
context.system.eventStream.publish(event)
}
case "snap" => saveSnapshot(state)
case "print" => println(state)
}
}
//#persistent-actor-example
// Demo driver: sends a few commands, snapshots, prints the state, then waits long
// enough (10s) for the asynchronous persistence to complete before shutting down.
object PersistentActorExample extends App {
val system = ActorSystem("example")
val persistentActor = system.actorOf(Props[ExamplePersistentActor], "persistentActor-4-scala")
persistentActor ! Cmd("foo")
persistentActor ! Cmd("baz")
persistentActor ! Cmd("bar")
persistentActor ! "snap"
persistentActor ! Cmd("buzz")
persistentActor ! "print"
Thread.sleep(10000)
system.terminate()
}
| JetBrains/intellij-scala | scala/scala-impl/testdata/localProjects/akka-samples/src/main/scala/sample/persistence/PersistentActorExample.scala | Scala | apache-2.0 | 1,565 |
package com.nrinaudo.fetch.net
import java.net.{ProtocolException, HttpURLConnection}
import javax.net.ssl.HttpsURLConnection
import com.nrinaudo.fetch._
import com.nrinaudo.fetch.Response
import com.nrinaudo.fetch.Status
import scala.collection.JavaConverters._
import com.nrinaudo.fetch.Request.HttpEngine
import java.io.InputStream
object UrlEngine {
/** Default chunk size (in bytes) when chunked transfer encoding is used. */
val DefaultChunkSize = 4096

// Accessibility-overridden handle to HttpURLConnection's private `method` field; used by
// setMethod to force HTTP verbs the JRE's setRequestMethod rejects (see JERSEY-639).
private lazy val methodField = {
val field = classOf[HttpURLConnection].getDeclaredField("method")
field.setAccessible(true)
field
}
}
/**
* `java.net` connector for fetch.
*/
/**
 * `java.net`-backed HTTP engine for fetch.
 *
 * @param readTimeout     socket read timeout in ms; 0 (default) means no timeout.
 * @param connectTimeout  connection timeout in ms; 0 (default) means no timeout.
 * @param followsRedirect whether HttpURLConnection follows redirects automatically.
 * @param chunkSize       chunk size (bytes) when chunked transfer encoding is used.
 */
case class UrlEngine(readTimeout: Int = 0, connectTimeout: Int = 0, followsRedirect: Boolean = false,
                     chunkSize: Int = UrlEngine.DefaultChunkSize) extends HttpEngine {
  /** Configures the specified connection to this client's preferences. */
  private def configure(con: HttpURLConnection): Unit = {
    if(connectTimeout > 0) con.setConnectTimeout(connectTimeout)
    if(readTimeout > 0) con.setReadTimeout(readTimeout)
    con.setInstanceFollowRedirects(followsRedirect)
  }

  /** Work around for some (all?) JREs not supporting all HTTP methods.
    * See https://java.net/jira/browse/JERSEY-639
    */
  private def setMethod(con: HttpURLConnection, method: String): Unit = {
    try {con.setRequestMethod(method)}
    catch {
      case _: ProtocolException =>
        // Fall back to writing the private `method` field directly; for HTTPS the real
        // connection is behind a `delegate` field, so write through that when present.
        con match {
          case https: HttpsURLConnection => https.getClass.getDeclaredFields.find {_.getName == "delegate"}.foreach {d =>
            d.setAccessible(true)
            UrlEngine.methodField.set(d.get(con), method)
          }
          case _ => UrlEngine.methodField.set(con, method)
        }
    }
  }

  /** Best effort attempt at finding a workable stream. If all else fails, use an empty stream. */
  private def responseStream(status: Status, con: HttpURLConnection) = {
    val stream = if(status.isError) con.getErrorStream else con.getInputStream
    if(stream == null) new InputStream {
      override def read(): Int = -1
    }
    else stream
  }

  /** Sends the request over `con` and wraps the raw response. */
  private def process(con: HttpURLConnection, method: Method, body: Option[RequestEntity], headers: Headers) = {
    // Generic configuration.
    configure(con)
    setMethod(con, method.name)

    // Entity body configuration.
    body.foreach {b =>
      con.setDoOutput(true)
      // Fix: setFixedLengthStreamingMode only accepts an Int here (Java 1.6 API), so a
      // content length larger than Int.MaxValue would overflow `toInt`. Fall back to
      // chunked transfer encoding for such bodies instead of truncating the length.
      b.contentLength match {
        case Some(length) if length <= Int.MaxValue => con.setFixedLengthStreamingMode(length.toInt)
        case _                                      => con.setChunkedStreamingMode(chunkSize)
      }
    }

    // Headers.
    headers.values.foreach {case (name, value) => con.setRequestProperty(name, value)}

    con.connect()

    // Writes the request body if necessary.
    body.foreach {_(con.getOutputStream)}

    val status = Status(con.getResponseCode)
    Response.fromStream(status,
      new Headers(con.getHeaderFields.asScala.mapValues(_.asScala.mkString(", ")).toMap),
      responseStream(status, con))
  }

  def apply(url: Url, method: Method, body: Option[RequestEntity], headers: Headers): Response[ResponseEntity] =
    url.toURI.toURL.openConnection() match {
      case con: HttpURLConnection => process(con, method, body, headers)
      case _ => throw new AssertionError("An URL opened a non-URL HTTP connection.")
    }
}
| nrinaudo/fetch | core/src/main/scala/com/nrinaudo/fetch/net/UrlEngine.scala | Scala | mit | 3,595 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.controller
import scala.concurrent.Await
import scala.concurrent.duration.DurationInt
import scala.util.{Failure, Success}
import akka.actor.ActorSystem
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.server.Route
import akka.stream.ActorMaterializer
import spray.json._
import spray.json.DefaultJsonProtocol._
import whisk.common.AkkaLogging
import whisk.common.Logging
import whisk.common.LoggingMarkers
import whisk.common.TransactionId
import whisk.core.WhiskConfig
import whisk.core.database.RemoteCacheInvalidation
import whisk.core.database.CacheChangeNotification
import whisk.core.entitlement._
import whisk.core.entity._
import whisk.core.entity.ActivationId.ActivationIdGenerator
import whisk.core.entity.ExecManifest.Runtimes
import whisk.core.loadBalancer.{LoadBalancerService}
import whisk.http.BasicHttpService
import whisk.http.BasicRasService
/**
* The Controller is the service that provides the REST API for OpenWhisk.
*
* It extends the BasicRasService so it includes a ping endpoint for monitoring.
*
* Akka sends messages to akka Actors -- the Controller is an Actor, ready to receive messages.
*
* It is possible to deploy a hot-standby controller. Each controller needs its own instance. This instance is a
* consecutive numbering, starting with 0.
* The state and cache of each controller is not shared to the other controllers.
* If the base controller crashes, the hot-standby controller will be used. After the base controller is up again,
* it will be used again. Because of the empty cache after restart, there are no problems with inconsistency.
 * The only problem that could occur is that the base controller is not reachable but does not restart. After switching
 * back to the base controller, there could be an inconsistency in the cache (e.g. if a user has updated an action). This
 * inconsistency will resolve on its own after the cached item is removed, 5 minutes after it was generated.
*
* Uses the Akka routing DSL: http://doc.akka.io/docs/akka-http/current/scala/http/routing-dsl/overview.html
*
* @param config A set of properties needed to run an instance of the controller service
* @param instance if running in scale-out, a unique identifier for this instance in the group
* @param verbosity logging verbosity
* @param executionContext Scala runtime support for concurrent operations
*/
class Controller(val instance: InstanceId,
                 runtimes: Runtimes,
                 implicit val whiskConfig: WhiskConfig,
                 implicit val actorSystem: ActorSystem,
                 implicit val materializer: ActorMaterializer,
                 implicit val logging: Logging)
    extends BasicRasService {

  // Total number of controllers in the deployment and this instance's position in it.
  override val numberOfInstances = whiskConfig.controllerInstances.toInt
  override val instanceOrdinal = instance.toInt

  // Emit a startup marker so deployments can track controller boot in the logs.
  TransactionId.controller.mark(
    this,
    LoggingMarkers.CONTROLLER_STARTUP(instance.toInt),
    s"starting controller instance ${instance.toInt}")

  /**
   * A Route in Akka is technically a function taking a RequestContext as a parameter.
   *
   * The "~" Akka DSL operator composes two independent Routes, building a routing tree structure.
   * @see http://doc.akka.io/docs/akka-http/current/scala/http/routing-dsl/routes.html#composing-routes
   */
  override def routes(implicit transid: TransactionId): Route = {
    super.routes ~ {
      (pathEndOrSingleSlash & get) {
        // GET / answers with the static controller info document built below.
        complete(info)
      }
    } ~ apiV1.routes ~ swagger.swaggerRoutes ~ internalInvokerHealth
  }

  // initialize datastores
  private implicit val authStore = WhiskAuthStore.datastore(whiskConfig)
  private implicit val entityStore = WhiskEntityStore.datastore(whiskConfig)
  private implicit val activationStore = WhiskActivationStore.datastore(whiskConfig)

  // Propagates local cache invalidations to the other controller instances so
  // their entity caches do not keep serving stale entries after an update here.
  private implicit val cacheChangeNotification = Some(new CacheChangeNotification {
    val remoteCacheInvalidaton = new RemoteCacheInvalidation(whiskConfig, "controller", instance)
    override def apply(k: CacheKey) = remoteCacheInvalidaton.notifyOtherInstancesAboutInvalidation(k)
  })

  // initialize backend services
  private implicit val loadBalancer = new LoadBalancerService(whiskConfig, instance, entityStore)
  private implicit val entitlementProvider = new LocalEntitlementProvider(whiskConfig, loadBalancer)
  private implicit val activationIdFactory = new ActivationIdGenerator {}

  // register collections
  Collection.initialize(entityStore)

  /** The REST APIs. */
  implicit val controllerInstance = instance
  private val apiV1 = new RestAPIVersion(whiskConfig, "api", "v1")
  private val swagger = new SwaggerDocs(Uri.Path.Empty, "infoswagger.json")

  /**
   * Handles GET /invokers URI.
   *
   * @return JSON of invoker health
   */
  private val internalInvokerHealth = {
    implicit val executionContext = actorSystem.dispatcher
    (path("invokers") & get) {
      complete {
        // Render each known invoker as "invokerN" -> health-state in a JSON object.
        loadBalancer.allInvokers.map(_.map {
          case (instance, state) => s"invoker${instance.toInt}" -> state.asString
        }.toMap.toJson.asJsObject)
      }
    }
  }

  // controller top level info
  private val info = Controller.info(whiskConfig, runtimes, List(apiV1.basepath()))
}
/**
 * Singleton object provides a factory to create and start an instance of the Controller service.
 */
object Controller {

  // requiredProperties is a Map whose keys define properties that must be bound to
  // a value, and whose values are default values. A null value in the Map means there is
  // no default value specified, so it must appear in the properties file
  def requiredProperties =
    Map(WhiskConfig.controllerInstances -> null) ++
      ExecManifest.requiredProperties ++
      RestApiCommons.requiredProperties ++
      LoadBalancerService.requiredProperties ++
      EntitlementProvider.requiredProperties

  /** Builds the static JSON document served on GET /: description, support links,
    * exposed API paths, configured limits and the supported action runtimes. */
  private def info(config: WhiskConfig, runtimes: Runtimes, apis: List[String]) =
    JsObject(
      "description" -> "OpenWhisk".toJson,
      "support" -> JsObject(
        "github" -> "https://github.com/apache/incubator-openwhisk/issues".toJson,
        "slack" -> "http://slack.openwhisk.org".toJson),
      "api_paths" -> apis.toJson,
      "limits" -> JsObject(
        "actions_per_minute" -> config.actionInvokePerMinuteLimit.toInt.toJson,
        "triggers_per_minute" -> config.triggerFirePerMinuteLimit.toInt.toJson,
        "concurrent_actions" -> config.actionInvokeConcurrentLimit.toInt.toJson),
      "runtimes" -> runtimes.toJson)

  /** Process entry point: reads configuration from the environment, validates it
    * and starts the HTTP service. Terminates the JVM on invalid configuration or
    * an invalid runtimes manifest. */
  def main(args: Array[String]): Unit = {
    implicit val actorSystem = ActorSystem("controller-actor-system")
    implicit val logger = new AkkaLogging(akka.event.Logging.getLogger(actorSystem, this))

    // extract configuration data from the environment
    val config = new WhiskConfig(requiredProperties)
    val port = config.servicePort.toInt

    // if deploying multiple instances (scale out), must pass the instance number as the
    // first command-line argument
    require(args.length >= 1, "controller instance required")
    // NOTE(review): throws NumberFormatException on non-numeric input — verify deployments always pass an integer.
    val instance = args(0).toInt

    // Logs the failure, shuts the actor system down and exits the process.
    def abort() = {
      logger.error(this, "Bad configuration, cannot start.")
      actorSystem.terminate()
      Await.result(actorSystem.whenTerminated, 30.seconds)
      sys.exit(1)
    }

    if (!config.isValid) {
      abort()
    }

    ExecManifest.initialize(config) match {
      case Success(_) =>
        val controller = new Controller(
          InstanceId(instance),
          ExecManifest.runtimesManifest,
          config,
          actorSystem,
          ActorMaterializer.create(actorSystem),
          logger)
        BasicHttpService.startService(controller.route, port)(actorSystem, controller.materializer)
      case Failure(t) =>
        logger.error(this, s"Invalid runtimes manifest: $t")
        abort()
    }
  }
}
| tysonnorris/openwhisk | core/controller/src/main/scala/whisk/core/controller/Controller.scala | Scala | apache-2.0 | 8,655 |
package com.mesosphere.cosmos.handler
import cats.Eval
import com.mesosphere.cosmos.Cosmos
import com.mesosphere.cosmos.circe.{DispatchingMediaTypedEncoder, MediaTypedDecoder, MediaTypedEncoder}
import com.mesosphere.cosmos.http.{MediaType, MediaTypes, RequestSession}
import com.twitter.finagle.http.RequestBuilder
import com.twitter.io.Buf
import com.twitter.util.{Await, Future, Try}
import io.circe.Json
import io.circe.generic.semiauto
import io.circe.syntax._
import io.finch.{/, Endpoint, Input, Output, post}
import org.scalatest.FreeSpec
final class VersionedResponsesSpec extends FreeSpec {

  import VersionedResponsesSpec._

  // Both tests post to the same endpoint; only the Accept header (carrying a
  // "version" media-type parameter) selects which encoder renders the response.
  "The Accept header determines the version of the response to send" - {
    "Foo version" in {
      val input = buildInput(Foo.encoder.mediaType, "42")
      val result = FoobarEndpoint(input)
      val jsonBody = extractBody(result)
      // v1 renders only the whole-number part of the response
      assertResult(Json.obj("whole" -> 42.asJson))(jsonBody)
    }
    "Bar version" in {
      val input = buildInput(Bar.encoder.mediaType, "3.14159")
      val result = FoobarEndpoint(input)
      val jsonBody = extractBody(result)
      // v2 renders only the decimal part of the response
      assertResult(Json.obj("decimal" -> 3.14159.asJson))(jsonBody)
    }
  }
}
object VersionedResponsesSpec {

  /** Response payload holding the source data for both wire formats. */
  final case class FoobarResponse(foo: Int, bar: Double)
  /** v1 wire format: only the whole-number part. */
  final case class Foo(whole: Int)
  /** v2 wire format: only the decimal part. */
  final case class Bar(decimal: Double)

  object Foo {
    // Encoder selected when the Accept header carries the "version=v1" parameter.
    val encoder: MediaTypedEncoder[FoobarResponse] = MediaTypedEncoder(
      encoder = semiauto.deriveFor[Foo].encoder.contramap(foobar => Foo(foobar.foo)),
      mediaType = versionedJson(1)
    )
  }

  object Bar {
    // Encoder selected when the Accept header carries the "version=v2" parameter.
    val encoder: MediaTypedEncoder[FoobarResponse] = MediaTypedEncoder(
      encoder = semiauto.deriveFor[Bar].encoder.contramap(foobar => Bar(foobar.bar)),
      mediaType = versionedJson(2)
    )
  }

  /** application/json tagged with a "version" parameter, e.g. version=v1. */
  def versionedJson(version: Int): MediaType = {
    MediaTypes.applicationJson.copy(parameters = Map("version" -> s"v$version"))
  }

  val endpointPath: Seq[String] = Seq("package", "foobar")

  /** Builds a POST request to the test endpoint with the given Accept header and body. */
  def buildInput(acceptHeader: MediaType, body: String): Input = {
    val request = RequestBuilder()
      .url(s"http://some.host/${endpointPath.mkString("/")}")
      .setHeader("Accept", acceptHeader.show)
      .setHeader("Content-Type", MediaTypes.applicationJson.show)
      .buildPost(Buf.Utf8(body))

    Input(request)
  }

  /** Unpacks the endpoint result, forcing the lazy future and awaiting its output. */
  def extractBody[A](result: Option[(Input, Eval[Future[Output[A]]])]): A = {
    val Some((_, eval)) = result
    Await.result(eval.value).value
  }

  implicit val stringRichDecoder: MediaTypedDecoder[String] =
    MediaTypedDecoder(MediaTypes.applicationJson)

  // NOTE(review): "Enocoder" is a typo for "Encoder"; renaming a public val is left to a dedicated change.
  implicit val foobarResponseEnocoder: DispatchingMediaTypedEncoder[FoobarResponse] =
    DispatchingMediaTypedEncoder(Seq(Foo.encoder, Bar.encoder))

  /** Handler that parses the request body as both an Int and a Double,
    * falling back to 0 / 0.0 on parse failure rather than erroring. */
  object FoobarHandler extends EndpointHandler[String, FoobarResponse] {
    override def apply(request: String)(implicit session: RequestSession): Future[FoobarResponse] = {
      val asInt = Try(request.toInt).getOrElse(0)
      val asDouble = Try(request.toDouble).getOrElse(0.0)
      Future(FoobarResponse(asInt, asDouble))
    }
  }

  val FoobarEndpoint: Endpoint[Json] = Cosmos.route(post(/), FoobarHandler)(RequestReaders.standard)
}
| movicha/cosmos | cosmos-server/src/test/scala/com/mesosphere/cosmos/handler/VersionedResponsesSpec.scala | Scala | apache-2.0 | 3,189 |
package challenge
import java.util.regex.Pattern
import scala.util.matching.Regex
import scalaz.Scalaz._
/** /r/dailyprogrammer Easy #190: naive sentiment analysis of the Google+
  * comments attached to a YouTube video.
  */
object Easy190 {

  /** Entry point: fetches the Google+ comment widget for the video given in
    * args(0), extracts its comments and prints an overall sentiment summary.
    */
  def main(args: Array[String]): Unit = {
    val page = io.Source.fromURL("https://plus.googleapis" +
      ".com/u/0/_/widget/render/comments?first_party_property=YOUTUBE&href="
      + args(0))
    val pageData = page.mkString("")
    val comments = findComments(pageData)
    val (happy, sad, overall) = interpretComments(comments)
    println(("From a sample size of %d, there were %d 'Happy' keyword(s) " +
      "and %d 'Sad' keyword(s).").format(comments.length, happy, sad))
    val sentiment =
      if (overall > 0) "Happy" else if (overall < 0) "Sad" else "Neutral"
    println("Overall, the general feelings towards this video were " +
      "'%s'.".format(sentiment))
  }

  /** Extracts the text of every comment ("ct"/"CT" div) found in the page source. */
  def findComments(pageData: String): List[String] = {
    val comment = """<div class="[cC][tT]">([^<].*?)</div>""".r
    comment.findAllIn(pageData).matchData.map(_.group(1)).toList
  }

  /** Tallies happy/sad keywords over all comments.
    *
    * @return (total happy keywords, total sad keywords, overall sentiment),
    *         where the overall sentiment is the sum over comments of +1 (more
    *         happy than sad keywords), -1 (more sad) or 0 (tie).
    */
  def interpretComments(comments: List[String]): (Int, Int,
    Int) =
    if (comments.isEmpty) (0, 0, 0)
    else {
      // Load each keyword file exactly once, instead of re-reading both files
      // for every comment as the previous per-element recursion did.
      val happyWords = extractKeywords("data/challenge/positive-words.txt")
      val sadWords = extractKeywords("data/challenge/negative-words.txt")
      // foldLeft replaces non-tail recursion (stack-safe for long comment lists)
      // and the scalaz |+| tuple sum, which it reproduces element-wise.
      comments.foldLeft((0, 0, 0)) { case ((happyAcc, sadAcc, overallAcc), comment) =>
        val happy = countKeywords(comment, happyWords)
        val sad = countKeywords(comment, sadWords)
        val overall = if (happy > sad) 1 else if (happy < sad) -1 else 0
        (happyAcc + happy, sadAcc + sad, overallAcc + overall)
      }
    }

  /** Compiles the keyword file into a single case-insensitive alternation regex.
    * Comment lines (starting with ';') and blank lines are skipped; each word is
    * quoted so regex metacharacters are matched literally.
    */
  def extractKeywords(src: String): Regex = {
    val lines = io.Source.fromFile(src, "UTF-8").getLines().toList
    // || (short-circuit) replaces the original non-short-circuiting |.
    val wordLines = lines.filterNot(line => line.startsWith(";") || line.isEmpty)
    val words = wordLines map (word => Pattern.quote(word))
    val keywords = words.mkString("|")
    ("(?i)" + keywords).r
  }

  /** Number of keyword occurrences within a single comment. */
  def countKeywords(comment: String, keywords: Regex): Int =
    keywords.findAllIn(comment).length
}
| nichwn/dailyprogrammer-scala | src/main/scala/challenge/Easy190.scala | Scala | mit | 1,955 |
package com.github.vonnagy.service.container.http
import java.util.concurrent.TimeUnit
import akka.actor.{ActorSystem, Cancellable}
import com.github.vonnagy.service.container.log.LoggingAdapter
import com.github.vonnagy.service.container.metrics._
import scala.concurrent.duration._
/** Snapshot of HTTP server statistics reported through the metrics gauges below.
  *
  * @param uptime             how long the server has been running.
  * @param totalRequests      total number of requests served so far.
  * @param openRequests       number of requests currently in flight.
  * @param maxOpenRequests    high-water mark of concurrently open requests.
  * @param totalConnections   total number of connections accepted so far.
  * @param openConnections    number of currently open connections.
  * @param maxOpenConnections high-water mark of concurrently open connections.
  * @param requestTimeouts    number of requests that hit the idle timeout.
  */
case class Stats(
  uptime: FiniteDuration,
  totalRequests: Long,
  openRequests: Long,
  maxOpenRequests: Long,
  totalConnections: Long,
  openConnections: Long,
  maxOpenConnections: Long,
  requestTimeouts: Long)
/** Periodically samples HTTP server statistics and exposes them as metrics gauges. */
private[http] trait HttpMetrics extends LoggingAdapter {

  implicit def system: ActorSystem

  // Handle on the scheduled sampling job, if one is currently running.
  var metricsJob: Option[Cancellable] = None
  // Most recent statistics snapshot; every gauge below reads from it.
  var lastStats = Stats(FiniteDuration(0, TimeUnit.MILLISECONDS), 0, 0, 0, 0, 0, 0, 0)

  /// TODO def httpListener: Option[ActorSelection]

  val totConn = Gauge("container.http.connections.total") {
    lastStats.totalConnections
  }
  val openConn = Gauge("container.http.connections.open") {
    lastStats.openConnections
  }
  val maxOpenConn = Gauge("container.http.connections.max-open") {
    lastStats.maxOpenConnections
  }
  val totReq = Gauge("container.http.requests.total") {
    lastStats.totalRequests
  }
  val openReq = Gauge("container.http.requests.open") {
    lastStats.openRequests
  }
  val maxOpenReq = Gauge("container.http.requests.max-open") {
    lastStats.maxOpenRequests
  }
  val uptime = Gauge("container.http.uptime") {
    lastStats.uptime.toMillis
  }
  val idle = Gauge("container.http.idle-timeouts") {
    lastStats.requestTimeouts
  }

  /** Starts the periodic sampling job, firing every `interval`. */
  protected[http] def scheduleHttpMetrics(interval: FiniteDuration): Unit = {
    // Schedule an event to gather the http statistics so that we can add information to our metrics system
    log.info("Scheduling http server metrics handler")
    implicit val dis = system.dispatcher
    metricsJob = Some(system.scheduler.schedule(interval, interval)(getMetrics))
  }

  /** Cancels the sampling job, if any, and clears the handle. */
  protected[http] def cancelHttpMetrics(): Unit = {
    // exists both forces the cancel side effect and ignores the boolean result.
    metricsJob.exists(_.cancel())
    metricsJob = None
  }

  /** Refreshes `lastStats`.
    * NOTE(review): currently stubbed to all-zero stats — the actual listener
    * query is commented out below pending a stats source.
    */
  private def getMetrics(): Unit = {
    try {
      // TODO - No stats
      // if (httpListener.isDefined) httpListener.get ? Http.GetStats onSuccess {
      //   case x: Stats => lastStats = x
      // }
      lastStats = Stats(0 seconds, 0, 0, 0, 0, 0, 0, 0)
    }
    catch {
      case e: Exception =>
        log.error("An error occurred when trying to fetch and record the http server metrics", e)
    }
  }
}
| vonnagy/service-container | service-container/src/main/scala/com/github/vonnagy/service/container/http/HttpMetrics.scala | Scala | apache-2.0 | 2,547 |
package org.apache.spark.sql.cassandra
import scala.collection.mutable
import org.apache.spark.sql.SaveMode._
import org.apache.spark.sql.cassandra.DefaultSource._
import org.apache.spark.sql.sources.{BaseRelation, CreatableRelationProvider, RelationProvider, SchemaRelationProvider}
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{DataFrame, SQLContext, SaveMode}
import com.datastax.spark.connector.util.Logging
import com.datastax.spark.connector.cql.{AuthConfFactory, CassandraConnectorConf, DefaultAuthConfFactory}
import com.datastax.spark.connector.rdd.ReadConf
import com.datastax.spark.connector.util.Logging
import com.datastax.spark.connector.writer.WriteConf
/**
* Cassandra data source extends [[RelationProvider]], [[SchemaRelationProvider]] and [[CreatableRelationProvider]].
* It's used internally by Spark SQL to create Relation for a table which specifies the Cassandra data source
* e.g.
*
* CREATE TEMPORARY TABLE tmpTable
* USING org.apache.spark.sql.cassandra
* OPTIONS (
* table "table",
* keyspace "keyspace",
* cluster "test_cluster",
* pushdown "true",
* spark.cassandra.input.fetch.size_in_rows "10",
* spark.cassandra.output.consistency.level "ONE",
* spark.cassandra.connection.timeout_ms "1000"
* )
*/
class DefaultSource extends RelationProvider with SchemaRelationProvider with CreatableRelationProvider with Logging {

  /**
   * Creates a new relation for a cassandra table.
   * The parameters map stores table level data. User can specify vale for following keys
   *
   *    table        -- table name, required
   *    keyspace     -- keyspace name, required
   *    cluster      -- cluster name, optional, default name is "default"
   *    pushdown     -- true/false, optional, default is true
   *    Cassandra connection settings  -- optional, e.g. spark.cassandra.connection.timeout_ms
   *    Cassandra Read Settings        -- optional, e.g. spark.cassandra.input.fetch.size_in_rows
   *    Cassandra Write settings       -- optional, e.g. spark.cassandra.output.consistency.level
   *
   * When push_down is true, some filters are pushed down to CQL.
   *
   */
  override def createRelation(
    sqlContext: SQLContext,
    parameters: Map[String, String]): BaseRelation = {
    val (tableRef, options) = TableRefAndOptions(parameters)
    // Schema is inferred from the Cassandra table metadata.
    CassandraSourceRelation(tableRef, sqlContext, options)
  }

  /**
   * Creates a new relation for a cassandra table given table, keyspace, cluster and push_down
   * as parameters and explicitly pass schema [[StructType]] as a parameter
   */
  override def createRelation(
    sqlContext: SQLContext,
    parameters: Map[String, String],
    schema: StructType): BaseRelation = {
    val (tableRef, options) = TableRefAndOptions(parameters)
    // The user-supplied schema overrides schema inference.
    CassandraSourceRelation(tableRef, sqlContext, options, Option(schema))
  }

  /**
   * Creates a new relation for a cassandra table given table, keyspace, cluster, push_down and schema
   * as parameters. It saves the data to the Cassandra table depends on [[SaveMode]]
   */
  override def createRelation(
    sqlContext: SQLContext,
    mode: SaveMode,
    parameters: Map[String, String],
    data: DataFrame): BaseRelation = {
    val (tableRef, options) = TableRefAndOptions(parameters)
    val table = CassandraSourceRelation(tableRef, sqlContext, options)

    mode match {
      // Append/Overwrite always write; the insert flag controls truncation.
      case Append => table.insert(data, overwrite = false)
      case Overwrite => table.insert(data, overwrite = true)
      // ErrorIfExists only writes when the table currently holds no rows.
      case ErrorIfExists =>
        if (table.buildScan().isEmpty()) {
          table.insert(data, overwrite = false)
        } else {
          throw new UnsupportedOperationException(
            s"""'SaveMode is set to ErrorIfExists and Table
               |${tableRef.keyspace + "." + tableRef.table} already exists and contains data.
               |Perhaps you meant to set the DataFrame write mode to Append?
               |Example: df.write.format.options.mode(SaveMode.Append).save()" '""".stripMargin)
        }
      // Ignore silently skips the write when data already exists.
      case Ignore =>
        if (table.buildScan().isEmpty()) {
          table.insert(data, overwrite = false)
        }
    }

    CassandraSourceRelation(tableRef, sqlContext, options)
  }
}
/** Store data source options.
  *
  * @param pushdown       whether filter pushdown to CQL is enabled (default true).
  * @param cassandraConfs Cassandra connector configuration overrides for this source.
  */
case class CassandraSourceOptions(pushdown: Boolean = true, cassandraConfs: Map[String, String] = Map.empty)
object DefaultSource {
  // Option keys recognised by this data source (see the class scaladoc above for usage).
  val CassandraDataSourceTableNameProperty = "table"
  val CassandraDataSourceKeyspaceNameProperty = "keyspace"
  val CassandraDataSourceClusterNameProperty = "cluster"
  val CassandraDataSourceUserDefinedSchemaNameProperty = "schema"
  val CassandraDataSourcePushdownEnableProperty = "pushdown"

  // Fully qualified names accepted in a USING clause to select this provider.
  val CassandraDataSourceProviderPackageName = DefaultSource.getClass.getPackage.getName
  val CassandraDataSourceProviderClassName = CassandraDataSourceProviderPackageName + ".DefaultSource"

  /** Parse parameters into CassandraDataSourceOptions and TableRef object */
  def TableRefAndOptions(parameters: Map[String, String]) : (TableRef, CassandraSourceOptions) = {
    val tableName = parameters(CassandraDataSourceTableNameProperty)
    val keyspaceName = parameters(CassandraDataSourceKeyspaceNameProperty)
    val clusterName = parameters.get(CassandraDataSourceClusterNameProperty)
    val pushdown : Boolean = parameters.getOrElse(CassandraDataSourcePushdownEnableProperty, "true").toBoolean
    // NOTE(review): the entire parameter map (including table/keyspace/pushdown keys)
    // is passed through as connector configuration — presumably filtered against
    // confProperties downstream; verify.
    val cassandraConfs = parameters

    (TableRef(tableName, keyspaceName, clusterName), CassandraSourceOptions(pushdown, cassandraConfs))
  }

  // Names of all recognised Cassandra connector configuration properties.
  val confProperties = ReadConf.Properties.map(_.name) ++
    WriteConf.Properties.map(_.name) ++
    CassandraConnectorConf.Properties.map(_.name) ++
    CassandraSourceRelation.Properties.map(_.name) ++
    AuthConfFactory.Properties.map(_.name) ++
    DefaultAuthConfFactory.properties

  /** Check whether the provider is Cassandra datasource or not */
  def cassandraSource(provider: String) : Boolean = {
    provider == CassandraDataSourceProviderPackageName || provider == CassandraDataSourceProviderClassName
  }
}
| shashwat7/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/org/apache/spark/sql/cassandra/DefaultSource.scala | Scala | apache-2.0 | 6,098 |
/*
* Copyright 2014 Michael Krolikowski
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.mkroli.dns4s.section.resource
import com.github.mkroli.dns4s.MessageBuffer
import com.github.mkroli.dns4s.section.Resource
/** TXT resource record: an ordered sequence of character-strings. */
case class TXTResource(txt: Seq[String]) extends Resource {
  /** Serializes every character-string into the buffer, in order.
    * Uses foldLeft instead of the `/:` operator, which is deprecated since Scala 2.13.
    */
  def apply(buf: MessageBuffer) =
    txt.foldLeft(buf)(_ putCharacterString _)
}
object TXTResource {
  /** Deserializes a TXT record spanning `size` bytes from the buffer.
    *
    * Character-strings are read until `size` bytes have been consumed; each
    * string accounts for its byte length plus one length-prefix byte.
    */
  def apply(buf: MessageBuffer, size: Int) = {
    // Tail-recursive accumulation; the annotation makes the compiler enforce it
    // so the loop stays stack-safe for records with many character-strings.
    @scala.annotation.tailrec
    def getCharacterStrings(cs: Seq[String], updatedCsSize: Int): Seq[String] = {
      if (updatedCsSize < size) {
        val str = buf.getCharacterString
        val updatedCs = cs :+ str
        getCharacterStrings(updatedCs, updatedCsSize + str.getBytes.length + 1)
      } else
        cs
    }
    new TXTResource(getCharacterStrings(Nil, 0))
  }
}
| mesosphere/dns4s | core/src/main/scala/com/github/mkroli/dns4s/section/resource/TXTResource.scala | Scala | apache-2.0 | 1,309 |
package models
import java.util.UUID
import qxsl.draft.Call
import play.api.data.{Form, Forms, OptionalMapping}
/** Play form binding for [[SectionData]]: a contest section and operating city.
  * Cross-field rule: the city must be non-empty unless `Rule.absent` reports the
  * section as absent (NOTE(review): Rule.absent semantics inferred from usage —
  * verify against its definition).
  */
object SectionForm extends Form[SectionData](
  Forms.mapping(
    "sect" -> Forms.text,
    "city" -> Forms.text
  )(SectionData.apply)(SectionData.unapply).verifying(p => Rule.absent(p.sect) || p.city.nonEmpty), Map.empty, Nil, None
)
/** Play form binding for [[StationData]]: the participant's station profile.
  * The callsign is validated by `Call.isValid` and normalised to its canonical
  * value; a missing UUID defaults to a freshly generated `StationData.uuid`.
  */
object StationForm extends Form[StationData](
  Forms.mapping(
    "call" -> Forms.nonEmptyText.verifying(Call.isValid(_)).transform(new Call(_).value(), identity[String]),
    "name" -> Forms.nonEmptyText,
    "post" -> Forms.nonEmptyText,
    "mail" -> Forms.email,
    "note" -> Forms.text,
    "uuid" -> OptionalMapping(Forms.uuid).transform(_.getOrElse(StationData.uuid), Some[UUID](_))
  )(StationData.apply)(StationData.unapply), Map.empty, Nil, None
)
/** Play form binding for [[ContestData]]: one station profile (via
  * [[StationForm]]) plus the list of entered sections (via [[SectionForm]]).
  */
object ContestForm extends Form[ContestData](
  Forms.mapping(
    "station" -> StationForm.mapping,
    "section" -> Forms.seq(SectionForm.mapping)
  )(ContestData.apply)(ContestData.unapply), Map.empty, Nil, None
)
| nextzlog/ats4 | app/models/form.scala | Scala | gpl-3.0 | 1,006 |
/******************************************************************
* See the NOTICE file distributed with this work for additional *
* information regarding Copyright ownership. The author/authors *
* license this file to you under the terms of the Apache License *
* Version 2.0 (the "License"); you may not use this file except *
* in compliance with the License. You may obtain a copy of the *
* License at: *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, *
* software distributed under the License is distributed on an *
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, *
* either express or implied. See the License for the specific *
* language governing permissions and limitations under the *
* License. *
******************************************************************/
package scalatime.impl
import java.time.DateTimeException
import java.time.temporal.{Temporal, TemporalUnit}
import scala.language.implicitConversions
/** Enriches [[TemporalUnit]] with additional methods. */
final case class TemporalUnitOps(underlying: TemporalUnit) {

  /** Returns a copy of `temporal` with `amount` of this unit added to it.
    *
    * @throws DateTimeException - if the amount cannot be added.
    */
  def <<+[A <: Temporal](temporal: A, amount: Long): A =
    underlying.addTo(temporal, amount)
}
/** Mix-in that implicitly converts a [[TemporalUnit]] to [[TemporalUnitOps]]. */
trait ToTemporalUnitOps extends Any {
  /** Wraps `f` so the enrichment methods of [[TemporalUnitOps]] become available. */
  implicit final def toTemporalUnitOpsFromTemporalUnit(f: TemporalUnit): TemporalUnitOps = new TemporalUnitOps(f)
}
| reactivecodes/scala-time | src/main/scala/scalatime/impl/TemporalUnitOps.scala | Scala | apache-2.0 | 1,859 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.examples.refspec.info
import org.scalatest._
import refspec.RefSpec
import collection.mutable
/** Example spec demonstrating Given/When/Then informers with RefSpec. */
class SetSpec extends RefSpec with GivenWhenThen {

  object `A mutable Set` {
    // `: Unit =` replaces the deprecated procedure syntax (`def f { ... }`),
    // which is removed in Scala 3.
    def `should allow an element to be added`: Unit = {
      Given("an empty mutable Set")
      val set = mutable.Set.empty[String]

      When("an element is added")
      set += "clarity"

      Then("the Set should have size 1")
      assert(set.size === 1)

      And("the Set should contain the added element")
      assert(set.contains("clarity"))

      info("That's all folks!")
    }
  }
}
| dotty-staging/scalatest | examples/src/test/scala/org/scalatest/examples/refspec/info/SetSpec.scala | Scala | apache-2.0 | 1,194 |
package de.leanovate.swaggercheck
import de.leanovate.swaggercheck.schema.gen.GeneratableSchema
import de.leanovate.swaggercheck.schema.gen.formats.{GeneratableFormat, GeneratableIntegerFormats, GeneratableNumberFormats, GeneratableStringFormats}
import de.leanovate.swaggercheck.schema.model.Definition
/** Minimal [[GeneratableSchema]] used by tests: wires up the default string,
  * number and integer format generators and resolves no references.
  *
  * @param randomAdditionalFields whether generated objects may carry extra, undeclared fields.
  * @param maxItems upper bound on generated collection sizes.
  */
case class TestSchema(
  randomAdditionalFields:Boolean = false,
  maxItems: Int = 20
) extends GeneratableSchema {

  // Default format registries consulted by the lookups below.
  val integerFormats = GeneratableIntegerFormats.defaultFormats
  val numberFormats = GeneratableNumberFormats.defaultFormats
  val stringFormats = GeneratableStringFormats.defaultFormats

  /** Returns a copy with a new collection-size bound. */
  override def withMaxItems(newMaxItems: Int): TestSchema = copy(maxItems = newMaxItems)

  override def findGeneratableStringFormat(format: String): Option[GeneratableFormat[String]] =
    stringFormats.get(format)

  override def findGeneratableNumberFormat(format: String): Option[GeneratableFormat[BigDecimal]] =
    numberFormats.get(format)

  override def findGeneratableIntegerFormat(format: String): Option[GeneratableFormat[BigInt]] =
    integerFormats.get(format)

  /** This standalone schema declares no named definitions, so lookups always fail. */
  override def findByRef(ref: String): Option[Definition] = None
}
| leanovate/swagger-check | json-schema-gen/src/test/scala/de/leanovate/swaggercheck/TestSchema.scala | Scala | mit | 1,218 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import org.scalatest.BeforeAndAfter
import org.apache.spark.sql.catalyst.catalog.{InMemoryCatalog, SessionCatalog}
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.expressions.Literal.{FalseLiteral, TrueLiteral}
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, Project, Union}
import org.apache.spark.sql.types._
/**
 * Verifies the decimal precision/scale that the analyzer assigns to arithmetic,
 * comparisons, unions and mixed-type expressions, as well as the strength
 * reduction of integral-vs-decimal comparisons performed by `DecimalPrecision`.
 */
class DecimalPrecisionSuite extends PlanTest with BeforeAndAfter {
  private val catalog = new SessionCatalog(new InMemoryCatalog, EmptyFunctionRegistry, conf)
  private val analyzer = new Analyzer(catalog, conf)

  // One attribute per input type of interest: int, two small decimals, the
  // system-default (unlimited) decimal, float and double.
  private val relation = LocalRelation(
    AttributeReference("i", IntegerType)(),
    AttributeReference("d1", DecimalType(2, 1))(),
    AttributeReference("d2", DecimalType(5, 2))(),
    AttributeReference("u", DecimalType.SYSTEM_DEFAULT)(),
    AttributeReference("f", FloatType)(),
    AttributeReference("b", DoubleType)()
  )

  // Unresolved column references; the analyzer resolves them against `relation`.
  private val i: Expression = UnresolvedAttribute("i")
  private val d1: Expression = UnresolvedAttribute("d1")
  private val d2: Expression = UnresolvedAttribute("d2")
  private val u: Expression = UnresolvedAttribute("u")
  private val f: Expression = UnresolvedAttribute("f")
  private val b: Expression = UnresolvedAttribute("b")

  // Register the relation as a temp view before each test.
  before {
    catalog.createTempView("table", relation, overrideIfExists = true)
  }

  // Analyzes a single-column projection and checks the resulting column type.
  private def checkType(expression: Expression, expectedType: DataType): Unit = {
    val plan = Project(Seq(Alias(expression, "c")()), relation)
    assert(analyzer.execute(plan).schema.fields(0).dataType === expectedType)
  }

  // Analyzes the plan and checks that both sides of the (possibly rewritten)
  // binary comparison were cast to the expected common type.
  private def checkComparison(expression: Expression, expectedType: DataType): Unit = {
    val plan = Project(Alias(expression, "c")() :: Nil, relation)
    val comparison = analyzer.execute(plan).collect {
      case Project(Alias(e: BinaryComparison, _) :: Nil, _) => e
    }.head
    assert(comparison.left.dataType === expectedType)
    assert(comparison.right.dataType === expectedType)
  }

  // Checks that both branches of a union are widened to the expected type.
  private def checkUnion(left: Expression, right: Expression, expectedType: DataType): Unit = {
    val plan =
      Union(Project(Seq(Alias(left, "l")()), relation),
        Project(Seq(Alias(right, "r")()), relation))
    val (l, r) = analyzer.execute(plan).collect {
      case Union(Seq(child1, child2)) => (child1.output.head, child2.output.head)
    }.head
    assert(l.dataType === expectedType)
    assert(r.dataType === expectedType)
  }

  test("basic operations") {
    checkType(Add(d1, d2), DecimalType(6, 2))
    checkType(Subtract(d1, d2), DecimalType(6, 2))
    checkType(Multiply(d1, d2), DecimalType(8, 3))
    checkType(Divide(d1, d2), DecimalType(10, 7))
    checkType(Divide(d2, d1), DecimalType(10, 6))
    checkType(Remainder(d1, d2), DecimalType(3, 2))
    checkType(Remainder(d2, d1), DecimalType(3, 2))
    checkType(Sum(d1), DecimalType(12, 1))
    checkType(Average(d1), DecimalType(6, 5))

    // Nested arithmetic grows precision one digit per extra operand.
    checkType(Add(Add(d1, d2), d1), DecimalType(7, 2))
    checkType(Add(Add(Add(d1, d2), d1), d2), DecimalType(8, 2))
    checkType(Add(Add(d1, d2), Add(d1, d2)), DecimalType(7, 2))
  }

  test("Comparison operations") {
    checkComparison(EqualTo(i, d1), DecimalType(11, 1))
    checkComparison(EqualNullSafe(d2, d1), DecimalType(5, 2))
    checkComparison(LessThan(i, d1), DecimalType(11, 1))
    checkComparison(LessThanOrEqual(d1, d2), DecimalType(5, 2))
    checkComparison(GreaterThan(d2, u), DecimalType.SYSTEM_DEFAULT)
    checkComparison(GreaterThanOrEqual(d1, f), DoubleType)
    checkComparison(GreaterThan(d2, d2), DecimalType(5, 2))
  }

  test("decimal precision for union") {
    checkUnion(d1, i, DecimalType(11, 1))
    checkUnion(i, d2, DecimalType(12, 2))
    checkUnion(d1, d2, DecimalType(5, 2))
    checkUnion(d2, d1, DecimalType(5, 2))
    // Unions mixing decimals with float/double widen to double.
    checkUnion(d1, f, DoubleType)
    checkUnion(f, d2, DoubleType)
    checkUnion(d1, b, DoubleType)
    checkUnion(b, d2, DoubleType)
    checkUnion(d1, u, DecimalType.SYSTEM_DEFAULT)
    checkUnion(u, d2, DecimalType.SYSTEM_DEFAULT)
  }

  test("bringing in primitive types") {
    checkType(Add(d1, i), DecimalType(12, 1))
    checkType(Add(d1, f), DoubleType)
    checkType(Add(i, d1), DecimalType(12, 1))
    checkType(Add(f, d1), DoubleType)
    // The integral width of the cast operand drives the result precision.
    checkType(Add(d1, Cast(i, LongType)), DecimalType(22, 1))
    checkType(Add(d1, Cast(i, ShortType)), DecimalType(7, 1))
    checkType(Add(d1, Cast(i, ByteType)), DecimalType(5, 1))
    checkType(Add(d1, Cast(i, DoubleType)), DoubleType)
  }

  test("maximum decimals") {
    // Operations involving the system-default decimal are capped at precision 38.
    for (expr <- Seq(d1, d2, i, u)) {
      checkType(Add(expr, u), DecimalType.SYSTEM_DEFAULT)
      checkType(Subtract(expr, u), DecimalType.SYSTEM_DEFAULT)
    }

    checkType(Multiply(d1, u), DecimalType(38, 19))
    checkType(Multiply(d2, u), DecimalType(38, 20))
    checkType(Multiply(i, u), DecimalType(38, 18))
    checkType(Multiply(u, u), DecimalType(38, 36))

    checkType(Divide(u, d1), DecimalType(38, 18))
    checkType(Divide(u, d2), DecimalType(38, 19))
    checkType(Divide(u, i), DecimalType(38, 23))
    checkType(Divide(u, u), DecimalType(38, 18))

    checkType(Remainder(d1, u), DecimalType(19, 18))
    checkType(Remainder(d2, u), DecimalType(21, 18))
    checkType(Remainder(i, u), DecimalType(28, 18))
    checkType(Remainder(u, u), DecimalType.SYSTEM_DEFAULT)

    // Floating-point operands always win over decimals.
    for (expr <- Seq(f, b)) {
      checkType(Add(expr, u), DoubleType)
      checkType(Subtract(expr, u), DoubleType)
      checkType(Multiply(expr, u), DoubleType)
      checkType(Divide(expr, u), DoubleType)
      checkType(Remainder(expr, u), DoubleType)
    }
  }

  test("DecimalType.isWiderThan") {
    val d0 = DecimalType(2, 0)
    val d1 = DecimalType(2, 1)
    val d2 = DecimalType(5, 2)
    val d3 = DecimalType(15, 3)
    val d4 = DecimalType(25, 4)

    // Wider means losslessly representable: both integral digits and scale grow.
    assert(d0.isWiderThan(d1) === false)
    assert(d1.isWiderThan(d0) === false)
    assert(d1.isWiderThan(d2) === false)
    assert(d2.isWiderThan(d1) === true)
    assert(d2.isWiderThan(d3) === false)
    assert(d3.isWiderThan(d2) === true)
    assert(d4.isWiderThan(d3) === true)

    assert(d1.isWiderThan(ByteType) === false)
    assert(d2.isWiderThan(ByteType) === true)
    assert(d2.isWiderThan(ShortType) === false)
    assert(d3.isWiderThan(ShortType) === true)
    assert(d3.isWiderThan(IntegerType) === true)
    assert(d3.isWiderThan(LongType) === false)
    assert(d4.isWiderThan(LongType) === true)
    assert(d4.isWiderThan(FloatType) === false)
    assert(d4.isWiderThan(DoubleType) === false)
  }

  test("strength reduction for integer/decimal comparisons - basic test") {
    Seq(ByteType, ShortType, IntegerType, LongType).foreach { dt =>
      val int = AttributeReference("a", dt)()

      // Decimal literals are folded into Long literals; fractional literals are
      // rounded towards the direction that preserves the comparison's meaning.
      ruleTest(int > Literal(Decimal(4)), int > Literal(4L))
      ruleTest(int > Literal(Decimal(4.7)), int > Literal(4L))
      ruleTest(int >= Literal(Decimal(4)), int >= Literal(4L))
      ruleTest(int >= Literal(Decimal(4.7)), int >= Literal(5L))
      ruleTest(int < Literal(Decimal(4)), int < Literal(4L))
      ruleTest(int < Literal(Decimal(4.7)), int < Literal(5L))
      ruleTest(int <= Literal(Decimal(4)), int <= Literal(4L))
      ruleTest(int <= Literal(Decimal(4.7)), int <= Literal(4L))

      ruleTest(Literal(Decimal(4)) > int, Literal(4L) > int)
      ruleTest(Literal(Decimal(4.7)) > int, Literal(5L) > int)
      ruleTest(Literal(Decimal(4)) >= int, Literal(4L) >= int)
      ruleTest(Literal(Decimal(4.7)) >= int, Literal(4L) >= int)
      ruleTest(Literal(Decimal(4)) < int, Literal(4L) < int)
      ruleTest(Literal(Decimal(4.7)) < int, Literal(4L) < int)
      ruleTest(Literal(Decimal(4)) <= int, Literal(4L) <= int)
      ruleTest(Literal(Decimal(4.7)) <= int, Literal(5L) <= int)
    }
  }

  test("strength reduction for integer/decimal comparisons - overflow test") {
    val maxValue = Literal(Decimal(Long.MaxValue))
    val overflow = Literal(Decimal(Long.MaxValue) + Decimal(0.1))
    val minValue = Literal(Decimal(Long.MinValue))
    val underflow = Literal(Decimal(Long.MinValue) - Decimal(0.1))

    Seq(ByteType, ShortType, IntegerType, LongType).foreach { dt =>
      val int = AttributeReference("a", dt)()

      // Comparisons against decimals beyond the Long range fold to constants.
      ruleTest(int > maxValue, int > Literal(Long.MaxValue))
      ruleTest(int > overflow, FalseLiteral)
      ruleTest(int > minValue, int > Literal(Long.MinValue))
      ruleTest(int > underflow, TrueLiteral)

      ruleTest(int >= maxValue, int >= Literal(Long.MaxValue))
      ruleTest(int >= overflow, FalseLiteral)
      ruleTest(int >= minValue, int >= Literal(Long.MinValue))
      ruleTest(int >= underflow, TrueLiteral)

      ruleTest(int < maxValue, int < Literal(Long.MaxValue))
      ruleTest(int < overflow, TrueLiteral)
      ruleTest(int < minValue, int < Literal(Long.MinValue))
      ruleTest(int < underflow, FalseLiteral)

      ruleTest(int <= maxValue, int <= Literal(Long.MaxValue))
      ruleTest(int <= overflow, TrueLiteral)
      ruleTest(int <= minValue, int <= Literal(Long.MinValue))
      ruleTest(int <= underflow, FalseLiteral)

      ruleTest(maxValue > int, Literal(Long.MaxValue) > int)
      ruleTest(overflow > int, TrueLiteral)
      ruleTest(minValue > int, Literal(Long.MinValue) > int)
      ruleTest(underflow > int, FalseLiteral)

      ruleTest(maxValue >= int, Literal(Long.MaxValue) >= int)
      ruleTest(overflow >= int, TrueLiteral)
      ruleTest(minValue >= int, Literal(Long.MinValue) >= int)
      ruleTest(underflow >= int, FalseLiteral)

      ruleTest(maxValue < int, Literal(Long.MaxValue) < int)
      ruleTest(overflow < int, FalseLiteral)
      ruleTest(minValue < int, Literal(Long.MinValue) < int)
      ruleTest(underflow < int, TrueLiteral)

      ruleTest(maxValue <= int, Literal(Long.MaxValue) <= int)
      ruleTest(overflow <= int, FalseLiteral)
      ruleTest(minValue <= int, Literal(Long.MinValue) <= int)
      ruleTest(underflow <= int, TrueLiteral)
    }
  }

  /** strength reduction for integer/decimal comparisons */
  // Applies the DecimalPrecision rule directly (no full analysis) and compares
  // the rewritten plan against the expected one.
  def ruleTest(initial: Expression, transformed: Expression): Unit = {
    val testRelation = LocalRelation(AttributeReference("a", IntegerType)())
    comparePlans(
      DecimalPrecision(Project(Seq(Alias(initial, "a")()), testRelation)),
      Project(Seq(Alias(transformed, "a")()), testRelation))
  }
}
| wangyixiaohuihui/spark2-annotation | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/DecimalPrecisionSuite.scala | Scala | apache-2.0 | 11,493 |
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.jni
/** Abstract class for representing TensorFlow exceptions.
*
* @author Emmanouil Antonios Platanios
*/
/** Base type for all exceptions surfaced from the TensorFlow native layer. */
abstract class TensorFlowException(message: String, cause: Throwable) extends RuntimeException(message, cause)

class CancelledException(message: String, cause: Throwable) extends TensorFlowException(message, cause) {
  def this(message: String) = this(message, null)
}

object CancelledException {
  def apply(message: String): CancelledException = new CancelledException(message)
  def apply(message: String, cause: Throwable): CancelledException = new CancelledException(message, cause)
}

class UnknownException(message: String, cause: Throwable) extends TensorFlowException(message, cause) {
  def this(message: String) = this(message, null)
}

object UnknownException {
  def apply(message: String): UnknownException = new UnknownException(message)
  def apply(message: String, cause: Throwable): UnknownException = new UnknownException(message, cause)
}

class InvalidArgumentException(message: String, cause: Throwable) extends TensorFlowException(message, cause) {
  def this(message: String) = this(message, null)
}

object InvalidArgumentException {
  def apply(message: String): InvalidArgumentException = new InvalidArgumentException(message)
  def apply(message: String, cause: Throwable): InvalidArgumentException = new InvalidArgumentException(message, cause)
}

class DeadlineExceededException(message: String, cause: Throwable) extends TensorFlowException(message, cause) {
  def this(message: String) = this(message, null)
}

object DeadlineExceededException {
  def apply(message: String): DeadlineExceededException = new DeadlineExceededException(message)
  def apply(message: String, cause: Throwable): DeadlineExceededException = new DeadlineExceededException(message, cause)
}

class NotFoundException(message: String, cause: Throwable) extends TensorFlowException(message, cause) {
  def this(message: String) = this(message, null)
}

object NotFoundException {
  def apply(message: String): NotFoundException = new NotFoundException(message)
  def apply(message: String, cause: Throwable): NotFoundException = new NotFoundException(message, cause)
}

class AlreadyExistsException(message: String, cause: Throwable) extends TensorFlowException(message, cause) {
  def this(message: String) = this(message, null)
}

object AlreadyExistsException {
  def apply(message: String): AlreadyExistsException = new AlreadyExistsException(message)
  def apply(message: String, cause: Throwable): AlreadyExistsException = new AlreadyExistsException(message, cause)
}

class PermissionDeniedException(message: String, cause: Throwable) extends TensorFlowException(message, cause) {
  def this(message: String) = this(message, null)
}

object PermissionDeniedException {
  def apply(message: String): PermissionDeniedException = new PermissionDeniedException(message)
  def apply(message: String, cause: Throwable): PermissionDeniedException = new PermissionDeniedException(message, cause)
}

class UnauthenticatedException(message: String, cause: Throwable) extends TensorFlowException(message, cause) {
  def this(message: String) = this(message, null)
}

object UnauthenticatedException {
  def apply(message: String): UnauthenticatedException = new UnauthenticatedException(message)
  def apply(message: String, cause: Throwable): UnauthenticatedException = new UnauthenticatedException(message, cause)
}

class ResourceExhaustedException(message: String, cause: Throwable) extends TensorFlowException(message, cause) {
  def this(message: String) = this(message, null)
}

object ResourceExhaustedException {
  def apply(message: String): ResourceExhaustedException = new ResourceExhaustedException(message)
  def apply(message: String, cause: Throwable): ResourceExhaustedException = new ResourceExhaustedException(message, cause)
}

class FailedPreconditionException(message: String, cause: Throwable) extends TensorFlowException(message, cause) {
  def this(message: String) = this(message, null)
}

object FailedPreconditionException {
  def apply(message: String): FailedPreconditionException = new FailedPreconditionException(message)
  def apply(message: String, cause: Throwable): FailedPreconditionException = new FailedPreconditionException(message, cause)
}

class AbortedException(message: String, cause: Throwable) extends TensorFlowException(message, cause) {
  def this(message: String) = this(message, null)
}

object AbortedException {
  def apply(message: String): AbortedException = new AbortedException(message)
  def apply(message: String, cause: Throwable): AbortedException = new AbortedException(message, cause)
}

class OutOfRangeException(message: String, cause: Throwable) extends TensorFlowException(message, cause) {
  def this(message: String) = this(message, null)
}

object OutOfRangeException {
  def apply(message: String): OutOfRangeException = new OutOfRangeException(message)
  def apply(message: String, cause: Throwable): OutOfRangeException = new OutOfRangeException(message, cause)
}

class UnimplementedException(message: String, cause: Throwable) extends TensorFlowException(message, cause) {
  def this(message: String) = this(message, null)
}

object UnimplementedException {
  def apply(message: String): UnimplementedException = new UnimplementedException(message)
  def apply(message: String, cause: Throwable): UnimplementedException = new UnimplementedException(message, cause)
}

class InternalException(message: String, cause: Throwable) extends TensorFlowException(message, cause) {
  def this(message: String) = this(message, null)
}

object InternalException {
  def apply(message: String): InternalException = new InternalException(message)
  def apply(message: String, cause: Throwable): InternalException = new InternalException(message, cause)
}

class UnavailableException(message: String, cause: Throwable) extends TensorFlowException(message, cause) {
  def this(message: String) = this(message, null)
}

object UnavailableException {
  def apply(message: String): UnavailableException = new UnavailableException(message)
  def apply(message: String, cause: Throwable): UnavailableException = new UnavailableException(message, cause)
}

class DataLossException(message: String, cause: Throwable) extends TensorFlowException(message, cause) {
  def this(message: String) = this(message, null)
}

object DataLossException {
  def apply(message: String): DataLossException = new DataLossException(message)
  def apply(message: String, cause: Throwable): DataLossException = new DataLossException(message, cause)
}
| eaplatanios/tensorflow | tensorflow/scala/jni/src/main/scala/org/platanios/tensorflow/jni/TensorFlowException.scala | Scala | apache-2.0 | 7,368 |
package de.frosner.broccoli.nomad.models
import de.frosner.broccoli.util
import org.specs2.mutable.Specification
import play.api.libs.json.Json
/** Verifies that Nomad allocation JSON decodes into the [[Allocation]] model. */
class AllocationSpec extends Specification {
  "Allocation" should {
    "decode from JSON" in {
      // Parse the sample allocations payload shipped as a test resource and
      // validate it against the Allocation Reads instance.
      val allocations = Json
        .parse(util.Resources.readAsString("/de/frosner/broccoli/services/nomad/allocations.json"))
        .validate[List[Allocation]]
        .asEither
      // Expect a successful decode of a single allocation; ids are tagged
      // strings (shapeless tags) rather than plain Strings.
      allocations should beRight(
        List(Allocation(
          id = shapeless.tag[Allocation.Id]("520bc6c3-53c9-fd2e-5bea-7d0b9dbef254"),
          jobId = shapeless.tag[Job.Id]("tvftarcxrPoy9wNhghqQogihjha"),
          nodeId = shapeless.tag[Node.Id]("cf3338e9-5ed0-88ef-df7b-9dd9708130c8"),
          clientStatus = ClientStatus.Running,
          taskStates = Map(shapeless.tag[Task.Name]("http-task") -> TaskStateEvents(TaskState.Running))
        )))
    }
  }
}
| FRosner/cluster-broccoli | server/src/test/scala/de/frosner/broccoli/nomad/models/AllocationSpec.scala | Scala | apache-2.0 | 903 |
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.params.wrappers.spark
import org.apache.spark.ml
import io.deepsense.deeplang.params.MultipleColumnCreatorParam
/**
 * Deeplang parameter wrapper that exposes a Spark ML `StringArrayParam`
 * as a [[MultipleColumnCreatorParam]].
 *
 * @param name             Parameter name shown to users.
 * @param description      Optional human-readable description.
 * @param sparkParamGetter Extracts the underlying Spark param from a params holder.
 * @tparam P Type of the Spark params container this wrapper forwards to.
 */
class MultipleColumnCreatorParamWrapper[P <: ml.param.Params](
    override val name: String,
    override val description: Option[String],
    val sparkParamGetter: P => ml.param.StringArrayParam)
  extends MultipleColumnCreatorParam(name, description)
  with ForwardSparkParamWrapper[P, Array[String]] {

  // Creates a copy bound to a different name, keeping description and getter.
  override def replicate(name: String): MultipleColumnCreatorParamWrapper[P] =
    new MultipleColumnCreatorParamWrapper[P](name, description, sparkParamGetter)
}
| deepsense-io/seahorse-workflow-executor | deeplang/src/main/scala/io/deepsense/deeplang/params/wrappers/spark/MultipleColumnCreatorParamWrapper.scala | Scala | apache-2.0 | 1,214 |
package io.taric
package unpure
import java.io.InputStream
import java.security.Security
import org.bouncycastle.openpgp.{ PGPLiteralData, PGPCompressedData, PGPObjectFactory }
import java.util.zip.GZIPInputStream
/**
* File created: 2013-01-16 13:18
*
* Copyright Solvies AB 2013
* For licensing information see LICENSE file
*/
/** Decrypts a PGP-encrypted stream and decompresses the resulting GZIP payload. */
object StreamPipeline {

  /** Full pipeline: PGP decryption followed by GZIP decompression. */
  def decryptUnzip(stream: InputStream): InputStream = unzipStream(decryptPgpWrapper(stream))

  // Unwraps the PGP layers: armor -> compressed data -> (skipped signature
  // list) -> literal data, returning the literal payload stream.
  private[this] def decryptPgpWrapper(messageStream: InputStream): InputStream = {
    import org.bouncycastle.jce.provider.BouncyCastleProvider
    import org.bouncycastle.openpgp.PGPUtil

    Security.addProvider(new BouncyCastleProvider())

    val decoded = PGPUtil.getDecoderStream(messageStream)
    val outerFactory = new PGPObjectFactory(decoded)
    val compressed = outerFactory.nextObject().asInstanceOf[PGPCompressedData]
    val innerFactory = new PGPObjectFactory(compressed.getDataStream)
    innerFactory.nextObject() // Skip signature list
    val literal = innerFactory.nextObject().asInstanceOf[PGPLiteralData]
    literal.getInputStream
  }

  private[this] def unzipStream(zippedStream: InputStream): InputStream = new GZIPInputStream(zippedStream)
}
| magnusart/taric.io | taric-import/src/main/scala/io/taric/unpure/StreamPipeline.scala | Scala | apache-2.0 | 1,222 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.cache.model
import play.api.libs.json._
import reactivemongo.bson.BSONObjectID
import uk.gov.hmrc.mongo.CreationAndLastModifiedDetail
// Mongo cache document: `data` carries the cached JSON payload for `id`;
// `atomicId` is populated for atomically-updated documents.
case class Cache(
  id             : Id,
  data           : Option[JsValue] = None,
  modifiedDetails: CreationAndLastModifiedDetail = CreationAndLastModifiedDetail(),
  atomicId       : Option[BSONObjectID] = None
) extends Cacheable

object Cache {

  import uk.gov.hmrc.mongo.json.ReactiveMongoFormats

  // Name of the document attribute under which the payload is stored.
  final val DATA_ATTRIBUTE_NAME = "data"

  // NOTE: the BSONObjectID format must be in implicit scope before deriving
  // the Cache format below (Cache has a BSONObjectID field).
  implicit val format = ReactiveMongoFormats.objectIdFormats

  implicit val cacheFormat = Json.format[Cache]

  // Mongo-facing variant of the format (id <-> _id mapping via mongoEntity),
  // with Mongo's date-time encoding in scope for the derivation.
  val mongoFormats = ReactiveMongoFormats.mongoEntity {
    implicit val dateFormat = ReactiveMongoFormats.dateTimeFormats
    cacheFormat
  }
}
| hmrc/mongo-caching | src/main/scala/uk/gov/hmrc/cache/model/Cache.scala | Scala | apache-2.0 | 1,387 |
package org.http4s
package headers
import org.http4s.parser.HttpHeaderParser
import org.http4s.util.Writer
object `Content-Type` extends HeaderKey.Internal[`Content-Type`] with HeaderKey.Singleton {
  // Convenience constructors with and without an explicit charset.
  def apply(mediaType: MediaType, charset: Charset): `Content-Type` =
    apply(mediaType, Some(charset))
  def apply(mediaType: MediaType): `Content-Type` = apply(mediaType, None)

  // Parses a raw header value via the shared header parser.
  override def parse(s: String): ParseResult[`Content-Type`] =
    HttpHeaderParser.CONTENT_TYPE(s)
}
/** Typed `Content-Type` header: a media type with an optional charset parameter. */
final case class `Content-Type`(mediaType: MediaType, charset: Option[Charset])
    extends Header.Parsed {
  override def key: `Content-Type`.type = `Content-Type`

  // Renders "<media-type>" or "<media-type>; charset=<cs>" when a charset is set.
  override def renderValue(writer: Writer): writer.type = charset match {
    case None     => mediaType.render(writer)
    case Some(cs) => writer << mediaType << "; charset=" << cs
  }

  /** Returns this header with the given media type, reusing `this` when unchanged. */
  def withMediaType(mediaType: MediaType): `Content-Type` =
    if (this.mediaType == mediaType) this else copy(mediaType = mediaType)

  /** Returns this header with the given charset, reusing `this` when already set to it. */
  def withCharset(charset: Charset): `Content-Type` =
    if (this.charset.contains(charset)) this else copy(charset = Some(charset))

  /** Drops any explicitly defined charset. */
  def withoutDefinedCharset: `Content-Type` =
    if (charset.isEmpty) this else copy(charset = None)

  def isCharsetDefined: Boolean = charset.isDefined
  def noCharsetDefined: Boolean = charset.isEmpty
}
| reactormonk/http4s | core/src/main/scala/org/http4s/headers/Content-Type.scala | Scala | apache-2.0 | 1,326 |
package com.ponkotuy.data
import org.json4s._
/**
*
* @author ponkotuy
* Date: 14/03/24.
*/
/** Per-map progress entry decoded from the api JSON payload. */
case class MapInfo(id: Int, cleared: Boolean, exbossFlag: Boolean)

object MapInfo {
  implicit val formats = DefaultFormats

  /**
   * Decodes the api JSON array into [[MapInfo]] entries.
   * Throws a MatchError when `obj` is not a JSON array (same as before).
   */
  def fromJson(obj: JValue): List[MapInfo] = obj match {
    case JArray(entries) =>
      entries.map { entry =>
        MapInfo(
          id = (entry \\ "api_id").extract[Int],
          cleared = (entry \\ "api_cleared").extract[Int] != 0,
          exbossFlag = (entry \\ "api_exboss_flag").extract[Int] != 0
        )
      }
  }
}
| Moesugi/MFG | library/src/main/scala/com/ponkotuy/data/MapInfo.scala | Scala | mit | 530 |
package com.sksamuel.elastic4s.requests.searches.aggs.builders
import com.sksamuel.elastic4s.handlers
import com.sksamuel.elastic4s.handlers.script.ScriptBuilderFn
import com.sksamuel.elastic4s.json.{XContentBuilder, XContentFactory}
import com.sksamuel.elastic4s.requests.searches.aggs.WeightedAvgAggregation
object WeightedAvgAggregationBuilder {

  /** Builds the JSON body for a `weighted_avg` aggregation. */
  def apply(agg: WeightedAvgAggregation): XContentBuilder = {
    val builder = XContentFactory.jsonBuilder()
    builder.startObject("weighted_avg")

    // Each operand ("weight"/"value") is an object carrying an optional
    // field reference and/or an inline script.
    agg.weight.foreach { w =>
      builder.startObject("weight")
      w.field.foreach(f => builder.field("field", f))
      w.script.foreach(s => builder.rawField("script", ScriptBuilderFn(s)))
      builder.endObject()
    }

    agg.value.foreach { v =>
      builder.startObject("value")
      v.field.foreach(f => builder.field("field", f))
      v.script.foreach(s => builder.rawField("script", ScriptBuilderFn(s)))
      builder.endObject()
    }

    builder.endObject() // closes "weighted_avg"
    builder.endObject() // closes the root object opened by jsonBuilder()
  }
}
| sksamuel/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/searches/aggs/builders/WeightedAvgAggregationBuilder.scala | Scala | apache-2.0 | 1,078 |
package se.gigurra.wallace.audio
import se.gigurra.wallace.util.Time
import scala.collection.mutable.ArrayBuffer
/** Tracks currently playing sounds and retires them once they expire. */
case class AudioManager(loader: AudioLoader) {

  private val playingSounds = new ArrayBuffer[Sound]

  /** Removes every sound whose lifetime has elapsed and stops it. */
  def update(): Unit = {
    implicit val time = Time.seconds
    val (finished, _) = playingSounds.partition(_.expired)
    playingSounds --= finished
    finished.foreach(_.stopNow())
  }

  /** Stops all sounds immediately and empties the registry. */
  def close(): Unit = {
    playingSounds.foreach(_.stopNow())
    playingSounds.clear()
  }

  def playOnce(id: String,
               volume: Float = 1.0f): Sound = ???

  def loop(id: String,
           n: Int = Int.MaxValue,
           volume: Float = 1.0f): Sound = ???
}

object AudioManager {
  /** Convenience factory building the loader from resource search paths. */
  def apply(searchPaths: Seq[String]): AudioManager =
    AudioManager(AudioLoader(searchPaths))
}
| GiGurra/Wall-Ace | lib_audio/src/main/java/se/gigurra/wallace/audio/AudioManager.scala | Scala | gpl-2.0 | 806 |
// goseumdochi: experiments with incarnation
// Copyright 2016 John V. Sichi
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.goseumdochi.perception
import org.goseumdochi.common._
import org.goseumdochi.vision._
import org.goseumdochi.control._
import scala.concurrent.duration._
import java.io._
import org.specs2.specification.core._
/** Exercises reading, writing and round-tripping of perceptual event logs. */
class PerceptualLogSpec extends VisualizableSpecification
{
  // Reference event matching the first entry of the perceptual.json fixture.
  private val firstEvent =
    PerceptualEvent(
      "first event",
      ControlActor.CONTROL_ACTOR_NAME,
      ControlActor.BEHAVIOR_ACTOR_NAME,
      ControlActor.BodyMovedMsg(
        PlanarPos(25.0, 10.0),
        TimePoint(TimeSpan(10, SECONDS))))

  "PerceptualLog" should
  {
    "read log JSON" in
    {
      // The fixture contains two events; the first must match our reference.
      val path = resourcePath("/unit/perceptual.json")
      val seq = PerceptualLog.readJsonFile(path)
      seq.size must be equalTo(2)
      val first = seq.head
      first must be equalTo firstEvent
    }

    "write event JSON" in
    {
      // Serializing the reference event must reproduce the event.json fixture.
      val src = sourceFromPath(resourcePath("/unit/event.json"))
      val expected = src.getLines.mkString("\n")
      val result = PerceptualLog.toJsonString(firstEvent)
      result must be equalTo expected
    }

    // Round-trips through both supported on-disk formats, chosen by extension.
    "serialize and deserialize event" >> {
      Fragment.foreach(Seq(".ser", ".json")) { fileExt =>
        "using format " + fileExt ! {
          val file = File.createTempFile("event", fileExt)
          val filePath = file.getAbsolutePath
          PerceptualLog.serialize(filePath, Seq(firstEvent))
          val result = PerceptualLog.deserialize(filePath)
          file.delete
          result.size must be equalTo 1
          result.head must be equalTo firstEvent
        }
      }
    }
  }
}
| lingeringsocket/goseumdochi | base/src/test/scala/org/goseumdochi/perception/PerceptualLogSpec.scala | Scala | apache-2.0 | 2,204 |
package com.github.diegopacheco.scala.sandbox.fp.cats.datatypes
/**
* https://typelevel.org/cats/datatypes/optiont.html
*/
/**
 * Demo of cats' `OptionT` monad transformer: composes a `Future[Option[_]]`,
 * a plain `Future`, and a plain `Option` in a single for-comprehension.
 */
object OptionTMain extends App {
  import scala.concurrent.Future
  import scala.concurrent.ExecutionContext.Implicits.global

  import cats.data.OptionT
  import cats.implicits._

  // Three differently-shaped inputs that OptionT unifies.
  val greetingFO: Future[Option[String]] = Future.successful(Some("Hello"))
  val firstnameF: Future[String] = Future.successful("Jane")
  val lastnameO: Option[String] = Some("Doe")

  val ot: OptionT[Future, String] = for {
    g <- OptionT(greetingFO) // already a Future[Option[_]]
    f <- OptionT.liftF(firstnameF) // lift a plain Future
    l <- OptionT.fromOption[Future](lastnameO) // lift a plain Option
  } yield s"$g $f $l"

  val result: Future[Option[String]] = ot.value

  // NOTE(review): prints the Future itself (possibly not yet completed),
  // sleeps, then prints again so the completed value shows — a sandbox
  // demo device, not something to imitate in production code.
  println( result )
  Thread.sleep(2000)
  println( result )
} | diegopacheco/scala-playground | cats-scala-fp/src/main/scala/com/github/diegopacheco/scala/sandbox/fp/cats/datatypes/OptionTMain.scala | Scala | unlicense | 789 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.io._
import java.lang.{Byte => JByte}
import java.lang.management.{LockInfo, ManagementFactory, MonitorInfo, ThreadInfo}
import java.lang.reflect.InvocationTargetException
import java.math.{MathContext, RoundingMode}
import java.net._
import java.nio.ByteBuffer
import java.nio.channels.{Channels, FileChannel, WritableByteChannel}
import java.nio.charset.StandardCharsets
import java.nio.file.Files
import java.security.SecureRandom
import java.util.{Locale, Properties, Random, UUID}
import java.util.concurrent._
import java.util.concurrent.TimeUnit.NANOSECONDS
import java.util.zip.{GZIPInputStream, ZipInputStream}
import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.collection.Map
import scala.collection.mutable.ArrayBuffer
import scala.io.Source
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}
import scala.util.control.{ControlThrowable, NonFatal}
import scala.util.matching.Regex
import _root_.io.netty.channel.unix.Errors.NativeIoException
import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.google.common.collect.Interners
import com.google.common.io.{ByteStreams, Files => GFiles}
import com.google.common.net.InetAddresses
import org.apache.commons.codec.binary.Hex
import org.apache.commons.io.IOUtils
import org.apache.commons.lang3.SystemUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, FileUtil, Path}
import org.apache.hadoop.io.compress.{CompressionCodecFactory, SplittableCompressionCodec}
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.util.{RunJar, StringUtils}
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.logging.log4j.{Level, LogManager}
import org.apache.logging.log4j.core.LoggerContext
import org.eclipse.jetty.util.MultiException
import org.slf4j.Logger
import org.apache.spark._
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.{config, Logging}
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Streaming._
import org.apache.spark.internal.config.Tests.IS_TESTING
import org.apache.spark.internal.config.UI._
import org.apache.spark.internal.config.Worker._
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.serializer.{DeserializationStream, SerializationStream, Serializer, SerializerInstance}
import org.apache.spark.status.api.v1.{StackTrace, ThreadStackTrace}
import org.apache.spark.util.io.ChunkedByteBufferOutputStream
/** CallSite represents a place in user code. It can have a short and a long form. */
private[spark] case class CallSite(shortForm: String, longForm: String)

private[spark] object CallSite {
  // Property keys under which the two forms are stored.
  val SHORT_FORM = "callSite.short"
  val LONG_FORM = "callSite.long"
  // Placeholder used when no call site information is available.
  val empty = CallSite("", "")
}
/**
 * Various utility methods used by Spark.
 */
private[spark] object Utils extends Logging {

  // Shared random number generator (not seeded; not cryptographically secure).
  val random = new Random()

  // Shared instance of Spark's uncaught-exception handler.
  private val sparkUncaughtExceptionHandler = new SparkUncaughtExceptionHandler

  // Cached local directory path; empty string until first populated.
  @volatile private var cachedLocalDir: String = ""

  /**
   * Define a default value for driver memory here since this value is referenced across the code
   * base and nearly all files already use Utils.scala
   */
  val DEFAULT_DRIVER_MEM_MB = JavaUtils.DEFAULT_DRIVER_MEM_MB.toInt

  // Upper bound on retries when creating local/temp directories.
  val MAX_DIR_CREATION_ATTEMPTS: Int = 10

  // Lazily resolved local root directories; null until first computed.
  @volatile private var localRootDirs: Array[String] = null

  /** Scheme used for files that are locally available on worker nodes in the cluster. */
  val LOCAL_SCHEME = "local"

  // Weak interner backing `weakIntern`: interned strings remain collectible.
  private val weakStringInterner = Interners.newWeakInterner[String]()

  // Matches command-line system-property arguments of the form -Dkey=value.
  private val PATTERN_FOR_COMMAND_LINE_ARG = "-D(.+?)=(.+)".r
/** Serialize an object using Java serialization */
def serialize[T](o: T): Array[Byte] = {
val bos = new ByteArrayOutputStream()
val oos = new ObjectOutputStream(bos)
oos.writeObject(o)
oos.close()
bos.toByteArray
}
/** Deserialize an object using Java serialization */
def deserialize[T](bytes: Array[Byte]): T = {
val bis = new ByteArrayInputStream(bytes)
val ois = new ObjectInputStream(bis)
ois.readObject.asInstanceOf[T]
}
/** Deserialize an object using Java serialization and the given ClassLoader */
def deserialize[T](bytes: Array[Byte], loader: ClassLoader): T = {
  val objIn = new ObjectInputStream(new ByteArrayInputStream(bytes)) {
    // Resolve classes through the caller-supplied loader instead of the default one.
    override def resolveClass(desc: ObjectStreamClass): Class[_] = {
      // scalastyle:off classforname
      Class.forName(desc.getName, false, loader)
      // scalastyle:on classforname
    }
  }
  objIn.readObject.asInstanceOf[T]
}
/** Deserialize a Long value (used for [[org.apache.spark.api.python.PythonPartitioner]]) */
def deserializeLongValue(bytes: Array[Byte]) : Long = {
  // Bytes are in network (big-endian) order: bytes(0) is the most significant byte.
  var acc = 0L
  var i = 0
  while (i < 8) {
    acc = (acc << 8) | (bytes(i) & 0xFFL)
    i += 1
  }
  acc
}
/** Serialize via nested stream using specific serializer */
def serializeViaNestedStream(os: OutputStream, ser: SerializerInstance)(
    f: SerializationStream => Unit): Unit = {
  // Forwarding wrapper: only `write` is delegated; since OutputStream.close() is a
  // no-op by default, closing the serialization stream leaves `os` open.
  val nonClosingOs = new OutputStream {
    override def write(b: Int): Unit = os.write(b)
    override def write(b: Array[Byte], off: Int, len: Int): Unit = os.write(b, off, len)
  }
  val wrapped = ser.serializeStream(nonClosingOs)
  try {
    f(wrapped)
  } finally {
    wrapped.close()
  }
}
/** Deserialize via nested stream using specific serializer */
def deserializeViaNestedStream(is: InputStream, ser: SerializerInstance)(
    f: DeserializationStream => Unit): Unit = {
  // Forwarding wrapper: only `read` is delegated; since InputStream.close() is a
  // no-op by default, closing the deserialization stream leaves `is` open.
  val nonClosingIs = new InputStream {
    override def read(): Int = is.read()
    override def read(b: Array[Byte], off: Int, len: Int): Int = is.read(b, off, len)
  }
  val wrapped = ser.deserializeStream(nonClosingIs)
  try {
    f(wrapped)
  } finally {
    wrapped.close()
  }
}
/** String interning to reduce the memory usage. */
def weakIntern(s: String): String = {
  // Returns a canonical instance of `s`; weak references let unused entries be GC'd.
  weakStringInterner.intern(s)
}
/**
 * Get the ClassLoader which loaded Spark.
 *
 * @return the loader that defined this Utils class (and hence Spark itself).
 */
def getSparkClassLoader: ClassLoader = getClass.getClassLoader
/**
 * Get the Context ClassLoader on this thread or, if not present, the ClassLoader that
 * loaded Spark.
 *
 * This should be used whenever passing a ClassLoader to Class.ForName or finding the currently
 * active loader when setting up ClassLoader delegation chains.
 *
 * @return the thread's context loader, falling back to [[getSparkClassLoader]] when null.
 */
def getContextOrSparkClassLoader: ClassLoader =
  Option(Thread.currentThread().getContextClassLoader).getOrElse(getSparkClassLoader)
/** Determines whether the provided class is loadable in the current thread. */
def classIsLoadable(clazz: String): Boolean = {
  // Only checks resolvability; `initialize = false` avoids running static initializers.
  Try(classForName(clazz, initialize = false)).isSuccess
}
// scalastyle:off classforname
/**
 * Preferred alternative to Class.forName(className), as well as
 * Class.forName(className, initialize, loader) with current thread's ContextClassLoader.
 *
 * @param className fully-qualified class name to load.
 * @param initialize whether to run the class's static initializers.
 * @param noSparkClassLoader if true, use only the thread's context loader and skip the
 *                           Spark-loader fallback.
 */
def classForName[C](
    className: String,
    initialize: Boolean = true,
    noSparkClassLoader: Boolean = false): Class[C] = {
  // Select the loader first, then perform a single Class.forName call.
  val loader =
    if (noSparkClassLoader) Thread.currentThread().getContextClassLoader
    else getContextOrSparkClassLoader
  Class.forName(className, initialize, loader).asInstanceOf[Class[C]]
  // scalastyle:on classforname
}
/**
 * Run a segment of code using a different context class loader in the current thread
 *
 * @param ctxClassLoader loader installed for the duration of `fn`.
 * @param fn code to run; its result is returned.
 */
def withContextClassLoader[T](ctxClassLoader: ClassLoader)(fn: => T): T = {
  val currentThread = Thread.currentThread()
  val previous = currentThread.getContextClassLoader()
  try {
    currentThread.setContextClassLoader(ctxClassLoader)
    fn
  } finally {
    // Always restore the original loader, even if `fn` throws.
    currentThread.setContextClassLoader(previous)
  }
}
/**
 * Primitive often used when writing [[java.nio.ByteBuffer]] to [[java.io.DataOutput]]
 */
def writeByteBuffer(bb: ByteBuffer, out: DataOutput): Unit = {
  if (bb.hasArray) {
    // Heap buffer: write straight from the backing array, no copy needed.
    out.write(bb.array(), bb.arrayOffset() + bb.position(), bb.remaining())
  } else {
    // Direct buffer: copy the remaining bytes out, then restore the position so
    // the caller observes no change to the buffer.
    val savedPosition = bb.position()
    val copied = new Array[Byte](bb.remaining())
    bb.get(copied)
    out.write(copied)
    bb.position(savedPosition)
  }
}
/**
 * Primitive often used when writing [[java.nio.ByteBuffer]] to [[java.io.OutputStream]]
 */
def writeByteBuffer(bb: ByteBuffer, out: OutputStream): Unit = {
  if (bb.hasArray) {
    // Heap buffer: write straight from the backing array, no copy needed.
    out.write(bb.array(), bb.arrayOffset() + bb.position(), bb.remaining())
  } else {
    // Direct buffer: copy the remaining bytes out, then restore the position so
    // the caller observes no change to the buffer.
    val savedPosition = bb.position()
    val copied = new Array[Byte](bb.remaining())
    bb.get(copied)
    out.write(copied)
    bb.position(savedPosition)
  }
}
/**
 * JDK equivalent of `chmod 700 file`.
 *
 * @param file the file whose permissions will be modified
 * @return true if the permissions were successfully changed, false otherwise.
 */
def chmod700(file: File): Boolean = {
  // For each of read/write/execute: revoke the permission for everybody, then
  // grant it back to the owner only. `forall` short-circuits on the first failure,
  // matching the original &&-chain.
  val steps = Seq[() => Boolean](
    () => file.setReadable(false, false),
    () => file.setReadable(true, true),
    () => file.setWritable(false, false),
    () => file.setWritable(true, true),
    () => file.setExecutable(false, false),
    () => file.setExecutable(true, true)
  )
  steps.forall(_.apply())
}
/**
 * Create a directory given the abstract pathname
 * @return true, if the directory is successfully created; otherwise, return false.
 */
def createDirectory(dir: File): Boolean = {
  try {
    // SPARK-35907: The check was required by File.mkdirs() because it could sporadically
    // fail silently. After switching to Files.createDirectories(), ideally, there should
    // no longer be silent fails. But the check is kept for the safety concern. We can
    // remove the check when we're sure that Files.createDirectories() would never fail silently.
    Files.createDirectories(dir.toPath)
    if ( !dir.exists() || !dir.isDirectory) {
      logError(s"Failed to create directory " + dir)
    }
    // The returned verdict is whether the path now exists as a directory, regardless
    // of whether the error above was logged.
    dir.isDirectory
  } catch {
    case e: Exception =>
      logError(s"Failed to create directory " + dir, e)
      false
  }
}
/**
 * Create a directory inside the given parent directory. The directory is guaranteed to be
 * newly created, and is not marked for automatic deletion.
 *
 * @param root parent directory path.
 * @param namePrefix prefix for the random directory name.
 * @throws IOException after MAX_DIR_CREATION_ATTEMPTS failed attempts.
 */
def createDirectory(root: String, namePrefix: String = "spark"): File = {
  val maxAttempts = MAX_DIR_CREATION_ATTEMPTS
  var created: File = null
  var attempts = 0
  while (created == null) {
    attempts += 1
    if (attempts > maxAttempts) {
      throw new IOException("Failed to create a temp directory (under " + root + ") after " +
        maxAttempts + " attempts!")
    }
    // Random UUID suffix makes collisions with existing directories vanishingly unlikely.
    val candidate = new File(root, namePrefix + "-" + UUID.randomUUID.toString)
    try {
      // SPARK-35907:
      // This could throw more meaningful exception information if directory creation failed.
      Files.createDirectories(candidate.toPath)
      created = candidate
    } catch {
      case e @ (_ : IOException | _ : SecurityException) =>
        logError(s"Failed to create directory $candidate", e)
    }
  }
  created.getCanonicalFile
}
/**
 * Create a temporary directory inside the given parent directory. The directory will be
 * automatically deleted when the VM shuts down.
 *
 * @param root parent directory; defaults to the JVM temp dir (java.io.tmpdir).
 * @param namePrefix prefix for the generated directory name.
 * @return the newly created directory.
 */
def createTempDir(
    root: String = System.getProperty("java.io.tmpdir"),
    namePrefix: String = "spark"): File = {
  val dir = createDirectory(root, namePrefix)
  // Registering with the shutdown hook manager is what makes this "temp":
  // the directory is deleted when the JVM exits.
  ShutdownHookManager.registerShutdownDeleteDir(dir)
  dir
}
/**
 * Copy all data from an InputStream to an OutputStream. NIO way of file stream to file stream
 * copying is disabled by default unless explicitly set transferToEnabled as true,
 * the parameter transferToEnabled should be configured by spark.file.transferTo = [true|false].
 *
 * @return the number of bytes copied.
 */
def copyStream(
    in: InputStream,
    out: OutputStream,
    closeStreams: Boolean = false,
    transferToEnabled: Boolean = false): Long = {
  tryWithSafeFinally {
    (in, out) match {
      case (input: FileInputStream, output: FileOutputStream) if transferToEnabled =>
        // Both ends are file streams: delegate to channel transfer.
        val inChannel = input.getChannel
        val outChannel = output.getChannel
        val size = inChannel.size()
        copyFileStreamNIO(inChannel, outChannel, 0, size)
        size
      case (input, output) =>
        // Generic path: pump through a fixed-size buffer until EOF.
        val buf = new Array[Byte](8192)
        var count = 0L
        var read = input.read(buf)
        while (read != -1) {
          output.write(buf, 0, read)
          count += read
          read = input.read(buf)
        }
        count
    }
  } {
    if (closeStreams) {
      try {
        in.close()
      } finally {
        out.close()
      }
    }
  }
}
/**
 * Copy the first `maxSize` bytes of data from the InputStream to an in-memory
 * buffer, primarily to check for corruption.
 *
 * This returns a new InputStream which contains the same data as the original input stream.
 * It may be entirely on in-memory buffer, or it may be a combination of in-memory data, and then
 * continue to read from the original stream. The only real use of this is if the original input
 * stream will potentially detect corruption while the data is being read (e.g. from compression).
 * This allows for an eager check of corruption in the first maxSize bytes of data.
 *
 * @return An InputStream which includes all data from the original stream (combining buffered
 *         data and remaining data in the original stream)
 */
def copyStreamUpTo(in: InputStream, maxSize: Long): InputStream = {
  var count = 0L
  val out = new ChunkedByteBufferOutputStream(64 * 1024, ByteBuffer.allocate)
  val fullyCopied = tryWithSafeFinally {
    // Read at most `maxSize` bytes; the final read is clamped so we never read past the limit.
    val bufSize = Math.min(8192L, maxSize)
    val buf = new Array[Byte](bufSize.toInt)
    var n = 0
    while (n != -1 && count < maxSize) {
      n = in.read(buf, 0, Math.min(maxSize - count, bufSize).toInt)
      if (n != -1) {
        out.write(buf, 0, n)
        count += n
      }
    }
    // If we hit EOF before reaching the limit, the whole stream fit in memory.
    count < maxSize
  } {
    try {
      if (count < maxSize) {
        // Fully consumed: the original stream is no longer needed.
        in.close()
      }
    } finally {
      out.close()
    }
  }
  if (fullyCopied) {
    out.toChunkedByteBuffer.toInputStream(dispose = true)
  } else {
    // Serve the buffered prefix first, then continue reading from the original stream.
    new SequenceInputStream( out.toChunkedByteBuffer.toInputStream(dispose = true), in)
  }
}
/**
 * Copy `bytesToCopy` bytes from `input` (starting at `startPosition`) into `output`
 * using FileChannel.transferTo, looping until everything is transferred.
 *
 * @param input source channel.
 * @param output destination channel.
 * @param startPosition offset in `input` at which copying starts.
 * @param bytesToCopy total number of bytes to transfer.
 */
def copyFileStreamNIO(
    input: FileChannel,
    output: WritableByteChannel,
    startPosition: Long,
    bytesToCopy: Long): Unit = {
  // Remember the output position (when it is a FileChannel) so we can verify the
  // kernel actually advanced it — see the JDK-7052359 note below.
  val outputInitialState = output match {
    case outputFileChannel: FileChannel =>
      Some((outputFileChannel.position(), outputFileChannel))
    case _ => None
  }
  var count = 0L
  // In case transferTo method transferred less data than we have required.
  while (count < bytesToCopy) {
    count += input.transferTo(count + startPosition, bytesToCopy - count, output)
  }
  assert(count == bytesToCopy,
    s"request to copy $bytesToCopy bytes, but actually copied $count bytes.")
  // Check the position after transferTo loop to see if it is in the right position and
  // give user information if not.
  // Position will not be increased to the expected length after calling transferTo in
  // kernel version 2.6.32, this issue can be seen in
  // https://bugs.openjdk.java.net/browse/JDK-7052359
  // This will lead to stream corruption issue when using sort-based shuffle (SPARK-3948).
  outputInitialState.foreach { case (initialPos, outputFileChannel) =>
    val finalPos = outputFileChannel.position()
    val expectedPos = initialPos + bytesToCopy
    assert(finalPos == expectedPos,
      s"""
         |Current position $finalPos do not equal to expected position $expectedPos
         |after transferTo, please check your kernel version to see if it is 2.6.32,
         |this is a kernel bug which will lead to unexpected behavior when using transferTo.
         |You can set spark.file.transferTo = false to disable this NIO feature.
       """.stripMargin)
  }
}
/**
 * A file name may contain some invalid URI characters, such as " ". This method will convert the
 * file name to a raw path accepted by `java.net.URI(String)`.
 *
 * Note: the file name must not contain "/" or "\"
 *
 * @param fileName a single path segment (no directory separators).
 * @return the percent-encoded raw-path form of `fileName`.
 */
def encodeFileNameToURIRawPath(fileName: String): String = {
  // Bug fix: the previous check compared against "\\\\" (a TWO-character sequence of
  // backslashes), so a name containing a single backslash slipped through the guard.
  // A single literal backslash is written "\\" in Scala source.
  require(!fileName.contains("/") && !fileName.contains("\\"))
  // `file` and `localhost` are not used. Just to prevent URI from parsing `fileName` as
  // scheme or host. The prefix "/" is required because URI doesn't accept a relative path.
  // We should remove it after we get the raw path.
  new URI("file", null, "localhost", -1, "/" + fileName, null, null).getRawPath.substring(1)
}
/**
 * Get the file name from uri's raw path and decode it. If the raw path of uri ends with "/",
 * return the name before the last "/".
 */
def decodeFileNameInURI(uri: URI): String = {
  // Last non-empty path segment, still percent-encoded.
  val encodedName = uri.getRawPath.split("/").last
  // Round-trip through URI to percent-decode the segment.
  new URI("file:///" + encodedName).getPath.substring(1)
}
/**
 * Download a file or directory to target directory. Supports fetching the file in a variety of
 * ways, including HTTP, Hadoop-compatible filesystems, and files on a standard filesystem, based
 * on the URL parameter. Fetching directories is only supported from Hadoop-compatible
 * filesystems.
 *
 * If `useCache` is true, first attempts to fetch the file to a local cache that's shared
 * across executors running the same application. `useCache` is used mainly for
 * the executors, and not in local mode.
 *
 * Throws SparkException if the target file already exists and has different contents than
 * the requested file.
 *
 * If `shouldUntar` is true, it untars the given url if it is a tar.gz or tgz into `targetDir`.
 * This is a legacy behavior, and users should better use `spark.archives` configuration or
 * `SparkContext.addArchive`
 */
def fetchFile(
    url: String,
    targetDir: File,
    conf: SparkConf,
    hadoopConf: Configuration,
    timestamp: Long,
    useCache: Boolean,
    shouldUntar: Boolean = true): File = {
  val fileName = decodeFileNameInURI(new URI(url))
  val targetFile = new File(targetDir, fileName)
  val fetchCacheEnabled = conf.getBoolean("spark.files.useFetchCache", defaultValue = true)
  if (useCache && fetchCacheEnabled) {
    // Cache entries are keyed by url hash + timestamp, so a re-added file is re-fetched.
    val cachedFileName = s"${url.hashCode}${timestamp}_cache"
    val lockFileName = s"${url.hashCode}${timestamp}_lock"
    // Set the cachedLocalDir for the first time and re-use it later
    if (cachedLocalDir.isEmpty) {
      this.synchronized {
        // Double-checked: cachedLocalDir is @volatile, so the fast path above is safe.
        if (cachedLocalDir.isEmpty) {
          cachedLocalDir = getLocalDir(conf)
        }
      }
    }
    val localDir = new File(cachedLocalDir)
    val lockFile = new File(localDir, lockFileName)
    val lockFileChannel = new RandomAccessFile(lockFile, "rw").getChannel()
    // Only one executor entry.
    // The FileLock is only used to control synchronization for executors download file,
    // it's always safe regardless of lock type (mandatory or advisory).
    val lock = lockFileChannel.lock()
    val cachedFile = new File(localDir, cachedFileName)
    try {
      // First process to acquire the lock downloads into the cache; others reuse it.
      if (!cachedFile.exists()) {
        doFetchFile(url, localDir, cachedFileName, conf, hadoopConf)
      }
    } finally {
      lock.release()
      lockFileChannel.close()
    }
    copyFile(
      url,
      cachedFile,
      targetFile,
      conf.getBoolean("spark.files.overwrite", false)
    )
  } else {
    doFetchFile(url, targetDir, fileName, conf, hadoopConf)
  }
  if (shouldUntar) {
    // Decompress the file if it's a .tar or .tar.gz
    if (fileName.endsWith(".tar.gz") || fileName.endsWith(".tgz")) {
      logWarning(
        "Untarring behavior will be deprecated at spark.files and " +
        "SparkContext.addFile. Consider using spark.archives or SparkContext.addArchive " +
        "instead.")
      logInfo("Untarring " + fileName)
      executeAndGetOutput(Seq("tar", "-xzf", fileName), targetDir)
    } else if (fileName.endsWith(".tar")) {
      logWarning(
        "Untarring behavior will be deprecated at spark.files and " +
        "SparkContext.addFile. Consider using spark.archives or SparkContext.addArchive " +
        "instead.")
      logInfo("Untarring " + fileName)
      executeAndGetOutput(Seq("tar", "-xf", fileName), targetDir)
    }
  }
  // Make the file executable - That's necessary for scripts
  FileUtil.chmod(targetFile.getAbsolutePath, "a+x")
  // Windows does not grant read permission by default to non-admin users
  // Add read permission to owner explicitly
  if (isWindows) {
    FileUtil.chmod(targetFile.getAbsolutePath, "u+r")
  }
  targetFile
}
/**
 * Unpacks an archive file into the specified directory. It expects .jar, .zip, .tar.gz, .tgz
 * and .tar files. This behaves same as Hadoop's archive in distributed cache. This method is
 * basically copied from `org.apache.hadoop.yarn.util.FSDownload.unpack`.
 */
def unpack(source: File, dest: File): Unit = {
  // Dispatch on the (lower-cased) file extension, mirroring Hadoop's FSDownload.
  val lowerSrc = StringUtils.toLowerCase(source.getName)
  if (lowerSrc.endsWith(".jar")) {
    RunJar.unJar(source, dest, RunJar.MATCH_ANY)
  } else if (lowerSrc.endsWith(".zip")) {
    FileUtil.unZip(source, dest)
  } else if (
    lowerSrc.endsWith(".tar.gz") || lowerSrc.endsWith(".tgz") || lowerSrc.endsWith(".tar")) {
    FileUtil.unTar(source, dest)
  } else {
    // Unknown extension: fall back to a plain copy rather than failing.
    logWarning(s"Cannot unpack $source, just copying it to $dest.")
    copyRecursive(source, dest)
  }
}
/** Records the duration of running `body`. */
def timeTakenMs[T](body: => T): (T, Long) = {
  val start = System.nanoTime()
  val result = body
  val elapsedNs = System.nanoTime() - start
  // Defensive clamp to zero so callers never observe a negative duration.
  (result, math.max(NANOSECONDS.toMillis(elapsedNs), 0))
}
/**
 * Download `in` to `tempFile`, then move it to `destFile`.
 *
 * If `destFile` already exists:
 *   - no-op if its contents equal those of `sourceFile`,
 *   - throw an exception if `fileOverwrite` is false,
 *   - attempt to overwrite it otherwise.
 *
 * @param url URL that `sourceFile` originated from, for logging purposes.
 * @param in InputStream to download.
 * @param destFile File path to move `tempFile` to.
 * @param fileOverwrite Whether to delete/overwrite an existing `destFile` that does not match
 *                      `sourceFile`
 */
private def downloadFile(
    url: String,
    in: InputStream,
    destFile: File,
    fileOverwrite: Boolean): Unit = {
  // Download into a temp file in the SAME directory as destFile so the final
  // move is within one filesystem.
  val tempFile = File.createTempFile("fetchFileTemp", null,
    new File(destFile.getParentFile.getAbsolutePath))
  logInfo(s"Fetching $url to $tempFile")
  try {
    val out = new FileOutputStream(tempFile)
    // closeStreams = true: copyStream closes both `in` and `out` for us.
    Utils.copyStream(in, out, closeStreams = true)
    // removeSourceFile = true: copyFile moves tempFile into place.
    copyFile(url, tempFile, destFile, fileOverwrite, removeSourceFile = true)
  } finally {
    // Catch-all for the couple of cases where for some reason we didn't move `tempFile` to
    // `destFile`.
    if (tempFile.exists()) {
      tempFile.delete()
    }
  }
}
/**
 * Copy `sourceFile` to `destFile`.
 *
 * If `destFile` already exists:
 *   - no-op if its contents equal those of `sourceFile`,
 *   - throw an exception if `fileOverwrite` is false,
 *   - attempt to overwrite it otherwise.
 *
 * @param url URL that `sourceFile` originated from, for logging purposes.
 * @param sourceFile File path to copy/move from.
 * @param destFile File path to copy/move to.
 * @param fileOverwrite Whether to delete/overwrite an existing `destFile` that does not match
 *                      `sourceFile`
 * @param removeSourceFile Whether to remove `sourceFile` after / as part of moving/copying it to
 *                         `destFile`.
 */
private def copyFile(
    url: String,
    sourceFile: File,
    destFile: File,
    fileOverwrite: Boolean,
    removeSourceFile: Boolean = false): Unit = {
  if (destFile.exists) {
    if (!filesEqualRecursive(sourceFile, destFile)) {
      if (fileOverwrite) {
        logInfo(
          s"File $destFile exists and does not match contents of $url, replacing it with $url"
        )
        // Delete the stale copy first; the copy/move below recreates it.
        if (!destFile.delete()) {
          throw new SparkException(
            "Failed to delete %s while attempting to overwrite it with %s".format(
              destFile.getAbsolutePath,
              sourceFile.getAbsolutePath
            )
          )
        }
      } else {
        throw new SparkException(
          s"File $destFile exists and does not match contents of $url")
      }
    } else {
      // Do nothing if the file contents are the same, i.e. this file has been copied
      // previously.
      logInfo(
        "%s has been previously copied to %s".format(
          sourceFile.getAbsolutePath,
          destFile.getAbsolutePath
        )
      )
      return
    }
  }
  // The file does not exist in the target directory. Copy or move it there.
  if (removeSourceFile) {
    Files.move(sourceFile.toPath, destFile.toPath)
  } else {
    logInfo(s"Copying ${sourceFile.getAbsolutePath} to ${destFile.getAbsolutePath}")
    copyRecursive(sourceFile, destFile)
  }
}
/** Compares two paths recursively: equal structure and byte-equal file contents. */
private def filesEqualRecursive(file1: File, file2: File): Boolean = {
  if (file1.isDirectory && file2.isDirectory) {
    val children1 = file1.listFiles()
    val children2 = file2.listFiles()
    // Same entry count, and entries matched by sorted name must be recursively equal.
    children1.size == children2.size &&
      children1.sortBy(_.getName).zip(children2.sortBy(_.getName)).forall {
        case (c1, c2) => filesEqualRecursive(c1, c2)
      }
  } else if (file1.isFile && file2.isFile) {
    // Byte-for-byte file comparison (Guava).
    GFiles.equal(file1, file2)
  } else {
    // Mismatched kinds (file vs directory) or non-existent paths.
    false
  }
}
/** Copies `source` to `dest`, recursing into directories; `dest` must not already exist. */
private def copyRecursive(source: File, dest: File): Unit = {
  if (!source.isDirectory) {
    Files.copy(source.toPath, dest.toPath)
  } else {
    if (!dest.mkdir()) {
      throw new IOException(s"Failed to create directory ${dest.getPath}")
    }
    // Copy every child under the freshly created destination directory.
    source.listFiles().foreach { child =>
      copyRecursive(child, new File(dest, child.getName))
    }
  }
}
/**
 * Download a file or directory to target directory. Supports fetching the file in a variety of
 * ways, including HTTP, Hadoop-compatible filesystems, and files on a standard filesystem, based
 * on the URL parameter. Fetching directories is only supported from Hadoop-compatible
 * filesystems.
 *
 * Throws SparkException if the target file already exists and has different contents than
 * the requested file.
 */
def doFetchFile(
    url: String,
    targetDir: File,
    filename: String,
    conf: SparkConf,
    hadoopConf: Configuration): File = {
  val targetFile = new File(targetDir, filename)
  val uri = new URI(url)
  val fileOverwrite = conf.getBoolean("spark.files.overwrite", defaultValue = false)
  // Dispatch on the URI scheme; a missing scheme is treated as a local file.
  Option(uri.getScheme).getOrElse("file") match {
    case "spark" =>
      if (SparkEnv.get == null) {
        throw new IllegalStateException(
          "Cannot retrieve files with 'spark' scheme without an active SparkEnv.")
      }
      // Stream the file over Spark's RPC channel.
      val source = SparkEnv.get.rpcEnv.openChannel(url)
      val is = Channels.newInputStream(source)
      downloadFile(url, is, targetFile, fileOverwrite)
    case "http" | "https" | "ftp" =>
      // Plain URL download with configurable connect/read timeouts.
      val uc = new URL(url).openConnection()
      val timeoutMs =
        conf.getTimeAsSeconds("spark.files.fetchTimeout", "60s").toInt * 1000
      uc.setConnectTimeout(timeoutMs)
      uc.setReadTimeout(timeoutMs)
      uc.connect()
      val in = uc.getInputStream()
      downloadFile(url, in, targetFile, fileOverwrite)
    case "file" =>
      // In the case of a local file, copy the local file to the target directory.
      // Note the difference between uri vs url.
      val sourceFile = if (uri.isAbsolute) new File(uri) else new File(uri.getPath)
      copyFile(url, sourceFile, targetFile, fileOverwrite)
    case _ =>
      // Any other scheme is assumed to be a Hadoop-compatible filesystem.
      val fs = getHadoopFileSystem(uri, hadoopConf)
      val path = new Path(uri)
      fetchHcfsFile(path, targetDir, fs, conf, hadoopConf, fileOverwrite,
        filename = Some(filename))
  }
  targetFile
}
/**
 * Fetch a file or directory from a Hadoop-compatible filesystem.
 *
 * Visible for testing
 *
 * @param filename optional override for the destination name; defaults to the path's own name.
 */
private[spark] def fetchHcfsFile(
    path: Path,
    targetDir: File,
    fs: FileSystem,
    conf: SparkConf,
    hadoopConf: Configuration,
    fileOverwrite: Boolean,
    filename: Option[String] = None): Unit = {
  if (!targetDir.exists() && !targetDir.mkdir()) {
    throw new IOException(s"Failed to create directory ${targetDir.getPath}")
  }
  val dest = new File(targetDir, filename.getOrElse(path.getName))
  if (fs.isFile(path)) {
    // Single file: stream it down via downloadFile.
    val in = fs.open(path)
    try {
      downloadFile(path.toString, in, dest, fileOverwrite)
    } finally {
      in.close()
    }
  } else {
    // Directory: recurse into each child, mirroring the tree under `dest`.
    fs.listStatus(path).foreach { fileStatus =>
      fetchHcfsFile(fileStatus.getPath(), dest, fs, conf, hadoopConf, fileOverwrite)
    }
  }
}
/**
 * Validate that a given URI is actually a valid URL as well.
 * @param uri The URI to validate
 */
@throws[MalformedURLException]("when the URI is an invalid URL")
def validateURL(uri: URI): Unit = {
  val scheme = Option(uri.getScheme).getOrElse("file")
  scheme match {
    case "http" | "https" | "ftp" =>
      // Only schemes that will later be opened as URLs need to round-trip through URL.
      try {
        uri.toURL
      } catch {
        case e: MalformedURLException =>
          val ex = new MalformedURLException(s"URI (${uri.toString}) is not a valid URL.")
          ex.initCause(e)
          throw ex
      }
    case _ => // will not be turned into a URL anyway
  }
}
/**
 * Get the path of a temporary directory. Spark's local directories can be configured through
 * multiple settings, which are used with the following precedence:
 *
 *   - If called from inside of a YARN container, this will return a directory chosen by YARN.
 *   - If the SPARK_LOCAL_DIRS environment variable is set, this will return a directory from it.
 *   - Otherwise, if the spark.local.dir is set, this will return a directory from it.
 *   - Otherwise, this will return java.io.tmpdir.
 *
 * Some of these configuration options might be lists of multiple paths, but this method will
 * always return a single directory. The return directory is chosen randomly from the array
 * of directories it gets from getOrCreateLocalRootDirs.
 */
def getLocalDir(conf: SparkConf): String = {
  val localRootDirs = getOrCreateLocalRootDirs(conf)
  if (localRootDirs.isEmpty) {
    // Nothing could be created; report the candidate dirs so the user can fix config.
    val configuredLocalDirs = getConfiguredLocalDirs(conf)
    throw new IOException(
      s"Failed to get a temp directory under [${configuredLocalDirs.mkString(",")}].")
  } else {
    // Random pick spreads load across multiple configured disks.
    localRootDirs(scala.util.Random.nextInt(localRootDirs.length))
  }
}
/** Returns true if this JVM appears to be running inside a YARN container. */
private[spark] def isRunningInYarnContainer(conf: SparkConf): Boolean = {
  // These environment variables are set by YARN.
  conf.getenv("CONTAINER_ID") != null
}
/**
 * Returns if the current codes are running in a Spark task, e.g., in executors.
 */
// A non-null TaskContext exists only on threads that are executing a task.
def isInRunningSparkTask: Boolean = TaskContext.get() != null
/**
 * Gets or creates the directories listed in spark.local.dir or SPARK_LOCAL_DIRS,
 * and returns only the directories that exist / could be created.
 *
 * If no directories could be created, this will return an empty list.
 *
 * This method will cache the local directories for the application when it's first invoked.
 * So calling it multiple times with a different configuration will always return the same
 * set of directories.
 */
private[spark] def getOrCreateLocalRootDirs(conf: SparkConf): Array[String] = {
  // Double-checked locking; `localRootDirs` is declared @volatile, so the unsynchronized
  // fast path is safe once it has been published.
  if (localRootDirs == null) {
    this.synchronized {
      if (localRootDirs == null) {
        localRootDirs = getOrCreateLocalRootDirsImpl(conf)
      }
    }
  }
  localRootDirs
}
/**
 * Return the configured local directories where Spark can write files. This
 * method does not create any directories on its own, it only encapsulates the
 * logic of locating the local directories according to deployment mode.
 */
def getConfiguredLocalDirs(conf: SparkConf): Array[String] = {
  val shuffleServiceEnabled = conf.get(config.SHUFFLE_SERVICE_ENABLED)
  if (isRunningInYarnContainer(conf)) {
    // If we are in yarn mode, systems can have different disk layouts so we must set it
    // to what Yarn on this system said was available. Note this assumes that Yarn has
    // created the directories already, and that they are secured so that only the
    // user has access to them.
    randomizeInPlace(getYarnLocalDirs(conf).split(","))
  } else if (conf.getenv("SPARK_EXECUTOR_DIRS") != null) {
    conf.getenv("SPARK_EXECUTOR_DIRS").split(File.pathSeparator)
  } else if (conf.getenv("SPARK_LOCAL_DIRS") != null) {
    conf.getenv("SPARK_LOCAL_DIRS").split(",")
  } else if (conf.getenv("MESOS_SANDBOX") != null && !shuffleServiceEnabled) {
    // Mesos already creates a directory per Mesos task. Spark should use that directory
    // instead so all temporary files are automatically cleaned up when the Mesos task ends.
    // Note that we don't want this if the shuffle service is enabled because we want to
    // continue to serve shuffle files after the executors that wrote them have already exited.
    Array(conf.getenv("MESOS_SANDBOX"))
  } else {
    if (conf.getenv("MESOS_SANDBOX") != null && shuffleServiceEnabled) {
      logInfo("MESOS_SANDBOX available but not using provided Mesos sandbox because " +
        s"${config.SHUFFLE_SERVICE_ENABLED.key} is enabled.")
    }
    // In non-Yarn mode (or for the driver in yarn-client mode), we cannot trust the user
    // configuration to point to a secure directory. So create a subdirectory with restricted
    // permissions under each listed directory.
    conf.get("spark.local.dir", System.getProperty("java.io.tmpdir")).split(",")
  }
}
/**
 * Creates one secured (chmod 700) temp subdirectory under each configured local dir,
 * returning the paths that could actually be created. Dirs that fail are logged and skipped.
 */
private def getOrCreateLocalRootDirsImpl(conf: SparkConf): Array[String] = {
  val configuredLocalDirs = getConfiguredLocalDirs(conf)
  val uris = configuredLocalDirs.filter { root =>
    // Here, we guess if the given value is a URI at its best - check if scheme is set.
    Try(new URI(root).getScheme != null).getOrElse(false)
  }
  if (uris.nonEmpty) {
    logWarning(
      "The configured local directories are not expected to be URIs; however, got suspicious " +
      s"values [${uris.mkString(", ")}]. Please check your configured local directories.")
  }
  configuredLocalDirs.flatMap { root =>
    try {
      val rootDir = new File(root)
      if (rootDir.exists || rootDir.mkdirs()) {
        // createTempDir registers the dir for deletion at JVM shutdown; chmod700
        // restricts it to the current user.
        val dir = createTempDir(root)
        chmod700(dir)
        Some(dir.getAbsolutePath)
      } else {
        logError(s"Failed to create dir in $root. Ignoring this directory.")
        None
      }
    } catch {
      case e: IOException =>
        // NOTE(review): the caught exception is not included in the log message —
        // presumably intentional brevity; consider logging `e` for diagnosability.
        logError(s"Failed to create local root dir in $root. Ignoring this directory.")
        None
    }
  }
}
/** Get the Yarn approved local directories. */
private def getYarnLocalDirs(conf: SparkConf): String = {
  // LOCAL_DIRS is the comma-separated list of container-local dirs provided by YARN.
  val localDirs = Option(conf.getenv("LOCAL_DIRS")).getOrElse("")
  if (localDirs.isEmpty) {
    throw new Exception("Yarn Local dirs can't be empty")
  }
  localDirs
}
/** Used by unit tests. Do not call from other places. */
private[spark] def clearLocalRootDirs(): Unit = {
  // Resets the cache so the next getOrCreateLocalRootDirs call re-reads the config.
  localRootDirs = null
}
/**
 * Shuffle the elements of a collection into a random order, returning the
 * result in a new collection. Unlike scala.util.Random.shuffle, this method
 * uses a local random number generator, avoiding inter-thread contention.
 */
def randomize[T: ClassTag](seq: TraversableOnce[T]): Seq[T] = {
  // Copies into a fresh array first; the shuffle itself is done by randomizeInPlace.
  randomizeInPlace(seq.toArray)
}
/**
 * Shuffle the elements of an array into a random order, modifying the
 * original array. Returns the original array.
 *
 * Implements the Fisher-Yates shuffle: walk from the last index down, swapping
 * each position with a uniformly chosen index at or below it.
 */
def randomizeInPlace[T](arr: Array[T], rand: Random = new Random): Array[T] = {
  var i = arr.length - 1
  while (i >= 1) {
    val j = rand.nextInt(i + 1)
    val swapped = arr(j)
    arr(j) = arr(i)
    arr(i) = swapped
    i -= 1
  }
  arr
}
/**
 * Get the local host's IP address in dotted-quad format (e.g. 1.2.3.4).
 * Note, this is typically not used from within core spark.
 *
 * Resolved lazily, once per JVM; see findLocalInetAddress for the resolution rules.
 */
private lazy val localIpAddress: InetAddress = findLocalInetAddress()
/**
 * Resolve a usable local address: honor SPARK_LOCAL_IP if set; otherwise use
 * InetAddress.getLocalHost, scanning network interfaces for a non-loopback,
 * non-link-local address (preferring IPv4) when getLocalHost is a loopback.
 */
private def findLocalInetAddress(): InetAddress = {
  val defaultIpOverride = System.getenv("SPARK_LOCAL_IP")
  if (defaultIpOverride != null) {
    // Explicit override always wins.
    InetAddress.getByName(defaultIpOverride)
  } else {
    val address = InetAddress.getLocalHost
    if (address.isLoopbackAddress) {
      // Address resolves to something like 127.0.1.1, which happens on Debian; try to find
      // a better address using the local network interfaces
      // getNetworkInterfaces returns ifs in reverse order compared to ifconfig output order
      // on unix-like system. On windows, it returns in index order.
      // It's more proper to pick ip address following system output order.
      val activeNetworkIFs = NetworkInterface.getNetworkInterfaces.asScala.toSeq
      val reOrderedNetworkIFs = if (isWindows) activeNetworkIFs else activeNetworkIFs.reverse
      for (ni <- reOrderedNetworkIFs) {
        val addresses = ni.getInetAddresses.asScala
          .filterNot(addr => addr.isLinkLocalAddress || addr.isLoopbackAddress).toSeq
        if (addresses.nonEmpty) {
          val addr = addresses.find(_.isInstanceOf[Inet4Address]).getOrElse(addresses.head)
          // because of Inet6Address.toHostName may add interface at the end if it knows about it
          val strippedAddress = InetAddress.getByAddress(addr.getAddress)
          // We've found an address that looks reasonable!
          logWarning("Your hostname, " + InetAddress.getLocalHost.getHostName + " resolves to" +
            " a loopback address: " + address.getHostAddress + "; using " +
            strippedAddress.getHostAddress + " instead (on interface " + ni.getName + ")")
          logWarning("Set SPARK_LOCAL_IP if you need to bind to another address")
          return strippedAddress
        }
      }
      logWarning("Your hostname, " + InetAddress.getLocalHost.getHostName + " resolves to" +
        " a loopback address: " + address.getHostAddress + ", but we couldn't find any" +
        " external IP address!")
      logWarning("Set SPARK_LOCAL_IP if you need to bind to another address")
    }
    address
  }
}
// Overrides the reported hostname when set, either via the SPARK_LOCAL_HOSTNAME
// environment variable or via setCustomHostname below.
private var customHostname: Option[String] = sys.env.get("SPARK_LOCAL_HOSTNAME")
/**
 * Allow setting a custom host name because when we run on Mesos we need to use the same
 * hostname it reports to the master.
 */
def setCustomHostname(hostname: String): Unit = {
  // DEBUG code
  // Validates the value eagerly; checkHost asserts on malformed host strings.
  Utils.checkHost(hostname)
  customHostname = Some(hostname)
}
/**
 * Get the local machine's FQDN.
 * Prefers the user-supplied hostname (SPARK_LOCAL_HOSTNAME / setCustomHostname) if present.
 */
def localCanonicalHostName(): String = {
  customHostname.getOrElse(localIpAddress.getCanonicalHostName)
}
/**
 * Get the local machine's hostname.
 * Note: despite the name, the fallback here is the numeric address string
 * (getHostAddress of localIpAddress), not a DNS name.
 */
def localHostName(): String = {
  customHostname.getOrElse(localIpAddress.getHostAddress)
}
/**
 * Get the local machine's URI.
 */
// InetAddresses.toUriString (Guava) produces a URI-safe host string — presumably
// bracketing IPv6 literals; confirm against the Guava docs if relied upon.
def localHostNameForURI(): String = {
  customHostname.getOrElse(InetAddresses.toUriString(localIpAddress))
}
/**
 * Checks if the host contains only valid hostname/ip without port
 * NOTE: Incase of IPV6 ip it should be enclosed inside []
 */
def checkHost(host: String): Unit = {
  // More than one ':' means an IPv6 literal; it must be bracketed and port-free.
  val isIpV6Form = host != null && host.split(":").length > 2
  if (isIpV6Form) {
    assert(host.startsWith("[") && host.endsWith("]"),
      s"Expected hostname or IPv6 IP enclosed in [] but got $host")
  } else {
    // At most one ':' — a plain hostname/IPv4 must contain no ':' at all.
    assert(host != null && host.indexOf(':') == -1, s"Expected hostname or IP but got $host")
  }
}
/**
 * Checks that `hostPort` contains both a host and a port.
 * NOTE: an IPv6 host must be enclosed in [] (e.g. "[::1]:8080").
 */
def checkHostPort(hostPort: String): Unit = {
  // More than one ':' means an IPv6 literal; the port must follow "]:" then.
  val looksIpV6 = hostPort != null && hostPort.split(":").length > 2
  if (looksIpV6) {
    assert(hostPort != null && hostPort.indexOf("]:") != -1,
      s"Expected host and port but got $hostPort")
  } else {
    assert(hostPort != null && hostPort.indexOf(':') != -1,
      s"Expected host and port but got $hostPort")
  }
}
// Typically, this will be of order of number of nodes in cluster
// If not, we should change it to LRUCache or something.
// Unbounded memo cache of parsed (host, port) pairs keyed by the raw "host:port" string.
private val hostPortParseResults = new ConcurrentHashMap[String, (String, Int)]()
  /**
   * Parse a "host:port" (or "[ipv6]:port") string into a (host, port) pair.
   * A string without a usable separator yields port 0. Results are memoized in
   * `hostPortParseResults`; throws NumberFormatException if the port text is not an integer.
   */
  def parseHostPort(hostPort: String): (String, Int) = {
    // Check cache first.
    val cached = hostPortParseResults.get(hostPort)
    if (cached != null) {
      return cached
    }
    // Fallback when no separator is found: the whole string is the host, port defaults to 0.
    def setDefaultPortValue: (String, Int) = {
      val retval = (hostPort, 0)
      hostPortParseResults.put(hostPort, retval)
      retval
    }
    // checks if the hostport contains IPV6 ip and parses the host, port
    if (hostPort != null && hostPort.split(":").length > 2) {
      val index: Int = hostPort.lastIndexOf("]:")
      if (-1 == index) {
        return setDefaultPortValue
      }
      val port = hostPort.substring(index + 2).trim()
      val retval = (hostPort.substring(0, index + 1).trim(), if (port.isEmpty) 0 else port.toInt)
      // putIfAbsent keeps the first writer's value if another thread raced us; re-read below.
      hostPortParseResults.putIfAbsent(hostPort, retval)
    } else {
      val index: Int = hostPort.lastIndexOf(':')
      if (-1 == index) {
        return setDefaultPortValue
      }
      val port = hostPort.substring(index + 1).trim()
      val retval = (hostPort.substring(0, index).trim(), if (port.isEmpty) 0 else port.toInt)
      hostPortParseResults.putIfAbsent(hostPort, retval)
    }
    // Return whatever value won the race and landed in the cache.
    hostPortParseResults.get(hostPort)
  }
/**
* Return the string to tell how long has passed in milliseconds.
* @param startTimeNs - a timestamp in nanoseconds returned by `System.nanoTime`.
*/
def getUsedTimeNs(startTimeNs: Long): String = {
s"${TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeNs)} ms"
}
  /**
   * Lists files recursively.
   * Performs an iterative breadth-first walk of `f` (which must be a directory) and
   * returns every file and directory found beneath it, excluding `f` itself.
   * NOTE(review): symlinked directories are followed here — confirm callers expect that.
   */
  def recursiveList(f: File): Array[File] = {
    require(f.isDirectory)
    // Seed with the direct children; directories still to visit go in dirList.
    val result = f.listFiles.toBuffer
    val dirList = result.filter(_.isDirectory)
    while (dirList.nonEmpty) {
      val curDir = dirList.remove(0)
      val files = curDir.listFiles()
      result ++= files
      dirList ++= files.filter(_.isDirectory)
    }
    result.toArray
  }
  /**
   * Delete a file or directory and its contents recursively.
   * Don't follow directories if they are symlinks.
   * Throws an exception if deletion is unsuccessful.
   * Also deregisters the path from the shutdown-hook delete list so it is not
   * deleted a second time at JVM exit.
   */
  def deleteRecursively(file: File): Unit = {
    if (file != null) {
      JavaUtils.deleteRecursively(file)
      ShutdownHookManager.removeShutdownDeleteDir(file)
    }
  }
  /**
   * Determines if a directory contains any files newer than cutoff seconds.
   *
   * @param dir must be the path to a directory, or IllegalArgumentException is thrown
   * @param cutoff measured in seconds. Returns true if there are any files or directories in the
   *               given directory whose last modified time is later than this many seconds ago
   */
  def doesDirectoryContainAnyNewFiles(dir: File, cutoff: Long): Boolean = {
    if (!dir.isDirectory) {
      throw new IllegalArgumentException(s"$dir is not a directory!")
    }
    val filesAndDirs = dir.listFiles()
    val cutoffTimeInMillis = System.currentTimeMillis - (cutoff * 1000)
    // Short-circuits: checks direct children first, then recurses into subdirectories.
    filesAndDirs.exists(_.lastModified() > cutoffTimeInMillis) ||
    filesAndDirs.filter(_.isDirectory).exists(
      subdir => doesDirectoryContainAnyNewFiles(subdir, cutoff)
    )
  }
  /**
   * Convert a time parameter such as (50s, 100ms, or 250us) to milliseconds for internal use. If
   * no suffix is provided, the passed number is assumed to be in ms.
   * Delegates to `JavaUtils.timeStringAsMs` for parsing.
   */
  def timeStringAsMs(str: String): Long = {
    JavaUtils.timeStringAsMs(str)
  }
  /**
   * Convert a time parameter such as (50s, 100ms, or 250us) to seconds for internal use. If
   * no suffix is provided, the passed number is assumed to be in seconds.
   * Delegates to `JavaUtils.timeStringAsSec` for parsing.
   */
  def timeStringAsSeconds(str: String): Long = {
    JavaUtils.timeStringAsSec(str)
  }
  /**
   * Convert a passed byte string (e.g. 50b, 100k, or 250m) to bytes for internal use.
   *
   * If no suffix is provided, the passed number is assumed to be in bytes.
   * Delegates to `JavaUtils.byteStringAsBytes` for parsing.
   */
  def byteStringAsBytes(str: String): Long = {
    JavaUtils.byteStringAsBytes(str)
  }
  /**
   * Convert a passed byte string (e.g. 50b, 100k, or 250m) to kibibytes for internal use.
   *
   * If no suffix is provided, the passed number is assumed to be in kibibytes.
   * Delegates to `JavaUtils.byteStringAsKb` for parsing.
   */
  def byteStringAsKb(str: String): Long = {
    JavaUtils.byteStringAsKb(str)
  }
  /**
   * Convert a passed byte string (e.g. 50b, 100k, or 250m) to mebibytes for internal use.
   *
   * If no suffix is provided, the passed number is assumed to be in mebibytes.
   * Delegates to `JavaUtils.byteStringAsMb` for parsing.
   */
  def byteStringAsMb(str: String): Long = {
    JavaUtils.byteStringAsMb(str)
  }
  /**
   * Convert a passed byte string (e.g. 50b, 100k, or 250m, 500g) to gibibytes for internal use.
   *
   * If no suffix is provided, the passed number is assumed to be in gibibytes.
   * Delegates to `JavaUtils.byteStringAsGb` for parsing.
   */
  def byteStringAsGb(str: String): Long = {
    JavaUtils.byteStringAsGb(str)
  }
  /**
   * Convert a Java memory parameter passed to -Xmx (such as 300m or 1g) to a number of mebibytes.
   * Truncates (rounds toward zero) when converting to Int.
   */
  def memoryStringToMb(str: String): Int = {
    // Convert to bytes, rather than directly to MiB, because when no units are specified the unit
    // is assumed to be bytes
    (JavaUtils.byteStringAsBytes(str) / 1024 / 1024).toInt
  }
  /**
   * Convert a quantity in bytes to a human-readable string such as "4.0 MiB".
   * Thin overload that widens to BigInt and delegates to `bytesToString(BigInt)`.
   */
  def bytesToString(size: Long): String = bytesToString(BigInt(size))
def bytesToString(size: BigInt): String = {
val EiB = 1L << 60
val PiB = 1L << 50
val TiB = 1L << 40
val GiB = 1L << 30
val MiB = 1L << 20
val KiB = 1L << 10
if (size >= BigInt(1L << 11) * EiB) {
// The number is too large, show it in scientific notation.
BigDecimal(size, new MathContext(3, RoundingMode.HALF_UP)).toString() + " B"
} else {
val (value, unit) = {
if (size >= 2 * EiB) {
(BigDecimal(size) / EiB, "EiB")
} else if (size >= 2 * PiB) {
(BigDecimal(size) / PiB, "PiB")
} else if (size >= 2 * TiB) {
(BigDecimal(size) / TiB, "TiB")
} else if (size >= 2 * GiB) {
(BigDecimal(size) / GiB, "GiB")
} else if (size >= 2 * MiB) {
(BigDecimal(size) / MiB, "MiB")
} else if (size >= 2 * KiB) {
(BigDecimal(size) / KiB, "KiB")
} else {
(BigDecimal(size), "B")
}
}
"%.1f %s".formatLocal(Locale.US, value, unit)
}
}
/**
* Returns a human-readable string representing a duration such as "35ms"
*/
def msDurationToString(ms: Long): String = {
val second = 1000
val minute = 60 * second
val hour = 60 * minute
val locale = Locale.US
ms match {
case t if t < second =>
"%d ms".formatLocal(locale, t)
case t if t < minute =>
"%.1f s".formatLocal(locale, t.toFloat / second)
case t if t < hour =>
"%.1f m".formatLocal(locale, t.toFloat / minute)
case t =>
"%.2f h".formatLocal(locale, t.toFloat / hour)
}
}
  /**
   * Convert a quantity in megabytes to a human-readable string such as "4.0 MiB".
   * Multiplies by 1024*1024 and delegates to `bytesToString(Long)`.
   */
  def megabytesToString(megabytes: Long): String = {
    bytesToString(megabytes * 1024L * 1024L)
  }
  /**
   * Execute a command and return the process running the command.
   *
   * @param command the executable and its arguments
   * @param workingDir working directory for the child process (defaults to ".")
   * @param extraEnvironment entries merged into the child's inherited environment
   * @param redirectStderr when true, a daemon thread drains the child's stderr into logInfo,
   *                       preventing the pipe buffer from filling and blocking the child
   */
  def executeCommand(
      command: Seq[String],
      workingDir: File = new File("."),
      extraEnvironment: Map[String, String] = Map.empty,
      redirectStderr: Boolean = true): Process = {
    val builder = new ProcessBuilder(command: _*).directory(workingDir)
    val environment = builder.environment()
    for ((key, value) <- extraEnvironment) {
      environment.put(key, value)
    }
    val process = builder.start()
    if (redirectStderr) {
      val threadName = "redirect stderr for command " + command(0)
      def log(s: String): Unit = logInfo(s)
      processStreamByLine(threadName, process.getErrorStream, log)
    }
    process
  }
  /**
   * Execute a command and get its output, throwing an exception if it yields a code other than 0.
   * Stdout is accumulated line by line on a reader thread; the reader is joined after
   * the process exits so no trailing output is lost.
   *
   * @throws SparkException if the process exits with a non-zero code
   */
  def executeAndGetOutput(
      command: Seq[String],
      workingDir: File = new File("."),
      extraEnvironment: Map[String, String] = Map.empty,
      redirectStderr: Boolean = true): String = {
    val process = executeCommand(command, workingDir, extraEnvironment, redirectStderr)
    val output = new StringBuilder
    val threadName = "read stdout for " + command(0)
    def appendToOutput(s: String): Unit = output.append(s).append("\\n")
    val stdoutThread = processStreamByLine(threadName, process.getInputStream, appendToOutput)
    val exitCode = process.waitFor()
    stdoutThread.join()   // Wait for it to finish reading output
    if (exitCode != 0) {
      logError(s"Process $command exited with code $exitCode: $output")
      throw new SparkException(s"Process $command exited with code $exitCode")
    }
    output.toString
  }
  /**
   * Return and start a daemon thread that processes the content of the input stream line by line.
   * The thread runs until the stream is exhausted; callers may join() it to wait for EOF.
   *
   * @param threadName name assigned to the spawned thread (useful in thread dumps)
   * @param inputStream stream to drain; decoded with Source's default charset
   * @param processLine callback invoked once per line, on the spawned thread
   */
  def processStreamByLine(
      threadName: String,
      inputStream: InputStream,
      processLine: String => Unit): Thread = {
    val t = new Thread(threadName) {
      override def run(): Unit = {
        for (line <- Source.fromInputStream(inputStream).getLines()) {
          processLine(line)
        }
      }
    }
    t.setDaemon(true)
    t.start()
    t
  }
  /**
   * Execute a block of code that evaluates to Unit, forwarding any uncaught exceptions to the
   * default UncaughtExceptionHandler
   *
   * NOTE: This method is to be called by the spark-started JVM process.
   */
  def tryOrExit(block: => Unit): Unit = {
    try {
      block
    } catch {
      // ControlThrowable is flow control (e.g. scala.util.control.Breaks), not an error.
      case e: ControlThrowable => throw e
      // Hand everything else to the handler, which may terminate the JVM.
      case t: Throwable => sparkUncaughtExceptionHandler.uncaughtException(t)
    }
  }
  /**
   * Execute a block of code that evaluates to Unit, stop SparkContext if there is any uncaught
   * exception
   *
   * NOTE: This method is to be called by the driver-side components to avoid stopping the
   * user-started JVM process completely; in contrast, tryOrExit is to be called in the
   * spark-started JVM process .
   */
  def tryOrStopSparkContext(sc: SparkContext)(block: => Unit): Unit = {
    try {
      block
    } catch {
      // ControlThrowable is flow control, not a failure; let it propagate untouched.
      case e: ControlThrowable => throw e
      case t: Throwable =>
        val currentThreadName = Thread.currentThread().getName
        if (sc != null) {
          logError(s"uncaught error in thread $currentThreadName, stopping SparkContext", t)
          // Stop on a separate thread: the current thread may hold locks stop() needs.
          sc.stopInNewThread()
        }
        if (!NonFatal(t)) {
          // Fatal errors (OOM, etc.) are rethrown after the context is shut down.
          logError(s"throw uncaught fatal error in thread $currentThreadName", t)
          throw t
        }
    }
  }
  /**
   * Execute a block of code that returns a value, re-throwing any non-fatal uncaught
   * exceptions as IOException. This is used when implementing Externalizable and Serializable's
   * read and write methods, since Java's serializer will not report non-IOExceptions properly;
   * see SPARK-4080 for more context.
   */
  def tryOrIOException[T](block: => T): T = {
    try {
      block
    } catch {
      // Already an IOException: log and rethrow unchanged.
      case e: IOException =>
        logError("Exception encountered", e)
        throw e
      // Any other non-fatal error is wrapped so Java serialization reports it.
      case NonFatal(e) =>
        logError("Exception encountered", e)
        throw new IOException(e)
    }
  }
  /** Executes the given block. Log non-fatal errors if any, and only throw fatal errors */
  def tryLogNonFatalError(block: => Unit): Unit = {
    try {
      block
    } catch {
      // Fatal errors (OOM, LinkageError, ...) fall through and propagate.
      case NonFatal(t) =>
        logError(s"Uncaught exception in thread ${Thread.currentThread().getName}", t)
    }
  }
  /**
   * Execute a block of code, then a finally block, but if exceptions happen in
   * the finally block, do not suppress the original exception.
   *
   * This is primarily an issue with `finally { out.close() }` blocks, where
   * close needs to be called to clean up `out`, but if an exception happened
   * in `out.write`, it's likely `out` may be corrupted and `out.close` will
   * fail as well. This would then suppress the original/likely more meaningful
   * exception from the original `out.write` call.
   */
  def tryWithSafeFinally[T](block: => T)(finallyBlock: => Unit): T = {
    // Remembers the exception (if any) thrown by `block`, so the finally clause
    // can attach its own failure as suppressed instead of replacing it.
    var originalThrowable: Throwable = null
    try {
      block
    } catch {
      case t: Throwable =>
        // Purposefully not using NonFatal, because even fatal exceptions
        // we don't want to have our finallyBlock suppress
        originalThrowable = t
        throw originalThrowable
    } finally {
      try {
        finallyBlock
      } catch {
        // Only intercept the finally failure when a distinct block failure exists;
        // otherwise the finally exception propagates normally.
        case t: Throwable if (originalThrowable != null && originalThrowable != t) =>
          originalThrowable.addSuppressed(t)
          logWarning(s"Suppressing exception in finally: ${t.getMessage}", t)
          throw originalThrowable
      }
    }
  }
  /**
   * Execute a block of code and call the failure callbacks in the catch block. If exceptions occur
   * in either the catch or the finally block, they are appended to the list of suppressed
   * exceptions in original exception which is then rethrown.
   *
   * This is primarily an issue with `catch { abort() }` or `finally { out.close() }` blocks,
   * where the abort/close needs to be called to clean up `out`, but if an exception happened
   * in `out.write`, it's likely `out` may be corrupted and `abort` or `out.close` will
   * fail as well. This would then suppress the original/likely more meaningful
   * exception from the original `out.write` call.
   */
  def tryWithSafeFinallyAndFailureCallbacks[T](block: => T)
      (catchBlock: => Unit = (), finallyBlock: => Unit = ()): T = {
    // Remembers the exception (if any) thrown by `block`, so later failures in the
    // catch/finally clauses attach as suppressed instead of replacing it.
    var originalThrowable: Throwable = null
    try {
      block
    } catch {
      case cause: Throwable =>
        // Purposefully not using NonFatal, because even fatal exceptions
        // we don't want to have our finallyBlock suppress
        originalThrowable = cause
        try {
          logError("Aborting task", originalThrowable)
          // Mark the current task as failed (if running inside a task) before user cleanup.
          if (TaskContext.get() != null) {
            TaskContext.get().markTaskFailed(originalThrowable)
          }
          catchBlock
        } catch {
          case t: Throwable =>
            if (originalThrowable != t) {
              originalThrowable.addSuppressed(t)
              logWarning(s"Suppressing exception in catch: ${t.getMessage}", t)
            }
        }
        throw originalThrowable
    } finally {
      try {
        finallyBlock
      } catch {
        // Same suppression rule as tryWithSafeFinally: keep the original exception primary.
        case t: Throwable if (originalThrowable != null && originalThrowable != t) =>
          originalThrowable.addSuppressed(t)
          logWarning(s"Suppressing exception in finally: ${t.getMessage}", t)
          throw originalThrowable
      }
    }
  }
  // A regular expression to match classes of the internal Spark API's
  // that we want to skip when finding the call site of a method.
  // Matches e.g. org.apache.spark.rdd.RDD, org.apache.spark.api.java.JavaRDD, etc.
  private val SPARK_CORE_CLASS_REGEX =
    """^org\\.apache\\.spark(\\.api\\.java)?(\\.util)?(\\.rdd)?(\\.broadcast)?\\.[A-Z]""".r
  // Matches anything under org.apache.spark.sql.
  private val SPARK_SQL_CLASS_REGEX = """^org\\.apache\\.spark\\.sql.*""".r
/** Default filtering function for finding call sites using `getCallSite`. */
private def sparkInternalExclusionFunction(className: String): Boolean = {
val SCALA_CORE_CLASS_PREFIX = "scala"
val isSparkClass = SPARK_CORE_CLASS_REGEX.findFirstIn(className).isDefined ||
SPARK_SQL_CLASS_REGEX.findFirstIn(className).isDefined
val isScalaClass = className.startsWith(SCALA_CORE_CLASS_PREFIX)
// If the class is a Spark internal class or a Scala class, then exclude.
isSparkClass || isScalaClass
}
  /**
   * When called inside a class in the spark package, returns the name of the user code class
   * (outside the spark package) that called into Spark, as well as which Spark method they called.
   * This is used, for example, to tell users where in their code each RDD got created.
   *
   * The long form's depth is capped by the "spark.callstack.depth" system property (default 20).
   *
   * @param skipClass Function that is used to exclude non-user-code classes.
   */
  def getCallSite(skipClass: String => Boolean = sparkInternalExclusionFunction): CallSite = {
    // Keep crawling up the stack trace until we find the first function not inside of the spark
    // package. We track the last (shallowest) contiguous Spark method. This might be an RDD
    // transformation, a SparkContext function (such as parallelize), or anything else that leads
    // to instantiation of an RDD. We also track the first (deepest) user method, file, and line.
    var lastSparkMethod = "<unknown>"
    var firstUserFile = "<unknown>"
    var firstUserLine = 0
    var insideSpark = true
    // Slot 0 is reserved for the shallowest Spark frame, filled in while scanning below.
    val callStack = new ArrayBuffer[String]() :+ "<unknown>"
    Thread.currentThread.getStackTrace().foreach { ste: StackTraceElement =>
      // When running under some profilers, the current stack trace might contain some bogus
      // frames. This is intended to ensure that we don't crash in these situations by
      // ignoring any frames that we can't examine.
      if (ste != null && ste.getMethodName != null
        && !ste.getMethodName.contains("getStackTrace")) {
        if (insideSpark) {
          if (skipClass(ste.getClassName)) {
            lastSparkMethod = if (ste.getMethodName == "<init>") {
              // Spark method is a constructor; get its class name
              ste.getClassName.substring(ste.getClassName.lastIndexOf('.') + 1)
            } else {
              ste.getMethodName
            }
            callStack(0) = ste.toString // Put last Spark method on top of the stack trace.
          } else {
            // First frame outside Spark: record the user file/line and stop skipping.
            if (ste.getFileName != null) {
              firstUserFile = ste.getFileName
              if (ste.getLineNumber >= 0) {
                firstUserLine = ste.getLineNumber
              }
            }
            callStack += ste.toString
            insideSpark = false
          }
        } else {
          callStack += ste.toString
        }
      }
    }
    val callStackDepth = System.getProperty("spark.callstack.depth", "20").toInt
    val shortForm =
      if (firstUserFile == "HiveSessionImpl.java") {
        // To be more user friendly, show a nicer string for queries submitted from the JDBC
        // server.
        "Spark JDBC Server Query"
      } else {
        s"$lastSparkMethod at $firstUserFile:$firstUserLine"
      }
    val longForm = callStack.take(callStackDepth).mkString("\\n")
    CallSite(shortForm, longForm)
  }
  // Lazily-created cache of compressed log path -> uncompressed length; guarded by
  // this.synchronized in the getter below.
  private var compressedLogFileLengthCache: LoadingCache[String, java.lang.Long] = null
  // Returns the singleton cache, creating it on first use with the size configured by
  // UNCOMPRESSED_LOG_FILE_LENGTH_CACHE_SIZE_CONF.
  private def getCompressedLogFileLengthCache(
      sparkConf: SparkConf): LoadingCache[String, java.lang.Long] = this.synchronized {
    if (compressedLogFileLengthCache == null) {
      val compressedLogFileLengthCacheSize = sparkConf.get(
        UNCOMPRESSED_LOG_FILE_LENGTH_CACHE_SIZE_CONF)
      compressedLogFileLengthCache = CacheBuilder.newBuilder()
        .maximumSize(compressedLogFileLengthCacheSize)
        .build[String, java.lang.Long](new CacheLoader[String, java.lang.Long]() {
        override def load(path: String): java.lang.Long = {
          Utils.getCompressedFileLength(new File(path))
        }
      })
    }
    compressedLogFileLengthCache
  }
  /**
   * Return the file length, if the file is compressed it returns the uncompressed file length.
   * It also caches the uncompressed file size to avoid repeated decompression. The cache size is
   * read from workerConf.
   * Only ".gz" files are treated as compressed; all other files return their raw length.
   */
  def getFileLength(file: File, workConf: SparkConf): Long = {
    if (file.getName.endsWith(".gz")) {
      getCompressedLogFileLengthCache(workConf).get(file.getAbsolutePath)
    } else {
      file.length
    }
  }
  /** Return uncompressed file length of a compressed file.
   *  Streams the whole file through a GZIPInputStream and counts the bytes, so cost is
   *  proportional to the uncompressed size. The stream is always closed, even on error.
   */
  private def getCompressedFileLength(file: File): Long = {
    var gzInputStream: GZIPInputStream = null
    try {
      // Uncompress .gz file to determine file size.
      var fileSize = 0L
      gzInputStream = new GZIPInputStream(new FileInputStream(file))
      val bufSize = 1024
      val buf = new Array[Byte](bufSize)
      var numBytes = ByteStreams.read(gzInputStream, buf, 0, bufSize)
      while (numBytes > 0) {
        fileSize += numBytes
        numBytes = ByteStreams.read(gzInputStream, buf, 0, bufSize)
      }
      fileSize
    } catch {
      case e: Throwable =>
        logError(s"Cannot get file length of ${file}", e)
        throw e
    } finally {
      if (gzInputStream != null) {
        gzInputStream.close()
      }
    }
  }
  /** Return a string containing part of a file from byte 'start' to 'end'.
   *  `start` is clamped to 0 and `end` to `length`; ".gz" paths are read through
   *  a GZIPInputStream so offsets apply to the uncompressed content.
   */
  def offsetBytes(path: String, length: Long, start: Long, end: Long): String = {
    val file = new File(path)
    val effectiveEnd = math.min(length, end)
    val effectiveStart = math.max(0, start)
    val buff = new Array[Byte]((effectiveEnd-effectiveStart).toInt)
    val stream = if (path.endsWith(".gz")) {
      new GZIPInputStream(new FileInputStream(file))
    } else {
      new FileInputStream(file)
    }
    try {
      ByteStreams.skipFully(stream, effectiveStart)
      ByteStreams.readFully(stream, buff)
    } finally {
      stream.close()
    }
    Source.fromBytes(buff).mkString
  }
  /**
   * Return a string containing data across a set of files. The `startIndex`
   * and `endIndex` is based on the cumulative size of all the files take in
   * the given order. See figure below for more details.
   * NOTE(review): `fileToLength` is a Map keyed by File, so duplicate files in `files`
   * would collapse to one length entry — assumes the file list has no duplicates.
   */
  def offsetBytes(files: Seq[File], fileLengths: Seq[Long], start: Long, end: Long): String = {
    assert(files.length == fileLengths.length)
    val startIndex = math.max(start, 0)
    val endIndex = math.min(end, fileLengths.sum)
    val fileToLength = files.zip(fileLengths).toMap
    logDebug("Log files: \\n" + fileToLength.mkString("\\n"))
    val stringBuffer = new StringBuffer((endIndex - startIndex).toInt)
    var sum = 0L
    files.zip(fileLengths).foreach { case (file, fileLength) =>
      val startIndexOfFile = sum
      val endIndexOfFile = sum + fileToLength(file)
      logDebug(s"Processing file $file, " +
        s"with start index = $startIndexOfFile, end index = $endIndex")
      /*
                                      ____________
       range 1:                      |            |
                                     |   case A   |
       files:   |==== file 1 ====|====== file 2 ======|===== file 3 =====|
                     |   case B  .       case C       .    case D    |
       range 2:      |___________.____________________.______________|
       */
      if (startIndex <= startIndexOfFile && endIndex >= endIndexOfFile) {
        // Case C: read the whole file
        stringBuffer.append(offsetBytes(file.getAbsolutePath, fileLength, 0, fileToLength(file)))
      } else if (startIndex > startIndexOfFile && startIndex < endIndexOfFile) {
        // Case A and B: read from [start of required range] to [end of file / end of range]
        val effectiveStartIndex = startIndex - startIndexOfFile
        val effectiveEndIndex = math.min(endIndex - startIndexOfFile, fileToLength(file))
        stringBuffer.append(Utils.offsetBytes(
          file.getAbsolutePath, fileLength, effectiveStartIndex, effectiveEndIndex))
      } else if (endIndex > startIndexOfFile && endIndex < endIndexOfFile) {
        // Case D: read from [start of file] to [end of require range]
        val effectiveStartIndex = math.max(startIndex - startIndexOfFile, 0)
        val effectiveEndIndex = endIndex - startIndexOfFile
        stringBuffer.append(Utils.offsetBytes(
          file.getAbsolutePath, fileLength, effectiveStartIndex, effectiveEndIndex))
      }
      sum += fileToLength(file)
      logDebug(s"After processing file $file, string built is ${stringBuffer.toString}")
    }
    stringBuffer.toString
  }
  /**
   * Clone an object using a Spark serializer.
   * Deep-copies `value` by a serialize/deserialize round trip; the ClassTag is needed
   * by the serializer to reconstruct the value's runtime type.
   */
  def clone[T: ClassTag](value: T, serializer: SerializerInstance): T = {
    serializer.deserialize[T](serializer.serialize(value))
  }
  // True if `c` is one of the whitespace characters recognized by splitCommandString
  // (space, tab, carriage return, newline).
  private def isSpace(c: Char): Boolean = {
    " \\t\\r\\n".indexOf(c) != -1
  }
  /**
   * Split a string of potentially quoted arguments from the command line the way that a shell
   * would do it to determine arguments to a command. For example, if the string is 'a "b c" d',
   * then it would be parsed as three arguments: 'a', 'b c' and 'd'.
   *
   * Implemented as a character-by-character state machine over three flags:
   * inWord / inSingleQuote / inDoubleQuote.
   */
  def splitCommandString(s: String): Seq[String] = {
    val buf = new ArrayBuffer[String]
    var inWord = false
    var inSingleQuote = false
    var inDoubleQuote = false
    val curWord = new StringBuilder
    // Flush the accumulated characters as one argument.
    def endWord(): Unit = {
      buf += curWord.toString
      curWord.clear()
    }
    var i = 0
    while (i < s.length) {
      val nextChar = s.charAt(i)
      if (inDoubleQuote) {
        if (nextChar == '"') {
          inDoubleQuote = false
        } else if (nextChar == '\\\\') {
          if (i < s.length - 1) {
            // Append the next character directly, because only " and \\ may be escaped in
            // double quotes after the shell's own expansion
            curWord.append(s.charAt(i + 1))
            i += 1
          }
        } else {
          curWord.append(nextChar)
        }
      } else if (inSingleQuote) {
        if (nextChar == '\\'') {
          inSingleQuote = false
        } else {
          curWord.append(nextChar)
        }
        // Backslashes are not treated specially in single quotes
      } else if (nextChar == '"') {
        inWord = true
        inDoubleQuote = true
      } else if (nextChar == '\\'') {
        inWord = true
        inSingleQuote = true
      } else if (!isSpace(nextChar)) {
        curWord.append(nextChar)
        inWord = true
      } else if (inWord && isSpace(nextChar)) {
        endWord()
        inWord = false
      }
      i += 1
    }
    // Flush the final word (also tolerates an unterminated quote at end of input).
    if (inWord || inDoubleQuote || inSingleQuote) {
      endWord()
    }
    buf.toSeq
  }
/* Calculates 'x' modulo 'mod', takes to consideration sign of x,
* i.e. if 'x' is negative, than 'x' % 'mod' is negative too
* so function return (x % mod) + mod in that case.
*/
def nonNegativeMod(x: Int, mod: Int): Int = {
val rawMod = x % mod
rawMod + (if (rawMod < 0) mod else 0)
}
// Handles idiosyncrasies with hash (add more as required)
// This method should be kept in sync with
// org.apache.spark.network.util.JavaUtils#nonNegativeHash().
def nonNegativeHash(obj: AnyRef): Int = {
// Required ?
if (obj eq null) return 0
val hash = obj.hashCode
// math.abs fails for Int.MinValue
val hashAbs = if (Int.MinValue != hash) math.abs(hash) else 0
// Nothing else to guard against ?
hashAbs
}
/**
* Returns the system properties map that is thread-safe to iterator over. It gets the
* properties which have been set explicitly, as well as those for which only a default value
* has been defined.
*/
def getSystemProperties: Map[String, String] = {
System.getProperties.stringPropertyNames().asScala
.map(key => (key, System.getProperty(key))).toMap
}
  /**
   * Method executed for repeating a task for side effects.
   * Unlike a for comprehension, it permits JVM JIT optimization
   *
   * @param numIters number of times to evaluate `f`; values <= 0 mean `f` never runs
   * @param f by-name block, re-evaluated on each iteration
   */
  def times(numIters: Int)(f: => Unit): Unit = {
    var i = 0
    while (i < numIters) {
      f
      i += 1
    }
  }
  /**
   * Timing method based on iterations that permit JVM JIT optimization.
   *
   * @param numIters number of iterations
   * @param f function to be executed. If prepare is not None, the running time of each call to f
   *          must be an order of magnitude longer than one nanosecond for accurate timing.
   * @param prepare function to be executed before each call to f. Its running time doesn't count.
   * @return the total time across all iterations (not counting preparation time) in nanoseconds.
   */
  def timeIt(numIters: Int)(f: => Unit, prepare: Option[() => Unit] = None): Long = {
    if (prepare.isEmpty) {
      // Fast path: one timestamp pair around the whole loop.
      val startNs = System.nanoTime()
      times(numIters)(f)
      System.nanoTime() - startNs
    } else {
      // Slow path: time each call individually so prepare() is excluded.
      var i = 0
      var sum = 0L
      while (i < numIters) {
        prepare.get.apply()
        val startNs = System.nanoTime()
        f
        sum += System.nanoTime() - startNs
        i += 1
      }
      sum
    }
  }
  /**
   * Counts the number of elements of an iterator using a while loop rather than calling
   * [[scala.collection.Iterator#size]] because it uses a for loop, which is slightly slower
   * in the current version of Scala.
   * NOTE: this consumes the iterator.
   */
  def getIteratorSize(iterator: Iterator[_]): Long = {
    var count = 0L
    while (iterator.hasNext) {
      count += 1L
      iterator.next()
    }
    count
  }
/**
* Generate a zipWithIndex iterator, avoid index value overflowing problem
* in scala's zipWithIndex
*/
def getIteratorZipWithIndex[T](iter: Iterator[T], startIndex: Long): Iterator[(T, Long)] = {
new Iterator[(T, Long)] {
require(startIndex >= 0, "startIndex should be >= 0.")
var index: Long = startIndex - 1L
def hasNext: Boolean = iter.hasNext
def next(): (T, Long) = {
index += 1L
(iter.next(), index)
}
}
}
  /**
   * Creates a symlink.
   *
   * @param src absolute path to the source
   * @param dst relative path for the destination
   * @throws IOException if `src` is not absolute or `dst` is not relative
   *                     (or from Files.createSymbolicLink itself, e.g. if `dst` exists)
   */
  def symlink(src: File, dst: File): Unit = {
    if (!src.isAbsolute()) {
      throw new IOException("Source must be absolute")
    }
    if (dst.isAbsolute()) {
      throw new IOException("Destination must be relative")
    }
    // The link is created at `dst` (resolved against the working directory), pointing at `src`.
    Files.createSymbolicLink(dst.toPath, src.toPath)
  }
  /** Return the class name of the given object, removing all dollar signs
   *  (e.g. strips the trailing '$' from Scala object class names). */
  def getFormattedClassName(obj: AnyRef): String = {
    getSimpleName(obj.getClass).replace("$", "")
  }
  /**
   * Return a Hadoop FileSystem with the scheme encoded in the given path.
   * Delegates to Hadoop's FileSystem.get(URI, Configuration).
   */
  def getHadoopFileSystem(path: URI, conf: Configuration): FileSystem = {
    FileSystem.get(path, conf)
  }
  /**
   * Return a Hadoop FileSystem with the scheme encoded in the given path.
   * String overload; parses `path` as a URI and delegates to the URI overload.
   */
  def getHadoopFileSystem(path: String, conf: Configuration): FileSystem = {
    getHadoopFileSystem(new URI(path), conf)
  }
  /**
   * Whether the underlying operating system is Windows.
   */
  val isWindows = SystemUtils.IS_OS_WINDOWS
  /**
   * Whether the underlying operating system is Mac OS X.
   */
  val isMac = SystemUtils.IS_OS_MAC_OSX
  /**
   * Whether the underlying operating system is Mac OS X and processor is Apple Silicon.
   */
  val isMacOnAppleSilicon = SystemUtils.IS_OS_MAC_OSX && SystemUtils.OS_ARCH.equals("aarch64")
  /**
   * Pattern for matching a Windows drive, which contains only a single alphabet character.
   * Used e.g. by nonLocalPaths to recognize "C:"-style prefixes as local Windows paths.
   */
  val windowsDrive = "([a-zA-Z])".r
  /**
   * Indicates whether Spark is currently running unit tests.
   * True when either the SPARK_TESTING env var or the IS_TESTING system property is set.
   */
  def isTesting: Boolean = {
    // Scala's `sys.env` creates a ton of garbage by constructing Scala immutable maps, so
    // we directly use the Java APIs instead.
    System.getenv("SPARK_TESTING") != null || System.getProperty(IS_TESTING.key) != null
  }
  /**
   * Terminates a process waiting for at most the specified duration.
   * First tries a polite destroy(), then escalates to destroyForcibly(), waiting up to
   * `timeoutMs` after each attempt (so worst case is roughly 2 * timeoutMs).
   *
   * @return the process exit value if it was successfully terminated, else None
   */
  def terminateProcess(process: Process, timeoutMs: Long): Option[Int] = {
    // Politely destroy first
    process.destroy()
    if (process.waitFor(timeoutMs, TimeUnit.MILLISECONDS)) {
      // Successful exit
      Option(process.exitValue())
    } else {
      try {
        process.destroyForcibly()
      } catch {
        case NonFatal(e) => logWarning("Exception when attempting to kill process", e)
      }
      // Wait, again, although this really should return almost immediately
      if (process.waitFor(timeoutMs, TimeUnit.MILLISECONDS)) {
        Option(process.exitValue())
      } else {
        logWarning("Timed out waiting to forcibly kill process")
        None
      }
    }
  }
  /**
   * Return the stderr of a process after waiting for the process to terminate.
   * If the process does not terminate within the specified timeout, return None.
   * Lines are joined with a newline; any trailing newline from the process is dropped.
   */
  def getStderr(process: Process, timeoutMs: Long): Option[String] = {
    val terminated = process.waitFor(timeoutMs, TimeUnit.MILLISECONDS)
    if (terminated) {
      Some(Source.fromInputStream(process.getErrorStream).getLines().mkString("\\n"))
    } else {
      None
    }
  }
  /**
   * Execute the given block, logging and re-throwing any uncaught exception.
   * This is particularly useful for wrapping code that runs in a thread, to ensure
   * that exceptions are printed, and to avoid having to catch Throwable.
   */
  def logUncaughtExceptions[T](f: => T): T = {
    try {
      f
    } catch {
      // Flow-control throwables pass through without being logged as errors.
      case ct: ControlThrowable =>
        throw ct
      case t: Throwable =>
        logError(s"Uncaught exception in thread ${Thread.currentThread().getName}", t)
        throw t
    }
  }
  /** Executes the given block in a Try, logging any uncaught exceptions.
   *  Unlike Try(f), flow-control throwables (ControlThrowable) are rethrown
   *  rather than captured as Failure.
   */
  def tryLog[T](f: => T): Try[T] = {
    try {
      val res = f
      scala.util.Success(res)
    } catch {
      case ct: ControlThrowable =>
        throw ct
      case t: Throwable =>
        logError(s"Uncaught exception in thread ${Thread.currentThread().getName}", t)
        scala.util.Failure(t)
    }
  }
/** Returns true if the given exception was fatal. See docs for scala.util.control.NonFatal. */
def isFatalError(e: Throwable): Boolean = {
e match {
case NonFatal(_) |
_: InterruptedException |
_: NotImplementedError |
_: ControlThrowable |
_: LinkageError =>
false
case _ =>
true
}
}
  /**
   * Return a well-formed URI for the file described by a user input string.
   *
   * If the supplied path does not contain a scheme, or is a relative path, it will be
   * converted into an absolute path with a file:// scheme.
   */
  def resolveURI(path: String): URI = {
    try {
      val uri = new URI(path)
      if (uri.getScheme() != null) {
        return uri
      }
      // make sure to handle if the path has a fragment (applies to yarn
      // distributed cache)
      if (uri.getFragment() != null) {
        val absoluteURI = new File(uri.getPath()).getAbsoluteFile().toURI()
        // Note: this URI constructor can itself throw URISyntaxException, in which
        // case we fall through to the file-based fallback below.
        return new URI(absoluteURI.getScheme(), absoluteURI.getHost(), absoluteURI.getPath(),
          uri.getFragment())
      }
    } catch {
      // Unparseable input: treat it as a plain local file path.
      case e: URISyntaxException =>
    }
    new File(path).getCanonicalFile().toURI()
  }
/** Resolve a comma-separated list of paths. */
def resolveURIs(paths: String): String = {
if (paths == null || paths.trim.isEmpty) {
""
} else {
paths.split(",").filter(_.trim.nonEmpty).map { p => Utils.resolveURI(p) }.mkString(",")
}
}
/** Check whether a path is an absolute URI. */
def isAbsoluteURI(path: String): Boolean = {
try {
val uri = new URI(path: String)
uri.isAbsolute
} catch {
case _: URISyntaxException =>
false
}
}
  /** Return all non-local paths from a comma-separated list of paths.
   *  "local" and "file" schemes (and bare Windows drive letters like "C:" when on
   *  Windows or `testWindows` is set) are considered local and excluded.
   */
  def nonLocalPaths(paths: String, testWindows: Boolean = false): Array[String] = {
    val windows = isWindows || testWindows
    if (paths == null || paths.trim.isEmpty) {
      Array.empty
    } else {
      paths.split(",").filter { p =>
        val uri = resolveURI(p)
        // A missing scheme is treated as "file" (local).
        Option(uri.getScheme).getOrElse("file") match {
          case windowsDrive(d) if windows => false
          case "local" | "file" => false
          case _ => true
        }
      }
    }
  }
  /**
   * Load default Spark properties from the given file. If no file is provided,
   * use the common defaults file. This mutates state in the given SparkConf and
   * in this JVM's system properties if the config specified in the file is not
   * already set. Return the path of the properties file used.
   * Only keys starting with "spark." are applied; existing conf entries and
   * system properties always win over file values.
   */
  def loadDefaultSparkProperties(conf: SparkConf, filePath: String = null): String = {
    val path = Option(filePath).getOrElse(getDefaultPropertiesFile())
    Option(path).foreach { confFile =>
      getPropertiesFromFile(confFile).filter { case (k, v) =>
        k.startsWith("spark.")
      }.foreach { case (k, v) =>
        conf.setIfMissing(k, v)
        sys.props.getOrElseUpdate(k, v)
      }
    }
    path
  }
  /**
   * Updates Spark config with properties from a set of Properties.
   * Provided properties have the highest priority.
   * Only keys starting with "spark." are copied; unlike loadDefaultSparkProperties,
   * existing conf entries are overwritten (conf.set, not setIfMissing).
   */
  def updateSparkConfigFromProperties(
      conf: SparkConf,
      properties: Map[String, String]) : Unit = {
    properties.filter { case (k, v) =>
      k.startsWith("spark.")
    }.foreach { case (k, v) =>
      conf.set(k, v)
    }
  }
  /**
   * Implements the same logic as JDK `java.lang.String#trim` by removing leading and trailing
   * non-printable characters less or equal to '\\u0020' (SPACE) but preserves natural line
   * delimiters according to [[java.util.Properties]] load method. The natural line delimiters are
   * removed by JDK during load. Therefore any remaining ones have been specifically provided and
   * escaped by the user, and must not be ignored
   *
   * @param str the raw property value to trim
   * @return the trimmed value of str; empty string if str contains no kept characters
   */
  private[util] def trimExceptCRLF(str: String): String = {
    // A character is kept at the edges if it is printable (> space) or a CR/LF.
    val nonSpaceOrNaturalLineDelimiter: Char => Boolean = { ch =>
      ch > ' ' || ch == '\\r' || ch == '\\n'
    }
    val firstPos = str.indexWhere(nonSpaceOrNaturalLineDelimiter)
    val lastPos = str.lastIndexWhere(nonSpaceOrNaturalLineDelimiter)
    if (firstPos >= 0 && lastPos >= 0) {
      str.substring(firstPos, lastPos + 1)
    } else {
      ""
    }
  }
  /** Load properties present in the given file.
   *  Reads the file as UTF-8 java.util.Properties; values are trimmed with
   *  trimExceptCRLF (CR/LF kept, other whitespace stripped at the edges).
   *
   *  @throws IllegalArgumentException if the file is missing or not a regular file
   *  @throws SparkException if reading the file fails
   */
  def getPropertiesFromFile(filename: String): Map[String, String] = {
    val file = new File(filename)
    require(file.exists(), s"Properties file $file does not exist")
    require(file.isFile(), s"Properties file $file is not a normal file")
    val inReader = new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8)
    try {
      val properties = new Properties()
      properties.load(inReader)
      properties.stringPropertyNames().asScala
        .map { k => (k, trimExceptCRLF(properties.getProperty(k))) }
        .toMap
    } catch {
      case e: IOException =>
        throw new SparkException(s"Failed when loading Spark properties from $filename", e)
    } finally {
      inReader.close()
    }
  }
/** Return the path of the default Spark properties file, or null if none can be found. */
def getDefaultPropertiesFile(env: Map[String, String] = sys.env): String = {
  // SPARK_CONF_DIR takes precedence; otherwise fall back to $SPARK_HOME/conf.
  val confDir = env.get("SPARK_CONF_DIR")
    .orElse(env.get("SPARK_HOME").map(home => s"$home${File.separator}conf"))
  confDir
    .map(dir => new File(s"$dir${File.separator}spark-defaults.conf"))
    .filter(_.isFile)
    .map(_.getAbsolutePath)
    .orNull
}
/**
 * Return a nice string representation of the exception. It will call "printStackTrace" to
 * recursively generate the stack trace including the exception and its causes.
 */
def exceptionString(e: Throwable): String = {
  Option(e).fold("") { t =>
    // printStackTrace (unlike getStackTrace) renders the entire cause chain.
    val writer = new StringWriter()
    t.printStackTrace(new PrintWriter(writer))
    writer.toString
  }
}
/**
 * Enriches [[java.lang.management.LockInfo]] with a compact human-readable rendering used in
 * thread-dump output, e.g. "Monitor(java.lang.Object@12345678)".
 */
private implicit class Lock(lock: LockInfo) {
  /** Render this lock as "Monitor(class@hash)" or "Lock(class@hash)". */
  def lockString: String = {
    lock match {
      case _: MonitorInfo =>
        // Bug fix: a stray '}' used to be emitted after the identity hash code.
        s"Monitor(${lock.getClassName}@${lock.getIdentityHashCode})"
      case _ =>
        s"Lock(${lock.getClassName}@${lock.getIdentityHashCode})"
    }
  }
}
/** Return a thread dump of all threads' stacktraces. Used to capture dumps for the web UI */
def getThreadDump(): Array[ThreadStackTrace] = {
  // We need to filter out null values here because dumpAllThreads() may return null array
  // elements for threads that are dead / don't exist.
  val threadInfos = ManagementFactory.getThreadMXBean.dumpAllThreads(true, true).filter(_ != null)
  // Ordering: "Executor task launch" threads first, then case-insensitively by name,
  // with thread id as the final tie-breaker for a stable order.
  threadInfos.sortWith { case (threadTrace1, threadTrace2) =>
    // 1 marks an executor-task-launch thread; v1 > v2 below sorts those before all others.
    val v1 = if (threadTrace1.getThreadName.contains("Executor task launch")) 1 else 0
    val v2 = if (threadTrace2.getThreadName.contains("Executor task launch")) 1 else 0
    if (v1 == v2) {
      val name1 = threadTrace1.getThreadName().toLowerCase(Locale.ROOT)
      val name2 = threadTrace2.getThreadName().toLowerCase(Locale.ROOT)
      val nameCmpRes = name1.compareTo(name2)
      if (nameCmpRes == 0) {
        // Identical (case-folded) names: fall back to thread id.
        threadTrace1.getThreadId < threadTrace2.getThreadId
      } else {
        nameCmpRes < 0
      }
    } else {
      v1 > v2
    }
  }.map(threadInfoToThreadStackTrace)
}
/** Return the stack trace of a single thread by id, or None if the thread does not exist. */
def getThreadDumpForThread(threadId: Long): Option[ThreadStackTrace] = {
  if (threadId <= 0) {
    None
  } else {
    // Int.MaxValue asks the MXBean for the entire, untruncated stack trace.
    val info = ManagementFactory.getThreadMXBean.getThreadInfo(threadId, Int.MaxValue)
    Option(info).map(threadInfoToThreadStackTrace)
  }
}
/** Convert a JMX ThreadInfo into Spark's ThreadStackTrace, annotating frames with held monitors. */
private def threadInfoToThreadStackTrace(threadInfo: ThreadInfo): ThreadStackTrace = {
  // Map each stack frame to the monitor that was acquired at that frame, if any.
  val monitors = threadInfo.getLockedMonitors.map(m => m.getLockedStackFrame -> m).toMap
  val stackTrace = StackTrace(threadInfo.getStackTrace.map { frame =>
    monitors.get(frame) match {
      case Some(monitor) =>
        // Append the held-lock annotation to the frame's normal rendering.
        monitor.getLockedStackFrame.toString + s" => holding ${monitor.lockString}"
      case None =>
        frame.toString
    }
  })
  // use a set to dedup re-entrant locks that are held at multiple places
  val heldLocks =
    (threadInfo.getLockedSynchronizers ++ threadInfo.getLockedMonitors).map(_.lockString).toSet
  ThreadStackTrace(
    threadId = threadInfo.getThreadId,
    threadName = threadInfo.getThreadName,
    threadState = threadInfo.getThreadState,
    stackTrace = stackTrace,
    blockedByThreadId =
      // A negative owner id means no thread owns the lock this thread is waiting on.
      if (threadInfo.getLockOwnerId < 0) None else Some(threadInfo.getLockOwnerId),
    blockedByLock = Option(threadInfo.getLockInfo).map(_.lockString).getOrElse(""),
    holdingLocks = heldLocks.toSeq)
}
/**
 * Convert all spark properties set in the given SparkConf to a sequence of java options.
 */
def sparkJavaOpts(conf: SparkConf, filterKey: (String => Boolean) = _ => true): Seq[String] = {
  // Keep only entries whose key passes the filter, rendered as -Dkey=value.
  conf.getAll.collect {
    case (key, value) if filterKey(key) => s"-D$key=$value"
  }
}
/**
 * Maximum number of retries when binding to a port before giving up.
 */
def portMaxRetries(conf: SparkConf): Int = {
  val explicit = conf.getOption("spark.port.maxRetries").map(_.toInt)
  // Tests get many more retries so that concurrently running suites do not collide on ports.
  val default = if (conf.contains(IS_TESTING)) 100 else 16
  explicit.getOrElse(default)
}
/**
 * Returns the user port to try when trying to bind a service. Handles wrapping and skipping
 * privileged ports.
 */
def userPort(base: Int, offset: Int): Int = {
  // Wrap around inside the non-privileged range [1024, 65536).
  val range = 65536 - 1024
  1024 + (base + offset - 1024) % range
}
/**
 * Attempt to start a service on the given port, or fail after a number of attempts.
 * Each subsequent attempt uses 1 + the port used in the previous attempt (unless the port is 0).
 *
 * @param startPort The initial port to start the service on.
 * @param startService Function to start service on a given port.
 *                     This is expected to throw java.net.BindException on port collision.
 * @param conf A SparkConf used to get the maximum number of retries when binding to a port.
 * @param serviceName Name of the service.
 * @return (service: T, port: Int)
 */
def startServiceOnPort[T](
    startPort: Int,
    startService: Int => (T, Int),
    conf: SparkConf,
    serviceName: String = ""): (T, Int) = {
  require(startPort == 0 || (1024 <= startPort && startPort < 65536),
    "startPort should be between 1024 and 65535 (inclusive), or 0 for a random free port.")
  val serviceString = if (serviceName.isEmpty) "" else s" '$serviceName'"
  val maxRetries = portMaxRetries(conf)
  for (offset <- 0 to maxRetries) {
    // Do not increment port if startPort is 0, which is treated as a special port
    val tryPort = if (startPort == 0) {
      startPort
    } else {
      userPort(startPort, offset)
    }
    try {
      val (service, port) = startService(tryPort)
      logInfo(s"Successfully started service$serviceString on port $port.")
      return (service, port)
    } catch {
      // Only bind collisions are retried; any other exception propagates immediately.
      case e: Exception if isBindCollision(e) =>
        if (offset >= maxRetries) {
          // Out of retries: rethrow as a BindException carrying an actionable message.
          val exceptionMessage = if (startPort == 0) {
            s"${e.getMessage}: Service$serviceString failed after " +
              s"$maxRetries retries (on a random free port)! " +
              s"Consider explicitly setting the appropriate binding address for " +
              s"the service$serviceString (for example ${DRIVER_BIND_ADDRESS.key} " +
              s"for SparkDriver) to the correct binding address."
          } else {
            s"${e.getMessage}: Service$serviceString failed after " +
              s"$maxRetries retries (starting from $startPort)! Consider explicitly setting " +
              s"the appropriate port for the service$serviceString (for example spark.ui.port " +
              s"for SparkUI) to an available port or increasing spark.port.maxRetries."
          }
          val exception = new BindException(exceptionMessage)
          // restore original stack trace
          exception.setStackTrace(e.getStackTrace)
          throw exception
        }
        if (startPort == 0) {
          // As startPort 0 is for a random free port, it is most possibly binding address is
          // not correct.
          logWarning(s"Service$serviceString could not bind on a random free port. " +
            "You may check whether configuring an appropriate binding address.")
        } else {
          logWarning(s"Service$serviceString could not bind on port $tryPort. " +
            s"Attempting port ${tryPort + 1}.")
        }
    }
  }
  // Should never happen
  throw new SparkException(s"Failed to start service$serviceString on port $startPort")
}
/**
 * Return whether the exception is caused by an address-port collision when binding.
 * Walks the cause chain recursively; a null message on a BindException forces a look at
 * its cause (rewritten without the non-idiomatic mid-match `return`; behavior unchanged).
 */
def isBindCollision(exception: Throwable): Boolean = {
  exception match {
    case e: BindException =>
      // Any BindException with a message counts; otherwise inspect the cause chain.
      e.getMessage != null || isBindCollision(e.getCause)
    case e: MultiException =>
      // Jetty aggregates several exceptions; a collision anywhere inside counts.
      e.getThrowables.asScala.exists(isBindCollision)
    case e: NativeIoException =>
      // Netty native transport reports bind failures via message text.
      (e.getMessage != null && e.getMessage.startsWith("bind() failed: ")) ||
        isBindCollision(e.getCause)
    case e: Exception => isBindCollision(e.getCause)
    case _ => false
  }
}
/**
 * Configure a new log4j level for the root logger of this JVM's log4j2 context.
 */
def setLogLevel(l: Level): Unit = {
  // false => use the context associated with this classloader, not the caller's.
  val ctx = LogManager.getContext(false).asInstanceOf[LoggerContext]
  val config = ctx.getConfiguration()
  val loggerConfig = config.getLoggerConfig(LogManager.ROOT_LOGGER_NAME)
  loggerConfig.setLevel(l)
  // Propagate the configuration change to all live loggers.
  ctx.updateLoggers()
  // Setting threshold to null as rootLevel will define log level for spark-shell
  Logging.sparkShellThresholdLevel = null
}
/**
 * Return the current system LD_LIBRARY_PATH name
 */
def libraryPathEnvName: String = {
  // Windows searches PATH for native libraries; macOS uses DYLD_LIBRARY_PATH.
  if (isWindows) "PATH"
  else if (isMac) "DYLD_LIBRARY_PATH"
  else "LD_LIBRARY_PATH"
}
/**
 * Return the prefix of a command that appends the given library paths to the
 * system-specific library path environment variable. On Unix, for instance,
 * this returns the string LD_LIBRARY_PATH="path1:path2:$LD_LIBRARY_PATH".
 */
def libraryPathEnvPrefix(libraryPaths: Seq[String]): String = {
  // Reference the existing env variable so the original search path is preserved.
  val libraryPathScriptVar = if (isWindows) {
    s"%${libraryPathEnvName}%"
  } else {
    "$" + libraryPathEnvName
  }
  val libraryPath = (libraryPaths :+ libraryPathScriptVar).mkString("\\"",
    File.pathSeparator, "\\"")
  // On Windows a trailing "&" chains the assignment with the command that follows it.
  val ampersand = if (Utils.isWindows) {
    " &"
  } else {
    ""
  }
  s"$libraryPathEnvName=$libraryPath$ampersand"
}
/**
 * Return the value of a config either through the SparkConf or the Hadoop configuration.
 * We check whether the key is set in the SparkConf before looking at any Hadoop configuration.
 * If the key is set in SparkConf, no matter whether it is running on YARN or not,
 * gets the value from SparkConf.
 * Only when the key is not set in SparkConf and running on YARN,
 * gets the value from Hadoop configuration.
 */
def getSparkOrYarnConfig(conf: SparkConf, key: String, default: String): String = {
  if (conf.contains(key)) {
    conf.get(key, default)
  } else if (conf.get(SparkLauncher.SPARK_MASTER, null) == "yarn") {
    // On YARN, fall back to the Hadoop/YARN configuration derived from the SparkConf.
    new YarnConfiguration(SparkHadoopUtil.get.newConfiguration(conf)).get(key, default)
  } else {
    default
  }
}
/**
 * Return a pair of host and port extracted from the `sparkUrl`.
 *
 * A spark url (`spark://host:port`) is a special URI that its scheme is `spark` and only contains
 * host and port.
 *
 * @throws org.apache.spark.SparkException if sparkUrl is invalid.
 */
@throws(classOf[SparkException])
def extractHostPortFromSparkUrl(sparkUrl: String): (String, Int) = {
  try {
    val uri = new java.net.URI(sparkUrl)
    val host = uri.getHost
    val port = uri.getPort
    // A valid master URL carries nothing besides scheme, host and port.
    val hasExtras =
      (uri.getPath != null && !uri.getPath.isEmpty) || // uri.getPath returns "" instead of null
        uri.getFragment != null ||
        uri.getQuery != null ||
        uri.getUserInfo != null
    if (uri.getScheme != "spark" || host == null || port < 0 || hasExtras) {
      throw new SparkException("Invalid master URL: " + sparkUrl)
    }
    (host, port)
  } catch {
    case e: java.net.URISyntaxException =>
      throw new SparkException("Invalid master URL: " + sparkUrl, e)
  }
}
/**
 * Returns the current user name. This is the currently logged in user, unless that's been
 * overridden by the `SPARK_USER` environment variable.
 */
def getCurrentUserName(): String = {
  // SPARK_USER wins; otherwise ask Hadoop's UGI for the short (login) name.
  Option(System.getenv("SPARK_USER"))
    .getOrElse(UserGroupInformation.getCurrentUser().getShortUserName())
}
// Returned when no group-mapping provider is configured or group lookup fails.
val EMPTY_USER_GROUPS = Set.empty[String]

// Returns the groups to which the current user belongs.
def getCurrentUserGroups(sparkConf: SparkConf, username: String): Set[String] = {
  val groupProviderClassName = sparkConf.get(USER_GROUPS_MAPPING)
  if (groupProviderClassName != "") {
    try {
      // Instantiate the configured provider reflectively via its no-arg constructor.
      val groupMappingServiceProvider = classForName(groupProviderClassName).
        getConstructor().newInstance().
        asInstanceOf[org.apache.spark.security.GroupMappingServiceProvider]
      val currentUserGroups = groupMappingServiceProvider.getGroups(username)
      return currentUserGroups
    } catch {
      // Lookup failures are logged and degrade to an empty group set instead of propagating.
      case e: Exception => logError(s"Error getting groups for user=$username", e)
    }
  }
  EMPTY_USER_GROUPS
}
/**
 * Split the comma delimited string of master URLs into a list.
 * For instance, "spark://abc,def" becomes [spark://abc, spark://def].
 */
def parseStandaloneMasterUrls(masterUrls: String): Array[String] = {
  // The scheme prefix appears only once in the input, so restore it per host.
  val hosts = masterUrls.stripPrefix("spark://").split(",")
  hosts.map(host => "spark://" + host)
}
/** An identifier that backup masters use in their responses. */
val BACKUP_STANDALONE_MASTER_PREFIX = "Current state is not alive"

/** Return true if the response message is sent from a backup Master on standby. */
def responseFromBackup(msg: String): Boolean = {
  // Standby masters prefix every response with BACKUP_STANDALONE_MASTER_PREFIX.
  msg.startsWith(BACKUP_STANDALONE_MASTER_PREFIX)
}
/**
 * To avoid calling `Utils.getCallSite` for every single RDD we create in the body,
 * set a dummy call site that RDDs use instead. This is for performance optimization.
 */
def withDummyCallSite[T](sc: SparkContext)(body: => T): T = {
  // Remember the current call-site properties so they can be restored afterwards.
  val oldShortCallSite = sc.getLocalProperty(CallSite.SHORT_FORM)
  val oldLongCallSite = sc.getLocalProperty(CallSite.LONG_FORM)
  try {
    // Empty strings act as the dummy call site while `body` runs.
    sc.setLocalProperty(CallSite.SHORT_FORM, "")
    sc.setLocalProperty(CallSite.LONG_FORM, "")
    body
  } finally {
    // Restore the old ones here
    sc.setLocalProperty(CallSite.SHORT_FORM, oldShortCallSite)
    sc.setLocalProperty(CallSite.LONG_FORM, oldLongCallSite)
  }
}
/**
 * Return whether the specified file is a parent directory of the child file.
 *
 * Returns false when either argument is null, either does not exist, or `parent` is not a
 * directory; otherwise walks up `child`'s ancestry looking for `parent`.
 * (Rewritten as a single expression: the chain of early `return`s was non-idiomatic;
 * behavior is unchanged and the method remains tail-recursive.)
 */
@tailrec
def isInDirectory(parent: File, child: File): Boolean = {
  if (child == null || parent == null) {
    false
  } else if (!child.exists() || !parent.exists() || !parent.isDirectory()) {
    false
  } else if (parent.equals(child)) {
    true
  } else {
    // Climb one level; @tailrec guarantees constant stack usage.
    isInDirectory(parent, child.getParentFile)
  }
}
/**
 * @return whether it is local mode
 */
def isLocalMaster(conf: SparkConf): Boolean = {
  // Matches "local" exactly or any "local[...]" variant (e.g. local[4], local[*]).
  conf.get("spark.master", "") match {
    case "local" => true
    case master => master.startsWith("local[")
  }
}
/**
 * Push based shuffle can only be enabled when below conditions are met:
 * - the application is submitted to run in YARN mode
 * - external shuffle service enabled
 * - IO encryption disabled
 * - serializer(such as KryoSerializer) supports relocation of serialized objects
 */
def isPushBasedShuffleEnabled(conf: SparkConf,
    isDriver: Boolean,
    checkSerializer: Boolean = true): Boolean = {
  val pushBasedShuffleEnabled = conf.get(PUSH_BASED_SHUFFLE_ENABLED)
  if (pushBasedShuffleEnabled) {
    val canDoPushBasedShuffle = {
      val isTesting = conf.get(IS_TESTING).getOrElse(false)
      val isShuffleServiceAndYarn = conf.get(SHUFFLE_SERVICE_ENABLED) &&
        conf.get(SparkLauncher.SPARK_MASTER, null) == "yarn"
      // Lazily evaluated: only consulted when the other preconditions already hold.
      lazy val serializerIsSupported = {
        if (checkSerializer) {
          // Prefer the live SparkEnv serializer; fall back to instantiating one from conf.
          Option(SparkEnv.get)
            .map(_.serializer)
            .filter(_ != null)
            .getOrElse(instantiateSerializerFromConf[Serializer](SERIALIZER, conf, isDriver))
            .supportsRelocationOfSerializedObjects
        } else {
          // if no need to check Serializer, always set serializerIsSupported as true
          true
        }
      }
      // TODO: [SPARK-36744] needs to support IO encryption for push-based shuffle
      val ioEncryptionDisabled = !conf.get(IO_ENCRYPTION_ENABLED)
      (isShuffleServiceAndYarn || isTesting) && ioEncryptionDisabled && serializerIsSupported
    }
    if (!canDoPushBasedShuffle) {
      logWarning("Push-based shuffle can only be enabled when the application is submitted " +
        "to run in YARN mode, with external shuffle service enabled, IO encryption disabled, " +
        "and relocation of serialized objects supported.")
    }
    canDoPushBasedShuffle
  } else {
    false
  }
}
// Create an instance of Serializer or ShuffleManager with the given name,
// possibly initializing it with our conf
def instantiateSerializerOrShuffleManager[T](className: String,
    conf: SparkConf,
    isDriver: Boolean): T = {
  val cls = Utils.classForName(className)
  // Look for a constructor taking a SparkConf and a boolean isDriver, then one taking just
  // SparkConf, then one taking no arguments
  try {
    cls.getConstructor(classOf[SparkConf], java.lang.Boolean.TYPE)
      .newInstance(conf, java.lang.Boolean.valueOf(isDriver))
      .asInstanceOf[T]
  } catch {
    case _: NoSuchMethodException =>
      try {
        // Second preference: (SparkConf) constructor.
        cls.getConstructor(classOf[SparkConf]).newInstance(conf).asInstanceOf[T]
      } catch {
        case _: NoSuchMethodException =>
          // Last resort: the no-arg constructor. A NoSuchMethodException here propagates.
          cls.getConstructor().newInstance().asInstanceOf[T]
      }
  }
}
// Create an instance of Serializer named by the given SparkConf property
// if the property is not set, possibly initializing it with our conf
def instantiateSerializerFromConf[T](propertyName: ConfigEntry[String],
    conf: SparkConf,
    isDriver: Boolean): T = {
  // Resolve the class name from the config entry, then delegate to the generic factory.
  val className = conf.get(propertyName)
  instantiateSerializerOrShuffleManager[T](className, conf, isDriver)
}
/**
 * Return whether dynamic allocation is enabled in the given conf.
 */
def isDynamicAllocationEnabled(conf: SparkConf): Boolean = {
  // Local mode only counts when the testing override is set.
  conf.get(DYN_ALLOCATION_ENABLED) &&
    (!isLocalMaster(conf) || conf.get(DYN_ALLOCATION_TESTING))
}
/** Return whether streaming dynamic allocation is enabled in the given conf. */
def isStreamingDynamicAllocationEnabled(conf: SparkConf): Boolean = {
  // Mirrors isDynamicAllocationEnabled: local mode requires the testing override.
  conf.get(STREAMING_DYN_ALLOCATION_ENABLED) &&
    (!isLocalMaster(conf) || conf.get(STREAMING_DYN_ALLOCATION_TESTING))
}
/**
 * Return the initial number of executors for dynamic allocation.
 */
def getDynamicAllocationInitialExecutors(conf: SparkConf): Int = {
  // Warn (but do not fail) when configured counts fall below the allowed minimum.
  if (conf.get(DYN_ALLOCATION_INITIAL_EXECUTORS) < conf.get(DYN_ALLOCATION_MIN_EXECUTORS)) {
    logWarning(s"${DYN_ALLOCATION_INITIAL_EXECUTORS.key} less than " +
      s"${DYN_ALLOCATION_MIN_EXECUTORS.key} is invalid, ignoring its setting, " +
      "please update your configs.")
  }
  if (conf.get(EXECUTOR_INSTANCES).getOrElse(0) < conf.get(DYN_ALLOCATION_MIN_EXECUTORS)) {
    logWarning(s"${EXECUTOR_INSTANCES.key} less than " +
      s"${DYN_ALLOCATION_MIN_EXECUTORS.key} is invalid, ignoring its setting, " +
      "please update your configs.")
  }
  // The effective initial count is the maximum of the three knobs.
  val initialExecutors = Seq(
    conf.get(DYN_ALLOCATION_MIN_EXECUTORS),
    conf.get(DYN_ALLOCATION_INITIAL_EXECUTORS),
    conf.get(EXECUTOR_INSTANCES).getOrElse(0)).max
  logInfo(s"Using initial executors = $initialExecutors, max of " +
    s"${DYN_ALLOCATION_INITIAL_EXECUTORS.key}, ${DYN_ALLOCATION_MIN_EXECUTORS.key} and " +
    s"${EXECUTOR_INSTANCES.key}")
  initialExecutors
}
/** Create a resource, apply `f` to it, and always close the resource afterwards. */
def tryWithResource[R <: Closeable, T](createResource: => R)(f: R => T): T = {
  val resource = createResource
  try {
    f(resource)
  } finally {
    // close() runs whether f succeeded or threw.
    resource.close()
  }
}
/**
 * Returns a path of temporary file which is in the same directory with `path`.
 */
def tempFileWith(path: File): File = {
  // A random UUID suffix makes name collisions practically impossible.
  val tempName = s"${path.getAbsolutePath}.${UUID.randomUUID()}"
  new File(tempName)
}
/**
 * Returns the name of this JVM process. This is OS dependent but typically (OSX, Linux, Windows),
 * this is formatted as PID@hostname.
 */
def getProcessName(): String = {
  // The runtime MXBean's name carries the process identity.
  ManagementFactory.getRuntimeMXBean.getName
}
/**
 * Utility function that should be called early in `main()` for daemons to set up some common
 * diagnostic state.
 */
def initDaemon(log: Logger): Unit = {
  // Record the PID@hostname so operators can correlate logs with OS processes.
  log.info(s"Started daemon with process name: ${Utils.getProcessName()}")
  // Register the logger with SignalUtils (presumably to log received signals — see SignalUtils).
  SignalUtils.registerLogger(log)
}
/**
 * Return the jar files pointed by the "spark.jars" property. Spark internally will distribute
 * these jars through file server. In the YARN mode, it will return an empty list, since YARN
 * has its own mechanism to distribute jars.
 */
def getUserJars(conf: SparkConf): Seq[String] = {
  // Drop blank entries that can arise from trailing commas in the config value.
  conf.get(JARS).filterNot(_.isEmpty)
}
/**
 * Return the local jar files which will be added to REPL's classpath. These jar files are
 * specified by --jars (spark.jars) or --packages, remote jars will be downloaded to local by
 * SparkSubmit at first.
 */
def getLocalUserJarsForShell(conf: SparkConf): Seq[String] = {
  // Missing property yields an empty sequence; blank entries are dropped.
  conf.getOption("spark.repl.local.jars").toSeq
    .flatMap(_.split(","))
    .filter(_.nonEmpty)
}
// Replacement text shown in place of redacted secret values.
private[spark] val REDACTION_REPLACEMENT_TEXT = "*********(redacted)"

/**
 * Redact the sensitive values in the given map. If a map key matches the redaction pattern then
 * its value is replaced with a dummy text.
 */
def redact(conf: SparkConf, kvs: Seq[(String, String)]): Seq[(String, String)] = {
  // The pattern is configured via SECRET_REDACTION_PATTERN.
  val redactionPattern = conf.get(SECRET_REDACTION_PATTERN)
  redact(redactionPattern, kvs)
}
/**
 * Redact the sensitive values in the given map. If a map key matches the redaction pattern then
 * its value is replaced with a dummy text.
 */
def redact[K, V](regex: Option[Regex], kvs: Seq[(K, V)]): Seq[(K, V)] = {
  // No configured pattern means nothing to redact.
  regex.fold(kvs)(r => redact(r, kvs))
}
/**
 * Redact the sensitive information in the given string.
 */
def redact(regex: Option[Regex], text: String): String = {
  regex match {
    // Only substitute when a pattern is configured and there is text to scan.
    case Some(r) if text != null && text.nonEmpty =>
      r.replaceAllIn(text, REDACTION_REPLACEMENT_TEXT)
    case _ => text
  }
}
/** Redact values whose key or value matches the pattern; non-string pairs pass through. */
private def redact[K, V](redactionPattern: Regex, kvs: Seq[(K, V)]): Seq[(K, V)] = {
  // If the sensitive information regex matches with either the key or the value, redact the value
  // While the original intent was to only redact the value if the key matched with the regex,
  // we've found that especially in verbose mode, the value of the property may contain sensitive
  // information like so:
  // "sun.java.command":"org.apache.spark.deploy.SparkSubmit ... \\
  // --conf spark.executorEnv.HADOOP_CREDSTORE_PASSWORD=secret_password ...
  //
  // And, in such cases, simply searching for the sensitive information regex in the key name is
  // not sufficient. The values themselves have to be searched as well and redacted if matched.
  // This does mean we may be accounting more false positives - for example, if the value of an
  // arbitrary property contained the term 'password', we may redact the value from the UI and
  // logs. In order to work around it, user would have to make the spark.redaction.regex property
  // more specific.
  kvs.map {
    // Both key and value are strings: redact when either side matches.
    case (key: String, value: String) =>
      redactionPattern.findFirstIn(key)
        .orElse(redactionPattern.findFirstIn(value))
        .map { _ => (key, REDACTION_REPLACEMENT_TEXT) }
        .getOrElse((key, value))
    // Only the value is a string: match against the value alone.
    case (key, value: String) =>
      redactionPattern.findFirstIn(value)
        .map { _ => (key, REDACTION_REPLACEMENT_TEXT) }
        .getOrElse((key, value))
    // Neither side is a string: nothing to redact.
    case (key, value) =>
      (key, value)
  }.asInstanceOf[Seq[(K, V)]] // cast back: the match above loses the tuple's static types
}
/**
 * Looks up the redaction regex from within the key value pairs and uses it to redact the rest
 * of the key value pairs. No care is taken to make sure the redaction property itself is not
 * redacted. So theoretically, the property itself could be configured to redact its own value
 * when printing.
 */
def redact(kvs: Map[String, String]): Seq[(String, String)] = {
  // The pattern may itself be one of the entries; otherwise fall back to the default.
  val pattern = kvs
    .getOrElse(SECRET_REDACTION_PATTERN.key, SECRET_REDACTION_PATTERN.defaultValueString)
    .r
  redact(pattern, kvs.toArray)
}
/** Redact secrets appearing in -D style command line arguments. */
def redactCommandLineArgs(conf: SparkConf, commands: Seq[String]): Seq[String] = {
  val redactionPattern = conf.get(SECRET_REDACTION_PATTERN)
  commands.map {
    // PATTERN_FOR_COMMAND_LINE_ARG presumably captures (key, value) from "-Dkey=value"
    // arguments — the rebuilt string below assumes that shape; verify against its definition.
    case PATTERN_FOR_COMMAND_LINE_ARG(key, value) =>
      val (_, newValue) = redact(redactionPattern, Seq((key, value))).head
      s"-D$key=$newValue"
    // Anything that does not match passes through untouched.
    case cmd => cmd
  }
}
/** Split a comma separated string into trimmed, non-empty tokens. */
def stringToSeq(str: String): Seq[String] = {
  str.split(",").iterator.map(_.trim()).filter(_.nonEmpty).toSeq
}
/**
 * Create instances of extension classes.
 *
 * The classes in the given list must:
 * - Be sub-classes of the given base class.
 * - Provide either a no-arg constructor, or a 1-arg constructor that takes a SparkConf.
 *
 * The constructors are allowed to throw "UnsupportedOperationException" if the extension does not
 * want to be registered; this allows the implementations to check the Spark configuration (or
 * other state) and decide they do not need to be added. A log message is printed in that case.
 * Other exceptions are bubbled up.
 */
def loadExtensions[T <: AnyRef](
    extClass: Class[T], classes: Seq[String], conf: SparkConf): Seq[T] = {
  classes.flatMap { name =>
    try {
      val klass = classForName[T](name)
      require(extClass.isAssignableFrom(klass),
        s"$name is not a subclass of ${extClass.getName()}.")
      // Prefer the 1-arg SparkConf constructor; fall back to the no-arg one.
      val ext = Try(klass.getConstructor(classOf[SparkConf])) match {
        case Success(ctor) =>
          ctor.newInstance(conf)
        case Failure(_) =>
          klass.getConstructor().newInstance()
      }
      Some(ext)
    } catch {
      case _: NoSuchMethodException =>
        throw new SparkException(
          s"$name did not have a zero-argument constructor or a" +
          " single-argument constructor that accepts SparkConf. Note: if the class is" +
          " defined inside of another Scala class, then its constructors may accept an" +
          " implicit parameter that references the enclosing class; in this case, you must" +
          " define the class as a top-level class in order to prevent this extra" +
          " parameter from breaking Spark's ability to find a valid constructor.")
      case e: InvocationTargetException =>
        e.getCause() match {
          case uoe: UnsupportedOperationException =>
            // The extension opted out of registration; skip it without failing.
            logDebug(s"Extension $name not being initialized.", uoe)
            logInfo(s"Extension $name not being initialized.")
            None
          case null => throw e
          case cause => throw cause
        }
    }
  }
}
/**
 * Check the validity of the given Kubernetes master URL and return the resolved URL. Prefix
 * "k8s://" is appended to the resolved URL as the prefix is used by KubernetesClusterManager
 * in canCreate to determine if the KubernetesClusterManager should be used.
 */
def checkAndGetK8sMasterUrl(rawMasterURL: String): String = {
  require(rawMasterURL.startsWith("k8s://"),
    "Kubernetes master URL must start with k8s://.")
  val masterWithoutK8sPrefix = rawMasterURL.substring("k8s://".length)

  val resolvedURL = if (!masterWithoutK8sPrefix.contains("://")) {
    // To handle master URLs, e.g., k8s://host:port.
    val withScheme = s"https://$masterWithoutK8sPrefix"
    logInfo("No scheme specified for kubernetes master URL, so defaulting to https. Resolved " +
      s"URL is $withScheme.")
    withScheme
  } else {
    val masterScheme = new URI(masterWithoutK8sPrefix).getScheme
    Option(masterScheme).map(_.toLowerCase(Locale.ROOT)) match {
      case Some("https") =>
        masterWithoutK8sPrefix
      case Some("http") =>
        logWarning("Kubernetes master URL uses HTTP instead of HTTPS.")
        masterWithoutK8sPrefix
      case _ =>
        throw new IllegalArgumentException("Invalid Kubernetes master scheme: " + masterScheme
          + " found in URL: " + masterWithoutK8sPrefix)
    }
  }
  s"k8s://$resolvedURL"
}
/**
 * Replaces all the {{EXECUTOR_ID}} occurrences with the Executor Id
 * and {{APP_ID}} occurrences with the App Id.
 */
def substituteAppNExecIds(opt: String, appId: String, execId: String): String = {
  // Substitute the app id first, then the executor id; order does not matter here.
  val withAppId = opt.replace("{{APP_ID}}", appId)
  withAppId.replace("{{EXECUTOR_ID}}", execId)
}
/**
 * Replaces all the {{APP_ID}} occurrences with the App Id.
 */
def substituteAppId(opt: String, appId: String): String = {
  val token = "{{APP_ID}}"
  opt.replace(token, appId)
}
/** Generate a random authentication secret of AUTH_SECRET_BIT_LENGTH bits, hex-encoded. */
def createSecret(conf: SparkConf): String = {
  val bits = conf.get(AUTH_SECRET_BIT_LENGTH)
  // SecureRandom provides cryptographically strong randomness for the secret.
  val rnd = new SecureRandom()
  val secretBytes = new Array[Byte](bits / JByte.SIZE)
  rnd.nextBytes(secretBytes)
  Hex.encodeHexString(secretBytes)
}
/**
 * Returns true if and only if the underlying class is a member class.
 *
 * Note: jdk8u throws a "Malformed class name" error if a given class is a deeply-nested
 * inner class (See SPARK-34607 for details). This issue has already been fixed in jdk9+, so
 * we can remove this helper method safely if we drop the support of jdk8u.
 *
 * @param cls the class to test
 */
def isMemberClass(cls: Class[_]): Boolean = {
  try {
    cls.isMemberClass
  } catch {
    case _: InternalError =>
      // We emulate jdk8u `Class.isMemberClass` below:
      //   public boolean isMemberClass() {
      //     return getSimpleBinaryName() != null && !isLocalOrAnonymousClass();
      //   }
      // `getSimpleBinaryName()` returns null if a given class is a top-level class,
      // so we replace it with `cls.getEnclosingClass != null`. The second condition checks
      // if a given class is not a local or an anonymous class, so we replace it with
      // `cls.getEnclosingMethod == null` because `cls.getEnclosingMethod()` return a value
      // only in either case (JVM Spec 4.8.6).
      //
      // Note: The newer jdk evaluates `!isLocalOrAnonymousClass()` first,
      // we reorder the conditions to follow it.
      cls.getEnclosingMethod == null && cls.getEnclosingClass != null
  }
}
/**
 * Safer than Class obj's getSimpleName which may throw Malformed class name error in scala.
 * This method mimics scalatest's getSimpleNameOfAnObjectsClass.
 *
 * @param cls the class whose simple name is wanted
 */
def getSimpleName(cls: Class[_]): String = {
  try {
    cls.getSimpleName
  } catch {
    // TODO: the value returned here isn't even quite right; it returns simple names
    // like UtilsSuite$MalformedClassObject$MalformedClass instead of MalformedClass
    // The exact value may not matter much as it's used in log statements
    case _: InternalError =>
      // Fall back to manual stripping of package prefix and interpreter '$' noise.
      stripDollars(stripPackages(cls.getName))
  }
}
/**
 * Remove the packages from full qualified class name
 */
private def stripPackages(fullyQualifiedName: String): String = {
  // Keep only the final segment after the last '.'.
  fullyQualifiedName.split("\\\\.").takeRight(1)(0)
}
/**
 * Remove trailing dollar signs from qualified class name,
 * and return the trailing part after the last dollar sign in the middle
 *
 * @param s a class name already stripped of its package prefix
 */
@scala.annotation.tailrec
private def stripDollars(s: String): String = {
  val lastDollarIndex = s.lastIndexOf('$')
  if (lastDollarIndex < s.length - 1) {
    // The last char is not a dollar sign
    if (lastDollarIndex == -1 || !s.contains("$iw")) {
      // The name does not have dollar sign or is not an interpreter
      // generated class, so we should return the full string
      s
    } else {
      // The class name is interpreter generated,
      // return the part after the last dollar sign
      // This is the same behavior as getClass.getSimpleName
      s.substring(lastDollarIndex + 1)
    }
  }
  else {
    // The last char is a dollar sign
    // Find last non-dollar char
    val lastNonDollarChar = s.reverse.find(_ != '$')
    lastNonDollarChar match {
      case None => s
      case Some(c) =>
        val lastNonDollarIndex = s.lastIndexOf(c)
        if (lastNonDollarIndex == -1) {
          s
        } else {
          // Strip the trailing dollar signs
          // Invoke stripDollars again to get the simple name
          stripDollars(s.substring(0, lastNonDollarIndex + 1))
        }
    }
  }
}
/**
 * Regular expression matching full width characters.
 *
 * Looked at all the 0x0000-0xFFFF characters (unicode) and showed them under Xshell.
 * Found all the full width characters, then get the regular expression.
 */
private val fullWidthRegex = ("""[""" +
  // scalastyle:off nonascii
  // CJK, Hangul, and full-width form ranges that occupy two display columns.
  "\\u1100-\\u115F" +
  "\\u2E80-\\uA4CF" +
  "\\uAC00-\\uD7A3" +
  "\\uF900-\\uFAFF" +
  "\\uFE10-\\uFE19" +
  "\\uFE30-\\uFE6F" +
  "\\uFF00-\\uFF60" +
  "\\uFFE0-\\uFFE6" +
  // scalastyle:on nonascii
  """]""").r
/**
 * Return the number of half widths in a given string. Note that a full width character
 * occupies two half widths.
 *
 * For a string consisting of 1 million characters, the execution of this method requires
 * about 50ms.
 */
def stringHalfWidth(str: String): Int = {
  // Each full-width match adds one extra half-width on top of the character count.
  Option(str).fold(0)(s => s.length + fullWidthRegex.findAllIn(s).size)
}
/** Make a string safe for use as a directory name: map separators to '-', shell-special chars to '_', lowercase. */
def sanitizeDirName(str: String): String = {
  str.replaceAll("[ :/]", "-").replaceAll("[.${}'\\"]", "_").toLowerCase(Locale.ROOT)
}
/** Return whether the configured deploy mode is "client" (which is also the default). */
def isClientMode(conf: SparkConf): Boolean = {
  conf.get(SparkLauncher.DEPLOY_MODE, "client") == "client"
}
/** Returns whether the URI is a "local:" URI. */
def isLocalUri(uri: String): Boolean = {
  uri.startsWith(LOCAL_SCHEME + ":")
}
/** Check whether the file of the path is splittable. */
def isFileSplittable(path: Path, codecFactory: CompressionCodecFactory): Boolean = {
  // An uncompressed file (no codec) is always splittable; otherwise the codec must be.
  codecFactory.getCodec(path) match {
    case null => true
    case codec => codec.isInstanceOf[SplittableCompressionCodec]
  }
}
/** Create a new properties object with the same values as `props`; null input yields null. */
def cloneProperties(props: Properties): Properties = {
  if (props == null) {
    props
  } else {
    val copy = new Properties()
    props.forEach((k, v) => copy.put(k, v))
    copy
  }
}
/**
 * Convert a sequence of `Path`s to a metadata string. When the length of metadata string
 * exceeds `stopAppendingThreshold`, stop appending paths for saving memory.
 */
def buildLocationMetadata(paths: Seq[Path], stopAppendingThreshold: Int): String = {
  val metadata = new StringBuilder(s"(${paths.length} paths)[")
  var index: Int = 0
  // Append comma-separated paths until the character budget is exhausted.
  while (index < paths.length && metadata.length < stopAppendingThreshold) {
    if (index > 0) {
      metadata.append(", ")
    }
    metadata.append(paths(index).toString)
    index += 1
  }
  // Mark truncation with an ellipsis when not all paths were appended.
  if (paths.length > index) {
    if (index > 0) {
      metadata.append(", ")
    }
    metadata.append("...")
  }
  metadata.append("]")
  metadata.toString
}
/**
 * Convert MEMORY_OFFHEAP_SIZE to MB Unit, return 0 if MEMORY_OFFHEAP_ENABLED is false.
 */
def executorOffHeapMemorySizeAsMb(sparkConf: SparkConf): Int = {
  val requestedMb = Utils.memoryStringToMb(sparkConf.get(MEMORY_OFFHEAP_SIZE).toString)
  // checkOffHeapEnabled zeroes the value when off-heap memory is disabled.
  checkOffHeapEnabled(sparkConf, requestedMb).toInt
}
/**
 * return 0 if MEMORY_OFFHEAP_ENABLED is false.
 */
def checkOffHeapEnabled(sparkConf: SparkConf, offHeapSize: Long): Long = {
  if (!sparkConf.get(MEMORY_OFFHEAP_ENABLED)) {
    0L
  } else {
    // Off-heap memory is on, so a positive size is mandatory.
    require(offHeapSize > 0,
      s"${MEMORY_OFFHEAP_SIZE.key} must be > 0 when ${MEMORY_OFFHEAP_ENABLED.key} == true")
    offHeapSize
  }
}
/** Returns a string message about delegation token generation failure */
def createFailedToGetTokenMessage(serviceName: String, e: scala.Throwable): String = {
  val template = "Failed to get token from service %s due to %s. " +
    "If %s is not used, set spark.security.credentials.%s.enabled to false."
  // The service name fills three of the four slots; the throwable's toString
  // fills the second.
  template.format(serviceName, e, serviceName, serviceName)
}
/**
 * Decompress a zip file into a local dir. File names are read from the zip file. Note, we skip
 * addressing the directory here. Also, we rely on the caller side to address any exceptions.
 *
 * @param fs the filesystem holding the zip file
 * @param dfsZipFile path of the zip archive to read
 * @param localDir existing local directory to extract into
 * @return the list of files written to `localDir`
 */
def unzipFilesFromFile(fs: FileSystem, dfsZipFile: Path, localDir: File): Seq[File] = {
  val files = new ArrayBuffer[File]()
  val in = new ZipInputStream(fs.open(dfsZipFile))
  var out: OutputStream = null
  try {
    var entry = in.getNextEntry()
    while (entry != null) {
      if (!entry.isDirectory) {
        // Keep only the last path segment of the entry name, so nested
        // directories inside the archive are flattened and entries cannot
        // escape `localDir`.
        val fileName = localDir.toPath.resolve(entry.getName).getFileName.toString
        val outFile = new File(localDir, fileName)
        files += outFile
        // `out` is reused per entry; any previous stream was closed below.
        out = new FileOutputStream(outFile)
        IOUtils.copy(in, out)
        out.close()
        in.closeEntry()
      }
      entry = in.getNextEntry()
    }
    in.close() // so that any error in closing does not get ignored
    logInfo(s"Unzipped from $dfsZipFile\\n\\t${files.mkString("\\n\\t")}")
  } finally {
    // Close everything no matter what happened
    IOUtils.closeQuietly(in)
    IOUtils.closeQuietly(out)
  }
  files.toSeq
}
/**
 * Return the median number of a long array, clamped to be at least 1.
 * For an even number of elements the mean of the two middle values is used.
 *
 * @param sizes the values to take the median of; must be non-empty
 * @return the median, floored at 1
 */
def median(sizes: Array[Long]): Long = {
  val sorted = sizes.sorted
  val n = sorted.length
  if (n % 2 == 0) {
    math.max((sorted(n / 2) + sorted(n / 2 - 1)) / 2, 1)
  } else {
    math.max(sorted(n / 2), 1)
  }
}
}
private[util] object CallerContext extends Logging {

  /**
   * True when the Hadoop configuration enables caller contexts and the
   * `CallerContext` classes (added in Hadoop 2.8) can be loaded.
   */
  val callerContextSupported: Boolean = {
    val enabled = SparkHadoopUtil.get.conf.getBoolean("hadoop.caller.context.enabled", false)
    enabled && {
      try {
        Utils.classForName("org.apache.hadoop.ipc.CallerContext")
        Utils.classForName("org.apache.hadoop.ipc.CallerContext$Builder")
        true
      } catch {
        // Classes missing: running against an older Hadoop, silently unsupported.
        case _: ClassNotFoundException =>
          false
        case NonFatal(e) =>
          logWarning("Fail to load the CallerContext class", e)
          false
      }
    }
  }
}
/**
* An utility class used to set up Spark caller contexts to HDFS and Yarn. The `context` will be
* constructed by parameters passed in.
* When Spark applications run on Yarn and HDFS, its caller contexts will be written into Yarn RM
* audit log and hdfs-audit.log. That can help users to better diagnose and understand how
* specific applications impacting parts of the Hadoop system and potential problems they may be
* creating (e.g. overloading NN). As HDFS mentioned in HDFS-9184, for a given HDFS operation, it's
* very helpful to track which upper level job issues it.
*
* @param from who sets up the caller context (TASK, CLIENT, APPMASTER)
*
* The parameters below are optional:
* @param upstreamCallerContext caller context the upstream application passes in
* @param appId id of the app this task belongs to
* @param appAttemptId attempt id of the app this task belongs to
* @param jobId id of the job this task belongs to
* @param stageId id of the stage this task belongs to
* @param stageAttemptId attempt id of the stage this task belongs to
* @param taskId task id
* @param taskAttemptNumber task attempt id
*/
private[spark] class CallerContext(
    from: String,
    upstreamCallerContext: Option[String] = None,
    appId: Option[String] = None,
    appAttemptId: Option[String] = None,
    jobId: Option[Int] = None,
    stageId: Option[Int] = None,
    stageAttemptId: Option[Int] = None,
    taskId: Option[Long] = None,
    taskAttemptNumber: Option[Int] = None) extends Logging {

  // Context string of the form
  //   SPARK_<from>[_<appId>][_<appAttemptId>][_JId_<jobId>][_SId_<stageId>]
  //   [_<stageAttemptId>][_TId_<taskId>][_<taskAttemptNumber>][_<upstream>]
  // truncated to the configured Hadoop maximum length.
  private val context = prepareContext("SPARK_" +
    from +
    appId.map("_" + _).getOrElse("") +
    appAttemptId.map("_" + _).getOrElse("") +
    jobId.map("_JId_" + _).getOrElse("") +
    stageId.map("_SId_" + _).getOrElse("") +
    stageAttemptId.map("_" + _).getOrElse("") +
    taskId.map("_TId_" + _).getOrElse("") +
    taskAttemptNumber.map("_" + _).getOrElse("") +
    upstreamCallerContext.map("_" + _).getOrElse(""))

  // Truncates `context` to "hadoop.caller.context.max.size" characters
  // (default 128), logging a warning when truncation happens.
  private def prepareContext(context: String): String = {
    // The default max size of Hadoop caller context is 128
    lazy val len = SparkHadoopUtil.get.conf.getInt("hadoop.caller.context.max.size", 128)
    if (context == null || context.length <= len) {
      context
    } else {
      val finalContext = context.substring(0, len)
      logWarning(s"Truncated Spark caller context from $context to $finalContext")
      finalContext
    }
  }

  /**
   * Set up the caller context [[context]] by invoking Hadoop CallerContext API of
   * [[org.apache.hadoop.ipc.CallerContext]], which was added in hadoop 2.8.
   *
   * Uses reflection — presumably so Spark still links against Hadoop versions
   * older than 2.8 that lack these classes (see CallerContext.callerContextSupported).
   * Equivalent to: CallerContext.setCurrent(new CallerContext.Builder(context).build())
   */
  def setCurrentContext(): Unit = {
    if (CallerContext.callerContextSupported) {
      try {
        val callerContext = Utils.classForName("org.apache.hadoop.ipc.CallerContext")
        val builder: Class[AnyRef] =
          Utils.classForName("org.apache.hadoop.ipc.CallerContext$Builder")
        val builderInst = builder.getConstructor(classOf[String]).newInstance(context)
        val hdfsContext = builder.getMethod("build").invoke(builderInst)
        callerContext.getMethod("setCurrent", callerContext).invoke(null, hdfsContext)
      } catch {
        // Best-effort: failure to tag the context must never break the caller.
        case NonFatal(e) =>
          logWarning("Fail to set Spark caller context", e)
      }
    }
  }
}
/**
 * A daemon thread that copies bytes from the child process's stdout/stderr
 * (`in`) to `out` until EOF, flushing after every chunk.
 *
 * @param propagateEof when true, close `out` once `in` is exhausted
 */
private[spark] class RedirectThread(
    in: InputStream,
    out: OutputStream,
    name: String,
    propagateEof: Boolean = false)
  extends Thread(name) {

  setDaemon(true)

  override def run(): Unit = {
    // Ignore IOExceptions: the child may close either end of the pipe at any time.
    scala.util.control.Exception.ignoring(classOf[IOException]) {
      // FIXME: We copy the stream on the level of bytes to avoid encoding problems.
      Utils.tryWithSafeFinally {
        val chunk = new Array[Byte](1024)
        Iterator
          .continually(in.read(chunk))
          .takeWhile(_ != -1)
          .foreach { n =>
            out.write(chunk, 0, n)
            out.flush()
          }
      } {
        if (propagateEof) {
          out.close()
        }
      }
    }
  }
}
/**
 * An [[OutputStream]] that keeps only the last `sizeInBytes` bytes written to it
 * in a ring buffer. `toString` renders the current contents as UTF-8.
 */
private[spark] class CircularBuffer(sizeInBytes: Int = 10240) extends java.io.OutputStream {

  // Next write position; wraps around once the end of the buffer is reached.
  private var writePos: Int = 0
  // Set the first time writePos wraps back to 0, i.e. the ring is full.
  private var wrapped = false
  private val data = new Array[Byte](sizeInBytes)

  def write(input: Int): Unit = {
    data(writePos) = input.toByte
    writePos = (writePos + 1) % data.length
    wrapped = wrapped || (writePos == 0)
  }

  override def toString: String = {
    if (!wrapped) {
      // Not yet wrapped: contents are simply bytes [0, writePos).
      new String(data, 0, writePos, StandardCharsets.UTF_8)
    } else {
      // Unroll the ring: the oldest byte sits at writePos.
      val linear = new Array[Byte](sizeInBytes)
      System.arraycopy(data, writePos, linear, 0, data.length - writePos)
      System.arraycopy(data, 0, linear, data.length - writePos, writePos)
      new String(linear, StandardCharsets.UTF_8)
    }
  }
}
| shaneknapp/spark | core/src/main/scala/org/apache/spark/util/Utils.scala | Scala | apache-2.0 | 124,051 |
package blended.streams.transaction.internal
import java.io._
import java.nio.file.{DirectoryStream, Files, Path}
import java.util.Date
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Sink, Source}
import blended.streams.json.PrickleProtocol._
import blended.streams.transaction._
import blended.util.logging.Logger
import prickle._
import scala.jdk.CollectionConverters._
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.io.BufferedSource
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}
object FileFlowTransactionManager {

  /** Convenience factory: builds a manager with a default config rooted at `dir`. */
  def apply(dir : File)(implicit system : ActorSystem) : FileFlowTransactionManager = new FileFlowTransactionManager(
    FlowTransactionManagerConfig(dir)
  )
}
// Persists FlowTransactions as one JSON file per transaction under config.dir.
// File names encode "<tid>.<lastUpdateMillis>.<state>", which lets lookups and
// cleanup filter on the file name without parsing the JSON payload.
class FileFlowTransactionManager(
  override val config: FlowTransactionManagerConfig
)(implicit system : ActorSystem) extends FlowTransactionManager {

  private val log : Logger = Logger[FileFlowTransactionManager]
  private implicit val eCtxt : ExecutionContext = system.dispatcher

  // Directory holding one file per persisted transaction.
  private val dir : File = config.dir

  // Creates the storage directory if needed; true only when it exists and is
  // a readable, writable directory.
  private lazy val initialized : Boolean = {
    if (!dir.exists()) {
      if (!dir.mkdirs()) {
        log.warn(s"Unable to create directory [${dir.getAbsolutePath()}]")
      } else {
        log.info(s"Created directory [${dir.getAbsolutePath()}] to persist FlowTransactions")
      }
    }
    dir.exists() && dir.canRead() && dir.canWrite() && dir.isDirectory()
  }

  // Fail construction fast if the storage directory is unusable.
  require(initialized)

  /**
   * @inheritdoc
   *
   * Applies `e` to the stored transaction with the same id, creating a fresh
   * transaction first when none exists yet, then persists the result.
   */
  override def updateTransaction(e: FlowTransactionEvent): Try[FlowTransaction] = {
    val fut : Future[Try[FlowTransaction]] = findTransaction(e.transactionId).map[(Option[FlowTransaction], FlowTransaction)] {
      case None =>
        val now: Date = new Date()
        log.trace(s"Storing new transaction [${e.transactionId}]")
        val newT : FlowTransaction = FlowTransaction(
          id = e.transactionId,
          created = now,
          lastUpdate = now,
          creationProps = e.properties,
          first = true
        )
        // A "started" event IS the fresh transaction; any other event must
        // additionally be applied to it.
        if (e.state == FlowTransactionStateStarted) {
          (None, newT)
        } else {
          (None, newT.updateTransaction(e))
        }
      case Some(t) =>
        log.trace(s"Updating transaction [${e.transactionId}]")
        (Some(t), t.updateTransaction(e).copy(first = false))
    }.map {
      // Persist the updated state, deleting the old file when one existed.
      case (old, updated) => store(old, updated)
    }
    // NOTE(review): a timeout or failed future here escapes as an exception
    // instead of a Failure — confirm whether callers expect that.
    Await.result(fut, 3.seconds)
  }

  /**
   * @inheritdoc
   *
   * Looks the transaction up by file-name prefix match on the id.
   */
  override def findTransaction(tid: String): Future[Option[FlowTransaction]] = {
    log.trace(s"Trying to find transaction [$tid]")
    mapDirectoryStream(FilteredDirectoryStream.tidStream(tid)).map(_.headOption)
  }

  /**
   * @inheritdoc
   *
   * Deletes every file whose name starts with `tid`. Fire-and-forget: the
   * underlying Future is not exposed to the caller.
   */
  override def removeTransaction(tid: String): Unit =
    withDirectoryStream(FilteredDirectoryStream.tidStream(tid)){ p =>
      log.trace(s"Removing file [${p.toFile().getAbsolutePath()}]")
      p.toFile().delete()
    }

  /**
   * A stream of all known transactions of the container.
   * Applies `f` to each loadable transaction; returns the count of true results.
   */
  override def withAll(f: FlowTransaction => Boolean): Future[Int] = withDirectoryStream(new FilteredDirectoryStream(_ => true)){ p =>
    loadExistingTransaction(p.toFile()).map(f).getOrElse(false)
  }

  // Deletes transaction files in the given states whose last-update timestamp
  // (encoded in the file name) is older than the configured retention for that
  // state. Returns the number of files deleted.
  override def cleanUp(states: FlowTransactionState*): Future[Int] = withDirectoryStream(FilteredDirectoryStream.stateDirectoryStream(states:_*)){ p =>
    // File names are expected to be "<tid>.<lastUpdateMillis>.<state>".
    val n : Array[String] = p.toFile().getName().split("\\\\.")
    if (n.length == 3) {
      val doDelete : Boolean = Try {
        val state : FlowTransactionState = FlowTransactionState.apply(n(2)).get
        val retain : FiniteDuration = state match {
          case FlowTransactionStateFailed => config.retainFailed
          case FlowTransactionStateCompleted => config.retainCompleted
          case _ => config.retainStale
        }
        System.currentTimeMillis() - n(1).toLong >= retain.toMillis
      }.getOrElse(true) // unparseable name/state: treat as stale and delete
      if (doDelete) {
        log.trace(s"Cleaning up file [${p.toFile()}]")
        p.toFile().delete()
      } else {
        false
      }
    } else {
      false
    }
  }

  // File name for a transaction: "<tid>.<lastUpdateMillis>.<state>".
  private val filename : FlowTransaction => String = t =>
    s"${t.tid}.${t.lastUpdate.getTime()}.${t.state}"

  // Writes `changed` as JSON under its derived file name, removing the file of
  // the previous version (whose name differs because timestamp/state changed).
  private def store(old: Option[FlowTransaction], changed : FlowTransaction) : Try[FlowTransaction] = Try {
    val json : String = Pickle.intoString(changed)

    val newFile : File = new File(dir, filename(changed))
    old.foreach(t => new File(dir, filename(t)).delete())

    var os : Option[OutputStream] = None
    var writer : Option[BufferedWriter] = None

    try {
      os = Some(new FileOutputStream(newFile))
      writer = Some(new BufferedWriter(new PrintWriter(os.get)))
      writer.foreach{ w => w.write(json) }
      changed
    } catch {
      case NonFatal(e) =>
        log.warn(s"Error writing transaction file [${newFile.getAbsolutePath()}][${e.getMessage()}]")
        throw e
    } finally {
      // Close writer first, then the underlying stream; log but swallow
      // close failures so they never mask the primary error.
      writer.foreach { w =>
        try {
          w.close()
        } catch {
          case NonFatal(e) =>
            log.warn(s"Error closing file [${newFile.getAbsolutePath()}][${e.getMessage()}]")
        }
      }
      os.foreach { s =>
        try {
          s.close()
        } catch {
          case NonFatal(e) =>
            log.warn(s"Error closing file [${newFile.getAbsolutePath()}][${e.getMessage()}]")
        }
      }
    }
  }

  // Like loadTransaction, but absence is turned into a Failure.
  private def loadExistingTransaction(f : File) : Try[FlowTransaction] = Try {
    loadTransaction(f) match {
      case Failure(t) => throw t
      case Success(None) => throw new Exception(s"FlowTransaction [${f.getName()}] not found")
      case Success(Some(ft)) => ft
    }
  }

  // Reads and unpickles a transaction file; None when the file does not exist.
  private def loadTransaction(f : File) : Try[Option[FlowTransaction]] = Try {
    if (f.exists()) {
      val json : String = loadFile(f).get
      val t : FlowTransaction = Unpickle[FlowTransaction].fromString(json).get
      Some(t)
    } else {
      None
    }
  }

  // Reads the whole file as a UTF-8 string, always closing the source.
  private def loadFile(f : File) : Try[String] = {
    val src : BufferedSource = scala.io.Source.fromFile(f, "UTF-8")
    try {
      Success(src.getLines().mkString("\\n"))
    } catch {
      case NonFatal(t) =>
        log.warn(s"Exception encountered accessing transaction file [${f.getAbsolutePath()}]")
        Failure(t)
    } finally {
      try {
        src.close()
      } catch {
        case NonFatal(t) =>
          log.warn(s"Error closing file [${f.getAbsolutePath()}]:[${t.getMessage()}]")
      }
    }
  }

  // Loads every transaction visible through the given directory stream.
  private def mapDirectoryStream(dirStream : FilteredDirectoryStream) : Future[Seq[FlowTransaction]] =
    mapDirectoryStreamWithFilter(dirStream)({_ : FlowTransaction => true})

  // Loads all transactions from the stream, silently dropping files that fail
  // to load, then keeps those matching `select`. Closes the stream afterwards.
  private def mapDirectoryStreamWithFilter(
    dirStream : FilteredDirectoryStream
  )(
    select : FlowTransaction => Boolean
  ) : Future[Seq[FlowTransaction]] = {
    val transactions : Future[Seq[FlowTransaction]] = dirStream.entries
      .map{ p => loadExistingTransaction(p.toFile()) }
      .filter(_.isSuccess)
      .map(_.get)
      .filter(select)
      .runWith(Sink.seq)

    transactions.onComplete(_ => dirStream.close())
    transactions
  }

  // Applies `f` to each path in the stream, counting the true results.
  // Closes the stream when the stream run completes.
  private def withDirectoryStream(
    dirStream : FilteredDirectoryStream
  )(
    f : Path => Boolean
  ) : Future[Int] = {
    val count : Future[Int] = dirStream.entries
      .via(Flow.fromFunction{ p => if (f(p)) 1 else 0 })
      .runFold(0)( (c,v) => c + v )

    count.onComplete(_ => dirStream.close())
    count
  }

  private object FilteredDirectoryStream {

    // Stream of files belonging to a transaction id (file-name prefix match).
    def tidStream(tid : String) : FilteredDirectoryStream = new FilteredDirectoryStream({ p =>
      p.toFile().getName().startsWith(tid)
    })

    // Stream of files whose state suffix (last dot-separated segment of the
    // file name) is one of the given states.
    def stateDirectoryStream(states : FlowTransactionState*) : FilteredDirectoryStream = {
      new FilteredDirectoryStream({ p =>
        val s : Array[String] = p.toFile().getName().split("\\\\.")
        states.map(_.toString).contains(s(s.length -1))
      })
    }
  }

  // Wraps a java.nio DirectoryStream over the storage dir as an Akka Source.
  // The caller is responsible for invoking close() once the stream has run.
  private class FilteredDirectoryStream(f : Path => Boolean) {

    private val stream : DirectoryStream[Path] = {
      val tidFilter: DirectoryStream.Filter[Path] = new DirectoryStream.Filter[Path] {
        override def accept(entry: Path): Boolean = f(entry)
      }

      Files.newDirectoryStream(dir.toPath(), tidFilter)
    }

    val entries : Source[Path, NotUsed] = Source.fromIterator(() => stream.iterator().asScala)

    def close() : Unit = {
      try {
        stream.close()
      } catch {
        case NonFatal(t) =>
          log.warn(s"Error closing directory stream in transaction cleanup : [${t.getMessage()}]")
      }
    }
  }
}
| woq-blended/blended | blended.streams/src/main/scala/blended/streams/transaction/internal/FileFlowTransactionManager.scala | Scala | apache-2.0 | 8,633 |
package misc
import org.scalatest.{Matchers, FlatSpec}
/**
* Created by skunnumkal on 12/23/14.
*/
/**
 * Specs for [[KnightsTour]]: full-tour search on 5x5 / 4x4 boards and the
 * valid-next-move computation from a partially completed tour.
 */
class KnightsTourSpec extends FlatSpec with Matchers {

  // Shared 5x5 fixture used by the "valid next positions" tests: cells >= 0
  // are squares already visited (their value is the move order), -1 cells are
  // unvisited. Converted to the Boolean visited-matrix getValidNexts expects.
  private def partialTourSnapshot: Array[Array[Boolean]] = {
    val moveOrder = Array(
      Array(0, 9, -1, 5, 2),
      Array(13, 4, 1, 10, -1),
      Array(8, -1, 12, 3, 6),
      Array(-1, 14, 7, -1, 11),
      Array(-1, -1, -1, -1, -1))
    moveOrder.map(row => row.map(_ >= 0))
  }

  it should "get some tours" in {
    val knightsTour = new KnightsTour(5)
    // No tours before the search has run.
    knightsTour.getTotalCount() should be (0)
    knightsTour.knightsTour(0, 0)
    val validSolns = knightsTour.getValidSolns()
    validSolns.isEmpty should be (false)
  }

  it should "not complete any tour on a small board" in {
    // The implementation finds no complete tour starting at (0,0) on a 4x4 board.
    val knightsTour = new KnightsTour(4)
    knightsTour.knightsTour(0, 0)
    knightsTour.getTotalCount() should be (0)
  }

  it should "get only valid next Positions 1" in {
    val knightsTour = new KnightsTour(5)
    val nextSteps = knightsTour.getValidNexts(3, 1, partialTourSnapshot)
    nextSteps.length should be (1)
    nextSteps(0) should be ((4, 3))
  }

  it should "get only valid next Positions 2" in {
    val knightsTour = new KnightsTour(5)
    val nextSteps = knightsTour.getValidNexts(0, 2, partialTourSnapshot)
    nextSteps.length should be (2)
  }

  it should "get only valid next Positions 3" in {
    val knightsTour = new KnightsTour(5)
    val nextSteps = knightsTour.getValidNexts(4, 4, partialTourSnapshot)
    nextSteps.isEmpty should be (true)
  }
}
| sajit/skalad | scala/src/test/scala/misc/KnightsTourSpec.scala | Scala | apache-2.0 | 1,972 |
package scala.tasty.internal
package dotc
package ast
trait TTrees {
self: API =>
import scala.tasty.internal.dotc.core._
import Types._, Names._, Flags._, util.Positions._, Contexts._, Constants._, Symbols._, Denotations._, SymDenotations._
import StdNames._
import annotation.tailrec
import language.higherKinds
import collection.IndexedSeqOptimized
import collection.immutable.IndexedSeq
import collection.mutable.ListBuffer
import annotation.unchecked.uncheckedVariance
import language.implicitConversions
//This outer object because we don't want to change code inside the pickling
object ast {
object Trees {
type Untyped = Null
case class Modifiers[-T >: Untyped](
flags: FlagSet = EmptyFlags,
privateWithin: TypeName = tpnme.EMPTY,
annotations: List[Tree[T]] = Nil) extends Positioned with Cloneable {
}
// Base class of all trees. The type parameter T is Untyped for parser trees
// and Type once the tree has been typed.
abstract class Tree[-T >: Untyped] extends Positioned with Product with Cloneable {

  type ThisTree[T >: Untyped] <: Tree[T]

  // Assigned lazily via withType; reading it before assignment throws.
  private[this] var myTpe: T = _

  /** The type of this tree; throws [[UnAssignedTypeException]] if not yet assigned. */
  def tpe: T @uncheckedVariance = {
    if (myTpe == null)
      throw new UnAssignedTypeException(this)
    myTpe
  }

  /** Destructively assigns `tpe` and re-views this tree as a typed tree. */
  def withType(tpe: T): ThisTree[Type] = {
    myTpe = tpe
    this.asInstanceOf[ThisTree[Type]]
  }

  def denot: Denotation = NoDenotation

  /** Shorthand for the symbol of this tree's denotation. */
  final def symbol: Symbol = denot.symbol

  def isEmpty: Boolean = false

  // Trees are hashed and compared by reference identity, not structure.
  override def hashCode(): Int = System.identityHashCode(this)
  override def equals(that: Any) = this eq that.asInstanceOf[AnyRef]
}
class UnAssignedTypeException[T >: Untyped](tree: Tree[T]) extends RuntimeException {
override def getMessage: String = s"type of $tree is not assigned"
}
trait TypTree[-T >: Untyped] extends Tree[T] {
type ThisTree[-T >: Untyped] <: TypTree[T]
}
trait TermTree[-T >: Untyped] extends Tree[T] {
type ThisTree[-T >: Untyped] <: TermTree[T]
}
trait PatternTree[-T >: Untyped] extends Tree[T] {
type ThisTree[-T >: Untyped] <: PatternTree[T]
}
// A tree whose denotation is derived directly from its assigned type:
// named types and this-types carry a denotation; annotated types are
// looked through first. Anything else denotes nothing.
abstract class DenotingTree[-T >: Untyped] extends Tree[T] {
  type ThisTree[-T >: Untyped] <: DenotingTree[T]

  override def denot = tpe match {
    case tpe: NamedType => tpe.denot
    case tpe: ThisType => tpe.cls.denot
    case tpe: AnnotatedType => tpe.stripAnnots match {
      case tpe: NamedType => tpe.denot
      case tpe: ThisType => tpe.cls.denot
      case _ => NoDenotation
    }
    case _ => NoDenotation
  }
}
abstract class ProxyTree[-T >: Untyped] extends Tree[T] {
type ThisTree[-T >: Untyped] <: ProxyTree[T]
def forwardTo: Tree[T]
override def denot: Denotation = forwardTo.denot
}
abstract class NameTree[-T >: Untyped] extends DenotingTree[T] {
type ThisTree[-T >: Untyped] <: NameTree[T]
def name: Name
}
abstract class RefTree[-T >: Untyped] extends NameTree[T] {
type ThisTree[-T >: Untyped] <: RefTree[T]
// def qualifier: Tree[T]
}
trait DefTree[-T >: Untyped] extends DenotingTree[T] {
type ThisTree[-T >: Untyped] <: DefTree[T]
}
abstract class MemberDef[-T >: Untyped] extends NameTree[T] with DefTree[T] {
type ThisTree[-T >: Untyped] <: MemberDef[T]
private[this] var myMods: Modifiers[T] = null
protected def setMods(mods: Modifiers[T @uncheckedVariance]) = myMods = mods
}
trait ValOrDefDef[-T >: Untyped] extends MemberDef[T] with WithLazyField[Tree[T]] {
def tpt: Tree[T]
def rhs: Tree[T]
}
case class Ident[-T >: Untyped] private[ast] (name: Name) extends RefTree[T] {
type ThisTree[-T >: Untyped] = Ident[T]
// def qualifier: Tree[T] = ???
}
case class Select[-T >: Untyped] private[ast] (qualifier: Tree[T], name: Name) extends RefTree[T] {
type ThisTree[-T >: Untyped] = Select[T]
}
// A `this` reference, qualified by the (possibly empty) class name `qual`.
case class This[-T >: Untyped] private[ast] (qual: TypeName) extends DenotingTree[T] with TermTree[T] {
  type ThisTree[-T >: Untyped] = This[T]

  override def denot: Denotation = {
    tpe match {
      // `this` of a module value: denote its module class, adjusted to the prefix.
      case tpe @ TermRef(pre, _) if tpe.symbol is Module =>
        tpe.symbol.moduleClass.denot.asSeenFrom(pre)
      case _ =>
        super.denot
    }
  }
}
case class Super[-T >: Untyped] private[ast] (qual: Tree[T], mix: TypeName) extends ProxyTree[T] with TermTree[T] {
type ThisTree[-T >: Untyped] = Super[T]
def forwardTo = qual
}
abstract class GenericApply[-T >: Untyped] extends ProxyTree[T] with TermTree[T] {
type ThisTree[-T >: Untyped] <: GenericApply[T]
val fun: Tree[T]
val args: List[Tree[T]]
def forwardTo = fun
}
case class Apply[-T >: Untyped] private[ast] (fun: Tree[T], args: List[Tree[T]]) extends GenericApply[T] {
type ThisTree[-T >: Untyped] = Apply[T]
}
case class TypeApply[-T >: Untyped] private[ast] (fun: Tree[T], args: List[Tree[T]]) extends GenericApply[T] {
type ThisTree[-T >: Untyped] = TypeApply[T]
}
case class Literal[-T >: Untyped] private[ast] (const: Constant) extends TermTree[T] {
type ThisTree[-T >: Untyped] = Literal[T]
}
case class New[-T >: Untyped] private[ast] (tpt: Tree[T]) extends TermTree[T] {
type ThisTree[-T >: Untyped] = New[T]
}
case class Pair[-T >: Untyped] private[ast] (left: Tree[T], right: Tree[T]) extends TermTree[T] {
type ThisTree[-T >: Untyped] = Pair[T]
}
case class Typed[-T >: Untyped] private[ast] (expr: Tree[T], tpt: Tree[T]) extends ProxyTree[T] with TermTree[T] {
type ThisTree[-T >: Untyped] = Typed[T]
def forwardTo = expr
}
/** name = arg, in a parameter list */
case class NamedArg[-T >: Untyped] private[ast] (name: Name, arg: Tree[T]) extends Tree[T] {
type ThisTree[-T >: Untyped] = NamedArg[T]
}
/** name = arg, outside a parameter list */
case class Assign[-T >: Untyped] private[ast] (lhs: Tree[T], rhs: Tree[T]) extends TermTree[T] {
type ThisTree[-T >: Untyped] = Assign[T]
}
case class Block[-T >: Untyped] private[ast] (stats: List[Tree[T]], expr: Tree[T]) extends TermTree[T] {
type ThisTree[-T >: Untyped] = Block[T]
}
case class If[-T >: Untyped] private[ast] (cond: Tree[T], thenp: Tree[T], elsep: Tree[T]) extends TermTree[T] {
type ThisTree[-T >: Untyped] = If[T]
}
case class Closure[-T >: Untyped] private[ast] (env: List[Tree[T]], meth: Tree[T], tpt: Tree[T]) extends TermTree[T] {
type ThisTree[-T >: Untyped] = Closure[T]
}
case class Match[-T >: Untyped] private[ast] (selector: Tree[T], cases: List[CaseDef[T]]) extends TermTree[T] {
type ThisTree[-T >: Untyped] = Match[T]
}
case class CaseDef[-T >: Untyped] private[ast] (pat: Tree[T], guard: Tree[T], body: Tree[T]) extends Tree[T] {
type ThisTree[-T >: Untyped] = CaseDef[T]
}
case class Return[-T >: Untyped] private[ast] (expr: Tree[T], from: Tree[T]) extends TermTree[T] {
type ThisTree[-T >: Untyped] = Return[T]
}
case class Try[-T >: Untyped] private[ast] (expr: Tree[T], cases: List[CaseDef[T]], finalizer: Tree[T]) extends TermTree[T] {
type ThisTree[-T >: Untyped] = Try[T]
}
case class SeqLiteral[-T >: Untyped] private[ast] (elems: List[Tree[T]]) extends Tree[T] {
type ThisTree[-T >: Untyped] = SeqLiteral[T]
}
class JavaSeqLiteral[T >: Untyped] private[ast] (elems: List[Tree[T]])
extends SeqLiteral(elems) {
override def toString = s"JavaSeqLiteral($elems)"
}
// A tree representing a type, optionally derived from an `original` tree
// (EmptyTree when the type was synthesized directly).
case class TypeTree[-T >: Untyped] private[ast] (original: Tree[T]) extends DenotingTree[T] with TypTree[T] {
  type ThisTree[-T >: Untyped] = TypeTree[T]
  //TODO - fix
  // NOTE(review): emptiness and printing originally also consulted the
  // assigned type (`hasType`); those checks are commented out pending the
  // TODO above — confirm whether they should be restored here.
  override def isEmpty = /*!hasType &&*/ original.isEmpty
  override def toString = s"TypeTree${ /*if (hasType) s"[$typeOpt]" else */ s"($original)"}"
}
/** ref.type */
case class SingletonTypeTree[-T >: Untyped] private[ast] (ref: Tree[T]) extends DenotingTree[T] with TypTree[T] {
type ThisTree[-T >: Untyped] = SingletonTypeTree[T]
}
/** tpt[args] */
case class AppliedTypeTree[-T >: Untyped] private[ast] (tpt: Tree[T], args: List[Tree[T]]) extends ProxyTree[T] with TypTree[T] {
type ThisTree[-T >: Untyped] = AppliedTypeTree[T]
def forwardTo = tpt
}
/** => T */
case class ByNameTypeTree[-T >: Untyped] private[ast] (result: Tree[T]) extends TypTree[T] {
type ThisTree[-T >: Untyped] = ByNameTypeTree[T]
}
/** >: lo <: hi */
case class TypeBoundsTree[-T >: Untyped] private[ast] (lo: Tree[T], hi: Tree[T]) extends TypTree[T] {
type ThisTree[-T >: Untyped] = TypeBoundsTree[T]
}
/** name @ body */
case class Bind[-T >: Untyped] private[ast] (name: Name, body: Tree[T]) extends NameTree[T] with DefTree[T] with PatternTree[T] {
type ThisTree[-T >: Untyped] = Bind[T]
}
case class Alternative[-T >: Untyped] private[ast] (trees: List[Tree[T]]) extends PatternTree[T] {
type ThisTree[-T >: Untyped] = Alternative[T]
}
case class UnApply[-T >: Untyped] private[ast] (fun: Tree[T], implicits: List[Tree[T]], patterns: List[Tree[T]]) extends PatternTree[T] {
type ThisTree[-T >: Untyped] = UnApply[T]
}
case class ValDef[-T >: Untyped] private[ast] (name: TermName, tpt: Tree[T], val rhs: Tree[T]) extends ValOrDefDef[T] {
type ThisTree[-T >: Untyped] = ValDef[T]
}
case class DefDef[-T >: Untyped] private[ast] (name: TermName, tparams: List[TypeDef[T]],
vparamss: List[List[ValDef[T]]], tpt: Tree[T], val rhs: Tree[T]) extends ValOrDefDef[T] {
type ThisTree[-T >: Untyped] = DefDef[T]
}
/**
* mods class name template or
* mods trait name template or
* mods type name = rhs or
* mods type name >: lo <: hi, if rhs = TypeBoundsTree(lo, hi) & (lo ne hi)
*/
case class TypeDef[-T >: Untyped] private[ast] (name: TypeName, rhs: Tree[T]) extends MemberDef[T] {
type ThisTree[-T >: Untyped] = TypeDef[T]
def isClassDef = rhs.isInstanceOf[Template[_]]
}
case class Template[-T >: Untyped] private[ast] (constr: DefDef[T], parents: List[Tree[T]], self: ValDef[T], body: List[Tree[T]]) extends DefTree[T] with WithLazyField[List[Tree[T]]] {
type ThisTree[-T >: Untyped] = Template[T]
}
case class Import[-T >: Untyped] private[ast] (expr: Tree[T], selectors: List[Tree[Untyped]]) extends DenotingTree[T] {
type ThisTree[-T >: Untyped] = Import[T]
}
case class PackageDef[-T >: Untyped] private[ast] (pid: RefTree[T], stats: List[Tree[T]]) extends ProxyTree[T] {
type ThisTree[-T >: Untyped] = PackageDef[T]
def forwardTo = pid
}
case class Annotated[-T >: Untyped] private[ast] (annot: Tree[T], arg: Tree[T])
extends ProxyTree[T] {
type ThisTree[-T >: Untyped] = Annotated[T]
def forwardTo = arg
}
trait WithoutTypeOrPos[-T >: Untyped] extends Tree[T] {
override def tpe: T @uncheckedVariance = NoType.asInstanceOf[T]
override def pos = NoPosition
}
case class Thicket[-T >: Untyped](trees: List[Tree[T]]) extends Tree[T] with WithoutTypeOrPos[T] {
type ThisTree[-T >: Untyped] = Thicket[T]
override def isEmpty: Boolean = trees.isEmpty
override def toString = if (isEmpty) "EmptyTree" else "Thicket(" + trees.mkString(", ") + ")"
}
class EmptyValDef[T >: Untyped] extends ValDef[T](
nme.WILDCARD, genericEmptyTree[T], genericEmptyTree[T]) with WithoutTypeOrPos[T] {
override def isEmpty: Boolean = true
setMods(Modifiers[T](PrivateLocal))
}
val theEmptyTree: Thicket[Type] = Thicket(Nil)
val theEmptyValDef = new EmptyValDef[Type]
val theEmptyModifiers = new Modifiers()
def genericEmptyValDef[T >: Untyped]: ValDef[T] = theEmptyValDef.asInstanceOf[ValDef[T]]
def genericEmptyTree[T >: Untyped]: Thicket[T] = theEmptyTree.asInstanceOf[Thicket[T]]
def genericEmptyModifiers[T >: Untyped]: Modifiers[T] = theEmptyModifiers.asInstanceOf[Modifiers[T]]
trait WithLazyField[+T <: AnyRef]
abstract class Instance[T >: Untyped <: Type] extends util.DotClass { inst =>
type Modifiers = Trees.Modifiers[T]
type Tree = Trees.Tree[T]
type TypTree = Trees.TypTree[T]
type TermTree = Trees.TermTree[T]
type RefTree = Trees.RefTree[T]
type MemberDef = Trees.MemberDef[T]
type ValOrDefDef = Trees.ValOrDefDef[T]
type Ident = Trees.Ident[T]
type Select = Trees.Select[T]
type This = Trees.This[T]
type Super = Trees.Super[T]
type Apply = Trees.Apply[T]
type TypeApply = Trees.TypeApply[T]
type Literal = Trees.Literal[T]
type New = Trees.New[T]
type Pair = Trees.Pair[T]
type Typed = Trees.Typed[T]
type NamedArg = Trees.NamedArg[T]
type Assign = Trees.Assign[T]
type Block = Trees.Block[T]
type If = Trees.If[T]
type Closure = Trees.Closure[T]
type Match = Trees.Match[T]
type CaseDef = Trees.CaseDef[T]
type Return = Trees.Return[T]
type Try = Trees.Try[T]
type SeqLiteral = Trees.SeqLiteral[T]
type JavaSeqLiteral = Trees.JavaSeqLiteral[T]
type TypeTree = Trees.TypeTree[T]
type SingletonTypeTree = Trees.SingletonTypeTree[T]
type AppliedTypeTree = Trees.AppliedTypeTree[T]
type ByNameTypeTree = Trees.ByNameTypeTree[T]
type TypeBoundsTree = Trees.TypeBoundsTree[T]
type Bind = Trees.Bind[T]
type Alternative = Trees.Alternative[T]
type UnApply = Trees.UnApply[T]
type ValDef = Trees.ValDef[T]
type DefDef = Trees.DefDef[T]
type TypeDef = Trees.TypeDef[T]
type Template = Trees.Template[T]
type Import = Trees.Import[T]
type PackageDef = Trees.PackageDef[T]
type Annotated = Trees.Annotated[T]
type Thicket = Trees.Thicket[T]
val EmptyTree: Thicket = genericEmptyTree
val EmptyValDef: ValDef = genericEmptyValDef
val EmptyModifiers: Modifiers = genericEmptyModifiers
}
}
object tpd extends Trees.Instance[Type] {
def Modifiers(sym: Symbol): Modifiers = ast.Trees.Modifiers(
sym.flags & ModifierFlags,
if (sym.privateWithin.exists) sym.privateWithin.asType.name else tpnme.EMPTY,
sym.annotations map (_.tree))
def Ident(name: Name)/*(tp: NamedType)*/: Ident =
new Ident(name)
def Select(qualifier: Tree, name: Name): Select =
new Select(qualifier, name)
// def SelectFromTypeTree(qualifier: Tree, name: Name): SelectFromTypeTree =
// new SelectFromTypeTree(qualifier, name)
def This(qual: TypeName)/*(cls: ClassSymbol)*/: This =
new This(qual)
def Super(qual: Tree, mix: TypeName, inConstrCall: Boolean, mixinClass: Symbol = NoSymbol): Super =
new Super(qual, mix)
def Apply(fn: Tree, args: List[Tree]): Apply =
new Apply(fn, args)
def TypeApply(fn: Tree, args: List[Tree]): TypeApply =
new TypeApply(fn, args)
def Literal(const: Constant): Literal =
new Literal(const)
def unitLiteral: Literal =
Literal(Constant(()))
def New(tpt: Tree): New =
new New(tpt)
def New(tp: Type): New = New(TypeTree(tp))
def Pair(left: Tree, right: Tree): Pair =
new Pair(left, right)
def Typed(expr: Tree, tpt: Tree): Typed =
new Typed(expr, tpt)
def NamedArg(name: Name, arg: Tree) =
new NamedArg(name, arg)
def Assign(lhs: Tree, rhs: Tree): Assign =
new Assign(lhs, rhs)
def Block(stats: List[Tree], expr: Tree): Block =
new Block(stats, expr)
def If(cond: Tree, thenp: Tree, elsep: Tree): If =
new If(cond, thenp, elsep)
def Closure(env: List[Tree], meth: Tree, tpt: Tree): Closure =
new Closure(env, meth, tpt)
/**
* A function def
*
* vparams => expr
*
* gets expanded to
*
* { def $anonfun(vparams) = expr; Closure($anonfun) }
*
* where the closure's type is the target type of the expression (FunctionN, unless
* otherwise specified).
*/
// def Closure(meth: TermSymbol, rhsFn: List[List[Tree]] => Tree, targs: List[Tree] = Nil, targetType: Type = NoType): Block = {
// val targetTpt = if (targetType.exists) TypeTree(targetType) else EmptyTree
// val call =
// if (targs.isEmpty) Ident(TermRef(NoPrefix, meth))
// else TypeApply(Ident(TermRef(NoPrefix, meth)), targs)
// Block(
// DefDef(meth, rhsFn) :: Nil,
// Closure(Nil, call, targetTpt))
// }
def CaseDef(pat: Tree, guard: Tree, body: Tree): CaseDef =
new CaseDef(pat, guard, body)
def Match(selector: Tree, cases: List[CaseDef]): Match =
new Match(selector, cases)
def Return(expr: Tree, from: Tree): Return =
new Return(expr, from)
def Try(block: Tree, cases: List[CaseDef], finalizer: Tree): Try =
new Try(block, cases, finalizer)
def SeqLiteral(elems: List[Tree]): SeqLiteral =
new SeqLiteral(elems)
// def SeqLiteral(tpe: Type, elems: List[Tree]): SeqLiteral =
// if (tpe derivesFrom defn.SeqClass) SeqLiteral(elems) else JavaSeqLiteral(elems)
def JavaSeqLiteral(elems: List[Tree]): SeqLiteral =
new JavaSeqLiteral(elems)
def TypeTree(original: Tree): TypeTree =
TypeTree(original.tpe, original)
def TypeTree(tp: Type, original: Tree = EmptyTree): TypeTree =
new TypeTree(original).withType(tp)
/** Factory for an empty `TypeTree` with no original tree. The result type is
 *  declared explicitly for consistency with the other `TypeTree` overloads. */
def TypeTree(): TypeTree = new TypeTree(EmptyTree)
/** Factory for a singleton-type node `ref.type`. */
def SingletonTypeTree(ref: Tree): SingletonTypeTree =
  new SingletonTypeTree(ref)
/** Factory for an applied-type node `tycon[args]`. */
def AppliedTypeTree(tycon: Tree, args: List[Tree]): AppliedTypeTree =
  new AppliedTypeTree(tycon, args)
/** Factory for a by-name type node `=> result`. */
def ByNameTypeTree(result: Tree): ByNameTypeTree =
  new ByNameTypeTree(result)
/** Factory for a type-bounds node `>: lo <: hi`. */
def TypeBoundsTree(lo: Tree, hi: Tree): TypeBoundsTree =
  new TypeBoundsTree(lo, hi)
/** Factory for a pattern binder built from a symbol's name. */
def Bind(sym: TermSymbol, body: Tree): Bind =
  new Bind(sym.name, body)
/** Factory for a pattern binder `name @ body`. */
def Bind(name: TermName, body: Tree): Bind =
  new Bind(name, body)
/** Factory for a pattern alternative `p1 | p2 | ...`. */
def Alternative(trees: List[Tree]): Alternative =
  new Alternative(trees)
/** Factory for an extractor pattern node.
 *
 *  NOTE(review): the `proto` parameter is accepted but not passed to the
 *  constructed node -- confirm whether it is intentionally dropped in this port.
 */
def UnApply(fun: Tree, implicits: List[Tree], patterns: List[Tree], proto: Type): UnApply =
  new UnApply(fun, implicits, patterns)
/** Factory for a value definition built from a symbol: the symbol's name and
 *  a `TypeTree` over its info are used. */
def ValDef(sym: TermSymbol, rhs: Tree = EmptyTree): ValDef =
  new ValDef(sym.name, TypeTree(sym.info), rhs)
/** Factory for a value definition from its explicit parts. */
def ValDef(name: TermName, tpt: TypeTree, rhs: Tree): ValDef =
  new ValDef(name, tpt, rhs)
// def SyntheticValDef(name: TermName, rhs: Tree): ValDef =
// ValDef(ctx.newSymbol(ctx.owner, name, Synthetic, rhs.tpe.widen, coord = rhs.pos), rhs)
// def DefDef(sym: TermSymbol, rhs: Tree = EmptyTree): DefDef =
// DefDef(sym, Function.const(rhs) _)
// def DefDef(sym: TermSymbol, rhsFn: List[List[Tree]] => Tree): DefDef =
// polyDefDef(sym, Function.const(rhsFn))
/** Factory for a method definition from its explicit parts (name, type params,
 *  value param lists, result type tree and right-hand side). */
def DefDef(name: TermName, tparams: List[TypeDef], vparamss: List[List[ValDef]], tpt: Tree, rhs: Tree): DefDef = new DefDef(name, tparams, vparamss, tpt, rhs)
/** Factory for a type definition built from a symbol: the symbol's name and a
 *  `TypeTree` over its info are used. */
def TypeDef(sym: TypeSymbol): TypeDef =
  new TypeDef(sym.name, TypeTree(sym.info))
/** Factory for a type definition from its explicit parts. The result type is
 *  declared explicitly for consistency with the symbol-based overload above. */
def TypeDef(name: TypeName, tpt: Tree /*TypeTree*/): TypeDef =
  new TypeDef(name, tpt)
/** Factory for a class template: constructor, parent references, self
 *  definition and body statements. */
def Template(constr: DefDef, parents: List[Tree], self: ValDef, body: List[Tree]): Template = new Template(constr, parents, self, body)
/** Factory for a class definition, encoded as a `TypeDef` whose right-hand
 *  side is the template `impl`. The result type is declared explicitly for
 *  consistency with the symbol-based overload below. */
def ClassDef(name: TypeName, impl: Tree): TypeDef =
  new TypeDef(name, impl)
/** Symbol-based class-definition factory; not implemented in this port
 *  (deliberately left as `???`). */
def ClassDef(cls: ClassSymbol, constr: DefDef, body: List[Tree], superArgs: List[Tree] = Nil): TypeDef = ???
/**
* An anonymous class
*
* new parents { forwarders }
*
*/
// def AnonClass(parents: List[Type], fns: List[TermSymbol], methNames: List[TermName]): Block = {
// val owner = fns.head.owner
// val parents1 =
// if (parents.head.classSymbol.is(Trait)) defn.ObjectClass.typeRef :: parents
// else parents
// val cls = ctx.newNormalizedClassSymbol(owner, tpnme.ANON_FUN, Synthetic, parents1,
// coord = fns.map(_.pos).reduceLeft(_ union _))
// val constr = ctx.newConstructor(cls, Synthetic, Nil, Nil).entered
// def forwarder(fn: TermSymbol, name: TermName) = {
// val fwdMeth = fn.copy(cls, name, Synthetic | Method).entered.asTerm
// DefDef(fwdMeth, prefss => ref(fn).appliedToArgss(prefss))
// }
// val forwarders = (fns, methNames).zipped.map(forwarder)
// val cdef = ClassDef(cls, DefDef(constr), forwarders)
// Block(cdef :: Nil, New(cls.typeRef, Nil))
// }
// { <label> def while$(): Unit = if (cond) { body; while$() } ; while$() }
// def WhileDo(owner: Symbol, cond: Tree, body: List[Tree]): Tree = {
// val sym = ctx.newSymbol(owner, nme.WHILE_PREFIX, Flags.Label | Flags.Synthetic,
// MethodType(Nil, defn.UnitType), coord = cond.pos)
//
// val call = Apply(ref(sym), Nil)
// val rhs = If(cond, Block(body, call), unitLiteral)
// Block(List(DefDef(sym, rhs)), call)
// }
/** Factory for an import clause `import expr.{selectors}`. */
def Import(expr: Tree, selectors: List[Tree]): Import =
  new Import(expr, selectors)
/** Factory for a package definition `package pid { stats }`. */
def PackageDef(pid: RefTree, stats: List[Tree]): PackageDef =
  new PackageDef(pid, stats)
/** Factory for an annotated tree node. */
def Annotated(annot: Tree, arg: Tree): Annotated =
  new Annotated(annot, arg)
/** Factory for a thicket: a flat collection of trees standing in for one tree. */
def Thicket(trees: List[Tree]): Thicket =
  new Thicket(trees)
// def Throw(expr: Tree): Tree =
// ref(defn.throwMethod).appliedTo(expr)
}
}
}
| VladimirNik/tasty | plugin/src/main/scala/scala/tasty/internal/dotc/ast/Trees.scala | Scala | bsd-3-clause | 22,592 |
package spatutorial.client.services
import diode.ActionResult._
import diode.RootModelRW
import diode.data._
import kidstravel.client.services._
import kidstravel.shared.poi.Poi
import utest._
// Test suite for the Diode action handlers of the SPA circuit.
//
// NOTE(review): the entire suite body below is commented out, so this suite
// currently runs zero tests. Also, the file declares package
// `spatutorial.client.services` while importing from
// `kidstravel.client.services` -- presumably a leftover from adapting the
// scalajs-spa tutorial; confirm the intended package before re-enabling.
object SPACircuitTests extends TestSuite {
  // utest entry point; the disabled cases exercise PoiHandler's reactions to
  // Refresh/Update/Delete actions.
  def tests = TestSuite {
    /*
    'TodoHandler - {
      val model = Ready(Pois(Seq(
        Poi(0, "Test1"),
        Poi(1, "Test2"),
        Poi(2, "Test3")
      )))
      val newTodosPois = Seq(
        Poi(3, "Test3")
      )
      def build = new PoiHandler(new RootModelRW(model))
      'RefreshPois - {
        val h = build
        val result = h.handle(RefreshPois)
        result match {
          case EffectOnly(effects) =>
            assert(effects.size == 1)
          case _ =>
            assert(false)
        }
      }
      'UpdateAllPois - {
        val h = build
        val result = h.handle(UpdateAllPois(newTodosPois))
        assert(result == ModelUpdate(Ready(Pois(newTodosPois))))
      }
      'UpdatePoiAdd - {
        val h = build
        val result = h.handle(UpdatePoi(Poi(4, "Test4")))
        result match {
          case ModelUpdateEffect(newValue, effects) =>
            assert(newValue.get.pois.size == 4)
            assert(newValue.get.pois(3).id == "4")
            assert(effects.size == 1)
          case _ =>
            assert(false)
        }
      }
      'UpdatePoi - {
        val h = build
        val result = h.handle(UpdatePoi(Poi(1, "Test111")))
        result match {
          case ModelUpdateEffect(newValue, effects) =>
            assert(newValue.get.pois.size == 3)
            assert(newValue.get.pois.head.name == "Test111")
            assert(effects.size == 1)
          case _ =>
            assert(false)
        }
      }
      'DeletePoi - {
        val h = build
        val result = h.handle(DeletePoi(model.get.pois.head))
        result match {
          case ModelUpdateEffect(newValue, effects) =>
            assert(newValue.get.pois.size == 2)
            assert(newValue.get.pois.head.name == "Test2")
            assert(effects.size == 1)
          case _ =>
            assert(false)
        }
      }
    }
    */
  }
}
| devkat/kidstravel | client/src/test/scala/spatutorial/client/services/SPACircuitTests.scala | Scala | apache-2.0 | 2,279 |
// Marker class referenced as a type bound by the fixture below.
class L
// Compiler-test fixture exercising type members and secondary constructors
// (part of the TASTY exttests suite -- the exact shape of the declarations is
// the point of the test, so the code must not be "cleaned up").
class Test[X, M/* <: X*/, U <: L] extends Object {
  type Y = String          // concrete type alias
  type Z                   // abstract type member, unbounded
  type Q <: X              // abstract, upper-bounded by the type parameter X
  type W >: L              // abstract, lower-bounded by class L
  // Secondary constructor delegating to the primary; the trailing cast is a
  // discarded expression kept deliberately for the pickling test.
  def this(x: Int) = {this(); null.asInstanceOf[X]; }
  def test = null.asInstanceOf[X]
  // Secondary constructor delegating to the other secondary constructor.
  def this(x: Int, y: Int) = { this(y); null.asInstanceOf[X]; }
}
| VladimirNik/tasty | exttests/tests/typedef2/TypeDef2.scala | Scala | bsd-3-clause | 269 |
package scala.meta
package internal
package trees
import scala.language.experimental.macros
import scala.language.reflectiveCalls
import scala.annotation.StaticAnnotation
import scala.reflect.macros.whitebox.Context
import scala.meta.internal.trees.{Reflection => AstReflection}
import org.scalameta.internal.MacroHelpers
// Detects scala.meta ASTs defined in the current compilation unit
// and then saves them in a runtime annotation on the annottee.
/** Macro annotation: expanding it rewrites the annottee via
 *  [[RegistryMacros.impl]], which records the scala.meta ASTs detected in the
 *  current compilation unit on the annotated object. */
class registry extends StaticAnnotation {
  def macroTransform(annottees: Any*): Any = macro RegistryMacros.impl
}
/** Macro bundle implementing the [[registry]] annotation expansion. */
class RegistryMacros(val c: Context) extends AstReflection with MacroHelpers {
  // Wire the macro context's universe/mirror into AstReflection.
  lazy val u: c.universe.type = c.universe
  lazy val mirror: u.Mirror = c.mirror
  import c.universe._
  // Rewrites the annotated module: scans the enclosing compilation unit for
  // AST definitions and appends a runtime `registry(...)` annotation listing
  // them to the module's annotation list. Everything else is left unchanged.
  def impl(annottees: Tree*): Tree = annottees.transformAnnottees(new ImplTransformer {
    override def transformModule(mdef: ModuleDef): ModuleDef = {
      val ModuleDef(mods @ Modifiers(flags, privateWithin, anns), name, Template(parents, self, stats)) = mdef
      // Structural-type cast: `enclosingUnit` is not part of the public
      // whitebox Context API, so it is reached reflectively here.
      val enclosingUnit = c.asInstanceOf[{ def enclosingUnit: { def body: Tree } }].enclosingUnit.body
      val anns1 = anns :+ q"new $AstMetadataModule.registry(${enclosingUnit.detectAst})"
      ModuleDef(Modifiers(flags, privateWithin, anns1), name, Template(parents, self, stats))
    }
  })
}
| olafurpg/scalameta | scalameta/common/shared/src/main/scala/scala/meta/internal/trees/registry.scala | Scala | bsd-3-clause | 1,317 |
package org.vaadin.addons.rinne
import com.vaadin.event.ShortcutListener
import com.vaadin.server.ThemeResource
import org.scalatest.FunSpec
/** Spec covering the Scala-friendly mixin API exposed by the rinne Vaadin
 *  wrappers: simple property setters on components and the
 *  `shortcutListeners` collection on tables. */
class AbstractComponentMixinSpec extends FunSpec {

  describe("An AbstractComponentMixinSpec") {
    describe("should allow to set") {
      it("data") {
        val field = new VTextField
        field.data = "opa"
        assert(field.data === "opa")
      }
      it("immediate") {
        val field = new VTextField
        // default is false; flipping it must be observable
        assert(field.immediate === false)
        field.immediate = true
        assert(field.immediate === true)
      }
      it("description") {
        val field = new VTextField
        // description is Option-typed: setting a String wraps it in Some,
        // setting None clears it
        field.description = "qwe"
        assert(field.description === Some("qwe"))
        field.description = None
        assert(field.description === None)
      }
    }
  }

  describe("shortcutListeners should") {
    // Plain function used only for the negative `contains` check below.
    val listener1 = (e: Any) => println(e)
    it("shortcutListeners.add a listener to table") {
      val table = new VTable
      val action = new KeyShortcutAction("aaa", KeyShortcut(KeyCode.A, KeyModifier.Shift), {})
      table.shortcutListeners += action
      assert(table.shortcutListeners.size === 1)
    }
    it("shortcutListeners.remove an action from table") {
      val table = new VTable
      val action = new KeyShortcutAction("aaa", KeyShortcut(KeyCode.A), {})
      table.shortcutListeners += action
      table.shortcutListeners -= action
      assert(table.shortcutListeners.size === 0)
    }
    it("shortcutListeners.remove a listener from table") {
      val table = new VTable
      // variant constructor taking a ThemeResource icon
      val action2 = new KeyShortcutAction("aac", new ThemeResource("aaa"), KeyShortcut(KeyCode.C), {})
      table.shortcutListeners += action2
      table.shortcutListeners -= action2
      assert(table.shortcutListeners.size === 0)
    }
    it("shortcutListeners.iterator returns added listeners") {
      val table = new VTable
      var action1executed = false
      var action2executed = false
      val action1 = new KeyShortcutAction("aaa", {
        action1executed = true
      }, KeyModifier.Shift)
      val action2 = new KeyShortcutAction("aac", KeyShortcut(KeyCode.C), {
        action2executed = true
      })
      table.shortcutListeners += action1
      table.shortcutListeners += action2
      // invoking each iterated listener must run the registered side effect
      val iter = table.shortcutListeners.iterator
      iter.next().apply(null)
      iter.next().apply(null)
      assert(!iter.hasNext)
      assert(action1executed)
      assert(action2executed)
    }
    it("shortcutListeners.contains returns false for non-added listener") {
      val table = new VTable
      assert(!table.shortcutListeners.contains(listener1))
    }
    it("execute listeners") {
      var executed = false
      // listener registered inside the component's own initializer block
      val table = new VTable {
        shortcutListeners += new KeyShortcutAction("aaa", (_, _) => {
          executed = true
        }, KeyModifier.Shift)
      }
      import scala.collection.JavaConverters._
      // drive the underlying Vaadin ShortcutListeners directly
      table.shortcutListeners.listeners.asScala
        .foreach { case e: ShortcutListener => e.handleAction(null, null) }
      assert(executed)
    }
  }
}
| LukaszByczynski/rinne | src/test/scala/org/vaadin/addons/rinne/AbstractComponentMixinSpec.scala | Scala | apache-2.0 | 3,104 |
///////////////////////////////////////////////////////////////
// © ООО «Праймтолк», 2014 //
// Все права принадлежат компании ООО «Праймтолк». //
///////////////////////////////////////////////////////////////
/**
* SynapseGrid
* © Primetalk Ltd., 2014.
* All rights reserved.
* Authors: A.Zhizhelev
*
* Created: 30.05.14, zhizhelev
*/
package ru.primetalk.synapse.ontology
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class OntologyTest extends FunSuite {
  import ru.primetalk.synapse.ontology.resources._

  // Local marker type used as the entity kind for the identifier below.
  trait Person

  // Smoke test: a PropertyValue can be constructed for a typed instance id.
  // No assertion is made -- the test only checks that the construction compiles
  // and does not throw.
  test("Property value for an instance") {
    val ivan = LongId[Person](1L)
    // name of a person
    PropertyValue(ivan, Name, "Иван")
  }
}
| Primetalk/typed-map | synapse-frames/src/test/scala/ru/primetalk/synapse/ontology/OntologyTest.scala | Scala | bsd-2-clause | 875 |
package org.scalex
package model
/**
* An entity that is a member of a template. All entities, including templates, are member of another entity
* except for parameters and annotations. Note that all members of a template are modelled, including those that are
* inherited and not declared locally.
*/
case class Member(

  /** A member is an entity. */
  entity: Entity,

  /** The comment attached to this member, if any. */
  comment: Option[Comment],

  /** The templates in which this member has been declared. The first element of the list is the template that contains
    * the currently active declaration of this member, subsequent elements are declarations that have been overridden. If
    * the first element is equal to `inTemplate`, the member is declared locally, if not, it has been inherited. All
    * elements of this list are in the linearization of `inTemplate`. */
  inDefinitionTemplates: List[QualifiedName],

  /**
   * The flags that have been set for this entity. The following flags are supported:
   * `implicit`, `sealed`, `abstract`, `deprecated`, `migration` and `final`.
   */
  flags: List[Flag],

  /**
   * For members representing values: the type of the value returned by this member; for members
   * representing types: the type itself.
   */
  resultType: TypeEntity,

  /** def, val, lazy val, alias type, ... */
  role: Role,

  /**
   * If this symbol is a use case, the useCaseOf will contain the member it was derived from, containing the full
   * signature and the complete parameter descriptions.
   */
  // useCaseOf: Option[Member],

  /** If this member originates from an implicit conversion, we set the implicit information to the correct origin */
  // byConversion: Option[ImplicitConversion],

  /** The identity of this member, used for linking */
  // signature: String,

  /** Compatibility signature, will be removed from future versions */
  // signatureCompat: String,

  /** Indicates whether the member is inherited by implicit conversion */
  isImplicitlyInherited: Boolean)
/** Indicates whether there is another member with the same name in the template that will take precedence */
// isShadowedImplicit: Boolean,
/**
* Indicates whether there are other implicitly inherited members that have similar signatures (and thus they all
* become ambiguous)
*/
// isAmbiguousImplicit: Boolean,
/** Indicates whether the implicitly inherited member is shadowed or ambiguous in its template */
// isShadowedOrAmbiguousImplicit: Boolean
| kzys/scalex | src/main/scala/model/Member.scala | Scala | mit | 2,527 |
package net.white_azalea.models.exporter
import java.io.File
import net.white_azalea.datas.javadoc.PackageDoc
import net.white_azalea.datas.junit.TestCase
/**
 * Renders the combined JavaDoc/JUnit report, optionally through a
 * user-supplied template file.
 */
object Exporter {

  /**
   * Merges the JUnit results with their JavaDoc package documentation and
   * renders the merged model.
   *
   * @param file    optional custom template file; `None` uses the default template
   * @param docs    parsed JavaDoc package documentation (by-name, evaluated on use)
   * @param results parsed JUnit test cases (by-name, evaluated on use)
   * @return the rendered report as a string
   */
  def export(file: Option[File], docs: => List[PackageDoc], results: => List[TestCase]): String = {
    val merged = TestPackageResult.parse(results, docs)
    new Template(file).render(merged)
  }
}
| Sunao-Yoshii/JUnitDocMarge | src/main/scala/net/white_azalea/models/exporter/Exporter.scala | Scala | apache-2.0 | 619 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters the dataset and retrieves a sample of code snippets that meet specific criteria, giving a quick overview of the dataset's contents without performing any deeper analysis.