code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package definiti.scalamodel.builder
import definiti.common.ast.{Library, Namespace, Root}
import definiti.scalamodel.builder.typeVerification.TypeVerificationBuilder
import definiti.scalamodel.{Configuration, ScalaAST}
// Entry point of the Scala model generation: stacks all the specialised
// builder traits into one builder that turns a definiti AST `Root` into a
// Scala AST `Root`.
class ScalaModelBuilder(val config: Configuration, val library: Library)
extends CommonBuilder
with ClassDefinitionBuilder
with ExpressionBuilder
with ImportExtractor
with JsonBuilder
with NamedFunctionBuilder
with PackageBuilder
with TypeBuilder
with TypeVerificationBuilder
with VerificationBuilder {
// Builds one Scala package per namespace and drops packages that end up empty.
def build(root: Root): ScalaAST.Root = {
ScalaAST.Root(
packages = root.namespaces
.map(buildPackage)
.filter(_.elements.nonEmpty)
)
}
// Maps a namespace to a Scala package; the anonymous (empty-name) namespace
// is emitted under the package name "root".
private def buildPackage(namespace: Namespace): ScalaAST.Package = {
ScalaAST.Package(
if (namespace.fullName.isEmpty) "root" else namespace.fullName,
imports = extractImportsFromNamespace(namespace) ++ jsonImports,
elements = buildNamespaceContent(namespace)
)
}
// NOTE(review): the implicit conversions below silently wrap any value into a
// Seq/Option, which can mask type errors at call sites - kept because the
// builder traits rely on them as a lightweight DSL.
implicit def valueToValueSeq[A](value: A): Seq[A] = Seq(value)
implicit def valueToValueOption[A](value: A): Option[A] = Some(value)
implicit def optionToSeq[A](option: Option[A]): Seq[A] = option match {
case Some(value) => Seq(value)
case None => Nil
}
// Renders a Scala AST type to its source-code form.
implicit def typeToString(typ: ScalaAST.Type): String = typ.toCode
// Wraps a raw code string into a simple expression node.
implicit def stringToExpression(rawStatement: String): ScalaAST.Expression = ScalaAST.SimpleExpression(rawStatement)
} | definiti/definiti-scala-model | src/main/scala/definiti/scalamodel/builder/ScalaModelBuilder.scala | Scala | mit | 1,492 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.junit
import org.junit._
import org.scalajs.junit.utils._
// Fixture: a @BeforeClass hook that always throws, used to verify how the
// JUnit runner reports a failure that happens before any test executes.
object ExceptionBeforeClass {
@BeforeClass def beforeClass(): Unit =
throw new IllegalArgumentException("foo")
}
// The test methods are intentionally empty: they should never run because the
// companion object's @BeforeClass hook fails first.
class ExceptionBeforeClass {
@Test def test1(): Unit = ()
@Test def test2(): Unit = ()
}
// Hooks the fixture above into the shared JUnitTest assertion harness.
class ExceptionBeforeClassAssertions extends JUnitTest
| scala-js/scala-js | junit-test/shared/src/test/scala/org/scalajs/junit/ExceptionBeforeClass.scala | Scala | apache-2.0 | 623 |
// Fixture class: a single field carrying an annotation that must be retained
// at runtime. `Annot` is declared elsewhere in this test - not visible here.
class C {
@Annot(optionType=classOf[String]) val k = 0
}
object Test {
// Prints the runtime-visible annotations of C's declared fields. Fields are
// sorted by name and annotation-less fields are dropped so the printed
// output stays stable regardless of compiler flags (see inline comment).
def main(args: Array[String]) {
val xs = (
classOf[C].getDeclaredFields.toList
. sortBy(f => f.getName)
. map(f => f.getAnnotations.toList)
. filterNot (_.isEmpty) // there are extra fields under -Xcheckinit
)
println(xs)
}
}
| felixmulder/scala | test/files/jvm/t3003/Test_1.scala | Scala | bsd-3-clause | 346 |
package models
// A full track object as returned by a web API; field names mirror the JSON
// payload verbatim, hence the snake_case.
case class Track(
album: AlbumSimple,
artists: List[ArtistSimple],
available_markets: List[String], // presumably ISO country codes - verify against the API docs
disc_number: Int,
duration_ms: Int, // track length in milliseconds (per the field name)
explicit: Boolean,
external_ids: Map[String, String],
external_urls: Map[String, String],
href: String,
id: String,
is_playable: Option[Boolean], // optional: not always present in the payload
linked_from: Option[TrackLink], // optional: not always present in the payload
name: String,
popularity: Int,
preview_url: String, // NOTE(review): if the API can omit this, Option[String] would be safer - confirm
track_number: Int,
`type`: String, // backticks needed because `type` is a Scala keyword
uri: String
)
| Jakeway/spotify-web-api-scala | src/main/scala/models/Track.scala | Scala | mit | 735 |
/*
* Copyright (C) 2005, The OpenURP Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.spa.doc.model
import org.beangle.data.orm.{IdGenerator, MappingModule}
/** ORM mapping definitions for the SPA document module, using the beangle
 * MappingModule DSL. */
class DefaultMapping extends MappingModule {
def binding(): Unit = {
// Default second-level cache region for the entities bound below.
defaultCache("openurp.spa", "read-write")
bind[DocType] declare { e =>
// Widen the notice column beyond the default string length.
e.notice is length(3000)
}
bind[PrintConfig]
bind[PrintLog]
bind[PrintQuota]
bind[Coupon]
bind[DownloadLog]
// Everything is cacheable except the three excluded entities - presumably
// because they are write-heavy log/quota records; verify if changing.
all.except(classOf[PrintLog], classOf[PrintQuota], classOf[DownloadLog]).cacheable()
}
}
| openurp/api | spa/src/main/scala/org/openurp/spa/doc/model/DefaultMapping.scala | Scala | lgpl-3.0 | 1,202 |
package scjson.ast
import scala.language.implicitConversions
/** Implicit enrichments that add navigation helpers to JsonValue and
 * Option[JsonValue]. */
object JsonNavigation {
implicit def extendJsonNavigation(value:JsonValue):JsonNavigation = new JsonNavigation(Some(value))
implicit def extendJsonNavigationOption(value:Option[JsonValue]):JsonNavigation = new JsonNavigation(value)
}
/**
 * None-safe navigation over an optional JSON value: every accessor yields
 * None when the underlying value is absent or not of the requested JSON type.
 */
@SuppressWarnings(Array("org.wartremover.warts.Overloading"))
final class JsonNavigation(peer:Option[JsonValue]) {
// Descend into an object field by name.
def /(name:String):Option[JsonValue] = toMap flatMap { _ get name }
// Descend into an array element by index; None when out of bounds (lift).
def /(index:Int):Option[JsonValue] = arraySeq flatMap { _ lift index }
//------------------------------------------------------------------------------
def nullRef:Option[Unit] = peer flatMap { _.asNull }
def string:Option[String] = peer flatMap { _.asString }
def boolean:Option[Boolean] = peer flatMap { _.asBoolean }
def number:Option[BigDecimal] = peer flatMap { _.asNumber }
def arraySeq:Option[Seq[JsonValue]] = peer flatMap { _.asArray }
def objectSeq:Option[Seq[(String,JsonValue)]] = peer flatMap { _.asObject }
//------------------------------------------------------------------------------
// The primitive conversions below narrow a BigDecimal and may lose precision.
def toLong:Option[Long] = number map { _.longValue }
def toInt:Option[Int] = number map { _.intValue }
def toDouble:Option[Double] = number map { _.doubleValue }
def toFloat:Option[Float] = number map { _.floatValue }
def toMap:Option[Map[String,JsonValue]] = objectSeq map { _.toMap }
}
| ritschwumm/scjson | modules/ast/src/main/scala/JsonNavigation.scala | Scala | bsd-2-clause | 1,448 |
import scala.util.Random.nextInt
/** Result of a binary search: `status` is true when the item was found and
 * `foundAt` is the index of the match (-1 when not found). */
case class SearchOutcome(status: Boolean, foundAt: Int)

object BinarySearching2 {
  import scala.annotation.tailrec
  import scala.util.Random.nextInt

  /** Self-checking demo: every value drawn from a sorted array must be found,
   * and values guaranteed to lie outside its range must not be. */
  def main(args: Array[String]): Unit = {
    val n = 10
    val MaxValue = 100
    val randInts = Array.fill(n)(nextInt(MaxValue))
    val sortedInts = randInts.sorted
    val outcomes = randInts map { item => intArrayBinarySearch(sortedInts, item) }
    val numberTrue = outcomes count { _.status }
    if (numberTrue == randInts.length)
      println("All random integers (1st set) were found successfully!")
    // Values in [MaxValue, 2 * MaxValue) cannot occur in the first array,
    // so none of them should be found.
    val randInts2 = Array.fill(n)(nextInt(MaxValue) + MaxValue)
    val outcomes2 = randInts2 map { item => intArrayBinarySearch(sortedInts, item) }
    val numberTrue2 = outcomes2 count { _.status }
    if (numberTrue2 == 0)
      println("All random integers (2nd set) were not found successfully")
    println("If you see 3 lines of output, this program works. We will use unit tests next time.")
  }

  /** Binary-search `data` (which must be sorted ascending) for `item`.
   * Convenience wrapper so callers never deal with min/max/mid. */
  def intArrayBinarySearch(data: Array[Int], item: Int): SearchOutcome =
    intArrayBinarySearchRange(data, 0, data.length - 1, item)

  /** Recursive worker over the inclusive index range [min, max].
   * Tail-recursive, so the compiler rewrites it into a loop. */
  @tailrec
  def intArrayBinarySearchRange(data: Array[Int], min: Int, max: Int, item: Int): SearchOutcome =
    if (min > max)
      SearchOutcome(false, -1)
    else {
      // min + (max - min) / 2 cannot overflow Int, unlike (min + max) / 2,
      // which breaks for indices near Int.MaxValue (the classic binary-search bug).
      val mid = min + (max - min) / 2
      if (item == data(mid)) SearchOutcome(true, mid)
      else if (item > data(mid)) intArrayBinarySearchRange(data, mid + 1, max, item)
      else intArrayBinarySearchRange(data, min, mid - 1, item)
    }
}
| LoyolaChicagoBooks/introcs-scala-examples | binary_searching2/binary_searching2.scala | Scala | gpl-3.0 | 1,720 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.worker
import java.io._
import java.nio.charset.StandardCharsets
import scala.collection.JavaConverters._
import com.google.common.io.Files
import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.deploy.{ApplicationDescription, ExecutorState}
import org.apache.spark.deploy.DeployMessages.ExecutorStateChanged
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.UI._
import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.util.{ShutdownHookManager, Utils}
import org.apache.spark.util.logging.FileAppender
/**
* Manages the execution of one executor process.
* This is currently only used in standalone mode.
*/
private[deploy] class ExecutorRunner(
val appId: String,
val execId: Int,
val appDesc: ApplicationDescription,
val cores: Int,
val memory: Int,
val worker: RpcEndpointRef,
val workerId: String,
val webUiScheme: String,
val host: String,
val webUiPort: Int,
val publicAddress: String,
val sparkHome: File,
val executorDir: File,
val workerUrl: String,
conf: SparkConf,
val appLocalDirs: Seq[String],
@volatile var state: ExecutorState.Value)
extends Logging {
// Identifier used in thread names and log lines: "<appId>/<execId>".
private val fullId = appId + "/" + execId
private var workerThread: Thread = null
private var process: Process = null
// Appenders that roll the child process' stdout/stderr into files.
private var stdoutAppender: FileAppender = null
private var stderrAppender: FileAppender = null
// Timeout to wait for when trying to terminate an executor.
private val EXECUTOR_TERMINATE_TIMEOUT_MS = 10 * 1000
// NOTE: This is now redundant with the automated shut-down enforced by the Executor. It might
// make sense to remove this in the future.
private var shutdownHook: AnyRef = null
// Spawns the thread that downloads and runs the executor, and registers a
// shutdown hook so the child process is killed when the worker JVM exits.
private[worker] def start() {
workerThread = new Thread("ExecutorRunner for " + fullId) {
override def run() { fetchAndRunExecutor() }
}
workerThread.start()
// Shutdown hook that kills actors on shutdown.
shutdownHook = ShutdownHookManager.addShutdownHook { () =>
// It's possible that we arrive here before calling `fetchAndRunExecutor`, then `state` will
// be `ExecutorState.LAUNCHING`. In this case, we should set `state` to `FAILED`.
if (state == ExecutorState.LAUNCHING) {
state = ExecutorState.FAILED
}
killProcess(Some("Worker shutting down")) }
}
/**
* Kill executor process, wait for exit and notify worker to update resource status.
*
* @param message the exception message which caused the executor's death
*/
private def killProcess(message: Option[String]) {
var exitCode: Option[Int] = None
if (process != null) {
logInfo("Killing process!")
// Stop the log appenders first so the output files are flushed and closed.
if (stdoutAppender != null) {
stdoutAppender.stop()
}
if (stderrAppender != null) {
stderrAppender.stop()
}
exitCode = Utils.terminateProcess(process, EXECUTOR_TERMINATE_TIMEOUT_MS)
if (exitCode.isEmpty) {
logWarning("Failed to terminate process: " + process +
". This process will likely be orphaned.")
}
}
try {
worker.send(ExecutorStateChanged(appId, execId, state, message, exitCode))
} catch {
// The worker endpoint may already be shut down; dropping the message is tolerated.
case e: IllegalStateException => logWarning(e.getMessage(), e)
}
}
/** Stop this executor runner, including killing the process it launched */
private[worker] def kill() {
if (workerThread != null) {
// the workerThread will kill the child process when interrupted
workerThread.interrupt()
workerThread = null
state = ExecutorState.KILLED
try {
ShutdownHookManager.removeShutdownHook(shutdownHook)
} catch {
// Removal fails when shutdown is already in progress; safe to ignore.
case e: IllegalStateException => None
}
}
}
/** Replace variables such as {{EXECUTOR_ID}} and {{CORES}} in a command argument passed to us */
private[worker] def substituteVariables(argument: String): String = argument match {
case "{{WORKER_URL}}" => workerUrl
case "{{EXECUTOR_ID}}" => execId.toString
case "{{HOSTNAME}}" => host
case "{{CORES}}" => cores.toString
case "{{APP_ID}}" => appId
case other => other
}
/**
* Download and run the executor described in our ApplicationDescription
*/
private def fetchAndRunExecutor() {
try {
// Launch the process
// Substitute app/executor-id placeholders inside the JVM options first.
val subsOpts = appDesc.command.javaOpts.map {
Utils.substituteAppNExecIds(_, appId, execId.toString)
}
val subsCommand = appDesc.command.copy(javaOpts = subsOpts)
val builder = CommandUtils.buildProcessBuilder(subsCommand, new SecurityManager(conf),
memory, sparkHome.getAbsolutePath, substituteVariables)
val command = builder.command()
// Redact sensitive arguments before logging the launch command.
val redactedCommand = Utils.redactCommandLineArgs(conf, command.asScala)
.mkString("\\"", "\\" \\"", "\\"")
logInfo(s"Launch command: $redactedCommand")
builder.directory(executorDir)
builder.environment.put("SPARK_EXECUTOR_DIRS", appLocalDirs.mkString(File.pathSeparator))
// In case we are running this from within the Spark Shell, avoid creating a "scala"
// parent process for the executor command
builder.environment.put("SPARK_LAUNCH_WITH_SCALA", "0")
// Add webUI log urls
val baseUrl =
if (conf.get(UI_REVERSE_PROXY)) {
s"/proxy/$workerId/logPage/?appId=$appId&executorId=$execId&logType="
} else {
s"$webUiScheme$publicAddress:$webUiPort/logPage/?appId=$appId&executorId=$execId&logType="
}
builder.environment.put("SPARK_LOG_URL_STDERR", s"${baseUrl}stderr")
builder.environment.put("SPARK_LOG_URL_STDOUT", s"${baseUrl}stdout")
process = builder.start()
val header = "Spark Executor Command: %s\\n%s\\n\\n".format(
redactedCommand, "=" * 40)
// Redirect its stdout and stderr to files
val stdout = new File(executorDir, "stdout")
stdoutAppender = FileAppender(process.getInputStream, stdout, conf)
val stderr = new File(executorDir, "stderr")
Files.write(header, stderr, StandardCharsets.UTF_8)
stderrAppender = FileAppender(process.getErrorStream, stderr, conf)
state = ExecutorState.RUNNING
worker.send(ExecutorStateChanged(appId, execId, state, None, None))
// Wait for it to exit; executor may exit with code 0 (when driver instructs it to shutdown)
// or with nonzero exit code
val exitCode = process.waitFor()
state = ExecutorState.EXITED
val message = "Command exited with code " + exitCode
worker.send(ExecutorStateChanged(appId, execId, state, Some(message), Some(exitCode)))
} catch {
case interrupted: InterruptedException =>
logInfo("Runner thread for executor " + fullId + " interrupted")
state = ExecutorState.KILLED
killProcess(None)
case e: Exception =>
logError("Error running executor", e)
state = ExecutorState.FAILED
killProcess(Some(e.toString))
}
}
}
| aosagie/spark | core/src/main/scala/org/apache/spark/deploy/worker/ExecutorRunner.scala | Scala | apache-2.0 | 7,789 |
package scala.collection
import org.junit.Assert._
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.JUnit4
@RunWith(classOf[JUnit4])
class IndexedSeqViewTest {
// Views are lazy, so toString must not force elements - it renders the
// "<not computed>" placeholder instead.
@Test
def _toString(): Unit = {
assertEquals("IndexedSeqView(<not computed>)", IndexedSeq(1, 2, 3).view.toString)
}
// knownSize on a view iterator must stay exact through take/slice.
@Test
def iteratorKnownSize(): Unit = {
assertEquals(5, IndexedSeq(1, 2, 3, 4, 5).view.iterator.knownSize)
assertEquals(2, IndexedSeq(1, 2, 3, 4, 5).view.iterator.take(2).knownSize)
assertEquals(2, IndexedSeq(1, 2, 3, 4, 5).view.iterator.slice(2, 4).knownSize)
}
}
| lrytz/scala | test/junit/scala/collection/IndexedSeqViewTest.scala | Scala | apache-2.0 | 602 |
package com.dys.chatwork4s.http.parameters
/**
* Parameters specified when fetching the list of messages in a chat.
*
* @param force whether to fetch the latest 100 messages regardless of retrieval state<br>when true, the latest 100 messages are returned even if they were already fetched
*/
case class GetMessage(
force: Boolean = false
) extends HttpParameter {
// Serialises to the query parameter "force"; booleanToInt maps the flag to 0/1.
override def toParameters: Seq[(String, String)] = singleParameter(
("force", Some(booleanToInt(force)))
)
}
object GetMessage {
// Default instance (force = false): fetch unread messages only.
val unreadOnly: GetMessage = GetMessage()
} | kado-yasuyuki/chatwork4s | src/main/scala/com/dys/chatwork4s/http/parameters/GetMessage.scala | Scala | apache-2.0 | 609 |
package tscfg.example
// NOTE(review): this looks like tscfg-generated configuration-binding code
// (example output); document only - do not hand-edit the generated logic.
final case class ScalaIssue62aCfg(
foo : ScalaIssue62aCfg.Foo
)
object ScalaIssue62aCfg {
sealed trait FruitType
object FruitType {
object apple extends FruitType
object banana extends FruitType
object pineapple extends FruitType
// Resolves an enum value by name; unknown names are recorded on the
// validator and null is returned (the validator throws later in validate()).
def $resEnum(name: java.lang.String, path: java.lang.String, $tsCfgValidator: $TsCfgValidator): FruitType = name match {
case "apple" => FruitType.apple
case "banana" => FruitType.banana
case "pineapple" => FruitType.pineapple
case v => $tsCfgValidator.addInvalidEnumValue(path, v, "FruitType")
null
}
}
final case class Foo(
fruit : ScalaIssue62aCfg.FruitType
)
object Foo {
def apply(c: com.typesafe.config.Config, parentPath: java.lang.String, $tsCfgValidator: $TsCfgValidator): ScalaIssue62aCfg.Foo = {
ScalaIssue62aCfg.Foo(
fruit = ScalaIssue62aCfg.FruitType.$resEnum(c.getString("fruit"), parentPath + "fruit", $tsCfgValidator)
)
}
}
// Top-level entry point: binds the Config, then validates all accumulated
// errors at once so every bad path is reported in a single exception.
def apply(c: com.typesafe.config.Config): ScalaIssue62aCfg = {
val $tsCfgValidator: $TsCfgValidator = new $TsCfgValidator()
val parentPath: java.lang.String = ""
val $result = ScalaIssue62aCfg(
foo = ScalaIssue62aCfg.Foo(if(c.hasPathOrNull("foo")) c.getConfig("foo") else com.typesafe.config.ConfigFactory.parseString("foo{}"), parentPath + "foo.", $tsCfgValidator)
)
$tsCfgValidator.validate()
$result
}
// Accumulates configuration errors and throws one ConfigException listing
// all of them when validate() is called.
final class $TsCfgValidator {
private val badPaths = scala.collection.mutable.ArrayBuffer[java.lang.String]()
def addBadPath(path: java.lang.String, e: com.typesafe.config.ConfigException): Unit = {
badPaths += s"'$path': ${e.getClass.getName}(${e.getMessage})"
}
def addInvalidEnumValue(path: java.lang.String, value: java.lang.String, enumName: java.lang.String): Unit = {
badPaths += s"'$path': invalid value $value for enumeration $enumName"
}
def validate(): Unit = {
if (badPaths.nonEmpty) {
throw new com.typesafe.config.ConfigException(
badPaths.mkString("Invalid configuration:\n ", "\n ", "")
){}
}
}
}
}
| carueda/tscfg | src/test/scala/tscfg/example/ScalaIssue62aCfg.scala | Scala | apache-2.0 | 2,167 |
package edu.nus.hipci.cli
import java.nio.file.Paths
import java.security.MessageDigest
import scala.concurrent.Await
import scala.util.{Try, Success, Failure}
import scala.collection.JavaConversions
import scala.collection.immutable.{HashSet, HashMap}
import scala.io._
import akka.pattern._
import pl.project13.scala.rainbow._
import com.typesafe.config.{ConfigRenderOptions, Config}
import com.github.kxbmap.configs._
import edu.nus.hipci.core._
import edu.nus.hipci.hg._
// Protocol (actor messages) understood by ConfigurationFactory.
sealed trait ConfigurationFactoryRequest
/**
* Request to create a test configuration from a Config object
*
* @constructor Create a request from a config object
* @param config The config object
*/
case class CreateTestConfiguration(config: Config)
extends ConfigurationFactoryRequest
/**
* Request to load a global application configuration from a Config object
*
* @constructor Create a request from a config object
* @param config The config object
*/
case class LoadAppConfiguration(config: Config)
extends ConfigurationFactoryRequest
/** Create AppConfiguration interactively by asking the user some questions. */
case object CreateAppConfigurationInteractively
extends ConfigurationFactoryRequest
/** Singleton descriptor for [[ConfigurationFactory]] */
object ConfigurationFactory
extends ComponentDescriptor[ConfigurationFactory] {
override val subComponents = List(Hg)
/**
* Compute the SHA of a config object.
*
* @param config the config object.
* @return SHA digest of the config object.
*/
def computeConfigSHA(config: Config) : String = {
// NOTE(review): this digests Config.hashCode(), not the rendered config,
// so the result is only as stable as that hashCode implementation - confirm
// it is value-based before relying on cross-run stability.
val hash = config.hashCode().toString
MessageDigest.getInstance("SHA-1")
.digest(hash.getBytes("UTF-8"))
.map("%02x".format(_))
.mkString
}
}
/** Creates a TestConfiguration from a Config object. */
class ConfigurationFactory extends CLIComponent {
val descriptor = ConfigurationFactory
// Parses the whole config into a TestConfiguration; a malformed HIP spec is
// surfaced as a Failure rather than an exception.
protected def fromConfig(config: Config): Try[TestConfiguration] = {
try {
val tests = collectTestsFromConfig(config)
val testID = computeTestID(AppConfiguration.global.projectDirectory, config)
Success(TestConfiguration(testID, tests))
} catch {
case e:InvalidHipSpec => Failure(e)
}
}
// Test ID = "<hg revision>@<SHA of config>"; empty string when Mercurial fails.
// A dirty repository is only logged, not rejected.
private def computeTestID(projectDirectory: String, config: Config) : String = {
val hg = loadComponent(Hg)
val revision = Await.result(hg ? GetCurrentRevision(Paths.get(projectDirectory)),
timeout.duration)
revision match {
case RevisionDirty(rev) =>
logger.error(DirtyRepository(projectDirectory).getMessage)
rev + "@" + ConfigurationFactory.computeConfigSHA(config)
case RevisionClean(rev) =>
rev + "@" + ConfigurationFactory.computeConfigSHA(config)
case MercurialError(_) => ""
}
}
// Parses "name.STATUS" into (name, STATUS == HipSuccess); anything else is invalid.
private def parseSingleHipSpec(spec: String) = {
val token = spec.split('.')
if (token.length != 2) {
throw InvalidHipSpec(spec)
} else {
(token(0), token(1).toUpperCase.equals(HipSuccess))
}
}
// Sleek specs: either a bare verdict for the entailment, or "verdict.N" which
// additionally expects N inferred sub-results ("<entailment>.1" .. "<entailment>.N").
private def parseSingleSleekSpec(entailment: Int, spec: String) =
if (!spec.contains(".")) {
Map(entailment.toString -> spec.toUpperCase.equals(SleekValid))
} else {
val token = spec.split('.')
if (token.length != 2) {
throw InvalidHipSpec(spec)
} else {
val expectInfer = (1 to token(1).toInt)
.foldRight(Map.empty[String, Boolean])({ (p, acc) =>
acc + (s"$entailment.$p" -> true)
})
expectInfer + (entailment.toString -> token(0).equals(SleekValid))
}
}
// Turns one raw entry [filename, -args..., specs...] into a GenTest; the
// file extension decides between a HIP test and a SLEEK test.
private def toGenTest(data: List[String]) = {
data match {
case List() => None
case filename :: rest =>
// Leading "-" tokens are command-line arguments, the rest are specs.
val (arguments, specs) = rest span(_.startsWith("-"))
if (filename.endsWith(HipExtension)) {
val hipSpec = specs.foldRight[Map[String, Boolean]](HashMap.empty)({
(en, acc) => acc + parseSingleHipSpec(en)
})
Some(GenTest(filename, HipTest, arguments, hipSpec))
} else {
// Sleek specs are positional: the (1-based) index is the entailment number.
val indexedSpecs = specs.zipWithIndex.map((p) => p.copy(_2 = p._2 + 1))
val sleekSpec = indexedSpecs.foldRight[Map[String, Boolean]](Map.empty)({
(p, acc) => acc ++ parseSingleSleekSpec(p._2, p._1.toString)
})
Some(GenTest(filename, SleekTest, arguments, sleekSpec))
}
}
}
// Builds suite-name -> set-of-tests from every top-level config entry.
private def collectTestsFromConfig(config: Config) = {
import scala.collection.JavaConverters._
type JList[T] = java.util.List[T]
val entries = JavaConversions asScalaSet config.entrySet
entries.foldRight[Map[String, Set[GenTest]]](HashMap.empty)({
(en, acc) =>
// Each entry is a list of lists of strings: one inner list per test.
val rawTestEntry = config.getValue(en.getKey).unwrapped().asInstanceOf[JList[JList[String]]]
.asScala.toList.map(_.asScala.toList)
val testPool = rawTestEntry.foldRight[HashSet[GenTest]](HashSet.empty)({
(en, acc) => toGenTest(en).fold(acc)({ (t) => acc + t })
})
acc + ((en.getKey, testPool))
})
}
// Mutates the global AppConfiguration in place, keeping existing values as
// defaults for any key missing from the config.
private def loadAppConfig(config: Config) = {
import AppConfiguration.Fields._
val app = AppConfiguration.global
app.projectDirectory = config.getOrElse[String](ProjectDirectory, app.projectDirectory)
app.hipDirectory = config.getOrElse[String](HipDirectory, app.hipDirectory)
app.sleekDirectory = config.getOrElse[String](SleekDirectory, app.sleekDirectory)
app.daemonHost = config.getOrElse[String](DaemonHost, app.daemonHost)
app.daemonPort = config.getOrElse[String](DaemonPort, app.daemonPort)
}
// Interactive wizard: asks each question on the console, keeps the previous
// value on empty input, then writes the result to the hipci config file.
private def createAppConfigurationInteractively() = {
type Question = (String, (String, AppConfiguration) => AppConfiguration)
val runQuestion = { (previous: AppConfiguration, question: Question) =>
Console.out.println(question._1.cyan)
val line = Console.in.readLine()
if (line.trim().size > 0)
question._2(line, previous)
else
previous
}
val runQuestions = { (questions: Seq[Question]) =>
questions.foldLeft(AppConfiguration())(runQuestion)
}
// NOTE(review): "addresss" below is a typo in a user-facing prompt - left
// untouched here because prompt text is runtime behavior.
val questions = Seq(
("Where is the HIP/SLEEK project directory located?. You can input relative path from this directory or " +
" an absolute path",
(input: String, appConfig:AppConfiguration) =>
appConfig.copy(projectDirectory = input)),
(s"What is the HIP test directory? default: [${AppConfiguration.global.hipDirectory }]",
(input: String, appConfig:AppConfiguration) => appConfig.copy(hipDirectory = input)),
(s"What is the SLEEK test directory? default: [${AppConfiguration.global.sleekDirectory }]",
(input: String, appConfig:AppConfiguration) => appConfig.copy(sleekDirectory = input)),
(s"What is the Daemon host addresss? default: [${AppConfiguration.global.daemonHost }]",
(input: String, appConfig:AppConfiguration) => appConfig.copy(daemonHost = input)),
(s"What is the Daemon port number? default: [${AppConfiguration.global.daemonPort }]",
(input: String, appConfig:AppConfiguration) => appConfig.copy(daemonPort = input))
)
val config = AppConfiguration.toConfig(runQuestions(questions))
import java.io._
val renderOpts = ConfigRenderOptions.defaults().setOriginComments(false).setComments(false).setJson(false);
val pw = new PrintWriter(Paths.get(HipciConf).toFile())
pw.write(config.root().render(renderOpts))
pw.close()
}
// Actor message dispatch; unknown messages fall through to CLIComponent.
override def receive = {
case CreateTestConfiguration(config) => sender ! fromConfig(config)
case LoadAppConfiguration(config) => sender ! loadAppConfig(config)
case CreateAppConfigurationInteractively => sender ! createAppConfigurationInteractively()
case other => super.receive(other)
}
}
| rgoulter/hipci | src/main/edu/nus/hipci/cli/ConfigurationFactory.scala | Scala | mit | 7,734 |
package spark.streaming.util
import spark.{Logging, RDD}
import spark.streaming._
import spark.streaming.dstream.ForEachDStream
import StreamingContext._
import scala.util.Random
import scala.collection.mutable.{SynchronizedBuffer, ArrayBuffer}
import java.io.{File, ObjectInputStream, IOException}
import java.util.UUID
import com.google.common.io.Files
import org.apache.commons.io.FileUtils
import org.apache.hadoop.fs.{FileUtil, FileSystem, Path}
import org.apache.hadoop.conf.Configuration
private[streaming]
object MasterFailureTest extends Logging {
initLogging()
// Set when the running StreamingContext has been killed - presumably by the
// KillingThread (defined later in this file, not visible here); confirm.
@volatile var killed = false
// Number of injected failures so far; read for the final log line.
@volatile var killCount = 0
// CLI entry point: runs the map test and the updateStateByKey test against
// the given directory, with an optional batch size (defaults to 1 second).
def main(args: Array[String]) {
if (args.size < 2) {
println(
"Usage: MasterFailureTest <local/HDFS directory> <# batches> [<batch size in milliseconds>]")
System.exit(1)
}
val directory = args(0)
val numBatches = args(1).toInt
val batchDuration = if (args.size > 2) Milliseconds(args(2).toInt) else Seconds(1)
println("\n\n========================= MAP TEST =========================\n\n")
testMap(directory, numBatches, batchDuration)
println("\n\n================= UPDATE-STATE-BY-KEY TEST =================\n\n")
testUpdateStateByKey(directory, numBatches, batchDuration)
println("\n\nSUCCESS\n\n")
}
// Runs a stateless map job under repeated master failures; because batches
// may be replayed, only the *set* of outputs is compared, not multiplicity.
def testMap(directory: String, numBatches: Int, batchDuration: Duration) {
// Input: time=1 ==> [ 1 ] , time=2 ==> [ 2 ] , time=3 ==> [ 3 ] , ...
val input = (1 to numBatches).map(_.toString).toSeq
// Expected output: time=1 ==> [ 1 ] , time=2 ==> [ 2 ] , time=3 ==> [ 3 ] , ...
val expectedOutput = (1 to numBatches)
val operation = (st: DStream[String]) => st.map(_.toInt)
// Run streaming operation with multiple master failures
val output = testOperation(directory, batchDuration, input, operation, expectedOutput)
logInfo("Expected output, size = " + expectedOutput.size)
logInfo(expectedOutput.mkString("[", ",", "]"))
logInfo("Output, size = " + output.size)
logInfo(output.mkString("[", ",", "]"))
// Verify whether all the values of the expected output is present
// in the output
assert(output.distinct.toSet == expectedOutput.toSet)
}
// Runs a stateful (updateStateByKey + checkpoint) job under repeated master
// failures; checks each observed value is an expected running total and that
// the final total was reached, proving no input batch was lost.
def testUpdateStateByKey(directory: String, numBatches: Int, batchDuration: Duration) {
// Input: time=1 ==> [ a ] , time=2 ==> [ a, a ] , time=3 ==> [ a, a, a ] , ...
val input = (1 to numBatches).map(i => (1 to i).map(_ => "a").mkString(" ")).toSeq
// Expected output: time=1 ==> [ (a, 1) ] , time=2 ==> [ (a, 3) ] , time=3 ==> [ (a,6) ] , ...
val expectedOutput = (1L to numBatches).map(i => (1L to i).reduce(_ + _)).map(j => ("a", j))
val operation = (st: DStream[String]) => {
// Running sum of all counts seen so far for each key.
val updateFunc = (values: Seq[Long], state: Option[Long]) => {
Some(values.foldLeft(0L)(_ + _) + state.getOrElse(0L))
}
st.flatMap(_.split(" "))
.map(x => (x, 1L))
.updateStateByKey[Long](updateFunc)
.checkpoint(batchDuration * 5)
}
// Run streaming operation with multiple master failures
val output = testOperation(directory, batchDuration, input, operation, expectedOutput)
logInfo("Expected output, size = " + expectedOutput.size + "\n" + expectedOutput)
logInfo("Output, size = " + output.size + "\n" + output)
// Verify whether all the values in the output are among the expected output values
output.foreach(o =>
assert(expectedOutput.contains(o), "Expected value " + o + " not found")
)
// Verify whether the last expected output value has been generated, there by
// confirming that none of the inputs have been missed
assert(output.last == expectedOutput.last)
}
/**
* Tests stream operation with multiple master failures, and verifies whether the
* final set of output values is as expected or not.
*/
def testOperation[T: ClassManifest](
directory: String,
batchDuration: Duration,
input: Seq[String],
operation: DStream[String] => DStream[T],
expectedOutput: Seq[T]
): Seq[T] = {
// Just making sure that the expected output does not have duplicates
assert(expectedOutput.distinct.toSet == expectedOutput.toSet)
// Setup the stream computation with the given operation
val (ssc, checkpointDir, testDir) = setupStreams(directory, batchDuration, operation)
// Start generating files in the a different thread
val fileGeneratingThread = new FileGeneratingThread(input, testDir, batchDuration.milliseconds)
fileGeneratingThread.start()
// Run the streams and repeatedly kill it until the last expected output
// has been generated, or until it has run for twice the expected time
val lastExpectedOutput = expectedOutput.last
val maxTimeToRun = expectedOutput.size * batchDuration.milliseconds * 2
val mergedOutput = runStreams(ssc, lastExpectedOutput, maxTimeToRun)
// Delete directories
fileGeneratingThread.join()
val fs = checkpointDir.getFileSystem(new Configuration())
fs.delete(checkpointDir, true)
fs.delete(testDir, true)
logInfo("Finished test after " + killCount + " failures")
mergedOutput
}
/**
* Sets up the stream computation with the given operation, directory (local or HDFS),
* and batch duration. Returns the streaming context and the directory to which
* files should be written for testing.
*/
private def setupStreams[T: ClassManifest](
directory: String,
batchDuration: Duration,
operation: DStream[String] => DStream[T]
): (StreamingContext, Path, Path) = {
// Reset all state
reset()
// Create the directories for this test
// A random UUID keeps concurrent/repeated runs from colliding on disk.
val uuid = UUID.randomUUID().toString
val rootDir = new Path(directory, uuid)
val fs = rootDir.getFileSystem(new Configuration())
val checkpointDir = new Path(rootDir, "checkpoint")
val testDir = new Path(rootDir, "test")
fs.mkdirs(checkpointDir)
fs.mkdirs(testDir)
// Setup the streaming computation with the given operation
// Clear leftover port properties so a fresh context can bind cleanly.
System.clearProperty("spark.driver.port")
System.clearProperty("spark.hostPort")
var ssc = new StreamingContext("local[4]", "MasterFailureTest", batchDuration, null, Nil, Map())
ssc.checkpoint(checkpointDir.toString)
val inputStream = ssc.textFileStream(testDir.toString)
val operatedStream = operation(inputStream)
val outputStream = new TestOutputStream(operatedStream)
ssc.registerOutputStream(outputStream)
(ssc, checkpointDir, testDir)
}
/**
* Repeatedly starts and kills the streaming context until timed out or
* the last expected output is generated. Finally, return
*/
// Runs the streaming context, repeatedly killing and restarting it from the
// checkpoint, until the last expected output appears or maxTimeToRun elapses.
// Returns the concatenation of the outputs from all (re)runs.
private def runStreams[T: ClassManifest](
ssc_ : StreamingContext,
lastExpectedOutput: T,
maxTimeToRun: Long
): Seq[T] = {
var ssc = ssc_
var totalTimeRan = 0L
var isLastOutputGenerated = false
var isTimedOut = false
val mergedOutput = new ArrayBuffer[T]()
val checkpointDir = ssc.checkpointDir
var batchDuration = ssc.graph.batchDuration
while(!isLastOutputGenerated && !isTimedOut) {
// Get the output buffer
val outputBuffer = ssc.graph.getOutputStreams.head.asInstanceOf[TestOutputStream[T]].output
def output = outputBuffer.flatMap(x => x)
// Start the thread to kill the streaming after some time
killed = false
val killingThread = new KillingThread(ssc, batchDuration.milliseconds * 10)
killingThread.start()
var timeRan = 0L
try {
// Start the streaming computation and let it run while ...
// (i) StreamingContext has not been shut down yet
// (ii) The last expected output has not been generated yet
// (iii) Its not timed out yet
System.clearProperty("spark.streaming.clock")
System.clearProperty("spark.driver.port")
System.clearProperty("spark.hostPort")
ssc.start()
val startTime = System.currentTimeMillis()
while (!killed && !isLastOutputGenerated && !isTimedOut) {
Thread.sleep(100)
timeRan = System.currentTimeMillis() - startTime
isLastOutputGenerated = (!output.isEmpty && output.last == lastExpectedOutput)
isTimedOut = (timeRan + totalTimeRan > maxTimeToRun)
}
} catch {
case e: Exception => logError("Error running streaming context", e)
}
if (killingThread.isAlive) killingThread.interrupt()
ssc.stop()
logInfo("Has been killed = " + killed)
logInfo("Is last output generated = " + isLastOutputGenerated)
logInfo("Is timed out = " + isTimedOut)
// Verify whether the output of each batch has only one element or no element
// and then merge the new output with all the earlier output
mergedOutput ++= output
totalTimeRan += timeRan
logInfo("New output = " + output)
logInfo("Merged output = " + mergedOutput)
logInfo("Time ran = " + timeRan)
logInfo("Total time ran = " + totalTimeRan)
if (!isLastOutputGenerated && !isTimedOut) {
// Sleep a random while before restarting, to vary the failure point.
val sleepTime = Random.nextInt(batchDuration.milliseconds.toInt * 10)
logInfo(
"\n-------------------------------------------\n" +
" Restarting stream computation in " + sleepTime + " ms " +
"\n-------------------------------------------\n"
)
Thread.sleep(sleepTime)
// Recreate the streaming context from checkpoint
ssc = new StreamingContext(checkpointDir)
}
}
mergedOutput
}
/**
 * Verifies that the output values are as expected. Since failures can lead to
 * a batch being processed twice, a batch's output may appear more than once
 * consecutively. To avoid confusing a re-processed batch with a genuinely
 * repeated value, the expected output must not contain consecutive batches
 * with the same values; each actual output value is then only checked for
 * membership in the expected output (order and multiplicity are not checked).
 */
private def verifyOutput[T: ClassManifest](output: Seq[T], expectedOutput: Seq[T]) {
  // Verify that the expected output does not contain consecutive batches
  // with the same values (a precondition of this verification scheme).
  for (i <- 0 until expectedOutput.size - 1) {
    assert(expectedOutput(i) != expectedOutput(i+1),
      "Expected output has consecutive duplicate sequence of values")
  }
  // Log the output
  println("Expected output, size = " + expectedOutput.size)
  println(expectedOutput.mkString("[", ",", "]"))
  println("Output, size = " + output.size)
  println(output.mkString("[", ",", "]"))
  // Match the output with the expected output
  output.foreach(o =>
    assert(expectedOutput.contains(o), "Expected value " + o + " not found")
  )
}
/** Resets the shared kill flag and kill counter to prepare for a new test run. */
private def reset() {
  killed = false
  killCount = 0
}
}
/**
 * This is a output stream just for testing. All the output is collected into a
 * ArrayBuffer. This buffer is wiped clean on being restored from checkpoint,
 * so output produced before a failure is not double-counted after recovery.
 */
private[streaming]
class TestOutputStream[T: ClassManifest](
    parent: DStream[T],
    val output: ArrayBuffer[Seq[T]] = new ArrayBuffer[Seq[T]] with SynchronizedBuffer[Seq[T]]
  ) extends ForEachDStream[T](
    parent,
    // For every batch, collect the RDD's contents into the shared buffer.
    (rdd: RDD[T], t: Time) => {
      val collected = rdd.collect()
      output += collected
    }
  ) {
  // This is to clear the output buffer every time it is read from a checkpoint
  @throws(classOf[IOException])
  private def readObject(ois: ObjectInputStream) {
    ois.defaultReadObject()
    output.clear()
  }
}
/**
 * Thread that stops the given streaming context after a random period of time,
 * simulating a driver ("master") failure mid-computation. Updates the shared
 * `MasterFailureTest.killed` / `killCount` state when it succeeds.
 *
 * @param ssc streaming context to stop (ignored when null)
 * @param maxKillWaitTime upper bound, in ms, on the random extra delay before killing
 */
private[streaming]
class KillingThread(ssc: StreamingContext, maxKillWaitTime: Long) extends Thread with Logging {
  initLogging()

  override def run() {
    try {
      // If it is the first killing, then allow the first checkpoint to be
      // created before stopping the context (hence the longer minimum wait).
      // `val` rather than `var`: the value is never reassigned.
      val minKillWaitTime = if (MasterFailureTest.killCount == 0) 5000 else 2000
      val killWaitTime = minKillWaitTime + math.abs(Random.nextLong % maxKillWaitTime)
      logInfo("Kill wait time = " + killWaitTime)
      Thread.sleep(killWaitTime)
      logInfo(
        "\n---------------------------------------\n" +
          "Killing streaming context after " + killWaitTime + " ms" +
          "\n---------------------------------------\n"
      )
      if (ssc != null) {
        ssc.stop()
        MasterFailureTest.killed = true
        MasterFailureTest.killCount += 1
      }
      logInfo("Killing thread finished normally")
    } catch {
      // Interruption is the normal shutdown path when the run finishes first.
      case ie: InterruptedException => logInfo("Killing thread interrupted")
      case e: Exception => logWarning("Exception in killing thread", e)
    }
  }
}
/**
 * Thread to generate input files periodically with the desired text. Each
 * element of `input` becomes one file in `testDir`, written via a local temp
 * file and an HDFS rename so readers never observe a partially written file.
 *
 * @param input one string per file to generate, in order
 * @param testDir target (Hadoop) directory watched by the streaming input
 * @param interval delay in ms between consecutive files
 */
private[streaming]
class FileGeneratingThread(input: Seq[String], testDir: Path, interval: Long)
  extends Thread with Logging {
  initLogging()

  override def run() {
    val localTestDir = Files.createTempDir()
    // `var`: the FileSystem handle is re-acquired after an IOException below.
    var fs = testDir.getFileSystem(new Configuration())
    val maxTries = 3
    try {
      Thread.sleep(5000) // To make sure that all the streaming context has been set up
      for (i <- 0 until input.size) {
        // Write the data to a local file and then move it to the target test directory
        val localFile = new File(localTestDir, (i+1).toString)
        val hadoopFile = new Path(testDir, (i+1).toString)
        val tempHadoopFile = new Path(testDir, ".tmp_" + (i+1).toString)
        FileUtils.writeStringToFile(localFile, input(i).toString + "\n")
        var tries = 0
        var done = false
        // Retry the copy+rename up to maxTries times, refreshing the
        // FileSystem handle after each failure.
        while (!done && tries < maxTries) {
          tries += 1
          try {
            // fs.copyFromLocalFile(new Path(localFile.toString), hadoopFile)
            fs.copyFromLocalFile(new Path(localFile.toString), tempHadoopFile)
            fs.rename(tempHadoopFile, hadoopFile)
            done = true
          } catch {
            case ioe: IOException => {
              fs = testDir.getFileSystem(new Configuration())
              logWarning("Attempt " + tries + " at generating file " + hadoopFile + " failed.", ioe)
            }
          }
        }
        if (!done)
          logError("Could not generate file " + hadoopFile)
        else
          logInfo("Generated file " + hadoopFile + " at " + System.currentTimeMillis)
        Thread.sleep(interval)
        localFile.delete()
      }
      logInfo("File generating thread finished normally")
    } catch {
      case ie: InterruptedException => logInfo("File generating thread interrupted")
      case e: Exception => logWarning("File generating in killing thread", e)
    } finally {
      fs.close()
    }
  }
}
| baeeq/incubator-spark | streaming/src/main/scala/spark/streaming/util/MasterFailureTest.scala | Scala | bsd-3-clause | 14,626 |
package org.cristal.repository.dao
import org.cristal.model.User
import reactivemongo.api.collections.bson.BSONCollection
import reactivemongo.api.commands.WriteResult
import reactivemongo.bson.BSONDocument
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}
/** Data-access interface for persisting [[User]] records. */
trait UserDAO {
  /** Asynchronously inserts the given user; the future completes when the write finishes. */
  def insert(user: User): Future[Unit]
}
/**
 * Default [[UserDAO]] backed by the ReactiveMongo "users" collection.
 *
 * @param executor execution context used for all asynchronous Mongo operations
 */
class UserDAOImpl(implicit executor: ExecutionContext) extends UserDAO with UserCollection {

  override def insert(user: User): Future[Unit] =
    handleInsertResult(usersCollection.flatMap(_.insert(userToDocument(user))))

  // Logs the outcome of the write and discards the WriteResult payload.
  // (The previous `case failure @ Failure(e) => ...; failure` returned a value
  // that onComplete silently discards, so it has been dropped.)
  private def handleInsertResult(result: Future[WriteResult]): Future[Unit] = {
    result.onComplete {
      case Failure(e) => e.printStackTrace() // NOTE(review): prefer a real logger over stderr
      case Success(_) => println("User successfully inserted.")
    }
    result.map(_ => ())
  }

  /** Resolves the "users" collection from the shared database connection. */
  def usersCollection: Future[BSONCollection] = db.map(_.collection("users"))

  /** Maps a [[User]] onto its BSON document representation. */
  def userToDocument(user: User) = BSONDocument(
    "username" -> user.username,
    "password" -> user.password,
    "first_name" -> user.firstName,
    "last_name" -> user.lastName,
    "email" -> user.email
  )
}
/** Marker trait giving DAOs access to the shared Mongo connection via [[DBConnection]]. */
trait UserCollection extends DBConnection {
}
| frecano/cristal | src/main/scala/org/cristal/repository/dao/UserDAO.scala | Scala | gpl-3.0 | 1,174 |
/*
* Copyright 2016 Dennis Vriend
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dnvriend.component.helloworld.controller
import com.github.dnvriend.component.helloworld.repository.HelloWorldRepository
import com.google.inject.Inject
import play.api.mvc._
import scalaz._
import Scalaz._
// Why does this work? The result type of getByIdD is a Disjunction[String, HelloWorld].
//
// It works because of the implicit conversion from HelloWorld to a Result type.
//
// Action expects either a 'play.api.mvc.Result' or a function — let's call that
// function 'f' — that converts Request => Result. Here we transform the type
// Disjunction[String, HelloWorld] to a 'play.api.mvc.Result'.
//
// You should look at the HelloWorld entity to read more.
//
/**
 * Exposes the HelloWorld repository over HTTP. Each action relies on implicit
 * conversions (see the note above and the HelloWorld entity) to turn
 * Option / Maybe / Disjunction / Validation results into Play `Result`s.
 */
class HelloWorldController @Inject() (repo: HelloWorldRepository) extends Controller {
  // Echoes the repository message, appending the X-ExampleFilter header value
  // (or "No header" when absent) to demonstrate filter interaction.
  def getHelloWorld = Action { request =>
    val header: String = request.headers.get("X-ExampleFilter").getOrElse("No header")
    val msg = repo.getHelloWorld
    msg.copy(msg = s"${msg.msg} - $header")
  }

  // Each variant returns the same lookup wrapped in a different container,
  // converted to a Result implicitly.
  def getHelloWorldOpt(id: Long) = Action(repo.getById(id))
  def getHelloWorldMB(id: Long) = Action(repo.getById(id).toMaybe)
  def getHelloWorldD(id: Long) = Action(repo.getByIdD(id))
  def getHelloWorldV(id: Long) = Action(repo.getByIdD(id).validation)
  def getHelloWorldVN(id: Long) = Action(repo.getByIdD(id).validationNel)
}
| dnvriend/akka-http-test | app/com/github/dnvriend/component/helloworld/controller/HelloWorldController.scala | Scala | apache-2.0 | 1,913 |
package lib
import scala.collection.immutable.StringOps
object Text {

  /**
   * We require names to be alpha numeric and to start with a letter
   */
  def isValidName(name: String): Boolean = {
    validateName(name).isEmpty
  }

  /** Returns all validation errors for `name`; empty means the name is valid. */
  def validateName(name: String): Seq[String] = {
    val alphaNumericError = if (isAlphaNumeric(name)) {
      Seq.empty
    } else {
      Seq("Name can only contain a-z, A-Z, 0-9, - and _ characters")
    }
    // A single "_" is rejected even though "_x" is accepted, hence the length check.
    val startsWithLetterError = if (startsWithLetter(name) || (startsWithUnderscore(name) && name.length > 1)) {
      Seq.empty
    } else if (name.isEmpty) {
      Seq("Name cannot be blank")
    } else {
      Seq("Name must start with a letter")
    }
    alphaNumericError ++ startsWithLetterError
  }

  private[this] val AlphaNumericRx = "^[a-zA-Z0-9-_.\\\\.]*$".r

  /** True when `value` contains only characters matched by [[AlphaNumericRx]]. */
  def isAlphaNumeric(value: String): Boolean = {
    value match {
      case AlphaNumericRx() => true
      case _ => false
    }
  }

  private[this] val StartsWithLetterRx = "^[a-zA-Z].*".r

  /** True when `value` begins with an ASCII letter. */
  def startsWithLetter(value: String): Boolean = {
    val result = value match {
      case StartsWithLetterRx() => true
      case _ => false
    }
    result
  }

  private[this] val StartsWithUnderscoreRx = "^_.*".r

  /** True when `value` begins with an underscore. */
  def startsWithUnderscore(value: String): Boolean = {
    value match {
      case StartsWithUnderscoreRx() => true
      case _ => false
    }
  }

  private[this] val Ellipsis = "..."

  /**
   * if value is longer than maxLength characters, it will be truncated
   * to <= (maxLength-Ellipsis.length) characters and an ellipsis
   * added. We try to truncate on a space to avoid breaking a word in
   * pieces.
   *
   * @param value The string value to truncate
   * @param maxLength The max length of the returned string, including the final ellipsis if added. Must be at least the length of the ellipsis
   * @param ellipsis If the string is truncated, this value will be appended to the string.
   */
  def truncate(
    value: String,
    maxLength: Int = 80,
    ellipsis: Option[String] = Some(Ellipsis)
  ): String = {
    val suffix = ellipsis.getOrElse("")
    // BUG FIX: the message previously lacked the `s` interpolator, so the
    // literal text "${suffix.length}" was emitted instead of the value.
    require(maxLength >= suffix.length, s"maxLength must be at least the length of the suffix[${suffix.length}]")
    if (value.length <= maxLength) {
      value
    } else {
      // Drop trailing words until the sentence plus suffix fits.
      val pieces = value.split(" ")
      var i = pieces.length
      while (i > 0) {
        val sentence = pieces.slice(0, i).mkString(" ").trim
        val target = sentence + suffix
        if (target.length <= maxLength) {
          return target
        }
        i -= 1
      }
      // No word boundary fits: hard-truncate on characters instead.
      value.split("").slice(0, maxLength - suffix.length).mkString("") + suffix
    }
  }

  // Irregular singular -> plural forms that the inflector would get wrong.
  private[this] val Plurals = Map(
    "metadatum" -> "metadata",
    "datum" -> "data",
    "person" -> "people",
    "species" -> "species",
    "epoch" -> "epochs",
    "memo" -> "memos"
  )

  // Words that are already plural (or identical in both forms) and must be
  // returned unchanged by pluralize.
  private[lib] val KnownPlurals = (Plurals.values ++ Seq(
    "bison",
    "buffalo",
    "deer",
    "duck",
    "fish",
    "moose",
    "pike",
    "plankton",
    "salmon",
    "sheep",
    "squid",
    "swine",
    "trout"
  )).toSet

  /**
   * Handle only base cases for pluralization. User can specify own plural
   * form via api.json if needed.
   */
  def pluralize(value: String): String = {
    if (KnownPlurals.contains(value.toLowerCase)) {
      value
    } else if (Plurals.contains(value)) {
      Plurals(value)
    } else if (value.endsWith("es") || value.endsWith("ts") || value.endsWith("data")) {
      value
    } else {
      org.atteo.evo.inflector.English.plural(value)
    }
  }

  private[this] val RemoveUnsafeCharacters = """([^0-9a-zA-Z\\-\\_])""".r

  /** Strips unsafe characters and collapses dots/underscores into single underscores. */
  def safeName(name: String): String = {
    RemoveUnsafeCharacters.replaceAllIn(name, _ => "").replaceAll("\\\\.", "_").replaceAll("\\\\_+", "_").trim
  }

  /** e.g. "foo_bar" => "FooBar" */
  def underscoreToInitCap(value: String): String = {
    initCap(splitIntoWords(value))
  }

  /** e.g. "foo_bar-baz" => "FooBarBaz" */
  def underscoreAndDashToInitCap(value: String): String = {
    initCap(splitIntoWords(value).flatMap(_.split("-")))
  }

  private[this] val WordDelimiterRx = "_|\\\\-|\\\\.|:|/| ".r

  /** Splits camelCase and delimiter-separated text into its component words. */
  def splitIntoWords(value: String): Seq[String] = {
    WordDelimiterRx.split(lib.Text.camelCaseToUnderscore(value)).map(_.trim).filter(!_.isEmpty)
  }

  /** e.g. "foo_bar" => "fooBar" */
  def snakeToCamelCase(value: String): String = {
    splitIntoWords(value).toList match {
      case Nil => ""
      case part :: rest => part + initCap(rest)
    }
  }

  /** Upper-cases the first character of `word`. */
  def initCap(word: String): String = {
    word.capitalize
  }

  /** Capitalizes each part and joins them, e.g. Seq("foo", "bar") => "FooBar". */
  def initCap(parts: Seq[String]): String = {
    parts.map(s => initCap(s)).mkString("")
  }

  /**
   * Returns the word with first character in lower case
   */
  private[this] val InitLowerCaseRx = """^([A-Z])""".r
  def initLowerCase(word: String): String = {
    InitLowerCaseRx.replaceAllIn(word, m => s"${m.toString.toLowerCase}")
  }

  private[this] val Capitals = """([A-Z])""".r

  /** e.g. "fooBar" => "foo_bar"; all-caps input is simply lower-cased. */
  def camelCaseToUnderscore(phrase: String): String = {
    if (phrase == phrase.toUpperCase) {
      phrase.toLowerCase
    } else {
      val word = Capitals.replaceAllIn(phrase, m => s"_${m}").trim
      if (word.startsWith("_")) {
        word.slice(1, word.length)
      } else {
        word
      }
    }
  }

  /** Adds `.indent` syntax to strings: prefixes each non-blank line with spaces. */
  implicit class Indentable(s: String) {
    def indent: String = indent(2)
    def indent(width: Int): String = {
      s.split("\\n").map { value =>
        if (value.trim == "") {
          ""
        } else {
          (" " * width) + value
        }
      }.mkString("\\n")
    }
  }

}
| gheine/apidoc | lib/src/main/scala/Text.scala | Scala | mit | 5,696 |
package eu.inn.binders
import java.io.{OutputStream, InputStream}
import com.typesafe.config.{ConfigValue, Config}
import eu.inn.binders.tconfig.internal.ConfigMacro
import eu.inn.binders.naming.{PlainConverter, Converter}
import scala.language.experimental.macros
import scala.reflect.runtime.universe._
package object tconfig {

  /** Adds `config.read[T](path)` — a macro that materializes a reader for `T` at compile time. */
  implicit class ConfigReader(val config: Config) extends AnyVal{
    def read[O](path: String): O = macro ConfigMacro.read[O]
  }

  /** Adds `configValue.read[T]` for reading a single ConfigValue, also macro-generated. */
  implicit class ConfigValueReader(val configValue: ConfigValue) extends AnyVal{
    def read[O]: O = macro ConfigMacro.readValue[O]
  }
}
| InnovaCo/binders-typesafe-config | src/main/scala/eu/inn/binders/tconfig/tconfig.scala | Scala | bsd-3-clause | 603 |
package com.bayesianwitch.injera.functions
import scalaz._
import Scalaz._
/**
 * Write-side interface of a key/value store. Subclasses supply the storage
 * primitives `putImpl` / `invalidateImpl`; the public `put` / `invalidate`
 * delegate to them and may be overridden by mixins (e.g. to fan a write out
 * over every key in an object's preimage).
 */
trait WritableStore[-K,-V] {
  /** Stores `v` under `k` in the underlying backend. */
  protected def putImpl(k: K, v: V): Unit
  def put(k: K, v: V): Unit = putImpl(k,v)

  /** Removes the entry stored under `k` from the underlying backend. */
  protected def invalidateImpl(k: K): Unit
  // BUG FIX: this previously called invalidate(k) (itself), recursing forever
  // and throwing StackOverflowError; it must delegate to invalidateImpl.
  def invalidate(k: K): Unit = invalidateImpl(k)
}
/**
 * Mixin that fans every write/invalidation out over all keys in a value's
 * preimage, as computed by the supplied [[ComputesPreimage]] instance.
 */
trait WritesEntirePreimage[K,V] extends WritableStore[K,V] {
  // Strategy for enumerating every key that maps to a given value.
  protected implicit val cpi: ComputesPreimage[K,V]
  // NOTE: the `k` argument is ignored; the value is stored under every preimage key.
  override def put(k: K, v: V) = cpi.preimage(v).foreach(kp => putImpl(kp, v))
  /** Stores `v` under every key in its preimage. */
  def putObj(v: V) = cpi.preimage(v).foreach(kp => putImpl(kp, v))
  /** Invalidates every key in `v`'s preimage. */
  def invalidateObj(v: V): Unit = cpi.preimage(v).foreach(kp => invalidateImpl(kp))
}
/** Read-side interface of a key/value store. */
trait ReadableStore[-K,+V] {
  /** Returns the value associated with `k`. */
  def get(k: K): V
}
/**
 * Read-through cache over a [[FunctionWithPreimage]]: cache misses compute the
 * value and store it under every key in its preimage.
 */
trait CacheFunctionWithPreimage[K,V] extends ReadableStore[K,V] with WritesEntirePreimage[K,V] {
  /** Cache lookup; None on a miss. */
  protected def getFromCache(k: K): Option[V]
  // The wrapped function doubles as the preimage computer.
  protected implicit val cpi = fwpi
  protected implicit val fwpi: FunctionWithPreimage[K,V]

  // On a miss, compute the value and populate the cache for all preimage keys.
  def get(k: K): V = getFromCache(k).getOrElse({
    val result = fwpi(k)
    putObj(result)
    result
  })

  // Invalidates the cached value (if any) under every preimage key, then the
  // requested key itself.
  override def invalidate(k: K): Unit = {
    getFromCache(k).foreach(invalidateObj)
    invalidateImpl(k)
  }
}
/** Read-side interface whose results are wrapped in an effect/container `M`. */
trait FunctorReadableStore[-K,V, M[_]] {
  protected implicit val mFunctor: Applicative[M]
  /** Returns the value associated with `k`, inside `M`. */
  def get(k: K): M[V]
}
/**
 * Effectful variant of [[CacheFunctionWithPreimage]]: cache hits and computed
 * results are combined with the ApplicativePlus `plus` operation.
 */
trait FunctorCacheFunctionWithPreimage[K,V,M[_]] extends FunctorReadableStore[K,V,M] with WritesEntirePreimage[K,V] {
  protected implicit val mFunctor: ApplicativePlus[M]
  protected implicit val cpi = fwpi
  protected implicit val fwpi: FunctionMWithPreimage[K,V,M]

  /** Cache lookup inside `M`; the empty value of `M` denotes a miss. */
  protected def getFromCache(k: K): M[V]

  def get(k: K): M[V] = mFunctor.plus(getFromCache(k), {
    val result = fwpi(k)
    // NOTE(review): the functor produced by this map is discarded; for a lazy M
    // the putObj side effect may never execute — confirm intended semantics.
    result.map( v => putObj(v))
    result
  })
}
| bayesianwitch/injera | src/main/scala/injera/functions/Caching.scala | Scala | gpl-3.0 | 1,700 |
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rx.lang.scala.observables
import rx.lang.scala.{Observable, Subscription}
import rx.lang.scala.JavaConversions._
/**
 * Scala wrapper around `rx.observables.ConnectableObservable`: an observable
 * that does not begin emitting until `connect` is called (or, with `refCount`,
 * until the first subscription arrives).
 */
class ConnectableObservable[+T] private[scala](val asJavaObservable: rx.observables.ConnectableObservable[_ <: T])
  extends Observable[T] {

  /**
   * Call a ConnectableObservable's connect method to instruct it to begin emitting the
   * items from its underlying [[rx.lang.scala.Observable]] to its [[rx.lang.scala.Observer]]s.
   */
  def connect: Subscription = toScalaSubscription(asJavaObservable.connect())

  /**
   * Returns an observable sequence that stays connected to the source as long
   * as there is at least one subscription to the observable sequence.
   *
   * @return a [[rx.lang.scala.Observable]]
   */
  def refCount: Observable[T] = toScalaObservable[T](asJavaObservable.refCount())
}
| zjrstar/RxScala | src/main/scala/rx/lang/scala/observables/ConnectableObservable.scala | Scala | apache-2.0 | 1,430 |
package io.netflow.lib
import java.net.InetAddress
import com.twitter.util.Future
import io.netflow.flows.cflow._
/**
 * Lookup interface for NetFlow templates belonging to a sender address.
 * NOTE(review): the type parameter T is unused — findAll always returns
 * NetFlowV9Template regardless of T; confirm whether it should return Seq[T].
 */
trait NetFlowTemplateMeta[T <: Template] {
  /** Returns all templates previously recorded for the given sender address. */
  def findAll(inet: InetAddress): Future[Seq[NetFlowV9Template]]
}
| ayscb/netflow | netflow1/netflow-master/src/main/scala/io/netflow/lib/NetFlowTemplateMeta.scala | Scala | apache-2.0 | 227 |
package us.feliscat.text.similarity
/**
* @author K.Sakamoto
* Created on 2016/05/23
*/
object Divider {
  /**
   * Divides `numerator` by `denominator`, returning 0 when the denominator is
   * zero instead of producing Infinity/NaN. Rewritten as a single expression
   * (idiomatic Scala) rather than using an early `return`.
   */
  def divide(numerator: Double, denominator: Double): Double =
    if (denominator == 0D) 0D else numerator / denominator
}
| ktr-skmt/FelisCatusZero-multilingual | libraries/src/main/scala/us/feliscat/text/similarity/Divider.scala | Scala | apache-2.0 | 270 |
/* Copyright (c) 2016 Lucas Satabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package lingua.fst.semiring
/** Semiring representation over elements of `K`.
*
* @author Lucas Satabin
*/
trait Semiring[K] {

  /** Identity element of `plus`. */
  def zero: K

  /** Identity element of `times`. */
  def one: K

  /** Additive operation of the semiring. */
  def plus(k1: K, k2: K): K

  /** Multiplicative operation of the semiring. */
  def times(k1: K, k2: K): K

  /** Multiplicative inverse of `k` (where defined by the concrete semiring). */
  def inverse(k: K): K

}
| satabin/lingua | fst/src/main/scala/lingua/fst/semiring/Semiring.scala | Scala | apache-2.0 | 843 |
package com.sk.app.proxmock.application.domain.conditions
import com.sk.app.proxmock.application.configuration.ConfigurationContext
import org.springframework.messaging.Message
import scala.util.Random
/**
* Created by Szymon on 22.05.2016.
*/
/** Condition that passes or fails at random (coin flip), ignoring the message. */
case class RandomCondition() extends Condition {
  // One RNG per condition instance; seeded by default.
  val random = new Random()
  override def test(message: Message[Object], context: ConfigurationContext): Boolean = random.nextBoolean()
}
| szymonkudzia/proxmock | sources/src/main/scala/com/sk/app/proxmock/application/domain/conditions/RandomCondition.scala | Scala | mit | 438 |
package com.tribbloids.spookystuff.actions
import java.sql.Timestamp
import com.tribbloids.spookystuff.SpookyEnvFixture
import com.tribbloids.spookystuff.doc.Doc
import com.tribbloids.spookystuff.rdd.FetchedDataset
import com.tribbloids.spookystuff.testutils.LocalOnly
import org.scalatest.Tag
import org.scalatest.tags.Retryable
import scala.concurrent.duration
object TestWget {
  // Row type used to build a DataFrame for the interpolation test below.
  case class Sample(A: String, B: Timestamp)
}
@Retryable
/**
 * Integration tests for the Wget action: proxy routing, URL encoding,
 * backtrace contents and expression interpolation.
 */
class TestWget extends SpookyEnvFixture {

  import com.tribbloids.spookystuff.dsl._

  def wget(uri: String): Action = Wget(uri)

  lazy val noProxyIP: String = {
    spooky.spookyConf.webProxy = WebProxyFactories.NoProxy
    getIP()
  }

  Seq(
    "http" -> HTTP_IP_URL,
    "https" -> HTTPS_IP_URL
  ).foreach { tuple =>
    it(s"use TOR socks5 proxy for ${tuple._1} wget", Tag(classOf[LocalOnly].getCanonicalName)) {
      val newIP = {
        spooky.spookyConf.webProxy = WebProxyFactories.Tor
        getIP(tuple._2)
      }
      assert(newIP !== null)
      assert(newIP !== "")
      assert(newIP !== noProxyIP)
    }

    it(s"revert from TOR socks5 proxy for ${tuple._1} wget", Tag(classOf[LocalOnly].getCanonicalName)) {
      val newIP = {
        spooky.spookyConf.webProxy = WebProxyFactories.Tor
        getIP(tuple._2)
      }
      val noProxyIP2 = {
        spooky.spookyConf.webProxy = WebProxyFactories.NoProxy
        getIP(tuple._2)
      }
      assert(newIP !== noProxyIP2)
    }
  }

  /** Fetches the given IP-echo endpoint and returns the response body. */
  def getIP(url: String = HTTP_IP_URL): String = {
    // BUG FIX: this previously fetched HTTPS_IP_URL unconditionally, ignoring
    // `url` — which defeated the per-protocol (http vs https) tests above.
    val results = (
      wget(url) :: Nil
    ).fetch(spooky)

    results.head.asInstanceOf[Doc].code.get
  }

  //TODO: add canonized URI check
  it("wget should encode malformed url") {
    spooky.spookyConf.webProxy = WebProxyFactories.NoProxy

    val results = (
      wget("https://www.google.com/?q=giant robot") :: Nil
    ).fetch(spooky)

    assert(results.size === 1)
    val doc = results.head.asInstanceOf[Doc]
    assert(doc.uri.contains("?q=giant+robot") || doc.uri.contains("?q=giant%20robot"))
  }

  //TODO: find a new way to test it!
  //  test("wget should encode redirection to malformed url") {
  //
  //    spooky.conf.proxy = ProxyFactories.NoProxy
  //
  //    val url = "http://www.sigmaaldrich.com/catalog/search/SearchResultsPage?Query=%3Ca+href%3D%22%2Fcatalog%2Fsearch%3Fterm%3D81-25-4%26interface%3DCAS+No.%26N%3D0%26mode%3Dpartialmax%26lang%3Den%26region%3DUS%26focus%3Dproduct%22%3E81-25-4%3C%2Fa%3E&Scope=CASSearch&btnSearch.x=1"
  //
  //    val results = (
  //      Wget(url) :: Nil
  //    ).fetch(spooky)
  //
  //    assert(results.size === 1)
  //    val page = results.head.asInstanceOf[Page]
  //    assert(page.uri.contains("www.sigmaaldrich.com/catalog/AdvancedSearchPage"))
  //  }

  //TODO: find a new way to test it!
  //  test("wget should correct redirection to relative url path") {
  //    spooky.conf.proxy = ProxyFactories.NoProxy
  //
  //    val results = (
  //      wget("http://www.sigmaaldrich.com/etc/controller/controller-page.html?TablePage=17193175") :: Nil
  //    ).fetch(spooky)
  //
  //    assert(results.size === 1)
  //    val page = results.head.asInstanceOf[Page]
  //    assert(page.findAll("title").head.text.get.contains("Sigma-Aldrich"))
  //    assert(page.uri.contains("www.sigmaaldrich.com/labware"))
  //  }

  //TODO: how to simulate circular redirection?
  //  test("wget should smoothly fail on circular redirection") {
  //    spooky.conf.proxy = ProxyFactories.NoProxy
  //
  //    val results = (
  //      wget("http://www.perkinelmer.ca/en-ca/products/consumables-accessories/integrated-solutions/for-thermo-scientific-gcs/default.xhtml") :: Nil
  //    ).fetch(spooky)
  //
  //    assert(results.size === 1)
  //    assert(results.head.isInstanceOf[NoPage])
  //  }

  it("output of wget should not include session's backtrace") {
    spooky.spookyConf.webProxy = WebProxyFactories.NoProxy

    import duration._
    val results = (
      RandomDelay(1.seconds, 2.seconds)
        :: wget("http://www.wikipedia.org")
        :: Nil
    ).fetch(spooky)

    assert(results.size === 1)
    assert(results.head.uid.backtrace.children.last == wget("http://www.wikipedia.org"))
  }

  //TODO: how to simulate a PKIX exception page?
  //  test("wget should handle PKIX exception") {
  //    spooky.conf.proxy = ProxyFactories.NoProxy
  //
  //    val results = List(
  //      wget("https://www.canadacompany.ca/en/")
  //    ).fetch(spooky)
  //  }

  it("wget.interpolate should not overwrite each other") {
    val wget = Wget(
      'A
    ) waybackTo 'B.typed[Timestamp]

    // Distinct URLs and timestamps so collisions would be detectable.
    val rows = 1 to 5 map { i =>
      TestWget.Sample("http://dummy.com" + i, new Timestamp(i * 100000))
    }
    require(rows.map(_.B.getTime).distinct.size == rows.size)
    val df = sql.createDataFrame(sc.parallelize(rows))
    val set: FetchedDataset = spooky.create(df)
    require(set.toObjectRDD('B).collect().toSeq.map(_.asInstanceOf[Timestamp].getTime).distinct.size == rows.size)
    val fetchedRows = set.unsquashedRDD.collect()
    val interpolated = fetchedRows.map { fr =>
      wget.interpolate(fr, set.schema).get
    }
    assert(interpolated.distinct.length == rows.size)
    assert(interpolated.map(_.wayback).distinct.length == rows.size)
  }

  //  val classes = Seq(
  //    classOf[Wget],
  //    classOf[Visit],
  //    classOf[Snapshot]
  //  )
  //
  //  classes.foreach {
  //    clazz =>
  //      val name = clazz.getCanonicalName
  //
  //      test(s"$name.serialVersionUID should be generated properly") {
  //        val expected = SpookyUtils.hash(clazz)
  //        val actual = java.io.ObjectStreamClass.lookup(clazz).getSerialVersionUID
  //        assert(expected == actual)
  //      }
  //  }
}
| tribbloid/spookystuff | core/src/test/scala/com/tribbloids/spookystuff/actions/TestWget.scala | Scala | apache-2.0 | 5,707 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.bforms.typeclasses
/** Phantom marker for the file-upload service (FUS) backend URL. */
trait FusUrl

/** Phantom marker for the file-upload frontend (FUS-FE) URL. */
trait FusFeUrl

/** Type-indexed holder of a service base URL; `T` selects which service. */
trait ServiceUrl[T] {
  def url: String
}
| VlachJosef/bforms | app/uk/gov/hmrc/bforms/typeclasses/ServiceUrl.scala | Scala | apache-2.0 | 716 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.spark.rdd;
import JDKCollectionConvertersCompat.Converters._
import scala.reflect.ClassTag
import org.apache.commons.logging.LogFactory
import org.apache.spark.Partition
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.elasticsearch.hadoop.mr.security.HadoopUserProvider
import org.elasticsearch.hadoop.rest.InitializationUtils
import org.elasticsearch.hadoop.rest.RestService
import org.elasticsearch.hadoop.rest.PartitionDefinition
import org.elasticsearch.hadoop.util.ObjectUtils
import org.elasticsearch.spark.cfg.SparkSettingsManager
import org.elasticsearch.hadoop.rest.RestRepository
import scala.annotation.meta.param
/**
 * Base Spark RDD over an Elasticsearch index: one Spark partition per ES
 * shard/slice, with preferred locations set to the shard's host names.
 *
 * @param sc     Spark context (transient; not shipped with tasks)
 * @param params extra ES-Hadoop settings merged on top of the SparkConf
 */
private[spark] abstract class AbstractEsRDD[T: ClassTag](
  @(transient @param) sc: SparkContext,
  val params: scala.collection.Map[String, String] = Map.empty)
  extends RDD[T](sc, Nil) {

  // Forces CompatUtils class-loading early so version incompatibilities
  // surface at construction time rather than inside tasks.
  private val init = { ObjectUtils.loadClass("org.elasticsearch.spark.rdd.CompatUtils", classOf[ObjectUtils].getClassLoader) }

  @transient protected lazy val logger = LogFactory.getLog(this.getClass())

  // One Spark partition per Elasticsearch partition definition.
  override def getPartitions: Array[Partition] = {
    esPartitions.asScala.zipWithIndex.map { case(esPartition, idx) =>
      new EsPartition(id, idx, esPartition)
    }.toArray
  }

  // Prefer scheduling each task on the nodes hosting its ES shard.
  override def getPreferredLocations(split: Partition): Seq[String] = {
    val esSplit = split.asInstanceOf[EsPartition]
    esSplit.esPartition.getHostNames
  }

  override def checkpoint(): Unit = {
    // Do nothing. Elasticsearch RDD should not be checkpointed.
  }

  /** Count of matching documents, delegated to ES; opens and closes its own connection. */
  def esCount(): Long = {
    val repo = new RestRepository(esCfg)
    try {
      repo.count(true)
    } finally {
      repo.close()
    }
  }

  // Effective settings: SparkConf copy merged with per-RDD params.
  @transient private[spark] lazy val esCfg = {
    val cfg = new SparkSettingsManager().load(sc.getConf).copy();
    cfg.merge(params.asJava)
    InitializationUtils.setUserProviderIfNotSet(cfg, classOf[HadoopUserProvider], logger)
    cfg
  }

  // Partition layout discovered from the cluster; computed once per driver.
  @transient private[spark] lazy val esPartitions = {
    RestService.findPartitions(esCfg, logger)
  }
}
/** Spark partition wrapping one Elasticsearch partition definition. */
private[spark] class EsPartition(rddId: Int, idx: Int, val esPartition: PartitionDefinition)
  extends Partition {

  // Combines RDD id, index and ES partition so partitions of different RDDs differ.
  override def hashCode(): Int = 41 * (41 * (41 + rddId) + idx) + esPartition.hashCode()

  override val index: Int = idx
}
| elastic/elasticsearch-hadoop | spark/core/src/main/scala/org/elasticsearch/spark/rdd/AbstractEsRDD.scala | Scala | apache-2.0 | 3,065 |
/*
* Copyright 2009-2016 DigitalGlobe, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and limitations under the License.
*
*/
package org.mrgeo.mapalgebra
import org.apache.spark.{SparkContext, SparkConf}
import org.mrgeo.job.JobArguments
import org.mrgeo.mapalgebra.parser.ParserNode
import org.mrgeo.mapalgebra.raster.RasterMapOp
/** Registers the "slope" map-algebra function, implemented by [[SlopeAspectMapOp]] in slope mode. */
object SlopeMapOp extends MapOpRegistrar {
  override def register: Array[String] = {
    Array[String]("slope")
  }

  /** Builds a slope op over `raster`; `units` selects the angle units (default radians). */
  def create(raster:RasterMapOp, units:String="rad"):MapOp = {
    new SlopeAspectMapOp(Some(raster), units, true)
  }

  // Parser entry point; the `true` flag selects slope (vs aspect) mode.
  override def apply(node:ParserNode, variables: String => Option[ParserNode]): MapOp =
    new SlopeAspectMapOp(node, true, variables)
}
// Dummy class definition to allow the python reflection to find the Slope mapop;
// the real behavior lives in SlopeAspectMapOp (see the companion object above).
abstract class SlopeMapOp extends RasterMapOp {
}
| ttislerdg/mrgeo | mrgeo-mapalgebra/mrgeo-mapalgebra-terrain/src/main/java/org/mrgeo/mapalgebra/SlopeMapOp.scala | Scala | apache-2.0 | 1,320 |
package controllers
import javax.inject._
import play.api._
import play.api.mvc._
/**
* This controller creates an `Action` to handle HTTP requests to the
* application's home page.
*/
@Singleton
class HomeController @Inject() extends Controller {

  /**
   * Create an Action to render an HTML page with a welcome message.
   * The configuration in the `routes` file means that this method
   * will be called when the application receives a `GET` request with
   * a path of `/`.
   */
  def index = Action {
    // The templated view is disabled; a plain-text body is returned instead.
    //    Ok(views.html.xx.raraindex("Your new application is ready."))
    Ok(("Your new application is ready."))
  }
}
| razie/diesel-rx | wiki/app/controllers/HomeController.scala | Scala | apache-2.0 | 634 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.web
import java.security.cert.X509Certificate
import javax.servlet.http.{HttpServletRequest, HttpServletResponse}
import org.scalatra.ScalatraBase
import org.scalatra.auth.{ScentryConfig, ScentryStrategy, ScentrySupport}
package object scalatra {

  /** An authenticated principal, identified by its certificate's distinguished name. */
  case class User(dn: String)

  /** Scentry strategy that authenticates requests via the client's X509 certificate chain. */
  class PkiStrategy(protected val app: ScalatraBase)
                   (implicit request: HttpServletRequest, response: HttpServletResponse)
      extends ScentryStrategy[User] {

    override def name: String = "Pki"

    // The strategy always applies; presence of a certificate is checked in authenticate().
    override def isValid(implicit request: HttpServletRequest): Boolean = true

    /** Returns the subject DN of the first client certificate, if one was presented. */
    def authenticate()(implicit request: HttpServletRequest, response: HttpServletResponse): Option[User] = {
      val attribute = request.getAttribute("javax.servlet.request.X509Certificate")
      Option(attribute.asInstanceOf[Array[X509Certificate]])
        .filter(_.nonEmpty)
        .map(chain => User(chain.head.getSubjectX500Principal.getName))
    }
  }

  /** Mixin that wires PKI authentication into a Scalatra servlet/filter via Scentry. */
  trait PkiAuthenticationSupport
      extends ScalatraBase with
      ScentrySupport[User] {

    protected def fromSession = { case dn: String => User(dn) }
    protected def toSession = { case usr: User => usr.dn }

    protected val scentryConfig = new ScentryConfig {}.asInstanceOf[ScentryConfiguration]

    override protected def registerAuthStrategies() = {
      scentry.register("Pki", app => new PkiStrategy(app))
    }
  }
}
| elahrvivaz/geomesa | geomesa-web/geomesa-web-core/src/main/scala/org/locationtech/geomesa/web/scalatra/package.scala | Scala | apache-2.0 | 1,901 |
package brainiak.search.strategies
import brainiak.search.{Node, Strategy}
import scala.collection.immutable.List
/** Factory for [[DepthFirst]] search strategies. */
object DepthFirst {
  // Shorthand for `new DepthFirst`.
  def apply: DepthFirst = new DepthFirst
}
/** LIFO (depth-first) frontier for search: the most recently added node is expanded first. */
class DepthFirst extends Strategy {

  /** Pending nodes; the head is the next node to be expanded. */
  var stack: List[Node] = Nil

  /** Pushes `state` unless it is already pending; returns this strategy for chaining. */
  def <<(state: Node): Strategy = {
    if (stack.contains(state)) this
    else {
      stack = state :: stack
      this
    }
  }

  /** True when `state` is already queued for expansion. */
  def contains(state: Node) = stack.contains(state)

  /** True when no nodes remain to expand. */
  def isEmpty: Boolean = stack.isEmpty

  /** Pops and returns the most recently added node; fails on an empty frontier. */
  def actual: Node = {
    val next = stack.head
    stack = stack.tail
    next
  }
}
| pintowar/brainiak | brainiak-core/src/main/scala/brainiak/search/strategies/DepthFirst.scala | Scala | apache-2.0 | 631 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.parquet
import java.nio.file.Files
import java.time.temporal.ChronoUnit
import com.vividsolutions.jts.geom.{Coordinate, Point}
import org.apache.commons.io.FileUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileContext, Path}
import org.geotools.data.Query
import org.geotools.factory.CommonFactoryFinder
import org.geotools.filter.text.ecql.ECQL
import org.geotools.geometry.jts.JTSFactoryFinder
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.fs.storage.common.PartitionScheme
import org.locationtech.geomesa.fs.storage.common.partitions.{CompositeScheme, DateTimeScheme, Z2Scheme}
import org.locationtech.geomesa.index.planning.QueryPlanner
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.io.WithClose
import org.opengis.feature.simple.SimpleFeature
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import org.specs2.specification.AllExpectations
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class ParquetFSTest extends Specification with AllExpectations {
sequential
val gf = JTSFactoryFinder.getGeometryFactory
val sft = SimpleFeatureTypes.createType("test", "name:String,age:Int,dtg:Date,*geom:Point:srid=4326")
val ff = CommonFactoryFinder.getFilterFactory2
val tempDir = Files.createTempDirectory("geomesa")
val fc = FileContext.getFileContext(tempDir.toUri)
val parquetFactory = new ParquetFileSystemStorageFactory
val conf = new Configuration()
conf.set("parquet.compression", "gzip")
val scheme = new CompositeScheme(Seq(
new DateTimeScheme("yyy/DDD/HH", ChronoUnit.HOURS, 1, "dtg", false),
new Z2Scheme(10, "geom", false)
))
PartitionScheme.addToSft(sft, scheme)
val fsStorage = parquetFactory.create(fc, conf, new Path(tempDir.toUri), sft)
val sf1 = new ScalaSimpleFeature(sft, "1", Array("first", Integer.valueOf(100), new java.util.Date, gf.createPoint(new Coordinate(25.236263, 27.436734))))
val sf2 = new ScalaSimpleFeature(sft, "2", Array(null, Integer.valueOf(200), new java.util.Date, gf.createPoint(new Coordinate(67.2363, 55.236))))
val sf3 = new ScalaSimpleFeature(sft, "3", Array("third", Integer.valueOf(300), new java.util.Date, gf.createPoint(new Coordinate(73.0, 73.0))))
"ParquetFileSystemStorage" should {
"write and read features" >> {
val partitions = List(sf1, sf2, sf3).map(fsStorage.getPartition)
List[SimpleFeature](sf1, sf2, sf3)
.zip(partitions)
.groupBy(_._2)
.foreach { case (partition, features) =>
val writer = fsStorage.getWriter(partition)
features.map(_._1).foreach(writer.write)
writer.close()
}
WithClose(fsStorage.getReader(partitions.take(1), new Query("test", ECQL.toFilter("name = 'first'")))) { reader =>
val features = reader.toList
features must haveSize(1)
features.head.getAttribute("name") mustEqual "first"
features.head.getAttribute("dtg") must not(beNull)
features.head.getDefaultGeometry.asInstanceOf[Point].getX mustEqual 25.236263
features.head.getDefaultGeometry.asInstanceOf[Point].getY mustEqual 27.436734
}
WithClose(fsStorage.getReader(partitions.slice(2, 3), new Query("test", ECQL.toFilter("name = 'third'")))) { reader =>
val features = reader.toList
features must haveSize(1)
features.head.getAttribute("name") mustEqual "third"
features.head.getAttribute("dtg") must not(beNull)
features.head.getDefaultGeometry.asInstanceOf[Point].getX mustEqual 73.0
features.head.getDefaultGeometry.asInstanceOf[Point].getY mustEqual 73.0
}
val transform = new Query("test", ECQL.toFilter("name = 'third'"), Array("dtg", "geom"))
QueryPlanner.setQueryTransforms(transform, sft)
WithClose(fsStorage.getReader(partitions.slice(2, 3), transform)) { reader =>
val features = reader.toList
features must haveSize(1)
features.head.getFeatureType.getAttributeDescriptors.map(_.getLocalName) mustEqual Seq("dtg", "geom")
features.head.getAttribute("name") must beNull
features.head.getAttribute("dtg") must not(beNull)
features.head.getDefaultGeometry.asInstanceOf[Point].getX mustEqual 73.0
features.head.getDefaultGeometry.asInstanceOf[Point].getY mustEqual 73.0
}
}
}
step {
FileUtils.deleteDirectory(tempDir.toFile)
}
}
| jahhulbert-ccri/geomesa | geomesa-fs/geomesa-fs-storage/geomesa-fs-storage-parquet/src/test/scala/org/locationtech/geomesa/parquet/ParquetFSTest.scala | Scala | apache-2.0 | 5,046 |
package waldap.core.controller
import javax.servlet.http.HttpServletRequest
import javax.servlet.{FilterChain, ServletRequest, ServletResponse}
import io.github.gitbucket.scalatra.forms.{ClientSideValidationFormSupport, ValueType}
import waldap.core.ldap.WaldapLdapServer
import waldap.core.model.Account
import waldap.core.util.{Keys, StringUtil}
import waldap.core.util.Implicits._
import waldap.core.util.SyntaxSugars._
import waldap.core.util._
import waldap.core.service.SystemSettingsService
import org.apache.directory.server.core.api.CoreSession
import org.scalatra.{FlashMap, FlashMapSupport, Route, ScalatraFilter}
import org.scalatra.i18n.{I18nSupport, Messages}
import org.scalatra.json.JacksonJsonSupport
/**
 * Base class for all controllers: wires up form validation, JSON rendering,
 * i18n, flash scope and system settings, and provides a per-request [[Context]]
 * cached in a ThreadLocal.
 */
abstract class ControllerBase
    extends ScalatraFilter
    with ClientSideValidationFormSupport
    with JacksonJsonSupport
    with I18nSupport
    with FlashMapSupport
    with SystemSettingsService {
  // Clear the per-thread Context after each request so pooled threads don't leak state.
  override def doFilter(request: ServletRequest, response: ServletResponse, chain: FilterChain): Unit =
    try {
      super.doFilter(request, response, chain)
    } finally {
      contextCache.remove()
    }
  implicit val jsonFormats = waldap.core.util.JsonFormat.jsonFormats
  /**
   * 401 response for admin pages. Ajax requests get a bare 401; authenticated
   * users are bounced to "/"; anonymous GETs are sent to the sign-in page with
   * a `redirect` parameter preserving the original URI and query string.
   */
  protected def UnauthorizedAdmin()(implicit context: Context) =
    if (request.hasAttribute(Keys.Request.Ajax)) {
      org.scalatra.Unauthorized()
    } else {
      if (context.loginAccount.isDefined) {
        org.scalatra.Unauthorized(redirect("/"))
      } else {
        if (request.getMethod.toUpperCase == "POST") {
          org.scalatra.Unauthorized(redirect("/admin/signin"))
        } else {
          org.scalatra.Unauthorized(
            redirect(
              "/admin/signin?redirect=" + StringUtil.urlEncode(
                defining(request.getQueryString) { queryString =>
                  request.getRequestURI.substring(request.getContextPath.length) + (if (queryString != null)
                                                                                     "?" + queryString
                                                                                   else "")
                }
              )
            )
          )
        }
      }
    }
  // GET route that marks the request as Ajax before running `action`.
  def ajaxGet(path: String)(action: => Any): Route =
    super.get(path) {
      request.setAttribute(Keys.Request.Ajax, "true")
      action
    }
  // Form-validating variant of ajaxGet.
  override def ajaxGet[T](path: String, form: ValueType[T])(action: T => Any): Route =
    super.ajaxGet(path, form) { form =>
      request.setAttribute(Keys.Request.Ajax, "true")
      action(form)
    }
  // POST route that marks the request as Ajax before running `action`.
  def ajaxPost(path: String)(action: => Any): Route =
    super.post(path) {
      request.setAttribute(Keys.Request.Ajax, "true")
      action
    }
  // Form-validating variant of ajaxPost.
  override def ajaxPost[T](path: String, form: ValueType[T])(action: T => Any): Route =
    super.ajaxPost(path, form) { form =>
      request.setAttribute(Keys.Request.Ajax, "true")
      action(form)
    }
  // 404 response: bare for Ajax requests, HTML error page otherwise.
  protected def NotFound() =
    if (request.hasAttribute(Keys.Request.Ajax)) {
      org.scalatra.NotFound()
    } else {
      org.scalatra.NotFound(waldap.core.html.error("Not Found"))
    }
  // Per-thread cache for the request Context; cleared in doFilter's finally block.
  private val contextCache = new java.lang.ThreadLocal[Context]()
  /** Lazily builds (and caches for this request) the [[Context]] for the current thread. */
  implicit def context: Context = {
    contextCache.get match {
      case null =>
        val context = Context(loadSystemSettings(), LoginAccount, request, messages)
        contextCache.set(context)
        context
      case context => context
    }
  }
  implicit def ldapSession: CoreSession = context.ldapSession
  // Logged-in account, from the request attribute first, then the HTTP session.
  private def LoginAccount: Option[Account] =
    request.getAs[Account](Keys.Session.LoginAccount).orElse(session.getAs[Account](Keys.Session.LoginAccount))
}
/**
 * Per-request context: resolved system settings, the logged-in account (if any),
 * the raw servlet request and the i18n message bundle.
 */
case class Context(
  settings: SystemSettingsService.SystemSettings,
  loginAccount: Option[Account],
  request: HttpServletRequest,
  messages: Messages
) {
  // Application mount path: configured base URL if present, else the servlet context path.
  val path = settings.baseUrl.getOrElse(request.getContextPath)
  // Request URI relative to the context path.
  val currentPath = request.getRequestURI.substring(request.getContextPath.length)
  val baseUrl = settings.baseUrl(request)
  val host = new java.net.URL(baseUrl).getHost
  // Admin session against the embedded LDAP server.
  val ldapSession = WaldapLdapServer.directoryService.getAdminSession
  /**
   * Get object from cache.
   *
   * If object has not been cached with the specified key then retrieves by given action.
   * Cached object are available during a request.
   */
  def cache[A](key: String)(action: => A): A =
    defining(Keys.Request.Cache(key)) { cacheKey =>
      Option(request.getAttribute(cacheKey).asInstanceOf[A]).getOrElse {
        val newObject = action
        request.setAttribute(cacheKey, newObject)
        newObject
      }
    }
}
| kounoike/waldap | src/main/scala/waldap/core/controller/ControllerBase.scala | Scala | apache-2.0 | 4,594 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.process.transform
import java.util.Date
import org.locationtech.jts.geom.Point
import org.geotools.data.collection.ListFeatureCollection
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder.EncodedValues
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class BinConversionProcessTest extends Specification {
import scala.collection.JavaConversions._
val sft = SimpleFeatureTypes.createType("bin",
"name:String,track:String,dtg:Date,dtg2:Date,*geom:Point:srid=4326,geom2:Point:srid=4326")
val process = new BinConversionProcess
val features = (0 until 10).map { i =>
val sf = new ScalaSimpleFeature(sft, s"0$i")
sf.setAttribute("name", s"name$i")
sf.setAttribute("track", s"$i")
sf.setAttribute("dtg", s"2017-02-20T00:00:0$i.000Z")
sf.setAttribute("dtg2", s"2017-02-21T00:00:0$i.000Z")
sf.setAttribute("geom", s"POINT(40 ${50 + i})")
sf.setAttribute("geom2", s"POINT(20 ${30 + i})")
sf
}
val ids = features.map(_.getID.hashCode)
val names = features.map(_.getAttribute("name").hashCode)
val tracks = features.map(_.getAttribute("track").hashCode)
val dates = features.map(_.getAttribute("dtg").asInstanceOf[Date].getTime)
val dates2 = features.map(_.getAttribute("dtg2").asInstanceOf[Date].getTime)
val lonlat = features.map(_.getAttribute("geom").asInstanceOf[Point]).map(p => (p.getY.toFloat, p.getX.toFloat))
val latlon = lonlat.map(_.swap)
val lonlat2 = features.map(_.getAttribute("geom2").asInstanceOf[Point]).map(p => (p.getY.toFloat, p.getX.toFloat))
val latlon2 = lonlat2.map(_.swap)
val listCollection = new ListFeatureCollection(sft, features)
// converts to tuples that we can compare to zipped values
def toTuples(value: EncodedValues): Any = value match {
case EncodedValues(trackId, lat, lon, dtg, label) if label == -1L => ((trackId, dtg), (lat, lon))
case EncodedValues(trackId, lat, lon, dtg, label) => (((trackId, dtg), (lat, lon)), label)
}
"BinConversionProcess" should {
"encode an empty feature collection" in {
val bytes = process.execute(new ListFeatureCollection(sft), null, null, null, null, "lonlat")
bytes must beEmpty
}
"encode a generic feature collection" in {
val bytes = process.execute(listCollection, null, null, null, null, "lonlat").toList
bytes must haveLength(10)
val decoded = bytes.map(BinaryOutputEncoder.decode).map(toTuples)
decoded must containTheSameElementsAs(ids.zip(dates).zip(lonlat))
}
"encode a generic feature collection with alternate values" in {
val bytes = process.execute(listCollection, "name", "geom2", "dtg2", null, "lonlat").toList
bytes must haveLength(10)
val decoded = bytes.map(BinaryOutputEncoder.decode).map(toTuples)
decoded must containTheSameElementsAs(names.zip(dates2).zip(lonlat2))
}
"encode a generic feature collection with labels" in {
val bytes = process.execute(listCollection, null, null, null, "track", "lonlat").toList
bytes must haveLength(10)
val decoded = bytes.map(BinaryOutputEncoder.decode).map(toTuples)
decoded must containTheSameElementsAs(ids.zip(dates).zip(lonlat).zip(tracks))
}
}
}
| elahrvivaz/geomesa | geomesa-process/geomesa-process-vector/src/test/scala/org/locationtech/geomesa/process/transform/BinConversionProcessTest.scala | Scala | apache-2.0 | 3,989 |
package edu.gemini.dbTools.ephemeris
import edu.gemini.dbTools.ephemeris.ExportError.FileError
import edu.gemini.spModel.core.HorizonsDesignation
import java.net.{URLDecoder, URLEncoder}
import java.nio.charset.StandardCharsets.UTF_8
import java.nio.file.{Path, Files}
import java.time.Instant
import scala.collection.JavaConverters._
import scalaz._, Scalaz._
/** File-system view of a directory of ephemeris files, keyed by Horizons designation. */
sealed trait EphemerisFiles {
  /** Gets an action that will return the id of every ephemeris file in the
    * directory.
    */
  def list: TryExport[ISet[HorizonsDesignation]]
  /** Obtains the `Path` corresponding to the given id (no existence check). */
  def path(hid: HorizonsDesignation): Path
  /** Gets an action that deletes the file corresponding to the given id, if
    * any.
    * @return `true` if the file is deleted, `false` otherwise
    */
  def delete(hid: HorizonsDesignation): TryExport[Boolean]
  /** Gets an action that deletes all the files corresponding to the given
    * ids, if any.
    * @return `true` if any file is actually deleted, `false` otherwise
    */
  def deleteAll(hids: ISet[HorizonsDesignation]): TryExport[Boolean]
  /** Gets an action that reads the content of the ephemeris file with the
    * given id into a String if it exists, but produces a `FileError` otherwise.
    */
  def read(hid: HorizonsDesignation): TryExport[String]
  /** Gets an action that parses the content of an ephemeris file corresponding
    * to the given id if it exists and is readable, but produces a `FileError`
    * otherwise.
    */
  def parse(hid: HorizonsDesignation): TryExport[EphemerisMap]
  /** Gets an action that parses the timestamps of an ephemeris file
    * corresponding to the given id if it exists and is readable, but produces
    * a `FileError` otherwise.
    */
  def parseTimes(hid: HorizonsDesignation): TryExport[ISet[Instant]]
  /** Gets an action that will write the given ephemeris map to a file with a
    * name corresponding to the given id, replacing any existing file.
    */
  def write(hid: HorizonsDesignation, em: EphemerisMap): TryExport[Path]
}
object EphemerisFiles {

  // Matches "<url-encoded id>.eph". The dot is escaped so names such as
  // "fooXeph" are rejected (the previous `(.*).eph$` let any character stand
  // in for the dot).
  private val EphRegex = """(.*)\.eph$""".r

  /** File name for `hid`: the URL-encoded designation with a ".eph" suffix. */
  def filename(hid: HorizonsDesignation): String =
    URLEncoder.encode(s"${hid.show}.eph", UTF_8.name)

  /** Inverse of [[filename]]: recovers the designation from a file name, if it parses. */
  def horizonsDesignation(filename: String): Option[HorizonsDesignation] =
    filename match {
      case EphRegex(prefix) => HorizonsDesignation.read(URLDecoder.decode(prefix, UTF_8.name))
      case _                => None
    }

  /** Concrete [[EphemerisFiles]] implementation rooted at `dir`. */
  def apply(dir: Path): EphemerisFiles = new EphemerisFiles {
    val list: TryExport[ISet[HorizonsDesignation]] =
      TryExport.fromTryCatch(ex => FileError("Error listing existing ephemeris files", None, Some(ex))) {
        // Files.list returns a Stream backed by an open directory handle,
        // which must be closed explicitly or the handle leaks.
        val stream = Files.list(dir)
        try {
          ISet.fromList {
            stream.iterator.asScala.toList.flatMap { p =>
              horizonsDesignation(p.getFileName.toString)
            }
          }
        } finally stream.close()
      }

    def path(hid: HorizonsDesignation): Path =
      dir.resolve(filename(hid))

    // Uniform FileError for any failed file operation on `hid`.
    def error(action: String, hid: HorizonsDesignation, ex: Throwable): FileError =
      FileError(s"Error $action ephemeris file", Some(hid), Some(ex))

    // Wraps a raw Path operation in TryExport with a descriptive error.
    def fileOp[T](action: String, hid: HorizonsDesignation)(op: Path => T): TryExport[T] =
      TryExport.fromTryCatch(error(action, hid, _)) { op(path(hid)) }

    def delete(hid: HorizonsDesignation): TryExport[Boolean] =
      fileOp("deleting", hid) { Files.deleteIfExists }

    def deleteAll(hids: ISet[HorizonsDesignation]): TryExport[Boolean] =
      hids.toList.traverseU { delete }.map(_.any(identity))

    def read(hid: HorizonsDesignation): TryExport[String] =
      fileOp("reading", hid) { Files.readAllLines(_, UTF_8).asScala.mkString("\\n") }

    // Reads and parses with `parser`, converting a parse failure into a FileError.
    def parseContent[T](hid: HorizonsDesignation)(parser: String => String \/ T): TryExport[T] = {
      def parseString(content: String): TryExport[T] =
        TryExport.fromDisjunction {
          parser(content).leftMap { s =>
            FileError(s"Could not parse ephemeris data: $s", Some(hid), None)
          }
        }
      read(hid) >>= parseString
    }

    def parse(hid: HorizonsDesignation): TryExport[EphemerisMap] =
      parseContent(hid)(EphemerisFileFormat.parse)

    def parseTimes(hid: HorizonsDesignation): TryExport[ISet[Instant]] =
      parseContent(hid)(EphemerisFileFormat.parseTimestamps)

    def write(hid: HorizonsDesignation, em: EphemerisMap): TryExport[Path] =
      fileOp("writing", hid) { p =>
        Files.write(p, EphemerisFileFormat.format(em).getBytes(UTF_8))
        p
      }
  }
}
| spakzad/ocs | bundle/edu.gemini.spdb.reports.collection/src/main/scala/edu/gemini/dbTools/ephemeris/EphemerisFiles.scala | Scala | bsd-3-clause | 4,566 |
package com.atomist.rug.runtime.js
import com.atomist.project.archive.RugResolver
import com.atomist.rug.runtime.Rug
import jdk.nashorn.api.scripting.ScriptObjectMirror
import scala.collection.JavaConverters._
/**
 * Interface for finding rugs of different types in Nashorn.
 *
 * Scans the vars exposed by a [[JavaScriptContext]], accepting both single
 * candidate objects and arrays of candidates.
 */
trait JavaScriptRugFinder[R <: Rug] {
  /** Finds all rugs of type `R` among the context's vars. */
  def find(jsc: JavaScriptContext, resolver: Option[RugResolver] = None): Seq[R] = {
    jsc.vars.flatMap {
      case Var(_, handler) if isValidRug(handler) =>
        create(jsc, handler, resolver)
      case Var(_, arr) if arr.isArray =>
        // Arrays may contain a mix of values; keep only valid rug candidates.
        arr.values().asScala.collect {
          case som: ScriptObjectMirror if isValidRug(som) => som
        }.flatMap(create(jsc, _, resolver))
      case _ => None
    }
  }
  /**
   * Is the supplied thing valid?
   *
   * Requires the generic rug markers (__kind, __description, and either a
   * __name or a constructor) plus the subtype-specific check in [[isValid]].
   */
  def isValidRug(obj: ScriptObjectMirror): Boolean = {
    obj.hasMember("__kind") &&
      obj.hasMember("__description") &&
      (obj.hasMember("__name") || obj.hasMember("constructor")) &&
      isValid(obj)
  }
  /** Subtype-specific validity check for a candidate object. */
  def isValid(obj: ScriptObjectMirror): Boolean
  /** Builds a rug of type `R` from a validated candidate, if possible. */
  def create(jsc: JavaScriptContext, jsVar: ScriptObjectMirror, resolver: Option[RugResolver]): Option[R]
}
| atomist/rug | src/main/scala/com/atomist/rug/runtime/js/JavaScriptRugFinder.scala | Scala | gpl-3.0 | 1,165 |
package com.lookout.borderpatrol.session
import java.util.concurrent.TimeUnit
import javax.crypto.spec.SecretKeySpec
import com.twitter.util.{Duration, Time}
/** Expiry policy for session-signing secrets: each secret lives for one day. */
object SecretExpiry extends Expiry {
  val lifetime = Duration(1, TimeUnit.DAYS)
}
/** An HMAC-SHA256 signing secret with an id, entropy bytes and an expiry time. */
sealed trait ASecret extends Signer {
  val algo = "HmacSHA256"
  val entropySize = Constants.Secret.entropySize
  val id: Byte
  val entropy: List[Byte]
  val expiry: Time
  // Key is derived lazily from the entropy so construction stays cheap.
  lazy val key = new SecretKeySpec(entropy.toArray, algo)
}
/** Concrete secret; id and entropy default to freshly generated random bytes. */
case class Secret(expiry: Time,
                  id: Byte = Generator(1).head,
                  entropy: List[Byte] = Generator(16).toList) extends ASecret
/** The active secret plus the one it replaced (kept for verifying older signatures). */
case class Secrets(current: Secret, previous: Secret)
object Secrets {
  // Test fixture: a live current secret and an already-expired previous one.
  def mockSecrets: Secrets =
    Secrets(Secret(Time.now), Secret(Time.fromMilliseconds(0)))
}
| rtyler/borderpatrol | borderpatrol-core/src/main/scala/com/lookout/borderpatrol/session/Secret.scala | Scala | mit | 806 |
package monocle.law.discipline.function
import monocle.function.Each._
import monocle.function._
import monocle.law.discipline.TraversalTests
import org.scalacheck.Arbitrary
import org.typelevel.discipline.Laws
import cats.Eq
/** Discipline rule set checking the Traversal laws for an `Each[S, A]` instance. */
object EachTests extends Laws {
  def apply[S: Eq: Arbitrary, A: Eq: Arbitrary](implicit evEach: Each[S, A], arbAA: Arbitrary[A => A]): RuleSet =
    new SimpleRuleSet("Each", TraversalTests(each[S, A]).props: _*)
}
| julien-truffaut/Monocle | law/src/main/scala/monocle/law/discipline/function/EachTests.scala | Scala | mit | 445 |
// Copyright: 2010 - 2018 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.indexer.graph
import java.sql.Timestamp
import java.util.concurrent.{ Executors, ThreadFactory, TimeUnit }
import scala.Predef._
import scala.collection.mutable
import scala.concurrent._
import scala.util.Try
import akka.event.slf4j.SLF4JLogging
import com.orientechnologies.orient.core.Orient
import com.orientechnologies.orient.core.config.OGlobalConfiguration
import com.orientechnologies.orient.core.metadata.schema.OType
import com.tinkerpop.blueprints.Vertex
import com.tinkerpop.blueprints.impls.orient.OrientGraphFactory
import org.apache.commons.vfs2.FileObject
import org.ensime.api.{ DeclaredAs, EnsimeFile }
import org.ensime.indexer._
import org.ensime.indexer.IndexService.FqnIndex
import org.ensime.indexer.SearchService._
import org.ensime.indexer.orientdb.api._
import org.ensime.indexer.orientdb.schema.api._
import org.ensime.indexer.orientdb.syntax._
import org.ensime.util.ensimefile._
import org.ensime.util.file._
import org.ensime.util.fileobject._
import org.ensime.util.stringymap.api.BigDataFormat
import org.ensime.vfs._
import shapeless.cachedImplicit
// I'm not particularly keen on this kind of OOP modelling...
/** A symbol stored in the graph, addressed by its fully qualified name. */
sealed trait FqnSymbol {
  def fqn: String
  def line: Option[Int]
  def source: Option[String] // uri
  def declAs: DeclaredAs
  def access: Access
  def scalaName: Option[String]
  // Resolves the source uri through the VFS, when a source is known.
  def sourceFileObject(implicit vfs: EnsimeVFS): Option[FileObject] =
    source.map(vfs.vfile)
  // Human-readable search hit; prefers the Scala name over the raw FQN.
  def toSearchResult: String = s"$declAs ${scalaName.getOrElse(fqn)}"
}
object FqnSymbol {
  /** Builds a stub symbol for a reference whose definition has not been seen:
    * only the FQN is known, so the remaining fields are null/None placeholders.
    * Primitive class names and unknown reference kinds yield None.
    */
  private[graph] def fromFullyQualifiedReference(
    ref: FullyQualifiedReference
  ): Option[FqnSymbol] = ref.fqn match {
    case cn: ClassName if !cn.isPrimitive =>
      Some(
        ClassDef(ref.fqn.fqnString,
                 null,
                 null,
                 None,
                 None,
                 null,
                 None,
                 None,
                 None)
      )
    case fn: FieldName =>
      Some(Field(ref.fqn.fqnString, None, None, None, null, None))
    case mn: MethodName =>
      Some(Method(ref.fqn.fqnString, None, None, null, None))
    case _ => None
  }
}
/** A node in a class hierarchy tree: either a leaf [[ClassDef]] or a [[TypeHierarchy]]. */
sealed trait Hierarchy
object Hierarchy {
  /** Which way to walk the hierarchy from a starting class. */
  sealed trait Direction
  case object Supertypes extends Direction
  case object Subtypes extends Direction
}
/** A class together with its related types in the requested direction. */
final case class TypeHierarchy(aClass: ClassDef, typeRefs: Seq[Hierarchy])
    extends Hierarchy
/** A class definition vertex; `declAs` falls back to Class when scalap gave no answer. */
final case class ClassDef(
  fqn: String,
  file: String, // the underlying file (should be shared)
  path: String, // the VFS handle (e.g. classes in jars)
  source: Option[String], // should be shared
  line: Option[Int],
  access: Access,
  scalaName: Option[String],
  scalapDeclaredAs: Option[DeclaredAs],
  jdi: Option[String] // the JDI name for the source: bin/pkg/Source.scala (should be shared)
) extends FqnSymbol
    with Hierarchy {
  override def declAs: DeclaredAs = scalapDeclaredAs.getOrElse(DeclaredAs.Class)
}
/** A symbol owned by a class: a field or a method. */
sealed trait Member extends FqnSymbol
/** A field vertex; `internal` holds the bytecode-level name when it differs from `fqn`. */
final case class Field(
  fqn: String,
  internal: Option[String],
  line: Option[Int],
  source: Option[String],
  access: Access,
  scalaName: Option[String]
) extends Member {
  override def declAs: DeclaredAs = DeclaredAs.Field
}
/** A method vertex. */
final case class Method(
  fqn: String,
  line: Option[Int],
  source: Option[String],
  access: Access,
  scalaName: Option[String]
) extends Member {
  override def declAs: DeclaredAs = DeclaredAs.Method
}
/** Where a symbol is used: the source file uri (if known) and line number. */
final case class UsageLocation(file: Option[String], line: Option[Int])
/** A file we have indexed, with the modification time recorded at index time. */
final case class FileCheck(filename: String, timestamp: Timestamp) {
  def file(implicit vfs: EnsimeVFS): FileObject = vfs.vfile(filename)
  def lastModified: Long = timestamp.getTime
  // True when the file on disk has been modified since we recorded it.
  def changed(implicit vfs: EnsimeVFS): Boolean =
    file.getContent.getLastModifiedTime != lastModified
}
/** Constructors for [[FileCheck]] snapshots. */
object FileCheck extends ((String, Timestamp) => FileCheck) {
  /** Snapshots `f`'s current timestamp; files that do not exist are recorded as -1. */
  def apply(f: FileObject): FileCheck = {
    val name = f.uriString
    val millis = if (f.exists()) f.getContent.getLastModifiedTime else -1L
    FileCheck(name, new Timestamp(millis))
  }
  /** Resolves `path` through the VFS and snapshots the resulting file. */
  def fromPath(path: String)(implicit vfs: EnsimeVFS): FileCheck =
    apply(vfs.vfile(path))
}
// core/it:test-only *Search* -- -z prestine
class GraphService(dir: File) extends SLF4JLogging {
import org.ensime.indexer.graph.GraphService._
// all methods return Future, which means we can do isolation by
// doing all work on a single worker Thread. We can't optimise until
// we better understand the concurrency limitations (the fact that
// we have to write dummy vertices to add edges doesn't help)
private val pools = 1
private val executor = Executors.newSingleThreadExecutor(
new ThreadFactory() {
override def newThread(runnable: Runnable): Thread = {
val thread = Executors.defaultThreadFactory().newThread(runnable)
thread.setName("GraphService")
thread.setDaemon(true)
thread
}
}
)
private implicit val ec = ExecutionContext.fromExecutor(executor)
private implicit lazy val db: OrientGraphFactory = {
// http://orientdb.com/docs/2.1/Performance-Tuning.html
OGlobalConfiguration.USE_WAL.setValue(true)
OGlobalConfiguration.DISK_CACHE_SIZE.setValue(64) // 64MB is far more sensible than 4GB
Orient.setRegisterDatabaseByPath(true)
val url = "plocal:" + dir.getAbsolutePath
val db = new OrientGraphFactory(url).setupPool(pools, pools)
db.setAutoStartTx(false)
db.setUseLightweightEdges(true)
db.setUseLog(true)
db
}
def shutdown(): Future[Unit] =
Future {
blocking {
try {
executor.shutdownNow()
executor.awaitTermination(30, TimeUnit.SECONDS)
()
} finally {
Try(db.close())
Try(db.getDatabase.getStorage.close(true, false))
}
}
}(ExecutionContext.Implicits.global) // must be from a different thread than the executor
if (!dir.exists) {
log.info("creating the graph database...")
dir.mkdirs()
// schema changes are not transactional
val g = db.getNoTx()
val fqnSymbolClass = g.createVertexType("FqnSymbol")
fqnSymbolClass.createProperty("fqn", OType.STRING).setMandatory(true)
fqnSymbolClass.createProperty("line", OType.INTEGER).setMandatory(false)
fqnSymbolClass.createProperty("source", OType.STRING).setMandatory(false)
val memberSymbolClass = g.createVertexType("Member", fqnSymbolClass)
g.createVertexFrom[ClassDef](superClass = Some(fqnSymbolClass))
g.createVertexFrom[Method](superClass = Some(memberSymbolClass))
g.createVertexFrom[Field](superClass = Some(memberSymbolClass))
g.createVertexFrom[FileCheck]()
g.createEdge[DefinedIn.type]
.createEdge[EnclosingClass.type]
.createEdge[UsedIn.type]
.createEdge[IsParent.type]
g.createIndexOn[Vertex, FqnSymbol, String](Unique)
g.createIndexOn[Vertex, FileCheck, String](Unique)
g.shutdown()
log.info("... created the graph database")
}
def knownFiles(): Future[Seq[FileCheck]] = withGraphAsync { implicit g =>
RichGraph.allV[FileCheck]
}
def outOfDate(f: FileObject)(implicit vfs: EnsimeVFS): Future[Boolean] =
withGraphAsync { implicit g =>
RichGraph.readUniqueV[FileCheck, String](f.uriString) match {
case None => true
case Some(v) => v.toDomain.changed
}
}
def persist(symbols: Seq[SourceSymbolInfo]): Future[Int] = withGraphAsync {
implicit g =>
val checks = mutable.Map.empty[String, VertexT[FileCheck]]
val classes = mutable.Map.empty[String, VertexT[ClassDef]]
g.begin()
symbols.foreach { s =>
val scalaName = s.scalapSymbol.map(_.scalaName)
val typeSignature = s.scalapSymbol.map(_.typeSignature)
val declAs = s.scalapSymbol.map(_.declaredAs)
val vertex = s match {
case EmptySourceSymbolInfo(fileCheck) =>
if (!checks.contains(fileCheck.filename)) {
RichGraph.upsertV[FileCheck, String](fileCheck)
}
None
case ClassSymbolInfo(fileCheck,
path,
source,
refs,
bs,
scalap,
jdi) =>
val classDef = ClassDef(bs.fqn,
fileCheck.filename,
path,
source,
bs.source.line,
bs.access,
scalaName,
declAs,
jdi)
val fileV =
checks.getOrElse(fileCheck.filename,
RichGraph.upsertV[FileCheck, String](fileCheck))
val classV = RichGraph.upsertV[ClassDef, String](classDef)
classes += (bs.fqn -> classV)
RichGraph.insertE(classV, fileV, DefinedIn)
val superClass = bs.superClass.map(
name =>
ClassDef(name.fqnString,
null,
null,
None,
None,
null,
None,
None,
None)
)
val interfaces = bs.interfaces.map(
name =>
ClassDef(name.fqnString,
null,
null,
None,
None,
null,
None,
None,
None)
)
(superClass.toList ::: interfaces).foreach { cdef =>
val parentV = RichGraph.insertIfNotExists[ClassDef, String](cdef)
RichGraph.insertE(classV, parentV, IsParent)
}
Some(classV)
case MethodSymbolInfo(_, source, refs, bs, scalap) =>
val owner = classes(bs.name.owner.fqnString)
val method =
Method(s.fqn,
bs.line,
source,
bs.access,
(scalaName ++ typeSignature).reduceOption(_ + _))
val methodV: VertexT[FqnSymbol] =
RichGraph.upsertV[Method, String](method)
RichGraph.insertE(methodV, owner, EnclosingClass)
Some(methodV)
case FieldSymbolInfo(_, source, refs, bs, scalap) =>
val owner = classes(bs.name.owner.fqnString)
val field = Field(bs.name.fqnString,
Some(s.fqn),
None,
source,
bs.access,
scalaName)
val fieldV: VertexT[FqnSymbol] =
RichGraph.upsertV[Field, String](field)
RichGraph.insertE(fieldV, owner, EnclosingClass)
Some(fieldV)
case TypeAliasSymbolInfo(_, source, t) =>
val owner = classes(t.owner.fqnString)
val field = Field(s.fqn,
None,
None,
source,
t.access,
Some(t.scalaName + t.typeSignature))
val fieldV: VertexT[FqnSymbol] =
RichGraph.upsertV[Field, String](field)
RichGraph.insertE(fieldV, owner, EnclosingClass)
Some(fieldV)
}
s.internalRefs.foreach { ref =>
val sym = FqnSymbol.fromFullyQualifiedReference(ref)
val usage: Option[VertexT[FqnSymbol]] = sym.map {
case cd: ClassDef =>
RichGraph.insertIfNotExists[ClassDef, String](cd)
case m: Method => RichGraph.insertIfNotExists[Method, String](m)
case f: Field => RichGraph.insertIfNotExists[Field, String](f)
}
for {
u <- usage
v <- vertex
} yield {
val intermediary: VertexT[UsageLocation] =
RichGraph.insertV[UsageLocation](
UsageLocation(v.toDomain.source, ref.line)
)
RichGraph.insertE(u, intermediary, UsedAt)
RichGraph.insertE(intermediary, v, UsedIn)
}
}
}
symbols.collect {
case c: ClassSymbolInfo =>
c.bytecodeSymbol.innerClasses.foreach { inner =>
for {
innerClassV: VertexT[FqnSymbol] <- classes.get(inner.fqnString)
outerClassV <- classes.get(c.fqn)
} yield {
RichGraph.insertE(innerClassV, outerClassV, EnclosingClass)
classes
.get(s"${c.fqn}$$")
.foreach(RichGraph.insertE(innerClassV, _, EnclosingClass))
}
}
}
g.commit()
symbols.size
}
  /**
   * Removes given `files` from the graph.
   *
   * @param files the files whose `FileCheck` vertices should be deleted
   * @return presumably the number of removed vertices — confirm against
   *         `RichGraph.removeV`
   */
  def removeFiles(files: List[FileObject]): Future[Int] = withGraphAsync {
    implicit g =>
      // Each indexed file is tracked by a FileCheck vertex.
      RichGraph.removeV(files.map(FileCheck(_)))
  }
  /**
   * Finds the FqnSymbol uniquely identified by `fqn`.
   *
   * @param fqn fully qualified name used as the unique vertex key
   * @return the domain symbol, or `None` when no vertex matches
   */
  def find(fqn: String): Future[Option[FqnSymbol]] = withGraphAsync {
    implicit g =>
      RichGraph.readUniqueV[FqnSymbol, String](fqn).map(_.toDomain)
  }
/**
* Finds all FqnSymbol's identified by unique `fqns`.
*/
def find(fqns: List[FqnIndex]): Future[List[FqnSymbol]] = withGraphAsync {
implicit g =>
fqns.flatMap(
fqn => RichGraph.readUniqueV[FqnSymbol, String](fqn.fqn).map(_.toDomain)
)
}
  /**
   * Resolves the class hierarchy rooted at `fqn`.
   *
   * @param hierarchyType whether to walk towards supertypes or subtypes
   * @param levels optional depth limit; `None` presumably means unbounded —
   *               confirm against `RichGraph.classHierarchy`
   */
  def getClassHierarchy(fqn: String,
                        hierarchyType: Hierarchy.Direction,
                        levels: Option[Int]): Future[Option[Hierarchy]] =
    withGraphAsync { implicit g =>
      RichGraph.classHierarchy[String](fqn, hierarchyType, levels)
    }
  /**
   * Finds the distinct source locations (file/line) where `fqn` is used.
   */
  def findUsageLocations(fqn: String): Future[Iterable[UsageLocation]] =
    withGraphAsync { implicit g =>
      // .distinct: the same location can be linked through several usage edges.
      RichGraph.findUsageLocations[String](fqn).map(_.toDomain).distinct
    }
  /**
   * Finds the symbols that use `fqn`.
   */
  def findUsages(fqn: String): Future[Iterable[FqnSymbol]] = withGraphAsync {
    implicit g =>
      RichGraph.findUsages[String](fqn).map(_.toDomain)
  }
def findClasses(source: EnsimeFile): Future[Seq[ClassDef]] = withGraphAsync {
implicit g =>
val uri = Some(source.uriString)
RichGraph.findV[ClassDef]("jdi") { c =>
c.source == uri
}
}
def findClasses(jdi: String): Future[Seq[ClassDef]] = withGraphAsync {
implicit g =>
RichGraph.findV[ClassDef]("source") { c =>
c.jdi == Some(jdi)
}
}
}
object GraphService {
  // Edge types of the graph schema. The type parameters document the
  // direction: e.g. a ClassDef is DefinedIn a FileCheck.
  private[indexer] case object DefinedIn extends EdgeT[ClassDef, FileCheck]
  private[indexer] case object EnclosingClass extends EdgeT[FqnSymbol, ClassDef]
  private[indexer] case object UsedAt extends EdgeT[FqnSymbol, UsageLocation]
  private[indexer] case object UsedIn extends EdgeT[UsageLocation, FqnSymbol]
  private[indexer] case object IsParent extends EdgeT[ClassDef, ClassDef]
  // the domain-specific formats for schema generation
  import org.ensime.indexer.orientdb.schema.impl._
  import org.ensime.util.stringymap.api._
  import org.ensime.util.stringymap.impl._
  // Persists Access as the raw ASM opcode flag; null maps to null so that
  // absent properties round-trip.
  implicit object AccessSPrimitive extends SPrimitive[Access] {
    import org.objectweb.asm.Opcodes._
    import SPrimitive.IntSPrimitive
    def toValue(v: Access): java.lang.Integer =
      if (v == null) null
      else {
        val code = v match {
          case Public => ACC_PUBLIC
          case Private => ACC_PRIVATE
          case Protected => ACC_PROTECTED
          case Default => 0
        }
        IntSPrimitive.toValue(code)
      }
    def fromValue(v: AnyRef): Either[String, Access] =
      IntSPrimitive.fromValue(v).right.map(Access(_))
  }
  // Persists DeclaredAs by its toString; read back through an ADT lookup
  // table. NOTE(review): `lookup` throws on an unknown string instead of
  // producing a Left — acceptable only if the stored values are trusted.
  implicit object DeclaredAsSPrimitive extends SPrimitive[DeclaredAs] {
    import org.ensime.util.enums._
    import SPrimitive.StringSPrimitive
    private val lookup: Map[String, DeclaredAs] =
      implicitly[AdtToMap[DeclaredAs]].lookup
    def toValue(v: DeclaredAs): java.lang.String =
      if (v == null) null else StringSPrimitive.toValue(v.toString)
    def fromValue(v: AnyRef): Either[String, DeclaredAs] =
      StringSPrimitive.fromValue(v).right.map(lookup)
  }
  // shapeless-derived (de)serialisation and schema formats, cached to avoid
  // re-deriving the implicits at every use site.
  implicit val FileCheckBdf: BigDataFormat[FileCheck] = cachedImplicit
  implicit val FileCheckS: SchemaFormat[FileCheck] = cachedImplicit
  implicit val ClassDefBdf: BigDataFormat[ClassDef] = cachedImplicit
  implicit val ClassDefS: SchemaFormat[ClassDef] = cachedImplicit
  implicit val MethodBdf: BigDataFormat[Method] = cachedImplicit
  implicit val MethodS: SchemaFormat[Method] = cachedImplicit
  implicit val FieldBdf: BigDataFormat[Field] = cachedImplicit
  implicit val FieldS: SchemaFormat[Field] = cachedImplicit
  implicit val FqnSymbolBdf: BigDataFormat[FqnSymbol] = cachedImplicit
  implicit val LineNumberBdf: BigDataFormat[UsageLocation] = cachedImplicit
  implicit val DefinedInS: BigDataFormat[DefinedIn.type] = cachedImplicit
  implicit val EnclosingClassS: BigDataFormat[EnclosingClass.type] =
    cachedImplicit
  implicit val UsedInS: BigDataFormat[UsedIn.type] = cachedImplicit
  implicit val UsedAtS: BigDataFormat[UsedAt.type] = cachedImplicit
  implicit val IsParentS: BigDataFormat[IsParent.type] = cachedImplicit
  // Unique-key definitions telling OrientDB which property identifies each
  // vertex type.
  implicit val UniqueFileCheckV: OrientIdFormat[FileCheck, String] =
    new OrientIdFormat[FileCheck, String] {
      override def key = "filename"
      override def value(t: FileCheck): String = t.filename
    }
  implicit val FqnIndexV: OrientIdFormat[FqnIndex, String] =
    new OrientIdFormat[FqnIndex, String] {
      override def key = "fqn"
      override def value(t: FqnIndex): String = t.fqn
    }
  // All FqnSymbol subtypes share the same unique key: the fully qualified name.
  class UniqueFqnSymbol[T <: FqnSymbol] extends OrientIdFormat[T, String] {
    override def key = "fqn"
    override def value(t: T): String = t.fqn
  }
  implicit val UniqueClassDefV: UniqueFqnSymbol[ClassDef] =
    new UniqueFqnSymbol[ClassDef]
  implicit val UniqueMethodV: UniqueFqnSymbol[Method] =
    new UniqueFqnSymbol[Method]
  implicit val UniqueFieldV: UniqueFqnSymbol[Field] = new UniqueFqnSymbol[Field]
  implicit val UniqueFqnSymbolV: UniqueFqnSymbol[FqnSymbol] =
    new UniqueFqnSymbol[FqnSymbol]
}
| yyadavalli/ensime-server | core/src/main/scala/org/ensime/indexer/graph/GraphService.scala | Scala | gpl-3.0 | 18,739 |
// no package
// plugins declared within no package should be visible to other plugins in the _root_ package
import sbt._, Keys._
// Keys shared by the no-package plugins below; each plugin re-exposes this
// object through its `autoImport`.
object TopLevelImports {
  lazy val topLevelDemo = settingKey[String]("A top level demo setting.")
}
// Auto-triggered plugin requiring the packaged plugin `sbttest.X`; exercises
// that a no-package plugin can depend on (and see keys from) a packaged one.
object TopA extends AutoPlugin {
  import TopLevelImports._
  import sbttest.Imports._
  val autoImport = TopLevelImports
  override def requires = sbttest.X
  override def trigger = AllRequirements
  override def projectSettings: scala.Seq[sbt.Setting[_]] = Seq(
    topLevelDemo := s"TopA: topLevelDemo project ${name.value}",
    demo := s"TopA: demo project ${name.value}"
  )
}
// Non-triggered plugin (no `trigger` override): must be enabled explicitly —
// confirm against the sbt AutoPlugin defaults for this sbt version.
object TopB extends AutoPlugin {
  import TopLevelImports._
  val autoImport = TopLevelImports
  override def projectSettings: Seq[Setting[_]] = Seq(
    topLevelDemo := s"TopB: topLevelDemo project ${name.value}"
  )
}
// Plugin declaring its key inside an inline `autoImport` object rather than
// delegating to TopLevelImports.
object TopC extends AutoPlugin {
  object autoImport {
    lazy val topLevelKeyTest = settingKey[String]("A top level setting declared in a plugin.")
  }
}
| Duhemm/sbt | sbt/src/sbt-test/project/auto-plugins/project/A.scala | Scala | bsd-3-clause | 1,007 |
package io.github.junheng.akka.hbase
import akka.actor.{PoisonPill, Props}
import io.github.junheng.akka.hbase.protocol.HScannerProtocol.HNext
import io.github.junheng.akka.hbase.protocol.HTableProtocol._
import org.apache.hadoop.hbase.client.ResultScanner
import scala.language.postfixOps
/**
 * Actor wrapping an HBase `ResultScanner`. Each `HNext(step)` message fetches
 * the next `step` rows and replies with an `HResults` message carrying the
 * measured fetch cost. The underlying scanner is closed when the actor stops.
 */
class HScanner(scanner: ResultScanner, closeAfterNext: Boolean) extends HActor {
  override def preStart(): Unit = log.info("started")

  override def receive: Receive = {
    case HNext(step) =>
      // `cost` presumably times the enclosed block, so the fetched rows are
      // captured through a local var to be reported alongside the timing.
      var fetched: List[HResult] = Nil
      val elapsed = cost {
        fetched = scanner.next(step).map(HResult(_)).toList
      }
      sender() ! HResults(elapsed, fetched)
      // One-shot mode: stopping the actor triggers postStop, which closes
      // the scanner after the single batch has been delivered.
      if (closeAfterNext) self ! PoisonPill
  }

  override def postStop(): Unit = {
    scanner.close()
    log.info("stopped")
  }
}
object HScanner {
  // Props factory; `closeAfterNext = true` yields a one-shot scanner actor
  // that terminates (and closes the scanner) after the first HNext.
  def props(scanner: ResultScanner, closeAfterNext: Boolean = false) = Props(new HScanner(scanner, closeAfterNext))
}
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package services.schools
import model.School
import org.mockito.Mockito._
import repositories.csv.{ SchoolsCSVRepository, SchoolsRepository }
import testkit.MockitoImplicits._
import testkit.{ ShortTimeout, UnitWithAppSpec }
/**
 * Specs for [[SchoolsService]] search behaviour. Most examples run against the
 * bundled schools CSV fixture (via SchoolsCSVRepository), so the expected
 * result counts (23, 20, 16, 1) are tied to that fixture's content.
 */
class SchoolsServiceSpec extends UnitWithAppSpec with ShortTimeout {
  val schoolsRepo = new SchoolsCSVRepository(app)
  // Service under test with the result cap lifted, so matching behaviour can
  // be asserted without truncation (the cap itself is tested separately below).
  val service = new SchoolsService(schoolsRepo) {
    override val MaxNumberOfSchools = Integer.MAX_VALUE
  }
  "Schools Service" should {
    "return simple 3 letter matches from beginning of school name" in {
      val term = "Abb"
      val result = service.getSchools(term).futureValue
      expect23SchoolsContains("Abb", result)
    }
    "return simple 3 letter matches from beginning of school name ignoring case" in {
      val term = "aBB"
      val result = service.getSchools(term).futureValue
      result.size mustBe 23
      result.foreach(s => withClue(s"school name: ${s.name}") {
        s.name.toLowerCase.contains("abb") mustBe true
      })
    }
    // Uses a mocked repository instead of the CSV fixture to control the data.
    "match on middle words" in {
      val school1WithGrammarName = School("IRN", "341-0209", "Antrim Grammar School", None, None, None, None, None, None, None)
      val school2WithGrammarName = School("IRN", "142-0277", "Aquinas Diocesan Grammar School", None, None, None, None, None, None, None)
      val schoolWithoutGrammarName = School("IRN", "542-0059", "Abbey Christian Brothers School", None, None, None, None, None, None, None)
      val schoolsData = List(school1WithGrammarName, schoolWithoutGrammarName, school2WithGrammarName)
      val schoolsRepoMock = mock[SchoolsRepository]
      when(schoolsRepoMock.schools).thenReturnAsync(schoolsData)
      val service = new SchoolsService(schoolsRepoMock)
      val term = "Grammar"
      service.getSchools(term).futureValue mustBe
        List(school1WithGrammarName, school2WithGrammarName)
    }
    "ignore whitespace in term" in {
      val term = "A b b "
      val result = service.getSchools(term).futureValue
      expect23SchoolsContains("Abb", result)
    }
    "ignore punctuation in term" in {
      val term = "-A?(b_@'b,)&"
      val result = service.getSchools(term).futureValue
      expect23SchoolsContains("Abb", result)
    }
    "ignore punctuation in school name" in {
      val term = "Girls High"
      val result = service.getSchools(term).futureValue
      result.size mustBe 20
      result.foreach(s => withClue(s"school name: ${s.name}") {
        s.name.contains("Girls") && s.name.contains("High") mustBe true
      })
    }
    // These two examples use a service WITHOUT the cap override, exercising
    // the default MaxNumberOfSchools limit.
    "should limit number of results to 16" in {
      val service = new SchoolsService(new SchoolsCSVRepository(app))
      val term = "aBB"
      val result = service.getSchools(term).futureValue
      result.size mustBe 16
      result.foreach(s => withClue(s"school name: ${s.name}") {
        s.name.toLowerCase.contains("abb") mustBe true
      })
    }
    "should return less than 16 if the criteria narrows down the result" in {
      val service = new SchoolsService(new SchoolsCSVRepository(app))
      val term = "Abbey Community"
      val result = service.getSchools(term).futureValue
      result.size mustBe 1
      result.foreach(s => withClue(s"school name: ${s.name}") {
        s.name.contains("Abbey Community") mustBe true
      })
    }
  }
  // Asserts exactly 23 matches, each containing `term` (fixture-dependent).
  private def expect23SchoolsContains(term: String, actualResult: List[School]) = {
    actualResult.size mustBe 23
    actualResult.foreach(s => withClue(s"school name: ${s.name}") {
      s.name.contains(term) mustBe true
    })
  }
}
| hmrc/fset-faststream | test/services/schools/SchoolsServiceSpec.scala | Scala | apache-2.0 | 4,158 |
package com.programmaticallyspeaking.ncd.nashorn
import com.sun.jdi.ClassType
/**
 * Looks up a class by name, returning its JDI `ClassType` when known,
 * `None` otherwise.
 */
trait TypeLookup {
  def apply(name: String): Option[ClassType]
}
| provegard/ncdbg | src/main/scala/com/programmaticallyspeaking/ncd/nashorn/TypeLookup.scala | Scala | bsd-3-clause | 146 |
package latis.util
import org.junit._
import Assert._
import latis.dm.TestDataset
import latis.dm.Dataset
/**
 * JUnit tests for [[DataMap]] conversions of datasets to maps and 2D arrays
 * of doubles/strings.
 */
class TestDataMap {
  @Test
  def real {
    val map = DataMap.toDoubleMap(TestDataset.real)
    assertEquals(3.14, map("myReal")(0), 0.0)
  }
  @Test
  def scalar_has_only_one_value {
    val map = DataMap.toDoubleMap(TestDataset.real)
    assertEquals(1, map("myReal").length)
  }
  @Test
  def empty {
    val map = DataMap.toDoubleMap(Dataset.empty)
    assertTrue(map.isEmpty)
  }
//  @Test
//  def scalars {
//    val map = DataMap.toDoubleMap(TestDataset.scalars)
//    assertEquals(3.14, map("myReal")(0), 0.0)
//    assertEquals(42.0, map("myInteger")(0), 0.0)
//    assertTrue(map("myText")(0).isNaN)
//    assertEquals(1000.0, map("myRealTime")(0), 0.0)
//  }
//  @Test
//  def function_of_scalar_with_data_from_kids {
//    val map = DataMap.toDoubleMap(TestDataset.function_of_scalar_with_data_from_kids)
//    //for ((n,v) <- map; d <- v) println(n + ": " + d)
//    assertEquals(2.0, map("domain")(2), 0.0)
//    assertEquals(0.0, map("range")(0), 0.0)
//  }
//
//  @Test
//  def doubles_from_function_of_scalar_with_data_from_kids {
//    val ds = DataMap.toDoubles(TestDataset.function_of_scalar_with_data_from_kids)
//    //for (a <- ds; d <- a) println(d)
//    assertEquals(2.0, ds(0)(2), 0.0)
//    assertEquals(0.0, ds(1)(0), 0.0)
//  }
  @Test
  def doubles_from_function_of_tuple_with_mixed_types {
    val ds = DataMap.toDoubles(TestDataset.function_of_tuple)
    //for (a <- ds; d <- a) println(d)
    assertEquals(2.0, ds(0)(2), 0.0)
    assertEquals(0.0, ds(1)(0), 0.0)
    // Text variables have no numeric representation, hence NaN.
    assertTrue(ds(2)(0).isNaN)
  }
  @Test
  def strings_from_function_of_tuple_with_mixed_types {
    val ss = DataMap.toStrings(TestDataset.function_of_tuple)
    //for (a <- ss; s <- a) println(s)
    assertEquals("2", ss(0)(2))
    assertEquals("0.0", ss(1)(0))
    assertEquals("one", ss(2)(1))
  }
  //TODO:
  //unknown
  //bad name
}
import scala.concurrent.{ Await, Future }
import scala.concurrent.duration.FiniteDuration
import reactivemongo.api.{
Cursor,
CursorFlattener,
CursorProducer,
WrappedCursor
}
import reactivemongo.api.bson.BSONDocument
import reactivemongo.api.bson.collection.BSONCollection
/**
 * First group of `Cursor` specifications (specs2), mixed into `CursorSpec`.
 * Inserts a fixture collection of 16517 documents, then exercises the
 * low-level request/response protocol, `headOption`/`head`/`one`, `collect`,
 * the `fold*` family, `peek`, and custom `CursorProducer`/`CursorFlattener`.
 */
trait Cursor1Spec { spec: CursorSpec =>
  def group1 = {
    val nDocs = 16517
    s"insert $nDocs records" in {
      // Inserts in bulks of at most 256 documents, accumulating the bulk
      // futures and sequencing them once all bulks have been issued.
      def insert(rem: Int, bulks: Seq[Future[Unit]]): Future[Unit] = {
        if (rem == 0) {
          Future.sequence(bulks).map(_ => {})
        } else {
          val len = if (rem < 256) rem else 256
          def prepared = nDocs - rem
          def bulk = coll.insert(false).many(
            for (i <- 0 until len) yield {
              val n = i + prepared
              BSONDocument("i" -> n, "record" -> s"record$n")
            }).map(_ => {})
          insert(rem - len, bulk +: bulks)
        }
      }
      insert(nDocs, Seq.empty).map { _ =>
        info(s"inserted $nDocs records")
      } aka "fixtures" must beTypedEqualTo({}).await(1, timeout)
    }
    // Inspects the raw wire Response/Reply for various batchSize/maxDocs
    // combinations (cursor ID 0 means the cursor is exhausted server-side).
    "request for cursor query" in {
      import reactivemongo.core.protocol.{ Response, Reply }
      import reactivemongo.api.tests.{ makeRequest => req, nextResponse }
      def cursor(batchSize: Int = 0) =
        coll.find(matchAll("makeReq1")).batchSize(batchSize).cursor()
      req(cursor(nDocs + 1), nDocs + 1) must beLike[Response] {
        case Response(_, Reply(_, id, from, ret), _, _) =>
          id aka "cursor ID #1" must_== 0 and {
            from must_== 0 and (ret aka "returned" must_== nDocs)
          }
      }.await(1, timeout) and {
        req(cursor(nDocs), 1) must beLike[Response] {
          case Response(_, Reply(_, id, from, ret), _, _) =>
            id aka "cursor ID #2" must_== 0 and {
              from must_== 0 and (ret must_== 1)
            }
        }.await(1, timeout)
      } and {
        req(cursor(128), Int.MaxValue) must beLike[Response] {
          case Response(_, Reply(_, id, from, ret), _, _) =>
            id aka "cursor ID #3" must not(beEqualTo(0)) and {
              from must_== 0 and (ret must_== 128)
            }
        }.await(1, timeout)
      } and {
        req(cursor(), 10) must beLike[Response] {
          case Response(_, Reply(_, id, from, ret), _, _) =>
            id aka "cursor ID #4" must_== 0 and {
              from must_== 0 and (ret must_== 10)
            }
        }.await(1, timeout)
      } and {
        req(cursor(), 101) must beLike[Response] {
          case Response(_, Reply(_, id, from, ret), _, _) =>
            id aka "cursor ID #5" must_== 0 and {
              from must_== 0 and (ret must_== 101 /* default batch size */ )
            }
        }.await(1, timeout)
      } and {
        req(cursor(), Int.MaxValue /* unlimited */ ) must beLike[Response] {
          case Response(_, Reply(_, id, from, ret), _, _) =>
            id aka "cursor ID #6" must not(beEqualTo(0)) and {
              from must_== 0 and (ret must_== 101 /* default batch size */ )
            }
        }.await(1, timeout)
      } and {
        val batchSize = 128
        val max = (batchSize * 2) - 1
        val cur = cursor(batchSize)
        @volatile var r1: Response = null // Workaround to avoid nesting .await
        req(cur, max) must beLike[Response] {
          case r @ Response(_, Reply(_, id1, from1, ret1), _, _) =>
            id1 aka "cursor ID #7a" must not(beEqualTo(0)) and {
              from1 must_== 0 and (ret1 must_== batchSize)
            } and {
              r1 = r
              r1 aka "r1" must not(beNull)
            }
        }.await(1, timeout) and {
          nextResponse(cur, max)(ee.ec, r1) must beSome[Response].like {
            case r2 @ Response(_, Reply(_, id2, from2, ret2), _, _) =>
              id2 aka "cursor ID #7b" must_== 0 and {
                from2 aka "from #7b" must_== 128
              } and {
                ret2 must_== (batchSize - 1)
              } and {
                nextResponse(cur, 1)(ee.ec, r2) must beNone.await(1, timeout)
              }
          }.await(1, timeout)
        }
      }
    }
    { // headOption
      def headOptionSpec(c: BSONCollection, timeout: FiniteDuration) = {
        "find first document when matching" in {
          c.find(matchAll("headOption1")).cursor().
            headOption must beSome[BSONDocument].await(1, timeout)
        }
        "find first document when not matching" in {
          c.find(BSONDocument("i" -> -1)).cursor().
            headOption must beNone.await(1, timeout)
        }
      }
      "with the default connection" >> {
        headOptionSpec(coll, timeout)
      }
      "with the slow connection" >> {
        headOptionSpec(slowColl, slowTimeout)
      }
    }
    // Walks the cursor manually with peek/getMore, checking batch contents
    // and that the cursor reference stays stable across batches.
    "peek operation" in {
      import reactivemongo.api.tests.{ decoder, reader => docReader, pack }
      implicit val reader = docReader[Int] { decoder.int(_, "i").get }
      def cursor = coll.find(matchAll("foo")).
        sort(BSONDocument("i" -> 1)).batchSize(2).cursor[Int]()
      val cn = s"${db.name}.${coll.name}"
      cursor.headOption must beSome(0).await(1, timeout) and {
        cursor.peek[List](100) must beLike[Cursor.Result[List[Int]]] {
          case Cursor.Result(0 :: 1 :: Nil, ref1) => // batchSize(2) ~> 0,1
            ref1.collectionName must_=== cn and {
              def getMore = db.getMore(pack, ref1)
              getMore.peek[Seq](3) must beLike[Cursor.Result[Seq[Int]]] {
                case Cursor.Result(2 +: 3 +: 4 +: Nil, ref2) =>
                  ref2.cursorId must_=== ref1.cursorId and {
                    ref2.collectionName must_=== cn
                  } and {
                    db.getMore(pack, ref2).
                      peek[Seq](2) must beLike[Cursor.Result[Seq[Int]]] {
                        case Cursor.Result(5 +: 6 +: Nil, ref3) =>
                          ref3.cursorId must_=== ref1.cursorId and {
                            ref3.collectionName must_=== cn
                          }
                      }.await(1, timeout)
                  }
              }.await(1, timeout) and {
                getMore.peek[Set](1) must beLike[Cursor.Result[Set[Int]]] {
                  case Cursor.Result(s, ref4) =>
                    s must_=== Set(7) and {
                      ref4.cursorId must_=== ref1.cursorId
                    } and {
                      ref4.collectionName must_=== cn
                    }
                }.await(1, timeout)
              }
            }
        }
      }.await(1, timeout)
    } tag "not_mongo26"
    // head
    "find first document when matching" in {
      coll.find(matchAll("head1") ++ ("i" -> 0)).cursor[BSONDocument]().head.
        map(_ -- "_id") must beTypedEqualTo(BSONDocument(
          "i" -> 0, "record" -> "record0")).await(1, timeout)
    }
    "find first document when not matching" in {
      // Unlike headOption, head fails on an empty result.
      Await.result(
        coll.find(BSONDocument("i" -> -1)).cursor().head,
        timeout) must throwA[Cursor.NoSuchResultException.type]
    }
    "read one option document with success" in {
      coll.find(matchAll("one1")).one[BSONDocument].
        aka("findOne") must beSome[BSONDocument].await(0, timeout)
    }
    "read one document with success" in {
      coll.find(matchAll("one2") ++ ("i" -> 1)).requireOne[BSONDocument].
        map(_ -- "_id") must beTypedEqualTo(BSONDocument(
          "i" -> 1, "record" -> "record1")).await(0, timeout)
    }
    "collect with limited maxDocs" in {
      val max = (nDocs / 8).toInt
      coll.find(matchAll("collectLimit")).batchSize(997).cursor().
        collect[List](max, Cursor.FailOnError[List[BSONDocument]]()).
        aka("documents") must haveSize[List[BSONDocument]](max).
        await(1, timeout)
    }
    // Shared fold examples, run against both the default and slow connections.
    def foldSpec1(c: BSONCollection, timeout: FiniteDuration) = {
      "get 10 first docs" in {
        c.find(matchAll("cursorspec1")).cursor().
          collect[List](10, Cursor.FailOnError[List[BSONDocument]]()).
          map(_.size) aka "result size" must beEqualTo(10).await(1, timeout)
      }
      { // .fold
        "fold all the documents" in {
          c.find(matchAll("cursorspec2a")).batchSize(2096).cursor().fold(0)(
            { (st, _) => debug(s"fold: $st"); st + 1 }).
            aka("result size") must beEqualTo(16517).await(1, timeout) and {
              c.find(matchAll("cursorspec2b")).
                batchSize(2096).cursor().fold(0, -1)(
                  { (st, _) => st + 1 }) aka "result size" must beEqualTo(16517).await(1, timeout)
            }
        }
        "fold only 1024 documents" in {
          c.find(matchAll("cursorspec3")).batchSize(256).cursor().
            fold(0, 1024)((st, _) => st + 1).
            aka("result size") must beEqualTo(1024).await(1, timeout)
        }
      }
      { // .foldWhile
        "fold while all the documents" in {
          c.find(matchAll("cursorspec4a")).
            batchSize(2096).cursor().foldWhile(0)(
              { (st, _) => debug(s"foldWhile: $st"); Cursor.Cont(st + 1) }).
            aka("result size") must beEqualTo(16517).await(1, timeout)
        }
        "fold while only 1024 documents" in {
          c.find(matchAll("cursorspec5a")).batchSize(256).
            cursor().foldWhile(0, 1024)(
              (st, _) => Cursor.Cont(st + 1)).
            aka("result size") must beEqualTo(1024).await(1, timeout)
        }
        "fold while successfully with async function" >> {
          "all the documents" in {
            coll.find(matchAll("cursorspec4b")).
              batchSize(2096).cursor().foldWhileM(0)(
                (st, _) => Future.successful(Cursor.Cont(st + 1))).
              aka("result size") must beEqualTo(16517).await(1, timeout)
          }
          "only 1024 documents" in {
            coll.find(matchAll("cursorspec5b")).
              batchSize(256).cursor().foldWhileM(0, 1024)(
                (st, _) => Future.successful(Cursor.Cont(st + 1))).
              aka("result size") must beEqualTo(1024).await(1, timeout)
          }
        }
      }
      { // .foldBulk
        "fold the bulks for all the documents" in {
          c.find(matchAll("cursorspec6a")).
            batchSize(2096).cursor().foldBulks(0)({ (st, bulk) =>
              debug(s"foldBulk: $st")
              Cursor.Cont(st + bulk.size)
            }) aka "result size" must beEqualTo(16517).await(1, timeout)
        }
        "fold the bulks for 1024 documents" in {
          c.find(matchAll("cursorspec7a")).
            batchSize(256).cursor().foldBulks(0, 1024)(
              (st, bulk) => Cursor.Cont(st + bulk.size)).
            aka("result size") must beEqualTo(1024).await(1, timeout)
        }
        "fold the bulks with async function" >> {
          "for all the documents" in {
            coll.find(matchAll("cursorspec6b")).
              batchSize(2096).cursor().foldBulksM(0)(
                (st, bulk) => Future.successful(Cursor.Cont(st + bulk.size))).
              aka("result size") must beEqualTo(16517).await(1, timeout)
          }
          "for 1024 documents" in {
            coll.find(matchAll("cursorspec7b")).
              batchSize(256).cursor().foldBulksM(0, 1024)(
                (st, bulk) => Future.successful(Cursor.Cont(st + bulk.size))).
              aka("result size") must beEqualTo(1024).await(1, timeout)
          }
        }
      }
      { // .foldResponse
        "fold the responses for all the documents" in {
          c.find(matchAll("cursorspec8a")).
            batchSize(2096).cursor().foldResponses(0)({ (st, resp) =>
              debug(s"foldResponses: $st")
              Cursor.Cont(st + resp.reply.numberReturned)
            }) aka "result size" must beEqualTo(16517).await(1, timeout)
        }
        "fold the responses for 1024 documents" in {
          c.find(matchAll("cursorspec9a")).
            batchSize(2056).cursor().foldResponses(0, 1024)(
              (st, resp) => Cursor.Cont(st + resp.reply.numberReturned)).
            aka("result size") must beEqualTo(1024).await(1, timeout)
        }
        "fold the responses with async function" >> {
          "for all the documents" in {
            coll.find(matchAll("cursorspec8b")).
              batchSize(2096).cursor().foldResponsesM(0)((st, resp) =>
                Future.successful(Cursor.Cont(st + resp.reply.numberReturned))).
              aka("result size") must beEqualTo(16517).await(1, timeout)
          }
          "for 1024 documents" in {
            coll.find(matchAll("cursorspec9b")).batchSize(256).cursor().
              foldResponsesM(0, 1024)(
                (st, resp) => Future.successful(
                  Cursor.Cont(st + resp.reply.numberReturned))).
              aka("result size") must beEqualTo(1024).await(1, timeout)
          }
        }
      }
    }
    "with the default connection" >> {
      foldSpec1(coll, timeout)
    }
    "with the slow connection" >> {
      foldSpec1(slowColl, slowTimeout * 2L)
    }
    "fold the responses with async function" >> {
      "for all the documents" in {
        coll.find(matchAll("cursorspec8")).
          batchSize(2096).cursor().foldResponsesM(0)((st, resp) =>
            Future.successful(Cursor.Cont(st + resp.reply.numberReturned))).
          aka("result size") must beEqualTo(16517).await(1, timeout)
      }
      "for 1024 documents" in {
        coll.find(matchAll("cursorspec9")).batchSize(256).cursor().
          foldResponsesM(0, 1024)((st, resp) =>
            Future.successful(Cursor.Cont(st + resp.reply.numberReturned))).
          aka("result size") must beEqualTo(1024).await(1, timeout)
      }
    }
    // Verifies that a custom CursorProducer/CursorFlattener pair is resolved
    // implicitly, including resolution through a supertype (FooExtCursor).
    "produce a custom cursor for the results" in {
      implicit def fooProducer[T] = new CursorProducer[T] {
        type ProducedCursor = FooCursor[T]
        def produce(base: Cursor.WithOps[T]): ProducedCursor =
          new DefaultFooCursor(base)
      }
      implicit object fooFlattener extends CursorFlattener[FooCursor] {
        type Flattened[T] = FooCursor[T]
        def flatten[T](future: Future[FooCursor[T]]) =
          new FlattenedFooCursor(future)
      }
      val cursor = coll.find(matchAll("cursorspec10")).cursor[BSONDocument]()
      cursor.foo must_== "Bar" and {
        Cursor.flatten(Future.successful(cursor)).foo must_=== "raB"
      } and {
        val extCursor: FooExtCursor[BSONDocument] = new DefaultFooCursor(cursor)
        // Check resolution as super type (FooExtCursor <: FooCursor)
        val flattened = Cursor.flatten[BSONDocument, FooCursor](
          Future.successful[FooExtCursor[BSONDocument]](extCursor))
        flattened must beAnInstanceOf[FooCursor[BSONDocument]] and {
          flattened must not(beAnInstanceOf[FooExtCursor[BSONDocument]])
        } and {
          flattened.foo must_=== "raB"
        }
      }
    }
  }
  // --- Test fixtures for the custom cursor example above
  private sealed trait FooCursor[T] extends Cursor[T] { def foo: String }
  private sealed trait FooExtCursor[T] extends FooCursor[T]
  private class DefaultFooCursor[T](val wrappee: Cursor[T])
    extends FooExtCursor[T] with WrappedCursor[T] {
    val foo = "Bar"
  }
  private class FlattenedFooCursor[T](cursor: Future[FooCursor[T]])
    extends reactivemongo.api.FlattenedCursor[T](cursor) with FooCursor[T] {
    val foo = "raB"
  }
}
| cchantep/ReactiveMongo | driver/src/test/scala/Cursor1Spec.scala | Scala | apache-2.0 | 15,333 |
package com.latamautos
/**
* Created by Harold on 25/11/16.
*/
import java.io.{File, IOException}
import java.net.{ServerSocket, URL}
import java.nio.file.attribute.BasicFileAttributes
import java.nio.file.{Files, Paths}
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import com.atlassian.oai.validator.pact.PactProviderValidator
import org.scalatest._
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.latamautos.resources.CorsSupport
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._
import scala.collection.JavaConverters._
/**
 * Provider-side contract test: boots the REST interface on a free local port
 * and validates every consumer pact stored in S3 against the provider's
 * Swagger definition.
 */
class PactProviderTest extends FunSuiteLike with CorsSupport with RestInterface {

  val config = ConfigFactory.load()
  val host = config.getString("http.host")
  val port = findFreePort()

  implicit override lazy val system = ActorSystem("quiz-management-service")
  implicit val materializer = ActorMaterializer()
  implicit val executionContext = system.dispatcher
  implicit val timeout = Timeout(10 seconds)

  def routeSwagger = routes

  Http().bindAndHandle(routeSwagger, host, port) map { binding =>
    println(s"REST interface bound to ${binding.localAddress}")
  } recover { case ex =>
    // Interpolate the failure into one message instead of relying on
    // println's (deprecated) argument auto-tupling.
    println(s"REST interface could not bind to $host:$port: ${ex.getMessage}")
  }

  val amazonS3Files: AmazonS3Files = new AmazonS3Files
  val SWAGGER_URL = s"http://localhost:$port/api-docs/swagger.json"

  // Renamed from the copy-pasted "validateCmd WHEN messageId is empty..." to
  // describe what is actually asserted.
  test("all consumer pacts in S3 SHOULD validate against the provider swagger") {
    amazonS3Files.listfiles("provider-pact").asScala.foreach { url =>
      val validator: PactProviderValidator =
        PactProviderValidator.createFor(SWAGGER_URL).withConsumer("ExampleConsumer", new URL(url)).build
      // Validate once and reuse: each validate() call re-runs the validation.
      val result = validator.validate()
      println(s"validation of $url hasErrors = ${result.hasErrors}")
      println(s"validation report: ${result.getValidationFailureReport}")
      assert(!result.hasErrors)
    }
  }

  /**
   * Binds an ephemeral server socket (port 0) to discover a free TCP port and
   * closes the socket before returning the port number.
   *
   * @throws IllegalStateException if the bound socket reports no local port
   */
  private def findFreePort(): Int = {
    val socket: ServerSocket = new ServerSocket(0)
    try {
      socket.setReuseAddress(true)
      val freePort = socket.getLocalPort
      if (freePort == -1)
        throw new IllegalStateException("Could not find a free TCP/IP port to start embedded Jetty HTTP Server on")
      freePort
    } finally {
      // Best-effort close; failures to close are ignored, as before.
      try socket.close() catch { case _: IOException => () }
    }
  }
}
| haroldport/provider-pact | src/test/scala/com/latamautos/PactProviderTest.scala | Scala | mit | 2,773 |
package info.hargrave.commons.javafx
import java.util.{List => JList}
import javafx.beans.WeakListener
import javafx.collections.ListChangeListener
import scala.ref.WeakReference
import javafx.collections.ListChangeListener.Change
/**
* A ListBinding implementation that doesn't suffer from index overrun errors.
* Also, Subscriptions!
*
* @author Roman Hargrave <roman@hargrave.info>
*/
/**
 * A ListChangeListener mirroring changes from a master observable list into
 * `destination`. The destination is also held weakly so the binding can be
 * detached automatically once the destination is garbage collected.
 */
class SafeListBinding[T](destination: JList[T]) extends AnyRef with ListChangeListener[T] with WeakListener {

  // Weak handle used only for the wasGarbageCollected() check.
  private val listRef = WeakReference(destination)

  // Replace the permuted range with the master's current content for that range.
  private def applyPermutation(change: Change[_ <: T]): Unit = {
    destination.subList(change.getFrom, change.getTo).clear()
    destination.addAll(change.getFrom, change.getList.subList(change.getFrom, change.getTo))
  }

  // Insert the added elements at the change's start index.
  private def applyAddition(change: Change[_ <: T]): Unit = {
    destination.addAll(change.getFrom, change.getAddedSubList)
  }

  private def applyDeletion(change: Change[_ <: T]): Unit = {
    val ahead = change.getFrom + change.getRemovedSize
    if (change.getRemovedSize == 1) {
      // Single-element removal. The previous implementation cleared
      // subList(getFrom, getFrom), which is an EMPTY range (toIndex is
      // exclusive per java.util.List.subList) and therefore never removed
      // anything from the destination. Removing the element directly both
      // fixes that and avoids the JavaFX sublist corner case the original
      // workaround was guarding against (a sublist from `tail` to `tail+1`
      // on a removal at the tail or of the only element).
      destination.remove(change.getFrom)
    } else if (ahead > destination.size || destination.size == 0) {
      throw new IllegalStateException("Invalid List Change (Deletion): Change extends past the destination tail")
    } else {
      destination.subList(change.getFrom, ahead).clear()
    }
  }

  override def onChanged(change: Change[_ <: T]): Unit = if(wasGarbageCollected()) {
    // The destination is gone: detach from the master list.
    change.getList.removeListener(this)
  } else {
    while (change.next()) {
      if(change.wasPermutated()) applyPermutation(change)
      else {
        if (change.wasAdded()) applyAddition(change)
        if (change.wasRemoved()) applyDeletion(change)
      }
    }
  }

  override def wasGarbageCollected(): Boolean = listRef.get.isEmpty
}
object SafeListBinding {
  import javafx.collections.ObservableList
  import scalafx.event.subscriptions.Subscription
  /**
   * Binds `destination` to mirror `master`; the returned Subscription's
   * `cancel()` detaches the listener from `master`.
   *
   * @throws IllegalArgumentException if both arguments are the same instance
   */
  def apply[T](destination: JList[T], master: ObservableList[T]): Subscription = if(destination eq master) {
    throw new IllegalArgumentException("An observable list may not be bound to itself")
  } else {
    val listener = new SafeListBinding(destination)
    master.addListener(listener)
    new BindingSubscription(master, listener)
  }
  // Subscription that removes the listener from the bound list on cancel.
  final class BindingSubscription[T](list: ObservableList[T], listener: ListChangeListener[T]) extends Subscription {
    override def cancel(): Unit = list.removeListener(listener)
  }
}
package net.sansa_stack.query.spark.graph.jena.expression
import net.sansa_stack.query.spark.graph.jena.util.Result
import org.apache.jena.graph.Node
/**
 * Binary comparison expression evaluating whether `left` is less than `right`.
 */
class LessThan(left: Expression, right: Expression) extends FilterTwo(left, right) {

  private val tag = "Less Than"

  override def evaluate(result: Map[Node, Node]): Boolean = {
    // NOTE(review): unimplemented stub — always returns true, so this
    // overload never filters anything out. The Result[Node] overload below
    // is the functional one; confirm whether this code path is reachable.
    // compiler here
    true
  }

  override def evaluate(result: Result[Node]): Boolean = {
    // Delegates to FilterTwo's comparison; strictly negative means left < right.
    compareNodes(result) < 0
  }

  override def getTag: String = { tag }
}
| SANSA-Stack/SANSA-RDF | sansa-query/sansa-query-spark/src/main/scala/net/sansa_stack/query/spark/graph/jena/expression/LessThan.scala | Scala | apache-2.0 | 503 |
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.ActorMaterializer
import akka.util.Timeout
import spray.json.DefaultJsonProtocol
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
//Our domain class
/**
 * A robot with a name (`naam`), an optional colour (`kleur`) and a
 * non-negative number of arms (`aantalArmen`).
 */
case class Robot(naam: String, kleur: Option[String], aantalArmen: Int) {
  // Constructing a robot with a negative arm count is a programmer error.
  require(aantalArmen >= 0, "Robots kunnen geen negatief aantal armen hebben!")
}
/**
 * Demo Akka HTTP service exposing a small CRUD-ish API over an in-memory
 * list of robots (GET list, POST add, DELETE by name).
 */
object RobotsApiApp extends App with SprayJsonSupport with DefaultJsonProtocol {
  implicit val system = ActorSystem("RobotSystem")
  implicit val materializer = ActorMaterializer()
  implicit val executionContext: ExecutionContext = system.dispatcher
  implicit val timeout = Timeout(5.seconds)
  val port = system.settings.config.getInt("port")
  //We use the default json marshalling for Robot.
  //There are multiple jsonFormat methods in DefaultJsonProtocol. Depending on how many parameters the model class has.
  //Robot has three parameters, so we use jsonFormat3.
  implicit val RobotFormat = jsonFormat3(Robot)
  //A list of our domain objects.
  //NOTE(review): mutable shared state, updated from request handlers without
  //synchronization — acceptable for a demo only.
  var robots = List(Robot("R2D2", Some("wit"), 0), Robot("Asimo", None, 2))
  val route: Route = logRequestResult("RobotsAPI") {
    pathPrefix("robots") {
      get {
        //with get we will return our current list of robots
        complete {
          //complete will return the result in an appropriate format
          //With SprayJsonSupport it knows how to marshall a List to json
          //With RobotFormat it knows how to marshall Robot
          robots
        }
      } ~ post {
        //With post we will add a robot
        handleWith { robot: Robot => //handleWith will unmarshall the input
          robots = robot :: robots
          system.log.info(s"We hebben nu ${robots.size} robots.")
          robot //handleWith will also marshall the result. Here we simply return the new robot.
        }
      } ~ delete {
        //Delete removes every robot with the given name (names are not unique).
        path(Segment) { naam =>
          robots = robots.filter { _.naam != naam }
          complete(s"robot $naam verwijderd")
        }
      }
    } ~ path("") {
      //When we go to localhost:8080/ we can show documentation
      complete("Robots API documentatie")
    }
  }
  val bindingFuture = Http().bindAndHandle(route, "localhost", port)
  println(s"Robots API - http://localhost:$port/")
}
| tammosminia/sprayApiExample | robotsApi/src/main/scala/RobotsApiApp.scala | Scala | mit | 2,459 |
package com.twitter.finagle.memcachedx

/**
 * Package replication implements a base cache client that can manage multiple cache replicas.
 *
 * The base replication client forwards each cache command to all replicas, then collects and
 * aggregates each replica's response into a ReplicationStatus object representing the replication
 * consistency. BaseReplicationClient does not enforce consistency between replicas in any way;
 * it only reports its view of the replication state. For instance, BaseReplicationClient provides
 * interfaces similar to a generic memcache client but always returns a ReplicationStatus object,
 * which can take one of three forms:
 *
 *  - ConsistentReplication, indicating a consistent state across all replicas
 *  - InconsistentReplication, indicating an inconsistent state across all replicas
 *  - FailedReplication, indicating a failure state
 *
 * By checking the returned ReplicationStatus object, one can tell the cache replication status
 * and then handle it with application-specific logic.
 *
 * In addition to the base replication client, a simple replication client wrapper compatible
 * with the generic cache client interface is also provided. SimpleReplicationClient only
 * supports a subset of all memcached commands for now, and succeeds only if the command
 * succeeds on all cache replicas. In a more complicated caching scenario, this simple/naive
 * replication client may not be applicable.
 */
package object replication
| travisbrown/finagle | finagle-memcachedx/src/main/scala/com/twitter/finagle/memcachedx/replication/package.scala | Scala | apache-2.0 | 1,445 |
import java.io.File
import sbt._
import Process._
/**
 * Build definition (old sbt 0.7.x DSL, judging by DefaultProject/ProjectInfo) for a
 * project compiled and tested against a locally installed Google App Engine SDK,
 * with Lucene as a managed dependency.
 */
class GaeLuceneProject(info:ProjectInfo) extends DefaultProject(info) {
  // locate the Home directory
  val userHome = system[File]("user.home")

  // define custom property: GAE SDK location, overridable via the "gaeHome" property;
  // defaults to a hard-coded SDK 1.3.3 install under the user's home directory.
  val defaultGaeHome = userHome.value + "/Documents/src/gae/" + "appengine-java-sdk-1.3.3"
  val gaeHome = propertyOptional[String](defaultGaeHome)
  val gaePath = Path.fromFile(gaeHome.value)

  // Lucene
  val luceneCore = "org.apache.lucene" % "lucene-core" % "3.0.1"

  // Dependencies for testing
  val junit = "junit" % "junit" % "4.7" % "test->default"
  val specs = "org.scala-tools.testing" % "specs_2.8.0.Beta1" % "1.6.4" % "test->default"

  // App Engine paths: shared/user jars go on the compile classpath,
  // impl/testing jars are additionally needed for tests and the console.
  val gaeSharedJars = gaePath / "lib" / "shared" * "*.jar" +++ gaePath / "lib" / "user" * "*.jar"
  val gaeTestingJars = gaePath / "lib" / "impl" * "*.jar" +++ gaePath / "lib" / "testing" * "*.jar"
  val jars = gaeSharedJars
  val testingJars = gaeTestingJars

  // override looking for jars in ./lib
  override def dependencyPath = "src" / "main" / "lib"
  // compile with App Engine jars
  override def compileClasspath = super.compileClasspath +++ jars
  // add App Engine jars to console classpath
  override def consoleClasspath = super.consoleClasspath +++ jars +++ testingJars
  // compile tests with App Engine jars
  override def testClasspath = super.testClasspath +++ jars +++ testingJars
  // override path to managed dependency cache
  override def managedDependencyPath = "project" / "lib_managed"
}
| bryanjswift/gaelucene | project/build/GaeLucene.scala | Scala | mit | 1,513 |
package com.twitter.finagle.memcached.unit.util
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.FunSuite
import org.scalatest.matchers.ShouldMatchers
import com.twitter.finagle.memcached.util.ParserUtils
import org.jboss.netty.buffer.ChannelBuffers
import com.google.common.base.Charsets
@RunWith(classOf[JUnitRunner])
class ParserUtilsTest extends FunSuite
  with ShouldMatchers
{
  // Wraps the string in a UTF-8 ChannelBuffer and delegates to ParserUtils.isDigits.
  private def isDigits(str: String): Boolean = {
    val cb = ChannelBuffers.copiedBuffer(str, Charsets.UTF_8)
    ParserUtils.isDigits(cb)
  }

  test("isDigits") {
    // Strings consisting solely of ASCII digits are accepted...
    isDigits("123") should be (true)
    isDigits("1") should be (true)
    // ...while empty input, whitespace, and any non-digit character are rejected,
    // including digits surrounded by whitespace.
    isDigits("") should be (false)
    isDigits(" ") should be (false)
    isDigits("x") should be (false)
    isDigits(" 9") should be (false)
    isDigits("9 ") should be (false)
  }
}
| JustinTulloss/finagle | finagle-memcached/src/test/scala/com/twitter/finagle/memcached/unit/util/ParserUtilsTest.scala | Scala | apache-2.0 | 856 |
package com.twitter.finagle.thrift
import com.google.common.base.Charsets
import com.twitter.finagle.stats.{NullStatsReceiver, Counter, DefaultStatsReceiver, StatsReceiver}
import com.twitter.logging.Logger
import com.twitter.util.NonFatal
import java.nio.{ByteBuffer, CharBuffer}
import java.nio.charset.{CoderResult, CharsetEncoder}
import java.security.{PrivilegedExceptionAction, AccessController}
import org.apache.thrift.protocol.{TProtocol, TProtocolFactory, TBinaryProtocol}
import org.apache.thrift.transport.TTransport
/**
 * Factories for Thrift protocol objects. When sun.misc.Unsafe is available,
 * an allocation-minimizing TBinaryProtocol variant is used; otherwise the
 * stock TBinaryProtocol is returned.
 */
object Protocols {

  // based on guava's UnsignedBytes.getUnsafe()
  // Returns null (not an exception) when Unsafe cannot be obtained, so callers
  // can wrap the result in Option below.
  private[this] def getUnsafe: sun.misc.Unsafe = {
    try {
      sun.misc.Unsafe.getUnsafe()
    } catch {
      case NonFatal(_) => // try reflection instead
        try {
          AccessController.doPrivileged(new PrivilegedExceptionAction[sun.misc.Unsafe]() {
            def run(): sun.misc.Unsafe = {
              val k = classOf[sun.misc.Unsafe]
              // Scan Unsafe's own static fields for the singleton instance.
              for (f <- k.getDeclaredFields) {
                f.setAccessible(true)
                val x = f.get(null)
                if (k.isInstance(x)) {
                  return k.cast(x)
                }
              }
              throw new NoSuchFieldException("the Unsafe") // fall through to the catch block below
            }
          })
        } catch {
          case NonFatal(t) =>
            Logger.get().info("%s unable to initialize sun.misc.Unsafe", getClass.getName)
            null
        }
    }
  }

  // None when Unsafe is unavailable (Option(null) == None).
  private val unsafe: Option[sun.misc.Unsafe] = Option(getUnsafe)

  private[this] def optimizedBinarySupported: Boolean = unsafe.isDefined

  /**
   * Returns a `TProtocolFactory` that creates `TProtocol`s that
   * are wire-compatible with `TBinaryProtocol`.
   *
   * NOTE(review): `readLength` is accepted but never used in this method —
   * confirm whether it should be honored or removed.
   */
  def binaryFactory(
    strictRead: Boolean = false,
    strictWrite: Boolean = true,
    readLength: Int = 0,
    statsReceiver: StatsReceiver = DefaultStatsReceiver
  ): TProtocolFactory = {
    if (!optimizedBinarySupported) {
      new TBinaryProtocol.Factory(strictRead, strictWrite)
    } else {
      // Factories are created rarely while the creation of their TProtocol's
      // is a common event. Minimize counter creation to just once per Factory.
      val fastEncodeFailed = statsReceiver.counter("fast_encode_failed")
      val largerThanTlOutBuffer = statsReceiver.counter("larger_than_threadlocal_out_buffer")
      new TProtocolFactory {
        override def getProtocol(trans: TTransport): TProtocol = {
          val proto = new TFinagleBinaryProtocol(
            trans, fastEncodeFailed, largerThanTlOutBuffer, strictRead, strictWrite)
          proto
        }
      }
    }
  }

  /** Default protocol factory: binary with default strictness settings. */
  def factory(statsReceiver: StatsReceiver = DefaultStatsReceiver): TProtocolFactory = {
    binaryFactory(statsReceiver = statsReceiver)
  }

  // Visible for testing purposes.
  private[thrift] object TFinagleBinaryProtocol {
    // zero-length strings are written to the wire as an i32 of its length, which is 0
    private val EmptyStringInBytes = Array[Byte](0, 0, 0, 0)

    // assume that most of our strings are mostly single byte utf8
    private val MultiByteMultiplierEstimate = 1.3f

    /** Only valid if unsafe is defined */
    private val StringValueOffset: Long = unsafe.map {
      _.objectFieldOffset(classOf[String].getDeclaredField("value"))
    }.getOrElse(Long.MinValue)

    /**
     * Note, some versions of the JDK's define `String.offset`,
     * while others do not and always use 0.
     * Long.MinValue is the sentinel for "field not present on this JDK".
     */
    private val OffsetValueOffset: Long = unsafe.map { u =>
      try {
        u.objectFieldOffset(classOf[String].getDeclaredField("offset"))
      } catch {
        case NonFatal(_) => Long.MinValue
      }
    }.getOrElse(Long.MinValue)

    /**
     * Note, some versions of the JDK's define `String.count`,
     * while others do not and always use `value.length`.
     * Long.MinValue is the sentinel for "field not present on this JDK".
     */
    private val CountValueOffset: Long = unsafe.map { u =>
      try {
        u.objectFieldOffset(classOf[String].getDeclaredField("count"))
      } catch {
        case NonFatal(_) => Long.MinValue
      }
    }.getOrElse(Long.MinValue)

    // CharsetEncoder is not thread-safe, hence one per thread.
    private val charsetEncoder = new ThreadLocal[CharsetEncoder] {
      override def initialValue() = Charsets.UTF_8.newEncoder()
    }

    // Visible for testing purposes
    private[thrift] val OutBufferSize = 4096

    // Reusable per-thread output buffer for encoded strings up to OutBufferSize bytes.
    private val outByteBuffer = new ThreadLocal[ByteBuffer] {
      override def initialValue() = ByteBuffer.allocate(OutBufferSize)
    }
  }

  /**
   * An implementation of TBinaryProtocol that optimizes `writeString`
   * to minimize object allocations.
   *
   * This specific speedup depends on sun.misc.Unsafe and will fall
   * back to standard TBinaryProtocol in the case when it is unavailable.
   *
   * Visible for testing purposes.
   */
  private[thrift] class TFinagleBinaryProtocol(
    trans: TTransport,
    fastEncodeFailed: Counter,
    largerThanTlOutBuffer: Counter,
    strictRead: Boolean = false,
    strictWrite: Boolean = true)
    extends TBinaryProtocol(
      trans,
      strictRead,
      strictWrite)
  {
    import TFinagleBinaryProtocol._

    override def writeString(str: String) {
      if (str.length == 0) {
        trans.write(EmptyStringInBytes)
        return
      }
      // this is based on the CharsetEncoder code at:
      // http://psy-lob-saw.blogspot.co.nz/2013/04/writing-java-micro-benchmarks-with-jmh.html
      // we could probably do better than this via:
      // https://github.com/nitsanw/jmh-samples/blob/master/src/main/java/psy/lob/saw/utf8/CustomUtf8Encoder.java
      // unsafe.get is safe here: binaryFactory only constructs this class when
      // optimizedBinarySupported (i.e. unsafe.isDefined) is true.
      val u = unsafe.get
      val chars = u.getObject(str, StringValueOffset).asInstanceOf[Array[Char]]
      val offset = if (OffsetValueOffset == Long.MinValue) 0 else {
        u.getInt(str, OffsetValueOffset)
      }
      val count = if (CountValueOffset == Long.MinValue) chars.length else {
        u.getInt(str, CountValueOffset)
      }
      val charBuffer = CharBuffer.wrap(chars, offset, count)

      // Use the thread-local buffer when the worst-case estimate fits,
      // otherwise allocate a one-off buffer (and count the occurrence).
      val out = if (count * MultiByteMultiplierEstimate <= OutBufferSize) {
        val o = outByteBuffer.get()
        o.clear()
        o
      } else {
        largerThanTlOutBuffer.incr()
        ByteBuffer.allocate((count * MultiByteMultiplierEstimate).toInt)
      }
      val csEncoder = charsetEncoder.get()
      csEncoder.reset()
      val result = csEncoder.encode(charBuffer, out, true)
      if (result != CoderResult.UNDERFLOW) {
        // The size estimate was wrong (overflow) or input was malformed:
        // fall back to the slower, allocation-heavy stock implementation.
        fastEncodeFailed.incr()
        super.writeString(str)
      } else {
        writeI32(out.position())
        trans.write(out.array(), 0, out.position())
      }
    }

    // Note: libthrift 0.5.0 has a bug when operating on ByteBuffer's with a non-zero arrayOffset.
    // We instead use the version from head that fixes this issue.
    override def writeBinary(bin: ByteBuffer) {
      if (bin.hasArray) {
        val length = bin.remaining()
        writeI32(length)
        trans.write(bin.array(), bin.position() + bin.arrayOffset(), length)
      } else {
        // No backing array: copy via a duplicate so the caller's position is untouched.
        val array = new Array[Byte](bin.remaining())
        bin.duplicate().get(array)
        writeI32(array.length)
        trans.write(array, 0, array.length)
      }
    }
  }
}
| rojanu/finagle | finagle-thrift/src/main/scala/com/twitter/finagle/thrift/Protocols.scala | Scala | apache-2.0 | 7,166 |
package org.jetbrains.plugins.scala.lang.resolve
import org.jetbrains.plugins.scala.lang.psi.api.base.ScReference
import org.junit.Assert._
/**
* Created by kate on 6/15/16.
*/
/**
 * Resolve tests for implicit conversions: each test loads a fixture from
 * `resolve/implicitConversion` and checks that the reference at the caret
 * resolves to exactly one target.
 */
class ResolveImplicitConversion extends ScalaResolveTestCase {

  override def folderPath: String = super.folderPath + "resolve/implicitConversion"

  /** Asserts that the element at the caret is a reference with exactly one resolve result. */
  def doTest(): Unit = {
    findReferenceAtCaret() match {
      case ref: ScReference =>
        val variants = ref.multiResolveScala(false)
        assertTrue(s"Single resolve expected, was: ${variants.length}", variants.length == 1)
      case other =>
        // Previously a non-reference element at the caret raised an opaque
        // MatchError; fail with a descriptive message instead.
        fail(s"Expected a reference at the caret, but found: $other")
    }
  }

  def testScl4968(): Unit = doTest()

  def testSCL8757(): Unit = doTest()

  def testSCL8660(): Unit = doTest()

  def testScl7974(): Unit = doTest()

  def testSCL10670(): Unit = doTest()

  def testSCL10549(): Unit = doTest()

  def testSCL9224(): Unit = doTest()

  def testSCL12251(): Unit = doTest()

  def testSCL13686(): Unit = doTest()
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/resolve/ResolveImplicitConversion.scala | Scala | apache-2.0 | 926 |
package pl.touk.nussknacker.engine.requestresponse.http
import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
import akka.http.scaladsl.server.{Directives, Route}
import akka.stream.Materializer
import cats.data.NonEmptyList
import cats.data.Validated.{Invalid, Valid}
import com.typesafe.scalalogging.LazyLogging
import de.heikoseeberger.akkahttpcirce.FailFastCirceSupport
import io.circe.generic.JsonCodec
import io.circe.syntax._
import pl.touk.nussknacker.engine.api.exception.NuExceptionInfo
import pl.touk.nussknacker.engine.requestresponse.deployment.ProcessInterpreters
import pl.touk.nussknacker.engine.requestresponse.http.logging.RequestResponseLogger
import scala.concurrent.ExecutionContext
/**
 * HTTP route exposing deployed request-response interpreters:
 *  - POST-style invocation under /<processPath>, dispatched to the interpreter
 *    registered for that path (404 when none is registered);
 *  - a health-check endpoint at the root path.
 */
class ProcessRoute(processInterpreters: ProcessInterpreters) extends Directives with LazyLogging with FailFastCirceSupport {

  def route(log: RequestResponseLogger)
           (implicit ec: ExecutionContext, mat: Materializer): Route =
    path(Segment) { processPath =>
      log.loggingDirective(processPath)(mat) {
        processInterpreters.getInterpreterByPath(processPath) match {
          case None =>
            // No interpreter deployed under this path.
            complete {
              HttpResponse(status = StatusCodes.NotFound)
            }
          case Some(processInterpreter) => new RequestResponseRequestHandler(processInterpreter).invoke {
            case Invalid(errors) => complete {
              // All invocation errors are logged and mapped to EspError payloads.
              logErrors(processPath, errors)
              (StatusCodes.InternalServerError, errors.toList.map(info => EspError(info.nodeComponentInfo.map(_.nodeId), Option(info.throwable.getMessage))).asJson)
            }
            case Valid(results) => complete {
              (StatusCodes.OK, results)
            }
          }
        }
      }
      //TODO place openApi endpoint
    } ~ pathEndOrSingleSlash {
      //healthcheck endpoint
      get {
        complete {
          HttpResponse(status = StatusCodes.OK)
        }
      }
    }

  // Logs a one-line warning summarizing all errors, then one info entry per error
  // with the failing node id and the underlying throwable.
  private def logErrors(processPath: String, errors: NonEmptyList[NuExceptionInfo[_ <: Throwable]]): Unit = {
    logger.warn(s"Failed to invoke: $processPath with errors: ${errors.map(_.throwable.getMessage)}")
    errors.toList.foreach { error =>
      logger.info(s"Invocation failed $processPath, error in ${error.nodeComponentInfo.map(_.nodeId)}: ${error.throwable.getMessage}", error.throwable)
    }
  }

  // JSON error payload returned to clients: the failing node (if known) and a message.
  @JsonCodec case class EspError(nodeId: Option[String], message: Option[String])
}
| TouK/nussknacker | engine/lite/request-response/app/src/main/scala/pl/touk/nussknacker/engine/requestresponse/http/ProcessRoute.scala | Scala | apache-2.0 | 2,425 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.filter
import com.typesafe.scalalogging.LazyLogging
import org.geotools.factory.CommonFactoryFinder
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.filter._
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import org.specs2.specification.core.Fragments
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class FilterPackageObjectTest extends Specification with LazyLogging {

  import TestFilters._

  // partitionPrimarySpatials should split a filter into predicates on the
  // default geometry attribute (*geom) vs. everything else.
  "The partitionGeom function" should {

    val sft = SimpleFeatureTypes.createType("filterPackageTest", "g:Geometry,*geom:Geometry")
    val geomFilter = ECQL.toFilter("BBOX(geom, -45.0,-45.0,45.0,45.0)")

    "filter bbox based on default geom" in {
      val filter = ECQL.toFilter("BBOX(geom, -45.0,-45.0,45.0,45.0) AND BBOX(g, -30.0,-30.0,30.0,30.0)")
      val (geoms, nongeoms) = partitionPrimarySpatials(filter, sft)
      geoms must haveLength(1)
      nongeoms must haveLength(1)
      ECQL.toCQL(geoms(0)) mustEqual "BBOX(geom, -45.0,-45.0,45.0,45.0)"
      ECQL.toCQL(nongeoms(0)) mustEqual "BBOX(g, -30.0,-30.0,30.0,30.0)"
    }

    "filter intersect based on default geom" in {
      val filter =
        ECQL.toFilter("INTERSECTS(geom, POLYGON ((-45 -45, -45 45, 45 45, 45 -45, -45 -45))) " +
            "AND INTERSECTS(g, POLYGON ((-30 -30, -30 30, 30 30, 30 -30, -30 -30)))")
      val (geoms, nongeoms) = partitionPrimarySpatials(filter, sft)
      geoms must haveLength(1)
      nongeoms must haveLength(1)
      ECQL.toCQL(geoms(0)) mustEqual "INTERSECTS(geom, POLYGON ((-45 -45, -45 45, 45 45, 45 -45, -45 -45)))"
      ECQL.toCQL(nongeoms(0)) mustEqual "INTERSECTS(g, POLYGON ((-30 -30, -30 30, 30 30, 30 -30, -30 -30)))"
    }

    // The partition must recognize the default geometry regardless of operand order.
    "filter intersect based on default geom regardless of order" in {
      val filter =
        ECQL.toFilter("INTERSECTS(POLYGON ((-30 -30, -30 30, 30 30, 30 -30, -30 -30)), g) " +
            "AND INTERSECTS(POLYGON ((-45 -45, -45 45, 45 45, 45 -45, -45 -45)), geom)")
      val (geoms, nongeoms) = partitionPrimarySpatials(filter, sft)
      geoms must haveLength(1)
      nongeoms must haveLength(1)
      ECQL.toCQL(geoms(0)) mustEqual "INTERSECTS(POLYGON ((-45 -45, -45 45, 45 45, 45 -45, -45 -45)), geom)"
      ECQL.toCQL(nongeoms(0)) mustEqual "INTERSECTS(POLYGON ((-30 -30, -30 30, 30 30, 30 -30, -30 -30)), g)"
    }

    // Every permutation of the three predicates must partition identically.
    "handle ANDs with multiple predicates" in {
      val filters = List(ECQL.toFilter("attr1 = val1"), ECQL.toFilter("attr2 = val2"), geomFilter)
          .permutations.map(ff.and(_)).toSeq
      forall(filters) { filter =>
        val (geoms, nongeoms) = partitionPrimarySpatials(filter, sft)
        geoms must haveLength(1)
        nongeoms must haveLength(2)
      }
    }
  }

  "the mergeFilters function" should {
    val ff = CommonFactoryFinder.getFilterFactory2
    val f1 = ff.equals(ff.property("test"), ff.literal("a"))

    // INCLUDE is the identity element for merging, in either argument position.
    "ignore Filter.INCLUDE" >> {
      val f2 = Filter.INCLUDE
      val combined = mergeFilters(f1, f2)
      combined mustEqual f1
      val combined2 = mergeFilters(f2, f1)
      combined2 mustEqual f1
    }
    // Merging two equal filters yields the filter itself, not a redundant AND.
    "simplify same filters" >> {
      val f2 = ff.equals(ff.property("test"), ff.literal("a"))
      val combined = mergeFilters(f1, f2)
      combined mustEqual f1
      val combined2 = mergeFilters(f2, f1)
      combined2 mustEqual f1
    }
    "AND different filters" >> {
      val f2 = ff.equals(ff.property("test2"), ff.literal("a"))
      val desired = ff.and(f1, f2)
      val combined = mergeFilters(f1, f2)
      combined mustEqual desired
      val combined2 = mergeFilters(f2, f1)
      combined2 mustEqual desired
    }
  }

  "The deMorgan function" should {

    "change ANDs to ORs" in {
      oneLevelAndFilters.flatMap { case (f: And) =>
        val dm = deMorgan(f)
        dm.isInstanceOf[Or] must beTrue
        val dmChildren = dm.asInstanceOf[Or].getChildren

        // NOT(a AND b) == NOT(a) OR NOT(b): each child is negated, order preserved.
        f.getChildren.zip(dmChildren).map {
          case (origChild, dmChild) =>
            dmChild.isInstanceOf[Not] must beTrue
            dmChild.asInstanceOf[Not].getFilter mustEqual origChild
        }
      }
    }

    "change ORs to ANDs" in {
      oneLevelOrFilters.flatMap { case (f: Or) =>
        val dm = deMorgan(f)
        dm.isInstanceOf[And] must beTrue
        val dmChildren = dm.asInstanceOf[And].getChildren

        // NOT(a OR b) == NOT(a) AND NOT(b): each child is negated, order preserved.
        f.getChildren.zip(dmChildren).map {
          case (origChild, dmChild) =>
            dmChild.isInstanceOf[Not] must beTrue
            dmChild.asInstanceOf[Not].getFilter mustEqual origChild
        }
      }
    }

    // NOT(NOT(x)) == x
    "remove stacked NOTs" in {
      simpleNotFilters.map { case (f: Not) =>
        deMorgan(f) mustEqual f.getFilter
      }
    }
  }

  // Test logicDistributionDNF
  "The function 'logicDistributionDNF'" should {
    "split a top-level OR into a List of single-element Lists each containing a filter" in {
      (oneLevelOrFilters++oneLevelMultipleOrsFilters).flatMap { or =>
        val ll = logicDistributionDNF(or)
        ll.map { l => l.size mustEqual 1}
      }
    }

    "split a top-level AND into a singleton List which contains a List of the ANDed filters" in {
      oneLevelAndFilters.map { case (and: And) =>
        val ll = logicDistributionDNF(and)
        ll.size mustEqual 1
        and.getChildren.size mustEqual ll(0).size
      }
    }

    "not return filters with ANDs or ORs explicitly stated" in {
      // NB: The nested lists imply ANDs and ORs.
      andsOrsFilters.flatMap { filter: Filter =>
        val ll = logicDistributionDNF(filter)
        ll.flatten.map { l => l.isInstanceOf[BinaryLogicOperator] must beFalse}
      }
    }

    "take a 'simple' filter and return List(List(filter))" in {
      baseFilters.map { f =>
        val ll = logicDistributionDNF(f)
        ll.size mustEqual 1
        ll(0).size mustEqual 1
      }
    }
  }

  // Test logicDistributionCNF
  "The function 'logicDistributionCNF'" should {
    "split a top-level OR into a singleton List which contains a List of the ORed filters" in {
      oneLevelOrFilters.map { case (or: Or) =>
        val ll = logicDistributionCNF(or)
        ll.size mustEqual 1
        or.getChildren.size mustEqual ll(0).size
      }
    }

    "split a top-level AND into a a singleton List which contains a List of the ANDed filters" in {
      (oneLevelAndFilters++oneLevelMultipleAndsFilters).flatMap { and =>
        val ll = logicDistributionCNF(and)
        ll.map { l => l.size mustEqual 1}
      }
    }

    "not return filters with ANDs or ORs explicitly stated" in {
      // NB: The nested lists imply ANDs and ORs.
      andsOrsFilters.flatMap { filter: Filter =>
        val ll = logicDistributionCNF(filter)
        ll.flatten.map { l => l.isInstanceOf[BinaryLogicOperator] must beFalse}
      }
    }

    "take a 'simple' filter and return List(List(filter))" in {
      baseFilters.map { f =>
        val ll = logicDistributionCNF(f)
        ll.size mustEqual 1
        ll(0).size mustEqual 1
      }
    }
  }

  // Function defining rewriteFilter Properties.
  // Generates one "should" group of DNF-structure assertions per input filter.
  def testRewriteProps(filter: Filter): Fragments = {
    logger.debug(s"Filter: ${ECQL.toCQL(filter)}")

    // Flattens a top-level OR into its children; any other filter is a singleton.
    def breakUpOr(f: Filter): Seq[Filter] = {
      f match {
        case or: Or => or.getChildren
        case _ => Seq(f)
      }
    }

    "The function rewriteFilter" should {
      val rewrittenFilter: Filter = rewriteFilterInDNF(filter)

      "return a Filter with at most one OR at the top" in {
        val decomp = breakUpOr(rewrittenFilter)
        val orCount = decomp.count(_.isInstanceOf[Or])
        orCount mustEqual 0
      }

      val children = decomposeOr(rewrittenFilter)

      "return a Filter where the children of the (optional) OR can (optionally) be an AND" in {
        children.map { _.isInstanceOf[Or] must beFalse }
      }

      "return a Filter where NOTs do not have ANDs or ORs as children" in {
        foreachWhen(children) { case f if f.isInstanceOf[Not] => f.isInstanceOf[BinaryLogicOperator] must beFalse }
      }
    }
  }

  // Instantiate the DNF-property checks for every one-geometry test filter.
  oneGeomFilters.map(testRewriteProps)
}
| elahrvivaz/geomesa | geomesa-filter/src/test/scala/org/locationtech/geomesa/filter/FilterPackageObjectTest.scala | Scala | apache-2.0 | 8,645 |
import collection.mutable.ArrayBuffer
import collection.immutable
import immutable.HashSet
class O {
  // NOTE(review): this file appears to be optimize-imports test fixture data —
  // the commented block below it holds the expected rewritten file. Keep the
  // class body and that expected output in sync.
  val buffer: ArrayBuffer[Int] = null
  val s: HashSet[String] = null
}
/*
import scala.collection.immutable
import scala.collection.immutable.HashSet
import scala.collection.mutable.ArrayBuffer
class O {
val buffer: ArrayBuffer[Int] = null
val s: HashSet[String] = null
}
*/ | ilinum/intellij-scala | testdata/optimize/groups/LongerNames.scala | Scala | apache-2.0 | 383 |
package gitbucket.core.util
import org.scalatest.funspec.AnyFunSpec
class DirectorySpec extends AnyFunSpec {
  describe("GitBucketHome") {
    it("should set under target in test scope") {
      // In test scope, GitBucketHome must resolve to the sandboxed directory
      // under target/ rather than the user's real home.
      assert(Directory.GitBucketHome == new java.io.File("target/gitbucket_home_for_test").getAbsolutePath)
    }
  }
  // Commented-out earlier test kept for reference; note it evaluated `.exists`
  // without asserting on the result.
  //  test("GitBucketHome should exists"){
  //    new java.io.File(Directory.GitBucketHome).exists
  //  }
}
| imeszaros/gitbucket | src/test/scala/gitbucket/core/util/DirectorySpec.scala | Scala | apache-2.0 | 415 |
package com.danielasfregola.twitter4s.entities.enums
/**
 * Streaming API `filter_level` values; the wire representation is the
 * lowercase name passed to `Value`.
 */
object FilterLevel extends Enumeration {
  type FilterLevel = Value

  // Declaration order fixes the ids: none (0), low (1), medium (2).
  val None: FilterLevel = Value("none")
  val Low: FilterLevel = Value("low")
  val Medium: FilterLevel = Value("medium")
}
| DanielaSfregola/twitter4s | src/main/scala/com/danielasfregola/twitter4s/entities/enums/FilterLevel.scala | Scala | apache-2.0 | 208 |
package com.twitter.finagle
import com.google.common.cache.{CacheLoader, CacheBuilder}
import com.twitter.cache.guava.GuavaCache
import com.twitter.concurrent.AsyncSemaphore
import com.twitter.conversions.time._
import com.twitter.finagle.stats.{DefaultStatsReceiver, StatsReceiver}
import com.twitter.finagle.util._
import com.twitter.util._
import java.net.{InetAddress, InetSocketAddress, SocketAddress, UnknownHostException}
import java.util.logging.Logger
import scala.util.control.NoStackTrace
/**
* Indicates that a [[com.twitter.finagle.Resolver]] was not found for the
* given `scheme`.
*
* Resolvers are discovered via Finagle's [[com.twitter.finagle.util.LoadService]]
* mechanism. These exceptions typically suggest that there are no libraries
* on the classpath that define a Resolver for the given scheme.
*/
/** Raised when no [[Resolver]] is registered for the requested scheme. */
class ResolverNotFoundException(scheme: String)
    extends Exception(
      s"""Resolver not found for scheme "$scheme". Please add the jar containing this resolver to your classpath""")
/**
* Indicates that multiple [[com.twitter.finagle.Resolver Resolvers]] were
* discovered for given `scheme`.
*
* Resolvers are discovered via Finagle's [[com.twitter.finagle.util.LoadService]]
* mechanism. These exceptions typically suggest that there are multiple
* libraries on the classpath with conflicting scheme definitions.
*/
/**
 * Raised when more than one [[Resolver]] is registered for the same scheme.
 * The message lists, per scheme, the class names of the conflicting resolvers.
 */
class MultipleResolversPerSchemeException(resolvers: Map[String, Seq[Resolver]])
  extends Exception with NoStackTrace
{
  override def getMessage: String = {
    val conflicts = resolvers
      .map { case (scheme, rs) =>
        val classNames = rs.map(_.getClass.getName).mkString(", ")
        s"$scheme=($classNames)"
      }
      .mkString(" ")
    s"Multiple resolvers defined: $conflicts"
  }
}
/**
* Indicates that a destination name string passed to a
* [[com.twitter.finagle.Resolver]] was invalid according to the destination
* name grammar [1].
*
* [1] https://twitter.github.io/finagle/guide/Names.html
*/
/** Raised when a destination name string does not conform to the destination-name grammar. */
class ResolverAddressInvalid(addr: String)
    extends Exception(s"""Resolver address "$addr" is not valid""")
/**
* A resolver binds a name, represented by a string, to a
* variable address. Resolvers have an associated scheme
* which is used for lookup so that names may be resolved
* in a global context.
*
* These are loaded by Finagle through the
* [[com.twitter.finagle.util.LoadService service loading mechanism]]. Thus, in
* order to implement a new resolver, a class implementing `Resolver` with a
* 0-arg constructor must be registered in a file named
* `META-INF/services/com.twitter.finagle.Resolver` included in the classpath; see
* Oracle's
* [[http://docs.oracle.com/javase/6/docs/api/java/util/ServiceLoader.html ServiceLoader]]
* documentation for further details.
*/
trait Resolver {
  /** The scheme (e.g. "inet") under which this resolver is registered for lookup. */
  val scheme: String

  /** Binds the name `arg` to a variable address that reflects ongoing resolution. */
  def bind(arg: String): Var[Addr]

  @deprecated("Use Resolver.bind", "6.7.x")
  final def resolve(name: String): Try[Group[SocketAddress]] =
    bind(name) match {
      // Resolution has already settled on a failure: surface it directly.
      case Var.Sampled(Addr.Failed(e)) => Throw(e)
      // Otherwise expose the (possibly still-updating) address as a Group.
      case va => Return(Group.fromVarAddr(va))
    }
}
/**
 * An abstract class version of [[Resolver]] for Java compatibility.
 */
abstract class AbstractResolver extends Resolver
/**
 * Resolver for the "inet" scheme (DNS hostname resolution).
 */
object InetResolver {
  def apply(): Resolver = apply(DefaultStatsReceiver)

  // The returned resolver re-polls DNS every 5 seconds.
  def apply(statsReceiver: StatsReceiver): Resolver =
    new InetResolver(statsReceiver, Some(5.seconds))
}
/**
 * DNS-backed resolver. Lookups run concurrently (bounded by a semaphore) on an
 * unbounded FuturePool; when `pollIntervalOpt` is set, bound addresses are
 * refreshed on that interval.
 */
private[finagle] class InetResolver(
  unscopedStatsReceiver: StatsReceiver,
  pollIntervalOpt: Option[Duration]
) extends Resolver {
  import InetSocketAddressUtil._

  // (host, port, per-address metadata)
  type HostPortMetadata = (String, Int, Addr.Metadata)

  val scheme = "inet"
  protected[this] val statsReceiver = unscopedStatsReceiver.scope("inet").scope("dns")
  private[this] val latencyStat = statsReceiver.stat("lookup_ms")
  private[this] val successes = statsReceiver.counter("successes")
  private[this] val failures = statsReceiver.counter("failures")
  private[this] val dnsLookupFailures = statsReceiver.counter("dns_lookup_failures")
  private[this] val dnsLookupFailures2 = dnsLookupFailures
  private val log = Logger.getLogger(getClass.getName)
  private val timer = DefaultTimer.twitter

  /*
   * Resolve hostnames asynchronously and concurrently,
   * with at most 100 in-flight lookups at a time.
   */
  private[this] val dnsCond = new AsyncSemaphore(100)
  // Held in a val so the gauge stays registered for the resolver's lifetime.
  private val waitersGauge = statsReceiver.addGauge("queue_size") { dnsCond.numWaiters }

  protected def resolveHost(host: String): Future[Seq[InetAddress]] = {
    dnsLookups.incr()
    dnsCond.acquire().flatMap { permit =>
      FuturePool.unboundedPool(InetAddress.getAllByName(host).toSeq)
        .onFailure{ e =>
          log.warning(s"Failed to resolve $host. Error $e")
          dnsLookupFailures.incr()
        }
        .ensure { permit.release() }
    }
  }

  /**
   * Resolve all hostnames and merge into a final Addr.
   * If all lookups are unknown hosts, returns Addr.Neg.
   * If all lookups fail with unexpected errors, returns Addr.Failed.
   * If any lookup succeeds the final result will be Addr.Bound
   * with the successful results.
   */
  def toAddr(hp: Seq[HostPortMetadata]): Future[Addr] = {
    val elapsed = Stopwatch.start()
    Future.collectToTry(hp.map {
      case (host, port, meta) =>
        resolveHost(host).map { inetAddrs =>
          inetAddrs.map { inetAddr =>
            Address.Inet(new InetSocketAddress(inetAddr, port), meta)
          }
        }
    }).flatMap { seq: Seq[Try[Seq[Address]]] =>

      // Filter out all successes. If there was at least 1 success, consider
      // the entire operation a success
      val results = seq.collect {
        case Return(subset) => subset
      }.flatten

      // Consider any result a success. Ignore partial failures.
      if (results.nonEmpty) {
        successes.incr()
        latencyStat.add(elapsed().inMilliseconds)
        Future.value(Addr.Bound(results.toSet))
      } else {
        // Either no hosts or resolution failed for every host
        failures.incr()
        latencyStat.add(elapsed().inMilliseconds)
        log.warning("Resolution failed for all hosts")

        // Classify by the first failure seen: unknown host -> Neg,
        // any other error -> Failed, no hosts at all -> empty Bound.
        seq.collectFirst {
          case Throw(e) => e
        } match {
          case Some(_: UnknownHostException) => Future.value(Addr.Neg)
          case Some(e) => Future.value(Addr.Failed(e))
          case None => Future.value(Addr.Bound(Set[Address]()))
        }
      }
    }
  }

  def bindHostPortsToAddr(hosts: Seq[HostPortMetadata]): Var[Addr] = {
    Var.async(Addr.Pending: Addr) { u =>
      toAddr(hosts) onSuccess { u() = _ }
      pollIntervalOpt match {
        case Some(pollInterval) =>
          val updater = new Updater[Unit] {
            val one = Seq(())
            // Just perform one update at a time.
            protected def preprocess(elems: Seq[Unit]) = one
            protected def handle(unit: Unit) {
              // This always runs in a thread pool; it's okay to block.
              u() = Await.result(toAddr(hosts))
            }
          }
          timer.schedule(pollInterval.fromNow, pollInterval) {
            FuturePool.unboundedPool(updater(()))
          }
        case None =>
          Closable.nop
      }
    }
  }

  /**
   * Binds to the specified hostnames, and refreshes the DNS information periodically.
   */
  def bind(hosts: String): Var[Addr] = Try(parseHostPorts(hosts)) match {
    case Return(hp) =>
      bindHostPortsToAddr(hp.map { case (host, port) =>
        (host, port, Addr.Metadata.empty)
      })
    case Throw(exc) =>
      // Malformed "host:port" list: fail resolution immediately.
      Var.value(Addr.Failed(exc))
  }
}
/**
* InetResolver that caches all successful DNS lookups indefinitely
* and does not poll for updates.
*
* Clients should only use this in scenarios where host -> IP map changes
* do not occur.
*/
object FixedInetResolver {

  // Scheme under which the caching resolver is registered.
  val scheme = "fixedinet"

  def apply(): InetResolver =
    apply(DefaultStatsReceiver)

  // Default cache capacity: 16000 host entries.
  def apply(statsReceiver: StatsReceiver): InetResolver =
    apply(statsReceiver, 16000)

  // No resolve override: fall back to InetResolver's DNS lookup.
  def apply(statsReceiver: StatsReceiver, maxCacheSize: Long): InetResolver =
    new FixedInetResolver(statsReceiver, maxCacheSize, None)
}
/**
* Uses a [[com.twitter.util.Future]] cache to memoize lookups.
*
* Allows unit tests to specify a CI-friendly resolve fn. Otherwise
* defaults to InetResolver.resolveHost
*
* @param maxCacheSize Specifies the maximum number of `Futures` that can be cached.
* No maximum size limit if Long.MaxValue.
* @param resolveOverride Optional fn. If None, defaults back to superclass implementation
*/
/**
 * InetResolver variant that memoizes successful lookups in a size-bounded
 * Guava cache and never polls for updates (pollIntervalOpt = None).
 */
private[finagle] class FixedInetResolver(
  unscopedStatsReceiver: StatsReceiver,
  maxCacheSize: Long,
  resolveOverride: Option[String => Future[Seq[InetAddress]]]
) extends InetResolver(unscopedStatsReceiver, None) {

  override val scheme = FixedInetResolver.scheme

  // fallback to InetResolver.resolveHost if no override was provided
  val resolveFn: (String => Future[Seq[InetAddress]]) =
    resolveOverride.getOrElse(super.resolveHost)

  // A size-bounded FutureCache backed by a LoaderCache
  private[this] val cache = {
    var builder = CacheBuilder
      .newBuilder()
      .recordStats()

    // Long.MaxValue is the sentinel for "no size bound".
    if (maxCacheSize != Long.MaxValue) {
      builder = builder.maximumSize(maxCacheSize)
    }
    builder
      .build(
        new CacheLoader[String, Future[Seq[InetAddress]]]() {
          def load(host: String) = resolveFn(host)
        })
  }

  private[this] val cacheStatsReceiver = statsReceiver.scope("cache")
  // Kept in a val — presumably so the gauges remain registered for the
  // resolver's lifetime; confirm against StatsReceiver's gauge semantics.
  private[this] val cacheGauges = Seq(
    cacheStatsReceiver.addGauge("size") { cache.size },
    cacheStatsReceiver.addGauge("evicts") { cache.stats().evictionCount },
    cacheStatsReceiver.addGauge("hit_rate") { cache.stats().hitRate.toFloat })

  private[this] val futureCache = GuavaCache.fromLoadingCache(cache)

  // Memoized: each host resolves at most once per cache entry lifetime.
  override def resolveHost(host: String): Future[Seq[InetAddress]] =
    futureCache(host)
}
/** Resolver that always yields `Addr.Neg` (negative resolution: name not found). */
object NegResolver extends Resolver {
  val scheme = "neg"
  def bind(arg: String) = Var.value(Addr.Neg)
}
/** Resolver that always yields an empty bound address set. */
object NilResolver extends Resolver {
  val scheme = "nil"
  def bind(arg: String) = Var.value(Addr.Bound())
}
/** Resolver that always fails; the argument string becomes the failure message. */
object FailResolver extends Resolver {
  val scheme = "fail"
  def bind(arg: String) = Var.value(Addr.Failed(new Exception(arg)))
}
/**
 * Registry and front-end for name resolution. Built-in resolvers (inet, fixedinet,
 * neg, nil, fail) are combined with the externally supplied ones from `f`;
 * duplicate schemes are rejected at first use.
 *
 * @param f supplies additional resolvers (e.g. discovered via service loading)
 */
private[finagle] abstract class BaseResolver(f: () => Seq[Resolver]) {
  private[this] val inetResolver = InetResolver()
  private[this] val fixedInetResolver = FixedInetResolver()
  // Lazily assembled so the (possibly expensive) loading in `f` runs at most once,
  // on first resolution.
  private[this] lazy val resolvers = {
    val rs = f()
    val log = Logger.getLogger(getClass.getName)
    val resolvers = Seq(inetResolver, fixedInetResolver, NegResolver, NilResolver, FailResolver) ++ rs
    // A scheme must map to exactly one resolver; otherwise resolution would be ambiguous.
    val dups = resolvers
      .groupBy(_.scheme)
      .filter { case (_, rs) => rs.size > 1 }
    if (dups.nonEmpty) throw new MultipleResolversPerSchemeException(dups)
    for (r <- resolvers)
      log.info("Resolver[%s] = %s(%s)".format(r.scheme, r.getClass.getName, r))
    resolvers
  }
  // NOTE(review): `_.getClass isAssignableFrom clazz` matches resolvers whose class is a
  // *supertype* of `clazz`; the conventional direction would be `clazz.isAssignableFrom(_.getClass)`.
  // Confirm intent before changing — callers may rely on the current behavior.
  def get[T <: Resolver](clazz: Class[T]): Option[T] =
    resolvers find { _.getClass isAssignableFrom clazz } map { _.asInstanceOf[T] }
  // Token alphabet for the tiny "label=scheme!arg" grammar: literal runs, '=' and '!'.
  private[this] sealed trait Token
  private[this] case class El(e: String) extends Token
  private[this] object Eq extends Token
  private[this] object Bang extends Token
  // Inverse of `lex`: renders a token sequence back into the original string.
  private[this] def delex(ts: Seq[Token]) =
    ts map {
      case El(e) => e
      case Bang => "!"
      case Eq => "="
    } mkString ""
  // Tokenizes a target string; adjacent non-special characters are folded into one El.
  private[this] def lex(s: String) = {
    s.foldLeft(List[Token]()) {
      case (ts, '=') => Eq :: ts
      case (ts, '!') => Bang :: ts
      case (El(s) :: ts, c) => El(s+c) :: ts
      case (ts, c) => El(""+c) :: ts
    }
  }.reverse
  /**
   * Resolve a group from an address, a string. Resolve uses
   * `Resolver`s to do this. These are loaded via the Java
   * [[http://docs.oracle.com/javase/6/docs/api/java/util/ServiceLoader.html ServiceLoader]]
   * mechanism. The default resolver is "inet", resolving DNS
   * name/port pairs.
   *
   * Target names have a simple grammar: The name of the resolver
   * precedes the name of the address to be resolved, separated by
   * an exclamation mark ("bang"). For example: inet!twitter.com:80
   * resolves the name "twitter.com:80" using the "inet" resolver. If no
   * resolver name is present, the inet resolver is used.
   *
   * Names resolved by this mechanism are also a
   * [[com.twitter.finagle.LabelledGroup]]. By default, this name is
   * simply the `addr` string, but it can be overridden by prefixing
   * a name separated by an equals sign from the rest of the addr.
   * For example, the addr "www=inet!google.com:80" resolves
   * "google.com:80" with the inet resolver, but the returned group's
   * [[com.twitter.finagle.LabelledGroup]] name is "www".
   */
  @deprecated("Use Resolver.eval", "6.7.x")
  def resolve(addr: String): Try[Group[SocketAddress]] =
    Try { eval(addr) } flatMap {
      case Name.Path(_) =>
        Throw(new IllegalArgumentException("Resolver.resolve does not support logical names"))
      case bound@Name.Bound(_) =>
        Return(NameGroup(bound))
    }
  /**
   * Parse and evaluate the argument into a Name. Eval parses
   * a simple grammar: a scheme is followed by a bang, followed
   * by an argument:
   *   name := scheme ! arg
   * The scheme is looked up from registered Resolvers, and the
   * argument is passed in.
   *
   * When `name` begins with the character '/' it is interpreted to be
   * a logical name whose interpretation is subject to a
   * [[com.twitter.finagle.Dtab Dtab]].
   *
   * Eval throws exceptions upon failure to parse the name, or
   * on failure to scheme lookup. Since names are late bound,
   * binding failures are deferred.
   *
   * @see [[Resolvers.eval]] for Java support
   */
  def eval(name: String): Name =
    if (name startsWith "/") Name(name)
    else {
      val (resolver, arg) = lex(name) match {
        // A leading '=' or '!' means there is no scheme/label text before the separator.
        case (Eq :: _) | (Bang :: _) =>
          throw new ResolverAddressInvalid(name)
        case El(scheme) :: Bang :: name =>
          resolvers.find(_.scheme == scheme) match {
            case Some(resolver) => (resolver, delex(name))
            case None => throw new ResolverNotFoundException(scheme)
          }
        // No "scheme!" prefix: fall back to DNS resolution of the whole string.
        case ts => (inetResolver, delex(ts))
      }
      Name.Bound(resolver.bind(arg), name)
    }
  /**
   * Parse and evaluate the argument into a (Name, label: String) tuple.
   * Arguments are parsed with the same grammar as in `eval`. If a label is not
   * provided (i.e. no "label=<addr>"), then the empty string is returned.
   *
   * @see [[Resolvers.evalLabeled]] for Java support
   */
  def evalLabeled(addr: String): (Name, String) = {
    val (label, rest) = lex(addr) match {
      case El(n) :: Eq :: rest => (n, rest)
      case rest => ("", rest)
    }
    (eval(delex(rest)), label)
  }
}
/**
 * The default [[Resolver]] used by Finagle.
 *
 * Additional resolvers are discovered at runtime via `LoadService`
 * (Java ServiceLoader mechanism) and merged with the built-ins.
 *
 * @see [[Resolvers]] for Java support.
 */
object Resolver extends BaseResolver(() => LoadService[Resolver]())
/**
 * Java APIs for [[Resolver]].
 */
object Resolvers {
  /**
   * Evaluate a target string into a [[Name]].
   *
   * @see [[Resolver.eval]]
   */
  def eval(name: String): Name =
    Resolver.eval(name)
  /**
   * Evaluate a target string into a ([[Name]], label) pair.
   *
   * @see [[Resolver.evalLabeled]]
   */
  def evalLabeled(addr: String): (Name, String) =
    Resolver.evalLabeled(addr)
}
| sveinnfannar/finagle | finagle-core/src/main/scala/com/twitter/finagle/Resolver.scala | Scala | apache-2.0 | 15,237 |
package logreceiver.processor
import java.nio.ByteBuffer
import com.github.vonnagy.service.container.health.{HealthInfo, HealthState}
import com.github.vonnagy.service.container.metrics.{Counter, Meter}
/**
* Created by ivannagy on 4/13/15.
*/
/**
 * A no-op sink processor: it counts and meters incoming log batches and
 * prints each parsed payload entry to stdout instead of forwarding it.
 */
class NullProcessor extends Processor {
  import context.system
  def lineMetricPrefix = "processors.null"
  // Running count and throughput meter for received batches.
  val batchReceivedCount = Counter("processors.null.batch.receive")
  val batchReceivedMeter = Meter("processors.null.batch.receive.meter")
  // Explicit `: Unit =` replaces the deprecated Scala procedure syntax `def preStart() { ... }`.
  override def preStart(): Unit = {
    super.preStart()
    self ! ProcessorReady
  }
  override def running: Receive = {
    case LogBatch(token, frameId, count, payload) =>
      batchReceivedCount.incr
      batchReceivedMeter.meter {
        // No prior (buffer, line) pairs are carried over; every entry is simply printed.
        processPayload(payload, Seq[Tuple2[ByteBuffer, String]]()).foreach { ent =>
          println(ent)
        }
      }
  }
  // Always reports OK: this processor has no external dependencies that can fail.
  override def getHealth: HealthInfo = {
    new HealthInfo("null-processor", HealthState.OK, s"The processor running")
  }
}
| vonnagy/log-receiver | src/main/scala/logreceiver/processor/NullProcessor.scala | Scala | apache-2.0 | 985 |
package util
/**
* @author K.Sakamoto
* Created on 2016/08/08
*/
object FloatUtils {
  /** Implicitly enriches a `Float` with the operations defined on [[FloatUtils]]. */
  implicit def floatToFloatUtils(repr: Float): FloatUtils = new FloatUtils(repr)
}
/**
 * Enrichment wrapper adding convenience operations to a `Float`.
 *
 * @author K.Sakamoto
 * @param repr the wrapped float value
 */
class FloatUtils(repr: Float) {
  /** Hexadecimal floating-point representation of the wrapped value, e.g. 1.5f -> "0x1.8p0". */
  def toHexString: String = java.lang.Float.toHexString(repr)
}
package hackerRankFunctionalProgramming.helloWorldNTimes
object HelloWorldNTimes {
  /**
   * Prints "Hello World" on its own line `n` times; prints nothing for `n <= 0`.
   * `Integer` is kept (boxed) to preserve the original call signature.
   */
  def f(n: Integer): Unit = {
    (0 until n).foreach(_ => println("Hello World"))
  }
}
| sagasu/scalaPlayground | playground/src/main/scala/hackerRankFunctionalProgramming/helloWorldNTimes/HelloWorldNTimes.scala | Scala | apache-2.0 | 165 |
package debug
object Nested extends App { // debugger test fixture: line numbers below are load-bearing, do not insert lines
  def outer(): Int = {
    val outerUsed = 1 // captured by inner() — exercises outer-scope access in expression evaluation
    val outerUnused = 2 // intentionally never read: exercises unused outer locals
    def inner(): Int = {
      val result = outerUsed + 1
      // number of following line must be specified in
      // org.scalaide.debug.internal.expression.integration.TestValues object because a lot of tests use it
      val breakpointHere = ??? // do not move: tests hard-code this line's number
      result
    }
    inner()
  }
  outer()
}
import play.sbt.PlayImport._
import sbt.Keys._
import sbt._
/** Shared sbt build settings and dependency versions for all modules. */
object Common {
  val settings: Seq[Setting[_]] = Seq(
    organization := "com.cognism",
    scalaVersion := "2.13.8",
    scalacOptions ++= Seq(
      //Emit warning for usages of deprecated APIs
      "-deprecation",
      //Emit warning for usages of features that should be imported explicitly
      "-feature",
      //Enable additional warnings where generated code depends on assumptions
      "-unchecked",
      //Warn when dead code is identified
      "-Ywarn-dead-code",
      "-language:postfixOps"
    )
  )
  // Pinned third-party library versions; commonDeps for library modules, rootDeps for the app.
  object Dependencies {
    val janino = "org.codehaus.janino" % "janino" % "3.1.4"
    val json = "com.typesafe.play" %% "play-json" % "2.9.2"
    val logdnaZileo = "net.zileo" % "logback-logdna" % "1.2.0"
    val jackson = "com.fasterxml.jackson.module" %% "jackson-module-scala" % "2.12.3"
    val jerseyCore = "org.glassfish.jersey.core" % "jersey-client" % "2.35"
    val jerseyInject = "org.glassfish.jersey.inject" % "jersey-hk2" % "2.35"
    val commonDeps = Seq(ehcache, ws)
    val rootDeps = Seq(guice, ehcache, filters, ws, specs2 % Test, janino, json, logdnaZileo, jackson, jerseyCore, jerseyInject)
  }
}
| Cognism/cognism-template-play | project/Common.scala | Scala | mit | 1,208 |
package dotty.tools.backend.jvm
import dotty.tools.dotc.CompilationUnit
import dotty.tools.dotc.ast.Trees.{ValDef, PackageDef}
import dotty.tools.dotc.ast.tpd
import dotty.tools.dotc.core.Phases.Phase
import scala.collection.mutable
import scala.tools.asm.{ClassVisitor, MethodVisitor, FieldVisitor}
import scala.tools.nsc.Settings
import scala.tools.nsc.backend.jvm._
import dotty.tools.dotc
import dotty.tools.dotc.backend.jvm.DottyPrimitives
import dotty.tools.dotc.transform.Erasure
import scala.reflect.ClassTag
import dotty.tools.dotc.core._
import Periods._
import SymDenotations._
import Contexts._
import Types._
import Symbols._
import Denotations._
import Phases._
import java.lang.AssertionError
import scala.tools.asm
import scala.tools.asm.tree._
import dotty.tools.dotc.util.{Positions, DotClass}
import tpd._
import scala.tools.nsc.backend.jvm.opt.LocalOpt
/** Compiler phase that drives JVM bytecode generation for each compilation unit. */
class GenBCode extends Phase {
  def phaseName: String = "genBCode"
  // Symbols registered as program entry points; consumed (and cleared) per run.
  private val entryPoints = new mutable.HashSet[Symbol]()
  def registerEntryPoint(sym: Symbol) = entryPoints += sym
  def run(implicit ctx: Context): Unit = {
    // A fresh pipeline per unit; entry points are cleared so the next unit starts clean.
    new GenBCodePipeline(entryPoints.toList, new DottyBackendInterface()(ctx))(ctx).run(ctx.compilationUnit.tpdTree)
    entryPoints.clear()
  }
}
// Three-stage bytecode pipeline: q1 (ClassDefs) -> Worker1 -> q2 (ASM ClassNodes)
// -> Worker2 -> q3 (serialized byte arrays) -> disk. Sentinel "poison" items mark
// end-of-stream for each queue.
class GenBCodePipeline(val entryPoints: List[Symbol], val int: DottyBackendInterface)(implicit val ctx: Context) extends BCodeSyncAndTry{
  var tree: Tree = _
  final class PlainClassBuilder(cunit: CompilationUnit) extends SyncAndTryBuilder(cunit)
//  class BCodePhase() {
    private var bytecodeWriter : BytecodeWriter = null
    private var mirrorCodeGen : JMirrorBuilder = null
    private var beanInfoCodeGen : JBeanInfoBuilder = null
    /* ---------------- q1 ---------------- */
    case class Item1(arrivalPos: Int, cd: TypeDef, cunit: CompilationUnit) {
      def isPoison = { arrivalPos == Int.MaxValue }
    }
    private val poison1 = Item1(Int.MaxValue, null, ctx.compilationUnit)
    private val q1 = new java.util.LinkedList[Item1]
    /* ---------------- q2 ---------------- */
    case class Item2(arrivalPos: Int,
                     mirror: asm.tree.ClassNode,
                     plain: asm.tree.ClassNode,
                     bean: asm.tree.ClassNode,
                     outFolder: scala.tools.nsc.io.AbstractFile) {
      def isPoison = { arrivalPos == Int.MaxValue }
    }
    private val poison2 = Item2(Int.MaxValue, null, null, null, null)
    private val q2 = new _root_.java.util.LinkedList[Item2]
    /* ---------------- q3 ---------------- */
    /*
     *  An item of queue-3 (the last queue before serializing to disk) contains three of these
     *  (one for each of mirror, plain, and bean classes).
     *
     *  @param jclassName  internal name of the class
     *  @param jclassBytes bytecode emitted for the class SubItem3 represents
     */
    case class SubItem3(
      jclassName: String,
      jclassBytes: Array[Byte]
    )
    case class Item3(arrivalPos: Int,
                     mirror: SubItem3,
                     plain: SubItem3,
                     bean: SubItem3,
                     outFolder: scala.tools.nsc.io.AbstractFile) {
      def isPoison = { arrivalPos == Int.MaxValue }
    }
    // Orders Item3 by arrivalPos so classfiles are written in source arrival order.
    private val i3comparator = new java.util.Comparator[Item3] {
      override def compare(a: Item3, b: Item3) = {
        if (a.arrivalPos < b.arrivalPos) -1
        else if (a.arrivalPos == b.arrivalPos) 0
        else 1
      }
    }
    private val poison3 = Item3(Int.MaxValue, null, null, null, null)
    private val q3 = new java.util.PriorityQueue[Item3](1000, i3comparator)
    /*
     * Pipeline that takes ClassDefs from queue-1, lowers them into an intermediate form, placing them on queue-2
     */
    class Worker1(needsOutFolder: Boolean) {
      val caseInsensitively = scala.collection.mutable.Map.empty[String, Symbol]
      def run() {
        while (true) {
          val item = q1.poll
          if (item.isPoison) {
            // Propagate end-of-stream to the next stage before terminating.
            q2 add poison2
            return
          }
          else {
            try { /*withCurrentUnit(item.cunit)*/(visit(item)) }
            catch {
              case ex: Throwable =>
                ex.printStackTrace()
                ctx.error(s"Error while emitting ${int.sourceFileFor(item.cunit)}\\n${ex.getMessage}")
            }
          }
        }
      }
      /*
       * Checks for duplicate internal names case-insensitively,
       * builds ASM ClassNodes for mirror, plain, and bean classes;
       * enqueues them in queue-2.
       *
       */
      def visit(item: Item1) {
        val Item1(arrivalPos, cd, cunit) = item
        val claszSymbol = cd.symbol
        // GenASM checks this before classfiles are emitted, https://github.com/scala/scala/commit/e4d1d930693ac75d8eb64c2c3c69f2fc22bec739
        // todo: add back those checks
        /*val lowercaseJavaClassName = claszSymbol.javaClassName.toLowerCase
        caseInsensitively.get(lowercaseJavaClassName) match {
          case None =>
            caseInsensitively.put(lowercaseJavaClassName, claszSymbol)
          case Some(dupClassSym) =>
            reporter.warning(
              claszSymbol.pos,
              s"Class ${claszSymbol.javaClassName} differs only in case from ${dupClassSym.javaClassName}. " +
                "Such classes will overwrite one another on case-insensitive filesystems."
            )
        }*/
        // -------------- mirror class, if needed --------------
        val mirrorC =
          if (int.symHelper(claszSymbol).isTopLevelModuleClass) {
            if (claszSymbol.companionClass == NoSymbol) {
              mirrorCodeGen.genMirrorClass(claszSymbol, cunit)
            } else {
              ctx.log(s"No mirror class for module with linked class: ${claszSymbol.fullName}")
              null
            }
          } else null
        // -------------- "plain" class --------------
        val pcb = new PlainClassBuilder(cunit)
        pcb.genPlainClass(cd)
        val outF = if (needsOutFolder) getOutFolder(claszSymbol, pcb.thisName) else null;
        val plainC = pcb.cnode
        // -------------- bean info class, if needed --------------
        val beanC =
          if (claszSymbol hasAnnotation int.BeanInfoAttr) {
            beanInfoCodeGen.genBeanInfoClass(
              claszSymbol, cunit,
              int.symHelper(claszSymbol).fieldSymbols,
              int.symHelper(claszSymbol).methodSymbols
            )
          } else null
        // ----------- hand over to pipeline-2
        val item2 =
          Item2(arrivalPos,
            mirrorC, plainC, beanC,
            outF)
        q2 add item2 // at the very end of this method so that no Worker2 thread starts mutating before we're done.
      } // end of method visit(Item1)
    } // end of class BCodePhase.Worker1
    /*
     * Pipeline that takes ClassNodes from queue-2. The unit of work depends on the optimization level:
     *
     *   (a) no optimization involves:
     *         - converting the plain ClassNode to byte array and placing it on queue-3
     */
    class Worker2 {
      lazy val localOpt = new LocalOpt(new Settings())
      def localOptimizations(classNode: ClassNode): Unit = {
        /*BackendStats.timed(BackendStats.methodOptTimer)*/(localOpt.methodOptimizations(classNode))
      }
      def run() {
        while (true) {
          val item = q2.poll
          if (item.isPoison) {
            // Propagate end-of-stream to the serialization queue before terminating.
            q3 add poison3
            return
          }
          else {
            try {
              localOptimizations(item.plain)
              addToQ3(item)
            } catch {
              case ex: Throwable =>
                ex.printStackTrace()
                ctx.error(s"Error while emitting ${item.plain.name}\\n${ex.getMessage}")
            }
          }
        }
      }
      // Serializes the ClassNodes of one item to byte arrays and enqueues them on q3.
      private def addToQ3(item: Item2) {
        def getByteArray(cn: asm.tree.ClassNode): Array[Byte] = {
          val cw = new CClassWriter(extraProc)
          cn.accept(cw)
          cw.toByteArray
        }
        val Item2(arrivalPos, mirror, plain, bean, outFolder) = item
        val mirrorC = if (mirror == null) null else SubItem3(mirror.name, getByteArray(mirror))
        val plainC = SubItem3(plain.name, getByteArray(plain))
        val beanC = if (bean == null) null else SubItem3(bean.name, getByteArray(bean))
        if (AsmUtils.traceSerializedClassEnabled && plain.name.contains(AsmUtils.traceSerializedClassPattern)) {
          if (mirrorC != null) AsmUtils.traceClass(mirrorC.jclassBytes)
          AsmUtils.traceClass(plainC.jclassBytes)
          if (beanC != null) AsmUtils.traceClass(beanC.jclassBytes)
        }
        q3 add Item3(arrivalPos, mirrorC, plainC, beanC, outFolder)
      }
    } // end of class BCodePhase.Worker2
    var arrivalPos = 0
    /*
     * A run of the BCodePhase phase comprises:
     *
     *   (a) set-up steps (most notably supporting maps in `BCodeTypes`,
     *       but also "the" writer where class files in byte-array form go)
     *
     *   (b) building of ASM ClassNodes, their optimization and serialization.
     *
     *   (c) tear down (closing the classfile-writer and clearing maps)
     *
     */
    def run(t: Tree) {
      this.tree = t
      // val bcodeStart = Statistics.startTimer(BackendStats.bcodeTimer)
      // val initStart = Statistics.startTimer(BackendStats.bcodeInitTimer)
      arrivalPos = 0 // just in case
      // scalaPrimitives.init()
      bTypes.intializeCoreBTypes()
      // Statistics.stopTimer(BackendStats.bcodeInitTimer, initStart)
      // initBytecodeWriter invokes fullName, thus we have to run it before the typer-dependent thread is activated.
      bytecodeWriter = initBytecodeWriter(entryPoints)
      mirrorCodeGen = new JMirrorBuilder
      beanInfoCodeGen = new JBeanInfoBuilder
      val needsOutfileForSymbol = bytecodeWriter.isInstanceOf[ClassBytecodeWriter]
      buildAndSendToDisk(needsOutfileForSymbol)
      // closing output files.
      bytecodeWriter.close()
      // Statistics.stopTimer(BackendStats.bcodeTimer, bcodeStart)
      /* TODO Bytecode can be verified (now that all classfiles have been written to disk)
       *
       * (1) asm.util.CheckAdapter.verify()
       *       public static void verify(ClassReader cr, ClassLoader loader, boolean dump, PrintWriter pw)
       *     passing a custom ClassLoader to verify inter-dependent classes.
       *     Alternatively,
       *       - an offline-bytecode verifier could be used (e.g. Maxine brings one as separate tool).
       *       - -Xverify:all
       *
       * (2) if requested, check-java-signatures, over and beyond the syntactic checks in `getGenericSignature()`
       *
       */
    }
    /*
     * Sequentially:
     *   (a) place all ClassDefs in queue-1
     *   (b) dequeue one at a time from queue-1, convert it to ASM ClassNode, place in queue-2
     *   (c) dequeue one at a time from queue-2, convert it to byte-array, place in queue-3
     *   (d) serialize to disk by draining queue-3.
     */
    private def buildAndSendToDisk(needsOutFolder: Boolean) {
      feedPipeline1()
      // val genStart = Statistics.startTimer(BackendStats.bcodeGenStat)
      (new Worker1(needsOutFolder)).run()
      // Statistics.stopTimer(BackendStats.bcodeGenStat, genStart)
      (new Worker2).run()
      // val writeStart = Statistics.startTimer(BackendStats.bcodeWriteTimer)
      drainQ3()
      // Statistics.stopTimer(BackendStats.bcodeWriteTimer, writeStart)
    }
    /* Feed pipeline-1: place all ClassDefs on q1, recording their arrival position. */
    private def feedPipeline1() {
      def gen(tree: Tree) {
        tree match {
          case EmptyTree => ()
          case PackageDef(_, stats) => stats foreach gen
          case ValDef(name, tpt, rhs) => () // module val not emmited
          case cd: TypeDef =>
            q1 add Item1(arrivalPos, cd, int.currentUnit)
            arrivalPos += 1
        }
      }
      gen(tree)
      q1 add poison1
    }
    /* Pipeline that writes classfile representations to disk. */
    private def drainQ3() {
      def sendToDisk(cfr: SubItem3, outFolder: scala.tools.nsc.io.AbstractFile) {
        if (cfr != null){
          val SubItem3(jclassName, jclassBytes) = cfr
          try {
            val outFile =
              if (outFolder == null) null
              else getFileForClassfile(outFolder, jclassName, ".class")
            bytecodeWriter.writeClass(jclassName, jclassName, jclassBytes, outFile)
          }
          catch {
            case e: FileConflictException =>
              ctx.error(s"error writing $jclassName: ${e.getMessage}")
          }
        }
      }
      var moreComing = true
      // `expected` denotes the arrivalPos whose Item3 should be serialized next
      var expected = 0
      while (moreComing) {
        val incoming = q3.poll
        moreComing = !incoming.isPoison
        if (moreComing) {
          val item = incoming
          val outFolder = item.outFolder
          sendToDisk(item.mirror, outFolder)
          sendToDisk(item.plain, outFolder)
          sendToDisk(item.bean, outFolder)
          expected += 1
        }
      }
      // we're done
      assert(q1.isEmpty, s"Some ClassDefs remained in the first queue: $q1")
      assert(q2.isEmpty, s"Some classfiles remained in the second queue: $q2")
      assert(q3.isEmpty, s"Some classfiles weren't written to disk: $q3")
    }
  //} // end of class BCodePhase
}
| AlexSikia/dotty | src/dotty/tools/backend/jvm/GenBCode.scala | Scala | bsd-3-clause | 13,652 |
package uk.gov.gds.common.http.places
import uk.gov.gds.common.config.Config
import uk.gov.gds.common.http.ApacheHttpClient
/** HTTP client for the Places API; base URL read from configuration. */
object PlacesHttpClient extends ApacheHttpClient {
  // Base URL comes from the "places.api.url" config entry; `path` is appended verbatim.
  private[http] def targetUrl(path: String) = Config("places.api.url") + path
}
| alphagov/gds-scala-common | govuk-clients/src/main/scala/uk/gov/gds/common/http/places/PlacesHttpClient.scala | Scala | mit | 257 |
package controllers.api
import java.time.Instant
import java.util.UUID
import com.google.inject.AbstractModule
import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.impl.providers.BasicAuthProvider
import com.mohiva.play.silhouette.impl.util.SecureRandomIDGenerator
import com.mohiva.play.silhouette.persistence.daos.{ DelegableAuthInfoDAO, MongoAuthInfoDAO }
import com.typesafe.config.ConfigFactory
import models.daos.{ ClaimDAO, FormDAO, TwilioFaxDAO }
import models._
import net.codingwell.scalaguice.ScalaModule
import org.mockito.Mockito
import org.specs2.specification.Scope
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.json.JsValue
import play.api.{ Application, Configuration }
import services.documents.DocumentService
import services.documents.pdf.PDFConcatenator
import utils.auth.{ TwilioRequestValidator, TwilioRequestValidatorImpl }
import utils.secrets.SecretsManager
/**
 * specs2 test fixture for Twilio controller tests: mocks the DAOs and services,
 * wires them into a Guice application, and provides canned user/claim/form data.
 * Note: the request validator is a *real* TwilioRequestValidatorImpl backed by a
 * mocked config/secrets pair (auth token "12345").
 */
trait TwilioControllerTestContext extends Scope {
  val mockFormDao: FormDAO = Mockito.mock(classOf[FormDAO])
  val mockDocumentService: DocumentService = Mockito.mock(classOf[DocumentService])
  val mockBasicAuthProvider: BasicAuthProvider = Mockito.mock(classOf[BasicAuthProvider])
  val mockTwilioUserDao: DelegableAuthInfoDAO[TwilioUser] = Mockito.mock(classOf[DelegableAuthInfoDAO[TwilioUser]])
  val mockPdfConcatenator: PDFConcatenator = Mockito.mock(classOf[PDFConcatenator])
  val mockSecureRandomIdGenerator: SecureRandomIDGenerator = Mockito.mock(classOf[SecureRandomIDGenerator])
  val mockConfiguration: Configuration = Mockito.mock(classOf[Configuration])
  val mockSecretsManager: SecretsManager = Mockito.mock(classOf[SecretsManager])
  val mockClaimDao: ClaimDAO = Mockito.mock(classOf[ClaimDAO])
  val mockTwilioFaxDao: TwilioFaxDAO = Mockito.mock(classOf[TwilioFaxDAO])
  // Stub the secret lookup chain: config name -> secret name -> token value.
  Mockito.when(mockConfiguration.get[String]("twilio.authTokenSecretName"))
    .thenReturn("fakeSecretName")
  Mockito.when(mockSecretsManager.getSecretUtf8("fakeSecretName")).thenReturn("12345")
  val requestValidator = new TwilioRequestValidatorImpl(mockConfiguration, mockSecretsManager)
  val userID: UUID = UUID.randomUUID()
  /**
   * An identity.
   */
  var identity = User(
    userID = userID,
    loginInfo = LoginInfo("credentials", "user@website.com"),
    firstName = None,
    lastName = None,
    fullName = None,
    email = None,
    avatarURL = None,
    activated = true,
    contact = None)
  // A claim owned by `identity` with one fax and one email recipient.
  var testClaim = Claim(
    userID = identity.userID,
    claimID = UUID.randomUUID(),
    key = "fakeKey",
    state = Claim.State.INCOMPLETE,
    stateUpdatedAt = java.util.Date.from(Instant.now()),
    recipients = Seq(
      Recipient(Recipient.Type.FAX, "18005555555"),
      Recipient(Recipient.Type.EMAIL, "test@x.com")))
  var testForm = ClaimForm("VBA-21-0966-ARE", Map.empty[String, JsValue], identity.userID, testClaim.claimID, 0, 0, 0, 0)
  // Guice module binding every dependency to the mocks above.
  class FakeModule extends AbstractModule with ScalaModule {
    def configure(): Unit = {
      bind[FormDAO].toInstance(mockFormDao)
      bind[ClaimDAO].toInstance(mockClaimDao)
      bind[TwilioFaxDAO].toInstance(mockTwilioFaxDao)
      bind[DocumentService].toInstance(mockDocumentService)
      bind[BasicAuthProvider].toInstance(mockBasicAuthProvider)
      bind[DelegableAuthInfoDAO[TwilioUser]].toInstance(mockTwilioUserDao)
      bind[PDFConcatenator].toInstance(mockPdfConcatenator)
      bind[SecureRandomIDGenerator].toInstance(mockSecureRandomIdGenerator)
      bind[TwilioRequestValidator].toInstance(requestValidator)
    }
  }
  // Full Play application using the test config plus the mock bindings.
  val application: Application = GuiceApplicationBuilder()
    .configure(Configuration(ConfigFactory.load("application.test.conf")))
    .overrides(new FakeModule)
    .build()
}
| vetafi/vetafi-web | test/controllers/api/TwilioControllerTestContext.scala | Scala | apache-2.0 | 3,730 |
package org.jetbrains.plugins.scala
package compiler
import java.util.UUID
import com.intellij.compiler.server.BuildManagerListener
import com.intellij.openapi.application.ApplicationManager
import com.intellij.openapi.compiler.{CompileContext, CompileTask, CompilerManager}
import com.intellij.openapi.components.ProjectComponent
import com.intellij.openapi.module.{Module, ModuleManager}
import com.intellij.openapi.project.Project
import com.intellij.openapi.roots.{CompilerModuleExtension, ModuleRootManager}
import com.intellij.openapi.ui.Messages
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.project._
/**
* Pavel Fatin
*/
/**
 * Project component that (a) starts/restarts the Scala compile server when a build
 * begins and (b) verifies, before compilation, that no Scala module shares its
 * production and test output directories (offering to split them automatically).
 */
class ServerMediator(project: Project) extends ProjectComponent {
  private def isScalaProject = project.hasScala
  private val settings = ScalaCompileServerSettings.getInstance
  private val connection = project.getMessageBus.connect
  // Starts (or restarts, if settings changed) the compile server when a build starts.
  private val serverLauncher = new BuildManagerListener {
    override def beforeBuildProcessStarted(project: Project, uuid: UUID): Unit = {}
    override def buildStarted(project: Project, sessionId: UUID, isAutomake: Boolean): Unit = {
      if (settings.COMPILE_SERVER_ENABLED && isScalaProject && !ApplicationManager.getApplication.isUnitTestMode) {
        invokeAndWait {
          CompileServerManager.instance(project).configureWidget()
        }
        // Stop first if the server's configuration no longer matches the project.
        if (CompileServerLauncher.needRestart(project)) {
          CompileServerLauncher.instance.stop()
        }
        if (!CompileServerLauncher.instance.running) {
          invokeAndWait {
            CompileServerLauncher.instance.tryToStart(project)
          }
        }
      }
    }
    override def buildFinished(project: Project, sessionId: UUID, isAutomake: Boolean): Unit = {}
  }
  connection.subscribe(BuildManagerListener.TOPIC, serverLauncher)
  // Pre-compile task: returning false cancels the compilation.
  private val checkSettingsTask = new CompileTask {
    def execute(context: CompileContext): Boolean = {
      if (isScalaProject) {
        if (!checkCompilationSettings()) false
        else true
      }
      else true
    }
  }
  CompilerManager.getInstance(project).addBeforeTask(checkSettingsTask)
  // Returns true when compilation may proceed (no clashes, or clashes were auto-split).
  private def checkCompilationSettings(): Boolean = {
    // A clash: a Scala module whose production and test output URLs are identical.
    def hasClashes(module: Module) = module.hasScala && {
      val extension = CompilerModuleExtension.getInstance(module)
      val production = extension.getCompilerOutputUrl
      val test = extension.getCompilerOutputUrlForTests
      production == test
    }
    val modulesWithClashes = ModuleManager.getInstance(project).getModules.toSeq.filter(hasClashes)
    var result = true
    if (modulesWithClashes.nonEmpty) {
      invokeAndWait {
        val choice =
          if (!ApplicationManager.getApplication.isUnitTestMode) {
            Messages.showYesNoDialog(project,
              "Production and test output paths are shared in: " + modulesWithClashes.map(_.getName).mkString(" "),
              "Shared compile output paths in Scala module(s)",
              "Split output path(s) automatically", "Cancel compilation", Messages.getErrorIcon)
          }
          else Messages.YES
        val splitAutomatically = choice == Messages.YES
        if (splitAutomatically) {
          inWriteAction {
            modulesWithClashes.foreach { module =>
              val model = ModuleRootManager.getInstance(module).getModifiableModel
              val extension = model.getModuleExtension(classOf[CompilerModuleExtension])
              val outputUrlParts = extension.getCompilerOutputUrl match {
                case null => Seq.empty
                case url => url.split("/").toSeq
              }
              // NOTE(review): if the output URL was null, outputUrlParts is empty and
              // `.last` below will throw — confirm a null URL cannot reach this branch.
              val nameForTests = if (outputUrlParts.last == "classes") "test-classes" else "test"
              extension.inheritCompilerOutputPath(false)
              extension.setCompilerOutputPathForTests((outputUrlParts.dropRight(1) :+ nameForTests).mkString("/"))
              model.commit()
            }
            project.save()
          }
        }
        result = splitAutomatically
      }
    }
    result
  }
  def getComponentName = getClass.getSimpleName
  def initComponent() {}
  def disposeComponent() {}
  def projectOpened() {}
  def projectClosed() {}
}
| LPTK/intellij-scala | src/org/jetbrains/plugins/scala/compiler/ServerMediator.scala | Scala | apache-2.0 | 4,208 |
/**
* Copyright 2015 Otto (GmbH & Co KG)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.schedoscope.scheduler.driver
import java.nio.file.Files
import net.lingala.zip4j.core.ZipFile
import org.apache.commons.io.FileUtils
import org.joda.time.LocalDateTime
import org.schedoscope.{ DriverSettings, Schedoscope }
import org.schedoscope.dsl.transformations.Transformation
import scala.concurrent.{ Await, Future }
import scala.concurrent.duration.Duration
import scala.util.Random
/**
 * Exceptions occurring in a driver that merit a retry. These will be escalated to the driver actor
 * to cause a driver actor restart.
 */
// Null defaults mirror the Java RuntimeException(message, cause) constructor conventions.
case class RetryableDriverException(message: String = null, cause: Throwable = null) extends RuntimeException(message, cause)
/**
 * Handle for the transformation executed by a driver, called a driver run.
 */
// stateHandle is driver-specific: a Future for blocking APIs, or an API-native job handle otherwise.
class DriverRunHandle[T <: Transformation](val driver: Driver[T], val started: LocalDateTime, val transformation: T, val stateHandle: Any)
/**
 * Base class for driver run's state, contains reference to driver instance (e.g. to execute code for termination)
 *
 */
// Sealed: the only states are DriverRunOngoing, DriverRunSucceeded and DriverRunFailed below.
sealed abstract class DriverRunState[T <: Transformation](val driver: Driver[T])
/**
 * Driver run state: transformation is still running
 */
// Keeps the run handle so the ongoing run can be polled or killed.
case class DriverRunOngoing[T <: Transformation](override val driver: Driver[T], val runHandle: DriverRunHandle[T]) extends DriverRunState[T](driver)
/**
 * Driver run state: transformation has finished succesfully. The driver actor embedding the driver having sucessfully
 * executed the transformation will return a success message to the view actor initiating the transformation.
 */
// `comment` carries a human-readable success description for logging/reporting.
case class DriverRunSucceeded[T <: Transformation](override val driver: Driver[T], comment: String) extends DriverRunState[T](driver)
/**
 * Driver run state: transformation has terminated with an error. The driver actor embedding the driver having failed
 * at executing the transformation will return a failure message to the view actor initiating the transformation. That view
 * actor will subsequently retry the transformation.
 *
 */
// `reason` is a human-readable summary; `cause` is the underlying exception (may be null).
case class DriverRunFailed[T <: Transformation](override val driver: Driver[T], reason: String, cause: Throwable) extends DriverRunState[T](driver)
/**
 * Trait for user defined code to be executed after a transformation. e.g. for gathering statistics
 * and logging information from the execution framework (e.g. mapreduce)
 */
trait DriverRunCompletionHandler[T <: Transformation] {
  /**
   * This method has to be implemented for gathering information about a completed run run
   *
   * As for exceptions:
   *
   * In case a failure within the completion handler should not cause the transformation to fail, the exception should be suppressed.
   *
   * In case a failure within the completion handler should cause a restart of the driver actor and a redo of the transformation,
   * it should be raised as a DriverException.
   *
   * Any other raised exception will cause the driver run to fail and the transformation subsequently to be retried by the view actor.
   *
   * @param stateOfCompletion the terminal state (success or failure) of the run
   * @param run the handle of the run that just completed
   */
  def driverRunCompleted(stateOfCompletion: DriverRunState[T], run: DriverRunHandle[T])
}
/**
 * Default implementation of a completion handler. Does nothing
 */
// Used when no post-run bookkeeping is configured for a transformation type.
class DoNothingCompletionHandler[T <: Transformation] extends DriverRunCompletionHandler[T] {
  def driverRunCompleted(stateOfCompletion: DriverRunState[T], run: DriverRunHandle[T]) {}
}
/**
 * In Schedoscope, drivers are responsible for actually executing transformations. Drivers might be
 * executed from within the DriverActor or directly from a test. A
 * Driver is parameterized by the type of transformation that it is able to execute.
 *
 * Generally, there are two classes of implementations of this trait depending on the API
 * supporting a transformation. For blocking APIs, driver run states are implemented using futures.
 * For non-blocking APIs, driver run states can encapsulate whatever handler mechanism is supported
 * by the API.
 *
 * The present trait provides default implementations for blocking APIs. To support one, the methods
 * transformationName, run, and driverRunCompletionHandlerClassNames need to be overridden
 * (for example @see HiveDriver). Run needs to return an appropriate DriverRunHandle with a
 * future as its stateHandle which produces a DriverRunState or throws an exception.
 *
 * For non-blocking APIs, one needs to override transformationName, killRun, getDriverRunState, run,
 * runAndWait, driverRunCompletionHandlerClassNames for the appropriate handle type of the API. As
 * an example @see OozieDriver.
 */
trait Driver[T <: Transformation] {

  // Future-based driver runs execute on this dedicated dispatcher rather than
  // the default actor dispatcher.
  implicit val executionContext = Schedoscope.actorSystem.dispatchers.lookup("akka.actor.future-driver-dispatcher")

  /**
   * @return the name of the transformation. This is a string identifier of the transformation type
   *         used within configurations.
   */
  def transformationName: String

  /**
   * A driver can override this to have a fixed timeout.
   * Defaults to the configured timeout for this transformation type.
   */
  def runTimeOut: Duration = Schedoscope.settings.getDriverSettings(transformationName).timeout

  /**
   * Kill the given driver run, default: do nothing
   */
  def killRun(run: DriverRunHandle[T]): Unit = {}

  /**
   * Retrieve the driver state from a run handle.
   *
   * Default implementation for blocking APIs: assumes the handle's stateHandle
   * is a Future[DriverRunState[T]].
   */
  def getDriverRunState(run: DriverRunHandle[T]): DriverRunState[T] = {
    val runState = run.stateHandle.asInstanceOf[Future[DriverRunState[T]]]
    if (runState.isCompleted)
      // .value.get is safe once isCompleted; the outer .get rethrows the
      // future's exception if the run failed exceptionally.
      runState.value.get.get
    else
      DriverRunOngoing[T](this, run)
  }

  /**
   * Run the transformation asychronously/nonblocking
   */
  def run(t: T): DriverRunHandle[T]

  /**
   * Run the transformation sychronously/blocking (e.g. for tests).
   * Blocks up to runTimeOut; throws a TimeoutException when exceeded.
   */
  def runAndWait(t: T): DriverRunState[T] = Await.result(run(t).stateHandle.asInstanceOf[Future[DriverRunState[T]]], runTimeOut)

  /**
   * Deploy all resources for this transformation/view to the cluster. By default, deploys all
   * jars defined in the libJars section of the transformation configuration (@see DriverSettings)
   *
   * @return true when no individual copy operation reported a DriverRunFailed
   */
  def deployAll(ds: DriverSettings): Boolean = {
    val fsd = FileSystemDriver(ds)

    // clear destination
    fsd.delete(ds.location, true)
    fsd.mkdirs(ds.location)

    val succ = ds.libJars
      .map(f => {
        if (ds.unpack) {
          // Unzip the jar into a temp dir and copy its contents, rather than
          // the jar itself.
          val tmpDir = Files.createTempDirectory("schedoscope-" + Random.nextLong.abs.toString).toFile
          new ZipFile(f.replaceAll("file:", "")).extractAll(tmpDir.getAbsolutePath)
          val succ = fsd.copy("file://" + tmpDir + "/*", ds.location, true)
          FileUtils.deleteDirectory(tmpDir)
          succ
        } else {
          fsd.copy(f, ds.location, true)
        }
      })

    succ.filter(_.isInstanceOf[DriverRunFailed[_]]).isEmpty
  }

  /**
   * Needs to be overridden to return the class names of driver run completion handlers to apply.
   * E.g., provide a val of the same name to the constructor of the driver implementation.
   */
  def driverRunCompletionHandlerClassNames: List[String]

  // Handlers are instantiated reflectively once, on first use. Each named class
  // must have a public no-arg constructor.
  lazy val driverRunCompletionHandlers: List[DriverRunCompletionHandler[T]] =
    driverRunCompletionHandlerClassNames.map { className => Class.forName(className).newInstance().asInstanceOf[DriverRunCompletionHandler[T]] }

  /**
   * Invokes completion handlers on the given driver run. Must only be called
   * once the run is in a final state (succeeded or failed).
   */
  def driverRunCompleted(run: DriverRunHandle[T]) {
    getDriverRunState(run) match {
      case s: DriverRunSucceeded[T] => driverRunCompletionHandlers.foreach(_.driverRunCompleted(s, run))
      case f: DriverRunFailed[T] => driverRunCompletionHandlers.foreach(_.driverRunCompleted(f, run))
      case _ => throw RetryableDriverException("driverRunCompleted called with non-final driver run state")
    }
  }
}
| hpzorn/schedoscope | schedoscope-core/src/main/scala/org/schedoscope/scheduler/driver/Driver.scala | Scala | apache-2.0 | 8,265 |
package org.scalameta
package object reflection {
  /** Builds a fresh instance of `T` via the implicitly available [[InstanceTag]]. */
  def instanceOf[T](implicit tag: InstanceTag[T]) = tag.instantiate
}
| beni55/scalameta | foundation/src/main/scala/org/scalameta/reflection/package.scala | Scala | bsd-3-clause | 127 |
import cats.implicits._
import treelog.LogTreeSyntaxWithoutAnnotations._
import scala.concurrent._
import scala.concurrent.duration._
object FuturesExample extends App {
  implicit val ec: ExecutionContext = ExecutionContext.global

  /**
   * Merge the logs of several parallel computations under one new root node and
   * combine their values -- here simply by summing them.
   * See the Scaladoc for treelog.LogTreeSyntax.BranchLabelingSyntax.~<+
   */
  def doSum(computations: List[DescribedComputation[Int]]): DescribedComputation[Int] =
    "Summed up" ~<+ (computations, (bits: List[Int]) => bits.sum)

  // Three extremely complicated computations, each started in parallel.
  val future1: Future[DescribedComputation[Int]] = Future(1 ~> "Got 1")
  val future2: Future[DescribedComputation[Int]] = Future(2 ~> "Got 2")
  // Swap in this definition of future2 to see how a failure is dealt with:
  //val future2: Future[DescribedComputation[Int]] = Future(failure("Couldn't get a 2"))
  val future3: Future[DescribedComputation[Int]] = Future(3 ~> "Got 3")

  // Gather the independent futures into a single one...
  val lf: Future[List[DescribedComputation[Int]]] = Future.sequence(List(future1, future2, future3))

  // ...then fold their described results into a single tree with a new root.
  val summedFuture: Future[DescribedComputation[Int]] = lf.map(doSum)

  val ans: DescribedComputation[Int] = Await.result(summedFuture, 1.second)
  val log = ans.value.written
  val sum = ans.value.value
  println(log.show)
  println(sum.show)

  /*
   * Expected output:
   * Summed up
   *   Got 1
   *   Got 2
   *   Got 3
   * \\/-(6)
   *
   * For the failure case the output is
   * Failed: Summed up
   *   Got 1
   *   Failed: Couldn't get a 2
   *   Got 3
   * -\\/("Summed up")
   */
}
| lancewalton/treelog | src/test/scala/FuturesExample.scala | Scala | mit | 1,866 |
package main.scala.procedures
import breeze.linalg._
/**
 * Created by Francois Belletti on 7/14/15.
 *
 * Multivariate innovations algorithm: from a window of cross-covariance
 * matrices it recursively estimates the moving-average coefficient matrices
 * (theta) and the innovation covariance of an MA(q) representation.
 */
object InnovationAlgoMulti{

  /*
    Multivariate version of the innovation algorithm.
    Expects autocovariations of negative rank (-modelOrder ... modelOrder),
    i.e. crossCovMatrices(q) is the lag-0 cross-covariance and index q + k
    holds lag k.
    TODO: shield procedure against the following edge cases, autoCov.size < 1, autoCov(0) = 0.0
   */
  // @param q model order (number of MA coefficient matrices to estimate)
  // @param crossCovMatrices 2q+1 square d x d matrices, indexed by lag + q
  // @return (theta matrices of the order-q step, innovation covariance V_q)
  def apply(q: Int, crossCovMatrices: Array[DenseMatrix[Double]]): (Array[DenseMatrix[Double]], DenseMatrix[Double]) = {
    val d = crossCovMatrices(0).rows
    // thetaEsts(m - 1) holds the m coefficient matrices of recursion step m.
    val thetaEsts = (1 to q).toArray.map(Array.fill(_){DenseMatrix.zeros[Double](d, d)})
    val varEsts = Array.fill(q + 1){DenseMatrix.zeros[Double](d, d)}
    val invVarEsts = Array.fill(q + 1){DenseMatrix.zeros[Double](d, d)}
    // Base case: V_0 is the lag-0 cross-covariance.
    varEsts(0) = crossCovMatrices(q)
    invVarEsts(0) = inv(varEsts(0))
    for(m <- 1 to q){
      for(j <- 0 until m){
        // ':=' copies into the pre-allocated matrix so the subsequent '+='
        // does not mutate crossCovMatrices.
        thetaEsts(m - 1)(m - 1 - j) := crossCovMatrices(j - m + q)
        for(i <- 0 until j){
          // NOTE(review): the textbook multivariate innovations recursion uses
          // Theta * V * Theta^T terms; here only one factor is transposed --
          // verify against Brockwell & Davis before relying on symmetry.
          thetaEsts(m - 1)(m - 1 - j) += - thetaEsts(m - 1)(m - 1 - i) * varEsts(i) * thetaEsts(j)(j - 1 - i).t
        }
        // Plain '=' replaces the array slot with the newly allocated product.
        thetaEsts(m - 1)(m - 1 - j) = thetaEsts(m - 1)(m - 1 - j) * invVarEsts(j)
      }
      // Innovation covariance update for step m, starting from the lag-0 term.
      varEsts(m) = crossCovMatrices(q)
      for(i <- 0 until m){
        varEsts(m) += - thetaEsts(m - 1)(i) * varEsts(i) * thetaEsts(m - 1)(m - 1 - i)
      }
      invVarEsts(m) = inv(varEsts(m))
    }
    (thetaEsts(q - 1), varEsts(q))
  }
}
| bellettif/sparkGeoTS | sparkTS/src/main/scala/procedures/InnovationAlgoMulti.scala | Scala | bsd-3-clause | 1,453 |
// AORTA is copyright (C) 2012 Dustin Carlino, Mike Depinet, and Piyush
// Khandelwal of UT Austin
// License: GNU GPL v2
package utexas.aorta.experiments
import utexas.aorta.map.{Graph, Road, AbstractAstarRouter, SimpleHeuristic, Turn, Edge}
import utexas.aorta.sim.{EV_AgentSpawned, EV_Transition}
import utexas.aorta.sim.drivers.Agent
import utexas.aorta.sim.make.{Scenario, RouterType}
import utexas.aorta.common.{Util, RNG}
import utexas.aorta.common.algorithms.Pathfind
import scala.collection.mutable
/** Command-line entry point: parses experiment config and runs DTA. */
object DTAExperiment {
  def main(args: Array[String]) {
    new DTAExperiment(ExpConfig.from_args(args)).run_experiment()
  }
}

// Dynamic traffic assignment: iteratively rerun the scenario, rerouting a
// shrinking fraction of drivers using the link delays observed last round.
class DTAExperiment(config: ExpConfig) extends SmartExperiment(config, "dta") {
  private val iterations = 5 // TODO put in ExpConfig
  private val rng = new RNG()

  // TODO indicate which agents had routes shifted in each round.
  override def get_metrics(info: MetricInfo) = List(
    new TripTimeMetric(info), new OriginalRouteMetric(info), new LinkDelayMetric(info)
  )

  override def run() {
    val results = new mutable.ListBuffer[List[Metric]]()
    var current_scenario = scenario
    for (round <- Range(0, iterations)) {
      val metrics = run_trial(current_scenario, s"dta_$round")
      results += metrics
      // No point rerouting after the final round.
      if (round != iterations - 1) {
        val delay = metrics.last.asInstanceOf[LinkDelayMetric] // TODO bit of a hack.
        // Min round value is 2, so the rerouted fraction is 1/2, 1/3, 1/4, ...
        current_scenario = change_paths(current_scenario, delay, 1.0 / (round + 2))
      }
    }
    output_data(results.toList)
  }

  // Reroute some drivers using actual delays observed in the last trial.
  // Returns a copy of the scenario; `percent` is the probability that any
  // single agent gets a new fixed route.
  private def change_paths(
    base_scenario: Scenario, delay: LinkDelayMetric, percent: Double
  ): Scenario = {
    val graph = base_scenario.graph
    return base_scenario.copy(agents = base_scenario.agents.map(a => {
      // TODO choose lucky part of population based on budget?
      if (rng.percent(percent)) {
        // Replan!
        // TODO spawn vs start time...
        val new_path = new TimeDependentAStar(graph, delay, a.birth_tick)
          .path(graph.get_r(a.start), graph.get_r(a.route.goal)).path
          .map(_.id)
          .toArray
        a.copy(route = a.route.copy(orig_router = RouterType.Fixed, initial_path = new_path))
        // TODO make these delays available to all/some drivers, for rerouting? could introduce bad
        // biases towards regions that should be clear but arent, though.
      } else {
        a
      }
    }))
  }
}

// A* router whose edge cost is the observed delay on the next road at the
// (estimated) time the driver would reach it.
class TimeDependentAStar(graph: Graph, delays: LinkDelayMetric, start_time: Double)
  extends AbstractAstarRouter(graph) with SimpleHeuristic
{
  override def router_type = RouterType.Unusable
  override def transform(spec: Pathfind) = super.transform(spec).copy(
    // cost_sofar approximates elapsed travel time, so look up the delay at
    // start_time + cost_sofar.
    calc_cost = (prev: Road, next: Road, cost_sofar: Double) =>
      delays.delay(next, start_time + cost_sofar)
  )
}
/**
 * Records, per road, how long each agent took to traverse it, keyed by the
 * tick at which the agent entered the road. Used by DTA rerouting to estimate
 * time-dependent link costs; never written to disk.
 */
class LinkDelayMetric(info: MetricInfo) extends Metric(info) {
  override def name = "link_delay"

  // TODO will this eat too much memory?
  // One ordered (entry tick -> traversal delay) map per road.
  private val delays_per_time = info.sim.graph.roads.map(
    r => r -> new java.util.TreeMap[Double, Double]()
  ).toMap
  // Tick at which each agent entered its current road. Only set after the
  // agent crosses a turn, so the road an agent spawns on is never measured.
  private val entry_time = new mutable.HashMap[Agent, Double]()

  info.sim.listen(classOf[EV_Transition], _ match {
    // Entering a road
    case EV_Transition(a, from: Turn, to) => entry_time(a) = a.sim.tick
    // Exiting a road that we didn't spawn on
    case EV_Transition(a, from: Edge, to: Turn) if entry_time.contains(a) =>
      add_delay(entry_time(a), a.sim.tick - entry_time(a), from.road)
    case _ =>
  })

  private def add_delay(entry_time: Double, delay: Double, at: Road) {
    // Two agents can enter the same Road at the same time (on different lanes)
    // Just arbitrarily overwrite if there's a conflict
    delays_per_time(at).put(entry_time, delay)
  }

  override def output(ls: List[Metric]) {
    // Don't actually save anything! This metric only feeds in-process rerouting.
  }

  /**
   * Estimated delay on road `on` for a vehicle entering at tick `at`.
   * Many possible interpolations for this; we reuse the delay of the most
   * recent traversal that overlaps `at`, else assume free flow.
   */
  def delay(on: Road, at: Double): Double = {
    // lowerEntry returns null when no recorded entry precedes 'at'. The old
    // implementation used lowerKey and matched on 0.0, relying on Scala
    // unboxing a null java.lang.Double to 0.0 -- which also misclassified a
    // legitimate observation keyed at tick 0.0. An explicit null check on the
    // entry fixes both, and avoids a second map lookup.
    val entry = delays_per_time(on).lowerEntry(at)
    if (entry == null) {
      // 'at' is before all recorded entries: the road is assumed clear.
      on.freeflow_time
    } else {
      val observed_entry = entry.getKey
      val observed_delay = entry.getValue
      if (at > observed_entry + observed_delay) {
        // The most recent observed traversal finished before 'at'.
        on.freeflow_time
      } else {
        // This traversal overlaps 'at', so reuse its delay.
        observed_delay
      }
    }
  }
}
| dabreegster/aorta | utexas/aorta/experiments/DTAExperiment.scala | Scala | gpl-2.0 | 4,498 |
package sorm.test.types
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.matchers.ShouldMatchers
import sorm._
import sorm.test.MultiInstanceSuite
@RunWith(classOf[JUnitRunner])
// Exercises SORM's persistence support for nested Option types across all
// configured database instances (via MultiInstanceSuite).
class OptionSupportSuite extends FunSuite with ShouldMatchers with MultiInstanceSuite {
  import OptionSupportSuite._
  // The entity set registered with each test database instance.
  def entities = Entity[EntityWithOptionInOption]() :: Nil
}
object OptionSupportSuite {
  // Fixture: an entity whose only field is an Option nested in an Option.
  case class EntityWithOptionInOption
    ( optionInOption : Option[Option[Int]] )
}
| cllu/sorm2 | src/test/scala/sorm/test/types/OptionSupportSuite.scala | Scala | mit | 550 |
package io.getquill.context.jdbc.sqlite
import io.getquill.context.sql.CaseClassQuerySpec
import org.scalatest.Matchers._
// SQLite-backed run of the shared CaseClassQuerySpec: queries that map results
// directly into case classes. Queries and expected results are inherited.
class CaseClassQueryJdbcSpec extends CaseClassQuerySpec {

  val context = testContext
  import testContext._

  // Reset both tables and insert the shared fixture rows inside one
  // transaction before any test runs.
  override def beforeAll = {
    testContext.transaction {
      testContext.run(query[Contact].delete)
      testContext.run(query[Address].delete)
      testContext.run(liftQuery(peopleEntries).foreach(p => peopleInsert(p)))
      testContext.run(liftQuery(addressEntries).foreach(p => addressInsert(p)))
    }
    ()
  }

  "Example 1 - Single Case Class Mapping" in {
    testContext.run(`Ex 1 CaseClass Record Output`) should contain theSameElementsAs `Ex 1 CaseClass Record Output expected result`
  }
  "Example 1A - Single Case Class Mapping" in {
    testContext.run(`Ex 1A CaseClass Record Output`) should contain theSameElementsAs `Ex 1 CaseClass Record Output expected result`
  }
  "Example 1B - Single Case Class Mapping" in {
    testContext.run(`Ex 1B CaseClass Record Output`) should contain theSameElementsAs `Ex 1 CaseClass Record Output expected result`
  }
  "Example 2 - Single Record Mapped Join" in {
    testContext.run(`Ex 2 Single-Record Join`) should contain theSameElementsAs `Ex 2 Single-Record Join expected result`
  }
  "Example 3 - Inline Record as Filter" in {
    testContext.run(`Ex 3 Inline Record Usage`) should contain theSameElementsAs `Ex 3 Inline Record Usage exepected result`
  }
}
| mentegy/quill | quill-jdbc/src/test/scala/io/getquill/context/jdbc/sqlite/CaseClassQueryJdbcSpec.scala | Scala | apache-2.0 | 1,464 |
package examples.services
import cats.data.EitherT
package object algebra {
import cats._
  // Algebra of service operations: each case describes one remote call and the
  // type of result it yields (a timeout or the fetched value).
  sealed abstract class ServiceOp[A] extends Product with Serializable
  final case class FetchUser(userId: Long) extends ServiceOp[TimeoutException Either User]
  final case class FetchAddress(addressId: Long) extends ServiceOp[TimeoutException Either Address]

  // Free-monad programs over the ServiceOp algebra.
  type ServiceIO[A] = cats.free.Free[ServiceOp, A]

  // Smart constructors lifting each operation into the free monad.
  object ServiceOps {
    def fetchUser(userId: Long): ServiceIO[TimeoutException Either User] =
      cats.free.Free.liftF(FetchUser(userId))

    def fetchAddress(addressId: Long): ServiceIO[TimeoutException Either Address] =
      cats.free.Free.liftF(FetchAddress(addressId))
  }

  // Natural transformation interpreting ServiceOp into a target effect M,
  // delegating to the provided service instance.
  def interpreter[M[_] : Effect : Monad](implicit ins: μservice[M]): ServiceOp ~> M =
    new (ServiceOp ~> M) {
      override def apply[A](fa: ServiceOp[A]): M[A] = {
        val result = fa match {
          case FetchUser(userId) => (ins fetchUser userId)
          case FetchAddress(addressId) => (ins fetchAddress addressId)
        }
        // The cast is safe by construction: each ServiceOp case fixes A to the
        // type its branch produces, which GADT-style matching can't prove here.
        result.asInstanceOf[M[A]]
      }
    }

  // Fetch a user and then that user's address, short-circuiting on the first
  // timeout via EitherT.
  def fetchBoth(userId: Long): ServiceIO[TimeoutException Either (User, Address)] =
    (for {
      user <- EitherT[ServiceIO, TimeoutException, User](ServiceOps.fetchUser(userId))
      address <- EitherT[ServiceIO, TimeoutException, Address](ServiceOps.fetchAddress(user.addressId))
    } yield (user, address)).value
} | haghard/shapeless-playbook | src/main/scala/examples/services/algebra/package.scala | Scala | apache-2.0 | 1,408 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert.avro.registry
import com.typesafe.config.Config
import org.locationtech.geomesa.convert.avro.registry.AvroSchemaRegistryConverter.AvroSchemaRegistryConfig
import org.locationtech.geomesa.convert.avro.registry.AvroSchemaRegistryConverterFactory.AvroSchemaRegistryConfigConvert
import org.locationtech.geomesa.convert2.AbstractConverter.{BasicField, BasicOptions}
import org.locationtech.geomesa.convert2.AbstractConverterFactory
import org.locationtech.geomesa.convert2.AbstractConverterFactory.{BasicFieldConvert, BasicOptionsConvert, ConverterConfigConvert, ConverterOptionsConvert, FieldConvert, OptionConvert}
import org.locationtech.geomesa.convert2.transforms.Expression
import pureconfig.ConfigObjectCursor
import pureconfig.error.ConfigReaderFailures
// Factory wiring the schema-registry-backed Avro converter into GeoMesa's
// generic converter framework. Handles configs of type "avro-schema-registry".
class AvroSchemaRegistryConverterFactory
    extends AbstractConverterFactory[AvroSchemaRegistryConverter, AvroSchemaRegistryConfig, BasicField, BasicOptions] {

  override protected val typeToProcess: String = "avro-schema-registry"

  override protected implicit def configConvert: ConverterConfigConvert[AvroSchemaRegistryConfig] = AvroSchemaRegistryConfigConvert
  override protected implicit def fieldConvert: FieldConvert[BasicField] = BasicFieldConvert
  override protected implicit def optsConvert: ConverterOptionsConvert[BasicOptions] = BasicOptionsConvert
}

object AvroSchemaRegistryConverterFactory {

  // pureconfig (de)serialization of AvroSchemaRegistryConfig: reads/writes the
  // required "schema-registry" URL in addition to the common converter keys,
  // which the base class handles.
  object AvroSchemaRegistryConfigConvert extends ConverterConfigConvert[AvroSchemaRegistryConfig] with OptionConvert {

    override protected def decodeConfig(
        cur: ConfigObjectCursor,
        `type`: String,
        idField: Option[Expression],
        caches: Map[String, Config],
        userData: Map[String, Expression]): Either[ConfigReaderFailures, AvroSchemaRegistryConfig] = {
      // .right projections keep this source compatible with Scala 2.11, where
      // Either is not yet right-biased.
      for { schemaRegistry <- cur.atKey("schema-registry").right.flatMap(_.asString).right } yield {
        AvroSchemaRegistryConfig(`type`, schemaRegistry, idField, caches, userData)
      }
    }

    override protected def encodeConfig(config: AvroSchemaRegistryConfig, base: java.util.Map[String, AnyRef]): Unit =
      base.put("schema-registry", config.schemaRegistry)
  }
}
| aheyne/geomesa | geomesa-convert/geomesa-convert-avro-schema-registry/src/main/scala/org/locationtech/geomesa/convert/avro/registry/AvroSchemaRegistryConverterFactory.scala | Scala | apache-2.0 | 2,659 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc
import scala.tools.nsc.doc.DocFactory
import scala.tools.nsc.reporters.ConsoleReporter
import scala.reflect.internal.Reporter
import scala.reflect.internal.util.{ FakePos, NoPosition, Position }
/** The main class for scaladoc, a front-end for the Scala compiler
 *  that generates documentation from source files.
 */
class ScalaDoc {
  val versionMsg = "Scaladoc %s -- %s".format(Properties.versionString, Properties.copyrightString)

  /**
   * Parses `args`, handles the informational flags (version/help/plugins),
   * and otherwise runs documentation generation.
   *
   * @return true when no real errors were reported
   */
  def process(args: Array[String]): Boolean = {
    var reporter: ScalaDocReporter = null
    // The settings' error/echo callbacks close over the (still-null) reporter
    // var; it is assigned immediately after, before any callback can fire.
    val docSettings = new doc.Settings(msg => reporter.error(FakePos("scaladoc"), msg + "\\n scaladoc -help gives more information"),
      msg => reporter.echo(msg))
    reporter = new ScalaDocReporter(docSettings)
    val command = new ScalaDoc.Command(args.toList, docSettings)
    def hasFiles = command.files.nonEmpty || docSettings.uncompilableFiles.nonEmpty

    if (docSettings.version.value)
      reporter.echo(versionMsg)
    else if (docSettings.Xhelp.value)
      reporter.echo(command.xusageMsg)
    else if (docSettings.Yhelp.value)
      reporter.echo(command.yusageMsg)
    else if (docSettings.showPlugins.value)
      reporter.warning(null, "Plugins are not available when using Scaladoc")
    else if (docSettings.showPhases.value)
      reporter.warning(null, "Phases are restricted when using Scaladoc")
    else if (docSettings.help.value || !hasFiles)
      reporter.echo(command.usageMsg)
    else
      try { new DocFactory(reporter, docSettings) document command.files }
      catch {
        case ex @ FatalError(msg) =>
          if (docSettings.debug.value) ex.printStackTrace()
          reporter.error(null, "fatal error: " + msg)
      }
      // finish() flushes any delayed summary messages (see ScalaDocReporter).
      finally reporter.finish()

    // hasErrors is overridden to lie, so consult the real flag here.
    !reporter.reallyHasErrors
  }
}
/** The Scaladoc reporter adds summary messages to the `ConsoleReporter`.
 *
 *  Use the `summaryX` methods to add unique summarizing message to the end of
 *  the run.
 */
class ScalaDocReporter(settings: Settings) extends ConsoleReporter(settings) {
  import scala.collection.mutable

  // Pretend to be error-free so the Global instance does not trash all the
  // symbols just because there was an error; `reallyHasErrors` reports truth.
  override def hasErrors = false
  def reallyHasErrors = super.hasErrors

  // Keyed by (position, message) so duplicates collapse; insertion order is
  // preserved for printing.
  private[this] val delayedMessages = mutable.LinkedHashMap.empty[(Position, String), () => Unit]

  /** Eliminates messages if both `pos` and `msg` are equal to existing element */
  def addDelayedMessage(pos: Position, msg: String, print: () => Unit): Unit = {
    delayedMessages.put((pos, msg), print)
    ()
  }

  def printDelayedMessages(): Unit =
    for (emit <- delayedMessages.values) emit()

  override def finish(): Unit = {
    printDelayedMessages()
    super.finish()
  }
}
object ScalaDoc extends ScalaDoc {

  /** Compiler command specialized for scaladoc: folds scaladoc-specific
   *  options into the usage message alongside the standard scalac ones. */
  class Command(arguments: List[String], settings: doc.Settings) extends CompilerCommand(arguments, settings) {
    override def cmdName = "scaladoc"
    override def usageMsg = (
      createUsageMsg("where possible scaladoc", explain = false)(x => x.isStandard && settings.isScaladocSpecific(x.name)) +
      "\\n\\nStandard scalac options also available:" +
      optionsMessage(x => x.isStandard && !settings.isScaladocSpecific(x.name))
    )
  }

  def main(args: Array[String]): Unit = {
    System.exit(if (process(args)) 0 else 1)
  }

  /** Routes summary messages through a ScalaDocReporter's delayed-message
   *  queue when available, otherwise emits them immediately. */
  implicit class SummaryReporter(val rep: Reporter) extends AnyVal {
    /** Adds print lambda to ScalaDocReporter, executes it on other reporter */
    private[this] def summaryMessage(pos: Position, msg: String, print: () => Unit): Unit = rep match {
      case r: ScalaDocReporter => r.addDelayedMessage(pos, msg, print)
      case _ => print()
    }

    def summaryEcho(pos: Position, msg: String): Unit = summaryMessage(pos, msg, () => rep.echo(pos, msg))
    def summaryError(pos: Position, msg: String): Unit = summaryMessage(pos, msg, () => rep.error(pos, msg))
    def summaryWarning(pos: Position, msg: String): Unit = summaryMessage(pos, msg, () => rep.warning(pos, msg))

    def summaryEcho(msg: String): Unit = summaryEcho(NoPosition, msg)
    def summaryError(msg: String): Unit = summaryError(NoPosition, msg)
    def summaryWarning(msg: String): Unit = summaryWarning(NoPosition, msg)
  }
}
| martijnhoekstra/scala | src/scaladoc/scala/tools/nsc/ScalaDoc.scala | Scala | apache-2.0 | 4,598 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.client
import java.lang.{Boolean => JBoolean, Integer => JInteger, Long => JLong}
import java.lang.reflect.{InvocationTargetException, Method, Modifier}
import java.net.URI
import java.util.{ArrayList => JArrayList, List => JList, Locale, Map => JMap, Set => JSet}
import java.util.concurrent.TimeUnit
import scala.collection.JavaConverters._
import scala.util.Try
import scala.util.control.NonFatal
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.metastore.api.{EnvironmentContext, Function => HiveFunction, FunctionType}
import org.apache.hadoop.hive.metastore.api.{MetaException, PrincipalType, ResourceType, ResourceUri}
import org.apache.hadoop.hive.ql.Driver
import org.apache.hadoop.hive.ql.io.AcidUtils
import org.apache.hadoop.hive.ql.metadata.{Hive, HiveException, Partition, Table}
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc
import org.apache.hadoop.hive.ql.processors.{CommandProcessor, CommandProcessorFactory}
import org.apache.hadoop.hive.ql.session.SessionState
import org.apache.hadoop.hive.serde.serdeConstants
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.FunctionIdentifier
import org.apache.spark.sql.catalyst.analysis.NoSuchPermanentFunctionException
import org.apache.spark.sql.catalyst.catalog.{CatalogFunction, CatalogTablePartition, CatalogUtils, FunctionResource, FunctionResourceType}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{IntegralType, StringType}
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.Utils
/**
 * A shim that defines the interface between [[HiveClientImpl]] and the underlying Hive library used
 * to talk to the metastore. Each Hive version has its own implementation of this class, defining
 * version-specific version of needed functions.
 *
 * The guideline for writing shims is:
 * - always extend from the previous version unless really not possible
 * - initialize methods in lazy vals, both for quicker access for multiple invocations, and to
 *   avoid runtime errors due to the above guideline.
 */
private[client] sealed abstract class Shim {

  /**
   * Set the current SessionState to the given SessionState. Also, set the context classloader of
   * the current thread to the one set in the HiveConf of this given `state`.
   */
  def setCurrentSessionState(state: SessionState): Unit

  /**
   * This shim is necessary because the return type is different on different versions of Hive.
   * All parameters are the same, though.
   */
  def getDataLocation(table: Table): Option[String]

  def setDataLocation(table: Table, loc: String): Unit

  def getAllPartitions(hive: Hive, table: Table): Seq[Partition]

  def getPartitionsByFilter(hive: Hive, table: Table, predicates: Seq[Expression]): Seq[Partition]

  def getCommandProcessor(token: String, conf: HiveConf): CommandProcessor

  def getDriverResults(driver: Driver): Seq[String]

  def getMetastoreClientConnectRetryDelayMillis(conf: HiveConf): Long

  def alterTable(hive: Hive, tableName: String, table: Table): Unit

  def alterPartitions(hive: Hive, tableName: String, newParts: JList[Partition]): Unit

  def createPartitions(
      hive: Hive,
      db: String,
      table: String,
      parts: Seq[CatalogTablePartition],
      ignoreIfExists: Boolean): Unit

  def loadPartition(
      hive: Hive,
      loadPath: Path,
      tableName: String,
      partSpec: JMap[String, String],
      replace: Boolean,
      inheritTableSpecs: Boolean,
      isSkewedStoreAsSubdir: Boolean,
      isSrcLocal: Boolean): Unit

  def loadTable(
      hive: Hive,
      loadPath: Path,
      tableName: String,
      replace: Boolean,
      isSrcLocal: Boolean): Unit

  def loadDynamicPartitions(
      hive: Hive,
      loadPath: Path,
      tableName: String,
      partSpec: JMap[String, String],
      replace: Boolean,
      numDP: Int,
      listBucketingEnabled: Boolean): Unit

  def createFunction(hive: Hive, db: String, func: CatalogFunction): Unit

  def dropFunction(hive: Hive, db: String, name: String): Unit

  def renameFunction(hive: Hive, db: String, oldName: String, newName: String): Unit

  def alterFunction(hive: Hive, db: String, func: CatalogFunction): Unit

  def getFunctionOption(hive: Hive, db: String, name: String): Option[CatalogFunction]

  def listFunctions(hive: Hive, db: String, pattern: String): Seq[String]

  def dropIndex(hive: Hive, dbName: String, tableName: String, indexName: String): Unit

  def dropTable(
      hive: Hive,
      dbName: String,
      tableName: String,
      deleteData: Boolean,
      ignoreIfNotExists: Boolean,
      purge: Boolean): Unit

  def dropPartition(
      hive: Hive,
      dbName: String,
      tableName: String,
      part: JList[String],
      deleteData: Boolean,
      purge: Boolean): Unit

  /** Looks up a static method reflectively and verifies it really is static. */
  protected def findStaticMethod(klass: Class[_], name: String, args: Class[_]*): Method = {
    val method = findMethod(klass, name, args: _*)
    require(Modifier.isStatic(method.getModifiers()),
      s"Method $name of class $klass is not static.")
    method
  }

  // Throws NoSuchMethodException when the signature does not exist in the
  // loaded Hive version, surfacing version mismatches early.
  protected def findMethod(klass: Class[_], name: String, args: Class[_]*): Method = {
    klass.getMethod(name, args: _*)
  }
}
private[client] class Shim_v0_12 extends Shim with Logging {
// See HIVE-12224, HOLD_DDLTIME was broken as soon as it landed
  // See HIVE-12224, HOLD_DDLTIME was broken as soon as it landed
  protected lazy val holdDDLTime = JBoolean.FALSE

  // deletes the underlying data along with metadata
  protected lazy val deleteDataInDropIndex = JBoolean.TRUE

  // The members below reflectively resolve Hive 0.12 method signatures.
  // They are lazy so a missing method only fails when actually invoked.

  private lazy val startMethod =
    findStaticMethod(
      classOf[SessionState],
      "start",
      classOf[SessionState])

  private lazy val getDataLocationMethod = findMethod(classOf[Table], "getDataLocation")

  private lazy val setDataLocationMethod =
    findMethod(
      classOf[Table],
      "setDataLocation",
      classOf[URI])

  private lazy val getAllPartitionsMethod =
    findMethod(
      classOf[Hive],
      "getAllPartitionsForPruner",
      classOf[Table])

  private lazy val getCommandProcessorMethod =
    findStaticMethod(
      classOf[CommandProcessorFactory],
      "get",
      classOf[String],
      classOf[HiveConf])

  private lazy val getDriverResultsMethod =
    findMethod(
      classOf[Driver],
      "getResults",
      classOf[JArrayList[String]])

  private lazy val createPartitionMethod =
    findMethod(
      classOf[Hive],
      "createPartition",
      classOf[Table],
      classOf[JMap[String, String]],
      classOf[Path],
      classOf[JMap[String, String]],
      classOf[String],
      classOf[String],
      JInteger.TYPE,
      classOf[JList[Object]],
      classOf[String],
      classOf[JMap[String, String]],
      classOf[JList[Object]],
      classOf[JList[Object]])

  private lazy val loadPartitionMethod =
    findMethod(
      classOf[Hive],
      "loadPartition",
      classOf[Path],
      classOf[String],
      classOf[JMap[String, String]],
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE)

  private lazy val loadTableMethod =
    findMethod(
      classOf[Hive],
      "loadTable",
      classOf[Path],
      classOf[String],
      JBoolean.TYPE,
      JBoolean.TYPE)

  private lazy val loadDynamicPartitionsMethod =
    findMethod(
      classOf[Hive],
      "loadDynamicPartitions",
      classOf[Path],
      classOf[String],
      classOf[JMap[String, String]],
      JBoolean.TYPE,
      JInteger.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE)

  private lazy val dropIndexMethod =
    findMethod(
      classOf[Hive],
      "dropIndex",
      classOf[String],
      classOf[String],
      classOf[String],
      JBoolean.TYPE)

  private lazy val alterTableMethod =
    findMethod(
      classOf[Hive],
      "alterTable",
      classOf[String],
      classOf[Table])

  private lazy val alterPartitionsMethod =
    findMethod(
      classOf[Hive],
      "alterPartitions",
      classOf[String],
      classOf[JList[Partition]])
override def setCurrentSessionState(state: SessionState): Unit = {
// Starting from Hive 0.13, setCurrentSessionState will internally override
// the context class loader of the current thread by the class loader set in
// the conf of the SessionState. So, for this Hive 0.12 shim, we add the same
// behavior and make shim.setCurrentSessionState of all Hive versions have the
// consistent behavior.
Thread.currentThread().setContextClassLoader(state.getConf.getClassLoader)
startMethod.invoke(null, state)
}
override def getDataLocation(table: Table): Option[String] =
Option(getDataLocationMethod.invoke(table)).map(_.toString())
override def setDataLocation(table: Table, loc: String): Unit =
setDataLocationMethod.invoke(table, new URI(loc))
// Follows exactly the same logic of DDLTask.createPartitions in Hive 0.12
override def createPartitions(
    hive: Hive,
    database: String,
    tableName: String,
    parts: Seq[CatalogTablePartition],
    ignoreIfExists: Boolean): Unit = {
  val table = hive.getTable(database, tableName)
  parts.foreach { s =>
    // Resolve the partition location relative to the table path when one is given;
    // null means "let Hive pick the default location".
    val location = s.storage.locationUri.map(
      uri => new Path(table.getPath, new Path(uri))).orNull
    val params = if (s.parameters.nonEmpty) s.parameters.asJava else null
    val spec = s.spec.asJava
    if (hive.getPartition(table, spec, false) != null && ignoreIfExists) {
      // Ignore this partition since it already exists and ignoreIfExists == true
    } else {
      if (location == null && table.isView()) {
        throw new HiveException("LOCATION clause illegal for view partition");
      }
      // One metastore round-trip per partition (0.13+ batches via AddPartitionDesc).
      createPartitionMethod.invoke(
        hive,
        table,
        spec,
        location,
        params, // partParams
        null, // inputFormat
        null, // outputFormat
        -1: JInteger, // numBuckets
        null, // cols
        null, // serializationLib
        null, // serdeParams
        null, // bucketCols
        null) // sortCols
    }
  }
}
// Fetches all partitions of a table; the underlying Hive call returns a java Set.
override def getAllPartitions(hive: Hive, table: Table): Seq[Partition] =
  getAllPartitionsMethod.invoke(hive, table).asInstanceOf[JSet[Partition]].asScala.toSeq

override def getPartitionsByFilter(
    hive: Hive,
    table: Table,
    predicates: Seq[Expression]): Seq[Partition] = {
  // getPartitionsByFilter() doesn't support binary comparison ops in Hive 0.12.
  // See HIVE-4888.
  logDebug("Hive 0.12 doesn't support predicate pushdown to metastore. " +
    "Please use Hive 0.13 or higher.")
  // Fall back to fetching everything; callers are expected to filter client-side.
  getAllPartitions(hive, table)
}

// Hive 0.12's CommandProcessorFactory.get takes the raw token (0.13+ takes an array).
override def getCommandProcessor(token: String, conf: HiveConf): CommandProcessor =
  getCommandProcessorMethod.invoke(null, token, conf).asInstanceOf[CommandProcessor]

// Drains the driver's results into a list; in this Hive version each row is a String.
override def getDriverResults(driver: Driver): Seq[String] = {
  val res = new JArrayList[String]()
  getDriverResultsMethod.invoke(driver, res)
  res.asScala
}

// The 0.12 config value is an int in seconds; convert to milliseconds.
override def getMetastoreClientConnectRetryDelayMillis(conf: HiveConf): Long = {
  conf.getIntVar(HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY) * 1000
}
override def loadPartition(
    hive: Hive,
    loadPath: Path,
    tableName: String,
    partSpec: JMap[String, String],
    replace: Boolean,
    inheritTableSpecs: Boolean,
    isSkewedStoreAsSubdir: Boolean,
    isSrcLocal: Boolean): Unit = {
  // isSrcLocal is ignored: the Hive 0.12 signature does not take it. The hardcoded
  // JBoolean.FALSE is presumably the holdDDLTime flag — TODO confirm against the
  // Hive 0.12 Hive.loadPartition signature.
  loadPartitionMethod.invoke(hive, loadPath, tableName, partSpec, replace: JBoolean,
    JBoolean.FALSE, inheritTableSpecs: JBoolean, isSkewedStoreAsSubdir: JBoolean)
}

override def loadTable(
    hive: Hive,
    loadPath: Path,
    tableName: String,
    replace: Boolean,
    isSrcLocal: Boolean): Unit = {
  // isSrcLocal is ignored for the same reason as in loadPartition above.
  loadTableMethod.invoke(hive, loadPath, tableName, replace: JBoolean, holdDDLTime)
}

override def loadDynamicPartitions(
    hive: Hive,
    loadPath: Path,
    tableName: String,
    partSpec: JMap[String, String],
    replace: Boolean,
    numDP: Int,
    listBucketingEnabled: Boolean): Unit = {
  loadDynamicPartitionsMethod.invoke(hive, loadPath, tableName, partSpec, replace: JBoolean,
    numDP: JInteger, holdDDLTime, listBucketingEnabled: JBoolean)
}
override def dropIndex(hive: Hive, dbName: String, tableName: String, indexName: String): Unit = {
  dropIndexMethod.invoke(hive, dbName, tableName, indexName, deleteDataInDropIndex)
}

override def dropTable(
    hive: Hive,
    dbName: String,
    tableName: String,
    deleteData: Boolean,
    ignoreIfNotExists: Boolean,
    purge: Boolean): Unit = {
  // Table-level PURGE requires a newer Hive; see Shim_v0_14.dropTable.
  if (purge) {
    throw new UnsupportedOperationException("DROP TABLE ... PURGE")
  }
  hive.dropTable(dbName, tableName, deleteData, ignoreIfNotExists)
}

override def alterTable(hive: Hive, tableName: String, table: Table): Unit = {
  alterTableMethod.invoke(hive, tableName, table)
}

override def alterPartitions(hive: Hive, tableName: String, newParts: JList[Partition]): Unit = {
  alterPartitionsMethod.invoke(hive, tableName, newParts)
}

override def dropPartition(
    hive: Hive,
    dbName: String,
    tableName: String,
    part: JList[String],
    deleteData: Boolean,
    purge: Boolean): Unit = {
  // Partition-level PURGE requires a newer Hive; see Shim_v1_2.dropPartition.
  if (purge) {
    throw new UnsupportedOperationException("ALTER TABLE ... DROP PARTITION ... PURGE")
  }
  hive.dropPartition(dbName, tableName, part, deleteData)
}
// The Hive 0.12 metastore has no permanent-function support, so creation fails
// loudly and the remaining function operations act as if no functions exist.
override def createFunction(hive: Hive, db: String, func: CatalogFunction): Unit = {
  throw new AnalysisException("Hive 0.12 doesn't support creating permanent functions. " +
    "Please use Hive 0.13 or higher.")
}

def dropFunction(hive: Hive, db: String, name: String): Unit = {
  throw new NoSuchPermanentFunctionException(db, name)
}

def renameFunction(hive: Hive, db: String, oldName: String, newName: String): Unit = {
  throw new NoSuchPermanentFunctionException(db, oldName)
}

def alterFunction(hive: Hive, db: String, func: CatalogFunction): Unit = {
  throw new NoSuchPermanentFunctionException(db, func.identifier.funcName)
}

def getFunctionOption(hive: Hive, db: String, name: String): Option[CatalogFunction] = {
  None
}

def listFunctions(hive: Hive, db: String, pattern: String): Seq[String] = {
  Seq.empty[String]
}
}
private[client] class Shim_v0_13 extends Shim_v0_12 {
// SessionState.setCurrentSessionState replaces the SessionState.start used by the
// 0.12 shim.
private lazy val setCurrentSessionStateMethod =
  findStaticMethod(
    classOf[SessionState],
    "setCurrentSessionState",
    classOf[SessionState])

// setDataLocation takes a Path here (the 0.12 shim passed a URI).
private lazy val setDataLocationMethod =
  findMethod(
    classOf[Table],
    "setDataLocation",
    classOf[Path])

private lazy val getAllPartitionsMethod =
  findMethod(
    classOf[Hive],
    "getAllPartitionsOf",
    classOf[Table])

// Server-side partition pruning; takes the filter as a rendered string
// (see convertFilters below).
private lazy val getPartitionsByFilterMethod =
  findMethod(
    classOf[Hive],
    "getPartitionsByFilter",
    classOf[Table],
    classOf[String])

// CommandProcessorFactory.get takes a tokenized command (String[]) from 0.13 on.
private lazy val getCommandProcessorMethod =
  findStaticMethod(
    classOf[CommandProcessorFactory],
    "get",
    classOf[Array[String]],
    classOf[HiveConf])

// Driver.getResults fills a List[Object]; rows may be String or Object[] — see
// getDriverResults below.
private lazy val getDriverResultsMethod =
  findMethod(
    classOf[Driver],
    "getResults",
    classOf[JList[Object]])
// From 0.13 on, setCurrentSessionState itself installs the session's class loader,
// so no manual context-class-loader handling is needed here.
override def setCurrentSessionState(state: SessionState): Unit =
  setCurrentSessionStateMethod.invoke(null, state)

override def setDataLocation(table: Table, loc: String): Unit =
  setDataLocationMethod.invoke(table, new Path(loc))

// Batches all partitions into a single AddPartitionDesc and hands it to Hive in
// one call (the 0.12 shim issued one call per partition).
override def createPartitions(
    hive: Hive,
    db: String,
    table: String,
    parts: Seq[CatalogTablePartition],
    ignoreIfExists: Boolean): Unit = {
  val addPartitionDesc = new AddPartitionDesc(db, table, ignoreIfExists)
  parts.zipWithIndex.foreach { case (s, i) =>
    addPartitionDesc.addPartition(
      s.spec.asJava, s.storage.locationUri.map(CatalogUtils.URIToString(_)).orNull)
    if (s.parameters.nonEmpty) {
      addPartitionDesc.getPartition(i).setPartParams(s.parameters.asJava)
    }
  }
  hive.createPartitions(addPartitionDesc)
}
override def getAllPartitions(hive: Hive, table: Table): Seq[Partition] =
  getAllPartitionsMethod.invoke(hive, table).asInstanceOf[JSet[Partition]].asScala.toSeq

// Converts Spark's CatalogFunction into Hive's metastore Function representation.
private def toHiveFunction(f: CatalogFunction, db: String): HiveFunction = {
  val resourceUris = f.resources.map { resource =>
    new ResourceUri(ResourceType.valueOf(
      resource.resourceType.resourceType.toUpperCase(Locale.ROOT)), resource.uri)
  }
  new HiveFunction(
    f.identifier.funcName,
    db,
    f.className,
    null, // NOTE(review): presumably the owner name, which Spark does not track — confirm
    PrincipalType.USER,
    (System.currentTimeMillis / 1000).toInt, // creation time in epoch seconds
    FunctionType.JAVA,
    resourceUris.asJava)
}

override def createFunction(hive: Hive, db: String, func: CatalogFunction): Unit = {
  hive.createFunction(toHiveFunction(func, db))
}
override def dropFunction(hive: Hive, db: String, name: String): Unit = {
  hive.dropFunction(db, name)
}

// Rename is implemented as an alterFunction call with the identifier replaced;
// fails with NoSuchPermanentFunctionException if the old function is absent.
override def renameFunction(hive: Hive, db: String, oldName: String, newName: String): Unit = {
  val catalogFunc = getFunctionOption(hive, db, oldName)
    .getOrElse(throw new NoSuchPermanentFunctionException(db, oldName))
    .copy(identifier = FunctionIdentifier(newName, Some(db)))
  val hiveFunc = toHiveFunction(catalogFunc, db)
  hive.alterFunction(db, oldName, hiveFunc)
}

override def alterFunction(hive: Hive, db: String, func: CatalogFunction): Unit = {
  hive.alterFunction(db, func.identifier.funcName, toHiveFunction(func, db))
}
// Inverse of toHiveFunction: maps a metastore Function back to a CatalogFunction.
private def fromHiveFunction(hf: HiveFunction): CatalogFunction = {
  val name = FunctionIdentifier(hf.getFunctionName, Option(hf.getDbName))
  val resources = hf.getResourceUris.asScala.map { uri =>
    val resourceType = uri.getResourceType() match {
      case ResourceType.ARCHIVE => "archive"
      case ResourceType.FILE => "file"
      case ResourceType.JAR => "jar"
      case r => throw new AnalysisException(s"Unknown resource type: $r")
    }
    FunctionResource(FunctionResourceType.fromString(resourceType), uri.getUri())
  }
  CatalogFunction(name, hf.getClassName, resources)
}

// Returns None when the function does not exist. Hive may report absence either
// by returning null or by throwing, hence the message check on the cause chain;
// any other exception propagates.
override def getFunctionOption(hive: Hive, db: String, name: String): Option[CatalogFunction] = {
  try {
    Option(hive.getFunction(db, name)).map(fromHiveFunction)
  } catch {
    case NonFatal(e) if isCausedBy(e, s"$name does not exist") =>
      None
  }
}
/**
 * Returns true if `e`, or any throwable in its cause chain, has a message
 * containing `matchMessage`.
 *
 * Fixes over the previous version:
 *  - renamed the misspelled parameter `matchMassage` (private method, all call
 *    sites pass positionally, so the rename is interface-safe);
 *  - `Throwable.getMessage` may be null (e.g. exceptions built only from a
 *    cause), which previously triggered a NullPointerException;
 *  - annotated with @tailrec so the compiler guarantees constant stack usage
 *    even on long cause chains.
 */
@scala.annotation.tailrec
private def isCausedBy(e: Throwable, matchMessage: String): Boolean = {
  if (e.getMessage != null && e.getMessage.contains(matchMessage)) {
    true
  } else if (e.getCause != null) {
    isCausedBy(e.getCause, matchMessage)
  } else {
    false
  }
}
// Lists the names of functions in `db` whose names match `pattern`.
override def listFunctions(hive: Hive, db: String, pattern: String): Seq[String] = {
  hive.getFunctions(db, pattern).asScala
}
/**
 * Converts catalyst expression to the format that Hive's getPartitionsByFilter() expects, i.e.
 * a string that represents partition predicates like "str_key=\"value\" and int_key=1 ...".
 *
 * Unsupported predicates are skipped.
 */
def convertFilters(table: Table, filters: Seq[Expression]): String = {
  /**
   * An extractor that matches all binary comparison operators except null-safe equality.
   *
   * Null-safe equality is not supported by Hive metastore partition predicate pushdown
   */
  object SpecialBinaryComparison {
    def unapply(e: BinaryComparison): Option[(Expression, Expression)] = e match {
      case _: EqualNullSafe => None
      case _ => Some((e.left, e.right))
    }
  }

  // A literal that can be rendered into the metastore filter language:
  // integral types verbatim, strings quoted.
  object ExtractableLiteral {
    def unapply(expr: Expression): Option[String] = expr match {
      case Literal(value, _: IntegralType) => Some(value.toString)
      case Literal(value, _: StringType) => Some(quoteStringLiteral(value.toString))
      case _ => None
    }
  }

  // All-or-nothing: succeeds only when every expression in the list is extractable.
  object ExtractableLiterals {
    def unapply(exprs: Seq[Expression]): Option[Seq[String]] = {
      val extractables = exprs.map(ExtractableLiteral.unapply)
      if (extractables.nonEmpty && extractables.forall(_.isDefined)) {
        Some(extractables.map(_.get))
      } else {
        None
      }
    }
  }

  // Same idea as ExtractableLiterals, but for the raw values carried by an InSet.
  object ExtractableValues {
    private lazy val valueToLiteralString: PartialFunction[Any, String] = {
      case value: Byte => value.toString
      case value: Short => value.toString
      case value: Int => value.toString
      case value: Long => value.toString
      case value: UTF8String => quoteStringLiteral(value.toString)
    }

    def unapply(values: Set[Any]): Option[Seq[String]] = {
      val extractables = values.toSeq.map(valueToLiteralString.lift)
      if (extractables.nonEmpty && extractables.forall(_.isDefined)) {
        Some(extractables.map(_.get))
      } else {
        None
      }
    }
  }

  object NonVarcharAttribute {
    // hive varchar is treated as catalyst string, but hive varchar can't be pushed down.
    private val varcharKeys = table.getPartitionKeys.asScala
      .filter(col => col.getType.startsWith(serdeConstants.VARCHAR_TYPE_NAME) ||
        col.getType.startsWith(serdeConstants.CHAR_TYPE_NAME))
      .map(col => col.getName).toSet

    def unapply(attr: Attribute): Option[String] = {
      if (varcharKeys.contains(attr.name)) {
        None
      } else {
        Some(attr.name)
      }
    }
  }

  // IN (a, b, ...) is rendered as (key = a or key = b or ...).
  def convertInToOr(name: String, values: Seq[String]): String = {
    values.map(value => s"$name = $value").mkString("(", " or ", ")")
  }

  val useAdvanced = SQLConf.get.advancedPartitionPredicatePushdownEnabled

  // Returns None for predicates that cannot be pushed down; such predicates are
  // simply skipped in the final conjunction.
  def convert(expr: Expression): Option[String] = expr match {
    case In(NonVarcharAttribute(name), ExtractableLiterals(values)) if useAdvanced =>
      Some(convertInToOr(name, values))
    case InSet(NonVarcharAttribute(name), ExtractableValues(values)) if useAdvanced =>
      Some(convertInToOr(name, values))
    case op @ SpecialBinaryComparison(NonVarcharAttribute(name), ExtractableLiteral(value)) =>
      Some(s"$name ${op.symbol} $value")
    case op @ SpecialBinaryComparison(ExtractableLiteral(value), NonVarcharAttribute(name)) =>
      Some(s"$value ${op.symbol} $name")
    case And(expr1, expr2) if useAdvanced =>
      // A conjunction may keep one side when only the other is unsupported.
      val converted = convert(expr1) ++ convert(expr2)
      if (converted.isEmpty) {
        None
      } else {
        Some(converted.mkString("(", " and ", ")"))
      }
    case Or(expr1, expr2) if useAdvanced =>
      // A disjunction must convert both sides, or be dropped entirely.
      for {
        left <- convert(expr1)
        right <- convert(expr2)
      } yield s"($left or $right)"
    case _ => None
  }

  filters.flatMap(convert).mkString(" and ")
}
// Quotes a string literal for the metastore filter language, using whichever
// quote character does not occur in the value. A value containing both quote
// characters cannot be expressed and is rejected.
private def quoteStringLiteral(str: String): String = {
  if (!str.contains("\\"")) {
    s""""$str""""
  } else if (!str.contains("'")) {
    s"""'$str'"""
  } else {
    throw new UnsupportedOperationException(
      """Partition filter cannot have both `"` and `'` characters""")
  }
}
override def getPartitionsByFilter(
    hive: Hive,
    table: Table,
    predicates: Seq[Expression]): Seq[Partition] = {
  // Hive getPartitionsByFilter() takes a string that represents partition
  // predicates like "str_key=\"value\" and int_key=1 ..."
  val filter = convertFilters(table, predicates)
  val partitions =
    if (filter.isEmpty) {
      // Nothing could be pushed down; fetch everything.
      getAllPartitionsMethod.invoke(hive, table).asInstanceOf[JSet[Partition]]
    } else {
      logDebug(s"Hive metastore filter is '$filter'.")
      val tryDirectSqlConfVar = HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL
      // We should get this config value from the metastore; otherwise we hit SPARK-18681.
      // To be compatible with hive-0.12 and hive-0.13, in the future we can achieve this by:
      // val tryDirectSql = hive.getMetaConf(tryDirectSqlConfVar.varname).toBoolean
      val tryDirectSql = hive.getMSC.getConfigValue(tryDirectSqlConfVar.varname,
        tryDirectSqlConfVar.defaultBoolVal.toString).toBoolean
      try {
        // Hive may throw an exception when calling this method in some circumstances, such as
        // when filtering on a non-string partition column when the hive config key
        // hive.metastore.try.direct.sql is false
        getPartitionsByFilterMethod.invoke(hive, table, filter)
          .asInstanceOf[JArrayList[Partition]]
      } catch {
        case ex: InvocationTargetException if ex.getCause.isInstanceOf[MetaException] &&
            !tryDirectSql =>
          logWarning("Caught Hive MetaException attempting to get partition metadata by " +
            "filter from Hive. Falling back to fetching all partition metadata, which will " +
            "degrade performance. Modifying your Hive metastore configuration to set " +
            s"${tryDirectSqlConfVar.varname} to true may resolve this problem.", ex)
          // HiveShim clients are expected to handle a superset of the requested partitions
          getAllPartitionsMethod.invoke(hive, table).asInstanceOf[JSet[Partition]]
        case ex: InvocationTargetException if ex.getCause.isInstanceOf[MetaException] &&
            tryDirectSql =>
          throw new RuntimeException("Caught Hive MetaException attempting to get partition " +
            "metadata by filter from Hive. You can set the Spark configuration setting " +
            s"${SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key} to false to work around this " +
            "problem, however this will result in degraded performance. Please report a bug: " +
            "https://issues.apache.org/jira/browse/SPARK", ex)
      }
    }
  partitions.asScala.toSeq
}
// 0.13's factory takes the tokenized command; wrap the single token in an array.
override def getCommandProcessor(token: String, conf: HiveConf): CommandProcessor =
  getCommandProcessorMethod.invoke(null, Array(token), conf).asInstanceOf[CommandProcessor]

// A result row can be either a plain String or an Object[] whose first element
// is the string value; normalize both to String.
override def getDriverResults(driver: Driver): Seq[String] = {
  val res = new JArrayList[Object]()
  getDriverResultsMethod.invoke(driver, res)
  res.asScala.map { r =>
    r match {
      case s: String => s
      case a: Array[Object] => a(0).asInstanceOf[String]
    }
  }
}
}
/**
 * Shim for Hive 0.14. Compared to 0.13: the load* methods gain
 * isSrcLocal/ACID-related flags, dropTable gains a PURGE flag, and config time
 * values are read through the unit-aware getTimeVar.
 */
private[client] class Shim_v0_14 extends Shim_v0_13 {

  // true if this is an ACID operation
  protected lazy val isAcid = JBoolean.FALSE
  // true if list bucketing enabled
  protected lazy val isSkewedStoreAsSubdir = JBoolean.FALSE

  private lazy val loadPartitionMethod =
    findMethod(
      classOf[Hive],
      "loadPartition",
      classOf[Path],
      classOf[String],
      classOf[JMap[String, String]],
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE)
  private lazy val loadTableMethod =
    findMethod(
      classOf[Hive],
      "loadTable",
      classOf[Path],
      classOf[String],
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE)
  private lazy val loadDynamicPartitionsMethod =
    findMethod(
      classOf[Hive],
      "loadDynamicPartitions",
      classOf[Path],
      classOf[String],
      classOf[JMap[String, String]],
      JBoolean.TYPE,
      JInteger.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE)
  // dropTable(String, String, boolean deleteData, boolean ignoreIfNotExists, boolean purge)
  private lazy val dropTableMethod =
    findMethod(
      classOf[Hive],
      "dropTable",
      classOf[String],
      classOf[String],
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE)
  private lazy val getTimeVarMethod =
    findMethod(
      classOf[HiveConf],
      "getTimeVar",
      classOf[HiveConf.ConfVars],
      classOf[TimeUnit])

  override def loadPartition(
      hive: Hive,
      loadPath: Path,
      tableName: String,
      partSpec: JMap[String, String],
      replace: Boolean,
      inheritTableSpecs: Boolean,
      isSkewedStoreAsSubdir: Boolean,
      isSrcLocal: Boolean): Unit = {
    loadPartitionMethod.invoke(hive, loadPath, tableName, partSpec, replace: JBoolean,
      holdDDLTime, inheritTableSpecs: JBoolean, isSkewedStoreAsSubdir: JBoolean,
      isSrcLocal: JBoolean, isAcid)
  }

  override def loadTable(
      hive: Hive,
      loadPath: Path,
      tableName: String,
      replace: Boolean,
      isSrcLocal: Boolean): Unit = {
    loadTableMethod.invoke(hive, loadPath, tableName, replace: JBoolean, holdDDLTime,
      isSrcLocal: JBoolean, isSkewedStoreAsSubdir, isAcid)
  }

  override def loadDynamicPartitions(
      hive: Hive,
      loadPath: Path,
      tableName: String,
      partSpec: JMap[String, String],
      replace: Boolean,
      numDP: Int,
      listBucketingEnabled: Boolean): Unit = {
    loadDynamicPartitionsMethod.invoke(hive, loadPath, tableName, partSpec, replace: JBoolean,
      numDP: JInteger, holdDDLTime, listBucketingEnabled: JBoolean, isAcid)
  }

  // Unlike older shims, PURGE is passed straight through to Hive here.
  override def dropTable(
      hive: Hive,
      dbName: String,
      tableName: String,
      deleteData: Boolean,
      ignoreIfNotExists: Boolean,
      purge: Boolean): Unit = {
    dropTableMethod.invoke(hive, dbName, tableName, deleteData: JBoolean,
      ignoreIfNotExists: JBoolean, purge: JBoolean)
  }

  // getTimeVar converts the configured delay to the requested unit directly.
  override def getMetastoreClientConnectRetryDelayMillis(conf: HiveConf): Long = {
    getTimeVarMethod.invoke(
      conf,
      HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY,
      TimeUnit.MILLISECONDS).asInstanceOf[Long]
  }
}
// Hive 1.0: no shimmed method signatures changed relative to 0.14.
private[client] class Shim_v1_0 extends Shim_v0_14
// Hive 1.1: dropIndex gains a "throw if the index does not exist" flag.
private[client] class Shim_v1_1 extends Shim_v1_0 {

  // throws an exception if the index does not exist
  protected lazy val throwExceptionInDropIndex = JBoolean.TRUE

  private lazy val dropIndexMethod =
    findMethod(
      classOf[Hive],
      "dropIndex",
      classOf[String],
      classOf[String],
      classOf[String],
      JBoolean.TYPE,
      JBoolean.TYPE)

  override def dropIndex(hive: Hive, dbName: String, tableName: String, indexName: String): Unit = {
    dropIndexMethod.invoke(hive, dbName, tableName, indexName, throwExceptionInDropIndex,
      deleteDataInDropIndex)
  }
}
/**
 * Shim for Hive 1.2: loadDynamicPartitions gains a transaction id, and
 * dropPartition supports PURGE through PartitionDropOptions.
 */
private[client] class Shim_v1_2 extends Shim_v1_1 {

  // txnId can be 0 unless isAcid == true
  protected lazy val txnIdInLoadDynamicPartitions: JLong = 0L

  private lazy val loadDynamicPartitionsMethod =
    findMethod(
      classOf[Hive],
      "loadDynamicPartitions",
      classOf[Path],
      classOf[String],
      classOf[JMap[String, String]],
      JBoolean.TYPE,
      JInteger.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE,
      JLong.TYPE)

  // PartitionDropOptions is resolved reflectively so this class still loads
  // against older Hive jars that do not ship it.
  private lazy val dropOptionsClass =
    Utils.classForName("org.apache.hadoop.hive.metastore.PartitionDropOptions")
  private lazy val dropOptionsDeleteData = dropOptionsClass.getField("deleteData")
  private lazy val dropOptionsPurge = dropOptionsClass.getField("purgeData")
  private lazy val dropPartitionMethod =
    findMethod(
      classOf[Hive],
      "dropPartition",
      classOf[String],
      classOf[String],
      classOf[JList[String]],
      dropOptionsClass)

  override def loadDynamicPartitions(
      hive: Hive,
      loadPath: Path,
      tableName: String,
      partSpec: JMap[String, String],
      replace: Boolean,
      numDP: Int,
      listBucketingEnabled: Boolean): Unit = {
    loadDynamicPartitionsMethod.invoke(hive, loadPath, tableName, partSpec, replace: JBoolean,
      numDP: JInteger, holdDDLTime, listBucketingEnabled: JBoolean, isAcid,
      txnIdInLoadDynamicPartitions)
  }

  // Supports ALTER TABLE ... DROP PARTITION ... PURGE by populating Hive's
  // PartitionDropOptions (older shims throw UnsupportedOperationException).
  override def dropPartition(
      hive: Hive,
      dbName: String,
      tableName: String,
      part: JList[String],
      deleteData: Boolean,
      purge: Boolean): Unit = {
    // Class.newInstance is deprecated (since Java 9) and rethrows any checked
    // exception from the constructor unchecked; invoke the public no-arg
    // constructor explicitly instead.
    val dropOptions = dropOptionsClass.getConstructor().newInstance().asInstanceOf[Object]
    dropOptionsDeleteData.setBoolean(dropOptions, deleteData)
    dropOptionsPurge.setBoolean(dropOptions, purge)
    dropPartitionMethod.invoke(hive, dbName, tableName, part, dropOptions)
  }
}
// Hive 2.0: the holdDDLTime flag disappears from loadPartition, loadTable and
// loadDynamicPartitions, so the reflective signatures are re-resolved here.
private[client] class Shim_v2_0 extends Shim_v1_2 {
  private lazy val loadPartitionMethod =
    findMethod(
      classOf[Hive],
      "loadPartition",
      classOf[Path],
      classOf[String],
      classOf[JMap[String, String]],
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE)
  private lazy val loadTableMethod =
    findMethod(
      classOf[Hive],
      "loadTable",
      classOf[Path],
      classOf[String],
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE)
  private lazy val loadDynamicPartitionsMethod =
    findMethod(
      classOf[Hive],
      "loadDynamicPartitions",
      classOf[Path],
      classOf[String],
      classOf[JMap[String, String]],
      JBoolean.TYPE,
      JInteger.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE,
      JLong.TYPE)

  override def loadPartition(
      hive: Hive,
      loadPath: Path,
      tableName: String,
      partSpec: JMap[String, String],
      replace: Boolean,
      inheritTableSpecs: Boolean,
      isSkewedStoreAsSubdir: Boolean,
      isSrcLocal: Boolean): Unit = {
    loadPartitionMethod.invoke(hive, loadPath, tableName, partSpec, replace: JBoolean,
      inheritTableSpecs: JBoolean, isSkewedStoreAsSubdir: JBoolean,
      isSrcLocal: JBoolean, isAcid)
  }

  override def loadTable(
      hive: Hive,
      loadPath: Path,
      tableName: String,
      replace: Boolean,
      isSrcLocal: Boolean): Unit = {
    loadTableMethod.invoke(hive, loadPath, tableName, replace: JBoolean, isSrcLocal: JBoolean,
      isSkewedStoreAsSubdir, isAcid)
  }

  override def loadDynamicPartitions(
      hive: Hive,
      loadPath: Path,
      tableName: String,
      partSpec: JMap[String, String],
      replace: Boolean,
      numDP: Int,
      listBucketingEnabled: Boolean): Unit = {
    loadDynamicPartitionsMethod.invoke(hive, loadPath, tableName, partSpec, replace: JBoolean,
      numDP: JInteger, listBucketingEnabled: JBoolean, isAcid, txnIdInLoadDynamicPartitions)
  }
}
// Hive 2.1: the load* methods gain a hasFollowingStatsTask flag (and
// loadDynamicPartitions an AcidUtils.Operation), while alterTable and
// alterPartitions accept an EnvironmentContext.
private[client] class Shim_v2_1 extends Shim_v2_0 {

  // true if there is any following stats task
  protected lazy val hasFollowingStatsTask = JBoolean.FALSE
  // TODO: Now, always set environmentContext to null. In the future, we should avoid setting
  // hive-generated stats to -1 when altering tables by using environmentContext. See Hive-12730
  protected lazy val environmentContextInAlterTable = null

  private lazy val loadPartitionMethod =
    findMethod(
      classOf[Hive],
      "loadPartition",
      classOf[Path],
      classOf[String],
      classOf[JMap[String, String]],
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE)
  private lazy val loadTableMethod =
    findMethod(
      classOf[Hive],
      "loadTable",
      classOf[Path],
      classOf[String],
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE)
  private lazy val loadDynamicPartitionsMethod =
    findMethod(
      classOf[Hive],
      "loadDynamicPartitions",
      classOf[Path],
      classOf[String],
      classOf[JMap[String, String]],
      JBoolean.TYPE,
      JInteger.TYPE,
      JBoolean.TYPE,
      JBoolean.TYPE,
      JLong.TYPE,
      JBoolean.TYPE,
      classOf[AcidUtils.Operation])
  private lazy val alterTableMethod =
    findMethod(
      classOf[Hive],
      "alterTable",
      classOf[String],
      classOf[Table],
      classOf[EnvironmentContext])
  private lazy val alterPartitionsMethod =
    findMethod(
      classOf[Hive],
      "alterPartitions",
      classOf[String],
      classOf[JList[Partition]],
      classOf[EnvironmentContext])

  override def loadPartition(
      hive: Hive,
      loadPath: Path,
      tableName: String,
      partSpec: JMap[String, String],
      replace: Boolean,
      inheritTableSpecs: Boolean,
      isSkewedStoreAsSubdir: Boolean,
      isSrcLocal: Boolean): Unit = {
    loadPartitionMethod.invoke(hive, loadPath, tableName, partSpec, replace: JBoolean,
      inheritTableSpecs: JBoolean, isSkewedStoreAsSubdir: JBoolean,
      isSrcLocal: JBoolean, isAcid, hasFollowingStatsTask)
  }

  override def loadTable(
      hive: Hive,
      loadPath: Path,
      tableName: String,
      replace: Boolean,
      isSrcLocal: Boolean): Unit = {
    loadTableMethod.invoke(hive, loadPath, tableName, replace: JBoolean, isSrcLocal: JBoolean,
      isSkewedStoreAsSubdir, isAcid, hasFollowingStatsTask)
  }

  override def loadDynamicPartitions(
      hive: Hive,
      loadPath: Path,
      tableName: String,
      partSpec: JMap[String, String],
      replace: Boolean,
      numDP: Int,
      listBucketingEnabled: Boolean): Unit = {
    loadDynamicPartitionsMethod.invoke(hive, loadPath, tableName, partSpec, replace: JBoolean,
      numDP: JInteger, listBucketingEnabled: JBoolean, isAcid, txnIdInLoadDynamicPartitions,
      hasFollowingStatsTask, AcidUtils.Operation.NOT_ACID)
  }

  override def alterTable(hive: Hive, tableName: String, table: Table): Unit = {
    alterTableMethod.invoke(hive, tableName, table, environmentContextInAlterTable)
  }

  override def alterPartitions(hive: Hive, tableName: String, newParts: JList[Partition]): Unit = {
    alterPartitionsMethod.invoke(hive, tableName, newParts, environmentContextInAlterTable)
  }
}
// Hive 2.2 and 2.3: no shimmed method signatures changed relative to 2.1.
private[client] class Shim_v2_2 extends Shim_v2_1

private[client] class Shim_v2_3 extends Shim_v2_1
| brad-kaiser/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala | Scala | apache-2.0 | 38,369 |
package cwe.scala.library.math
import cwe.scala.library.serviceproviders._
import cwe.scala.library.audit._
import cwe.scala.library.boxes._
import cwe.scala.library.boxes.Numerics._
import cwe.scala.library.math.bignumbers.Integer
import cwe.scala.library.math.bignumbers.Natural
/**
* A service provider for the package
*/
/**
 * A service provider for the package.
 *
 * Service instances are created lazily, cached as singletons, and their
 * creation is reported to the auditor.
 */
class MathServiceProvider {
  private val auditor = AuditServiceProvider.createAuditor(this)

  // instances
  // @volatile is required for the double-checked locking in the getters below
  // to be correct on the JVM: without it, a thread outside the synchronized
  // block may observe a non-null reference to a not-yet-fully-published object.
  @volatile private var integerOperationsServiceInstance: IntegerOperationsService = null
  @volatile private var rationalOperationsServiceInstance: RationalOperationsService = null

  // service providing implementation
  def getIntegerOperationsService(): IntegerOperationsService = {
    if (integerOperationsServiceInstance == null) {
      synchronized {
        if (integerOperationsServiceInstance == null) {
          integerOperationsServiceInstance = createIntegerOperationsService()
          auditor.createSingleton(integerOperationsServiceInstance)
        }
      }
    }
    integerOperationsServiceInstance
  }

  def getRationalOperationsService(): RationalOperationsService = {
    if (rationalOperationsServiceInstance == null) {
      synchronized {
        if (rationalOperationsServiceInstance == null) {
          rationalOperationsServiceInstance = createRationalOperationsService()
          auditor.createSingleton(rationalOperationsServiceInstance)
        }
      }
    }
    rationalOperationsServiceInstance
  }

  // service factory implementation; protected so subclasses can substitute
  // alternative service implementations.
  protected def createIntegerOperationsService(): IntegerOperationsService = new IntegerOperationsService()
  protected def createRationalOperationsService(): RationalOperationsService = new RationalOperationsService()

  // Rational factories for each supported numeric representation.
  def createRational(n: Int, d: Int): Rational[Int] = new Rational(n, d)
  def createRational(n: Long, d: Long): Rational[Long] = new Rational(n, d)
  def createRational(n: Integer, d: Integer): Rational[Integer] = new Rational(n, d)
  def createRational(n: Natural, d: Natural): Rational[Natural] = new Rational(n, d)
  def createRational[T](n: T, d: T)(implicit nBox: Numeric[T]): Rational[T] = new Rational(n, d)(nBox)
  def createPositiveRational(n: Int, d: Int): PositiveRational[Int] = new PositiveRational(n, d)
  def createPositiveRational(n: Long, d: Long): PositiveRational[Long] = new PositiveRational(n, d)
  def createPositiveRational(n: Integer, d: Integer): PositiveRational[Integer] = new PositiveRational(n, d)
  def createPositiveRational(n: Natural, d: Natural): PositiveRational[Natural] = new PositiveRational(n, d)
  def createPositiveRational[T](n: T, d: T)(implicit nBox: Numeric[T]): PositiveRational[T] = new PositiveRational(n, d)(nBox)
}
/**
 * Companion-style singleton access to a [[MathServiceProvider]], with support
 * for injecting an alternative provider (dependency injection) and resetting
 * to the built-in default.
 */
object MathServiceProvider extends IServiceProvider {
  // @volatile is required for the double-checked locking in getInstance to
  // publish the provider safely across threads.
  @volatile private var instance: MathServiceProvider = null

  // dependency injection
  /**
   * Injects a MathServiceProvider to be used.
   *
   * @throws IllegalArgumentException if `sp` is null
   */
  def setInstance(sp: MathServiceProvider): Unit = {
    if (sp == null) throw new IllegalArgumentException("MathServiceProvider cannot be null")
    else synchronized { instance = sp; AuditServiceProvider.createAuditor(sp).createSingleton(sp); ServiceProviders.registerServiceProvider(this) }
  }

  // Lazily creates the default provider on first use (double-checked locking).
  private def getInstance(): MathServiceProvider = {
    if (instance == null) { synchronized { if (instance == null) setInstance(new MathServiceProvider) } }
    instance
  }

  /**
   * Resets ServiceProvider to built in default
   */
  def reset(): Unit = if (instance != null) synchronized { instance = null }

  // Methods delegation to MathServiceProvider
  def getIntegerOperationsService(): IntegerOperationsService = getInstance getIntegerOperationsService ()
  def getRationalOperationsService(): RationalOperationsService = getInstance getRationalOperationsService ()
  def createRational(n: Int, d: Int): Rational[Int] = getInstance createRational(n, d)
  def createRational(n: Long, d: Long): Rational[Long] = getInstance createRational(n, d)
  def createRational(n: Integer, d: Integer): Rational[Integer] = getInstance createRational(n, d)
  def createRational(n: Natural, d: Natural): Rational[Natural] = getInstance createRational(n, d)
  def createRational[T](n: T, d: T)(implicit nBox: Numeric[T]): Rational[T] = getInstance.createRational(n, d)(nBox)
  def createPositiveRational(n: Int, d: Int): PositiveRational[Int] = getInstance createPositiveRational(n, d)
  def createPositiveRational(n: Long, d: Long): PositiveRational[Long] = getInstance createPositiveRational(n, d)
  def createPositiveRational(n: Integer, d: Integer): PositiveRational[Integer] = getInstance createPositiveRational(n, d)
  def createPositiveRational(n: Natural, d: Natural): PositiveRational[Natural] = getInstance createPositiveRational(n, d)
  def createPositiveRational[T](n: T, d: T)(implicit nBox: Numeric[T]): PositiveRational[T] = getInstance.createPositiveRational(n, d)(nBox)
}
package models.services
import scala.concurrent.Future
import com.mohiva.play.silhouette.api.services.IdentityService
import com.mohiva.play.silhouette.impl.providers.CommonSocialProfile
import models.User
/**
 * Handles actions to users.
 *
 * Identity lookup (retrieve by login info) is inherited from
 * Silhouette's IdentityService.
 */
trait UserService extends IdentityService[User] {

  /**
   * Saves a user.
   *
   * @param user The user to save.
   * @return The saved user.
   */
  def save(user: User): Future[User]

  /**
   * Saves the social profile for a user.
   *
   * If a user exists for this profile then update the user, otherwise create a new user with the given profile.
   *
   * @param profile The social profile to save.
   * @return The user for whom the profile was saved.
   */
  def save(profile: CommonSocialProfile): Future[User]
}
/**
 * An exception thrown when the user cannot be created.
 *
 * @param msg The exception message.
 * @param cause The exception cause.
 */
case class UserCreationException(msg: String, cause: Throwable)
  extends Exception(msg, cause) {
  //logger.error(msg, cause)

  /**
   * Constructs an exception with only a message.
   *
   * Note: the generated case-class `apply` requires both arguments, so this
   * auxiliary constructor is only reachable via `new UserCreationException(msg)`.
   *
   * @param msg The exception message.
   */
  def this(msg: String) = this(msg, null)
}
package org.apache.spark.ml.feature
import java.sql.Timestamp
import org.apache.log4j.{Level, LogManager}
import org.apache.spark.sql.functions._
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.sql.types._
import org.joda.time.format.DateTimeFormat
/**
* Loads various test datasets
*/
object TestHelper {
// Shared local SparkContext used by all tests (see createSparkContext).
final val SPARK_CTX = createSparkContext()
// Directory containing the test datasets.
final val FILE_PREFIX = "src/test/resources/data/"
final val ISO_DATE_FORMAT = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss")
// Token that denotes a null value in the raw data files.
final val NULL_VALUE = "?"
// This value is used to represent nulls in string columns
final val MISSING = "__MISSING_VALUE__"
// Suffixes appended to the names of derived (cleaned / indexed) columns.
final val CLEAN_SUFFIX: String = "_CLEAN"
final val INDEX_SUFFIX: String = "_IDX"
/**
 * Fits an MDLP discretizer to the dataframe. The input columns are first
 * assembled into a single "features" vector, and the discretizer is trained
 * against the already-indexed label column (labelColumn + INDEX_SUFFIX), which
 * callers must have created beforehand (see getDiscretizerModel / cleanLabelCol).
 *
 * @return the discretizer fit to the data given the specified features to bin and label use as target.
 */
def createDiscretizerModel(dataframe: DataFrame, inputCols: Array[String],
    labelColumn: String,
    maxBins: Int = 100,
    maxByPart: Int = 10000,
    stoppingCriterion: Double = 0,
    minBinPercentage: Double = 0,
    approximate: Boolean = false): DiscretizerModel = {
  val featureAssembler = new VectorAssembler()
    .setInputCols(inputCols)
    .setOutputCol("features")
  val processedDf = featureAssembler.transform(dataframe)

  val discretizer = new MDLPDiscretizer()
    .setMaxBins(maxBins)
    .setMaxByPart(maxByPart)
    .setStoppingCriterion(stoppingCriterion)
    .setMinBinPercentage(minBinPercentage)
    .setInputCol("features") // this must be a feature vector
    .setLabelCol(labelColumn + INDEX_SUFFIX)
    .setOutputCol("bucketFeatures")
    .setApproximate(approximate)

  discretizer.fit(processedDf)
}
/**
 * The label column will have null values replaced with MISSING values in this case.
 *
 * This is the usual entry point: it first cleans/indexes the label column and
 * then delegates to createDiscretizerModel.
 *
 * @return the discretizer fit to the data given the specified features to bin and label use as target.
 */
def getDiscretizerModel(dataframe: DataFrame, inputCols: Array[String],
    labelColumn: String,
    maxBins: Int = 100,
    maxByPart: Int = 10000,
    stoppingCriterion: Double = 0,
    minBinPercentage: Double = 0,
    approximate: Boolean = false): DiscretizerModel = {
  val processedDf = cleanLabelCol(dataframe, labelColumn)
  createDiscretizerModel(processedDf, inputCols, labelColumn,
    maxBins, maxByPart, stoppingCriterion, minBinPercentage, approximate)
}
def cleanLabelCol(dataframe: DataFrame, labelColumn: String): DataFrame = {
val df = dataframe
.withColumn(labelColumn + CLEAN_SUFFIX, when(col(labelColumn).isNull, lit(MISSING)).otherwise(col(labelColumn)))
convertLabelToIndex(df, labelColumn + CLEAN_SUFFIX, labelColumn + INDEX_SUFFIX)
}
def cleanNumericCols(dataframe: DataFrame, numericCols: Array[String]): DataFrame = {
var df = dataframe
numericCols.foreach(column => {
df = df.withColumn(column + CLEAN_SUFFIX, when(col(column).isNull, lit(Double.NaN)).otherwise(col(column)))
})
df
}
def convertLabelToIndex(df: DataFrame, inputCol: String, outputCol: String): DataFrame = {
val labelIndexer = new StringIndexer()
.setInputCol(inputCol)
.setOutputCol(outputCol).fit(df)
labelIndexer.transform(df)
}
def createSparkContext() = {
// the [n] corresponds to the number of worker threads and should correspond ot the number of cores available.
val conf = new SparkConf().setAppName("test-spark").setMaster("local[4]")
// Changing the default parallelism to 4 hurt performance a lot for a big dataset.
// When maxByPart was 10000, it wend from 39 min to 4.5 hours.
//conf.set("spark.default.parallelism", "4")
val sc = new SparkContext(conf)
LogManager.getRootLogger.setLevel(Level.WARN)
sc
}
/** @return the cars data as a dataframe */
def readCarsData(sqlContext: SQLContext): DataFrame = {
val cars = SPARK_CTX.textFile(FILE_PREFIX + "cars.data")
val nullable = true
// mpg, cylinders, cubicinches, horsepower, weightlbs, time to sixty, year, brand, origin
val schema = StructType(List(
StructField("mpg", DoubleType, nullable),
StructField("cylinders", IntegerType, nullable),
StructField("cubicinches", IntegerType, nullable),
StructField("horsepower", DoubleType, nullable),
StructField("weightlbs", DoubleType, nullable),
StructField("time to sixty", DoubleType, nullable),
StructField("year", IntegerType, nullable),
StructField("brand", StringType, nullable),
StructField("origin", StringType, nullable)
))
val rows = cars.map(line => line.split(",").map(elem => elem.trim))
.map(x => Row.fromSeq(Seq(x(0).toDouble, x(1).toInt, x(2).toInt, x(3).toDouble, x(4).toDouble, x(5).toDouble, x(6).toInt, x(7), x(8))))
sqlContext.createDataFrame(rows, schema)
}
/** @return the dates data as a dataframe. */
def readDatesData(sqlContext: SQLContext): DataFrame = {
val datesData = SPARK_CTX.textFile(FILE_PREFIX + "dates.data")
val nullable = true
// txt, date
val schema = StructType(List(
StructField("txt", StringType, nullable),
StructField("date", DoubleType, nullable)
))
val rows = datesData.map(line => line.split(",").map(elem => elem.trim))
.map(x => Row.fromSeq(Seq(asString(x(0)), asDateDouble(x(1)))))
sqlContext.createDataFrame(rows, schema)
}
/** @return the titanic data as a dataframe. This dataset has nulls and dates */
def readTitanicData(sqlContext: SQLContext): DataFrame = {
val titanic = SPARK_CTX.textFile(FILE_PREFIX + "titanic.data")
val nullable = true
// No, Braund,male,22, A/5 21171,7.25, ?, 3, S, 1, 0, 2015-04-22T00:00:00
val schema = StructType(List(
StructField("survived", StringType, nullable),
StructField("name", StringType, nullable),
StructField("sex", StringType, nullable),
StructField("age", DoubleType, nullable),
StructField("ticket", StringType, nullable),
StructField("fare", DoubleType, nullable),
StructField("cabin", StringType, nullable),
StructField("pclass", DoubleType, nullable), // int
StructField("embarked", StringType, nullable),
StructField("sibsp", DoubleType, nullable), // int
StructField("parch", DoubleType, nullable), // int
StructField("grad date", DoubleType, nullable)
))
// ints and dates must be read as doubles
val rows = titanic.map(line => line.split(",").map(elem => elem.trim))
.map(x => Row.fromSeq(Seq(
asString(x(0)), asString(x(1)), asString(x(2)),
asDouble(x(3)), asString(x(4)), asDouble(x(5)), asString(x(6)),
asDouble(x(7)), asString(x(8)), asDouble(x(9)), asDouble(x(10)), asDateDouble(x(11))
)
))
sqlContext.createDataFrame(rows, schema)
}
/** @return the titanic data as a dataframe. This version is interesting because the VectorAssembler
* makes some of its values sparse and other dense. In the other version they are all dense.
*/
def readTitanic2Data(sqlContext: SQLContext): DataFrame = {
val data = SPARK_CTX.textFile(FILE_PREFIX + "titanic2.data")
val nullable = true
// No ? 3 S 1 0
val schema = StructType(List(
StructField("survived", StringType, nullable),
StructField("name", StringType, nullable),
StructField("sex", StringType, nullable),
StructField("age", DoubleType, nullable),
StructField("ticket", StringType, nullable),
StructField("fare", DoubleType, nullable),
StructField("cabin", StringType, nullable),
StructField("pclass", DoubleType, nullable), // int
StructField("embarked", StringType, nullable),
StructField("sibsp", DoubleType, nullable), // int
StructField("parch", DoubleType, nullable) // int
))
// ints and dates must be read as doubles
val rows = data.map(line => line.split(",").map(elem => elem.trim))
.map(x => {
Row.fromSeq(Seq(
asString(x(0)), asString(x(1)), asString(x(2)), asDouble(x(3)), asString(x(4)), asDouble(x(5)),
asString(x(6)), asDouble(x(7)), asString(x(8)), asDouble(x(9)), asDouble(x(10))))
})
sqlContext.createDataFrame(rows, schema)
}
/** @return standard iris dataset from UCI repo.
*/
def readIrisData(sqlContext: SQLContext): DataFrame = {
val data = SPARK_CTX.textFile(FILE_PREFIX + "iris.data")
val nullable = true
val schema = StructType(List(
StructField("sepallength", DoubleType, nullable),
StructField("sepalwidth", DoubleType, nullable),
StructField("petallength", DoubleType, nullable),
StructField("petalwidth", DoubleType, nullable),
StructField("iristype", StringType, nullable)
))
// ints and dates must be read as doubles
val rows = data.map(line => line.split(",").map(elem => elem.trim))
.map(x => {Row.fromSeq(Seq(asDouble(x(0)), asDouble(x(1)), asDouble(x(2)), asDouble(x(3)), asString(x(4))))})
sqlContext.createDataFrame(rows, schema)
}
/** @return subset of fake telecom churn dataset. This dataset has more rows than the others.
*/
def readChurnData(sqlContext: SQLContext): DataFrame = {
val data = SPARK_CTX.textFile(FILE_PREFIX + "churn.data")
val nullable = true
val schema = StructType(List(
StructField("State", StringType, nullable),
StructField("Number Vmail Messages", DoubleType, nullable),
StructField("Total Day Minutes", DoubleType, nullable),
StructField("Total Day Calls", DoubleType, nullable),
StructField("Total Day Charge", DoubleType, nullable),
StructField("Total Eve Minutes", DoubleType, nullable),
StructField("Calls", DoubleType, nullable),
StructField("Charge", DoubleType, nullable),
StructField("Total Night Minutes", DoubleType, nullable),
StructField("Total Night Calls", DoubleType, nullable),
StructField("Total Night Charge", DoubleType, nullable),
StructField("Total Intl Minutes", DoubleType, nullable),
StructField("Total Intl Calls", DoubleType, nullable),
StructField("Total Intl Charge", DoubleType, nullable),
StructField("Number Customer Service Calls", DoubleType, nullable),
StructField("Churned", StringType, nullable)
))
// ints and dates must be read as doubles
val rows = data.map(line => line.split(",").map(elem => elem.trim))
.map(x => {Row.fromSeq(Seq(asString(x(0)), asDouble(x(1)), asDouble(x(2)), asDouble(x(3)), asDouble(x(4)),
asDouble(x(5)), asDouble(x(6)), asDouble(x(7)), asDouble(x(8)), asDouble(x(9)), asDouble(x(10)),
asDouble(x(11)), asDouble(x(12)), asDouble(x(13)), asDouble(x(14)), asString(x(15))))})
sqlContext.createDataFrame(rows, schema)
}
/** @return the blockbuster dataset. It has a lot of columns (312), but not that many rows (421)
*/
def readBlockBusterData(sqlContext: SQLContext): DataFrame = {
val data = SPARK_CTX.textFile(FILE_PREFIX + "blockbuster.data")
val nullable = true
val numTrailingNumberCols = 308
// A whole bunch of numeric columns
var fields: Seq[StructField] = for (i <- 1 to numTrailingNumberCols) yield {
StructField("col" + i, DoubleType, nullable)
}
fields = List(List(
StructField("Store", DoubleType, nullable),
StructField("Sqft", DoubleType, nullable),
StructField("City", StringType, nullable),
StructField("State", StringType, nullable)
), fields).flatten
val schema = StructType(fields)
val rows = data.map(line => line.split(",").map(elem => elem.trim))
.map(x => {Row.fromSeq(
List(Seq(asDouble(x(0)), asDouble(x(1)), asString(x(2)), asString(x(3))),
for (i <- 4 to numTrailingNumberCols + 3) yield { asDouble(x(i)) }
).flatten )
})
sqlContext.createDataFrame(rows, schema)
}
/** @return subset of 311 service call data.
*/
def readSvcRequests40000Data(sqlContext: SQLContext): DataFrame = {
val data = SPARK_CTX.textFile(FILE_PREFIX + "svcRequests40000.data")
val nullable = true
val schema = StructType(List(
StructField("Unique Key", DoubleType, nullable),
StructField("Closed Date", DoubleType, nullable),
StructField("Agency", StringType, nullable),
StructField("Complaint Type", StringType, nullable),
StructField("Descriptor", StringType, nullable),
StructField("Incident Zip", StringType, nullable),
StructField("City", StringType, nullable),
StructField("Landmark", StringType, nullable),
StructField("Facility Type", StringType, nullable),
StructField("Status", StringType, nullable),
StructField("Borough", StringType, nullable),
StructField("X Coordinate (State Plane)", DoubleType, nullable),
StructField("Y Coordinate (State Plane)", DoubleType, nullable),
StructField("Latitude", DoubleType, nullable),
StructField("Longitude", DoubleType, nullable)
))
// ints and dates must be read as doubles
val rows = data.map(line => line.split(",").map(elem => elem.trim))
.map(x => {Row.fromSeq(Seq(asDouble(x(0)), asDateDouble(x(1)), asString(x(2)), asString(x(3)), asString(x(4)),
asString(x(5)), asString(x(6)), asString(x(7)), asString(x(8)), asString(x(9)), asString(x(10)),
asDouble(x(11)), asDouble(x(12)), asDouble(x(13)), asDouble(x(14))))})
sqlContext.createDataFrame(rows, schema)
}
/** @return dataset with lots of rows
*/
def readServerXData(sqlContext: SQLContext): DataFrame = {
val data = SPARK_CTX.textFile(FILE_PREFIX + "serverX_100000.data")
val nullable = true
val schema = StructType(List(
StructField("rpm1", DoubleType, nullable),
StructField("CPU1_TJ", DoubleType, nullable),
StructField("CPU2_TJ", DoubleType, nullable),
StructField("total_cfm", DoubleType, nullable),
StructField("val1", DoubleType, nullable),
StructField("val2", DoubleType, nullable),
StructField("target4", StringType, nullable),
StructField("target2", StringType, nullable)
))
// ints and dates must be read as doubles
val rows = data.map(line => line.split(",").map(elem => elem.trim))
.map(x => {Row.fromSeq(Seq(asDouble(x(0)), asDouble(x(1)), asDouble(x(2)),
asDouble(x(3)), asDouble(x(4)), asDouble(x(5)),
asString(x(6)), asString(x(7))))})
sqlContext.createDataFrame(rows, schema)
}
/** @return dataset with lots of rows
*/
def readServerBigXData(sqlContext: SQLContext): DataFrame = {
val data = SPARK_CTX.textFile(FILE_PREFIX + "serverX_10000000.data")
val nullable = true
val schema = StructType(List(
StructField("targetA", StringType, nullable),
StructField("val1", DoubleType, nullable),
StructField("val2", DoubleType, nullable),
StructField("val3", DoubleType, nullable),
StructField("val4", DoubleType, nullable),
StructField("val5", DoubleType, nullable),
StructField("val6", DoubleType, nullable),
StructField("targetB", StringType, nullable)
))
// ints and dates must be read as doubles
val rows = data.map(line => line.split(",").map(elem => elem.trim))
.map(x => {Row.fromSeq(Seq(asString(x(0)), asDouble(x(1)),
asDouble(x(2)), asDouble(x(3)), asDouble(x(4)), asDouble(x(5)), asDouble(x(6)), asString(x(7))))
})
sqlContext.createDataFrame(rows, schema)
}
/** @return dataset with lots of rows
*/
def readRedTrainData(sqlContext: SQLContext): DataFrame = {
val data = SPARK_CTX.textFile(FILE_PREFIX + "red_train.data")
val nullable = true
val schema = StructType(List(
StructField("col1", DoubleType, nullable),
StructField("col2", DoubleType, nullable),
StructField("col3", DoubleType, nullable),
StructField("col4", DoubleType, nullable),
StructField("col5", DoubleType, nullable),
StructField("col6", DoubleType, nullable),
StructField("col7", DoubleType, nullable),
StructField("col8", DoubleType, nullable),
StructField("col9", DoubleType, nullable),
StructField("outcome", StringType, nullable)
))
// ints and dates must be read as doubles
val rows = data.map(line => line.split(",").map(elem => elem.trim))
.map(x => {Row.fromSeq(Seq(
asDouble(x(0)), asDouble(x(1)), asDouble(x(2)), asDouble(x(3)), asDouble(x(4)), asDouble(x(5)),
asDouble(x(6)), asDouble(x(7)), asDouble(x(8)), asString(x(9))))
})
sqlContext.createDataFrame(rows, schema)
}
/** @return dataset with 3 double columns. The first is the label column and contain null.
*/
def readNullLabelTestData(sqlContext: SQLContext): DataFrame = {
val data = SPARK_CTX.textFile(FILE_PREFIX + "null_label_test.data")
val nullable = true
val schema = StructType(List(
StructField("label_IDX", DoubleType, nullable),
StructField("col1", DoubleType, nullable),
StructField("col2", DoubleType, nullable)
))
// ints and dates must be read as doubles
val rows = data.map(line => line.split(",").map(elem => elem.trim))
.map(x => {Row.fromSeq(Seq(asDouble(x(0)), asDouble(x(1)), asDouble(x(2))))})
sqlContext.createDataFrame(rows, schema)
}
private def asDateDouble(isoString: String) = {
if (isoString == NULL_VALUE) Double.NaN
else ISO_DATE_FORMAT.parseDateTime(isoString).getMillis.toString.toDouble
}
// label cannot currently have null values - see #8.
private def asString(value: String) = if (value == NULL_VALUE) null else value
private def asDouble(value: String) = if (value == NULL_VALUE) Double.NaN else value.toDouble
}
| sramirez/spark-MDLP-discretization | src/test/scala/org/apache/spark/ml/feature/TestHelper.scala | Scala | apache-2.0 | 18,073 |
package com.stulsoft.exercises.binary.trees
/** Construct completely balanced binary trees.
* Example:
* {{{
* cBalanced(3, "x")
*
* N
* /\\
* / \\
* N N
* /\\ /\\
* / \\ /\\
* N N N N
* .........
* }}}
*
* @author Yuriy Stul.
*/
object P55ConstructCompletelyBalancedTrees extends App {

  test(1, "x")
  test(2, "x")
  test(3, "x")

  /** Builds a tree for `n` and prints it for manual inspection. */
  def test[T](n: Int, x: T): Unit = {
    println(s"n=$n --> ${cBalanced(n, x)}")
  }

  /**
   * Builds a perfect binary tree of height `n` in which every node carries `value`.
   *
   * NOTE(review): this produces 2^n - 1 nodes, not the n-node completely balanced
   * trees described in problem P55 — confirm whether that is the intent.
   *
   * Fix: the previous version recursed without a lower bound, so any n <= 0
   * descended forever and crashed with a StackOverflowError. We now fail fast.
   *
   * @param n     height of the tree; must be at least 1
   * @param value payload stored in every node
   * @throws IllegalArgumentException if n < 1
   */
  def cBalanced[T](n: Int, value: T): Node[T] = {
    require(n >= 1, s"tree height must be >= 1, got $n")
    if (n == 1)
      Node(value, End, End)
    else
      Node(value, cBalanced(n - 1, value), cBalanced(n - 1, value))
  }
}
| ysden123/scala-exercises | src/main/scala/com/stulsoft/exercises/binary/trees/P55ConstructCompletelyBalancedTrees.scala | Scala | mit | 661 |
import pattern.{P2, Context, P0}
/*
+1>> This source code is licensed as GPLv3 if not stated otherwise.
>> NO responsibility taken for ANY harm, damage done
>> to you, your data, animals, etc.
>>
+2>>
>> Last modified: 2013-10-29 :: 20:37
>> Origin: patterns
>>
+3>>
>> Copyright (c) 2013:
>>
>> | | |
>> | ,---.,---|,---.|---.
>> | | || |`---.| |
>> `---'`---'`---'`---'`---'
>> // Niklas Klügel
>>
+4>>
>> Made in Bavaria by fat little elves - since 1983.
*/
/**
 * Pattern factory producing a two-input node that unwraps an Option,
 * substituting the second input whenever the Option is empty.
 */
object GetOrElse {

  /** @return a P2 node computing `maybe.getOrElse(fallback)`; the context is ignored. */
  def apply[T](): P2[Option[T], T, T] =
    new P2((_: Context, maybe: Option[T], fallback: T) => maybe.getOrElse(fallback))
}
| lodsb/patterns | src/main/scala/pattern/combination/GetOrElse.scala | Scala | gpl-3.0 | 772 |
package dawn.flow.spatialf
import spatial._
import spatial.interpreter._
import spatial.dsl._
import org.virtualized._
import dawn.flow._
import scala.collection.JavaConverters._
import argon.core.Const
/**
 * Type class bridging an application-level type A and its Spatial DSL counterpart.
 * Instances supply the DSL evidence (Bits/Type) plus conversions in both directions.
 */
trait Spatialable[A] {
  // DSL-side mirror type of A, used inside Accel bodies.
  type Spatial
  // Host-side raw representation produced by the interpreter
  // (the value carried by a staged Const) — see its use in convertOutput.
  type Internal
  // Evidence required by StreamIn/StreamOut and @struct fields.
  implicit def bitsI: Bits[Spatial]
  implicit def typeI: Type[Spatial]
  // Decode an interpreter result back into an A.
  def from(x: Internal): A
  // Encode an A into its DSL representation.
  def to(x: A): Spatial
}
/**
 * Base for flow blocks that execute a batch of timestamped samples through the
 * Spatial interpreter. Subclasses define how inputs are marshalled onto the
 * interpreter's input buses and how bus output records become Timestamped[R].
 *
 * NOTE(review): input/output queues live in the global mutable `Streams` object,
 * so batches must not run concurrently — confirm the scheduler guarantees this.
 */
abstract class SpatialBatch[R, SpatialR: Bits: Type]() extends SpatialStream {

  def name = "Spatial Batch"

  //to setup SRAM etc
  // Populated from initMems() at the start of each Accel body (see subclasses).
  protected var memsStorage: Map[java.lang.String, MetaAny[_]] = _
  def initMems(): Map[java.lang.String, MetaAny[_]] = Map()
  // Typed accessor into memsStorage; the caller asserts the element type.
  def mems[T <: MetaAny[_]](x: java.lang.String) = memsStorage(x).asInstanceOf[T]

  // Decode one interpreter output record (field name -> staged value) into a result.
  def convertOutput(x: Seq[(java.lang.String, Exp[_])]): Timestamped[R]

  // Output record layout: timestamp plus the DSL-typed payload.
  @struct case class TSR(t: Double, v: SpatialR)

  // Run the Spatial program in interpreter mode, quietly.
  override def stagingArgs = scala.Array[java.lang.String]("--interpreter", "-q")

  type BatchInput
  val zipIns: Source[BatchInput]
  // Marshal a batch into one list of staged values per input bus (In1, In2, ...).
  def convertInputs(x: ListT[BatchInput]): List[List[MetaAny[_]]]

  /** Registers the buses and enqueues the batch's values into the global stream queues. */
  def setStreams(x: ListT[BatchInput]) = {
    // Buses beyond those returned by convertInputs are simply not paired by zip.
    val inputs = List[Bus](In1, In2, In3, In4, In5).zip(convertInputs(x))
    val inputsMap: Map[Bus, List[MetaAny[_]]] = inputs.toMap
    val outs: List[Bus] = List(Out1)  // NOTE(review): unused local — kept for documentation value?
    inputsMap.foreach {
      case (bus, content) =>
        Streams.addStreamIn(bus)
        // Enqueue the raw constant behind each staged value.
        content.foreach(x => Streams.streamsIn(bus).put(x.s.asInstanceOf[Const[_]].c))
    }
    Streams.addStreamOut(Out1)
  }

  /** Drains Out1 into results and resets the global stream registries for the next batch. */
  def getAndCleanStreams(): List[Timestamped[R]] = {
    val r = Streams.streamsOut(Out1)
    Streams.streamsOut = Map()
    Streams.streamsIn = Map()
    r.asScala.toList
      .asInstanceOf[List[Seq[(java.lang.String, Exp[_])]]]
      .map(convertOutput)
  }
}
/**
 * One-input Spatial batch block: samples of A flow in on In1, results of R come
 * back on Out1. Subclasses provide the staged program via spatial().
 */
abstract class SpatialBatchRaw1[A, R, SA: Bits: Type, SR: Bits: Type](
  val rawSource1: Source[A])(implicit val sa: Spatialable[A] { type Spatial = SA }, val sr: Spatialable[R] { type Spatial = SR })
    extends SpatialBatch[R, SR]
    with Block1[A, R] {

  type SpatialA = sa.Spatial
  type SpatialR = sr.Spatial

  // Input record layout on In1: timestamp plus DSL-typed payload.
  @struct case class TSA(t: Double, v: SA)

  type BatchInput = A

  lazy val zipIns = source1

  /** Wraps each timestamped A into a staged TSA; only In1 is used. */
  def convertInputs(x: ListT[BatchInput]): List[List[MetaAny[_]]] = {
    List(x.map(y => TSA(y.t, sa.to(y.v))))
  }

  /** Rebuilds a Timestamped[R] from the interpreter's (field name, constant) pairs. */
  def convertOutput(x: Seq[(java.lang.String, Exp[_])]) = {
    var m: Map[java.lang.String, Exp[_]] = Map()
    x.foreach(y => m += y)
    Timestamped(
      // "t" is staged as a BigDecimal constant by the interpreter.
      m("t").asInstanceOf[Const[_]].c.asInstanceOf[BigDecimal].toDouble,
      sr.from(m("v").asInstanceOf[Const[_]].c.asInstanceOf[sr.Internal])
    )
  }

  // The staged Spatial program to run over the streams.
  def spatial(): Unit

  // The actual batch node: load streams, stage + interpret the program, drain results.
  lazy val out = new Batch[BatchInput, R] {
    lazy val rawSource1 = zipIns
    def name = "Spatial Batch Inner 1"
    // Stage and run `prog` through the Spatial compiler in interpreter mode.
    def runI(prog: () => Unit) {
      initConfig(stagingArgs)
      compileProgram(() => prog())
    }
    def f(lA: ListT[BatchInput]): ListT[R] = {
      setStreams(lA)
      def prog() = {
        spatial()
      }
      runI(prog)
      getAndCleanStreams()
    }
  }
}
/**
 * Convenience one-input batch: subclasses only implement the per-sample
 * transformation spatial(tsa); the streaming boilerplate lives here.
 */
abstract class SpatialBatch1[A, R, SA: Bits: Type, SR: Bits: Type](
  rawSource1: Source[A])(implicit sa: Spatialable[A] { type Spatial = SA }, sr: Spatialable[R] {
  type Spatial = SR
}) extends SpatialBatchRaw1[A, R, SA, SR](rawSource1) {

  // Per-sample staged computation; the input's timestamp is propagated unchanged.
  def spatial(tsa: TSA): SR

  @virtualize def spatial() = {
    Accel {
      memsStorage = initMems()
      val in1 = StreamIn[TSA](In1)
      val out = StreamOut[TSR](Out1)
      // Free-running stream pipeline: one output per input element.
      Stream(*) { x =>
        val tsa = in1.value
        out := TSR(tsa.t, spatial(tsa))
      }
    }
  }
}
/**
 * Convenience two-input batch: buffers both streams in FIFOs and replays them
 * to spatial(...) in timestamp order, tagging each sample Left (from A) or
 * Right (from B).
 */
abstract class SpatialBatch2[A, B, R, SA: Bits: Type, SB: Bits: Type, SR: Bits: Type](
  rawSource1: Source[A], rawSource2: Source[B])(implicit sa: Spatialable[A] { type Spatial = SA }, sb: Spatialable[B] { type Spatial = SB }, sr: Spatialable[R] { type Spatial = SR })
    extends SpatialBatchRaw2[A, B, R, SA, SB, SR](rawSource1, rawSource2) {

  // Per-sample staged computation; receives either stream's element.
  def spatial(ts: Either[TSA, TSB]): SR

  @virtualize def spatial() = {
    Accel {
      memsStorage = initMems()
      val in1 = StreamIn[TSA](In1)
      val in2 = StreamIn[TSB](In2)
      // Buffers sized for the whole batch — assumes a batch never exceeds 100000
      // elements per stream; TODO confirm.
      val fifo1 = FIFO[TSA](100000)
      val fifo2 = FIFO[TSB](100000)
      val out = StreamOut[TSR](Out1)
      // Drain both input buses into their FIFOs concurrently.
      Stream(*) { x =>
        fifo1.enq(in1)
      }
      Stream(*) { x =>
        fifo2.enq(in2)
      }
      // Merge step: always consume the FIFO head with the smaller timestamp;
      // ties and an exhausted fifo1 favor fifo2.
      FSM[Boolean, Boolean](true) { x =>
        x
      } { x =>
        if ((fifo2.empty && !fifo1.empty) || (!fifo1.empty && !fifo2.empty && fifo1.peek.t < fifo2.peek.t)) {
          val tsa = fifo1.deq()
          out := TSR(tsa.t, spatial(Left(tsa)))
        } else if (!fifo2.empty) {
          val tsb = fifo2.deq()
          out := TSR(tsb.t, spatial(Right(tsb)))
        }
      } { x =>
        // Keep iterating until both FIFOs are drained.
        !fifo1.empty || !fifo2.empty
      }
    }
  }
}
/**
 * Two-input Spatial batch block: the merged stream of A and B samples is split
 * by tag onto In1 (A) and In2 (B); results of R come back on Out1.
 */
abstract class SpatialBatchRaw2[A, B, R, SA: Bits: Type, SB: Bits: Type, SR: Bits: Type](
  val rawSource1: Source[A],
  val rawSource2: Source[B])(implicit val sa: Spatialable[A] { type Spatial = SA }, val sb: Spatialable[B] {
  type Spatial = SB
}, val sr: Spatialable[R] { type Spatial = SR })
    extends SpatialBatch[R, SR]
    with Block2[A, B, R] {

  type SpatialA = sa.Spatial
  type SpatialB = sb.Spatial
  type SpatialR = sr.Spatial

  // Input record layouts for the two buses.
  @struct case class TSA(t: Double, v: SA)
  @struct case class TSB(t: Double, v: SB)

  // Elements are tagged by origin stream: Left = source1, Right = source2.
  type BatchInput = Either[A, B]

  lazy val zipIns = source1.merge(source2)

  /** Splits the merged batch back into the In1 (A) and In2 (B) bus contents. */
  def convertInputs(x: ListT[BatchInput]): List[List[MetaAny[_]]] = {
    val la = x.filter(_.v.isLeft).map(y => TSA(y.t, sa.to(y.v.left.get)))
    val lb = x.filter(_.v.isRight).map(y => TSB(y.t, sb.to(y.v.right.get)))
    List(la, lb)
  }

  /** Rebuilds a Timestamped[R] from the interpreter's (field name, constant) pairs. */
  def convertOutput(x: Seq[(java.lang.String, Exp[_])]) = {
    var m: Map[java.lang.String, Exp[_]] = Map()
    x.foreach(y => m += y)
    Timestamped(
      m("t").asInstanceOf[Const[_]].c.asInstanceOf[BigDecimal].toDouble,
      sr.from(m("v").asInstanceOf[Const[_]].c.asInstanceOf[sr.Internal])
    )
  }

  // The staged Spatial program to run over the streams.
  def spatial(): Unit

  lazy val out = new Batch[BatchInput, R] {
    lazy val rawSource1 = zipIns
    def name = "Spatial Batch Inner 1"
    def runI(prog: () => Unit) {
      initConfig(stagingArgs)
      compileProgram(() => prog())
    }
    def f(lA: ListT[BatchInput]): ListT[R] = {
      // Unlike the one-input variant, the staging state is reset explicitly here
      // before each batch run.
      implicitly[argon.core.State].reset()
      setStreams(lA)
      @virtualize def prog() = {
        spatial()
      }
      runI(prog)
      getAndCleanStreams()
    }
  }
}
//object SpatialBatch {
// def apply[A, B, R](s1: Source[A], s2: Source[B]) = new Spatial {
// new Source1
// }
//}
| rubenfiszel/scala-flow | core/src/main/scala/spatial/SpatialBatch.scala | Scala | mit | 6,773 |
package com.datastax.spark.connector
import scala.collection.JavaConversions._
import scala.reflect.runtime.universe._
import org.apache.commons.lang3.tuple
import com.datastax.driver.core.{ProtocolVersion, UDTValue => DriverUDTValue}
import com.datastax.spark.connector.types.NullableTypeConverter
/**
 * Immutable representation of a Cassandra user-defined-type value: parallel
 * sequences of field names and their (possibly null) values, with typed access
 * provided by ScalaGettableData.
 */
final case class UDTValue(columnNames: IndexedSeq[String], columnValues: IndexedSeq[AnyRef])
  extends ScalaGettableData
object UDTValue {

  /**
   * Converts a Java driver UDTValue into the connector's representation,
   * reading every field declared by the value's UDT type.
   */
  def fromJavaDriverUDTValue(value: DriverUDTValue)(implicit protocolVersion: ProtocolVersion): UDTValue = {
    val fields = value.getType.getFieldNames.toIndexedSeq
    val values = fields.map(GettableData.get(value, _))
    new UDTValue(fields, values)
  }

  /** Builds a UDTValue from a field-name -> value map (iteration order is preserved). */
  def fromMap(map: Map[String, Any]): UDTValue =
    new UDTValue(map.keys.toIndexedSeq, map.values.map(_.asInstanceOf[AnyRef]).toIndexedSeq)

  // Cached reflection handles, so callers don't re-derive them per use.
  val TypeTag = implicitly[TypeTag[UDTValue]]
  val Symbol = typeOf[UDTValue].asInstanceOf[TypeRef].sym

  // Identity converter: lets the generic conversion machinery accept UDTValue as-is
  // (any other input falls through to the converter's failure handling).
  implicit object UDTValueConverter extends NullableTypeConverter[UDTValue] {
    def targetTypeTag = TypeTag
    def convertPF = {
      case x: UDTValue => x
    }
  }
}
| EchoSYSU/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/UDTValue.scala | Scala | apache-2.0 | 1,125 |
package filters
import play.filters.gzip.GzipFilter
/** HTTP filters for the application: currently just selective gzip compression. */
object Filters {

  /** Content-Type prefixes whose responses are worth compressing. */
  val headersToGzip = Seq("text/html", "text/json", "application/json", "application/javascript", "text/javascript", "text/css")

  /**
   * Gzips a response only when its Content-Type header starts with one of the
   * prefixes above (the prefix match tolerates charset suffixes).
   */
  val gzipFilter = new GzipFilter(shouldGzip = (request, response) =>
    response.headers.get("Content-Type") match {
      case Some(contentType) => headersToGzip.exists(contentType.startsWith)
      case None              => false
    })
}
/*
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
package com.krux.hyperion
import com.krux.hyperion.expression.{Duration, Parameter}
import com.krux.hyperion.workflow.WorkflowExpression
/**
 * Immutable decorator around a DataPipelineDefGroup that lets callers override
 * individual settings (name, schedule, tags, ...) via `withX` builders.
 * Workflow construction is deferred: `workflowsFunc` is only invoked when
 * `workflows` is read.
 */
case class DataPipelineDefGroupWrapper private (
  override val hc: HyperionContext,
  override val pipelineName: String,
  override val nameKeySeparator: String,
  schedule: Schedule,
  override val pipelineLifeCycle: PipelineLifeCycle,
  override val scheduleDelay: Option[Duration],
  workflowsFunc: () => Map[WorkflowKey, WorkflowExpression], // for delayed workfow execution
  override val tags: Map[String, Option[String]],
  override val parameters: Iterable[Parameter[_]]
) extends DataPipelineDefGroup {

  /** Forces evaluation of the deferred workflows. */
  def workflows = workflowsFunc()

  /** Returns a copy with a different pipeline name. */
  def withName(name: String) =
    this.copy(pipelineName = name)

  /** Returns a copy with a different schedule. */
  def withSchedule(schedule: Schedule) =
    this.copy(schedule = schedule)

  /** Returns a copy with a different schedule delay. */
  def withScheduleDelay(scheduleDelay: Option[Duration]) =
    this.copy(scheduleDelay = scheduleDelay)

  /** Returns a copy with the given tags merged over the existing ones. */
  def withTags(tags: Map[String, Option[String]]) =
    this.copy(tags = this.tags ++ tags)

  /** Returns a copy with the parameters replaced (not merged). */
  def withParameters(parameters: Iterable[Parameter[_]]) =
    this.copy(parameters = parameters)

  /** Returns a copy with a different pipeline life cycle. */
  def withPipelineLifeCycle(pipelineLifeCycle: PipelineLifeCycle) =
    this.copy(pipelineLifeCycle = pipelineLifeCycle)
}
}
object DataPipelineDefGroupWrapper {

  /**
   * Wraps an existing group, capturing all of its settings. The inner group's
   * `workflows` is wrapped in a thunk so it is not evaluated eagerly.
   */
  def apply(inner: DataPipelineDefGroup): DataPipelineDefGroupWrapper =
    new DataPipelineDefGroupWrapper(
      hc = inner.hc,
      pipelineName = inner.pipelineName,
      nameKeySeparator = inner.nameKeySeparator,
      schedule = inner.schedule,
      pipelineLifeCycle = inner.pipelineLifeCycle,
      scheduleDelay = inner.scheduleDelay,
      workflowsFunc = () => inner.workflows,
      tags = inner.tags,
      parameters = inner.parameters
    )
}
| realstraw/hyperion | core/src/main/scala/com/krux/hyperion/DataPipelineDefGroupWrapper.scala | Scala | bsd-3-clause | 1,804 |
package russoul.lib.common.math.geometry.simple
import russoul.lib.common.TypeClasses.{CanonicalEuclideanSpaceOverField, Field, Tensor1}
import russoul.lib.common._
import russoul.lib.common.Implicits._
import russoul.lib.common.utils.Arr
import shapeless.Nat
import shapeless.Nat._
import Abstraction._
import russoul.lib.common.math.geometry.simple.general.CenteredShape
import scala.reflect.ClassTag
/**
* Created by russoul on 01.07.2017.
*/
/**
 * Oriented bounding box in 2D over an abstract vector type V and scalar field F.
 * Defined by a center, two (presumably unit — TODO confirm) axis directions
 * `right` and `up`, and the half-extents along each axis.
 */
@immutable case class OBB2Over[V[_,_ <: Nat], @tbsp F]private(override val center: V[F,_2], val right: V[F,_2], val up: V[F,_2], val extentRight : F, val extentUp: F) extends CenteredShape[V,F,_2] {

  /**
    *
    * @param factor
    * @return scaled around its center version
    */
  override def scale(factor: F)(implicit ev1 : CES[V,F, _2], tensor1:T1[F,V,_2], field: Field[F]): OBB2Over[V, F] = {
    // Only the half-extents change; center and axes are untouched.
    new OBB2Over(center, right, up, extentRight * factor, extentUp * factor)
  }

  /** Returns a copy moved by the offset vector `v`. */
  override def translate(v: V[F,_2])(implicit ev1 : CES[V,F, _2], tensor1:T1[F,V,_2], field: Field[F]): OBB2Over[V, F] = {
    new OBB2Over(center + v, right, up, extentRight, extentUp)
  }

  /** Scales relative to the coordinate origin: both center and extents are multiplied. */
  override def scaleAroundBasis(factor: F)(implicit ev1 : CES[V,F, _2], tensor1:T1[F,V,_2], field: Field[F]): OBB2Over[V, F] = {
    new OBB2Over(center * factor, right, up, extentRight * factor, extentUp * factor)
  }

  /**
   * Returns the four corner vertices in order: bottom-left, bottom-right,
   * top-right, top-left (relative to the right/up axes).
   */
  def genVertices()(implicit ev1 : CES[V,F, _2], tensor1:T1[F,V,_2], field: Field[F], tag: ClassTag[V[F,_2]]): Array[V[F,_2]] = Array[V[F,_2]](center - right * extentRight - up * extentUp, center + right * extentRight - up * extentUp, center + right * extentRight + up * extentUp, center - right * extentRight + up * extentUp)

  override def toString: String =
  {
    "OBB2(center = "+center.toString() + ";right = " + right.toString() + ";up = " + up.toString() + ";extentRight = " + extentRight + ";extentUp = " + extentUp + ")"
  }
}
object OBB2Over {
  /** Public factory mirroring the (private) primary constructor. */
  def apply[V[_, _ <: Nat], @tbsp F](center: V[F, _2], right: V[F, _2], up: V[F, _2], extentRight: F, extentUp: F): OBB2Over[V, F] =
    new OBB2Over[V, F](
      center = center,
      right = right,
      up = up,
      extentRight = extentRight,
      extentUp = extentUp)
}
| Russoul/UniScalaLib | src/main/scala/russoul/lib/common/math/geometry/simple/OBB2Over.scala | Scala | mit | 2,083 |
/*
* Copyright (c) 2013-2014 Plausible Labs Cooperative, Inc.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
package coop.plausible.sbt.keychain.git
import scala.util.parsing.combinator.RegexParsers
import coop.plausible.sbt.keychain.KeychainPlugin._
/**
* Git credential tool output parser.
*
* Refer to git-credential(7) man page for details on the credential output format.
* https://www.kernel.org/pub/software/scm/git/docs/git-credential.html
*/
/**
 * Git credential tool output parser.
 *
 * Refer to git-credential(7) man page for details on the credential output format.
 * https://www.kernel.org/pub/software/scm/git/docs/git-credential.html
 *
 * Input is a sequence of `key=value` lines; the result is a key -> value map
 * (duplicate keys keep the last occurrence).
 */
private[keychain] object GitCredentialParser extends RegexParsers {
  // Whitespace is significant: values may legitimately contain/abut spaces.
  override def skipWhitespace = false

  // A key is one or more ASCII letters.
  private val key = """[a-zA-Z]+""".r
  // The value is everything after '=' up to the end of the line ('.' stops at newlines).
  private val value = "=" ~> """.*""".r
  // Line terminator: CRLF, LF, CR, or end of input (so a final unterminated line parses).
  // Alternative order matters: CRLF must be tried before its single-character parts.
  private val sep = "\\r\\n" | "\\n" | "\\r" | """\\z""".r

  private val keypair = key ~ value ^^ {
    case key ~ value => (key, value)
  }

  private val keypairs: Parser[Map[String, String]] = rep(keypair <~ sep) ^^ (Map() ++ _)

  /**
   * Parse the given Git credential output, returning either a set of key value pairs, or a keychain error.
   *
   * @param input Git credential data.
   * @return Set of key value pairs, or a keychain error.
   */
  def parseInput (input: String): Either[KeychainError, Map[String, String]] = {
    parseAll (keypairs, input) match {
      case Success(v, _) => Right(v)
      case e: NoSuccess => Left(CommandFailed(s"Failed to parse git credential helper output: $e"))
    }
  }
}
| plausiblelabs/sbt-keychain | src/main/scala/coop/plausible/sbt/keychain/git/GitCredentialParser.scala | Scala | mit | 2,443 |
package org.singingwizard.screeps.ai.tasks
import scalajs.js
import scalajs.js.JSConverters._
import org.singingwizard.screeps.ai._
import org.singingwizard.screeps.api._
import ScreepsContext._
import prickle._
import org.singingwizard.screeps.wrappers.APIPickler
import org.singingwizard.prickle.WeakRef
/**
 * Task that delivers `amount` energy to a target structure, possibly over
 * several ticks and several creeps. Small state machine:
 *   NEED_CREEP -> find/claim a creep carrying enough energy (or schedule a
 *                 GetEnergy task that re-runs this one as its continuation);
 *   RUNNING    -> move the claimed creep to the target and transfer;
 *   COMPLETE   -> done.
 * Mutable fields (`amount`, `creep`, `state`) persist across reschedules.
 */
case class TakeEnergyTo(val target: WeakRef[Structure], var amount: Int = Int.MaxValue,
                        val k: Option[Task] = None,
                        var creep: Option[WeakRef[Creep]] = None,
                        var state: Int = TakeEnergyTo.NEED_CREEP) extends TaskWithContinuation {
  import TakeEnergyTo._

  // Follow-up task to run after this one completes, if any.
  def continuation: TraversableOnce[Task] = k

  def run()(implicit ctx: AIContext): Unit = {
    import ctx._
    try {
      // Throws IllegalStateException if the target no longer exists (handled below).
      val t = target.get
      state match {
        case NEED_CREEP =>
          // Prefer a creep already carrying enough (full, or >= the remaining amount).
          val creeps = t.room.find[Creep](FIND_CREEPS).filter(c => c.carry.energy >= c.carryCapacity || c.carry.energy >= amount)
          val c = if (!creeps.isEmpty) t.pos.findClosestByRangeFrom[Creep](creeps) else null
          Console.log(s"Checking full creeps: $creeps")
          if (!creeps.isEmpty && ctx.claim(c)) {
            creep = Some(c)
            state = RUNNING
            // Start delivering immediately this tick.
            run()
          } else {
            // TODO: Make it prefer creeps better for this job: && (c.carryCapacity >= amount || amount == Int.MaxValue)
            // No loaded creep available: send a free creep to collect energy first,
            // with this task as the continuation.
            val cs = t.room.find[Creep](FIND_CREEPS).filter(c => isAvailable(c))
            Console.log(s"Checking other creeps: $cs")
            cs.headOption match {
              case Some(c) =>
                schedule(new GetEnergy(c, amount, k = Some(this)))
              case None =>
                // TODO: Make this block on a new creep becoming available.
                reschedule()
            }
          }
        case RUNNING =>
          val c = creep.get.get
          // Deliver as much as the creep carries, capped at the remaining amount.
          val n = c.carry.energy min amount
          if (c.transfer(t, RESOURCE_ENERGY, n) == ERR_NOT_IN_RANGE) {
            // Not adjacent yet: keep moving and try again next tick.
            c.moveTo(t)
            reschedule()
          } else {
            amount -= n
            ctx.unclaim(c)
            if (amount <= 0) {
              state = COMPLETE
            } else {
              // Partial delivery: look for another (or the same) creep next tick.
              state = NEED_CREEP
              creep = None
              reschedule()
            }
          }
        case _ => ()
      }
    } catch {
      // WeakRef.get failed: target or claimed creep is gone.
      case _: IllegalStateException =>
        fail("Creep disappeared")
    }
  }

  override def toString(): String = {
    s"TakeEnergyTo for $target ($state, $creep)"
  }
}
/** @author amp
*/
/** @author amp
  */
object TakeEnergyTo extends TaskCompanion {
  import APIPickler._
  import Task._

  // State-machine constants for TakeEnergyTo.state.
  val NEED_CREEP = 0  // searching for / claiming a creep with energy
  val RUNNING = 1     // a claimed creep is delivering to the target
  val COMPLETE = 2    // requested amount fully delivered

  /** Registers this concrete Task type with the Task pickler for serialization. */
  def register(pickler: PicklerPair[Task]) = {
    pickler.concreteType[TakeEnergyTo]
  }
}
} | arthurp/amps-screeps | src/main/scala/org/singingwizard/screeps/ai/tasks/TakeEnergyTo.scala | Scala | gpl-3.0 | 2,758 |
package rml.args.register
import rml.args.arg.Arg
import rml.args.arg.ArgState
import rml.args.arg.Func
import rml.args.arg.function.FunctionOrigin
import com.typesafe.scalalogging.LazyLogging
import rml.args.arg.FuncArg
/** Registration entry point keyed by a command string.
  *
  * `@@("key")` starts building a registration; assignment forms
  * (`@@("key") = func`, via `update`) register `func` under the key path.
  *
  * NOTE(review): the split pattern "\\\\s+" denotes the regex \\s+ (a literal
  * backslash followed by 's'), not whitespace; "\\s+" was probably intended —
  * this may be an escaping artifact of the source dump, confirm upstream.
  */
object @@ {
  def apply(key: String) = RegisterBuilder(key, ArgState())
  /** As apply(key), but seeds the argument state with a description. */
  def apply(key: String, desc: String) = RegisterBuilder(key, ArgState(description = desc))
  /** Looks up the function registered under the given key path. */
  def apply(key: List[String]): FuncArg[_] = FunctionRegister(key)
  /** Registers `func` under the (whitespace-separated) key. */
  def update[R](key: String, func: FuncArg[R]) = FunctionRegister(key.split("\\\\s+").toList) = func
  /** Registers `func`, tagged with a description, under the key. */
  def update[R](key: String, desc: String, func: FuncArg[R]) = FunctionRegister(key.split("\\\\s+").toList) = (func -- desc)
}
/** Lookup counterpart of [[@@]]: splits the key on the same pattern and
  * resolves it to the registered function.
  *
  * NOTE(review): as written, "\\\\s+" is the regex \\s+ (a literal backslash
  * then 's'), not whitespace; likely an escaping artifact — confirm.
  */
object @@@ {
  def apply(key: String): FuncArg[_] = @@(key.split("\\\\s+").toList)
}
/** Word-based registration entry point: `@@`, `key` and `cmd` are aliases that
  * all start a RegisterBuilder for the given command key.
  */
object Register extends LazyLogging {
  def @@(key: String) = cmd(key)
  def key(key: String) = cmd(key)
  def cmd(key: String) = RegisterBuilder(key, ArgState())
}
| rml/scala_args | src/main/scala/rml/args/register/Register.scala | Scala | gpl-3.0 | 948 |
/*
* Boolean Operations (operands are forced to bool).
*/
package see.operations
import see.Binary
import see.Scope
import see.Unary
import see.nodes.Node
import see.values._
/** Logical negation ("!"): booleans are inverted; numbers are coerced to a
  * boolean first; anything else is delegated to the base Unary handling.
  */
private[see] object UnaryNot extends Unary("!") {
  override def apply(s: Scope, v: Val): Val = v match {
    case Bool(x) => Bool(!x)
    case n: Number => Bool(!n.toBool)
    case _ => super.apply(s, v) // fall back to the generic unary behaviour
  }
}
/** Short-circuit logical AND ("&&").
  *
  * `needsRhs` tells the evaluator the right operand can be skipped when the
  * left is already false; `isDefinedFor` mirrors that short-circuit — the rhs
  * only has to be defined when the lhs evaluates to true.
  */
private[see] object BoolAnd extends Binary("&&") {
  override def needsRhs(lhs: Val) = lhs.toBool
  override def apply(lhs: Val, rhs: Val): Val = Bool(lhs.toBool && rhs.toBool)
  override def isDefinedFor(s: Scope, lhs: Node, rhs: Node) =
    (lhs isDefinedIn s) && ((rhs isDefinedIn s) || !lhs.evalIn(s).coerce.toBool)
}
/** Short-circuit logical OR ("||").
  *
  * `needsRhs` lets the evaluator skip the right operand when the left is
  * already true; `isDefinedFor` mirrors the same short-circuit for
  * definedness checks.
  */
private[see] object BoolOr extends Binary("||") {
  override def needsRhs(lhs: Val) = !lhs.toBool
  override def apply(lhs: Val, rhs: Val): Val = Bool(lhs.toBool || rhs.toBool)
  override def isDefinedFor(s: Scope, lhs: Node, rhs: Node) =
    (lhs isDefinedIn s) && ((rhs isDefinedIn s) || lhs.evalIn(s).coerce.toBool)
}
/** Logical XOR ("^^"); both operands are always evaluated (no short-circuit). */
private[see] object BoolXor extends Binary("^^") {
  override def apply(lhs: Val, rhs: Val): Val = Bool(lhs.toBool ^ rhs.toBool)
}
| RayRacine/scee | src/main/scala/see/operations/BoolOps.scala | Scala | bsd-3-clause | 1,168 |
package hu.japy.dev.katas
import org.junit._
import Assert._
@Test
class BerlinUhrTest {

  val berlin = new BerlinUhrClass

  /** Asserts that the given time string renders to the expected lamp string. */
  private def check(expected: String, time: String): Unit =
    assertEquals(expected, berlin.berlinUhrAsSingleString(time))

  @Test
  def test000000() = check("YOOOOOOOOOOOOOOOOOOOOOOO", "00:00:00")

  @Test
  def test165006() = check("YRRROROOOYYRYYRYYRYOOOOO", "16:50:06")

  @Test
  def test235959() = check("ORRRRRRROYYRYYRYYRYYYYYY", "23:59:59")

  @Test
  def test113701() = check("ORROOROOOYYRYYRYOOOOYYOO", "11:37:01")
}
| ghajba/Coding-Katas | BerlinUhr/src/test/scala/hu/japy/dev/katas/BerlinUhrTest.scala | Scala | mit | 591 |
// IDE type-inference test fixture: the /*start*/.../*end*/ markers select the
// expression under test, and the trailing comment line states its expected
// type. Do not reformat — those comments are data consumed by the test harness.
class SCL6198 {
  case class Cell[A](value: A)
  case class Row(i: Int)(val values: Cell[_]*)
  val row = Row(1)(Cell("a"), Cell("b"), Cell("c"))
  row.values.foreach {
    cell => println(/*start*/cell/*end*/.value)
  }
}
//SCL6198.this.type#Cell[_]
//SCL6198.this.type#Cell[_] | consulo/consulo-scala | testdata/typeInference/bugs5/SCL6198.scala | Scala | apache-2.0 | 252 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.hadoop
import org.specs.Specification
import com.twitter.zipkin.gen
import com.twitter.scalding._
import gen.AnnotationType
import scala.collection.JavaConverters._
import collection.mutable.HashMap
import com.twitter.zipkin.hadoop.sources._
/**
 * Tests that DependencyTree finds all service calls and how often per pair
 * of endpoints.
 */
class DependencyTreeSpec extends Specification with TupleConversions {
  noDetailedDiffs()

  val timeGranularity = TimeGranularity.Day
  implicit val dateRange = DateRange(RichDate(123), RichDate(321))

  // Fixture endpoints: all three "services" share the same host/port and
  // differ only by service name.
  val endpoint = new gen.Endpoint(123, 666, "service")
  val endpoint1 = new gen.Endpoint(123, 666, "service1")
  val endpoint2 = new gen.Endpoint(123, 666, "service2")
  // Spans carrying client-send / server-receive (or client-receive) annotations
  // for each fixture service.
  val span = new gen.SpanServiceName(12345, "methodcall", 666,
    List(new gen.Annotation(1000, "cs").setHost(endpoint), new gen.Annotation(2000, "sr").setHost(endpoint)).asJava,
    List[gen.BinaryAnnotation]().asJava, "service")
  val span1 = new gen.SpanServiceName(123456, "methodcall", 666,
    List(new gen.Annotation(1000, "cs").setHost(endpoint1), new gen.Annotation(4000, "sr").setHost(endpoint1)).asJava,
    List(new gen.BinaryAnnotation("bye", null, AnnotationType.BOOL)).asJava, "service1")
  val span2 = new gen.SpanServiceName(1234567, "methodcall", 666,
    List(new gen.Annotation(1000, "cs").setHost(endpoint2), new gen.Annotation(3000, "cr").setHost(endpoint2)).asJava,
    List(new gen.BinaryAnnotation("bye", null, AnnotationType.BOOL)).asJava, "service2")

  // NOTE(review): span2 is defined but never included in `spans` — confirm
  // whether that is intentional.
  val spans = Util.repeatSpan(span, 30, 40, 1) ++ Util.repeatSpan(span1, 50, 200, 40)

  "DependencyTree" should {
    "Find the number of calls between endpoints" in {
      JobTest("com.twitter.zipkin.hadoop.DependencyTree")
        .arg("input", "inputFile")
        .arg("output", "outputFile")
        .arg("date", "2012-01-01T01:00")
        .source(PreprocessedSpanSource(timeGranularity), spans)
        .source(PrepTsvSource(timeGranularity), Util.getSpanIDtoNames(spans))
        .sink[(String, String, Long)](Tsv("outputFile")) {
          // Expected (parent, child) pairs, initialised to 0 and then filled in
          // from the job's output buffer before being asserted on.
          val map = new HashMap[String, Long]()
          map("service, " + Util.UNKNOWN_SERVICE_NAME) = 0
          map("service1, service") = 0
          map("service1, " + Util.UNKNOWN_SERVICE_NAME) = 0
          outputBuffer => outputBuffer foreach { e =>
            map(e._1 + ", " + e._2) = e._3
          }
          map("service, " + Util.UNKNOWN_SERVICE_NAME) mustEqual 31
          map("service1, service") mustEqual 31
          map("service1, " + Util.UNKNOWN_SERVICE_NAME) mustEqual 20
        }.run.finish
    }
  }
}
| devcamcar/zipkin | zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/DependencyTreeSpec.scala | Scala | apache-2.0 | 3,158 |
/**********************************************************************************************************************
* This file is part of Scrupal, a Scalable Reactive Web Application Framework for Content Management *
* *
* Copyright (c) 2015, Reactific Software LLC. All Rights Reserved. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance *
* with the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed *
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for *
* the specific language governing permissions and limitations under the License. *
**********************************************************************************************************************/
package scrupal.api
import java.net.URL
import play.api.Configuration
import scrupal.storage.api.{SchemaDesign, Store, StoreContext}
import scrupal.utils._
import scala.collection.immutable.HashMap
import scala.concurrent.ExecutionContext
/** A modular plugin to Scrupal to extend its functionality.
  * A module is an object that provides information (data) and functionality (behavior) to Scrupal so that Scrupal can
  * be extended to do things it was not originally invented to do. The scrupal-core module provides the abstractions,
  * such as Module, to make this possible. Everything else Scrupal provides is done as a module that extends the
  * functionality of the API.
  */
abstract class Module(implicit scrpl : Scrupal) extends {
  implicit val scrupal : Scrupal = scrpl
} with Settingsable with Registrable[Module] with Authorable with Describable with Enablee
  with Enablement[Module] with Versionable with Bootstrappable {

  def registry = scrupal.Modules

  /** The name of the database your module's schema is stored to
    *
    * Generally most modules want to live in the "scrupal" database and most everything else. However,
    * if you override this, your module's own content will be stored in the correspondingly named database.
    */
  val dbName : String = "scrupal"

  /** A URL at which further details about this module are published (supplied by each concrete module). */
  def moreDetailsURL : URL

  /** A mapping of the Module's dependencies.
    * The dependencies map provides the version for each named module this module depends on. The default value lists
    * the primary dependency upon the most recent version of the `Core` module. New
    * modules should always depend on the latest version of Scrupal available at the time of their writing to ensure
    * the longest future lifespan before they become obsoleted.
    */
  def dependencies : Map[Identifier, Version] = HashMap('Core -> Version(0, 1, 0))

  // TODO: Can modules provide modules? def modules : Seq[Module]
  // TODO: Can modules provide applications? def applications : Seq[Application]

  /** The set of nodes this module defines.
    * A node is simply a dynamic content generator. It is a Function0 (no arguments, returns a result) and can be
    * used in templates and other places to generate dynamic content either from a template or directly from code.
    * @return The sequence of nodes defined by this module.
    */
  def nodes : Seq[Node] // FIXME: This should return a type of node, not node itself

  // TODO: Can modules provide instances ?

  /** The set of Features that this Module provides.
    * These features can be enabled and disabled through the admin interface and the module can provide its own
    * functionality for when those events occur. See [[scrupal.api.Feature]]
    */
  def features : Seq[Feature]

  /** The entities that this module supports.
    * An entity combines together a BundleType for storage, a set of REST API handlers,
    * additional operations that can be requested, and
    */
  def entities : Seq[Entity]

  /** Finds one of this module's entities by its identifier, if present. */
  def entity(id: Symbol) = entities.find { e ⇒ e.id == id }

  // TODO: Can modules provide sites ?

  /** The set of handlers for the events this module is interested in.
    * Interest is expressed by providing a handler for each event the module wishes to intercept. When the event occurs
    * the module's handler will be invoked. Multiple modules can register for interest in the same event but there is
    * no defined order in which the handlers are invoked.
    */
  def handlers : Seq[EventHandlerFor[Event]]

  /** Modules form no enablement hierarchy: nothing is ever a child scope of a module. */
  final def isChildScope(e : Enablement[_]) : Boolean = false

  /** The set of Database Schemas that this Module defines.
    * Modules may need to have their own special database tables. This is where a module tells Scrupal about those
    * schemas.
    */
  def schemas : Seq[SchemaDesign] = Seq()

  /** Determine compatibility between `this` [[scrupal.api.Module]] and `that`.
    * This module is compatible with `that` if either `that` does not depend on `this` or the version `that` requires
    * comes after the `obsoletes` version of `this`
    * @param that The module that purports to depend on `this` for which compatibility is being checked
    * @return Whether `this` is compatible with `that`
    */
  def isCompatibleWith(that : Module) : Boolean = {
    // Single map lookup: forall on None covers the "no dependency" case, and
    // avoids the double lookup + partial Option.get of the previous version.
    that.dependencies.get(this.id).forall(_ > this.obsoletes)
  }

  /** Load lazy instantiated objects into memory
    * This is part of the bootstrapping mechanism
    */
  override protected[scrupal] def bootstrap(config : Configuration) = {
    // Touch the various aspects of the module by by asking for it's id's length.
    // This just makes sure it gets instantiated & registered as well as not being null
    features foreach { feature ⇒
      require(feature != null); require(feature.label.length > 0); feature.bootstrap(config)
    }
    entities foreach { entity ⇒
      require(entity != null); require(entity.label.length > 0); entity.bootstrap(config)
    }
    nodes foreach { node ⇒
      require(node != null); node.bootstrap(config)
    }
    // FIXME: What about handlers and schemas?
  }
}
/** The Registry of Modules for this Scrupal.
  *
  * This object is the registry of Module objects. When a [[scrupal.api.Module]] is instantiated, it will
  * register itself with this object.
  */
case class ModulesRegistry() extends Registry[Module] {
  val registryName = "Modules"
  val registrantsName = "module"

  /** Constructs the database schemas of every registered module, creating any
    * schema the store does not yet have before constructing it.
    */
  private[scrupal] def installSchemas(implicit context : StoreContext, ec : ExecutionContext) : Unit = {
    // For each module ...
    values foreach { mod : Module ⇒
      // In a database session ...
      context.withStore { implicit store : Store ⇒
        // For each schema ...
        mod.schemas.foreach { design : SchemaDesign ⇒
          if (!store.hasSchema(design.name))
            store.addSchema(design)
          store.withSchema(design.name) { schema ⇒
            schema.construct
          }
        }
      }
    }
  }
}
| scrupal/scrupal | scrupal-api/src/main/scala/scrupal/api/Module.scala | Scala | apache-2.0 | 7,738 |
package net.bmjames.opts.common
import scalaz.Monoid
/** Result of attempting to match a command-line option or argument. */
sealed trait MatchResult
/** The matcher did not match. */
case object NoMatch extends MatchResult
/** A successful match; `s` presumably carries any attached/remaining argument
  * text — confirm against the parser that produces these values. */
case class Match(s: Option[String]) extends MatchResult
object MatchResult {
  /** First-match-wins monoid over [[MatchResult]]: NoMatch is the identity,
    * and an existing Match on the left short-circuits, leaving the by-name
    * right operand unevaluated.
    */
  implicit def matchResultMonoid: Monoid[MatchResult] =
    new Monoid[MatchResult] {
      def zero: MatchResult = NoMatch

      def append(f1: MatchResult, f2: => MatchResult): MatchResult =
        f1 match {
          case NoMatch          => f2
          case found @ Match(_) => found
        }
    }
}
| bmjames/scala-optparse-applicative | src/main/scala/net/bmjames/opts/common/MatchResult.scala | Scala | bsd-3-clause | 489 |
package org.workcraft.gui.docking
import javax.swing.JPanel
import javax.swing.JButton
import java.awt.Dimension
import javax.swing.Icon
import java.awt.event.ActionListener
import java.awt.event.ActionEvent
import java.awt.BorderLayout
import java.awt.Color
import javax.swing.UIManager
import java.awt.FlowLayout
import javax.swing.JLabel
import java.awt.Font
import javax.swing.JComponent
import javax.swing.BorderFactory
import org.workcraft.scala.Expressions._
import org.workcraft.scala.effects.IO._
/** Swing panel wrapping a dockable window's content, adding a title header
  * with optional minimise/maximise/close buttons as dictated by the window's
  * configuration.
  */
class DockableWindowContentPanel[A <: JComponent](val window: DockableWindow[A]) extends JPanel {

  /** Title bar of the dockable window: title label on the left (west), window
    * control buttons on the right (east).
    */
  object Header extends JPanel {
    /** Builds a borderless, non-focusable button sized exactly to its icon. */
    def createHeaderButton(icon: Icon) = {
      val button = new JButton()
      button.setPreferredSize(new Dimension(icon.getIconWidth(), icon.getIconHeight()))
      button.setFocusable(false)
      button.setBorder(null)
      button.setIcon(icon)
      button
    }

    setLayout(new BorderLayout())

    // Header background colour: under a Substance look & feel a slightly
    // darkened panel background is used instead of the InternalFrame colour.
    val c = if (UIManager.getLookAndFeel().getName().contains("Substance")) {
      val bgc = getBackground()
      new Color((bgc.getRed() * 0.9).toInt, (bgc.getGreen() * 0.9).toInt, (bgc.getBlue() * 0.9).toInt)
    } else
      UIManager.getColor("InternalFrame.activeTitleBackground")

    setBackground(c)

    if (window.configuration.minimiseButton || window.configuration.maximiseButton || window.configuration.closeButton) {
      val buttonPanel = new JPanel()
      buttonPanel.setBackground(c)
      buttonPanel.setLayout(new FlowLayout(FlowLayout.TRAILING, 4, 2))
      buttonPanel.setFocusable(false)
      add(buttonPanel, BorderLayout.EAST)

      // Number of buttons actually added; used below to size the panel.
      var buttons = 0

      if (window.configuration.minimiseButton) {
        val btnMin = createHeaderButton(UIManager.getIcon("InternalFrame.minimizeIcon"))
        btnMin.addActionListener(new ActionListener() {
          override def actionPerformed(evt: ActionEvent) = window.configuration.onMinimiseClicked(window)
        })
        btnMin.setToolTipText("Toggle minimized")
        buttonPanel.add(btnMin)
        buttons += 1
      }

      if (window.configuration.maximiseButton) {
        val btnMax: JButton = createHeaderButton(UIManager.getIcon("InternalFrame.maximizeIcon"))
        btnMax.addActionListener(new ActionListener() {
          override def actionPerformed(evt: ActionEvent) = {
            window.configuration.onMaximiseClicked(window)
            // Swap icon/tooltip to reflect the state after the click.
            if (window.isMaximised) {
              btnMax.setIcon(UIManager.getIcon("InternalFrame.minimizeIcon"))
              btnMax.setToolTipText("Restore window")
            } else {
              btnMax.setIcon(UIManager.getIcon("InternalFrame.maximizeIcon"))
              btnMax.setToolTipText("Maximize window")
            }
          }
        })
        buttonPanel.add(btnMax)
        buttons += 1
      }

      if (window.configuration.closeButton) {
        val btnClose = createHeaderButton(UIManager.getIcon("InternalFrame.closeIcon"))
        btnClose.addActionListener(new ActionListener() {
          override def actionPerformed(evt: ActionEvent) = window.configuration.onCloseClicked(window)
        })
        btnClose.setToolTipText("Close window")
        buttonPanel.add(btnClose)
        buttons += 1
      }

      // Sizes the panel from the close icon's dimensions plus padding per
      // button (assumes all header icons share that size — TODO confirm).
      buttonPanel.setPreferredSize(new Dimension((UIManager.getIcon("InternalFrame.closeIcon").getIconWidth() + 4) * buttons, UIManager.getIcon("InternalFrame.closeIcon").getIconHeight() + 4))
    }

    // Title label, kept in sync with the window's reactive title expression.
    val label = new JLabel(window.title.unsafeEval)
    val refresh = swingAutoRefresh (window.title, (title:String) => ioPure.pure { label.setText(title) })
    label.setOpaque(false);
    label.setForeground(UIManager.getColor("InternalFrame.activeTitleForeground"));
    label.setFont(label.getFont().deriveFont(Font.BOLD));
    add(label, BorderLayout.WEST);
  }

  setLayout(new BorderLayout(0, 0))

  val contentPane = new JPanel()
  contentPane.setLayout(new BorderLayout(0, 0))
  contentPane.add(window.content, BorderLayout.CENTER)
  contentPane.setBorder(BorderFactory.createLineBorder(contentPane.getBackground(), 2))
  contentPane.add(Header, BorderLayout.NORTH)
  add(contentPane, BorderLayout.CENTER)
  setFocusable(false)

  /** Re-attaches the header to the content pane if it is currently hidden. */
  def showHeader =
    if (Header.getParent() != contentPane) {
      contentPane.add(Header, BorderLayout.NORTH)
      contentPane.doLayout()
    }

  /** Detaches the header from the content pane if it is currently shown. */
  def hideHeader =
    if (Header.getParent() == contentPane) {
      contentPane.remove(Header)
      contentPane.doLayout()
    }
}
| mechkg/workcraft | Gui/src/main/scala/org/workcraft/gui/docking/DockableWindowContentPanel.scala | Scala | gpl-3.0 | 4,446 |
package org.igye.learnpl2
import org.igye.learnpl2.enums.{Person, Gender, PartOfSpeech}
/** A word together with its grammatical attributes.
  *
  * NOTE(review): nothing named `Number` is imported here, so `Option[Number]`
  * resolves to java.lang.Number via the implicit java.lang import, unlike the
  * sibling enum types (Gender, Person, PartOfSpeech) — an enums-package type
  * was probably intended; confirm.
  *
  * @param writtenRepresentation the word as written
  * @param partOfSpeech          its grammatical part of speech
  * @param number                grammatical number, if applicable
  * @param gender                grammatical gender, if applicable
  * @param person                grammatical person, if applicable
  */
case class WordOld(writtenRepresentation: String,
                   partOfSpeech: PartOfSpeech,
                   number: Option[Number] = None,
                   gender: Option[Gender] = None,
                   person: Option[Person] = None
                    ) {
// The commented-out auxiliary constructor below is superseded by the default
// parameter values above.
//    def this(writtenRepresentation: String, partOfSpeech: PartOfSpeech) = this(
//        writtenRepresentation,
//        partOfSpeech,
//        number = None,
//        gender = None,
//        person = None
//    )
}
| Igorocky/learnpl2 | src/main/scala/org/igye/learnpl2/WordOld.scala | Scala | mit | 570 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.