code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.domain.eventlog
import com.normation.eventlog._
import scala.xml._
import com.normation.rudder.domain.policies._
import org.joda.time.DateTime
import net.liftweb.common._
import com.normation.utils.HashcodeCaching
/** Root of all rule-related event logs; pins every subtype to the rule log category. */
sealed trait RuleEventLog extends EventLog { override final val eventLogCategory = RuleLogCategory }
/** Event log recording the creation of a rule. */
final case class AddRule(
  override val eventDetails : EventLogDetails
) extends RuleEventLog with HashcodeCaching {
  override val eventType = AddRule.eventType
}
/** Filter companion: identifies AddRule events and rebuilds them from stored details. */
object AddRule extends EventLogFilter {
  override val eventType = AddRuleEventType
  // Only the details half of the pair is needed: the event type is fixed by this filter.
  override def apply(x : (EventLogType, EventLogDetails)) : AddRule = AddRule(x._2)
}
/** Event log recording the deletion of a rule. */
final case class DeleteRule(
  override val eventDetails : EventLogDetails
) extends RuleEventLog with HashcodeCaching {
  override val eventType = DeleteRule.eventType
}
/** Filter companion: identifies DeleteRule events and rebuilds them from stored details. */
object DeleteRule extends EventLogFilter {
  override val eventType = DeleteRuleEventType
  // Only the details half of the pair is needed: the event type is fixed by this filter.
  override def apply(x : (EventLogType, EventLogDetails)) : DeleteRule = DeleteRule(x._2)
}
/** Event log recording the modification of a rule. */
final case class ModifyRule(
  override val eventDetails : EventLogDetails
) extends RuleEventLog with HashcodeCaching {
  override val eventType = ModifyRule.eventType
}
/** Filter companion: identifies ModifyRule events and rebuilds them from stored details. */
object ModifyRule extends EventLogFilter {
  override val eventType = ModifyRuleEventType
  // Only the details half of the pair is needed: the event type is fixed by this filter.
  override def apply(x : (EventLogType, EventLogDetails)) : ModifyRule = ModifyRule(x._2)
}
/** All rule-related event log filters, in one list for registration/lookup by callers. */
object RuleEventLogsFilter {
  final val eventList : List[EventLogFilter] = List(
    AddRule
  , DeleteRule
  , ModifyRule
  )
} | Kegeruneku/rudder | rudder-core/src/main/scala/com/normation/rudder/domain/eventlog/RuleEventLog.scala | Scala | agpl-3.0 | 3,218 |
package pt.tecnico.dsi.afs
import java.io.File
import com.typesafe.config.{Config, ConfigFactory, ConfigValueType}
import work.martins.simon.expect.{Settings => ScalaExpectSettings}
import work.martins.simon.expect.StringUtils.splitBySpaces
import scala.collection.JavaConverters._
/**
* This class holds all the settings that parameterize AFS.
*
* By default these settings are read from the Config obtained with `ConfigFactory.load()`.
*
* You can change the settings in multiple ways:
*
* - Change them in the default configuration file (e.g. application.conf)
* - Pass a different config holding your configurations: {{{
* new Settings(yourConfig)
* }}}
* However it will be more succinct to pass your config directly to AFS: {{{
* new AFS(yourConfig)
* }}}
* - Extend this class overriding the settings you want to redefine {{{
* object YourSettings extends Settings() {
* override val realm: String = "YOUR.DOMAIN.TLD"
* override val keytabsLocation: String = "/var/local/keytabs"
* override val commandWithAuthentication: String = s"""ssh user@server:port "kadmin -p \\$authenticatingPrincipal""""
* }
* new AFS(YourSettings)
* }}}
*
* @param config the Typesafe `Config` the settings are read from; defaults to `ConfigFactory.load()`
*/
class Settings(config: Config = ConfigFactory.load()) {
  // The "afs" subtree of `config`, backed by the bundled reference configuration.
  val afsConfig: Config = {
    val reference = ConfigFactory.defaultReference()
    val finalConfig = config.withFallback(reference)
    // Fails fast (throws) if the "afs" section is missing keys or has wrongly-typed values.
    finalConfig.checkValid(reference, "afs")
    finalConfig.getConfig("afs")
  }
  import afsConfig._
  // Name of the AFS cell to operate on.
  val cell = getString("cell")
  // Directory used as the AFS cache.
  val cacheDir = new File(getString("cache-dir"))
  // Settings for the underlying scala-expect library. Precedence: "afs.scala-expect"
  // first (with the top-level "scala-expect" section as fallback when both exist),
  // then a top-level "scala-expect" section alone, then the library defaults.
  val scalaExpectSettings = {
    val path = "scala-expect"
    if (afsConfig.hasPath(path)) {
      val c = if (config.hasPath(path)) {
        afsConfig.getConfig(path).withFallback(config.getConfig(path))
      } else {
        afsConfig.getConfig(path)
      }
      // atPath re-wraps the subtree under "scala-expect", which ScalaExpectSettings expects.
      new ScalaExpectSettings(c.atPath(path))
    } else if (config.hasPath(path)) {
      new ScalaExpectSettings(config.getConfig(path).atPath(path))
    } else {
      new ScalaExpectSettings()
    }
  }
  override def toString: String = afsConfig.root.render
}
| ist-dsi/afs | src/main/scala/pt/tecnico/dsi/afs/Settings.scala | Scala | mit | 2,185 |
package utils.helpers
import org.specs2.mutable.Specification
import play.api.test.FakeRequest
import play.api.test.Helpers._
import utils.WithApplication
import utils.csrf.DwpCSRFFormHelper
// Test the carers common CSRF util in here as we have easy access to test including request and fakeapp.
// Test the carers common CSRF util: the helper must produce an actual form field
// for a plain request.
class DwpCSRFSpec extends Specification {
  section("unit")
  "DwpCSRF" should {
    "generate csrf string" in new WithApplication {
      val request = FakeRequest()
      val csrfString = DwpCSRFFormHelper.formField(request)
      // Previous assertion was a tautology ("a" mustEqual "a") plus a debug println,
      // so the test could never fail. Assert the helper really returns content.
      csrfString.toString.trim must not(beEmpty)
    }
  }
  section("unit")
}
| Department-for-Work-and-Pensions/ClaimCapture | c3/test/utils/helpers/DwpCSRFSpec.scala | Scala | mit | 622 |
/*
This could also be implemented directly using `foldRight`.
*/
// Maps each element to a list and flattens the results, reusing the `map` and
// `concat` helpers defined earlier in this chapter (could also be written
// directly with `foldRight`, as the note above says).
def flatMap[A,B](l: List[A])(f: A => List[B]): List[B] =
  concat(map(l)(f)) | ud3sh/coursework | functional-programming-in-scala-textbook/answerkey/datastructures/20.answer.scala | Scala | unlicense | 143 |
package org.coroutines.common
import scala.collection._
/**
 * Simple memoization helpers for functions of arity 1 to 3.
 *
 * Results are cached forever in a mutable map keyed by the argument (or the
 * tuple of arguments). NOT thread-safe: callers must confine each wrapper to a
 * single thread, same as the original implementation.
 */
object Cache {
  /** Memoizing wrapper around a 1-argument function. */
  class _1[T, S](val function: T => S) {
    val cache = mutable.Map[T, S]()
    // getOrElseUpdate evaluates `function` only on a cache miss — same behavior
    // as the previous explicit get/match/update, without the duplication.
    def apply(t: T): S = cache.getOrElseUpdate(t, function(t))
  }
  def cached[T, S](f: T => S): _1[T, S] = new _1(f)
  /** Memoizing wrapper around a 2-argument function, keyed by the argument pair. */
  class _2[T1, T2, S](val function: (T1, T2) => S) {
    val cache = mutable.Map[(T1, T2), S]()
    def apply(t1: T1, t2: T2): S = cache.getOrElseUpdate((t1, t2), function(t1, t2))
  }
  def cached[T1, T2, S](f: (T1, T2) => S): _2[T1, T2, S] = new _2(f)
  /** Memoizing wrapper around a 3-argument function, keyed by the argument triple. */
  class _3[T1, T2, T3, S](val function: (T1, T2, T3) => S) {
    val cache = mutable.Map[(T1, T2, T3), S]()
    def apply(t1: T1, t2: T2, t3: T3): S = cache.getOrElseUpdate((t1, t2, t3), function(t1, t2, t3))
  }
  def cached[T1, T2, T3, S](f: (T1, T2, T3) => S): _3[T1, T2, T3, S] = new _3(f)
}
| storm-enroute/coroutines | coroutines-common/src/main/scala/scala/coroutines/common/Cache.scala | Scala | bsd-3-clause | 1,119 |
package avrohugger
package format
package scavro
import avrohugger.matchers.TypeMatcher
import avrohugger.matchers.custom.CustomNamespaceMatcher
import org.apache.avro.{ Schema, Protocol }
/** Computes the Scala-side namespace Scavro should generate classes into. */
object ScavroNamespaceRenamer {
  // By default, Scavro generates Scala classes in packages that are the same
  // as the Java package with `model` appended.
  // TypeMatcher is here because it holds the custom namespace map
  def renameNamespace(
    maybeNamespace: Option[String],
    schemaOrProtocol: Either[Schema, Protocol],
    typeMatcher: TypeMatcher): Option[String] = {
    // Suffix appended to the Java namespace; overridable via the special
    // "SCAVRO_DEFAULT_PACKAGE$" entry of the custom-namespace map.
    val scavroModelDefaultPackage: String =
      typeMatcher.customNamespaces
        .get("SCAVRO_DEFAULT_PACKAGE$")
        .getOrElse("model")
    val someScavroModelDefaultNamespace = maybeNamespace match {
      case Some(ns) => Some(ns + "." + scavroModelDefaultPackage)
      case None => sys.error("Scavro requires a namespace because Java " +
        "classes cannot be imported from the default package")
    }
    val scavroModelNamespace = {
      // Namespace declared by the schema/protocol itself, if any.
      val ns = schemaOrProtocol match {
        case Left(schema) => Option(schema.getNamespace)
        case Right(protocol) => Option(protocol.getNamespace)
      }
      ns match {
        case Some(schemaNS) => {
          // Custom namespace mappings may still override the declared one.
          CustomNamespaceMatcher.checkCustomNamespace(
            ns,
            typeMatcher,
            maybeDefaultNamespace = someScavroModelDefaultNamespace)
        }
        case None => someScavroModelDefaultNamespace
      }
    }
    scavroModelNamespace
  }
} | julianpeeters/avrohugger | avrohugger-core/src/main/scala/format/scavro/ScavroNamespaceRenamer.scala | Scala | apache-2.0 | 1,529 |
package org.jetbrains.plugins.scala.codeInsight.intentions
import com.intellij.openapi.fileTypes.FileType
import org.jetbrains.plugins.scala.worksheet.WorksheetFileType
/** Base for intention tests that must run against worksheet files instead of regular Scala files. */
trait ScalaWorksheetIntentionTestBase extends ScalaIntentionTestBase {
  override def fileType: FileType = WorksheetFileType
}
| JetBrains/intellij-scala | scala/worksheet/test/org/jetbrains/plugins/scala/codeInsight/intentions/ScalaWorksheetIntentionTestBase.scala | Scala | apache-2.0 | 299 |
import _root_.java.{lang => jl}
// IDE type-inference test data (SCL-3877): the /*start*/.../*end*/ markers delimit
// the expression whose inferred type is checked against the expected-type comment
// at the end of the file — do not alter the markers or that comment.
object T {
  private val wrapperTypes: Map[Class[_], Class[_]] = Map(
    jl.Boolean.TYPE -> /*start*/classOf[jl.Boolean]/*end*/
  )
}
//Class[Boolean] | ilinum/intellij-scala | testdata/typeInference/bugs5/SCL3877.scala | Scala | apache-2.0 | 184 |
/*
* Copyright (C) 20011-2014 Scalable minds UG (haftungsbeschränkt) & Co. KG. <http://scm.io>
*/
package controllers
import javax.inject.Inject
import play.api._
import play.api.i18n.MessagesApi
import play.api.mvc._
import views.html
import play.api.routing.JavaScriptReverseRouter
/**
 * Main application entry points: landing page, home, a few mostly-static pages,
 * and the JavaScript reverse-routing endpoint.
 */
class Application @Inject()(
  config: Configuration,
  val messagesApi: MessagesApi) extends Controller {

  // Fail fast with an explicit message instead of a bare NoSuchElementException
  // (previously `.get` on the Option) when the mandatory "host.url" key is missing.
  val hostUrl = config.getString("host.url")
    .getOrElse(sys.error("Missing required configuration key 'host.url'"))

  /** Landing page: logged-in users are redirected straight to their home page. */
  def index = UserAwareAction { implicit request =>
    if (request.userOpt.isDefined)
      Redirect(controllers.routes.Application.home)
    else
      Ok(views.html.index("Your new application is ready."))
  }

  def home = Authenticated {
    implicit request =>
      Ok(html.home())
  }

  def team(any: String) = Authenticated {
    implicit request =>
      Ok(html.team())
  }

  def faq = UserAwareAction {
    implicit request =>
      Ok(html.faq())
  }

  def terms = UserAwareAction {
    implicit request =>
      Ok(html.terms())
  }

  /** Exposes selected server-side routes to client-side JavaScript as `jsRoutes`. */
  def javascriptRoutes = Action { implicit request =>
    Ok(
      JavaScriptReverseRouter("jsRoutes")(
        // fill in stuff which should be able to be called from js
        controllers.routes.javascript.TimeEntryController.showTimesForInterval,
        controllers.routes.javascript.TimeEntryController.showTimeForUser
      )).as("text/javascript")
  }
} | scalableminds/time-tracker | app/controllers/Application.scala | Scala | mit | 1,415 |
/**
* Copyright (C) 2007 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.library
import org.orbeon.saxon.`type`.{BuiltInAtomicType, Type}
import org.orbeon.saxon.expr.StaticProperty._
import org.orbeon.saxon.`type`.BuiltInAtomicType._
import org.orbeon.oxf.xforms.function.xxforms._
import org.orbeon.oxf.xml.OrbeonFunctionLibrary
import org.orbeon.oxf.xforms.function.{Bind, Event, If, XXFormsValid}
import org.orbeon.oxf.xforms.function.exforms.EXFormsMIP
import org.orbeon.saxon.`type`.Type.{ITEM_TYPE, NODE_TYPE}
/*
* Orbeon extension functions that depend on the XForms environment.
*/
/**
 * Registration table for the Orbeon (xxf:*) extension functions that depend on
 * the XForms environment. Each `Fun` entry declares name, implementing class,
 * `op` discriminator, min (and optionally max) arity, result type/cardinality,
 * and the argument types.
 */
trait XXFormsEnvFunctions extends OrbeonFunctionLibrary {

  // Define in early definition of subclass
  val XXFormsEnvFunctionsNS: Seq[String]

  Namespace(XXFormsEnvFunctionsNS) {
    // NOTE: This is deprecated and just points to the event() function.
    Fun("event", classOf[Event], op = 0, min = 1, Type.NODE_TYPE, ALLOWS_ZERO_OR_MORE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("cases", classOf[XXFormsCases], op = 0, min = 1, STRING, ALLOWS_ZERO_OR_MORE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("repeat-current", classOf[XXFormsRepeatCurrent], op = 0, min = 0, Type.NODE_TYPE, EXACTLY_ONE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("repeat-position", classOf[XXFormsRepeatPosition], op = 0, min = 0, INTEGER, EXACTLY_ONE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("repeat-positions", classOf[XXFormsRepeatPositions], op = 0, min = 0, INTEGER, ALLOWS_ZERO_OR_MORE)
    Fun("context", classOf[XXFormsContext], op = 0, min = 1, Type.NODE_TYPE, ALLOWS_ZERO_OR_ONE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("repeat-items", classOf[XXFormsRepeatItems], op = 0, min = 0, Type.NODE_TYPE, ALLOWS_ZERO_OR_MORE,
      Arg(STRING, EXACTLY_ONE)
    )
    // Backward compatibility, use repeat-items() instead
    Fun("repeat-nodeset", classOf[XXFormsRepeatItems], op = 0, min = 0, Type.NODE_TYPE, ALLOWS_ZERO_OR_MORE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("evaluate-bind-property", classOf[XXFormsEvaluateBindProperty], op = 0, min = 2, ANY_ATOMIC, ALLOWS_ZERO_OR_ONE,
      Arg(STRING, EXACTLY_ONE),
      Arg(ANY_ATOMIC, EXACTLY_ONE) // QName or String
    )
    Fun("valid", classOf[XXFormsValid], op = 0, min = 0, BOOLEAN, EXACTLY_ONE,
      Arg(Type.ITEM_TYPE, ALLOWS_ZERO_OR_MORE),
      Arg(BOOLEAN, EXACTLY_ONE),
      Arg(BOOLEAN, EXACTLY_ONE)
    )
    Fun("type", classOf[XXFormsType], op = 0, min = 0, QNAME, ALLOWS_ZERO_OR_MORE,
      Arg(Type.ITEM_TYPE, ALLOWS_ZERO_OR_MORE)
    )
    Fun("custom-mip", classOf[XXFormsCustomMIP], op = 0, min = 2, STRING, ALLOWS_ZERO_OR_ONE,
      Arg(Type.ITEM_TYPE, ALLOWS_ZERO_OR_MORE),
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("invalid-binds", classOf[XXFormsInvalidBinds], op = 0, min = 0, STRING, ALLOWS_ZERO_OR_MORE,
      Arg(Type.NODE_TYPE, ALLOWS_ZERO_OR_MORE)
    )
    Fun("if", classOf[If], op = 0, min = 3, STRING, EXACTLY_ONE,
      Arg(BOOLEAN, EXACTLY_ONE),
      Arg(STRING, EXACTLY_ONE),
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("binding", classOf[XXFormsBinding], op = 0, min = 1, Type.ITEM_TYPE, ALLOWS_ZERO_OR_MORE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("binding-context", classOf[XXFormsBindingContext], op = 0, min = 1, Type.ITEM_TYPE, ALLOWS_ZERO_OR_ONE,
      Arg(STRING, EXACTLY_ONE)
    )
    // Control-state predicates, each taking a control id.
    Fun("is-control-relevant", classOf[XXFormsIsControlRelevant], op = 0, min = 1, BOOLEAN, EXACTLY_ONE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("is-control-readonly", classOf[XXFormsIsControlReadonly], op = 0, min = 1, BOOLEAN, EXACTLY_ONE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("is-control-required", classOf[XXFormsIsControlRequired], op = 0, min = 1, BOOLEAN, EXACTLY_ONE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("is-control-valid", classOf[XXFormsIsControlValid], op = 0, min = 1, BOOLEAN, EXACTLY_ONE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("value", classOf[XXFormsValue], op = 0, min = 1, STRING, ALLOWS_ZERO_OR_MORE,
      Arg(STRING, EXACTLY_ONE),
      Arg(BOOLEAN, EXACTLY_ONE)
    )
    Fun("formatted-value", classOf[XXFormsFormattedValue], op = 0, min = 1, STRING, ALLOWS_ZERO_OR_MORE,
      Arg(STRING, EXACTLY_ONE),
      Arg(BOOLEAN, EXACTLY_ONE)
    )
    Fun("avt-value", classOf[XXFormsAVTValue], op = 0, min = 2, STRING, ALLOWS_ZERO_OR_MORE,
      Arg(STRING, EXACTLY_ONE),
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("component-context", classOf[XXFormsComponentContext], op = 0, min = 0, Type.ITEM_TYPE, ALLOWS_ZERO_OR_MORE)
    Fun("component-param-value", classOf[XXFormsComponentParam], op = 0, min = 1, ANY_ATOMIC, ALLOWS_ZERO_OR_ONE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("instance", classOf[XXFormsInstance], op = 0, min = 1, Type.NODE_TYPE, EXACTLY_ONE,
      Arg(STRING, EXACTLY_ONE),
      Arg(BOOLEAN, EXACTLY_ONE)
    )
    Fun("index", classOf[XXFormsIndex], op = 0, min = 0, INTEGER, EXACTLY_ONE,
      Arg(STRING, ALLOWS_ZERO_OR_ONE)
    )
    Fun("list-models", classOf[XXFormsListModels], op = 0, min = 0, STRING, ALLOWS_ZERO_OR_MORE)
    Fun("list-instances", classOf[XXFormsListInstances], op = 0, min = 1, STRING, ALLOWS_ZERO_OR_MORE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("list-variables", classOf[XXFormsListVariables], op = 0, min = 1, STRING, ALLOWS_ZERO_OR_MORE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("get-variable", classOf[XXFormsGetVariable], op = 0, min = 2, Type.ITEM_TYPE, ALLOWS_ZERO_OR_MORE,
      Arg(STRING, EXACTLY_ONE),
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("itemset", classOf[XXFormsItemset], op = 0, min = 2, Type.ITEM_TYPE, ALLOWS_ZERO_OR_MORE,
      Arg(STRING, EXACTLY_ONE),
      Arg(STRING, EXACTLY_ONE),
      Arg(BOOLEAN, EXACTLY_ONE)
    )
    Fun("format-message", classOf[XXFormsFormatMessage], op = 0, min = 2, STRING, EXACTLY_ONE,
      Arg(STRING, EXACTLY_ONE),
      Arg(Type.ITEM_TYPE, ALLOWS_ZERO_OR_MORE)
    )
    Fun("lang", classOf[XXFormsLang], op = 0, min = 0, STRING, ALLOWS_ZERO_OR_ONE)
    Fun("r", classOf[XXFormsResource], op = 0, min = 1, STRING, ALLOWS_ZERO_OR_ONE,
      Arg(STRING, EXACTLY_ONE),
      Arg(STRING, EXACTLY_ONE),
      Arg(BuiltInAtomicType.ANY_ATOMIC, EXACTLY_ONE) // `map(*)`
    )
    Fun("resource-elements", classOf[XXFormsResourceElem], op = 0, min = 1, NODE_TYPE, ALLOWS_ZERO_OR_MORE,
      Arg(STRING, EXACTLY_ONE),
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("pending-uploads", classOf[XXFormsPendingUploads], op = 0, min = 0, INTEGER, EXACTLY_ONE)
    Fun("document-id", classOf[XXFormsDocumentId], op = 0, min = 0, STRING, EXACTLY_ONE)
    // LHHA accessors: a single implementing class, with `op` selecting
    // label (0) / help (1) / hint (2) / alert (3).
    // TODO: This is the only place where we use `op`. Should remove it and remove the `op` from `Fun`.
    Fun("label", classOf[XXFormsLHHA], op = 0, min = 1, STRING, ALLOWS_ZERO_OR_ONE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("help", classOf[XXFormsLHHA], op = 1, min = 1, STRING, ALLOWS_ZERO_OR_ONE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("hint", classOf[XXFormsLHHA], op = 2, min = 1, STRING, ALLOWS_ZERO_OR_ONE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("alert", classOf[XXFormsLHHA], op = 3, min = 1, STRING, ALLOWS_ZERO_OR_ONE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("visited", classOf[XXFormsVisited], op = 0, min = 1, BOOLEAN, ALLOWS_ZERO_OR_ONE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("focusable", classOf[XXFormsFocusable], op = 0, min = 1, BOOLEAN, ALLOWS_ZERO_OR_ONE,
      Arg(STRING, EXACTLY_ONE),
      Arg(BOOLEAN, EXACTLY_ONE)
    )
    Fun("absolute-id", classOf[XXFormsAbsoluteId], op = 0, min = 1, STRING, ALLOWS_ZERO_OR_ONE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("client-id", classOf[XXFormsClientId], op = 0, min = 1, STRING, ALLOWS_ZERO_OR_ONE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("control-element", classOf[XXFormsControlElement], op = 0, min = 1, Type.NODE_TYPE, ALLOWS_ZERO_OR_ONE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("extract-document", classOf[XXFormsExtractDocument], op = 0, min = 1, Type.NODE_TYPE, ALLOWS_ZERO_OR_ONE,
      Arg(Type.NODE_TYPE, EXACTLY_ONE),
      Arg(STRING, EXACTLY_ONE),
      Arg(BOOLEAN, EXACTLY_ONE)
    )
    // RFE: Support XSLT 2.0-features such as multiple sort keys
    Fun("sort", classOf[XXFormsSort], op = 0, min = 2, Type.ITEM_TYPE, ALLOWS_ZERO_OR_MORE,
      Arg(Type.ITEM_TYPE, ALLOWS_ZERO_OR_MORE),
      Arg(Type.ITEM_TYPE, EXACTLY_ONE),
      Arg(STRING, ALLOWS_ZERO_OR_ONE),
      Arg(STRING, ALLOWS_ZERO_OR_ONE),
      Arg(STRING, ALLOWS_ZERO_OR_ONE)
    )
    // NOTE: also from exforms
    Fun("relevant", classOf[EXFormsMIP], op = 0, min = 0, BOOLEAN, EXACTLY_ONE,
      Arg(Type.ITEM_TYPE, ALLOWS_ZERO_OR_MORE)
    )
    // NOTE: also from exforms
    Fun("readonly", classOf[EXFormsMIP], op = 1, min = 0, BOOLEAN, EXACTLY_ONE,
      Arg(Type.ITEM_TYPE, ALLOWS_ZERO_OR_MORE)
    )
    // NOTE: also from exforms
    Fun("required", classOf[EXFormsMIP], op = 2, min = 0, BOOLEAN, EXACTLY_ONE,
      Arg(Type.ITEM_TYPE, ALLOWS_ZERO_OR_MORE)
    )
    // Now available in XForms 2.0
    Fun("bind", classOf[Bind], op = 0, min = 1, Type.NODE_TYPE, ALLOWS_ZERO_OR_MORE,
      Arg(STRING, EXACTLY_ONE),
      Arg(BOOLEAN, EXACTLY_ONE)
    )
    // Validation functions
    Fun("max-length", classOf[MaxLengthValidation], op = 0, min = 1, BOOLEAN, EXACTLY_ONE,
      Arg(INTEGER, ALLOWS_ZERO_OR_ONE)
    )
    Fun("min-length", classOf[MinLengthValidation], op = 0, min = 1, BOOLEAN, EXACTLY_ONE,
      Arg(INTEGER, ALLOWS_ZERO_OR_ONE)
    )
    Fun("non-negative", classOf[NonNegativeValidation], op = 0, min = 0, BOOLEAN, EXACTLY_ONE)
    Fun("negative", classOf[NegativeValidation], op = 0, min = 0, BOOLEAN, EXACTLY_ONE)
    Fun("non-positive", classOf[NonPositiveValidation], op = 0, min = 0, BOOLEAN, EXACTLY_ONE)
    Fun("positive", classOf[PositiveValidation], op = 0, min = 0, BOOLEAN, EXACTLY_ONE)
    Fun("fraction-digits", classOf[MaxFractionDigitsValidation], op = 0, min = 1, BOOLEAN, EXACTLY_ONE,
      Arg(INTEGER, ALLOWS_ZERO_OR_ONE)
    )
    Fun(ValidationFunctionNames.UploadMaxSize, classOf[UploadMaxSizeValidation], op = 0, min = 1, BOOLEAN, EXACTLY_ONE,
      Arg(INTEGER, ALLOWS_ZERO_OR_ONE)
    )
    Fun(ValidationFunctionNames.UploadMediatypes, classOf[UploadMediatypesValidation], op = 0, min = 1, BOOLEAN, EXACTLY_ONE,
      Arg(STRING, ALLOWS_ZERO_OR_ONE)
    )
    Fun("evaluate-avt", classOf[XXFormsEvaluateAVT], op = 0, min = 1, max = 10, ITEM_TYPE, ALLOWS_ZERO_OR_MORE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("form-urlencode", classOf[XXFormsFormURLEncode], op = 0, min = 1, STRING, ALLOWS_ZERO_OR_ONE,
      Arg(NODE_TYPE, EXACTLY_ONE)
    )
    // Request information functions
    Fun("get-request-method", classOf[GetRequestMethodTryXFormsDocument], op = 0, 0, STRING, ALLOWS_ONE)
    Fun("get-request-context-path", classOf[GetRequestContextPathTryXFormsDocument], op = 0, 0, STRING, ALLOWS_ONE)
    Fun("get-request-path", classOf[GetRequestPathTryXFormsDocument], op = 0, 0, STRING, ALLOWS_ONE)
    Fun("get-request-header", classOf[GetRequestHeaderTryXFormsDocument], op = 0, min = 1, STRING, ALLOWS_ZERO_OR_MORE,
      Arg(STRING, EXACTLY_ONE),
      Arg(STRING, EXACTLY_ONE)
    )
    Fun("get-request-parameter", classOf[GetRequestParameterTryXFormsDocument], op = 0, min = 1, STRING, ALLOWS_ZERO_OR_MORE,
      Arg(STRING, EXACTLY_ONE)
    )
    Fun(ExcludedDatesValidation.PropertyName, classOf[ExcludedDatesValidation], op = 0, min = 1, BOOLEAN, EXACTLY_ONE,
      Arg(DATE, ALLOWS_ZERO_OR_MORE)
    )
  }
}
| orbeon/orbeon-forms | xforms-runtime/jvm/src/main/scala/org/orbeon/oxf/xforms/library/XXFormsEnvFunctions.scala | Scala | lgpl-2.1 | 12,036 |
package pl.touk.nussknacker.engine.process.typeinformation.internal.typedobject
import com.typesafe.scalalogging.LazyLogging
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.common.typeutils.{TypeSerializer, TypeSerializerSnapshot}
// Flink TypeInformation for a Scala Map[String, AnyRef] with a statically-known
// key set, each key carrying its own TypeInformation.
case class TypedScalaMapTypeInformation(informations: Map[String, TypeInformation[_]]) extends TypedObjectBasedTypeInformation[Map[String, _<:AnyRef]](informations) {
  override def createSerializer(serializers: Array[(String, TypeSerializer[_])]): TypeSerializer[Map[String, _<:AnyRef]] = TypedScalaMapSerializer(serializers)
}
// Serializes Map[String, AnyRef] field-by-field: each known key has a dedicated
// serializer, in the fixed positional order given by `serializers`.
@SerialVersionUID(1L)
case class TypedScalaMapSerializer(override val serializers: Array[(String, TypeSerializer[_])])
  extends TypedObjectBasedTypeSerializer[Map[String, _<:AnyRef]](serializers) with LazyLogging {
  // Rebuilds the map from field values positionally aligned with `serializers`.
  override def deserialize(values: Array[AnyRef]): Map[String, _<:AnyRef] = {
    //using builder instead of zipWithIndex.map.toMap gives 10-20% performance improvement
    val builder = Map.newBuilder[String, AnyRef]
    serializers.indices.foreach { idx =>
      builder.+=((name(idx), values(idx)))
    }
    builder.result()
  }
  // A key absent from the map is serialized as null.
  override def get(value: Map[String, _<:AnyRef], k: String): AnyRef = value.getOrElse(k, null)
  override def duplicate(serializers: Array[(String, TypeSerializer[_])]): TypeSerializer[Map[String, _<:AnyRef]]
    = TypedScalaMapSerializer(serializers)
  override def createInstance(): Map[String, _<:AnyRef] = Map.empty
  override def snapshotConfiguration(snapshots: Array[(String, TypeSerializerSnapshot[_])]): TypeSerializerSnapshot[Map[String, _ <: AnyRef]]
    = new TypedScalaMapSerializerSnapshot(snapshots)
}
// Snapshot of TypedScalaMapSerializer used for state compatibility checks.
// NOTE(review): the no-arg primary constructor is presumably required so Flink can
// instantiate the snapshot reflectively on restore — confirm against Flink docs.
class TypedScalaMapSerializerSnapshot extends TypedObjectBasedSerializerSnapshot[Map[String, _<:AnyRef]] {
  // Auxiliary constructor used when creating a snapshot from live serializers.
  def this(serializers: Array[(String, TypeSerializerSnapshot[_])]) = {
    this()
    this.serializersSnapshots = serializers
  }
  // Key sets may differ between job versions (maps can gain/lose keys).
  override protected def compatibilityRequiresSameKeys: Boolean = false
  override protected def restoreSerializer(restored: Array[(String, TypeSerializer[_])]): TypeSerializer[Map[String, _ <: AnyRef]]
    = TypedScalaMapSerializer(restored)
} | TouK/nussknacker | engine/flink/executor/src/main/scala/pl/touk/nussknacker/engine/process/typeinformation/internal/typedobject/TypedScalaMapBasedTypeInformation.scala | Scala | apache-2.0 | 2,161 |
package org.jetbrains.plugins.scala
package testingSupport.test
import com.intellij.execution.configurations.RunConfiguration
import com.intellij.execution.{JavaRunConfigurationExtensionManager, Location, RunManager, RunnerAndConfigurationSettings}
import com.intellij.psi._
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.base.ScLiteral
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScReferencePattern
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScInfixExpr, ScMethodCall, ScParenthesisedExpr, ScReferenceExpression}
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScPatternDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTemplateDefinition
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiManager
/**
* @author Ksenia.Sautina
* @since 5/22/12
*/
/**
 * Helpers for creating/recognizing test run configurations, and for statically
 * evaluating a test's name from the PSI tree (literals, references to `val`
 * definitions, `+` concatenation, and a whitelist of String methods).
 */
object TestConfigurationUtil {

  /** Creates an "all tests in package" run configuration for the package denoted by `element`. */
  def packageSettings(element: PsiElement, location: Location[_ <: PsiElement],
                      confFactory: AbstractTestRunConfigurationFactory,
                      displayName: String): RunnerAndConfigurationSettings = {
    val pack: PsiPackage = element match {
      case dir: PsiDirectory => JavaDirectoryService.getInstance.getPackage(dir)
      case pack: PsiPackage => pack
    }
    // Directories outside any package yield null; mirror that to the caller.
    if (pack == null) return null
    val settings = RunManager.getInstance(location.getProject).createRunConfiguration(displayName, confFactory)
    val configuration = settings.getConfiguration.asInstanceOf[AbstractTestRunConfiguration]
    configuration.setTestPackagePath(pack.getQualifiedName)
    configuration.setTestKind(TestRunConfigurationForm.TestKind.ALL_IN_PACKAGE)
    configuration.setGeneratedName(displayName)
    configuration.setModule(location.getModule)
    configuration.initWorkingDir()
    JavaRunConfigurationExtensionManager.getInstance.extendCreatedConfiguration(configuration, location)
    settings
  }

  /** True when `configuration` is an all-in-package configuration for the package denoted by `element`. */
  def isPackageConfiguration(element: PsiElement, configuration: RunConfiguration): Boolean = {
    val pack: PsiPackage = element match {
      case dir: PsiDirectory => JavaDirectoryService.getInstance.getPackage(dir)
      case pack: PsiPackage => pack
    }
    if (pack == null) return false
    configuration match {
      case configuration: AbstractTestRunConfiguration =>
        configuration.getTestKind == TestRunConfigurationForm.TestKind.ALL_IN_PACKAGE &&
          configuration.getTestPackagePath == pack.getQualifiedName
      case _ => false
    }
  }

  /** True when `clazz` (deeply) inherits from the class with the given fully-qualified name. */
  def isInheritor(clazz: ScTemplateDefinition, fqn: String): Boolean = {
    val suiteClazz = ScalaPsiManager.instance(clazz.getProject).getCachedClass(clazz.getResolveScope, fqn)
    suiteClazz.fold(false)(ScalaPsiUtil.cachedDeepIsInheritor(clazz, _))
  }

  // Statically evaluates `element` to a String or Number when it is built only from
  // literals (and, optionally, Symbol literals), `+` concatenation, references to
  // pattern (`val`) definitions, and the whitelisted String methods below.
  // Returns None for anything it cannot evaluate.
  private def getStaticTestNameElement(element: PsiElement, allowSymbolLiterals: Boolean): Option[Any] = {
    val noArgMethods = Seq("toLowerCase", "trim", "toString")
    val oneArgMethods = Seq("stripSuffix", "stripPrefix", "substring")
    val twoArgMethods = Seq("replace", "substring")
    def processNoArgMethods(refExpr: ScReferenceExpression) =
      if (refExpr.refName == "toString") {
        //special handling for now, since only toString is allowed on integers
        refExpr.smartQualifier.flatMap(getStaticTestNameElement(_, allowSymbolLiterals) match {
          case Some(string: String) => Some(string)
          case Some(number: Number) => Some(number.toString)
          case _ => None
        })
      } else refExpr.smartQualifier.
        flatMap(getStaticTestNameRaw(_, allowSymbolLiterals)).flatMap { expr =>
        refExpr.refName match {
          case "toLowerCase" => Some(expr.toLowerCase)
          case "trim" => Some(expr.trim)
          case "toString" => Some(expr)
          case _ => None
        }
      }
    element match {
      case literal: ScLiteral if literal.isString && literal.getValue.isInstanceOf[String] =>
        Some(escapeTestName(literal.getValue.asInstanceOf[String]))
      case literal: ScLiteral if allowSymbolLiterals && literal.isSymbol && literal.getValue.isInstanceOf[Symbol] =>
        Some(escapeTestName(literal.getValue.asInstanceOf[Symbol].name))
      case literal: ScLiteral if literal.getValue.isInstanceOf[Number] =>
        Some(literal.getValue)
      case p: ScParenthesisedExpr => p.expr.flatMap(getStaticTestNameRaw(_, allowSymbolLiterals))
      // String concatenation: both operands must themselves evaluate statically.
      case infixExpr: ScInfixExpr =>
        infixExpr.getInvokedExpr match {
          case refExpr: ScReferenceExpression if refExpr.refName == "+" =>
            getStaticTestNameElement(infixExpr.lOp, allowSymbolLiterals).flatMap(left => getStaticTestNameElement(infixExpr.rOp, allowSymbolLiterals).map(left + _.toString))
          case _ => None
        }
      case methodCall: ScMethodCall =>
        methodCall.getInvokedExpr match {
          case refExpr: ScReferenceExpression if noArgMethods.contains(refExpr.refName) &&
            methodCall.argumentExpressions.isEmpty =>
            processNoArgMethods(refExpr)
          case refExpr: ScReferenceExpression if oneArgMethods.contains(refExpr.refName) &&
            methodCall.argumentExpressions.size == 1 =>
            // Apply the whitelisted one-argument String method to statically-evaluated operands.
            def helper(anyExpr: Any, arg: Any): Option[Any] = (anyExpr, refExpr.refName, arg) match {
              case (expr: String, "stripSuffix", string: String) => Some(expr.stripSuffix(string))
              case (expr: String, "stripPrefix", string: String) => Some(expr.stripPrefix(string))
              case (expr: String, "substring", integer: Int) => Some(expr.substring(integer))
              case _ => None
            }
            methodCall.argumentExpressions.headOption.flatMap(getStaticTestNameElement(_, allowSymbolLiterals)).
              flatMap(arg =>
                refExpr.smartQualifier.flatMap(getStaticTestNameElement(_, allowSymbolLiterals)).flatMap(helper(_, arg))
              )
          case refExpr: ScReferenceExpression if twoArgMethods.contains(refExpr.refName) &&
            methodCall.argumentExpressions.size == 2 =>
            // Apply the whitelisted two-argument String method to statically-evaluated operands.
            def helper(anyExpr: Any, arg1: Any, arg2: Any): Option[Any] = (anyExpr, refExpr.refName, arg1, arg2) match {
              case (expr: String, "replace", s1: String, s2: String) => Some(expr.replace(s1, s2))
              case (expr: String, "substring", begin: Int, end: Int) => Some(expr.substring(begin, end))
              case _ => None
            }
            val arg1Opt = getStaticTestNameElement(methodCall.argumentExpressions.head, allowSymbolLiterals)
            val arg2Opt = getStaticTestNameElement(methodCall.argumentExpressions(1), allowSymbolLiterals)
            (arg1Opt, arg2Opt) match {
              case (Some(arg1), Some(arg2)) =>
                refExpr.smartQualifier.flatMap(getStaticTestNameElement(_, allowSymbolLiterals)).flatMap(helper(_, arg1, arg2))
              case _ => None
            }
          case _ => None
        }
      case refExpr: ScReferenceExpression if refExpr.getText == "+" =>
        getStaticTestNameRaw(refExpr.getParent, allowSymbolLiterals)
      case refExpr: ScReferenceExpression if noArgMethods.contains(refExpr.refName) =>
        processNoArgMethods(refExpr)
      // A plain reference: follow it to a `val` pattern definition and evaluate its body.
      case refExpr: ScReferenceExpression =>
        refExpr.advancedResolve.map(_.getActualElement) match {
          case Some(refPattern: ScReferencePattern) =>
            ScalaPsiUtil.nameContext(refPattern) match {
              case patternDef: ScPatternDefinition => patternDef.expr.flatMap(getStaticTestNameRaw(_, allowSymbolLiterals))
              case _ => None
            }
          case _ => None
        }
      case _ => None
    }
  }

  // Like getStaticTestNameElement, but keeps only String results.
  private def getStaticTestNameRaw(element: PsiElement, allowSymbolLiterals: Boolean): Option[String] =
    getStaticTestNameElement(element, allowSymbolLiterals).filter(_.isInstanceOf[String]).map(_.asInstanceOf[String])

  /** Statically-known test name for `element`, trimmed, when it can be evaluated. */
  def getStaticTestName(element: PsiElement, allowSymbolLiterals: Boolean = false): Option[String] =
    getStaticTestNameRaw(element, allowSymbolLiterals).map(_.trim)

  /** Statically-known test name, or `default` when it cannot be evaluated. */
  def getStaticTestNameOrDefault(element: PsiElement, default: String, allowSymbolLiterals: Boolean) =
    getStaticTestName(element, allowSymbolLiterals).getOrElse(default)

  // Escapes backslashes and newline sequences in a test name before it is passed on.
  def escapeTestName(testName: String) = testName.replace("\\\\", "\\\\\\\\").replace("\\n", "\\\\n")
}
| LPTK/intellij-scala | src/org/jetbrains/plugins/scala/testingSupport/test/TestConfigurationUtil.scala | Scala | apache-2.0 | 8,306 |
/*
* Copyright 2014 Dennis Vis
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.talares.api.datatypes
import akka.pattern.ask
import org.talares.api.Talares
import org.talares.api.actors.messages.MediatorMessages
import org.talares.api.datatypes.items.Item
import play.api.libs.json.{JsPath, Reads}
import scala.annotation.implicitNotFound
import scala.concurrent.Future
import scala.reflect.ClassTag
/**
 * Holds a `Future` of a `Seq[T]` that is only resolved by an extra call to
 * the webservice; the call is triggered lazily, when `value` is accessed.
 *
 * @param uri the complete webservice path, as supplied by the service itself,
 *            from which the `Seq[T]` data can be fetched
 *
 * @author Dennis Vis
 * @since 0.1.0
 */
case class DeferredSeq[T <: Item](uri: String)(implicit jsonReadable: JsonReadable[T], classTag: ClassTag[T]) {

  @implicitNotFound(
    "No implicit value for org.talares.api.Talares found. Try to import org.talares.api.Talares.current."
  ) def value(implicit app: Talares): Future[Seq[T]] = {
    // ask (?) needs the timeout; map needs the execution context.
    implicit val executionContext = app.system.dispatcher
    implicit val timeout = app.timeout

    val reply = app.mediator ? MediatorMessages.URIRequest[T](uri)
    reply map {
      // NOTE: Seq[T] here is unchecked due to erasure — same as the original.
      case MediatorMessages.Response(resolved: Seq[T]) => resolved
      case _ => Seq()
    }
  }
}
object DeferredSeq {

  /** JSON Reads that builds a [[DeferredSeq]] from the nested `__deferred` / `uri` path. */
  implicit def reads[T <: Item](implicit jsonReadable: JsonReadable[T], classTag: ClassTag[T]): Reads[DeferredSeq[T]] =
    (JsPath \ "__deferred" \ "uri").read[String].map(uri => DeferredSeq[T](uri))
}
package auth.models.daos
import java.time.Instant
import java.util.UUID
import javax.inject.Inject
import auth.models.AuthToken
import auth.utils.json.MongoFormats._
import core.utils.mongo.MongoModel
import play.api.libs.json.Json
import play.modules.reactivemongo.ReactiveMongoApi
import play.modules.reactivemongo.json._
import reactivemongo.play.json.collection.JSONCollection
import scala.concurrent.{ ExecutionContext, Future }
/**
 * Give access to the [[AuthToken]] object.
 *
 * @param reactiveMongoApi The ReactiveMongo API.
 * @param ec The execution context.
 */
class AuthTokenDAOImpl @Inject() (reactiveMongoApi: ReactiveMongoApi)(
  implicit
  val ec: ExecutionContext
) extends AuthTokenDAO with MongoModel {

  /** The MongoDB collection holding the tokens. */
  protected def collection = reactiveMongoApi.database.map(_.collection[JSONCollection]("auth.tokens"))

  /**
   * Finds a token by its ID.
   *
   * @param id The unique token ID.
   * @return The found token or None if no token for the given ID could be found.
   */
  def find(id: UUID): Future[Option[AuthToken]] =
    for {
      tokens <- collection
      token <- tokens.find(Json.obj("_id" -> id)).one[AuthToken]
    } yield token

  /**
   * Finds expired tokens.
   *
   * @param instant The current instant.
   */
  def findExpired(instant: Instant): Future[Seq[AuthToken]] = {
    val expiredQuery = Json.obj("expiry" -> Json.obj("$lte" -> instant))
    find[AuthToken](expiredQuery)
  }

  /**
   * Saves a token.
   *
   * If the token doesn't exists then it will be added, otherwise it will be updated.
   *
   * @param token The token to save.
   * @return The saved token.
   */
  def save(token: AuthToken): Future[AuthToken] = {
    val upsert = collection.flatMap { tokens =>
      tokens.update(Json.obj("_id" -> token.id), token, upsert = true)
    }
    onSuccess(upsert, token)
  }

  /**
   * Removes the token for the given ID.
   *
   * @param id The ID for which the token should be removed.
   * @return A future to wait for the process to be completed.
   */
  def remove(id: UUID): Future[Unit] = {
    val removal = collection.flatMap(_.remove(Json.obj("_id" -> id)))
    onSuccess(removal, ())
  }
}
| akkie/silhouette-play-react-seed | app-auth/src/main/scala/auth/models/daos/AuthTokenDAOImpl.scala | Scala | mit | 2,032 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.stream.table
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.{EnvironmentSettings, ValidationException}
import org.apache.flink.table.expressions.utils._
import org.apache.flink.table.functions.ScalarFunction
import org.apache.flink.table.runtime.utils.{StreamITCase, StreamTestData, UserDefinedFunctionTestUtils}
import org.apache.flink.test.util.AbstractTestBase
import org.apache.flink.types.Row
import org.junit.Assert._
import org.junit.{Ignore, Test}
import scala.collection.mutable
/**
 * IT cases for projection / filter ("calc") operations on the streaming
 * Table API, running against the legacy planner (`useOldPlanner`).
 *
 * Each test collects the emitted rows as strings via the static
 * `StreamITCase` sinks and compares them, sorted, with the expected output.
 */
class CalcITCase extends AbstractTestBase {

  val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
  val settings: EnvironmentSettings = EnvironmentSettings.newInstance().useOldPlanner().build
  val tEnv: StreamTableEnvironment = StreamTableEnvironment.create(env, settings)

  @Test
  def testSimpleSelectAll(): Unit = {
    StreamITCase.testResults = mutable.MutableList()
    val ds = StreamTestData.getSmall3TupleDataStream(env).toTable(tEnv).select('_1, '_2, '_3)
    val results = ds.toAppendStream[Row]
    results.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    val expected = mutable.MutableList(
      "1,1,Hi",
      "2,2,Hello",
      "3,2,Hello world")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  @Test
  def testSimpleSelectEmpty(): Unit = {
    StreamITCase.testResults = mutable.MutableList()
    // select() drops all columns; counting a constant over the remaining
    // rows yields an updating count, hence the retract stream/sink below.
    val ds = StreamTestData.getSmall3TupleDataStream(env).toTable(tEnv)
      .select()
      .select(1.count)
    val results = ds.toRetractStream[Row]
    results.addSink(new StreamITCase.RetractingSink).setParallelism(1)
    env.execute()
    val expected = mutable.MutableList("3")
    assertEquals(expected.sorted, StreamITCase.retractedResults.sorted)
  }

  @Test
  def testSelectStar(): Unit = {
    StreamITCase.testResults = mutable.MutableList()
    val ds = StreamTestData.getSmallNestedTupleDataStream(env).toTable(tEnv).select('*)
    val results = ds.toAppendStream[Row]
    results.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    val expected = mutable.MutableList("(1,1),one", "(2,2),two", "(3,3),three")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  @Test
  def testSelectFirst(): Unit = {
    StreamITCase.testResults = mutable.MutableList()
    val ds = StreamTestData.getSmall3TupleDataStream(env).toTable(tEnv).select('_1)
    val results = ds.toAppendStream[Row]
    results.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    val expected = mutable.MutableList("1", "2", "3")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  @Test
  def testSimpleSelectWithNaming(): Unit = {

    // verify ProjectMergeRule.
    StreamITCase.testResults = mutable.MutableList()
    val ds = StreamTestData.get3TupleDataStream(env).toTable(tEnv)
      .select('_1 as 'a, '_2 as 'b, '_1 as 'c)
      .select('a, 'b)
    val results = ds.toAppendStream[Row]
    results.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    val expected = mutable.MutableList(
      "1,1", "2,2", "3,2", "4,3", "5,3", "6,3", "7,4",
      "8,4", "9,4", "10,4", "11,5", "12,5", "13,5", "14,5", "15,5",
      "16,6", "17,6", "18,6", "19,6", "20,6", "21,6")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  @Test
  def testSimpleSelectAllWithAs(): Unit = {
    StreamITCase.testResults = mutable.MutableList()
    val ds = StreamTestData.getSmall3TupleDataStream(env).toTable(tEnv, 'a, 'b, 'c)
      .select('a, 'b, 'c)
    val results = ds.toAppendStream[Row]
    results.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    val expected = mutable.MutableList(
      "1,1,Hi",
      "2,2,Hello",
      "3,2,Hello world")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  @Test
  def testSimpleFilter(): Unit = {
    StreamITCase.testResults = mutable.MutableList()
    val ds = StreamTestData.getSmall3TupleDataStream(env).toTable(tEnv, 'a, 'b, 'c)
    val filterDs = ds.filter('a === 3)
    val results = filterDs.toAppendStream[Row]
    results.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    val expected = mutable.MutableList("3,2,Hello world")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  @Test
  def testAllRejectingFilter(): Unit = {
    StreamITCase.testResults = mutable.MutableList()
    val ds = StreamTestData.getSmall3TupleDataStream(env).toTable(tEnv, 'a, 'b, 'c)
    // filter(false) must produce no output at all.
    val filterDs = ds.filter(false)
    val results = filterDs.toAppendStream[Row]
    results.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    assertEquals(true, StreamITCase.testResults.isEmpty)
  }

  @Test
  def testAllPassingFilter(): Unit = {
    StreamITCase.testResults = mutable.MutableList()
    val ds = StreamTestData.getSmall3TupleDataStream(env).toTable(tEnv, 'a, 'b, 'c)
    val filterDs = ds.filter(true)
    val results = filterDs.toAppendStream[Row]
    results.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    val expected = mutable.MutableList(
      "1,1,Hi",
      "2,2,Hello",
      "3,2,Hello world")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  @Test
  def testFilterOnIntegerTupleField(): Unit = {
    StreamITCase.testResults = mutable.MutableList()
    val ds = StreamTestData.get3TupleDataStream(env).toTable(tEnv, 'a, 'b, 'c)
    // Mixes Symbol-based ('a) and interpolated ($"b") expression syntaxes.
    val filterDs = ds.filter( 'a % 2 === 0 )
      .where($"b" === 3 || $"b" === 4 || $"b" === 5)
    val results = filterDs.toAppendStream[Row]
    results.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    val expected = mutable.MutableList(
      "4,3,Hello world, how are you?", "6,3,Luke Skywalker",
      "8,4,Comment#2", "10,4,Comment#4", "12,5,Comment#6", "14,5,Comment#8")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  @Test
  def testNotEquals(): Unit = {
    StreamITCase.testResults = mutable.MutableList()
    val ds = StreamTestData.get3TupleDataStream(env).toTable(tEnv, 'a, 'b, 'c)
    val filterDs = ds.filter( 'a % 2 !== 0)
      .where(($"b" !== 1) && ($"b" !== 2) && ($"b" !== 3))
    val results = filterDs.toAppendStream[Row]
    results.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    val expected = mutable.MutableList(
      "7,4,Comment#1", "9,4,Comment#3",
      "11,5,Comment#5", "13,5,Comment#7", "15,5,Comment#9",
      "17,6,Comment#11", "19,6,Comment#13", "21,6,Comment#15")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  @Test
  def testUserDefinedFunctionWithParameter(): Unit = {
    // RichFunc2 reads "string.value" from the job parameters at runtime.
    tEnv.registerFunction("RichFunc2", new RichFunc2)
    UserDefinedFunctionTestUtils.setJobParameters(env, Map("string.value" -> "ABC"))
    StreamITCase.testResults = mutable.MutableList()
    val result = StreamTestData.get3TupleDataStream(env)
      .toTable(tEnv, 'a, 'b, 'c)
      .where(call("RichFunc2", $"c") === "ABC#Hello")
      .select('c)
    val results = result.toAppendStream[Row]
    results.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    val expected = mutable.MutableList("Hello")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  @Test
  def testMultipleUserDefinedFunctions(): Unit = {
    tEnv.registerFunction("RichFunc1", new RichFunc1)
    tEnv.registerFunction("RichFunc2", new RichFunc2)
    UserDefinedFunctionTestUtils.setJobParameters(env, Map("string.value" -> "Abc"))
    StreamITCase.testResults = mutable.MutableList()
    val result = StreamTestData.get3TupleDataStream(env)
      .toTable(tEnv, 'a, 'b, 'c)
      .where(call("RichFunc2", $"c") === "Abc#Hello" ||
        (call("RichFunc1", $"a") === 3) &&
        ($"b" === 2))
      .select('c)
    val results = result.toAppendStream[Row]
    results.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    val expected = mutable.MutableList("Hello", "Hello world")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  @Test
  def testScalarFunctionConstructorWithParams(): Unit = {
    StreamITCase.testResults = mutable.MutableList()
    val testData = new mutable.MutableList[(Int, Long, String)]
    testData.+=((1, 1L, "Jack#22"))
    testData.+=((2, 2L, "John#19"))
    testData.+=((3, 2L, "Anna#44"))
    testData.+=((4, 3L, "nosharp"))
    val t = env.fromCollection(testData).toTable(tEnv).as("a", "b", "c")
    // Three instances of the same UDF class, differing only in their
    // constructor argument.
    val func0 = new Func13("default")
    val func1 = new Func13("Sunny")
    val func2 = new Func13("kevin2")
    val result = t.select(func0('c), func1('c), func2('c))
    result.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    val expected = mutable.MutableList(
      "default-Anna#44,Sunny-Anna#44,kevin2-Anna#44",
      "default-Jack#22,Sunny-Jack#22,kevin2-Jack#22",
      "default-John#19,Sunny-John#19,kevin2-John#19",
      "default-nosharp,Sunny-nosharp,kevin2-nosharp"
    )
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  @Test
  def testInlineScalarFunction(): Unit = {
    StreamITCase.testResults = mutable.MutableList()
    val t = env.fromElements(1, 2, 3, 4).toTable(tEnv).as("a")
    // Anonymous ScalarFunction defined inline at the call site.
    val result = t.select(
      (new ScalarFunction() {
        def eval(i: Int, suffix: String): String = {
          suffix + i
        }
      })('a, ">>"))
    result.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    val expected = mutable.MutableList(
      ">>1",
      ">>2",
      ">>3",
      ">>4"
    )
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  @Test
  def testNonStaticObjectScalarFunction(): Unit = {
    StreamITCase.testResults = mutable.MutableList()
    val t = env.fromElements(1, 2, 3, 4).toTable(tEnv).as("a")
    val result = t.select(NonStaticObjectScalarFunction('a, ">>"))
    result.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    val expected = mutable.MutableList(
      ">>1",
      ">>2",
      ">>3",
      ">>4"
    )
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  // UDF declared as an object nested inside the (non-static) test class.
  object NonStaticObjectScalarFunction extends ScalarFunction {
    def eval(i: Int, suffix: String): String = {
      suffix + i
    }
  }

  @Test(expected = classOf[ValidationException]) // see FLINK-15162
  def testNonStaticClassScalarFunction(): Unit = {
    StreamITCase.testResults = mutable.MutableList()
    val t = env.fromElements(1, 2, 3, 4).toTable(tEnv).as("a")
    val result = t.select(new NonStaticClassScalarFunction()('a, ">>"))
    result.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    val expected = mutable.MutableList(
      ">>1",
      ">>2",
      ">>3",
      ">>4"
    )
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  // Inner (non-static) class: registering it as a UDF is expected to fail
  // with a ValidationException (see FLINK-15162 referenced above).
  class NonStaticClassScalarFunction extends ScalarFunction {
    def eval(i: Int, suffix: String): String = {
      suffix + i
    }
  }

  @Test
  def testMapType(): Unit = {
    StreamITCase.testResults = mutable.MutableList()
    val ds = StreamTestData.get3TupleDataStream(env)
      .toTable(tEnv)
      .select(map('_1, '_3))
    val results = ds.toAppendStream[Row]
    results.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    val expected = mutable.MutableList(
      "{10=Comment#4}",
      "{11=Comment#5}",
      "{12=Comment#6}",
      "{13=Comment#7}",
      "{14=Comment#8}",
      "{15=Comment#9}",
      "{16=Comment#10}",
      "{17=Comment#11}",
      "{18=Comment#12}",
      "{19=Comment#13}",
      "{1=Hi}",
      "{20=Comment#14}",
      "{21=Comment#15}",
      "{2=Hello}",
      "{3=Hello world}",
      "{4=Hello world, how are you?}",
      "{5=I am fine.}",
      "{6=Luke Skywalker}",
      "{7=Comment#1}",
      "{8=Comment#2}",
      "{9=Comment#3}")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  @Test
  def testColumnOperation(): Unit = {
    StreamITCase.clear
    val testData = new mutable.MutableList[(Int, Long, String)]
    testData.+=((1, 1L, "Kevin"))
    testData.+=((2, 2L, "Sunny"))
    val t = env.fromCollection(testData).toTable(tEnv).as("a", "b", "c")
    val result = t
      // Adds simple column
      .addColumns("concat(c, 'sunny') as kid")
      // Adds columns by flattening
      .addColumns(row(1, "str").flatten())
      // If the added fields have duplicate field name, then the last one is used.
      .addOrReplaceColumns(concat('c, "_kid") as 'kid, concat('c, "kid") as 'kid)
      // Existing fields will be replaced.
      .addOrReplaceColumns("concat(c, ' is a kid') as kid")
      // Adds value literal column
      .addColumns("'last'")
      // Adds column without alias
      .addColumns('a + 2)
      // Renames columns
      .renameColumns('a as 'a2, 'b as 'b2)
      .renameColumns("c as c2")
      // Drops columns
      .dropColumns('b2)
      .dropColumns("c2")
    result.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    val expected = mutable.MutableList(
      "1,Kevin is a kid,1,str,last,3",
      "2,Sunny is a kid,1,str,last,4"
    )
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  @Test
  def testMap(): Unit = {
    StreamITCase.testResults = mutable.MutableList()
    // Chained map() calls with re-aliasing between each step.
    val ds = StreamTestData.getSmall3TupleDataStream(env).toTable(tEnv, 'a, 'b, 'c)
      .map(Func23('a, 'b, 'c)).as("a", "b", "c", "d")
      .map(Func24('a, 'b, 'c, 'd)).as("a", "b", "c", "d")
      .map(Func1('b))
    val results = ds.toAppendStream[Row]
    results.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    val expected = mutable.MutableList(
      "3",
      "4",
      "5")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  @Ignore("Will be open when FLINK-10834 has been fixed.")
  @Test
  def testNonDeterministic(): Unit = {
    StreamITCase.testResults = mutable.MutableList()
    val ds = StreamTestData.getSmall3TupleDataStream(env).toTable(tEnv, 'a, 'b, 'c)
      .map(Func25('a))
    val results = ds.toAppendStream[Row]
    results.addSink(new StreamITCase.StringSink[Row])
    env.execute()
    // Presumably Func25 emits the same (non-deterministic) value into both
    // output columns; the assertion checks the two columns agree per row.
    StreamITCase.testResults.foreach { testResult =>
      val result = testResult.split(",")
      assertEquals(result(0), result(1))
    }
  }
}
| hequn8128/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/runtime/stream/table/CalcITCase.scala | Scala | apache-2.0 | 15,152 |
package org.jetbrains.plugins.scala.lang.psi.impl.search
import com.intellij.openapi.application.ApplicationManager
import com.intellij.openapi.util.Computable
import com.intellij.psi.PsiMember
import com.intellij.psi.search.GlobalSearchScope
import com.intellij.psi.search.searches.AnnotatedElementsSearch
import com.intellij.psi.stubs.StubIndex
import com.intellij.util.{Processor, QueryExecutor}
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScAnnotation, ScAnnotations}
import org.jetbrains.plugins.scala.lang.psi.stubs.index.ScalaIndexKeys
/**
 * Finds Scala members annotated with a given annotation class by querying
 * the ANNOTATED_MEMBER stub index.
 *
 * User: Alexander Podkhalyuzin
 * Date: 10.01.2009
 */
class ScalaAnnotatedMembersSearcher extends QueryExecutor[PsiMember, AnnotatedElementsSearch.Parameters] {

  /**
   * Feeds each annotated member to `consumer`.
   *
   * @return false as soon as the consumer asks to stop processing, true otherwise
   */
  def execute(p: AnnotatedElementsSearch.Parameters, consumer: Processor[PsiMember]): Boolean = {
    val annClass = p.getAnnotationClass
    assert(annClass.isAnnotationType, "Annotation type should be passed to annotated members search")

    val annotationFQN = annClass.qualifiedName
    assert(annotationFQN != null)

    // Only global scopes are supported; for any other scope report "continue".
    val scope = p.getScope match {
      case x: GlobalSearchScope => x
      case _ => return true
    }

    // PSI/stub access must happen inside a read action.
    // Bug fix: the original discarded the Computable's result and always
    // returned true, so a consumer's request to stop (process == false)
    // was never propagated to the search infrastructure.
    ApplicationManager.getApplication.runReadAction(new Computable[Boolean] {
      def compute: Boolean = {
        val candidates = StubIndex.getElements(ScalaIndexKeys.ANNOTATED_MEMBER_KEY,
          annClass.name, annClass.getProject, scope, classOf[ScAnnotation])
        val iter = candidates.iterator
        while (iter.hasNext) {
          val annotation = iter.next()
          // The annotation's grandparent is the annotated member
          // (annotation -> ScAnnotations -> member).
          annotation.getParent match {
            case ann: ScAnnotations => ann.getParent match {
              case member: PsiMember => if (!consumer.process(member)) return false
              case _ =>
            }
            case _ =>
          }
        }
        true
      }
    })
  }
}
package org.scalafmt.rewrite
import scala.meta.Importee
import scala.meta.Tree
import scala.meta._
/**
 * Sorts the importees inside the curly braces of an import statement.
 *
 * For example
 *
 *   import a.{c, b}
 *
 * becomes
 *
 *   import a.{b, c}
 */
sealed trait SortImports extends Rewrite {

  /**
   * The sorting scheme to use when sorting the imports
   */
  def sorted(str: Seq[String]): Seq[String]

  override def rewrite(code: Tree, ctx: RewriteCtx): Seq[Patch] = {
    import ctx.dialect
    code.collect {
      case Import(importers) =>
        importers.flatMap { importer =>
          // Only plain names are sorted. Anything fancier — e.g. a rename
          // like `b => c` — is left untouched until we are certain sorting
          // those is 100% safe as well.
          if (importer.importees.forall(_.is[Importee.Name])) {
            val replacements = sorted(importer.importees.map(_.tokens.mkString))
            importer.importees.zip(replacements).map {
              case (importee, replacement) =>
                TokenPatch.AddRight(importee.tokens.head, replacement)
            }
          } else {
            Nil
          }
        }
    }.flatten
  }
}
/**
 * Sort imports with symbols at the beginning, followed by lowercase and
 * finally uppercase
 */
case object SortImports extends SortImports {

  // sort contributed by @djspiewak: https://gist.github.com/djspiewak/127776c2b6a9d6cd3c21a228afd4580f
  private val LCase = """([a-z].*)""".r
  private val UCase = """([A-Z].*)""".r
  private val Other = """(.+)""".r

  override def sorted(strs: Seq[String]): Seq[String] = {
    // Bucket 0: symbols/other, 1: lowercase-first, 2: uppercase-first.
    // A non-matching (empty) string fails the match, as in the original.
    def bucketOf(str: String): Int = str match {
      case LCase(_) => 1
      case UCase(_) => 2
      case Other(_) => 0
    }
    val buckets = strs.toVector.groupBy(bucketOf)
    def sortedBucket(key: Int): Vector[String] = buckets.getOrElse(key, Vector.empty).sorted
    sortedBucket(0) ++ sortedBucket(1) ++ sortedBucket(2)
  }
}
/**
 * Sort imports using the traditional ASCII sorting
 *
 * See: http://support.ecisolutions.com/doc-ddms/help/reportsmenu/ascii_sort_order_chart.htm
 */
case object AsciiSortImports extends SortImports {

  // The default String ordering compares char values, i.e. plain ASCII
  // order (all uppercase letters sort before any lowercase letter).
  override def sorted(strs: Seq[String]): Seq[String] = strs.sorted
}
| olafurpg/scalafmt | scalafmt-core/shared/src/main/scala/org/scalafmt/rewrite/SortImports.scala | Scala | apache-2.0 | 2,535 |
package cn.gridx.phoenix.sql.scala
import java.sql.DriverManager
/**
 * Created by tao on 10/20/15.
 *
 * Tests paged (key-set) querying of a Phoenix table.
 * Usage: java -cp <target.jar>:<phoenix-client.jar>:<scala-library.jar> <MainClass>
 */
object TestPagedQuery {

  val Driver = "org.apache.phoenix.jdbc.PhoenixDriver"
  val ZK_CONN = "ecs2:2181"

  def main(args: Array[String]): Unit = {
    f1()
  }

  /**
   * Method 1: orders by the primary key "ROW" and fetches 5 records per
   * query — one initial page plus 10 follow-up pages.
   *
   * Fixes over the original: the connection and each statement/result set
   * are closed via try/finally, and the page boundary is bound as a
   * PreparedStatement parameter instead of being interpolated into the SQL.
   */
  def f1(): Unit = {
    Class.forName(Driver)
    val conn = DriverManager.getConnection(s"jdbc:phoenix:${ZK_CONN}")
    try {
      // Primary-key value of the most recently printed record; used as the
      // exclusive lower bound of the next page (key-set pagination).
      var row = ""

      // First page: no lower bound yet.
      println("-------------------------------------------")
      println("# 1 ")
      val firstStmt = conn.prepareStatement(""" select * from "TEST" order by "ROW" limit 5 """)
      try {
        val rs = firstStmt.executeQuery
        try {
          while (rs.next()) {
            printRow(rs)
            row = rs.getString("ROW")
          }
        } finally rs.close()
      } finally firstStmt.close()

      // Subsequent pages: rows strictly after the last seen "ROW" value.
      val pageQuery = """ select * from "TEST" where "ROW" > ? order by "ROW" limit 5 """
      for (i <- 1 to 10) {
        val pstmt = conn.prepareStatement(pageQuery)
        try {
          pstmt.setString(1, row)
          val rs = pstmt.executeQuery
          try {
            println("\\n---------------------------------------------------------------")
            println(s"# ${i} ")
            while (rs.next()) {
              printRow(rs)
              row = rs.getString("ROW")
            }
          } finally rs.close()
        } finally pstmt.close()
      }
    } finally conn.close()
  }

  /** Prints the current record of the "TEST" table (columns ROW, C1, C2, C3). */
  private def printRow(rs: java.sql.ResultSet): Unit =
    println(s"ROW = ${rs.getString("ROW")}, " +
      s"C1 = ${rs.getString("C1")}, " +
      s"C2 = ${rs.getString("C2")}, " +
      s"C3 = ${rs.getString("C3")}")
}
| TaoXiao/Phoenix | sql/src/main/scala/cn/gridx/phoenix/sql/scala/TestPagedQuery.scala | Scala | apache-2.0 | 2,102 |
package com.github.eerohele.expek
import scala.xml.transform.RewriteRule
import scala.xml.{Elem, NamespaceBinding, Node, TopScope}
/** A container class for functions that refine [[Node]] instances in some way.
  *
  * "Refine" here means "modify in some way before or after the XSLT transformation".
  */
object NodeRefinery {

  /** Adds a namespace to a [[Node]] and to every element beneath it.
    *
    * Example:
    *
    * {{{
    * setNameSpace("urn:foo:bar")(<foo/>)
    * }}}
    */
  def setNameSpace(uri: String)(node: Node): Node = {
    // Unprefixed binding for the requested URI; None.orNull supplies the
    // null prefix that NamespaceBinding expects for a default namespace.
    val binding = NamespaceBinding(None.orNull, uri, TopScope)

    // Rebinds the scope of every element, recursing through the children.
    def rebind(n: Node): Node = n match {
      case e: Elem => e.copy(scope = binding, child = e.child.map(rebind))
      case other   => other
    }

    rebind(node)
  }
}
| eerohele/expek | src/main/scala/com/github/eerohele/expek/NodeRefinery.scala | Scala | mit | 890 |
package texteditor.signals
import javax.swing
import rescala._
// Reactive wrapper around javax.swing.Timer: exposes the running state as a
// Signal and each tick as an Evt.
class Timer(delay0: Int) {

  // Underlying swing timer; fireActionPerformed is overridden so every tick
  // also updates the reactive state. Note the name resolution on that line:
  // the unqualified isRunning() is swing.Timer's method, while
  // Timer.this.isRunning is the outer Var being updated.
  val peer: swing.Timer = new swing.Timer(delay0, null) {
    override def fireActionPerformed(e: java.awt.event.ActionEvent): Unit = { Timer.this.isRunning() = isRunning(); fired.fire() }
  }

  /** Auxiliary constructor that additionally sets the repeating flag. */
  def this(delay: Int, repeating: Boolean) {
    this(delay)
    this.repeating = repeating
  }

  // Var mirroring peer.isRunning; refreshed manually after every operation
  // that can change the peer's state. NOTE(review): initialized to true even
  // though the peer has not been started yet — confirm this is intended.
  private val isRunning = Var(true)

  // Public reactive view of the running state.
  val running = Signal { isRunning() }

  // Fired once per timer tick.
  val fired = Evt[Unit]

  def delay = peer.getDelay
  def delay_=(delay: Int) = peer.setDelay(delay)
  def repeating = peer.isRepeats
  def repeating_=(repeating: Boolean): Unit = { peer.setRepeats(repeating); isRunning() = peer.isRunning() }

  // Each control method syncs the Var and returns this for chaining.
  def restart = { peer.restart(); isRunning() = peer.isRunning(); this }
  def start = { peer.start(); isRunning() = peer.isRunning(); this }
  def stop = { peer.stop(); isRunning() = peer.isRunning(); this }
}
| volkc/REScala | Examples/Editor/src/main/scala/texteditor/signals/Timer.scala | Scala | apache-2.0 | 916 |
/*
Copyright 2014 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.platform
import com.twitter.scalding._
import java.io.{
BufferedInputStream,
BufferedReader,
BufferedWriter,
DataInputStream,
DataOutputStream,
File,
FileInputStream,
FileOutputStream,
FileReader,
FileWriter,
RandomAccessFile
}
import java.nio.channels.FileLock
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.filecache.DistributedCache
import org.apache.hadoop.fs.{ FileSystem, FileUtil, Path }
import org.apache.hadoop.hdfs.MiniDFSCluster
import org.apache.hadoop.mapred.{ JobConf, MiniMRCluster }
import org.slf4j.LoggerFactory
object LocalCluster {
  // HDFS directory where jars required on the task classpath are staged.
  private final val HADOOP_CLASSPATH_DIR = new Path("/tmp/hadoop-classpath-lib")

  // JVM-wide file channel used for the inter-process lock; the NOTICE file
  // serves only as the lock target (its contents are irrelevant).
  private final val MUTEX = new RandomAccessFile("NOTICE", "rw").getChannel

  def apply() = new LocalCluster()
}
// In-process Hadoop mini cluster (DFS + MapReduce) for platform tests.
// Call initialize() before use and shutdown() afterwards.
class LocalCluster(mutex: Boolean = true) {
  private val LOG = LoggerFactory.getLogger(getClass)

  // (MiniDFSCluster, MiniMRCluster, JobConf) triple; Some between
  // initialize() and shutdown(), None otherwise.
  private var hadoop: Option[(MiniDFSCluster, MiniMRCluster, JobConf)] = None
  private def getHadoop = hadoop.getOrElse(throw new Exception("Hadoop has not been initialized"))
  private def dfs = getHadoop._1
  private def cluster = getHadoop._2
  private def jobConf = getHadoop._3
  private def fileSystem = dfs.getFileSystem

  // Local jar/class directories already uploaded to HDFS and registered on
  // the job classpath; prevents duplicate uploads.
  private var classpath = Set[File]()

  // Holds the inter-process file lock while this instance owns the mutex.
  private var lock: Option[FileLock] = None

  // The Mini{DFS,MR}Cluster does not make it easy or clean to have two different processes
  // running without colliding. Thus we implement our own mutex. Mkdir should be atomic so
  // there should be no race. Just to be careful, however, we make sure that the file
  // is what we expected, or else we fail.
  private[this] def acquireMutex() {
    LOG.debug("Attempting to acquire mutex")
    // Blocks until the lock on the shared NOTICE file channel is granted.
    lock = Some(LocalCluster.MUTEX.lock())
    LOG.debug("Mutex file acquired")
  }

  private[this] def releaseMutex() {
    LOG.debug("Releasing mutex")
    lock.foreach { _.release() }
    LOG.debug("Mutex released")
    lock = None
  }

  // Boots a 4-node mini DFS and a 4-tracker mini MR cluster, stages the base
  // classpath jars on HDFS, and returns this for chaining.
  def initialize(): this.type = {
    if (mutex) {
      acquireMutex()
    }

    if (Option(System.getProperty("hadoop.log.dir")).isEmpty) {
      System.setProperty("hadoop.log.dir", "build/test/logs")
    }
    new File(System.getProperty("hadoop.log.dir")).mkdirs()

    val conf = new Configuration
    val dfs = new MiniDFSCluster(conf, 4, true, null)
    val fileSystem = dfs.getFileSystem
    val cluster = new MiniMRCluster(4, fileSystem.getUri.toString, 1, null, null, new JobConf(conf))
    val mrJobConf = cluster.createJobConf()
    // Aggressive polling intervals, few retry attempts, and disabled
    // speculative execution keep the tests fast and deterministic.
    mrJobConf.setInt("mapred.submit.replication", 2)
    mrJobConf.set("mapred.map.max.attempts", "2")
    mrJobConf.set("mapred.reduce.max.attempts", "2")
    mrJobConf.set("mapred.child.java.opts", "-Xmx512m")
    mrJobConf.setInt("mapred.job.reuse.jvm.num.tasks", -1)
    mrJobConf.setInt("jobclient.completion.poll.interval", 50)
    mrJobConf.setInt("jobclient.progress.monitor.poll.interval", 50)
    mrJobConf.setInt("ipc.ping.interval", 5000)
    mrJobConf.setMapSpeculativeExecution(false)
    mrJobConf.setReduceSpeculativeExecution(false)
    mrJobConf.set("mapreduce.user.classpath.first", "true")

    LOG.debug("Creating directory to store jars on classpath: " + LocalCluster.HADOOP_CLASSPATH_DIR)
    fileSystem.mkdirs(LocalCluster.HADOOP_CLASSPATH_DIR)

    hadoop = Some(dfs, cluster, mrJobConf)

    //TODO I desperately want there to be a better way to do this. I'd love to be able to run ./sbt assembly and depend
    // on that, but I couldn't figure out how to make that work.
    // NOTE(review): baseClassPath is Unit — the List is consumed by foreach;
    // the val only documents intent.
    val baseClassPath = List(
      getClass,
      classOf[JobConf],
      classOf[LoggerFactory],
      classOf[scala.ScalaObject],
      classOf[com.twitter.scalding.Args],
      classOf[org.apache.log4j.LogManager],
      classOf[com.twitter.scalding.RichDate],
      classOf[cascading.tuple.TupleException],
      classOf[com.twitter.chill.Externalizer[_]],
      classOf[com.twitter.algebird.Semigroup[_]],
      classOf[com.twitter.chill.KryoInstantiator],
      classOf[org.jgrapht.ext.EdgeNameProvider[_]],
      classOf[org.apache.commons.lang.StringUtils],
      classOf[cascading.scheme.local.TextDelimited],
      classOf[org.apache.commons.logging.LogFactory],
      classOf[org.apache.commons.codec.binary.Base64],
      classOf[com.twitter.scalding.IntegralComparator],
      classOf[org.apache.commons.collections.Predicate],
      classOf[com.esotericsoftware.kryo.KryoSerializable],
      classOf[com.twitter.chill.hadoop.KryoSerialization],
      classOf[com.twitter.maple.tap.TupleMemoryInputFormat],
      classOf[org.apache.commons.configuration.Configuration]).foreach { addClassSourceToClassPath(_) }

    this
  }

  // Uploads the jar/classes directory that `clazz` was loaded from.
  def addClassSourceToClassPath[T](clazz: Class[T]) {
    addFileToHadoopClassPath(getFileForClass(clazz))
  }

  // Copies the given jar (directories are first packed into a jar via
  // MakeJar) to HDFS and registers it on the distributed-cache classpath.
  // Returns true if uploaded, false if it was already registered.
  def addFileToHadoopClassPath(resourceDir: File): Boolean =
    if (classpath.contains(resourceDir)) {
      LOG.debug("Already on Hadoop classpath: " + resourceDir)
      false
    } else {
      LOG.debug("Not yet on Hadoop classpath: " + resourceDir)
      val localJarFile = if (resourceDir.isDirectory) MakeJar(resourceDir) else resourceDir
      val hdfsJarPath = new Path(LocalCluster.HADOOP_CLASSPATH_DIR, localJarFile.getName)
      fileSystem.copyFromLocalFile(new Path("file://%s".format(localJarFile.getAbsolutePath)), hdfsJarPath)
      DistributedCache.addFileToClassPath(hdfsJarPath, jobConf, fileSystem)
      LOG.debug("Added to Hadoop classpath: " + localJarFile)
      classpath += resourceDir
      true
    }

  // Location (jar or classes directory) a class was loaded from.
  private def getFileForClass[T](clazz: Class[T]): File =
    new File(clazz.getProtectionDomain.getCodeSource.getLocation.toURI)

  // Scalding Mode backed by this mini cluster's JobConf.
  def mode: Mode = Hdfs(true, jobConf)

  // Copies a local file to HDFS unless the target already exists.
  // Returns true when the target was already present (no copy performed).
  def putFile(file: File, location: String): Boolean = {
    val hdfsLocation = new Path(location)
    val exists = fileSystem.exists(hdfsLocation)
    if (!exists) FileUtil.copy(file, fileSystem, hdfsLocation, false, jobConf)
    exists
  }

  //TODO is there a way to know if we need to wait on anything to shut down, etc?
  // Stops both mini clusters and releases the inter-process mutex.
  def shutdown() {
    hadoop.foreach {
      case (dfs, mr, _) =>
        dfs.shutdown()
        mr.shutdown()
    }
    hadoop = None
    if (mutex) {
      releaseMutex()
    }
  }
}
| lucamilanesio/scalding | scalding-hadoop-test/src/main/scala/com/twitter/scalding/platform/LocalCluster.scala | Scala | apache-2.0 | 6,737 |
package actors
import akka.actor._
import clashcode._
import com.clashcode.web.controllers.Application
import scala.concurrent.duration.FiniteDuration
import java.util.concurrent.TimeUnit
import org.joda.time.{Seconds, DateTime}
import scala.collection.mutable
import play.api.libs.concurrent.Execution.Implicits._
import akka.pattern.{ ask, pipe }
import akka.util.Timeout
import akka.cluster.ClusterEvent._
import play.api.Logger
import akka.cluster.ClusterEvent.MemberRemoved
import scala.Some
import clashcode.Hello
import akka.cluster.ClusterEvent.UnreachableMember
import akka.cluster.ClusterEvent.MemberUp
import akka.actor.Identify
import akka.cluster.ClusterEvent.CurrentClusterState
import clashcode.PrisonerResponse
import clashcode.PrisonerRequest
/**
* Continuously sends game requests to all participants, keeps player high score.
* Keeps list of upcoming tournament pairings
*/
class HostingActor extends Actor {
/** list of players (max 100) */
val players = mutable.Map.empty[String, Player]
/** list of played games (max 5000) */
val games = mutable.Queue.empty[Game]
/** list of currently running games */
val running = mutable.Queue.empty[Game]
/** list of upcoming games */
val upcoming = mutable.Queue.empty[Game]
/** timer for running tournament rounds */
context.system.scheduler.schedule(FiniteDuration(1, TimeUnit.SECONDS), FiniteDuration(1, TimeUnit.SECONDS)) {
self ! TournamentTick()
}
def receive = {
case Hello(rawName) => handleHello(sender, rawName, false)
// handle ongoing tournaments
case _ : TournamentTick =>
// lets check the list of running games for timeouts
val now = DateTime.now
running.filter(g => !g.timedOut(now).isEmpty).foreach(timeoutGame => {
running.dequeueFirst(_ == timeoutGame)
// finalize game: other player gets default win point on time out
val timeoutTurns = timeoutGame.timedOut(now)
val maybeWinnerTurn = timeoutGame.turns.diff(timeoutTurns).headOption
val defaultWinnerTurn = maybeWinnerTurn.map(_.copy(points = 1))
val finalGame = Game(timeoutTurns ++ defaultWinnerTurn)
addGame(finalGame)
// handle player timeout
timeoutTurns.foreach(t => {
val response = "Hey " + t.player.name + ", we didn't get your response in time.";
logStatus(response)
t.player.ref ! response
t.player.active = false // remove player from upcoming tournaments
})
})
// no running or upcoming games? start new tournament if we have players
val activePlayers = players.values.toSeq.filter(_.active)
if (running.length == 0 && upcoming.length == 0) {
if (activePlayers.length >= 2)
{
// round robin tournament
val newGames = activePlayers.flatMap(player => {
// create all games where this player is first player (ordered alphabetically)
val opponents = activePlayers.filter(_.name > player.name)
opponents.map(opponent => Game(List(
Turn(player, now, now.plusSeconds(1), None, 0),
Turn(opponent, now, now.plusSeconds(1), None, 0)
)))
})
logStatus("Starting new tournament with " + activePlayers.length + " players, " + newGames.length + " games.")
upcoming.enqueue(newGames : _*)
}
else if (activePlayers.length == 1) {
logStatus("Only one player connected (hello, " + activePlayers.head.name + ")")
}
else {
logStatus("No players connected")
}
}
//logStatus("No players connected x")
// start upcoming games (use clone of upcoming queue, since we're modifying it inside)
List(upcoming : _*).foreach(upcomingGame => {
val runningPlayers = running.flatMap(_.players)
val availablePlayers = activePlayers.diff(runningPlayers)
// if both players for this game available, start the game
if (upcomingGame.players.forall(availablePlayers.contains)) {
upcoming.dequeueFirst(_ == upcomingGame)
val start = DateTime.now
val runningGame = upcomingGame.copy(upcomingGame.turns.map(_.copy(
start = start,
response = start.plusSeconds(1)))) // set timeout to 1 second
running.enqueue(runningGame)
// get player responses
implicit val timeout = Timeout(FiniteDuration(1, TimeUnit.SECONDS)) // needed for `?` below
upcomingGame.players.foreach(player => {
val otherPlayer = upcomingGame.players.filter(_ != player).headOption.getOrElse(player)
(player.ref ? PrisonerRequest(otherPlayer.name)).foreach {
case response : PrisonerResponse => self ! PlayerResponse(player, otherPlayer, response)
case x =>
val response = "Unknown message " + x.toString + " from " + player.name
player.ref ! response
logStatus(response)
}
})
}
})
// still no running games? not enough active players. remove upcoming games, wait for new tournament.
if (running.length == 0) {
upcoming.clear()
}
// update high score list to web socket
Application.push(players.values.toSeq.sortBy(- _.points))
// handle response of a player
case PlayerResponse(player, otherPlayer, response) =>
val now = DateTime.now
player.lastSeen = now
val maybeGame = running.find(g => g.hasPlayers(Seq(player, otherPlayer)))
if (!maybeGame.isDefined)
{
val response = "Sorry " + player.name + ", your response came too late.";
logStatus(response)
player.ref ! response
}
//else logStatus("got response from " + player.name)
// update game
maybeGame.foreach(game => {
running.dequeueFirst(_ == game)
// update points and turn info
val playerTurn = game.turns.find(_.player == player).head
val otherTurn = game.turns.find(_ != playerTurn).head
val (playerPoints, otherPoints) = otherTurn.cooperate.fold((0, 0))(other =>
(getPoints(response.cooperate, other), getPoints(other, response.cooperate)))
val newPlayerTurn = playerTurn.copy(
response = now,
cooperate = Some(response.cooperate),
points = playerPoints)
val newOtherTurn = otherTurn.copy(points = otherPoints)
// handle updated game
val newGame = Game(List(newPlayerTurn, newOtherTurn))
if (otherTurn.cooperate.isDefined)
addGame(newGame) // finalize game
else
running.enqueue(newGame) // game still running, keep waiting for other response
})
case ResetStats =>
// reset all tournament points
games.clear()
players.values.foreach(player => {
player.coop = 1.0
player.games = 0
player.points = 0
})
case state: CurrentClusterState ⇒
Logger.info("Current members: " + state.members.mkString(", "))
case MemberUp(member) ⇒
Logger.info("Member is Up: " + member.address)
// try to discover player using cluster
val playerRef = context.actorFor(member.address + "/user/player")
implicit val timeout = Timeout(FiniteDuration(1, TimeUnit.SECONDS)) // needed for `?` below
(playerRef ? NameRequest).mapTo[Hello].foreach(hello => handleHello(playerRef, hello.name, true))
case UnreachableMember(member) ⇒
Logger.info("Member detected as unreachable: " + member)
case MemberRemoved(member, previousStatus) ⇒
Logger.info("Member is Removed: " + member.address + " after " + previousStatus)
case _: ClusterDomainEvent ⇒ // ignore
case x => // handle unknown messages
val response = "Unknown message " + x.toString + " from " + sender.path.address.host.getOrElse("???")
sender ! response
logStatus(response)
}
/** received players name */
private def handleHello(sender: ActorRef, rawName: String, cluster: Boolean) {
// add player to list
val now = DateTime.now
val name = rawName.take(12) // trim name to 12 chars max
val isNew = !players.contains(name)
val player = players.getOrElseUpdate(name, new Player(name, sender, 0, 0, 0, now, true, 1.0, cluster))
player.lastSeen = now
player.active = true
player.ref = sender // update actor reference
player.cluster = cluster // update whether player was discovered via cluster
// log event
val response = (if (isNew) "Welcome, " else "Hi again, ") +
player.name + " from " + (if (cluster) "Cluster " else "") +
player.ref.path.address.host.getOrElse("???")
logStatus(response)
sender ! response
// remove old players
while (players.size > 100)
{
val lastPlayer = players.values.toSeq.sortBy(p => Seconds.secondsBetween(now, p.lastSeen).getSeconds).last
players -= lastPlayer.name
}
}
/** get points for player cooperation / defect */
private def getPoints(player: Boolean, other: Boolean) = {
if (player && other) 1 // both cooperate
else if (player) -1 // defect other player
else if (other) 2 // other player cooperates, we defect
else 0 // both defect
}
  /** Archives a completed game, notifies both players of the opponent's move
   *  and recomputes each participant's aggregate statistics.
   *
   *  Statistics are derived by rescanning the whole archive (up to 5000 games)
   *  for the player's completed turns — an O(archive) pass per player per game.
   */
  private def addGame(game: Game) {
    // add game to archive, prune out old games (bounded at 5000)
    games.enqueue(game)
    while(games.length > 5000) games.dequeue()
    // update player statistics
    game.players.foreach(player => {
      // notify players about result; only sent if the opponent actually answered
      val myTurn = game.turns.find(_.player == player).getOrElse(game.turns.head)
      val otherTurn = game.turns.find(_ != myTurn).getOrElse(game.turns.last)
      otherTurn.cooperate.foreach(cooperate =>
        player.ref ! PrisonerResult(otherTurn.player.name, cooperate))
      // stats: only turns with a recorded decision count
      val turns = games.flatMap(_.turns.find(t => t.player == player && t.cooperate.isDefined))
      player.games = turns.size
      player.points = turns.map(_.points).sum
      // average response time in ms; max(1) guards against division by zero
      player.ping = turns.map(t => (t.response.getMillis - t.start.getMillis).toInt).sum / turns.length.max(1)
      // NOTE(review): a missing decision is counted as cooperation here
      // (getOrElse(true)), though such turns are filtered out above — confirm intent
      val cooperations = turns.map(t => if (t.cooperate.getOrElse(true)) 1 else 0).sum
      player.coop = cooperations / player.games.max(1).toDouble
    })
    // push the finished game to the web UI
    Application.push(game)
  }
var lastStatus = ""
private def logStatus(status: String) {
if (status == lastStatus) return
lastStatus = status
Application.push(status)
}
}
case class TournamentTick()
case class PlayerResponse(player: Player, otherPlayer: Player, response: PrisonerResponse)
class Player(val name: String,
var ref: ActorRef, // actor endpoint for communication with this player
var points: Int, // total score
var games: Int, // number of games completed
var ping: Int, // average response time in ms
var lastSeen: DateTime, // last message from this player
var active: Boolean, // does player answer to requests?
var coop: Double, // how cooperative is this player?
var cluster: Boolean) // is player discovered in cluster?
case class Turn(player: Player,
start: DateTime,
response: DateTime, /** 1 sec after start of turn, or actual response time of player */
cooperate: Option[Boolean], /** true, false, or none for timeout */
points: Int)
/** A single prisoner's-dilemma game between exactly two players.
 *
 * @param turns the two turns of the game, one per player
 */
case class Game(turns: List[Turn]) {
  // `require` is the idiomatic precondition check; like the previous
  // hand-rolled throw it raises IllegalArgumentException, but with a
  // descriptive message instead of the bare string "turns".
  require(turns.length == 2, "a game must consist of exactly two turns")

  /** Turns whose player has not answered and whose response deadline has passed. */
  def timedOut(now: DateTime): List[Turn] = turns.filter(t => t.cooperate.isEmpty && t.response.isBefore(now))

  /** Whether every player in `seq` participates in this game. */
  def hasPlayers(seq: Seq[Player]): Boolean = seq.forall(players.contains)

  /** The two participants, in turn order. */
  lazy val players = turns.map(_.player)
}
case object ResetStats | clashcode/clashcode-wordguess | app/actors/HostingActor.scala | Scala | mit | 11,920 |
package com.twitter.finagle.service
import com.twitter.conversions.time._
import com.twitter.finagle.stats.InMemoryStatsReceiver
import com.twitter.finagle.{FailedFastException, MockTimer, Service, ServiceFactory, SourcedException, Status}
import com.twitter.util._
import java.util.concurrent.atomic.AtomicInteger
import org.junit.runner.RunWith
import org.mockito.Matchers.any
import org.mockito.Mockito.{never, times, verify, when}
import org.scalatest.FunSuite
import org.scalatest.concurrent.{Conductors, IntegrationPatience}
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
import scala.language.reflectiveCalls
@RunWith(classOf[JUnitRunner])
class FailFastFactoryTest extends FunSuite
with MockitoSugar
with Conductors
with IntegrationPatience {
def newCtx() = new {
val timer = new MockTimer
val backoffs = 1.second #:: 2.seconds #:: Stream.empty[Duration]
val service = mock[Service[Int, Int]]
when(service.close(any[Time])).thenReturn(Future.Done)
val underlying = mock[ServiceFactory[Int, Int]]
when(underlying.status).thenReturn(Status.Open)
when(underlying.close(any[Time])).thenReturn(Future.Done)
val stats = new InMemoryStatsReceiver
val failfast = new FailFastFactory(underlying, stats, timer, backoffs)
val p, q, r = new Promise[Service[Int, Int]]
when(underlying()).thenReturn(p)
val pp = failfast()
assert(pp.isDefined === false)
assert(failfast.isAvailable)
assert(timer.tasks.isEmpty)
}
test("pass through whenever everything is fine") {
Time.withCurrentTimeFrozen { tc =>
val ctx = newCtx()
import ctx._
p() = Return(service)
assert(pp.poll === Some(Return(service)))
}
}
test("failure") {
Time.withCurrentTimeFrozen { tc =>
val ctx = newCtx()
import ctx._
p() = Throw(new Exception)
verify(underlying).apply()
assert(!failfast.isAvailable)
assert(stats.counters.get(Seq("marked_dead")) === Some(1))
}
}
test("time out according to backoffs") {
Time.withCurrentTimeFrozen { tc =>
val ctx = newCtx()
import ctx._
p() = Throw(new Exception)
assert(timer.tasks.size === 1)
tc.set(timer.tasks(0).when)
timer.tick()
verify(underlying, times(2)).apply()
assert(!failfast.isAvailable)
}
}
test("become available again if the next attempt succeeds") {
Time.withCurrentTimeFrozen { tc =>
val ctx = newCtx()
import ctx._
p() = Throw(new Exception)
tc.set(timer.tasks(0).when)
when(underlying()).thenReturn(q)
verify(underlying).apply()
timer.tick()
verify(underlying, times(2)).apply()
assert(timer.tasks.isEmpty)
q() = Return(service)
assert(timer.tasks.isEmpty)
assert(failfast.isAvailable)
assert(stats.counters.get(Seq("marked_available")) === Some(1))
}
}
test("is Busy when failing; done when revived") {
Time.withCurrentTimeFrozen { tc =>
val ctx = newCtx()
import ctx._
assert(failfast.status === Status.Open)
p() = Throw(new Exception)
assert(failfast.status == Status.Busy)
tc.set(timer.tasks(0).when)
when(underlying()).thenReturn(Future.value(service))
timer.tick()
assert(failfast.status === Status.Open)
}
}
test("refuse external attempts") {
Time.withCurrentTimeFrozen { tc =>
val ctx = newCtx()
import ctx._
p() = Throw(new Exception)
assert {
failfast().poll match {
case Some(Throw(_: FailedFastException)) => true
case _ => false
}
}
verify(underlying).apply() // nothing new
}
}
test("admit external attempts when available again") {
Time.withCurrentTimeFrozen { tc =>
val ctx = newCtx()
import ctx._
p() = Throw(new Exception)
tc.set(timer.tasks(0).when)
verify(underlying).apply()
when(underlying()).thenReturn(q)
timer.tick()
verify(underlying, times(2)).apply()
q() = Return(service)
when(underlying()).thenReturn(r)
assert(failfast().poll === None)
r() = Return(service)
assert {
failfast().poll match {
case Some(Return(s)) => s eq service
case _ => false
}
}
}
}
test("cancels timer on close") {
Time.withCurrentTimeFrozen { tc =>
val ctx = newCtx()
import ctx._
p() = Throw(new Exception)
assert(timer.tasks.size === 1)
assert(!failfast.isAvailable)
verify(underlying, never()).close()
failfast.close()
verify(underlying).close()
assert(timer.tasks.isEmpty)
assert(failfast.status === underlying.status)
val status = underlying.status match {
case Status.Open => Status.Closed
case Status.Closed => Status.Open
case status => fail(s"bad status $status")
}
when(underlying.status).thenReturn(status)
assert(failfast.status === underlying.status)
}
}
test("fails simultaneous requests properly") {
Time.withCurrentTimeFrozen { tc =>
val ctx = newCtx()
import ctx._
val pp2 = failfast()
val e = new Exception
p() = Throw(e)
assert(pp.poll === Some(Throw(e)))
assert(pp2.poll === Some(Throw(e)))
val ffe = intercept[FailedFastException] {
failfast().poll.get.get
}
assert(ffe.getMessage().contains("twitter.github.io/finagle/guide/FAQ.html"))
}
}
test("maintains separate exception state in separate threads") {
Time.withCurrentTimeFrozen { tc =>
val conductor = new Conductor
import conductor._
val threadCompletionCount = new AtomicInteger(0)
thread("threadOne") {
val ctx = newCtx()
ctx.p() = Throw(new Exception)
ctx.failfast().poll match {
case Some(Throw(ex: FailedFastException)) => {
ex.serviceName = "threadOne"
assert(beat === 0)
}
case _ => throw new Exception
}
threadCompletionCount.incrementAndGet()
}
thread("threadTwo") {
waitForBeat(1)
val ctx = newCtx()
ctx.p() = Throw(new Exception)
ctx.failfast().poll match {
case Some(Throw(ex: FailedFastException)) => {
assert(ex.serviceName === SourcedException.UnspecifiedServiceName)
}
case _ => throw new Exception
}
threadCompletionCount.incrementAndGet()
}
whenFinished {
assert(threadCompletionCount.get === 2)
}
}
}
test("accepts empty backoff stream") {
Time.withCurrentTimeFrozen { tc =>
val ctx = newCtx()
import ctx._
val failfast = new FailFastFactory(underlying, stats, timer, Stream.empty)
failfast()
}
}
}
| travisbrown/finagle | finagle-core/src/test/scala/com/twitter/finagle/service/FailFastFactoryTest.scala | Scala | apache-2.0 | 6,859 |
import com.clearspring.analytics.stream.cardinality.HyperLogLog
import org.apache.spark.graphx._
import scala.collection.mutable._
import scala.reflect.ClassTag
/** HyperANF algorithm. */
object HyperAnf {
/**
* Compute the Approximation of the Neighbourhood Function of a graph
* The Neighbourhood Function N(t) counts the number of pairs of nodes such that each one is reachable from other in less than t hops
* See "Boldi P, Rosa M, Vigna S. HyperANF: Approximating the neighbourhood function of very large graphs on a budget[C]//Proceedings of the 20th international conference on World wide web. ACM, 2011: 625-634."
*
* @tparam VD the vertex attribute type (discarded in the computation)
* @tparam ED the edge attribute type (discarded in the computation)
*
* @param graph the graph for which to compute the neighbourhood function
* @param log2m number of registers, larger leads to better estimate but much costly
*
* @return a list of number that its t-th element counts the approximate number of N(t)
*/
def anf[VD: ClassTag, ED: ClassTag](graph: Graph[VD, ED], log2m: Int): MutableList[Double] = {
// Initial the graph, assign a counter to each vertex that contains the vertex id only
var anfGraph = graph.mapVertices { case (vid, _) =>
val counter = new HyperLogLog(log2m)
counter.offer(vid)
counter
}
// Send counter to neighbourhood
def sendMessage(edge: EdgeTriplet[HyperLogLog, ED]) = {
Iterator((edge.srcId, edge.dstAttr), (edge.dstId, edge.srcAttr))
}
// Merge two counters of different neighbourhood
def mergeMessage(msg1: HyperLogLog, msg2: HyperLogLog): HyperLogLog = {
mergeCounter(msg1, msg2)
}
// Merge the counter from neighbourhood to the counter of the vertex
def vertexProgram(vid: VertexId, attr: HyperLogLog, msg: HyperLogLog): HyperLogLog = {
mergeCounter(attr, msg)
}
// Initial message and neighbourhood function list
val initialMessage = new HyperLogLog(log2m)
val neighbourhoodFunction = new MutableList[Double]
// It should be the approximation of number vertices of the graph
var anf = anfGraph.vertices.map{ case(vid, counter) => counter.cardinality() }.reduce(_ + _)
neighbourhoodFunction += anf
// Start iteration like pregel
var prevG: Graph[HyperLogLog, ED] = null
var messages = anfGraph.mapReduceTriplets(sendMessage, mergeMessage)
var lastAnf = 0.0
// Loop until the neighbourhood function list does not increase
while(neighbourhoodFunction.isEmpty || lastAnf != anf) {
lastAnf = anf
// Update vertices
val newVerts = anfGraph.vertices.innerJoin(messages)(vertexProgram).cache()
prevG = anfGraph
anfGraph = anfGraph.outerJoinVertices(newVerts) { (vid, old, newOpt) => newOpt.getOrElse(old) }
anfGraph.cache()
anf = anfGraph.vertices.map{ case(vid, counter) => counter.cardinality() }.reduce(_ + _)
neighbourhoodFunction += anf
// Propagation
val oldMessages = messages
messages = anfGraph.mapReduceTriplets(sendMessage, mergeMessage, Some((newVerts, EdgeDirection.Either))).cache()
messages.count()
// Unpersist
oldMessages.unpersist(blocking=false)
newVerts.unpersist(blocking=false)
prevG.unpersistVertices(blocking=false)
prevG.edges.unpersist(blocking=false)
}
neighbourhoodFunction
}
/**
* Calculate the average distance of a graph
* @param anf Approximate neighbourhood function
* @return Average distance
*/
def avgDistance(anf: MutableList[Double]): Double = {
val maxANF = anf.max
val cdf = anf.map(x => x / maxANF)
val size = anf.size
val pdf = cdf.takeRight(size-1) zip cdf.take(size-1) map {case (next, cur) => (next - cur) / (1 - cdf(0)) }
1 to size zip pdf map {case (distance, p) => distance * p} sum
}
  /**
   * Merge two counters.
   *
   * `HyperLogLog.merge` is declared to return the counter supertype, so the
   * pattern match narrows the result back to `HyperLogLog`; any other runtime
   * type is treated as a programming error.
   *
   * @param hll1 HyperLogLog counter
   * @param hll2 Another HyperLogLog counter
   * @return Merged counter containing elements of both input counters
   */
  def mergeCounter(hll1: HyperLogLog, hll2: HyperLogLog): HyperLogLog = {
    hll1.merge(hll2) match {
      case hll: HyperLogLog => hll
      case _ => throw new ClassCastException
    }
  }
| alcaid1801/Erdos | src/main/scala/HyperANF.scala | Scala | gpl-3.0 | 4,282 |
package chat
import akka.actor.Actor
import akka.actor.Props
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.{Publish, Subscribe}
object ChatClient {
def props(name: String): Props = Props(classOf[ChatClient], name)
case class Publish(msg: String)
case class Message(from: String, text: String)
}
/** Chat participant: subscribes itself to the shared "chatroom" topic on the
 *  distributed pub-sub mediator, publishes its own messages and prints
 *  messages received from others.
 */
class ChatClient(name: String) extends Actor {
  val mediator = DistributedPubSub(context.system).mediator
  val topic = "chatroom"
  // register with the mediator so we receive every message published to the topic
  mediator ! Subscribe(topic, self)
  println(s"$name joined chat room")
  def receive = {
    case ChatClient.Publish(msg) =>
      // NOTE: the unqualified `Publish` here is DistributedPubSubMediator.Publish
      // (imported above), not ChatClient.Publish — same name, different message
      mediator ! Publish(topic, ChatClient.Message(name, msg))
    case ChatClient.Message(from, text) =>
      // ">>>>" marks our own message echoed back by the mediator
      val direction = if (sender == self) ">>>>" else s"<< $from:"
      println(s"$name $direction $text")
  }
}
} | typesafehub/activator-akka-clustering | src/main/scala/chat/ChatClient.scala | Scala | cc0-1.0 | 838 |
package org.edla.tmdb.api
//import acyclic.file
import java.nio.file.Path
import scala.concurrent.Future
import Protocol.{AuthenticateResult, Configuration, Credits, Movie, Results}
import akka.stream.IOResult
/** Asynchronous client interface for The Movie Database (TMDb) API. */
trait TmdbApi {
  /** Fetches full details of the movie with the given TMDb id. */
  def getMovie(id: Long): Future[Movie]
  /** Fetches the cast/crew credits of the movie with the given TMDb id. */
  def getCredits(id: Long): Future[Credits]
  /** Fetches the API configuration (e.g. image base URLs). */
  def getConfiguration(): Future[Configuration]
  /** Requests a new authentication token. */
  def getToken(): Future[AuthenticateResult]
  /** Searches movies matching `query`, returning the given result page. */
  def searchMovie(query: String, page: Int): Future[Results]
  /** Downloads the movie's poster to `path`; None when no poster is available. */
  def downloadPoster(movie: Movie, path: Path): Option[Future[IOResult]]
  /** Releases client resources; the instance is unusable afterwards. */
  def shutdown(): Unit
}
| newca12/TMDb-async-client | src/main/scala/org/edla/tmdb/api/TmdbApi.scala | Scala | gpl-3.0 | 565 |
package org.jetbrains.plugins.scala.lang.psi.types.api
import com.intellij.psi.{PsiClass, PsiNamedElement, PsiPackage}
import org.apache.commons.lang.StringEscapeUtils
import org.jetbrains.plugins.scala.extensions.{PsiClassExt, PsiNamedElementExt, childOf}
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScRefinement
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScTypeAliasDeclaration, ScTypeAliasDefinition}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScMember, ScObject}
import org.jetbrains.plugins.scala.lang.psi.light.scala.ScLightTypeAliasDefinition
import org.jetbrains.plugins.scala.lang.psi.types.{ScType, ScTypeExt}
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaNamesUtil
/**
* @author adkozlov
*/
trait TypePresentation {
final def presentableText(`type`: ScType, withPrefix: Boolean = true): String = typeText(`type`, {
case c: PsiClass if withPrefix => ScalaPsiUtil.nameWithPrefixIfNeeded(c)
case e => e.name
}, {
case o: ScObject if Set("scala.Predef", "scala").contains(o.qualifiedName) => ""
case _: PsiPackage => ""
case c: PsiClass => ScalaPsiUtil.nameWithPrefixIfNeeded(c) + "."
case e => e.name + "."
}
)
final def urlText(`type`: ScType): String = {
def nameFun(e: PsiNamedElement, withPoint: Boolean): String = {
e match {
case o: ScObject if withPoint && o.qualifiedName == "scala.Predef" => ""
case e: PsiClass => "<a href=\\"psi_element://" + e.qualifiedName + "\\"><code>" +
StringEscapeUtils.escapeHtml(e.name) +
"</code></a>" + (if (withPoint) "." else "")
case _: PsiPackage if withPoint => ""
case _ => StringEscapeUtils.escapeHtml(e.name) + "."
}
}
typeText(`type`, nameFun(_, withPoint = false), nameFun(_, withPoint = true))
}
final def canonicalText(`type`: ScType): String = {
def removeKeywords(s: String): String =
ScalaNamesUtil.escapeKeywordsFqn(s)
def nameFun(e: PsiNamedElement, withPoint: Boolean): String = {
removeKeywords(e match {
case c: PsiClass =>
val qname = c.qualifiedName
if (qname != null && qname != c.name /* exlude default package*/ ) "_root_." + qname else c.name
case p: PsiPackage => "_root_." + p.getQualifiedName
case _ =>
ScalaPsiUtil.nameContext(e) match {
case m: ScMember =>
m.containingClass match {
case o: ScObject => nameFun(o, withPoint = true) + e.name
case _ => e.name
}
case _ => e.name
}
}) + (if (withPoint) "." else "")
}
typeText(`type`, nameFun(_, withPoint = false), nameFun(_, withPoint = true))
}
protected def typeText(`type`: ScType,
nameFun: PsiNamedElement => String,
nameWithPointFun: PsiNamedElement => String): String
}
object ScTypePresentation {
val ABSTRACT_TYPE_POSTFIX = "_"
def different(t1: ScType, t2: ScType): (String, String) = {
val (p1, p2) = (t1.presentableText, t2.presentableText)
if (p1 != p2) (p1, p2)
else (t1.canonicalText.replace("_root_.", ""), t2.canonicalText.replace("_root_.", ""))
}
def shouldExpand(ta: ScTypeAliasDefinition): Boolean = ta match {
case _: ScLightTypeAliasDefinition | childOf(_, _: ScRefinement) => true
case _ =>
ScalaPsiUtil.superTypeMembers(ta).exists(_.isInstanceOf[ScTypeAliasDeclaration])
}
def withoutAliases(`type`: ScType): String = {
`type`.removeAliasDefinitions(expandableOnly = true).presentableText
}
}
case class ScTypeText(tp: ScType) {
val canonicalText: String = tp.canonicalText
val presentableText: String = tp.presentableText
}
| triplequote/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/types/api/ScTypePresentation.scala | Scala | apache-2.0 | 3,815 |
package kartograffel.server.infrastructure.doobie.statements
import kartograffel.server.infrastructure.doobie.DbSpecification._
import kartograffel.server.ArbitraryInstances._
import kartograffel.server.domain.model.{GraffelId, Radius, Tag}
import kartograffel.shared.domain.model.Position
import org.scalatest.funsuite.AnyFunSuite
class TagStatementsTest extends AnyFunSuite {
test("find") {
check(TagStatements.find(sampleOf[String], sampleOf[GraffelId]))
}
test("findTagsByPosition") {
check(TagStatements.findTagsByPosition(sampleOf[Position], sampleOf[Radius]))
}
test("create") {
check(TagStatements.create(sampleOf[Tag]))
}
}
| fthomas/kartograffel | modules/server/jvm/src/test/scala/kartograffel/server/infrastructure/doobie/statements/TagStatementsTest.scala | Scala | apache-2.0 | 660 |
package xyz.discretezoo.web.db.model
import xyz.discretezoo.web.Parameter
import xyz.discretezoo.web.db.Property
abstract class Columns {
protected def columnsIndex: Seq[Property]
protected def columnsBool: Seq[Property]
protected def columnsInt: Seq[Property]
protected def columnsString: Seq[Property]
protected def transformParameter(p: Parameter): Parameter
def isValidFilterColumnName(s: String): Boolean
def all: Seq[Property] = columnsIndex ++ columnsBool ++ columnsInt ++ columnsString
def getColumnList: String = all.map(p => s""""${p.name}"""").mkString(", ")
def isValidIndexColumnName(s: String): Boolean = columnsIndex.map(_.name).contains(s)
def isValidBoolColumnName(s: String): Boolean = columnsBool.map(_.name).contains(s)
def isValidIntColumnName(s: String): Boolean = columnsInt.map(_.name).contains(s)
def isValidStringColumnName(s: String): Boolean = columnsString.map(_.name).contains(s)
def isValidQueryFilter(p: Parameter): Boolean = {
val t = transformParameter(p)
(isValidBoolColumnName(t.name) && isBoolValue(t.value)) ||
(isValidIntColumnName(t.name) && isNumericCondition(t.value))
}
// assumes valid conditions
def queryCondition(p: Parameter): String = {
val t = transformParameter(p)
val escapedColumnName = s""""${t.name}""""
if (isValidBoolColumnName(t.name))
(if (p.value == "false") "NOT " else "") + escapedColumnName
else escapedColumnName + p.value.filter(_ > ' ')
}
def isBoolValue(s: String): Boolean = s == "true" || s == "false"
def isNumericCondition(s: String): Boolean = {
Seq("=", "<=", ">=", "<", ">", "<>", "!=").exists(op => {
if (s.filter(_ > ' ').startsWith(op)) s.filter(_ > ' ').substring(op.length).forall(_.isDigit)
else false
})
}
}
| DiscreteZOO/DiscreteZOO-web | src/main/scala/xyz/discretezoo/web/db/model/Columns.scala | Scala | mit | 1,797 |
package dao
import util.Page
import models.TQueryable
/**
 * Generic DAO contract for CRUD and paged queries.
 *
 * Created by hooxin on 14-3-7.
 *
 * @tparam M entity type
 * @tparam L primary-key type
 * @tparam Q query-criteria type
 */
trait BaseDao[M, L,Q<:TQueryable] {
  //  case class MaybeFilter[X, Y](val query: Query[X, Y]) {
  //    def filter[T, R: CanBeQueryCondition](data: Option[T])(f: T => X => R) = {
  //      data.map(v => MaybeFilter(query.filter(f(v)))).getOrElse(this)
  //    }
  //  }
  /**
   * Insert.
   * @param m entity to insert
   * @return the entity after insertion
   */
  def insert(m: M): M
  /**
   * Update.
   * @param m entity to update
   */
  def update(m: M): Unit
  /**
   * Delete.
   * @param m entity to delete
   */
  def delete(m: M): Unit
  /**
   * Delete by primary key.
   * @param id primary key
   */
  def deleteById(id: L): Unit
  /**
   * Fetch a single entity by primary key.
   * @param id primary key
   * @return the entity, if found
   */
  def getById(id: L): Option[M]
  /**
   * Total-count query for pagination.
   * @param params query criteria
   * @return number of matching rows
   */
  def count(params : Q ): Int
  /**
   * Non-paged query.
   * @param params query criteria
   * @return list of matching entities
   */
  def list(params: Q): List[M]
  /**
   * Paged query.
   * @param pageno page number
   * @param pagesize number of rows per page
   * @param params query criteria
   * @param sort sort field
   * @param dir sort direction (ascending/descending)
   * @return one page of results
   */
  def page(pageno: Int, pagesize: Int, params: Q, sort: String, dir: String): Page[M]
}
| firefoxmmx2/techsupport_ext4_scala | app/dao/BaseDao.scala | Scala | apache-2.0 | 1,421 |
/*
* Ported from https://github.com/junit-team/junit
*/
package org.junit
import org.junit.internal.InexactComparisonCriteria
import org.junit.internal.ExactComparisonCriteria
import org.hamcrest.Matcher
import org.hamcrest.MatcherAssert
object Assert {
def assertTrue(message: String, condition: Boolean): Unit = {
if (!condition)
fail(message)
}
def assertTrue(condition: Boolean): Unit =
assertTrue(null, condition)
def assertFalse(message: String, condition: Boolean): Unit =
assertTrue(message, !condition)
def assertFalse(condition: Boolean): Unit =
assertFalse(null, condition)
def fail(message: String): Unit =
if (message eq null) throw new AssertionError()
else throw new AssertionError(message)
def fail(): Unit =
fail(null)
def assertEquals(message: String, expected: Any, actual: Any): Unit = {
if (!equalsRegardingNull(expected, actual)) {
(expected, actual) match {
case (expectedString: String, actualString: String) =>
val cleanMsg: String = if (message == null) "" else message
throw new ComparisonFailure(cleanMsg, expectedString, actualString)
case _ =>
failNotEquals(message, expected, actual)
}
}
}
@inline
private def equalsRegardingNull(expected: Any, actual: Any): Boolean =
if (expected == null) actual == null
else isEquals(expected, actual)
@inline
private def isEquals(expected: Any, actual: Any): Boolean =
expected.equals(actual)
def assertEquals(expected: Any, actual: Any): Unit =
assertEquals(null, expected, actual)
def assertNotEquals(message: String, unexpected: AnyRef,
actual: AnyRef): Unit = {
if (equalsRegardingNull(unexpected, actual))
failEquals(message, actual)
}
def assertNotEquals(unexpected: AnyRef, actual: AnyRef): Unit =
assertNotEquals(null, unexpected, actual)
private def failEquals(message: String, actual: Any): Unit = {
val checkedMessage = {
if (message != null) message
else "Values should be different"
}
fail(s"$checkedMessage. Actual: $actual")
}
def assertNotEquals(message: String, unexpected: Long, actual: Long): Unit = {
if (unexpected == actual)
failEquals(message, actual)
}
def assertNotEquals(unexpected: Long, actual: Long): Unit =
assertNotEquals(null, unexpected, actual)
def assertNotEquals(message: String, unexpected: Double, actual: Double,
delta: Double): Unit = {
if (!doubleIsDifferent(unexpected, actual, delta))
failEquals(message, actual)
}
def assertNotEquals(unexpected: Double, actual: Double, delta: Double): Unit =
assertNotEquals(null, unexpected, actual, delta)
def assertNotEquals(unexpected: Float, actual: Float, delta: Float): Unit =
assertNotEquals(null, unexpected, actual, delta)
  // assertArrayEquals overloads: a (message, arrays) and (arrays) variant per
  // element type. Exact element types funnel into internalArrayEquals; the
  // Double/Float variants compare elements with a tolerance via
  // InexactComparisonCriteria.
  def assertArrayEquals(message: String, expecteds: Array[AnyRef],
      actuals: Array[AnyRef]): Unit = {
    internalArrayEquals(message, expecteds, actuals)
  }

  def assertArrayEquals(expecteds: Array[AnyRef],
      actuals: Array[AnyRef]): Unit = {
    assertArrayEquals(null, expecteds, actuals)
  }

  def assertArrayEquals(message: String, expecteds: Array[Boolean],
      actuals: Array[Boolean]): Unit = {
    internalArrayEquals(message, expecteds, actuals)
  }

  def assertArrayEquals(expecteds: Array[Boolean],
      actuals: Array[Boolean]): Unit = {
    assertArrayEquals(null, expecteds, actuals)
  }

  def assertArrayEquals(message: String, expecteds: Array[Byte],
      actuals: Array[Byte]): Unit = {
    internalArrayEquals(message, expecteds, actuals)
  }

  def assertArrayEquals(expecteds: Array[Byte], actuals: Array[Byte]): Unit =
    assertArrayEquals(null, expecteds, actuals)

  def assertArrayEquals(message: String, expecteds: Array[Char],
      actuals: Array[Char]): Unit = {
    internalArrayEquals(message, expecteds, actuals)
  }

  def assertArrayEquals(expecteds: Array[Char], actuals: Array[Char]): Unit =
    assertArrayEquals(null, expecteds, actuals)

  def assertArrayEquals(message: String, expecteds: Array[Short],
      actuals: Array[Short]): Unit = {
    internalArrayEquals(message, expecteds, actuals)
  }

  def assertArrayEquals(expecteds: Array[Short],
      actuals: Array[Short]): Unit = {
    assertArrayEquals(null, expecteds, actuals)
  }

  def assertArrayEquals(message: String, expecteds: Array[Int],
      actuals: Array[Int]): Unit = {
    internalArrayEquals(message, expecteds, actuals)
  }

  def assertArrayEquals(expecteds: Array[Int], actuals: Array[Int]): Unit =
    assertArrayEquals(null, expecteds, actuals)

  def assertArrayEquals(message: String, expecteds: Array[Long],
      actuals: Array[Long]): Unit = {
    internalArrayEquals(message, expecteds, actuals)
  }

  def assertArrayEquals(expecteds: Array[Long], actuals: Array[Long]): Unit =
    assertArrayEquals(null, expecteds, actuals)

  def assertArrayEquals(message: String, expecteds: Array[Double],
      actuals: Array[Double], delta: Double): Unit = {
    new InexactComparisonCriteria(delta).arrayEquals(message, expecteds, actuals)
  }

  def assertArrayEquals(expecteds: Array[Double], actuals: Array[Double],
      delta: Double): Unit = {
    assertArrayEquals(null, expecteds, actuals, delta)
  }

  def assertArrayEquals(message: String, expecteds: Array[Float],
      actuals: Array[Float], delta: Float): Unit = {
    new InexactComparisonCriteria(delta).arrayEquals(message, expecteds, actuals)
  }

  def assertArrayEquals(expecteds: Array[Float], actuals: Array[Float],
      delta: Float): Unit = {
    assertArrayEquals(null, expecteds, actuals, delta)
  }

  /** Element-by-element exact comparison shared by the overloads above. */
  private def internalArrayEquals(message: String, expecteds: AnyRef,
      actuals: AnyRef): Unit = {
    new ExactComparisonCriteria().arrayEquals(message, expecteds, actuals)
  }
  /** Asserts the two doubles are equal to within the positive `delta`. */
  def assertEquals(message: String, expected: Double, actual: Double,
      delta: Double): Unit = {
    if (doubleIsDifferent(expected, actual, delta)) {
      failNotEquals(message, expected, actual)
    }
  }

  /** Asserts the two floats are equal to within the positive `delta`. */
  def assertEquals(message: String, expected: Float, actual: Float,
      delta: Float): Unit = {
    if (floatIsDifferent(expected, actual, delta)) {
      failNotEquals(message, expected, actual)
    }
  }

  /** Fails if the two floats are equal to within `delta`. */
  def assertNotEquals(message: String, unexpected: Float, actual: Float,
      delta: Float): Unit = {
    if (!floatIsDifferent(unexpected, actual, delta))
      failEquals(message, actual)
  }
private def doubleIsDifferent(d1: Double, d2: Double,
delta: Double): Boolean = {
java.lang.Double.compare(d1, d2) != 0 && Math.abs(d1 - d2) > delta
}
private def floatIsDifferent(f1: Float, f2: Float, delta: Float): Boolean =
java.lang.Float.compare(f1, f2) != 0 && Math.abs(f1 - f2) > delta
  def assertEquals(expected: Double, actual: Double, delta: Double): Unit =
    assertEquals(null, expected, actual, delta)

  def assertEquals(expected: Float, actual: Float, delta: Float): Unit =
    assertEquals(null, expected, actual, delta)

  /** Asserts `obj` is non-null, failing with `message` otherwise. */
  def assertNotNull(message: String, obj: AnyRef): Unit =
    assertTrue(message, obj != null)

  def assertNotNull(obj: AnyRef): Unit =
    assertNotNull(null, obj)

  /** Asserts `obj` is null, failing with `message` otherwise. */
  def assertNull(message: String, obj: AnyRef): Unit = {
    if (obj != null)
      failNotNull(message, obj)
  }

  def assertNull(obj: AnyRef): Unit =
    assertNull(null, obj)
private def failNotNull(message: String, actual: AnyRef): Unit = {
val formatted = if (message != null) message + " " else ""
fail(s"${formatted}expected null, but was:<$actual}>")
}
  /** Asserts reference identity (`eq`), not `equals`, of the two arguments. */
  def assertSame(message: String, expected: AnyRef, actual: AnyRef): Unit = {
    if (expected ne actual)
      failNotSame(message, expected, actual)
  }

  def assertSame(expected: AnyRef, actual: AnyRef): Unit =
    assertSame(null, expected, actual)

  /** Asserts the two arguments are NOT the same object reference. */
  def assertNotSame(message: String, unexpected: AnyRef, actual: AnyRef): Unit = {
    if (unexpected eq actual)
      failSame(message)
  }

  def assertNotSame(unexpected: AnyRef, actual: AnyRef): Unit =
    assertNotSame(null, unexpected, actual)
private def failSame(message: String): Unit = {
if (message == null)
fail("expected not same")
else
fail(s"$message expected not same")
}
private def failNotSame(message: String, expected: AnyRef,
actual: AnyRef): Unit = {
if (message == null)
fail(s"expected same:<$expected> was not:<$actual>")
else
fail(s"$message expected same:<$expected> was not:<$actual>")
}
  /** Fails with the standard "expected:<...> but was:<...>" message. */
  @inline
  private def failNotEquals(message: String, expected: Any, actual: Any): Unit =
    fail(format(message, expected, actual))
private[junit] def format(message: String, expected: Any, actual: Any): String = {
val formatted = if (message != null && message != "") message + " " else ""
val expectedString = String.valueOf(expected)
val actualString = String.valueOf(actual)
if (expectedString == actualString) {
val expectedFormatted = formatClassAndValue(expected, expectedString)
val actualFormatted = formatClassAndValue(actual, actualString)
s"${formatted}expected: $expectedFormatted but was: $actualFormatted"
} else {
s"${formatted}expected:<$expectedString> but was:<$actualString>"
}
}
private def formatClassAndValue(value: Any, valueString: String): String = {
val className = if (value == null) "null" else value.getClass.getName
s"$className<$valueString>"
}
  /** Hamcrest-style assertion with no reason text. */
  def assertThat[T](actual: T, matcher: Matcher[T]): Unit =
    assertThat("", actual, matcher)

  /** Hamcrest-style assertion; delegates to MatcherAssert. */
  def assertThat[T](reason: String, actual: T, matcher: Matcher[T]): Unit =
    MatcherAssert.assertThat(reason, actual, matcher)
// The following methods will be available on JUnit 4.13, a backport implementation
// is being tested in JUnitAssertionTest until 4.13 is released.
/*
def assertThrows(expectedThrowable: Class[_ <: Throwable],
runnable: ThrowingRunnable): Unit = {
expectThrows(expectedThrowable, runnable)
}
def expectThrows[T <: Throwable](expectedThrowable: Class[T], runnable: ThrowingRunnable): T = {
try {
runnable.run()
val message =
s"expected ${expectedThrowable.getSimpleName} to be thrown," +
" but nothing was thrown"
throw new AssertionError(message)
} catch {
case actualThrown: Throwable =>
if (expectedThrowable.isInstance(actualThrown)) {
actualThrown.asInstanceOf[T]
} else {
val mismatchMessage = format("unexpected exception type thrown;",
expectedThrowable.getSimpleName, actualThrown.getClass.getSimpleName)
val assertionError = new AssertionError(mismatchMessage)
assertionError.initCause(actualThrown)
throw assertionError
}
}
}
trait ThrowingRunnable {
def run(): Unit
}
*/
}
| mdedetrich/scala-js | junit-runtime/src/main/scala/org/junit/Assert.scala | Scala | bsd-3-clause | 10,836 |
import com.twitter.scalding._
import com.twitter.scalding.mathematics.Matrix
/*
* MatrixTutorial1.scala
*
* Loads a directed graph adjacency matrix where a[i,j] = 1 if there is an edge from a[i] to b[j]
* and compute the co-follows between any two nodes
*
yarn jar target/scalding-tutorial-0.14.0.jar MatrixTutorial1 --local\\
--input data/graph.tsv --output target/data/cofollows.tsv
*
*/
/**
 * Scalding job: reads a directed-graph adjacency list and writes, for every
 * pair of nodes, the co-follow count computed as A * A^T.
 */
class MatrixTutorial1(args : Args) extends Job(args) {

  import Matrix._

  // Lift the (user1, user2, rel) adjacency TSV into a sparse Long x Long matrix.
  val adjacencyMatrix = Tsv( args("input"), ('user1, 'user2, 'rel) )
    .read
    .toMatrix[Long,Long,Double]('user1, 'user2, 'rel)

  // compute the innerproduct of the adjacency matrix with itself
  (adjacencyMatrix * adjacencyMatrix.transpose).write( Tsv( args("output") ) )
}
| Cascading/scalding-tutorial | src/main/scala/tutorial/MatrixTutorial1.scala | Scala | apache-2.0 | 761 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package controllers
import play.api.mvc._
import views._
/** Play controller serving the application's index page. */
class Application(controllerComponents: ControllerComponents) extends AbstractController(controllerComponents) {
  // Renders the index template with a 200 OK response.
  def index = Action {
    Ok(html.index())
  }
}
| rcavalcanti/lagom | dev/sbt-plugin/src/sbt-test/sbt-plugin/run-all-scaladsl/p/app/controllers/Application.scala | Scala | apache-2.0 | 300 |
package com.featurefm.io.customer
import nl.grons.metrics.scala.{Timer => ScalaTimer}
/**
* Created by yardena on 1/21/16.
*/
// Metrics timers exposed by the customer API client, one per operation
// (the names mirror the operations they measure).
trait Timers {
  def identify: ScalaTimer
  def track: ScalaTimer
  def delete: ScalaTimer
  def health: ScalaTimer
}
| ListnPlay/Kastomer | src/main/scala/com/featurefm/io/customer/Timers.scala | Scala | mit | 257 |
/*
* Copyright 2011-2021 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.extensions.iterativebatch.compiler
package graph
import com.asakusafw.bridge.hadoop.temporary.TemporaryFileOutputFormat
import scala.collection.JavaConversions._
import org.objectweb.asm.Type
import com.asakusafw.lang.compiler.extension.directio.DirectFileIoModels
import com.asakusafw.lang.compiler.model.graph.ExternalOutput
import com.asakusafw.lang.compiler.planning.SubPlan
import com.asakusafw.spark.compiler.`package`._
import com.asakusafw.spark.compiler.planning.SubPlanInfo
import com.asakusafw.spark.compiler.spi.NodeCompiler
import com.asakusafw.spark.compiler.graph.{
Instantiator,
TemporaryOutputClassBuilder,
TemporaryOutputInstantiator
}
import com.asakusafw.spark.extensions.iterativebatch.compiler.spi.RoundAwareNodeCompiler
/**
 * Compiles a subplan whose primary operator is an [[ExternalOutput]] written
 * via Asakusa temporary-file storage (i.e. not served by direct file I/O).
 */
class TemporaryOutputCompiler extends RoundAwareNodeCompiler {

  /**
   * Returns whether this compiler handles `subplan`: its primary operator
   * must be an external output and, when direct output is enabled, one whose
   * model is NOT supported by direct file I/O (those are handled elsewhere).
   */
  override def support(
    subplan: SubPlan)(
    implicit context: NodeCompiler.Context): Boolean = {
    val subPlanInfo = subplan.getAttribute(classOf[SubPlanInfo])
    val primaryOperator = subPlanInfo.getPrimaryOperator
    if (primaryOperator.isInstanceOf[ExternalOutput]) {
      val operator = primaryOperator.asInstanceOf[ExternalOutput]
      if (context.options.useOutputDirect) {
        // getInfo may be null; treat missing info as "not direct" => supported.
        Option(operator.getInfo).map { info =>
          !DirectFileIoModels.isSupported(info)
        }.getOrElse(true)
      } else {
        true
      }
    } else {
      false
    }
  }

  override def instantiator: Instantiator = TemporaryOutputInstantiator

  /**
   * Registers the output's working-path pattern with the compile context and
   * generates the output node class for the subplan.
   */
  override def compile(
    subplan: SubPlan)(
    implicit context: NodeCompiler.Context): Type = {
    assert(support(subplan), s"The subplan is not supported: ${subplan}")
    val subPlanInfo = subplan.getAttribute(classOf[SubPlanInfo])
    val primaryOperator = subPlanInfo.getPrimaryOperator
    assert(primaryOperator.isInstanceOf[ExternalOutput],
      s"The primary operator should be external output: ${primaryOperator}")
    val operator = primaryOperator.asInstanceOf[ExternalOutput]

    // Wildcard pattern matching the per-round temporary output files.
    context.addExternalOutput(
      operator.getName, operator.getInfo,
      Seq(context.options.getRuntimeWorkingPath(
        s"${operator.getName}/*/${TemporaryFileOutputFormat.DEFAULT_FILE_NAME}-*")))

    val builder =
      new TemporaryOutputClassBuilder(
        operator)(
        subplan.label)

    context.addClass(builder)
  }
}
| asakusafw/asakusafw-spark | extensions/iterativebatch/compiler/core/src/main/scala/com/asakusafw/spark/extensions/iterativebatch/compiler/graph/TemporaryOutputCompiler.scala | Scala | apache-2.0 | 2,960 |
package ore.discourse
import scala.language.higherKinds
/**
 * Operations against the Discourse forum REST API, abstracted over an
 * effect type `F`. All mutating calls report failures as [[DiscourseError]]
 * values rather than thrown exceptions.
 */
trait DiscourseApi[F[_]] {

  /**
    * Creates a new topic as the specified poster.
    *
    * @param poster Poster username
    * @param title Topic title
    * @param content Topic raw content
    * @param categoryId Optional category id
    * @return New topic or list of errors
    */
  def createTopic(
      poster: String,
      title: String,
      content: String,
      categoryId: Option[Int]
  ): F[Either[DiscourseError, DiscoursePost]]

  /**
    * Creates a new post as the specified user.
    *
    * @param poster User to post as
    * @param topicId Topic ID
    * @param content Raw content
    * @return New post or list of errors
    */
  def createPost(poster: String, topicId: Int, content: String): F[Either[DiscourseError, DiscoursePost]]

  /**
    * Updates a topic as the specified user.
    *
    * @param poster Username to update as
    * @param topicId Topic ID
    * @param title Optional new topic title
    * @param categoryId Optional new category ID
    * @return List of errors
    */
  def updateTopic(
      poster: String,
      topicId: Int,
      title: Option[String],
      categoryId: Option[Int]
  ): F[Either[DiscourseError, Unit]]

  /**
    * Updates a post as the specified user.
    *
    * @param poster User to update as
    * @param postId Post ID
    * @param content Raw content
    * @return List of errors
    */
  def updatePost(poster: String, postId: Int, content: String): F[Either[DiscourseError, Unit]]

  /**
    * Deletes the specified topic.
    *
    * @param poster User to delete as
    * @param topicId Topic ID
    * @return Unit on success, or the error that occurred
    */
  def deleteTopic(poster: String, topicId: Int): F[Either[DiscourseError, Unit]]

  // Utils

  /**
    * Returns true if the Discourse instance is available.
    *
    * @return True if available
    */
  def isAvailable: F[Boolean]
}
| SpongePowered/Ore | discourse/src/main/scala/ore/discourse/DiscourseApi.scala | Scala | mit | 1,968 |
/*
Copyright 2011 the original author or authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package net.gumbix.bioinf.string.alignment
import scala.collection.mutable.ArrayBuffer
/**
* Implementation of the star alignment algorithm.
* @param strings List of sequences to align.
* @author Markus Gumbel (m.gumbel@hs-mannheim.de)
*/
class StarAlignment(strings: Array[String], ll: Boolean = false)
  extends AbstractMultipleAlignment(strings)
  with ProgressiveAlignment {

  logLevel = ll

  /**
   * Calculate the multiple alignment with the star algorithm.
   */
  val multipleAlignment = {
    /**
     * Calculate root index. Add the similarities for each row, i.e.
     * iterate over the columns per row. Identify the row-index with
     * the highest sum of similarities.
     * Let M be the n x n matrix of similarities and e=(1, 1, ..., 1)
     * a vector. Then root index is max(M e).
     */
    val rootIdx = {
      // similarities is an array of sum of column-similarities
      val similarities = alignments.map {
        ai => // this is row i
        // Sum over all columns for row ai:
          ai.map(aij => aij.similarity).reduceLeft(_ + _)
      }
      logln("alignments:")
      logln(mkAlignmentTable())
      logln("list of similarities: " + similarities.toList.mkString(", "))
      val max = similarities reduceLeft (_ max _)
      val rootIdx = similarities.indexOf(max)
      logln("root " + alignments(rootIdx)(rootIdx).s1 +
        " with max. " + max)
      rootIdx
    }

    // NOTE(review): oroot is not referenced again below; it looks like a
    // leftover candidate for removal — confirm before deleting.
    val oroot = new AlignedString(strings(rootIdx))
    val msa = new ArrayBuffer[AlignedString]
    // Create a list of all indices except the root index:
    val idxs = (0 until alignments.size).toArray.filter(_ != rootIdx)

    // Get the aligned strings for the root and the first child.
    // Note that first aligned string must be the root. If
    // rootIdx is greater than idx the aligned string have to be swapped.
    val (root, leaf) = if (rootIdx > idxs(0)) {
      alignments(rootIdx)(idxs(0)).alignedStrings.swap
    } else {
      alignments(rootIdx)(idxs(0)).alignedStrings
    }
    msa += root
    msa += leaf
    val msaRoot = root // Current msa root element.

    logln("\nInitial pairwise Alignment:")
    logln(msa.mkString("\n"))

    // Go through the remaining children...
    for (idx <- idxs.drop(1)) {
      val (root, leaf) = if (rootIdx > idx) { // See comment above.
        alignments(rootIdx)(idx).alignedStrings.swap
      } else {
        alignments(rootIdx)(idx).alignedStrings
      }
      logln("\nAlignment of root and new leaf:")
      logln("root: " + root.toString)
      logln("leaf: " + leaf.toString)

      // Look for differences in the current root
      // and the root which is part of the msa:
      val (msaGaps, pwGaps) = getInsertions(root, msaRoot)
      logln("Gap positions from pw. alignment (msa): " +
        pwGaps.mkString(", "))
      logln("Gap positions from msa (root, leaf)   : " +
        msaGaps.mkString(", "))

      // Add the msa gaps to the new leaf:
      insertGaps(leaf, msaGaps)
      // Also insert the gaps in the current msa:
      msa.foreach(insertGaps(_, pwGaps.toList))
      msa += leaf // Finally, add the new string.
      logln("\nMultiple alignment:")
      logln(msa.mkString("\n"))
    }
    logln("\nDone.\n")
    msa.toArray
  }
}
| markusgumbel/scalabioalg | core/src/main/scala/net/gumbix/bioinf/string/alignment/StarAlignment.scala | Scala | apache-2.0 | 3,879 |
package io.viper.common
import java.net.InetSocketAddress
import java.util.concurrent.Executors
import org.jboss.netty.bootstrap.ServerBootstrap
import org.jboss.netty.channel.group.{DefaultChannelGroup, ChannelGroup}
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory
import org.jboss.netty.channel.ChannelPipelineFactory
import io.viper.core.server.router._
import java.util
import collection.mutable.ListBuffer
/**
 * Helpers for creating and running Netty-backed HTTP servers bound to a
 * local port. `run` variants block the calling thread forever (by joining
 * the current thread) so the server stays up.
 */
object NestServer
{
  private val _allChannels: ChannelGroup = new DefaultChannelGroup("server")
  private var _virtualServers: ServerBootstrap = null

  // 1 GiB maximum request body size.
  def getMaxContentLength = 1024 * 1024 * 1024

  def create(localhostPort: Int, handler: ChannelPipelineFactory) {
    create(getMaxContentLength, localhostPort, handler)
  }

  /** Boots a server on `localhostPort` and registers its channel for shutdown. */
  def create(maxContentLength: Int, localhostPort: Int, handler: ChannelPipelineFactory) {
    _virtualServers = createServer(maxContentLength, localhostPort, handler)
    _allChannels.add(_virtualServers.bind(new InetSocketAddress(localhostPort)))
  }

  // NOTE(review): maxContentLength and port are accepted but not used here —
  // presumably the pipeline factory applies the limit; confirm.
  private def createServer(maxContentLength: Int, port: Int, handler: ChannelPipelineFactory): ServerBootstrap = {
    val server = new ServerBootstrap(new NioServerSocketChannelFactory(Executors.newCachedThreadPool,
      Executors.newCachedThreadPool))
    server.setOption("tcpNoDelay", true)
    server.setOption("keepAlive", true)
    server.setPipelineFactory(handler)
    server
  }

  def run(handler: ChannelPipelineFactory) {
    run(80, handler)
  }

  def run(localhostPort: Int, handler: ChannelPipelineFactory) {
    create(getMaxContentLength, localhostPort, handler)
    // Joining the current thread blocks forever, keeping the server alive.
    Thread.currentThread.join()
  }

  /** Runs a REST server whose routes are added by the callback `f`. */
  def run(localhostPort: Int)(f:(RestServer) => Unit) {
    val handler = new RestServer {
      def addRoutes() {
        f(this)
      }
    }
    create(getMaxContentLength, localhostPort, handler)
    Thread.currentThread.join()
  }

  /** Closes all channels opened by `create`. */
  def shutdown() {
    _allChannels.close.awaitUninterruptibly()
  }
}
/**
 * Base class for single-host REST servers. Subclass bodies register routes
 * via get/put/post/delete; DelayedInit defers the body so it runs inside
 * the server's addRoutes(). NOTE(review): DelayedInit is deprecated in
 * later Scala versions — consider an explicit init hook.
 */
class NestServer(val port: Int = 80) extends DelayedInit {
  import NestServer._

  protected def args: Array[String] = _args
  protected def server: RestServer = _server

  private var _server: RestServer = _
  private var _args: Array[String] = _

  // The subclass constructor body becomes the route-registration callback.
  override def delayedInit(body: => Unit) {
    _server = new RestServer {
      def addRoutes() {
        body
      }
    }
  }

  /** Registers a GET route whose handler is the function `f`. */
  def get(route: String)(f:(util.Map[String, String]) => RouteResponse) {
    val handler = new RouteHandler {
      def exec(args: util.Map[String, String]) = f(args)
    }
    server.addRoute(new GetRoute(route, handler))
  }

  /** Registers a PUT route whose handler is the function `f`. */
  def put(route: String)(f:(util.Map[String, String]) => RouteResponse) {
    val handler = new RouteHandler {
      def exec(args: util.Map[String, String]) = f(args)
    }
    server.addRoute(new PutRoute(route, handler))
  }

  /** Registers a POST route whose handler is the function `f`. */
  def post(route: String)(f:(util.Map[String, String]) => RouteResponse) {
    val handler = new RouteHandler {
      def exec(args: util.Map[String, String]) = f(args)
    }
    server.addRoute(new PostRoute(route, handler))
  }

  /** Registers a DELETE route whose handler is the function `f`. */
  def delete(route: String)(f:(util.Map[String, String]) => RouteResponse) {
    val handler = new RouteHandler {
      def exec(args: util.Map[String, String]) = f(args)
    }
    server.addRoute(new DeleteRoute(route, handler))
  }

  /** The main method.
   *  This stores all argument so that they can be retrieved with `args`
   *  and the executes all initialization code segments in the order they were
   *  passed to `delayedInit`
   *  @param args the arguments passed to the main method
   */
  def main(args: Array[String]) {
    this._args = args
    create(getMaxContentLength, port, server)
    Thread.currentThread.join()
  }
}
/** Serves static resources from `resourcePath` on `port`; blocks forever. */
class StaticServer(resourcePath: String, port: Int = 80) extends App {
  import NestServer._
  create(getMaxContentLength, port, new ViperServer(resourcePath))
  Thread.currentThread.join()
}
/**
 * Routes requests for multiple hostnames to their own virtual servers on a
 * single port, using a HostRouterHandler to dispatch by Host header.
 */
class MultiHostServer(port: Int = 80) {
  import NestServer._

  // Runners are started before their routes are registered and stopped on exit.
  val runners = new ListBuffer[VirtualServerRunner]

  def run() {
    try {
      runners.foreach(_.start())
      runners.foreach{ runner =>
        val viperServer = runner.create
        viperServer.resourceInstance = runner.getClass
        route(runner.hostname, viperServer)
      }
      create(getMaxContentLength, port, server)
      Thread.currentThread.join()
    } finally {
      runners.foreach(_.stop())
    }
  }

  protected def server: HostRouterHandler = _server
  protected val _server = new HostRouterHandler

  /** Serves `resourcePath` under `hostname`. */
  def route(hostname: String, resourcePath: String): VirtualServer = {
    route(new VirtualServer(hostname, resourcePath))
  }

  // Defaults the resource path to a classpath resource named after the host.
  def route(hostname: String): VirtualServer = {
    route(hostname, "res:///%s".format(hostname))
  }

  def route(hostname: String, server: ViperServer) {
    _server.putRoute(hostname, port, server)
  }

  /** Serves `resourcePath` plus routes added by `f` under `hostname`. */
  def route(hostname: String, resourcePath: String, f:(RestServer) => Unit) {
    _server.putRoute(hostname, port, new ViperServer(resourcePath) {
      override def addRoutes() { f(this) }
    })
  }

  def route(hostname: String, f:(RestServer) => Unit) {
    route(hostname, "res:///%s".format(hostname), f)
  }

  def route(virtualServer: VirtualServer): VirtualServer = {
    server.putRoute(virtualServer.hostname, port, virtualServer)
    virtualServer
  }

  /** Defers the runner; it is started and routed when run() is called. */
  def route(runner: VirtualServerRunner) {
    runners.append(runner)
  }
}
/** App-style entry point: subclass body registers routes, main() runs them. */
class MultiHostServerApp(port: Int = 80) extends MultiHostServer(port) with DelayedInit {
  override def delayedInit(body: => Unit) {
    body
  }
  def main(args: Array[String]) {
    run()
  }
}
| briangu/viper.io | core/src/main/scala/io/viper/common/NestServer.scala | Scala | apache-2.0 | 5,602 |
package com.github.bespalovdn.asteriskscala.agi.request
import org.scalatest._
/** Unit tests for AgiRequest's variable-line parser. */
class LineParserTest extends FlatSpecLike
    with Assertions
    with Matchers
    with BeforeAndAfterAll
    with BeforeAndAfterEach
{
  "LineParserTest" should
    "check correctness parsing the AGI variables" in {
    // NOTE(review): "\\n" is a literal backslash-n (two characters), not a
    // newline — confirm that is the terminator the parser actually expects.
    //valid examples:
    assert(parse("agi_request: HELLO\\n") == ("request", "HELLO"))
    assert(parse("agi_request: HELLO, world!\\n") == ("request", "HELLO, world!"))
    //invalid: missing end of line:
    assert(parse("agi_request: HELLO") == null)
  }

  // Delegates to the production parser under test.
  private def parse(line: String): (String, String) = AgiRequest.lineParser.parse(line)
}
| bespalovdn/asterisk-scala | agi/src/test/scala/com/github/bespalovdn/asteriskscala/agi/request/LineParserTest.scala | Scala | mit | 674 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views.pages.withdraw
import controllers.routes
import forms.WithdrawDateForm.withdrawDateForm
import models.WithdrawDateFormModel
import views.html.pages.withdraw.withdrawImplications
import org.jsoup.Jsoup
import play.api.data.Form
import testHelpers.ViewSpecHelpers.CommonViewSpecHelper
import testHelpers.ViewSpecHelpers.withdraw.WithdrawImplicationsSpecMessages
/** View spec for the "withdraw protection implications" page (IP2014 case). */
class WithdrawImplicationsSpec extends CommonViewSpecHelper with WithdrawImplicationsSpecMessages {

  val withdrawDateForModel = withdrawDateForm: Form[WithdrawDateFormModel]

  "Withdraw Implication view" when {
    // Render the view once and assert against the parsed HTML.
    lazy val view = application.injector.instanceOf[withdrawImplications]
    lazy val doc = Jsoup.parse(view.apply(withdrawDateForModel, "IP2014", "dormant").body)

    s"have a title ${"pla.withdraw.protection.title"}" in {
      doc.title() shouldBe plaWithdrawProtectionTitle(plaWithdrawProtectionIP2014label)
    }

    s"have a back link with text back " in {
      doc.select("a.back-link").text() shouldBe "Back"
    }

    s"have a back link with href" in {
      doc.select("a.back-link").attr("href") shouldBe routes.ReadProtectionsController.currentProtections().url
    }

    s"have the question of the page ${"pla.withdraw.protection.title"}" in {
      doc.select("h1.heading-large").text shouldEqual plaWithdrawProtectionTitle(plaWithdrawProtectionIP2014label)
    }

    "have a div tag that" should {
      "have a heading label" in {
        doc.select("div.grid > p").text() shouldBe plaWithdrawProtectionIfInfo(plaWithdrawProtectionIP2014label)
      }

      s"has first paragraph of ${"pla.withdraw.protection.if.info.1"}" in {
        doc.select("li").get(0).text() shouldBe plaWithdrawProtectionIfInfo1(plaWithdrawProtectionIP2014label)
      }

      s"has second paragraph of ${"pla.withdraw.protection.if.info.1"}" in {
        doc.select("li").get(1).text() shouldBe plaWithdrawProtectionIfInfo2(plaWithdrawProtectionIP2014label)
      }
    }

    "have a continue button that" should {
      lazy val button = doc.getElementById("continue-button")

      s"have text of ${"pla.withdraw.protection.continue.title"}" in {
        button.text() shouldBe plaWithdrawProtectionContinueTitle
      }

      s"have a href" in {
        button.attr("href") shouldBe routes.WithdrawProtectionController.getWithdrawDateInput().url
      }
    }
  }
}
| hmrc/pensions-lifetime-allowance-frontend | test/views/pages/withdraw/WithdrawImplicationsSpec.scala | Scala | apache-2.0 | 2,971 |
/*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.eval.model
import akka.util.ByteString
import com.fasterxml.jackson.core.JsonParser
import com.fasterxml.jackson.core.JsonToken
import com.netflix.atlas.akka.ByteStringInputStream
import com.netflix.atlas.akka.DiagnosticMessage
import com.netflix.atlas.core.util.SmallHashMap
import com.netflix.atlas.json.Json
import com.netflix.atlas.json.JsonParserHelper._
import com.netflix.atlas.json.JsonSupport
import java.io.ByteArrayOutputStream
/**
* Helpers for working with messages coming back from the LWCAPI service.
*/
object LwcMessages {
/**
* Parse the message string into an internal model object based on the type.
*/
  /**
   * Parse the message into an internal model object based on the type.
   * This overload streams directly from the ByteString, avoiding an
   * intermediate String copy.
   */
  def parse(msg: ByteString): AnyRef = {
    parse(Json.newJsonParser(new ByteStringInputStream(msg)))
  }

  /**
   * Parse the message string into an internal model object based on the type.
   */
  def parse(msg: String): AnyRef = {
    parse(Json.newJsonParser(msg))
  }
  /**
   * Parses a single LWC message, dispatching on its "type" field. Unknown
   * types fall through to a generic DiagnosticMessage. The parser is always
   * closed before returning.
   */
  private def parse(parser: JsonParser): AnyRef = {
    // This is a performance critical part of the code so the parsing is done by
    // hand rather than using ObjectMapper to minimize allocations and get peak
    // performance.
    try {
      // All
      var typeDesc: String = null

      // LwcExpression
      var expression: String = null
      var step: Long = -1L

      // LwcSubscription
      // - expression
      var metrics: List[LwcDataExpr] = Nil

      // LwcDatapoint
      var timestamp: Long = -1L
      var id: String = null
      var tags: Map[String, String] = Map.empty
      var value: Double = Double.NaN

      // LwcDiagnosticMessage
      // - id
      // - message: DiagnosticMessage
      var diagnosticMessage: DiagnosticMessage = null

      // LwcHeartbeat
      // - timestamp
      // - step

      // DiagnosticMessage
      // - message: String
      var message: String = null

      // Actually do the parsing work
      foreachField(parser) {
        case "type"       => typeDesc = nextString(parser)
        case "expression" => expression = nextString(parser)
        case "step"       => step = nextLong(parser)
        case "metrics"    => metrics = parseDataExprs(parser)
        case "timestamp"  => timestamp = nextLong(parser)
        case "id"         => id = nextString(parser)
        case "tags"       => tags = parseTags(parser)
        case "value"      => value = nextDouble(parser)
        case "message" =>
          // "message" is either a plain string (DiagnosticMessage) or a
          // nested object (LwcDiagnosticMessage); peek at the token to decide.
          val t = parser.nextToken()
          if (t == JsonToken.VALUE_STRING)
            message = parser.getText
          else
            diagnosticMessage = parseDiagnosticMessage(parser)
        case _ => skipNext(parser)
      }

      typeDesc match {
        case "expression"   => LwcExpression(expression, step)
        case "subscription" => LwcSubscription(expression, metrics)
        case "datapoint"    => LwcDatapoint(timestamp, id, tags, value)
        case "diagnostic"   => LwcDiagnosticMessage(id, diagnosticMessage)
        case "heartbeat"    => LwcHeartbeat(timestamp, step)
        case _              => DiagnosticMessage(typeDesc, message, None)
      }
    } finally {
      parser.close()
    }
  }
  /**
   * Reads the JSON array of data expressions for a subscription. Accepts
   * both "step" and the legacy "frequency" field name.
   */
  private[model] def parseDataExprs(parser: JsonParser): List[LwcDataExpr] = {
    val builder = List.newBuilder[LwcDataExpr]
    foreachItem(parser) {
      var id: String = null
      var expression: String = null
      var step: Long = -1L
      foreachField(parser) {
        case "id"                 => id = nextString(parser)
        case "expression"         => expression = nextString(parser)
        case "step" | "frequency" => step = nextLong(parser)
        case _                    => skipNext(parser)
      }
      builder += LwcDataExpr(id, expression, step)
    }
    builder.result()
  }
  /** Reads a nested diagnostic message object ({"type": ..., "message": ...}). */
  private def parseDiagnosticMessage(parser: JsonParser): DiagnosticMessage = {
    var typeDesc: String = null
    var message: String = null
    foreachField(parser) {
      case "type"    => typeDesc = nextString(parser)
      case "message" => message = nextString(parser)
      case _         => skipNext(parser)
    }
    DiagnosticMessage(typeDesc, message, None)
  }

  /** Reads a JSON object of string fields into a small immutable tag map. */
  private def parseTags(parser: JsonParser): Map[String, String] = {
    val builder = new SmallHashMap.Builder[String, String](30)
    foreachField(parser) {
      case k => builder.add(k, nextString(parser))
    }
    builder.result
  }
  // Numeric type tags used by the compact batch (Smile) encoding below; the
  // decoder in parseBatch must stay in sync with these values.
  private val Expression = 0
  private val Subscription = 1
  private val Datapoint = 2
  private val LwcDiagnostic = 3
  private val Diagnostic = 4
  private val Heartbeat = 5
/**
* Encode messages using Jackson's smile format into a ByteString.
*/
  /** Encodes into a fresh buffer; see the two-argument overload for details. */
  def encodeBatch(msgs: Seq[AnyRef]): ByteString = {
    encodeBatch(msgs, new ByteArrayOutputStream())
  }
/**
* Encode messages using Jackson's smile format into a ByteString. The
* `ByteArrayOutputStream` will be reset and used as a buffer for encoding
* the data.
*/
def encodeBatch(msgs: Seq[AnyRef], baos: ByteArrayOutputStream): ByteString = {
baos.reset()
val gen = Json.newSmileGenerator(baos)
try {
gen.writeStartArray()
msgs.foreach {
case msg: LwcExpression =>
gen.writeNumber(Expression)
gen.writeString(msg.expression)
gen.writeNumber(msg.step)
case msg: LwcSubscription =>
gen.writeNumber(Subscription)
gen.writeString(msg.expression)
gen.writeStartArray()
msg.metrics.foreach { m =>
gen.writeString(m.id)
gen.writeString(m.expression)
gen.writeNumber(m.step)
}
gen.writeEndArray()
case msg: LwcDatapoint =>
gen.writeNumber(Datapoint)
gen.writeNumber(msg.timestamp)
gen.writeString(msg.id)
gen.writeNumber(msg.tags.size)
msg.tags.foreachEntry { (k, v) =>
gen.writeString(k)
gen.writeString(v)
}
gen.writeNumber(msg.value)
case msg: LwcDiagnosticMessage =>
gen.writeNumber(LwcDiagnostic)
gen.writeString(msg.id)
gen.writeString(msg.message.typeName)
gen.writeString(msg.message.message)
case msg: DiagnosticMessage =>
gen.writeNumber(Diagnostic)
gen.writeString(msg.typeName)
gen.writeString(msg.message)
case msg: LwcHeartbeat =>
gen.writeNumber(Heartbeat)
gen.writeNumber(msg.timestamp)
gen.writeNumber(msg.step)
case _ =>
throw new MatchError("foo")
}
gen.writeEndArray()
} finally {
gen.close()
}
ByteString.fromArrayUnsafe(baos.toByteArray)
}
/**
* Parse a set of messages that were encoded with `encodeBatch`.
*/
  /** Decodes a Smile-encoded batch produced by `encodeBatch`. */
  def parseBatch(msgs: ByteString): List[AnyRef] = {
    parseBatch(Json.newSmileParser(new ByteStringInputStream(msgs)))
  }
  // Decodes a batch by reading a numeric type id and then the fields for that
  // variant; must mirror the layout written by `encodeBatch`. The parser is
  // always closed, even on failure.
  private def parseBatch(parser: JsonParser): List[AnyRef] = {
    val builder = List.newBuilder[AnyRef]
    try {
      foreachItem(parser) {
        parser.getIntValue match {
          case Expression =>
            builder += LwcExpression(parser.nextTextValue(), parser.nextLongValue(-1L))
          case Subscription =>
            val expression = parser.nextTextValue()
            val dataExprs = List.newBuilder[LwcDataExpr]
            foreachItem(parser) {
              dataExprs += LwcDataExpr(
                // getText reads the current token; presumably foreachItem has
                // already advanced to the id value — confirm against foreachItem
                parser.getText,
                parser.nextTextValue(),
                parser.nextLongValue(-1L)
              )
            }
            builder += LwcSubscription(expression, dataExprs.result())
          case Datapoint =>
            val timestamp = parser.nextLongValue(-1L)
            val id = parser.nextTextValue()
            // tag count precedes the key/value pairs (see encodeBatch)
            val tags = parseTags(parser, parser.nextIntValue(0))
            val value = nextDouble(parser)
            builder += LwcDatapoint(timestamp, id, tags, value)
          case LwcDiagnostic =>
            val id = parser.nextTextValue()
            val typeName = parser.nextTextValue()
            val message = parser.nextTextValue()
            builder += LwcDiagnosticMessage(id, DiagnosticMessage(typeName, message, None))
          case Diagnostic =>
            val typeName = parser.nextTextValue()
            val message = parser.nextTextValue()
            builder += DiagnosticMessage(typeName, message, None)
          case Heartbeat =>
            val timestamp = parser.nextLongValue(-1L)
            val step = parser.nextLongValue(-1L)
            builder += LwcHeartbeat(timestamp, step)
          case v =>
            throw new MatchError(s"invalid type id: $v")
        }
      }
    } finally {
      parser.close()
    }
    builder.result()
  }
private def parseTags(parser: JsonParser, n: Int): Map[String, String] = {
if (n == 0) {
SmallHashMap.empty[String, String]
} else {
val builder = new SmallHashMap.Builder[String, String](2 * n)
var i = 0
while (i < n) {
val k = parser.nextTextValue()
val v = parser.nextTextValue()
builder.add(k, v)
i += 1
}
builder.result
}
}
  /**
    * Format a message as a server-sent event: a type-specific prefix, the
    * JSON payload, and the SSE record terminator. Unknown message types fall
    * back to a plain `data:` prefix.
    */
  def toSSE(msg: JsonSupport): ByteString = {
    val prefix = msg match {
      case _: LwcSubscription      => subscribePrefix
      case _: LwcDatapoint         => metricDataPrefix
      case _: LwcDiagnosticMessage => diagnosticPrefix
      case _: LwcHeartbeat         => heartbeatPrefix
      case _                       => defaultPrefix
    }
    prefix ++ ByteString(msg.toJson) ++ suffix
  }
  // SSE framing pieces: event-type prefixes and the record terminator used by
  // `toSSE` above.
  private val subscribePrefix = ByteString("info: subscribe ")
  private val metricDataPrefix = ByteString("data: metric ")
  private val diagnosticPrefix = ByteString("data: diagnostic ")
  private val heartbeatPrefix = ByteString("data: heartbeat ")
  private val defaultPrefix = ByteString("data: ")
  private val suffix = ByteString("\\r\\n\\r\\n")
}
| Netflix/atlas | atlas-eval/src/main/scala/com/netflix/atlas/eval/model/LwcMessages.scala | Scala | apache-2.0 | 10,464 |
package scribe.benchmark.tester
import cats.effect.IO
import cats.effect.unsafe.implicits.global
import cats.implicits._
import scribe.Logger
import scribe.cats._
import scribe.file._
import scribe.format._
/**
 * Logging benchmark that writes messages through scribe's cats-effect IO
 * wrapper, evaluating up to 1000 log calls concurrently via fs2.
 */
class ScribeEffectParallelLoggingTester extends LoggingTester {
  private lazy val fileWriter = FileWriter("logs" / "scribe-effect-par.log")
  private lazy val formatter = formatter"$date $levelPaddedRight [$threadName] $messages"
  private lazy val logger = Logger.empty.orphan().withHandler(formatter = formatter, writer = fileWriter).f[IO]
  // referencing the lazy val forces the logger (and its writer) to initialize
  override def init(): Unit = logger
  // stream the messages, logging up to 1000 at a time, and block until done
  override def run(messages: Iterator[String]): Unit = fs2.Stream
    .fromIterator[IO](messages, 1000)
    .parEvalMap(1000)(msg => logger.info(msg))
    .compile
    .drain
    .unsafeRunSync()
  override def dispose(): Unit = fileWriter.dispose()
} | outr/scribe | benchmarks/src/main/scala/scribe/benchmark/tester/ScribeEffectParallelLoggingTester.scala | Scala | mit | 843 |
package helpers
import com.google.inject._
import com.sun.javafx.binding.Logging
import org.mongodb.scala.{MongoClient, MongoDatabase}
import play.Configuration
import play.api.inject.ApplicationLifecycle
import scala.concurrent.Future
// Marker trait with no members; nothing in this view extends it.
trait GlobalSingleton {}
@Singleton
class Global @Inject()(applicationLifecycle: ApplicationLifecycle) extends Logging{
  // NOTE(review): `Logging` here resolves to com.sun.javafx.binding.Logging
  // (a JavaFX internal, per the import) — likely an accidental import; confirm
  // the intended logging trait.
  val mongoClient: MongoClient = MongoClient()
  // get handle to "mydb" database
  val database: MongoDatabase = mongoClient.getDatabase("mydb")
  // other initialize code here
  // close the Mongo client when the application shuts down
  applicationLifecycle.addStopHook { () =>
    Future.successful(mongoClient.close())
  }
}
// Guice module wiring: binds Logging to Global as an eager singleton so the
// Mongo client is created at application start.
class ConfigureModules extends AbstractModule {
  override def configure() = { // or without override
    println("configure called")
    // NOTE(review): binding the JavaFX Logging type to Global is unusual —
    // confirm this is the intended injection key.
    bind(classOf[Logging]).to(classOf[Global]).asEagerSingleton()
  }
}
| bminderh/play-react-webpack | app/helpers/Global.scala | Scala | apache-2.0 | 846 |
package edu.umass.ciir.kbbridge.kb2text
import org.lemurproject.galago.core.retrieval.ScoredDocument
import edu.umass.ciir.kbbridge.data.{IdMap, ScoredWikipediaEntity}
import org.lemurproject.galago.core.parse.Document
import edu.umass.ciir.kbbridge.search.DocumentBridgeMap
import edu.umass.ciir.kbbridge.data.repr.EntityRepr
/**
 * Converts Galago `ScoredDocument` results into TREC-KBA document wrappers,
 * carrying over the original query entity representation, score and rank.
 */
object GalagoDoc2Text {

  /** Converts each scored result into a document prepared for reverse linking. */
  def galagoResultsToDocForReverseLinking(docs: Seq[ScoredDocument], originalQueryEntityRepr: EntityRepr): Seq[TrecKbaDocumentForReverseLinking] =
    docs.map(galagoResultToDocForReverseLinking(_, originalQueryEntityRepr))

  /** Fetches the bridge document for a result and wraps it for reverse linking. */
  def galagoResultToDocForReverseLinking(sd: ScoredDocument, originalQueryEntityRepr: EntityRepr): TrecKbaDocumentForReverseLinking = {
    val bridgeDoc = DocumentBridgeMap.getDefaultDocumentProvider.getBridgeDocument(sd.documentName)
    new TrecKbaDocumentForReverseLinking(
      bridgeDoc,
      originalQueryEntityRepr.entityName,
      originalQueryEntityRepr.nameVariants.map(_._1),
      Some(originalQueryEntityRepr),
      score = Some(sd.score),
      rank = Some(sd.rank))
  }

  /** Converts each scored result into a lightweight TREC-KBA document record. */
  def galagoResultsToTrecKbaDoc(docs: Seq[ScoredDocument], originalQueryEntityRepr: EntityRepr): Seq[TrecKbaDocument] =
    docs.map(galagoResultToTrecKbaDocument(_, originalQueryEntityRepr))

  /** Wraps a single scored result, using the entity name as the topic id. */
  def galagoResultToTrecKbaDocument(sd: ScoredDocument, originalQueryEntityRepr: EntityRepr): TrecKbaDocument =
    new TrecKbaDocument(sd.documentName, topicId = originalQueryEntityRepr.entityName, rank = sd.rank, rawScore = Some(sd.score))
}
| daltonj/KbBridge | src/main/scala/edu/umass/ciir/kbbridge/kb2text/GalagoDoc2Text.scala | Scala | apache-2.0 | 1,760 |
package scala.slick.test.lifted
import org.junit.Test
import org.junit.Assert._
/** Test case for the SQL schema support in table definitions */
/** Test case for the SQL schema support in table definitions */
class SchemaSupportTest {
  @Test def testSchemaSupport {
    import scala.slick.driver.H2Driver.simple._
    // table mapped into an explicit "myschema" schema
    class T(tag: Tag) extends Table[Int](tag, Some("myschema"), "mytable") {
      def id = column[Int]("id")
      def * = id
    }
    val ts = TableQuery[T]
    // each generated statement must qualify the table with the schema name
    val s1 = ts.filter(_.id < 5).selectStatement
    println(s1)
    assertTrue("select ... from uses schema name", s1 contains """from "myschema"."mytable" """)
    //val s2 = ts.insertStatement
    //println(s2)
    val s3 = ts.filter(_.id < 5).updateStatement
    println(s3)
    assertTrue("update uses schema name", s3 contains """update "myschema"."mytable" """)
    val s4 = ts.filter(_.id < 5).deleteStatement
    println(s4)
    assertTrue("delete uses schema name", s4 contains """delete from "myschema"."mytable" """)
    val s5 = ts.schema.createStatements
    s5.foreach(println)
    s5.foreach(s => assertTrue("DDL (create) uses schema name", s contains """ "myschema"."mytable" """))
    val s6 = ts.schema.dropStatements
    s6.foreach(println)
    s6.foreach(s => assertTrue("DDL (drop) uses schema name", s contains """ "myschema"."mytable" """))
  }
}
| nuodb/slick | slick-testkit/src/test/scala/scala/slick/test/lifted/SchemaSupportTest.scala | Scala | bsd-2-clause | 1,292 |
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.tools
import java.io._
import java.text.SimpleDateFormat
import java.util.{Date, TimeZone}
import com.typesafe.scalalogging.slf4j.Logging
import com.vividsolutions.jts.geom.{Coordinate, Geometry}
import org.apache.commons.lang.StringEscapeUtils
import org.geotools.GML
import org.geotools.GML.Version
import org.geotools.data.DataUtilities
import org.geotools.data.shapefile.{ShapefileDataStore, ShapefileDataStoreFactory}
import org.geotools.data.simple.{SimpleFeatureCollection, SimpleFeatureStore}
import org.geotools.geojson.feature.FeatureJSON
import org.geotools.geometry.jts.JTSFactoryFinder
import org.locationtech.geomesa.filter.function._
import org.locationtech.geomesa.tools.Utils.Formats
import org.locationtech.geomesa.tools.commands.ExportCommand.ExportParameters
import org.locationtech.geomesa.utils.geotools.Conversions._
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import scala.collection.JavaConversions._
import scala.collection.mutable
import scala.util.Try
/**
 * Writes simple feature collections to some output format. Flushing and
 * closing of the underlying sink come from Flushable/AutoCloseable.
 */
trait FeatureExporter extends AutoCloseable with Flushable {
  def write(featureCollection: SimpleFeatureCollection): Unit
}
/** Streams features as GeoJSON to the given writer. */
class GeoJsonExport(writer: Writer) extends FeatureExporter {
  val featureJson = new FeatureJSON()
  override def write(features: SimpleFeatureCollection) =
    featureJson.writeFeatureCollection(features, writer)
  override def flush() = writer.flush()
  override def close() = {
    flush()
    writer.close()
  }
}
/** Writes features as GML (WFS 1.0) to the given output stream. */
class GmlExport(os: OutputStream) extends FeatureExporter {
  val encode = new GML(Version.WFS1_0)
  // binds the feature namespace to a placeholder schema location
  encode.setNamespace("location", "location.xsd")
  override def write(features: SimpleFeatureCollection) = encode.encode(os, features)
  override def flush() = os.flush()
  override def close() = {
    os.flush()
    os.close()
  }
}
/**
 * Writes a feature collection to a shapefile backed by `file`. The schema of
 * the collection must already use "the_geom" for the geometry attribute (see
 * the companion object's `modifySchema`).
 */
class ShapefileExport(file: File) extends FeatureExporter {
  override def write(features: SimpleFeatureCollection) = {
    val url = DataUtilities.fileToURL(file)
    val factory = new ShapefileDataStoreFactory()
    val newShapeFile = factory.createDataStore(url).asInstanceOf[ShapefileDataStore]
    try {
      newShapeFile.createSchema(features.getSchema)
      val store = newShapeFile.getFeatureSource.asInstanceOf[SimpleFeatureStore]
      store.addFeatures(features)
    } finally {
      // Fix: the data store was never disposed, leaking file handles held by
      // the shapefile store.
      newShapeFile.dispose()
    }
  }
  // the data store is fully written and released in write()
  override def flush() = {}
  override def close() = {}
}
object ShapefileExport {

  /**
   * Builds an attribute-transform string that renames the geometry attribute
   * to "the_geom", as required by Geotools' ShapefileDataStore and
   * ShapefileFeatureWriter. The easiest way to apply it is to transform the
   * attributes when retrieving the SimpleFeatureCollection.
   */
  def modifySchema(sft: SimpleFeatureType): String = {
    val names = sft.getAttributeDescriptors.map(_.getLocalName)
    val geomName = sft.getGeometryDescriptor.getLocalName
    val renamed = s"the_geom=$geomName"
    val idx = names.indexOf(geomName)
    val transformed = if (idx >= 0) names.updated(idx, renamed) else names :+ renamed
    transformed.mkString(",")
  }
}
/**
 * Exports features as delimited text (CSV or TSV). If `attributes` is given
 * it is a comma-separated list of attribute selectors; otherwise all schema
 * attributes are written. A header line of attribute names is always emitted.
 */
class DelimitedExport(writer: Writer, format: String, attributes: Option[String])
    extends FeatureExporter with Logging {

  val delimiter = format match {
    case Formats.CSV => ","
    case Formats.TSV => "\\t"
  }

  // dates are rendered in UTC with second precision
  lazy val dateFormat = {
    val df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
    df.setTimeZone(TimeZone.getTimeZone("UTC"))
    df
  }

  // CSV values are escaped; TSV values are written as-is
  val escape: String => String = format match {
    case Formats.CSV => StringEscapeUtils.escapeCsv
    case _ => (s: String) => s
  }

  override def write(features: SimpleFeatureCollection): Unit = {

    val sft = features.getSchema

    // split apart the optional attributes and then split any derived values to just get the names
    val names = attributes.map(_.split("""(?<!\\\\),""").toSeq.map(_.split(":")(0).split("=").head.trim))
      .getOrElse(sft.getAttributeDescriptors.map(_.getLocalName))

    val indices = names.map(sft.indexOf)

    def findMessage(i: Int) = {
      val index = indices.indexOf(i)
      s"Attribute ${names(index)} does not exist in the feature type."
    }

    // NOTE(review): assert can be compiled out with -Xdisable-assertions;
    // require would be more robust for validating user input — confirm intent
    indices.foreach(i => assert(i != -1, findMessage(i)))

    // write out a header line
    writer.write(names.mkString("", delimiter, "\\n"))

    var count = 0
    features.features.foreach { sf =>
      val values = indices.map(i => escape(stringify(sf.getAttribute(i))))
      writer.write(values.mkString("", delimiter, "\\n"))
      count += 1
      // periodic progress logging for large exports
      if (count % 10000 == 0) {
        logger.debug(s"wrote $count features")
      }
    }

    logger.info(s"Exported $count features")
  }

  // null -> empty string, dates formatted in UTC, everything else via toString
  def stringify(o: Object): String = o match {
    case null => ""
    case d: Date => dateFormat.format(d)
    case _ => o.toString
  }

  override def flush() = writer.flush()

  override def close() = {
    writer.flush()
    writer.close()
  }

}
object DelimitedExport {
  /** Builds an exporter from CLI parameters; the format is lower-cased first. */
  def apply(writer: Writer, params: ExportParameters) =
    new DelimitedExport(writer, params.format.toLowerCase, Option(params.attributes))
}
object BinFileExport {

  // Default name of the date attribute when none is supplied.
  // NOTE(review): public mutable var — callers can change this global default.
  var DEFAULT_TIME = "dtg"

  /** Comma-separated list of the attributes needed for bin export (nulls dropped). */
  def getAttributeList(p: ExportParameters): String = {
    val dtg = Option(p.dateAttribute).getOrElse(DEFAULT_TIME)
    Seq(p.latAttribute, p.lonAttribute, p.idAttribute, dtg, p.labelAttribute)
      .filter(_ != null)
      .mkString(",")
  }

  /** Builds an exporter from CLI parameters, defaulting the date attribute. */
  def apply(os: OutputStream, params: ExportParameters) =
    new BinFileExport(os,
      Option(params.dateAttribute).getOrElse(DEFAULT_TIME),
      Option(params.idAttribute),
      Option(params.latAttribute),
      Option(params.lonAttribute),
      Option(params.labelAttribute))
}
/**
 * Exports features in binary form via BinaryOutputEncoder, emitting
 * coordinates in lon/lat order (AxisOrder.LonLat).
 */
class BinFileExport(os: OutputStream,
                    dtgAttribute: String,
                    idAttribute: Option[String],
                    latAttribute: Option[String],
                    lonAttribute: Option[String],
                    lblAttribute: Option[String]) extends FeatureExporter {

  import org.locationtech.geomesa.filter.function.BinaryOutputEncoder._

  // fall back to an attribute named "id" when no id attribute is specified
  val id = idAttribute.orElse(Some("id"))
  // explicit lat/lon is only used when BOTH attributes are provided
  val latLon = latAttribute.flatMap(lat => lonAttribute.map(lon => (lat, lon)))

  override def write(fc: SimpleFeatureCollection) =
    encodeFeatureCollection(fc, os, dtgAttribute, id, lblAttribute, latLon, AxisOrder.LonLat)

  override def flush() = os.flush()

  override def close() = os.close()
}
| mmatz-ccri/geomesa | geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/FeatureExporter.scala | Scala | apache-2.0 | 7,127 |
/* sbt -- Simple Build Tool
* Copyright 2011 Mark Harrah
*/
package sbt
import java.io.File
import java.net.URI
import Project._
import Types.Endo
import CommandSupport.logger
import compiler.Eval
import SessionSettings._
/**
 * Tracks the current build/project and the session (non-persisted) settings
 * that augment the original build definition.
 */
final case class SessionSettings(currentBuild: URI, currentProject: Map[URI, String], original: Seq[Setting[_]], append: SessionMap, currentEval: () => Eval)
{
	assert(currentProject contains currentBuild, "Current build (" + currentBuild + ") not associated with a current project.")
	// Switches the current build/project and records the evaluator for it.
	def setCurrent(build: URI, project: String, eval: () => Eval): SessionSettings = copy(currentBuild = build, currentProject = currentProject.updated(build, project), currentEval = eval)
	def current: ProjectRef = ProjectRef(currentBuild, currentProject(currentBuild))
	// Appends session settings for the current project.
	def appendSettings(s: Seq[SessionSetting]): SessionSettings = copy(append = modify(append, _ ++ s))
	// Original settings followed by all appended session settings.
	def mergeSettings: Seq[Setting[_]] = original ++ merge(append)
	def clearExtraSettings: SessionSettings = copy(append = Map.empty)
	private[this] def merge(map: SessionMap): Seq[Setting[_]] = map.values.toSeq.flatten[SessionSetting].map(_._1)
	private[this] def modify(map: SessionMap, onSeq: Endo[Seq[SessionSetting]]): SessionMap =
	{
		val cur = current
		map.updated(cur, onSeq(map.getOrElse( cur, Nil)))
	}
}
object SessionSettings
{
	// A compiled setting paired with the source text the user typed, kept so
	// the setting can be persisted verbatim by 'session save'.
	type SessionSetting = (Setting[_], String)
	type SessionMap = Map[ProjectRef, Seq[SessionSetting]]

	// Re-evaluates the build with the given session applied.
	def reapply(session: SessionSettings, s: State): State =
		BuiltinCommands.reapply(session, Project.structure(s), s)

	// Drops session settings for the current project only.
	def clearSettings(s: State): State =
		withSettings(s)(session => reapply(session.copy(append = session.append - session.current), s))
	// Drops session settings for all projects.
	def clearAllSettings(s: State): State =
		withSettings(s)(session => reapply(session.clearExtraSettings, s))

	// Runs `f` with the current session, or reports when no session settings exist.
	def withSettings(s: State)(f: SessionSettings => State): State =
	{
		val extracted = Project extract s
		import extracted._
		if(session.append.isEmpty)
		{
			logger(s).info("No session settings defined.")
			s
		}
		else
			f(session)
	}

	def pluralize(size: Int, of: String) = size.toString + (if(size == 1) of else (of + "s"))

	// Warns when switching sessions would discard unsaved settings.
	def checkSession(newSession: SessionSettings, oldState: State)
	{
		val oldSettings = (oldState get Keys.sessionSettings).toList.flatMap(_.append).flatMap(_._2)
		if(newSession.append.isEmpty && !oldSettings.isEmpty)
			logger(oldState).warn("Discarding " + pluralize(oldSettings.size, " session setting") + ". Use 'session save' to persist session settings.")
	}

	// Removes the 1-based positions covered by `ranges` from `in`.
	// NOTE(review): tuples are destructured as (hi,lo) but used as `hi to lo`,
	// while `range` below produces (lo, hi) — the names look swapped; confirm.
	def removeRanges[T](in: Seq[T], ranges: Seq[(Int,Int)]): Seq[T] =
	{
		val asSet = (Set.empty[Int] /: ranges) { case (s, (hi,lo)) => s ++ (hi to lo) }
		in.zipWithIndex.flatMap { case (t, index) => if(asSet(index+1)) Nil else t :: Nil }
	}

	// Removes the selected session settings for the current project and re-evaluates.
	def removeSettings(s: State, ranges: Seq[(Int,Int)]): State =
		withSettings(s) { session =>
			val current = session.current
			val newAppend = session.append.updated(current, removeRanges(session.append.getOrElse(current, Nil), ranges))
			reapply(session.copy( append = newAppend ), s)
		}

	def saveAllSettings(s: State): State = saveSomeSettings(s)(_ => true)
	def saveSettings(s: State): State =
	{
		val current = Project.session(s).current
		saveSomeSettings(s)( _ == current)
	}
	// Persists session settings for the included projects to their .sbt files,
	// then folds them into the session's original settings.
	def saveSomeSettings(s: State)(include: ProjectRef => Boolean): State =
		withSettings(s){session =>
			for( (ref, settings) <- session.append if !settings.isEmpty && include(ref) )
				writeSettings(ref, settings, Project.structure(s))
			reapply(session.copy(original = session.mergeSettings, append = Map.empty), s)
		}

	// Appends the settings' source text to the project's first .sbt file
	// (build.sbt by default), inserting a blank separator line when needed.
	def writeSettings(pref: ProjectRef, settings: Seq[SessionSetting], structure: Load.BuildStructure)
	{
		val project = Project.getProject(pref, structure).getOrElse(error("Invalid project reference " + pref))
		val appendTo: File = BuildPaths.configurationSources(project.base).headOption.getOrElse(new File(project.base, "build.sbt"))
		val baseAppend = settingStrings(settings).flatMap("" :: _ :: Nil)
		val adjustedLines = if(appendTo.isFile && hasTrailingBlank(IO readLines appendTo) ) baseAppend else "" +: baseAppend
		IO.writeLines(appendTo, adjustedLines, append = true)
	}

	def hasTrailingBlank(lines: Seq[String]) = lines.takeRight(1).exists(_.trim.isEmpty)

	def printAllSettings(s: State): State =
		withSettings(s){ session =>
			for( (ref, settings) <- session.append if !settings.isEmpty) {
				println("In " + Project.display(ref))
				printSettings(settings)
			}
			s
		}
	def printSettings(s: State): State =
		withSettings(s){ session =>
			printSettings(session.append.getOrElse(session.current, Nil))
			s
		}
	// Settings are numbered from 1 so indices match the 'remove' command.
	def printSettings(settings: Seq[SessionSetting]): Unit =
		for((stringRep, index) <- settingStrings(settings).zipWithIndex)
			println("  " + (index+1) + ". " + stringRep)

	def settingStrings(s: Seq[SessionSetting]): Seq[String] = s.map(_._2)

	def Help = """session <command>
Manipulates session settings, which are temporary settings that do not persist past the current sbt execution (that is, the current session).
Valid commands are:
clear, clear-all
	Removes temporary settings added using 'set' and re-evaluates all settings.
	For 'clear', only the settings defined for the current project are cleared.
	For 'clear-all', all settings in all projects are cleared.
list, list-all
	Prints a numbered list of session settings defined.
	The numbers may be used to remove individual settings or ranges of settings using 'remove'.
	For 'list', only the settings for the current project are printed.
	For 'list-all', all settings in all projets are printed.
remove <range-spec>
	<range-spec> is a comma-separated list of individual numbers or ranges of numbers.
	For example, 'remove 1,3,5-7'.
	The temporary settings at the given indices for the current project are removed and all settings are re-evaluated.
	Use the 'list' command to see a numbered list of settings for the current project.
save, save-all
	Makes the session settings permanent by writing them to a '.sbt' configuration file.
	For 'save', only the current project's settings are saved (the settings for other projects are left alone).
	For 'save-all', the session settings are saved for all projects.
	The session settings defined for a project are appended to the first '.sbt' configuration file in that project.
	If no '.sbt' configuration file exists, the settings are written to 'build.sbt' in the project's base directory."""

	// Parsed representation of the 'session' subcommands.
	sealed trait SessionCommand
	final class Print(val all: Boolean) extends SessionCommand
	final class Clear(val all: Boolean) extends SessionCommand
	final class Save(val all: Boolean) extends SessionCommand
	final class Remove(val ranges: Seq[(Int,Int)]) extends SessionCommand

	import complete._
	import DefaultParsers._

	lazy val parser =
		token(Space) ~>
		(token("list-all" ^^^ new Print(true)) | token("list" ^^^ new Print(false)) | token("clear" ^^^ new Clear(false)) |
		token("save-all" ^^^ new Save(true)) | token("save" ^^^ new Save(false)) | token("clear-all" ^^^ new Clear(true)) |
		remove)

	lazy val remove = token("remove") ~> token(Space) ~> natSelect.map(ranges => new Remove(ranges))
	def natSelect = rep1sep(token(range, "<range>"), ',')
	// a single number N parses as the range (N, N)
	def range: Parser[(Int,Int)] = (NatBasic ~ ('-' ~> NatBasic).?).map { case lo ~ hi => (lo, hi getOrElse lo)}

	// Dispatches a parsed session command against the current state.
	def command(s: State) = Command.applyEffect(parser){
		case p: Print => if(p.all) printAllSettings(s) else printSettings(s)
		case v: Save => if(v.all) saveAllSettings(s) else saveSettings(s)
		case c: Clear => if(c.all) clearAllSettings(s) else clearSettings(s)
		case r: Remove => removeSettings(s,r.ranges)
	}
}
| ornicar/xsbt | main/SessionSettings.scala | Scala | bsd-3-clause | 7,579 |
package com.twitter.finagle.thrift.transport.netty3
import org.jboss.netty.channel.{ChannelPipeline, ChannelPipelineFactory, Channels}
/**
 * Builds the Netty 3 pipeline for framed Thrift servers: frame codec first,
 * then the server byte encoder and the byte decoder.
 */
private[finagle] object ThriftServerFramedPipelineFactory extends ChannelPipelineFactory {
  def getPipeline(): ChannelPipeline = {
    val pipeline = Channels.pipeline()
    // handler order matters: framing happens before byte-level encode/decode
    pipeline.addLast("thriftFrameCodec", new ThriftFrameCodec)
    pipeline.addLast("byteEncoder", new ThriftServerChannelBufferEncoder)
    pipeline.addLast("byteDecoder", new ThriftChannelBufferDecoder)
    pipeline
  }
}
| spockz/finagle | finagle-thrift/src/main/scala/com/twitter/finagle/thrift/transport/netty3/ThriftServerFramedPipelineFactory.scala | Scala | apache-2.0 | 533 |
package dotty.tools
package dotc
import core._
import Contexts._
import SymDenotations.ClassDenotation
import Symbols._
import util.{FreshNameCreator, SourceFile, NoSource}
import util.Spans.Span
import ast.{tpd, untpd}
import tpd.{Tree, TreeTraverser}
import typer.Nullables
import transform.SymUtils._
import core.Decorators._
import config.SourceVersion
import scala.annotation.internal.sharable
/** The state of a single source file being compiled: its trees, pickles and
 *  per-unit flags used by later phases.
 */
class CompilationUnit protected (val source: SourceFile) {

  override def toString: String = source.toString

  // trees before and after typing; populated as compilation proceeds
  var untpdTree: untpd.Tree = untpd.EmptyTree

  var tpdTree: tpd.Tree = tpd.EmptyTree

  /** Is this the compilation unit of a Java file */
  def isJava: Boolean = source.file.name.endsWith(".java")

  /** The source version for this unit, as determined by a language import */
  var sourceVersion: Option[SourceVersion] = None

  /** Pickled TASTY binaries, indexed by class. */
  var pickled: Map[ClassSymbol, () => Array[Byte]] = Map()

  /** The fresh name creator for the current unit.
   *  FIXME(#7661): This is not fine-grained enough to enable reproducible builds,
   *  see https://github.com/scala/scala/commit/f50ec3c866263448d803139e119b33afb04ec2bc
   */
  val freshNames: FreshNameCreator = new FreshNameCreator.Default

  /** Will be set to `true` if there are inline call that must be inlined after typer.
   *  The information is used in phase `Inlining` in order to avoid traversing trees that need no transformations.
   */
  var needsInlining: Boolean = false

  /** Set to `true` if inliner added anonymous mirrors that need to be completed */
  var needsMirrorSupport: Boolean = false

  /** Will be set to `true` if contains `Quote`.
   *  The information is used in phase `Staging` in order to avoid traversing trees that need no transformations.
   */
  var needsStaging: Boolean = false

  /** Will be set to `true` if contains `Quote` that needs to be pickled
   *  The information is used in phase `PickleQuotes` in order to avoid traversing trees that need no transformations.
   */
  var needsQuotePickling: Boolean = false

  var suspended: Boolean = false
  var suspendedAtInliningPhase: Boolean = false

  /** Can this compilation unit be suspended */
  def isSuspendable: Boolean = true

  /** Suspends the compilation unit by thowing a SuspendException
   *  and recording the suspended compilation unit
   */
  def suspend()(using Context): Nothing =
    assert(isSuspendable)
    // register with the run only once; re-suspending just rethrows
    if !suspended then
      if (ctx.settings.XprintSuspension.value)
        report.echo(i"suspended: $this")
      suspended = true
      ctx.run.nn.suspendedUnits += this
      if ctx.phase == Phases.inliningPhase then
        suspendedAtInliningPhase = true
    throw CompilationUnit.SuspendException()

  // lazily computed cache backing `assignmentSpans`
  private var myAssignmentSpans: Map[Int, List[Span]] | Null = null

  /** A map from (name-) offsets of all local variables in this compilation unit
   *  that can be tracked for being not null to the list of spans of assignments
   *  to these variables.
   */
  def assignmentSpans(using Context): Map[Int, List[Span]] =
    if myAssignmentSpans == null then myAssignmentSpans = Nullables.assignmentSpans
    myAssignmentSpans.nn
}
// Null-object compilation unit: backed by NoSource, never Java, and
// suspending it only raises the exception without registering anything.
@sharable object NoCompilationUnit extends CompilationUnit(NoSource) {

  override def isJava: Boolean = false

  override def suspend()(using Context): Nothing =
    throw CompilationUnit.SuspendException()

  override def assignmentSpans(using Context): Map[Int, List[Span]] = Map.empty
}
object CompilationUnit {

  // Thrown by CompilationUnit.suspend() to abort compilation of a unit.
  class SuspendException extends Exception

  /** Make a compilation unit for top class `clsd` with the contents of the `unpickled` tree */
  def apply(clsd: ClassDenotation, unpickled: Tree, forceTrees: Boolean)(using Context): CompilationUnit =
    val file = clsd.symbol.associatedFile.nn
    apply(new SourceFile(file, Array.empty[Char]), unpickled, forceTrees)

  /** Make a compilation unit, given picked bytes and unpickled tree */
  def apply(source: SourceFile, unpickled: Tree, forceTrees: Boolean)(using Context): CompilationUnit = {
    assert(!unpickled.isEmpty, unpickled)
    val unit1 = new CompilationUnit(source)
    unit1.tpdTree = unpickled
    if (forceTrees) {
      // traversing the tree both forces it and records quote/inline presence
      val force = new Force
      force.traverse(unit1.tpdTree)
      unit1.needsStaging = force.containsQuote
      unit1.needsQuotePickling = force.containsQuote
      unit1.needsInlining = force.containsInline
    }
    unit1
  }

  /** Create a compilation unit corresponding to `source`.
   *  If `mustExist` is true, this will fail if `source` does not exist.
   */
  def apply(source: SourceFile, mustExist: Boolean = true)(using Context): CompilationUnit = {
    val src =
      if (!mustExist)
        source
      else if (source.file.isDirectory) {
        report.error(s"expected file, received directory '${source.file.path}'")
        NoSource
      }
      else if (!source.file.exists) {
        report.error(s"source file not found: ${source.file.path}")
        NoSource
      }
      else source
    new CompilationUnit(src)
  }

  /** Force the tree to be loaded */
  private class Force extends TreeTraverser {
    // set while traversing when a quote or inline symbol is encountered
    var containsQuote = false
    var containsInline = false
    def traverse(tree: Tree)(using Context): Unit = {
      if (tree.symbol.isQuote)
        containsQuote = true
      if tree.symbol.is(Flags.Inline) then
        containsInline = true
      traverseChildren(tree)
    }
  }
}
| dotty-staging/dotty | compiler/src/dotty/tools/dotc/CompilationUnit.scala | Scala | apache-2.0 | 5,393 |
// Test fixture: class with a curried constructor (Int, then String).
class A(a: Int)(b: String)
| dotty-staging/dotty | sbt-test/source-dependencies/constructors-curried/changes/A2.scala | Scala | apache-2.0 | 27 |
package com.twitter.finagle.spdy
import java.util.concurrent.atomic.AtomicInteger
import org.jboss.netty.channel.{Channel, ChannelPipelineFactory, Channels}
import org.jboss.netty.handler.codec.http.{HttpRequest, HttpResponse}
import org.jboss.netty.handler.codec.spdy._
import com.twitter.conversions.storage._
import com.twitter.finagle._
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.transport.{ChannelTransport, Transport}
import com.twitter.util.{Closable, StorageUnit}
/**
 * Server-side filter: copies the SPDY stream id header from the request onto
 * the response so the response is sent on the originating stream.
 */
class AnnotateSpdyStreamId extends SimpleFilter[HttpRequest, HttpResponse] {
  def apply(request: HttpRequest, service: Service[HttpRequest, HttpResponse]) = {
    val streamId = request.headers.get(SpdyHttpHeaders.Names.STREAM_ID)
    service(request) map { response =>
      response.headers.set(SpdyHttpHeaders.Names.STREAM_ID, streamId)
      response
    }
  }
}
/**
 * Client-side filter: assigns each outgoing request an odd stream id
 * (1, 3, 5, ...) and strips the id from the response before returning it.
 */
class GenerateSpdyStreamId extends SimpleFilter[HttpRequest, HttpResponse] {
  // next id to hand out; incremented by 2 to keep ids odd
  private[this] val currentStreamId = new AtomicInteger(1)

  def apply(request: HttpRequest, service: Service[HttpRequest, HttpResponse]) = {
    SpdyHttpHeaders.setStreamId(request, currentStreamId.getAndAdd(2))
    service(request) map { response =>
      SpdyHttpHeaders.removeStreamId(response)
      response
    }
  }
}
/**
 * Codec factory for SPDY transports. Fields are prefixed with `_` so the
 * fluent setters below can reuse the unprefixed names; defaults are SPDY/3.1
 * with header compression enabled.
 */
case class Spdy(
  _version: SpdyVersion = SpdyVersion.SPDY_3_1,
  _enableHeaderCompression: Boolean = true,
  _maxHeaderSize: StorageUnit = 16384.bytes,
  _maxRequestSize: StorageUnit = 5.megabytes,
  _maxResponseSize: StorageUnit = 5.megabytes)
  extends CodecFactory[HttpRequest, HttpResponse]
{
  // Fluent builders, each returning an updated copy.
  def version(version: SpdyVersion) = copy(_version = version)
  def enableHeaderCompression(enable: Boolean) = copy(_enableHeaderCompression = enable)
  def maxHeaderSize(size: StorageUnit) = copy(_maxHeaderSize = size)
  def maxRequestSize(size: StorageUnit) = copy(_maxRequestSize = size)
  def maxResponseSize(size: StorageUnit) = copy(_maxResponseSize = size)

  // Frame codec shared by client and server pipelines; picks the compressing
  // or raw variant based on the header-compression flag.
  private[this] def spdyFrameCodec = {
    val maxHeaderSizeInBytes = _maxHeaderSize.inBytes.toInt
    if (_enableHeaderCompression) {
      // Header blocks tend to be small so reduce the window-size of the
      // compressor from 32 KB (15) to 2KB (11) to save memory.
      // These settings still provide sufficient compression to fit the
      // compressed header block within the TCP initial congestion window.
      new SpdyFrameCodec(_version, 8192, maxHeaderSizeInBytes, 9, 11, 8)
    } else {
      new SpdyRawFrameCodec(_version, 8192, maxHeaderSizeInBytes)
    }
  }

  def client = { config =>
    new Codec[HttpRequest, HttpResponse] {
      def pipelineFactory = new ChannelPipelineFactory {
        def getPipeline() = {
          val maxHeaderSizeInBytes = _maxHeaderSize.inBytes.toInt
          val maxResponseSizeInBytes = _maxResponseSize.inBytes.toInt
          // order: frame codec -> session handler (client mode) -> http codec
          val pipeline = Channels.pipeline()
          pipeline.addLast("spdyFrameCodec", spdyFrameCodec)
          pipeline.addLast("spdySessionHandler", new SpdySessionHandler(_version, false))
          pipeline.addLast("spdyHttpCodec", new SpdyHttpCodec(_version, maxResponseSizeInBytes))
          pipeline
        }
      }

      // clients generate fresh stream ids for every request
      override def prepareConnFactory(
        underlying: ServiceFactory[HttpRequest, HttpResponse]
      ): ServiceFactory[HttpRequest, HttpResponse] = {
        new GenerateSpdyStreamId andThen super.prepareConnFactory(underlying)
      }

      override def newClientTransport(ch: Channel, statsReceiver: StatsReceiver): Transport[Any, Any] =
        new ChannelTransport(ch)

      override def newClientDispatcher(transport: Transport[Any, Any]) =
        new SpdyClientDispatcher(transport.cast[HttpRequest, HttpResponse])
    }
  }

  def server = { config =>
    new Codec[HttpRequest, HttpResponse] {
      def pipelineFactory = new ChannelPipelineFactory {
        def getPipeline() = {
          val maxRequestSizeInBytes = _maxRequestSize.inBytes.toInt
          // order: frame codec -> session handler (server mode) -> http codec
          val pipeline = Channels.pipeline()
          pipeline.addLast("spdyFrameCodec", spdyFrameCodec)
          pipeline.addLast("spdySessionHandler", new SpdySessionHandler(_version, true))
          pipeline.addLast("spdyHttpCodec", new SpdyHttpCodec(_version, maxRequestSizeInBytes))
          pipeline
        }
      }

      // servers echo the request's stream id back on the response
      override def prepareConnFactory(
        underlying: ServiceFactory[HttpRequest, HttpResponse]
      ): ServiceFactory[HttpRequest, HttpResponse] = {
        new AnnotateSpdyStreamId andThen super.prepareConnFactory(underlying)
      }

      override def newServerDispatcher(
        transport: Transport[Any, Any],
        service: Service[HttpRequest, HttpResponse]
      ): Closable = new SpdyServerDispatcher(
        transport.cast[HttpResponse, HttpRequest], service)
    }
  }
}
object Spdy {
  /** Codec factory with default settings (SPDY/3.1, header compression on). */
  def get() = Spdy()
}
| kristofa/finagle | finagle-spdy/src/main/scala/com/twitter/finagle/spdy/Codec.scala | Scala | apache-2.0 | 4,812 |
package com.shocktrade.controlpanel.runtime.functions
import com.shocktrade.controlpanel.runtime._
import scala.concurrent.ExecutionContext
/**
 * Represents a user-defined function: a named function whose body is an
 * Evaluatable evaluated against the runtime context and scope. `params`
 * carries the declared parameter names.
 * @author Lawrence Daniels <lawrence.daniels@gmail.com>
 */
class UserDefinedFunction(val name: String, val params: Seq[String], ops: Evaluatable) extends Function {
  // NOTE(review): `params` are not bound into the scope here; presumably the
  // caller populates the scope before invoking eval — confirm.
  override def eval(rc: RuntimeContext, scope: Scope)(implicit ec: ExecutionContext) = ops.eval(rc, scope)
} | ldaniels528/shocktrade.js | app/client/control_panel/src/main/scala/com/shocktrade/controlpanel/runtime/functions/UserDefinedFunction.scala | Scala | apache-2.0 | 465 |
package dundertext.ui.keyboard
import org.scalajs.dom
import org.scalajs.dom.KeyboardEvent
/** Installs document-level keyboard hooks and fans each event out to every
  * registered [[KeyboardListener]]. */
class GlobalKeyboardHandler extends Keyboard {

  dom.document.onkeydown = onKeyDown _
  dom.document.onkeypress = onKeyPress _

  /** Registered listeners; the immutable set is swapped out on registration. */
  var listeners: Set[KeyboardListener] = Set.empty

  /** Translates a DOM key-down event into a [[KeyChord]] and offers it to all
    * listeners; the browser default action is suppressed whenever a listener
    * reports the chord as handled. */
  def onKeyDown(e: KeyboardEvent): Unit = {
    val chord = KeyChord(e.keyCode, shift = e.shiftKey, ctrl = e.ctrlKey || e.metaKey, alt = e.altKey)
    listeners.foreach { listener =>
      if (listener.onKeyDown(chord)) e.preventDefault()
    }
  }

  /** Offers the pressed character to all listeners, suppressing the browser
    * default action whenever a listener handles it. */
  def onKeyPress(e: KeyboardEvent): Unit = {
    listeners.foreach { listener =>
      if (listener.onKeyPress(e.keyCode.toChar)) e.preventDefault()
    }
  }

  /** Registers an additional keyboard listener. */
  override def listen(listener: KeyboardListener) = {
    listeners += listener
  }
}
| dundertext/dundertext | ui/src/main/scala/dundertext/ui/keyboard/GlobalKeyboardHandler.scala | Scala | gpl-3.0 | 793 |
/** Minimal fixture object holding a single no-op method. */
object FooDef {
  /** Does nothing and yields `Unit`. */
  def foo(): Unit = ()
}
| ymasory/scala-grading | src/test/resources/defs/FooDef.scala | Scala | gpl-3.0 | 35 |
package shell
import akka.actor.Actor
import akka.actor.ActorRef
import akka.actor.Props
object ShellSocketActor {
  /** Props factory: builds a [[ShellSocketActor]] bridging the websocket
    * `out` actor and the given shell session. */
  def props(out: ActorRef, shell: GTBazaarShell) = Props(new ShellSocketActor(out, shell))
}
/** Bridges a websocket (`out`) with an interactive shell: shell output is
  * forwarded to the socket, socket messages are written to the shell. */
class ShellSocketActor(out: ActorRef, shell: GTBazaarShell) extends Actor {
  // Child actor pumping lines from the shell's reader back to this actor
  // as ReaderData messages.
  val shellEchoActor =
    context.actorOf(BufferedReaderActor.props(shell.reader, self), "ShellEchoActor")
  def receive = {
    // Shell output: forward verbatim to the websocket.
    case ReaderData(string) => out ! string
    // Websocket input: write to the shell and flush.
    // NOTE(review): "\\n" appends a literal backslash-n, not a newline —
    // confirm whether "\n" was intended (may be dataset escaping artefact).
    case msg: String => shell.writer.write(msg + "\\n"); shell.writer.flush()
  }
  override def postStop : Unit = {
    // Tear down the underlying shell process when the actor stops.
    shell.destroy
  }
} | curoli/genothesis-bazaar | app/shell/ShellSocketActor.scala | Scala | mit | 601 |
package org.nikosoft.oanda.bot.pricescalper
import akka.actor.ActorSystem
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.stream.{ActorMaterializer, ActorMaterializerSettings, Supervision}
import akka.stream.scaladsl.Sink
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, StringDeserializer}
import org.joda.time.{DateTime, Duration}
import org.json4s.jackson.Serialization.read
import org.nikosoft.oanda.api.ApiModel.PricingModel.Price
import org.nikosoft.oanda.api.JsonSerializers
import org.nikosoft.oanda.bot.pricescalper.PriceScalper.PositionType.{LongPosition, PositionType, ShortPosition}
import scalaz.Scalaz._
import org.nikosoft.oanda.bot.scalping.Model._
/**
  * Experimental price-burst scalping simulation.
  *
  * Consumes tick prices from the local Kafka topic "prices"; when the
  * mid-price moves sharply within a short window it "opens" a virtual order
  * against the move, closing it later on take-profit, stop-loss or age.
  * All book-keeping lives in mutable top-level vars; results are only
  * printed to stdout — no real orders are placed here.
  */
object PriceScalper extends App {
  implicit val actorSystem = ActorSystem("streamer")
  implicit val materializer = ActorMaterializer()
  // json4s formats required by `read[Price]` below.
  implicit val formats = JsonSerializers.formats
  // Consumer against a local broker; replays the topic from the earliest offset.
  val consumerSettings = ConsumerSettings(actorSystem, new ByteArrayDeserializer, new StringDeserializer)
    .withBootstrapServers("localhost:9092")
    .withGroupId("client")
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
  // var prices: Vector[Price] = Vector.empty
  // NOTE(review): `sink` is declared but never attached — the stream below
  // runs with Sink.ignore. Confirm whether it can be removed.
  val sink = Sink.fold[List[Price], Price](Nil)((prices, price) => {
    val updatedPrices = price +: prices
    calculateSlope(updatedPrices)
    updatedPrices
  })
  // Newest-first history of every tick seen so far; grows without bound.
  var prices: Vector[Price] = Vector.empty
  // Main pipeline: Kafka record -> JSON -> Price -> strategy step. Offsets
  // from the committable source are never committed (replays on restart).
  Consumer.committableSource(consumerSettings, Subscriptions.topics("prices"))
    .map { record =>
      val value = record.record.value()
      val price = read[Price](value)
      prices = price +: prices
      // if (price.time.hourOfDay.get >= 6 && price.time.hourOfDay.get <= 14)
      calculateSlope(prices)
      /*
      val first = prices.take(2)
      if (first.length == 2 && first.head.time.getDayOfMonth != first.last.time.getDayOfMonth) {
        println(price)
      }
      */
      // price.time.hourOfDay()
      // println(price.time)
      // if (price.time.isAfter(DateTime.now.minusHours(24)) && price.time.hourOfDay.get == 6) {
      // } else Nil
    }
    .runWith(Sink.ignore)
  /** Direction of a virtual position. */
  object PositionType extends Enumeration {
    type PositionType = Value
    val LongPosition, ShortPosition = Value
  }
  // Mid-price helpers (average of best bid and best ask).
  // NOTE(review): both overloads appear unused in this object — verify
  // against other callers before removing.
  def meanPrice(prices: List[Price]): BigDecimal = prices.map(price => (price.bids.head.price.value + price.asks.head.price.value) / 2).sum / prices.length
  def meanPrice(price: Price): BigDecimal = (price.bids.head.price.value + price.asks.head.price.value) / 2
  /**
    * A simulated order. Throughout: `~` is scalaz's "orZero" (folds None to
    * the monoid zero) and `cond ? a | b` is scalaz's ternary syntax.
    */
  case class Order(creationPrice: BigDecimal, creationDate: DateTime, takeProfit: Int, positionType: PositionType, closedPrice: Option[Price] = None, closedAt: Option[DateTime] = None) {
    // Realised profit in pips; 0 while the order is still open.
    def profit: Int = ~closedPrice.map(profit)
    // Unrealised profit in pips against the matching closeout side.
    def profit(currentPrice: Price): Int = if (positionType == LongPosition)
      (currentPrice.closeoutBid.value - creationPrice).toPips else (creationPrice - currentPrice.closeoutAsk.value).toPips
    // True when the take-profit target has been reached at `currentPrice`.
    def closeOrder(currentPrice: Price): Boolean = if (profit(currentPrice) >= takeProfit) true else false
    // Closeout price the order was (or would be) closed at; 0 while open.
    def closedAtPrice: BigDecimal = ~closedPrice.map(price => (positionType == LongPosition) ? price.closeoutBid.value | price.closeoutAsk.value)
    // Minutes elapsed between order creation and `currentTime`.
    def orderDuration(currentTime: DateTime): Long = new Duration(creationDate, currentTime).getStandardMinutes
    // Lifetime of a closed order in minutes; 0 while still open.
    def orderDuration: Long = ~closedAt.map(orderDuration)
  }
  // Strategy tuning parameters (pip distances, minutes, seconds).
  val divergenceThreshold = 70
  val distanceBetweenPrices = 5
  val defaultTakeProfit = 30
  val defaultStopLoss = -100
  val secondsToCalculateAverage = 2
  val maxMinutesToWait = 2
  // Mutable order books for the simulation.
  var openOrders: List[Order] = Nil
  var closedOrders: List[Order] = Nil
  // Mid-price of a single tick.
  def avg(price: Price): BigDecimal = (price.asks.head.price.value + price.bids.head.price.value) / 2
  // Converts a raw price delta to pips (5-decimal quoting assumed by *100000).
  def pips(value: BigDecimal) = value * 100000
  // Bid/ask spread of a tick, in pips.
  def spread(price: Price): Int = ((price.asks.head.price.value - price.bids.head.price.value) * 100000).toInt.abs
  /**
    * One strategy step, invoked per tick with the newest-first history.
    * Opens at most one order on a sharp short-window move, then applies
    * take-profit, stale-order and stop-loss exits against the latest tick.
    */
  def calculateSlope(prices: Seq[Price]): Unit = {
    val lastPrice: Price = prices.head
    val fromTime = lastPrice.time.minusSeconds(secondsToCalculateAverage)
    // `values` = ticks inside the lookback window (history is newest-first).
    val (values, _) = prices.partition(_.time.isAfter(fromTime))
    // Only consider opening when flat and the spread is tight (<= 15 pips*).
    if (openOrders.isEmpty && values.length >= 2 && spread(lastPrice) <= 15) {
      val diverge = pips(avg(values.head) - avg(values.last))
      // Monotone rise across the window with total move >= threshold -> fade short.
      def positives = values.sliding(2).forall { case now +: past +: _ => pips(avg(now) - avg(past)) >= distanceBetweenPrices } && diverge >= divergenceThreshold
      // NOTE(review): asymmetric bound — `positives` needs diverge >= +70 but
      // `negatives` only needs diverge <= +70; `-divergenceThreshold` looks
      // intended here. Confirm before changing.
      def negatives = values.sliding(2).forall { case now +: past +: _ => pips(avg(past) - avg(now)) >= distanceBetweenPrices } && diverge <= divergenceThreshold
      val positionTypeOption = if (positives) Option(ShortPosition) else if (negatives) Option(LongPosition) else None
      openOrders = positionTypeOption.map { positionType =>
        // Longs are filled at the ask, shorts at the bid.
        val order = Order((positionType == LongPosition) ? lastPrice.asks.head.price.value | lastPrice.bids.head.price.value, lastPrice.time, defaultTakeProfit, positionType, None)
        println(s"Opened $positionType order at ${lastPrice.time}")
        order
      }.toList ++ openOrders
    }
    if (openOrders.nonEmpty) {
      // 1) Take-profit exits.
      val ordersToClose = openOrders.filter(_.closeOrder(lastPrice))
      if (ordersToClose.nonEmpty) {
        val justClosedOrders = ordersToClose.map(_.copy(closedPrice = Option(lastPrice), closedAt = Option(lastPrice.time)))
        closedOrders = closedOrders ++ justClosedOrders
        openOrders = openOrders.filterNot(ordersToClose.contains)
        justClosedOrders.foreach { order =>
          println(s"Take profit ${order.positionType}, open price ${order.creationPrice} at ${order.creationDate}, close price ${order.closedAtPrice} at ${order.closedAt.getOrElse("")}, profit ${order.profit}, duration ${order.orderDuration} minutes, total profit ${closedOrders.map(_.profit).sum}")
        }
      }
      // 2) Forced exits: orders that are too old or past the stop-loss.
      def staleOrders = openOrders.filter(_.orderDuration(lastPrice.time) >= maxMinutesToWait).toSet
      def stopLoss = openOrders.filter(_.profit(lastPrice) < defaultStopLoss).toSet
      val outdatedOrders: Set[Order] = staleOrders ++ stopLoss
      if (outdatedOrders.nonEmpty) {
        val justClosedOrders = outdatedOrders.map(_.copy(closedPrice = Option(lastPrice), closedAt = Option(lastPrice.time)))
        closedOrders = closedOrders ++ justClosedOrders
        openOrders = openOrders.filterNot(outdatedOrders.contains)
        justClosedOrders.foreach { order =>
          println(s"Stop loss ${order.positionType}, open price ${order.creationPrice} at ${order.creationDate}, close price ${order.closedAtPrice} at ${order.closedAt.getOrElse("")}, profit ${order.profit}, duration ${order.orderDuration} minutes, total profit ${closedOrders.map(_.profit).sum}")
        }
      }
    }
  }
}
| cnnickolay/forex-trader | trading-bot/src/main/scala/org/nikosoft/oanda/bot/pricescalper/PriceScalper.scala | Scala | mit | 6,797 |
package uk.gov.gds.ier.test
import akka.util.Timeout
import java.util.concurrent.TimeUnit
/** Mix-in that stretches the default await timeout to ten minutes.
  * Intended only for interactive debugging of a slow test — do not leave
  * it mixed in regularly. */
trait WithLongTimeout {
  implicit def defaultAwaitTimeout: Timeout = Timeout(10, TimeUnit.MINUTES)
}
| michaeldfallen/ier-frontend | test/uk/gov/gds/ier/test/WithLongTimeout.scala | Scala | mit | 251 |
package scredis.commands
import org.scalatest._
import org.scalatest.concurrent._
import scredis._
import scredis.protocol.requests.SortedSetRequests._
import scredis.exceptions._
import scredis.tags._
import scredis.util.LinkedHashSet
import scredis.util.TestUtils._
import scala.collection.mutable.ListBuffer
import scala.concurrent.duration._
class SortedSetCommandsSpec extends WordSpec
with GivenWhenThen
with BeforeAndAfterAll
with Matchers
with ScalaFutures {
private val client = Client()
private val SomeValue = "HelloWorld!虫àéç蟲"
  override def beforeAll(): Unit = {
    // Seed a hash at key "HASH" so tests can exercise wrong-type errors
    // against sorted-set commands; `.!` blocks until the call completes.
    client.hSet("HASH", "FIELD", SomeValue).!
  }
ZAdd.toString when {
"the key does not contain a sorted set" should {
"return an error" taggedAs (V120) in {
a [RedisErrorResponseException] should be thrownBy {
client.zAdd("HASH", "hello", 1).!
}
}
}
"the key does not exist" should {
"create a sorted set and add the member to it" taggedAs (V120) in {
client.zAdd("SET", SomeValue, Score.MinusInfinity).futureValue should be (true)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
(SomeValue, Score.MinusInfinity)
)
}
}
"the sorted set contains some elements" should {
"add the provided member only if it is not already contained in the " +
"sorted set" taggedAs (V120) in {
client.zAdd("SET", SomeValue, Score.PlusInfinity).futureValue should be (false)
client.zAdd("SET", "A", -1.3).futureValue should be (true)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("A", Score.Value(-1.3)), (SomeValue, Score.PlusInfinity)
)
client.del("SET")
}
}
}
s"${ZAdd.toString}-2.4" when {
"providing an empty map" should {
"return 0" taggedAs (V240) in {
client.zAdd("SET", Map.empty[String, Score]).futureValue should be (0)
}
}
"the key does not contain a sorted set" should {
"return an error" taggedAs (V240) in {
a [RedisErrorResponseException] should be thrownBy {
client.zAdd("HASH", Map("hello" -> 1, "asd" -> 2)).!
}
}
}
"the key does not exist" should {
"create a sorted set and add the member to it" taggedAs (V240) in {
client.zAdd("SET", Map("A" -> 1, "B" -> 1.5)).futureValue should be (2)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("A", Score.Value(1)), ("B", Score.Value(1.5))
)
}
}
"the sorted set contains some elements" should {
"add the provided members only if they are not already contained " +
"in the sorted set" taggedAs (V240) in {
client.zAdd("SET", Map("A" -> 2.5, "B" -> 3.8)).futureValue should be (0)
client.zAdd("SET", Map("C" -> -1.3, "D" -> -2.6, "E" -> -2.6)).futureValue should be (3)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("D", -2.6), ("E", -2.6), ("C", -1.3), ("A", 2.5), ("B", 3.8)
)
client.del("SET")
}
}
}
ZCard.toString when {
"the key does not exist" should {
"return 0" taggedAs (V120) in {
client.zCard("SET").futureValue should be (0)
}
}
"the key does not contain a sorted set" should {
"return an error" taggedAs (V120) in {
a [RedisErrorResponseException] should be thrownBy {
client.zCard("HASH").!
}
}
}
"the sorted set contains some elements" should {
"return the correct cardinality" taggedAs (V120) in {
client.zAdd("SET", "A", 0)
client.zAdd("SET", "B", 0)
client.zAdd("SET", "C", 0)
client.zCard("SET").futureValue should be (3)
client.del("SET")
}
}
}
ZCount.toString when {
"the key does not exist" should {
"return 0" taggedAs (V200) in {
client.zCount(
"SET", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity
).futureValue should be (0)
}
}
"the key does not contain a sorted set" should {
"return an error" taggedAs (V200) in {
a [RedisErrorResponseException] should be thrownBy {
client.zCount("HASH", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity).!
}
}
}
"the sorted set contains some elements" should {
"return the correct cardinality" taggedAs (V200) in {
client.zAdd("SET", "A", -1.5)
client.zAdd("SET", "B", 0.4)
client.zAdd("SET", "C", 1.6)
client.zAdd("SET", "D", 3)
client.zCount(
"SET", ScoreLimit.Inclusive(-3), ScoreLimit.Inclusive(-3)
).futureValue should be (0)
client.zCount(
"SET", ScoreLimit.Inclusive(4), ScoreLimit.Inclusive(4)
).futureValue should be (0)
client.zCount(
"SET", ScoreLimit.Inclusive(-1.5), ScoreLimit.Inclusive(3)
).futureValue should be (4)
client.zCount(
"SET", ScoreLimit.Exclusive(-1.5), ScoreLimit.Exclusive(3)
).futureValue should be (2)
client.zCount(
"SET", ScoreLimit.Inclusive(-1.5), ScoreLimit.Exclusive(3)
).futureValue should be (3)
client.zCount(
"SET", ScoreLimit.Exclusive(-1.5), ScoreLimit.Inclusive(3)
).futureValue should be (3)
client.zCount(
"SET", ScoreLimit.Exclusive(0), ScoreLimit.Exclusive(0.5)
).futureValue should be (1)
client.zCount(
"SET", ScoreLimit.Inclusive(0.5), ScoreLimit.PlusInfinity
).futureValue should be (2)
client.zCount(
"SET", ScoreLimit.MinusInfinity, ScoreLimit.Inclusive(0.5)
).futureValue should be (2)
client.zCount(
"SET", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity
).futureValue should be (4)
client.zCount(
"SET", ScoreLimit.MinusInfinity, ScoreLimit.MinusInfinity
).futureValue should be (0)
client.zCount(
"SET", ScoreLimit.PlusInfinity, ScoreLimit.PlusInfinity
).futureValue should be (0)
client.del("SET")
}
}
}
ZIncrBy.toString when {
"the key does not exist" should {
"create a sorted set, add the member and increment the score starting " +
"from zero" taggedAs (V120) in {
client.zIncrBy("SET", "A", 1.5).futureValue should be (1.5)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("A", 1.5)
)
}
}
"the key does not contain a sorted set" should {
"return an error" taggedAs (V120) in {
a [RedisErrorResponseException] should be thrownBy {
client.zIncrBy("HASH", "A", 1.5).!
}
}
}
"the sorted set contains some elements" should {
"increment the score of the given member" taggedAs (V120) in {
client.zIncrBy("SET", "A", 1.5).futureValue should be (3.0)
client.zIncrBy("SET", "A", -0.5).futureValue should be (2.5)
client.zIncrBy("SET", "B", -0.7).futureValue should be (-0.7)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("B", -0.7), ("A", 2.5)
)
client.del("SET")
}
}
}
ZInterStore.toString when {
"the keys do not exist" should {
"do nothing" taggedAs (V200) in {
client.zInterStore("SET", Seq("SET1", "SET2")).futureValue should be (0)
client.zRangeWithScores("SET").futureValue should be (empty)
}
}
"some keys do not exist" should {
"overwrite the destination sorted set with the empty set" taggedAs (V200) in {
client.zAdd("SET", "A", 0)
client.zAdd("SET1", "A", 0)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("A", 0)
)
client.zInterStore("SET", Seq("SET1", "SET2", "SET3")).futureValue should be (0)
client.zRangeWithScores("SET").futureValue should be (empty)
}
}
"at least one of the source key does not contain a sorted set" should {
"return an error" taggedAs (V200) in {
a [RedisErrorResponseException] should be thrownBy {
client.zInterStore("SET", Seq("HASH")).!
}
a [RedisErrorResponseException] should be thrownBy {
client.zInterStore("SET", Seq("HASH", "SET2")).!
}
}
}
"the sorted sets contain some elements" should {
Given("that the aggregation function is Sum")
"compute the intersection between them, aggregate the scores with Sum and " +
"store the result in the destination" taggedAs (V200) in {
client.zAdd("SET1", "B", 1.7)
client.zAdd("SET1", "C", 2.3)
client.zAdd("SET1", "D", 4.41)
client.zAdd("SET2", "C", 5.5)
client.zAdd("SET3", "A", -1.0)
client.zAdd("SET3", "C", -2.13)
client.zAdd("SET3", "E", -5.56)
client.zInterStore("SET", Seq("SET1", "SET1")).futureValue should be (4)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("A", 2 * 0), ("B", 2 * 1.7), ("C", 2 * 2.3), ("D", 2 * 4.41)
)
client.zInterStore("SET", Seq("SET1", "SET2")).futureValue should be (1)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("C", 2.3 + 5.5)
)
client.zInterStore("SET", Seq("SET1", "SET3")).futureValue should be (2)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("A", 0 + (-1.0)), ("C", 2.3 + (-2.13))
)
client.zInterStore("SET", Seq("SET1", "SET2", "SET3")).futureValue should be (1)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("C", 2.3 + 5.5 + (-2.13))
)
}
Given("that the aggregation function is Min")
"compute the intersection between them, aggregate the scores with Min and " +
"store the result in the destination" taggedAs (V200) in {
client.zInterStore("SET", Seq("SET1", "SET1"), Aggregate.Min).futureValue should be (4)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("A", 0), ("B", 1.7), ("C", 2.3), ("D", 4.41)
)
client.zInterStore("SET", Seq("SET1", "SET2"), Aggregate.Min).futureValue should be (1)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("C", math.min(2.3, 5.5))
)
client.zInterStore("SET", Seq("SET1", "SET3"), Aggregate.Min).futureValue should be (2)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("C", -2.13), ("A", -1)
)
client.zInterStore(
"SET", Seq("SET1", "SET2", "SET3"), Aggregate.Min
).futureValue should be (1)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("C", math.min(math.min(2.3, 5.5), -2.13))
)
}
Given("that the aggregation function is Max")
"compute the intersection between them, aggregate the scores with Max and " +
"store the result in the destination" taggedAs (V200) in {
client.zInterStore("SET", Seq("SET1", "SET1"), Aggregate.Max).futureValue should be (4)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("A", 0), ("B", 1.7), ("C", 2.3), ("D", 4.41)
)
client.zInterStore("SET", Seq("SET1", "SET2"), Aggregate.Max).futureValue should be (1)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("C", math.max(2.3, 5.5))
)
client.zInterStore("SET", Seq("SET1", "SET3"), Aggregate.Max).futureValue should be (2)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("A", 0), ("C", 2.3)
)
client.zInterStore(
"SET", Seq("SET1", "SET2", "SET3"), Aggregate.Max
).futureValue should be (1)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("C", math.max(math.max(2.3, 5.5), -2.13))
)
}
Given("some custom weights and that the aggregation function is Sum")
"compute the intersection between them, aggregate the scores with Sum by taking the " +
"weights into account and store the result in the destination" taggedAs (V200) in {
client.zInterStoreWeighted("SET", Map("SET1" -> 1, "SET2" -> 2)).futureValue should be (1)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("C", 2.3 + 2 * 5.5)
)
client.zInterStoreWeighted("SET", Map("SET1" -> 1, "SET3" -> 2)).futureValue should be (2)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("A", 0 + 2 * (-1.0)), ("C", 2.3 + 2 * (-2.13))
)
client.zInterStoreWeighted(
"SET", Map("SET1" -> 1, "SET2" -> 2, "SET3" -> -1)
).futureValue should be (1)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("C", 2.3 + 2 * 5.5 + (-1) * (-2.13))
)
}
Given("some custom weights and that the aggregation function is Min")
"compute the intersection between them, aggregate the scores with Min by taking the " +
"weights into account and store the result in the destination" taggedAs (V200) in {
client.zInterStoreWeighted(
"SET", Map("SET1" -> 1, "SET2" -> 2), Aggregate.Min
).futureValue should be (1)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("C", math.min(2.3, 2 * 5.5))
)
client.zInterStoreWeighted(
"SET", Map("SET1" -> 1, "SET3" -> 2), Aggregate.Min
).futureValue should be (2)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("C", -4.26), ("A", -2)
)
client.zInterStoreWeighted(
"SET", Map("SET1" -> 1, "SET2" -> 2, "SET3" -> -1), Aggregate.Min
).futureValue should be (1)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("C", math.min(math.min(2.3, 2 * 5.5), (-1) * (-2.13)))
)
}
Given("some custom weights and that the aggregation function is Max")
"compute the intersection between them, aggregate the scores with Max by taking the " +
"weights into account and store the result in the destination" taggedAs (V200) in {
client.zInterStoreWeighted(
"SET", Map("SET1" -> 1, "SET2" -> 2), Aggregate.Max
).futureValue should be (1)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("C", math.max(2.3, 2 * 5.5))
)
client.zInterStoreWeighted(
"SET", Map("SET1" -> 1, "SET3" -> 2), Aggregate.Max
).futureValue should be (2)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("A", 0), ("C", 2.3)
)
client.zInterStoreWeighted(
"SET", Map("SET1" -> 1, "SET2" -> 2, "SET3" -> -1), Aggregate.Max
).futureValue should be (1)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("C", math.max(math.max(2.3, 2 * 5.5), (-1) * (-2.13)))
)
client.del("SET", "SET1", "SET2", "SET3")
}
}
}
ZLexCount.toString when {
"the key does not exist" should {
"return 0" taggedAs (V289) in {
client.zLexCount(
"SET", LexicalScoreLimit.MinusInfinity, LexicalScoreLimit.PlusInfinity
).futureValue should be (0)
}
}
"the key does not contain a sorted set" should {
"return an error" taggedAs (V289) in {
a [RedisErrorResponseException] should be thrownBy {
client.zLexCount(
"HASH", LexicalScoreLimit.MinusInfinity, LexicalScoreLimit.PlusInfinity
).!
}
}
}
"the sorted set contains some elements" should {
"return the correct cardinality" taggedAs (V289) in {
client.zAdd("SET", "a", 0)
client.zAdd("SET", "b", 0)
client.zAdd("SET", "c", 0)
client.zAdd("SET", "d", 0)
client.zAdd("SET", "e", 0)
client.zAdd("SET", "f", 0)
client.zLexCount(
"SET", LexicalScoreLimit.Inclusive("1"), LexicalScoreLimit.Inclusive("1")
).futureValue should be (0)
client.zLexCount(
"SET", LexicalScoreLimit.Inclusive("a"), LexicalScoreLimit.Inclusive("a")
).futureValue should be (1)
client.zLexCount(
"SET", LexicalScoreLimit.Inclusive("a"), LexicalScoreLimit.Inclusive("c")
).futureValue should be (3)
client.zLexCount(
"SET", LexicalScoreLimit.Exclusive("a"), LexicalScoreLimit.Exclusive("c")
).futureValue should be (1)
client.zLexCount(
"SET", LexicalScoreLimit.Inclusive("a"), LexicalScoreLimit.Exclusive("f")
).futureValue should be (5)
client.zLexCount(
"SET", LexicalScoreLimit.Exclusive("a"), LexicalScoreLimit.Exclusive("f")
).futureValue should be (4)
client.zLexCount(
"SET", LexicalScoreLimit.Inclusive("d"), LexicalScoreLimit.PlusInfinity
).futureValue should be (3)
client.zLexCount(
"SET", LexicalScoreLimit.MinusInfinity, LexicalScoreLimit.Inclusive("c")
).futureValue should be (3)
client.zLexCount(
"SET", LexicalScoreLimit.MinusInfinity, LexicalScoreLimit.PlusInfinity
).futureValue should be (6)
client.zLexCount(
"SET", LexicalScoreLimit.MinusInfinity, LexicalScoreLimit.MinusInfinity
).futureValue should be (0)
client.zLexCount(
"SET", LexicalScoreLimit.PlusInfinity, LexicalScoreLimit.PlusInfinity
).futureValue should be (0)
client.del("SET")
}
}
}
ZRange.toString when {
"the key does not exist" should {
"return an empty set" taggedAs (V120) in {
client.zRange("SET").futureValue should be (empty)
client.zRangeWithScores("SET").futureValue should be (empty)
}
}
"the key does not contain a sorted set" should {
"return an error" taggedAs (V120) in {
a [RedisErrorResponseException] should be thrownBy {
client.zRange("HASH").!
}
a [RedisErrorResponseException] should be thrownBy {
client.zRangeWithScores("HASH").!
}
}
}
"the sorted set contains some elements" should {
"return the ordered elements in the specified range" taggedAs (V120) in {
client.zAdd("SET", "A", -5)
client.zAdd("SET", "B", -1)
client.zAdd("SET", "C", 0)
client.zAdd("SET", "D", 3)
client.zRange("SET", 0, 0).futureValue should contain theSameElementsInOrderAs List("A")
client.zRangeWithScores(
"SET", 0, 0
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("A", -5)
)
client.zRange("SET", 3, 3).futureValue should contain theSameElementsInOrderAs List("D")
client.zRangeWithScores(
"SET", 3, 3
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("D", 3)
)
client.zRange("SET", 1, 2).futureValue should contain theSameElementsInOrderAs List(
"B", "C"
)
client.zRangeWithScores(
"SET", 1, 2
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("B", -1), ("C", 0)
)
client.zRange(
"SET", 0, 3
).futureValue should contain theSameElementsInOrderAs List(
"A", "B", "C", "D"
)
client.zRangeWithScores(
"SET", 0, 3
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("A", -5), ("B", -1), ("C", 0), ("D", 3)
)
client.zRange("SET", 0, -1).futureValue should contain theSameElementsInOrderAs List(
"A", "B", "C", "D"
)
client.zRangeWithScores(
"SET", 0, -1
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("A", -5), ("B", -1), ("C", 0), ("D", 3)
)
client.zRange("SET", 0, -2).futureValue should contain theSameElementsInOrderAs List(
"A", "B", "C"
)
client.zRangeWithScores(
"SET", 0, -2
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("A", -5), ("B", -1), ("C", 0)
)
client.zRange("SET", -3, -1).futureValue should contain theSameElementsInOrderAs List(
"B", "C", "D"
)
client.zRangeWithScores(
"SET", -3, -1
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("B", -1), ("C", 0), ("D", 3)
)
client.zRange("SET").futureValue should contain theSameElementsInOrderAs List(
"A", "B", "C", "D"
)
client.zRangeWithScores(
"SET"
).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
("A", -5), ("B", -1), ("C", 0), ("D", 3)
)
client.del("SET")
}
}
}
ZRangeByLex.toString when {
"the key does not exist" should {
"return None" taggedAs (V289) in {
client.zRangeByLex(
"SET", LexicalScoreLimit.MinusInfinity, LexicalScoreLimit.PlusInfinity
).futureValue should be (empty)
}
}
"the key does not contain a sorted set" should {
"return an error" taggedAs (V289) in {
a [RedisErrorResponseException] should be thrownBy {
client.zRangeByLex(
"HASH", LexicalScoreLimit.MinusInfinity, LexicalScoreLimit.PlusInfinity
).!
}
}
}
"the sorted set contains some elements" should {
Given("that no limit is provided")
"return the ordered elements in the specified score range" taggedAs (V289) in {
client.zAdd("SET", "A", 0)
client.zAdd("SET", "B", 0)
client.zAdd("SET", "C", 0)
client.zAdd("SET", "D", 0)
client.zRangeByLex(
"SET", LexicalScoreLimit.MinusInfinity, LexicalScoreLimit.PlusInfinity
).futureValue should contain theSameElementsInOrderAs List("A", "B", "C", "D")
client.zRangeByLex(
"SET", LexicalScoreLimit.MinusInfinity, LexicalScoreLimit.Inclusive("C")
).futureValue should contain theSameElementsInOrderAs List("A", "B", "C")
client.zRangeByLex(
"SET", LexicalScoreLimit.MinusInfinity, LexicalScoreLimit.Exclusive("C")
).futureValue should contain theSameElementsInOrderAs List("A", "B")
client.zRangeByLex(
"SET", LexicalScoreLimit.Inclusive("B"), LexicalScoreLimit.PlusInfinity
).futureValue should contain theSameElementsInOrderAs List("B", "C", "D")
client.zRangeByLex(
"SET", LexicalScoreLimit.Exclusive("B"), LexicalScoreLimit.PlusInfinity
).futureValue should contain theSameElementsInOrderAs List("C", "D")
client.zRangeByLex(
"SET", LexicalScoreLimit.Inclusive("B"), LexicalScoreLimit.Inclusive("C")
).futureValue should contain theSameElementsInOrderAs List("B", "C")
client.zRangeByLex(
"SET", LexicalScoreLimit.Exclusive("B"), LexicalScoreLimit.Inclusive("C")
).futureValue should contain theSameElementsInOrderAs List("C")
client.zRangeByLex(
"SET", LexicalScoreLimit.Exclusive("A"), LexicalScoreLimit.Exclusive("B")
).futureValue should be (empty)
}
Given("that some limit is provided")
"return the ordered elements in the specified score range within " +
"provided limit" taggedAs (V289) in {
client.zRangeByLex(
"SET", LexicalScoreLimit.MinusInfinity, LexicalScoreLimit.PlusInfinity, Some((0, 3))
).futureValue should contain theSameElementsInOrderAs List(
"A", "B", "C"
)
client.zRangeByLex(
"SET", LexicalScoreLimit.MinusInfinity, LexicalScoreLimit.PlusInfinity, Some((1, 4))
).futureValue should contain theSameElementsInOrderAs List(
"B", "C", "D"
)
client.zRangeByLex(
"SET", LexicalScoreLimit.MinusInfinity, LexicalScoreLimit.PlusInfinity, Some((0, 0))
).futureValue should be (empty)
client.del("SET")
}
}
}
  // ZRANGEBYSCORE / ZRANGEBYSCORE WITHSCORES: missing key yields empty, wrong key
  // type is an error, then every combination of -inf/+inf and inclusive/exclusive
  // bounds over the fixture {A:-5, B:-1, C:0, D:3}, with and without LIMIT.
  ZRangeByScore.toString when {
    "the key does not exist" should {
      "return None" taggedAs (V220) in {
        client.zRangeByScore(
          "SET", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity
        ).futureValue should be (empty)
        client.zRangeByScoreWithScores(
          "SET", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity
        ).futureValue should be (empty)
      }
    }
    "the key does not contain a sorted set" should {
      "return an error" taggedAs (V220) in {
        a [RedisErrorResponseException] should be thrownBy {
          client.zRangeByScore(
            "HASH", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity
          ).!
        }
        a [RedisErrorResponseException] should be thrownBy {
          client.zRangeByScoreWithScores(
            "HASH", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity
          ).!
        }
      }
    }
    "the sorted set contains some elements" should {
      Given("that no limit is provided")
      "return the ordered elements in the specified score range" taggedAs (V220) in {
        // Fixture reused by both tests in this group; deleted at the end of the
        // second test below.
        client.zAdd("SET", "A", -5)
        client.zAdd("SET", "B", -1)
        client.zAdd("SET", "C", 0)
        client.zAdd("SET", "D", 3)
        client.zRangeByScore(
          "SET", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity
        ).futureValue should contain theSameElementsInOrderAs List("A", "B", "C", "D")
        client.zRangeByScoreWithScores(
          "SET", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", -5), ("B", -1), ("C", 0), ("D", 3)
        )
        // Inclusive upper bound keeps C (score 0)...
        client.zRangeByScore(
          "SET", ScoreLimit.MinusInfinity, ScoreLimit.Inclusive(0)
        ).futureValue should contain theSameElementsInOrderAs List("A", "B", "C")
        client.zRangeByScoreWithScores(
          "SET", ScoreLimit.MinusInfinity, ScoreLimit.Inclusive(0)
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", -5), ("B", -1), ("C", 0)
        )
        // ...while an exclusive upper bound drops it.
        client.zRangeByScore(
          "SET", ScoreLimit.MinusInfinity, ScoreLimit.Exclusive(0)
        ).futureValue should contain theSameElementsInOrderAs List("A", "B")
        client.zRangeByScoreWithScores(
          "SET", ScoreLimit.MinusInfinity, ScoreLimit.Exclusive(0)
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", -5), ("B", -1)
        )
        client.zRangeByScore(
          "SET", ScoreLimit.Inclusive(-1), ScoreLimit.PlusInfinity
        ).futureValue should contain theSameElementsInOrderAs List("B", "C", "D")
        client.zRangeByScoreWithScores(
          "SET", ScoreLimit.Inclusive(-1), ScoreLimit.PlusInfinity
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("B", -1), ("C", 0), ("D", 3)
        )
        client.zRangeByScore(
          "SET", ScoreLimit.Exclusive(-1), ScoreLimit.PlusInfinity
        ).futureValue should contain theSameElementsInOrderAs List("C", "D")
        client.zRangeByScoreWithScores(
          "SET", ScoreLimit.Exclusive(-1), ScoreLimit.PlusInfinity
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("C", 0), ("D", 3)
        )
        client.zRangeByScore(
          "SET", ScoreLimit.Inclusive(-1), ScoreLimit.Inclusive(0)
        ).futureValue should contain theSameElementsInOrderAs List("B", "C")
        client.zRangeByScoreWithScores(
          "SET", ScoreLimit.Inclusive(-1), ScoreLimit.Inclusive(0)
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("B", -1), ("C", 0)
        )
        client.zRangeByScore(
          "SET", ScoreLimit.Exclusive(-1), ScoreLimit.Inclusive(0)
        ).futureValue should contain theSameElementsInOrderAs List("C")
        client.zRangeByScoreWithScores(
          "SET", ScoreLimit.Exclusive(-1), ScoreLimit.Inclusive(0)
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("C", 0)
        )
        // (-1, 0) exclusive on both sides contains no member.
        client.zRangeByScore(
          "SET", ScoreLimit.Exclusive(-1), ScoreLimit.Exclusive(0)
        ).futureValue should be (empty)
        client.zRangeByScoreWithScores(
          "SET", ScoreLimit.Exclusive(-1), ScoreLimit.Exclusive(0)
        ).futureValue should be (empty)
      }
      Given("that some limit is provided")
      "return the ordered elements in the specified score range within " +
        "provided limit" taggedAs (V220) in {
        // LIMIT is (offset, count) applied after the score filtering.
        client.zRangeByScore(
          "SET", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity, Some((0, 3))
        ).futureValue should contain theSameElementsInOrderAs List(
          "A", "B", "C"
        )
        client.zRangeByScoreWithScores(
          "SET", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity, Some((0, 3))
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", -5), ("B", -1), ("C", 0)
        )
        client.zRangeByScore(
          "SET", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity, Some((1, 4))
        ).futureValue should contain theSameElementsInOrderAs List(
          "B", "C", "D"
        )
        client.zRangeByScoreWithScores(
          "SET", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity, Some((1, 4))
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("B", -1), ("C", 0), ("D", 3)
        )
        // A count of 0 yields nothing.
        client.zRangeByScore(
          "SET", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity, Some((0, 0))
        ).futureValue should be (empty)
        client.zRangeByScoreWithScores(
          "SET", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity, Some((0, 0))
        ).futureValue should be (empty)
        client.del("SET")
      }
    }
  }
ZRank.toString when {
"the key does not exist" should {
"return None" taggedAs (V200) in {
client.zRank("SET", "A").futureValue should be (empty)
}
}
"the key does not contain a sorted set" should {
"return an error" taggedAs (V200) in {
a [RedisErrorResponseException] should be thrownBy {
client.zRank("HASH", "hello").!
}
}
}
"the sorted set does not contain the member" should {
"return None" taggedAs (V200) in {
client.zAdd("SET", "A", 1)
client.zAdd("SET", "C", 3)
client.zRank("SET", "B").futureValue should be (empty)
}
}
"the sorted set contains the element" should {
"the correct index" taggedAs (V200) in {
client.zAdd("SET", "B", 2)
client.zRank("SET", "C").futureValue should contain (2)
client.del("SET")
}
}
}
  // ZREM (single-member form): returns the number of members actually removed.
  ZRem.toString when {
    "the key does not exist" should {
      "return 0" taggedAs (V120) in {
        client.zRem("SET", "A").futureValue should be (0)
      }
    }
    "the key does not contain a sorted set" should {
      "return an error" taggedAs (V120) in {
        a [RedisErrorResponseException] should be thrownBy {
          client.zRem("HASH", "A").!
        }
      }
    }
    "the sorted set does not contain the element" should {
      "do nothing and return 0" taggedAs (V120) in {
        client.zAdd("SET", "A", 1)
        client.zAdd("SET", "C", 3)
        client.zRem("SET", "B").futureValue should be (0)
        client.zRange("SET").futureValue should contain theSameElementsInOrderAs List("A", "C")
      }
    }
    "the sorted set contains the element" should {
      "remove the element" taggedAs (V120) in {
        client.zAdd("SET", "B", 2)
        client.zRem("SET", "B").futureValue should be (1)
        client.zRange("SET").futureValue should contain theSameElementsInOrderAs List("A", "C")
        client.del("SET")
      }
    }
  }
  // ZREM variadic form (Redis >= 2.4): several members removed in one call;
  // the reply counts only the members that existed.
  s"${ZRem.toString}-2.4" when {
    "the key does not exist" should {
      "return 0" taggedAs (V240) in {
        client.zRem("SET", "A", "B").futureValue should be (0)
      }
    }
    "the key does not contain a sorted set" should {
      "return an error" taggedAs (V240) in {
        a [RedisErrorResponseException] should be thrownBy {
          client.zRem("HASH", "A", "B").!
        }
      }
    }
    "the sorted set does not contain the element" should {
      "do nothing and return 0" taggedAs (V240) in {
        client.zAdd("SET", "A", 1)
        client.zAdd("SET", "C", 3)
        client.zRem("SET", "B", "D", "E").futureValue should be (0)
        client.zRange("SET").futureValue should contain theSameElementsInOrderAs List("A", "C")
      }
    }
    "the sorted set contains some elements" should {
      "remove the elements" taggedAs (V240) in {
        client.zAdd("SET", "B", 2)
        client.zAdd("SET", "D", 4)
        // "E" never existed, so only B and D count towards the result.
        client.zRem("SET", "B", "D", "E").futureValue should be (2)
        client.zRange("SET").futureValue should contain theSameElementsInOrderAs List("A", "C")
        client.del("SET")
      }
    }
  }
  // ZREMRANGEBYLEX (Redis >= 2.8.9): removes members in a lexicographical range.
  // All members share distinct scores here; the lexical order A..E matches them.
  ZRemRangeByLex.toString when {
    "the key does not exist" should {
      "return 0" taggedAs (V289) in {
        client.zRemRangeByLex(
          "SET", LexicalScoreLimit.MinusInfinity, LexicalScoreLimit.PlusInfinity
        ).futureValue should be (0)
      }
    }
    "the key does not contain a sorted set" should {
      "return an error" taggedAs (V289) in {
        a [RedisErrorResponseException] should be thrownBy {
          client.zRemRangeByLex(
            "HASH", LexicalScoreLimit.MinusInfinity, LexicalScoreLimit.PlusInfinity
          ).!
        }
      }
    }
    "the sorted set contains some element" should {
      "remove the elements in the specified range" taggedAs (V289) in {
        client.zAdd("SET", "A", 1)
        client.zAdd("SET", "B", 2)
        client.zAdd("SET", "C", 3)
        client.zAdd("SET", "D", 4)
        client.zAdd("SET", "E", 5)
        // (E, +inf) is empty: E itself is excluded.
        client.zRemRangeByLex(
          "SET", LexicalScoreLimit.Exclusive("E"), LexicalScoreLimit.PlusInfinity
        ).futureValue should be (0)
        client.zRange("SET").futureValue should contain theSameElementsInOrderAs List(
          "A", "B", "C", "D", "E"
        )
        client.zRemRangeByLex(
          "SET", LexicalScoreLimit.Inclusive("A"), LexicalScoreLimit.Inclusive("A")
        ).futureValue should be (1)
        client.zRange("SET").futureValue should contain theSameElementsInOrderAs List(
          "B", "C", "D", "E"
        )
        // [-inf, D) removes B and C.
        client.zRemRangeByLex(
          "SET", LexicalScoreLimit.MinusInfinity, LexicalScoreLimit.Exclusive("D")
        ).futureValue should be (2)
        client.zRange("SET").futureValue should contain theSameElementsInOrderAs List(
          "D", "E"
        )
        client.zRemRangeByLex(
          "SET", LexicalScoreLimit.MinusInfinity, LexicalScoreLimit.PlusInfinity
        ).futureValue should be (2)
        client.zRange("SET").futureValue should be (empty)
      }
    }
  }
  // ZREMRANGEBYRANK: removes members by 0-based rank range (negative indexes
  // count from the end, so (0, -1) clears the whole set).
  ZRemRangeByRank.toString when {
    "the key does not exist" should {
      "return 0" taggedAs (V200) in {
        client.zRemRangeByRank("SET", 0, -1).futureValue should be (0)
      }
    }
    "the key does not contain a sorted set" should {
      "return an error" taggedAs (V200) in {
        a [RedisErrorResponseException] should be thrownBy {
          client.zRemRangeByRank("HASH", 0, -1).!
        }
      }
    }
    "the sorted set contains some element" should {
      "remove the elements in the specified range" taggedAs (V200) in {
        client.zAdd("SET", "A", 1)
        client.zAdd("SET", "B", 2)
        client.zAdd("SET", "C", 3)
        client.zAdd("SET", "D", 4)
        client.zAdd("SET", "E", 5)
        // Ranks 5..6 are past the end of a 5-element set.
        client.zRemRangeByRank("SET", 5, 6).futureValue should be (0)
        client.zRange("SET").futureValue should contain theSameElementsInOrderAs List(
          "A", "B", "C", "D", "E"
        )
        client.zRemRangeByRank("SET", 0, 0).futureValue should be (1)
        client.zRange("SET").futureValue should contain theSameElementsInOrderAs List(
          "B", "C", "D", "E"
        )
        // Ranks are re-evaluated after each removal: 1..2 is now C, D.
        client.zRemRangeByRank("SET", 1, 2).futureValue should be (2)
        client.zRange("SET").futureValue should contain theSameElementsInOrderAs List(
          "B", "E"
        )
        client.zRemRangeByRank("SET", 0, -1).futureValue should be (2)
        client.zRange("SET").futureValue should be (empty)
      }
    }
  }
  // ZREMRANGEBYSCORE: removes members whose score falls in the given range,
  // covering inclusive/exclusive bounds and the full -inf/+inf sweep.
  ZRemRangeByScore.toString when {
    "the key does not exist" should {
      "return 0" taggedAs (V120) in {
        client.zRemRangeByScore(
          "SET", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity
        ).futureValue should be (0)
      }
    }
    "the key does not contain a sorted set" should {
      "return an error" taggedAs (V120) in {
        a [RedisErrorResponseException] should be thrownBy {
          client.zRemRangeByScore(
            "HASH", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity
          ).!
        }
      }
    }
    "the sorted set contains some element" should {
      "remove the elements in the specified score range" taggedAs (V120) in {
        client.zAdd("SET", "A", 1)
        client.zAdd("SET", "B", 2)
        client.zAdd("SET", "C", 3)
        client.zAdd("SET", "D", 4)
        client.zAdd("SET", "E", 5)
        // (5, 7] matches nothing: the top score 5 is excluded.
        client.zRemRangeByScore(
          "SET", ScoreLimit.Exclusive(5), ScoreLimit.Inclusive(7)
        ).futureValue should be (0)
        client.zRange("SET").futureValue should contain theSameElementsInOrderAs List(
          "A", "B", "C", "D", "E"
        )
        client.zRemRangeByScore(
          "SET", ScoreLimit.Inclusive(1), ScoreLimit.Inclusive(1)
        ).futureValue should be (1)
        client.zRange("SET").futureValue should contain theSameElementsInOrderAs List(
          "B", "C", "D", "E"
        )
        // (2, 5) removes C (3) and D (4) but keeps the endpoints B and E.
        client.zRemRangeByScore(
          "SET", ScoreLimit.Exclusive(2), ScoreLimit.Exclusive(5)
        ).futureValue should be (2)
        client.zRange("SET").futureValue should contain theSameElementsInOrderAs List(
          "B", "E"
        )
        client.zRemRangeByScore(
          "SET", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity
        ).futureValue should be (2)
        client.zRange("SET").futureValue should be (empty)
      }
    }
  }
  // ZREVRANGE / ZREVRANGE WITHSCORES: rank-based retrieval in descending score
  // order, including negative indexes counted from the end.
  ZRevRange.toString when {
    "the key does not exist" should {
      "return None" taggedAs (V120) in {
        client.zRevRange("SET").futureValue should be (empty)
        client.zRevRangeWithScores("SET").futureValue should be (empty)
      }
    }
    "the key does not contain a sorted set" should {
      "return an error" taggedAs (V120) in {
        a [RedisErrorResponseException] should be thrownBy {
          client.zRevRange("HASH").!
        }
        a [RedisErrorResponseException] should be thrownBy {
          client.zRevRangeWithScores("HASH").!
        }
      }
    }
    "the sorted set contains some elements" should {
      "return the ordered elements in the specified range" taggedAs (V120) in {
        // Fixture {A:-5, B:-1, C:0, D:3}; descending order is D, C, B, A.
        client.zAdd("SET", "D", 3)
        client.zAdd("SET", "C", 0)
        client.zAdd("SET", "B", -1)
        client.zAdd("SET", "A", -5)
        client.zRevRange("SET", 0, 0).futureValue should contain theSameElementsInOrderAs List("D")
        client.zRevRangeWithScores(
          "SET", 0, 0
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](("D", 3))
        client.zRevRange("SET", 3, 3).futureValue should contain theSameElementsInOrderAs List("A")
        client.zRevRangeWithScores(
          "SET", 3, 3
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](("A", -5))
        client.zRevRange("SET", 1, 2).futureValue should contain theSameElementsInOrderAs List(
          "C", "B"
        )
        client.zRevRangeWithScores(
          "SET", 1, 2
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("C", 0), ("B", -1)
        )
        client.zRevRange("SET", 0, 3).futureValue should contain theSameElementsInOrderAs List(
          "D", "C", "B", "A"
        )
        client.zRevRangeWithScores(
          "SET", 0, 3
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("D", 3), ("C", 0), ("B", -1), ("A", -5)
        )
        // -1 denotes the last element, so (0, -1) is the whole set.
        client.zRevRange("SET", 0, -1).futureValue should contain theSameElementsInOrderAs List(
          "D", "C", "B", "A"
        )
        client.zRevRangeWithScores(
          "SET", 0, -1
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("D", 3), ("C", 0), ("B", -1), ("A", -5)
        )
        client.zRevRange("SET", 0, -2).futureValue should contain theSameElementsInOrderAs List(
          "D", "C", "B"
        )
        client.zRevRangeWithScores(
          "SET", 0, -2
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("D", 3), ("C", 0), ("B", -1)
        )
        client.zRevRange("SET", -3, -1).futureValue should contain theSameElementsInOrderAs List(
          "C", "B", "A"
        )
        client.zRevRangeWithScores(
          "SET", -3, -1
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("C", 0), ("B", -1), ("A", -5)
        )
        // No-args overload defaults to the full range.
        client.zRevRange("SET").futureValue should contain theSameElementsInOrderAs List(
          "D", "C", "B", "A"
        )
        client.zRevRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("D", 3), ("C", 0), ("B", -1), ("A", -5)
        )
        client.del("SET")
      }
    }
  }
  // ZREVRANGEBYSCORE / ...WITHSCORES: like ZRANGEBYSCORE but descending, with
  // (max, min) argument order. Results are reversed before comparison so the
  // expected lists can be written ascending, matching the ZRANGEBYSCORE tests.
  ZRevRangeByScore.toString when {
    "the key does not exist" should {
      "return None" taggedAs (V220) in {
        client.zRevRangeByScore(
          "SET", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity
        ).futureValue should be (empty)
        client.zRevRangeByScoreWithScores(
          "SET", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity
        ).futureValue should be (empty)
      }
    }
    "the key does not contain a sorted set" should {
      "return an error" taggedAs (V220) in {
        a [RedisErrorResponseException] should be thrownBy {
          client.zRevRangeByScore(
            "HASH", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity
          ).!
        }
        a [RedisErrorResponseException] should be thrownBy {
          client.zRevRangeByScoreWithScores(
            "HASH", ScoreLimit.MinusInfinity, ScoreLimit.PlusInfinity
          ).!
        }
      }
    }
    "the sorted set contains some elements" should {
      Given("that no limit is provided")
      "return the ordered elements in the specified score range" taggedAs (V220) in {
        // Fixture {A:-5, B:-1, C:0, D:3}, reused by the limit test below.
        client.zAdd("SET", "D", 3)
        client.zAdd("SET", "C", 0)
        client.zAdd("SET", "B", -1)
        client.zAdd("SET", "A", -5)
        client.zRevRangeByScore(
          "SET", min = ScoreLimit.MinusInfinity, max = ScoreLimit.PlusInfinity
        ).futureValue.reverse should contain theSameElementsInOrderAs List(
          "A", "B", "C", "D"
        )
        client.zRevRangeByScoreWithScores(
          "SET", min = ScoreLimit.MinusInfinity, max = ScoreLimit.PlusInfinity
        ).futureValue.reverse should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", -5), ("B", -1), ("C", 0), ("D", 3)
        )
        client.zRevRangeByScore(
          "SET", max = ScoreLimit.Inclusive(0), min = ScoreLimit.MinusInfinity
        ).futureValue.reverse should contain theSameElementsInOrderAs List(
          "A", "B", "C"
        )
        client.zRevRangeByScoreWithScores(
          "SET", max = ScoreLimit.Inclusive(0), min = ScoreLimit.MinusInfinity
        ).futureValue.reverse should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", -5), ("B", -1), ("C", 0)
        )
        client.zRevRangeByScore(
          "SET", max = ScoreLimit.Exclusive(0), min = ScoreLimit.MinusInfinity
        ).futureValue.reverse should contain theSameElementsInOrderAs List(
          "A", "B"
        )
        client.zRevRangeByScoreWithScores(
          "SET", max = ScoreLimit.Exclusive(0), min = ScoreLimit.MinusInfinity
        ).futureValue.reverse should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", -5), ("B", -1)
        )
        client.zRevRangeByScore(
          "SET", max = ScoreLimit.PlusInfinity, min = ScoreLimit.Inclusive(-1)
        ).futureValue.reverse should contain theSameElementsInOrderAs List(
          "B", "C", "D"
        )
        client.zRevRangeByScoreWithScores(
          "SET", max = ScoreLimit.PlusInfinity, min = ScoreLimit.Inclusive(-1)
        ).futureValue.reverse should contain theSameElementsInOrderAs List[(String, Score)](
          ("B", -1), ("C", 0), ("D", 3)
        )
        client.zRevRangeByScore(
          "SET", max = ScoreLimit.PlusInfinity, min = ScoreLimit.Exclusive(-1)
        ).futureValue.reverse should contain theSameElementsInOrderAs List(
          "C", "D"
        )
        client.zRevRangeByScoreWithScores(
          "SET", max = ScoreLimit.PlusInfinity, min = ScoreLimit.Exclusive(-1)
        ).futureValue.reverse should contain theSameElementsInOrderAs List[(String, Score)](
          ("C", 0), ("D", 3)
        )
        client.zRevRangeByScore(
          "SET", max = ScoreLimit.Inclusive(0), min = ScoreLimit.Inclusive(-1)
        ).futureValue.reverse should contain theSameElementsInOrderAs List(
          "B", "C"
        )
        client.zRevRangeByScoreWithScores(
          "SET", max = ScoreLimit.Inclusive(0), min = ScoreLimit.Inclusive(-1)
        ).futureValue.reverse should contain theSameElementsInOrderAs List[(String, Score)](
          ("B", -1), ("C", 0)
        )
        client.zRevRangeByScore(
          "SET", max = ScoreLimit.Inclusive(0), min = ScoreLimit.Exclusive(-1)
        ).futureValue.reverse should contain theSameElementsInOrderAs List("C")
        client.zRevRangeByScoreWithScores(
          "SET", max = ScoreLimit.Inclusive(0), min = ScoreLimit.Exclusive(-1)
        ).futureValue.reverse should contain theSameElementsInOrderAs List[(String, Score)](
          ("C", 0)
        )
        // (-1, 0) exclusive on both sides contains no member.
        client.zRevRangeByScore(
          "SET", max = ScoreLimit.Exclusive(0), min = ScoreLimit.Exclusive(-1)
        ).futureValue should be (empty)
        client.zRevRangeByScoreWithScores(
          "SET", max = ScoreLimit.Exclusive(0), min = ScoreLimit.Exclusive(-1)
        ).futureValue should be (empty)
      }
      Given("that some limit is provided")
      "return the ordered elements in the specified score range within " +
        "provided limit" taggedAs (V220) in {
        // LIMIT (offset, count) applies to the descending order, hence the
        // descending expected lists here (no .reverse).
        client.zRevRangeByScore(
          "SET",
          max = ScoreLimit.PlusInfinity,
          min = ScoreLimit.MinusInfinity,
          limitOpt = Some((0, 3))
        ).futureValue should contain theSameElementsInOrderAs List(
          "D", "C", "B"
        )
        client.zRevRangeByScoreWithScores(
          "SET",
          max = ScoreLimit.PlusInfinity,
          min = ScoreLimit.MinusInfinity,
          limitOpt = Some((0, 3))
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("D", 3), ("C", 0), ("B", -1)
        )
        client.zRevRangeByScore(
          "SET",
          max = ScoreLimit.PlusInfinity,
          min = ScoreLimit.MinusInfinity,
          limitOpt = Some((1, 4))
        ).futureValue should contain theSameElementsInOrderAs List(
          "C", "B", "A"
        )
        client.zRevRangeByScoreWithScores(
          "SET",
          max = ScoreLimit.PlusInfinity,
          min = ScoreLimit.MinusInfinity,
          limitOpt = Some((1, 4))
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("C", 0), ("B", -1), ("A", -5)
        )
        client.zRevRangeByScore(
          "SET",
          max = ScoreLimit.PlusInfinity,
          min = ScoreLimit.MinusInfinity,
          limitOpt = Some((0, 0))
        ).futureValue should be (empty)
        client.zRevRangeByScoreWithScores(
          "SET",
          max = ScoreLimit.PlusInfinity,
          min = ScoreLimit.MinusInfinity,
          limitOpt = Some((0, 0))
        ).futureValue should be (empty)
        client.del("SET")
      }
    }
  }
ZRevRank.toString when {
"the key does not exist" should {
"return None" taggedAs (V200) in {
client.zRevRank("SET", "A").futureValue should be (empty)
}
}
"the key does not contain a sorted set" should {
"return an error" taggedAs (V200) in {
a [RedisErrorResponseException] should be thrownBy {
client.zRevRank("HASH", "hello").!
}
}
}
"the sorted set does not contain the member" should {
"return None" taggedAs (V200) in {
client.zAdd("SET", "A", 1)
client.zAdd("SET", "C", 3)
client.zRevRank("SET", "B").futureValue should be (empty)
}
}
"the sorted set contains the element" should {
"the correct index" taggedAs (V200) in {
client.zAdd("SET", "B", 2)
client.zRevRank("SET", "C").futureValue should be (Some(0))
client.del("SET")
}
}
}
  // ZSCAN (Redis >= 2.8): cursor-based iteration over a sorted set, with and
  // without MATCH/COUNT. A returned cursor of 0 means the iteration is done.
  ZScan.toString when {
    "the key does not exist" should {
      "return an empty set" taggedAs (V280) in {
        val (next, set) = client.zScan[String]("NONEXISTENTKEY", 0).!
        next should be (0)
        set should be (empty)
      }
    }
    "the key does not contain a sorted set" should {
      "return an error" taggedAs (V280) in {
        a [RedisErrorResponseException] should be thrownBy {
          client.zScan[String]("HASH", 0).!
        }
      }
    }
    "the sorted set contains 5 elements" should {
      "return all elements" taggedAs (V280) in {
        for (i <- 1 to 5) {
          client.zAdd("SSET", "value" + i, i)
        }
        // Small sets are returned in a single iteration (cursor 0).
        val (next, set) = client.zScan[String]("SSET", 0).!
        next should be (0)
        set should contain theSameElementsInOrderAs List[(String, Score)](
          ("value1", 1.0),
          ("value2", 2.0),
          ("value3", 3.0),
          ("value4", 4.0),
          ("value5", 5.0)
        )
        // Grow the set to 15 members for the multi-iteration tests below.
        for (i <- 1 to 10) {
          client.zAdd("SSET", "foo" + i, 5 + i)
        }
      }
    }
    "the sorted set contains 15 elements" should {
      // Expected full content, mirroring what the previous test inserted.
      val full = ListBuffer[(String, Score)]()
      for (i <- 1 to 5) {
        full += (("value" + i, i))
      }
      for (i <- 1 to 10) {
        full += (("foo" + i, 5 + i))
      }
      val fullList = full.toList
      Given("that no pattern is set")
      "return all elements" taggedAs (V280) in {
        val elements = ListBuffer[(String, Score)]()
        var cursor = 0L
        do {
          val (next, set) = client.zScan[String]("SSET", cursor).!
          elements ++= set
          cursor = next
        } while (cursor > 0)
        elements.toList should contain theSameElementsInOrderAs fullList
      }
      Given("that a pattern is set")
      "return all matching elements" taggedAs (V280) in {
        val elements = ListBuffer[(String, Score)]()
        var cursor = 0L
        do {
          val (next, set) = client.zScan[String]("SSET", cursor, matchOpt = Some("foo*")).!
          elements ++= set
          cursor = next
        } while (cursor > 0)
        elements.toList should contain theSameElementsInOrderAs fullList.filter {
          case (value, score) => value.startsWith("foo")
        }
      }
      Given("that a pattern is set and count is set to 100")
      "return all matching elements in one iteration" taggedAs (V280) in {
        val elements = ListBuffer[(String, Score)]()
        var cursor = 0L
        do {
          // COUNT=100 exceeds the set size, so every matching member (10 of
          // them) arrives in the first and only batch.
          val (next, set) = client.zScan[String](
            "SSET", cursor, matchOpt = Some("foo*"), countOpt = Some(100)
          ).!
          set.size should be (10)
          elements ++= set
          cursor = next
        } while (cursor > 0)
        elements.toList should contain theSameElementsInOrderAs fullList.filter {
          case (value, score) => value.startsWith("foo")
        }
      }
    }
  }
ZScore.toString when {
"the key does not exist" should {
"return None" taggedAs (V120) in {
client.zScore("SET", "A").futureValue should be (empty)
}
}
"the key does not contain a sorted set" should {
"return an error" taggedAs (V120) in {
a [RedisErrorResponseException] should be thrownBy {
client.zScore("HASH", "A").!
}
}
}
"the sorted set does not contain the member" should {
"return None" taggedAs (V120) in {
client.zAdd("SET", "A", 1)
client.zAdd("SET", "C", 3)
client.zAdd("SET", "D", Score.MinusInfinity)
client.zAdd("SET", "E", Score.PlusInfinity)
client.zScore("SET", "B").futureValue should be (empty)
}
}
"the sorted set contains the element" should {
"the correct score" taggedAs (V120) in {
client.zAdd("SET", "B", 2)
client.zScore("SET", "A").futureValue should contain (Score.Value(1))
client.zScore("SET", "B").futureValue should contain (Score.Value(2.0))
client.zScore("SET", "C").futureValue should contain (Score.Value(3.0))
client.zScore("SET", "D").futureValue should contain (Score.MinusInfinity)
client.zScore("SET", "E").futureValue should contain (Score.PlusInfinity)
client.del("SET")
}
}
}
  // ZUNIONSTORE / weighted variant: union of several sorted sets stored into a
  // destination key, covering the Sum (default), Min and Max aggregation
  // functions, with and without per-source weights. The SET1/SET2/SET3
  // fixtures created in the first "Sum" test are reused by every later test
  // in this group and deleted at the very end.
  ZUnionStore.toString when {
    "the keys do not exist" should {
      "do nothing" taggedAs (V200) in {
        client.zUnionStore("SET", Seq("SET1", "SET2")).futureValue should be (0)
        client.zRangeWithScores("SET").futureValue should be (empty)
      }
    }
    "some keys do not exist" should {
      "overwrite the destination sorted set with the empty set" taggedAs (V200) in {
        client.zAdd("SET", "A", 0)
        client.zAdd("SET1", "A", 0)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", 0)
        )
        // Missing source keys are treated as empty sets.
        client.zUnionStore("SET", Seq("SET1", "SET2", "SET3")).futureValue should be (1)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", 0)
        )
      }
    }
    "at least one of the source key does not contain a sorted set" should {
      "return an error" taggedAs (V200) in {
        a [RedisErrorResponseException] should be thrownBy {
          client.zUnionStore("SET", Seq("HASH")).!
        }
        a [RedisErrorResponseException] should be thrownBy {
          client.zUnionStore("SET", Seq("HASH", "SET2")).!
        }
        a [RedisErrorResponseException] should be thrownBy {
          client.zUnionStore("SET", Seq("SET1", "HASH")).!
        }
      }
    }
    "the sorted sets contain some elements" should {
      Given("that the aggregation function is Sum")
      "compute the union between them, aggregate the scores with Sum and " +
        "store the result in the destination" taggedAs (V200) in {
        // SET1 already holds A:0 from the earlier test; add the rest.
        client.zAdd("SET1", "B", 1.7)
        client.zAdd("SET1", "C", 2.3)
        client.zAdd("SET1", "D", 4.41)
        client.zAdd("SET2", "C", 5.5)
        client.zAdd("SET3", "A", -1.0)
        client.zAdd("SET3", "C", -2.13)
        client.zAdd("SET3", "E", -5.56)
        // Passing the same source twice doubles every score under Sum.
        client.zUnionStore("SET", Seq("SET1", "SET1")).futureValue should be (4)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", 2 * 0), ("B", 2 * 1.7), ("C", 2 * 2.3), ("D", 2 * 4.41)
        )
        client.zUnionStore("SET", Seq("SET1", "SET2")).futureValue should be (4)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", 0), ("B", 1.7), ("D", 4.41), ("C", 2.3 + 5.5)
        )
        client.zUnionStore("SET", Seq("SET1", "SET3")).futureValue should be (5)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("E", -5.56), ("A", -1), ("C", 2.3 - 2.13), ("B", 1.7), ("D", 4.41)
        )
        client.zUnionStore("SET", Seq("SET1", "SET2", "SET3")).futureValue should be (5)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("E", -5.56), ("A", -1), ("B", 1.7), ("D", 4.41), ("C", 2.3 + 5.5 - 2.13)
        )
      }
      Given("that the aggregation function is Min")
      "compute the union between them, aggregate the scores with Min and " +
        "store the result in the destination" taggedAs (V200) in {
        client.zUnionStore("SET", Seq("SET1", "SET1"), Aggregate.Min).futureValue should be (4)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", 0), ("B", 1.7), ("C", 2.3), ("D", 4.41)
        )
        client.zUnionStore("SET", Seq("SET1", "SET2"), Aggregate.Min).futureValue should be (4)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", 0), ("B", 1.7), ("C", 2.3), ("D", 4.41)
        )
        client.zUnionStore("SET", Seq("SET1", "SET3"), Aggregate.Min).futureValue should be (5)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("E", -5.56), ("C", -2.13), ("A", -1), ("B", 1.7), ("D", 4.41)
        )
        client.zUnionStore(
          "SET", Seq("SET1", "SET2", "SET3"), Aggregate.Min
        ).futureValue should be (5)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("E", -5.56), ("C", -2.13), ("A", -1), ("B", 1.7), ("D", 4.41)
        )
      }
      Given("that the aggregation function is Max")
      "compute the union between them, aggregate the scores with Max and " +
        "store the result in the destination" taggedAs (V200) in {
        client.zUnionStore("SET", Seq("SET1", "SET1"), Aggregate.Max).futureValue should be (4)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", 0), ("B", 1.7), ("C", 2.3), ("D", 4.41)
        )
        client.zUnionStore("SET", Seq("SET1", "SET2"), Aggregate.Max).futureValue should be (4)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", 0), ("B", 1.7), ("D", 4.41), ("C", 5.5)
        )
        client.zUnionStore("SET", Seq("SET1", "SET3"), Aggregate.Max).futureValue should be (5)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("E", -5.56), ("A", 0), ("B", 1.7), ("C", 2.3), ("D", 4.41)
        )
        client.zUnionStore(
          "SET", Seq("SET1", "SET2", "SET3"), Aggregate.Max
        ).futureValue should be (5)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("E", -5.56), ("A", 0), ("B", 1.7), ("D", 4.41), ("C", 5.5)
        )
      }
      Given("some custom weights and that the aggregation function is Sum")
      "compute the union between them, aggregate the scores with Sum by taking the " +
        "weights into account and store the result in the destination" taggedAs (V200) in {
        // Each source score is multiplied by its weight before aggregation.
        client.zUnionStoreWeighted("SET", Map("SET1" -> 1, "SET2" -> 2)).futureValue should be (4)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", 0), ("B", 1.7), ("D", 4.41), ("C", 2.3 + 2 * 5.5)
        )
        client.zUnionStoreWeighted("SET", Map("SET1" -> 1, "SET3" -> 2)).futureValue should be (5)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("E", 2 * (-5.56)), ("A", 2 * (-1)), ("C", 2.3 + 2 * (-2.13)), ("B", 1.7), ("D", 4.41)
        )
        client.zUnionStoreWeighted(
          "SET", Map("SET1" -> 1, "SET2" -> 2, "SET3" -> -1)
        ).futureValue should be (5)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", 1),
          ("B", 1.7),
          ("D", 4.41),
          ("E", (-1) * (-5.56)),
          ("C", 2.3 + 2 * 5.5 + (-1) * (-2.13))
        )
      }
      Given("some custom weights and that the aggregation function is Min")
      "compute the union between them, aggregate the scores with Min by taking the " +
        "weights into account and store the result in the destination" taggedAs (V200) in {
        client.zUnionStoreWeighted(
          "SET", Map("SET1" -> 1, "SET2" -> 2), Aggregate.Min
        ).futureValue should be (4)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", 0), ("B", 1.7), ("C", 2.3), ("D", 4.41)
        )
        client.zUnionStoreWeighted(
          "SET", Map("SET1" -> 1, "SET3" -> 2), Aggregate.Min
        ).futureValue should be (5)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("E", 2 * (-5.56)), ("C", -4.26), ("A", -2), ("B", 1.7), ("D", 4.41)
        )
        client.zUnionStoreWeighted(
          "SET", Map("SET1" -> 1, "SET2" -> 2, "SET3" -> -1), Aggregate.Min
        ).futureValue should be (5)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", 0), ("B", 1.7), ("C", 2.13), ("D", 4.41), ("E", 5.56)
        )
      }
      Given("some custom weights and that the aggregation function is Max")
      "compute the union between them, aggregate the scores with Max by taking the " +
        "weights into account and store the result in the destination" taggedAs (V200) in {
        client.zUnionStoreWeighted(
          "SET", Map("SET1" -> 1, "SET2" -> 2), Aggregate.Max
        ).futureValue should be (4)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", 0), ("B", 1.7), ("D", 4.41), ("C", 11)
        )
        client.zUnionStoreWeighted(
          "SET", Map("SET1" -> 1, "SET3" -> 2), Aggregate.Max
        ).futureValue should be (5)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("E", 2 * (-5.56)), ("A", 0), ("B", 1.7), ("C", 2.3), ("D", 4.41)
        )
        client.zUnionStoreWeighted(
          "SET", Map("SET1" -> 1, "SET2" -> 2, "SET3" -> -1), Aggregate.Max
        ).futureValue should be (5)
        client.zRangeWithScores(
          "SET"
        ).futureValue should contain theSameElementsInOrderAs List[(String, Score)](
          ("A", 1), ("B", 1.7), ("D", 4.41), ("E", 5.56), ("C", 11)
        )
        // Final cleanup of all fixtures used by this group.
        client.del("SET", "SET1", "SET2", "SET3")
      }
    }
  }
override def afterAll() {
client.flushDB().!
client.quit().!
}
}
| rileyberton/scredis | src/test/scala/scredis/commands/SortedSetCommandsSpec.scala | Scala | apache-2.0 | 66,963 |
// Scala for the Impatient / Chapter 8 / Exercise 4:
// Define an abstract class Item with methods price and description. A SimpleItem is an item whose price and description are specified in the constructor. Take advantage of the fact that a val can override a def. A Bundle is an item that contains other items. Its price is the sum of the prices in the bundle. Also provide a mechanism for adding items to the bundle and a suitable description method.
/** An item with a price and a human-readable description. */
abstract class Item { def price: Double; def description: String }

/** An item whose price and description are fixed at construction time.
  * A `val` constructor parameter overrides the corresponding `def` in [[Item]].
  */
class SimpleItem(val price: Double, val description: String) extends Item

/** An item made up of other items.
  * @param items the current contents of the bundle (defaults to empty)
  */
class Bundle(var items: List[Item] = Nil) extends Item {
  /** Total price: sum of the contained items' prices (0.0 for an empty bundle). */
  def price = items.map(_.price).sum
  /** Comma-separated descriptions of the contained items. */
  def description = items.map(_.description).mkString(", ")
  /** Adds an item to the bundle — the explicit "mechanism for adding items"
    * the exercise asks for (callers may still reassign `items` directly).
    */
  def addItem(item: Item): Unit = items :+= item
}
/** Smoke test: bundle price is the sum of item prices, both for the initial
  * contents and after appending a further item.
  */
object Test extends App {
  val unlucky = new SimpleItem(price = 13, description = "Unlucky")
  val lucky = new SimpleItem(price = 12, description = "Lucky")
  val bundle = new Bundle(List(unlucky, lucky))
  assert(bundle.price == 25)
  val gadget = new SimpleItem(price = 10, description = "Gadget")
  bundle.items = bundle.items :+ gadget
  assert(bundle.price == 35)
}
| eallik/scalaimpatient | chapter08/ex04.scala | Scala | unlicense | 1,065 |
package cs4r.labs.learningscala.ninetynineproblems
import scala.util.Random
/**
* Generate a random permutation of the elements of a list.
*/
/** P25: generate a random permutation of the elements of a list,
  * delegating to the standard library's `Random.shuffle`.
  */
object Problem25 {
  /** Returns the elements of `ls` in a uniformly random order. */
  def randomPermute[T](ls: List[T]): List[T] = Random.shuffle(ls)

  def main(args: Array[String]): Unit =
    println(randomPermute(List('a, 'b, 'c, 'd, 'e, 'f)))
}
| Cs4r/LearningScala | src/main/scala/cs4r/labs/learningscala/ninetynineproblems/Problem25.scala | Scala | gpl-3.0 | 344 |
package com.kelebra.github.impatient.scala.katas
import java.util.Properties
import scala.annotation.tailrec
import scala.collection.immutable.ListMap
import scala.io.Source
import scala.language.implicitConversions
/**
* Chapter 4: Associative arrays and tuples
*/
trait Tuples {

  /**
    * Calculates items with given discount.
    *
    * @param `items and prices` mapping of item to its price
    * @param discount fraction from 0 to 1; the implementation below treats
    *                 out-of-range values as "no discount"
    * @return mapping of items to its discounted prices
    */
  def discountedStuff(`items and prices`: Map[String, Double],
                      discount: Double): Map[String, Double]

  /**
    * Counts each word occurrence in given source.
    *
    * @param source provider of lines
    * @param order ordering applied to the resulting (word, count) pairs
    * @return mapping of each unique word to its count in a file
    */
  def wordCountIn(source: Source)
                 (implicit order: Ordering[(String, Long)]): Map[String, Long]

  /**
    * Prints out aligned table of java environmental properties and their values.
    *
    * @param source of java environmental properties
    * @param out output of aligned table
    * @param cnv implicit conversion to scala map
    */
  def prettyPrintJavaVariables[T](source: T)
                                 (out: String => Unit = println)
                                 (implicit cnv: T => Map[String, String]): Unit

  /**
    * Returns minimum and maximum value.
    *
    * @param in iterable; callers should ensure it is non-empty (min/max of an
    *           empty iterable throws)
    * @tparam T ordered type of elements
    * @return min and max values
    */
  def minMax[T](in: Iterable[T])
               (implicit ord: Ordering[T]): (T, T)

  /**
    * Return number of elements lesser, equal and greater than given one —
    * in exactly that tuple order: (lesser, equal, greater).
    *
    * @param in iterable
    * @param element pivot
    * @tparam T ordered type of elements
    * @return number of elements lesser, equal and greater than given one
    */
  def ltEqGt[T](in: Iterable[T], element: T)
               (implicit ord: Ordering[T]): (Int, Int, Int)
}
object Tuples extends Tuples {

  /** Applies `discount` to every price; a discount outside [0, 1] is treated
    * as invalid and the original map is returned unchanged. */
  override def discountedStuff(`items and prices`: Map[String, Double],
                               discount: Double): Map[String, Double] =
    if (discount > 1 || discount < 0) `items and prices`
    else `items and prices`.mapValues(_ * (1 - discount))

  /** Counts each word occurrence in `source`; the result is a ListMap whose
    * iteration order follows the implicit `order` on (word, count) pairs. */
  override def wordCountIn(source: Source)
                          (implicit order: Ordering[(String, Long)]): Map[String, Long] = {
    type Stats = Map[String, Long]
    // Split every line on spaces; empty fragments are filtered out below.
    val words: Iterator[String] = source
      .getLines()
      .flatMap(_.split(' ').map(_.trim))

    @tailrec
    def accumulate(acc: Stats = Map.empty.withDefaultValue(0)): Stats =
      if (words.hasNext) {
        val word = words.next().trim
        if (word.nonEmpty) accumulate(acc.updated(word, acc(word) + 1))
        else accumulate(acc)
      }
      else acc

    ListMap(accumulate().toSeq.sorted: _*)
  }

  /** Converts java.util.Properties to an immutable Scala map.
    *
    * Uses the explicit JavaConverters decoration instead of the deprecated
    * implicit JavaConversions wildcard import, which also removes the
    * unchecked asInstanceOf cast of the previous implementation.
    */
  implicit def javaPropertiesToScalaMap(properties: Properties): Map[String, String] = {
    import scala.collection.JavaConverters._
    properties.asScala.toMap
  }

  /** Prints a table of `source`'s properties, keys padded to equal width so
    * the separator column aligns. */
  override def prettyPrintJavaVariables[T](source: T = System.getProperties)
                                          (out: (String) => Unit)
                                          (implicit cnv: T => Map[String, String]): Unit = {
    val properties = cnv(source)
    // Width of the longest key; every key is padded to it.
    val max = properties.maxBy(_._1.length)._1.length
    out(
      properties.map { case (k, v) => s"${k.padTo(max, ' ')} | $v" }.mkString("\\n")
    )
  }

  /** Minimum and maximum of `in`; throws on an empty iterable (as min/max do). */
  override def minMax[T](in: Iterable[T])
                        (implicit ord: Ordering[T]): (T, T) =
    (in.min, in.max)

  /** Counts of elements strictly less than, equal to and greater than `element`,
    * in that order.
    *
    * Fixes two defects of the previous implementation: (1) the lesser/greater
    * counts were swapped relative to the documented contract, because it
    * grouped on compare(element, x) and then read key -1 as "lesser"; (2)
    * Ordering.compare is only guaranteed to return a negative/zero/positive
    * value, not exactly -1/0/1, so we normalise with signum before grouping.
    */
  override def ltEqGt[T](in: Iterable[T], element: T)
                        (implicit ord: Ordering[T]): (Int, Int, Int) = {
    val bySign = in
      .groupBy(x => math.signum(ord.compare(x, element)))
      .mapValues(_.size)
      .withDefaultValue(0)
    (bySign(-1), bySign(0), bySign(1))
  }
}
| kelebra/impatient-scala-katas | src/main/scala/com/kelebra/github/impatient/scala/katas/Tuples.scala | Scala | mit | 3,922 |
package com.overviewdocs.http
/** Description of an outbound HTTP request.
  *
  * @param url              target URL
  * @param maybeCredentials optional credentials for authenticated requests
  * @param followRedirects  whether redirects are followed automatically (default: true)
  * @param maybeBody        optional raw request body bytes
  */
case class Request(
  url: String,
  maybeCredentials: Option[Credentials] = None,
  followRedirects: Boolean = true,
  maybeBody: Option[Array[Byte]] = None
)
| overview/overview-server | worker/src/main/scala/com/overviewdocs/http/Request.scala | Scala | agpl-3.0 | 191 |
package edu.fuberlin.hotspots
import org.scalatest.{FlatSpec, Matchers}
/**
 * Tests for [[SuperCellFactory]] with super cell size 10: interior cells map
 * to exactly one super cell, while cells on a face, edge or corner of a super
 * cell also appear in the neighbouring super cells (2, 4 and 8 results).
 *
 * NOTE(review): `compose`/`decompose` are helpers defined elsewhere in this
 * package; the coordinate order assumed below is inferred from usage here —
 * confirm against their definitions.
 *
 * Created by Christian Windolf on 08.07.16.
 */
class SuperCellComputationSpec extends FlatSpec with Matchers{

  // Factory under test; 10 is the super cell edge length in cells.
  val superCellFactory = new SuperCellFactory(10)

  it should "return one SuperCell for a cell in the middle" in {
    superCellFactory.create((compose(5, 5, 5), 1)) should have size 1
  }

  it should "return the correct super cell id" in {
    superCellFactory.create((compose(5, 15, 15), 1))(0)._1 shouldEqual(compose(0, 10, 10))
  }

  it should "return two super cells for a left border cell" in {
    superCellFactory.create((compose(20, 15, 15), 1)) should have size 2
  }

  it should "return the correct main cell id" in {
    // The original cell id must be carried through to every super cell it lands in.
    val map = superCellFactory.create((compose(10, 15, 15), 1)).toMap
    map(compose(0, 10, 10))._1 shouldEqual compose(10, 15, 15)
    map(compose(10, 10, 10))._1 shouldEqual compose(10, 15, 15)
  }

  it should "return two super cells for a right border cell" in {
    superCellFactory.create((compose(19, 15, 15), 1)) should have size 2
  }

  it should "return the correct main cell id for right border cell" in {
    val map = superCellFactory.create((compose(19, 15, 15), 1)).toMap
    all(map.values.map(_._1)) shouldEqual compose(19, 15, 15)
    map.keys.map(decompose) should contain (20, 10, 10)
    map.keys.map(decompose) should contain (10, 10, 10)
  }

  it should "return two super cells for an upper border cell" in {
    superCellFactory.create((compose(15, 10, 15), 1)) should have size 2
  }

  it should "return two super cells for a lower border cell" in {
    superCellFactory.create((compose(15, 59, 15), 1)) should have size 2
  }

  it should "find four cells for an edge cell" in {
    superCellFactory.create((compose(20, 20, 5), 1)) should have size 4
  }

  it should "find correct base cells" in {
    // An edge cell belongs to the four super cells meeting at that edge.
    val map = superCellFactory.create((compose(20, 20, 5), 1)).toMap
    val cellIDs = map.keys.map(decompose)
    cellIDs should contain (20, 20, 0)
    cellIDs should contain (10, 20, 0)
    cellIDs should contain (10, 10, 0)
    cellIDs should contain (20, 10, 0)
    all(map.values.map(_._1)) shouldEqual compose(20, 20, 5)
  }

  it should "find 8 cells for a corner cell" in {
    superCellFactory.create((compose(20, 20, 20), 1)) should have size 8
  }

  it should "return eight cells for a corner cells" in {
    // A corner cell belongs to all eight super cells meeting at that corner.
    val map = superCellFactory.create((compose(20, 20, 20), 1)).toMap
    map should have size 8
    all(map.values.map(_._1)) shouldEqual compose(20, 20, 20)
    val cellIDs = map.keys map decompose
    cellIDs should contain (20, 20, 20)
    cellIDs should contain (10, 20, 20)
    cellIDs should contain (20, 10, 20)
    cellIDs should contain (10, 10, 20)
    cellIDs should contain (20, 20, 10)
    cellIDs should contain (10, 20, 10)
    cellIDs should contain (20, 10, 10)
    cellIDs should contain (10, 10, 10)
  }
}
| parasmehta/giscup2016 | src/test/scala/edu/fuberlin/hotspots/SuperCellComputationSpec.scala | Scala | apache-2.0 | 2,898 |
package cvx
import breeze.linalg.{DenseMatrix, DenseVector, _}
import scala.collection.mutable.ListBuffer
/** The affine equality constraint Ax=b, for an mxn matrix A with m < n and
  * full rank m (the rank condition is assumed, not verified).
  *
  * Solutions are parametrized as x = z0+Fu, where x=z0 is the minimum norm
  * solution and Fu is orthogonal to z0 for every u of dimension n-m. This
  * supports the change of variables x --> u, which reduces the problem
  * dimension and eliminates the explicit equality constraints.
  */
class EqualityConstraint(val A:DenseMatrix[Double], val b:DenseVector[Double]){

  assert(A.rows < A.cols, "m=A.rows="+A.rows+" is not less than n=A.cols="+A.cols)
  assert(A.rows == b.length, "m=A.rows="+A.rows+" != b.length = "+b.length)

  // Ambient dimension n of the variable x.
  val dim:Int = A.cols
  // Parametrization x = z0 + F*u of the solution set.
  val solutionSpace:SolutionSpace = SolutionSpace(A,b)
  val F:DenseMatrix[Double] = solutionSpace.F
  val z0:DenseVector[Double] = solutionSpace.z0

  /** @return the residual norm ||Ax-b||. */
  def errorAt(x:DenseVector[Double]):Double = {
    val residual = A*x-b
    norm(residual)
  }

  /** True if ||Ax-b|| < tol*m, i.e. the residual is small relative to the
    * number of equations. */
  def isSatisfiedBy(x:DenseVector[Double],tol:Double=1e-14):Boolean =
    errorAt(x) < tol*A.rows

  /** This constraint combined with the additional equalities in eqs
    * (the two systems are stacked vertically). */
  def addEqualities(eqs:EqualityConstraint):EqualityConstraint = {
    val stackedA = DenseMatrix.vertcat[Double](A,eqs.A)
    val stackedB = DenseVector.vertcat[Double](b,eqs.b)
    EqualityConstraint(stackedA,stackedB)
  }

  /** The constraint lifted to the dimension of phase I analysis: one extra
    * variable is appended, unconstrained by the equalities (zero column). */
  def phase_I_EqualityConstraint: EqualityConstraint = {
    val padded = DenseMatrix.horzcat(A,DenseMatrix.zeros[Double](A.rows,1))
    EqualityConstraint(padded,b)
  }

  /** The constraint lifted to the dimension of phase I SOI analysis.
    * @param p number of inequality constraints the phase I SOI analysis is applied to.
    */
  def phase_I_SOI_EqualityConstraint(p:Int): EqualityConstraint = {
    val padded = DenseMatrix.horzcat(A,DenseMatrix.zeros[Double](A.rows,p))
    EqualityConstraint(padded,b)
  }

  /** Writes A and b to standard output. */
  def printSelf:Unit = {
    val msg = "\\nEquality constraints, matrix A:\\n"+A+"\\nvector b:\\n"+b+"\\n\\n"
    print(msg)
  }

  /** Writes A and b to logger, with numbers rounded to digits digits. */
  def printSelf(logger:Logger,digits:Int):Unit = {
    logger.print("\\nEquality constraints, matrix A:\\n")
    MatrixUtils.print(A,logger,digits)
    logger.print("\\nEquality constraints, vector b:\\n")
    MatrixUtils.print(b,logger,digits)
  }

  /** The constraint after the change of variables x = z0+Fu:
    * from A(z0+Fu)=b we get (AF)u = b-Az0. */
  def affineTransformed(z0:DenseVector[Double],F:DenseMatrix[Double]):EqualityConstraint =
    EqualityConstraint(A*F,b-A*z0)

  /** This equality constraint Ax=b as a list of inequalities
    * row_i(A)'x <= b_i+tol and row_i(A)'x >= b_i-tol.
    *
    * The tolerance tol matters e.g. in phase I analysis, where these become
    * row_i(A)'x <= b_i+tol+s and row_i(A)'x >= b_i-tol-s with a slack
    * variable s that is pushed below zero to obtain a strictly feasible
    * point for the inequality constraints.
    */
  def asInequalities(tol:Double):List[LinearConstraint] = {
    val n = A.rows
    val dim = A.cols
    (0 until n).toList.flatMap { i =>
      val row_iA = A(i,::)
      val id1 = "row_"+i+"(A)'x <= "+b(i)
      val id2 = "row_"+i+"(A)'x >= "+b(i)
      // The ">=" constraint is encoded by negating both sides.
      List(
        LinearConstraint(id1,dim,b(i)+tol,0.0,row_iA.t),
        LinearConstraint(id2,dim,-b(i)+tol,0.0,-row_iA.t)
      )
    }
  }

  /** The same constraint after numSlacks slack variables are appended that
    * are unconstrained by the equalities: numSlacks zero columns are added
    * to A while b is unchanged. */
  def withSlackVariables(numSlacks:Int):EqualityConstraint =
    EqualityConstraint(MatrixUtils.addZeroColumns(A,numSlacks),b)
}
object EqualityConstraint {
  /** Convenience factory mirroring the primary constructor. */
  def apply(A:DenseMatrix[Double], b:DenseVector[Double]) = new EqualityConstraint(A,b)
} | spyqqqdia/cvx | src/main/scala/cvx/EqualityConstraint.scala | Scala | mit | 4,170 |
package views.json.api.pagination.PageInfo
import play.api.libs.json.{JsValue,Json}
import models.pagination.PageInfo
/** Renders a [[PageInfo]] as the JSON object the API exposes:
  * an object with "offset", "limit" and "total" fields.
  */
object show {
  def apply(info: PageInfo): JsValue = {
    // Field names are part of the public API contract; do not rename.
    Json.obj(
      "offset" -> info.offset,
      "limit" -> info.limit,
      "total" -> info.total
    )
  }
}
| overview/overview-server | web/app/views/api/pagination/PageInfo/show.json.scala | Scala | agpl-3.0 | 272 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.stream.sql
import org.apache.flink.table.planner.runtime.stream.FsStreamingSinkITCaseBase
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import scala.collection.Seq
/**
 * Checkpoint test for the file system table factory with the testcsv format,
 * parameterized over bulk-writer vs. row-encoded writing.
 */
@RunWith(classOf[Parameterized])
class FsStreamingSinkTestCsvITCase(useBulkWriter: Boolean) extends FsStreamingSinkITCaseBase {

  override def additionalProperties(): Array[String] = {
    // Always select the testcsv format and propagate the bulk-writer flag.
    val formatProperties = Seq(
      "'format' = 'testcsv'",
      s"'testcsv.use-bulk-writer' = '$useBulkWriter'")
    // Without a bulk writer a 1-byte rolling threshold is added — presumably
    // to force frequent file rolls in the test; confirm against the base case.
    val rollingProperties =
      if (useBulkWriter) Seq.empty[String] else Seq("'sink.rolling-policy.file-size' = '1'")
    super.additionalProperties() ++ formatProperties ++ rollingProperties
  }
}
object FsStreamingSinkTestCsvITCase {
  // JUnit parameter source: run the suite once with and once without bulk writers.
  @Parameterized.Parameters(name = "useBulkWriter-{0}")
  def parameters(): java.util.Collection[Boolean] = {
    java.util.Arrays.asList(true, false)
  }
}
| GJL/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/runtime/stream/sql/FsStreamingSinkTestCsvITCase.scala | Scala | apache-2.0 | 1,745 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.k8s
import io.fabric8.kubernetes.api.model.{DoneablePod, Pod, PodBuilder}
import io.fabric8.kubernetes.client.KubernetesClient
import io.fabric8.kubernetes.client.dsl.PodResource
import org.mockito.{Mock, MockitoAnnotations}
import org.mockito.ArgumentMatchers.{any, eq => meq}
import org.mockito.Mockito.{never, times, verify, when}
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.scalatest.BeforeAndAfter
import org.apache.spark.{SecurityManager, SparkConf, SparkFunSuite}
import org.apache.spark.deploy.k8s.{KubernetesExecutorConf, SparkPod}
import org.apache.spark.deploy.k8s.Config._
import org.apache.spark.deploy.k8s.Constants._
import org.apache.spark.deploy.k8s.Fabric8Aliases._
import org.apache.spark.scheduler.cluster.k8s.ExecutorLifecycleTestUtils._
import org.apache.spark.util.ManualClock
/** Tests for ExecutorPodsAllocator: batched executor pod creation driven by
  * pod snapshot updates, with mocked Kubernetes client interactions. */
class ExecutorPodsAllocatorSuite extends SparkFunSuite with BeforeAndAfter {

  private val driverPodName = "driver"

  // Fake driver pod; the allocator looks it up by name via the mocked client.
  private val driverPod = new PodBuilder()
    .withNewMetadata()
    .withName(driverPodName)
    .addToLabels(SPARK_APP_ID_LABEL, TEST_SPARK_APP_ID)
    .addToLabels(SPARK_ROLE_LABEL, SPARK_POD_DRIVER_ROLE)
    .withUid("driver-pod-uid")
    .endMetadata()
    .build()

  private val conf = new SparkConf().set(KUBERNETES_DRIVER_POD_NAME, driverPodName)

  // Batch size/delay come from config defaults; timeout mirrors the allocator's rule.
  private val podAllocationSize = conf.get(KUBERNETES_ALLOCATION_BATCH_SIZE)
  private val podAllocationDelay = conf.get(KUBERNETES_ALLOCATION_BATCH_DELAY)
  private val podCreationTimeout = math.max(podAllocationDelay * 5, 60000L)

  private val secMgr = new SecurityManager(conf)

  // Manual clock lets tests advance time past the pod-creation timeout.
  private var waitForExecutorPodsClock: ManualClock = _

  @Mock
  private var kubernetesClient: KubernetesClient = _

  @Mock
  private var podOperations: PODS = _

  @Mock
  private var labeledPods: LABELED_PODS = _

  @Mock
  private var driverPodOperations: PodResource[Pod, DoneablePod] = _

  @Mock
  private var executorBuilder: KubernetesExecutorBuilder = _

  private var snapshotsStore: DeterministicExecutorPodsSnapshotsStore = _

  private var podsAllocatorUnderTest: ExecutorPodsAllocator = _

  before {
    // Wire the mocks so pod lookups/builds succeed, then start the allocator.
    MockitoAnnotations.initMocks(this)
    when(kubernetesClient.pods()).thenReturn(podOperations)
    when(podOperations.withName(driverPodName)).thenReturn(driverPodOperations)
    when(driverPodOperations.get).thenReturn(driverPod)
    when(executorBuilder.buildFromFeatures(any(classOf[KubernetesExecutorConf]), meq(secMgr),
      meq(kubernetesClient))).thenAnswer(executorPodAnswer())
    snapshotsStore = new DeterministicExecutorPodsSnapshotsStore()
    waitForExecutorPodsClock = new ManualClock(0L)
    podsAllocatorUnderTest = new ExecutorPodsAllocator(
      conf, secMgr, executorBuilder, kubernetesClient, snapshotsStore, waitForExecutorPodsClock)
    podsAllocatorUnderTest.start(TEST_SPARK_APP_ID)
  }

  test("Initially request executors in batches. Do not request another batch if the" +
    " first has not finished.") {
    podsAllocatorUnderTest.setTotalExpectedExecutors(podAllocationSize + 1)
    snapshotsStore.replaceSnapshot(Seq.empty[Pod])
    snapshotsStore.notifySubscribers()
    // Exactly one batch of creations; the (batchSize+1)-th pod must wait.
    for (nextId <- 1 to podAllocationSize) {
      verify(podOperations).create(podWithAttachedContainerForId(nextId))
    }
    verify(podOperations, never()).create(podWithAttachedContainerForId(podAllocationSize + 1))
  }

  test("Request executors in batches. Allow another batch to be requested if" +
    " all pending executors start running.") {
    podsAllocatorUnderTest.setTotalExpectedExecutors(podAllocationSize + 1)
    snapshotsStore.replaceSnapshot(Seq.empty[Pod])
    snapshotsStore.notifySubscribers()
    // All but one executor of the first batch come up: still no new batch.
    for (execId <- 1 until podAllocationSize) {
      snapshotsStore.updatePod(runningExecutor(execId))
    }
    snapshotsStore.notifySubscribers()
    verify(podOperations, never()).create(podWithAttachedContainerForId(podAllocationSize + 1))
    // Last pending executor runs: next batch (one pod) is requested.
    snapshotsStore.updatePod(runningExecutor(podAllocationSize))
    snapshotsStore.notifySubscribers()
    verify(podOperations).create(podWithAttachedContainerForId(podAllocationSize + 1))
    // A duplicate running-update must not trigger further creations.
    snapshotsStore.updatePod(runningExecutor(podAllocationSize))
    snapshotsStore.notifySubscribers()
    verify(podOperations, times(podAllocationSize + 1)).create(any(classOf[Pod]))
  }

  test("When a current batch reaches error states immediately, re-request" +
    " them on the next batch.") {
    podsAllocatorUnderTest.setTotalExpectedExecutors(podAllocationSize)
    snapshotsStore.replaceSnapshot(Seq.empty[Pod])
    snapshotsStore.notifySubscribers()
    for (execId <- 1 until podAllocationSize) {
      snapshotsStore.updatePod(runningExecutor(execId))
    }
    // One executor fails: a replacement with a fresh id is requested.
    val failedPod = failedExecutorWithoutDeletion(podAllocationSize)
    snapshotsStore.updatePod(failedPod)
    snapshotsStore.notifySubscribers()
    verify(podOperations).create(podWithAttachedContainerForId(podAllocationSize + 1))
  }

  test("When an executor is requested but the API does not report it in a reasonable time, retry" +
    " requesting that executor.") {
    podsAllocatorUnderTest.setTotalExpectedExecutors(1)
    snapshotsStore.replaceSnapshot(Seq.empty[Pod])
    snapshotsStore.notifySubscribers()
    snapshotsStore.replaceSnapshot(Seq.empty[Pod])
    // Advance past the creation timeout so the unseen pod is deemed lost.
    waitForExecutorPodsClock.setTime(podCreationTimeout + 1)
    when(podOperations
      .withLabel(SPARK_APP_ID_LABEL, TEST_SPARK_APP_ID))
      .thenReturn(podOperations)
    when(podOperations
      withLabel(SPARK_ROLE_LABEL, SPARK_POD_EXECUTOR_ROLE))
      .thenReturn(podOperations)
    when(podOperations
      .withLabel(SPARK_EXECUTOR_ID_LABEL, "1"))
      .thenReturn(labeledPods)
    snapshotsStore.notifySubscribers()
    // The timed-out pod is deleted and a replacement is created.
    verify(labeledPods).delete()
    verify(podOperations).create(podWithAttachedContainerForId(2))
  }

  // Builds the executor pod for whatever executor id the allocator asked for.
  private def executorPodAnswer(): Answer[SparkPod] =
    (invocation: InvocationOnMock) => {
      val k8sConf: KubernetesExecutorConf = invocation.getArgument(0)
      executorPodWithId(k8sConf.executorId.toInt)
    }
}
| icexelloss/spark | resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsAllocatorSuite.scala | Scala | apache-2.0 | 6,846 |
package im.actor.server
import com.google.protobuf.ByteString
import im.actor.api.rpc.files.{ Avatar ⇒ ApiAvatar, AvatarImage ⇒ ApiAvatarImage, FileLocation ⇒ ApiFileLocation }
import im.actor.server.file.{ Avatar, AvatarImage, FileLocation }
import scala.language.implicitConversions
/** Implicit two-way conversions between the API-level models (Api*) and the
  * server-side file/avatar models, including their Option-wrapped forms. */
object ApiConversions {

  implicit def apiToFileLocation(fl: ApiFileLocation): FileLocation =
    FileLocation(fl.fileId, fl.accessHash)

  // API fileSize is an Int; widen to Long for the server model.
  implicit def apiToAvatarImage(image: ApiAvatarImage): AvatarImage =
    AvatarImage(image.fileLocation, image.width, image.height, image.fileSize.toLong)

  implicit def apiToAvatarImage(imageOpt: Option[ApiAvatarImage]): Option[AvatarImage] =
    imageOpt map apiToAvatarImage

  implicit def apiToAvatar(avatar: ApiAvatar): Avatar =
    Avatar(avatar.smallImage, avatar.largeImage, avatar.fullImage)

  implicit def apiOptToAvatar(avatarOpt: Option[ApiAvatar]): Option[Avatar] =
    avatarOpt map apiToAvatar

  implicit def fileLocationToApi(fl: FileLocation): ApiFileLocation =
    ApiFileLocation(fl.fileId, fl.accessHash)

  // NOTE(review): fileSize is narrowed Long -> Int here; a file larger than
  // Int.MaxValue bytes would overflow — confirm sizes are bounded upstream.
  implicit def avatarImageToApi(image: AvatarImage): ApiAvatarImage =
    ApiAvatarImage(image.fileLocation, image.width, image.height, image.fileSize.toInt)

  implicit def avatarImageOptToApi(imageOpt: Option[AvatarImage]): Option[ApiAvatarImage] =
    imageOpt map avatarImageToApi

  implicit def avatarToApi(avatar: Avatar): ApiAvatar =
    ApiAvatar(avatar.small, avatar.large, avatar.full)

  implicit def avatarOptToApi(avatarOpt: Option[Avatar]): Option[ApiAvatar] =
    avatarOpt map avatarToApi
}
| daodaoliang/actor-platform | actor-server/actor-core/src/main/scala/im/actor/server/ApiConversions.scala | Scala | mit | 1,572 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.catalyst.analysis.{Analyzer, FunctionRegistry}
import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTablePartition, FunctionResourceLoader, GlobalTempViewManager, SessionCatalog}
import org.apache.spark.sql.catalyst.expressions.{And, AttributeReference, BoundReference, Expression, InterpretedPredicate, PredicateSubquery, ScalarSubquery}
import org.apache.spark.sql.catalyst.optimizer.Optimizer
import org.apache.spark.sql.catalyst.parser.{ParserInterface, SqlBaseParser}
import org.apache.spark.sql.catalyst.parser.ParserUtils.{string, _}
import org.apache.spark.sql.catalyst.parser.SqlBaseParser.{CreateTableContext, ShowTablesContext}
import org.apache.spark.sql.catalyst.plans.logical.{Filter, LogicalPlan, SubqueryAlias}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.catalyst.{CatalystConf, TableIdentifier}
import org.apache.spark.sql.execution.command.table.{CarbonExplainCommand, CarbonShowTablesCommand}
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.execution.strategy.{CarbonLateDecodeStrategy, DDLStrategy, StreamingTableStrategy}
import org.apache.spark.sql.execution.{SparkOptimizer, SparkSqlAstBuilder}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.optimizer.{CarbonIUDRule, CarbonLateDecodeRule, CarbonUDFTransformRule}
import org.apache.spark.sql.parser.{CarbonHelperSqlAstBuilder, CarbonSpark2SqlParser, CarbonSparkSqlParser, CarbonSparkSqlParserUtil}
import org.apache.spark.sql.{CarbonDatasourceHadoopRelation, CarbonEnv, ExperimentalMethods, SparkSession, Strategy}
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datamap.DataMapStoreManager
import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.spark.util.CarbonScalaUtil
/**
 * Hive session catalog that is Carbon-aware: lookups refresh a cached Carbon
 * relation whenever the CarbonTable held in the Carbon metastore differs from
 * the one cached in the relation, and ALTER TABLE operations are routed
 * through the Hive client.
 *
 * @param externalCatalog underlying Hive external catalog
 * @param globalTempViewManager manager of global temporary views
 * @param sparkSession owning Spark session
 * @param functionResourceLoader loader for function resources
 * @param functionRegistry registry of SQL functions
 * @param conf SQL configuration
 * @param hadoopConf Hadoop configuration
 */
class CarbonHiveSessionCatalog(
    externalCatalog: HiveExternalCatalog,
    globalTempViewManager: GlobalTempViewManager,
    sparkSession: SparkSession,
    functionResourceLoader: FunctionResourceLoader,
    functionRegistry: FunctionRegistry,
    conf: SQLConf,
    hadoopConf: Configuration)
  extends HiveSessionCatalog(
    externalCatalog,
    globalTempViewManager,
    sparkSession,
    functionResourceLoader,
    functionRegistry,
    conf,
    hadoopConf) with CarbonSessionCatalog {

  // CarbonEnv is created and initialized lazily, once per catalog.
  private lazy val carbonEnv = {
    val env = new CarbonEnv
    env.init(sparkSession)
    env
  }

  /**
   * return's the carbonEnv instance
   * @return the lazily initialized CarbonEnv
   */
  override def getCarbonEnv() : CarbonEnv = {
    carbonEnv
  }

  /** Renames the table in Hive and updates the serde properties
    * (tableName/dbName/tablePath) to the new identity. */
  def alterTableRename(oldTableIdentifier: TableIdentifier,
      newTableIdentifier: TableIdentifier,
      newTablePath: String): Unit = {
    getClient().runSqlHive(
      s"ALTER TABLE ${ oldTableIdentifier.database.get }.${ oldTableIdentifier.table }" +
      s" RENAME TO ${ oldTableIdentifier.database.get }.${ newTableIdentifier.table }")
    getClient().runSqlHive(
      s"ALTER TABLE ${ oldTableIdentifier.database.get }.${ newTableIdentifier.table }" +
      s" SET SERDEPROPERTIES" +
      s"('tableName'='${ newTableIdentifier.table }', " +
      s"'dbName'='${ oldTableIdentifier.database.get }', 'tablePath'='${ newTablePath }')")
  }

  /** Stores the (serialized) schema parts as TBLPROPERTIES via the Hive
    * client. The `cols` parameter is unused here — kept for interface
    * compatibility with the add/drop/change-datatype variants. */
  def alterTable(tableIdentifier: TableIdentifier,
      schemaParts: String,
      cols: Option[Seq[org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema]])
  : Unit = {
    getClient()
      .runSqlHive(s"ALTER TABLE ${tableIdentifier.database.get}.${tableIdentifier.table } " +
                  s"SET TBLPROPERTIES(${ schemaParts })")
  }

  // The three schema-evolution entry points all reduce to a TBLPROPERTIES update.
  def alterAddColumns(tableIdentifier: TableIdentifier,
      schemaParts: String,
      cols: Option[Seq[org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema]])
  : Unit = {
    alterTable(tableIdentifier, schemaParts, cols)
  }

  def alterDropColumns(tableIdentifier: TableIdentifier,
      schemaParts: String,
      cols: Option[Seq[org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema]])
  : Unit = {
    alterTable(tableIdentifier, schemaParts, cols)
  }

  def alterColumnChangeDataType(tableIdentifier: TableIdentifier,
      schemaParts: String,
      cols: Option[Seq[org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema]])
  : Unit = {
    alterTable(tableIdentifier, schemaParts, cols)
  }

  // Initialize all listeners to the Operation bus.
  CarbonEnv.init(sparkSession)

  /**
   * This method will invalidate carbonrelation from cache if carbon table is
   * updated in carbon catalog; the relation is then looked up a second time so
   * the caller sees the refreshed plan.
   *
   * @param name table identifier to resolve
   * @param alias optional alias for the relation
   * @return resolved (and possibly refreshed) logical plan
   */
  override def lookupRelation(name: TableIdentifier,
      alias: Option[String]): LogicalPlan = {
    val rtnRelation = super.lookupRelation(name, alias)
    var toRefreshRelation = false
    // Only Carbon-backed relations (plain or aliased) can become stale.
    rtnRelation match {
      case SubqueryAlias(_,
          LogicalRelation(carbonDatasourceHadoopRelation: CarbonDatasourceHadoopRelation, _, _), _) =>
        toRefreshRelation = refreshRelationFromCache(name, alias, carbonDatasourceHadoopRelation)
      case LogicalRelation(carbonDatasourceHadoopRelation: CarbonDatasourceHadoopRelation, _, _) =>
        toRefreshRelation = refreshRelationFromCache(name, alias, carbonDatasourceHadoopRelation)
      case _ =>
    }
    if (toRefreshRelation) {
      super.lookupRelation(name, alias)
    } else {
      rtnRelation
    }
  }

  // Returns true when the cached relation was stale and has been refreshed.
  // NOTE(review): the `alias` parameter is unused in this method.
  private def refreshRelationFromCache(identifier: TableIdentifier,
      alias: Option[String],
      carbonDatasourceHadoopRelation: CarbonDatasourceHadoopRelation): Boolean = {
    var isRefreshed = false
    val storePath = CarbonProperties.getStorePath
    carbonEnv.carbonMetastore.
      checkSchemasModifiedTimeAndReloadTable(identifier)
    val table = carbonEnv.carbonMetastore.getTableFromMetadataCache(
      carbonDatasourceHadoopRelation.carbonTable.getDatabaseName,
      carbonDatasourceHadoopRelation.carbonTable.getTableName)
    // Staleness test: table gone from the metadata cache, or its last-updated
    // timestamp differs from the one cached in the relation.
    if (table.isEmpty || (table.isDefined &&
        table.get.getTableLastUpdatedTime !=
          carbonDatasourceHadoopRelation.carbonTable.getTableLastUpdatedTime)) {
      refreshTable(identifier)
      DataMapStoreManager.getInstance().
        clearDataMaps(AbsoluteTableIdentifier.from(storePath,
          identifier.database.getOrElse("default"),
          identifier.table))
      isRefreshed = true
      logInfo(s"Schema changes have been detected for table: $identifier")
    }
    isRefreshed
  }

  /**
   * returns hive client from session state
   *
   * @return the Hive client held by the CarbonSessionState
   */
  override def getClient(): org.apache.spark.sql.hive.client.HiveClient = {
    sparkSession.sessionState.asInstanceOf[CarbonSessionState].metadataHive
  }

  override def createPartitions(
      tableName: TableIdentifier,
      parts: Seq[CatalogTablePartition],
      ignoreIfExists: Boolean): Unit = {
    // Carbon may rewrite partition specs (e.g. value formatting) before the
    // Hive catalog sees them.
    // NOTE(review): any failure (broad Exception catch) silently falls back to
    // creating the raw, un-rewritten partitions — confirm this is intentional.
    try {
      val table = CarbonEnv.getCarbonTable(tableName)(sparkSession)
      val updatedParts = CarbonScalaUtil.updatePartitions(parts, table)
      super.createPartitions(tableName, updatedParts, ignoreIfExists)
    } catch {
      case e: Exception =>
        super.createPartitions(tableName, parts, ignoreIfExists)
    }
  }

  /**
   * This is alternate way of getting partition information. It first fetches
   * all partitions from hive and then applies the filters in-process instead
   * of pushing the filters into the hive query.
   *
   * @param partitionFilters filters over the partition columns
   * @param sparkSession session used for catalog access
   * @param identifier table whose partitions are listed
   * @return the partitions matching all filters (all partitions when none given)
   */
  def getPartitionsAlternate(
      partitionFilters: Seq[Expression],
      sparkSession: SparkSession,
      identifier: TableIdentifier) = {
    val allPartitions = sparkSession.sessionState.catalog.listPartitions(identifier)
    val catalogTable = sparkSession.sessionState.catalog.getTableMetadata(identifier)
    val partitionSchema = catalogTable.partitionSchema
    if (partitionFilters.nonEmpty) {
      // Bind attribute references to positions in the partition schema so the
      // predicate can be evaluated against each partition's row representation.
      val boundPredicate =
        InterpretedPredicate.create(partitionFilters.reduce(And).transform {
          case att: AttributeReference =>
            val index = partitionSchema.indexWhere(_.name == att.name)
            BoundReference(index, partitionSchema(index).dataType, nullable = true)
        })
      allPartitions.filter { p => boundPredicate(p.toRow(partitionSchema)) }
    } else {
      allPartitions
    }
  }

  /**
   * Update the storageformat with new location information.
   * NOTE(review): newTableName and dbName are unused here; only the location
   * URI is rewritten.
   */
  override def updateStorageLocation(
      path: Path,
      storage: CatalogStorageFormat,
      newTableName: String,
      dbName: String): CatalogStorageFormat = {
    storage.copy(locationUri = Some(path.toString))
  }
}
/**
 * Session state implementation that overrides the SQL parser and installs the
 * Carbon planner strategies, optimizer rules and analyzer rules.
 * @param sparkSession owning Spark session
 */
class CarbonSessionState(sparkSession: SparkSession) extends HiveSessionState(sparkSession) {

  // Carbon's parser handles Carbon-specific DDL before delegating to Spark's.
  override lazy val sqlParser: ParserInterface = new CarbonSparkSqlParser(conf, sparkSession)

  // Register the Carbon strategies/optimizations on the session's
  // experimental methods so Spark's planner picks them up.
  experimentalMethods.extraStrategies = extraStrategies
  experimentalMethods.extraOptimizations = extraOptimizations

  // Planner strategies: streaming-table restrictions, late decode, Carbon DDL.
  def extraStrategies: Seq[Strategy] = {
    Seq(
      new StreamingTableStrategy(sparkSession),
      new CarbonLateDecodeStrategy,
      new DDLStrategy(sparkSession)
    )
  }

  def extraOptimizations: Seq[Rule[LogicalPlan]] = {
    Seq(new CarbonIUDRule,
      new CarbonUDFTransformRule,
      new CarbonLateDecodeRule)
  }

  override lazy val optimizer: Optimizer = new CarbonOptimizer(catalog, conf, experimentalMethods)

  // Extension point for subclasses; prepended to the internal rules when non-empty.
  def extendedAnalyzerRules: Seq[Rule[LogicalPlan]] = Nil

  def internalAnalyzerRules: Seq[Rule[LogicalPlan]] = {
    catalog.ParquetConversions ::
    catalog.OrcConversions ::
    CarbonPreInsertionCasts(sparkSession) ::
    CarbonIUDAnalysisRule(sparkSession) ::
    AnalyzeCreateTable(sparkSession) ::
    PreprocessTableInsertion(conf) ::
    DataSourceAnalysis(conf) ::
    (if (conf.runSQLonFile) {
      new ResolveDataSource(sparkSession) :: Nil
    } else { Nil })
  }

  // Wrap Spark's Analyzer in CarbonAnalyzer so the pre-aggregate rules run
  // after standard resolution.
  override lazy val analyzer: Analyzer =
    new CarbonAnalyzer(catalog, conf, sparkSession,
      new Analyzer(catalog, conf) {
        override val extendedResolutionRules =
          if (extendedAnalyzerRules.nonEmpty) {
            extendedAnalyzerRules ++ internalAnalyzerRules
          } else {
            internalAnalyzerRules
          }
        override val extendedCheckRules = Seq(
          PreWriteCheck(conf, catalog))
      }
  )

  /**
   * Internal catalog for managing table and database states.
   */
  override lazy val catalog = {
    new CarbonHiveSessionCatalog(
      sparkSession.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog],
      sparkSession.sharedState.globalTempViewManager,
      sparkSession,
      functionResourceLoader,
      functionRegistry,
      conf,
      newHadoopConf())
  }
}
/** Analyzer that runs the wrapped Spark analyzer first and then applies the
  * Carbon pre-aggregate rules (data-loading rewrite, then query rewrite) on
  * the resolved plan.
  */
class CarbonAnalyzer(catalog: SessionCatalog,
    conf: CatalystConf,
    sparkSession: SparkSession,
    analyzer: Analyzer) extends Analyzer(catalog, conf) {
  override def execute(plan: LogicalPlan): LogicalPlan = {
    // Same pipeline as before, expressed with vals instead of reassigning a var.
    val analyzed = analyzer.execute(plan)
    val withLoadingRules = CarbonPreAggregateDataLoadingRules(sparkSession).apply(analyzed)
    CarbonPreAggregateQueryRules(sparkSession).apply(withLoadingRules)
  }
}
/**
 * Optimizer that pre-processes scalar/predicate subqueries (flagging Carbon
 * relations via [[CarbonOptimizerUtil]]) before running Spark's optimizer.
 */
class CarbonOptimizer(
    catalog: SessionCatalog,
    conf: SQLConf,
    experimentalMethods: ExperimentalMethods)
  extends SparkOptimizer(catalog, conf, experimentalMethods) {
  override def execute(plan: LogicalPlan): LogicalPlan = {
    val transFormedPlan: LogicalPlan = CarbonOptimizerUtil.transformForScalarSubQuery(plan)
    super.execute(transFormedPlan)
  }
}
object CarbonOptimizerUtil {

  /**
   * In case of a scalar or predicate subquery, add a flag to each Carbon
   * relation so the decoder plan is skipped in the optimizer rule and the
   * whole plan is optimized at once.
   *
   * @param plan the logical plan to pre-process
   * @return the plan with Carbon relations inside subqueries flagged
   */
  def transformForScalarSubQuery(plan: LogicalPlan) : LogicalPlan = {
    val transFormedPlan = plan.transform {
      case filter: Filter =>
        filter.transformExpressions {
          case s: ScalarSubquery =>
            ScalarSubquery(markCarbonRelationsAsSubquery(s.plan), s.children, s.exprId)
          case p: PredicateSubquery =>
            PredicateSubquery(markCarbonRelationsAsSubquery(p.plan), p.children, p.nullAware, p.exprId)
        }
    }
    transFormedPlan
  }

  // Flags every CarbonDatasourceHadoopRelation in `plan` as participating in
  // a subquery. Extracted to avoid duplicating the transform for scalar and
  // predicate subqueries.
  private def markCarbonRelationsAsSubquery(plan: LogicalPlan): LogicalPlan = {
    plan.transform {
      case lr: LogicalRelation
        if lr.relation.isInstanceOf[CarbonDatasourceHadoopRelation] =>
        lr.relation.asInstanceOf[CarbonDatasourceHadoopRelation].isSubquery += true
        lr
    }
  }
}
/**
 * AST builder that intercepts CREATE TABLE, SHOW TABLES and EXPLAIN so
 * Carbon-format tables and commands are handled by Carbon, while everything
 * else falls through to the stock SparkSqlAstBuilder.
 */
class CarbonSqlAstBuilder(conf: SQLConf, parser: CarbonSpark2SqlParser, sparkSession: SparkSession)
  extends SparkSqlAstBuilder(conf) {

  val helper = new CarbonHelperSqlAstBuilder(conf, parser, sparkSession)

  override def visitCreateTable(ctx: CreateTableContext): LogicalPlan = {
    val fileStorage = CarbonSparkSqlParserUtil.getFileStorage(ctx.createFileFormat)
    // Route to Carbon's table-creation path only for Carbon storage formats;
    // both quoted and unquoted spellings are accepted.
    if (fileStorage.equalsIgnoreCase("'carbondata'") ||
        fileStorage.equalsIgnoreCase("carbondata") ||
        fileStorage.equalsIgnoreCase("'carbonfile'") ||
        fileStorage.equalsIgnoreCase("'org.apache.carbondata.format'")) {
      val createTableTuple = (ctx.createTableHeader, ctx.skewSpec, ctx.bucketSpec,
        ctx.partitionColumns, ctx.columns, ctx.tablePropertyList, ctx.locationSpec(),
        Option(ctx.STRING()).map(string),
        ctx.AS, ctx.query, fileStorage)
      helper.createCarbonTable(createTableTuple)
    } else {
      super.visitCreateTable(ctx)
    }
  }

  override def visitShowTables(ctx: ShowTablesContext): LogicalPlan = {
    withOrigin(ctx) {
      // When showing datamaps is disabled, substitute Carbon's command which
      // filters datamap tables out of the listing.
      if (CarbonProperties.getInstance()
        .getProperty(CarbonCommonConstants.CARBON_SHOW_DATAMAPS,
          CarbonCommonConstants.CARBON_SHOW_DATAMAPS_DEFAULT).toBoolean) {
        super.visitShowTables(ctx)
      } else {
        CarbonShowTablesCommand(
          Option(ctx.db).map(_.getText),
          Option(ctx.pattern).map(string))
      }
    }
  }

  override def visitExplain(ctx: SqlBaseParser.ExplainContext): LogicalPlan = {
    CarbonExplainCommand(super.visitExplain(ctx))
  }
}
| sgururajshetty/carbondata | integration/spark2/src/main/spark2.1/org/apache/spark/sql/hive/CarbonSessionState.scala | Scala | apache-2.0 | 15,614 |
package com.mentatlabs.nsa
package scalac
package options
/* -Yclosure-elim
* ==============
* 2.1.5 - 2.5.1: Perform closure elimination // previously -Xcloselim
* 2.6.0 - 2.8.2: Perform closure elimination
* 2.9.0 - 2.11.8: Perform closure elimination.
* 2.12.0: !! missing !!
*/
/** Boolean scalac option `-Yclosure-elim` (closure elimination), available since 2.6.0. */
case object ScalacYClosureElim
  extends ScalacOptionBoolean("-Yclosure-elim", ScalacVersions.`2.6.0`)
| mentat-labs/sbt-nsa | nsa-core/src/main/scala/com/mentatlabs/nsa/scalac/options/private/ScalacYClosureElim.scala | Scala | bsd-3-clause | 416 |
package com.twitter.finagle.http
import com.google.common.base.Charsets
import com.twitter.collection.RecordSchema
import com.twitter.finagle.http.netty.Bijections
import com.twitter.io.Reader
import org.jboss.netty.buffer.{ChannelBuffer, ChannelBuffers}
import org.jboss.netty.handler.codec.embedder.{DecoderEmbedder, EncoderEmbedder}
import org.jboss.netty.handler.codec.http._
import Bijections._
/**
* Rich HttpResponse
*/
abstract class Response extends Message {

  /**
   * Arbitrary user-defined context associated with this response object.
   * [[com.twitter.collection.RecordSchema.Record RecordSchema.Record]] is
   * used here, rather than [[com.twitter.finagle.Context Context]] or similar
   * out-of-band mechanisms, to make the connection between the response and its
   * associated context explicit.
   */
  val ctx: Response.Schema.Record = Response.Schema.newRecord()

  // A Response is never a request.
  def isRequest = false

  // Status accessors, bridging between Finagle's Status and netty's
  // HttpResponseStatus via the Bijections conversions.
  def status: Status = from(getStatus)
  def status_=(value: Status): Unit = { setStatus(from(value)) }
  def statusCode: Int = getStatus.getCode
  def statusCode_=(value: Int): Unit = { setStatus(HttpResponseStatus.valueOf(value)) }

  // Java-friendly getter/setter aliases.
  def getStatusCode(): Int = statusCode
  def setStatusCode(value: Int): Unit = { statusCode = value }

  /** Encode as an HTTP message */
  def encodeString(): String = {
    val encoder = new EncoderEmbedder[ChannelBuffer](new HttpResponseEncoder)
    encoder.offer(httpResponse)
    val buffer = encoder.poll()
    buffer.toString(Charsets.UTF_8)
  }

  override def toString =
    "Response(\\"" + version + " " + status + "\\")"

  // The underlying netty response; concrete subclasses supply it.
  protected[finagle] def httpResponse: HttpResponse
  protected[finagle] def getHttpResponse(): HttpResponse = httpResponse
  protected[finagle] def httpMessage: HttpMessage = httpResponse

  protected[finagle] def getStatus(): HttpResponseStatus = httpResponse.getStatus()
  protected[finagle] def setStatus(status: HttpResponseStatus): Unit = {
    httpResponse.setStatus(status)
  }
}
object Response {

  /**
   * Utility class to make it possible to mock/spy a Response.
   */
  class Ok extends Response {
    val httpResponse = apply.httpResponse
  }

  /**
   * [[com.twitter.collection.RecordSchema RecordSchema]] declaration, used
   * to generate [[com.twitter.collection.RecordSchema.Record Record]] instances
   * for Response.ctx.
   */
  val Schema: RecordSchema = new RecordSchema

  /** Decode a [[Response]] from a String */
  def decodeString(s: String): Response = {
    decodeBytes(s.getBytes(Charsets.UTF_8))
  }

  /** Decode a [[Response]] from a byte array */
  def decodeBytes(b: Array[Byte]): Response = {
    // Max sizes are unbounded here: callers are trusted with the full payload.
    val decoder = new DecoderEmbedder(
      new HttpResponseDecoder(Int.MaxValue, Int.MaxValue, Int.MaxValue))
    decoder.offer(ChannelBuffers.wrappedBuffer(b))
    val res = decoder.poll().asInstanceOf[HttpResponse]
    assert(res ne null)
    Response(res)
  }

  /** Create Response. */
  def apply(): Response =
    apply(Version.Http11, Status.Ok)

  /** Create Response from version and status. */
  def apply(version: Version, status: Status): Response =
    apply(new DefaultHttpResponse(from(version), from(status)))

  /**
   * Create a Response from version, status, and Reader.
   */
  def apply(version: Version, status: Status, reader: Reader): Response = {
    val res = new DefaultHttpResponse(from(version), from(status))
    // A Reader-backed body implies chunked transfer encoding.
    res.setChunked(true)
    apply(res, reader)
  }

  // Wrap a raw netty response.
  private[http] def apply(response: HttpResponse): Response =
    new Response {
      val httpResponse = response
    }

  // Wrap a raw netty response with an explicit body Reader.
  private[http] def apply(response: HttpResponse, readerIn: Reader): Response =
    new Response {
      val httpResponse = response
      override val reader = readerIn
    }

  /** Create Response from status. */
  def apply(status: Status): Response =
    apply(Version.Http11, status)

  /** Create Response from Request. */
  private[http] def apply(httpRequest: Request): Response =
    new Response {
      // Mirror the request's HTTP version; default status is 200 OK.
      final val httpResponse =
        new DefaultHttpResponse(from(httpRequest.version), HttpResponseStatus.OK)
    }
}
| adriancole/finagle | finagle-http/src/main/scala/com/twitter/finagle/http/Response.scala | Scala | apache-2.0 | 4,069 |
package com.kakao.mango.hashing
import java.nio.charset.StandardCharsets.UTF_8
import com.kakao.shaded.guava.hash.{HashFunction, Hashing}
/** A simple Scala interface for Guava's hash implementation.
* returns hexstrings when called, and byte arrays can be obtained using bytes().
*
* {{{
* import com.kakao.mango.hashing._
*
* println(Md5("hi")) // prints "49f68a5c8493ec2c0bf489821c21fc3b"
* println(Murmur3_32("hi")) // prints "1a8b6fc7"
* }}}
*/
sealed trait Hash {
  // The underlying Guava hash function; supplied by each concrete instance.
  val function: HashFunction

  /** Hash a UTF-8 string, returning the digest as a hex string. */
  def apply(str: String): String = function.hashString(str, UTF_8).toString
  /** Hash raw bytes, returning the digest as a hex string. */
  def apply(bytes: Array[Byte]): String = function.hashBytes(bytes).toString
  /** Hash a UTF-8 string, returning the raw digest bytes. */
  def bytes(str: String): Array[Byte] = function.hashString(str, UTF_8).asBytes()
  /** Hash raw bytes, returning the raw digest bytes. */
  def bytes(bytes: Array[Byte]): Array[Byte] = function.hashBytes(bytes).asBytes()
}
/** 32-bit Murmur3 with an explicit seed. */
case class Murmur3_32(seed: Int) extends Hash {
  val function = Hashing.murmur3_32(seed)
}

/** 128-bit Murmur3 with an explicit seed. */
case class Murmur3_128(seed: Int) extends Hash {
  val function = Hashing.murmur3_128(seed)
}

/** MD5 digest (not cryptographically secure; use for fingerprinting only). */
object Md5 extends Hash {
  val function = Hashing.md5()
}

/** SHA-256 digest. */
object Sha256 extends Hash {
  val function = Hashing.sha256()
}

/** SHA-512 digest. */
object Sha512 extends Hash {
  val function = Hashing.sha512()
}

// Default (seed 0) instances so callers can write e.g. Murmur3_32("hi").
object Murmur3_32 extends Murmur3_32(0)
object Murmur3_128 extends Murmur3_128(0)
| kakao/mango | mango-core/src/main/scala/com/kakao/mango/hashing/Hash.scala | Scala | apache-2.0 | 1,311 |
package rewriting.rules
import ir._
import ir.ast._
import lift.arithmetic.SizeVar
import opencl.executor.{Execute, TestWithExecutor}
import opencl.ir._
import opencl.ir.pattern._
import org.junit.Assert._
import org.junit.Test
import rewriting.{Lower, Rewrite}
import rewriting.macrorules.EnablingRules
object TestRules extends TestWithExecutor
/**
 * JUnit suite exercising individual rewrite rules (fission, fusion, movement,
 * tiling, zip/join interaction) on small Lift expressions. Structural rules
 * are checked via `rewrite.isDefinedAt` plus a TypeChecker pass; executable
 * lowerings are compared against a hand-lowered gold version on the GPU.
 */
class TestRules {

  // Symbolic sizes shared by the test expressions, and a common input array.
  private val N = SizeVar("N")
  private val M = SizeVar("M")
  private val A = Array.fill[Float](128)(0.5f)

  @Test
  def extract0(): Unit = {
    val f = fun(
      ArrayTypeWSWC(Float, N),
      ArrayTypeWSWC(Float, N),
      (in1, in2) => Map(fun(x => Map(fun(y => add(x,y))) o Map(id) $ in2)) $ in1
    )
    // After extraction the body should be wrapped in a Lambda (a Let).
    val f1 = Rewrite.applyRuleAtId(f, 0, FissionRules.extractFromMap)
    TypeChecker(f1)
    assertTrue(f1.body.asInstanceOf[FunCall].f.isInstanceOf[Lambda])
  }

  @Test
  def extract1(): Unit = {
    val f = fun(
      ArrayTypeWSWC(Float, N),
      ArrayTypeWSWC(Float, N),
      (in1, in2) => Map(fun(x =>
        ReduceSeq(fun((acc, y) => add(acc, mult(x,y))), 0.0f) o Map(id) $ in2
      )) $ in1
    )
    val f1 = Rewrite.applyRuleAtId(f, 0, FissionRules.extractFromMap)
    TypeChecker(f1)
    assertTrue(f1.body.asInstanceOf[FunCall].f.isInstanceOf[Lambda])
  }

  @Test
  def mapFusionAfterExtract(): Unit = {
    val f0 = fun(
      ArrayTypeWSWC(Float, N),
      Map(plusOne) o Let(Map(id) $ _) $ _
    )
    val f1 = Rewrite.applyRuleAtId(f0, 0, FusionRules.mapFusion)
    TypeChecker(f1)
    assertTrue(f1.body.asInstanceOf[FunCall].f.isInstanceOf[Lambda])
  }

  @Test
  def mapTransposePromotion(): Unit = {
    val M = SizeVar("M")
    val N = SizeVar("N")
    val K = SizeVar("K")
    val O = SizeVar("O")
    val f = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N), K), O),
      input => Map(Transpose()) o Join() $ input
    )
    // The rule must apply and the rewritten expression must still type-check.
    assertTrue(EnablingRules.movingJoin.rewrite.isDefinedAt(f.body))
    val result = EnablingRules.movingJoin.rewrite(f.body)
    TypeChecker.check(result)
  }

  @Test
  def slidePromotion(): Unit = {
    val M = SizeVar("M")
    val N = SizeVar("N")
    val K = SizeVar("K")
    val u = SizeVar("u")
    val v = SizeVar("v")
    val f = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N), K),
      input => Slide(u,v) o Map(Join()) $ input
    )
    assertTrue(Rules.slidePromotion.rewrite.isDefinedAt(f.body))
    val result = Rules.slidePromotion.rewrite(f.body)
    TypeChecker.check(result)
  }

  @Test
  def slideSwap(): Unit = {
    val K = SizeVar("K")
    val N = SizeVar("N")
    val M = SizeVar("M")
    val n = SizeVar("n")
    val s = SizeVar("s")
    val u = SizeVar("u")
    val v = SizeVar("v")
    val f = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N), K),
      input => Slide(u, v) o Map(Map(Slide(n,s))) $ input
    )
    assertTrue(Rules.slideSwap.rewrite.isDefinedAt(f.body))
    val result = Rules.slideSwap.rewrite(f.body)
    TypeChecker.check(result)
  }

  @Test
  def joinSwap(): Unit = {
    val M = SizeVar("M")
    val N = SizeVar("N")
    val K = SizeVar("K")
    val O = SizeVar("O")
    val f = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N), K), O),
      input => Join() o Map(Map(Join())) $ input
    )
    assertTrue(Rules.joinSwap.rewrite.isDefinedAt(f.body))
    val result = Rules.joinSwap.rewrite(f.body)
    TypeChecker.check(result)
  }

  @Test
  def transposeSwap(): Unit = {
    val M = SizeVar("M")
    val N = SizeVar("N")
    val K = SizeVar("K")
    val O = SizeVar("O")
    val P = SizeVar("P")
    val f = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N), K), O), P),
      input => Map(Map(Map(Transpose()))) o Map(Transpose()) $ input
    )
    assertTrue(Rules.transposeSwap.rewrite.isDefinedAt(f.body))
    val result = Rules.transposeSwap.rewrite(f.body)
    TypeChecker.check(result)
  }

  @Test
  def slideTransposeSwap(): Unit = {
    val M = SizeVar("M")
    val N = SizeVar("N")
    val K = SizeVar("K")
    val O = SizeVar("O")
    val u = SizeVar("u")
    val v = SizeVar("v")
    val f = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N), K), O),
      input => Map(Map(Map(Slide(u,v)))) o Map(Transpose()) $ input
    )
    assertTrue(Rules.slideTransposeSwap.rewrite.isDefinedAt(f.body))
    val result = Rules.slideTransposeSwap.rewrite(f.body)
    TypeChecker.check(result)
  }

  @Test
  def slideTransposeReordering(): Unit = {
    val M = SizeVar("M")
    val N = SizeVar("N")
    val K = SizeVar("K")
    val u = SizeVar("u")
    val v = SizeVar("v")
    val f = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N), K),
      input => Map(Slide(u,v)) o Map(Transpose()) $ input
    )
    assertTrue(Rules.slideTransposeReordering.rewrite.isDefinedAt(f.body))
    val result = Rules.slideTransposeReordering.rewrite(f.body)
    TypeChecker.check(result)
  }

  @Test
  def transposeMapJoinReordering(): Unit = {
    val M = SizeVar("M")
    val N = SizeVar("N")
    val K = SizeVar("K")
    val f = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N), K),
      input => Transpose() o Map(Join()) $ input
    )
    assertTrue(Rules.transposeMapJoinReordering.rewrite.isDefinedAt(f.body))
    val result = Rules.transposeMapJoinReordering.rewrite(f.body)
    TypeChecker.check(result)
  }

  @Test
  def slideTiling(): Unit = {
    val N = SizeVar("N")
    val M = SizeVar("M")
    // n/s need to be positive
    val n = SizeVar("n")
    val s = SizeVar("s")
    val f = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N),
      input => Slide(n, s) $ input
    )
    // Tiling is legal only for tile steps >= s; s-2 must be rejected.
    assertTrue(Rules.slideTiling(s).rewrite.isDefinedAt(f.body))
    assertTrue(Rules.slideTiling(s+1).rewrite.isDefinedAt(f.body))
    assertFalse(Rules.slideTiling(s-2).rewrite.isDefinedAt(f.body))
    val result = Rules.slideTiling(s+1).rewrite(f.body)
    TypeChecker.check(result)
  }

  @Test
  def mapJoin(): Unit = {
    val N = SizeVar("N")
    val M = SizeVar("M")
    val f = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N),
      input => Map(id) o Join() $ input
    )
    assertTrue(EnablingRules.movingJoin.rewrite.isDefinedAt(f.body))
    val result = EnablingRules.movingJoin.rewrite(f.body)
    TypeChecker.check(result)
  }

  @Test
  def simpleMapTest(): Unit = {
    def f = fun(
      ArrayTypeWSWC(Float, N),
      input => Map(id) $ input
    )
    def goldF = fun(
      ArrayTypeWSWC(Float, N),
      input => MapGlb(id) $ input
    )
    // Both sequential and global lowerings must match the gold output.
    val options = Seq(
      Rewrite.applyRuleUntilCannot(f, OpenCLRules.mapSeq),
      Rewrite.applyRuleUntilCannot(f, OpenCLRules.mapGlb)
    )
    val (gold, _) = Execute(128)[Array[Float]](goldF, A)
    options.foreach(l => {
      val (result, _) = Execute(128)[Array[Float]](l, A)
      assertArrayEquals(l + " failed", gold, result, 0.0f)
    })
  }

  @Test
  def slightlyMoreComplexMap(): Unit = {
    val goldF = fun(
      ArrayTypeWSWC(Float, N),
      Float,
      (input, a) => MapGlb(fun(x => add(x, a))) $ input
    )
    // Note the argument order is flipped relative to goldF; add is commutative.
    def f = fun(
      ArrayTypeWSWC(Float, N),
      Float,
      (input, a) => Map(fun(x => add(a, x))) $ input
    )
    val a = 1.0f
    val (gold, _) = Execute(128)[Array[Float]](goldF, A, a)
    val lambdaOptions = Seq(
      Rewrite.applyRuleUntilCannot(f, OpenCLRules.mapSeq),
      Rewrite.applyRuleUntilCannot(f, OpenCLRules.mapGlb)
    )
    lambdaOptions.zipWithIndex.foreach(l => {
      val (result, _) = Execute(128)[Array[Float]](l._1, A, a)
      assertArrayEquals(l + " failed", gold, result, 0.0f)
    })
  }

  @Test
  def joinSplit(): Unit = {
    val M = SizeVar("M")
    val f = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N),
      input => Map(Map(id)) $ input
    )
    TypeChecker(f)
    // joinSplit must round-trip: output type equals input type and values
    // pass through unchanged.
    val g = Rewrite.applyRuleAt(f, f.body, Rules.joinSplit)
    val h = Rewrite.applyRuleUntilCannot(g, OpenCLRules.mapGlb)
    val input = Array.fill[Float](128, 128)(util.Random.nextFloat())
    val (result, _) = Execute(128)[Array[Float]](h, input)
    assertArrayEquals(input.flatten, result, 0.0f)
    assertEquals(ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N), h.body.t)
  }

  @Test
  def simpleReduceTest(): Unit = {
    val goldF = fun(
      ArrayTypeWSWC(Float, N),
      input => toGlobal(MapSeq(id)) o ReduceSeq(add, 0.0f) $ input
    )
    val f = fun(
      ArrayTypeWSWC(Float, N),
      input => Reduce(add, 0.0f) $ input
    )
    val lambda = Lower.sequential(f)
    val (gold, _) = Execute(1, 1)[Array[Float]](goldF, A)
    val (result, _) = Execute(1, 1)[Array[Float]](lambda, A)
    assertArrayEquals(gold, result, 0.0f)
  }

  @Test
  def joinFromZip0(): Unit = {
    // Different element shapes: the rule must not apply.
    val t0 = ArrayTypeWSWC(ArrayTypeWSWC(Float, N), M)
    val t1 = ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N)
    val f = fun(
      t0, t1,
      (a,b) => Zip(Join() $ a, Join() $ b)
    )
    TypeChecker(f)
    assertFalse(Rules.joinFromZip.isDefinedAt(f.body))
  }

  @Test
  def joinFromZip1(): Unit = {
    // Matching shapes: the rule applies and preserves the result type.
    val t0 = ArrayTypeWSWC(ArrayTypeWSWC(Float, N), M)
    val f = fun(
      t0, t0,
      (a,b) => Zip(Join() $ a, Join() $ b)
    )
    TypeChecker(f)
    assertTrue(Rules.joinFromZip.isDefinedAt(f.body))
    val result = Rewrite.applyRuleAt(f, f.body, Rules.joinFromZip)
    TypeChecker(result)
    assertEquals(f.body.t, result.body.t)
  }
}
| lift-project/lift | src/test/rewriting/rules/TestRules.scala | Scala | mit | 9,390 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.json
import java.nio.charset.{Charset, StandardCharsets}
import java.util.{Locale, TimeZone}
import com.fasterxml.jackson.core.{JsonFactory, JsonParser}
import org.apache.commons.lang3.time.FastDateFormat
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.util._
/**
* Options for parsing JSON data into Spark SQL rows.
*
* Most of these map directly to Jackson's internal options, specified in [[JsonParser.Feature]].
*/
/**
 * Options for parsing JSON data into Spark SQL rows.
 *
 * Most of these map directly to Jackson's internal options, specified in [[JsonParser.Feature]].
 *
 * @param parameters case-insensitive user-supplied option map
 * @param defaultTimeZoneId time zone used when the "timeZone" option is absent
 * @param defaultColumnNameOfCorruptRecord fallback corrupt-record column name
 */
private[sql] class JSONOptions(
    @transient val parameters: CaseInsensitiveMap[String],
    defaultTimeZoneId: String,
    defaultColumnNameOfCorruptRecord: String)
  extends Logging with Serializable  {

  // Convenience constructor taking a plain Map; wraps it case-insensitively.
  def this(
    parameters: Map[String, String],
    defaultTimeZoneId: String,
    defaultColumnNameOfCorruptRecord: String = "") = {
    this(
      CaseInsensitiveMap(parameters),
      defaultTimeZoneId,
      defaultColumnNameOfCorruptRecord)
  }

  // Fraction of input rows sampled during schema inference (default: all).
  val samplingRatio =
    parameters.get("samplingRatio").map(_.toDouble).getOrElse(1.0)
  val primitivesAsString =
    parameters.get("primitivesAsString").map(_.toBoolean).getOrElse(false)
  val prefersDecimal =
    parameters.get("prefersDecimal").map(_.toBoolean).getOrElse(false)
  // The following flags map one-to-one onto Jackson parser features;
  // see setJacksonOptions below.
  val allowComments =
    parameters.get("allowComments").map(_.toBoolean).getOrElse(false)
  val allowUnquotedFieldNames =
    parameters.get("allowUnquotedFieldNames").map(_.toBoolean).getOrElse(false)
  val allowSingleQuotes =
    parameters.get("allowSingleQuotes").map(_.toBoolean).getOrElse(true)
  val allowNumericLeadingZeros =
    parameters.get("allowNumericLeadingZeros").map(_.toBoolean).getOrElse(false)
  val allowNonNumericNumbers =
    parameters.get("allowNonNumericNumbers").map(_.toBoolean).getOrElse(true)
  val allowBackslashEscapingAnyCharacter =
    parameters.get("allowBackslashEscapingAnyCharacter").map(_.toBoolean).getOrElse(false)
  private val allowUnquotedControlChars =
    parameters.get("allowUnquotedControlChars").map(_.toBoolean).getOrElse(false)
  val compressionCodec = parameters.get("compression").map(CompressionCodecs.getCodecClassName)
  val parseMode: ParseMode =
    parameters.get("mode").map(ParseMode.fromString).getOrElse(PermissiveMode)
  val columnNameOfCorruptRecord =
    parameters.getOrElse("columnNameOfCorruptRecord", defaultColumnNameOfCorruptRecord)

  val timeZone: TimeZone = DateTimeUtils.getTimeZone(
    parameters.getOrElse(DateTimeUtils.TIMEZONE_OPTION, defaultTimeZoneId))

  // Uses `FastDateFormat` which can be direct replacement for `SimpleDateFormat` and thread-safe.
  val dateFormat: FastDateFormat =
    FastDateFormat.getInstance(parameters.getOrElse("dateFormat", "yyyy-MM-dd"), Locale.US)

  val timestampFormat: FastDateFormat =
    FastDateFormat.getInstance(
      parameters.getOrElse("timestampFormat", "yyyy-MM-dd'T'HH:mm:ss.SSSXXX"), timeZone, Locale.US)

  val multiLine = parameters.get("multiLine").map(_.toBoolean).getOrElse(false)

  /**
   * A string between two consecutive JSON records.
   */
  val lineSeparator: Option[String] = parameters.get("lineSep").map { sep =>
    require(sep.nonEmpty, "'lineSep' cannot be an empty string.")
    sep
  }

  /**
   * Standard encoding (charset) name. For example UTF-8, UTF-16LE and UTF-32BE.
   * If the encoding is not specified (None), it will be detected automatically
   * when the multiLine option is set to `true`.
   */
  val encoding: Option[String] = parameters.get("encoding")
    .orElse(parameters.get("charset")).map { enc =>
      // The following encodings are not supported in per-line mode (multiline is false)
      // because they cause some problems in reading files with BOM which is supposed to
      // present in the files with such encodings. After splitting input files by lines,
      // only the first lines will have the BOM which leads to impossibility for reading
      // the rest lines. Besides of that, the lineSep option must have the BOM in such
      // encodings which can never present between lines.
      val blacklist = Seq(Charset.forName("UTF-16"), Charset.forName("UTF-32"))
      val isBlacklisted = blacklist.contains(Charset.forName(enc))
      require(multiLine || !isBlacklisted,
        s"""The $enc encoding in the blacklist is not allowed when multiLine is disabled.
          |Blacklist: ${blacklist.mkString(", ")}""".stripMargin)

      val isLineSepRequired =
        multiLine || Charset.forName(enc) == StandardCharsets.UTF_8 || lineSeparator.nonEmpty
      require(isLineSepRequired, s"The lineSep option must be specified for the $enc encoding")

      enc
    }

  // The separator encoded with the effective charset, for reading.
  val lineSeparatorInRead: Option[Array[Byte]] = lineSeparator.map { lineSep =>
    lineSep.getBytes(encoding.getOrElse("UTF-8"))
  }
  val lineSeparatorInWrite: String = lineSeparator.getOrElse("\\n")

  /** Sets config options on a Jackson [[JsonFactory]]. */
  def setJacksonOptions(factory: JsonFactory): Unit = {
    factory.configure(JsonParser.Feature.ALLOW_COMMENTS, allowComments)
    factory.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, allowUnquotedFieldNames)
    factory.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, allowSingleQuotes)
    factory.configure(JsonParser.Feature.ALLOW_NUMERIC_LEADING_ZEROS, allowNumericLeadingZeros)
    factory.configure(JsonParser.Feature.ALLOW_NON_NUMERIC_NUMBERS, allowNonNumericNumbers)
    factory.configure(JsonParser.Feature.ALLOW_BACKSLASH_ESCAPING_ANY_CHARACTER,
      allowBackslashEscapingAnyCharacter)
    factory.configure(JsonParser.Feature.ALLOW_UNQUOTED_CONTROL_CHARS, allowUnquotedControlChars)
  }
}
| szhem/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JSONOptions.scala | Scala | apache-2.0 | 6,392 |
package main.scala.ProfileUpdater
import scala.io.Source
import scala.xml._
case class FieldPermissionChange(objectName : String, fieldName : String, setRead : Option[String], setWrite : Option[String])
/**
 * Reads a Salesforce `.object` metadata file and derives field-permission
 * change lists (readable / writeable / read-write / no-access) for a profile.
 *
 * @param srcDirPath root of the metadata source directory (contains `objects/`)
 * @param objectName the object whose fields are imported
 */
class ObjectImporter(srcDirPath : String, objectName : String) {

  // Parses the object file and returns all <fields> elements.
  private def getAllAvailableFields = {
    (XML.loadString(Source.fromFile(srcDirPath + "/objects/" + objectName + ".object").mkString) \\\\ "fields").toList
  }

  // Required fields and master-detail fields cannot carry explicit field
  // permissions, so they are excluded everywhere below.
  private def nonRequiredFields(n : Node) : Boolean = {
    val typeMD = (n \\\\ "type").headOption.getOrElse(<type></type>).text.equals("MasterDetail")
    val required = (n \\\\ "required").text.equals("true")
    ! required && ! typeMD
  }

  /** All non-required fields marked readable (write flag left unset). */
  def getAllReadableChanges : List[FieldPermissionChange] = {
    getAllAvailableFields.filter(nonRequiredFields).map(n => nodeToReadable(n))
  }

  private def nodeToReadable(n : Node) : FieldPermissionChange = {
    FieldPermissionChange(objectName, (n \\ "fullName").text, Some("true"), None)
  }

  // Formula fields are computed and can never be written.
  def nonFormulaField(n : Node) : Boolean = {
    (n \\\\ "formula").isEmpty
  }

  // Roll-up summary fields are likewise read-only.
  def nonSummaryField(n: Node): Boolean = {
    !(n \\\\ "type").headOption.getOrElse(<type></type>).text.equals("Summary")
  }

  def isFieldWriteable(n : Node) : Boolean = {
    nonFormulaField(n) && nonSummaryField(n)
  }

  def nodeToWriteable(n : Node) : FieldPermissionChange = {
    FieldPermissionChange(objectName, (n \\ "fullName").text, Some("true"), Some("true"))
  }

  /** All writeable fields marked read+write. */
  def getAllWriteableChanges : List[FieldPermissionChange] = {
    getAllAvailableFields.filter(nonRequiredFields).filter(isFieldWriteable).map(nodeToWriteable)
  }

  /** Read+write where possible, read-only for formula/summary fields. */
  def getAllReadWriteChanges : List[FieldPermissionChange] = {
    getAllAvailableFields.filter(nonRequiredFields).map { n =>
      if (isFieldWriteable(n)) {
        nodeToWriteable(n)
      } else {
        nodeToReadable(n)
      }
    }
  }

  /** All non-required fields with both flags cleared. */
  def getAllNoAccessChanges : List[FieldPermissionChange] = {
    getAllAvailableFields.filter(nonRequiredFields).map(nodeToNoAccess)
  }

  private def nodeToNoAccess(n : Node) = {
    FieldPermissionChange(objectName, (n \\ "fullName").text, Some("false"), Some("false"))
  }

  // Change entry with both flags unset (used to clear existing entries).
  private def nodeToField(n : Node) : FieldPermissionChange =
    FieldPermissionChange(objectName, (n \\ "fullName").text, None, None)

  // Note: unlike the other getters, this intentionally includes required and
  // master-detail fields as well.
  def getClearEntries : List[FieldPermissionChange] = {
    getAllAvailableFields map nodeToField
  }
}
| wojdyga/sfdc-profile-updater | src/main/scala/ProfileUpdater/ObjectImporter.scala | Scala | gpl-3.0 | 2,354 |
package controllers.security
import com.lvxingpai.yunkai.UserInfo
import Security.{ AuthenticatedBuilder, AuthenticatedRequest }
import core.utils.HanseResults
import libs.RequestProcessingExtended
import libs.RequestProcessingExtended.WrappedPayload
import play.api.mvc.{ AnyContent, BodyParser, BodyParsers, Result }
import scala.concurrent.{ ExecutionContext, Future }
/**
* Created by zephyre on 1/12/16.
*/
/**
 * Action builder that authenticates requests as a yunkai [[UserInfo]],
 * answering 401 via HanseResults.unauthorized() when authentication fails.
 */
object AuthenticatedAction extends AuthenticatedBuilder[UserInfo](AuthenticatedBuilder.auth, _ => HanseResults.unauthorized()) {

  // Async variant whose request body is wrapped in a WrappedPayload so the
  // raw payload remains available to the handler.
  def async2(block: AuthenticatedRequest[WrappedPayload[AnyContent], UserInfo] => Future[Result])(implicit ctx: ExecutionContext) = {
    val bodyParser: BodyParser[WrappedPayload[AnyContent]] =
      RequestProcessingExtended.wrappedBodyParser(BodyParsers.parse.anyContent)
    AuthenticatedAction.async(bodyParser)(block)
  }
}
| Lvxingpai/Hanse | app/controllers/security/AuthenticatedAction.scala | Scala | apache-2.0 | 879 |
package controllers
import com.gilt.cavellc.Client
import com.gilt.cavellc.models.{Member, Organization, Role, Team}
import controllers.helpers.{CreateToken, AddUserData}
import controllers.helpers.CaveForms._
import play.api.Logger
import play.api.i18n.Messages
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
case class TeamData(team: Team, role: Role, members: Seq[Member])
/**
 * Controller for the team detail page. Gathers the viewer's organizations,
 * teams and roles from the CAVE API and renders the team view, or a 500
 * error page when the team cannot be resolved.
 */
class Teams extends AbstractCaveController {

  def team(organizationName: String, teamName: String) = caveAsyncAction { implicit request =>
    withCaveClient { client =>
      // Sequential for-comprehension: each lookup depends on the roles
      // resolved by the previous ones.
      val data = for {
        userOrganizations <- client.Users.getOrganizations()
        organization <- getOrganization(organizationName, client, userOrganizations)
        userTeams <- getUserTeams(userOrganizations, client)
        roleInOrganization <- Future.successful(userOrganizations.filter(_.name == organizationName).head.role)
        roleInTeam <- getRoleInTeam(organizationName, teamName, userTeams, roleInOrganization)
        teamsInOrganization <- getTeamsInOrganization(organizationName, roleInOrganization, client)
        team <- getSelectedTeam(organizationName, teamName, client, roleInTeam)
        membersInTeam <- getMembers(organizationName, teamName, client, organization, roleInTeam, team)
      } yield (organization, team, roleInTeam, membersInTeam.sortBy(_.user.firstName), userOrganizations.sortBy(_.name), userTeams.sortBy(_.userTeam.name), roleInOrganization)

      data map {
        case (Some(org), Some(team), roleInTeam, membersInTeam, userOrganizations, userTeams, roleInOrganization) =>
          val menuData = SideMenuData(userOrganizations, userTeams, Some(org), Some(team), Some(roleInOrganization))
          val teamData = TeamData(team, roleInTeam, membersInTeam)
          // Pre-fill the add-user and create-token forms with the current org/team.
          val addUserFormWithOrgAndTeam = addUserForm.fill(AddUserData(EMPTY_STRING, organizationName, teamName, EMPTY_STRING))
          val createTokenFormWithOrg = createTokenForm.fill(CreateToken(org.name, Some(team.name), EMPTY_STRING))
          Ok(views.html.teams.team(menuData, teamData, addUserFormWithOrgAndTeam, createTokenFormWithOrg))
        case _ =>
          Logger.warn(s"Team not found $teamName @ $organizationName")
          InternalServerError(views.html.errorpages.errorPage(Messages("cave.errors.5xx.general")))
      }
    }
  }

  // Members are only visible to roles above Viewer/Team; otherwise an empty list.
  private[controllers] def getMembers(organizationName: String, teamName: String, client: Client, organization: Option[Organization], role: Role, team: Option[Team]): Future[Seq[Member]] = {
    if (organization.isDefined && team.isDefined && !Seq(Role.Viewer, Role.Team).contains(role))
      client.Teams.getOrganizationsAndUsersByOrganizationAndTeam(organizationName, teamName).map(_.sortBy(_.user.firstName))
    else
      Future.successful(List.empty)
  }

  // Viewer/Team roles cannot fetch team details; synthesize a bare Team instead.
  private[controllers] def getSelectedTeam(organizationName: String, teamName: String, client: Client, role: Role): Future[Option[Team]] = {
    if (!Seq(Role.Viewer, Role.Team).contains(role))
      client.Teams.getOrganizationsByOrganizationAndTeam(organizationName, teamName)
    else
      Future.successful(Some(Team(teamName, Seq.empty)))
  }
}
| gilt/cave | www/app/controllers/Teams.scala | Scala | mit | 3,184 |
package com.gravity.goose
import extractors.PublishDateExtractor
import org.junit.Test
import org.junit.Assert._
import utils.FileHelper
import java.text.SimpleDateFormat
import org.jsoup.select.Selector
import org.jsoup.nodes.Element
import java.util.Date
/**
* Created by Jim Plush
* User: jim
* Date: 8/19/11
*/
class TextExtractionsTest {
def getHtml(filename: String): String = {
FileHelper.loadResourceFile(TestUtils.staticHtmlDir + filename, Goose.getClass)
}
@Test
def cnn1() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("cnn1.txt")
val url = "http://www.cnn.com/2010/POLITICS/08/13/democrats.social.security/index.html"
val article = TestUtils.getArticle(url = url, rawHTML = html)
val title = "Democrats to use Social Security against GOP this fall"
val content = "Washington (CNN) -- Democrats pledged "
TestUtils.runArticleAssertions(article = article, expectedTitle = title, expectedStart = content)
}
@Test
def businessWeek2() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("businessweek2.txt")
val url: String = "http://www.businessweek.com/technology/here-comes-apples-real-tv-09132011.html"
val article = TestUtils.getArticle(url, html)
TestUtils.runArticleAssertions(article = article,
expectedStart = "At Home Depot, we first realized we needed to have a real conversation with",
expectedImage = null)
}
@Test
def businessWeek3() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("businessweek3.txt")
val url: String = "http://www.businessweek.com/management/five-social-media-lessons-for-business-09202011.html"
val article = TestUtils.getArticle(url, html)
TestUtils.runArticleAssertions(article = article,
expectedStart = "Get ready, America, because by Christmas 2012 you will have an Apple TV in your living room",
expectedImage = null)
}
@Test
def techcrunch1() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("techcrunch1.txt")
val url = "http://techcrunch.com/2011/08/13/2005-zuckerberg-didnt-want-to-take-over-the-world/"
val content = "The Huffington Post has come across this fascinating five-minute interview"
val title = "2005 Zuckerberg Didn’t Want To Take Over The World"
val article = TestUtils.getArticle(url = url, rawHTML = html)
TestUtils.runArticleAssertions(article = article, expectedTitle = title, expectedStart = content)
}
@Test
def businessweek1() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("businessweek1.txt")
val url: String = "http://www.businessweek.com/magazine/content/10_34/b4192066630779.htm"
val title = "Olivia Munn: Queen of the Uncool"
val content = "Six years ago, Olivia Munn arrived in Hollywood with fading ambitions of making it as a sports reporter and set about deploying"
val article = TestUtils.getArticle(url = url, rawHTML = html)
TestUtils.runArticleAssertions(article = article, expectedTitle = title, expectedStart = content)
}
@Test
// Article-start extraction for a Fox News politics page; images disabled.
def foxNews() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("foxnews1.txt")
val url: String = "http://www.foxnews.com/politics/2010/08/14/russias-nuclear-help-iran-stirs-questions-improved-relations/"
val content = "Russia's announcement that it will help Iran get nuclear fuel is raising questions"
val article = TestUtils.getArticle(url = url, rawHTML = html)
TestUtils.runArticleAssertions(article = article, expectedStart = content)
}
@Test
// Article-start extraction for an AOL News page; expected text contains
// embedded escaped quotes, so it must match the fixture byte-for-byte.
def aolNews() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("aol1.txt")
val url: String = "http://www.aolnews.com/nation/article/the-few-the-proud-the-marines-getting-a-makeover/19592478"
val article = TestUtils.getArticle(url = url, rawHTML = html)
val content = "WASHINGTON (Aug. 13) -- Declaring \\"the maritime soul of the Marine Corps\\" is"
TestUtils.runArticleAssertions(article = article, expectedStart = content)
}
@Test
def huffingtonPost2() {
  // Article-start extraction for a Huffington Post immigration story (no images).
  implicit val config = TestUtils.NO_IMAGE_CONFIG
  val pageHtml = getHtml("huffpo2.txt")
  val pageUrl = "http://www.huffingtonpost.com/2011/10/06/alabama-workers-immigration-law_n_997793.html"
  val extracted = TestUtils.getArticle(url = pageUrl, rawHTML = pageHtml)
  TestUtils.runArticleAssertions(
    article = extracted,
    expectedStart = "MONTGOMERY, Ala. -- Alabama's strict new immigration law may be backfiring.")
}
@Test
// Verifies title, article start, meta description, and extracted tags for a
// Huffington Post article. The tag check asserts every extracted tag belongs
// to the closed expected set (extraction may legitimately find a subset).
// Changes: removed the unused local `keywords` and replaced `size > 0` with
// the idiomatic `nonEmpty`.
def testHuffingtonPost() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val url: String = "http://www.huffingtonpost.com/2010/08/13/federal-reserve-pursuing_n_681540.html"
val html = getHtml("huffpo1.txt")
val title: String = "Federal Reserve's Low Rate Policy Is A 'Dangerous Gamble,' Says Top Central Bank Official"
val content = "A top regional Federal Reserve official sharply criticized Friday"
val description = "A top regional Federal Reserve official sharply criticized Friday the Fed's ongoing policy of keeping interest rates near zero -- and at record lows -- as a \\"dangerous gamble.\\""
val article = TestUtils.getArticle(url = url, rawHTML = html)
TestUtils.runArticleAssertions(article = article, expectedTitle = title, expectedStart = content, expectedDescription = description)
// Closed set of tags that the extractor is allowed to report for this page.
val expectedTags = "Federal Open Market Committee" ::
"Federal Reserve" ::
"Federal Reserve Bank Of Kansas City" ::
"Financial Crisis" ::
"Financial Reform" ::
"Financial Regulation" ::
"Financial Regulatory Reform" ::
"Fomc" ::
"Great Recession" ::
"Interest Rates" ::
"Kansas City Fed" ::
"Monetary Policy" ::
"The Financial Fix" ::
"Thomas Hoenig" ::
"Too Big To Fail" ::
"Wall Street Reform" ::
"Business News" ::
Nil
assertNotNull("Tags should not be NULL!", article.tags)
assertTrue("Tags should not be empty!", article.tags.nonEmpty)
for (actualTag <- article.tags) {
assertTrue("Each Tag should be contained in the expected set!", expectedTags.contains(actualTag))
}
}
@Test
// Article-start extraction for a Wall Street Journal page; images disabled.
def wallStreetJournal() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("wsj1.txt")
val url: String = "http://online.wsj.com/article/SB10001424052748704532204575397061414483040.html"
val article = TestUtils.getArticle(url = url, rawHTML = html)
val content = "The Obama administration has paid out less than a third of the nearly $230 billion"
TestUtils.runArticleAssertions(article = article, expectedStart = content)
}
@Test
// Article-start extraction for a USA Today community-blog page; images disabled.
def usaToday() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("usatoday1.txt")
val url: String = "http://content.usatoday.com/communities/thehuddle/post/2010/08/brett-favre-practices-set-to-speak-about-return-to-minnesota-vikings/1"
val article = TestUtils.getArticle(url, rawHTML = html)
val content = "Brett Favre says he couldn't give up on one more"
TestUtils.runArticleAssertions(article = article, expectedStart = content)
}
@Test
// Verifies that a custom PublishDateExtractor plugged into the Configuration is
// honored: the date is read from <meta name="DisplayDate" content="2010-08-18"/>
// in the wired1.txt fixture and compared against the extracted publishDate.
// Change: Java's @Override annotation has no effect on Scala methods; the
// Scala `override` modifier is used instead. Stray semicolons removed.
def wiredPubDate() {
val url = "http://www.wired.com/playbook/2010/08/stress-hormones-boxing/"
val html = getHtml("wired1.txt")
import com.github.nscala_time.time.Imports._
val dateParser = DateTimeFormat.forPattern("yyyy-MM-dd")
// example of a custom PublishDateExtractor
implicit val config = new Configuration()
config.enableImageFetching = false
config.setPublishDateExtractor(new PublishDateExtractor() {
// Returns the parsed DisplayDate meta value, or null when the tag or its
// `content` attribute is absent (null signals "no date found" to goose).
override def extract(rootElement: Element): DateTime = {
// look for this guy: <meta name="DisplayDate" content="2010-08-18" />
val elements = Selector.select("meta[name=DisplayDate]", rootElement)
if (elements.size() == 0) return null
val metaDisplayDate = elements.get(0)
if (metaDisplayDate.hasAttr("content")) {
val dateStr = metaDisplayDate.attr("content")
return dateParser.parseDateTime(dateStr)
}
null
}
})
val article = TestUtils.getArticle(url, rawHTML = html)
TestUtils.runArticleAssertions(
article,
"Stress Hormones Could Predict Boxing Dominance",
"On November 25, 1980, professional boxing")
val expectedDateString = "2010-08-18"
assertNotNull("publishDate should not be null!", article.publishDate)
assertEquals("Publish date should equal: \\"2010-08-18\\"", expectedDateString, dateParser.print(new DateTime(article.publishDate)))
}
@Test
// Article-start extraction for an ESPN commentary page; images disabled.
def espn() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("espn1.txt")
val url: String = "http://sports.espn.go.com/espn/commentary/news/story?id=5461430"
val article = TestUtils.getArticle(url, html)
TestUtils.runArticleAssertions(article = article,
expectedStart = "If you believe what college football coaches have said about sports")
}
@Test
// Article-start extraction for an Engadget page; images disabled.
def engadget() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("engadget1.txt")
val url: String = "http://www.engadget.com/2010/08/18/verizon-fios-set-top-boxes-getting-a-new-hd-guide-external-stor/"
val article = TestUtils.getArticle(url, html)
TestUtils.runArticleAssertions(article = article,
expectedStart = "Streaming and downloading TV content to mobiles is nice")
}
@Test
// Extraction for an MSN Lifestyle page; the full expected text lives in a
// separate fixture (msn1_result.txt) rather than an inline literal.
def msn1() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("msn1.txt")
val expected = getHtml("msn1_result.txt")
val url: String = "http://lifestyle.msn.com/your-life/your-money-today/article.aspx?cp-documentid=31244150"
val article = TestUtils.getArticle(url, html)
TestUtils.runArticleAssertions(article = article,
expectedStart = expected)
}
@Test
// Extraction for a Guardian film page; expected text comes from a result fixture.
def guardian1() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("guardian1.txt")
val expected = getHtml("guardian1_result.txt")
val url: String = "http://www.guardian.co.uk/film/2011/nov/18/kristen-wiig-bridesmaids"
val article = TestUtils.getArticle(url, html)
TestUtils.runArticleAssertions(article = article,
expectedStart = expected)
}
@Test
// Title and article-start extraction for a TIME health article; images disabled.
def time() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("time1.txt")
val url: String = "http://www.time.com/time/health/article/0,8599,2011497,00.html"
val article = TestUtils.getArticle(url, html)
TestUtils.runArticleAssertions(article = article,
expectedStart = "This month, the federal government released",
expectedTitle = "Invisible Oil from BP Spill May Threaten Gulf Aquatic Life")
}
@Test
// Article-start extraction for a TIME NewsFeed page; images disabled.
def time2() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("time2.txt")
val url: String = "http://newsfeed.time.com/2011/08/24/washington-monument-closes-to-repair-earthquake-induced-crack/"
val article = TestUtils.getArticle(url, html)
TestUtils.runArticleAssertions(article = article,
expectedStart = "Despite what the jeers of jaded Californians might suggest")
}
@Test
// Article-start extraction for a CNET news page; images disabled.
def cnet() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("cnet1.txt")
val url: String = "http://news.cnet.com/8301-30686_3-20014053-266.html?tag=topStories1"
val article = TestUtils.getArticle(url, html)
TestUtils.runArticleAssertions(article = article,
expectedStart = "NEW YORK--Verizon Communications is prepping a new")
}
@Test
// Article-start extraction for a Yahoo News page; images disabled.
def yahoo() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("yahoo1.txt")
val url: String = "http://news.yahoo.com/apple-says-steve-jobs-resigning-ceo-224628633.html"
val article = TestUtils.getArticle(url, html)
TestUtils.runArticleAssertions(article = article,
expectedStart = "SAN FRANCISCO (AP) — Steve Jobs, the mind behind the iPhone")
}
@Test
// Article-start extraction for a Politico page; images disabled.
def politico() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("politico1.txt")
val url: String = "http://www.politico.com/news/stories/1010/43352.html"
val article = TestUtils.getArticle(url, html)
TestUtils.runArticleAssertions(article = article,
expectedStart = "If the newest Census Bureau estimates stay close to form")
}
@Test
// Article-start extraction for a Business Insider page; images disabled.
def businessinsider1() {
val url = "http://www.businessinsider.com/goldman-on-the-fed-announcement-2011-9"
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("businessinsider1.txt")
val article = TestUtils.getArticle(url, html)
TestUtils.runArticleAssertions(article = article,
expectedStart = "As everyone in the world was transfixed on the Fed")
}
@Test
// Second Business Insider extraction, driven by a different fixture.
// NOTE(review): the URL is identical to businessinsider1's even though the
// HTML fixture differs — presumably intentional (only the HTML matters for
// extraction), but verify.
def businessinsider2() {
val url = "http://www.businessinsider.com/goldman-on-the-fed-announcement-2011-9"
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("businessinsider2.txt")
val article = TestUtils.getArticle(url, html)
TestUtils.runArticleAssertions(article = article,
expectedStart = "From Goldman on the FOMC operation twist announcement")
}
@Test
// Article-start extraction for a CNBC page; images disabled.
def cnbc1() {
val url = "http://www.cnbc.com/id/44613978"
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("cnbc1.txt")
val article = TestUtils.getArticle(url, html)
TestUtils.runArticleAssertions(article = article,
expectedStart = "Some traders found Wednesday's Fed statement to be a bit gloomier than expected.")
}
/*
* --------------------------------------------------------
* Test Fixes for GitHub Issues Submitted
* --------------------------------------------------------
*/
@Test
// Regression test for GitHub issue #24: the entire cleaned article text must
// exactly equal the result fixture, not just the leading characters.
def issue24() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("issue_24.txt")
val expected = getHtml("issue_24_result.txt")
val url: String = "http://danielspicar.github.com/goose-bug.html"
val article = TestUtils.getArticle(url, html)
assertEquals("The beginning of the article text was not as expected!", expected, article.cleanedArticleText)
}
@Test
// Regression test for GitHub issue #25 (Accountancy Age extraction).
def issue25() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("issue_25.txt")
val url: String = "http://www.accountancyage.com/aa/analysis/2111729/institutes-ifrs-bang"
val article = TestUtils.getArticle(url, html)
TestUtils.runArticleAssertions(article = article,
expectedStart = "UK INSTITUTES have thrown their weight behind rapid adoption of international financial reporting standards in the US.")
}
@Test
// Regression test for GitHub issue #28 (Telegraph extraction; no image expected).
def issue28() {
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("issue_28.txt")
val url: String = "http://www.telegraph.co.uk/foodanddrink/foodanddrinknews/8808120/Worlds-hottest-chilli-contest-leaves-two-in-hospital.html"
val article = TestUtils.getArticle(url, html)
TestUtils.runArticleAssertions(article = article,
expectedStart = "Emergency services were called to Kismot Restaurant's curry-eating challenge,",
expectedImage = null)
}
@Test
// Regression test for GitHub issue #32: content embedded in span/br tags
// rather than paragraphs must still be extracted.
def issue32() {
// this link is an example of web devs putting content not in paragraphs but embedding them in span tags with br's
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("issue_32.txt")
val url: String = "http://www.tulsaworld.com/site/articlepath.aspx?articleid=20111118_61_A16_Opposi344152&rss_lnk=7"
val article = TestUtils.getArticle(url, html)
TestUtils.runArticleAssertions(article = article,
expectedStart = "Opposition to a proposal to remove certain personal data",
expectedImage = null)
}
@Test
// Regression test for an HTML-parsing bug; the fixture is Romanian-language
// content (the expectedStart literal deliberately begins with a space).
// NOTE(review): the URL is reused from issue32 and does not match the
// Romanian fixture — presumably only the fixture HTML matters here; verify.
def bug1() {
// html is not parsed properly
implicit val config = TestUtils.NO_IMAGE_CONFIG
val html = getHtml("bug1.html")
val url: String = "http://www.tulsaworld.com/site/articlepath.aspx?articleid=20111118_61_A16_Opposi344152&rss_lnk=7"
val article = TestUtils.getArticle(url, html)
TestUtils.runArticleAssertions(article = article,
expectedStart = " Produsele naturale şi ecologice au devenit u",
expectedImage = null)
}
}
| raisercostin/goose | src/test/scala/com/gravity/goose/TextExtractionsTest.scala | Scala | apache-2.0 | 16,403 |
package com.lucaongaro.similaria
/** Configuration options for [[com.lucaongaro.similaria.Similaria]]
* instances
*
* @constructor creates an Option object
* @param dbPath the path where to persist the database
* @param dbSize the maximum size of the database in bytes
*/
case class Options(
dbPath: String, // filesystem path where the database is persisted
dbSize: Long // maximum database size, in bytes
)
object Options {
  /** Implicit default value for the options
    *
    * By default uses a path of 'db/similaria' and a size of 1GB
    */
  // Implicit definitions should carry an explicit type annotation: inference
  // on implicits is fragile in Scala 2 and the annotation is mandatory in
  // Scala 3. Also use the case-class `apply` instead of `new`.
  implicit val default: Options = Options(
    "db/similaria",
    1073741824L // 1GB
  )
}
| lucaong/similaria | src/main/scala/com/lucaongaro/similaria/Options.scala | Scala | mit | 567 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode}
import org.apache.spark.sql.types.{DataType, StringType}
/**
* Custom expression to override the deterministic property .
*/
// Thin wrapper around another Catalyst expression: every member (nullability,
// evaluation, data type, children, codegen) is forwarded to `nonDt`.
// NOTE(review): despite the class comment above, no `deterministic` member is
// overridden here — confirm the wrapper achieves its stated purpose through
// Expression's default (children-derived) determinism.
case class CustomDeterministicExpression(nonDt: Expression ) extends Expression with Serializable{
override def nullable: Boolean = nonDt.nullable
override def eval(input: InternalRow): Any = nonDt.eval(input)
override def dataType: DataType = nonDt.dataType
override def children: Seq[Expression] = nonDt.children
// Accessor for the wrapped expression.
def childexp: Expression = nonDt
override def genCode(ctx: CodegenContext): ExprCode = nonDt.genCode(ctx)
// doGenCode is never reached with real code: genCode above delegates to the
// wrapped expression, so this returns a copy of the placeholder ExprCode.
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = ev.copy()
}
| jackylk/incubator-carbondata | integration/spark/src/main/scala/org/apache/spark/sql/CustomDeterministicExpression.scala | Scala | apache-2.0 | 1,686 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.aggregate
import org.apache.spark.TaskContext
import org.apache.spark.rdd.{MapPartitionsWithPreparationRDD, RDD}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.errors._
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression2
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution.{UnaryNode, SparkPlan}
import org.apache.spark.sql.execution.metric.SQLMetrics
// Physical operator performing Tungsten-based hash aggregation over unsafe
// rows. Each partition builds a TungstenAggregationIterator *before* the
// parent partition is computed (see SPARK-9747 note below), and can fall back
// to sort-based aggregation after a configurable number of input rows.
case class TungstenAggregate(
requiredChildDistributionExpressions: Option[Seq[Expression]],
groupingExpressions: Seq[NamedExpression],
nonCompleteAggregateExpressions: Seq[AggregateExpression2],
completeAggregateExpressions: Seq[AggregateExpression2],
initialInputBufferOffset: Int,
resultExpressions: Seq[NamedExpression],
child: SparkPlan)
extends UnaryNode {
override private[sql] lazy val metrics = Map(
"numInputRows" -> SQLMetrics.createLongMetric(sparkContext, "number of input rows"),
"numOutputRows" -> SQLMetrics.createLongMetric(sparkContext, "number of output rows"))
override def outputsUnsafeRows: Boolean = true
override def canProcessUnsafeRows: Boolean = true
override def canProcessSafeRows: Boolean = true
override def output: Seq[Attribute] = resultExpressions.map(_.toAttribute)
// Empty grouping key requires all tuples in one partition; otherwise cluster
// by the provided expressions; None leaves the distribution unspecified.
override def requiredChildDistribution: List[Distribution] = {
requiredChildDistributionExpressions match {
case Some(exprs) if exprs.length == 0 => AllTuples :: Nil
case Some(exprs) if exprs.length > 0 => ClusteredDistribution(exprs) :: Nil
case None => UnspecifiedDistribution :: Nil
}
}
// This is for testing. We force TungstenAggregationIterator to fall back to sort-based
// aggregation once it has processed a given number of input rows.
private val testFallbackStartsAt: Option[Int] = {
sqlContext.getConf("spark.sql.TungstenAggregate.testFallbackStartsAt", null) match {
case null | "" => None
case fallbackStartsAt => Some(fallbackStartsAt.toInt)
}
}
protected override def doExecute(): RDD[InternalRow] = attachTree(this, "execute") {
val numInputRows = longMetric("numInputRows")
val numOutputRows = longMetric("numOutputRows")
/**
* Set up the underlying unsafe data structures used before computing the parent partition.
* This makes sure our iterator is not starved by other operators in the same task.
*/
def preparePartition(): TungstenAggregationIterator = {
new TungstenAggregationIterator(
groupingExpressions,
nonCompleteAggregateExpressions,
completeAggregateExpressions,
initialInputBufferOffset,
resultExpressions,
newMutableProjection,
child.output,
testFallbackStartsAt,
numInputRows,
numOutputRows)
}
/** Compute a partition using the iterator already set up previously. */
def executePartition(
context: TaskContext,
partitionIndex: Int,
aggregationIterator: TungstenAggregationIterator,
parentIterator: Iterator[InternalRow]): Iterator[UnsafeRow] = {
val hasInput = parentIterator.hasNext
if (!hasInput) {
// We're not using the underlying map, so we just can free it here
aggregationIterator.free()
if (groupingExpressions.isEmpty) {
// Global aggregation over empty input still emits one row (e.g. count() == 0).
numOutputRows += 1
Iterator.single[UnsafeRow](aggregationIterator.outputForEmptyGroupingKeyWithoutInput())
} else {
// This is a grouped aggregate and the input iterator is empty,
// so return an empty iterator.
Iterator.empty
}
} else {
aggregationIterator.start(parentIterator)
aggregationIterator
}
}
// Note: we need to set up the iterator in each partition before computing the
// parent partition, so we cannot simply use `mapPartitions` here (SPARK-9747).
val resultRdd = {
new MapPartitionsWithPreparationRDD[UnsafeRow, InternalRow, TungstenAggregationIterator](
child.execute(), preparePartition, executePartition, preservesPartitioning = true)
}
resultRdd.asInstanceOf[RDD[InternalRow]]
}
// Human-readable plan description; the fallback variant exposes the test knob.
override def simpleString: String = {
val allAggregateExpressions = nonCompleteAggregateExpressions ++ completeAggregateExpressions
testFallbackStartsAt match {
case None =>
val keyString = groupingExpressions.mkString("[", ",", "]")
val functionString = allAggregateExpressions.mkString("[", ",", "]")
val outputString = output.mkString("[", ",", "]")
s"TungstenAggregate(key=$keyString, functions=$functionString, output=$outputString)"
case Some(fallbackStartsAt) =>
s"TungstenAggregateWithControlledFallback $groupingExpressions " +
s"$allAggregateExpressions $resultExpressions fallbackStartsAt=$fallbackStartsAt"
}
}
}
| ArvinDevel/onlineAggregationOnSparkV2 | sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregate.scala | Scala | apache-2.0 | 5,772 |
package breeze.stats.regression
import breeze.generic.UFunc
import breeze.linalg._
import org.netlib.util.intW
import com.github.fommil.netlib.LAPACK.{getInstance=>lapack}
import java.util.Arrays
import spire.implicits.cfor
// Coordinate-descent lasso solver. Iterates over columns, re-fitting one
// coefficient at a time against the residual of all others, soft-thresholding
// each update by `lambda`, until no coefficient changes or MAX_ITER is hit.
// NOTE(review): IMPROVE_THRESHOLD is accepted but never read in this block.
private case class LassoCalculator(data: DenseMatrix[Double], outputs: DenseVector[Double], lambda: Double, workArray: Array[Double], MAX_ITER: Int=100, IMPROVE_THRESHOLD: Double=1e-8) {
/*
* The main purpose of this complicated calculator object is to recycle all the assorted work arrays.
* If we didn't write it this way, we'd have to manually thread all the work arrays
* throughout a slew of functions.
*/
require(data.rows == outputs.size)
require(data.rows > data.cols)
require(data.rows == outputs.size)
require(workArray.size >= 2*data.rows*data.cols)
// Reusable buffers: residual vector, one-column design matrix, coefficients.
private val outputCopy = DenseVector.zeros[Double](outputs.size)
private val singleColumnMatrix = new DenseMatrix[Double](data.rows, 1)
private val resultVec = DenseVector.zeros[Double](data.cols)
lazy val result: LassoResult = {
var improvedResult = true
var iter = 0
while (improvedResult && (iter < MAX_ITER)) {
iter += 1
improvedResult = false
cfor(0)(i => i<data.cols, i=>i+1)(i => {
val eoc = estimateOneColumn(i)
val oldCoefficient = resultVec.unsafeValueAt(i)
resultVec.unsafeUpdate(i, shrink(eoc.coefficients(0)))
if (oldCoefficient != resultVec.unsafeValueAt(i)) {
improvedResult = true
}
})
}
LassoResult(resultVec, computeRsquared, lambda)
}
private def shrink(x: Double): Double = {
// Soft thresholding
val sb = math.signum(x)
val ab = sb*x
if (ab > lambda) {
sb*(ab-lambda)
} else {
0.0
}
}
private def copyColumn(column: Int): Unit = {
/* After running this routine, outputCopy should consist of the residuals after multiplying
* data against resultVec, excluding the specified column.
*
* The single column matrix should then be set to equal the data from that column.
*/
require(column < data.cols)
require(column >= 0)
cfor(0)(i => i < outputs.size, i => i+1)(i => {
singleColumnMatrix.unsafeUpdate(i, 0, data.unsafeValueAt(i, column))
var o = outputs.unsafeValueAt(i)
cfor(0)(j => j < data.cols, j => j+1)(j => {
if (j != column) {
o -= data.unsafeValueAt(i,j) * resultVec.unsafeValueAt(j)
}
})
outputCopy.unsafeUpdate(i, o)
})
}
// NOTE(review): despite the name, this computes the residual sum of squares
// (sum of squared residuals), not R²; its value is stored in
// LassoResult.rSquared above — verify downstream consumers expect RSS.
private def computeRsquared = {
var r2 = 0.0
cfor(0)(i => i < outputs.size, i => i+1)(i => {
var o = outputs.unsafeValueAt(i)
cfor(0)(j => j < data.cols, j => j+1)(j => {
o -= data.unsafeValueAt(i,j) * resultVec.unsafeValueAt(j)
})
r2 += o*o
})
r2
}
private def estimateOneColumn(column: Int): LeastSquaresRegressionResult = {
/*
* Goal of this routine is to use the specified column to explain as much of the residual
* as possible, after using the already specified values in other columns.
*/
copyColumn(column)
leastSquaresDestructive(singleColumnMatrix, outputCopy, workArray)
}
}
// Fitted lasso model: predicts via the dot product of coefficients with the
// input vector. `rSquared` holds the value produced by LassoCalculator
// (see note there: it is actually the residual sum of squares).
case class LassoResult(coefficients: DenseVector[Double], rSquared: Double, lambda: Double) extends RegressionResult[DenseVector[Double], Double] {
require(lambda >= 0)
def apply(x: DenseVector[Double]): Double = coefficients.dot(x)
}
object lasso extends UFunc {
/*
* This ufunc implements lasso regression, as described in section 2.2 of
* Coordinate Descent Optimization for l1 Minimization with Application to Compressed Sensing; a Greedy Algorithm
* by Yingying Li Stanley Osher
* Download at: ftp://ftp.math.ucla.edu/pub/camreport/cam09-17.pdf
*/
// Caller supplies the scratch array; data/outputs are used as-is (mutated).
implicit val matrixVectorWithWorkArray: Impl4[DenseMatrix[Double], DenseVector[Double], Double, Array[Double], LassoResult] = new Impl4[DenseMatrix[Double], DenseVector[Double], Double, Array[Double], LassoResult] {
def apply(data: DenseMatrix[Double], outputs: DenseVector[Double], lambda: Double, workArray: Array[Double]): LassoResult = LassoCalculator(data, outputs, lambda, workArray).result
}
// Caller supplies only the scratch size; the array is allocated here.
implicit val matrixVectorSpecifiedWork: Impl4[DenseMatrix[Double], DenseVector[Double], Double, Int, LassoResult] = new Impl4[DenseMatrix[Double], DenseVector[Double], Double, Int, LassoResult] {
def apply(data: DenseMatrix[Double], outputs: DenseVector[Double], lambda: Double, workSize: Int): LassoResult = LassoCalculator(data, outputs, lambda, new Array[Double](workSize)).result
}
// Safe default: copies inputs (LassoCalculator mutates its buffers) and
// allocates 2*rows*cols scratch, clamped to at least 1 element.
implicit val matrixVector: Impl3[DenseMatrix[Double], DenseVector[Double], Double, LassoResult] = new Impl3[DenseMatrix[Double], DenseVector[Double], Double, LassoResult] {
def apply(data: DenseMatrix[Double], outputs: DenseVector[Double], lambda: Double): LassoResult = LassoCalculator(data.copy, outputs.copy, lambda, new Array[Double](math.max(1, data.rows*data.cols*2))).result
}
}
| calippo/breeze | math/src/main/scala/breeze/stats/regression/Lasso.scala | Scala | apache-2.0 | 4,955 |
package com.twitter.finagle.redis.integration
import com.twitter.finagle.redis.naggati.RedisClientServerIntegrationTest
import com.twitter.finagle.redis.protocol._
import com.twitter.finagle.redis.tags.{ClientServerTest, RedisTest}
import com.twitter.finagle.redis.util.StringToChannelBuffer
import com.twitter.util.{Await, Future}
import org.jboss.netty.buffer.ChannelBuffer
import org.junit.Ignore
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@Ignore
@RunWith(classOf[JUnitRunner])
final class HyperLogLogClientServerIntegrationSuite extends RedisClientServerIntegrationTest {
implicit def convertToChannelBuffer(s: String): ChannelBuffer = StringToChannelBuffer(s)
test("PFADD should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
assert(Await.result(client(PFAdd(foo, List(bar)))) == IntegerReply(1))
}
}
test("PFCOUNT should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
val pfCountResult = client(PFAdd("foo", List("bar", "baz")))
.flatMap(_ => client(PFCount(List(StringToChannelBuffer("foo")))))
assert(Await.result(pfCountResult) == IntegerReply(2))
}
}
test("PFMERGE should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
val setup = List(PFAdd("foo", List("bar")), PFAdd("bar", List("baz"))) map client
val pfMergeResult = Future.collect(setup).flatMap(_ => client(PFMerge("baz", List("foo", "bar"))))
assert(Await.result(pfMergeResult) == OKStatusReply)
}
}
}
| a-manumohan/finagle | finagle-redis/src/test/scala/com/twitter/finagle/redis/commands/hyperloglog/HyperLogLogClientServerIntegrationSuite.scala | Scala | apache-2.0 | 1,573 |
package is.hail.io.index
import java.io.OutputStream
import is.hail.annotations.{Annotation, Region, RegionValueBuilder, StagedRegionValueBuilder}
import is.hail.asm4s._
import is.hail.expr.ir.{CodeParam, EmitClassBuilder, EmitCode, EmitCodeBuilder, EmitFunctionBuilder, EmitMethodBuilder, EmitParamType, ExecuteContext, IEmitCode, ParamType, coerce}
import is.hail.types
import is.hail.types._
import is.hail.types.encoded.EType
import is.hail.types.physical.{PBaseStruct, PBaseStructValue, PCanonicalArray, PCanonicalBaseStructSettable, PCanonicalStruct, PCode, PInt64, PType}
import is.hail.types.virtual.Type
import is.hail.io.fs.FS
import is.hail.io._
import is.hail.rvd.AbstractRVDSpec
import is.hail.utils._
import is.hail.utils.richUtils.ByteTrackingOutputStream
import org.json4s.Formats
import org.json4s.jackson.Serialization
// Read-only view of the metadata persisted alongside an on-disk index.
trait AbstractIndexMetadata {
def fileVersion: Int
def branchingFactor: Int
def height: Int
def keyType: Type
def annotationType: Type
def nKeys: Long
def indexPath: String
def rootOffset: Long
def attributes: Map[String, Any]
}
// JSON-deserialization shape of index metadata without the virtual types;
// pair it with the externally known key/annotation types via toMetadata.
case class IndexMetadataUntypedJSON(
fileVersion: Int,
branchingFactor: Int,
height: Int,
nKeys: Long,
indexPath: String,
rootOffset: Long,
attributes: Map[String, Any]
) {
// Rehydrates the fully typed metadata by supplying the missing Type fields.
def toMetadata(keyType: Type, annotationType: Type): IndexMetadata = IndexMetadata(
fileVersion, branchingFactor,
height, keyType, annotationType,
nKeys, indexPath, rootOffset, attributes)
}
// Fully typed, serializable index metadata (see AbstractIndexMetadata).
case class IndexMetadata(
fileVersion: Int,
branchingFactor: Int,
height: Int,
keyType: Type,
annotationType: Type,
nKeys: Long,
indexPath: String,
rootOffset: Long,
attributes: Map[String, Any]
) extends AbstractIndexMetadata
object IndexWriter {
// On-disk format version and default buffer spec for index files.
val version: SemanticVersion = SemanticVersion(1, 1, 0)
val spec: BufferSpec = BufferSpec.default
// Compiles a staged writer once, then returns a factory that binds it to an
// output path — so the (expensive) code generation is shared across paths.
def builder(
ctx: ExecuteContext,
keyType: PType,
annotationType: PType,
branchingFactor: Int = 4096,
attributes: Map[String, Any] = Map.empty[String, Any]
): String => IndexWriter = {
val f = StagedIndexWriter.build(ctx, keyType, annotationType, branchingFactor, attributes);
{ path: String =>
new IndexWriter(keyType, annotationType, f(path))
}
}
}
// Row-at-a-time front end over a compiled index writer: converts key and
// annotation Annotations into region-value offsets and feeds them to `comp`.
// Owns a Region that is released in close().
class IndexWriter(keyType: PType, valueType: PType, comp: CompiledIndexWriter) extends AutoCloseable {
private val region = Region()
private val rvb = new RegionValueBuilder(region)
// Appends one (key, file offset, annotation) entry to the index.
def appendRow(x: Annotation, offset: Long, annotation: Annotation): Unit = {
rvb.start(keyType)
rvb.addAnnotation(keyType.virtualType, x)
val koff = rvb.end()
rvb.start(valueType)
rvb.addAnnotation(valueType.virtualType, annotation)
val voff = rvb.end()
comp.apply(koff, offset, voff)
}
def trackedOS(): ByteTrackingOutputStream = comp.trackedOS()
// Releases the region and closes the underlying compiled writer.
def close(): Unit = {
region.close()
comp.close()
}
}
// Staged (code-generation-time) builder over a fixed-capacity PCanonicalArray
// of structs: tracks the array offset and current length as compiled-code
// settables and emits code to create, fill, and finalize the array.
class IndexWriterArrayBuilder(name: String, maxSize: Int, sb: SettableBuilder, region: Value[Region], arrayType: PCanonicalArray) {
private val aoff = sb.newSettable[Long](s"${name}_aoff")
private val len = sb.newSettable[Int](s"${name}_len")
val eltType: PCanonicalStruct = types.coerce[PCanonicalStruct](arrayType.elementType)
private val elt = new PCanonicalBaseStructSettable(eltType, sb.newSettable[Long](s"${name}_elt_off"))
def length: Code[Int] = len
// Points this builder at an already-allocated array.
def loadFrom(cb: EmitCodeBuilder, a: Code[Long], l: Code[Int]): Unit = {
cb.assign(aoff, a)
cb.assign(len, l)
}
// Allocates a fresh array of maxSize, stores it at `dest`, resets length to 0.
def create(cb: EmitCodeBuilder, dest: Code[Long]): Unit = {
cb.assign(aoff, arrayType.allocate(region, maxSize))
cb += arrayType.stagedInitialize(aoff, maxSize)
cb += PCode(arrayType, aoff).store(cb.emb, region, dest)
cb.assign(len, 0)
}
def storeLength(cb: EmitCodeBuilder): Unit = cb += arrayType.storeLength(aoff, length)
// Writes a present field value into the currently loaded element (deep copy).
def setFieldValue(cb: EmitCodeBuilder, name: String, field: PCode): Unit = {
cb += eltType.setFieldPresent(elt.a, name)
cb += eltType.fieldType(name).constructAtAddressFromValue(cb.emb, eltType.fieldOffset(elt.a, name), region, field.pt, field.code, deepCopy = true)
}
// Writes a possibly-missing field: missing marks the field absent.
def setField(cb: EmitCodeBuilder, name: String, v: => IEmitCode): Unit =
v.consume(cb,
cb += eltType.setFieldMissing(elt.a, name),
setFieldValue(cb, name, _))
// Loads the next element slot and bumps the length.
def addChild(cb: EmitCodeBuilder): Unit = {
loadChild(cb, len)
cb.assign(len, len + 1)
}
def loadChild(cb: EmitCodeBuilder, idx: Code[Int]): Unit = cb += elt.store(PCode(eltType, arrayType.elementOffset(aoff, idx)))
def getLoadedChild: PBaseStructValue = elt
def getChild(idx: Value[Int]): PCode = PCode(eltType, arrayType.elementOffset(aoff, idx))
}
// Staged facade over a runtime IndexWriterUtils: every method emits code that
// invokes the corresponding method on the settable `ib` reference.
class StagedIndexWriterUtils(ib: Settable[IndexWriterUtils]) {
def create(cb: EmitCodeBuilder, path: Code[String], fs: Code[FS], meta: Code[StagedIndexMetadata]): Unit =
cb.assign(ib, Code.newInstance[IndexWriterUtils, String, FS, StagedIndexMetadata](path, fs, meta))
def size: Code[Int] = ib.invoke[Int]("size")
def add(cb: EmitCodeBuilder, r: Code[Region], aoff: Code[Long], len: Code[Int]): Unit =
cb += ib.invoke[Region, Long, Int, Unit]("add", r, aoff, len)
def update(cb: EmitCodeBuilder, idx: Code[Int], r: Code[Region], aoff: Code[Long], len: Code[Int]): Unit =
cb += ib.invoke[Int, Region, Long, Int, Unit]("update", idx, r, aoff, len)
def getRegion(idx: Code[Int]): Code[Region] = ib.invoke[Int, Region]("getRegion", idx)
def getArrayOffset(idx: Code[Int]): Code[Long] = ib.invoke[Int, Long]("getArrayOffset", idx)
def getLength(idx: Code[Int]): Code[Int] = ib.invoke[Int, Int]("getLength", idx)
def close(cb: EmitCodeBuilder): Unit = cb += ib.invoke[Unit]("close")
def bytesWritten: Code[Long] = ib.invoke[Long]("bytesWritten")
def os: Code[OutputStream] = ib.invoke[OutputStream]("os")
def writeMetadata(cb: EmitCodeBuilder, height: Code[Int], rootOffset: Code[Long], nKeys: Code[Long]): Unit =
cb += ib.invoke[Int, Long, Long, Unit]("writeMetadata", height, rootOffset, nKeys)
}
/**
 * Static description of an index (branching factor, key/annotation types, free-form
 * attributes) captured at compile time and serialized alongside the written index.
 */
case class StagedIndexMetadata(
  branchingFactor: Int,
  keyType: Type,
  annotationType: Type,
  attributes: Map[String, Any]
) {
  /**
   * Serializes the full IndexMetadata record as JSON to `out`.
   *
   * @param height     height of the written B-tree
   * @param rootOffset byte offset of the root node in the index file
   * @param nKeys      total number of keys written
   */
  // Explicit `: Unit =` replaces the deprecated procedure syntax `def serialize(...) { }`.
  def serialize(out: OutputStream, height: Int, rootOffset: Long, nKeys: Long): Unit = {
    import AbstractRVDSpec.formats
    val metadata = IndexMetadata(IndexWriter.version.rep, branchingFactor, height, keyType, annotationType, nKeys, "index", rootOffset, attributes)
    Serialization.write(metadata, out)
  }
}
/**
 * Runtime state backing StagedIndexWriterUtils: owns the tracked index output stream,
 * per-level (region, array offset, length) triples, and metadata serialization.
 */
class IndexWriterUtils(path: String, fs: FS, meta: StagedIndexMetadata) {
  val indexPath: String = s"$path/index"
  val metadataPath: String = s"$path/metadata.json.gz"

  // Byte-counting wrapper so node offsets can be recorded as they are written.
  val trackedOS: ByteTrackingOutputStream = new ByteTrackingOutputStream(fs.create(indexPath))

  // Parallel per-level builders: region, array offset, and element count for each level.
  val rBuilder = new ArrayBuilder[Region]()
  val aBuilder = new ArrayBuilder[Long]()
  val lBuilder = new ArrayBuilder[Int]()

  def bytesWritten: Long = trackedOS.bytesWritten

  def os: OutputStream = trackedOS

  /** Writes the JSON metadata file describing the finished index. */
  def writeMetadata(height: Int, rootOffset: Long, nKeys: Long): Unit =
    using(fs.create(metadataPath))(meta.serialize(_, height, rootOffset, nKeys))

  def size: Int = rBuilder.size

  /** Appends a new level's (region, offset, length) triple. */
  def add(r: Region, aoff: Long, len: Int): Unit = {
    rBuilder += r
    aBuilder += aoff
    lBuilder += len
  }

  /** Overwrites level `idx` in place, or appends when `idx` is one past the end. */
  def update(idx: Int, r: Region, aoff: Long, len: Int): Unit =
    if (idx != size) {
      rBuilder.update(idx, r)
      aBuilder.update(idx, aoff)
      lBuilder.update(idx, len)
    } else {
      add(r, aoff, len)
    }

  def getRegion(idx: Int): Region = rBuilder(idx)
  def getArrayOffset(idx: Int): Long = aBuilder(idx)
  def getLength(idx: Int): Int = lBuilder(idx)

  /** Releases all held regions, then closes the index output stream. */
  def close(): Unit = {
    rBuilder.result().foreach(_.close())
    trackedOS.close()
  }
}
// Interface implemented by the generated index-writer class (see StagedIndexWriter.build).
trait CompiledIndexWriter {
// Opens output streams rooted at `path` and initializes writer state.
def init(path: String): Unit
// Exposes the byte-counting stream so callers can observe bytes written.
def trackedOS(): ByteTrackingOutputStream
// Adds one entry: key address `x`, file `offset`, and annotation address.
def apply(x: Long, offset: Long, annotation: Long): Unit
// Flushes remaining nodes, writes metadata, and releases resources.
def close(): Unit
}
object StagedIndexWriter {
// Compiles a CompiledIndexWriter for the given key/annotation types. The returned
// function instantiates the compiled class and calls init(path) for the caller.
def build(
ctx: ExecuteContext,
keyType: PType,
annotationType: PType,
branchingFactor: Int = 4096,
attributes: Map[String, Any] = Map.empty[String, Any]
): String => CompiledIndexWriter = {
// apply(key, offset, annotation) takes three Long addresses and returns Unit.
val fb = EmitFunctionBuilder[CompiledIndexWriter](ctx, "indexwriter",
FastIndexedSeq[ParamType](typeInfo[Long], typeInfo[Long], typeInfo[Long]),
typeInfo[Unit])
val cb = fb.ecb
val siw = new StagedIndexWriter(branchingFactor, keyType, annotationType, attributes, cb)
cb.newEmitMethod("init", FastIndexedSeq[ParamType](typeInfo[String]), typeInfo[Unit])
.voidWithBuilder(cb => siw.init(cb, cb.emb.getCodeParam[String](1)))
// The main entry point forwards its three parameters to siw.add; key and annotation
// are wrapped as always-present (missingness flag `false`) emit codes.
fb.emb.voidWithBuilder { cb =>
siw.add(cb,
IEmitCode(cb, false, PCode(keyType, fb.getCodeParam[Long](1))),
fb.getCodeParam[Long](2),
IEmitCode(cb, false, PCode(annotationType, fb.getCodeParam[Long](3))))
}
cb.newEmitMethod("close", FastIndexedSeq[ParamType](), typeInfo[Unit])
.voidWithBuilder(siw.close)
cb.newEmitMethod("trackedOS", FastIndexedSeq[ParamType](), typeInfo[ByteTrackingOutputStream])
.emitWithBuilder[ByteTrackingOutputStream] { _ => Code.checkcast[ByteTrackingOutputStream](siw.utils.os) }
val makeFB = fb.resultWithIndex()
{ path: String =>
val f = makeFB(0, null)
f.init(path)
f
}
}
// Variant used when no annotation is needed: annotationType defaults to an empty struct.
def withDefaults(keyType: PType, cb: EmitClassBuilder[_],
branchingFactor: Int = 4096,
annotationType: PType = +PCanonicalStruct(),
attributes: Map[String, Any] = Map.empty[String, Any]): StagedIndexWriter =
new StagedIndexWriter(branchingFactor, keyType, annotationType, attributes, cb)
}
/**
 * Emits the staged code that writes a B-tree index: leaf nodes are buffered and flushed
 * into internal nodes as they fill, using per-level state held in IndexWriterUtils.
 */
class StagedIndexWriter(branchingFactor: Int, keyType: PType, annotationType: PType, attributes: Map[String, Any], cb: EmitClassBuilder[_]) {
  require(branchingFactor > 1)

  // Staged field holding the running element count. Declared `val` (was `var`): the Scala
  // reference itself is never reassigned — mutation happens in generated code via cb.assign.
  private val elementIdx = cb.genFieldThisRef[Long]()
  // Output buffer for encoded nodes.
  private val ob = cb.genFieldThisRef[OutputBuffer]()
  // Staged handle on the runtime helper holding per-level state.
  private val utils = new StagedIndexWriterUtils(cb.genFieldThisRef[IndexWriterUtils]())
  // Builder accumulating the current (not yet flushed) leaf node.
  private val leafBuilder = new StagedLeafNodeBuilder(branchingFactor, keyType, annotationType, cb.fieldBuilder)
// Emits a method that flushes the internal node at `level`. It encodes the node (type tag
// 1), then — unless this is the root — registers the flushed node's offset with its parent
// at level + 1, recursively flushing the parent first if it is already full.
private val writeInternalNode: EmitMethodBuilder[_] = {
val m = cb.genEmitMethod[Int, Boolean, Unit]("writeInternalNode")
val internalBuilder = new StagedInternalNodeBuilder(branchingFactor, keyType, annotationType, m.localBuilder)
val parentBuilder = new StagedInternalNodeBuilder(branchingFactor, keyType, annotationType, m.localBuilder)
m.emitWithBuilder { cb =>
val level = m.getCodeParam[Int](1)
val isRoot = m.getCodeParam[Boolean](2)
// Record where in the file this node begins, before any bytes are written.
val idxOff = cb.newLocal[Long]("indexOff")
cb.assign(idxOff, utils.bytesWritten)
internalBuilder.loadFrom(cb, utils, level)
// Type tag 1 = internal node (leaf nodes write 0).
cb += ob.writeByte(1.toByte)
internalBuilder.encode(cb, ob)
cb += ob.flush()
val next = m.newLocal[Int]("next")
cb.assign(next, level + 1)
cb.ifx(!isRoot, {
// Create the parent level if it does not exist yet; otherwise flush it first if full.
cb.ifx(utils.size.ceq(next),
parentBuilder.create(cb), {
cb.ifx(utils.getLength(next).ceq(branchingFactor),
cb += m.invokeCode[Unit](CodeParam(next), CodeParam(false)))
parentBuilder.loadFrom(cb, utils, next)
})
// Register this node's first child / offset with the parent.
internalBuilder.loadChild(cb, 0)
parentBuilder.add(cb, idxOff, internalBuilder.getLoadedChild)
parentBuilder.store(cb, utils, next)
})
// Reset this level so it can accumulate fresh children.
internalBuilder.reset(cb)
internalBuilder.store(cb, utils, level)
Code._empty
}
m
}
// Emits a method that flushes the buffered leaf node: encodes it (type tag 0), registers
// its offset and first key with the level-0 internal node (flushing that node first if it
// is full), then resets the leaf builder starting at the current element index.
private val writeLeafNode: EmitMethodBuilder[_] = {
val m = cb.genEmitMethod[Unit]("writeLeafNode")
val parentBuilder = new StagedInternalNodeBuilder(branchingFactor, keyType, annotationType, m.localBuilder)
m.emitWithBuilder { cb =>
val idxOff = cb.newLocal[Long]("indexOff")
cb.assign(idxOff, utils.bytesWritten)
// Type tag 0 = leaf node.
cb += ob.writeByte(0.toByte)
leafBuilder.encode(cb, ob)
cb += ob.flush()
cb.ifx(utils.getLength(0).ceq(branchingFactor),
cb += writeInternalNode.invokeCode[Unit](CodeParam(0), CodeParam(false)))
parentBuilder.loadFrom(cb, utils, 0)
leafBuilder.loadChild(cb, 0)
parentBuilder.add(cb, idxOff, leafBuilder.firstIdx.tcode[Long], leafBuilder.getLoadedChild)
parentBuilder.store(cb, utils, 0)
leafBuilder.reset(cb, elementIdx)
Code._empty
}
m
}
// Emits a method that flushes all partially-filled nodes bottom-up and finally writes the
// root node (isRoot = true). Returns the byte offset of the root node in the index file.
private val flush: EmitMethodBuilder[_] = {
val m = cb.genEmitMethod[Long]("flush")
m.emitWithBuilder { cb =>
val idxOff = cb.newLocal[Long]("indexOff")
val level = m.newLocal[Int]("level")
// Flush any buffered leaf entries first so they propagate into level 0.
cb.ifx(leafBuilder.ab.length > 0, cb += writeLeafNode.invokeCode[Unit]())
cb.assign(level, 0)
// Flush every non-root level that still holds entries; the loop leaves `level`
// pointing at the topmost (root) level.
cb.whileLoop(level < utils.size - 1, {
cb.ifx(utils.getLength(level) > 0,
cb += writeInternalNode.invokeCode[Unit](CodeParam(level), CodeParam(false)))
cb.assign(level, level + 1)
})
// The root's offset is wherever the stream is before writing it.
cb.assign(idxOff, utils.bytesWritten)
cb += writeInternalNode.invokeCode[Unit](CodeParam(level), CodeParam(true))
idxOff.load()
}
m
}
/**
 * Emits code appending one (key, file offset, annotation) entry. If the leaf buffer is
 * full it is flushed first; the running element index is then incremented.
 *
 * `key` and `annotation` are by-name so their emission happens at the call site chosen
 * by leafBuilder.add.
 */
// Explicit `: Unit =` replaces the deprecated procedure syntax `def add(...) { }`.
def add(cb: EmitCodeBuilder, key: => IEmitCode, offset: Code[Long], annotation: => IEmitCode): Unit = {
  cb.ifx(leafBuilder.ab.length.ceq(branchingFactor),
    cb += writeLeafNode.invokeCode[Unit]())
  leafBuilder.add(cb, key, offset, annotation)
  cb.assign(elementIdx, elementIdx + 1L)
}
// Emits code finalizing the index: flushes all nodes (capturing the root offset), closes
// the leaf builder and index stream, then writes the metadata file (a separate file, so
// writing it after utils.close is safe). Tree height is utils.size + 1 (leaf level plus
// the internal levels tracked by utils).
def close(cb: EmitCodeBuilder): Unit = {
val off = cb.newLocal[Long]("lastOffset")
cb.assign(off, flush.invokeCode[Long]())
leafBuilder.close(cb)
utils.close(cb)
utils.writeMetadata(cb, utils.size + 1, off, elementIdx)
}
// Emits initialization code: captures the metadata as a compile-time constant object,
// resets the element counter, creates the runtime helper and output buffer, and seeds the
// tree with an empty leaf (starting at element 0) and an empty level-0 internal node.
def init(cb: EmitCodeBuilder, path: Value[String]): Unit = {
val metadata = cb.emb.getObject(StagedIndexMetadata(
branchingFactor,
keyType.virtualType,
annotationType.virtualType,
attributes))
val internalBuilder = new StagedInternalNodeBuilder(branchingFactor, keyType, annotationType, cb.localBuilder)
cb.assign(elementIdx, 0L)
utils.create(cb, path, cb.emb.getFS, metadata)
cb.assign(ob, IndexWriter.spec.buildCodeOutputBuffer(utils.os))
leafBuilder.create(cb, 0L)
internalBuilder.create(cb)
internalBuilder.store(cb, utils, 0)
}
} | cseed/hail | hail/src/main/scala/is/hail/io/index/IndexWriter.scala | Scala | mit | 14,173 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.javadsl.api
import java.lang.invoke.SerializedLambda
import java.lang.reflect.Method
/**
* Resolves method references.
*
* This uses a JDK specced approach to, given a lambda that is implemented using LambdaMetaFactory, resolve that to
* the actual method that gets invoked.
*
* Essentially the way it works is it invokes the lambdas writeReplace method, which converts the lambda to a
* SerializedLambda, which contains all the necessary information to resolve the reference to the method.
*
* The SAM that the lambda implements must be Serializable for this to work.
*/
private[api] object MethodRefResolver {

  /**
   * Resolve the method ref for a lambda.
   *
   * Invokes the lambda's `writeReplace` to obtain its [[SerializedLambda]], then loads the
   * implementing class and looks up the implementing [[Method]] by name and argument types.
   *
   * @throws IllegalArgumentException if the object is not a serializable
   *         LambdaMetaFactory-backed lambda, or is a constructor reference.
   */
  def resolveMethodRef(lambda: Any): Method = {
    val lambdaType = lambda.getClass
    if (!classOf[java.io.Serializable].isInstance(lambda)) {
      throw new IllegalArgumentException(
        "Can only resolve method references from serializable SAMs, class was: " + lambdaType
      )
    }
    val writeReplace =
      try {
        lambda.getClass.getDeclaredMethod("writeReplace")
      } catch {
        // getDeclaredMethod signals absence with NoSuchMethodException; the previously
        // caught NoSuchMethodError is a linkage Error that is never thrown here, so the
        // intended IllegalArgumentException path was unreachable.
        case e: NoSuchMethodException =>
          throw new IllegalArgumentException(
            "Passed in object does not provide a writeReplace method, hence it can't be a Java 8 method reference.",
            e
          )
      }
    writeReplace.setAccessible(true)
    val serializedLambda = writeReplace.invoke(lambda) match {
      case s: SerializedLambda => s
      case _ =>
        throw new IllegalArgumentException(
          "Passed in object does not writeReplace itself with SerializedLambda, hence it can't be a Java 8 method reference."
        )
    }

    // Try to load the class that the method ref is defined on
    val ownerClass = loadClass(lambdaType.getClassLoader, serializedLambda.getImplClass)
    val argumentClasses = getArgumentClasses(lambdaType.getClassLoader, serializedLambda.getImplMethodSignature)

    // A constructor reference is identified by its implementing *method name* "<init>";
    // getImplClass is always a class name and can never equal "<init>".
    if (serializedLambda.getImplMethodName.equals("<init>")) {
      throw new IllegalArgumentException("Passed in method ref is a constructor.")
    } else {
      ownerClass.getDeclaredMethod(serializedLambda.getImplMethodName, argumentClasses: _*)
    }
  }

  /** Loads a class from its JVM internal name (slashes converted to dots). */
  private def loadClass(classLoader: ClassLoader, internalName: String) = {
    Class.forName(internalName.replace('/', '.'), false, classLoader)
  }

  /**
   * Parses a JVM method descriptor (e.g. "(ILjava/lang/String;[J)V") into the list of
   * argument classes, starting after the opening parenthesis.
   */
  private def getArgumentClasses(classLoader: ClassLoader, methodDescriptor: String): List[Class[_]] = {
    def parseArgumentClasses(offset: Int, arrayDepth: Int): List[Class[_]] = {
      methodDescriptor.charAt(offset) match {
        case ')' => Nil
        case 'L' =>
          val end = methodDescriptor.indexOf(';', offset)
          val className = if (arrayDepth > 0) {
            // Array-of-objects descriptors passed to Class.forName must retain both the
            // leading '['s and the trailing ';' (e.g. "[Ljava.lang.String;"); the previous
            // code dropped the ';' and Class.forName rejected the name.
            methodDescriptor.substring(offset - arrayDepth, end + 1)
          } else {
            methodDescriptor.substring(offset + 1, end)
          }
          loadClass(classLoader, className) :: parseArgumentClasses(end + 1, 0)
        case '[' =>
          parseArgumentClasses(offset + 1, arrayDepth + 1)
        case _ if arrayDepth > 0 =>
          // Primitive array, e.g. "[I": the two-character descriptor loads directly.
          val className = methodDescriptor.substring(offset - arrayDepth, offset + 1)
          loadClass(classLoader, className) :: parseArgumentClasses(offset + 1, 0)
        case other =>
          val clazz = other match {
            case 'Z' => classOf[Boolean]
            case 'C' => classOf[Char]
            case 'B' => classOf[Byte]
            case 'S' => classOf[Short]
            case 'I' => classOf[Int]
            case 'F' => classOf[Float]
            case 'J' => classOf[Long]
            case 'D' => classOf[Double]
            // sys.error already throws; the former `throw sys.error(...)` was redundant.
            case unknown => sys.error("Unknown primitive type: " + unknown)
          }
          clazz :: parseArgumentClasses(offset + 1, 0)
      }
    }
    parseArgumentClasses(1, 0)
  }
}
| lagom/lagom | service/javadsl/api/src/main/scala/com/lightbend/lagom/internal/javadsl/api/MethodRefResolver.scala | Scala | apache-2.0 | 3,941 |
package com.twitter.util
import scala.reflect.Manifest
/**
* The Try type represents a computation that may either result in an exception
* or return a value. It is analogous to the Either type but encodes common idioms
* for handling exceptional cases.
*/
object Try {
  /** Exception used by Try.filter when the predicate does not hold. */
  case class PredicateDoesNotObtain() extends Exception()

  /**
   * Evaluates the by-name argument, capturing any non-fatal exception as a Throw.
   */
  def apply[R](r: => R): Try[R] = {
    import scala.util.control.NonFatal
    try { Return(r) } catch {
      // The previous bare `case e =>` caught every Throwable, silently capturing fatal
      // errors (OutOfMemoryError, InterruptedException, ...). Only non-fatal exceptions
      // are converted to Throw; fatal ones propagate.
      case NonFatal(e) => Throw(e)
    }
  }
}
trait Try[+R] {
  /**
   * Returns true if the Try is a Throw, false otherwise.
   */
  def isThrow: Boolean

  /**
   * Returns true if the Try is a Return, false otherwise.
   */
  def isReturn: Boolean

  /**
   * Returns the value from this Return or the given argument if this is a Throw.
   */
  // Explicit result types on public members: the API should not depend on inference.
  def getOrElse[R2 >: R](default: => R2): R2 = if (isReturn) apply() else default

  /**
   * Calls the exceptionHandler with the exception if this is a Throw. This is like flatMap for the exception.
   */
  def rescue[R2 >: R](rescueException: Throwable => Try[R2]): Try[R2]

  /**
   * Returns the value from this Return or throws the exception if this is a Throw
   */
  def apply(): R

  /**
   * Applies the given function f if this is a Result.
   */
  // Explicit `: Unit =` replaces the deprecated procedure syntax.
  def foreach(f: R => Unit): Unit = if (isReturn) f(apply())

  /**
   * Returns the given function applied to the value from this Return or returns this if this is a Throw
   */
  def flatMap[R2](f: R => Try[R2]): Try[R2]

  /**
   * Maps the given function to the value from this Return or returns this if this is a Throw
   */
  def map[X](f: R => X): Try[X]

  /**
   * Returns None if this is a Throw or the given predicate does not obtain. Returns some(this) otherwise.
   */
  def filter(p: R => Boolean): Try[R]

  /**
   * Returns None if this is a Throw or a Some containing the value if this is a Return
   */
  def toOption: Option[R] = if (isReturn) Some(apply()) else None

  /**
   * Invokes the callback with this completed Try. Subclasses may override.
   */
  def respond(k: Try[R] => Unit): Unit = k(this)
}
/** The failed case of Try: carries the captured exception. */
final case class Throw[+R](e: Throwable) extends Try[R] {
  def isThrow: Boolean = true
  def isReturn: Boolean = false
  // A Throw propagates only through rescue; all value-level combinators short-circuit.
  def rescue[R2 >: R](rescueException: Throwable => Try[R2]): Try[R2] = rescueException(e)
  def apply(): R = throw e
  def flatMap[R2](f: R => Try[R2]): Try[R2] = Throw[R2](e)
  def map[X](f: R => X): Try[X] = Throw(e)
  def filter(p: R => Boolean): Try[R] = this
}
/** The successful case of Try: carries the computed value. */
final case class Return[+R](r: R) extends Try[R] {
  def isThrow: Boolean = false
  def isReturn: Boolean = true
  def rescue[R2 >: R](rescueException: Throwable => Try[R2]): Try[R2] = Return(r)
  def apply(): R = r
  def flatMap[R2](f: R => Try[R2]): Try[R2] = {
    import scala.util.control.NonFatal
    try {
      f(r)
    } catch {
      // Only non-fatal exceptions become Throw; the former bare `case e =>` also
      // captured fatal errors, which must propagate instead.
      case NonFatal(e) => Throw(e)
    }
  }
  def map[X](f: R => X): Try[X] = Try[X](f(r))
  // Use the already-extracted value directly rather than re-reading it via apply().
  def filter(p: R => Boolean): Try[R] =
    if (p(r)) this else Throw(new Try.PredicateDoesNotObtain)
} | mccv/util | src/main/scala/com/twitter/util/Try.scala | Scala | apache-2.0 | 2,753 |
package zeroformatter
import java.nio.charset.StandardCharsets
import spire.syntax.cfor._
import BinaryUtil._
// Little-endian binary encoder. Each writeX returns the number of bytes written; "Unsafe"
// variants assume capacity has already been ensured, the others grow the buffer first.
trait Encoder {
// Grows the underlying buffer (if needed) so that `appendLength` bytes fit at `offset`.
def ensureCapacity(offset: Int, appendLength: Int): Unit
def writeBoolUnsafe(offset:Int, value: Boolean): Int
def writeBool(offset:Int, value: Boolean): Int
def writeByteUnsafe(offset:Int, value: Byte): Int
def writeByte(offset:Int, value: Byte): Int
def writeShortUnsafe(offset:Int, value: Short): Int
def writeShort(offset:Int, value: Short): Int
def writeIntUnsafe(offset:Int, value: Int): Int
def writeInt(offset:Int, value: Int): Int
def writeLongUnsafe(offset:Int, value: Long): Int
def writeLong(offset:Int, value: Long): Int
def writeFloatUnsafe(offset:Int, value: Float): Int
def writeFloat(offset:Int, value: Float): Int
def writeDoubleUnsafe(offset:Int, value: Double): Int
def writeDouble(offset:Int, value: Double): Int
def writeCharUnsafe(offset:Int, value: Char): Int
def writeChar(offset:Int, value: Char): Int
def writeByteArrayUnsafe(offset:Int, value: Array[Byte]): Int
def writeByteArrayUnsafe(offset: Int, value: Array[Byte], start: Int, len: Int): Int
def writeByteArray(offset:Int, value: Array[Byte]): Int
def writeByteArray(offset: Int, value: Array[Byte], start: Int, len: Int): Int
// Returns the encoded bytes accumulated so far.
def toByteArray: Array[Byte]
// Encodes a string as [int32 byte-length][UTF-8 bytes]; null is encoded as length -1.
def writeString(offset: Int, value: String): Int =
if(value == null) writeInt(offset, -1)
else {
val strBytes = value.getBytes(StandardCharsets.UTF_8)
val len = strBytes.length
// Reserve space for the 4-byte length prefix plus the payload in one resize.
ensureCapacity(offset, 4 + len)
val intSize = writeIntUnsafe(offset, len)
intSize + writeByteArrayUnsafe(offset + intSize, strBytes, 0, len)
}
}
// Encoder backed by a growable Array[Byte]. All multi-byte values are written
// little-endian (low byte first).
final case class ArrayEncoder(private var buf: Array[Byte]) extends Encoder {
override def ensureCapacity(offset: Int, appendLength: Int): Unit = {
// May replace `buf` with a larger copy; subsequent writes target the new array.
buf = BinaryUtil.ensureCapacity(buf, offset, appendLength)
}
override def toByteArray = buf
// Booleans are a single byte: 1 for true, 0 for false.
override def writeBoolUnsafe(offset: Int, value: Boolean): Int = {
buf(offset) = if(value) 1 else 0
1
}
override def writeBool(offset: Int, value: Boolean): Int = {
ensureCapacity(offset, 1)
writeBoolUnsafe(offset, value)
}
override def writeByteUnsafe(offset: Int, value: Byte): Int = {
buf(offset) = value
1
}
override def writeByte(offset: Int, value: Byte): Int = {
ensureCapacity(offset, 1)
buf(offset) = value
1
}
// Little-endian: low byte at `offset`, high byte at `offset + 1`.
override def writeShortUnsafe(offset: Int, value: Short): Int = {
buf(offset) = value.asInstanceOf[Byte]
buf(offset + 1) = (value >>> 8).asInstanceOf[Byte]
2
}
override def writeShort(offset: Int, value: Short): Int = {
ensureCapacity(offset, 2)
writeShortUnsafe(offset, value)
}
// Writes 4 bytes little-endian, shifting the value right by 8 each step.
override def writeIntUnsafe(offset: Int, value: Int): Int = {
@annotation.tailrec
def go(pos: Int, v: Int): Int = {
buf(offset + pos) = v.asInstanceOf[Byte]
if(pos == 3) 4
else go(pos + 1, v >>> 8)
}
go(0, value)
}
override def writeInt(offset: Int, value: Int): Int = {
ensureCapacity(offset, 4)
writeIntUnsafe(offset, value)
}
// Writes 8 bytes little-endian.
override def writeLongUnsafe(offset: Int, value: Long): Int = {
@annotation.tailrec
def go(pos: Int, v: Long): Int = {
buf(offset + pos) = v.asInstanceOf[Byte]
if(pos == 7) 8
else go(pos + 1, v >>> 8)
}
go(0, value)
}
override def writeLong(offset: Int, value: Long): Int = {
ensureCapacity(offset, 8)
writeLongUnsafe(offset, value)
}
// Floats/doubles are written via their IEEE-754 bit patterns.
override def writeFloatUnsafe(offset: Int, value: Float): Int =
writeIntUnsafe(offset, java.lang.Float.floatToIntBits(value))
override def writeFloat(offset: Int, value: Float): Int =
writeInt(offset, java.lang.Float.floatToIntBits(value))
override def writeDoubleUnsafe(offset: Int, value: Double): Int =
writeLongUnsafe(offset, java.lang.Double.doubleToLongBits(value))
override def writeDouble(offset: Int, value: Double): Int =
writeLong(offset, java.lang.Double.doubleToLongBits(value))
private[this] final val charSize = 2
// Chars go through a ByteBuffer (BinaryUtil.allocate); byte order follows that buffer's
// configuration rather than the little-endian layout above — note the asymmetry.
override def writeCharUnsafe(offset: Int, value: Char): Int = {
val cs = allocate(charSize).putChar(value).array()
cfor(0)(_ <= 1, _ + 1){ i => buf(offset + i) = cs(i) }
charSize
}
override def writeChar(offset: Int, value: Char): Int = {
ensureCapacity(offset, charSize)
writeCharUnsafe(offset, value)
}
override def writeByteArrayUnsafe(offset: Int, value: Array[Byte]): Int =
writeByteArrayUnsafe(offset, value, 0, value.length)
override def writeByteArrayUnsafe(offset: Int, value: Array[Byte], start: Int, len: Int) = {
System.arraycopy(value, start, buf, offset, len)
len
}
override def writeByteArray(offset: Int, value: Array[Byte]): Int =
writeByteArray(offset, value, 0, value.length)
override def writeByteArray(offset: Int, value: Array[Byte], start: Int, len: Int) = {
ensureCapacity(offset, len)
writeByteArrayUnsafe(offset, value, start, len)
}
}
| pocketberserker/scala-zero-formatter | zero-formatter/src/main/scala/zeroformatter/Encoder.scala | Scala | mit | 4,978 |
package scutil.jcollection.extension
import java.util.concurrent.ConcurrentLinkedQueue
// Importable instance of the trait: `import ConcurrentLinkedQueueImplicits._`.
object ConcurrentLinkedQueueImplicits extends ConcurrentLinkedQueueImplicits
trait ConcurrentLinkedQueueImplicits {
  /** Option-returning convenience accessors for [[ConcurrentLinkedQueue]]. */
  implicit final class ConcurrentLinkedQueueExt[T](peer: ConcurrentLinkedQueue[T]) {
    /** Removes and returns the head element, or None when the queue is empty. */
    def pollOption(): Option[T] =
      peer.poll() match {
        case null    => None
        case element => Some(element)
      }

    /** Returns the head element without removing it, or None when the queue is empty. */
    def peekOption: Option[T] =
      peer.peek match {
        case null    => None
        case element => Some(element)
      }
  }
}
| ritschwumm/scutil | modules/jdk/src/main/scala/scutil/jcollection/extension/ConcurrentLinkedQueueImplicits.scala | Scala | bsd-2-clause | 392 |
package com.github.sparkfy.rpc
import com.github.sparkfy.SparkfyException
/**
* A factory class to create the [[RpcEnv]]. It must have an empty constructor so that it can be
* created using Reflection.
*/
trait RpcEnvFactory {
// Builds a new RpcEnv from the given configuration. Per the contract above,
// implementations must expose an empty constructor for reflective instantiation.
def create(config: RpcEnvConfig): RpcEnv
}
trait RpcEndpoint {
/**
* The [[RpcEnv]] that this [[RpcEndpoint]] is registered to.
*/
val rpcEnv: RpcEnv
/**
* The [[RpcEndpointRef]] of this [[RpcEndpoint]]. `self` will become valid when `onStart` is
* called. And `self` will become `null` when `onStop` is called.
*
* Note: Because before `onStart`, [[RpcEndpoint]] has not yet been registered and there is not
* valid [[RpcEndpointRef]] for it. So don't call `self` before `onStart` is called.
*/
final def self: RpcEndpointRef = {
require(rpcEnv != null, "rpcEnv has not been initialized")
rpcEnv.endpointRef(this)
}
/**
* Process messages from [[RpcEndpointRef.send]] or [[RpcCallContext.reply)]]. If receiving a
* unmatched message, [[SparkfyException]] will be thrown and sent to `onError`.
*/
// Default: reject every message; subclasses override with their own handler.
def receive: PartialFunction[Any, Unit] = {
case _ => throw new SparkfyException(self + " does not implement 'receive'")
}
/**
* Process messages from [[RpcEndpointRef.ask]]. If receiving a unmatched message,
* [[SparkfyException]] will be thrown and sent to `onError`.
*/
// Default: report failure to the caller instead of throwing locally.
def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
case _ => context.sendFailure(new SparkfyException(self + " won't reply anything"))
}
/**
* Invoked when any exception is thrown during handling messages.
*/
def onError(cause: Throwable): Unit = {
// By default, throw e and let RpcEnv handle it
throw cause
}
/**
* Invoked when `remoteAddress` is connected to the current node.
*/
def onConnected(remoteAddress: RpcAddress): Unit = {
// By default, do nothing.
}
/**
* Invoked when `remoteAddress` is lost.
*/
def onDisconnected(remoteAddress: RpcAddress): Unit = {
// By default, do nothing.
}
/**
* Invoked when some network error happens in the connection between the current node and
* `remoteAddress`.
*/
def onNetworkError(cause: Throwable, remoteAddress: RpcAddress): Unit = {
// By default, do nothing.
}
/**
* Invoked before [[RpcEndpoint]] starts to handle any message.
*/
def onStart(): Unit = {
// By default, do nothing.
}
/**
* Invoked when [[RpcEndpoint]] is stopping. `self` will be `null` in this method and you cannot
* use it to send or ask messages.
*/
def onStop(): Unit = {
// By default, do nothing.
}
/**
* A convenient method to stop [[RpcEndpoint]].
*/
// Safe to call repeatedly: once stopped, `self` resolution yields null and the body no-ops.
final def stop(): Unit = {
val _self = self
if (_self != null) {
rpcEnv.stop(_self)
}
}
}
/**
* A trait that requires RpcEnv thread-safely sending messages to it.
*
* Thread-safety means processing of one message happens before processing of the next message by
* the same [[ThreadSafeRpcEndpoint]]. In the other words, changes to internal fields of a
* [[ThreadSafeRpcEndpoint]] are visible when processing the next message, and fields in the
* [[ThreadSafeRpcEndpoint]] need not be volatile or equivalent.
*
* However, there is no guarantee that the same thread will be executing the same
* [[ThreadSafeRpcEndpoint]] for different messages.
*/
trait ThreadSafeRpcEndpoint extends RpcEndpoint {
// Marker trait only: the sequential-delivery guarantee described above is provided by
// the dispatching RpcEnv, not by any member defined here.
}
| sparkfy/sparkfy | sparkfy-common/src/main/scala/com/github/sparkfy/rpc/RpcEndpoint.scala | Scala | apache-2.0 | 3,461 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
// Computations box CP673: optional integer that, when present, must be >= 0.
case class CP673(value: Option[Int]) extends CtBoxIdentifier(name = "Market value of unsold assets") with CtOptionalInteger with Input with SelfValidatableBox[ComputationsBoxRetriever, Option[Int]] {
override def validate(boxRetriever: ComputationsBoxRetriever) = {
collectErrors(
validateZeroOrPositiveInteger()
)
}
}
object CP673 {
// Convenience constructor wrapping a known value in Some.
def apply(value: Int): CP673 = CP673(Some(value))
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CP673.scala | Scala | apache-2.0 | 1,150 |
package io.iohk.ethereum.consensus.blocks
import akka.util.ByteString
import io.iohk.ethereum.crypto.ECDSASignatureImplicits.ECDSASignatureOrdering
import io.iohk.ethereum.domain.BlockHeader.HeaderExtraFields.HefPostEcip1097
import io.iohk.ethereum.domain._
import io.iohk.ethereum.ledger.BloomFilter
// Builds a checkpoint block on top of `parent`. All fields are derived deterministically
// from the parent so every federation node produces an identical block.
class CheckpointBlockGenerator {
def generate(parent: Block, checkpoint: Checkpoint): Block = {
val blockNumber = parent.number + 1
// we are using a predictable value for timestamp so that each federation node generates identical block
// see ETCM-173
val timestamp = parent.header.unixTimestamp + 1
// Signatures are sorted (via the imported ECDSASignatureOrdering) — presumably to make
// the checkpoint canonical across nodes, matching the determinism goal above; verify.
val checkpointWithSortedSignatures = Checkpoint(checkpoint.signatures.sorted)
// Empty body/ommers/beneficiary; state root is carried over unchanged from the parent.
val header = BlockHeader(
parentHash = parent.hash,
ommersHash = BlockHeader.EmptyOmmers,
beneficiary = BlockHeader.EmptyBeneficiary,
difficulty = parent.header.difficulty,
number = blockNumber,
gasLimit = parent.header.gasLimit,
unixTimestamp = timestamp,
extraData = ByteString.empty,
stateRoot = parent.header.stateRoot,
transactionsRoot = BlockHeader.EmptyMpt,
receiptsRoot = BlockHeader.EmptyMpt,
logsBloom = BloomFilter.EmptyBloomFilter,
gasUsed = UInt256.Zero,
mixHash = ByteString.empty,
nonce = ByteString.empty,
extraFields = HefPostEcip1097(false, Some(checkpointWithSortedSignatures))
)
Block(header, BlockBody.empty)
}
}
| input-output-hk/etc-client | src/main/scala/io/iohk/ethereum/consensus/blocks/CheckpointBlockGenerator.scala | Scala | mit | 1,451 |
package io.swagger.client.api
import io.swagger.client.model.UnitCategory
import io.swagger.client.model.Unit
import io.swagger.client.core._
import io.swagger.client.core.CollectionFormats._
import io.swagger.client.core.ApiKeyLocations._
// Generated client bindings for the units endpoints; each method builds an ApiRequest
// describing the call (no I/O is performed here).
object UnitsApi {

/**
* Get a list of the categories of measurement units such as 'Distance', 'Duration', 'Energy', 'Frequency', 'Miscellany', 'Pressure', 'Proportion', 'Rating', 'Temperature', 'Volume', and 'Weight'.
*
* Expected answers:
* code 200 : UnitCategory (Successful operation)
* code 401 : (Not Authenticated)
*/
def unitCategoriesGet(): ApiRequest[UnitCategory] =
ApiRequest[UnitCategory](ApiMethods.GET, "https://localhost/api", "/unitCategories", "application/json")
.withSuccessResponse[UnitCategory](200)
.withErrorResponse[Unit](401)

/**
* Get all available units
*
* Expected answers:
* code 200 : Seq[Unit] (Successful operation)
* code 401 : (Not Authenticated)
*
* @param unitName Unit name
* @param abbreviatedUnitName Restrict the results to a specific unit by providing the unit abbreviation.
* @param categoryName Restrict the results to a specific unit category by providing the unit category name.
*/
// None-valued parameters are simply omitted from the query string by withQueryParam.
def unitsGet(unitName: Option[String] = None, abbreviatedUnitName: Option[String] = None, categoryName: Option[String] = None): ApiRequest[Seq[Unit]] =
ApiRequest[Seq[Unit]](ApiMethods.GET, "https://localhost/api", "/units", "application/json")
.withQueryParam("unitName", unitName)
.withQueryParam("abbreviatedUnitName", abbreviatedUnitName)
.withQueryParam("categoryName", categoryName)
.withSuccessResponse[Seq[Unit]](200)
.withErrorResponse[Unit](401)
}
| QuantiModo/QuantiModo-SDK-Akka-Scala | src/main/scala/io/swagger/client/api/UnitsApi.scala | Scala | gpl-2.0 | 1,854 |
/*
ASIB - A Scala IRC Bot
Copyright (C) 2012 Iain Cambridge
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package asib.command.join
import asib.Asib
/** Greets any user who joins a channel the bot is in. */
class Greet extends AbstractJoinCommand {
  /**
   * Invoked on JOIN: sends a welcome message to the channel unless the
   * joining nick is the bot itself.
   */
  def join(username: String, channel: String) = {
    if (username != Asib.nick) {
      val greeting = s"$username welcome to $channel I hope you enjoy your stay."
      Asib.sendMsg(channel, greeting)
    }
  }
} | icambridge-old/asib | src/main/scala/asib/command/join/Greet.scala | Scala | gpl-3.0 | 1,011 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.jdbc
import java.math.BigDecimal
import java.sql.{Connection, Date, Timestamp}
import java.util.{Properties, TimeZone}
import org.apache.spark.sql.{Row, SaveMode}
import org.apache.spark.sql.execution.{RowDataSourceScanExec, WholeStageCodegenExec}
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.execution.datasources.jdbc.{JDBCPartition, JDBCRelation}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
import org.apache.spark.tags.DockerTest
/**
* This patch was tested using the Oracle docker. Created this integration suite for the same.
* The ojdbc6-11.2.0.2.0.jar was to be downloaded from the maven repository. Since there was
* no jdbc jar available in the maven repository, the jar was downloaded from oracle site
* manually and installed in the local; thus tested. So, for SparkQA test case run, the
* ojdbc jar might be manually placed in the local maven repository(com/oracle/ojdbc6/11.2.0.2.0)
* while Spark QA test run.
*
* The following would be the steps to test this
* 1. Build Oracle database in Docker, please refer below link about how to.
* https://github.com/oracle/docker-images/blob/master/OracleDatabase/SingleInstance/README.md
* 2. export ORACLE_DOCKER_IMAGE_NAME=$ORACLE_DOCKER_IMAGE_NAME
* Pull oracle $ORACLE_DOCKER_IMAGE_NAME image - docker pull $ORACLE_DOCKER_IMAGE_NAME
* 3. Start docker - sudo service docker start
* 4. Download oracle 11g driver jar and put it in maven local repo:
* (com/oracle/ojdbc6/11.2.0.2.0/ojdbc6-11.2.0.2.0.jar)
* 5. The timeout and interval parameter to be increased from 60,1 to a high value for oracle test
* in DockerJDBCIntegrationSuite.scala (Locally tested with 200,200 and executed successfully).
* 6. Run spark test - ./build/sbt "test-only org.apache.spark.sql.jdbc.OracleIntegrationSuite"
*
* All tests in this suite are ignored because of the dependency with the oracle jar from maven
* repository.
*/
@DockerTest
class OracleIntegrationSuite extends DockerJDBCIntegrationSuite with SharedSQLContext {
  import testImplicits._

  // Description of the dockerized Oracle instance this suite connects to. The image name is
  // taken from the ORACLE_DOCKER_IMAGE_NAME environment variable (see the class scaladoc);
  // the suite fails fast with NoSuchElementException if the variable is unset.
  override val db = new DatabaseOnDocker {
    override val imageName = sys.env("ORACLE_DOCKER_IMAGE_NAME")
    override val env = Map(
      "ORACLE_ROOT_PASSWORD" -> "oracle"
    )
    override val usesIpc = false
    override val jdbcPort: Int = 1521
    // Connects as system/oracle to the XE service of the container.
    override def getJdbcUrl(ip: String, port: Int): String =
      s"jdbc:oracle:thin:system/oracle@//$ip:$port/xe"
    override def getStartupProcessName: Option[String] = None
  }

  /**
   * Creates and populates every table and temporary view used by the tests below.
   * Runs once, after the container is up and a JDBC connection is available.
   * Tables: datetime, ts_with_timezone, tableWithCustomSchema, datetime1, numerics,
   * oracle_types, datetimePartitionTest; views: datetime, datetime1.
   */
  override def dataPreparation(conn: Connection): Unit = {
    conn.prepareStatement("CREATE TABLE datetime (id NUMBER(10), d DATE, t TIMESTAMP)")
      .executeUpdate()
    conn.prepareStatement(
      """INSERT INTO datetime VALUES
        |(1, {d '1991-11-09'}, {ts '1996-01-01 01:23:45'})
      """.stripMargin.replaceAll("\n", " ")).executeUpdate()
    conn.commit()

    conn.prepareStatement(
      "CREATE TABLE ts_with_timezone (id NUMBER(10), t TIMESTAMP WITH TIME ZONE)").executeUpdate()
    conn.prepareStatement(
      "INSERT INTO ts_with_timezone VALUES " +
        "(1, to_timestamp_tz('1999-12-01 11:00:00 UTC','YYYY-MM-DD HH:MI:SS TZR'))").executeUpdate()
    conn.prepareStatement(
      "INSERT INTO ts_with_timezone VALUES " +
        "(2, to_timestamp_tz('1999-12-01 12:00:00 PST','YYYY-MM-DD HH:MI:SS TZR'))").executeUpdate()
    conn.commit()

    // id holds a 29-digit value, which exceeds Spark's max decimal precision (38 is the max,
    // but NUMBER with no precision maps to precision 38 scale 10 -> effective precision 39).
    conn.prepareStatement(
      "CREATE TABLE tableWithCustomSchema (id NUMBER, n1 NUMBER(1), n2 NUMBER(1))").executeUpdate()
    conn.prepareStatement(
      "INSERT INTO tableWithCustomSchema values(12312321321321312312312312123, 1, 0)")
      .executeUpdate()
    conn.commit()

    sql(
      s"""
        |CREATE TEMPORARY VIEW datetime
        |USING org.apache.spark.sql.jdbc
        |OPTIONS (url '$jdbcUrl', dbTable 'datetime', oracle.jdbc.mapDateToTimestamp 'false')
      """.stripMargin.replaceAll("\n", " "))

    conn.prepareStatement("CREATE TABLE datetime1 (id NUMBER(10), d DATE, t TIMESTAMP)")
      .executeUpdate()
    conn.commit()

    sql(
      s"""
        |CREATE TEMPORARY VIEW datetime1
        |USING org.apache.spark.sql.jdbc
        |OPTIONS (url '$jdbcUrl', dbTable 'datetime1', oracle.jdbc.mapDateToTimestamp 'false')
      """.stripMargin.replaceAll("\n", " "))

    conn.prepareStatement("CREATE TABLE numerics (b DECIMAL(1), f DECIMAL(3, 2), i DECIMAL(10))")
      .executeUpdate()
    conn.prepareStatement(
      "INSERT INTO numerics VALUES (4, 1.23, 9999999999)").executeUpdate()
    conn.commit()

    conn.prepareStatement("CREATE TABLE oracle_types (d BINARY_DOUBLE, f BINARY_FLOAT)")
      .executeUpdate()
    conn.commit()

    conn.prepareStatement("CREATE TABLE datetimePartitionTest (id NUMBER(10), d DATE, t TIMESTAMP)")
      .executeUpdate()
    conn.prepareStatement(
      """INSERT INTO datetimePartitionTest VALUES
        |(1, {d '2018-07-06'}, {ts '2018-07-06 05:50:00'})
      """.stripMargin.replaceAll("\n", " ")).executeUpdate()
    conn.prepareStatement(
      """INSERT INTO datetimePartitionTest VALUES
        |(2, {d '2018-07-06'}, {ts '2018-07-06 08:10:08'})
      """.stripMargin.replaceAll("\n", " ")).executeUpdate()
    conn.prepareStatement(
      """INSERT INTO datetimePartitionTest VALUES
        |(3, {d '2018-07-08'}, {ts '2018-07-08 13:32:01'})
      """.stripMargin.replaceAll("\n", " ")).executeUpdate()
    conn.prepareStatement(
      """INSERT INTO datetimePartitionTest VALUES
        |(4, {d '2018-07-12'}, {ts '2018-07-12 09:51:15'})
      """.stripMargin.replaceAll("\n", " ")).executeUpdate()
    conn.commit()
  }

  test("SPARK-16625 : Importing Oracle numeric types") {
    val df = sqlContext.read.jdbc(jdbcUrl, "numerics", new Properties)
    val rows = df.collect()
    assert(rows.size == 1)
    val row = rows(0)
    // The main point of the below assertions is not to make sure that these Oracle types are
    // mapped to decimal types, but to make sure that the returned values are correct.
    // A value > 1 from DECIMAL(1) is correct:
    assert(row.getDecimal(0).compareTo(BigDecimal.valueOf(4)) == 0)
    // A value with fractions from DECIMAL(3, 2) is correct:
    assert(row.getDecimal(1).compareTo(BigDecimal.valueOf(1.23)) == 0)
    // A value > Int.MaxValue from DECIMAL(10) is correct:
    assert(row.getDecimal(2).compareTo(BigDecimal.valueOf(9999999999L)) == 0)
  }

  test("SPARK-12941: String datatypes to be mapped to Varchar in Oracle") {
    // create a sample dataframe with string type
    val df1 = sparkContext.parallelize(Seq(("foo"))).toDF("x")
    // write the dataframe to the oracle table tbl
    df1.write.jdbc(jdbcUrl, "tbl2", new Properties)
    // read the table from the oracle
    val dfRead = sqlContext.read.jdbc(jdbcUrl, "tbl2", new Properties)
    // get the rows
    val rows = dfRead.collect()
    // verify the data type is inserted
    val types = rows(0).toSeq.map(x => x.getClass.toString)
    assert(types(0).equals("class java.lang.String"))
    // verify the value is the inserted correct or not
    assert(rows(0).getString(0).equals("foo"))
  }

  test("SPARK-16625: General data types to be mapped to Oracle") {
    val props = new Properties()
    props.put("oracle.jdbc.mapDateToTimestamp", "false")

    // One column per Catalyst type we expect to round-trip through Oracle.
    val schema = StructType(Seq(
      StructField("boolean_type", BooleanType, true),
      StructField("integer_type", IntegerType, true),
      StructField("long_type", LongType, true),
      StructField("float_Type", FloatType, true),
      StructField("double_type", DoubleType, true),
      StructField("byte_type", ByteType, true),
      StructField("short_type", ShortType, true),
      StructField("string_type", StringType, true),
      StructField("binary_type", BinaryType, true),
      StructField("date_type", DateType, true),
      StructField("timestamp_type", TimestampType, true)
    ))

    val tableName = "test_oracle_general_types"
    val booleanVal = true
    val integerVal = 1
    val longVal = 2L
    val floatVal = 3.0f
    val doubleVal = 4.0
    val byteVal = 2.toByte
    val shortVal = 5.toShort
    val stringVal = "string"
    val binaryVal = Array[Byte](6, 7, 8)
    val dateVal = Date.valueOf("2016-07-26")
    val timestampVal = Timestamp.valueOf("2016-07-26 11:49:45")

    val data = spark.sparkContext.parallelize(Seq(
      Row(
        booleanVal, integerVal, longVal, floatVal, doubleVal, byteVal, shortVal, stringVal,
        binaryVal, dateVal, timestampVal
      )))

    val dfWrite = spark.createDataFrame(data, schema)
    dfWrite.write.jdbc(jdbcUrl, tableName, props)

    val dfRead = spark.read.jdbc(jdbcUrl, tableName, props)
    val rows = dfRead.collect()
    // verify the data type is inserted
    // (Oracle stores all numeric/boolean types as NUMBER, so they read back as decimals.)
    val types = dfRead.schema.map(field => field.dataType)
    assert(types(0).equals(DecimalType(1, 0)))
    assert(types(1).equals(DecimalType(10, 0)))
    assert(types(2).equals(DecimalType(19, 0)))
    assert(types(3).equals(DecimalType(19, 4)))
    assert(types(4).equals(DecimalType(19, 4)))
    assert(types(5).equals(DecimalType(3, 0)))
    assert(types(6).equals(DecimalType(5, 0)))
    assert(types(7).equals(StringType))
    assert(types(8).equals(BinaryType))
    assert(types(9).equals(DateType))
    assert(types(10).equals(TimestampType))
    // verify the value is the inserted correct or not
    val values = rows(0)
    assert(values.getDecimal(0).compareTo(BigDecimal.valueOf(1)) == 0)
    assert(values.getDecimal(1).compareTo(BigDecimal.valueOf(integerVal)) == 0)
    assert(values.getDecimal(2).compareTo(BigDecimal.valueOf(longVal)) == 0)
    assert(values.getDecimal(3).compareTo(BigDecimal.valueOf(floatVal)) == 0)
    assert(values.getDecimal(4).compareTo(BigDecimal.valueOf(doubleVal)) == 0)
    assert(values.getDecimal(5).compareTo(BigDecimal.valueOf(byteVal)) == 0)
    assert(values.getDecimal(6).compareTo(BigDecimal.valueOf(shortVal)) == 0)
    assert(values.getString(7).equals(stringVal))
    assert(values.getAs[Array[Byte]](8).mkString.equals("678"))
    assert(values.getDate(9).equals(dateVal))
    assert(values.getTimestamp(10).equals(timestampVal))
  }

  test("SPARK-19318: connection property keys should be case-sensitive") {
    // Validates a row read through the 'datetime'/'datetime1' views created in dataPreparation.
    def checkRow(row: Row): Unit = {
      assert(row.getDecimal(0).equals(BigDecimal.valueOf(1)))
      assert(row.getDate(1).equals(Date.valueOf("1991-11-09")))
      assert(row.getTimestamp(2).equals(Timestamp.valueOf("1996-01-01 01:23:45")))
    }
    checkRow(sql("SELECT * FROM datetime where id = 1").head())
    sql("INSERT INTO TABLE datetime1 SELECT * FROM datetime where id = 1")
    checkRow(sql("SELECT * FROM datetime1 where id = 1").head())
  }

  test("SPARK-20557: column type TIMESTAMP with TIME ZONE should be recognized") {
    val dfRead = sqlContext.read.jdbc(jdbcUrl, "ts_with_timezone", new Properties)
    val rows = dfRead.collect()
    val types = rows(0).toSeq.map(x => x.getClass.toString)
    assert(types(1).equals("class java.sql.Timestamp"))
  }

  test("Column type TIMESTAMP with SESSION_LOCAL_TIMEZONE is different from default") {
    val defaultJVMTimeZone = TimeZone.getDefault
    // Pick the timezone different from the current default time zone of JVM
    val sofiaTimeZone = TimeZone.getTimeZone("Europe/Sofia")
    val shanghaiTimeZone = TimeZone.getTimeZone("Asia/Shanghai")
    val localSessionTimeZone =
      if (defaultJVMTimeZone == shanghaiTimeZone) sofiaTimeZone else shanghaiTimeZone

    withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> localSessionTimeZone.getID) {
      // Reading TIMESTAMP WITH TIME ZONE under a non-default session zone is expected to fail
      // with an unrecognized-type error (-101 is Oracle's TIMESTAMPTZ type code).
      val e = intercept[java.sql.SQLException] {
        val dfRead = sqlContext.read.jdbc(jdbcUrl, "ts_with_timezone", new Properties)
        dfRead.collect()
      }.getMessage
      assert(e.contains("Unrecognized SQL type -101"))
    }
  }

  /**
   * Change the Time Zone `timeZoneId` of JVM before executing `f`, then switches back to the
   * original after `f` returns.
   * @param timeZoneId the ID for a TimeZone, either an abbreviation such as "PST", a full name such
   *                   as "America/Los_Angeles", or a custom ID such as "GMT-8:00".
   */
  private def withTimeZone(timeZoneId: String)(f: => Unit): Unit = {
    val originalLocale = TimeZone.getDefault
    try {
      // Add Locale setting
      TimeZone.setDefault(TimeZone.getTimeZone(timeZoneId))
      f
    } finally {
      TimeZone.setDefault(originalLocale)
    }
  }

  test("Column TIMESTAMP with TIME ZONE(JVM timezone)") {
    def checkRow(row: Row, ts: String): Unit = {
      assert(row.getTimestamp(1).equals(Timestamp.valueOf(ts)))
    }

    // Pin the session timezone to the JVM default so only withTimeZone changes the rendering.
    withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> TimeZone.getDefault.getID) {
      val dfRead = sqlContext.read.jdbc(jdbcUrl, "ts_with_timezone", new Properties)
      withTimeZone("PST") {
        assert(dfRead.collect().toSet ===
          Set(
            Row(BigDecimal.valueOf(1), java.sql.Timestamp.valueOf("1999-12-01 03:00:00")),
            Row(BigDecimal.valueOf(2), java.sql.Timestamp.valueOf("1999-12-01 12:00:00"))))
      }

      withTimeZone("UTC") {
        assert(dfRead.collect().toSet ===
          Set(
            Row(BigDecimal.valueOf(1), java.sql.Timestamp.valueOf("1999-12-01 11:00:00")),
            Row(BigDecimal.valueOf(2), java.sql.Timestamp.valueOf("1999-12-01 20:00:00"))))
      }
    }
  }

  test("SPARK-18004: Make sure date or timestamp related predicate is pushed down correctly") {
    val props = new Properties()
    props.put("oracle.jdbc.mapDateToTimestamp", "false")

    val schema = StructType(Seq(
      StructField("date_type", DateType, true),
      StructField("timestamp_type", TimestampType, true)
    ))

    val tableName = "test_date_timestamp_pushdown"
    val dateVal = Date.valueOf("2017-06-22")
    val timestampVal = Timestamp.valueOf("2017-06-22 21:30:07")

    val data = spark.sparkContext.parallelize(Seq(
      Row(dateVal, timestampVal)
    ))

    val dfWrite = spark.createDataFrame(data, schema)
    dfWrite.write.jdbc(jdbcUrl, tableName, props)

    val dfRead = spark.read.jdbc(jdbcUrl, tableName, props)

    val millis = System.currentTimeMillis()
    val dt = new java.sql.Date(millis)
    val ts = new java.sql.Timestamp(millis)

    // Query Oracle table with date and timestamp predicates
    // which should be pushed down to Oracle.
    val df = dfRead.filter(dfRead.col("date_type").lt(dt))
      .filter(dfRead.col("timestamp_type").lt(ts))

    val parentPlan = df.queryExecution.executedPlan
    assert(parentPlan.isInstanceOf[WholeStageCodegenExec])
    val node = parentPlan.asInstanceOf[WholeStageCodegenExec]
    val metadata = node.child.asInstanceOf[RowDataSourceScanExec].metadata
    // The "PushedFilters" part should exist in Dataframe's
    // physical plan and the existence of right literals in
    // "PushedFilters" is used to prove that the predicates
    // pushing down have been effective.
    assert(metadata.get("PushedFilters").isDefined)
    assert(metadata("PushedFilters").contains(dt.toString))
    assert(metadata("PushedFilters").contains(ts.toString))

    val row = df.collect()(0)
    assert(row.getDate(0).equals(dateVal))
    assert(row.getTimestamp(1).equals(timestampVal))
  }

  test("SPARK-20427/SPARK-20921: read table use custom schema by jdbc api") {
    // default will throw IllegalArgumentException
    val e = intercept[org.apache.spark.SparkException] {
      spark.read.jdbc(jdbcUrl, "tableWithCustomSchema", new Properties()).collect()
    }
    assert(e.getCause().isInstanceOf[ArithmeticException])
    assert(e.getMessage.contains("Decimal precision 39 exceeds max precision 38"))

    // custom schema can read data
    val props = new Properties()
    props.put("customSchema",
      s"ID DECIMAL(${DecimalType.MAX_PRECISION}, 0), N1 INT, N2 BOOLEAN")
    val dfRead = spark.read.jdbc(jdbcUrl, "tableWithCustomSchema", props)

    val rows = dfRead.collect()
    // verify the data type
    val types = rows(0).toSeq.map(x => x.getClass.toString)
    assert(types(0).equals("class java.math.BigDecimal"))
    assert(types(1).equals("class java.lang.Integer"))
    assert(types(2).equals("class java.lang.Boolean"))

    // verify the value
    val values = rows(0)
    assert(values.getDecimal(0).equals(new java.math.BigDecimal("12312321321321312312312312123")))
    assert(values.getInt(1).equals(1))
    assert(values.getBoolean(2).equals(false))
  }

  test("SPARK-22303: handle BINARY_DOUBLE and BINARY_FLOAT as DoubleType and FloatType") {
    val tableName = "oracle_types"
    val schema = StructType(Seq(
      StructField("d", DoubleType, true),
      StructField("f", FloatType, true)))
    val props = new Properties()

    // write it back to the table (append mode)
    val data = spark.sparkContext.parallelize(Seq(Row(1.1, 2.2f)))
    val dfWrite = spark.createDataFrame(data, schema)
    dfWrite.write.mode(SaveMode.Append).jdbc(jdbcUrl, tableName, props)

    // read records from oracle_types
    val dfRead = sqlContext.read.jdbc(jdbcUrl, tableName, new Properties)
    val rows = dfRead.collect()
    assert(rows.size == 1)

    // check data types
    val types = dfRead.schema.map(field => field.dataType)
    assert(types(0).equals(DoubleType))
    assert(types(1).equals(FloatType))

    // check values
    val values = rows(0)
    assert(values.getDouble(0) === 1.1)
    assert(values.getFloat(1) === 2.2f)
  }

  test("SPARK-22814 support date/timestamp types in partitionColumn") {
    val expectedResult = Set(
      (1, "2018-07-06", "2018-07-06 05:50:00"),
      (2, "2018-07-06", "2018-07-06 08:10:08"),
      (3, "2018-07-08", "2018-07-08 13:32:01"),
      (4, "2018-07-12", "2018-07-12 09:51:15")
    ).map { case (id, date, timestamp) =>
      Row(BigDecimal.valueOf(id), Date.valueOf(date), Timestamp.valueOf(timestamp))
    }

    // DateType partition column
    val df1 = spark.read.format("jdbc")
      .option("url", jdbcUrl)
      .option("dbtable", "datetimePartitionTest")
      .option("partitionColumn", "d")
      .option("lowerBound", "2018-07-06")
      .option("upperBound", "2018-07-20")
      .option("numPartitions", 3)
      // oracle.jdbc.mapDateToTimestamp defaults to true. If this flag is not disabled, column d
      // (Oracle DATE) will be resolved as Catalyst Timestamp, which will fail bound evaluation of
      // the partition column. E.g. 2018-07-06 cannot be evaluated as Timestamp, and the error
      // message says: Timestamp format must be yyyy-mm-dd hh:mm:ss[.fffffffff].
      .option("oracle.jdbc.mapDateToTimestamp", "false")
      .option("sessionInitStatement", "ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD'")
      .load()

    // Verify the generated per-partition WHERE clauses as well as the result set.
    df1.logicalPlan match {
      case LogicalRelation(JDBCRelation(_, parts, _), _, _, _) =>
        val whereClauses = parts.map(_.asInstanceOf[JDBCPartition].whereClause).toSet
        assert(whereClauses === Set(
          """"D" < '2018-07-10' or "D" is null""",
          """"D" >= '2018-07-10' AND "D" < '2018-07-14'""",
          """"D" >= '2018-07-14'"""))
    }
    assert(df1.collect.toSet === expectedResult)

    // TimestampType partition column
    val df2 = spark.read.format("jdbc")
      .option("url", jdbcUrl)
      .option("dbtable", "datetimePartitionTest")
      .option("partitionColumn", "t")
      .option("lowerBound", "2018-07-04 03:30:00.0")
      .option("upperBound", "2018-07-27 14:11:05.0")
      .option("numPartitions", 2)
      .option("oracle.jdbc.mapDateToTimestamp", "false")
      .option("sessionInitStatement",
        "ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'")
      .load()

    df2.logicalPlan match {
      case LogicalRelation(JDBCRelation(_, parts, _), _, _, _) =>
        val whereClauses = parts.map(_.asInstanceOf[JDBCPartition].whereClause).toSet
        assert(whereClauses === Set(
          """"T" < '2018-07-15 20:50:32.5' or "T" is null""",
          """"T" >= '2018-07-15 20:50:32.5'"""))
    }
    assert(df2.collect.toSet === expectedResult)
  }

  test("query JDBC option") {
    val expectedResult = Set(
      (1, "1991-11-09", "1996-01-01 01:23:45")
    ).map { case (id, date, timestamp) =>
      Row(BigDecimal.valueOf(id), Date.valueOf(date), Timestamp.valueOf(timestamp))
    }

    val query = "SELECT id, d, t FROM datetime WHERE id = 1"
    // query option to pass on the query string.
    val df = spark.read.format("jdbc")
      .option("url", jdbcUrl)
      .option("query", query)
      .option("oracle.jdbc.mapDateToTimestamp", "false")
      .load()
    assert(df.collect.toSet === expectedResult)

    // query option in the create table path.
    sql(
      s"""
        |CREATE OR REPLACE TEMPORARY VIEW queryOption
        |USING org.apache.spark.sql.jdbc
        |OPTIONS (url '$jdbcUrl',
        |   query '$query',
        |   oracle.jdbc.mapDateToTimestamp false)
      """.stripMargin.replaceAll("\n", " "))
    assert(sql("select id, d, t from queryOption").collect.toSet == expectedResult)
  }
}
| actuaryzhang/spark | external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/OracleIntegrationSuite.scala | Scala | apache-2.0 | 21,804 |
package com.codahale.jerkson.util
package scalax
package rules
package scalasig
import ScalaSigEntryParsers._
/**
 * A symbol parsed from a ScalaSig, forming a tree through `parent`/`children`.
 * `path` renders the dotted path from the root symbol down to this one.
 */
trait Symbol extends Flags {
  def name: String
  def parent: Option[Symbol]
  def children: Seq[Symbol]

  /** Fully-qualified, dot-separated path: the parent's path (if any) followed by this name. */
  def path: String = parent match {
    case Some(p) => p.path + "." + name
    case None    => name
  }
}
/** Sentinel symbol used where no real symbol exists; has no parent, children, or flags. */
case object NoSymbol extends Symbol {
  def name = "<no symbol>"
  def parent = None
  def hasFlag(flag: Long) = false
  def children = Nil
}
/**
 * Base class for symbols backed by a ScalaSig table entry. Children and attributes are
 * resolved lazily by re-running the relevant parser rules over the owning ScalaSig.
 */
abstract class ScalaSigSymbol extends Symbol {
  // Apply an entry-level parser rule to this symbol's entry, failing fast on mismatch.
  def applyRule[A](rule: EntryParser[A]): A = expect(rule)(entry)
  // Apply a whole-signature parser rule to the ScalaSig this entry belongs to.
  def applyScalaSigRule[A](rule: ScalaSigParsers.Parser[A]) = ScalaSigParsers.expect(rule)(entry.scalaSig)

  /** The raw table entry this symbol was decoded from. */
  def entry: ScalaSig#Entry
  def index = entry.index

  // NOTE: both are O(all symbols/attributes in the sig); computed once and cached via lazy val.
  lazy val children: Seq[Symbol] = applyScalaSigRule(ScalaSigParsers.symbols) filter (_.parent == Some(this))
  lazy val attributes: Seq[AttributeInfo] = applyScalaSigRule(ScalaSigParsers.attributes) filter (_.symbol == this)
}

/** A symbol defined outside this ScalaSig (e.g. in another compilation unit); carries no flags. */
case class ExternalSymbol(name: String, parent: Option[Symbol],
                          entry: ScalaSig#Entry) extends ScalaSigSymbol {
  override def toString = path
  def hasFlag(flag: Long) = false
}
/**
 * The common payload shared by all info-carrying symbols: name, owning symbol, flag bits,
 * optional access qualifier, and the table index of the symbol's type entry.
 */
case class SymbolInfo(name: String, owner: Symbol, flags: Int,
                      privateWithin: Option[AnyRef], info: Int,
                      entry: ScalaSig#Entry) {
  // Render sig-backed symbols by their table index to keep output compact and avoid recursion.
  def symbolString(any: AnyRef) = any match {
    case sym: SymbolInfoSymbol => sym.index.toString
    case other => other.toString
  }

  override def toString = name + ", owner=" + symbolString(owner) + ", flags=" + flags.toHexString + ", info=" + info + (privateWithin match {
    case Some(any) => ", privateWithin=" + symbolString(any)
    case None => " "
  })
}

/** A symbol whose data comes from a [[SymbolInfo]]; delegates entry/name/parent/flags to it. */
abstract class SymbolInfoSymbol extends ScalaSigSymbol {
  def symbolInfo: SymbolInfo

  def entry = symbolInfo.entry
  def name = symbolInfo.name
  def parent = Some(symbolInfo.owner)
  def hasFlag(flag: Long) = (symbolInfo.flags & flag) != 0L

  // The symbol's type, decoded on demand from the type entry referenced by symbolInfo.info.
  lazy val infoType = applyRule(parseEntry(typeEntry)(symbolInfo.info))
}
/** A type member/parameter; its path is just its own name (no owner prefix). */
case class TypeSymbol(symbolInfo: SymbolInfo) extends SymbolInfoSymbol {
  override def path = name
}

/** A type alias; like [[TypeSymbol]], its path is just its own name. */
case class AliasSymbol(symbolInfo: SymbolInfo) extends SymbolInfoSymbol {
  override def path = name
}

/** A class; `selfType` decodes the optional self-type entry referenced by `thisTypeRef`. */
case class ClassSymbol(symbolInfo: SymbolInfo,
                       thisTypeRef: Option[Int]) extends SymbolInfoSymbol {
  lazy val selfType = thisTypeRef.map {(x: Int) => applyRule(parseEntry(typeEntry)(x))}
}

/** An object (module) definition. */
case class ObjectSymbol(symbolInfo: SymbolInfo) extends SymbolInfoSymbol

/** A method; `aliasRef` optionally points at the entry of the symbol it aliases. */
case class MethodSymbol(symbolInfo: SymbolInfo,
                        aliasRef: Option[Int]) extends SymbolInfoSymbol
| virtualirfan/jerkson | src/main/scala/com/codahale/jerkson/util/scalax/rules/scalasig/Symbol.scala | Scala | mit | 2,634 |
package eventstore
package core
import ScalaCompat.JavaConverters._
import constants.MaxBatchSize
/** Anything that can be sent to the server: either a bare [[Out]] or one wrapped with credentials. */
sealed trait OutLike {
  def out: Out
}

/** An outgoing message paired with the [[UserCredentials]] used to authenticate it. */
@SerialVersionUID(1L)
final case class WithCredentials(
  out: Out,
  credentials: UserCredentials
) extends OutLike

sealed trait Message

/** A message received from the server. */
sealed trait In extends Message

/** A message sent to the server; can be enriched with credentials via `withCredentials`. */
sealed trait Out extends Message with OutLike {
  final def out: Out = this

  final def withCredentials(login: String, password: String): WithCredentials =
    withCredentials(UserCredentials(login = login, password = password))

  final def withCredentials(x: UserCredentials): WithCredentials = WithCredentials(this, x)
}

/** A message that can travel in both directions. */
sealed trait InOut extends In with Out

// Connection keep-alive and liveness probes.
@SerialVersionUID(1L) private[eventstore] case object HeartbeatRequest extends InOut
@SerialVersionUID(1L) private[eventstore] case object HeartbeatResponse extends InOut
@SerialVersionUID(1L) case object Ping extends InOut
@SerialVersionUID(1L) case object Pong extends InOut
/** Identifies this client connection to the server; `version` must be non-negative. */
@SerialVersionUID(1L)
final case class IdentifyClient(
  version: Int,
  connectionName: Option[String]
) extends Out {
  require(version >= 0, s"version must be >= 0, but is $version")
}

/** Server acknowledgement of [[IdentifyClient]]. */
@SerialVersionUID(1L)
case object ClientIdentified extends In

/** Appends `events` to stream `streamId`, guarded by `expectedVersion`. */
@SerialVersionUID(1L)
final case class WriteEvents(
  streamId: EventStream.Id,
  events: List[EventData],
  expectedVersion: ExpectedVersion,
  requireMaster: Boolean
) extends Out

object WriteEvents {
  /** Convenience constructor for writing a single stream-metadata event. */
  object StreamMetadata {
    def apply(
      streamId: EventStream.Metadata,
      data: Content,
      eventId: Uuid,
      expectedVersion: ExpectedVersion,
      requireMaster: Boolean
    ): WriteEvents =
      WriteEvents(streamId, List(EventData.StreamMetadata(data, eventId)), expectedVersion, requireMaster)
  }
}
/** Result of a [[WriteEvents]]: the range of event numbers written and the commit position. */
@SerialVersionUID(1L)
final case class WriteEventsCompleted(
  numbersRange: Option[EventNumber.Range],
  position: Option[Position.Exact]
) extends In

/** Deletes a stream; `hard = true` makes the deletion permanent (stream cannot be recreated). */
@SerialVersionUID(1L)
final case class DeleteStream(
  streamId: EventStream.Id,
  expectedVersion: ExpectedVersion.Existing,
  hard: Boolean,
  requireMaster: Boolean
) extends Out

@SerialVersionUID(1L)
final case class DeleteStreamCompleted(
  position: Option[Position.Exact]
) extends In

/** Starts an explicit transaction against a stream. */
@SerialVersionUID(1L)
final case class TransactionStart(
  streamId: EventStream.Id,
  expectedVersion: ExpectedVersion,
  requireMaster: Boolean
) extends Out

/** Carries the server-assigned id for a started transaction; the id is never negative. */
@SerialVersionUID(1L)
final case class TransactionStartCompleted(
  transactionId: Long
) extends In {
  require(transactionId >= 0, s"transactionId must be >= 0, but is $transactionId")
}

/** Writes events within an open transaction identified by `transactionId`. */
@SerialVersionUID(1L)
final case class TransactionWrite(
  transactionId: Long,
  events: List[EventData],
  requireMaster: Boolean
) extends Out {
  require(transactionId >= 0, s"transactionId must be >= 0, but is $transactionId")
}

@SerialVersionUID(1L)
final case class TransactionWriteCompleted(
  transactionId: Long
) extends In {
  require(transactionId >= 0, s"transactionId must be >= 0, but is $transactionId")
}

/** Commits an open transaction. */
@SerialVersionUID(1L)
final case class TransactionCommit(
  transactionId: Long,
  requireMaster: Boolean
) extends Out {
  require(transactionId >= 0, s"transactionId must be >= 0, but is $transactionId")
}

/** Result of a committed transaction: written event-number range and commit position. */
@SerialVersionUID(1L)
final case class TransactionCommitCompleted(
  transactionId: Long,
  numbersRange: Option[EventNumber.Range],
  position: Option[Position.Exact]
) extends In {
  require(transactionId >= 0, s"transactionId must be >= 0, but is $transactionId")
}
/** Reads a single event from a stream. */
@SerialVersionUID(1L)
final case class ReadEvent(
  streamId: EventStream.Id,
  eventNumber: EventNumber,
  resolveLinkTos: Boolean,
  requireMaster: Boolean
) extends Out

object ReadEvent {
  /** Convenience constructor for reading a stream-metadata event. */
  object StreamMetadata {
    def apply(
      streamId: EventStream.Metadata,
      eventNumber: EventNumber,
      resolveLinkTos: Boolean,
      requireMaster: Boolean
    ): ReadEvent = ReadEvent(streamId, eventNumber, resolveLinkTos, requireMaster)
  }
}

@SerialVersionUID(1L)
final case class ReadEventCompleted(
  event: Event
) extends In

/**
 * Reads a batch of events from a stream. `maxCount` must be in (0, MaxBatchSize];
 * forward reads cannot start from EventNumber.Last.
 */
@SerialVersionUID(1L)
final case class ReadStreamEvents(
  streamId: EventStream.Id,
  fromNumber: EventNumber,
  maxCount: Int,
  direction: ReadDirection,
  resolveLinkTos: Boolean,
  requireMaster: Boolean
) extends Out {
  require(maxCount > 0, s"maxCount must be > 0, but is $maxCount")
  require(maxCount <= MaxBatchSize, s"maxCount must be <= $MaxBatchSize, but is $maxCount")
  require(
    direction != ReadDirection.Forward || fromNumber != EventNumber.Last,
    s"fromNumber must not be EventNumber.Last"
  )
}

/** Batch read result; `nextEventNumber` is where a subsequent read should continue from. */
@SerialVersionUID(1L)
final case class ReadStreamEventsCompleted(
  events: List[Event],
  nextEventNumber: EventNumber,
  lastEventNumber: EventNumber.Exact,
  endOfStream: Boolean,
  lastCommitPosition: Long,
  direction: ReadDirection
) extends In {
  require(events.size <= MaxBatchSize, s"events.size must be <= $MaxBatchSize, but is ${events.size}")
  require(
    direction != ReadDirection.Forward || nextEventNumber != EventNumber.Last,
    s"lastEventNumber must not be EventNumber.Last"
  )

  /** Java API: the events as a java.util.List. */
  def eventsJava: java.util.List[Event] = events.asJava
}

/** Reads a batch of events across all streams, starting from a global position. */
@SerialVersionUID(1L)
final case class ReadAllEvents(
  fromPosition: Position,
  maxCount: Int,
  direction: ReadDirection,
  resolveLinkTos: Boolean,
  requireMaster: Boolean
) extends Out {
  require(maxCount > 0, s"maxCount must be > 0, but is $maxCount")
  require(maxCount <= MaxBatchSize, s"maxCount must be <= $MaxBatchSize, but is $maxCount")
}

@SerialVersionUID(1L)
final case class ReadAllEventsCompleted(
  events: List[IndexedEvent],
  position: Position.Exact,
  nextPosition: Position.Exact,
  direction: ReadDirection
) extends In {
  require(events.size <= MaxBatchSize, s"events.size must be <= $MaxBatchSize, but is ${events.size}")

  /** Java API: the events as a java.util.List. */
  def eventsJava: java.util.List[IndexedEvent] = events.asJava
}
/** Messages for managing and consuming persistent (server-side, competing-consumer) subscriptions. */
object PersistentSubscription {
  type PSS = settings.PersistentSubscriptionSettings

  /** Creates a persistent subscription group on a stream. */
  @SerialVersionUID(1L)
  final case class Create(
    streamId: EventStream.Id,
    groupName: String,
    settings: PSS
  ) extends Out {
    require(groupName != null, "groupName must not be null")
    require(groupName.nonEmpty, "groupName must not be empty")
  }

  @SerialVersionUID(1L) case object CreateCompleted extends In

  /** Updates the settings of an existing persistent subscription group. */
  @SerialVersionUID(1L)
  final case class Update(
    streamId: EventStream.Id,
    groupName: String,
    settings: PSS
  ) extends Out {
    require(groupName != null, "groupName must not be null")
    require(groupName.nonEmpty, "groupName must not be empty")
  }

  @SerialVersionUID(1L) case object UpdateCompleted extends In

  /** Deletes a persistent subscription group. */
  @SerialVersionUID(1L)
  final case class Delete(
    streamId: EventStream.Id,
    groupName: String
  ) extends Out {
    require(groupName != null, "groupName must not be null")
    require(groupName.nonEmpty, "groupName must not be empty")
  }

  @SerialVersionUID(1L) case object DeleteCompleted extends In

  /** Acknowledges successful processing of the given events. */
  @SerialVersionUID(1L)
  final case class Ack(
    subscriptionId: String,
    eventIds: List[Uuid]
  ) extends Out {
    require(subscriptionId != null, "subscriptionId must not be null")
    require(subscriptionId.nonEmpty, "subscriptionId must not be empty")
    require(eventIds.nonEmpty, "eventIds must not be empty")
  }

  /** Negatively acknowledges events; `action` tells the server how to handle them. */
  @SerialVersionUID(1L)
  final case class Nak(
    subscriptionId: String,
    eventIds: List[Uuid],
    action: Nak.Action,
    message: Option[String] = None
  ) extends Out {
    require(subscriptionId != null, "subscriptionId must not be null")
    require(subscriptionId.nonEmpty, "subscriptionId must not be empty")
    require(eventIds.nonEmpty, "eventIds must not be empty")
  }

  object Nak {
    /** Server-side disposition for a negatively-acknowledged event. */
    sealed trait Action

    object Action {
      @SerialVersionUID(1L) case object Unknown extends Action
      @SerialVersionUID(1L) case object Park extends Action
      @SerialVersionUID(1L) case object Retry extends Action
      @SerialVersionUID(1L) case object Skip extends Action
      @SerialVersionUID(1L) case object Stop extends Action
    }
  }

  /** Connects a consumer to a subscription group; `bufferSize` is the in-flight message window. */
  @SerialVersionUID(1L)
  final case class Connect(
    streamId: EventStream.Id,
    groupName: String,
    bufferSize: Int = 10
  ) extends Out

  @SerialVersionUID(1L)
  final case class Connected(
    subscriptionId: String,
    lastCommit: Long,
    lastEventNumber: Option[EventNumber.Exact]
  ) extends In

  /** An event delivered to a connected persistent-subscription consumer. */
  @SerialVersionUID(1L)
  final case class EventAppeared(
    event: Event
  ) extends In
}
/** Subscribes to live events on a single stream or on all streams. */
@SerialVersionUID(1L)
final case class SubscribeTo(
  stream: EventStream,
  resolveLinkTos: Boolean
) extends Out

sealed trait SubscribeCompleted extends In

/** Confirmation of a subscribe-to-all; `lastCommit` is the current global commit position. */
@SerialVersionUID(1L)
final case class SubscribeToAllCompleted(
  lastCommit: Long
) extends SubscribeCompleted {
  require(lastCommit >= 0, s"lastCommit must be >= 0, but is $lastCommit")
}

/** Confirmation of a single-stream subscribe, with the stream's last event number (if any). */
@SerialVersionUID(1L)
final case class SubscribeToStreamCompleted(
  lastCommit: Long,
  lastEventNumber: Option[EventNumber.Exact] = None
) extends SubscribeCompleted {
  require(lastCommit >= 0, s"lastCommit must be >= 0, but is $lastCommit")
}

/** A live event pushed to an active subscription. */
@SerialVersionUID(1L)
final case class StreamEventAppeared(
  event: IndexedEvent
) extends In

@SerialVersionUID(1L)
case object Unsubscribe extends Out {
  /**
   * Java API
   */
  def getInstance: Unsubscribe.type = this
}

@SerialVersionUID(1L)
case object Unsubscribed extends In {
  /**
   * Java API
   */
  def getInstance: Unsubscribed.type = this
}

@SerialVersionUID(1L)
case object ScavengeDatabase extends Out {
  /**
   * Java API
   */
  def getInstance: ScavengeDatabase.type = this
}

@SerialVersionUID(1L)
final case class ScavengeDatabaseResponse(
  scavengeId: Option[String]
) extends In

@SerialVersionUID(1L)
case object Authenticate extends Out {
  /**
   * Java API
   */
  def getInstance: Authenticate.type = this
}

@SerialVersionUID(1L)
case object Authenticated extends In {
  /**
   * Java API
   */
  def getInstance: Authenticated.type = this
} | EventStore/EventStore.JVM | core/src/main/scala/eventstore/core/Message.scala | Scala | bsd-3-clause | 10,074
package org.automanlang.core.policy.price
import org.automanlang.core.logging.{LogType, LogLevelDebug, DebugLog}
import org.automanlang.core.question.Question
import org.automanlang.core.scheduler.{SchedulerState, Task}
import org.automanlang.core.policy._
class MLEPricePolicy(question: Question) extends PricePolicy(question) {
private def numAnswered(ts: List[Task]) : Int = {
ts.count { t =>
t.state == SchedulerState.ANSWERED ||
t.state == SchedulerState.ACCEPTED ||
t.state == SchedulerState.REJECTED ||
t.state == SchedulerState.DUPLICATE
}
}
private def numTimeouts(ts: List[Task]) : Int = {
// find the last round where we spawned tasks
val last_spawn_round = ts.map(_.round).max
// get the tasks from that round
val last_round = ts.filter(_.round == last_spawn_round)
// get # timed-out tasks
last_round.count { t =>
t.state == SchedulerState.TIMEOUT || t.state == SchedulerState.CANCELLED
}
}
private def initialReward(tasks: List[Task]) : BigDecimal = {
val reward = calculateInitialReward()
DebugLog(s"Initial reward is $$$reward. Round = ${nextRound(tasks, suffered_timeout = false)}.", LogLevelDebug(), LogType.STRATEGY, question.id)
reward
}
private def timeoutReward(tasks: List[Task]) : BigDecimal = {
val current_reward = currentReward(tasks)
// # unanswered in last roundcannot be zero,
// otherwise a timeout would not have occurred
assert(numTimeouts(tasks) != 0)
val num_answered = numAnswered(tasks)
// Use the MLE for the Bernoulli distribution (the mean) to
// find the probability that a task will be accepted (p_a).
// We assume that p_a is a fixed population parameter unaffected by price.
val p_a: BigDecimal = BigDecimal(num_answered) / BigDecimal(tasks.size)
// Maximal safe growth rate; see CACM paper.
// Here, we never more than double the reward.
val growth_rate: BigDecimal = (1.0 / p_a).min(2.0)
val reward = (growth_rate * current_reward).setScale(2, math.BigDecimal.RoundingMode.FLOOR)
DebugLog(s"Timeout occurred. New reward is $$$reward because the estimated acceptance " +
s"rate is $p_a per round and the current reward is $$$current_reward. Round = ${nextRound(tasks, suffered_timeout = true)}.",
LogLevelDebug(),
LogType.STRATEGY,
question.id)
reward
}
private def keepCurrentReward(tasks: List[Task]) : BigDecimal = {
val current_reward = currentReward(tasks)
DebugLog(s"Insufficient agreement. Keeping reward of $$$current_reward. Round = ${nextRound(tasks, suffered_timeout = false)}.",
LogLevelDebug(),
LogType.STRATEGY,
question.id)
current_reward
}
private def noResponsesReward(tasks: List[Task]) : BigDecimal = {
// if timeouts occur, a round will contain mixed prices; take the max
val current_reward = tasks.map(_.cost).max
// double the reward
val reward = (2.0 * current_reward).setScale(2, math.BigDecimal.RoundingMode.FLOOR)
DebugLog(s"Timeout occurred. New reward is $$$reward because we cannot estimate acceptance " +
s"rate and the current reward is $$$current_reward. Round = ${nextRound(tasks, suffered_timeout = true)}.",
LogLevelDebug(),
LogType.STRATEGY,
question.id)
reward
}
// The price currently in effect. After a timeout, a round contains tasks
// posted at mixed prices, so the highest per-task cost is the current reward.
private def currentReward(tasks: List[Task]) : BigDecimal = {
  assert(tasks.nonEmpty)
  tasks.iterator.map(_.cost).max
}
// Decide the reward for the next round from what happened in this one:
//   - first round with no tasks yet: the configured initial reward
//   - no answers at all: the price is too low to estimate anything; double it
//   - answers plus a timeout: grow by the estimated acceptance rate
//   - answers, no timeout: keep the current reward
def calculateReward(tasks: List[Task], currentRound: Int, timeout_occurred: Boolean) : BigDecimal = {
  val isFirstRound = currentRound == 0 && tasks.isEmpty
  if (isFirstRound)                  initialReward(tasks)
  else if (numAnswered(tasks) == 0)  noResponsesReward(tasks)
  else if (timeout_occurred)         timeoutReward(tasks)
  else                               keepCurrentReward(tasks)
}
}
| dbarowy/AutoMan | libautoman/src/main/scala/org/automanlang/core/policy/price/MLEPricePolicy.scala | Scala | gpl-2.0 | 4,062 |
package com.robocubs4205.cubscout.model.scorecard
/**
* Created by trevor on 7/29/17.
*/
// One robot's overall result for a single match, scored against a scorecard.
// matchId/robotId/scorecardId are presumably foreign keys into the match,
// robot, and scorecard tables — TODO confirm against the persistence layer.
case class Result(id:Long,matchId:Long,robotId:Long,scorecardId:Long)
// A single per-section score contributing to a Result. resultId/fieldSectionId
// presumably reference a Result and a scorecard FieldSection — TODO confirm.
case class FieldResult(id:Long,resultId:Long,fieldSectionId:Long,score:Float)
| robocubs4205/cubscout-server | common/src/main/scala/com/robocubs4205/cubscout/model/scorecard/Result.scala | Scala | mit | 243 |
/*
* -╥⌐⌐⌐⌐ -⌐⌐⌐⌐-
* ≡╢░░░░⌐\\░░░φ ╓╝░░░░⌐░░░░╪╕
* ╣╬░░` `░░░╢┘ φ▒╣╬╝╜ ░░╢╣Q
* ║╣╬░⌐ ` ╤▒▒▒Å` ║╢╬╣
* ╚╣╬░⌐ ╔▒▒▒▒`«╕ ╢╢╣▒
* ╫╬░░╖ .░ ╙╨╨ ╣╣╬░φ ╓φ░╢╢Å
* ╙╢░░░░⌐"░░░╜ ╙Å░░░░⌐░░░░╝`
* ``˚¬ ⌐ ˚˚⌐´
*
* Copyright © 2016 Flipkart.com
*/
package com.flipkart.connekt.receptors.directives
import akka.http.scaladsl.model.{HttpRequest, RemoteAddress}
import akka.http.scaladsl.server.RouteResult.Complete
import akka.http.scaladsl.server._
import akka.http.scaladsl.server.directives.{BasicDirectives, MiscDirectives}
import com.flipkart.connekt.commons.factories.{ConnektLogger, LogFile}
trait AccessLogDirective extends BasicDirectives with MiscDirectives {

  // access-log line: ip, method, uri, status, elapsed-millis
  private val logFormat = "%s %s %s %s %s"

  // Writes one access-log line for a completed request; non-Complete route
  // results (rejections, failures) produce no log entry.
  private def writeAccessLog(req: HttpRequest, remoteAddress: RemoteAddress, result: RouteResult, elapsedMs: Long): Unit =
    result match {
      case complete: Complete =>
        val clientIp = remoteAddress.toOption.fold("0.0.0.0")(_.getHostAddress)
        ConnektLogger(LogFile.ACCESS).info(
          logFormat.format(clientIp, req.method.value, req.uri, complete.response.status.intValue(), elapsedMs))
      case _ => // nothing to log
    }

  /**
   * Produces a log entry for every incoming request and [[RouteResult]].
   */
  def logTimedRequestResult: Directive0 =
    extractRequestContext.flatMap { ctx =>
      val startTs = System.currentTimeMillis
      extractClientIP.flatMap { address =>
        mapRouteResult { result =>
          writeAccessLog(ctx.request, address, result, System.currentTimeMillis - startTs)
          result
        }
      }
    }
}
| Flipkart/connekt | receptors/src/main/scala/com/flipkart/connekt/receptors/directives/AccessLogDirective.scala | Scala | mit | 2,021 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.engine.core.testutils.benchmark
import java.io.{File, FileWriter}
import java.util.Calendar
import com.bwsw.sj.engine.core.testutils.benchmark.loader.{BenchmarkDataSenderConfig, BenchmarkDataSenderParameters, SenderFactory}
import com.typesafe.config.ConfigFactory
/**
* Provides methods for running [[Benchmark]] and writing a result into a file
*
* @author Pavel Tomskikh
*/
class BenchmarkRunner[T <: BenchmarkParameters, S <: BenchmarkDataSenderParameters, C <: BenchmarkDataSenderConfig]
(outputFilenamePrefix: String,
senderFactory: SenderFactory[S, C],
benchmarkFactory: BenchmarkFactory[T, C])
extends App {
// NOTE(review): a *class* (not an object) extending App is unusual — the whole
// constructor body runs under App's delayedInit machinery, so statement order
// below is load-bearing. Confirm subclasses are actually used as entry points.
println(Calendar.getInstance().getTime)
private val config = ConfigFactory.load()
private val runnerConfig = new BenchmarkRunnerConfig(config, outputFilenamePrefix)
// sender supplies the input data stream; benchmark consumes it
private val (sender, senderConfig) = senderFactory.create(config)
private val benchmark = benchmarkFactory.create(config, senderConfig)
benchmark.start()
// full parameter sweep, then persist results before shutting the benchmark down
private val results = run()
writeResult(results)
benchmark.stop()
private val resultsString = results.mkString("\n")
println("DONE")
println("Results:")
println(resultsString)
println(Calendar.getInstance().getTime)
System.exit(0)
// Runs the benchmark once per (sender parameters x benchmark parameters)
// combination, repeating each measurement runnerConfig.repetitions times.
def run(): Iterable[Results] = {
sender.warmUp()
benchmark.warmUp(sender.warmingUpParameters.messagesCount)
sender.flatMap { senderParameters =>
printWithTime(s"Sender parameters: ${senderParameters.toSeq.mkString(",")}")
sender.send(senderParameters)
benchmark.map { benchmarkParameters =>
printWithTime(s"Benchmark parameters: ${benchmarkParameters.toSeq.mkString(",")}")
val results = (1 to runnerConfig.repetitions).map { _ =>
val result = benchmark.run(benchmarkParameters, senderParameters.messagesCount)
printWithTime(result)
result
}
Results(senderParameters, benchmarkParameters, results)
}
}
}
// Writes all results to the configured output file, one Results row per line.
def writeResult(benchmarkResults: Iterable[Results]): Unit = {
val writer = new FileWriter(new File(runnerConfig.outputFileName))
writer.write(benchmarkResults.mkString("\n"))
writer.close()
}
// Prints `a` prefixed with the current wall-clock time.
private def printWithTime(a: Any): Unit =
println(s"[${Calendar.getInstance().getTime}] $a")
}
// Aggregated outcome of several benchmark repetitions for one combination of
// sender parameters and benchmark parameters.
case class Results(dataLoaderParams: BenchmarkDataSenderParameters,
moduleBenchParams: BenchmarkParameters,
results: Seq[Long]) {

  // Mean of the non-negative measurements; -1 when every repetition failed
  // (negative values mark failed runs).
  val averageResult: Long = {
    val succeeded = results.filter(_ >= 0)
    if (succeeded.isEmpty) -1L
    else succeeded.sum / succeeded.size
  }

  // Flat row: sender params, benchmark params, raw results, then the average.
  def toSeq: Seq[Any] =
    (dataLoaderParams.toSeq ++ moduleBenchParams.toSeq ++ results) :+ averageResult

  // CSV-style rendering of toSeq.
  override def toString: String = toSeq.mkString(",")
}
| bwsw/sj-platform | core/sj-engine-core/src/main/scala/com/bwsw/sj/engine/core/testutils/benchmark/BenchmarkRunner.scala | Scala | apache-2.0 | 3,580 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets matching specific criteria, giving a basic overview of the dataset's contents without deeper analysis.