code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Copyright 2017 Zhang Di
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.dizhang.seqspark
import org.apache.spark.{SparkConf, SparkContext}
import org.dizhang.seqspark.util.SeqContext
import org.dizhang.seqspark.util.UserConfig.RootConfig
import org.dizhang.seqspark.meta._
import org.slf4j.{Logger, LoggerFactory}
/**
* meta analysis
*/
object MetaAnalysis {

  val logger: Logger = LoggerFactory.getLogger(this.getClass)

  /** Runs the whole meta-analysis stage for the given sequencing context:
    * constructs a [[MetaMaster]] and delegates all work to it. */
  def apply(seqContext: SeqContext): Unit = {
    logger.info("start meta analysis")
    new MetaMaster(seqContext).run()
    logger.info("end meta analysis")
  }
}
| statgenetics/seqspark | src/main/scala/org/dizhang/seqspark/MetaAnalysis.scala | Scala | apache-2.0 | 1,178 |
package scalan.collections
import scala.collection.immutable.HashSet
import scala.language.reflectiveCalls
import scalan._
/** Tests for the scalan SHashSet DSL, exercised both in staged (Exp) and
  * sequential (Std) interpretation. */
class HashSetTests extends BaseCtxTests {

  /** Lambda definitions shared by the staged and the sequential test contexts. */
  trait HashSetSimple extends HashSetsDsl {
    lazy val tElem = element[HashSet[Int]]
    lazy val empty = SHashSet.empty[Int]
    // t1: identity on a set
    lazy val t1 = fun { (t: Rep[SHashSet[Int]]) => t }
    // t2: add an element to a set (pair input)
    lazy val t2 = fun { (in: Rep[(SHashSet[Int],Int)]) => val Pair(t, i) = in; t + i }
    // t3: singleton set built from one element
    lazy val t3 = fun { (e: Rep[Int]) => SHashSet.empty[Int] + e }
    // t4: increment every element
    lazy val t4 = fun { (t: Rep[SHashSet[Int]]) => t.map(fun { x => x + 1 }) }
    // t5: same shape as t2 (kept separate so both staged graphs are emitted)
    lazy val t5 = fun { (in: Rep[(SHashSet[Int],Int)]) => val Pair(t, i) = in; t + i }
    // t6: map with a value captured from outside the inner lambda
    lazy val t6 = fun { (t: Rep[(SHashSet[Int],Int)]) => {
      t._1.map(fun { x => x + t._2 })
    }}
    // t7: fold (sum) with an externally supplied initial value
    lazy val t7 = fun { (t: Rep[(SHashSet[Int],Int)]) => {
      t._1.fold(t._2)(fun { x => x._1 + x._2 })
    }}
  }

  // Staged interpretation: builds the graphs and emits them for inspection;
  // no runtime assertions beyond graph construction succeeding.
  test("simpleHashsetStaged") {
    val ctx = new TestContext with HashSetSimple with HashSetsDslExp {
      def test() = {
        //assert(!isInlineThunksOnForce, "precondition for tests")
        {
          //TODO make this work (recognizer should deal with BaseTypeElem)
          // val Def(Lambda(_, _, x, SThrowableMethods.getMessage(obj))) = t1
          // assert(x == obj)
        }
      }
    }
    ctx.test
    ctx.emit("empty", ctx.empty)
    ctx.emit("t1", ctx.t1)
    ctx.emit("t2", ctx.t2)
    ctx.emit("t3", ctx.t3)
    ctx.emit("t4", ctx.t4)
    ctx.emit("t5", ctx.t5)
    ctx.emit("t6", ctx.t6)
    ctx.emit("t7", ctx.t7)
  }

  // Sequential interpretation: the same lambdas run directly on
  // scala.collection.immutable.HashSet and results are checked for real.
  test("simpleHashsetSeq") {
    val ctx = new ScalanDslStd with HashSetSimple with HashSetsDslStd {
      def test() = {
        //assert(!isInlineThunksOnForce, "precondition for tests")
      }
    }
    import ctx._
    ctx.test
    {
      val res = ctx.t2((SHashSet.empty[Int], 10))
      assertResult(SHashSetImpl(HashSet(10)))(res)
    }
    {
      val res = ctx.t3(10)
      assertResult(SHashSetImpl(HashSet(10)))(res)
    }
    {
      val res = ctx.t4(SHashSetImpl(HashSet(10, 20, 30)))
      assertResult(SHashSetImpl(HashSet(11, 21, 31)))(res)
    }
    {
      val res = ctx.t7((SHashSetImpl(HashSet(10, 20, 30)),0))
      assertResult(60)(res)
    }
  }
}
| scalan/scalan | collections/src/test/scala/scalan/collections/HashSetTests.scala | Scala | apache-2.0 | 2,207 |
/*
* The MIT License
*
* Copyright (c) 2016 Fulcrum Genomics LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.fulcrumgenomics.bam
import com.fulcrumgenomics.FgBioDef._
import com.fulcrumgenomics.bam.api.{SamRecord, SamSource, SamWriter}
import com.fulcrumgenomics.cmdline.{ClpGroups, FgBioTool}
import com.fulcrumgenomics.commons.util.LazyLogging
import com.fulcrumgenomics.sopt.cmdline.ValidationException
import com.fulcrumgenomics.sopt.{arg, clp}
import com.fulcrumgenomics.util.Io
import htsjdk.samtools.SAMFileHeader.{GroupOrder, SortOrder}
import htsjdk.samtools.SamPairUtil.SetMateInfoIterator
@clp(group=ClpGroups.SamOrBam, description=
"""
|Adds and/or fixes mate information on paired-end reads. Sets the MQ (mate mapping quality),
|`MC` (mate cigar string), ensures all mate-related flag fields are set correctly, and that
|the mate reference and mate start position are correct.
|
|Supplementary records are handled correctly (updated with their mate's non-supplemental
|attributes). Secondary alignments are passed through but are not updated.
|
|The input file must be query-name sorted or query-name grouped (i.e. all records from the same
|query sequence must be adjacent in the file, though the ordering between queries is unspecified).
""")
class SetMateInformation
(
  @arg(flag='i', doc="Input SAM/BAM/CRAM file.") val input: PathToBam = Io.StdIn,
  @arg(flag='o', doc="Output SAM/BAM/CRAM file.") val output: PathToBam = Io.StdOut,
  @arg(flag='r', doc="Reference fasta, only needed if writing CRAM.") val ref: Option[PathToFasta] = None,
  @arg(flag='x', doc="If specified, do not fail when reads marked as paired are missing their mate pairs.")
  val allowMissingMates: Boolean = false
) extends FgBioTool with LazyLogging {
  Io.assertReadable(input)
  Io.assertCanWriteFile(output)

  // Opened eagerly so the sort/group order can be validated before execute() runs.
  private val in = SamSource(input)

  // Mate fixing requires all records of a query to be adjacent in the input:
  // either queryname-sorted or query-grouped is acceptable.
  private val queryAdjacent =
    in.header.getSortOrder == SortOrder.queryname || in.header.getGroupOrder == GroupOrder.query
  if (!queryAdjacent) {
    in.safelyClose()
    throw new ValidationException("Input is not queryname sorted or grouped.")
  }

  /** Streams every record through htsjdk's SetMateInfoIterator, which fills in
    * MQ/MC and the mate flag/position fields, and writes the results out. */
  override def execute(): Unit = {
    val writer = SamWriter(output, in.header, ref=ref)
    val fixed  = new SetMateInfoIterator(in.iterator.map(_.asSam), true, allowMissingMates)
    writer ++= fixed.map(_.asInstanceOf[SamRecord])
    writer.close()
    in.safelyClose()
  }
}
| fulcrumgenomics/fgbio | src/main/scala/com/fulcrumgenomics/bam/SetMateInformation.scala | Scala | mit | 3,506 |
package com.socrata.balboa.metrics.data.impl
import java.io.IOException
import java.util.Date
import com.socrata.balboa.metrics.{Metrics, Timeslice}
import com.socrata.balboa.metrics.data.{DataStore, Period}
import org.slf4j.{Logger, LoggerFactory}
import scala.collection.mutable
/**
* Buffers metrics across all metric sources for some
* time period. This only keeps a single buffer for the
* current time slice.
*
* Metrics with timestamps older than the current slice:
* If a metric comes in with a timestamp older than the
* current slice it will be passed to the underlying
* datastore immediately.
*
* Metrics in the current slice:
* If a metric comes in with a timestamp within the current
* slice it will be aggregated
*
* Metrics in the future:
* Metrics in the future will trigger a flush of the
* buffer and the current slice will be set to the
* future timestamp of that metric.
*/
class BufferedDataStore(underlying: DataStore,
                        timeService: TimeService = new TimeService,
                        val bufferGranularity: Long) extends DataStoreImpl {
  val log: Logger = LoggerFactory.getLogger(classOf[BufferedDataStore])

  // entity id -> metrics aggregated for the slice that starts at currentSlice.
  // Mutated only inside buffer.synchronized; heartbeat does a cheap unsynchronized
  // pre-check on currentSlice before taking the lock in flushExpired.
  var buffer = new mutable.HashMap[String, Metrics]
  // Start (ms since epoch, aligned down to bufferGranularity) of the slice being
  // buffered; -1 until the first flush establishes a slice.
  var currentSlice: Long = -1

  // Health checking is delegated to the wrapped datastore.
  @throws[Exception]
  override def checkHealth(): Unit = underlying.checkHealth()

  /** Periodic tick: flushes the buffer once wall-clock time has moved past the
    * buffered slice, so metrics are not held indefinitely when traffic stops. */
  override def heartbeat(): Unit = {
    val timestamp = timeService.currentTimeMillis()
    // Align the current time down to the start of its granularity window.
    val nearestSlice = timestamp - (timestamp % bufferGranularity)
    if (nearestSlice > currentSlice) {
      try {
        flushExpired(timestamp)
      } catch {
        case (e: IOException) =>
          log.error("Unable to flush buffered metrics at regular heartbeat. This is bad.", e)
      }
    }
  }

  /** Writes every buffered entity to the underlying datastore and advances
    * currentSlice, provided `timestamp` falls in a later slice than the one
    * currently buffered. On failure the exception propagates and the buffer
    * stays in memory (nothing is cleared). */
  @throws[IOException]
  def flushExpired(timestamp: Long): Unit = {
    buffer.synchronized {
      val nearestSlice = timestamp - (timestamp % bufferGranularity)
      // Re-checked under the lock (heartbeat's check was unsynchronized).
      if (nearestSlice > currentSlice) {
        log.info(s"Flushing ${buffer.size} entities to underlying datastore from the last ${bufferGranularity}ms")
        // flush metrics
        buffer.foreach({ case (entity, metrics) =>
          // If a failure occurs in the underlying datastore the exception
          // chain back up and keep the buffer in memory
          log.info(" flushing " + entity)
          underlying.persist(entity, currentSlice, metrics)
        })
        buffer.clear()
        currentSlice = nearestSlice
      }
    }
  }

  /** Aggregates `metrics` into the buffer for the current slice. Timestamps older
    * than the current slice bypass the buffer and go straight to the underlying
    * store; a timestamp beyond the current slice triggers a flush first. */
  @throws[IOException]
  override def persist(entityId: String, timestamp: Long, metrics: Metrics): Unit = {
    buffer.synchronized {
      if (timestamp < currentSlice) {
        // Metrics older than our current slice do not get aggregated.
        underlying.persist(entityId, timestamp, metrics)
      } else {
        flushExpired(timestamp)
        val existing = buffer.get(entityId)
        existing match {
          case Some(existing) =>
            // NOTE(review): merge appears to mutate the stored Metrics in place,
            // which would make the put below a re-insert of the same reference —
            // confirm against Metrics.merge.
            existing.merge(metrics)
            buffer.put(entityId, existing)
          case None =>
            buffer.put(entityId, metrics)
        }
      }
    }
  }

  // Read operations pass straight through to the underlying datastore; they do
  // NOT observe metrics still sitting in the in-memory buffer.
  override def entities(): Iterator[String] = underlying.entities()
  override def entities(pattern: String): Iterator[String] = underlying.entities(pattern)
  override def slices(entityId: String, period: Period, start: Date, end: Date): Iterator[Timeslice] =
    underlying.slices(entityId, period, start, end)
  override def find(entityId: String, period: Period, start: Date): Iterator[Metrics] =
    underlying.find(entityId, period, start)
  override def find(entityId: String, period: Period, start: Date, end: Date): Iterator[Metrics] =
    underlying.find(entityId, period, start, end)
  override def find(entityId: String, start: Date, end: Date): Iterator[Metrics] =
    underlying.find(entityId, start, end)

  // Flush whatever is still buffered when the service shuts down.
  override def onStop(): Unit = heartbeat()
}
| socrata-platform/balboa | balboa-common/src/main/scala/com/socrata/balboa/metrics/data/impl/BufferedDataStore.scala | Scala | apache-2.0 | 3,884 |
package org.jetbrains.plugins.scala
package refactoring.changeSignature
import java.io.File
import com.intellij.openapi.util.io.FileUtilRt
import com.intellij.openapi.vfs.CharsetToolkit
import com.intellij.psi._
import com.intellij.psi.impl.source.PostprocessReformattingAspect
import com.intellij.refactoring.changeSignature._
import org.jetbrains.plugins.scala.base.ScalaLightPlatformCodeInsightTestCaseAdapter
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.lang.psi.api.base.ScMethodLike
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.createTypeFromText
import org.jetbrains.plugins.scala.lang.psi.types.api._
import org.jetbrains.plugins.scala.lang.refactoring.changeSignature.changeInfo.ScalaChangeInfo
import org.jetbrains.plugins.scala.lang.refactoring.changeSignature.{ScalaChangeSignatureProcessor, ScalaParameterInfo}
import org.jetbrains.plugins.scala.project.ProjectContext
import org.jetbrains.plugins.scala.settings.annotations._
import org.jetbrains.plugins.scala.util._
import org.junit.Assert._
import scala.annotation.nowarn
/**
* Nikolay.Tropin
* 2014-08-14
*/
@nowarn("msg=ScalaLightPlatformCodeInsightTestCaseAdapter")
abstract class ChangeSignatureTestBase extends ScalaLightPlatformCodeInsightTestCaseAdapter {
  // The member whose signature is being refactored; assigned in doTest.
  var targetMethod: PsiMember = null
  // When true, new parameters get a default value in the definition rather than at call sites.
  protected var isAddDefaultValue = false

  implicit def projectContext: ProjectContext = getProjectAdapter

  override def getTestDataPath = folderPath

  /** Root folder containing the before/after fixture files. */
  def folderPath: String
  /** Name of the primary fixture file for the given test. */
  def mainFileName(testName: String): String
  /** Name of the expected ("after") primary file. */
  def mainFileAfterName(testName: String): String
  /** Name of an optional secondary fixture file, or null when the test has none. */
  def secondFileName(testName: String): String
  /** Name of the expected ("after") secondary file. */
  def secondFileAfterName(testName: String): String

  /** Builds the change-signature processor (Java or Scala flavour) under test. */
  def processor(newVisibility: String,
                newName: String,
                newReturnType: String,
                newParams: => Seq[Seq[ParameterInfo]]): ChangeSignatureProcessorBase

  /** Locates the member the refactoring should target in the configured file. */
  def findTargetElement: PsiMember

  /** Runs the refactoring over the fixtures and compares the result against the
    * "after" files, restoring the original code-style settings afterwards. */
  protected def doTest(newVisibility: String,
                       newName: String,
                       newReturnType: String,
                       newParams: => Seq[Seq[ParameterInfo]],
                       settings: ScalaCodeStyleSettings = TypeAnnotationSettings.alwaysAddType(ScalaCodeStyleSettings.getInstance(getProjectAdapter))): Unit = {
    val testName = getTestName(false)
    // Snapshot the settings so they can be restored after the refactoring runs.
    val oldSettings = ScalaCodeStyleSettings.getInstance(getProjectAdapter).clone()
    TypeAnnotationSettings.set(getProjectAdapter, settings)
    val secondName = secondFileName(testName)
    val checkSecond = secondName != null
    val secondFile = if (checkSecond) {
      val secondFileText = getTextFromTestData(secondName)
      addFileToProject(secondName, secondFileText)
    } else null
    val fileName = mainFileName(testName)
    configureByFile(fileName)
    targetMethod = findTargetElement
    processor(newVisibility, newName, newReturnType, newParams).run()
    // Apply any reformatting the refactoring postponed before comparing text.
    PostprocessReformattingAspect.getInstance(getProjectAdapter).doPostponedFormatting()
    val mainAfterText = getTextFromTestData(mainFileAfterName(testName))
    TypeAnnotationSettings.set(getProjectAdapter, oldSettings.asInstanceOf[ScalaCodeStyleSettings])
    assertEquals(mainAfterText, getFileAdapter.getText)
    if (checkSecond) {
      val secondAfterText = getTextFromTestData(secondFileAfterName(testName))
      assertEquals(secondAfterText, secondFile.getText)
    }
  }

  protected def addFileToProject(fileName: String, text: String): PsiFile =
    PsiFileTestUtil.addFileToProject(fileName, text, getProjectAdapter)

  /** Loads a fixture file's text (UTF-8, line endings normalized). */
  protected def getTextFromTestData(fileName: String) = {
    val file = new File(getTestDataPath + fileName)
    FileUtilRt.loadFile(file, CharsetToolkit.UTF8, true)
  }

  /** Parses `typeText` into a PsiType in the given context. */
  protected def getPsiTypeFromText(typeText: String, context: PsiElement): PsiType = {
    val factory: JavaCodeFragmentFactory = JavaCodeFragmentFactory.getInstance(getProjectAdapter)
    factory.createTypeCodeFragment(typeText, context, false).getType
  }

  /** Processor for refactoring a Java target method; null newReturnType keeps the original. */
  protected def javaProcessor(newVisibility: String,
                              newName: String,
                              newReturnType: String,
                              newParams: => Seq[Seq[ParameterInfo]]): ChangeSignatureProcessorBase = {
    val psiMethod = targetMethod.asInstanceOf[PsiMethod]
    val retType =
      if (newReturnType != null) getPsiTypeFromText(newReturnType, psiMethod) else psiMethod.getReturnType
    val params = newParams.flatten.map(_.asInstanceOf[ParameterInfoImpl]).toArray
    new ChangeSignatureProcessor(getProjectAdapter, psiMethod, /*generateDelegate = */ false,
      newVisibility, newName, retType, params, Array.empty)
  }

  /** Processor for refactoring a Scala target method; null newReturnType keeps the original. */
  protected def scalaProcessor(newVisibility: String,
                               newName: String,
                               newReturnType: String,
                               newParams: => Seq[Seq[ParameterInfo]],
                               isAddDefaultValue: Boolean): ChangeSignatureProcessorBase = {
    val maybeReturnType = targetMethod match {
      case fun: ScFunction =>
        Option(newReturnType).flatMap {
          createTypeFromText(_, fun, fun)
        }.orElse {
          fun.returnType.toOption
        }
      case _ => None
    }
    val params = newParams.map(_.map(_.asInstanceOf[ScalaParameterInfo]))
    // TODO Having this repeated separately somehow defies the purpose of testing
    val annotationNeeded = ScalaTypeAnnotationSettings(targetMethod.getProject).isTypeAnnotationRequiredFor(
      Declaration(targetMethod, Visibility(newVisibility)), Location(targetMethod), Some(Definition(targetMethod)))
    val changeInfo =
      ScalaChangeInfo(newVisibility, targetMethod.asInstanceOf[ScMethodLike], newName, maybeReturnType.getOrElse(Any), params,
        isAddDefaultValue, Some(annotationNeeded))
    new ScalaChangeSignatureProcessor(changeInfo)(getProjectAdapter)
  }
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/refactoring/changeSignature/ChangeSignatureTestBase.scala | Scala | apache-2.0 | 6,011 |
package examples.circe
import com.twitter.finagle.Service
import com.twitter.finagle.http.Method.Post
import com.twitter.finagle.http.{Request, Response, Status}
import com.twitter.util.Future
import io.circe.generic.auto._
import io.fintrospect.RouteSpec
import io.fintrospect.formats.Circe
import io.fintrospect.formats.Circe.Auto._
import io.fintrospect.formats.Circe.responseSpec
import io.fintrospect.parameters.{Body, Path}
/**
* This endpoint uses the "Circe.Auto.InOut" Filter to automatically create a HTTP 200 response from some returned case class content.
*/
class AddMessage(emails: Emails) {
  // Sample payload rendered in the generated route documentation.
  private val sampleEmail = Email(EmailAddress("you@github.com"), EmailAddress("wife@github.com"), "when are you going to be home for dinner", 250)

  // Request body spec: a JSON Email, documented with the sample above.
  private val emailBody = Body.of(Circe.bodySpec[Email](), "email", sampleEmail)

  /** Stores the posted email (when its recipient matches the path segment)
    * and replies with the recipient's full inbox. */
  private def storeAndList(address: EmailAddress): Service[Request, Response] = {
    val service = Service.mk {
      newEmail: Email =>
        // validate that the receiver is as passed as the one in the URL
        if (address == newEmail.to) emails.add(newEmail)
        Future(emails.forUser(newEmail.to))
    }
    InOut(service)
  }

  val route = RouteSpec("add an email and return the new inbox contents for the receiver")
    .body(emailBody)
    .returning(responseSpec(Status.Ok -> "new list of emails for the 'to' user", Seq(sampleEmail)))
    .at(Post) / "email" / Path.of(EmailAddress.spec, "email") bindTo storeAndList
}
| daviddenton/fintrospect | src/main/scala/examples/circe/AddMessage.scala | Scala | apache-2.0 | 1,446 |
/**
* Licensed to Gravity.com under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Gravity.com licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.gravity.goose.extractors
import org.jsoup.nodes.Element
import com.github.nscala_time.time.Imports._
/**
 * Implement this class to extract the [[DateTime]] at which an article was published.
 *
 * `extract` is invoked by the content extractor after the article DOM has been
 * parsed, and its result is stored on the article as the publish date.
 */
abstract class PublishDateExtractor extends Extractor[DateTime] {
  /**
   * Searches the DOM and identifies when this article was published.
   *
   * @param rootElement root element of the parsed article document
   * @return the publish [[DateTime]] of this article, or `null` if no date
   *         could be found (callers handle the null case)
   */
  def extract(rootElement: Element): DateTime
}
| raisercostin/goose | src/main/scala/com/gravity/goose/extractors/PublishDateExtractor.scala | Scala | apache-2.0 | 1,770 |
//cannot.inline.implicit.element
implicit val /*caret*/name: String = ???
def foo(implicit name: String) = ???
foo
| jastice/intellij-scala | scala/scala-impl/testdata/inline/warnings/ImplicitVal.scala | Scala | apache-2.0 | 116 |
/**
* Copyright (C) 2015-2016 DANS - Data Archiving and Networked Services (info@dans.knaw.nl)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.api.sword2
import java.util
import javax.crypto.Mac
import javax.crypto.spec.SecretKeySpec
import javax.naming.ldap.InitialLdapContext
import javax.naming.{AuthenticationException, Context}
import org.apache.commons.lang.StringUtils._
import org.slf4j.LoggerFactory
import org.swordapp.server.{AuthCredentials, SwordAuthException, SwordError}
import scala.util.{Failure, Success, Try}
/** Password hashing and credential checking for the SWORD deposit service. */
object Authentication {
  val log = LoggerFactory.getLogger(getClass)

  /** Returns the Base64-encoded HMAC-SHA1 of `password`, keyed with `userName`.
    *
    * NOTE(review): getBytes() uses the platform default charset — confirm credentials
    * are ASCII-only, or pin a charset, if non-ASCII passwords must be supported.
    */
  def hash(password: String, userName: String): String = {
    val signingKey = new SecretKeySpec(userName.getBytes(), "HmacSHA1")
    val mac = Mac.getInstance("HmacSHA1")
    mac.init(signingKey)
    val rawHmac = mac.doFinal(password.getBytes())
    // java.util.Base64 replaces the JDK-internal sun.misc.BASE64Encoder (unavailable on
    // Java 9+). For a 20-byte HMAC-SHA1 digest both yield the identical 28-char encoding.
    java.util.Base64.getEncoder.encodeToString(rawHmac)
  }

  /** Validates the supplied credentials against the configured authentication backend.
    *
    * @return Failure(SwordError) when mediation (onBehalfOf) is requested,
    *         Failure(SwordAuthException) on bad credentials or a disabled account,
    *         Success(()) when authentication succeeds.
    */
  @throws(classOf[SwordError])
  @throws(classOf[SwordAuthException])
  def checkAuthentication(auth: AuthCredentials)(implicit settings: Settings): Try[Unit] = {
    log.debug("Checking that onBehalfOf is not specified")
    if (isNotBlank(auth.getOnBehalfOf)) {
      Failure(new SwordError("http://purl.org/net/sword/error/MediationNotAllowed"))
    }
    else {
      log.debug(s"Checking credentials for user ${auth.getUsername}")
      settings.auth match {
        case SingleUserAuthSettings(user, password) =>
          if (user != auth.getUsername || password != hash(auth.getPassword, auth.getUsername)) Failure(new SwordAuthException)
          else {
            log.info("Single user log in SUCCESS")
            Success(())
          }
        case authSettings: LdapAuthSettings =>
          // flatMap (not map) so a `false` outcome becomes a Failure of the OUTER Try;
          // with map the result nested as Success(Failure(...)) and never signalled failure.
          authenticateThroughLdap(auth.getUsername, auth.getPassword, authSettings).flatMap {
            case false => Failure(new SwordAuthException)
            case true =>
              log.info(s"User ${auth.getUsername} authentication through LDAP successful")
              log.info("LDAP log in SUCCESS")
              Success(())
          }
        case _ => Failure(new RuntimeException("Authentication not properly configured. Contact service admin"))
      }
    }
  }

  /** Binds to LDAP with the user's own credentials and checks the sword-enabled
    * attribute on the user entry. A failed bind (wrong password) yields
    * Success(false); any other error fails the Try. */
  private def authenticateThroughLdap(user: String, password: String, authSettings: LdapAuthSettings): Try[Boolean] = {
    getInitialContext(user, password, authSettings).map {
      context =>
        val attrs = context.getAttributes(s"uid=$user, ${authSettings.usersParentEntry}")
        val enabled = attrs.get(authSettings.swordEnabledAttributeName)
        enabled != null && enabled.size == 1 && enabled.get(0) == authSettings.swordEnabledAttributeValue
    }.recoverWith {
      case _: AuthenticationException => Success(false)
      case t => Failure(new RuntimeException("Error trying to authenticate", t))
    }
  }

  /** Opens an LDAP context authenticated as the given user (simple bind). */
  private def getInitialContext(user: String, password: String, authSettings: LdapAuthSettings): Try[InitialLdapContext] = Try {
    val env = new util.Hashtable[String, String]()
    env.put(Context.PROVIDER_URL, authSettings.ldapUrl.toString)
    env.put(Context.SECURITY_AUTHENTICATION, "simple")
    env.put(Context.SECURITY_PRINCIPAL, s"uid=$user, ${authSettings.usersParentEntry}")
    env.put(Context.SECURITY_CREDENTIALS, password)
    env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.ldap.LdapCtxFactory")
    new InitialLdapContext(env, null)
  }
}
| vesaakerman/easy-sword2 | src/main/scala/nl/knaw/dans/api/sword2/Authentication.scala | Scala | apache-2.0 | 3,890 |
package cook.actor.impl
import cook.actor.ConfigLoader
import cook.actor.ConfigManager
import cook.actor.ConfigRefLoader
import cook.actor.ConfigRefManager
import cook.actor.ConsoleOutputter
import cook.actor.StatusManager
import cook.actor.TargetBuilder
import cook.actor.TargetManager
import cook.app.Global
import akka.actor.{ TypedActor, TypedProps, SupervisorStrategy }
import akka.event.Logging
/** Base trait for Cook's typed actors: provides an actor-scoped logger and a
  * stop-on-failure supervision strategy. */
trait TypedActorBase extends TypedActor.Supervisor {
  val log = Logging(TypedActor.context.system, TypedActor.context.self)

  // Stop a failing child outright instead of restarting it.
  override def supervisorStrategy: SupervisorStrategy = SupervisorStrategy.stoppingStrategy
}
/** Lazily-created typed-actor proxies for the well-known singleton actors of the
  * system. Each proxy wraps the actor already registered under the matching
  * /user/<Name> path.
  *
  * NOTE(review): system.actorFor is deprecated in newer Akka releases — fine for
  * the Akka version this project pins, but revisit on upgrade.
  */
object ActorRefs {
  import Global.system

  lazy val configRefLoader =
    TypedActor(system).typedActorOf(
      TypedProps[ConfigRefLoader],
      system.actorFor("/user/ConfigRefLoader"))

  lazy val configRefManager =
    TypedActor(system).typedActorOf(
      TypedProps[ConfigRefManager],
      system.actorFor("/user/ConfigRefManager"))

  lazy val configLoader =
    TypedActor(system).typedActorOf(
      TypedProps[ConfigLoader],
      system.actorFor("/user/ConfigLoader"))

  lazy val configManager =
    TypedActor(system).typedActorOf(
      TypedProps[ConfigManager],
      system.actorFor("/user/ConfigManager"))

  lazy val targetManager =
    TypedActor(system).typedActorOf(
      TypedProps[TargetManager],
      system.actorFor("/user/TargetManager"))

  lazy val targetBuilder =
    TypedActor(system).typedActorOf(
      TypedProps[TargetBuilder],
      system.actorFor("/user/TargetBuilder"))

  lazy val consoleOutputter =
    TypedActor(system).typedActorOf(
      TypedProps[ConsoleOutputter],
      system.actorFor("/user/ConsoleOutputter"))

  // Implicit so call sites can take the status manager as an implicit parameter.
  implicit lazy val statusManager =
    TypedActor(system).typedActorOf(
      TypedProps[StatusManager],
      system.actorFor("/user/StatusManager"))
}
| timgreen/cook | src/cook/actor/impl/TypedActorBase.scala | Scala | apache-2.0 | 1,853 |
package io.kaitai.struct
import io.kaitai.struct.datatype.DataType
import io.kaitai.struct.datatype.DataType.UserType
import io.kaitai.struct.exprlang.Ast
import io.kaitai.struct.format._
import io.kaitai.struct.languages.components.{LanguageCompiler, LanguageCompilerStatic}
/** Generates a self-contained, Bootstrap-styled HTML page documenting a Kaitai Struct
  * format: one section per user-defined type with a table of its `seq` attributes,
  * plus tables for parse instances and enums. */
class HtmlClassCompiler(classSpecs: ClassSpecs, topClass: ClassSpec) extends DocClassCompiler(classSpecs, topClass) {
  import HtmlClassCompiler._

  override def outFileName(topClass: ClassSpec): String = s"${topClass.nameAsStr}.html"

  // Nesting depth is tracked by the output helper; no textual indent prefix is used.
  override def indent: String = ""

  /** Emits the page preamble: doctype, Bootstrap CSS link and the title/heading. */
  override def fileHeader(topClass: ClassSpec): Unit = {
    out.puts(
      s"""
         |<!doctype html>
         |<html lang="en">
         | <head>
         | <!-- Required meta tags -->
         | <meta charset="utf-8">
         | <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
         |
         | <!-- Bootstrap CSS -->
         | <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.2.1/css/bootstrap.min.css" integrity="sha384-GJzZqFGwb1QTTN6wy59ffF1BuGJpLSa9DkKMp0DgiMDm4iYMj70gZWKYbI706tWS" crossorigin="anonymous">
         |
         | <title>${type2str(topClass.name.last)} format specification</title>
         | </head>
         | <body>
         | <div class="container">
         | <h1>${type2str(topClass.name.last)} format specification</h1>
         |
      """.stripMargin)
    // TODO: parse & output meta/title, meta/file-extensions, etc
  }

  /** Emits the closing tags and Bootstrap's JS dependencies. */
  override def fileFooter(topClass: ClassSpec): Unit = {
    out.puts(
      """
        | </div>
        | <!-- Optional JavaScript -->
        | <!-- jQuery first, then Popper.js, then Bootstrap JS -->
        | <script src="https://code.jquery.com/jquery-3.3.1.slim.min.js" integrity="sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo" crossorigin="anonymous"></script>
        | <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.6/umd/popper.min.js" integrity="sha384-wHAiFfRlMFy6i5SRaxvfOCifBUQy1xHdJ/yoi7FRNXMRBu5WHdZYu1hA6ZOblgut" crossorigin="anonymous"></script>
        | <script src="https://stackpath.bootstrapcdn.com/bootstrap/4.2.1/js/bootstrap.min.js" integrity="sha384-B0UglyR+jN6CkvvICOB2joaf5I4l3gm9GU6Hc1og6Ls7i6U/mkkaduKaBhlAXv9k" crossorigin="anonymous"></script>
        | </body>
        |</html>
      """.stripMargin)
  }

  /** Anchor + heading (level tracks nesting depth) + optional summary for a type. */
  override def classHeader(classSpec: ClassSpec): Unit = {
    out.puts(s"<a name='${classSpec2Anchor(classSpec)}'></a>")
    out.puts(s"<$headerByIndent>Type: ${type2str(classSpec.name.last)}</$headerByIndent>")
    out.puts
    classSpec.doc.summary.foreach(summary =>
      out.puts(s"<p>$summary</p>")
    )
    out.inc
  }

  override def classFooter(classSpec: ClassSpec): Unit = {
    out.dec
  }

  override def seqHeader(classSpec: ClassSpec): Unit = {
    out.puts("<table class=\"table\">")
    out.puts("<tr><th>Offset</th><th>Size</th><th>ID</th><th>Type</th><th>Note</th></tr>")
  }

  override def seqFooter(classSpec: ClassSpec): Unit = {
    out.puts("</table>")
  }

  /** One row of the seq table. Size is not computed yet and is rendered as "...". */
  override def compileSeqAttr(classSpec: ClassSpec, attr: AttrSpec, seqPos: Option[Int], sizeElement: Sized, sizeContainer: Sized): Unit = {
    out.puts("<tr>")
    out.puts(s"<td>${GraphvizClassCompiler.seqPosToStr(seqPos).getOrElse("???")}</td>")
    out.puts(s"<td>...</td>")
    out.puts(s"<td>${attr.id.humanReadable}</td>")
    out.puts(s"<td>${kaitaiType2NativeType(attr.dataType)}</td>")
    out.puts(s"<td>${attr.doc.summary.getOrElse("")}</td>")
    out.puts("</tr>")
  }

  /** Renders a parse instance as its own single-row table. */
  override def compileParseInstance(classSpec: ClassSpec, inst: ParseInstanceSpec): Unit = {
    out.puts(s"<p><b>Parse instance</b>: ${inst.id.humanReadable}</p>")
    out.puts("<table class=\"table\">")
    out.puts("<tr>")
    out.puts(s"<td>${expression(inst.pos)}</td>")
    out.puts(s"<td>...</td>")
    out.puts(s"<td>${inst.id.humanReadable}</td>")
    out.puts(s"<td>${kaitaiType2NativeType(inst.dataType)}</td>")
    out.puts(s"<td>${inst.doc.summary.getOrElse("")}</td>")
    out.puts("</tr>")
    out.puts("</table>")
  }

  // TODO: placeholder — value instances are only dumped, not rendered as HTML yet.
  override def compileValueInstance(vis: ValueInstanceSpec): Unit = {
    out.puts(s"value instance: ${vis}")
  }

  /** Renders an enum as an anchor, heading and id/name/note table. */
  override def compileEnum(enumName: String, enumColl: EnumSpec): Unit = {
    out.puts(s"<a name='${enumSpec2Anchor(enumColl)}'></a>")
    out.puts(s"<$headerByIndent>Enum: $enumName</$headerByIndent>")
    out.puts
    out.puts("<table class=\"table\">")
    out.puts("<tr>")
    out.puts("<th>ID</th><th>Name</th><th>Note</th>")
    out.puts("</tr>")
    enumColl.sortedSeq.foreach { case (id, value) =>
      out.puts("<tr>")
      out.puts(s"<td>$id</td><td>${value.name}</td><td>${value.doc.summary.getOrElse("")}</td></tr>")
      out.puts("</tr>")
    }
    out.puts("</table>")
  }

  // h1 at the top level, h2/h3/... as types nest deeper.
  def headerByIndent: String = s"h${out.indentLevel + 1}"

  /** Translates an optional AST expression to source text; empty string when absent. */
  def expression(exOpt: Option[Ast.expr]): String = {
    exOpt match {
      case Some(ex) => translator.translate(ex)
      case None => ""
    }
  }
}
/** Helpers shared by the HTML documentation generator. */
object HtmlClassCompiler extends LanguageCompilerStatic {
  // FIXME: Unused, should be probably separated from LanguageCompilerStatic
  override def getCompiler(
    tp: ClassTypeProvider,
    config: RuntimeConfig
  ): LanguageCompiler = ???

  // Display form of a kaitai type name (UpperCamelCase).
  def type2str(name: String): String = Utils.upperCamelCase(name)

  // Anchor ids used for intra-page links to type / enum sections.
  def classSpec2Anchor(spec: ClassSpec): String = "type-" + spec.name.mkString("-")
  def enumSpec2Anchor(spec: EnumSpec): String = "enum-" + spec.name.mkString("-")

  // Renders a data type as HTML: user-defined types become links to their own
  // section; everything else falls back to the Graphviz compiler's plain name.
  def kaitaiType2NativeType(attrType: DataType): String = attrType match {
    case ut: UserType =>
      "<a href=\\"#" + classSpec2Anchor(ut.classSpec.get) + "\\">" + type2str(ut.name.last) + "</a>"
    case _ => GraphvizClassCompiler.dataTypeName(attrType)
  }
}
| kaitai-io/kaitai_struct_compiler | shared/src/main/scala/io/kaitai/struct/HtmlClassCompiler.scala | Scala | gpl-3.0 | 5,768 |
package harvester.manager.actors
import
akka.actor.{
ActorSystem,
Props,
ActorRef,
Actor,
ActorLogging
},
akka.util.Timeout,
harvester.scheduler.Scheduler,
members.{
HPartner
},
org.joda.time.{
DateTime,
Duration,
Instant
},
scala.concurrent.{
Await,
duration
}
// Partner actor: drives a harvest cycle for a single partner, handing batches of
// users to the scheduler. Work in progress — the author noted this should become
// an FSM; it currently carries mutable per-cycle state reset on cycle completion.
class Partner(harvestPartner: HPartner, harvestScheduler: ActorRef) extends Actor with ActorLogging {
  import
    context._,
    Partner._

  implicit val timeout: Timeout = Timeout(duration.Duration(1000, "seconds"))

  /**
    Mutable per-cycle state adapted during a harvest cycle; the original author
    flagged this as a temporary solution pending an FSM rewrite.
  */
  var activeCycle: Cycle = NoCycle()
  var activeBatch: Batch = NoBatch()
  var eventStream: HarvestEventStream = NoStream() // NOTE(review): only ever reset, never read — may be removable
  var registered = false
  var pauseUntil: DateTime = _   // NOTE(review): assigned nowhere and never consulted — pause logic is unfinished

  // Registers with the scheduler before any messages are handled. The original
  // author wanted a blocking registration so startup fails fast when the partner
  // cannot be attached to the scheduler.
  override def preStart() = {
    import scala.util.{
      Failure,
      Success
    }
    // make this a blocking call solution
    // so it will fail if a partner cannot
    // be attached to the scheduler
    // case Registered() => { registered = true; println("registered") }
    regsiter.onComplete {   // (sic: "regsiter" — matches the typo'd method name below)
      case Success(registerAskResponse) => {
        setRegistered(true)
      }
      case Failure(ex) => {
        setRegistered(false)
        // retry?
        throw new Exception(s"Cannot start Partner Actor ${harvestPartner.name}")
      }
    }
  }

  // Message protocol; message types are defined in the companion object.
  // Throttle and Pause are placeholders (println only).
  def receive = {
    case Next (numberOfUsers) => next(numberOfUsers)
    case Throttle(limit) => println("throttle")
    case Pause (delay) => println("pause")
    case Start() => cycle
  }

  // NOTE(review): Start() dispatches to `cycle` (the partner's own cycle) rather
  // than the private `start` method below — confirm which is intended.
  private def cycle = {
    harvestPartner.cycle
  }

  // Schedules the next batch of users, provided registration succeeded and the
  // actor is not paused.
  private def next(numberOfUsers: Option[Int]): Unit = {
    println("next")
    val notPaused = true // FIXME: pauseUntil is never checked; pausing is unimplemented
    if(registered && notPaused) {
      schedule(numberOfUsers)
    } else {
      error("called next but cannot deliver new batch of users.")
    }
  }

  // Unimplemented: intended to pause delivery until now + delay.
  private def pause(delay: Duration) = {
    // nowInstant + delay
    // scheduler ! UnpauseAt()
  }

  // NOTE(review): body is empty in both branches — `registered` is never actually set here.
  private def setRegistered(r: Boolean): Unit = {
    if(r) {
    } else {
    }
  }

  // Requests the next batch from the active cycle, or completes the cycle when
  // no users remain. `numberOfUsers` is currently ignored.
  private def schedule(numberOfUsers: Option[Int]): Unit = {
    val userBatchNext = activeCycle.next
    if(userBatchNext.size > 0) {
      harvestScheduler ! Request(userBatchNext)
    } else {
      complete
    }
  }

  // Resets all per-cycle state and notifies the scheduler.
  private def complete = {
    // reset
    activeCycle = NoCycle()
    activeBatch = NoBatch()
    eventStream = NoStream()
    harvestScheduler ! CycleComplete()
  }

  // NOTE(review): never invoked; see the Start() handler above.
  private def start = {
    activeCycle = ActiveCycle()
    harvestScheduler ! Request(activeCycle.next)
  }

  // Asks the scheduler to register this partner actor. (Typo'd name kept as-is;
  // renaming would touch preStart too.)
  private def regsiter = {
    import
      akka.pattern.ask

    harvestScheduler ? Register(self)
  }
}
object Partner {

  import
    org.joda.time.Duration,
    members.HUser

  /*
    It has been recommended that communication protocols are centralized so that
    they can be easily identified.
    If that's the case I need to move these into an object that contains all
    messages that the system's actors can send to each other.
  */
  // --- Messages exchanged between the Partner actor and the scheduler ---
  case class CycleComplete()
  // the size attribute should be something
  // we can do more with
  case class Next(size: Option[Int])
  case class Pause(duration: Duration)
  case class Register(partner: ActorRef)
  case class Registered()
  case class Throttle(downsize: Int)
  case class Unthrottle()
  case class SendMore()
  case class Start()
  case class Request(users: Set[HUser])

  // harvest instance case classes
  // NOTE(review): NoStream/NoBatch/NoCycle are null-object placeholders for
  // the actor's mutable state; ActiveCycle.next currently returns an empty
  // set, so no users are ever scheduled — presumably unfinished.
  trait HarvestEventStream
  case class NoStream() extends HarvestEventStream

  trait Batch
  case class NoBatch() extends Batch

  trait Cycle {
    def next: Set[HUser]
  }
  case class NoCycle() extends Cycle {
    def next = Set[HUser]()
  }
  case class ActiveCycle() extends Cycle {
    def next = Set[HUser]()
  }

  // Error reporting hook; currently a no-op.
  def error(message: String) = {
    // router ! message
  }

  // Props factory for supervised creation of Partner actors.
  def props(harvestPartner: HPartner, harvestScheduler: ActorRef)(implicit as: ActorSystem) = {
    // where do i send schedule events?
    Props(new Partner(harvestPartner, harvestScheduler))
  }
} | somethingconcon/harvester | src/main/scala/geezeo/manager/actors/Partner.scala | Scala | unlicense | 4,278 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2014 Matthias Langer (t3l@threelights.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.latrobe.blaze.modules
import edu.latrobe._
import edu.latrobe.blaze._
import edu.latrobe.blaze.modules.jvm._
import scala.util.hashing._
/**
* Mean pooling layer.
*
* p = position in output
* m = size covered by kernel at position p
*
* m
* ---
* 1 \\
* f(p_a) = --- / x_pi
* m ---
* i
*
* d f(p_a) 1
* -------- = -
* d x_pa m
*
* d f(p_a) 1
* ------------ = -
* d x_pb, a!=b m
*
* ---
* D f(p_a) \\ d f(p_a)
* -------- = / -------- di
* D x_pa --- d x_pi
* i
*
* ---
* 1 \\
* = - / di
* m ---
* i
*
*/
abstract class MeanPooling
  extends PoolingLayerEx[MeanPoolingBuilder] {

  // Mirrors the builder's flag: whether padded (outside-image) positions
  // participate in the averaging — assumed from the builder field name;
  // TODO(review): confirm against the kernel implementations.
  final val includePadding
  : Boolean = builder.includePadding

}
final class MeanPoolingBuilder extends PoolingLayerExBuilder[MeanPoolingBuilder] {

  override def repr: MeanPoolingBuilder = this

  /** Whether padded positions take part in the mean (see [[MeanPooling]]). */
  var includePadding: Boolean = true

  /** Fluent setter for [[includePadding]]. */
  def setIncludePadding(value: Boolean): MeanPoolingBuilder = {
    includePadding = value
    this
  }

  override protected def doToString(): List[Any] =
    includePadding :: super.doToString()

  override def hashCode(): Int =
    MurmurHash3.mix(super.hashCode(), includePadding.hashCode())

  override def canEqual(that: Any): Boolean = that match {
    case _: MeanPoolingBuilder => true
    case _                     => false
  }

  override protected def doEquals(other: Equatable): Boolean = {
    if (!super.doEquals(other)) {
      false
    } else {
      other match {
        case that: MeanPoolingBuilder => includePadding == that.includePadding
        case _                        => false
      }
    }
  }

  override protected def doCopy(): MeanPoolingBuilder = MeanPoolingBuilder()

  override def copyTo(other: InstanceBuilder): Unit = {
    super.copyTo(other)
    other match {
      case that: MeanPoolingBuilder =>
        that.includePadding = includePadding
      case _ =>
      // nothing mean-pooling-specific to transfer
    }
  }

  // ---------------------------------------------------------------------------
  //    Weights / binding related
  // ---------------------------------------------------------------------------
  override def outputPlatformFor(hints: BuildHints): Platform =
    MeanPoolingBuilder.outputPlatformFor(this, hints)

  // Lookup variant and create object.
  override def build(hints: BuildHints,
                     seed: InstanceSeed,
                     weightsBuilder: ValueTensorBufferBuilder): Module =
    MeanPoolingBuilder.lookupAndBuild(this, hints, seed, weightsBuilder)
}
object MeanPoolingBuilder extends ModuleVariantTable[MeanPoolingBuilder] {

  // Variant 2: baseline JVM implementation.
  register(2, MeanPooling_JVM_Baseline_Description)

  /** Builder with default settings. */
  final def apply(): MeanPoolingBuilder = new MeanPoolingBuilder

  /** Builder pre-configured with the given pooling kernel. */
  final def apply(kernel: Kernel): MeanPoolingBuilder = {
    val builder = apply()
    builder.setKernel(kernel)
  }

  /** Builder pre-configured with kernel and padding behavior. */
  final def apply(kernel: Kernel, includePadding: Boolean): MeanPoolingBuilder = {
    val builder = apply(kernel)
    builder.setIncludePadding(includePadding)
  }
}
| bashimao/ltudl | blaze/src/main/scala/edu/latrobe/blaze/modules/MeanPooling.scala | Scala | apache-2.0 | 3,688 |
package br.unb.cic.tp1.mh.ast
import br.unb.cic.tp1.exceptions.ExpressaoInvalida
import br.unb.cic.tp1.mh.visitors.Visitor
case class ExpRelMaior(lhs: Expressao, rhs: Expressao) extends Expressao {

  /**
   * Evaluates `lhs > rhs` on integer operands.
   *
   * Robustness fix: the original only checked that the two operand types were
   * equal, so two booleans slipped past the guard and failed with a
   * ClassCastException; now any non-integer operand raises the domain error.
   */
  override def avaliar(): Valor = {
    if (verificaTipo != TBool()) throw ExpressaoInvalida()
    val v1 = lhs.avaliar().asInstanceOf[ValorInteiro]
    val v2 = rhs.avaliar().asInstanceOf[ValorInteiro]
    // Idiom: wrap the comparison directly instead of `if (c) true else false`.
    ValorBooleano(v1.v > v2.v)
  }

  /** TBool when both operands are TInt, TErro otherwise. */
  override def verificaTipo: Tipo =
    if (lhs.verificaTipo == TInt() && rhs.verificaTipo == TInt()) TBool() else TErro()

  /** Visitor-pattern hook. */
  override def aceitar(v: Visitor): Unit = v.visitar(this)
}
case class ExpRelMaiorIg(lhs: Expressao, rhs: Expressao) extends Expressao {

  /**
   * Evaluates `lhs >= rhs` on integer operands.
   *
   * Robustness fix: the original only checked that the two operand types were
   * equal, so two booleans slipped past the guard and failed with a
   * ClassCastException; now any non-integer operand raises the domain error.
   */
  override def avaliar(): Valor = {
    if (verificaTipo != TBool()) throw ExpressaoInvalida()
    val v1 = lhs.avaliar().asInstanceOf[ValorInteiro]
    val v2 = rhs.avaliar().asInstanceOf[ValorInteiro]
    // Idiom: wrap the comparison directly instead of `if (c) true else false`.
    ValorBooleano(v1.v >= v2.v)
  }

  /** TBool when both operands are TInt, TErro otherwise. */
  override def verificaTipo: Tipo =
    if (lhs.verificaTipo == TInt() && rhs.verificaTipo == TInt()) TBool() else TErro()

  /** Visitor-pattern hook. */
  override def aceitar(v: Visitor): Unit = v.visitar(this)
} | PeterTowers/TP1-022017 | mhs/src/main/scala/br/unb/cic/tp1/mh/ast/ExpRelMaior.scala | Scala | mit | 1,241 |
package com.stevens
/**
 * A collection of labeled points that can be searched spatially.
 * NOTE(review): `LabeledPoint` is declared elsewhere in this package;
 * coordinates are integer arrays — dimensionality is implementation-defined.
 */
trait Queryable {
  // Returns the labeled point closest to `point` (distance metric is
  // implementation-defined).
  def findNearestNeighbor(point: Array[Int]): LabeledPoint
}
| steven-s/scala-computational-geometry | src/main/scala/com/stevens/Queryable.scala | Scala | mit | 100 |
/*
* Copyright 2022 LINE Corporation
*
* LINE Corporation licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.linecorp.armeria.common.scalapb
import com.google.common.collect.MapMaker
import com.linecorp.armeria.common.grpc.GrpcJsonMarshaller
import com.linecorp.armeria.common.scalapb.ScalaPbJsonMarshaller.{
jsonDefaultParser,
jsonDefaultPrinter,
messageCompanionCache,
typeMapperMethodCache
}
import io.grpc.MethodDescriptor.Marshaller
import java.io.{InputStream, OutputStream}
import java.lang.reflect.Field
import java.util.concurrent.ConcurrentMap
import scala.io.{Codec, Source}
import scalapb.grpc.TypeMappedMarshaller
import scalapb.json4s.{Parser, Printer}
import scalapb.{GeneratedMessage, GeneratedMessageCompanion, GeneratedSealedOneof, TypeMapper}
/**
* A [[com.linecorp.armeria.common.grpc.GrpcJsonMarshaller]] that serializes and deserializes
* a [[scalapb.GeneratedMessage]] to and from JSON.
*/
final class ScalaPbJsonMarshaller private (
    jsonPrinter: Printer = jsonDefaultPrinter,
    jsonParser: Parser = jsonDefaultParser
) extends GrpcJsonMarshaller {

  // TODO(ikhoon): Remove this forked file if https://github.com/lampepfl/dotty/issues/11332 is fixed.

  // Serializes a ScalaPB message to JSON bytes. Sealed-oneof wrappers are
  // unwrapped to their underlying message first; anything that is not a
  // GeneratedMessage is a programming error.
  override def serializeMessage[A](marshaller: Marshaller[A], message: A, os: OutputStream): Unit =
    message match {
      case msg: GeneratedSealedOneof =>
        os.write(jsonPrinter.print(msg.asMessage).getBytes())
      case msg: GeneratedMessage =>
        os.write(jsonPrinter.print(msg).getBytes())
      case _ =>
        throw new IllegalStateException(
          s"Unexpected message type: ${message.getClass} (expected: ${classOf[GeneratedMessage]})")
    }

  // Parses JSON from the stream into a message. When the marshaller is a
  // TypeMappedMarshaller, the (private) typeMapper field is extracted via
  // reflection — cached per marshaller — and used to convert the generated
  // message into the custom type A.
  override def deserializeMessage[A](marshaller: Marshaller[A], in: InputStream): A = {
    val companion = getMessageCompanion(marshaller)
    // NOTE(review): the Source is not closed; acceptable only if the caller
    // owns the InputStream lifecycle — confirm with call sites.
    val jsonString = Source.fromInputStream(in)(Codec.UTF8).mkString
    val message = jsonParser.fromJsonString(jsonString)(companion)
    marshaller match {
      case marshaller: TypeMappedMarshaller[_, _] =>
        val method = typeMapperMethodCache.computeIfAbsent(
          marshaller,
          key => {
            val field = key.getClass.getDeclaredField("typeMapper")
            field.setAccessible(true)
            field
          })
        val typeMapper = method.get(marshaller).asInstanceOf[TypeMapper[GeneratedMessage, A]]
        typeMapper.toCustom(message)
      case _ =>
        message.asInstanceOf[A]
    }
  }

  // Reflectively reads (and caches) the marshaller's `companion` field, used
  // to parse JSON into the right generated message type. The pre-check via
  // `get` avoids the computeIfAbsent lambda allocation on the hot path.
  private def getMessageCompanion[A](marshaller: Marshaller[A]): GeneratedMessageCompanion[GeneratedMessage] = {
    val companion = messageCompanionCache.get(marshaller)
    if (companion != null)
      companion
    else
      messageCompanionCache.computeIfAbsent(
        marshaller,
        key => {
          val field = key.getClass.getDeclaredField("companion")
          field.setAccessible(true)
          field.get(marshaller).asInstanceOf[GeneratedMessageCompanion[GeneratedMessage]]
        }
      )
  }
}
/**
* A companion object for [[com.linecorp.armeria.common.scalapb.ScalaPbJsonMarshaller]].
*/
object ScalaPbJsonMarshaller {

  // Weak-keyed caches so marshaller class unloading is not prevented.
  private val messageCompanionCache: ConcurrentMap[Marshaller[_], GeneratedMessageCompanion[GeneratedMessage]] =
    new MapMaker().weakKeys().makeMap()

  private val typeMapperMethodCache: ConcurrentMap[Marshaller[_], Field] =
    new MapMaker().weakKeys().makeMap()

  // Shared defaults; `defaultInstance` below reuses them to avoid allocation.
  private val jsonDefaultPrinter: Printer = new Printer().includingDefaultValueFields

  private val jsonDefaultParser: Parser = new Parser()

  private val defaultInstance: ScalaPbJsonMarshaller = new ScalaPbJsonMarshaller()

  /**
   * Returns a [[com.linecorp.armeria.common.scalapb.ScalaPbJsonMarshaller]],
   * reusing the shared default instance when both arguments are the defaults.
   */
  def apply(
      jsonPrinter: Printer = jsonDefaultPrinter,
      jsonParser: Parser = jsonDefaultParser): ScalaPbJsonMarshaller = {
    val usesDefaults = jsonPrinter == jsonDefaultPrinter && jsonParser == jsonDefaultParser
    if (usesDefaults) defaultInstance
    else new ScalaPbJsonMarshaller(jsonPrinter, jsonParser)
  }
}
| line/armeria | scalapb/scalapb_3/src/main/scala/com/linecorp/armeria/common/scalapb/ScalaPbJsonMarshaller.scala | Scala | apache-2.0 | 4,539 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.fnothaft.gnocchi.models
/**
 * One phenotype observation for a single sample, with a numeric view
 * (`toDouble`) consumed by the regression code.
 */
trait Phenotype[T] extends Product {
  // Name line of the phenotype(s) this observation belongs to
  val phenotype: String
  // Individual's sample id
  val sampleId: String
  // Raw phenotype payload
  val value: T

  // Numeric representation of `value`
  def toDouble: Array[Double]
}
/**
 * Implementation of [[Phenotype]] that formalizes multiple regression with covariates.
 *
 * @param phenotype Line that contains the names of all the phenotypes (primary and covariates), separated by spaces
 * @param sampleId Individual's sample id
 * @param value Array of all phenotypes being used. First item is primary phenotype, the rest are covariates
 */
case class MultipleRegressionDoublePhenotype(phenotype: String,
                                             sampleId: String,
                                             value: Array[Double]) extends Phenotype[Array[Double]] {
  // Values are already numeric doubles; expose them unchanged.
  def toDouble: Array[Double] = value
}
| kunalgosar/gnocchi | gnocchi-core/src/main/scala/net/fnothaft/gnocchi/models/Phenotype.scala | Scala | apache-2.0 | 1,600 |
import scala.quoted.*
import scala.quoted.staging.*
object Test {
  // Staging compiler used by `run` to compile and execute the built quote.
  given Compiler = Compiler.make(getClass.getClassLoader)
  def main(args: Array[String]): Unit = run {
    // Builds a quoted `println` of the class's canonical name, printing the
    // generated code (`name.show`) at staging time as a side effect.
    def test[T: Type](clazz: java.lang.Class[T])(using Quotes) = {
      val lclazz = Expr(clazz)
      val name = '{ ($lclazz).getCanonicalName }
      println(name.show)
      '{ println($name) }
    }

    // primitive arrays: exercises Expr(Class[_]) lifting for Array[<primitive>]
    '{
      ${test(classOf[Array[Int]])}
      ${test(classOf[Array[Long]])}
      ${test(classOf[Array[Float]])}
      ${test(classOf[Array[Double]])}
    }
  }
}
| dotty-staging/dotty | tests/run-staging/i3947i.scala | Scala | apache-2.0 | 568 |
/*
* IJ-Plugins
* Copyright (C) 2002-2021 Jarek Sacha
* Author's email: jpsacha at gmail dot com
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Latest release available at https://github.com/ij-plugins/ijp-toolkit/
*/
package ij_plugins.toolkit.filters
import ij.gui.{DialogListener, GenericDialog}
import ij.plugin.filter.PlugInFilter._
import ij.plugin.filter.{ExtendedPlugInFilter, PlugInFilterRunner}
import ij.process.{Blitter, FloatProcessor, ImageProcessor}
import ij.{IJ, ImagePlus}
import ij_plugins.toolkit.filters.CoherenceEnhancingDiffusionPlugIn._
import ij_plugins.toolkit.ui.progress.IJProgressBarAdapter
import ij_plugins.toolkit.util.IJPUtils
import java.awt.AWTEvent
import java.util.concurrent.atomic.AtomicBoolean
object CoherenceEnhancingDiffusionPlugIn {
  // Image types supported, plus stack parallelization and preview support.
  val FLAGS: Int =
    DOES_8G |
      DOES_16 |
      DOES_32 |
      PARALLELIZE_STACKS |
      ExtendedPlugInFilter.KEEP_PREVIEW

  // Shared mutable filter configuration, updated from the dialog; guarded by
  // `CONFIG synchronized` in dialogItemChanged since stacks run in parallel.
  private val CONFIG = CoherenceEnhancingDiffusion.Config()

  // Whether to show intermediate alpha/c2 images after filtering.
  private val debugMode = new AtomicBoolean(false)

  private val TITLE = "Coherence Enhancing Diffusion"
  private val DESCRIPTION = ""
  private val HELP_URL = "https://github.com/ij-plugins/ijp-toolkit/wiki/Filters"
}
/**
* ImageJ plugin that runs `CoherenceEnhancingDiffusion` filter.
*/
final class CoherenceEnhancingDiffusionPlugIn extends ExtendedPlugInFilter with DialogListener {

  private var imp: ImagePlus = _
  private var nPasses = 0
  private var passCount = 0

  // ImageJ plugin-filter entry point: remember the image, declare capabilities.
  def setup(arg: String, imp: ImagePlus): Int = {
    this.imp = imp
    FLAGS
  }

  // Builds the parameter dialog. Field order here MUST match the
  // gd.getNextNumber order in dialogItemChanged below.
  def showDialog(imp: ImagePlus, command: String, pfr: PlugInFilterRunner): Int = {
    val dialog = new GenericDialog(TITLE) {
      addPanel(IJPUtils.createInfoPanel(TITLE, DESCRIPTION))
      addNumericField("Lambda (>0), limit of diffusion", CONFIG.lambda, 6, 8, "")
      addNumericField("Sigma (>0), smooth for first derivative", CONFIG.sigma, 6, 8, "")
      addNumericField("Rho (>0), smooth for second derivative", CONFIG.rho, 6, 8, "")
      addNumericField("Step_size (<0.25)", CONFIG.stepSize, 6, 8, "")
      addNumericField("m (>1), best keep it equal to 1", CONFIG.m, 6, 8, "")
      addNumericField("Number_of_steps", CONFIG.numberOfSteps, 0)
      addCheckbox("Show_debug_data", debugMode.get)
      addHelp(HELP_URL)
      addPreviewCheckbox(pfr)
    }
    dialog.addDialogListener(this)
    dialog.showDialog()
    if (dialog.wasCanceled) return DONE
    IJ.setupDialog(imp, FLAGS)
  }

  // Copies dialog values into the shared CONFIG (reads must stay in the same
  // order as the fields were added above).
  def dialogItemChanged(gd: GenericDialog, e: AWTEvent): Boolean = {
    CONFIG synchronized {
      CONFIG.lambda = gd.getNextNumber
      CONFIG.sigma = gd.getNextNumber
      CONFIG.rho = gd.getNextNumber
      CONFIG.stepSize = gd.getNextNumber
      CONFIG.m = gd.getNextNumber
      CONFIG.numberOfSteps = Math.round(gd.getNextNumber).toInt
    }
    debugMode.set(gd.getNextBoolean)
    true
  }

  def setNPasses(nPasses: Int): Unit = {
    this.nPasses = nPasses
    this.passCount = 0
  }

  // Runs the diffusion filter on one slice; the listener is always detached
  // (finally) and the filtered result is copied back into the input processor.
  def run(ip: ImageProcessor): Unit = {
    passCount += 1
    val statsMessage = if (nPasses > 1) TITLE + " - pass " + passCount + "/" + nPasses + ". " else TITLE
    IJ.showStatus(statsMessage)
    val src = ip.convertToFloat.asInstanceOf[FloatProcessor]
    val filter = new CoherenceEnhancingDiffusion(CONFIG)
    val progressListener = new IJProgressBarAdapter()
    filter.addProgressListener(progressListener)
    val dest =
      try filter.run(src)
      finally filter.removeProgressListener(progressListener)
    if (debugMode.get) {
      filter.alpha.resetMinAndMax()
      new ImagePlus("alpha", filter.alpha).show()
      filter.c2.resetMinAndMax()
      new ImagePlus("c2", filter.c2).show()
    }
    ip.copyBits(dest, 0, 0, Blitter.COPY)
  }
}
| ij-plugins/ijp-toolkit | src/main/scala/ij_plugins/toolkit/filters/CoherenceEnhancingDiffusionPlugIn.scala | Scala | lgpl-2.1 | 4,431 |
package com.twitter.util
import scala.collection.mutable
/**
 * An asynchronous object pool: `reserve` yields a Future that is satisfied
 * as soon as an item becomes available; `release` returns an item.
 */
trait Pool[A] {
  def reserve(): Future[A]
  def release(a: A): Unit
}
/**
 * A pool backed by a queue of futures. When the pool is drained, `reserve`
 * hands out unfulfilled Promises that are satisfied (FIFO) by later
 * `release` calls.
 */
class SimplePool[A](items: mutable.Queue[Future[A]]) extends Pool[A] {
  def this(initialItems: Seq[A]) = this {
    val queue = new mutable.Queue[Future[A]]
    queue ++= initialItems.map(Future(_))
    queue
  }

  // Callers waiting for an item; satisfied in FIFO order on release.
  private val requests = new mutable.Queue[Promise[A]]

  def reserve(): Future[A] = synchronized {
    if (items.isEmpty) {
      val future = new Promise[A]
      requests += future
      future
    } else {
      items.dequeue()
    }
  }

  def release(item: A): Unit = {
    // BUG FIX: the original enqueued into `items` OUTSIDE the lock while
    // reserve() mutates the same queue under `synchronized`, racing with
    // concurrent reservations. All queue mutation now happens under the lock.
    val satisfiable = synchronized {
      items += Future[A](item)
      if (requests.nonEmpty && items.nonEmpty)
        Some((requests.dequeue(), items.dequeue()))
      else
        None
    }
    // Fulfil the waiting promise outside the lock.
    satisfiable.foreach { case (request, currItem) =>
      currItem.respond(request() = _)
    }
  }
}
/**
 * A pool of `numItems` factory-made items whose health is re-checked on every
 * dequeue; unhealthy items are transparently replaced via `makeItem`.
 */
abstract class FactoryPool[A](numItems: Int) extends Pool[A] {
  private val healthyQueue = new HealthyQueue[A](makeItem, numItems, isHealthy)
  private val simplePool = new SimplePool[A](healthyQueue)

  def reserve(): Future[A] = simplePool.reserve()
  def release(a: A): Unit = simplePool.release(a)
  // NOTE(review): the disposed item `a` is ignored; a brand-new item is
  // enqueued in its place to keep the pool at `numItems` — confirm intent.
  def dispose(a: A): Unit = {
    healthyQueue += makeItem()
  }

  // Produces a new pool item (may complete asynchronously).
  protected def makeItem(): Future[A]
  // True if the item is still usable.
  protected def isHealthy(a: A): Boolean
}
/**
 * A synchronized queue of future items that filters out unhealthy items on
 * dequeue, replacing each with a freshly made one and retrying.
 */
private class HealthyQueue[A](
  makeItem: () => Future[A],
  numItems: Int,
  isHealthy: A => Boolean)
  extends mutable.Queue[Future[A]]
{
  // Pre-populate with `numItems` factory-made items.
  0.until(numItems) foreach { _ => this += makeItem() }

  override def +=(elem: Future[A]): HealthyQueue.this.type = synchronized {
    super.+=(elem)
  }

  override def +=:(elem: Future[A]): HealthyQueue.this.type = synchronized {
    super.+=:(elem)
  }

  override def enqueue(elems: Future[A]*): Unit = synchronized {
    super.enqueue(elems: _*)
  }

  override def dequeue(): Future[A] = synchronized {
    if (isEmpty) throw new NoSuchElementException("queue empty")

    super.dequeue() flatMap { item =>
      if (isHealthy(item)) {
        Future(item)
      } else {
        // Unhealthy: replace with a fresh item and retry recursively.
        // NOTE(review): this flatMap callback re-enters `synchronized` — if
        // the future completes on another thread while the outer lock is
        // held, this can contend/deadlock; confirm the completion model.
        val item = makeItem()
        synchronized {
          enqueue(item)
          dequeue()
        }
      }
    }
  }
}
| BuoyantIO/twitter-util | util-core/src/main/scala/com/twitter/util/Pool.scala | Scala | apache-2.0 | 2,184 |
package com.github.j5ik2o.reactive.redis.feature
import java.util.UUID
import akka.actor.ActorSystem
import com.github.j5ik2o.reactive.redis.AbstractRedisClientSpec
import com.github.j5ik2o.reactive.redis.command.strings.BitFieldRequest.SingedBitType
import com.github.j5ik2o.reactive.redis.command.strings.{ BitFieldRequest, BitOpRequest, BitPosRequest, StartAndEnd }
import com.github.j5ik2o.reactive.redis.util.BitUtil
import org.scalacheck.Shrink
import cats.implicits._
/**
 * Integration tests for the Redis string commands (APPEND, BITCOUNT,
 * BITFIELD, BITOP, BITPOS, DECR*, GET*, SET* ...). Each test builds a
 * monadic command program and executes it against a live client via
 * `runProgram`; several at the bottom are still unimplemented stubs.
 */
abstract class AbstractStringsFeatureSpec extends AbstractRedisClientSpec(ActorSystem("StringsFeatureSpec")) {
  // Keep ScalaCheck from shrinking generated strings on failure, so the
  // reported counterexample is the value that actually ran against Redis.
  implicit val noShrink: Shrink[String] = Shrink.shrinkAny

  "StringsFeature" - {
    "append" in forAll(keyStrValueGen) {
      case (k, v) =>
        val result = runProgram(for {
          ar1 <- redisClient.append(k, v)
          gr1 <- redisClient.get(k)
          ar2 <- redisClient.append(k, v)
          gr2 <- redisClient.get(k)
        } yield (ar1, ar2, gr1, gr2))
        // APPEND returns the string length after the append.
        result._1.value shouldBe v.length
        result._2.value shouldBe v.length * 2
        result._3.value shouldBe Some(v)
        result._4.value shouldBe Some(v + v)
    }
    "bitcount" in forAll(keyStrValueGen) {
      case (k, v) =>
        val end = v.length / 2
        // Expected population count computed locally over the same byte range.
        val expectedBitCount = BitUtil.getBitCount(v, Some(StartAndEnd(0, end)))
        val result = runProgram(for {
          _ <- redisClient.set(k, v)
          br <- redisClient.bitCount(k, startAndEnd = Some(StartAndEnd(0, end)))
        } yield br)
        result.value shouldBe expectedBitCount
    }
    "bitField" in {
      val k = UUID.randomUUID().toString
      val result = runProgram(for {
        br <- redisClient
          .bitField(k, BitFieldRequest.IncrBy(bitType = SingedBitType(bit = 5), offset = 100, increment = 1))
      } yield br)
      // Fresh key: incrementing a 5-bit signed field from 0 yields 1.
      result.value shouldBe List(1)
    }
    "bitOp" in {
      val k1 = UUID.randomUUID().toString
      val k2 = UUID.randomUUID().toString
      val result = runProgram(for {
        _ <- redisClient.set(k1, "foobar")
        _ <- redisClient.set(k2, "abcdef")
        br <- redisClient.bitOp(BitOpRequest.Operand.AND, "dest", k1, k2)
        gr <- redisClient.get("dest")
      } yield (br, gr))
      // BITOP returns the length of the destination string.
      result._1.value shouldBe 6
      result._2.value shouldBe Some("`bc`ab")
    }
    "bitPos" in {
      val k = UUID.randomUUID().toString
      val result = runProgram(for {
        _ <- redisClient.set(k, String.valueOf(0))
        _ <- redisClient.setBit(k, 3, 1)
        _ <- redisClient.setBit(k, 7, 1)
        _ <- redisClient.setBit(k, 13, 1)
        _ <- redisClient.setBit(k, 39, 1)
        br1 <- redisClient.bitPos(k, 1)
        br2 <- redisClient.bitPos(k, 0)
        br3 <- redisClient.bitPos(k, 1, Some(BitPosRequest.StartAndEnd(1)))
      } yield (br1, br2, br3))
      // Key was initialized to "0" (0x30 = 00110000), so bit 2 is the first
      // set bit and bit 0 the first clear bit; from byte 1 on, bit 13 is set.
      result._1.value shouldBe 2
      result._2.value shouldBe 0
      result._3.value shouldBe 13
    }
    "decr" in forAll(keyNumValueGen) {
      case (k, v) =>
        val result = runProgram(for {
          _ <- redisClient.set(k, v)
          gr <- redisClient.decr(k)
        } yield gr)
        result.value shouldBe (v - 1)
    }
    "decrBy" in forAll(keyNumValueGen) {
      case (k, v) =>
        val result = runProgram(for {
          _ <- redisClient.set(k, v)
          gr <- redisClient.decrBy(k, 3)
        } yield gr)
        result.value shouldBe (v - 3)
    }
    "get" in forAll(keyStrValueGen) {
      case (k, v) =>
        val result = runProgram(for {
          _ <- redisClient.set(k, v)
          result <- redisClient.get(k)
        } yield result)
        result.value shouldBe Some(v)
    }
    "getBit" in {
      val k = UUID.randomUUID().toString
      val result = runProgram(for {
        _ <- redisClient.setBit(k, 7, 1)
        gr1 <- redisClient.getBit(k, 0)
        gr2 <- redisClient.getBit(k, 7)
        gr3 <- redisClient.getBit(k, 100)
      } yield (gr1, gr2, gr3))
      // Reads past the end of the value return 0.
      result._1.value shouldBe 0
      result._2.value shouldBe 1
      result._3.value shouldBe 0
    }
    "getRange" in {
      val k = UUID.randomUUID().toString
      val result = runProgram(for {
        _ <- redisClient.set(k, "This is a string")
        gr1 <- redisClient.getRange(k, StartAndEnd(0, 3))
        gr2 <- redisClient.getRange(k, StartAndEnd(-3, -1))
        gr3 <- redisClient.getRange(k, StartAndEnd(0, -1))
        gr4 <- redisClient.getRange(k, StartAndEnd(10, 100))
      } yield (gr1, gr2, gr3, gr4))
      // GETRANGE is end-inclusive; negative offsets count from the end.
      result._1.value shouldBe Some("This")
      result._2.value shouldBe Some("ing")
      result._3.value shouldBe Some("This is a string")
      result._4.value shouldBe Some("string")
    }
    "getSet" in {
      val k = UUID.randomUUID().toString
      val result = runProgram(for {
        _ <- redisClient.incr(k)
        gr1 <- redisClient.getSet(k, "0")
        gr2 <- redisClient.get(k)
      } yield (gr1, gr2))
      // GETSET returns the old value and installs the new one atomically.
      result._1.value shouldBe Some("1")
      result._2.value shouldBe Some("0")
    }
    // TODO(review): the following commands are still untested stubs.
    "incr" in {}
    "incrBy" in {}
    "incrByFloat" in {}
    "mget" in {}
    "mset" in {}
    "msetNx" in {}
    "psetEx" in {}
    "set" in {}
    "setBit" in {}
    "setEx" in {}
    "setNx" in {}
    "setRange" in {}
    "strLen" in forAll(keyStrValueGen) {
      case (k, v) =>
        val result = runProgram(for {
          _ <- redisClient.set(k, v)
          result <- redisClient.get(k)
          strLen <- redisClient.strLen(k)
        } yield (result, strLen))
        result._1.value shouldBe Some(v)
        result._1.value.get.length shouldBe result._2.value
        result._2.value shouldBe v.length
    }
  }
}
| j5ik2o/reactive-redis | core/src/test/scala/com/github/j5ik2o/reactive/redis/feature/AbstractStringsFeatureSpec.scala | Scala | mit | 5,632 |
package com.blogspot.ramannanda.scala.algorithms.cp3.adhoc.chess
import scala.io.StdIn
//uva 278
// UVa 278 ("Chess"): maximum number of mutually non-attacking pieces of one
// kind that fit on an nCols x nRows board.
object NonAttackingPosn {

  /**
   * @param piece 'r' rook, 'Q' queen, 'k' knight, 'K' king (UVa 278 codes)
   * @return maximum number of non-attacking pieces on an nCols x nRows board
   */
  def numMoves(piece: Char, nCols: Int, nRows: Int): Int = {
    piece match {
      case 'r' => Math.min(nRows, nCols) // one rook per shared row/column
      case 'Q' => Math.min(nRows, nCols) // same bound as rooks (UVa 278 convention)
      case 'k' => (nCols * nRows + 1) / 2 // knights on all squares of one colour
      // BUG FIX: without explicit parens, `/` and `*` associate left, so
      // `(c+1)/2*(r+1)/2` computed ((c+1)/2*(r+1))/2 — wrong for e.g. 3x4.
      // Kings occupy every other file and rank: ceil(c/2) * ceil(r/2).
      case 'K' => ((nCols + 1) / 2) * ((nRows + 1) / 2)
    }
  }

  def main(args: Array[String]): Unit = {
    val numCases = StdIn.readLine().trim.toInt
    for (i <- 0 until numCases) {
      // BUG FIX: was split("\\\\s+") — a regex matching a literal backslash
      // followed by 's'; "\\s+" splits on runs of whitespace as intended.
      val input = StdIn.readLine().split("\\s+")
      println(numMoves(input(0).charAt(0), input(1).toInt, input(2).toInt))
    }
  }
}
| ramannanda9/algorithms-in-scala | src/main/scala/com/blogspot/ramannanda/scala/algorithms/cp3/adhoc/chess/NonAttackingPosn.scala | Scala | gpl-3.0 | 744 |
package at.forsyte.apalache.tla.lir.predef
import at.forsyte.apalache.tla.lir.values.TlaPredefSet
| konnov/apalache | tlair/src/main/scala/at/forsyte/apalache/tla/lir/predef/standardSets.scala | Scala | apache-2.0 | 99 |
/*
* Copyright (c) 2014 Oculus Info Inc.
* http://www.oculusinfo.com/
*
* Released under the MIT License.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to do
* so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.oculusinfo.tilegen.datasets
import java.text.SimpleDateFormat
import scala.reflect.ClassTag
import com.oculusinfo.binning.TileData
import com.oculusinfo.tilegen.tiling.AnalysisDescription
import com.oculusinfo.tilegen.tiling.CartesianIndexScheme
import com.oculusinfo.tilegen.tiling.IndexScheme
import com.oculusinfo.tilegen.tiling.IPv4Analytics
import com.oculusinfo.tilegen.tiling.IPv4ZCurveIndexScheme
import com.oculusinfo.tilegen.tiling.TimeRangeCartesianIndexScheme
import com.oculusinfo.tilegen.tiling.TimeIndexScheme
import com.oculusinfo.tilegen.tiling.LineSegmentIndexScheme
import com.oculusinfo.tilegen.util.PropertiesWrapper
object CSVIndexExtractor {
	/**
	 * Builds the index extractor named by `oculus.binning.index.type`
	 * (cartesian, graph, timerange or ipv4) from configuration properties.
	 * A "graph" index over node data degrades to plain cartesian.
	 */
	def fromProperties (properties: PropertiesWrapper): CSVIndexExtractor[_] = {
		var indexType = properties.getString(
			"oculus.binning.index.type",
			"The type of index to use in the data. Currently supported options "+
			"are cartesian (the default), graph, timerange, and ipv4.",
			Some("cartesian"))
		if ("graph" == indexType) {
			val graphDataType = properties.getString(
				"oculus.binning.graph.data",
				"The type of graph data to tile (nodes or edges). Default is nodes.",
				Some("nodes"))
			if ("nodes" == graphDataType)
				indexType = "cartesian"
		}
		indexType match {
			case "cartesian" => {
				val xVar = properties.getString("oculus.binning.xField",
				                                "The field to use for the X axis of tiles produced",
				                                Some(CSVDatasetBase.ZERO_STR))
				val yVar = properties.getString("oculus.binning.yField",
				                                "The field to use for the Y axis of tiles produced",
				                                Some(CSVDatasetBase.ZERO_STR))
				new CartesianIndexExtractor(xVar, yVar)
			}
			case "graph" => {
				// edges require two cartesian endpoints
				val xVar1 = properties.getString("oculus.binning.xField",
				                                 "The field to use for the X axis for edge start pt",
				                                 Some(CSVDatasetBase.ZERO_STR))
				val yVar1 = properties.getString("oculus.binning.yField",
				                                 "The field to use for the Y axis for edge start pt",
				                                 Some(CSVDatasetBase.ZERO_STR))
				val xVar2 = properties.getString("oculus.binning.xField2",
				                                 "The field to use for the X axis for edge end pt",
				                                 Some(CSVDatasetBase.ZERO_STR))
				val yVar2 = properties.getString("oculus.binning.yField2",
				                                 "The field to use for the Y axis for edge end pt",
				                                 Some(CSVDatasetBase.ZERO_STR))
				new LineSegmentIndexExtractor(xVar1, yVar1, xVar2, yVar2)
			}
			case "timerange" => {
				val xVar = properties.getString("oculus.binning.xField",
				                                "The field to use for the X axis of tiles produced",
				                                Some(CSVDatasetBase.ZERO_STR))
				val yVar = properties.getString("oculus.binning.yField",
				                                "The field to use for the Y axis of tiles produced",
				                                Some(CSVDatasetBase.ZERO_STR))
				val timeVar = properties.getString("oculus.binning.timeField",
				                                   "The field to use for the time axis of tiles produced",
				                                   Some(CSVDatasetBase.ZERO_STR))
				// NOTE(review): SimpleDateFormat with no explicit locale/zone —
				// parse result depends on the JVM default time zone.
				val startDateFormat = new SimpleDateFormat(
					properties.getString("oculus.binning.timeRange.dateFormat",
					                     "The parsing format to use for 'oculus.binning.timeRange.startDate'",
					                     Some("yyMMddHHmm")))
				val startDate = startDateFormat.parse(
					properties.getString("oculus.binning.timeRange.startDate",
					                     "The initial date to base the time ranges on.",
					                     Some(""))).getTime()
				val secsPerRange = properties.getDouble("oculus.binning.timeRange.secondsPerRange",
				                                        "The number of seconds each range should represent",
				                                        Some(60 * 60 * 24))
				new TimeRangeCartesianIndexExtractor(timeVar, xVar, yVar, startDate, secsPerRange)
			}
			case "ipv4" => {
				val ipVar = properties.getString("oculus.binning.ipv4Field",
				                                 "The field from which to get the ipv4 address. "+
				                                 "Field type must be \\"ipv4\\".",
				                                 None)
				new IPv4IndexExtractor(ipVar)
			}
		}
	}
}
/**
 * Extracts a tiling index of type IT from the parsed fields of a CSV record.
 * Implementations pair a set of required field names with the logic that
 * turns those field values into an index value the binner understands.
 */
abstract class CSVIndexExtractor[IT: ClassTag] extends Serializable {
	val indexTypeTag = implicitly[ClassTag[IT]]

	// The fields this extractor needs
	def fields: Array[String]
	// The name of the indexing scheme - usually referring to the fields it
	// uses - for use in table naming.
	def name: String
	// A description of the data set axes
	def description: String
	// The index scheme the binner needs to know what to do with the index
	// values we generate
	def indexScheme: IndexScheme[IT]
	// Get the index value from the field values
	def calculateIndex (fieldValues: Map[String, Any]): IT
	// Indicate if the index implies a density strip
	def isDensityStrip: Boolean
	// List any tile analytics automatically associated with this index extractor
	def getTileAnalytics[BT]: Seq[AnalysisDescription[TileData[BT], _]]
	// List any data analytics automatically associated with this index extractor
	def getDataAnalytics: Seq[AnalysisDescription[(IT, _), _]]
}
/**
 * An index extractor whose index has a time component, bucketed into
 * fixed-width ranges of `msPerTimeRange` milliseconds.
 */
abstract class TimeRangeCSVIndexExtractor[IT: ClassTag] extends CSVIndexExtractor[IT] {
	// The time based index scheme the binner needs to know what to do with the index
	// values we generate
	def timeIndexScheme: TimeIndexScheme[IT]

	// Width of one time bucket, in milliseconds.
	def msPerTimeRange: Double
}
/** Indexes records by two numeric fields interpreted as (x, y) coordinates. */
class CartesianIndexExtractor(xVar: String, yVar: String)
		extends CSVIndexExtractor[(Double, Double)]
{
	private val scheme = new CartesianIndexScheme

	def fields = Array(xVar, yVar)
	def name = s"$xVar.$yVar"
	def description = s"$xVar vs. $yVar"
	def indexScheme = scheme
	def calculateIndex (fieldValues: Map[String, Any]): (Double, Double) = {
		val x = fieldValues(xVar).asInstanceOf[Double]
		val y = fieldValues(yVar).asInstanceOf[Double]
		(x, y)
	}
	// A constant-zero Y axis marks the data as a one-dimensional density strip.
	def isDensityStrip = yVar == CSVDatasetBase.ZERO_STR
	def getTileAnalytics[BT]: Seq[AnalysisDescription[TileData[BT], _]] = Seq()
	def getDataAnalytics: Seq[AnalysisDescription[((Double, Double), _), _]] = Seq()
}
/** Indexes edge records by the two cartesian endpoints (x1, y1, x2, y2). */
class LineSegmentIndexExtractor
	(xVar: String, yVar: String, xVar2: String, yVar2: String)
		extends CSVIndexExtractor[(Double, Double, Double, Double)]
{
	private val scheme = new LineSegmentIndexScheme

	def fields = Array(xVar, yVar, xVar2, yVar2)
	// Intentionally names only the first endpoint (matches the original code).
	def name = s"$xVar.$yVar"
	def description = s"$xVar vs. $yVar"
	def indexScheme = scheme
	def calculateIndex(fieldValues: Map[String, Any]): (Double, Double, Double, Double) = {
		def coord(field: String): Double = fieldValues(field).asInstanceOf[Double]
		(coord(xVar), coord(yVar), coord(xVar2), coord(yVar2))
	}
	// A constant-zero Y axis marks the data as a one-dimensional density strip.
	def isDensityStrip = yVar == CSVDatasetBase.ZERO_STR
	def getTileAnalytics[BT]: Seq[AnalysisDescription[TileData[BT], _]] = Seq()
	def getDataAnalytics: Seq[AnalysisDescription[((Double, Double, Double, Double), _), _]] = Seq()
}
/**
 * Index extractor that interprets a single numeric field as an IPv4 address
 * and produces its four octets (big-endian) as the index.
 *
 * @param ipField the record field holding the numeric IP address
 */
class IPv4IndexExtractor (ipField: String) extends CSVIndexExtractor[Array[Byte]] {
	private val scheme = new IPv4ZCurveIndexScheme

	def fields = Array(ipField)
	def name = ipField
	def description = ipField
	def indexScheme = scheme
	def calculateIndex (fieldValues: Map[String, Any]): Array[Byte] = {
		// Accept any numeric representation of the address; anything else maps to 0
		val address: Long = fieldValues(ipField) match {
			case value: Long => value
			case value: Int => value.toLong
			case value: Double => value.round.toLong
			case _ => 0L
		}
		// Emit the four low-order octets, most significant first
		(3 to 0 by -1).map(octet => ((address >> (octet * 8)) & 0xffL).toByte).toArray
	}
	val isDensityStrip = false
	def getTileAnalytics[BT]: Seq[AnalysisDescription[TileData[BT], _]] =
		Seq(IPv4Analytics.getCIDRBlockAnalysis[BT](),
		    IPv4Analytics.getMinIPAddressAnalysis[BT](),
		    IPv4Analytics.getMaxIPAddressAnalysis[BT]())
	def getDataAnalytics: Seq[AnalysisDescription[(Array[Byte], _), _]] = Seq()
}
/**
 * Index extractor producing a (time, x, y) index where the time component is
 * floored to fixed-width periods anchored at startDate.
 *
 * @param timeVar the record field holding the time value (assumed to be in
 *                milliseconds, matching msPerTimeRange — TODO confirm)
 * @param xVar the record field holding the x coordinate
 * @param yVar the record field holding the y coordinate
 * @param startDate origin of the time buckets (same units as timeVar)
 * @param secsPerPeriod width of one time bucket, in seconds
 */
class TimeRangeCartesianIndexExtractor
	(timeVar: String, xVar: String, yVar: String,
	 startDate: Double, secsPerPeriod: Double)
		extends TimeRangeCSVIndexExtractor[(Double, Double, Double)]
{
	private val scheme = new TimeRangeCartesianIndexScheme
	// Bucket width converted from seconds to milliseconds
	def msPerTimeRange = secsPerPeriod * 1000
	/**
	 * Floors the date value to the last time period
	 */
	def floorDate(d: Double) = Math.floor((d - startDate) / msPerTimeRange) * msPerTimeRange + startDate
	def fields = Array(timeVar, xVar, yVar)
	def name = xVar + "." + yVar
	def description = xVar + " vs. " + yVar
	def indexScheme = scheme
	def timeIndexScheme = scheme
	def calculateIndex (fieldValues: Map[String, Any]): (Double, Double, Double) =
		(floorDate(fieldValues(timeVar).asInstanceOf[Double]),
		 fieldValues(xVar).asInstanceOf[Double],
		 fieldValues(yVar).asInstanceOf[Double])
	// A density strip is signalled by the y variable being the zero constant
	def isDensityStrip = yVar == CSVDatasetBase.ZERO_STR
	def getTileAnalytics[BT]: Seq[AnalysisDescription[TileData[BT], _]] = Seq()
	def getDataAnalytics: Seq[AnalysisDescription[((Double, Double, Double), _), _]] = Seq()
}
| aashish24/aperture-tiles | tile-generation/src/main/scala/com/oculusinfo/tilegen/datasets/IndexExtractor.scala | Scala | mit | 10,746 |
package com.codexica.s3crate.filetree.history.snapshotstore
import play.api.libs.json.Json
import com.codexica.encryption.EncryptionDetails
import com.codexica.s3crate.filetree.history.CompressionMethod
/**
* @author Josh Albrecht (joshalbrecht@gmail.com)
*/
/**
 * Describes one stored blob of file data.
 *
 * @param location the S3 path where the blob lives
 * @param encryption how the blob was encrypted, or None if stored in the clear
 * @param compressionMethod how the blob was compressed
 */
case class DataBlob(
  location: RemoteFileSystemTypes.S3Path,
  encryption: Option[EncryptionDetails],
  compressionMethod: CompressionMethod
)

object DataBlob {
  // Play-JSON (de)serializer derived from the case class fields
  implicit val format = Json.format[DataBlob]
}
| joshalbrecht/s3crate | src/main/scala/com/codexica/s3crate/filetree/history/snapshotstore/DataBlob.scala | Scala | mit | 479 |
package io.flow.play.clients
import io.flow.play.util.{Config, DefaultConfig}
import scala.collection.mutable
/**
 * Test double for [[Config]]: values set explicitly on this instance take
 * precedence, and anything else falls through to the injected DefaultConfig.
 */
class MockConfig @javax.inject.Inject() (
  defaultConfig: DefaultConfig
) extends Config {

  // In-memory overrides. Seeded with a JWT salt so auth-related code works in tests.
  val values: mutable.Map[String, Any] = mutable.Map[String, Any]("JWT_SALT" -> "test")

  /** Registers a multi-valued override. */
  def set(name: String, value: Seq[String]): Unit = {
    values(name) = value
  }

  /** Registers a single-valued override. */
  def set(name: String, value: String): Unit = {
    values(name) = value
  }

  override def get(name: String): Option[String] =
    values.get(name).map(_.toString).orElse(defaultConfig.get(name))

  override def optionalList(name: String): Option[Seq[String]] =
    values.get(name).map(_.asInstanceOf[Seq[String]]).orElse(defaultConfig.optionalList(name))

  override def optionalMap(name: String): Option[Map[String, Seq[String]]] =
    values.get(name).map(_.asInstanceOf[Map[String, Seq[String]]]).orElse(defaultConfig.optionalMap(name))
}
| flowcommerce/lib-play | app/io/flow/play/clients/MockConfig.scala | Scala | mit | 1,117 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.containerpool.test
import java.time.Instant
import scala.collection.mutable
import scala.concurrent.duration._
import org.junit.runner.RunWith
import org.scalamock.scalatest.MockFactory
import org.scalatest.BeforeAndAfterAll
import org.scalatest.FlatSpec
import org.scalatest.FlatSpecLike
import org.scalatest.Matchers
import org.scalatest.junit.JUnitRunner
import akka.actor.ActorRefFactory
import akka.actor.ActorSystem
import akka.testkit.ImplicitSender
import akka.testkit.TestKit
import akka.testkit.TestProbe
import whisk.common.TransactionId
import whisk.core.connector.ActivationMessage
import whisk.core.containerpool._
import whisk.core.entity._
import whisk.core.entity.ExecManifest.RuntimeManifest
import whisk.core.entity.ExecManifest.ImageName
import whisk.core.entity.size._
import whisk.core.connector.MessageFeed
/**
 * Behavior tests for the ContainerPool
 *
 * These tests test the runtime behavior of a ContainerPool actor:
 * container reuse, creation, eviction, prewarming and deletion. Worker
 * containers and the message feed are stubbed with TestProbes, so every
 * expectation below is about the messages the pool routes to them.
 */
@RunWith(classOf[JUnitRunner])
class ContainerPoolTests
    extends TestKit(ActorSystem("ContainerPool"))
    with ImplicitSender
    with FlatSpecLike
    with Matchers
    with BeforeAndAfterAll
    with MockFactory {

  override def afterAll = TestKit.shutdownActorSystem(system)

  // Upper bound for every within(...) block below
  val timeout = 5.seconds

  // Common entities to pass to the tests. We don't really care what's inside
  // those for the behavior testing here, as none of the contents will really
  // reach a container anyway. We merely assert that passing and extraction of
  // the values is done properly.
  val exec = CodeExecAsString(RuntimeManifest("actionKind", ImageName("testImage")), "testCode", None)
  val memoryLimit = 256.MB

  /** Creates a `Run` message */
  def createRunMessage(action: ExecutableWhiskAction, invocationNamespace: EntityName) = {
    val message = ActivationMessage(
      TransactionId.testing,
      action.fullyQualifiedName(true),
      action.rev,
      Identity(Subject(), invocationNamespace, AuthKey(), Set()),
      ActivationId(),
      invocationNamespace.toPath,
      InstanceId(0),
      blocking = false,
      content = None)
    Run(action, message)
  }

  val invocationNamespace = EntityName("invocationSpace")
  val differentInvocationNamespace = EntityName("invocationSpace2")
  val action = ExecutableWhiskAction(EntityPath("actionSpace"), EntityName("actionName"), exec)
  val differentAction = action.copy(name = EntityName("actionName2"))

  val runMessage = createRunMessage(action, invocationNamespace)
  val runMessageDifferentNamespace = createRunMessage(action, differentInvocationNamespace)
  val runMessageDifferentEverything = createRunMessage(differentAction, differentInvocationNamespace)

  /** Helper to create PreWarmedData */
  def preWarmedData(kind: String, memoryLimit: ByteSize = memoryLimit) =
    PreWarmedData(stub[Container], kind, memoryLimit)

  /** Helper to create WarmedData */
  def warmedData(action: ExecutableWhiskAction = action,
                 namespace: String = "invocationSpace",
                 lastUsed: Instant = Instant.now) =
    WarmedData(stub[Container], EntityName(namespace), action, lastUsed)

  /** Creates a sequence of containers and a factory returning this sequence. */
  def testContainers(n: Int) = {
    val containers = (0 to n).map(_ => TestProbe())
    val queue = mutable.Queue(containers: _*)
    val factory = (fac: ActorRefFactory) => queue.dequeue().ref
    (containers, factory)
  }

  behavior of "ContainerPool"

  /*
   * CONTAINER SCHEDULING
   *
   * These tests only test the simplest approaches. Look below for full coverage tests
   * of the respective scheduling methods.
   */
  it should "reuse a warm container" in within(timeout) {
    val (containers, factory) = testContainers(2)
    val feed = TestProbe()
    val pool = system.actorOf(ContainerPool.props(factory, 2, 2, feed.ref))
    pool ! runMessage
    containers(0).expectMsg(runMessage)
    // NeedWork signals the container is free again, so the next run reuses it
    containers(0).send(pool, NeedWork(warmedData()))
    pool ! runMessage
    containers(0).expectMsg(runMessage)
    containers(1).expectNoMsg(100.milliseconds)
  }

  it should "create a container if it cannot find a matching container" in within(timeout) {
    val (containers, factory) = testContainers(2)
    val feed = TestProbe()
    val pool = system.actorOf(ContainerPool.props(factory, 2, 2, feed.ref))
    pool ! runMessage
    containers(0).expectMsg(runMessage)
    // Note that the container doesn't respond, thus it's not free to take work
    pool ! runMessage
    containers(1).expectMsg(runMessage)
  }

  it should "remove a container to make space in the pool if it is already full and a different action arrives" in within(
    timeout) {
    val (containers, factory) = testContainers(2)
    val feed = TestProbe()
    // a pool with only 1 slot
    val pool = system.actorOf(ContainerPool.props(factory, 1, 1, feed.ref))
    pool ! runMessage
    containers(0).expectMsg(runMessage)
    containers(0).send(pool, NeedWork(warmedData()))
    feed.expectMsg(MessageFeed.Processed)
    pool ! runMessageDifferentEverything
    containers(0).expectMsg(Remove)
    containers(1).expectMsg(runMessageDifferentEverything)
  }

  it should "cache a container if there is still space in the pool" in within(timeout) {
    val (containers, factory) = testContainers(2)
    val feed = TestProbe()
    // a pool with only 1 active slot but 2 slots in total
    val pool = system.actorOf(ContainerPool.props(factory, 1, 2, feed.ref))

    // Run the first container
    pool ! runMessage
    containers(0).expectMsg(runMessage)
    containers(0).send(pool, NeedWork(warmedData(lastUsed = Instant.EPOCH)))
    feed.expectMsg(MessageFeed.Processed)

    // Run the second container, don't remove the first one
    pool ! runMessageDifferentEverything
    containers(1).expectMsg(runMessageDifferentEverything)
    containers(1).send(pool, NeedWork(warmedData(lastUsed = Instant.now)))
    feed.expectMsg(MessageFeed.Processed)
    pool ! runMessageDifferentNamespace
    containers(2).expectMsg(runMessageDifferentNamespace)

    // 2 Slots exhausted, remove the first container to make space
    containers(0).expectMsg(Remove)
  }

  it should "remove a container to make space in the pool if it is already full and another action with different invocation namespace arrives" in within(
    timeout) {
    val (containers, factory) = testContainers(2)
    val feed = TestProbe()
    // a pool with only 1 slot
    val pool = system.actorOf(ContainerPool.props(factory, 1, 1, feed.ref))
    pool ! runMessage
    containers(0).expectMsg(runMessage)
    containers(0).send(pool, NeedWork(warmedData()))
    feed.expectMsg(MessageFeed.Processed)
    pool ! runMessageDifferentNamespace
    containers(0).expectMsg(Remove)
    containers(1).expectMsg(runMessageDifferentNamespace)
  }

  /*
   * CONTAINER PREWARMING
   */
  it should "create prewarmed containers on startup" in within(timeout) {
    val (containers, factory) = testContainers(1)
    val feed = TestProbe()

    val pool =
      system.actorOf(ContainerPool.props(factory, 0, 0, feed.ref, Some(PrewarmingConfig(1, exec, memoryLimit))))
    containers(0).expectMsg(Start(exec, memoryLimit))
  }

  it should "use a prewarmed container and create a new one to fill its place" in within(timeout) {
    val (containers, factory) = testContainers(2)
    val feed = TestProbe()

    val pool =
      system.actorOf(ContainerPool.props(factory, 1, 1, feed.ref, Some(PrewarmingConfig(1, exec, memoryLimit))))
    containers(0).expectMsg(Start(exec, memoryLimit))
    containers(0).send(pool, NeedWork(preWarmedData(exec.kind)))
    pool ! runMessage
    containers(1).expectMsg(Start(exec, memoryLimit))
  }

  it should "not use a prewarmed container if it doesn't fit the kind" in within(timeout) {
    val (containers, factory) = testContainers(2)
    val feed = TestProbe()

    val alternativeExec = CodeExecAsString(RuntimeManifest("anotherKind", ImageName("testImage")), "testCode", None)

    val pool = system.actorOf(
      ContainerPool.props(factory, 1, 1, feed.ref, Some(PrewarmingConfig(1, alternativeExec, memoryLimit))))
    containers(0).expectMsg(Start(alternativeExec, memoryLimit)) // container0 was prewarmed
    containers(0).send(pool, NeedWork(preWarmedData(alternativeExec.kind)))

    pool ! runMessage
    containers(1).expectMsg(runMessage) // but container1 is used
  }

  it should "not use a prewarmed container if it doesn't fit memory wise" in within(timeout) {
    val (containers, factory) = testContainers(2)
    val feed = TestProbe()

    val alternativeLimit = 128.MB

    val pool =
      system.actorOf(ContainerPool.props(factory, 1, 1, feed.ref, Some(PrewarmingConfig(1, exec, alternativeLimit))))
    containers(0).expectMsg(Start(exec, alternativeLimit)) // container0 was prewarmed
    containers(0).send(pool, NeedWork(preWarmedData(exec.kind, alternativeLimit)))

    pool ! runMessage
    containers(1).expectMsg(runMessage) // but container1 is used
  }

  /*
   * CONTAINER DELETION
   */
  it should "not reuse a container which is scheduled for deletion" in within(timeout) {
    val (containers, factory) = testContainers(2)
    val feed = TestProbe()

    val pool = system.actorOf(ContainerPool.props(factory, 2, 2, feed.ref))

    // container0 is created and used
    pool ! runMessage
    containers(0).expectMsg(runMessage)
    containers(0).send(pool, NeedWork(warmedData()))

    // container0 is reused
    pool ! runMessage
    containers(0).expectMsg(runMessage)
    containers(0).send(pool, NeedWork(warmedData()))

    // container0 is deleted
    containers(0).send(pool, ContainerRemoved)

    // container1 is created and used
    pool ! runMessage
    containers(1).expectMsg(runMessage)
  }
}
/**
 * Unit tests for the ContainerPool object.
 *
 * These tests test only the "static" methods "schedule" and "remove"
 * of the ContainerPool object: given a pool snapshot (a Map of container
 * refs to their state), which container should handle a request, and which
 * one should be evicted.
 */
@RunWith(classOf[JUnitRunner])
class ContainerPoolObjectTests extends FlatSpec with Matchers with MockFactory {

  val actionExec = CodeExecAsString(RuntimeManifest("actionKind", ImageName("testImage")), "testCode", None)
  val standardNamespace = EntityName("standardNamespace")
  val differentNamespace = EntityName("differentNamespace")

  /** Helper to create a new action from String representations */
  def createAction(namespace: String = "actionNS", name: String = "actionName") =
    ExecutableWhiskAction(EntityPath(namespace), EntityName(name), actionExec)

  /** Helper to create WarmedData with sensible defaults */
  def warmedData(action: ExecutableWhiskAction = createAction(),
                 namespace: String = standardNamespace.asString,
                 lastUsed: Instant = Instant.now) =
    WarmedData(stub[Container], EntityName(namespace), action, lastUsed)

  /** Helper to create PreWarmedData with sensible defaults */
  def preWarmedData(kind: String = "anyKind") = PreWarmedData(stub[Container], kind, 256.MB)

  /** Helper to create NoData */
  def noData() = NoData()

  behavior of "ContainerPool schedule()"

  it should "not provide a container if idle pool is empty" in {
    ContainerPool.schedule(createAction(), standardNamespace, Map()) shouldBe None
  }

  it should "reuse an applicable warm container from idle pool with one container" in {
    val data = warmedData()
    val pool = Map('name -> data)

    // copy to make sure, referencial equality doesn't suffice
    ContainerPool.schedule(data.action.copy(), data.invocationNamespace, pool) shouldBe Some('name, data)
  }

  it should "reuse an applicable warm container from idle pool with several applicable containers" in {
    val data = warmedData()
    val pool = Map('first -> data, 'second -> data)

    // either of the two equivalent containers is acceptable
    ContainerPool.schedule(data.action.copy(), data.invocationNamespace, pool) should (be(Some('first, data)) or be(
      Some('second, data)))
  }

  it should "reuse an applicable warm container from idle pool with several different containers" in {
    val matchingData = warmedData()
    val pool = Map('none -> noData(), 'pre -> preWarmedData(), 'warm -> matchingData)

    ContainerPool.schedule(matchingData.action.copy(), matchingData.invocationNamespace, pool) shouldBe Some(
      'warm,
      matchingData)
  }

  it should "not reuse a container from idle pool with non-warm containers" in {
    val data = warmedData()
    // data is **not** in the pool!
    val pool = Map('none -> noData(), 'pre -> preWarmedData())

    ContainerPool.schedule(data.action.copy(), data.invocationNamespace, pool) shouldBe None
  }

  it should "not reuse a warm container with different invocation namespace" in {
    val data = warmedData()
    val pool = Map('warm -> data)
    val differentNamespace = EntityName(data.invocationNamespace.asString + "butDifferent")

    data.invocationNamespace should not be differentNamespace
    ContainerPool.schedule(data.action.copy(), differentNamespace, pool) shouldBe None
  }

  it should "not reuse a warm container with different action name" in {
    val data = warmedData()
    val differentAction = data.action.copy(name = EntityName(data.action.name.asString + "butDifferent"))
    val pool = Map('warm -> data)

    data.action.name should not be differentAction.name
    ContainerPool.schedule(differentAction, data.invocationNamespace, pool) shouldBe None
  }

  it should "not reuse a warm container with different action version" in {
    val data = warmedData()
    val differentAction = data.action.copy(version = data.action.version.upMajor)
    val pool = Map('warm -> data)

    data.action.version should not be differentAction.version
    ContainerPool.schedule(differentAction, data.invocationNamespace, pool) shouldBe None
  }

  behavior of "ContainerPool remove()"

  it should "not provide a container if pool is empty" in {
    ContainerPool.remove(createAction(), standardNamespace, Map()) shouldBe None
  }

  it should "not provide a container from busy pool with non-warm containers" in {
    val pool = Map('none -> noData(), 'pre -> preWarmedData())

    ContainerPool.remove(createAction(), standardNamespace, pool) shouldBe None
  }

  it should "not provide a container from pool with one single free container with the same action and namespace" in {
    val data = warmedData()
    val pool = Map('warm -> data)

    // same data --> no removal
    ContainerPool.remove(data.action, data.invocationNamespace, pool) shouldBe None

    // different action, same namespace --> remove
    ContainerPool.remove(createAction(data.action.name + "butDifferent"), data.invocationNamespace, pool) shouldBe Some(
      'warm)

    // different namespace, same action --> remove
    ContainerPool.remove(data.action, differentNamespace, pool) shouldBe Some('warm)

    // different everything --> remove
    ContainerPool.remove(createAction(data.action.name + "butDifferent"), differentNamespace, pool) shouldBe Some('warm)
  }

  it should "provide oldest container from busy pool with multiple containers" in {
    val commonNamespace = differentNamespace.asString
    val first = warmedData(namespace = commonNamespace, lastUsed = Instant.ofEpochMilli(1))
    val second = warmedData(namespace = commonNamespace, lastUsed = Instant.ofEpochMilli(2))
    val oldest = warmedData(namespace = commonNamespace, lastUsed = Instant.ofEpochMilli(0))

    val pool = Map('first -> first, 'second -> second, 'oldest -> oldest)

    // least-recently-used container is the eviction candidate
    ContainerPool.remove(createAction(), standardNamespace, pool) shouldBe Some('oldest)
  }
}
| duynguyen/incubator-openwhisk | tests/src/test/scala/whisk/core/containerpool/test/ContainerPoolTests.scala | Scala | apache-2.0 | 16,316 |
/**
* Copyright (c) 2002-2012 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.pipes
import org.neo4j.graphdb.{Transaction, TransactionFailureException, GraphDatabaseService}
import org.neo4j.kernel.impl.nioneo.store.{ConstraintViolationException, InvalidRecordException}
import org.neo4j.cypher.{NodeStillHasRelationshipsException, InternalException}
import org.neo4j.cypher.internal.symbols.SymbolTable
/**
 * Pipe that runs its source inside the current transaction: results are fully
 * materialized, then the transaction is marked successful and finished.
 *
 * If the commit fails because a deleted node still has relationships, the
 * low-level TransactionFailureException is translated into the friendlier
 * NodeStillHasRelationshipsException, carrying the offending node id.
 */
class CommitPipe(source: Pipe, graph: GraphDatabaseService) extends PipeWithSource(source) {
  // Matches kernel messages of the form "Node record Node[<id>,...] still has
  // relationships". The id may span several digits, hence \\\\d+ (the previous
  // \\\\d only matched single-digit node ids, so larger ids were not translated).
  lazy val still_has_relationships = "Node record Node\\\\[(\\\\d+),.*] still has relationships".r

  def createResults(state: QueryState) = {
    // Resolved lazily so we only fail if a transaction is actually needed
    lazy val tx = state.transaction match {
      case None => throw new InternalException("Expected to be in a transaction but wasn't")
      case Some(tx : Transaction) => tx
    }
    try {
      try {
        // Materialize all results inside the transaction before committing
        val result = source.createResults(state).toList
        tx.success()
        result
      } catch {
        case e => {
          // Mark the transaction failed, then let the error propagate
          tx.failure()
          throw e
        }
      } finally {
        tx.finish()
      }
    } catch {
      case e: TransactionFailureException => {
        // Walk the cause chain looking for a constraint violation we can
        // report with a more informative exception
        var cause: Throwable = e
        while (cause.getCause != null) {
          cause = cause.getCause
          if (cause.isInstanceOf[ConstraintViolationException]) {
            cause.getMessage match {
              case still_has_relationships(id) => throw new NodeStillHasRelationshipsException(id.toLong, e)
              case _ => throw e
            }
          }
        }
        throw e
      }
    }
  }

  def executionPlan() = source.executionPlan() + "\\r\\nTransactionBegin()"

  def symbols = source.symbols

  def dependencies = Seq()

  def deps = Map()

  def assertTypes(symbols: SymbolTable) {}
}
| dksaputra/community | cypher/src/main/scala/org/neo4j/cypher/internal/pipes/CommitPipe.scala | Scala | gpl-3.0 | 2,583 |
// Copyright 2014 Foursquare Labs Inc. All Rights Reserved.
package io.fsq.twofishes.indexer.scalding
import com.twitter.scalding._
import com.twitter.scalding.typed.TypedSink
import io.fsq.twofishes.gen._
import io.fsq.twofishes.indexer.util.SpindleSequenceFileSource
import io.fsq.twofishes.util._
import org.apache.hadoop.io.LongWritable
/**
 * Scalding job that reads pre-matched polygon/feature pairs from tab-separated
 * input and writes, for every feature id, the matching value whose polygon id
 * is largest.
 *
 * Input lines have 6 tab-separated columns; column 0 is a polygon id and
 * column 2 is a comma-separated list of feature ids (other columns are not
 * used here — see the input producer for their meaning). Lines starting with
 * '#' and malformed ids are skipped.
 */
class BasePrematchedPolygonsImporterJob(
  name: String,
  inputSpec: TwofishesImporterInputSpec,
  args: Args
) extends TwofishesImporterJob(name, inputSpec, args) {

  (for {
    line <- lines
    if !line.startsWith("#")
    parts = line.split("\\t")
    if parts.size == 6
    // TryO silently drops unparsable numeric ids
    polygonId <- Helpers.TryO({ parts(0).toLong }).toList
    featureIdsString <- parts(2).split(",")
    featureId <- Helpers.TryO({ featureIdsString.toLong }).toList
  } yield {
    val matchingValue = PolygonMatchingValue.newBuilder
      .polygonId(polygonId)
      .featureId(featureId)
      .result
    (new LongWritable(featureId) -> matchingValue)
  }).group
    // Keep only the highest-numbered polygon per feature
    .maxBy(_.polygonIdOption.getOrElse(0L))
    .write(
      TypedSink[(LongWritable, PolygonMatchingValue)](
        SpindleSequenceFileSource[LongWritable, PolygonMatchingValue](outputPath)
      )
    )
}
| foursquare/fsqio | src/jvm/io/fsq/twofishes/indexer/scalding/BasePrematchedPolygonsImporterJob.scala | Scala | apache-2.0 | 1,196 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.models.autoencoder
import java.nio.ByteBuffer
import java.nio.file.{Files, Path}
import com.intel.analytics.bigdl.dataset.ByteRecord
import scopt.OptionParser
object Utils {
  // Mean and standard deviation of MNIST training pixel values, used for
  // input normalization elsewhere in the example.
  val trainMean = 0.13066047740239436
  val trainStd = 0.30810779333114624

  /**
   * Command-line options for the autoencoder training example.
   *
   * @param folder where the MNIST data files live
   * @param checkpoint where to cache the model and optimizer state
   * @param modelSnapshot path of a model snapshot to resume from
   * @param stateSnapshot path of an optimizer-state snapshot to resume from
   * @param batchSize mini-batch size
   * @param maxEpoch number of training epochs
   * @param graphModel whether to build the model as a graph
   */
  case class TrainParams(
    folder: String = "./",
    checkpoint: Option[String] = None,
    modelSnapshot: Option[String] = None,
    stateSnapshot: Option[String] = None,
    batchSize: Int = 150,
    maxEpoch: Int = 10,
    graphModel: Boolean = false
  )

  // scopt parser mapping command-line flags onto TrainParams
  val trainParser = new OptionParser[TrainParams]("BigDL Autoencoder on MNIST") {
    opt[String]('f', "folder")
      .text("where you put the MNIST data")
      .action((x, c) => c.copy(folder = x))
    opt[String]("model")
      .text("model snapshot location")
      .action((x, c) => c.copy(modelSnapshot = Some(x)))
    opt[String]("state")
      .text("state snapshot location")
      .action((x, c) => c.copy(stateSnapshot = Some(x)))
    opt[String]("checkpoint")
      .text("where to cache the model and state")
      .action((x, c) => c.copy(checkpoint = Some(x)))
    opt[Int]('b', "batchSize")
      .text("batch size")
      .action((x, c) => c.copy(batchSize = x))
    opt[Int]('e', "maxEpoch")
      .text("max epoch")
      .action((x, c) => c.copy(maxEpoch = x))
    opt[Unit]('g', "graphModel")
      .text("use graph model")
      .action((x, c) => c.copy(graphModel = true))
  }

  /**
   * Loads MNIST images and labels stored in the standard IDX binary format.
   *
   * The label file starts with magic number 2049, the image file with 2051;
   * both record counts must agree. Labels are shifted to 1-based values.
   *
   * @param featureFile path to the IDX image file
   * @param labelFile path to the IDX label file
   * @return one ByteRecord per image, pixels in row-major order
   */
  private[bigdl] def load(featureFile: Path, labelFile: Path): Array[ByteRecord] = {
    val labelBuffer = ByteBuffer.wrap(Files.readAllBytes(labelFile))
    val featureBuffer = ByteBuffer.wrap(Files.readAllBytes(featureFile))
    val labelMagicNumber = labelBuffer.getInt()
    require(labelMagicNumber == 2049, s"Invalid label file magic number: $labelMagicNumber")
    val featureMagicNumber = featureBuffer.getInt()
    require(featureMagicNumber == 2051, s"Invalid feature file magic number: $featureMagicNumber")

    val labelCount = labelBuffer.getInt()
    val featureCount = featureBuffer.getInt()
    require(labelCount == featureCount,
      s"Label count $labelCount does not match feature count $featureCount")

    val rowNum = featureBuffer.getInt()
    val colNum = featureBuffer.getInt()

    val result = new Array[ByteRecord](featureCount)
    var i = 0
    while (i < featureCount) {
      // Bulk-read one rowNum x colNum image; reads the same sequential bytes
      // as the original per-pixel copy loop, just in one call.
      val img = new Array[Byte](rowNum * colNum)
      featureBuffer.get(img)
      result(i) = ByteRecord(img, labelBuffer.get().toFloat + 1.0f)
      i += 1
    }
    result
  }
}
| jenniew/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/models/autoencoder/Utils.scala | Scala | apache-2.0 | 3,119 |
package net.ruippeixotog.scalascraper.dsl
import net.ruippeixotog.scalascraper.model._
/** A type class indicating that an [[net.ruippeixotog.scalascraper.model.ElementQuery]] of some
  * [[net.ruippeixotog.scalascraper.model.Element]] type can be created from an object of a given type.
  *
  * @tparam A
  *   the type of the object to be made into an `ElementQuery`
  */
trait ToQuery[A] {

  /** The type of the element in the `ElementQuery`.
    */
  type Out <: Element

  /** Creates an `ElementQuery` for an object of type `A`.
    *
    * @param a
    *   the object for which an `ElementQuery` is to be created
    * @return
    *   an `ElementQuery` for the given object.
    */
  def apply(a: A): ElementQuery[Out]
}
object ToQuery extends LowerPriorityToQuery {
  // Aux pattern: exposes the dependent Out type as a second type parameter
  type Aux[A, E <: Element] = ToQuery[A] { type Out = E }

  /** Summons the `ToQuery` instance for `A`, preserving its `Out` type. */
  def apply[A](implicit toQuery: ToQuery[A]): Aux[A, toQuery.Out] = toQuery

  /** Identity instance: an `ElementQuery` is already a query. Explicit result
    * types on these public implicit defs avoid inferred refinement types.
    */
  implicit def queryToQuery[E <: Element]: Aux[ElementQuery[E], E] =
    new ToQuery[ElementQuery[E]] {
      type Out = E
      def apply(query: ElementQuery[E]) = query
    }

  /** Wraps a strictly-typed element in a single-element query. */
  implicit def typedElemToQuery[E <: Element.Strict[E]]: Aux[E, E] =
    new ToQuery[E] {
      type Out = E
      def apply(elem: E) = ElementQuery(elem)
    }

  /** Queries a typed document through its root element. */
  implicit def typedDocToQuery[D <: Document, E <: Element.Strict[E]](implicit ev: D <:< Document.Typed[E]): Aux[D, E] =
    new ToQuery[D] {
      type Out = E
      def apply(doc: D) = ElementQuery(ev(doc).root)
    }
}
/** Lower-priority fallbacks: any element or document can at least be queried
  * as a plain `Element`. Explicit result types on these public implicit defs
  * avoid inferred refinement types.
  */
trait LowerPriorityToQuery {

  /** Fallback: queries any element subtype as a plain `Element`. */
  implicit def elemToQuery[E <: Element]: ToQuery[E] { type Out = Element } =
    new ToQuery[E] {
      type Out = Element
      def apply(elem: E) = ElementQuery[Element](elem)
    }

  /** Fallback: queries any document through its root as a plain `Element`. */
  implicit def docToQuery[D <: Document]: ToQuery[D] { type Out = Element } =
    new ToQuery[D] {
      type Out = Element
      def apply(doc: D) = ElementQuery[Element](doc.root)
    }
}
| ruippeixotog/scala-scraper | core/src/main/scala/net/ruippeixotog/scalascraper/dsl/ToQuery.scala | Scala | mit | 1,766 |
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.caviar_data
import java.io.File
import ParseCAVIAR._
import com.mongodb.casbah.MongoClient
import com.mongodb.casbah.commons.MongoDBObject
import com.mongodb.casbah.Imports._
import scala.collection.immutable.SortedMap
object ParseCAVIAR_DB_per_video {

  /** Entry point. Expects the path to the CAVIAR data folder as the first argument. */
  def main(args: Array[String]) = {
    require(args.nonEmpty, "Expected the path to the CAVIAR data folder as the first argument")
    val dataPath = args(0)
    run(dataPath)
  }

  /**
   * Parses every per-video sub-folder under `path` and stores its atoms,
   * grouped by time point, in a dedicated MongoDB database per video. The
   * database name encodes which high-level events (meeting/moving) occur in
   * the video's annotation.
   */
  def run(path: String) = {
    val mongoClient = MongoClient()
    val d = new File(path)
    // File.listFiles returns null when the path is not a readable directory
    val children = Option(d.listFiles).getOrElse(
      throw new IllegalArgumentException(s"$path is not a readable directory"))
    // Sub-folders are named "<number>-..."; process videos in numeric order
    val innerFolders = children.sortBy(_.getName.split("-")(0).toInt)
    var videoCounter = 0
    for (f <- innerFolders) {
      videoCounter += 1
      println(s"Parsing video ${f.getCanonicalPath}")
      val files = f.listFiles.filter(x => dataFileNames.exists(p => x.getName.contains(p)))
      // Concatenate all data files, strip comments ('%') and whitespace, and
      // split into '.'-terminated statements
      val contents =
        (for (f <- files)
          yield scala.io.Source.fromFile(f).getLines().filter(p => !p.startsWith("%"))).
          toList.flatten.mkString.replaceAll("\\s", "").split("\\.").toList
      // Each statement parses to a list of atoms; anything unparsable is dropped
      val parsed = contents.flatMap(x =>
        parseAll(caviarParser(0), x).getOrElse(List(""))).filter(_ != "").asInstanceOf[List[Atom]]
      val atoms = SortedMap[Int, List[Atom]]() ++ parsed.groupBy(_.time.toInt)

      // One DB entry per time point, separating annotation from narrative atoms
      val dbEntries = atoms.toVector.map { case (time, timeAtoms) =>
        val (annotationAtoms, narrativeAtoms) = timeAtoms.partition(_.annotationAtom)
        val narrative = narrativeAtoms.flatMap(_.atoms)
        val annotation = annotationAtoms.flatMap(_.atoms)
        MongoDBObject("time" -> time) ++ ("annotation" -> annotation) ++ ("narrative" -> narrative)
      }

      // Name the database after the high-level events present in this video
      val allAnnotation = parsed.filter(_.annotationAtom).flatMap(_.atoms)
      val hasMeeting = allAnnotation.exists(_.contains("meeting"))
      val hasMoving = allAnnotation.exists(_.contains("moving"))
      val dbName =
        if (hasMeeting && hasMoving) s"caviar-video-$videoCounter-meeting-moving"
        else if (hasMeeting) s"caviar-video-$videoCounter-meeting"
        else if (hasMoving) s"caviar-video-$videoCounter-moving"
        else s"caviar-video-$videoCounter"

      mongoClient.dropDatabase(dbName)
      val collection = mongoClient(dbName)("examples")
      println(s"Inserting data in $dbName")
      dbEntries foreach (entry => collection.insert(entry))
    }
  }
}
| nkatzz/OLED | src/main/scala/experiments/datautils/caviar_data/ParseCAVIAR_DB_per_video.scala | Scala | gpl-3.0 | 3,704 |
package code.snippet
import net.liftweb._
import net.liftweb.common.{Box,Full,Empty,Failure}
import code.model._
import code.comet._
import scala.xml._
import net.liftweb.util._
import Helpers._
import _root_.net.liftweb.http._
import _root_.net.liftweb.mapper._
import js._
import JsCmds._
import JE._
import _root_.net.liftweb.http.SHtml._
import java.util.Date
/** Lift snippet that lets the currently logged-in user upload a new profile picture. */
class ProfilePicChanger {
  def currUser = User.currentUser

  // Maximum accepted upload size, in bytes (1 MB)
  val max = 1024 * 1024

  def render = {
    // Validate the uploaded file and store it if acceptable
    def process(cu: User, fp: FileParamHolder) = {
      val rejected = fp.file == null || fp.file.length == 0 || fp.file.length > max
      if (rejected) {
        S.error("No empty or files larger than " + (max / 1024 / 1024).toInt.toString + " MB")
      } else {
        FileServicer.saveProfilePic(cu, fp)
        S.notice("Thanks for the upload")
        S.redirectTo("/profile")
      }
    }

    currUser match {
      case Full(cu) =>
        "#profpic [src]" #> RendererHelper.getProfilePictureUrl(cu) &
          "type=file" #> SHtml.fileUpload(fp => process(cu, fp))
      case _ =>
        // No logged-in user: render nothing
        "*" #> ClearNodes
    }
  }
}
| Cerovec/LiftSocial | src/main/scala/code/snippet/ProfilePicChanger.scala | Scala | apache-2.0 | 1,076 |
/*
* CoinTosses.scala
* Book example unit test.
*
* Created By: Michael Reposa (mreposa@cra.com), Avi Pfeffer (apfeffer@cra.com)
* Creation Date: Feb 26, 2016
*
* Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email figaro@cra.com for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.test.book.chap06
import com.cra.figaro.language.Universe
import com.cra.figaro.library.atomic.continuous.Beta
import com.cra.figaro.language.Flip
import com.cra.figaro.algorithm.sampling.Importance
import org.scalatest.Matchers
import org.scalatest.WordSpec
import com.cra.figaro.test.tags.BookExample
import com.cra.figaro.test.tags.NonDeterministic
/** Command-line entry point: infers a coin's bias from two recorded
  * tosses with a Beta(2,5) prior and predicts the next toss using
  * importance sampling.
  */
object CoinTosses {
  def main(args: Array[String]) {
    val outcomes = "10"
    val numTosses = outcomes.length
    val bias = Beta(2,5)
    val tosses = Array.fill(numTosses)(Flip(bias))
    val nextToss = Flip(bias)
    // Condition each toss element on the recorded outcome character.
    // NOTE(review): `outcomes` holds '1'/'0' but is compared against 'H',
    // so every observation is false (tails); this matches the book
    // example's published results — confirm against the text.
    (0 until numTosses).foreach { i =>
      tosses(i).observe(outcomes(i) == 'H')
    }
    val algorithm = Importance(nextToss, bias)
    algorithm.start()
    Thread.sleep(1000)
    algorithm.stop()
    println("Average bias = " + algorithm.mean(bias))
    println("Probability of heads on next toss = " + algorithm.probability(nextToss, true))
    algorithm.kill()
  }
}
/** Unit test for the CoinTosses book example: runs the same inference as
  * [[CoinTosses.main]] and checks the sampled posterior statistics.
  *
  * NOTE(review): `outcomes` contains '1'/'0' but is compared to 'H', so
  * both tosses are observed as tails; the expected mean below (~2/9 for a
  * Beta(2,5) prior updated with two tails) is consistent with that —
  * confirm this is the intended reading of the book example.
  */
class CoinTossesTest extends WordSpec with Matchers {
  Universe.createNew()
  "Coin Tosses" should {
    val outcomes = "10"
    val numTosses = outcomes.length
    val bias = Beta(2,5)
    val tosses = Array.fill(numTosses)(Flip(bias))
    val nextToss = Flip(bias)
    // Condition each toss element on the recorded outcome character.
    for {
      toss <- 0 until numTosses
    } {
      val outcome = outcomes(toss) == 'H'
      tosses(toss).observe(outcome)
    }
    // Run importance sampling for a fixed wall-clock second, then read off
    // the posterior statistics before killing the algorithm.
    val algorithm = Importance(nextToss, bias)
    algorithm.start()
    Thread.sleep(1000)
    algorithm.stop()
    val avgBias = algorithm.mean(bias)
    val pHeads = algorithm.probability(nextToss, true)
    algorithm.kill()
    "have an average bias equal to 0.222 +- 0.003" taggedAs (BookExample, NonDeterministic) in {
      avgBias should be(0.222 +- 0.003)
    }
    "have a probability of heads on next toss equal to 0.227 +- 0.003" taggedAs (BookExample, NonDeterministic) in {
      pHeads should be(0.227 +- 0.003)
    }
  }
}
| scottcb/figaro | Figaro/src/test/scala/com/cra/figaro/test/book/chap06/CoinTosses.scala | Scala | bsd-3-clause | 2,368 |
package org.sisioh.config
import com.typesafe.config.ConfigValue
import scala.collection.JavaConverters._
object ConfigurationValue {
  /** Wraps a Typesafe Config [[ConfigValue]] in this library's value type. */
  def apply(core: ConfigValue): ConfigurationValue =
    new ConfigurationValueImpl(core)
}
/** A single configuration value wrapping a Typesafe Config [[ConfigValue]]. */
trait ConfigurationValue extends ConfigurationMergeable {
  /** The wrapped Typesafe Config value. */
  val underlying: ConfigValue
  /** The origin of the underlying value. */
  def origin: ConfigurationOrigin
  /** The type tag of the underlying value. */
  def valueType: ConfigurationValueType.Value
  /** The unwrapped value, if present. */
  def value: Option[Any]
  /** The value as a `String` when the underlying type is a string; `None` otherwise. */
  def valueAsString: Option[String]
  /** The value as a `Boolean` when the underlying type is a boolean; `None` otherwise. */
  def valueAsBoolean: Option[Boolean]
  /** The value as a `Number` when the underlying type is numeric; `None` otherwise. */
  def valueAsNumber: Option[Number]
  /** The value as a sequence when the underlying type is a list; `None` otherwise. */
  def valueAsSeq: Option[Seq[Any]]
  /** The value as a map when the underlying type is an object; `None` otherwise. */
  def valueAsMap: Option[Map[String, Any]]
  /** A [[Configuration]] containing this value at the given `path`. */
  def atPath(path: String): Configuration
  /** A [[Configuration]] containing this value at the given `key`. */
  def atKey(key: String): Configuration
  /** This value with `other` supplying defaults for anything missing here. */
  def withFallback(other: ConfigurationMergeable): ConfigurationValue
}
/** Default [[ConfigurationValue]] implementation backed by a Typesafe
  * Config [[ConfigValue]].
  *
  * All the typed accessors (`valueAsString`, `valueAsBoolean`, ...) follow
  * the same pattern — check the underlying value type, then cast — which
  * is factored into the private `whenType` helper. The casts stay at the
  * individual call sites so their runtime semantics are unchanged.
  */
private[config] case class ConfigurationValueImpl(underlying: ConfigValue)
  extends ConfigurationValue {
  /** The origin of the underlying value. */
  def origin = ConfigurationOrigin(underlying.origin())
  /** The type tag of the underlying value. */
  def valueType = ConfigurationValueType(underlying.valueType())
  /** The unwrapped value, if any.
    *
    * Java collections are exposed through their Scala wrappers; a `null`
    * underlying value yields `None`.
    */
  def value: Option[Any] = {
    underlying.unwrapped() match {
      case null => None
      case v: java.util.Map[_, _] => Some(v.asScala)
      case v: java.util.List[_] => Some(v.asScala.toSeq)
      case v => Some(v)
    }
  }
  /** Applies `convert` to the unwrapped value when the underlying config
    * node has the `expected` type; yields `None` otherwise.
    */
  private def whenType[A](expected: ConfigurationValueType.Value)(convert: Any => A): Option[A] =
    if (valueType == expected) value.map(convert) else None
  def valueAsString: Option[String] =
    whenType(ConfigurationValueType.String)(_.asInstanceOf[String])
  def valueAsBoolean: Option[Boolean] =
    whenType(ConfigurationValueType.Boolean)(_.asInstanceOf[Boolean])
  def valueAsNumber: Option[Number] =
    whenType(ConfigurationValueType.Number)(_.asInstanceOf[Number])
  def valueAsSeq: Option[Seq[Any]] =
    whenType(ConfigurationValueType.List)(_.asInstanceOf[Seq[_]])
  // NOTE(review): for objects, `value` yields the mutable `asScala` wrapper,
  // which is then cast to an immutable `Map` exactly as the historical code
  // did — confirm callers of valueAsMap do not hit a ClassCastException.
  def valueAsMap: Option[Map[String, Any]] =
    whenType(ConfigurationValueType.Object)(_.asInstanceOf[Map[String, _]])
  def withFallback(other: ConfigurationMergeable) =
    ConfigurationValue(underlying.withFallback(other.underlying))
  def atPath(path: String) = Configuration(underlying.atPath(path))
  def atKey(key: String) = Configuration(underlying.atKey(key))
}
| sisioh/sisioh-config | config/src/main/scala/org/sisioh/config/ConfigurationValue.scala | Scala | apache-2.0 | 2,498 |
package com.divisiblebyzero.ada.common
import org.apache.log4j.Logger
/** Mix-in that equips a class with an Apache log4j [[Logger]] named after
  * the concrete runtime class, plus one forwarding method per log level
  * (each with a plain and a [[Throwable]]-carrying overload).
  */
trait Logging {
  /** Logger name: the fully-qualified name of the runtime class. */
  val loggerName = this.getClass.getName

  /** The log4j logger, created lazily on first use. */
  lazy val logger = Logger.getLogger(loggerName)

  def trace(message: String): Unit = logger.trace(message)
  def trace(message: String, e: Throwable): Unit = logger.trace(message, e)

  def debug(message: String): Unit = logger.debug(message)
  def debug(message: String, e: Throwable): Unit = logger.debug(message, e)

  def info(message: String): Unit = logger.info(message)
  def info(message: String, e: Throwable): Unit = logger.info(message, e)

  def warn(message: String): Unit = logger.warn(message)
  def warn(message: String, e: Throwable): Unit = logger.warn(message, e)

  def error(message: String): Unit = logger.error(message)
  def error(message: String, e: Throwable): Unit = logger.error(message, e)

  def fatal(message: String): Unit = logger.fatal(message)
  def fatal(message: String, e: Throwable): Unit = logger.fatal(message, e)
}
| eczarny/ada | src/main/scala/com/divisiblebyzero/ada/common/Logging.scala | Scala | mit | 940 |
package com.twitter.finagle.client
import com.twitter.finagle._
import com.twitter.finagle.context
import com.twitter.finagle.context.Contexts
import com.twitter.finagle.factory.{
BindingFactory, RefcountedFactory, StatsFactoryWrapper, TimeoutFactory}
import com.twitter.finagle.filter._
import com.twitter.finagle.loadbalancer.LoadBalancerFactory
import com.twitter.finagle.param._
import com.twitter.finagle.server.ServerInfo
import com.twitter.finagle.service._
import com.twitter.finagle.stack.Endpoint
import com.twitter.finagle.stack.nilStack
import com.twitter.finagle.stats.{LoadedHostStatsReceiver, ClientStatsReceiver}
import com.twitter.finagle.tracing._
import com.twitter.finagle.transport.Transport
import com.twitter.finagle.util.Showable
import com.twitter.util.Future
import com.twitter.util.registry.GlobalRegistry
object StackClient {
  /**
   * Canonical Roles for each Client-related Stack modules.
   */
  object Role extends Stack.Role("StackClient") {
    val pool = Stack.Role("Pool")
    val requestDraining = Stack.Role("RequestDraining")
    val prepFactory = Stack.Role("PrepFactory")
    /** PrepConn is special in that it's the first role before the `Endpoint` role */
    val prepConn = Stack.Role("PrepConn")
    val protoTracing = Stack.Role("protoTracing")
  }
  /**
   * For feature roll out only.
   */
  private val EnableNackACID: String = "com.twitter.finagle.core.UseClientNackAdmissionFilter"
  // The toggle is keyed off the hash of this process's ServerInfo id, so the
  // rollout decision is stable for a given server identity.
  private def enableNackAC: Boolean = CoreToggles(EnableNackACID)(ServerInfo().id.hashCode)
  /**
   * A [[com.twitter.finagle.Stack]] representing an endpoint.
   * Note that this is terminated by a [[com.twitter.finagle.service.FailingFactory]]:
   * users are expected to terminate it with a concrete service factory.
   *
   * @see [[com.twitter.finagle.tracing.WireTracingFilter]]
   * @see [[com.twitter.finagle.service.ExpiringService]]
   * @see [[com.twitter.finagle.service.FailFastFactory]]
   * @see [[com.twitter.finagle.service.PendingRequestFilter]]
   * @see [[com.twitter.finagle.client.DefaultPool]]
   * @see [[com.twitter.finagle.service.TimeoutFilter]]
   * @see [[com.twitter.finagle.service.FailureAccrualFactory]]
   * @see [[com.twitter.finagle.service.StatsServiceFactory]]
   * @see [[com.twitter.finagle.service.StatsFilter]]
   * @see [[com.twitter.finagle.filter.DtabStatsFilter]]
   * @see [[com.twitter.finagle.tracing.ClientDestTracingFilter]]
   * @see [[com.twitter.finagle.filter.MonitorFilter]]
   * @see [[com.twitter.finagle.filter.ExceptionSourceFilter]]
   * @see [[com.twitter.finagle.client.LatencyCompensation]]
   */
  def endpointStack[Req, Rep]: Stack[ServiceFactory[Req, Rep]] = {
    // Ensure that we have performed global initialization.
    com.twitter.finagle.Init()
    /**
     * N.B. see the note in `newStack` regarding up / down orientation in the stack.
     */
    val stk = new StackBuilder[ServiceFactory[Req, Rep]](nilStack[Req, Rep])
    /**
     * `prepConn` is the bottom of the stack by definition. This position represents
     * the first module to handle newly connected [[Transport]]s and dispatchers.
     *
     * finagle-thrift uses this role to install session upgrading logic from
     * vanilla Thrift to Twitter Thrift.
     */
    stk.push(Role.prepConn, identity[ServiceFactory[Req, Rep]](_))
    /**
     * `WriteTracingFilter` annotates traced requests. Annotations are timestamped
     * so this should be low in the stack to accurately delineate between wire time
     * and handling time.
     */
    stk.push(WireTracingFilter.module)
    /**
     * `ExpiringService` enforces an idle timeout and total ttl for connections.
     * This module must be beneath the DefaultPool in order to apply per connection.
     *
     * N.B. the difference between this connection ttl and the `DefaultPool` ttl
     * (via CachingPool) is that this applies to *all* connections and `DefaultPool`
     * only expires connections above the low watermark.
     */
    stk.push(ExpiringService.client)
    /**
     * `FailFastFactory` accumulates failures per connection, marking the endpoint
     * as unavailable so that modules higher in the stack can dispatch requests
     * around the failing endpoint.
     */
    stk.push(FailFastFactory.module)
    /**
     * `PendingRequestFilter` enforces a limit on the number of pending requests
     * for a single connection. It must be beneath the `DefaultPool` module so that
     * its limits are applied per connection rather than per endpoint.
     */
    stk.push(PendingRequestFilter.module)
    /**
     * `DefaultPool` configures connection pooling. Like the `LoadBalancerFactory`
     * module it is a potentially aggregate [[ServiceFactory]] composed of multiple
     * [[Service Services]] which represent a distinct session to the same endpoint.
     */
    stk.push(DefaultPool.module)
    /**
     * `TimeoutFilter` enforces static request timeouts and broadcast request deadlines,
     * sending a best-effort interrupt for expired requests.
     * It must be beneath the `StatsFilter` so that timeouts are properly recorded.
     */
    stk.push(TimeoutFilter.clientModule)
    /**
     * `FailureAccrualFactory` accrues request failures per endpoint updating its
     * status so that modules higher in the stack may route around an unhealthy
     * endpoint.
     *
     * It must be above `DefaultPool` to accumulate failures across all sessions
     * to an endpoint.
     * It must be above `TimeoutFilter` so that it can observe request timeouts.
     * It must be above `PendingRequestFilter` so that it can observe client
     * admission rejections.
     */
    stk.push(FailureAccrualFactory.module)
    /**
     * `ExceptionRemoteInfoFactory` fills in remote info (upstream addr/client id,
     * downstream addr/client id, and trace id) in exceptions. This needs to be near the top
     * of the stack so that failures anywhere lower in the stack have remote
     * info added to them, but below the stats, tracing, and monitor filters so these filters
     * see exceptions with remote info added.
     */
    stk.push(ExceptionRemoteInfoFactory.module)
    /**
     * `StatsServiceFactory` exports a gauge which reports the status of the stack
     * beneath it. It must be above `FailureAccrualFactory` in order to record
     * failure accrual's aggregate view of health over multiple requests.
     */
    stk.push(StatsServiceFactory.module)
    /**
     * `StatsFilter` installs a (wait for it...) stats filter on active sessions.
     * It must be above the `TimeoutFilter` so that it can record timeouts as failures.
     * It has no other position constraint.
     */
    stk.push(StatsFilter.module)
    /**
     * `DtabStatsFilter` exports dtab stats. It has no relative position constraints
     * within the endpoint stack.
     */
    stk.push(DtabStatsFilter.module)
    /**
     * `ClientDestTracingFilter` annotates the trace with the destination endpoint's
     * socket address. It has no position constraints within the endpoint stack.
     */
    stk.push(ClientDestTracingFilter.module)
    /**
     * `MonitorFilter` installs a configurable exception handler ([[Monitor]]) for
     * client sessions. There is no specific position constraint but higher in the
     * stack is preferable so it can wrap more application logic.
     */
    stk.push(MonitorFilter.clientModule)
    /**
     * `ExceptionSourceFilter` is the exception handler of last resort. It recovers
     * application errors into failed [[Future Futures]] and attributes the failures to
     * clients by client label. This needs to be at the top of the endpoint stack so that
     * failures anywhere lower in the stack have endpoints attributed to them.
     */
    stk.push(ExceptionSourceFilter.module)
    /**
     * `LatencyCompensation` configures latency compensation based on destination.
     *
     * It must appear above consumers of the the c.t.f.client.Compensation param, so
     * above `TimeoutFilter`.
     *
     * It is only evaluated at stack creation time.
     */
    stk.push(LatencyCompensation.module)
    stk.result
  }
  /**
   * Creates a default finagle client [[com.twitter.finagle.Stack]].
   * The stack can be configured via [[com.twitter.finagle.Stack.Param]]'s
   * in the finagle package object ([[com.twitter.finagle.param]]) and specific
   * params defined in the companion objects of the respective modules.
   *
   * @see [[com.twitter.finagle.client.StackClient#endpointStack]]
   * @see [[com.twitter.finagle.loadbalancer.LoadBalancerFactory]]
   * @see [[com.twitter.finagle.factory.StatsFactoryWrapper]]
   * @see [[com.twitter.finagle.client.StatsScoping]]
   * @see [[com.twitter.finagle.client.AddrMetadataExtraction]]
   * @see [[com.twitter.finagle.factory.BindingFactory]]
   * @see [[com.twitter.finagle.factory.RefcountedFactory]]
   * @see [[com.twitter.finagle.factory.TimeoutFactory]]
   * @see [[com.twitter.finagle.FactoryToService]]
   * @see [[com.twitter.finagle.service.Retries]]
   * @see [[com.twitter.finagle.tracing.ClientTracingFilter]]
   * @see [[com.twitter.finagle.tracing.TraceInitializerFilter]]
   */
  def newStack[Req, Rep]: Stack[ServiceFactory[Req, Rep]] = {
    /*
     * NB on orientation: we here speak of "up" / "down" or "above" /
     * "below" in terms of a request's traversal of the stack---a
     * request starts at the top and goes down, a response returns
     * back up. This is opposite to how modules are written on the
     * page; a request starts at the bottom of the `newStack` method
     * and goes up.
     */
    val stk = new StackBuilder(endpointStack[Req, Rep])
    /*
     * These modules balance requests across cluster endpoints and
     * handle automatic requeuing of failed requests.
     *
     *  * `LoadBalancerFactory` balances requests across the endpoints
     *    of a cluster given by the `LoadBalancerFactory.Dest`
     *    param. It must appear above the endpoint stack, and below
     *    `BindingFactory` in order to satisfy the
     *    `LoadBalancerFactory.Dest` param.
     *
     *  * `StatsFactoryWrapper` tracks the service acquisition latency
     *    metric. It must appear above `LoadBalancerFactory` in order
     *    to track service acquisition from the load balancer, and
     *    below `FactoryToService` so that it is called on each
     *    service acquisition.
     *
     *  * `Role.requestDraining` ensures that a service is not closed
     *    until all outstanding requests on it have completed. It must
     *    appear below `FactoryToService` so that services are not
     *    prematurely closed by `FactoryToService`. (However it is
     *    only effective for services which are called multiple times,
     *    which is never the case when `FactoryToService` is enabled.)
     *
     *  * `TimeoutFactory` times out service acquisition from
     *    `LoadBalancerFactory`. It must appear above
     *    `LoadBalancerFactory` in order to time out service
     *    acquisition from the load balancer, and below
     *    `FactoryToService` so that it is called on each service
     *    acquisition.
     *
     *  * `Role.prepFactory` is a hook used to inject codec-specific
     *    behavior; it is used in the HTTP codec to avoid closing a
     *    service while a chunked response is being read. It must
     *    appear below `FactoryToService` so that services are not
     *    prematurely closed by `FactoryToService`.
     *
     *  * `FactoryToService` acquires a new endpoint service from the
     *    load balancer on each request (and closes it after the
     *    response completes).
     *
     *  * `NackAdmissionFilter` probabilistically drops requests if the client
     *    is receiving a large fraction of nack responses. This indicates an
     *    overload situation. Since this filter should operate on all requests
     *    sent over the wire, it must be below `Retries`. Since it
     *    aggregates the status of the entire cluster, it must be above the
     *    `LoadBalancerFactory`.
     *
     *  * `Retries` retries `RetryPolicy.RetryableWriteException`s
     *    automatically. It must appear above `FactoryToService` so
     *    that service acquisition failures are retried.
     *
     *  * `ClearContextValueFilter` clears the configured Context key,
     *    `Retries`, in the request's Context. This module must
     *    come before `Retries` so that it doesn't clear the `Retries`
     *    set by this client. `Retries` is only meant to be propagated
     *    one hop from the client to the server. The client overwrites `Retries`
     *    in the `RequeueFilter` with its own value; however, if the client
     *    has removed `Retries` in its configuration, we want `Retries`
     *    to be cleared so the server doesn't see a value set by another client.
     */
    stk.push(LoadBalancerFactory.module)
    stk.push(StatsFactoryWrapper.module)
    stk.push(Role.requestDraining, (fac: ServiceFactory[Req, Rep]) =>
      new RefcountedFactory(fac))
    stk.push(TimeoutFactory.module)
    stk.push(Role.prepFactory, identity[ServiceFactory[Req, Rep]](_))
    stk.push(FactoryToService.module)
    if (enableNackAC) stk.push(NackAdmissionFilter.module) // Rollout Only
    stk.push(Retries.moduleRequeueable)
    stk.push(ClearContextValueFilter.module(context.Retries))
    /*
     * These modules deal with name resolution and request
     * distribution (when a name resolves to a `Union` of clusters).
     *
     *  * `StatsScoping` modifies the `Stats` param based on the
     *    `AddrMetadata` and `Scoper` params; it permits stats further
     *    down the stack to be scoped according to the destination
     *    cluster. It must appear below `AddrMetadataExtraction` to
     *    satisfy the `AddrMetadata` param, and above
     *    `RequeuingFilter` (and everything below it) which must have
     *    stats scoped to the destination cluster.
     *
     *  * `AddrMetadataExtraction` extracts `Addr.Metadata` from the
     *    `LoadBalancerFactory.Dest` param and puts it in the
     *    `AddrMetadata` param. (Arguably this should happen directly
     *    in `BindingFactory`.) It must appear below `BindingFactory`
     *    to satisfy the `LoadBalanceFactory.Dest param`, and above
     *    `StatsScoping` to provide the `AddrMetadata` param.
     *
     *  * `EndpointRecorder` passes endpoint information to the
     *    `EndpointRegistry`. It must appear below `BindingFactory` so
     *    `BindingFactory` can set the `Name.Bound` `BindingFactory.Dest`
     *    param.
     *
     *  * `BindingFactory` resolves the destination `Name` into a
     *    `NameTree`, and distributes requests to destination clusters
     *    according to the resolved `NameTree`. Cluster endpoints are
     *    passed down in the `LoadBalancerFactory.Dest` param. It must
     *    appear above 'AddrMetadataExtraction' and
     *    `LoadBalancerFactory` to provide the
     *    `LoadBalancerFactory.Dest` param.
     *
     *  * `TimeoutFactory` times out name resolution, which happens in
     *    the service acquisition phase in `BindingFactory`; once the
     *    name is resolved, a service is acquired as soon as
     *    processing hits the `FactoryToService` further down the
     *    stack. It must appear above `BindingFactory` in order to
     *    time out name resolution, and below `FactoryToService` so
     *    that it is called on each service acquisition.
     *
     *  * `FactoryToService` acquires a new service on each request
     *    (and closes it after the response completes). This has three
     *    purposes: first, so that the per-request `Dtab.local` may be
     *    taken into account in name resolution; second, so that each
     *    request is distributed across the `NameTree`; and third, so
     *    that name resolution and request distribution are included
     *    in the request trace span. (Both name resolution and request
     *    distribution are performed in the service acquisition
     *    phase.) It must appear above `BindingFactory` and below
     *    tracing setup.
     */
    stk.push(StatsScoping.module)
    stk.push(AddrMetadataExtraction.module)
    stk.push(EndpointRecorder.module)
    stk.push(BindingFactory.module)
    stk.push(TimeoutFactory.module)
    stk.push(FactoryToService.module)
    /*
     * These modules set up tracing for the request span:
     *
     *  * `Role.protoTracing` is a hook for protocol-specific tracing
     *
     *  * `ClientTracingFilter` traces request send / receive
     *    events. It must appear above all other modules except
     *    `TraceInitializerFilter` so it delimits all tracing in the
     *    course of a request.
     *
     *  * `TraceInitializerFilter` allocates a new trace span per
     *    request. It must appear above all other modules so the
     *    request span encompasses all tracing in the course of a
     *    request.
     */
    stk.push(Role.protoTracing, identity[ServiceFactory[Req, Rep]](_))
    // NOTE(review): Failure.module presumably processes
    // com.twitter.finagle.Failure instances before they reach application
    // code — confirm its exact behavior in the Failure companion.
    stk.push(Failure.module)
    stk.push(ClientTracingFilter.module)
    stk.push(TraceInitializerFilter.clientModule)
    // NOTE(review): presumably manages this client's entry in the global
    // registry over the stack's lifecycle — confirm in RegistryEntryLifecycle.
    stk.push(RegistryEntryLifecycle.module)
    stk.result
  }
  /**
   * The default params used for client stacks.
   */
  val defaultParams: Stack.Params =
    Stack.Params.empty +
      Stats(ClientStatsReceiver) +
      LoadBalancerFactory.HostStats(LoadedHostStatsReceiver)
}
/**
 * A [[com.twitter.finagle.Client Client]] that may have its
 * [[com.twitter.finagle.Stack Stack]] transformed.
 *
 * A `StackBasedClient` is weaker than a `StackClient` in that the
 * specific `Req`, `Rep` types of its stack are not exposed.
 *
 * Parameterization and transformation both return `StackBasedClient`s,
 * so they compose without revealing the underlying stack's types.
 */
trait StackBasedClient[Req, Rep] extends Client[Req, Rep]
  with Stack.Parameterized[StackBasedClient[Req, Rep]]
  with Stack.Transformable[StackBasedClient[Req, Rep]]
/**
 * A [[com.twitter.finagle.Client Client]] that composes a
 * [[com.twitter.finagle.Stack Stack]].
 */
trait StackClient[Req, Rep] extends StackBasedClient[Req, Rep]
  with Stack.Parameterized[StackClient[Req, Rep]]
  with Stack.Transformable[StackClient[Req, Rep]] {
  /** The current stack. */
  def stack: Stack[ServiceFactory[Req, Rep]]
  /** The current parameter map. */
  def params: Stack.Params
  /** A new StackClient with the provided stack. */
  def withStack(stack: Stack[ServiceFactory[Req, Rep]]): StackClient[Req, Rep]
  /** A new StackClient with `t` applied to the current stack. */
  def transformed(t: Stack.Transformer): StackClient[Req, Rep] =
    withStack(t(stack))
  // these are necessary to have the right types from Java
  /** A new StackClient with parameter map `ps` replacing the current one. */
  def withParams(ps: Stack.Params): StackClient[Req, Rep]
  /** A new StackClient with parameter `p` added to the current parameters. */
  def configured[P: Stack.Param](p: P): StackClient[Req, Rep]
  /** Java-friendly variant of `configured`: the Stack.Param evidence is explicit. */
  def configured[P](psp: (P, Stack.Param[P])): StackClient[Req, Rep]
  /** A new StackClient with all of `params` merged into the current parameters. */
  def configuredParams(params: Stack.Params): StackClient[Req, Rep]
}
/**
 * The standard template implementation for
 * [[com.twitter.finagle.client.StackClient]].
 *
 * @see The [[http://twitter.github.io/finagle/guide/Clients.html user guide]]
 *      for further details on Finagle clients and their configuration.
 * @see [[StackClient.newStack]] for the default modules used by Finagle
 *      clients.
 */
trait StdStackClient[Req, Rep, This <: StdStackClient[Req, Rep, This]]
    extends StackClient[Req, Rep]
    with Stack.Parameterized[This]
    with CommonParams[This]
    with ClientParams[This]
    with WithClientAdmissionControl[This]
    with WithClientTransport[This]
    with WithClientSession[This]
    with WithSessionQualifier[This] { self =>
  /**
   * The type we write into the transport.
   */
  protected type In
  /**
   * The type we read out of the transport.
   */
  protected type Out
  /**
   * Defines a typed [[com.twitter.finagle.client.Transporter]] for this client.
   * Concrete StackClient implementations are expected to specify this.
   */
  protected def newTransporter(): Transporter[In, Out]
  /**
   * Defines a dispatcher, a function which reconciles the stream based
   * `Transport` with a Request/Response oriented `Service`.
   * Together with a `Transporter`, it forms the foundation of a
   * finagle client. Concrete implementations are expected to specify this.
   *
   * @see [[com.twitter.finagle.dispatch.GenSerialServerDispatcher]]
   */
  protected def newDispatcher(transport: Transport[In, Out]): Service[Req, Rep]
  /** A new client with its stack replaced by `stack`; params are kept. */
  def withStack(stack: Stack[ServiceFactory[Req, Rep]]): This =
    copy1(stack = stack)
  /**
   * Creates a new StackClient with `f` applied to `stack`.
   *
   * For expert users only.
   */
  def transformed(f: Stack[ServiceFactory[Req, Rep]] => Stack[ServiceFactory[Req, Rep]]): This =
    copy1(stack = f(stack))
  /**
   * Creates a new StackClient with parameter `p`.
   */
  override def configured[P: Stack.Param](p: P): This =
    withParams(params + p)
  /**
   * Creates a new StackClient with parameter `psp._1` and Stack Param type `psp._2`.
   */
  override def configured[P](psp: (P, Stack.Param[P])): This = {
    val (p, sp) = psp
    configured(p)(sp)
  }
  /**
   * Creates a new StackClient with additional parameters `newParams`.
   */
  override def configuredParams(newParams: Stack.Params): This = {
    withParams(params ++ newParams)
  }
  /**
   * Creates a new StackClient with `params` used to configure this StackClient's `stack`.
   */
  def withParams(params: Stack.Params): This =
    copy1(params = params)
  /**
   * Prepends `filter` to the top of the client. That is, after materializing
   * the client (newClient/newService) `filter` will be the first element which
   * requests flow through. This is a familiar chaining combinator for filters and
   * is particularly useful for `StdStackClient` implementations that don't expose
   * services but instead wrap the resulting service with a rich API.
   */
  def filtered(filter: Filter[Req, Rep, Req, Rep]): This = {
    val role = Stack.Role(filter.getClass.getSimpleName)
    val stackable = Filter.canStackFromFac.toStackable(role, filter)
    withStack(stackable +: stack)
  }
  /**
   * A copy constructor in lieu of defining StackClient as a
   * case class.
   */
  protected def copy1(
    stack: Stack[ServiceFactory[Req, Rep]] = this.stack,
    params: Stack.Params = this.params): This { type In = self.In; type Out = self.Out }
  /**
   * A stackable module that creates new `Transports` (via transporter)
   * when applied.
   */
  protected def endpointer: Stackable[ServiceFactory[Req, Rep]] =
    new Stack.Module[ServiceFactory[Req, Rep]] {
      val role = Endpoint
      val description = "Send requests over the wire"
      val parameters = Seq(implicitly[Stack.Param[Transporter.EndpointAddr]])
      def make(prms: Stack.Params, next: Stack[ServiceFactory[Req, Rep]]) = {
        val Transporter.EndpointAddr(addr) = prms[Transporter.EndpointAddr]
        val factory = addr match {
          // The address already carries a service factory: use it directly.
          case com.twitter.finagle.exp.Address.ServiceFactory(sf: ServiceFactory[Req, Rep], _) => sf
          // A statically failed address: service acquisition fails with `e`.
          case Address.Failed(e) => new FailingFactory[Req, Rep](e)
          // A concrete socket address: build a transporter and dispatch over it.
          case Address.Inet(ia, _) =>
            val endpointClient = copy1(params=prms)
            val transporter = endpointClient.newTransporter()
            // Export info about the transporter type so that we can query info
            // about its implementation at runtime. This assumes that the `toString`
            // of the implementation is sufficiently descriptive.
            val transporterImplKey = Seq(
              ClientRegistry.registryName,
              endpointClient.params[ProtocolLibrary].name,
              endpointClient.params[Label].label,
              "Transporter")
            GlobalRegistry.get.put(transporterImplKey, transporter.toString)
            val mkFutureSvc: () => Future[Service[Req, Rep]] =
              () => transporter(ia).map { trans =>
                // we do not want to capture and request specific Locals
                // that would live for the life of the session.
                Contexts.letClearAll {
                  endpointClient.newDispatcher(trans)
                }
              }
            ServiceFactory(mkFutureSvc)
        }
        Stack.Leaf(this, factory)
      }
    }
  /**
   * @inheritdoc
   *
   * @param label0 if an empty String is provided, then the label
   *               from the [[Label]] [[Stack.Params]] is used.
   *               If that is also an empty String, then `dest` is used.
   */
  def newClient(dest: Name, label0: String): ServiceFactory[Req, Rep] = {
    val Stats(stats) = params[Stats]
    val Label(label1) = params[Label]
    // For historical reasons, we have two sources for identifying
    // a client. The most recently set `label0` takes precedence.
    val clientLabel = (label0, label1) match {
      case (Label.Default, Label.Default) => Showable.show(dest)
      case (Label.Default, l1) => l1
      case _ => label0
    }
    val clientStack = stack ++ (endpointer +: nilStack)
    val clientParams = params +
      Label(clientLabel) +
      Stats(stats.scope(clientLabel)) +
      BindingFactory.Dest(dest)
    clientStack.make(clientParams)
  }
  def newService(dest: Name, label: String): Service[Req, Rep] = {
    // Each request acquires a service from `client` and releases it when
    // the response completes (see the FactoryToService notes in newStack).
    val client = copy1(
      params = params + FactoryToService.Enabled(true)
    ).newClient(dest, label)
    new FactoryToService[Req, Rep](client)
  }
}
| spockz/finagle | finagle-core/src/main/scala/com/twitter/finagle/client/StackClient.scala | Scala | apache-2.0 | 25,353 |
package slick.memory
import scala.language.{implicitConversions, existentials}
import scala.collection.mutable.Builder
import scala.reflect.ClassTag
import scala.util.control.NonFatal
import slick.dbio._
import slick.ast._
import TypeUtil._
import slick.compiler._
import slick.profile._
import slick.relational.{ResultConverterCompiler, ResultConverter, CompiledMapping}
import slick.util.{DumpInfo, ??}
/** A profile and driver for interpreted queries on top of the in-memory database. */
trait MemoryProfile extends RelationalProfile with MemoryQueryingProfile { driver: MemoryDriver =>
type SchemaDescription = SchemaDescriptionDef
type InsertInvoker[T] = InsertInvokerDef[T]
type Backend = HeapBackend
val backend: Backend = HeapBackend
val api: API = new API {}
lazy val queryCompiler = compiler + new MemoryCodeGen
lazy val updateCompiler = compiler
lazy val deleteCompiler = compiler
lazy val insertCompiler = QueryCompiler(Phase.assignUniqueSymbols, Phase.inferTypes, new InsertCompiler(InsertCompiler.NonAutoInc), new MemoryInsertCodeGen)
override protected def computeCapabilities = super.computeCapabilities ++ MemoryProfile.capabilities.all
def createInsertInvoker[T](tree: Node): InsertInvoker[T] = new InsertInvokerDef[T](tree)
def buildSequenceSchemaDescription(seq: Sequence[_]): SchemaDescription = ??
def buildTableSchemaDescription(table: Table[_]): SchemaDescription = new DDL(Vector(table))
type QueryActionExtensionMethods[R, S <: NoStream] = QueryActionExtensionMethodsImpl[R, S]
type StreamingQueryActionExtensionMethods[R, T] = StreamingQueryActionExtensionMethodsImpl[R, T]
type SchemaActionExtensionMethods = SchemaActionExtensionMethodsImpl
type InsertActionExtensionMethods[T] = InsertActionExtensionMethodsImpl[T]
def createQueryActionExtensionMethods[R, S <: NoStream](tree: Node, param: Any): QueryActionExtensionMethods[R, S] =
new QueryActionExtensionMethods[R, S](tree, param)
def createStreamingQueryActionExtensionMethods[R, T](tree: Node, param: Any): StreamingQueryActionExtensionMethods[R, T] =
new StreamingQueryActionExtensionMethods[R, T](tree, param)
def createSchemaActionExtensionMethods(schema: SchemaDescription): SchemaActionExtensionMethods =
new SchemaActionExtensionMethodsImpl(schema)
def createInsertActionExtensionMethods[T](compiled: CompiledInsert): InsertActionExtensionMethods[T] =
new InsertActionExtensionMethodsImpl[T](compiled)
// Factory for user-defined bidirectional column-type mappings (T <-> U).
lazy val MappedColumnType = new MappedColumnTypeFactory
class MappedColumnTypeFactory extends super.MappedColumnTypeFactory {
def base[T : ClassTag, U : BaseColumnType](tmap: T => U, tcomap: U => T): BaseColumnType[T] = {
// The underlying base type must not be nullable for a mapped type.
assertNonNullType(implicitly[BaseColumnType[U]])
new MappedColumnType(implicitly[BaseColumnType[U]], tmap, tcomap)
}
}
// A column type T stored via an existing base type U. Nullability and
// ordering are delegated to the base type; ordering compares mapped-to-base
// values through `toBase`.
class MappedColumnType[T, U](val baseType: ColumnType[U], toBase: T => U, toMapped: U => T)(implicit val classTag: ClassTag[T]) extends ScalaType[T] with BaseTypedType[T] {
def nullable: Boolean = baseType.nullable
def ordered: Boolean = baseType.ordered
def scalaOrderingFor(ord: Ordering): scala.math.Ordering[T] = new scala.math.Ordering[T] {
val uOrdering = baseType.scalaOrderingFor(ord)
def compare(x: T, y: T): Int = uOrdering.compare(toBase(x), toBase(y))
}
}
// Public API bundle; adds SimpleDBIO as an alias for synchronous in-memory actions.
trait API extends super[RelationalProfile].API with super[MemoryQueryingProfile].API {
type SimpleDBIO[+R] = SimpleMemoryAction[R]
val SimpleDBIO = SimpleMemoryAction
}
// Builds a QueryInterpreter that also understands ResultSetMapping nodes:
// it runs the wrapped query, feeds every raw row through the compiled
// converter, and rebuilds the requested collection type.
protected def createInterpreter(db: Backend#Database, param: Any): QueryInterpreter = new QueryInterpreter(db, param) {
override def run(n: Node) = n match {
case ResultSetMapping(_, from, CompiledMapping(converter, _)) :@ CollectionType(cons, el) =>
val fromV = run(from).asInstanceOf[TraversableOnce[Any]]
val b = cons.createBuilder(el.classTag).asInstanceOf[Builder[Any, Any]]
b ++= fromV.map(v => converter.asInstanceOf[ResultConverter[MemoryResultConverterDomain, _]].read(v.asInstanceOf[QueryInterpreter.ProductValue]))
b.result()
case n => super.run(n)
}
}
// Runs a compiled query tree synchronously on the session's database.
def runSynchronousQuery[R](tree: Node, param: Any)(implicit session: Backend#Session): R =
createInterpreter(session.database, param).run(tree).asInstanceOf[R]
/** Invoker for compiled inserts against the heap backend.
  *
  * The compiled `tree` is destructured up front into the target table node
  * and the row converter produced by the insert compiler.
  */
class InsertInvokerDef[T](tree: Node) {
protected[this] val ResultSetMapping(_, Insert(_, table: TableNode, _), CompiledMapping(converter, _)) = tree
type SingleInsertResult = Unit
type MultiInsertResult = Unit
// Insert a single mapped value: allocate a fresh row buffer, let the
// converter write the value's columns into it, then append it to the table.
// (Was deprecated Scala 2 procedure syntax `def += (...) { ... }`; now an
// explicit `: Unit =` with identical behavior.)
def += (value: T)(implicit session: Backend#Session): Unit = {
val htable = session.database.getTable(table.tableName)
val buf = htable.createInsertRow
converter.asInstanceOf[ResultConverter[MemoryResultConverterDomain, Any]].set(value, buf)
htable.append(buf)
}
// Insert many values by delegating to `+=` for each.
def ++= (values: Iterable[T])(implicit session: Backend#Session): Unit =
values.foreach(this += _)
}
// Schema description for the heap backend: simply the list of tables;
// concatenation merges the table lists of two descriptions.
class DDL(val tables: Vector[Table[_]]) extends SchemaDescriptionDef {
def ++(other: SchemaDescription): SchemaDescription =
new DDL(tables ++ other.asInstanceOf[DDL].tables)
}
// All profile actions are fixed, synchronous actions against the heap backend.
type DriverAction[+R, +S <: NoStream, -E <: Effect] = FixedBasicAction[R, S, E]
type StreamingDriverAction[+R, +T, -E <: Effect] = FixedBasicStreamingAction[R, T, E]
// Wraps a session-consuming function as a synchronous database action.
protected[this] def dbAction[R, S <: NoStream, E <: Effect](f: Backend#Session => R): DriverAction[R, S, E] = new DriverAction[R, S, E] with SynchronousDatabaseAction[R, S, Backend#This, E] {
def run(ctx: Backend#Context): R = f(ctx.session)
def getDumpInfo = DumpInfo("MemoryProfile.DriverAction")
}
/** Query action that can run either as one synchronous result (`run`) or as a
  * demand-driven stream (`emitStream`). The streaming state is the iterator
  * over converted rows. */
class StreamingQueryAction[R, T](tree: Node, param: Any) extends StreamingDriverAction[R, T, Effect.Read] with SynchronousDatabaseAction[R, Streaming[T], Backend#This, Effect.Read] {
type StreamState = Iterator[T]
// Runs the inner query and lazily maps each raw row through the converter.
protected[this] def getIterator(ctx: Backend#Context): Iterator[T] = {
val inter = createInterpreter(ctx.session.database, param)
val ResultSetMapping(_, from, CompiledMapping(converter, _)) = tree
val pvit = inter.run(from).asInstanceOf[TraversableOnce[QueryInterpreter.ProductValue]].toIterator
pvit.map(converter.asInstanceOf[ResultConverter[MemoryResultConverterDomain, T]].read _)
}
def run(ctx: Backend#Context): R =
createInterpreter(ctx.session.database, param).run(tree).asInstanceOf[R]
// Emits up to `limit` elements; returns the iterator as continuation state,
// or null once the stream is exhausted.
override def emitStream(ctx: Backend#StreamingContext, limit: Long, state: StreamState): StreamState = {
val it = if(state ne null) state else getIterator(ctx)
var count = 0L
while(count < limit && it.hasNext) {
count += 1
ctx.emit(it.next)
}
if(it.hasNext) it else null
}
// First row; calling `next` on an empty iterator fails for empty results.
def head: DriverAction[T, NoStream, Effect.Read] = new DriverAction[T, NoStream, Effect.Read] with SynchronousDatabaseAction[T, NoStream, Backend#This, Effect.Read] {
def run(ctx: Backend#Context): T = getIterator(ctx).next
def getDumpInfo = DumpInfo("MemoryProfile.StreamingQueryAction.first")
}
// First row as an Option; None for an empty result.
def headOption: DriverAction[Option[T], NoStream, Effect.Read] = new DriverAction[Option[T], NoStream, Effect.Read] with SynchronousDatabaseAction[Option[T], NoStream, Backend#This, Effect.Read] {
def run(ctx: Backend#Context): Option[T] = {
val it = getIterator(ctx)
if(it.hasNext) Some(it.next) else None
}
def getDumpInfo = DumpInfo("MemoryProfile.StreamingQueryAction.firstOption")
}
def getDumpInfo = DumpInfo("MemoryProfile.StreamingQueryAction")
}
// `.result` on a query: runs the compiled tree as a (possibly streaming) action.
class QueryActionExtensionMethodsImpl[R, S <: NoStream](tree: Node, param: Any) extends super.QueryActionExtensionMethodsImpl[R, S] {
def result: DriverAction[R, S, Effect.Read] =
new StreamingQueryAction[R, Nothing](tree, param).asInstanceOf[DriverAction[R, S, Effect.Read]]
}
// Streaming variant: narrows `.result` to a streaming action type.
class StreamingQueryActionExtensionMethodsImpl[R, T](tree: Node, param: Any) extends QueryActionExtensionMethodsImpl[R, Streaming[T]](tree, param) with super.StreamingQueryActionExtensionMethodsImpl[R, T] {
override def result: StreamingDriverAction[R, T, Effect.Read] = super.result.asInstanceOf[StreamingDriverAction[R, T, Effect.Read]]
}
// Schema actions: create/drop every table listed in the DDL description.
class SchemaActionExtensionMethodsImpl(schema: SchemaDescription) extends super.SchemaActionExtensionMethodsImpl {
protected[this] val tables = schema.asInstanceOf[DDL].tables
// Creates each table with its columns, indexes and constraints.
def create = dbAction { session =>
tables.foreach(t =>
session.database.createTable(t.tableName,
t.create_*.map { fs => new HeapBackend.Column(fs, typeInfoFor(fs.tpe)) }.toIndexedSeq,
t.indexes.toIndexedSeq, t.tableConstraints.toIndexedSeq)
)
}
// Drops each table by name.
def drop = dbAction { session =>
tables.foreach(t => session.database.dropTable(t.tableName))
}
}
// Insert actions: lift the session-based InsertInvoker calls into actions.
class InsertActionExtensionMethodsImpl[T](compiled: CompiledInsert) extends super.InsertActionExtensionMethodsImpl[T] {
protected[this] val inv = createInsertInvoker[T](compiled)
type SingleInsertResult = Unit
type MultiInsertResult = Unit
def += (value: T) = dbAction(inv.+=(value)(_))
def ++= (values: Iterable[T]) = dbAction(inv.++=(values)(_))
}
}
/** Capability declarations for the in-memory profile. */
object MemoryProfile {
object capabilities {
/** Supports all MemoryProfile features which do not have separate capability values */
val other = Capability("memory.other")
/** All MemoryProfile capabilities */
val all = Set(other)
}
}
/** Driver cake tying the relational and in-memory querying profiles together. */
trait MemoryDriver extends RelationalDriver with MemoryQueryingDriver with MemoryProfile { driver =>
override val profile: MemoryProfile = this
// Extend the standard compiler with the interpreter phases for the heap backend.
override def computeQueryCompiler = super.computeQueryCompiler ++ QueryCompiler.interpreterPhases
// Compiles the mapping side of an insert: each mapped column is written into
// the row buffer at the index of the matching table column.
class InsertMappingCompiler(insert: Insert) extends ResultConverterCompiler[MemoryResultConverterDomain] {
val Insert(_, table: TableNode, ProductNode(cols)) = insert
val tableColumnIdxs = table.driverTable.asInstanceOf[Table[_]].create_*.zipWithIndex.toMap
def createColumnConverter(n: Node, idx: Int, column: Option[FieldSymbol]): ResultConverter[MemoryResultConverterDomain, _] =
new InsertResultConverter(tableColumnIdxs(column.get))
// Write-only converter: inserts never read or update existing rows.
// NOTE(review): `??` is presumably a project-defined "unsupported" stub
// (not standard Scala) — confirm it resolves in this scope.
class InsertResultConverter(tidx: Int) extends ResultConverter[MemoryResultConverterDomain, Any] {
def read(pr: MemoryResultConverterDomain#Reader) = ??
def update(value: Any, pr: MemoryResultConverterDomain#Updater) = ??
def set(value: Any, pp: MemoryResultConverterDomain#Writer) = pp(tidx) = value
override def getDumpInfo = super.getDumpInfo.copy(mainInfo = s"tidx=$tidx")
def width = 1
}
}
// Code generator for inserts: keeps the server-side tree unchanged and
// compiles the optional client-side mapping with InsertMappingCompiler.
class MemoryInsertCodeGen extends CodeGen {
def compileServerSideAndMapping(serverSide: Node, mapping: Option[Node], state: CompilerState) =
(serverSide, mapping.map(new InsertMappingCompiler(serverSide.asInstanceOf[Insert]).compileMapping))
}
}
/** Default stand-alone instance of the in-memory driver. */
object MemoryDriver extends MemoryDriver
/** A non-streaming Action that wraps a synchronous MemoryProfile API call.
  * The wrapped function receives the backend context and is run synchronously. */
case class SimpleMemoryAction[+R](f: HeapBackend#Context => R) extends SynchronousDatabaseAction[R, NoStream, HeapBackend, Effect.All] {
def run(context: HeapBackend#Context): R = f(context)
def getDumpInfo = DumpInfo(name = "SimpleMemoryAction")
}
| bmclane/slick | slick/src/main/scala/slick/memory/MemoryProfile.scala | Scala | bsd-2-clause | 11,159 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.api
import slamdata.Predef._
import quasar.effect.Timing
import quasar.fs.mount.cache.VCache, VCache.VCacheExpR
import org.http4s.Header
import org.http4s.headers.Expires
import org.http4s.util.Renderer
import scalaz._, Scalaz._
import scalaz.syntax.tag._
/** Middleware decorating responses with view-cache expiry headers.
  *
  * Runs the wrapped service, reads the cache expiration (if any) and the
  * current timestamp, then appends an `Expires` header and — when the expiry
  * is already in the past — the stale marker header.
  */
object VCacheMiddleware {
def apply[S[_]](
service: QHttpService[S]
)(implicit
R: VCacheExpR.Ops[S],
T: Timing.Ops[S],
C: Catchable[Free[S, ?]]
): QHttpService[S] =
QHttpService { case req =>
// Combine the response, expiration and timestamp effects applicatively.
(service(req) ⊛ R.ask ⊛ T.timestamp) { case (resp, ex, ts) =>
val cacheHeaders = ex.unwrap.foldMap(e =>
Header(Expires.name.value, Renderer.renderString(e.v)) ::
ts.isAfter(e.v).fold(List(StaleHeader), Nil))
resp.modifyHeaders(_ ++ cacheHeaders)
}
}
}
| jedesah/Quasar | web/src/main/scala/quasar/api/VCacheMiddleware.scala | Scala | apache-2.0 | 1,406 |
// $Id$
package scala.tools.selectivecps
import scala.tools.nsc.transform._
import scala.tools.nsc.symtab._
import scala.tools.nsc.plugins._
/**
* In methods marked @cps, explicitly name results of calls to other @cps methods
*/
abstract class SelectiveANFTransform extends PluginComponent with Transform with
TypingTransformers with CPSUtils {
// inherits abstract value `global` and class `Phase` from Transform
import global._ // the global environment
import definitions._ // standard classes and methods
import typer.atOwner // methods to type trees
// Human-readable description shown by the compiler for this phase.
override def description = "ANF pre-transform for @cps"
/** the following two members override abstract members in Transform */
val phaseName: String = "selectiveanf"
protected def newTransformer(unit: CompilationUnit): Transformer =
new ANFTransformer(unit)
class ANFTransformer(unit: CompilationUnit) extends TypingTransformer(unit) {
// Set true only at positions where cps-typed code is acceptable; reset by
// the default transform case so stray cps code is reported as an error.
var cpsAllowed: Boolean = false // detect cps code in places we do not handle (yet)
/** Rewrites `return` expressions in tail position into plain expressions;
  * a `return` anywhere else is reported as an error. */
object RemoveTailReturnsTransformer extends Transformer {
override def transform(tree: Tree): Tree = tree match {
case Block(stms, r @ Return(expr)) =>
treeCopy.Block(tree, stms, expr)
case Block(stms, expr) =>
treeCopy.Block(tree, stms, transform(expr))
case If(cond, r1 @ Return(thenExpr), r2 @ Return(elseExpr)) =>
treeCopy.If(tree, cond, transform(thenExpr), transform(elseExpr))
case If(cond, r1 @ Return(thenExpr), elseExpr) =>
treeCopy.If(tree, cond, transform(thenExpr), transform(elseExpr))
case If(cond, thenExpr, r2 @ Return(elseExpr)) =>
treeCopy.If(tree, cond, transform(thenExpr), transform(elseExpr))
case If(cond, thenExpr, elseExpr) =>
treeCopy.If(tree, cond, transform(thenExpr), transform(elseExpr))
case Try(block, catches, finalizer) =>
treeCopy.Try(tree,
transform(block),
(catches map (t => transform(t))).asInstanceOf[List[CaseDef]],
transform(finalizer))
case CaseDef(pat, guard, r @ Return(expr)) =>
treeCopy.CaseDef(tree, pat, guard, expr)
case CaseDef(pat, guard, body) =>
treeCopy.CaseDef(tree, pat, guard, transform(body))
case Return(_) =>
reporter.error(tree.pos, "return expressions in CPS code must be in tail position")
tree
case _ =>
super.transform(tree)
}
}
// Strips tail-position `return`s from a method body before ANF conversion.
def removeTailReturns(body: Tree): Tree = {
// support body with single return expression
body match {
case Return(expr) => expr
case _ => RemoveTailReturnsTransformer.transform(body)
}
}
/** Entry point of the ANF pre-transform: dispatches on definition trees
  * (DefDef/Function/ValDef), converting bodies with `transExpr`, and flags
  * cps code appearing anywhere it is not handled. */
override def transform(tree: Tree): Tree = {
if (!cpsEnabled) return tree
tree match {
// Maybe we should further generalize the transform and move it over
// to the regular Transformer facility. But then, actual and required cps
// state would need more complicated (stateful!) tracking.
// Making the default case use transExpr(tree, None, None) instead of
// calling super.transform() would be a start, but at the moment,
// this would cause infinite recursion. But we could remove the
// ValDef case here.
case dd @ DefDef(mods, name, tparams, vparamss, tpt, rhs0) =>
debuglog("transforming " + dd.symbol)
atOwner(dd.symbol) {
// For @cps methods, tail returns are removed before conversion.
val rhs =
if (cpsParamTypes(tpt.tpe).nonEmpty) removeTailReturns(rhs0)
else rhs0
val rhs1 = transExpr(rhs, None, getExternalAnswerTypeAnn(tpt.tpe))(getExternalAnswerTypeAnn(tpt.tpe).isDefined)
debuglog("result "+rhs1)
debuglog("result is of type "+rhs1.tpe)
treeCopy.DefDef(dd, mods, name, transformTypeDefs(tparams), transformValDefss(vparamss),
transform(tpt), rhs1)
}
case ff @ Function(vparams, body) =>
debuglog("transforming anon function " + ff.symbol)
atOwner(ff.symbol) {
//val body1 = transExpr(body, None, getExternalAnswerTypeAnn(body.tpe))
// need to special case partial functions: if expected type is @cps
// but all cases are pure, then we would transform
// { x => x match { case A => ... }} to
// { x => shiftUnit(x match { case A => ... })}
// which Uncurry cannot handle (see function6.scala)
// thus, we push down the shiftUnit to each of the case bodies
val ext = getExternalAnswerTypeAnn(body.tpe)
val pureBody = getAnswerTypeAnn(body.tpe).isEmpty
implicit val isParentImpure = ext.isDefined
def transformPureMatch(tree: Tree, selector: Tree, cases: List[CaseDef]) = {
val caseVals = cases map { case cd @ CaseDef(pat, guard, body) =>
// if (!hasPlusMarker(body.tpe)) body modifyType (_ withAnnotation newPlusMarker()) // TODO: to avoid warning
val bodyVal = transExpr(body, None, ext) // ??? triggers "cps-transformed unexpectedly" warning in transTailValue
treeCopy.CaseDef(cd, transform(pat), transform(guard), bodyVal)
}
treeCopy.Match(tree, transform(selector), caseVals)
}
def transformPureVirtMatch(body: Block, selDef: ValDef, cases: List[Tree], matchEnd: Tree) = {
val stats = transform(selDef) :: (cases map (transExpr(_, None, ext)))
treeCopy.Block(body, stats, transExpr(matchEnd, None, ext))
}
val body1 = body match {
case Match(selector, cases) if ext.isDefined && pureBody =>
transformPureMatch(body, selector, cases)
// virtpatmat switch
case Block(List(selDef: ValDef), mat@Match(selector, cases)) if ext.isDefined && pureBody =>
treeCopy.Block(body, List(transform(selDef)), transformPureMatch(mat, selector, cases))
// virtpatmat
case b@Block(matchStats@((selDef: ValDef) :: cases), matchEnd) if ext.isDefined && pureBody && (matchStats forall treeInfo.hasSynthCaseSymbol) =>
transformPureVirtMatch(b, selDef, cases, matchEnd)
// virtpatmat that stores the scrut separately -- TODO: can we eliminate this case??
case Block(List(selDef0: ValDef), mat@Block(matchStats@((selDef: ValDef) :: cases), matchEnd)) if ext.isDefined && pureBody && (matchStats forall treeInfo.hasSynthCaseSymbol)=>
treeCopy.Block(body, List(transform(selDef0)), transformPureVirtMatch(mat, selDef, cases, matchEnd))
case _ =>
transExpr(body, None, ext)
}
debuglog("anf result "+body1+"\\nresult is of type "+body1.tpe)
treeCopy.Function(ff, transformValDefs(vparams), body1)
}
case vd @ ValDef(mods, name, tpt, rhs) => // object-level valdefs
debuglog("transforming valdef " + vd.symbol)
if (getExternalAnswerTypeAnn(tpt.tpe).isEmpty) {
atOwner(vd.symbol) {
val rhs1 = transExpr(rhs, None, None)
treeCopy.ValDef(vd, mods, name, transform(tpt), rhs1)
}
} else {
reporter.error(tree.pos, "cps annotations not allowed on by-value parameters or value definitions")
super.transform(tree)
}
case TypeTree() =>
// circumvent cpsAllowed here
super.transform(tree)
case Apply(_,_) =>
// this allows reset { ... } in object constructors
// it's kind of a hack to put it here (see note above)
transExpr(tree, None, None)
case _ =>
if (hasAnswerTypeAnn(tree.tpe)) {
if (tree.symbol.isLazy) {
reporter.error(tree.pos, "implementation restriction: cps annotations not allowed on lazy value definitions")
cpsAllowed = false
} else if (!cpsAllowed)
reporter.error(tree.pos, "cps code not allowed here / " + tree.getClass + " / " + tree)
log(tree)
}
cpsAllowed = false
super.transform(tree)
}
}
// ANF-converts an expression, wrapping any emitted statements plus the final
// value back into a single Block (or returning the value alone).
def transExpr(tree: Tree, cpsA: CPSInfo, cpsR: CPSInfo)(implicit isAnyParentImpure: Boolean = false): Tree = {
transTailValue(tree, cpsA, cpsR)(cpsR.isDefined || isAnyParentImpure) match {
case (Nil, b) => b
case (a, b) =>
treeCopy.Block(tree, a,b)
}
}
/** ANF-converts an argument list, threading cps state left to right.
  * By-name parameters keep their own cps context and emit no statements. */
def transArgList(fun: Tree, args: List[Tree], cpsA: CPSInfo)(implicit isAnyParentImpure: Boolean): (List[List[Tree]], List[Tree], CPSInfo) = {
val formals = fun.tpe.paramTypes
// varargs: there may be more arguments than declared formal parameters
val overshoot = args.length - formals.length
var spc: CPSInfo = cpsA
val (stm,expr) = (for ((a,tp) <- args.zip(formals ::: List.fill(overshoot)(NoType))) yield {
tp match {
case TypeRef(_, ByNameParamClass, List(elemtp)) =>
// note that we're not passing just isAnyParentImpure
(Nil, transExpr(a, None, getAnswerTypeAnn(elemtp))(getAnswerTypeAnn(elemtp).isDefined || isAnyParentImpure))
case _ =>
val (valStm, valExpr, valSpc) = transInlineValue(a, spc)
spc = valSpc
(valStm, valExpr)
}
}).unzip
(stm,expr,spc)
}
// precondition: cpsR.isDefined "implies" isAnyParentImpure
/** Core ANF conversion of a single tree. Returns (stms, expr, spc) where
  * `spc` is the cps state holding after `stms` but *before* `expr`. */
def transValue(tree: Tree, cpsA: CPSInfo, cpsR: CPSInfo)(implicit isAnyParentImpure: Boolean): (List[Tree], Tree, CPSInfo) = {
// return value: (stms, expr, spc), where spc is CPSInfo after stms but *before* expr
implicit val pos = tree.pos
tree match {
case Block(stms, expr) =>
val (cpsA2, cpsR2) = (cpsA, linearize(cpsA, getAnswerTypeAnn(tree.tpe))) // tbd
// val (cpsA2, cpsR2) = (None, getAnswerTypeAnn(tree.tpe))
val (a, b) = transBlock(stms, expr, cpsA2, cpsR2)(cpsR2.isDefined || isAnyParentImpure)
val tree1 = (treeCopy.Block(tree, a, b)) // no updateSynthFlag here!!!
(Nil, tree1, cpsA)
case If(cond, thenp, elsep) =>
/* possible situations:
cps before (cpsA)
cps in condition (spc) <-- synth flag set if *only* here!
cps in (one or both) branches */
val (condStats, condVal, spc) = transInlineValue(cond, cpsA)
val (cpsA2, cpsR2) = if (hasSynthMarker(tree.tpe))
(spc, linearize(spc, getAnswerTypeAnn(tree.tpe))) else
(None, getAnswerTypeAnn(tree.tpe)) // if no cps in condition, branches must conform to tree.tpe directly
val thenVal = transExpr(thenp, cpsA2, cpsR2)(cpsR2.isDefined || isAnyParentImpure)
val elseVal = transExpr(elsep, cpsA2, cpsR2)(cpsR2.isDefined || isAnyParentImpure)
// check that then and else parts agree (not necessary any more, but left as sanity check)
if (cpsR.isDefined) {
if (elsep == EmptyTree)
reporter.error(tree.pos, "always need else part in cps code")
}
if (hasAnswerTypeAnn(thenVal.tpe) != hasAnswerTypeAnn(elseVal.tpe)) {
reporter.error(tree.pos, "then and else parts must both be cps code or neither of them")
}
(condStats, updateSynthFlag(treeCopy.If(tree, condVal, thenVal, elseVal)), spc)
case Match(selector, cases) =>
val (selStats, selVal, spc) = transInlineValue(selector, cpsA)
val (cpsA2, cpsR2) =
if (hasSynthMarker(tree.tpe)) (spc, linearize(spc, getAnswerTypeAnn(tree.tpe)))
else (None, getAnswerTypeAnn(tree.tpe))
val caseVals = cases map { case cd @ CaseDef(pat, guard, body) =>
val bodyVal = transExpr(body, cpsA2, cpsR2)(cpsR2.isDefined || isAnyParentImpure)
treeCopy.CaseDef(cd, transform(pat), transform(guard), bodyVal)
}
(selStats, updateSynthFlag(treeCopy.Match(tree, selVal, caseVals)), spc)
// this is utterly broken: LabelDefs need to be considered together when transforming them to DefDefs:
// suppose a Block {L1; ... ; LN}
// this should become {D1def ; ... ; DNdef ; D1()}
// where D$idef = def L$i(..) = {L$i.body; L${i+1}(..)}
case ldef @ LabelDef(name, params, rhs) =>
// println("trans LABELDEF "+(name, params, tree.tpe, hasAnswerTypeAnn(tree.tpe)))
// TODO why does the labeldef's type have a cpsMinus annotation, whereas the rhs does not? (BYVALmode missing/too much somewhere?)
if (hasAnswerTypeAnn(tree.tpe)) {
// currentOwner.newMethod(name, tree.pos, Flags.SYNTHETIC) setInfo ldef.symbol.info
val sym = ldef.symbol resetFlag Flags.LABEL
val rhs1 = rhs //new TreeSymSubstituter(List(ldef.symbol), List(sym)).transform(rhs)
val rhsVal = transExpr(rhs1, None, getAnswerTypeAnn(tree.tpe))(getAnswerTypeAnn(tree.tpe).isDefined || isAnyParentImpure) changeOwner (currentOwner -> sym)
val stm1 = localTyper.typed(DefDef(sym, rhsVal))
// since virtpatmat does not rely on fall-through, don't call the labels it emits
// transBlock will take care of calling the first label
// calling each labeldef is wrong, since some labels may be jumped over
// we can get away with this for now since the only other labels we emit are for tailcalls/while loops,
// which do not have consecutive labeldefs (and thus fall-through is irrelevant)
if (treeInfo.hasSynthCaseSymbol(ldef)) (List(stm1), localTyper.typed{Literal(Constant(()))}, cpsA)
else {
assert(params.isEmpty, "problem in ANF transforming label with non-empty params "+ ldef)
(List(stm1), localTyper.typed{Apply(Ident(sym), List())}, cpsA)
}
} else {
val rhsVal = transExpr(rhs, None, None)
(Nil, updateSynthFlag(treeCopy.LabelDef(tree, name, params, rhsVal)), cpsA)
}
case Try(block, catches, finalizer) =>
val blockVal = transExpr(block, cpsA, cpsR)
val catchVals = for {
cd @ CaseDef(pat, guard, body) <- catches
bodyVal = transExpr(body, cpsA, cpsR)
} yield {
treeCopy.CaseDef(cd, transform(pat), transform(guard), bodyVal)
}
val finallyVal = transExpr(finalizer, None, None) // for now, no cps in finally
(Nil, updateSynthFlag(treeCopy.Try(tree, blockVal, catchVals, finallyVal)), cpsA)
case Assign(lhs, rhs) =>
// allow cps code in rhs only
val (stms, expr, spc) = transInlineValue(rhs, cpsA)
(stms, updateSynthFlag(treeCopy.Assign(tree, transform(lhs), expr)), spc)
case Return(expr0) =>
if (isAnyParentImpure)
reporter.error(tree.pos, "return expression not allowed, since method calls CPS method")
val (stms, expr, spc) = transInlineValue(expr0, cpsA)
(stms, updateSynthFlag(treeCopy.Return(tree, expr)), spc)
case Throw(expr0) =>
val (stms, expr, spc) = transInlineValue(expr0, cpsA)
(stms, updateSynthFlag(treeCopy.Throw(tree, expr)), spc)
case Typed(expr0, tpt) =>
// TODO: should x: A @cps[B,C] have a special meaning?
// type casts used in different ways (see match2.scala, #3199)
val (stms, expr, spc) = transInlineValue(expr0, cpsA)
val tpt1 = if (treeInfo.isWildcardStarArg(tree)) tpt else
treeCopy.TypeTree(tpt).setType(removeAllCPSAnnotations(tpt.tpe))
// (stms, updateSynthFlag(treeCopy.Typed(tree, expr, tpt1)), spc)
(stms, treeCopy.Typed(tree, expr, tpt1).setType(removeAllCPSAnnotations(tree.tpe)), spc)
case TypeApply(fun, args) =>
val (stms, expr, spc) = transInlineValue(fun, cpsA)
(stms, updateSynthFlag(treeCopy.TypeApply(tree, expr, args)), spc)
case Select(qual, name) =>
val (stms, expr, spc) = transInlineValue(qual, cpsA)
(stms, updateSynthFlag(treeCopy.Select(tree, expr, name)), spc)
case Apply(fun, args) =>
val (funStm, funExpr, funSpc) = transInlineValue(fun, cpsA)
val (argStm, argExpr, argSpc) = transArgList(fun, args, funSpc)
(funStm ::: (argStm.flatten), updateSynthFlag(treeCopy.Apply(tree, funExpr, argExpr)),
argSpc)
case _ =>
cpsAllowed = true
(Nil, transform(tree), cpsA)
}
}
// precondition: cpsR.isDefined "implies" isAnyParentImpure
/** ANF-converts a tree in tail position, inserting a `shiftUnit` conversion
  * when a cps result is required but the expression is pure, and reporting
  * an error when a cps expression appears in a non-cps position. */
def transTailValue(tree: Tree, cpsA: CPSInfo, cpsR: CPSInfo)(implicit isAnyParentImpure: Boolean): (List[Tree], Tree) = {
val (stms, expr, spc) = transValue(tree, cpsA, cpsR)
val bot = linearize(spc, getAnswerTypeAnn(expr.tpe))(tree.pos)
val plainTpe = removeAllCPSAnnotations(expr.tpe)
if (cpsR.isDefined && !bot.isDefined) {
if (!expr.isEmpty && (expr.tpe.typeSymbol ne NothingClass)) {
// must convert!
debuglog("cps type conversion (has: " + cpsA + "/" + spc + "/" + expr.tpe + ")")
debuglog("cps type conversion (expected: " + cpsR.get + "): " + expr)
if (!hasPlusMarker(expr.tpe))
reporter.warning(tree.pos, "expression " + tree + " is cps-transformed unexpectedly")
try {
val Some((a, b)) = cpsR
/* Since shiftUnit is bounded [A,B,C>:B] this may not typecheck
* if C is overly specific. So if !(B <:< C), call shiftUnit0
* instead, which takes only two type arguments.
*/
val conforms = a <:< b
val call = localTyper.typedPos(tree.pos)(
Apply(
TypeApply(
gen.mkAttributedRef( if (conforms) MethShiftUnit else MethShiftUnit0 ),
List(TypeTree(plainTpe), TypeTree(a)) ++ ( if (conforms) List(TypeTree(b)) else Nil )
),
List(expr)
)
)
// This is today's sick/meaningless heuristic for spotting breakdown so
// we don't proceed until stack traces start draping themselves over everything.
// If there are wildcard types in the tree and B == Nothing, something went wrong.
// (I thought WildcardTypes would be enough, but nope. 'reset0 { 0 }' has them.)
//
// Code as simple as reset((_: String).length)
// will crash meaninglessly without this check. See SI-3718.
//
// TODO - obviously this should be done earlier, differently, or with
// a more skilled hand. Most likely, all three.
if ((b.typeSymbol eq NothingClass) && call.tpe.exists(_ eq WildcardType))
reporter.error(tree.pos, "cannot cps-transform malformed (possibly in shift/reset placement) expression")
else
return ((stms, call))
}
catch {
case ex:TypeError =>
reporter.error(ex.pos, "cannot cps-transform expression " + tree + ": " + ex.msg)
}
}
} else if (!cpsR.isDefined && bot.isDefined) {
// error!
debuglog("cps type error: " + expr)
//println("cps type error: " + expr + "/" + expr.tpe + "/" + getAnswerTypeAnn(expr.tpe))
//println(cpsR + "/" + spc + "/" + bot)
reporter.error(tree.pos, "found cps expression in non-cps position")
} else {
// all is well
if (hasPlusMarker(expr.tpe)) {
reporter.warning(tree.pos, "expression " + expr + " of type " + expr.tpe + " is not expected to have a cps type")
expr modifyType removeAllCPSAnnotations
}
// TODO: sanity check that types agree
}
(stms, expr)
}
/** ANF-converts a non-tail value. If the result has a cps answer type, it is
  * bound to a fresh synthetic val (marked with the cps marker annotation)
  * and replaced by a reference to that val. */
def transInlineValue(tree: Tree, cpsA: CPSInfo)(implicit isAnyParentImpure: Boolean): (List[Tree], Tree, CPSInfo) = {
val (stms, expr, spc) = transValue(tree, cpsA, None) // never required to be cps
getAnswerTypeAnn(expr.tpe) match {
case spcVal @ Some(_) =>
val valueTpe = removeAllCPSAnnotations(expr.tpe)
val sym: Symbol = (
currentOwner.newValue(newTermName(unit.fresh.newName("tmp")), tree.pos, Flags.SYNTHETIC)
setInfo valueTpe
setAnnotations List(AnnotationInfo(MarkerCPSSym.tpe_*, Nil, Nil))
)
expr.changeOwner(currentOwner -> sym)
(stms ::: List(ValDef(sym, expr) setType(NoType)),
Ident(sym) setType(valueTpe) setPos(tree.pos), linearize(spc, spcVal)(tree.pos))
case _ =>
(stms, expr, spc)
}
}
/** ANF-converts one statement of a block, returning the replacement
  * statements and the cps state after them. */
def transInlineStm(stm: Tree, cpsA: CPSInfo)(implicit isAnyParentImpure: Boolean): (List[Tree], CPSInfo) = {
stm match {
// TODO: what about DefDefs?
// TODO: relation to top-level val def?
// TODO: what about lazy vals?
case tree @ ValDef(mods, name, tpt, rhs) =>
val (stms, anfRhs, spc) = atOwner(tree.symbol) { transValue(rhs, cpsA, None) }
// hoisted statements move out of the val's scope, so re-own them
val tv = new ChangeOwnerTraverser(tree.symbol, currentOwner)
stms.foreach(tv.traverse(_))
// TODO: symbol might already have annotation. Should check conformance
// TODO: better yet: do without annotations on symbols
val spcVal = getAnswerTypeAnn(anfRhs.tpe)
spcVal foreach (_ => tree.symbol setAnnotations List(AnnotationInfo(MarkerCPSSym.tpe_*, Nil, Nil)))
(stms:::List(treeCopy.ValDef(tree, mods, name, tpt, anfRhs)), linearize(spc, spcVal)(tree.pos))
case _ =>
val (headStms, headExpr, headSpc) = transInlineValue(stm, cpsA)
val valSpc = getAnswerTypeAnn(headExpr.tpe)
(headStms:::List(headExpr), linearize(headSpc, valSpc)(stm.pos))
}
}
// precondition: cpsR.isDefined "implies" isAnyParentImpure
/** ANF-converts a block: statements are converted left to right threading the
  * cps state, and the final expression is converted in tail position. Also
  * reorders virtpatmat-generated label DefDefs and kick-starts the match by
  * calling the first label. */
def transBlock(stms: List[Tree], expr: Tree, cpsA: CPSInfo, cpsR: CPSInfo)(implicit isAnyParentImpure: Boolean): (List[Tree], Tree) = {
def rec(currStats: List[Tree], currAns: CPSInfo, accum: List[Tree]): (List[Tree], Tree) =
currStats match {
case Nil =>
val (anfStats, anfExpr) = transTailValue(expr, currAns, cpsR)
(accum ++ anfStats, anfExpr)
case stat :: rest =>
val (stats, nextAns) = transInlineStm(stat, currAns)
rec(rest, nextAns, accum ++ stats)
}
val (anfStats, anfExpr) = rec(stms, cpsA, List())
// println("\\nanf-block:\\n"+ ((stms :+ expr) mkString ("{", "\\n", "}")) +"\\nBECAME\\n"+ ((anfStats :+ anfExpr) mkString ("{", "\\n", "}")))
// println("synth case? "+ (anfStats map (t => (t, t.isDef, treeInfo.hasSynthCaseSymbol(t)))))
// SUPER UGLY HACK: handle virtpatmat-style matches, whose labels have already been turned into DefDefs
if (anfStats.nonEmpty && (anfStats forall (t => !t.isDef || treeInfo.hasSynthCaseSymbol(t)))) {
val (prologue, rest) = (anfStats :+ anfExpr) span (s => !s.isInstanceOf[DefDef]) // find first case
// println("rest: "+ rest)
// val (defs, calls) = rest partition (_.isInstanceOf[DefDef])
if (rest.nonEmpty) {
// the filter drops the ()'s emitted when transValue encountered a LabelDef
val stats = prologue ++ (rest filter (_.isInstanceOf[DefDef])).reverse // ++ calls
// println("REVERSED "+ (stats mkString ("{", "\\n", "}")))
(stats, localTyper.typed{Apply(Ident(rest.head.symbol), List())}) // call first label to kick-start the match
} else (anfStats, anfExpr)
} else (anfStats, anfExpr)
}
}
}
| danslapman/scala-continuations | plugin/src/main/scala-2.12/scala/tools/selectivecps/SelectiveANFTransform.scala | Scala | bsd-3-clause | 23,837 |
//
// PorcBackend.scala -- Scala class PorcBackend
// Project OrcScala
//
// Created by amp on Aug 28, 2013.
//
// Copyright (c) 2018 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc
import java.io.{ ObjectInputStream, ObjectOutputStream }
import orc.ast.porc.MethodCPS
import orc.compile.orctimizer.PorcOrcCompiler
import orc.compile.parse.OrcInputContext
import orc.error.compiletime.CompileLogger
import orc.error.loadtime.{ DeserializationTypeException, LoadingException }
import orc.progress.ProgressMonitor
/** A backend implementation using the Orctimizer and Porc compilers.
 *
 * This is designed to be extended with a runtime which takes Porc as input.
 *
 * @author amp
 */
abstract class PorcBackend extends Backend[MethodCPS] {
// Compiler front end: delegates to PorcOrcCompiler, letting subclasses
// adjust the compilation options first via modifyCompilationOptions.
lazy val compiler: Compiler[MethodCPS] = new PorcOrcCompiler() with Compiler[MethodCPS] {
def compile(source: OrcInputContext, options: OrcCompilationOptions,
compileLogger: CompileLogger, progress: ProgressMonitor): MethodCPS = {
this(source, modifyCompilationOptions(options), compileLogger, progress)
}
}
// Hook for subclasses to tweak options before compilation; identity by default.
protected def modifyCompilationOptions(options: OrcCompilationOptions): OrcCompilationOptions = options
// NOTE: If needed we could implement an XML serializer for Porc. We could also make things even simpler by just using java serialization here.
// (Java object serialization is what this implementation uses.)
val serializer: Option[CodeSerializer[MethodCPS]] = Some(new CodeSerializer[MethodCPS] {
/** Generate a serialized form from <code>code</code>.
*/
def serialize(code: MethodCPS, out: java.io.OutputStream): Unit = {
val oout = new ObjectOutputStream(out)
oout.writeObject(code)
}
/** Take a serialized form and rebuild the original compiled code object.
*/
@throws(classOf[LoadingException])
def deserialize(in: java.io.InputStream): MethodCPS = {
val oin = new ObjectInputStream(in)
val o = oin.readObject()
// Anything other than a MethodCPS means the stream was not Porc data.
o match {
case m: MethodCPS =>
m
case _ =>
throw new DeserializationTypeException("Loaded data was of the incorrect format.")
}
}
})
// Subclasses supply the runtime that executes compiled Porc code.
def createRuntime(options: OrcExecutionOptions): Runtime[MethodCPS]
}
| orc-lang/orc | OrcScala/src/orc/PorcBackend.scala | Scala | bsd-3-clause | 2,407 |
package nodes.util
import breeze.linalg.{DenseMatrix, DenseVector}
import workflow.Transformer
import scala.reflect.ClassTag
/**
 * Concats a Seq of DenseVectors into a single DenseVector.
 *
 * The implicit `Zero[T]` is presumably required by breeze's `vertcat` to
 * allocate the result vector — TODO confirm against the breeze API in use.
 */
case class VectorCombiner[T : ClassTag]()(implicit zero: breeze.storage.Zero[T])
extends Transformer[Seq[DenseVector[T]], DenseVector[T]] {
def apply(in: Seq[DenseVector[T]]): DenseVector[T] = DenseVector.vertcat(in:_*)
}
| tomerk/keystone | src/main/scala/nodes/util/VectorCombiner.scala | Scala | apache-2.0 | 424 |
/*
* Copyright 2017-2018 47 Degrees, LLC. <http://www.47deg.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package freestyle
package free
// Free algebras used by the freestyle test-suite. `@free` generates the
// companion `Op` ADT and `Handler` type class for each trait; `@debug`
// prints the expanded trees at compile time.
@free
@debug
trait SCtors1 {
  def x(a: Int): FS[Int]
  def y(a: Int): FS[Int]
}
@free
trait SCtors2 {
  def i(a: Int): FS[Int]
  def j(a: Int): FS[Int]
}
@free
trait SCtors3 {
  def o(a: Int): FS[Int]
  def p(a: Int): FS[Int]
}
@free
trait SCtors4 {
  def k(a: Int): FS[Int]
  def m(a: Int): FS[Int]
}
// Algebra with nullary operations, used to test mixed op arities.
@free
trait MixedFreeS {
  def x: FS[Int]
  def y: FS[Int]
  def z: FS[Int]
}
@free
trait S1 {
  def x(n: Int): FS[Int]
}
@free
trait S2 {
  def y(n: Int): FS[Int]
}
// Modules group algebras; `@module` generates the coproduct of their ops.
@debug
@module
trait M1 {
  val sctors1: SCtors1
  val sctors2: SCtors2
}
@module
trait M2 {
  val sctors3: SCtors3
  val sctors4: SCtors4
}
// Modules may nest other modules.
@debug
@module
trait O1 {
  val m1: M1
  val m2: M2
  /*test*/
}
// Modules may also carry plain members alongside algebra references.
@module
trait O2 {
  val o1: O1
  val x = 1
  def y = 2
}
// A module with no algebra references at all.
@module
trait O3 {
  def x = 1
  def y = 2
}
@module
trait StateProp {
  val s1: S1
  val s2: S2
}
object comp {
  // Module that mixes an algebra reference with helper methods returning
  // sequential free programs built from that algebra.
  @module
  trait FSMod {
    val sCtors1: SCtors1
    def x(a: Int): FS.Seq[Int] = sCtors1.x(a)
    def y(b: Int): FS.Seq[Int] = sCtors1.y(b)
  }
}
object interps {
  // Handlers (natural transformations) interpreting each algebra into
  // Option and List, used as interpreters by the tests.
  implicit val optionHandler1: FSHandler[SCtors1.Op, Option] = new SCtors1.Handler[Option] {
    def x(a: Int): Option[Int] = Some(a)
    def y(a: Int): Option[Int] = Some(a)
  }
  implicit val listHandler1: FSHandler[SCtors1.Op, List] = new SCtors1.Handler[List] {
    def x(a: Int): List[Int] = List(a)
    def y(a: Int): List[Int] = List(a)
  }
  implicit val optionHandler2: FSHandler[SCtors2.Op, Option] = new SCtors2.Handler[Option] {
    def i(a: Int): Option[Int] = Some(a)
    def j(a: Int): Option[Int] = Some(a)
  }
  implicit val listHandler2: FSHandler[SCtors2.Op, List] = new SCtors2.Handler[List] {
    def i(a: Int): List[Int] = List(a)
    def j(a: Int): List[Int] = List(a)
  }
  implicit val optionHandler3: FSHandler[SCtors3.Op, Option] = new SCtors3.Handler[Option] {
    def o(a: Int): Option[Int] = Some(a)
    def p(a: Int): Option[Int] = Some(a)
  }
  implicit val listHandler3: FSHandler[SCtors3.Op, List] = new SCtors3.Handler[List] {
    def o(a: Int): List[Int] = List(a)
    def p(a: Int): List[Int] = List(a)
  }
  implicit val optionHandler4: FSHandler[SCtors4.Op, Option] = new SCtors4.Handler[Option] {
    def k(a: Int): Option[Int] = Some(a)
    def m(a: Int): Option[Int] = Some(a)
  }
  implicit val listHandler4: FSHandler[SCtors4.Op, List] = new SCtors4.Handler[List] {
    def k(a: Int): List[Int] = List(a)
    def m(a: Int): List[Int] = List(a)
  }
}
| frees-io/freestyle | modules/core/shared/src/test/scala/freestyle/free/Utils.scala | Scala | apache-2.0 | 3,094 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers
import model.Exceptions.NotFoundException
import model.FlagCandidateCommands
import model.FlagCandidatePersistedObject._
import org.mockito.Mockito._
import play.api.libs.json.{ JsValue, Json }
import play.api.mvc.Result
import play.api.test.Helpers._
import play.api.test.{ FakeHeaders, FakeRequest, Helpers }
import repositories.application.FlagCandidateRepository
import testkit.UnitWithAppSpec
import scala.concurrent.Future
/** Unit tests for [[FlagCandidateController]]: find/save/remove of a
  * candidate's issue flag against a mocked repository. */
class FlagCandidateControllerSpec extends UnitWithAppSpec {
  // Repository is mocked so each test controls persistence behaviour directly.
  val mockFlagCandidateRepository = mock[FlagCandidateRepository]
  val testableFlagCandidateController = new FlagCandidateController(
    stubControllerComponents(playBodyParsers = stubPlayBodyParsers(materializer)),
    mockFlagCandidateRepository
  )
  "Flag Candidate Controller" should {
    "Return a candidate issue" in {
      val issue = "some issue"
      val flagCandidate = FlagCandidate("appId", Some(issue))
      when(mockFlagCandidateRepository.tryGetCandidateIssue("appId")).thenReturn(Future.successful(Some(flagCandidate)))
      val response = testableFlagCandidateController.find("appId")(FakeRequest())
      asFlagCandidate(response) must be(FlagCandidateCommands.FlagCandidate(issue))
    }
    "Return Not Found for get issue if there it does not exist" in {
      when(mockFlagCandidateRepository.tryGetCandidateIssue("appId")).thenReturn(Future.successful(None))
      val response = testableFlagCandidateController.find("appId")(FakeRequest())
      status(response) mustBe NOT_FOUND
    }
    "Save a new issue for the candidate" in {
      val flagCandidate = FlagCandidate("appId", Some("some issue"))
      when(mockFlagCandidateRepository.save(flagCandidate)).thenReturn(Future.successful(()))
      val response = testableFlagCandidateController.save("appId")(createPutRequest("appId", Json.toJson(flagCandidate).toString()))
      status(response) mustBe OK
      // Also assert the repository interaction, not just the HTTP status.
      verify(mockFlagCandidateRepository).save(flagCandidate)
    }
    "Return NOT_FOUND when save an issue for incorrect applicationId" in {
      val flagCandidate = FlagCandidate("appId", Some("some issue"))
      when(mockFlagCandidateRepository.save(flagCandidate)).thenReturn(Future.failed(new NotFoundException()))
      val response = testableFlagCandidateController.save("appId")(createPutRequest("appId", Json.toJson(flagCandidate).toString()))
      status(response) mustBe NOT_FOUND
    }
    "Remove an issue for the candidate" in {
      when(mockFlagCandidateRepository.remove("appId")).thenReturn(Future.successful(()))
      val response = testableFlagCandidateController.remove("appId")(FakeRequest())
      status(response) mustBe NO_CONTENT
    }
    "Return NOT_FOUND when remove an issue for incorrect applicationId" in {
      when(mockFlagCandidateRepository.remove("appId")).thenReturn(Future.failed(new NotFoundException()))
      val response = testableFlagCandidateController.remove("appId")(FakeRequest())
      status(response) mustBe NOT_FOUND
    }
  }
  // Decodes the controller's JSON body back into the command object.
  def asFlagCandidate(response: Future[Result]) = contentAsJson(response).as[JsValue].as[FlagCandidateCommands.FlagCandidate]
  // Builds a PUT request targeting the save route with a JSON body.
  def createPutRequest(appId: String, jsonString: String) = {
    val json = Json.parse(jsonString)
    FakeRequest(Helpers.PUT, controllers.routes.FlagCandidateController.save(appId).url, FakeHeaders(), json)
      .withHeaders("Content-Type" -> "application/json")
  }
}
| hmrc/fset-faststream | test/controllers/FlagCandidateControllerSpec.scala | Scala | apache-2.0 | 4,010 |
package utils.json
import models._
import play.api.libs.json._
/** Play-JSON writers that wrap every response in a `{"c": <class>, "v": <payload>}` envelope. */
object ResponseMessageSerializers {
  // Macro-derived writers for each concrete response type.
  private[this] val serverErrorJson = Json.writes[ServerError]
  private[this] val pongJson = Json.writes[Pong]
  private[this] val versionJson = Json.writes[VersionResponse]
  private[this] val disconnectedJson = Json.writes[Disconnected]
  /** Serializes any [[ResponseMessage]]; unknown subtypes are a programming error. */
  implicit val responseMessageWrites: Writes[ResponseMessage] = Writes[ResponseMessage] { r: ResponseMessage =>
    val payload = r match {
      case se: ServerError => serverErrorJson.writes(se)
      case p: Pong => pongJson.writes(p)
      case vr: VersionResponse => versionJson.writes(vr)
      case SendDebugInfo => JsObject(Nil)
      case d: Disconnected => disconnectedJson.writes(d)
      case _ => throw new IllegalArgumentException(s"Unhandled ResponseMessage type [${r.getClass.getSimpleName}].")
    }
    // Strip the trailing "$" that case objects carry in their simple name.
    val className = r.getClass.getSimpleName.replace("$", "")
    JsObject(Seq("c" -> JsString(className), "v" -> payload))
  }
  /** Serializes a batch of messages as `{"c": "MessageSet", "v": {"messages": [...]}}`. */
  val messageSetWrites = Writes[MessageSet] { ms: MessageSet =>
    val serialized = ms.messages.map(responseMessageWrites.writes)
    val body = JsObject(Seq("messages" -> JsArray(serialized)))
    JsObject(Seq("c" -> JsString("MessageSet"), "v" -> body))
  }
}
| agilemobiledev/boilerplay | app/utils/json/ResponseMessageSerializers.scala | Scala | apache-2.0 | 1,187 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.stream.datastore
import java.nio.charset.StandardCharsets
import java.util.concurrent.atomic.AtomicLong
import com.google.common.io.Resources
import org.apache.commons.io.IOUtils
import org.apache.commons.net.DefaultSocketFactory
import org.geotools.data.DataStoreFinder
import org.geotools.factory.CommonFactoryFinder
import org.junit.runner.RunWith
import org.opengis.feature.simple.SimpleFeature
import org.opengis.filter.Filter
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
import scala.concurrent.Future
@RunWith(classOf[JUnitRunner])
/** End-to-end test: pushes TSV lines over a local TCP socket into the stream
  * data store and checks querying, listener notification and cache expiry.
  * Tests run sequentially because they share the socket, the store and the
  * listener counters. */
@RunWith(classOf[JUnitRunner])
class StreamDataStoreTest extends Specification {
  sequential
  // Listener hit counters: all features, all features (2nd listener), bbox-filtered.
  val count = new AtomicLong(0)
  val count2 = new AtomicLong(0)
  val count3 = new AtomicLong(0)
  val ff = CommonFactoryFinder.getFilterFactory2()
  // Camel route config: read text lines from TCP :5899 and convert each
  // delimited record into a "testdata" SimpleFeature.
  val sourceConf =
    """
      |{
      |  type = "generic"
      |  source-route = "netty4:tcp://localhost:5899?textline=true"
      |  sft = {
      |    type-name = "testdata"
      |    fields = [
      |      { name = "label", type = "String" }
      |      { name = "geom", type = "Point", index = true, srid = 4326, default = true }
      |      { name = "dtg", type = "Date", index = true }
      |    ]
      |  }
      |  converter = {
      |    id-field = "md5(string2bytes($0))"
      |    type = "delimited-text"
      |    format = "DEFAULT"
      |    fields = [
      |      { name = "label", transform = "trim($1)" }
      |      { name = "geom", transform = "point($2::double, $3::double)" }
      |      { name = "dtg", transform = "datetime($4)" }
      |    ]
      |  }
      |}
    """.stripMargin
  // Features are cached for 2 seconds, which the expiry test relies on.
  val sds = DataStoreFinder.getDataStore(Map(
    StreamDataStoreParams.StreamDatastoreConfig.key -> sourceConf,
    StreamDataStoreParams.CacheTimeout.key -> Integer.valueOf(2)
  )).asInstanceOf[StreamDataStore]
  "StreamDataStore" should {
    "be built from a conf string" >> {
      sds must not beNull
    }
    "read and write" >> {
      // Listener counting every arriving feature (subclass style).
      val listener = new StreamListener {
        override def onNext(sf: SimpleFeature): Unit = {
          count.incrementAndGet()
        }
      }
      sds.registerListener(listener)
      // Same, via the function-literal factory.
      val listener2 = StreamListener { sf => count2.incrementAndGet() }
      sds.registerListener(listener2)
      // Listener that only fires for features inside the bbox.
      val bboxFilter = ff.bbox("geom", 49.0, 79.0, 51.0, 80.0, "EPSG:4326")
      val listener3 = StreamListener(bboxFilter, _ => count3.incrementAndGet())
      sds.registerListener(listener3)
      val fs = sds.getFeatureSource("testdata")
      "handle new data" >> {
        val url = Resources.getResource("testdata.tsv")
        val lines = Resources.readLines(url, StandardCharsets.UTF_8)
        val socketFactory = new DefaultSocketFactory
        // Write the fixture asynchronously so the route can consume it.
        Future {
          val socket = socketFactory.createSocket("localhost", 5899)
          val os = socket.getOutputStream
          IOUtils.writeLines(lines, IOUtils.LINE_SEPARATOR_UNIX, os)
          os.flush()
          // wait for data to arrive at the server
          Thread.sleep(4000)
          os.close()
        }
        Thread.sleep(1000)
        fs.getFeatures(Filter.INCLUDE).features().hasNext must beTrue
      }
      "support listeners" >> {
        // testdata.tsv contains 7 records, 3 of which fall inside the bbox.
        count.get() must equalTo(7)
        count2.get() must equalTo(7)
        count3.get() must equalTo(3)
      }
      "handle bbox filters" >> {
        fs.getFeatures(bboxFilter).size() must be equalTo 3
      }
      "expire data after the appropriate amount of time" >> {
        // Sleep past the 2-second cache timeout configured above.
        Thread.sleep(3000)
        fs.getFeatures(Filter.INCLUDE).features().hasNext must beFalse
      }
      ok
    }
  }
} | jahhulbert-ccri/geomesa | geomesa-stream/geomesa-stream-datastore/src/test/scala/org/locationtech/geomesa/stream/datastore/StreamDataStoreTest.scala | Scala | apache-2.0 | 4,424 |
package doodle
package image
package examples
import scala.math.BigDecimal
import doodle.core._
import doodle.random._
import cats.instances.list._
import cats.syntax.traverse._
object Epicycloid {
  /** One component per term: (weight, frequency, flipped sin/cos). */
  type Epicycloid = List[(Double, Double, Boolean)]
  // Scale factor applied to the unit-sized curve before drawing.
  val size = 200
  /** Evaluates the curve at parameter angle `t`, scaled to drawing size. */
  def eval(t: Angle, pattern: Epicycloid): Vec = {
    def component(weight: Double, freq: Double, flipped: Boolean): Vec = {
      val angle = (t * freq)
      if (flipped) Vec(weight * angle.sin, weight * angle.cos)
      else Vec(weight * angle.cos, weight * angle.sin)
    }
    // Sum right-to-left, matching the original recursive formulation exactly.
    val summed = pattern.foldRight(Vec.zero) {
      case ((w, f, p), acc) => component(w, f, p) + acc
    }
    summed * size.toDouble
  }
  /** Generates a random pattern of one to six components. */
  val randomEpicycloid: Random[Epicycloid] = {
    val component: Random[(Double, Double, Boolean)] =
      for {
        weight <- Random.double
        freq <- Random.int(1, 12)
        flipped <- Random.discrete((false -> 0.75), (true -> 0.25))
      } yield (weight, freq.toDouble, flipped)
    for {
      n <- Random.int(1, 7)
      components <- List.fill(n)(component).sequence
    } yield components
  }
  /** Traces the curve as a closed path sampled at 1000 points per turn. */
  def render(epicycloid: Epicycloid): Image = {
    val start = PathElement.moveTo(eval(Angle.zero, epicycloid).toPoint)
    val rest =
      (BigDecimal(0.0) to 1.0 by 0.001).map { t =>
        val angle = Angle.turns(t.doubleValue)
        PathElement.lineTo(eval(angle, epicycloid).toPoint)
      }.toList
    Image.closedPath(start :: rest)
  }
  def image: Image =
    randomEpicycloid.map(render _).run
}
| underscoreio/doodle | image/shared/src/main/scala/doodle/image/examples/Epicycloid.scala | Scala | apache-2.0 | 1,452 |
/*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.norm
/**
 * A sink for (timestamp, value) samples within a normalization pipeline.
 * Implementations consume one sample at a time for their side effect.
 */
trait ValueFunction {
  /** Consumes a single sample. Explicit `Unit` replaces deprecated procedure-style declaration. */
  def apply(timestamp: Long, value: Double): Unit
}
/**
 * Simple value function used for testing and debugging. Collects output in
 * a list so it is easy to examine.
 *
 * `f` defaults to this instance; tests may point it at a chain of functions
 * that ultimately feeds back into this collector.
 */
class ListValueFunction extends ValueFunction {
  // Accumulates samples received since the last call to `update`.
  private var builder = List.newBuilder[(Long, Double)]
  // Head of the function chain exercised by `update`; defaults to this sink.
  var f: ValueFunction = this
  /**
   * Resets the accumulator, pushes one sample through `f`, and returns every
   * sample that reached this collector as a result.
   */
  def update(timestamp: Long, value: Double): List[(Long, Double)] = {
    builder = List.newBuilder[(Long, Double)]
    f(timestamp, value)
    builder.result()
  }
  /** Records a sample; invoked directly or at the end of the `f` chain.
    * Procedure syntax and paren-less `result` replaced with modern forms. */
  def apply(timestamp: Long, value: Double): Unit = {
    builder += timestamp -> value
  }
}
| rspieldenner/atlas | atlas-core/src/main/scala/com/netflix/atlas/core/norm/ValueFunction.scala | Scala | apache-2.0 | 1,210 |
package org.bitcoins.core.protocol.script
import org.bitcoins.core.crypto.ECPrivateKey
import org.bitcoins.core.script.bitwise.OP_EQUALVERIFY
import org.bitcoins.core.script.constant.{ScriptConstant, ScriptNumber, BytesToPushOntoStack, ScriptToken}
import org.bitcoins.core.script.crypto.{OP_CHECKSIG, OP_HASH160}
import org.bitcoins.core.script.locktime.OP_CHECKLOCKTIMEVERIFY
import org.bitcoins.core.script.stack.{OP_DROP, OP_DUP}
import org.bitcoins.core.util.TestUtil
import org.scalatest.{FlatSpec, MustMatchers}
/**
* Created by tom on 9/21/16.
*/
/**
 * Created by tom on 9/21/16.
 * Tests construction and decomposition of CLTV (OP_CHECKLOCKTIMEVERIFY)
 * script pubkeys wrapping an underlying P2PKH script.
 */
class CLTVScriptPubKeyTest extends FlatSpec with MustMatchers {
  // asm of the underlying P2PKH script that the CLTV prefix wraps.
  val expectedAsm : Seq[ScriptToken] =
    List(OP_DUP, OP_HASH160, BytesToPushOntoStack(20), ScriptConstant("31a420903c05a0a7de2de40c9f02ebedbacdc172"), OP_EQUALVERIFY, OP_CHECKSIG)
  //from b30d3148927f620f5b1228ba941c211fdabdae75d0ba0b688a58accbf018f3cc
  val rawScriptPubKey = TestUtil.rawP2PKHScriptPubKey
  val scriptPubKey = ScriptPubKey(rawScriptPubKey)
  "CLTVScriptPubKey" must "return the expected asm from hex" in {
    // Locktime 1471022891 little-endian-encodes to "2b07ae57" (4 bytes pushed).
    val expectedCLTVAsm : Seq[ScriptToken] =
      List(BytesToPushOntoStack(4), ScriptConstant("2b07ae57"), OP_CHECKLOCKTIMEVERIFY, OP_DROP) ++ expectedAsm
    val cltv = CLTVScriptPubKey(ScriptNumber(1471022891), scriptPubKey)
    cltv.asm must be (expectedCLTVAsm)
  }
  it must "determine the correct underlying scriptPubKey, and locktime inside a CLTVScriptPubKey" in {
    val scriptNum17 = ScriptNumber(17)
    val scriptNum5 = ScriptNumber(5)
    val negativeOne = ScriptNumber(-1)
    val pubKey = ECPrivateKey().publicKey
    val p2pkh = P2PKHScriptPubKey(pubKey)
    // The wrapped script must round-trip regardless of the locktime value.
    CLTVScriptPubKey(scriptNum17, p2pkh).scriptPubKeyAfterCLTV must be (p2pkh)
    CLTVScriptPubKey(scriptNum5, p2pkh).scriptPubKeyAfterCLTV must be (p2pkh)
    CLTVScriptPubKey(negativeOne, p2pkh).scriptPubKeyAfterCLTV must be (p2pkh)
    CLTVScriptPubKey(scriptNum17, p2pkh).locktime must be (scriptNum17)
    CLTVScriptPubKey(scriptNum5, p2pkh).locktime must be (scriptNum5)
  }
}
| SuredBits/bitcoin-s-sidechains | src/test/scala/org/bitcoins/core/protocol/script/CLTVScriptPubKeyTest.scala | Scala | mit | 2,015 |
/* *\\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2015, Gary Keorkunian **
** **
\\* */
package squants
import org.scalatest.{ Matchers, FlatSpec }
/**
* @author garyKeorkunian
* @since 0.1
*
*/
/**
 * @author garyKeorkunian
 * @since 0.1
 *
 * Verifies that each SI prefix constant carries its expected power of ten.
 */
class MetricSystemSpec extends FlatSpec with Matchers {
  "The Metric System multipliers" should "convert as expected" in {
    import MetricSystem._
    Yocto should be(1e-24)
    Zepto should be(1e-21)
    Atto should be(1e-18)
    Femto should be(1e-15)
    Pico should be(1e-12)
    Nano should be(1e-9)
    Micro should be(1e-6)
    Milli should be(1e-3)
    Centi should be(1e-2)
    Deci should be(1e-1)
    Deca should be(1e1)
    Hecto should be(1e2)
    Kilo should be(1e3)
    Mega should be(1e6)
    Tera should be(1e12)
    Peta should be(1e15)
    Exa should be(1e18)
    Giga should be(1e9)
    Zetta should be(1e21)
    Yotta should be(1e24)
  }
}
| rmihael/squants | shared/src/test/scala/squants/MetricSystemSpec.scala | Scala | apache-2.0 | 1,309 |
package org.bitcoins.core.protocol.transaction
import org.bitcoins.core.gen.WitnessGenerators
import org.scalacheck.{ Prop, Properties }
/**
* Created by chris on 11/28/16.
*/
/**
 * Created by chris on 11/28/16.
 * Property test: parsing a witness's hex with its element count must
 * reproduce the original witness (serialization round-trip).
 */
class TransactionWitnessSpec extends Properties("TransactionWitnessSpec") {
  property("serialization symmetry") = {
    Prop.forAll(WitnessGenerators.transactionWitness) { witness =>
      TransactionWitness(witness.hex, witness.witnesses.size) == witness
    }
  }
}
| Christewart/bitcoin-s-core | src/test/scala/org/bitcoins/core/protocol/transaction/TransactionWitnessSpec.scala | Scala | mit | 450 |
/*
* Copyright 2017 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.featran.transformers
import breeze.stats.distributions.VonMises
import com.spotify.featran.{FeatureBuilder, FlatReader, FlatWriter}
import com.twitter.algebird.Aggregator
/**
 * Transform a column of continuous features that represent the mean of a von Mises distribution to
 * n columns of continuous features. The number n represent the number of points to evaluate the von
 * Mises distribution. The von Mises pdf is given by
 *
 * f(x | mu, kappa, scale) = exp(kappa * cos(scale*(x-mu)) / (2*pi*Io(kappa))
 *
 * and is only valid for x, mu in the interval [0, 2*pi/scale].
 */
object VonMisesEvaluator extends SettingsBuilder {
  /**
   * Create a new [[VonMisesEvaluator]] instance.
   * @param kappa
   *   measure of concentration
   * @param scale
   *   scaling factor
   * @param points
   *   points to evaluate the distribution with
   */
  def apply(
    name: String,
    kappa: Double,
    scale: Double,
    points: Array[Double]
  ): Transformer[Double, Unit, Unit] =
    new VonMisesEvaluator(name, kappa, scale, points)
  /**
   * Create a new [[VonMisesEvaluator]] from a settings object
   * @param setting
   *   Settings object
   */
  def fromSettings(setting: Settings): Transformer[Double, Unit, Unit] = {
    val params = setting.params
    val k = params("kappa").toDouble
    val s = params("scale").toDouble
    // Inverse of `params`' "[a,b,c]" encoding: strip brackets, split on commas.
    // NOTE(review): an empty "[]" would make toDouble throw — presumably points
    // are always non-empty (the transformer itself calls points.max); confirm.
    val str = params("points")
    val points = str.slice(1, str.length - 1).split(",").map(_.toDouble)
    VonMisesEvaluator(setting.name, k, s, points)
  }
  /** Evaluates the von Mises pdf with mean `mu` at `x`, both in scaled coordinates. */
  def getProbability(x: Double, mu: Double, kappa: Double, scale: Double): Double = {
    val muScaled = mu * scale
    val vm = VonMises(muScaled, kappa)
    vm.pdf(scale * x)
  }
}
private[featran] class VonMisesEvaluator(
  name: String,
  val kappa: Double,
  val scale: Double,
  val points: Array[Double]
) extends Transformer[Double, Unit, Unit](name) {
  // Evaluation points (and later mu) must lie in the pdf's valid domain
  // [0, 2*pi/scale]; checking the max is sufficient for the upper bound.
  private val pMax = points.max
  private val upperBound = 2 * math.Pi / scale
  checkRange("point", pMax, 0.0, upperBound)
  // Stateless transformer: no aggregation across rows is needed.
  override val aggregator: Aggregator[Double, Unit, Unit] =
    Aggregators.unit[Double]
  override def featureDimension(c: Unit): Int = points.length
  override def featureNames(c: Unit): Seq[String] = names(points.length)
  // Emits one pdf value per evaluation point; missing input skips all columns.
  override def buildFeatures(a: Option[Double], c: Unit, fb: FeatureBuilder[_]): Unit = a match {
    case Some(mu) =>
      checkRange("mu", mu, 0.0, upperBound)
      val probs =
        points.map(VonMisesEvaluator.getProbability(_, mu, kappa, scale))
      fb.add(names(points.length), probs)
    case None => fb.skip(points.length)
  }
  override def encodeAggregator(c: Unit): String = ""
  override def decodeAggregator(s: String): Unit = ()
  // Must stay in sync with the parsing in VonMisesEvaluator.fromSettings.
  override def params: Map[String, String] =
    Map(
      "kappa" -> kappa.toString,
      "scale" -> scale.toString,
      "points" -> points.mkString("[", ",", "]")
    )
  override def flatRead[T: FlatReader]: T => Option[Any] = FlatReader[T].readDouble(name)
  override def flatWriter[T](implicit fw: FlatWriter[T]): Option[Double] => fw.IF =
    fw.writeDouble(name)
}
| spotify/featran | core/src/main/scala/com/spotify/featran/transformers/VonMisesEvaluator.scala | Scala | apache-2.0 | 3,660 |
/*
* Copyright (C) 2010 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.core.workflow.mole
import org.openmole.core.context._
import org.openmole.core.exception._
import org.openmole.core.expansion.FromContext
import org.openmole.core.workflow.job.RuntimeTask
import org.openmole.core.workflow.task._
import org.openmole.core.workflow.tools.DefaultSet
import org.openmole.core.workflow.transition._
import org.openmole.core.workflow.validation._
import org.openmole.tool.random._
object MoleCapsule {
  /** Marker for master capsules: names of variables persisted across executions. */
  case class Master(persist: Seq[String])
  def apply(task: Task, strain: Boolean = false, funnel: Boolean = false, master: Option[Master] = None) =
    new MoleCapsule(task, strain = strain, funnel = funnel || MoleTask.isMoleTask(task), master = master)
  def isStrainer(c: MoleCapsule) = c.strain
  /* Test wether there is a path from this slot reaching the root of the mole without looping to the capsule it is bounded to */
  def reachRootWithNoLoop(mole: Mole)(slot: TransitionSlot): Boolean = {
    def previousCapsules(s: TransitionSlot) = mole.inputTransitions(s).map { _.start }
    def loopToCapsule(s: TransitionSlot) = previousCapsules(s).exists(_ == slot.capsule)
    // Iterative depth-first search backwards through the transition graph.
    var reachRoot = false
    val seen = collection.mutable.Set[MoleCapsule]()
    val toProceed = collection.mutable.Stack[TransitionSlot]()
    toProceed.pushAll(previousCapsules(slot).flatMap(mole.slots))
    while (!reachRoot && !toProceed.isEmpty) {
      val s = toProceed.pop()
      // Abandon any path that loops back into the capsule owning `slot`.
      if (!loopToCapsule(s)) {
        if (s.capsule == mole.root) reachRoot = true
        else {
          val capsules = previousCapsules(s)
          for {
            c ← capsules
            if !seen.contains(c)
            s ← mole.slots(c)
          } toProceed.push(s)
          seen ++= capsules
        }
      }
    }
    reachRoot
  }
  /** Computes the set of prototypes a capsule receives from its incoming slots. */
  def received(capsule: MoleCapsule, mole: Mole, sources: Sources, hooks: Hooks): PrototypeSet =
    if (capsule == mole.root) mole.inputs
    else {
      val slots = mole.slots(capsule)
      // Only consider slots with a loop-free path to the root.
      val noStrainer = slots.toSeq.filter(s ⇒ MoleCapsule.reachRootWithNoLoop(mole)(s))
      val bySlot =
        for {
          slot ← noStrainer
          received = TypeUtil.validTypes(mole, sources, hooks)(slot)
        } yield received.map(_.toVal)
      val allNames = bySlot.toSeq.flatMap(_.map(_.name)).distinct
      val byName = bySlot.map(_.toSeq.groupBy(_.name).withDefaultValue(Seq.empty))
      def haveAllTheSameType(ps: Seq[Val[_]]) = ps.map(_.`type`).distinct.size == 1
      def inAllSlots(ps: Seq[Val[_]]) = ps.size == noStrainer.size
      // Keep a prototype only if it arrives through every slot with one consistent type.
      val prototypes =
        for {
          name ← allNames
          inSlots = byName.map(_(name).toSeq).toSeq
          if inSlots.forall(haveAllTheSameType)
          oneBySlot = inSlots.map(_.head)
          if inAllSlots(oneBySlot) && haveAllTheSameType(oneBySlot)
        } yield oneBySlot.head
      prototypes
    }
}
/**
 * A capsule containing a task.
 *
 * @param _task task inside this capsule
 * @param strain true if this capsule let pass all data through
 * @param funnel true if this capsule aggregates inputs from several slots (e.g. wraps a MoleTask)
 * @param master when set, marks this capsule as a master capsule persisting the listed variables
 */
class MoleCapsule(val _task: Task, val strain: Boolean, val funnel: Boolean, val master: Option[MoleCapsule.Master]) {
  def task(mole: Mole, sources: Sources, hooks: Hooks) = runtimeTask(mole, sources, hooks).task
  // Wraps the task for execution; a MoleTask's inner mole is widened with this capsule's inputs.
  def runtimeTask(mole: Mole, sources: Sources, hooks: Hooks) = {
    val withInputs =
      _task match {
        case task: MoleTask ⇒ (MoleTask.mole composeLens Mole.inputs) modify (_ ++ inputs(mole, sources, hooks)) apply task
        case task ⇒ task
      }
    RuntimeTask(withInputs, strain)
  }
  /**
   * Get the inputs data taken by this capsule, generally it is empty if the capsule
   * is empty or the input of the task inside the capsule. It can be different
   * in some cases.
   *
   * @return the input of the capsule
   */
  def inputs(mole: Mole, sources: Sources, hooks: Hooks): PrototypeSet = {
    if (strain || funnel) receivedInputs(mole, sources, hooks) ++ capsuleInputs(mole, sources, hooks)
    else capsuleInputs(mole, sources, hooks)
  }
  /**
   * Get the outputs data taken by this capsule, generally it is empty if the capsule
   * is empty or the output of the task inside the capsule. It can be different
   * in some cases.
   *
   * @return the output of the capsule
   */
  def outputs(mole: Mole, sources: Sources, hooks: Hooks): PrototypeSet =
    if (strain) strainedOutputs(mole, sources, hooks) ++ capsuleOutputs(mole, sources, hooks)
    else capsuleOutputs(mole, sources, hooks)
  // Task inputs, with source-provided outputs removed and source inputs required instead.
  def capsuleInputs(mole: Mole, sources: Sources, hooks: Hooks): PrototypeSet =
    _task.inputs -- sources(this).flatMap(_.outputs) -- sources(this).flatMap(_.inputs) ++ sources(this).flatMap(_.inputs)
  // Task outputs, with hook outputs taking precedence over the task's own.
  def capsuleOutputs(mole: Mole, sources: Sources, hooks: Hooks): PrototypeSet =
    _task.outputs -- hooks(this).flatMap(_.outputs) ++ hooks(this).flatMap(_.outputs)
  // Data flowing in from upstream that the task itself does not declare.
  private def receivedInputs(mole: Mole, sources: Sources, hooks: Hooks) = {
    lazy val capsInputs = capsuleInputs(mole, sources, hooks)
    MoleCapsule.received(this, mole, sources, hooks).filterNot(d ⇒ capsInputs.contains(d.name))
  }
  // Data passed straight through by a strainer capsule, beyond the task's own outputs.
  private def strainedOutputs(mole: Mole, sources: Sources, hooks: Hooks) = {
    lazy val capsOutputs = capsuleOutputs(mole, sources, hooks)
    MoleCapsule.received(this, mole, sources, hooks).filterNot(d ⇒ capsOutputs.contains(d.name))
  }
  override def toString =
    (if (!strain) "capsule" else "strainerCapsule") + s"@$hashCode:${_task}"
}
/** Convenience factory for a capsule that lets all upstream data pass through. */
object StrainerCapsule {
  def apply(task: Task) = MoleCapsule(task, strain = true)
}
/** Factory for master capsules, which persist selected variables between executions. */
object MasterCapsule {
  def apply(task: Task, persist: Seq[Val[_]], strain: Boolean) = MoleCapsule(task, strain = strain, master = Some(MoleCapsule.Master(persist.map(_.name))))
  def apply(t: Task, persist: Val[_]*): MoleCapsule = apply(t, persist, false)
  // Extracts the persisted variables from a context, failing fast if one is absent.
  def toPersist(master: MoleCapsule.Master, context: Context): Context = master.persist.map { n ⇒ context.variables.getOrElse(n, throw new UserBadDataError(s"Variable $n has not been found in the context")) }
}
| openmole/openmole | openmole/core/org.openmole.core.workflow/src/main/scala/org/openmole/core/workflow/mole/MoleCapsule.scala | Scala | agpl-3.0 | 6,713 |
package com.edinhodzic.service.repository
import com.edinhodzic.service.domain.Identifiable
import scala.util.Try
/**
 * Partial CRUD contract (create/read/delete) over an identifiable resource.
 * Operations return `Try`; `Option` distinguishes a missing resource from failure.
 */
trait AbstractPartialCrudRepository[T <: Identifiable] {
  /** Persists a new resource and returns it. */
  def create(resource: T): Try[T]
  /** Looks a resource up by id; `None` when it does not exist. */
  def read(resourceId: String): Try[Option[T]]
  //def update(resource: T): Try[Option[T]]
  /** Deletes by id; `None` when nothing matched. */
  def delete(resourceId: String): Try[Option[Unit]]
} | edinhodzic/jersey-rest-service | src/main/scala/com/edinhodzic/service/repository/AbstractPartialCrudRepository.scala | Scala | apache-2.0 | 357 |
/*
* Copyright 2017-2018 Azad Bolour
* Licensed under GNU Affero General Public License v3.0 -
* https://github.com/azadbolour/boardgame/blob/master/LICENSE.md
*/
package com.bolour.boardgame.scala.server.service
import com.bolour.util.scala.server.BasicServerUtil.stringId
import com.bolour.boardgame.scala.server.domain.Player
import com.typesafe.config.ConfigFactory
import com.bolour.boardgame.scala.common.domain._
import com.bolour.plane.scala.domain.Point
import org.scalatest.{FlatSpec, Matchers}
import org.slf4j.LoggerFactory
/** Tests the game service's play commitment logic on a tiny 5x5 board,
  * checking that crosswords formed by a play are validated against the
  * dictionary. */
class GameServiceTest extends FlatSpec with Matchers {
  val logger = LoggerFactory.getLogger(this.getClass)
  val dimension = 5
  val trayCapacity = 2
  val center = dimension / 2
  val tinyLang = "tiny"
  val name = "John"
  val genType = PieceProviderType.Random
  val gameParams = GameParams(dimension, trayCapacity, tinyLang, name, genType)
  val service = new GameServiceImpl(ConfigFactory.load())
  service.migrate()
  service.addPlayer(Player(stringId, name))
  // Helper: a piece with a fresh id placed at (row, col).
  def piecePoint(letter: Char, row: Int, col: Int) = PiecePoint(Piece(letter, stringId()), Point(row, col))
  val top = piecePoint('S', center - 1, center)
  val bottom = piecePoint('T', center + 1, center)
  // Initial board layout around the center:
  // _ S _
  // B E T
  // _ T _
  val piecePoints = List(
    piecePoint('B', center, center - 1),
    piecePoint('E', center, center),
    piecePoint('T', center, center + 1),
    top,
    bottom,
  )
  // Starts a game with the fixed layout and commits the given play.
  def startGameAndCommitPlay(initUserPieces: List[Piece], playPieces: List[PlayPiece]) = {
    val initPieces = InitPieces(piecePoints, initUserPieces, List())
    val pointValues = List.fill(dimension, dimension)(1)
    for {
      game <- service.startGame(gameParams, initPieces, pointValues)
      (score, replacementPieces, deadPoints) <- service.commitPlay(game.gameBase.gameId, playPieces)
    } yield (game, score, replacementPieces)
  }
  "game service" should "accept valid crosswords" in {
    // Allow only O to be used.
    val uPieces = List(Piece('O', stringId()), Piece('O', stringId()))
    val playPieces = List(
      PlayPiece(bottom.piece, bottom.point, false),
      // Add O to the bottom right getting word TO and crossword TO (which is valid).
      PlayPiece(uPieces(0), Point(center + 1, center + 1), true)
    )
    for {
      (game, _, replacementPieces) <- startGameAndCommitPlay(uPieces, playPieces)
      // One piece was played from the tray, so one replacement is drawn.
      _ = replacementPieces.length shouldBe 1
    } yield game
  }
  "game service" should "reject invalid crosswords" in {
    // Allow only O to be used.
    val uPieces = List(Piece('O', stringId()), Piece('O', stringId()))
    val playPieces = List(
      PlayPiece(top.piece, top.point, false),
      // Add O to the top right getting word SO and crossword OT (which is invalid).
      PlayPiece(uPieces(0), Point(center - 1, center + 1), true)
    )
    val tried = startGameAndCommitPlay(uPieces, playPieces)
    tried.isFailure shouldBe true
  }
}
| azadbolour/boardgame | scala-server/test/com/bolour/boardgame/scala/server/service/GameServiceTest.scala | Scala | agpl-3.0 | 2,921 |
package io.youi.spatial
/**
 * A 2D point with mutable and immutable implementations. Scalar and
 * point-wise operations delegate to `set`, so mutability semantics follow
 * the concrete subtype. Equality uses the `<=>` tolerant double comparison.
 */
sealed trait Point extends SpatialValue[Point] {
  def x: Double
  def y: Double
  /** Returns a point holding the given coordinates (this instance if mutable). */
  def set(x: Double, y: Double): Point
  def set(that: Point): Point = set(that.x, that.y)
  def duplicate(): Point
  override def equals(obj: scala.Any): Boolean = obj match {
    case that: Point => x <=> that.x && y <=> that.y
    case _ => false
  }
  override def toString: String = s"Point(x: $x, y: $y)"
  def ==(other: Point): Boolean = x <=> other.x && y <=> other.y
  /*
    scalar operations
  */
  def +(scalar: Double): Point = set(x + scalar, y + scalar)
  def -(scalar: Double): Point = set(x - scalar, y - scalar)
  def *(scalar: Double): Point = set(x * scalar, y * scalar)
  def /(scalar: Double): Point = set(x / scalar, y / scalar)
  /*
    Point operations
    Note: There is no such thing as "dividing" two vectors.
  */
  def +(other: Point): Point = set(x + other.x, y + other.y)
  def -(other: Point): Point = set(x - other.x, y - other.y)
  def *(other: Point): Point = set(x * other.x, y * other.y)
  /** Rotates around the origin; `value` is in turns (1.0 = full revolution). */
  def rotate(value: Double): Point = {
    val radians = value * (math.Pi * 2.0)
    val sin = math.sin(radians)
    val cos = math.cos(radians)
    val rx = x * cos - y * sin
    val ry = x * sin + y * cos
    set(rx, ry)
  }
}
/** Point whose coordinates are updated in place by `set`. */
class MutablePoint(var x: Double = 0.0, var y: Double = 0.0) extends Point {
  // NOTE: the default arguments resolve to the current field values, so
  // calling `set()` with an omitted argument leaves that coordinate unchanged.
  override def set(x: Double = x, y: Double = y): Point = {
    this.x = x
    this.y = y
    this
  }
  override def duplicate(): Point = new MutablePoint(x, y)
  override def isMutable: Boolean = true
  override def mutable: MutablePoint = this
  override def immutable: ImmutablePoint = ImmutablePoint(x, y)
}
/** Value-semantics point: every `set` produces a fresh instance. */
case class ImmutablePoint(x: Double = 0.0, y: Double = 0.0) extends Point {
  override def set(x: Double, y: Double): Point = ImmutablePoint(x, y)
  // copy() yields a structurally identical new instance.
  override def duplicate(): Point = copy()
  override def isMutable: Boolean = false
  override def mutable: MutablePoint = new MutablePoint(x, y)
  override def immutable: ImmutablePoint = this
}
object Point {
  /** Shared origin instance (immutable, safe to reuse). */
  lazy val zero: Point = apply()

  /** Creates an immutable point — the default representation. */
  def apply(x: Double = 0.0, y: Double = 0.0): Point = ImmutablePoint(x, y)

  /** Creates a point whose coordinates can be updated in place. */
  def mutable(x: Double = 0.0, y: Double = 0.0): MutablePoint = new MutablePoint(x, y)
}
package blended.itestsupport.jolokia
import akka.actor.{ActorSystem, Props}
import blended.itestsupport.condition.AsyncCondition
import blended.jolokia.{JolokiaClient, JolokiaObject, JolokiaVersion}
import blended.util.logging.Logger
import scala.concurrent.duration.FiniteDuration
import scala.util.{Success, Try}
/**
 * Factory for an [[AsyncCondition]] that is satisfied once the Jolokia agent
 * behind `client` responds; `t` is an optional overall timeout.
 */
object JolokiaAvailableCondition {
  def apply(
    client : JolokiaClient,
    t: Option[FiniteDuration] = None,
  )(implicit actorSys: ActorSystem) =
    AsyncCondition(Props(JolokiaAvailableChecker(client)), s"JolokiaAvailableCondition(${client.url})", t)
}
/** Companion factory; construction is restricted to the jolokia package. */
private[jolokia] object JolokiaAvailableChecker {
  def apply(
    client : JolokiaClient
  ): JolokiaAvailableChecker = new JolokiaAvailableChecker(client)
}
/**
 * Checker that probes a Jolokia agent; the condition holds once the agent
 * reports its version.
 */
private[jolokia] class JolokiaAvailableChecker(
  client: JolokiaClient
) extends JolokiaChecker(client) {

  private val log : Logger = Logger[JolokiaAvailableChecker]

  // Fixed: removed the stray ']' so this description matches the condition
  // string built in JolokiaAvailableCondition.apply.
  override def toString: String = s"JolokiaAvailableCondition(${client.url})"

  /** The probe: ask the agent for its version. */
  override def exec(client: JolokiaClient): Try[JolokiaObject] = client.version

  /** Satisfied when the probe yielded a [[JolokiaVersion]]. */
  override def assertJolokia(obj: Try[JolokiaObject]): Boolean = obj match {
    case Success(v : JolokiaVersion) =>
      log.info(s"Jolokia [$v] discovered.")
      true
    case _ => false
  }
}
| woq-blended/blended | blended.itestsupport/src/main/scala/blended/itestsupport/jolokia/JolokiaAvailableChecker.scala | Scala | apache-2.0 | 1,269 |
/**
* Copyright 2013-2015, AlwaysResolve Project (alwaysresolve.org), MOYD.CO LTD
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package records
import io.netty.buffer.ByteBuf
import payload.Name
import org.slf4j.LoggerFactory
import payload.RRData
/**
 * In-memory representation of a DNS RRSIG resource record.
 *
 * @param signerName the signer's domain name as a list of label byte arrays
 * @param signature  the raw cryptographic signature bytes
 */
case class RRSIG(
  timetolive: Long,
  typeCovered: Short,
  algorithm: Byte,
  labels: Byte,
  originalTTL: Long,
  signatureExpiration: Long,
  signatureInception: Long,
  keyTag: Short,
  signerName: List[Array[Byte]],
  signature: Array[Byte]
) extends AbstractRecord {

  val description = "RRSIG"

  /**
   * Field-by-field equality.
   *
   * Fixed: `Array` values (and collections containing arrays) compare by
   * reference under `==`, so the original check only succeeded when both
   * records shared the very same array instances. The signer-name labels and
   * the signature are now compared element-wise with `sameElements`.
   */
  def isEqualTo(any: Any) = any match {
    case r: RRSIG =>
      r.timetolive == timetolive &&
      typeCovered == r.typeCovered &&
      algorithm == r.algorithm &&
      labels == r.labels &&
      originalTTL == r.originalTTL &&
      signatureExpiration == r.signatureExpiration &&
      signatureInception == r.signatureInception &&
      keyTag == r.keyTag &&
      signerName.length == r.signerName.length &&
      signerName.zip(r.signerName).forall { case (a, b) => a.sameElements(b) } &&
      signature.sameElements(r.signature)
    case _ => false
  }

  /** Serializes the RDATA fields in declaration order to wire format. */
  def toByteArray = RRData.shortToBytes(typeCovered) ++ Array[Byte](algorithm) ++ Array[Byte](labels) ++ RRData.intToBytes(originalTTL.toInt) ++
    RRData.intToBytes(signatureExpiration.toInt) ++ RRData.intToBytes(signatureInception.toInt) ++ RRData.shortToBytes(keyTag) ++ Name.toByteArray(signerName) ++ signature

  /** RRSIG data takes no part in name compression: append raw bytes, keep the offset map. */
  def toCompressedByteArray(input: (Array[Byte], Map[String, Int])) = {
    (input._1 ++ toByteArray, input._2)
  }
}
object RRSIG {
  val logger = LoggerFactory.getLogger("app")

  /**
   * Partially decodes an RRSIG record from the buffer.
   *
   * NOTE(review): only the fixed-size header fields are actually read; the
   * signer name and signature come back empty and the TTL is hard-coded to
   * 60 — hence the error log, this decoder is not expected to be invoked.
   */
  def apply(buf: ByteBuf, recordclass: Int, size: Int, offset: Int = 0) = {
    logger.error("Should not be called")
    val typeCovered = buf.readUnsignedShort.toShort // record type the signature covers
    val algorithm = buf.readByte() // DNSSEC algorithm number
    val labels = buf.readByte() // label count of the owner name
    val originalTTL = buf.readUnsignedInt()
    val signatureExpiration = buf.readUnsignedInt()
    val signatureInception = buf.readUnsignedInt()
    val keyTag = buf.readUnsignedShort.toShort
    val signerName = List(Array[Byte]()) // not parsed from the buffer
    val signature = Array[Byte]() // not parsed from the buffer
    new RRSIG(60, typeCovered, algorithm, labels, originalTTL, signatureExpiration, signatureInception, keyTag, signerName, signature)
  }
}
} | Moydco/AlwaysResolveDNS | src/main/scala/records/RRSIG.scala | Scala | apache-2.0 | 2,810 |
/**
* Copyright 2015 Eric Loots
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.sequoia.sastest
import com.sequoia.sastest.SASInterfaceHelpers._
/**
 * Sample client: connects to a SAS server, retrieves the SASHELP.CARS table
 * and converts every raw row into a typed [[tablerowspecs.Car]], printing each.
 */
object GetCars {
  def main(args: Array[String]): Unit = {
    import tablerowspecs.Car
    // Connection parameters come from the static SASConfig.
    val host: String = SASConfig.sasHostname
    val port: Int = SASConfig.sasPort
    val sasHost = SASHost(host, port)
    val sasCredentials = SASCredentials(SASConfig.sasUsername, SASConfig.sasPassword)
    // Establish the SAS session; the language handle, connection context and
    // admin interface are bound implicitly for the helper calls below.
    implicit val SASContext(sasLanguage, ctx, adm) = sasContext(sasHost.host, sasHost.port, sasCredentials.userName, sasCredentials.password)
    // columnMapping maps Car field position -> column index in the raw rows.
    val TestOutput(_, columnMapping, rawOutputData) =
      retrieveSasTable("SASHELP.CARS", Car)
    // The first raw row is skipped — presumably a header line; TODO confirm.
    val cars = rawOutputData.tail
      .map {
        case values =>
          //println(values.toList)
          val car = Car(
            // Cylinder count may be absent (empty string) for some models.
            { val cyls = values(columnMapping(0)); if (cyls == "") None else Some(cyls.toInt)},
            values(columnMapping(1)),
            values(columnMapping(2)).toDouble,
            values(columnMapping(3)).toInt,
            // Prices arrive formatted like "$36,945": strip '$' and ',' before parsing.
            values(columnMapping(4)).replaceAll("\\$","").replaceAll(",","").toInt,
            values(columnMapping(5)).toInt,
            values(columnMapping(6)).toInt,
            values(columnMapping(7)).toInt,
            values(columnMapping(8)).replaceAll("\\$","").replaceAll(",","").toInt,
            values(columnMapping(9)),
            values(columnMapping(10)),
            values(columnMapping(11)),
            values(columnMapping(12)),
            values(columnMapping(13)).toInt,
            values(columnMapping(14)).toInt
          )
          println(car)
          car
      }
    println(cars.mkString("\n"))
  }
}
| eloots/ScalaTestSas | src/main/scala/com/sequoia/sastest/GetCars.scala | Scala | apache-2.0 | 2,160 |
/*
,i::,
:;;;;;;;
;:,,::;.
1ft1;::;1tL
t1;::;1,
:;::; _____ __ ___ __
fCLff ;:: tfLLC / ___/ / |/ /____ _ _____ / /_
CLft11 :,, i1tffLi \\__ \\ ____ / /|_/ // __ `// ___// __ \\
1t1i .;; .1tf ___/ //___// / / // /_/ // /__ / / / /
CLt1i :,: .1tfL. /____/ /_/ /_/ \\__,_/ \\___//_/ /_/
Lft1,:;: , 1tfL:
;it1i ,,,:::;;;::1tti s_mach.concurrent
.t1i .,::;;; ;1tt Copyright (c) 2017 S-Mach, Inc.
Lft11ii;::;ii1tfL: Author: lance.gatlin@gmail.com
.L1 1tt1ttt,,Li
...1LLLL...
*/
package s_mach.concurrent.util
/**
 * Progress snapshot for a long-running computation.
 *
 * @param completed    number of iterations finished so far
 * @param optTotal     total number of iterations, or `None` when the size of
 *                     the computation is not known in advance
 * @param startTime_ns value of `System.nanoTime` captured when the
 *                     computation started
 */
case class Progress(completed: Int, optTotal: Option[Int], startTime_ns: Long)
| S-Mach/s_mach.concurrent | src/main/scala/s_mach/concurrent/util/Progress.scala | Scala | mit | 1,244 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import java.util.Properties
import scala.collection.JavaConversions._
import joptsimple.OptionParser
import metrics.KafkaMetricsReporter
import server.{KafkaConfig, KafkaServerStartable, KafkaServer}
import kafka.utils.{VerifiableProperties, CommandLineUtils, Logging}
import org.apache.kafka.common.utils.Utils
/**
 * Kafka broker entry point: parses server.properties (plus --override flags),
 * starts the broker and blocks until it shuts down.
 */
object Kafka extends Logging {

  /**
   * Builds the broker Properties from the command line: args(0) is the path
   * to server.properties; any further arguments must be
   * `--override property=value` pairs, which take precedence over the file.
   */
  def getPropsFromArgs(args: Array[String]): Properties = {
    val optionParser = new OptionParser
    val overrideOpt = optionParser.accepts("override", "Optional property that should override values set in server.properties file")
      .withRequiredArg()
      .ofType(classOf[String])
    if (args.length == 0) {
      CommandLineUtils.printUsageAndDie(optionParser, "USAGE: java [options] %s server.properties [--override property=value]*".format(classOf[KafkaServer].getSimpleName()))
    }
    val props = Utils.loadProps(args(0))
    if(args.length > 1) {
      val options = optionParser.parse(args.slice(1, args.length): _*)
      // Anything that is not part of an --override flag is a usage error.
      if(options.nonOptionArguments().size() > 0) {
        CommandLineUtils.printUsageAndDie(optionParser, "Found non argument parameters: " + options.nonOptionArguments().toArray.mkString(","))
      }
      // Overrides win over values loaded from the properties file.
      props.putAll(CommandLineUtils.parseKeyValueArgs(options.valuesOf(overrideOpt)))
    }
    props
  }

  def main(args: Array[String]): Unit = {
    try {
      val serverProps = getPropsFromArgs(args)
      val serverConfig = KafkaConfig.fromProps(serverProps)
      KafkaMetricsReporter.startReporters(new VerifiableProperties(serverProps))
      val kafkaServerStartable = new KafkaServerStartable(serverConfig)

      // attach shutdown handler to catch control-c
      Runtime.getRuntime().addShutdownHook(new Thread() {
        override def run() = {
          kafkaServerStartable.shutdown
        }
      })

      // Blocks here until the broker is shut down (signal or fatal error).
      kafkaServerStartable.startup
      kafkaServerStartable.awaitShutdown
    }
    catch {
      case e: Throwable =>
        fatal(e)
        System.exit(1)
    }
    System.exit(0)
  }
}
| usakey/kafka | core/src/main/scala/kafka/Kafka.scala | Scala | apache-2.0 | 2,823 |
package slick.compiler
import slick.ast.Library.AggregateFunctionSymbol
import slick.ast.TypeUtil._
import slick.ast.Util._
import slick.ast._
import slick.util.ConstArray
/** Rewrite aggregation function calls to Aggregate nodes. */
class CreateAggregates extends Phase {
  val name = "createAggregates"

  def apply(state: CompilerState) = {
    // Skip the phase entirely when symbol analysis proved there are no aggregates.
    if(state.get(Phase.assignUniqueSymbols).map(_.aggregate).getOrElse(true))
      state.map(_.replace({
        // Case 1: a bare aggregate function application over a collection —
        // wrap it in an Aggregate node selecting the single projected column
        // (or the constant 1 for CountAll).
        case n @ Apply(f: AggregateFunctionSymbol, ConstArray(from)) =>
          logger.debug("Converting aggregation function application", n)
          val CollectionType(_, elType @ Type.Structural(StructType(els))) = from.nodeType
          val s = new AnonSymbol
          val a = Aggregate(s, from, Apply(f, ConstArray(f match {
            case Library.CountAll => LiteralNode(1)
            case _ => Select(Ref(s) :@ elType, els.head._1) :@ els.head._2
          }))(n.nodeType)).infer()
          logger.debug("Converted aggregation function application", a)
          inlineMap(a)
        // Case 2: a mapping Bind whose projection contains scalar aggregates
        // that do not depend on the Bind's own row — lift each such aggregate
        // out and join it back in so it is evaluated once.
        case n @ Bind(s1, from1, Pure(sel1, ts1)) if !from1.isInstanceOf[GroupBy] =>
          val (sel2, temp) = liftAggregates(sel1, s1)
          if(temp.isEmpty) n else {
            logger.debug("Lifting aggregates into join in:", n)
            logger.debug("New mapping with temporary refs:", sel2)
            // Each lifted aggregate becomes an extra source joined onto the
            // original generator (dropped when the generator is an empty Pure).
            val sources = (from1 match {
              case Pure(StructNode(ConstArray()), _) => Vector.empty[(TermSymbol, Node)]
              case _ => Vector(s1 -> from1)
            }) ++ temp.map { case (s, n) => (s, Pure(n)) }
            // Fold the sources into a right-nested chain of cross joins.
            val from2 = sources.init.foldRight(sources.last._2) {
              case ((_, n), z) => Join(new AnonSymbol, new AnonSymbol, n, z, JoinType.Inner, LiteralNode(true))
            }.infer()
            logger.debug("New 'from' with joined aggregates:", from2)
            // Map every source symbol to its access path inside the nested
            // join tuples (ElementSymbol(1)/(2) navigation).
            val repl: Map[TermSymbol, List[TermSymbol]] = sources match {
              case Vector((s, n)) => Map(s -> List(s1))
              case _ =>
                val len = sources.length
                val it = Iterator.iterate(s1)(_ => ElementSymbol(2))
                sources.zipWithIndex.map { case ((s, _), i) =>
                  val l = List.iterate(s1, i+1)(_ => ElementSymbol(2))
                  s -> (if(i == len-1) l else l :+ ElementSymbol(1))
                }.toMap
            }
            logger.debug("Replacement paths: " + repl)
            val scope = Type.Scope(s1 -> from2.nodeType.asCollectionType.elementType)
            val replNodes = repl.transform((_, ss) => FwdPath(ss).infer(scope))
            logger.debug("Replacement path nodes: ", StructNode(ConstArray.from(replNodes)))
            // Rewrite the temporary refs in the projection to the join paths.
            val sel3 = sel2.replace({ case n @ Ref(s) => replNodes.getOrElse(s, n) }, keepType = true)
            val n2 = Bind(s1, from2, Pure(sel3, ts1)).infer()
            logger.debug("Lifted aggregates into join in:", n2)
            n2
          }
      }, keepType = true, bottomUp = true))
    else state
  }

  /** Recursively inline mapping Bind calls under an Aggregate */
  def inlineMap(a: Aggregate): Aggregate = a.from match {
    case Bind(s1, f1, Pure(StructNode(defs1), ts1)) if !f1.isInstanceOf[GroupBy] => // mergeToComprehensions always needs a Bind around a GroupBy
      logger.debug("Inlining mapping Bind under Aggregate", a)
      val defs1M = defs1.iterator.toMap
      // Substitute each field access through the Bind's symbol with the
      // field's defining expression, so the Bind can be dropped.
      val sel = a.select.replace({
        case FwdPath(s :: f :: rest) if s == a.sym =>
          rest.foldLeft(defs1M(f)) { case (n, s) => n.select(s) }.infer()
      }, keepType = true)
      val a2 = Aggregate(s1, f1, sel) :@ a.nodeType
      logger.debug("Inlining mapping Bind under Aggregate", a2)
      inlineMap(a2)
    case _ => a
  }

  /** Find all scalar Aggregate calls in a sub-tree that do not refer to the given Symbol,
    * and replace them by temporary Refs. */
  def liftAggregates(n: Node, outer: TermSymbol): (Node, Map[TermSymbol, Aggregate]) = n match {
    case a @ Aggregate(s1, f1, sel1) =>
      // An aggregate that references the outer row cannot be lifted.
      if(a.findNode {
        case n: PathElement => n.sym == outer
        case _ => false
      }.isDefined) (a, Map.empty)
      else {
        // Wrap the selection in a single-field struct and hand back a Ref to it.
        val s, f = new AnonSymbol
        val a2 = Aggregate(s1, f1, StructNode(ConstArray(f -> sel1))).infer()
        (Select(Ref(s) :@ a2.nodeType, f).infer(), Map(s -> a2))
      }
    case n :@ CollectionType(_, _) =>
      // Do not descend into nested collection-valued sub-queries.
      (n, Map.empty)
    case n =>
      // Recurse into children and merge their lifted aggregates.
      val mapped = n.children.map(liftAggregates(_, outer))
      val m = mapped.iterator.flatMap(_._2).toMap
      val n2 =
        if(m.isEmpty) n else n.withChildren(mapped.map(_._1)) :@ n.nodeType
      (n2, m)
  }
}
| slick/slick | slick/src/main/scala/slick/compiler/CreateAggregates.scala | Scala | bsd-2-clause | 4,610 |
/*
* Copyright (C) 2010 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.plugin.domain.modifier
import org.openmole.core.dsl._
import org.openmole.core.dsl.extension._
/** Evidence that a [[MapDomain]] is itself a discrete domain over the mapped
  * element type, delegating iteration, inputs and validation to the instance. */
object MapDomain {
  implicit def isDiscrete[D, I, O]: DiscreteFromContextDomain[MapDomain[D, I, O], O] = domain ⇒
    Domain(
      domain.iterator,
      domain.inputs,
      domain.validate
    )
}
/** Domain obtained by applying the context-dependent function `f` to every
  * element of the wrapped discrete domain `domain`. */
case class MapDomain[D, -I, +O](domain: D, f: FromContext[I ⇒ O])(implicit discrete: DiscreteFromContextDomain[D, I]) { d ⇒
  // Evaluate `f` once per context, then map it lazily over the underlying iterator.
  def iterator = FromContext { p ⇒
    import p._
    val fVal = f.from(context)
    discrete(domain).domain.from(context).map { fVal }
  }
  // Required inputs and validations combine those of the domain and of `f`.
  def inputs = discrete(domain).inputs ++ f.inputs
  def validate = discrete(domain).validate ++ f.validate
}
| openmole/openmole | openmole/plugins/org.openmole.plugin.domain.modifier/src/main/scala/org/openmole/plugin/domain/modifier/MapDomain.scala | Scala | agpl-3.0 | 1,423 |
package org.scalaide.core.internal.project
import java.net.URLClassLoader
import java.util.Properties
import java.util.zip.ZipEntry
import java.util.zip.ZipFile
import scala.Left
import scala.Right
import scala.collection.mutable.Set
import scala.tools.nsc.settings.ScalaVersion
import scala.util.Failure
import scala.util.Success
import scala.util.Try
import org.eclipse.core.runtime.FileLocator
import org.eclipse.core.runtime.IPath
import org.eclipse.core.runtime.Path
import org.eclipse.core.runtime.Platform
import org.eclipse.jdt.core.IClasspathEntry
import org.eclipse.jdt.core.JavaCore
import org.osgi.framework.Bundle
import org.osgi.framework.Version
import org.scalaide.core.IScalaInstallation
import org.scalaide.core.IScalaInstallationChoice
import org.scalaide.core.IScalaModule
import org.scalaide.core.internal.ScalaPlugin
import org.scalaide.util.eclipse.EclipseUtils
import org.scalaide.util.eclipse.OSGiUtils
import org.scalaide.util.internal.CompilerUtils.isBinarySame
import org.scalaide.util.internal.CompilerUtils.shortString
import sbt.internal.inc.ScalaInstance
/** Serializable tag recording how a Scala installation was discovered or defined. */
sealed trait ScalaInstallationLabel extends Serializable
/** Installation packaged as a single jar-carrying bundle shipped with the IDE. */
case class BundledScalaInstallationLabel() extends ScalaInstallationLabel
/** Installation assembled from several OSGi bundles sharing one version. */
case class MultiBundleScalaInstallationLabel() extends ScalaInstallationLabel
/** User-defined installation carrying a display label. */
case class CustomScalaInstallationLabel(label: String) extends ScalaInstallationLabel
/**
* A type that marks the choice of a Labeled Scala Installation : either a Scala Version,
* which will dereference to the latest available bundle with the same binary version, or
* a scala installation hashcode, which will dereference to the Labeled installation which
* hashes to it, if available.
*
* @see ScalaInstallation.resolve
*/
case class ScalaInstallationChoice(marker: Either[ScalaVersion, Int]) extends Serializable with IScalaInstallationChoice {

  /** Display form: the short (binary) version for a version choice, the hash for an installation choice. */
  override def toString() = marker match {
    case Left(version) => shortString(version)
    case Right(hash) => hash.toString
  }

  /** Choices are equal when they denote the same installation hash or
    * binary-compatible Scala versions. */
  override def equals(o: Any) = PartialFunction.cond(o) {
    case that: ScalaInstallationChoice => (marker, that.marker) match {
      case (Right(h1), Right(h2)) => h1 == h2
      case (Left(v1), Left(v2)) => isBinarySame(v1, v2)
      case _ => false
    }
  }

  // Fixed: equals is overridden, so hashCode must agree with it — the
  // case-class-generated hashCode distinguishes binary-compatible versions
  // that equals treats as equal. Hashing the rendered form keeps the
  // contract: hash choices render to the same number iff equal, and
  // binary-same versions render to the same short string.
  override def hashCode(): Int = toString().hashCode
}
/** Factories: a concrete installation is referenced by its content hash,
  * a plain version by its ScalaVersion. */
object ScalaInstallationChoice {
  def apply(si: LabeledScalaInstallation): ScalaInstallationChoice = ScalaInstallationChoice(Right(si.hashString.hashCode()))
  def apply(sv: ScalaVersion): ScalaInstallationChoice = ScalaInstallationChoice(Left(sv))
}
/**
* This class represents a valid Scala installation. It encapsulates
* a Scala version and paths to the standard Scala jar files:
*
* - scala-library.jar
* - scala-compiler.jar
* - scala-reflect.jar
* - others (actors, swing, etc.)
*/
trait ScalaInstallation extends IScalaInstallation {

  /** The version of Scala */
  def version: ScalaVersion

  /** The scala-compiler module of this installation. */
  def compiler: ScalaModule

  /** The scala-library module of this installation. */
  def library: ScalaModule

  /** Additional modules (reflect, swing, ...) shipped with this installation. */
  def extraJars: Seq[ScalaModule]

  /**
   * All jars provided by Scala (including the compiler)
   * @see The note in [[MultiBundleScalaInstallation]] below
   */
  def allJars: Seq[ScalaModule] =
    library +: compiler +: extraJars

  override def toString() =
    s"Scala $version: \\n\\t${allJars.mkString("\\n\\t")})"

  /** True when every module's jar (and source jar, if declared) exists on disk. */
  def isValid(): Boolean = {
    allJars forall (_.isValid())
  }
}
/**
* A tag for serializable tagging of Scala Installations
*/
trait LabeledScalaInstallation extends ScalaInstallation {
  /** Tag describing the provenance of this installation. */
  def label: ScalaInstallationLabel

  // to recover bundle-less Bundle values from de-serialized Scala Installations
  // this should be relaxed for bundles : our bundles are safe, having one with just the same version should be enough
  def similar(that: LabeledScalaInstallation): Boolean =
    this.label == that.label && this.compiler == that.compiler && this.library == that.library && this.extraJars.toSet == that.extraJars.toSet

  /** The display name, defined only for custom installations. */
  def getName(): Option[String] = PartialFunction.condOpt(label) { case CustomScalaInstallationLabel(tag) => tag }

  // Content fingerprint: the optional name followed by every module's hash string.
  def hashString: String = {
    val jarSeq = allJars map (_.hashString)
    getName().fold(jarSeq)(str => str +: jarSeq).mkString
  }

  override def hashCode() = hashString.hashCode()
  // NOTE(review): equality is defined as hashCode equality, so distinct
  // installations whose hash strings collide would compare equal — accepted
  // here, presumably because collisions are considered negligible.
  override def equals(o: Any) = PartialFunction.cond(o) { case lsi: LabeledScalaInstallation => lsi.hashCode() == this.hashCode() }
}
/** One Scala jar (e.g. scala-library) plus its optional source attachment. */
case class ScalaModule(classJar: IPath, sourceJar: Option[IPath]) extends IScalaModule {
  // Valid when the class jar — and the source jar, if declared — exist as files.
  def isValid(): Boolean = {
    sourceJar.fold(List(classJar))(List(_, classJar)) forall { path => path.toFile().isFile() }
  }
  /** JDT classpath entry for this module, with source attachment when available. */
  def libraryEntries(): IClasspathEntry = {
    JavaCore.newLibraryEntry(classJar, sourceJar.orNull, null)
  }
  // Paths are relativized against the plugin state location so the hash is
  // stable across workspace moves.
  private def relativizedString(path: IPath) = {
    path.makeRelativeTo(ScalaPlugin().getStateLocation()).toPortableString()
  }
  /** Stable fingerprint of this module: class-jar path plus source-jar path when present. */
  def hashString: String = sourceJar.map { relativizedString }.fold(relativizedString(classJar))(s => relativizedString(classJar) + s)
}
/** Factory that resolves the source jar of a bundle's class jar automatically. */
object ScalaModule {
  def apply(bundleId: String, classJar: IPath): ScalaModule = {
    ScalaModule(classJar, EclipseUtils.computeSourcePath(bundleId, classJar))
  }
}
/**
* Represent a version of Scala installed as a bundle containing the necessary jars.
*/
case class BundledScalaInstallation(
  override val version: ScalaVersion,
  bundle: Bundle,
  override val library: ScalaModule,
  override val compiler: ScalaModule) extends LabeledScalaInstallation {

  import BundledScalaInstallationLabel
  import BundledScalaInstallation._

  override val label = BundledScalaInstallationLabel()
  def osgiVersion = bundle.getVersion()

  // scala-reflect and scala-swing are optional: keep only the jars actually
  // present inside the bundle.
  override lazy val extraJars =
    Seq(
      findExtraJar(bundle, ScalaReflectPath, ScalaReflectSourcesPath),
      findExtraJar(bundle, ScalaSwingPath, ScalaSwingSourcesPath)).flatten

  // Locates a jar inside the bundle, attaching its source jar when found.
  private def findExtraJar(bundle: Bundle, classPath: String, sourcePath: String): Option[ScalaModule] = {
    OSGiUtils.pathInBundle(bundle, classPath).map { p =>
      ScalaModule(p, OSGiUtils.pathInBundle(bundle, sourcePath))
    }
  }
}
object BundledScalaInstallation {
  // Well-known jar locations inside a *.jars bundle.
  val ScalaLibraryPath = "target/jars/scala-library.jar"
  val ScalaLibrarySourcesPath = "target/jars/scala-library-src.jar"
  val ScalaCompilerPath = "target/jars/scala-compiler.jar"
  val ScalaCompilerSourcesPath = "target/jars/scala-compiler-src.jar"
  val ScalaReflectPath = "target/jars/scala-reflect.jar"
  val ScalaReflectSourcesPath = "target/jars/scala-reflect-src.jar"
  val ScalaSwingPath = "target/jars/scala-swing.jar"
  val ScalaSwingSourcesPath = "target/jars/scala-swing-src.jar"

  /** Builds an installation from a bundle, provided it contains at least a
    * scala-library (with a readable version) and a scala-compiler jar. */
  def apply(bundle: Bundle): Option[BundledScalaInstallation] = {
    for {
      scalaLibrary <- OSGiUtils.pathInBundle(bundle, ScalaLibraryPath)
      version <- ScalaInstallation.extractVersion(scalaLibrary)
      scalaCompiler <- OSGiUtils.pathInBundle(bundle, ScalaCompilerPath)
    } yield BundledScalaInstallation(
      version,
      bundle,
      ScalaModule(scalaLibrary, OSGiUtils.pathInBundle(bundle, ScalaLibrarySourcesPath)),
      ScalaModule(scalaCompiler, OSGiUtils.pathInBundle(bundle, ScalaCompilerSourcesPath)))
  }

  // Matches bundle names like "org.scala-ide.scala211.jars".
  val ScalaBundleJarsRegex = "org\\\\.scala-ide\\\\.scala[0-9]{3}\\\\.jars".r

  /**
   * Find and return the complete bundled Scala installations.
   */
  def detectBundledInstallations(): List[BundledScalaInstallation] = {
    // find the bundles with the right pattern
    val matchingBundles: List[Bundle] =
      ScalaPlugin().getBundle().getBundleContext().getBundles().to[List]
        .filter { b => ScalaBundleJarsRegex.unapplySeq(b.getSymbolicName()).isDefined }
    matchingBundles.flatMap(BundledScalaInstallation(_))
  }
}
/**
* Represent a version of Scala installed as a set of bundles, each bundle with an identical version.
*
* TODO: We SHOULD reuse the current class loader if this installation is the platform installation.
*
* @note We don't reuse it because of weird interactions between the OSGi classloader and the compiler-bridge.jar,
* resulting in AbstractMethodErrors. The `Reporter` interface is defined in scala-reflect, but implemented in
* compiler-bridge.jar (which is NOT a bundle), and `info0` is not seen.
*
* See ticket #1002175
*/
case class MultiBundleScalaInstallation(
  override val version: ScalaVersion,
  libraryBundleVersion: Version,
  override val library: ScalaModule,
  override val compiler: ScalaModule) extends LabeledScalaInstallation {

  import MultiBundleScalaInstallation._

  override val label = MultiBundleScalaInstallationLabel()
  def osgiVersion = libraryBundleVersion

  // reflect and swing bundles are optional; only the ones resolvable at the
  // library's bundle version are included.
  override lazy val extraJars = Seq(
    findLibraryForBundle(ScalaReflectBundleId, libraryBundleVersion),
    findLibraryForBundle(ScalaSwingBundleId, libraryBundleVersion)).flatten
}
object MultiBundleScalaInstallation {

  // Symbolic names of the per-module Scala bundles.
  val ScalaLibraryBundleId = "org.scala-lang.scala-library"
  val ScalaCompilerBundleId = "org.scala-lang.scala-compiler"
  val ScalaSwingBundleId = "org.scala-lang.scala-swing"
  val ScalaReflectBundleId = "org.scala-lang.scala-reflect"
  val ScalaXmlBundleId = "org.scala-lang.modules.scala-xml"
  val ScalaParserCombinatorsBundleId = "org.scala-lang.modules.scala-parser-combinators"

  // Absolute file-system path of an installed bundle.
  private def bundlePath(bundle: Bundle) =
    Path.fromOSString(FileLocator.getBundleFile(bundle).getAbsolutePath())

  /** Finds an installed bundle matching major.minor.micro exactly; the
    * qualifier only needs to be enclosed by the bundle's qualifier. */
  private def findBundle(bundleId: String, version: Version): Option[Bundle] = {
    def doesBundleVersionQualifierEncloseVersionQualifier(bundleQualifier: String, qualifier: String) =
      qualifier.intersect(bundleQualifier) == qualifier
    Option(Platform.getBundles(bundleId, null)).getOrElse(Array()).to[List].find { bundle =>
      val bundleVersion = bundle.getVersion
      bundleVersion.getMajor == version.getMajor &&
      bundleVersion.getMinor == version.getMinor &&
      bundleVersion.getMicro == version.getMicro &&
      doesBundleVersionQualifierEncloseVersionQualifier(bundleVersion.getQualifier, version.getQualifier)
    }
  }

  // Resolves a bundle to a ScalaModule (class jar + computed source path).
  private def findLibraryForBundle(bundleId: String, version: Version): Option[ScalaModule] = {
    val classPath = findBundle(bundleId, version).map(bundlePath)
    classPath.map(cp => ScalaModule(cp, EclipseUtils.computeSourcePath(bundleId, cp)))
  }

  /** Builds an installation around a scala-library bundle, requiring a
    * compiler bundle of the same version to be resolvable. */
  def apply(libraryBundle: Bundle): Option[MultiBundleScalaInstallation] = {
    val libraryBundleVersion = libraryBundle.getVersion()

    for {
      version <- ScalaInstallation.extractVersion(bundlePath(libraryBundle))
      library = bundlePath(libraryBundle)
      compiler <- findLibraryForBundle(ScalaCompilerBundleId, libraryBundleVersion)
    } yield MultiBundleScalaInstallation(
      version,
      libraryBundleVersion,
      ScalaModule(bundlePath(libraryBundle), EclipseUtils.computeSourcePath(ScalaLibraryBundleId, library)),
      compiler)
  }

  /** One installation per installed scala-library bundle (when complete). */
  def detectInstallations(): List[MultiBundleScalaInstallation] = {
    val scalaLibraryBundles = Platform.getBundles(ScalaLibraryBundleId, null).to[List]

    scalaLibraryBundles.flatMap(MultiBundleScalaInstallation(_))
  }
}
object ScalaInstallation {

  // Persists/restores user-defined installations across IDE sessions.
  val installationsTracker = new ScalaInstallationSaver()
  private def savedScalaInstallations() = Try(installationsTracker.getSavedInstallations())

  // De-serialized installations, keeping only valid ones that do not duplicate
  // a bundled or multi-bundle installation.
  lazy val initialScalaInstallations = savedScalaInstallations() match {
    case Success(sis) => sis filter (_.isValid()) filter { deserial => !(bundledInstallations ++ multiBundleInstallations exists (_.similar(deserial))) }
    // we need to silently fail, as this happens early in initialization
    case Failure(throwable) => Nil
  }

  // This lets you see installs retrieved from serialized bundles as newly-defined custom installations
  private def customize(install: LabeledScalaInstallation) = install.label match {
    case CustomScalaInstallationLabel(tag) => install
    case BundledScalaInstallationLabel() | MultiBundleScalaInstallationLabel() => new LabeledScalaInstallation() {
      override def label = new CustomScalaInstallationLabel(s"Scala (legacy with hash ${ScalaInstallationChoice(install).toString()})")
      override def compiler = install.compiler
      override def library = install.library
      override def extraJars = install.extraJars
      override def version = install.version
    }
  }

  /** Builds (and caches the class loader of) an sbt ScalaInstance for the installation. */
  def scalaInstanceForInstallation(si: IScalaInstallation): ScalaInstance = {
    val store = ScalaPlugin().classLoaderStore
    // One URLClassLoader per installation, shared through the plugin's store.
    val scalaLoader: ClassLoader = store.getOrUpdate(si)(new URLClassLoader(si.allJars.map(_.classJar.toFile.toURI.toURL).toArray, ClassLoader.getSystemClassLoader))
    new ScalaInstance(si.version.unparse, scalaLoader, si.library.classJar.toFile, si.compiler.classJar.toFile, si.extraJars.map(_.classJar.toFile).toArray, None)
  }

  lazy val customInstallations: Set[LabeledScalaInstallation] = initialScalaInstallations.map(customize(_))(collection.breakOut)

  /** Return the Scala installation currently running in Eclipse. */
  // NOTE(review): `.get` assumes a multi-bundle installation matching the
  // running compiler version always exists — true in a consistent product.
  lazy val platformInstallation: LabeledScalaInstallation =
    multiBundleInstallations.find(_.version == ScalaVersion.current).get

  lazy val bundledInstallations: List[LabeledScalaInstallation] =
    BundledScalaInstallation.detectBundledInstallations()

  lazy val multiBundleInstallations: List[LabeledScalaInstallation] =
    MultiBundleScalaInstallation.detectInstallations()

  /** IDE-provided installations only (no user-defined ones). */
  def availableBundledInstallations: List[LabeledScalaInstallation] = {
    multiBundleInstallations ++ bundledInstallations
  }

  /** All known installations, including user-defined ones. */
  def availableInstallations: List[LabeledScalaInstallation] = {
    multiBundleInstallations ++ bundledInstallations ++ customInstallations
  }

  val LibraryPropertiesPath = "library.properties"

  // For a jar named scala-<module>[-<version>].jar, the version lives in
  // <module>.properties instead of library.properties.
  def labelInFile(scalaPath: IPath): Option[String] = {
    val scalaJarRegex = """scala-(\\w+)(?:.2\\.\\d+(?:\\.\\d*)?(?:-.*)?)?.jar""".r
    scalaPath.toFile().getName() match {
      case scalaJarRegex(qualifier) => Some(qualifier + ".properties")
      case _ => None
    }
  }

  /** Reads `version.number` from the properties file embedded in a Scala jar. */
  def extractVersion(scalaLibrary: IPath): Option[ScalaVersion] = {
    val propertiesPath = labelInFile(scalaLibrary).getOrElse(LibraryPropertiesPath)
    val zipFile = new ZipFile(scalaLibrary.toFile())
    try {
      def getVersion(propertiesEntry: ZipEntry) = {
        val properties = new Properties()
        properties.load(zipFile.getInputStream(propertiesEntry))
        Option(properties.getProperty("version.number"))
      }

      for {
        propertiesEntry <- Option(zipFile.getEntry(propertiesPath))
        version <- getVersion(propertiesEntry)
      } yield ScalaVersion(version)
    } finally {
      zipFile.close()
    }
  }

  /** Dereferences a choice: a version picks the newest binary-compatible
    * bundled installation, a hash picks the matching known installation. */
  def resolve(choice: IScalaInstallationChoice): Option[LabeledScalaInstallation] = choice.marker match {
    case Left(version) => availableBundledInstallations.filter { si => isBinarySame(version, si.version) }.sortBy(_.version).lastOption
    case Right(hash) => availableInstallations.find(si => ScalaInstallationChoice(si).toString equals hash.toString())
  }
}
| sschaef/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/core/internal/project/ScalaInstallation.scala | Scala | bsd-3-clause | 14,821 |
package ru.biocad.ig.regions.annotators
import scala.collection.immutable
import scala.collection.mutable.ArrayBuffer
import ru.biocad.ig.igcont.Container
import ru.biocad.ig.common.io.MarkingUtils
import ru.biocad.ig.common.io.fasta.FastaReader
import java.io.File
import com.typesafe.scalalogging.LazyLogging
import ru.biocad.ig.common.io.common.Sequence
/**
* Created with IntelliJ IDEA.
* User: pavel
* Date: 13.01.14
* Time: 15:24
*/
/**
 * Helpers for annotating antibody sequences with framework/CDR region labels
 * and for converting between per-position annotations and region-bound markup.
 * Region indices 0..6 map to `regs`; 7 denotes "no region".
 */
object RegionAnnotatorUtils extends LazyLogging {
  // Region names in positional order; a region's index is its code.
  val regs = Array("FR1", "CDR1", "FR2", "CDR2", "FR3", "CDR3", "FR4")
  // Annotation key under which the region name is stored.
  val reg = "Region"

  /** Loads reference sequences from `fasta` into `cont` and applies the
    * region-bound annotations read from `kabat`. */
  def createRegionsContainer(cont : Container, fasta : File, kabat : File) : Unit = {
    cont.addAnnotations(reg, regs)
    // Load fasta
    logger.info(s"Loading reference sequences from fasta: ${fasta.getName}")
    new FastaReader(fasta).foreach(rec => cont.push(rec.sequence, rec.name))
    // Load kabat
    logger.info(s"Loading region bounds annotations: ${kabat.getName}")
    MarkingUtils.readMarking(kabat).foreach(tpl => {
      val (name, arr) = tpl
      try {
        val record = cont.record(name)
        processMarkup(arr, (pos, reg) => record.setAnnotation(pos, 0, reg))
      }
      catch {
        // A marking for a sequence absent from the fasta is skipped with a warning.
        case e : NoSuchElementException =>
          logger.warn(s"Record $name was not found in container")
      }
    })
  }

  /** Invokes `callback(position, regionIndex)` for every position covered by
    * a region; bounds of -1 mark an absent region. */
  def processMarkup(arr : Array[(Int, Int)], callback : (Int, Int) => Unit) : Unit = {
    arr.zipWithIndex.foreach(tpl => {
      val ((start, end), reg) = tpl
      if (start != -1 && end != -1) {
        (start to end).foreach(pos => {
          callback(pos, reg)
        })
      }
    })
  }

  // Annotation map -> region code; 7 when the position carries no region.
  def a2r(a : immutable.HashMap[String, String]) : Int =
    if (a.contains(reg))
      regs.indexOf(a(reg))
    else
      7

  /** Converts per-position (residue, annotations) pairs to region codes. */
  def annotations2regions(anno : Iterable[(Char, immutable.HashMap[String, String])]) : Iterable[Int] = {
    val result = ArrayBuffer.fill[Int](anno.size)(0)
    anno.zipWithIndex.foreach(tpl => {
      val (node, i) = tpl
      result(i) = a2r(node._2)
    })
    result
  }

  /** Renders the tab-separated start/end bounds for all seven regions. */
  def constructMarkup(anno : String) : String = {
    val r = constructMarkupArray(anno)
    val sb = new StringBuilder()
    (0 until r.size).foreach(i => {
      sb.append("\\t%d\\t%d".format(r(i)(0), r(i)(1)))
    })
    sb.toString()
  }

  /** Derives [start, end] 1-based bounds per region from a region-code string.
    * NOTE(review): the first occurrence sets start, later ones move end — a
    * region covering a single position keeps end at 0; verify callers expect this. */
  def constructMarkupArray(anno : String) : Array[Array[Int]] = {
    val r = Array.fill[Array[Int]](7)(Array.fill[Int](2)(0))
    anno.zipWithIndex.foreach(tpl => {
      val (c, i) = tpl
      val ci = c.asDigit
      if (ci != 7) {
        if (r(ci)(0) == 0) {
          r(ci)(0) = i + 1
        }
        else {
          r(ci)(1) = i + 1
        }
      }
    })
    r
  }

  /** Inverse of `constructMarkup`: rebuilds the per-position region-code string. */
  def restoreAnnotation(markup : String) : String = {
    val line = markup.split('\\t').filter(_ != "")
    val arr = MarkingUtils.markup2array(line)
    val anno = ArrayBuffer.empty[Int]
    processMarkup(arr, (_, reg) => anno += reg)
    anno.mkString
  }

  // Round-trip through markup; positions outside any region bound are dropped.
  def simpleFilter(anno : String) : String =
    restoreAnnotation(constructMarkup(anno))

  /** Like `simpleFilter`, but trims overlapping regions so each ends before
    * the next one starts; uncovered positions become code 7. */
  def savingSimpleFilter(anno : String) : String = {
    val r = constructMarkupArray(anno)
    (0 until r.size).foreach(i =>
      if (i != r.size - 1 && r(i)(1) > r(i + 1)(0)) {
        r(i)(1) = r(i + 1)(0) - 1
      }
    )
    val s = anno.map(_ => 7).toArray
    r.zipWithIndex.foreach(tpl => {
      val (pos, region) = tpl
      if (!(pos(0) == 0 && pos(1) == pos(0))) (pos(0) - 1 until pos(1)).foreach(i => s(i) = region)
    })
    s.mkString
  }

  /** Formats a fasta-like block listing reference names and scores. */
  def makeRefsString(rec : Sequence, references : Iterable[(String, Double)]) : String =
    "> %s\\n%s\\n\\n".format(rec.name, references.map(tpl =>
      "%s\\t%f\\n".format(tpl._1, tpl._2)).mkString
    )

  /** Extracts the requested regions' substrings, '$'-separated, or None when
    * the markup bounds are not strictly increasing. */
  def makeRegions(rec : Sequence, markup : String, regions : Iterable[Int]) : Option[String] = {
    // Bounds are sane when flattening all (start, end) pairs yields a strictly
    // increasing sequence.
    def check(arr : Array[(Int, Int)]) : Boolean = {
      implicit def tplTwo2traversable[T](a : (T, T)) : Traversable[T] = {
        Seq(a._1, a._2)
      }
      arr.flatten.sliding(2).forall(a => a.head < a.last)
    }
    val line = markup.split('\\t').filter(_ != "")
    val arr = MarkingUtils.markup2array(line)
    if (check(arr)) {
      val sb = StringBuilder.newBuilder
      regions.foreach(r => sb ++= rec.sequence.substring(arr(r)._1, arr(r)._2 + 1) + '$')
      // Drop the trailing '$' separator.
      Some(sb.mkString.dropRight(1))
    }
    else {
      None
    }
  }
}
| zmactep/igcat | ig-regions/src/main/scala/ru/biocad/ig/regions/annotators/RegionAnnotatorUtils.scala | Scala | bsd-2-clause | 4,321 |
package spark.scheduler.cluster
import java.io.{File, FileInputStream, FileOutputStream}
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashMap
import scala.collection.mutable.HashSet
import spark._
import spark.TaskState.TaskState
import spark.scheduler._
import java.nio.ByteBuffer
import java.util.concurrent.atomic.AtomicLong
/**
* The main TaskScheduler implementation, for running tasks on a cluster. Clients should first call
* start(), then submit task sets through the runTasks method.
*/
class ClusterScheduler(sc: SparkContext)
  extends TaskScheduler
  with Logging {
  // How often to check for speculative tasks
  val SPECULATION_INTERVAL = System.getProperty("spark.speculation.interval", "100").toLong
  // The mutable maps/sets below are accessed under `synchronized` on this object.
  val activeTaskSets = new HashMap[String, TaskSetManager]
  var activeTaskSetsQueue = new ArrayBuffer[TaskSetManager]
  val taskIdToTaskSetId = new HashMap[Long, String]
  val taskIdToSlaveId = new HashMap[Long, String]
  val taskSetTaskIds = new HashMap[String, HashSet[Long]]
  // Incrementing Mesos task IDs
  val nextTaskId = new AtomicLong(0)
  // Which hosts in the cluster are alive (contains hostnames)
  val hostsAlive = new HashSet[String]
  // Which slave IDs we have executors on
  val slaveIdsWithExecutors = new HashSet[String]
  val slaveIdToHost = new HashMap[String, String]
  // JAR server, if any JARs were added by the user to the SparkContext
  var jarServer: HttpServer = null
  // URIs of JARs to pass to executor
  var jarUris: String = ""
  // Listener object to pass upcalls into
  var listener: TaskSchedulerListener = null
  var backend: SchedulerBackend = null
  val mapOutputTracker = SparkEnv.get.mapOutputTracker
  override def setListener(listener: TaskSchedulerListener) {
    this.listener = listener
  }
  def initialize(context: SchedulerBackend) {
    backend = context
    createJarServer()
  }
  /** Returns a scheduler-unique, monotonically increasing task ID. */
  def newTaskId(): Long = nextTaskId.getAndIncrement()
  override def start() {
    backend.start()
    if (System.getProperty("spark.speculation", "false") == "true") {
      // Daemon thread that periodically polls for tasks to speculatively re-launch.
      new Thread("ClusterScheduler speculation check") {
        setDaemon(true)
        override def run() {
          while (true) {
            try {
              Thread.sleep(SPECULATION_INTERVAL)
            } catch {
              case e: InterruptedException => {}
            }
            checkSpeculatableTasks()
          }
        }
      }.start()
    }
  }
  /** Registers a new task set and asks the backend for resource offers. */
  def submitTasks(taskSet: TaskSet) {
    val tasks = taskSet.tasks
    logInfo("Adding task set " + taskSet.id + " with " + tasks.length + " tasks")
    this.synchronized {
      val manager = new TaskSetManager(this, taskSet)
      activeTaskSets(taskSet.id) = manager
      activeTaskSetsQueue += manager
      taskSetTaskIds(taskSet.id) = new HashSet[Long]()
    }
    backend.reviveOffers()
  }
  /** Removes all bookkeeping for a finished task set. */
  def taskSetFinished(manager: TaskSetManager) {
    this.synchronized {
      activeTaskSets -= manager.taskSet.id
      activeTaskSetsQueue -= manager
      taskIdToTaskSetId --= taskSetTaskIds(manager.taskSet.id)
      taskIdToSlaveId --= taskSetTaskIds(manager.taskSet.id)
      taskSetTaskIds.remove(manager.taskSet.id)
    }
  }
  /**
   * Called by cluster manager to offer resources on slaves. We respond by asking our active task
   * sets for tasks in order of priority. We fill each node with tasks in a round-robin manner so
   * that tasks are balanced across the cluster.
   */
  def resourceOffers(offers: Seq[WorkerOffer]): Seq[Seq[TaskDescription]] = {
    synchronized {
      // Mark each slave as alive and remember its hostname
      for (o <- offers) {
        slaveIdToHost(o.slaveId) = o.hostname
        hostsAlive += o.hostname
      }
      // Build a list of tasks to assign to each slave
      val tasks = offers.map(o => new ArrayBuffer[TaskDescription](o.cores))
      val availableCpus = offers.map(o => o.cores).toArray
      var launchedTask = false
      // Serve task sets in (priority, stageId) order; keep cycling through the
      // offers for a manager until it stops accepting tasks.
      for (manager <- activeTaskSetsQueue.sortBy(m => (m.taskSet.priority, m.taskSet.stageId))) {
        do {
          launchedTask = false
          for (i <- 0 until offers.size) {
            val sid = offers(i).slaveId
            val host = offers(i).hostname
            manager.slaveOffer(sid, host, availableCpus(i)) match {
              case Some(task) =>
                tasks(i) += task
                val tid = task.taskId
                taskIdToTaskSetId(tid) = manager.taskSet.id
                taskSetTaskIds(manager.taskSet.id) += tid
                taskIdToSlaveId(tid) = sid
                slaveIdsWithExecutors += sid
                availableCpus(i) -= 1
                launchedTask = true
              case None => {}
            }
          }
        } while (launchedTask)
      }
      return tasks
    }
  }
  /** Processes a task state change reported by the backend. */
  def statusUpdate(tid: Long, state: TaskState, serializedData: ByteBuffer) {
    // Collected under the lock, acted upon after releasing it (see note below).
    var taskSetToUpdate: Option[TaskSetManager] = None
    var failedHost: Option[String] = None
    var taskFailed = false
    synchronized {
      try {
        if (state == TaskState.LOST && taskIdToSlaveId.contains(tid)) {
          // We lost the executor on this slave, so remember that it's gone
          val slaveId = taskIdToSlaveId(tid)
          val host = slaveIdToHost(slaveId)
          if (hostsAlive.contains(host)) {
            slaveIdsWithExecutors -= slaveId
            hostsAlive -= host
            activeTaskSetsQueue.foreach(_.hostLost(host))
            failedHost = Some(host)
          }
        }
        taskIdToTaskSetId.get(tid) match {
          case Some(taskSetId) =>
            if (activeTaskSets.contains(taskSetId)) {
              //activeTaskSets(taskSetId).statusUpdate(status)
              taskSetToUpdate = Some(activeTaskSets(taskSetId))
            }
            if (TaskState.isFinished(state)) {
              taskIdToTaskSetId.remove(tid)
              if (taskSetTaskIds.contains(taskSetId)) {
                taskSetTaskIds(taskSetId) -= tid
              }
              taskIdToSlaveId.remove(tid)
            }
            if (state == TaskState.FAILED) {
              taskFailed = true
            }
          case None =>
            logInfo("Ignoring update from TID " + tid + " because its task set is gone")
        }
      } catch {
        case e: Exception => logError("Exception in statusUpdate", e)
      }
    }
    // Update the task set and DAGScheduler without holding a lock on this, because that can deadlock
    if (taskSetToUpdate != None) {
      taskSetToUpdate.get.statusUpdate(tid, state, serializedData)
    }
    if (failedHost != None) {
      listener.hostLost(failedHost.get)
      backend.reviveOffers()
    }
    if (taskFailed) {
      // Also revive offers if a task had failed for some reason other than host lost
      backend.reviveOffers()
    }
  }
  /** Propagates a fatal scheduler error to all active task sets, or exits if there are none. */
  def error(message: String) {
    synchronized {
      if (activeTaskSets.size > 0) {
        // Have each task set throw a SparkException with the error
        for ((taskSetId, manager) <- activeTaskSets) {
          try {
            manager.error(message)
          } catch {
            case e: Exception => logError("Exception in error callback", e)
          }
        }
      } else {
        // No task sets are active but we still got an error. Just exit since this
        // must mean the error is during registration.
        // It might be good to do something smarter here in the future.
        logError("Exiting due to error from cluster scheduler: " + message)
        System.exit(1)
      }
    }
  }
  override def stop() {
    if (backend != null) {
      backend.stop()
    }
    if (jarServer != null) {
      jarServer.stop()
    }
  }
  override def defaultParallelism() = backend.defaultParallelism()
  // Create a server for all the JARs added by the user to SparkContext.
  // We first copy the JARs to a temp directory for easier server setup.
  private def createJarServer() {
    val jarDir = Utils.createTempDir()
    logInfo("Temp directory for JARs: " + jarDir)
    val filenames = ArrayBuffer[String]()
    // Copy each JAR to a unique filename in the jarDir
    for ((path, index) <- sc.jars.zipWithIndex) {
      val file = new File(path)
      if (file.exists) {
        val filename = index + "_" + file.getName
        Utils.copyFile(file, new File(jarDir, filename))
        filenames += filename
      }
    }
    // Create the server
    jarServer = new HttpServer(jarDir)
    jarServer.start()
    // Build up the jar URI list
    val serverUri = jarServer.uri
    jarUris = filenames.map(f => serverUri + "/" + f).mkString(",")
    System.setProperty("spark.jar.uris", jarUris)
    logInfo("JAR server started at " + serverUri)
  }
  // Check for speculatable tasks in all our active jobs.
  def checkSpeculatableTasks() {
    var shouldRevive = false
    synchronized {
      for (ts <- activeTaskSetsQueue) {
        shouldRevive |= ts.checkSpeculatableTasks()
      }
    }
    if (shouldRevive) {
      backend.reviveOffers()
    }
  }
  /** Handles complete loss of a slave: marks its host dead and notifies task sets. */
  def slaveLost(slaveId: String) {
    var failedHost: Option[String] = None
    synchronized {
      val host = slaveIdToHost(slaveId)
      if (hostsAlive.contains(host)) {
        slaveIdsWithExecutors -= slaveId
        hostsAlive -= host
        activeTaskSetsQueue.foreach(_.hostLost(host))
        failedHost = Some(host)
      }
    }
    // Notify the listener outside the lock, mirroring statusUpdate.
    if (failedHost != None) {
      listener.hostLost(failedHost.get)
      backend.reviveOffers()
    }
  }
}
| ankurdave/arthur | core/src/main/scala/spark/scheduler/cluster/ClusterScheduler.scala | Scala | bsd-3-clause | 9,447 |
package safe.strings
import org.scalacheck.Arbitrary
import org.scalatest.WordSpec
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import shapeless.test.illTyped
/** Verifies the `ss` interpolator + `safe(...)` wrapper: unsafe types must not
  * compile, while safe types render identically to their `toString`. */
class CanBeSafeSpec extends WordSpec with GeneratorDrivenPropertyChecks {
  "ss\\"${safe(value)}\\"" should {
    "not compile when passed an unsafe type" in {
      val value = new UnsafeType
      // illTyped asserts at compile time that the quoted snippet does not type-check.
      illTyped { """ ss"${safe(value)}" """}
    }
    "compile when passed an CanBeSafe safe type" in {
      val value = new CanBeSafeType
      assert(ss"${safe(value)}" == value.toString)
    }
    "compile when passed a subclass of an AllCanBeSafe safe type" in {
      val value = new CanBeSafeChild
      assert(ss"${safe(value)}" == value.toString)
    }
    "compile when passed a Safe type" in {
      val value = new SafeType
      assert(ss"${safe(value)}" == value.toString)
    }
    s"print all AnyVal primitives equal to toString" in {
      forAll { (value: AnyVal) =>
        assert(ss"${safe(value)}" == value.toString)
      }
    }
    s"print all Strings equal to themselves" in {
      forAll { (value: String) =>
        assert(ss"${safe(value)}" == value)
      }
    }
    s"print all Numbers equal to themselves" in {
      forAll { (value: Number) =>
        assert(ss"${safe(value)}" == value.toString)
      }
    }
    // Copied from ImplicitlySafeSpec, but using safe keyword
    "print a single item using the implicit Safe for that type" in {
      val value = new SafeType
      assert(ss"${safe(value)}" == value.toString)
    }
    "print a sequence of items using the implicit Safe for that type in the toString of the collection" in {
      val values = Seq.fill(5)(new SafeType)
      assert(ss"${safe(values)}" == values.toString)
    }
    "print a tuple of items using the implicit Safe for that type and the toString of the tuple" in {
      val values = (new SafeType, new SafeType)
      assert(ss"${safe(values)}" == values.toString)
    }
    "print a subclass of an item that is implicitly AllSafe using the toString of the class" in {
      val value = new SafeChild
      assert(ss"${safe(value)}" == value.toString)
    }
    "print a case class of items that are all implicitly Safe using the toString of the class" in {
      val value = SafeCaseClass(new SafeType, new AnotherSafeType)
      assert(ss"${safe(value)}" == value.toString)
    }
    "print a case class of items that are both implicitly Safe and AllSafe using the toString of the class" in {
      val value = SafeCaseClassWithChild(new SafeType, new AnotherSafeType, new SafeChild)
      assert(ss"${safe(value)}" == value.toString)
    }
    // Shared examples instantiated for the primitive-ish element types.
    behave like itPrintsCollectionsOf[AnyVal]("AnyVal")
    behave like itPrintsCollectionsOf[String]("String")
    behave like itPrintsCollectionsOf[Number]("Number")
    "print an infinite Stream in finite time; the same as Stream.toString" in {
      // Guard: if the interpolator forces the stream, fail after 10 elements
      // instead of looping forever.
      def streamFrom(start: Int): Stream[Int] = {
        if (start > 10) {
          val example = streamFrom(1)
          fail(s"Stream is running infinitely. It should have printed: $example")
        }
        start #:: streamFrom(start + 1)
      }
      val stream = streamFrom(1)
      assert(ss"${safe(stream)}" == stream.toString)
    }
  }
  /** Shared examples: every standard collection of T must print the same as its `toString`. */
  def itPrintsCollectionsOf[T: Arbitrary: CanBeSafe](tpe: String): Unit = {
    s"print all Traversables of $tpe the same as toString" in {
      forAll { (values: Seq[T]) =>
        val traversable = Traversable(values: _*)
        assert(ss"${safe(traversable)}" == traversable.toString)
      }
    }
    s"print all Iterables of $tpe the same as toString" in {
      forAll { (values: Seq[T]) =>
        val iterable = Iterable(values: _*)
        assert(ss"${safe(iterable)}" == iterable.toString)
      }
    }
    s"print all Seqs of $tpe the same as toString" in {
      forAll { (values: Seq[T]) =>
        val seq = Seq(values: _*)
        assert(ss"${safe(values)}" == values.toString)
        assert(ss"${safe(seq)}" == seq.toString)
      }
    }
    s"print all Lists of $tpe the same as toString" in {
      forAll { (values: Seq[T]) =>
        val list = List(values: _*)
        assert(ss"${safe(list)}" == list.toString)
      }
    }
    s"print all Streams of $tpe the same as toString" in {
      forAll { (values: Seq[T]) =>
        val stream = Stream(values: _*)
        assert(ss"${safe(stream)}" == stream.toString)
      }
    }
  }
}
| jeffmay/secure-string-context | src/test/scala/safe/strings/CanBeSafeSpec.scala | Scala | apache-2.0 | 4,407 |
package scalatags
import scalatags.Text.TypedTag
/** Pretty-printer for scalatags text tags: renders a tag tree with newlines
  * and configurable indentation instead of the default compact output. */
object Pretty {
  // Value class: adds `pretty` methods to TypedTag without allocation.
  implicit class PrettyOps(val t: Text.TypedTag[String]) extends AnyVal{
    /** Recursively writes `t` into `strb`, indenting `depth * step` spaces.
      * Note: the `t` parameter shadows the wrapped member of the same name. */
    def prettyWriteTo(t: Text.TypedTag[String],strb: StringBuilder, depth: Int, step: Int): Unit = {
      val indent = " " * depth * step
      val builder = new text.Builder()
      t.build(builder)
      val attrs = builder.attrs.take(builder.attrIndex)
      val children = builder.children.take(builder.childIndex).toList
      // HTML-escapes attribute values via scalatags' Escaping.
      def escape(s: String): String = {
        val sb = new StringBuilder
        Escaping.escape(s, sb)
        sb.toString()
      }
      strb ++= indent += '<' ++= t.tag
      strb ++= attrs.map(a => " " + a._1 + " = \\"" + escape(a._2) + "\\" ").mkString
      if (children.isEmpty && t.void) {
        // Void elements (no children) self-close.
        strb ++= "/>"
      } else {
        strb ++= ">"
        for (c <- children) {
          c match {
            // NOTE(review): erasure makes the [String] part of this match
            // unchecked; any TypedTag[_] will take this branch — confirm intended.
            case t: TypedTag[String] =>
              strb ++= "\\n"
              prettyWriteTo(t, strb, depth + 1, step)
            case any =>
              strb ++= "\\n" ++= " " * (depth + 1) * step
              any.writeTo(strb)
          }
        }
        strb ++= "\\n" ++= indent ++= "</" ++= t.tag += '>'
      }
    }
    // NOTE(review): a 0-arg overload next to a defaulted parameter means the
    // default below is unreachable via `pretty()` — confirm this is deliberate.
    def pretty(step: Int = 4): String = {
      val strb = new StringBuilder
      prettyWriteTo(t, strb, 0, step)
      strb.toString()
    }
    def pretty(): String = pretty(4)
  }
  // NOTE(review): two implicit Booleans in the same scope are ambiguous when an
  // implicit Boolean is summoned — verify how these flags are consumed.
  implicit val prettyPrint = true
  implicit val noPrettyPrint = false
}
| teodimoff/rOut | html/src/io/rout/html/Pretty.scala | Scala | apache-2.0 | 1,471 |
/**
* Copyright (C) 2013 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.fb
import org.orbeon.oxf.test.{XFormsSupport, DocumentTestBase}
import org.orbeon.saxon.dom4j.DocumentWrapper
import org.orbeon.scaxon.XML._
import org.orbeon.oxf.xforms.XFormsContainingDocument
import org.orbeon.oxf.xml.dom4j.Dom4jUtils
import org.orbeon.oxf.xml.TransformerUtils
import org.orbeon.saxon.om.NodeInfo
/** Test support mixin that builds a minimal in-memory Form Builder document
  * (models, resources, toolbox bindings) so actions can run against it. */
trait FormBuilderSupport extends XFormsSupport {
  self: DocumentTestBase ⇒
  val TemplateDoc = "oxf:/forms/orbeon/builder/form/template.xml"
  // Run the body in the action context of a form which simulates the main Form Builder model
  def withActionAndFBDoc[T](url: String)(body: DocumentWrapper ⇒ T): T =
    withActionAndFBDoc(formBuilderContainingDocument(url))(body)
  private def formBuilderContainingDocument(url: String) =
    setupDocument(formBuilderDoc(url))
  /** Runs `body` against the `fb-form-instance` of the given document, or null
    * when the model/instance cannot be found. */
  def withActionAndFBDoc[T](doc: XFormsContainingDocument)(body: DocumentWrapper ⇒ T): T = {
    withActionAndDoc(doc) {
      body(
        doc.models
        find (_.getId == "fr-form-model")
        flatMap (m ⇒ Option(m.getInstance("fb-form-instance")))
        map (_.documentInfo.asInstanceOf[DocumentWrapper])
        orNull
      )
    }
  }
  // Debug helper: dumps a tiny-tree element as pretty-printed XML to stdout.
  def prettyPrintElem(elem: NodeInfo): Unit =
    println(Dom4jUtils.domToPrettyString(TransformerUtils.tinyTreeToDom4j(elem)))
  // Builds the simulated Form Builder page: fr-form-model with the form loaded
  // from `url`, resources, toolbox components, and the annotate pipeline run on
  // xforms-model-construct-done.
  private def formBuilderDoc(url: String) =
    elemToDom4j(
      <xh:html xmlns:xh="http://www.w3.org/1999/xhtml"
           xmlns:xf="http://www.w3.org/2002/xforms"
           xmlns:xxf="http://orbeon.org/oxf/xml/xforms"
           xmlns:ev="http://www.w3.org/2001/xml-events"
           xmlns:xbl="http://www.w3.org/ns/xbl">
        <xh:head>
          <xf:model id="fr-form-model">
            <xf:instance id="fb-form-instance" xxf:index="id"><dummy/></xf:instance>
            <xf:instance id="fr-form-instance" src={url}/>
            <xf:instance id="fr-form-resources" src="oxf:/forms/orbeon/builder/form/resources.xml"
                   xxf:readonly="true" xxf:cache="true"/>
            <xf:var name="model" value="xh:head/xf:model[@id = 'fr-form-model']"/>
            <xf:var name="metadata-instance" value="$model/xf:instance[@id = 'fr-form-metadata']/*"/>
            <xf:var name="resources" value="$model/xf:instance[@id = 'fr-form-resources']/*"/>
            <xf:var name="current-resources" value="$resources/resource[1]"/>
            <xf:instance id="fb-variables">
              <variables>
                <selected-cell/>
              </variables>
            </xf:instance>
            <xf:var name="variables" value="instance('fb-variables')"/>
            <xf:var name="selected-cell" value="$variables/selected-cell"/>
            <xf:instance id="fb-components-instance">
              <components/>
            </xf:instance>
            <xf:var name="component-bindings" value="instance('fb-components-instance')//xbl:binding"/>
            <xf:action ev:event="xforms-model-construct-done">
              <!-- Load components -->
              <xf:insert context="instance('fb-components-instance')"
                     origin="xxf:call-xpl('oxf:/org/orbeon/oxf/fb/simple-toolbox.xpl', (), (), 'data')"/>
              <!-- First store into a temporary document so that multiple inserts won't cause repeat processing until we are done -->
              <xf:var name="temp" value="xxf:create-document()"/>
              <xf:insert
                context="$temp"
                origin="
                  xxf:call-xpl(
                    'oxf:/forms/orbeon/builder/form/annotate.xpl',
                    'data',
                    xxf:call-xpl(
                      'oxf:/forms/orbeon/builder/form/add-template-bindings.xpl',
                      (
                        'data',
                        'bindings'
                      ),
                      (
                        instance('fr-form-instance'),
                        instance('fb-components-instance')
                      ),
                      'data'
                    ),
                    'data'
                  )"
              />
              <xf:action type="xpath" xmlns:fbf="java:org.orbeon.oxf.fb.FormBuilder">
                fbf:initializeGrids($temp)
              </xf:action>
              <xf:insert ref="instance('fb-form-instance')" origin="$temp"/>
            </xf:action>
          </xf:model>
          <xf:model id="fr-resources-model">
            <xf:var name="fr-form-resources" value="xxf:instance('fr-form-resources')/resource[@xml:lang = 'en']"/>
          </xf:model>
        </xh:head>
        <xh:body>
        </xh:body>
      </xh:html>)
}
| wesley1001/orbeon-forms | src/test/scala/org/orbeon/oxf/fb/FormBuilderSupport.scala | Scala | lgpl-2.1 | 5,298 |
// Compiler regression test: declaring an implicit value of type
// languageFeature.dynamics (initialised from language.dynamics) must compile
// and run cleanly — presumably the enabling token for the feature; see the
// original ticket (t6290) for the exact failure mode.
object Test {
  implicit val foo: languageFeature.dynamics = language.dynamics
  def main(args: Array[String]): Unit = ()
}
| folone/dotty | tests/run/t6290.scala | Scala | bsd-3-clause | 124 |
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.3
* @date Sat Mar 8 14:24:11 EST 2014
* @see LICENSE (MIT style license file).
*/
package scalation.stat
import scalation.linalgebra.VectorD
import scalation.plot.{FramelessPlot, Plot}
import scalation.random.{Distribution, Parameters}
import scalation.random.CDF.buildEmpiricalCDF
import scalation.random.Quantile.empiricalInv
import scalation.util.Error
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Q_Q_Plot` object produces Quantile-Quantile plots that are used to
* compare probability distributions.
*/
object Q_Q_Plot
       extends Error
{
    /** Debug flag
     */
    private val DEBUG = false
    /** Whether the plot is to be embedded or has its own frame
     *  To change, set to true before calling plot
     */
    var frameless = false
    /** Whether to transform the data to zero mean and unit standard deviation
     *  To change, set to true before calling plot
     */
    var makeStandard = false
    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Produce a Q-Q plot for the two data vectors.
     *  @param fv  the first data vector
     *  @param gv  the second data vector
     */
    def plot (fv: VectorD, gv: VectorD): FramelessPlot =
    {
        val n = fv.dim
        if (gv.dim != n) flaw ("plot", "vectors must have the same size")
        val fv_ = if (makeStandard) fv.standardize else fv
        val pv = new VectorD (n)
        // NOTE(review): the loop fills indices 0 .. n-2 only, so pv(n-1) keeps
        // its default value — confirm this is the intended behavior.
        for (i <- 1 until n) {
            val p = i / n.toDouble
            pv(i-1) = p
            if (DEBUG) println ("pv = " + pv + ", fv = " + fv_(i-1) + ", gv = " + gv(i-1))
        } // for
        if (frameless) new FramelessPlot (pv, fv_, gv)
        else { new Plot (pv, fv_, gv); null }
    } // plot
    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Produce a Q-Q plot for the data vector and the distribution.
     *  @param fv    the data vector
     *  @param gInv  the inverse CDF
     *  @param g_df  the degrees of freedom for the distribution
     *  @param n     the number of intervals
     */
    def plot (fv: VectorD, gInv: Distribution, g_df: Parameters, n: Int): FramelessPlot =
    {
        val eCDF = buildEmpiricalCDF (fv)
        val fiv = new VectorD (n)          // to hold vector of values for fInv
        val gv = new VectorD (n)           // to hold vector of values for gInv
        // NOTE(review): as above, the last elements of fiv/gv stay at 0.0.
        for (i <- 1 until n) {
            val p = i / n.toDouble
            fiv(i-1) = empiricalInv (p, eCDF)
            gv(i-1) = gInv (p, g_df)
        } // for
        if (frameless) new FramelessPlot (fiv, fiv, gv)
        else { new Plot (fiv, fiv, gv); null }
    } // plot
    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Produce a Q-Q plot for the two distribution.
     *  @param fInv  the first inverse CDF
     *  @param f_df  the degrees of freedom for the first distribution
     *  @param gInv  the second inverse CDF
     *  @param g_df  the degrees of freedom for the second distribution
     *  @param n     the number of intervals
     */
    def plot (fInv: Distribution, f_df: Parameters, gInv: Distribution, g_df: Parameters,
              n: Int): FramelessPlot =
    {
        val fv = new VectorD (n)           // to hold vector of values for fInv
        val gv = new VectorD (n)           // to hold vector of values for gInv
        for (i <- 1 until n) {
            val p = i / n.toDouble
            fv(i-1) = fInv (p, f_df)
            gv(i-1) = gInv (p, g_df)
        } // for
        if (frameless) new FramelessPlot (fv, fv, gv)
        else { new Plot (fv, fv, gv); null }
    } // plot
} // Q_Q_Plot object
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Q_Q_PlotTest` object is used to test the `Q_Q_Plot` object:
* distribution vs. distribution.
* > run-main scalation.stat.Q_Q_PlotTest
*/
object Q_Q_PlotTest extends App
{
    import scalation.random.Quantile.{normalInv, studentTInv}
    // Normal vs. Student-t with 10 degrees of freedom.
    Q_Q_Plot.plot (normalInv, null, studentTInv, Vector (10), 200)
    // Normal vs. itself: quantiles agree by definition, so the identity line.
    Q_Q_Plot.plot (normalInv, null, normalInv, null, 200)
} // Q_Q_PlotTest object
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Q_Q_PlotTest2` object is used to test the `Q_Q_Plot` object:
* data vector vs. distribution.
* > run-main scalation.stat.Q_Q_PlotTest2
*/
object Q_Q_PlotTest2 extends App
{
    import scalation.random.Quantile.normalInv
    import scalation.random.Normal
    val nrv = Normal ()
    // 400 samples drawn from a Normal, compared against normal quantiles.
    val data = VectorD (for (i <- 0 until 400) yield nrv.gen)
    Q_Q_Plot.plot (data, normalInv, null, 200)
} // Q_Q_PlotTest2 object
| NBKlepp/fda | scalation_1.3/scalation_mathstat/src/main/scala/scalation/stat/Q_Q_Plot.scala | Scala | mit | 4,856 |
package com.github.takezoe.scala.jdbc.validation
import scala.reflect.macros.blackbox.Context
import net.sf.jsqlparser.JSQLParserException
import net.sf.jsqlparser.parser.CCJSqlParserUtil
/** Compile-time SQL validation used by the macro layer. */
object SqlValidator {
  /**
   * Parses `sql` with JSqlParser and reports any syntax error as a compilation
   * error at the macro call site.
   *
   * @param sql   the SQL statement to validate
   * @param types column/parameter type hints (not used by this check)
   * @param c     the macro context used for error reporting
   */
  def validateSql(sql: String, types: Seq[String], c: Context): Unit = {
    try {
      CCJSqlParserUtil.parse(sql)
    } catch {
      case e: JSQLParserException =>
        // e.getCause may be null depending on how parsing failed (Throwable.getCause
        // is nullable); fall back to the exception itself so error reporting can
        // never throw an NPE from inside the macro.
        val cause = Option(e.getCause).getOrElse(e)
        val message = Option(cause.getMessage).getOrElse(cause.toString)
        c.error(c.enclosingPosition, message)
    }
  }
}
| takezoe/scala-jdbc | src/main/scala/com/github/takezoe/scala/jdbc/validation/SqlValidator.scala | Scala | apache-2.0 | 446 |
package com.pygmalios.rawKafkaCassandra.actors
import akka.actor._
import com.pygmalios.rawKafkaCassandra.RawKafkaCassandraConfig
import com.pygmalios.rawKafkaCassandra.akka24.BackoffSupervisor
import com.pygmalios.rawKafkaCassandra.cassandra.CassandraSessionFactory
import scala.concurrent.duration._
/**
* Top-level actor.
*/
class KafkaToCassandra(cassandraSessionFactory: CassandraSessionFactory) extends Actor with ActorLogging with RawKafkaCassandraConfig {
  // Child TopicsManager is started eagerly, wrapped in a backoff supervisor so
  // it is restarted with exponential backoff on failure.
  private val cassandraClusterBackoffSupervisor = backoffSupervisorProps(
    Props(new TopicsManager(kafkaTopicsConfig, cassandraSessionFactory, StreamingReaderWriter.factory)),
    "topicsManager")
  // This actor handles no messages itself; it only supervises.
  override def receive = Actor.emptyBehavior
  /** Wraps `childProps` in a BackoffSupervisor (0.1s min, 30s max, 20% jitter). */
  private def backoffSupervisorProps(childProps: Props, childName: String): ActorRef =
    // TODO: Configurable
    context.actorOf(BackoffSupervisor.props(
      childProps = childProps,
      childName = childName,
      minBackoff = 0.1.seconds,
      maxBackoff = 30.seconds,
      randomFactor = 0.2))
}
object KafkaToCassandra {
  /** Creates the top-level KafkaToCassandra actor under the given factory. */
  def factory(actorRefFactory: ActorRefFactory, cassandraSessionFactory: CassandraSessionFactory): ActorRef =
    actorRefFactory.actorOf(Props(new KafkaToCassandra(cassandraSessionFactory)), "kafkaToCassandra")
} | pygmalios/raw-kafka-cassandra | src/main/scala/com/pygmalios/rawKafkaCassandra/actors/KafkaToCassandra.scala | Scala | apache-2.0 | 1,268 |
/* *\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2015, Gary Keorkunian **
** **
\* */
package squants.energy
import squants._
/**
* Represents a quantity of power density
*
* @author Nicolas Vinuesa
* @since 1.4
*
* @param value value in [[squants.energy.WattsPerCubicMeter]]
*/
final class PowerDensity private (val value: Double, val unit: PowerDensityUnit)
    extends Quantity[PowerDensity] {
  def dimension = PowerDensity
  // W/m³ × m³ = W
  def *(that: Volume): Power = Watts(this.toWattsPerCubicMeter * that.toCubicMeters)
  def toWattsPerCubicMeter = to(WattsPerCubicMeter)
}
/** Dimension companion: parsing, unit registry, and SI/primary unit selection. */
object PowerDensity extends Dimension[PowerDensity] {
  private[energy] def apply[A](n: A, unit: PowerDensityUnit)(implicit num: Numeric[A]) = new PowerDensity(num.toDouble(n), unit)
  def apply(value: Any) = parse(value)
  def name = "PowerDensity"
  def primaryUnit = WattsPerCubicMeter
  def siUnit = WattsPerCubicMeter
  def units = Set(WattsPerCubicMeter)
}
/** Base trait for power-density units; builds quantities from any Numeric. */
trait PowerDensityUnit extends UnitOfMeasure[PowerDensity] with UnitConverter {
  def apply[A](n: A)(implicit num: Numeric[A]) = PowerDensity(n, this)
}
/** The primary and SI unit for power density. */
object WattsPerCubicMeter extends PowerDensityUnit with PrimaryUnit with SiUnit {
  val symbol = "W/m³"
}
/** DSL conversions, e.g. `3.wattsPerCubicMeter`, plus a Numeric instance. */
object PowerDensityConversions {
  lazy val wattPerCubicMeter = WattsPerCubicMeter(1)
  implicit class PowerDensityConversions[A](n: A)(implicit num: Numeric[A]) {
    def wattsPerCubicMeter = WattsPerCubicMeter(n)
  }
  implicit object PowerDensityNumeric extends AbstractQuantityNumeric[PowerDensity](PowerDensity.primaryUnit)
}
| garyKeorkunian/squants | shared/src/main/scala/squants/energy/PowerDensity.scala | Scala | apache-2.0 | 1,987 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
//#compile-time-di-evolutions
import play.api.ApplicationLoader.Context
import play.api.BuiltInComponentsFromContext
import play.api.db.Database
import play.api.db.DBComponents
import play.api.db.HikariCPComponents
import play.api.db.evolutions.EvolutionsComponents
import play.api.routing.Router
import play.filters.HttpFiltersComponents
// Mixing in EvolutionsComponents and referencing `applicationEvolutions`
// below applies database evolutions eagerly at application start.
class AppComponents(cntx: Context)
    extends BuiltInComponentsFromContext(cntx)
    with DBComponents
    with EvolutionsComponents
    with HikariCPComponents
    with HttpFiltersComponents {
  // this will actually run the database migrations on startup
  applicationEvolutions
  //###skip: 1
  val router = Router.empty
}
//#compile-time-di-evolutions
| benmccann/playframework | documentation/manual/working/commonGuide/database/code/CompileTimeDIEvolutions.scala | Scala | apache-2.0 | 766 |
// Regression test fixtures: case classes with `protected` primary constructors
// (B additionally has a second parameter list).
case class A protected (x: Int)
case class B protected (x: Int)(y: Int)
// Applies A and B from outside their hierarchy; this must resolve to the
// synthetic companion `apply` methods, not the protected constructors.
class C {
  def f = A(1)
  def g = B(1)(2) // was: constructor B in class B cannot be accessed in class C
}
object Test {
  // Smoke-runs both construction paths.
  def main(args: Array[String]): Unit = {
    new C().f
    new C().g
  }
}
| scala/scala | test/files/run/t9546e.scala | Scala | apache-2.0 | 273 |
package org.jetbrains.plugins.scala.lang.scaladoc.psi.impl
import java.util
import com.intellij.psi._
import com.intellij.psi.impl.source.tree.LazyParseablePsiElement
import com.intellij.psi.javadoc.PsiDocTag
import com.intellij.psi.scope.PsiScopeProcessor
import com.intellij.psi.tree.IElementType
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScClass
import org.jetbrains.plugins.scala.lang.psi.api.{ScalaElementVisitor, ScalaPsiElement}
import org.jetbrains.plugins.scala.lang.scaladoc.lexer.ScalaDocTokenType
import org.jetbrains.plugins.scala.lang.scaladoc.parser.ScalaDocElementTypes
import org.jetbrains.plugins.scala.lang.scaladoc.psi.api.{ScDocComment, ScDocDescriptionPart, ScDocTag}
import scala.collection.mutable
final class ScDocCommentImpl(buffer: CharSequence,
override val getTokenType: IElementType)
extends LazyParseablePsiElement(getTokenType, buffer)
with ScDocComment {
override def getOwner: PsiDocCommentOwner = getParent match {
case owner: PsiDocCommentOwner if owner.getDocComment eq this => owner
case _ => null
}
override def processDeclarations(processor: PsiScopeProcessor, state: ResolveState, lastParent: PsiElement,
place: PsiElement): Boolean = {
super.processDeclarations(processor, state, lastParent, place) && !Option(getOwner).exists {
case owner: ScClass =>
owner.membersWithSynthetic.exists {
case named: PsiNamedElement => !processor.execute(named, state)
case _ => false
}
case _ => false
}
}
override def toString: String = "DocComment"
//todo: implement me
override def getTags: Array[PsiDocTag] = findTagsByName(_ => true)
override def getDescriptionElements: Array[PsiElement] =
descriptionElements.toArray
override def descriptionParts: Seq[ScDocDescriptionPart] =
descriptionElements
.filter(_.isInstanceOf[ScDocDescriptionPart])
.map(_.asInstanceOf[ScDocDescriptionPart])
.toSeq
private def descriptionElements: Iterator[PsiElement] = {
val beforeTags = this.getFirstChildNode.treeNextNodes.takeWhile(_.getElementType != ScalaDocElementTypes.DOC_TAG)
beforeTags
.filter { node =>
val elementType = node.getElementType
elementType != ScalaDocTokenType.DOC_COMMENT_START &&
elementType != ScalaDocTokenType.DOC_COMMENT_END &&
elementType != ScalaDocTokenType.DOC_COMMENT_LEADING_ASTERISKS
}
.map(_.getPsi)
}
override def findTagByName(name: String): PsiDocTag = {
val tags = findTagsByName(name)
tags.headOption.orNull
}
override def tags: Seq[ScDocTag] =
this.children.filter(_.isInstanceOf[ScDocTag]).map(_.asInstanceOf[ScDocTag]).toSeq
override def findTagsByName(name: String): Array[PsiDocTag] =
findTagsByName(_ == name)
override def findTagsByName(filter: String => Boolean): Array[PsiDocTag] =
this.children
.takeWhile(_.getNode.getElementType != ScalaDocTokenType.DOC_COMMENT_END)
.filter {
case docTag: ScDocTag if docTag.getNode.getElementType == ScalaDocElementTypes.DOC_TAG => filter(docTag.name)
case _ => false
}
.map(_.asInstanceOf[ScDocTag])
.toArray
override protected def findChildrenByClassScala[T >: Null <: ScalaPsiElement](aClass: Class[T]): Array[T] = {
val result: util.List[T] = new util.ArrayList[T]
var cur: PsiElement = getFirstChild
while (cur != null) {
if (aClass.isInstance(cur)) result.add(cur.asInstanceOf[T])
cur = cur.getNextSibling
}
result.toArray[T](java.lang.reflect.Array.newInstance(aClass, result.size).asInstanceOf[Array[T]])
}
override protected def findChildByClassScala[T >: Null <: ScalaPsiElement](aClass: Class[T]): T = {
var cur: PsiElement = getFirstChild
while (cur != null) {
if (aClass.isInstance(cur)) return cur.asInstanceOf[T]
cur = cur.getNextSibling
}
null
}
  // Visitor dispatch for Scala-aware element visitors.
  override protected def acceptScala(visitor: ScalaElementVisitor): Unit = {
    visitor.visitDocComment(this)
  }
} | JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/scaladoc/psi/impl/ScDocCommentImpl.scala | Scala | apache-2.0 | 4,158 |
package notification.controllers
import java.time.{Duration, ZonedDateTime}
import java.util.UUID
import authentication.AuthAction
import com.gu.notificationschedule.dynamo.{NotificationSchedulePersistenceAsync, NotificationsScheduleEntry}
import models.Notification
import play.api.libs.json.Json
import play.api.mvc.{AbstractController, Action, ControllerComponents}
import scala.concurrent.ExecutionContext
class Schedule(authAction: AuthAction, controllerComponents: ControllerComponents, notificationSchedulePersistence: NotificationSchedulePersistenceAsync)
              (implicit executionContext: ExecutionContext) extends AbstractController(controllerComponents) {

  /** Persists the posted notification so it is delivered at `dateString`
    * (an ISO-8601 zoned date-time). The stored entry carries a TTL of seven
    * days past the scheduled delivery time.
    */
  def scheduleNotification(dateString: String): Action[Notification] = authAction.async(parse.json[Notification]) { request =>
    val scheduledTime = ZonedDateTime.parse(dateString)
    val notification = request.body
    // TTL: keep the row around for a week after the scheduled send time.
    val ttlSeconds = Duration.ofDays(7).getSeconds
    val entry = NotificationsScheduleEntry(
      notification.id.toString,
      Json.prettyPrint(Json.toJson(notification)),
      scheduledTime.toEpochSecond,
      scheduledTime.toEpochSecond + ttlSeconds
    )
    notificationSchedulePersistence.writeAsync(entry, None).future.map(_ => Ok)
  }
}
| guardian/mobile-n10n | notification/app/notification/controllers/Schedule.scala | Scala | apache-2.0 | 1,233 |
/* ____ __ ____ ____ ____ ___ ____ __ __ ____
* ( _ \\ /__\\ (_ )(_ _)( ___)/ __) ( _ \\( )( )( _ \\ Read
* ) / /(__)\\ / /_ _)(_ )__) \\__ \\ )___/ )(__)( ) _ < README.txt
* (_)\\_)(__)(__)(____)(____)(____)(___/ (__) (______)(____/ LICENSE.txt
*/
package razie.diesel.engine
import razie.diesel.engine.nodes.CanHtml
import scala.Option.option2Iterable
/** a trace. traces can be communicated between engines. Also, this is what you get when setting the
  * response type to trace
  *
  * @param root the root AST node captured by this trace
  * @param parentNodeId optional id of the AST node this trace hangs under
  */
case class DieselTrace (
  root:DomAst,
  node:String, // actual server node
  engineId:String, // engine Id
  app:String, // application/system id twitter:pc:v5
  details:String="",
  parentNodeId:Option[String]=None ) extends CanHtml with InfoNode {

  /** serialize this trace into a json-ready map */
  def toj: Map[String, Any] =
    Map(
      "class" -> "DieselTrace",
      "ver" -> "v1",
      "node" -> node,
      "engineId" -> engineId,
      "app" -> app,
      "details" -> details,
      "id" -> root.id,
      "parentNodeId" -> parentNodeId.mkString,
      "root" -> root.toj
    )

  def toJson = toj

  // One-line HTML summary; the root subtree is deliberately not rendered here.
  override def toHtml =
    span("trace::", "primary") +
      s"$details (node=$node, engine=$engineId, app=$app) :: " //+ root.toHtml

  override def toString = toHtml

  /** wrap this trace as a sub-trace AST node, attaching the root without firing events */
  def toAst = {
    val me = new DomAst (this, AstKinds.SUBTRACE)
    me.appendAllNoEvents(List(root))
    me
  }
}
| razie/diesel-rx | diesel/src/main/scala/razie/diesel/engine/DieselTrace.scala | Scala | apache-2.0 | 1,419 |
package com.arcusys.valamis.util
//
// Created by iliya.tryapitsin on 12.02.14.
//
trait HexHelper {

  /** Lookup table mapping a nibble (0-15) to its lower-case hex digit. */
  private final val hexCode: Array[Char] = "0123456789abcdef".toCharArray

  /** Converts a byte array to its lower-case hexadecimal representation.
    *
    * @param data bytes to encode; may be `null`
    * @return two hex characters per input byte, or `null` when `data` is `null`
    *         (null passthrough kept for backward compatibility)
    */
  def toHexString(data: Array[Byte]): String = {
    if (data == null) null
    else {
      // Two characters per byte: high nibble first, then low nibble.
      val r = new StringBuilder(data.length * 2)
      data.foreach { b =>
        r.append(hexCode((b >> 4) & 0xF))
        r.append(hexCode(b & 0xF))
      }
      r.toString
    }
  }
}
/** Factory for anonymous [[HexHelper]] instances. */
object HexHelper {
  def apply() = new HexHelper {}
}
| igor-borisov/valamis | valamis-util/src/main/scala/com/arcusys/valamis/util/HexHelper.scala | Scala | gpl-3.0 | 523 |
/*
* Wire
* Copyright (C) 2016 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.waz.service.downloads
import java.io._
import java.net.URL
import java.util.concurrent.atomic.AtomicBoolean
import android.graphics.Bitmap
import android.media.ExifInterface
import com.waz.api.NetworkMode
import com.waz.api.impl.ErrorResponse
import com.waz.api.impl.ErrorResponse.internalError
import com.waz.api.impl.ProgressIndicator.Callback
import com.waz.bitmap.video.VideoTranscoder
import com.waz.bitmap.{BitmapDecoder, BitmapUtils}
import com.waz.cache.{CacheEntry, CacheService}
import com.waz.content.AssetsStorage
import com.waz.content.UserPreferences.DownloadImagesAlways
import com.waz.content.WireContentProvider.CacheUri
import com.waz.log.BasicLogging.LogTag
import com.waz.log.BasicLogging.LogTag.DerivedLogTag
import com.waz.model.AssetData.{RemoteData, WithExternalUri, WithProxy, WithRemoteData}
import com.waz.model._
import com.waz.service.assets.AudioTranscoder
import com.waz.service.tracking.TrackingService
import com.waz.service.{NetworkModeService, UserService, ZMessaging}
import com.waz.sync.client.AssetClient
import com.waz.threading.CancellableFuture.CancelException
import com.waz.threading.{CancellableFuture, Threading}
import com.waz.ui.MemoryImageCache
import com.waz.ui.MemoryImageCache.BitmapRequest
import com.waz.utils.events.{EventStream, Signal}
import com.waz.utils.wrappers.{Context, URI}
import com.waz.utils.{CancellableStream, returning}
import com.waz.znet2.http
import com.waz.znet2.http.HttpClient.AutoDerivationOld._
import com.waz.znet2.http.Request.UrlCreator
import com.waz.znet2.http.{Method, Request, RequestInterceptor, ResponseCode}
import scala.concurrent.{Future, Promise}
import scala.util.control.NonFatal
trait AssetLoader {
  /** Fired when a network download for the given asset starts. */
  def onDownloadStarting: EventStream[AssetId]
  /** Fired when a download finished and the entry was moved into the cache. */
  def onDownloadDone: EventStream[AssetId]
  /** Fired when a download failed with a fatal error. */
  def onDownloadFailed: EventStream[(AssetId, ErrorResponse)]

  //guarantees to either return a defined cache entry, or throw an exception
  def loadAsset(asset: AssetData, callback: Callback, force: Boolean): CancellableFuture[CacheEntry]

  /** Fixes the bitmap's orientation, caches it, and returns the compressed bytes. */
  def loadFromBitmap(assetId: AssetId, bitmap: Bitmap, orientation: Int = ExifInterface.ORIENTATION_NORMAL): Future[Array[Byte]]
}
class AssetLoaderImpl(context: Context,
assetStorage: Option[AssetsStorage],
network: NetworkModeService,
client: AssetClient,
audioTranscoder: AudioTranscoder,
videoTranscoder: VideoTranscoder,
cache: CacheService,
imgCache: MemoryImageCache,
bitmapDecoder: BitmapDecoder,
tracking: TrackingService)
(implicit
urlCreator: UrlCreator,
authInterceptor: RequestInterceptor) extends AssetLoader with DerivedLogTag {
private lazy val downloadAlways = Option(ZMessaging.currentAccounts).map(_.activeZms).map {
_.flatMap {
case None => Signal.const(false)
case Some(z) => z.userPrefs.preference(DownloadImagesAlways).signal
}
}.getOrElse {
// warn("No CurrentAccounts available - this may be being called too early...")
Signal.const(true)
}
private lazy val downloadEnabled = (for {
//Will be set by UserPreferences when available, defaults to always download (false) otherwise.
downloadAlways <- downloadAlways
onWifi <- network.networkMode.map(_ == NetworkMode.WIFI)
} yield downloadAlways || onWifi).disableAutowiring()
import AssetLoader._
import com.waz.threading.Threading.Implicits.Background
override val onDownloadStarting = EventStream[AssetId]()
override val onDownloadDone = EventStream[AssetId]()
override val onDownloadFailed = EventStream[(AssetId, ErrorResponse)]()
override def loadAsset(asset: AssetData, callback: Callback, force: Boolean): CancellableFuture[CacheEntry] = {
// verbose(s"loadAsset: ${asset.id}, isDownloadable?: ${asset.isDownloadable}, force?: $force, mime: ${asset.mime}, name: ${asset.name}")
returning(asset match {
case _ if asset.mime == Mime.Audio.PCM => transcodeAudio(asset, callback)
case _ => CancellableFuture.lift(cache.getEntry(asset.cacheKey)).flatMap {
case Some(cd) => CancellableFuture.successful(cd)
case None if asset.isDownloadable && force => download(asset, callback)
case None if asset.isDownloadable => CancellableFuture.lift(downloadEnabled.head).flatMap {
case true => download(asset, callback)
case false => CancellableFuture.failed(DownloadOnWifiOnlyException)
}
case _ =>
(asset.mime, asset.source) match {
case (Mime.Video(), Some(uri)) => transcodeVideo(asset.cacheKey, asset.mime, asset.name, uri, callback)
case (_, Some(uri)) => loadFromUri(asset.cacheKey, asset.mime, asset.name, uri)
case _ => CancellableFuture.failed(new Exception(s"Not enough information to load asset data: ${asset.id}"))
}
}
})(_.failed.foreach(throw _))
}
private def download(asset: AssetData, callback: Callback) = {
onDownloadStarting ! asset.id
def finish(a: AssetData, entry: CacheEntry) = {
CancellableFuture.lift(cache.move(a.cacheKey, entry, a.mime, a.name).andThen { case _ =>
onDownloadDone ! asset.id
})
}
//TODO Strange situation, only in one case we need request without authorization. Maybe we can get rid of this special case
(asset match {
case WithRemoteData(RemoteData(Some(rId), token, otrKey, sha, _)) =>
// verbose(s"Downloading wire asset: ${asset.id}: $rId")
val path = AssetClient.getAssetPath(rId, otrKey, asset.convId)
val headers = token.fold(Map.empty[String, String])(t => Map("Asset-Token" -> t.str))
val request = Request.Get(relativePath = path, headers = http.Headers(headers))
client.loadAsset(request, otrKey, sha, callback)
case WithExternalUri(uri) =>
// verbose(s"Downloading external asset: ${asset.id}: $uri")
val request = Request.create(method = Method.Get, url = new URL(uri.toString))
val resp = client.loadAsset(request, callback = callback)
if (uri == UserService.UnsplashUrl)
resp.flatMap {
case Right(entry) => CancellableFuture.successful(Right(entry))
case Left(_) =>
CancellableFuture.lift(cache.addStream(CacheKey(), context.getAssets.open("unsplash_default.jpeg"), Mime.Image.Jpg)
.map(Right(_))
.recover {
case NonFatal(e) => Left(internalError(s"Failed to load default unsplash image, ${e.getMessage}"))
})
}
else resp
case WithProxy(proxy) =>
// verbose(s"Downloading asset from proxy: ${asset.id}: $proxy")
val request = Request.Get(relativePath = proxy)
client.loadAsset(request, callback = callback)
case _ => CancellableFuture.successful(Left(internalError(s"Tried to download asset ${asset.id} without enough information to complete download")))
}).flatMap {
case Right(entry) => finish(asset, entry)
case Left(err) =>
if (err.code == ResponseCode.NotFound) {
// verbose(s"Asset not found. Removing from local storage $asset")
assetStorage.foreach(storage => storage.remove(asset.id))
}
if (err.isFatal) onDownloadFailed ! (asset.id, err)
CancellableFuture.failed(DownloadFailedException(err))
}
}
private def transcodeAudio(asset: AssetData, callback: Callback) = {
// verbose(s"transcodeAudio: asset: ${asset.id}, cachekey: ${asset.cacheKey}, mime: ${asset.mime}, uri: ${asset.source}")
val entry = cache.createManagedFile()
val uri = asset.source.getOrElse(CacheUri(asset.cacheKey, context))
audioTranscoder(uri, entry.cacheFile, callback).flatMap { _ =>
// verbose(s"loaded audio from ${asset.cacheKey}, resulting file size: ${entry.length}")
CancellableFuture.lift(cache.move(asset.cacheKey, entry, Mime.Audio.MP4, asset.name, cacheLocation = Some(cache.cacheDir)))
}.recoverWith {
case ex: CancelException => CancellableFuture.failed(ex)
case NonFatal(ex) =>
tracking.exception(ex, s"audio transcoding failed for uri")
CancellableFuture.failed(ex)
}
}
private def openStream(uri: URI) = AssetLoader.openStream(context, uri)
private def transcodeVideo(cacheKey: CacheKey, mime: Mime, name: Option[String], uri: URI, callback: Callback) = {
// verbose(s"transcodeVideo: cacheKey: $cacheKey, mime: $mime, name: $name, uri: $uri")
val entry = cache.createManagedFile()
// TODO: check input type, size, bitrate, maybe we don't need to transcode it
returning(videoTranscoder(uri, entry.cacheFile, callback).flatMap { _ =>
// verbose(s"loaded video from $cacheKey, resulting file size: ${entry.length}")
CancellableFuture.lift(cache.move(cacheKey, entry, Mime.Video.MP4, if (mime == Mime.Video.MP4) name else name.map(_ + ".mp4"), cacheLocation = Some(cache.cacheDir)))
.map { entry =>
//TODO AN-5742 Use CacheService to store temp vids instead of handling them manually
entry.file.foreach { file => if(file.getName.startsWith("VID_")) file.delete() }
entry
}
}.recoverWith {
case ex: CancelException => CancellableFuture.failed(ex)
case NonFatal(ex) =>
tracking.exception(ex, s"video transcoding failed for uri")
addStreamToCache(cacheKey, mime, name, openStream(uri))
})(_.failed.foreach(throw _))
}
private def addStreamToCache(cacheKey: CacheKey, mime: Mime, name: Option[String], stream: => InputStream) = {
val promise = Promise[CacheEntry]
val cancelled = new AtomicBoolean(false)
// important: this file needs to be stored unencrypted so we can process it (for e.g. video thumbnails); specifying an explicit cache location "forces" that (for now)
promise.tryCompleteWith {
cache.addStream(cacheKey, new CancellableStream(stream, cancelled), mime, name, cacheLocation = Some(cache.cacheDir), execution = Threading.BlockingIO)
}
new CancellableFuture(promise) {
override def cancel()(implicit tag: LogTag): Boolean = cancelled.compareAndSet(false, true)
}
}
private def loadFromUri(cacheKey: CacheKey, mime: Mime, name: Option[String], uri: URI) = {
// verbose(s"loadFromUri: cacheKey: $cacheKey, mime: $mime, name: $name, uri: $uri")
addStreamToCache(cacheKey, mime, name, openStream(uri))
}
override def loadFromBitmap(assetId: AssetId, bitmap: Bitmap, orientation: Int = ExifInterface.ORIENTATION_NORMAL): Future[Array[Byte]] = Future {
val req = BitmapRequest.Regular(bitmap.getWidth)
val mime = Mime(BitmapUtils.getMime(bitmap))
imgCache.reserve(assetId, req, bitmap.getWidth, bitmap.getHeight)
val img: Bitmap = bitmapDecoder.withFixedOrientation(bitmap, orientation)
imgCache.add(assetId, req, img)
// verbose(s"compressing $assetId")
val before = System.nanoTime
val bos = new ByteArrayOutputStream(65536)
val format = BitmapUtils.getCompressFormat(mime.str)
img.compress(format, 85, bos)
val bytes = bos.toByteArray
val duration = (System.nanoTime - before) / 1e6d
// debug(s"compression took: $duration ms (${img.getWidth} x ${img.getHeight}, ${img.getByteCount} bytes -> ${bytes.length} bytes, ${img.getConfig}, $mime, $format)")
bytes
}(Threading.ImageDispatcher)
}
object AssetLoader {

  /** Base class for download failures; `isRecoverable` signals whether a retry may succeed. */
  abstract class DownloadException extends Exception {
    def isRecoverable: Boolean
  }

  case class DownloadFailedException(error: ErrorResponse) extends DownloadException {
    override def isRecoverable: Boolean = !error.isFatal
    override def getMessage = s"Download failed with error: $error, should retry?: $isRecoverable"
  }

  case object DownloadOnWifiOnlyException extends DownloadException {
    override def isRecoverable = false
    override def getMessage = "Attempted to download image when not on Wifi and DownloadImagesAlways is set to false"
  }

  /** Opens an InputStream for the given URI via the content resolver, falling
    * back to a file-descriptor stream; throws FileNotFoundException when neither works.
    */
  def openStream(context: Context, uri: URI) = {
    val cr = context.getContentResolver
    Option(cr.openInputStream(URI.unwrap(uri)))
      .orElse(Option(cr.openFileDescriptor(URI.unwrap(uri), "r")).map(file => new FileInputStream(file.getFileDescriptor)))
      .getOrElse(throw new FileNotFoundException(s"Can not load image from: $uri"))
  }
}
| wireapp/wire-android-sync-engine | zmessaging/src/main/scala/com/waz/service/downloads/AssetLoader.scala | Scala | gpl-3.0 | 13,258 |
package edu.gemini.spModel.gemini.niri
import edu.gemini.spModel.gemini.niri.Niri.Filter
import edu.gemini.spModel.gemini.niri.Niri.Disperser
import edu.gemini.spModel.gemini.niri.InstNIRI.calcWavelength
import org.junit.Test
import org.junit.Assert._
/**
 * Test cases for the observing wavelength calculation.
 */
class ObsWavelengthCalcTest {

  // With no disperser, the wavelength should come from the filter.
  @Test def testDisperserNone() {
    assertEquals(Filter.BBF_H.getWavelengthAsString, calcWavelength(Disperser.NONE, Filter.BBF_H))
  }

  // With a disperser, its central wavelength should win over the filter's.
  @Test def testDisperserSome() {
    assertFalse(Disperser.J.getCentralWavelengthAsString == Filter.BBF_H.getWavelengthAsString)
    assertEquals(Disperser.J.getCentralWavelengthAsString, calcWavelength(Disperser.J, Filter.BBF_H))
  }
}
package github.keylity
import org.specs2.mutable._
import scala.collection.mutable.Buffer
import scala.collection.mutable.ArrayBuffer
class StaticProxySpec extends Specification {

  "StaticProxy behavior as transactional" should {
    // Proxy that records the transaction lifecycle ("begin"/"commit"/
    // "rollback: ..."/"cleanup") into the supplied buffer.
    object transactional {
      def apply(output: Buffer[String]) = StaticProxy(
        before = () => output += "begin",
        success = () => output += "commit",
        failure = e => output += ("rollback: " + e.getMessage),
        after = () => output += "cleanup")
    }

    // Equivalent proxy assembled through the fluent builder API.
    object trans2 { // Use fluent builder
      def apply(output: Buffer[String]) = {
        StaticProxy.builder()
          .before{()=> output += "begin"}
          .success{()=> output += "commit"}
          .failure{e=> output += ("rollback: " + e.getMessage)}
          .after{()=> output += "cleanup"}
          .build()
      }
    }

    "Do surrounding work for normal flow" in {
      val output = ArrayBuffer[String]()
      def process(): Unit = transactional(output) {
        output += "business done"
      }
      process()
      // begin -> body -> commit -> cleanup, in that order
      output must_== ArrayBuffer("begin", "business done", "commit", "cleanup")

      val output2 = ArrayBuffer[String]()
      trans2(output2) {
        output2 += "builder also works"
      }()
      output2 must_== ArrayBuffer("begin", "builder also works", "commit", "cleanup")
    }

    "Do surrounding work for exceptional flow" in {
      val output = ArrayBuffer[String]()
      def process(): Unit = transactional(output) {
        throw new IllegalArgumentException("illegal format")
      }
      process must throwA[IllegalArgumentException]
      // on failure: rollback (with message) replaces commit, cleanup still runs
      output must_== ArrayBuffer("begin", "rollback: illegal format", "cleanup")
    }
  }
}
| sorra/keylity | src/test/scala/github/keylity/StaticProxySpec.scala | Scala | mit | 1,713 |
package com.greencatsoft.angularjs.internal
import scala.scalajs.js
// Scala.js facade over AngularJS's `angular.Module`. Each registration method
// takes the injectable's name plus an annotated constructor array (dependency
// names followed by the factory function) and returns the module for chaining.
private[angularjs] trait Module extends js.Object {

  def factory(name: String, constructor: js.Array[js.Any]): Module = js.native

  def controller(name: String, constructor: js.Array[js.Any]): Module = js.native

  def service(name: String, constructor: js.Array[js.Any]): Module = js.native

  def config(constructor: js.Array[js.Any]): Module = js.native

  def run(constructor: js.Array[js.Any]): Module = js.native

  def directive(name: String, directiveFactory: js.Array[js.Any]): Module = js.native

  def filter(name: String, constructor: js.Array[js.Any]): Module = js.native
}
| jmnarloch/scalajs-angular | src/main/scala/com/greencatsoft/angularjs/internal/Module.scala | Scala | apache-2.0 | 660 |
/*
* Copyright (C) 2014 LAMP/EPFL
* Copyright (C) 2014 Typesafe Inc. <http://www.typesafe.com>
*/
package selfassembly.examples.pickling
import scala.language.experimental.macros
import scala.reflect.runtime.universe.Mirror
/** A pickled value together with its runtime representation.
  * `unpickle` is macro-generated and rebuilds a `T` from this pickle.
  */
trait Pickle {
  type ValueType
  val value: ValueType

  type PickleFormatType <: PickleFormat
  def unpickle[T] = macro Compat.UnpickleMacros_pickleUnpickle[T]
}
/** Abstracts over a concrete pickle representation: how to create builders
  * for writing pickles and readers for parsing them back.
  */
trait PickleFormat {
  type PickleType <: Pickle
  type OutputType
  def createBuilder(): PBuilder
  def createBuilder(out: OutputType): PBuilder
  def createReader(pickle: PickleType, mirror: Mirror): PReader
}
| phaller/selfassembly | src/main/scala/selfassembly/examples/pickling/PickleFormat.scala | Scala | bsd-3-clause | 607 |
package chandu0101.scalajs.react.components.textfields
import chandu0101.macros.tojs.JSMacro
import japgolly.scalajs.react._
import scala.scalajs.js
import scala.scalajs.js.{Array => JArray}
/** Scala wrapper for the `ReactTagsInput` JS component; each field maps
  * one-to-one onto a component prop (undefined fields are omitted by JSMacro).
  */
case class ReactTagsInput(onBlur: js.UndefOr[() => Unit] = js.undefined,
                          onKeyDown: js.UndefOr[ReactEventI => Unit] = js.undefined,
                          onTagRemove: js.UndefOr[String => Unit] = js.undefined,
                          onChange: js.UndefOr[(JArray[String], String) => Unit] = js.undefined,
                          removeKeys: js.UndefOr[JArray[Int]] = js.undefined,
                          validate: js.UndefOr[String => Boolean] = js.undefined,
                          classNamespace: js.UndefOr[String] = js.undefined,
                          ref: js.UndefOr[String] = js.undefined,
                          addOnBlur: js.UndefOr[Boolean] = js.undefined,
                          placeholder: js.UndefOr[String] = js.undefined,
                          valueLink: js.UndefOr[js.Object] = js.undefined,
                          onKeyUp: js.UndefOr[ReactEventI => Unit] = js.undefined,
                          key: js.UndefOr[String] = js.undefined,
                          addKeys: js.UndefOr[JArray[Int]] = js.undefined,
                          onTagAdd: js.UndefOr[String => Unit] = js.undefined,
                          validateAsync: js.UndefOr[js.Function] = js.undefined,
                          onChangeInput: js.UndefOr[String => Unit] = js.undefined,
                          defaultValue: js.UndefOr[JArray[String]] = js.undefined,
                          transform: js.UndefOr[String => Unit] = js.undefined,
                          value: js.UndefOr[JArray[String]] = js.undefined) {

  /** Builds the React element for these props. */
  def apply() = {
    // JSMacro converts the case-class fields into a plain JS props object.
    val props = JSMacro[ReactTagsInput](this)
    val f = React.asInstanceOf[js.Dynamic].createFactory(js.Dynamic.global.ReactTagsInput)
    f(props).asInstanceOf[ReactComponentU_]
  }
}
/** Facade over the mounted component's imperative API. */
@js.native
trait ReactTagsInputM extends js.Object {
  def focus(): Unit = js.native
  def clear(): Unit = js.native
  /** Current list of tags. */
  def getTags(): JArray[String] = js.native
  def addTag(tag: String): Unit = js.native
  def removeTag(tag: String): Unit = js.native
}
| mproch/scalajs-react-components | core/src/main/scala/chandu0101/scalajs/react/components/textfields/ReactTagsInput.scala | Scala | apache-2.0 | 2,233 |
package com.datawizards.dmg.service
/** [[HDFSService]] implementation that shells out to the `hdfs` CLI. */
object HDFSServiceImpl extends HDFSService {

  /** Copies a local file into HDFS via `hdfs dfs -copyFromLocal`.
    *
    * NOTE(review): both paths are interpolated into the shell command
    * unescaped — paths containing spaces or shell metacharacters will break
    * the command or allow injection. Confirm callers only pass trusted,
    * well-formed paths, or switch to an argument-array exec.
    */
  override def copyLocalFileToHDFS(localFilePath: String, hdfsPath: String): Unit = {
    val command = s"hdfs dfs -copyFromLocal $localFilePath $hdfsPath"
    ConsoleCommandExecutor.execute(command)
  }
}
| mateuszboryn/data-model-generator | src/main/scala/com/datawizards/dmg/service/HDFSServiceImpl.scala | Scala | apache-2.0 | 288 |
package com.github.mdr.mash.ns.git
import com.github.mdr.mash.functions.{ BoundParams, MashFunction, ParameterModel }
import com.github.mdr.mash.runtime.MashList
import scala.collection.JavaConverters._
object PullFunction extends MashFunction("git.pull") {

  val params = ParameterModel.Empty

  /** Runs `git pull` and returns the resulting tracking-ref updates as Mash objects. */
  def call(boundParams: BoundParams): MashList = {
    GitHelper.withGit { git ⇒
      val fetchResult = git.pull.call().getFetchResult
      val updateObjects =
        fetchResult.getTrackingRefUpdates.asScala.toSeq.map(FetchFunction.asMashObject)
      MashList(updateObjects)
    }
  }

  override def typeInferenceStrategy = Seq(FetchBranchUpdateClass)

  override def summaryOpt = Some("Fetch from and integrate with another repository or a local branch")
}
package graphql.resolvers
import akka.actor.{Actor, ActorLogging}
import akka.pattern._
import com.google.inject.Inject
import common.implicits.RichDBIO._
import common.ActorNamed
import model.PaginationParams
import model.ItemsPayload
import repositories.ItemRepository
import scala.concurrent.ExecutionContext
/** Stable actor name for [[ItemResolver]], used when creating/looking it up. */
object ItemResolver extends ActorNamed {
  final val name = "ItemResolver"
}
/**
  * Resolves paginated item queries via the actor model: replies to the sender
  * with an [[ItemsPayload]] built from the repository result.
  *
  * @param itemRepo provides methods for operating an entity in a database
  */
class ItemResolver @Inject()(itemRepo: ItemRepository)(implicit executionContext: ExecutionContext)
    extends Actor
    with ActorLogging {

  override def receive: Receive = {
    case params: PaginationParams =>
      log.debug(s"Received message: [ $params ]")
      val payload = itemRepo
        .getPaginatedObjectsList(params)
        .map(result => ItemsPayload(hasNextPage = result.hasNextPage, entities = result.entities, totalCount = result.totalCount))
        .run
      // Forward the eventual payload back to whoever asked.
      payload.pipeTo(sender)
  }
}
| sysgears/apollo-universal-starter-kit | modules/pagination/server-scala/src/main/scala/graphql/resolvers/ItemResolver.scala | Scala | mit | 1,072 |
/*
* The MIT License
*
* Copyright (c) 2017 Fulcrum Genomics LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
package com.fulcrumgenomics.str.vcf
import com.fulcrumgenomics.FgBioDef.{FilePath, PathPrefix, PathToIntervals, PathToVcf, _}
import com.fulcrumgenomics.cmdline.ClpGroups
import com.fulcrumgenomics.commons.io.{Io, PathUtil}
import com.fulcrumgenomics.commons.util.{LazyLogging, SimpleCounter}
import com.fulcrumgenomics.sopt.util._
import com.fulcrumgenomics.sopt.{arg, clp}
import com.fulcrumgenomics.str.cmdline.FgStrTool
import com.fulcrumgenomics.str.vcf.StrGenotypeConcordance._
import com.fulcrumgenomics.str.vcf.StrInterval.StrAllele
import com.fulcrumgenomics.util.Rscript
import enumeratum.values.{StringEnum, StringEnumEntry}
import htsjdk.variant.vcf.VCFFileReader
import scala.collection.mutable.ListBuffer
import scala.util.Failure
object StrGenotypeConcordance {

  /** Per-allele concordance classification with a terminal-colored rendering. */
  sealed trait ConcordanceType extends StringEnumEntry {
    import com.fulcrumgenomics.str.vcf.StrGenotypeConcordance.ConcordanceType._
    def name: String = this.value
    /** The name wrapped in ANSI escape codes for log highlighting:
      * cyan for true calls, red for false calls, yellow for no-calls. */
    def terminalName: String = {
      this match {
        case TruePositive | TrueNegative => KBLD(KCYN(name))
        case FalsePositive | FalseNegative => KBLDRED(name)
        case NoCall => KBLD(KYEL(name))
      }
    }
  }

  case object ConcordanceType extends StringEnum[ConcordanceType] {
    override val values = findValues
    // Longest display name; used to pad the summary table columns.
    val maxNameLength: Int = this.values.map(_.name.length).max
    case object TruePositive extends ConcordanceType { val value: String = "True Positive" }
    case object TrueNegative extends ConcordanceType { val value: String = "True Negative" }
    case object FalsePositive extends ConcordanceType { val value: String = "False Positive" }
    case object FalseNegative extends ConcordanceType { val value: String = "False Negative" }
    case object NoCall extends ConcordanceType { val value: String = "No Call" }
  }
}
@clp(group=ClpGroups.VcfOrBcf, description=
"""
|Calculates the concordance of STR genotypes.
|
|## Inputs
|
|The VCF from `GenotypeFromGroupedBam` should be given as input.
|
|An interval list specifying the set of regions over which to call STRs should be given. The name field should
|contain a comma list of values as follows:
| 1. the repeat unit length (ex. `3` for the tri-nucleotide repeat `TCATCATCATCA`).
| 2. the number of repeat units (ex. `4` for the tri-nucleotide repeat `TCATCATCATCA`).
| 3. the name of the STR (ex. D1S1656)
|Additional columns can be given for one or more expected truth alleles. For example, a known haploid
|call should have one extra column, a known diploid call should have two extra columns, and so on.
|
|The `--max-distance` can be used to consider two similar STR calls the same, for example `--max-distance 0.5` will
|consider the calls `17.0` and `17.5` to be the same.
|
|## Outputs
|
| * `<outout>.pdf` - the same plot as generated by `GenotypeFromGroupedBam`.
| * `<output>.concordance.tab> - the count per concordance state; computed on a per-allele basis.
| * logging information: the tool will also log the following information: per STR:
| 1. the STR name, # of repeats for the reference, ground truth (both alleles), genotype call (both alleles), and
| concordance states (one per allele).
| 2. a line per observed allele in the raw per-molecule calls, with called # of repeats, the stutter length, the
| raw coverage, and true or false based on if the site is "confident site" (see above).
| 3. summary tables for the stutter and concordance counts.
| NB: terminal escape codes are used for logging to highlight the concordance states.
""")
class StrGenotypeConcordance
(
@arg(flag='i', doc="Input VCF of variant calls from `GenotypeFromGroupedBam`.") val input: PathToVcf,
@arg(flag='l', doc="Interval list with the STR regions and known calls.") val intervals: PathToIntervals,
@arg(flag='o', doc="Prefix for all output files.") val output: PathPrefix,
@arg(flag='m', doc="Maximum repeat distance to consider two calls the same") val maxDistance: Double = 0,
private val skipPlots: Boolean = false // for not plotting in tests
) extends FgStrTool with LazyLogging {
import com.fulcrumgenomics.str.vcf.StrGenotypeConcordance.ConcordanceType._
private val ScriptPath = "com/fulcrumgenomics/str/vcf/StrGenotypeDuplexMolecules.R"
Io.assertReadable(input)
Io.assertReadable(intervals)
Io.assertCanWriteFile(output)
// find the one closes to zero
private def minAbs(x: Double*): Double = {
val min = x.map(Math.abs).min
x.find { y => Math.abs(y) == min }.get
}
private def count(str: StrInterval, call: Double, known: Double): ConcordanceType = {
if (Math.abs(known - call) <= maxDistance) {
if (Math.abs(str.refLength - call) <= maxDistance) TrueNegative else TruePositive
}
else {
if (Math.abs(str.refLength - call) <= maxDistance) FalseNegative else FalsePositive
}
}
  /** Logs one STR locus: reference repeat length, truth alleles, called
    * alleles, and the per-allele concordance states (terminal-highlighted).
    */
  private def logConcordance(str: StrInterval, truth: Seq[Double], call: Seq[Double], concordances: Seq[ConcordanceType]): Unit = {
    val truthStr = truth.map(t => f"$t%.2f").mkString(",")
    val callStr = call.map(c => f"$c%.2f").mkString(",")
    logger.info(f"[${KBLD(KWHT(str.name))}] Ref [${str.refLength.toDouble}%.2f] Truth [$truthStr] Call [$callStr] Concordance [${concordances.map(_.terminalName).mkString(",")}]")
  }
override def execute(): Unit = {
def f(ext: String): FilePath = PathUtil.pathTo(output + ext)
val infoWriter = Io.toWriter(f(".info.txt"))
val vcfIn = new VCFFileReader(input.toFile, false)
val strIntervals = StrInterval.loadIntervals(this.intervals).toSeq
require(vcfIn.getFileHeader.getNGenotypeSamples == 1, s"Expected a single sample, found '${vcfIn.getFileHeader.getNGenotypeSamples}'")
val alleleConcordanceCounter = {
val counter = new SimpleCounter[ConcordanceType]()
ConcordanceType.values.foreach { concordanceType => counter.count(concordanceType, 0) }
counter
}
strIntervals.filter { str => str.truthCalls.nonEmpty}.foreach { str =>
var numOverlapping = 0
val knowns = str.truthCalls.map(_.toDouble).sorted // from low to high
vcfIn.query(str.chrom, str.start, str.end)
.filter(_.getNoCallCount == 0)
.foreach { ctx =>
// Get all calls seen, not just the one genotyped
val counts = {
val refCount = ctx.getAttributeAsInt("REFAC", 0)
val altCounts = ctx.getAttributeAsIntList("AC", 0).map(_.toInt).toSeq
refCount +: altCounts
}
val allCalls = StrAllele.toCalls(str, ctx, counts)
// Get the genotype calls
val calls = ctx.getGenotype(0)
.getAttributeAsString("STR_GT", "")
.split(',')
.map(_.toDouble)
.toSeq.sorted // from low to high
// Update the concordance counts
val concordances = {
// TODO: order the concordances in the same order as calls
// Match the calls with the knowns. Iteratively find the pair of call and known that are the closest match.
val knownsLeft = ListBuffer[Double](knowns:_*)
val callsLeft = ListBuffer[Double](calls:_*)
val callConcordances = ListBuffer[ConcordanceType]()
while (callsLeft.nonEmpty && knownsLeft.nonEmpty) {
// find the call and known pair that have the minimum distance
val (call, known, _) = callsLeft.map { _call =>
val minKnown = knownsLeft.minBy { _known => Math.abs(_known - _call) }
(_call, minKnown, Math.abs(minKnown - _call))
}.minBy(_._3)
callConcordances += count(str, call, known)
callsLeft -= call
knownsLeft -= call
}
// In case we have a mismatching # of alleles.
val extraConcordances = if (calls.length < knowns.length) {
Seq.range(0, knowns.length - calls.length).map { _ => FalseNegative }
}
else if (calls.length > knowns.length) {
Seq.range(0, calls.length - knowns.length).map { _ => FalsePositive }
}
else {
Seq.empty
}
callConcordances ++ extraConcordances
}
concordances.foreach { c => alleleConcordanceCounter.count(c) }
logConcordance(str, knowns, calls, concordances)
// Log some stutter information.
allCalls.foreach { call =>
val stutter = {
val abs = minAbs(knowns.map(k => call.repeatLength - k):_*)
if (abs < 0) Math.ceil(abs) else Math.floor(abs)
}.toInt
logger.info(f"\\tstutter [$stutter] call [${call.repeatLength}] depth [${call.count}]")
}
logger.info("")
infoWriter.write(str.toLongString(allCalls) + "\\n")
numOverlapping += 1
}
if (numOverlapping == 0) {
alleleConcordanceCounter.count(NoCall, 2)
logConcordance(str, knowns, knowns.map(_ => -1.0), Seq(NoCall, NoCall))
logger.info("")
}
else {
require(numOverlapping == 1, s"Found $numOverlapping variants for str '${str.name}'")
}
}
infoWriter.close()
vcfIn.safelyClose()
// Concordance
{
val lines = ListBuffer[String]()
lines += "type\\tcount"
ConcordanceType.values.foreach { concordance =>
lines += f"${concordance.name.padTo(ConcordanceType.maxNameLength, " ").mkString("")}\\t${alleleConcordanceCounter.countOf(concordance)}%6d"
}
val out = f(".concordance.tab")
Io.writeLines(out, lines)
logger.info(KBLD(KGRN("Per-Allele Concordance:")))
lines.foreach { line => logger.info(" " + line.replace('\\t', ' ')) }
logger.info(" " + out)
}
if (!skipPlots && strIntervals.nonEmpty) {
Rscript.execIfAvailable(ScriptPath, f(".info.txt").toString, f(".pdf").toString) match {
case Failure(e) => logger.warning(s"Generation of PDF plots failed: ${e.getMessage}")
case _ => Unit
}
}
else {
logger.warning("No variants outputted, skipping plots")
}
}
} | fulcrumgenomics/fgstr | tools/src/main/scala/com/fulcrumgenomics/str/vcf/StrGenotypeConcordance.scala | Scala | mit | 11,447 |
package smartupedit
package io
import org.specs2._
class IntegrationSpec extends Specification { def is = s2"""
  action escalation
    new file action on a client with changed content
      must not trigger while asking to save but canceling the overwrite $n1
    open file action on a client with changed content
      must not trigger while asking to save but canceling the overwrite $o1
    quit action on a client with changed content
      must not trigger while asking to save but canceling the overwrite $q1
  """

  // -----------------------------------------------------------------------------------------------
  // tests
  // -----------------------------------------------------------------------------------------------

  /** Shared scenario for all three escalation tests (previously triplicated verbatim):
    * a client with unsaved changes whose overwrite dialog is answered with Cancel.
    * Runs `action` on such a client and verifies that the client was asked to save and to
    * overwrite, but that its changed content survived — i.e. the action did not complete. */
  private def cancelledOverwrite(action: FullMockClient => Unit) = {
    val client = new FullMockClient(askOverwriteOption = DialogResult.Cancel)
    client.hasChanged = true
    action(client)
    MockClientState(client) === MockClientState (
      hasBeenAskedToOverwrite = true,
      hasBeenAskedToSave = true,
      hasChanged = true
    )
  }

  def n1 = cancelledOverwrite(_.newFile())

  def o1 = cancelledOverwrite(_.open())

  def q1 = cancelledOverwrite(_.quit())

}
| wookietreiber/smartupedit | core/test/scala/IntegrationSpec.scala | Scala | gpl-3.0 | 1,778 |
import stainless.lang._
import stainless.collection._
object BVMaxInterpret {
  /** A register, identified by its (unbounded) index. */
  case class Reg(n: BigInt)
  /** A bit-vector instruction; `out` is the destination register. */
  sealed abstract class Op {
    def out: Reg
  }
  // out := if ra >= rb (signed) then 1 else 0
  case class BVSge(out: Reg, ra: Reg, rb: Reg) extends Op
  // out := -ra (two's-complement negation)
  case class BVNeg(out: Reg, ra: Reg) extends Op
  // out := ra ^ rb (bitwise xor)
  case class BVXor(out: Reg, ra: Reg, rb: Reg) extends Op
  // out := ra & rb (bitwise and)
  case class BVAnd(out: Reg, ra: Reg, rb: Reg) extends Op
  /** Machine state: a total map from registers to 32-bit values. */
  case class State(
    registers: CMap[Reg, Int],
  ) {
    def load(reg: Reg): Int = registers(reg)
    def store(reg: Reg, v: Int): State = copy(registers = registers.updated(reg, v))
  }
  object State {
    // Every register starts at 0 (CMap default).
    def empty: State = State(CMap(_ => 0))
  }
  /** Computes the value an instruction writes, without updating the state. */
  def eval1(op: Op, state: State): Int = op match {
    case BVSge(_, ra, rb) =>
      if (state.load(ra) >= state.load(rb)) 1 else 0
    case BVNeg(_, ra) =>
      -state.load(ra)
    case BVXor(_, ra, rb) =>
      state.load(ra) ^ state.load(rb)
    case BVAnd(_, ra, rb) =>
      state.load(ra) & state.load(rb)
  }
  /** Executes one instruction: stores its result into its destination register. */
  def eval(op: Op, state: State): State = {
    val res = eval1(op, state)
    state.store(op.out, res)
  }
  /** A straight-line program together with the register holding its result. */
  case class Program(stmts: List[Op], ret: Reg)
  /** Runs `prog` from `init` and returns the final value of the result register. */
  def interpret(prog: Program, init: State): Int = {
    def go(stmts: List[Op], state: State): State = {
      decreases(stmts) // termination measure for the Stainless verifier
      stmts match {
        case Nil() => state
        case Cons(op, rest) => go(rest, eval(op, state))
      }
    }
    val end = go(prog.stmts, init)
    end.load(prog.ret)
  }
  val r0 = Reg(0)
  val r1 = Reg(1)
  val r2 = Reg(2)
  val r3 = Reg(3)
  val r4 = Reg(4)
  val r5 = Reg(5)
  val r6 = Reg(6)
  /** Branch-free max of two ints: r6 = b ^ ((-(a >= b)) & (a ^ b)). */
  def bvmax(r0v: Int, r1v: Int) = {
    val init = State.empty.store(r0, r0v).store(r1, r1v)
    val prog = Program(
      List(
        BVSge(r2, r0, r1),
        BVNeg(r3, r2),
        BVXor(r4, r0, r1),
        BVAnd(r5, r3, r4),
        BVXor(r6, r1, r5),
      ),
      r6
    )
    interpret(prog, init)
  }
  /** Verified property: the bit-twiddling program computes max(a, b). */
  def thm(a: Int, b: Int) = {
    assert(bvmax(a, b) == stainless.math.max(a, b))
  }
}
| epfl-lara/stainless | frontends/benchmarks/verification/valid/BVMaxInterpret.scala | Scala | apache-2.0 | 1,975 |
package me.enkode.j8
import org.scalatest.{Matchers, FlatSpec}
class ScalaFunction2UnitSupportTest extends FlatSpec with Matchers {
  // Concrete fixture mixing in the conversion support for a side-effecting (Int, Int) function.
  case class Fixture(scalaFunction2: (Int, Int) ⇒ Unit) extends ScalaFunction2UnitSupport[Int, Int]

  "ScalaFunction2UnitSupport" should "be able to convert a scala (Int, Int) ⇒ Unit to a java BiConsumer[Int, Int]" in {
    var invoked = false
    object Consumer extends Fixture((_, _) ⇒ invoked = true)
    // Invoking the converted BiConsumer must call through to the original Scala function.
    Consumer.asJava.accept(1, 5)
    invoked should be (true)
  }
}
| kender/java8-converters | src/test/scala/me/enkode/j8/ScalaFunction2UnitSupportTest.scala | Scala | mit | 517 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.utils._
import kafka.common._
import java.nio.ByteBuffer
import java.util.Properties
import kafka.log.{FileMessageSet, LogConfig}
import org.I0Itec.zkclient.ZkClient
import scala.collection._
import kafka.message._
import java.util.concurrent.TimeUnit
import kafka.metrics.KafkaMetricsGroup
import com.yammer.metrics.core.Gauge
import scala.Some
import kafka.common.TopicAndPartition
import kafka.tools.MessageFormatter
import java.io.PrintStream
import org.apache.kafka.common.protocol.types.{Struct, Schema, Field}
import org.apache.kafka.common.protocol.types.Type.STRING
import org.apache.kafka.common.protocol.types.Type.INT32
import org.apache.kafka.common.protocol.types.Type.INT64
import java.util.concurrent.atomic.AtomicBoolean
/**
 * Configuration settings for in-built offset management
 * @param maxMetadataSize The maximum allowed metadata for any offset commit.
 * @param loadBufferSize Batch size for reading from the offsets segments when loading offsets into the cache.
 * @param offsetsRetentionMs Offsets older than this retention period will be discarded (default: 24 hours).
 * @param offsetsRetentionCheckIntervalMs Frequency at which to check for stale offsets.
 * @param offsetsTopicNumPartitions The number of partitions for the offset commit topic (should not change after deployment).
 * @param offsetsTopicSegmentBytes The offsets topic segment bytes should be kept relatively small to facilitate faster
 *                                 log compaction and faster offset loads
 * @param offsetsTopicReplicationFactor The replication factor for the offset commit topic (set higher to ensure availability).
 * @param offsetsTopicCompressionCodec Compression codec for the offsets topic - compression should be turned on in
 *                                     order to achieve "atomic" commits.
 * @param offsetCommitTimeoutMs The offset commit will be delayed until all replicas for the offsets topic receive the
 *                              commit or this timeout is reached. (Similar to the producer request timeout.)
 * @param offsetCommitRequiredAcks The required acks before the commit can be accepted. In general, the default (-1)
 *                                 should not be overridden.
 */
case class OffsetManagerConfig(maxMetadataSize: Int = OffsetManagerConfig.DefaultMaxMetadataSize,
                               loadBufferSize: Int = OffsetManagerConfig.DefaultLoadBufferSize,
                               offsetsRetentionMs: Long = 24*60*60000L, // 24 hours; NOTE(review): inlined rather than a companion Default* constant like the others
                               offsetsRetentionCheckIntervalMs: Long = OffsetManagerConfig.DefaultOffsetsRetentionCheckIntervalMs,
                               offsetsTopicNumPartitions: Int = OffsetManagerConfig.DefaultOffsetsTopicNumPartitions,
                               offsetsTopicSegmentBytes: Int = OffsetManagerConfig.DefaultOffsetsTopicSegmentBytes,
                               offsetsTopicReplicationFactor: Short = OffsetManagerConfig.DefaultOffsetsTopicReplicationFactor,
                               offsetsTopicCompressionCodec: CompressionCodec = OffsetManagerConfig.DefaultOffsetsTopicCompressionCodec,
                               offsetCommitTimeoutMs: Int = OffsetManagerConfig.DefaultOffsetCommitTimeoutMs,
                               offsetCommitRequiredAcks: Short = OffsetManagerConfig.DefaultOffsetCommitRequiredAcks)
object OffsetManagerConfig {
  val DefaultMaxMetadataSize = 4096 // bytes
  val DefaultLoadBufferSize = 5*1024*1024 // bytes read per batch when loading the offsets log
  val DefaultOffsetsRetentionCheckIntervalMs = 600000L // 10 minutes
  val DefaultOffsetsTopicNumPartitions = 1
  val DefaultOffsetsTopicSegmentBytes = 100*1024*1024
  val DefaultOffsetsTopicReplicationFactor = 1.toShort
  val DefaultOffsetsTopicCompressionCodec = NoCompressionCodec
  val DefaultOffsetCommitTimeoutMs = 5000
  val DefaultOffsetCommitRequiredAcks = (-1).toShort // -1 == all in-sync replicas
}
/**
 * In-memory cache of consumer-group offsets, durably backed by the compacted offsets topic.
 * The cache is populated asynchronously from the log when this broker becomes leader for an
 * offsets-topic partition, and cleared on a leader->follower transition. A scheduled task
 * periodically evicts offsets older than the configured retention and writes tombstones so
 * log compaction removes them from the topic as well.
 */
class OffsetManager(val config: OffsetManagerConfig,
                    replicaManager: ReplicaManager,
                    zkClient: ZkClient,
                    scheduler: Scheduler) extends Logging with KafkaMetricsGroup {
  /* offsets and metadata cache */
  private val offsetsCache = new Pool[GroupTopicPartition, OffsetAndMetadata]
  // Guards reads of the cache against concurrent clearing on leader->follower transitions.
  private val followerTransitionLock = new Object
  // Offsets-topic partitions currently being loaded into the cache (guarded by its own monitor).
  private val loadingPartitions: mutable.Set[Int] = mutable.Set()
  private val shuttingDown = new AtomicBoolean(false)
  scheduler.schedule(name = "offsets-cache-compactor",
                     fun = compact,
                     period = config.offsetsRetentionCheckIntervalMs,
                     unit = TimeUnit.MILLISECONDS)
  newGauge("NumOffsets",
    new Gauge[Int] {
      def value = offsetsCache.size
    }
  )
  newGauge("NumGroups",
    new Gauge[Int] {
      def value = offsetsCache.keys.map(_.group).toSet.size
    }
  )
  /** Scheduled task: removes offsets older than the retention period from the cache and appends
    * tombstone messages so compaction also removes them from the offsets topic. */
  private def compact() {
    debug("Compacting offsets cache.")
    val startMs = SystemTime.milliseconds
    val staleOffsets = offsetsCache.filter(startMs - _._2.timestamp > config.offsetsRetentionMs)
    debug("Found %d stale offsets (older than %d ms).".format(staleOffsets.size, config.offsetsRetentionMs))
    // delete the stale offsets from the table and generate tombstone messages to remove them from the log
    val tombstonesForPartition = staleOffsets.map { case(groupTopicAndPartition, offsetAndMetadata) =>
      val offsetsPartition = partitionFor(groupTopicAndPartition.group)
      trace("Removing stale offset and metadata for %s: %s".format(groupTopicAndPartition, offsetAndMetadata))
      offsetsCache.remove(groupTopicAndPartition)
      val commitKey = OffsetManager.offsetCommitKey(groupTopicAndPartition.group,
        groupTopicAndPartition.topicPartition.topic, groupTopicAndPartition.topicPartition.partition)
      // A message with a null payload is a tombstone for the log cleaner.
      (offsetsPartition, new Message(bytes = null, key = commitKey))
    }.groupBy{ case (partition, tombstone) => partition }
    // Append the tombstone messages to the offset partitions. It is okay if the replicas don't receive these (say,
    // if we crash or leaders move) since the new leaders will get rid of stale offsets during their own purge cycles.
    val numRemoved = tombstonesForPartition.flatMap { case(offsetsPartition, tombstones) =>
      val partitionOpt = replicaManager.getPartition(OffsetManager.OffsetsTopicName, offsetsPartition)
      partitionOpt.map { partition =>
        val appendPartition = TopicAndPartition(OffsetManager.OffsetsTopicName, offsetsPartition)
        val messages = tombstones.map(_._2).toSeq
        trace("Marked %d offsets in %s for deletion.".format(messages.size, appendPartition))
        try {
          partition.appendMessagesToLeader(new ByteBufferMessageSet(config.offsetsTopicCompressionCodec, messages:_*))
          tombstones.size
        }
        catch {
          case t: Throwable =>
            error("Failed to mark %d stale offsets for deletion in %s.".format(messages.size, appendPartition), t)
            // ignore and continue
            0
        }
      }
    }.sum
    debug("Removed %d stale offsets in %d milliseconds.".format(numRemoved, SystemTime.milliseconds - startMs))
  }
  /** Log configuration for the offsets topic: small segments plus compaction for fast loads. */
  def offsetsTopicConfig: Properties = {
    val props = new Properties
    props.put(LogConfig.SegmentBytesProp, config.offsetsTopicSegmentBytes.toString)
    props.put(LogConfig.CleanupPolicyProp, "compact")
    props
  }
  /** Maps a consumer group to the offsets-topic partition that owns it. */
  def partitionFor(group: String): Int = Utils.abs(group.hashCode) % config.offsetsTopicNumPartitions
  /**
   * Fetch the current offset for the given group/topic/partition from the underlying offsets storage.
   *
   * @param key The requested group-topic-partition
   * @return If the key is present, return the offset and metadata; otherwise return None
   */
  private def getOffset(key: GroupTopicPartition) = {
    val offsetAndMetadata = offsetsCache.get(key)
    if (offsetAndMetadata == null)
      OffsetMetadataAndError.NoOffset
    else
      OffsetMetadataAndError(offsetAndMetadata.offset, offsetAndMetadata.metadata, ErrorMapping.NoError)
  }
  /**
   * Put the (already committed) offset for the given group/topic/partition into the cache.
   *
   * @param key The group-topic-partition
   * @param offsetAndMetadata The offset/metadata to be stored
   */
  private def putOffset(key: GroupTopicPartition, offsetAndMetadata: OffsetAndMetadata) {
    offsetsCache.put(key, offsetAndMetadata)
  }
  /** Caches a batch of offsets that have already been durably appended to the offsets log. */
  def putOffsets(group: String, offsets: Map[TopicAndPartition, OffsetAndMetadata]) {
    // this method is called _after_ the offsets have been durably appended to the commit log, so there is no need to
    // check for current leadership as we do for the offset fetch
    trace("Putting offsets %s for group %s in offsets partition %d.".format(offsets, group, partitionFor(group)))
    offsets.foreach { case (topicAndPartition, offsetAndMetadata) =>
      putOffset(GroupTopicPartition(group, topicAndPartition), offsetAndMetadata)
    }
  }
  /**
   * The most important guarantee that this API provides is that it should never return a stale offset. i.e., it either
   * returns the current offset or it begins to sync the cache from the log (and returns an error code).
   */
  def getOffsets(group: String, topicPartitions: Seq[TopicAndPartition]): Map[TopicAndPartition, OffsetMetadataAndError] = {
    trace("Getting offsets %s for group %s.".format(topicPartitions, group))
    val offsetsPartition = partitionFor(group)
    /**
     * followerTransitionLock protects against fetching from an empty/cleared offset cache (i.e., cleared due to a
     * leader->follower transition). i.e., even if leader-is-local is true a follower transition can occur right after
     * the check and clear the cache. i.e., we would read from the empty cache and incorrectly return NoOffset.
     */
    followerTransitionLock synchronized {
      if (leaderIsLocal(offsetsPartition)) {
        if (loadingPartitions synchronized loadingPartitions.contains(offsetsPartition)) {
          debug("Cannot fetch offsets for group %s due to ongoing offset load.".format(group))
          topicPartitions.map { topicAndPartition =>
            val groupTopicPartition = GroupTopicPartition(group, topicAndPartition)
            (groupTopicPartition.topicPartition, OffsetMetadataAndError.OffsetsLoading)
          }.toMap
        } else {
          if (topicPartitions.size == 0) {
            // Return offsets for all partitions owned by this consumer group. (this only applies to consumers that commit offsets to Kafka.)
            offsetsCache.filter(_._1.group == group).map { case(groupTopicPartition, offsetAndMetadata) =>
              (groupTopicPartition.topicPartition, OffsetMetadataAndError(offsetAndMetadata.offset, offsetAndMetadata.metadata, ErrorMapping.NoError))
            }.toMap
          } else {
            topicPartitions.map { topicAndPartition =>
              val groupTopicPartition = GroupTopicPartition(group, topicAndPartition)
              (groupTopicPartition.topicPartition, getOffset(groupTopicPartition))
            }.toMap
          }
        }
      } else {
        debug("Could not fetch offsets for group %s (not offset coordinator).".format(group))
        topicPartitions.map { topicAndPartition =>
          val groupTopicPartition = GroupTopicPartition(group, topicAndPartition)
          (groupTopicPartition.topicPartition, OffsetMetadataAndError.NotOffsetManagerForGroup)
        }.toMap
      }
    }
  }
  /**
   * Asynchronously read the partition from the offsets topic and populate the cache
   */
  def loadOffsetsFromLog(offsetsPartition: Int) {
    val topicPartition = TopicAndPartition(OffsetManager.OffsetsTopicName, offsetsPartition)
    loadingPartitions synchronized {
      if (loadingPartitions.contains(offsetsPartition)) {
        info("Offset load from %s already in progress.".format(topicPartition))
      } else {
        loadingPartitions.add(offsetsPartition)
        scheduler.schedule(topicPartition.toString, loadOffsets)
      }
    }
    // Runs on the scheduler thread: replays the offsets-topic partition into the cache,
    // applying tombstones (null payloads) as deletions.
    def loadOffsets() {
      info("Loading offsets from " + topicPartition)
      val startMs = SystemTime.milliseconds
      try {
        replicaManager.logManager.getLog(topicPartition) match {
          case Some(log) =>
            var currOffset = log.logSegments.head.baseOffset
            val buffer = ByteBuffer.allocate(config.loadBufferSize)
            // loop breaks if leader changes at any time during the load, since getHighWatermark is -1
            while (currOffset < getHighWatermark(offsetsPartition) && !shuttingDown.get()) {
              buffer.clear()
              val messages = log.read(currOffset, config.loadBufferSize).asInstanceOf[FileMessageSet]
              messages.readInto(buffer, 0)
              val messageSet = new ByteBufferMessageSet(buffer)
              messageSet.foreach { msgAndOffset =>
                require(msgAndOffset.message.key != null, "Offset entry key should not be null")
                val key = OffsetManager.readMessageKey(msgAndOffset.message.key)
                if (msgAndOffset.message.payload == null) {
                  if (offsetsCache.remove(key) != null)
                    trace("Removed offset for %s due to tombstone entry.".format(key))
                  else
                    trace("Ignoring redundant tombstone for %s.".format(key))
                } else {
                  val value = OffsetManager.readMessageValue(msgAndOffset.message.payload)
                  putOffset(key, value)
                  trace("Loaded offset %s for %s.".format(value, key))
                }
                currOffset = msgAndOffset.nextOffset
              }
            }
            if (!shuttingDown.get())
              info("Finished loading offsets from %s in %d milliseconds."
                .format(topicPartition, SystemTime.milliseconds - startMs))
          case None =>
            warn("No log found for " + topicPartition)
        }
      }
      catch {
        case t: Throwable =>
          error("Error in loading offsets from " + topicPartition, t)
      }
      finally {
        loadingPartitions synchronized loadingPartitions.remove(offsetsPartition)
      }
    }
  }
  // Returns -1 when this broker is not (or is no longer) the leader for the offsets partition.
  private def getHighWatermark(partitionId: Int): Long = {
    val partitionOpt = replicaManager.getPartition(OffsetManager.OffsetsTopicName, partitionId)
    val hw = partitionOpt.map { partition =>
      partition.leaderReplicaIfLocal().map(_.highWatermark).getOrElse(-1L)
    }.getOrElse(-1L)
    hw
  }
  // Leadership check piggybacks on the high watermark sentinel above.
  private def leaderIsLocal(partition: Int) = { getHighWatermark(partition) != -1L }
  /**
   * When this broker becomes a follower for an offsets topic partition clear out the cache for groups that belong to
   * that partition.
   * @param offsetsPartition Groups belonging to this partition of the offsets topic will be deleted from the cache.
   */
  def clearOffsetsInPartition(offsetsPartition: Int) {
    debug("Deleting offset entries belonging to [%s,%d].".format(OffsetManager.OffsetsTopicName, offsetsPartition))
    followerTransitionLock synchronized {
      offsetsCache.keys.foreach { key =>
        if (partitionFor(key.group) == offsetsPartition) {
          offsetsCache.remove(key)
        }
      }
    }
  }
  /** Signals any in-flight offset load loop to stop at the next message boundary. */
  def shutdown() {
    shuttingDown.set(true)
  }
}
object OffsetManager {
  val OffsetsTopicName = "__consumer_offsets"
  // Pairs the key and value schemas for one schema version of the offsets topic.
  private case class KeyAndValueSchemas(keySchema: Schema, valueSchema: Schema)
  private val CURRENT_OFFSET_SCHEMA_VERSION = 0.toShort
  // Key layout v0: (group, topic, partition)
  private val OFFSET_COMMIT_KEY_SCHEMA_V0 = new Schema(new Field("group", STRING),
                                                       new Field("topic", STRING),
                                                       new Field("partition", INT32))
  private val KEY_GROUP_FIELD = OFFSET_COMMIT_KEY_SCHEMA_V0.get("group")
  private val KEY_TOPIC_FIELD = OFFSET_COMMIT_KEY_SCHEMA_V0.get("topic")
  private val KEY_PARTITION_FIELD = OFFSET_COMMIT_KEY_SCHEMA_V0.get("partition")
  // Value layout v0: (offset, metadata, timestamp)
  private val OFFSET_COMMIT_VALUE_SCHEMA_V0 = new Schema(new Field("offset", INT64),
                                                         new Field("metadata", STRING, "Associated metadata.", ""),
                                                         new Field("timestamp", INT64))
  private val VALUE_OFFSET_FIELD = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("offset")
  private val VALUE_METADATA_FIELD = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("metadata")
  private val VALUE_TIMESTAMP_FIELD = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("timestamp")
  // map of versions to schemas
  private val OFFSET_SCHEMAS = Map(0 -> KeyAndValueSchemas(OFFSET_COMMIT_KEY_SCHEMA_V0, OFFSET_COMMIT_VALUE_SCHEMA_V0))
  private val CURRENT_SCHEMA = schemaFor(CURRENT_OFFSET_SCHEMA_VERSION)
  // Looks up the schemas for a given version; unknown versions are a hard error.
  private def schemaFor(version: Int) = {
    val schemaOpt = OFFSET_SCHEMAS.get(version)
    schemaOpt match {
      case Some(schema) => schema
      case _ => throw new KafkaException("Unknown offset schema version " + version)
    }
  }
  /**
   * Generates the key for offset commit message for given (group, topic, partition)
   *
   * NOTE(review): the `versionId` parameter is currently unused — the key is always written with
   * CURRENT_OFFSET_SCHEMA_VERSION. Confirm before relying on it.
   *
   * @return key for offset commit message
   */
  def offsetCommitKey(group: String, topic: String, partition: Int, versionId: Short = 0): Array[Byte] = {
    val key = new Struct(CURRENT_SCHEMA.keySchema)
    key.set(KEY_GROUP_FIELD, group)
    key.set(KEY_TOPIC_FIELD, topic)
    key.set(KEY_PARTITION_FIELD, partition)
    val byteBuffer = ByteBuffer.allocate(2 /* version */ + key.sizeOf)
    byteBuffer.putShort(CURRENT_OFFSET_SCHEMA_VERSION)
    key.writeTo(byteBuffer)
    byteBuffer.array()
  }
  /**
   * Generates the payload for offset commit message from given offset and metadata
   *
   * @param offsetAndMetadata consumer's current offset and metadata
   * @return payload for offset commit message
   */
  def offsetCommitValue(offsetAndMetadata: OffsetAndMetadata): Array[Byte] = {
    val value = new Struct(CURRENT_SCHEMA.valueSchema)
    value.set(VALUE_OFFSET_FIELD, offsetAndMetadata.offset)
    value.set(VALUE_METADATA_FIELD, offsetAndMetadata.metadata)
    value.set(VALUE_TIMESTAMP_FIELD, offsetAndMetadata.timestamp)
    val byteBuffer = ByteBuffer.allocate(2 /* version */ + value.sizeOf)
    byteBuffer.putShort(CURRENT_OFFSET_SCHEMA_VERSION)
    value.writeTo(byteBuffer)
    byteBuffer.array()
  }
  /**
   * Decodes the offset messages' key
   *
   * @param buffer input byte-buffer
   * @return an GroupTopicPartition object
   */
  def readMessageKey(buffer: ByteBuffer): GroupTopicPartition = {
    val version = buffer.getShort()
    val keySchema = schemaFor(version).keySchema
    val key = keySchema.read(buffer).asInstanceOf[Struct]
    val group = key.get(KEY_GROUP_FIELD).asInstanceOf[String]
    val topic = key.get(KEY_TOPIC_FIELD).asInstanceOf[String]
    val partition = key.get(KEY_PARTITION_FIELD).asInstanceOf[Int]
    GroupTopicPartition(group, TopicAndPartition(topic, partition))
  }
  /**
   * Decodes the offset messages' payload and retrieves offset and metadata from it
   *
   * @param buffer input byte-buffer
   * @return an offset-metadata object from the message; null for a tombstone
   */
  def readMessageValue(buffer: ByteBuffer): OffsetAndMetadata = {
    if(buffer == null) { // tombstone
      null
    } else {
      val version = buffer.getShort()
      val valueSchema = schemaFor(version).valueSchema
      val value = valueSchema.read(buffer).asInstanceOf[Struct]
      val offset = value.get(VALUE_OFFSET_FIELD).asInstanceOf[Long]
      val metadata = value.get(VALUE_METADATA_FIELD).asInstanceOf[String]
      val timestamp = value.get(VALUE_TIMESTAMP_FIELD).asInstanceOf[Long]
      OffsetAndMetadata(offset, metadata, timestamp)
    }
  }
  // Formatter for use with tools such as console consumer: Consumer should also set exclude.internal.topics to false.
  // (specify --formatter "kafka.server.OffsetManager\\$OffsetsMessageFormatter" when consuming __consumer_offsets)
  class OffsetsMessageFormatter extends MessageFormatter {
    def writeTo(key: Array[Byte], value: Array[Byte], output: PrintStream) {
      val formattedKey = if (key == null) "NULL" else OffsetManager.readMessageKey(ByteBuffer.wrap(key)).toString
      val formattedValue = if (value == null) "NULL" else OffsetManager.readMessageValue(ByteBuffer.wrap(value)).toString
      output.write(formattedKey.getBytes)
      output.write("::".getBytes)
      output.write(formattedValue.getBytes)
      output.write("\\n".getBytes)
    }
  }
}
/** Cache key pairing a consumer group with one of its topic partitions. */
case class GroupTopicPartition(group: String, topicPartition: TopicAndPartition) {

  /** Convenience constructor taking the topic and partition separately. */
  def this(group: String, topic: String, partition: Int) = this(group, new TopicAndPartition(topic, partition))

  override def toString: String = "[%s,%s,%d]".format(group, topicPartition.topic, topicPartition.partition)
}
| stealthly/kafka | core/src/main/scala/kafka/server/OffsetManager.scala | Scala | apache-2.0 | 21,504 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.nlp.parse
import cc.factorie.app.nlp._
import cc.factorie._
import cc.factorie.app.nlp.pos.PennPosTag
import scala.collection.mutable.{HashMap, ArrayBuffer}
import scala.util.parsing.json.JSON
import scala.annotation.tailrec
import java.io._
import cc.factorie.util.{Logger, BinarySerializer, FileUtils}
import scala._
import cc.factorie.optimize._
import scala.concurrent.Await
import cc.factorie.variable.{LabeledCategoricalVariable, BinaryFeatureVectorVariable, CategoricalVectorDomain, CategoricalDomain}
import scala.collection.mutable
import cc.factorie.app.classify.backend._
import scala.Some
import scala.Some
/** Default transition-based dependency parser. */
class TransitionBasedParser extends DocumentAnnotator {
  private val logger = Logger.getLogger(this.getClass.getName)
  /** Auxiliary constructors: build a parser and immediately deserialize a pre-trained model. */
  def this(stream:InputStream) = { this(); deserialize(stream) }
  def this(file: File) = this(new FileInputStream(file))
  def this(url:java.net.URL) = {
    this()
    val stream = url.openConnection.getInputStream
    // NOTE(review): InputStream.available is only an estimate; 0 does not necessarily mean the
    // resource is missing — confirm this guard is reliable for the URLs actually used.
    if (stream.available <= 0) throw new Error("Could not open "+url)
    logger.debug("TransitionBasedParser loading from "+url)
    deserialize(stream)
  }
  /** One transition decision, encoded as the string "<leftOrRightOrNo> <shiftOrReduceOrPass> <label>"
    * where the first two components are integer codes and the third is the dependency label. */
  case class ParseDecision(action: String) {
    val Array(lrnS, srpS, label) = action.split(" ")
    val leftOrRightOrNo = lrnS.toInt // leftarc-rightarc-noarc
    val shiftOrReduceOrPass = srpS.toInt // shift-reduce-pass
  }
  // Domain over string-encoded decisions, seeded with a default "no-op" category so it is never empty.
  object labelDomain extends CategoricalDomain[String]
  val defaultCategory = "-1 -1 N"
  labelDomain += defaultCategory
  /** A labeled decision variable: the gold transition for `state`, together with its feature vector. */
  class ParseDecisionVariable(targetDecision: ParseDecision, val state: ParseState) extends LabeledCategoricalVariable(targetDecision.action) {
    def domain = labelDomain
    val features = new NonProjDependencyParserFeatures(this)
    /* Include <NULL>s */
    // featureGenerators.foreach(f => features += f.apply(state))
    /* DO NOT include <NULL>s */
    // TODO if we want to keep this in here, change implementation to use Option instead of <NULL> string?
    // NOTE(review): this keeps a feature only when its "<NULL>" count differs from its "|"-separator
    // count plus one, i.e. it appears intended to drop features all of whose components are <NULL> —
    // confirm the condition before touching it.
    featureGenerators.foreach(f => {
      val featString = f.apply(state)
      if("<NULL>".r.findAllIn(featString).length-1 != "\\\\|".r.findAllIn(featString).length) features += featString
    })
  }
  // Domain of the sparse binary features used by the classifier.
  object featuresDomain extends CategoricalVectorDomain[String]
  /** Binary feature vector for a single parse decision. */
  class NonProjDependencyParserFeatures(val decisionVariable: ParseDecisionVariable) extends BinaryFeatureVectorVariable[String] {
    def domain = featuresDomain
    // Once the domain is frozen (after training), unseen feature strings are silently ignored.
    override def skipNonCategories = domain.dimensionDomain.frozen
    /* remove bias for now */
    //this += "BIAS"
  }
// Serialization
def serialize(file: File): Unit = {
if (file.getParentFile ne null) file.getParentFile.mkdirs()
serialize(new java.io.FileOutputStream(file))
}
def deserialize(file: File): Unit = {
require(file.exists(), "Trying to load non-existent file: '" +file)
deserialize(new java.io.FileInputStream(file))
}
  /** Writes the feature domain, label domain, and model weights to `stream` (buffered, then closed).
    * Side effect: the model's weight tensor is replaced by a sparse copy so zeros are not serialized. */
  def serialize(stream: java.io.OutputStream): Unit = {
    import cc.factorie.util.CubbieConversions._
    // Sparsify the evidence weights
    import scala.language.reflectiveCalls
    val sparseEvidenceWeights = new la.DenseLayeredTensor2(featuresDomain.dimensionDomain.size, labelDomain.size, new la.SparseIndexedTensor1(_))
    model.weights.value.foreachElement((i, v) => if (v != 0.0) sparseEvidenceWeights += (i, v))
    model.weights.set(sparseEvidenceWeights)
    val dstream = new java.io.DataOutputStream(new BufferedOutputStream(stream))
    BinarySerializer.serialize(featuresDomain.dimensionDomain, dstream)
    BinarySerializer.serialize(labelDomain, dstream)
    BinarySerializer.serialize(model, dstream)
    dstream.close() // TODO Are we really supposed to close here, or is that the responsibility of the caller?
  }
  /** Reads the feature domain, label domain, and model weights from `stream` (buffered, then closed).
    * The weight tensor is pre-sized as a sparse layered tensor before deserialization. */
  def deserialize(stream: java.io.InputStream): Unit = {
    import cc.factorie.util.CubbieConversions._
    // Get ready to read sparse evidence weights
    val dstream = new java.io.DataInputStream(new BufferedInputStream(stream))
    BinarySerializer.deserialize(featuresDomain.dimensionDomain, dstream)
    BinarySerializer.deserialize(labelDomain, dstream)
    import scala.language.reflectiveCalls
    model.weights.set(new la.DenseLayeredTensor2(featuresDomain.dimensionDomain.size, labelDomain.size, new la.SparseIndexedTensor1(_)))
    BinarySerializer.deserialize(model, dstream)
    println("TransitionBasedParser model parameters oneNorm "+model.parameters.oneNorm)
    dstream.close() // TODO Are we really supposed to close here, or is that the responsibility of the caller?
  }
  // Cache of decision strings to ParseDecision instances (avoids re-splitting the action string).
  val parseDecisionCache = collection.mutable.HashMap[String,ParseDecision]()
  def getParseDecision(s: String): ParseDecision = parseDecisionCache.getOrElseUpdate(s, new ParseDecision(s))
  /** Predicts the best transition for the given decision variable's features. */
  def classify(v: ParseDecisionVariable) = getParseDecision(labelDomain.category(model.classification(v.features.value).bestLabelIndex))
  lazy val model = new LinearMulticlassClassifier(labelDomain.size, featuresDomain.dimensionSize)
def trainFromVariables(vs: Iterable[ParseDecisionVariable], trainer: MulticlassClassifierTrainer[LinearMulticlassClassifier], evaluate: (LinearMulticlassClassifier) => Unit) {
trainer.baseTrain(model, vs.map(_.target.intValue).toSeq, vs.map(_.features.value).toSeq, vs.map(v => 1.0).toSeq, evaluate)
}
  /** Trains the parser: generates oracle decisions, prunes rare features and freezes the feature
    * domain, fits the linear model with AdaGrad-RDA (L1/L2 strengths scaled by the number of
    * training sentences), then runs `numBootstrappingIterations` rounds of boosting on decisions
    * generated by the current model.
    * NOTE(review): relies on `generateDecisions`, defined elsewhere in this class. */
  def train(trainSentences:Seq[Sentence], testSentences:Seq[Sentence], numBootstrappingIterations:Int = 2, l1Factor:Double = 0.00001, l2Factor:Double = 0.00001, nThreads: Int = 1)(implicit random: scala.util.Random): Unit = {
    featuresDomain.dimensionDomain.gatherCounts = true
    var trainingVars: Iterable[ParseDecisionVariable] = generateDecisions(trainSentences, ParserConstants.TRAINING, nThreads)
    println("Before pruning # features " + featuresDomain.dimensionDomain.size)
    println("TransitionBasedParser.train first 20 feature counts: "+featuresDomain.dimensionDomain.counts.toSeq.take(20))
    featuresDomain.dimensionDomain.trimBelowCount(5) // Every feature is actually counted twice, so this removes features that were seen 2 times or less
    featuresDomain.freeze()
    println("After pruning # features " + featuresDomain.dimensionSize)
    // Regenerate decisions against the pruned, frozen feature domain.
    trainingVars = generateDecisions(trainSentences, ParserConstants.TRAINING, nThreads)
    val numTrainSentences = trainSentences.size
    val optimizer = new AdaGradRDA(1.0, 0.1, l1Factor / numTrainSentences, l2Factor / numTrainSentences)
    trainDecisions(trainingVars, optimizer, trainSentences, testSentences)
    trainingVars = null // Allow them to be GC'ed
    for (i <- 0 until numBootstrappingIterations) {
      println("Boosting iteration " + (i+1))
      trainDecisions(generateDecisions(trainSentences, ParserConstants.BOOSTING, nThreads), optimizer, trainSentences, testSentences)
    }
  }
  /** One online training run (2 iterations) over the given decision variables,
    * logging model sparsity and train/test accuracy after each evaluation. */
  def trainDecisions(trainDecisions:Iterable[ParseDecisionVariable], optimizer:optimize.GradientOptimizer, trainSentences:Seq[Sentence], testSentences:Seq[Sentence])(implicit random: scala.util.Random): Unit = {
    def evaluate(c: LinearMulticlassClassifier) {
      // Fraction of exactly-zero weights, i.e. sparsity induced by the regularizer.
      println(model.weights.value.toSeq.count(_ == 0).toFloat/model.weights.value.length +" sparsity")
      println(" TRAIN "+testString(trainSentences))
      println(" TEST "+testString(testSentences))
    }
    new OnlineLinearMulticlassTrainer(optimizer=optimizer, maxIterations=2).baseTrain(model, trainDecisions.map(_.target.intValue).toSeq, trainDecisions.map(_.features.value).toSeq, trainDecisions.map(v => 1.0).toSeq, evaluate=evaluate)
  }
def testString(testSentences:Seq[Sentence]): String = {
val(las, uas, tokSpeed, sentSpeed) = test(testSentences)
"LAS="+las+" UAS="+uas+s" ${tokSpeed} tokens/sec ${sentSpeed} sentences/sec"
}
def test(testSentences:Seq[Sentence]): (Double, Double, Double, Double) = {
var i = 0
val numSentences = testSentences.size
var t0: Long = 0
var totalTime: Long = 0
while(i < numSentences){
t0 = System.currentTimeMillis()
process(testSentences(i))
totalTime += System.currentTimeMillis() - t0
i += 1
}
val totalTokens = testSentences.map(_.length).sum
val totalSentences = testSentences.size
val pred = testSentences.map(_.attr[ParseTree])
(ParserEval.calcLas(pred), ParserEval.calcUas(pred), totalTokens*1000.0/totalTime, totalSentences*1000.0/totalTime)
}
  // JSON feature-template spec bundled on the classpath, and the generators compiled from it.
  lazy val testFeatureSpec = io.Source.fromURL(this.getClass.getResource("/parser-features.json")).getLines().mkString("\n")
  lazy val featureGenerators: Seq[DependencyFeatures.DependencyFeatureGenerator] = DependencyFeatures.fromJSON(testFeatureSpec)
object ParserConstants {
val ROOT_ID = 0
val SHIFT = 1
val REDUCE = 2
val PASS = 3
val LEFT = 4
val RIGHT = 5
val NO = 6
val TRAINING = 7
val PREDICTING = 8
val BOOSTING = 9
def getString(constantVal: Int): String = constantVal match {
case ParserConstants.SHIFT => "shift"
case ParserConstants.REDUCE => "reduce"
case ParserConstants.PASS => "pass"
case ParserConstants.LEFT => "left"
case ParserConstants.RIGHT => "right"
case ParserConstants.NO => "no"
case ParserConstants.TRAINING => "training"
case ParserConstants.PREDICTING => "predicting"
case ParserConstants.BOOSTING => "boosting"
case ParserConstants.ROOT_ID => "root id"
case _ => throw new Error(s"Integer value $constantVal is not defined in ParserConstants")
}
}
  /** Parse each sentence with an oracle (gold oracle in TRAINING mode, model-guided boosting
    * oracle otherwise), collecting one ParseDecisionVariable per transition taken.
    * Sentences are processed in parallel across nThreads. */
  def generateDecisions(ss: Iterable[Sentence], mode: Int, nThreads: Int): Iterable[ParseDecisionVariable] = {
    val decs = cc.factorie.util.Threading.parMap(ss, nThreads)(s => {
      val oracle: NonProjectiveOracle = {
        if (mode == ParserConstants.TRAINING) new NonprojectiveGoldOracle(s)
        else new NonprojectiveBoostingOracle(s, classify)
      }
      // The parse is run for its side effect of populating oracle.instances.
      new NonProjectiveShiftReduce(oracle.predict).parse(s)
      oracle.instances.toSeq
    })
    decs.flatten
  }
  /** One boosting round: regenerate decisions with the current model in the loop, then retrain. */
  def boosting(ss: Iterable[Sentence], nThreads: Int, trainer: MulticlassClassifierTrainer[LinearMulticlassClassifier], evaluate: LinearMulticlassClassifier => Unit) =
    trainFromVariables(generateDecisions(ss, ParserConstants.BOOSTING, nThreads), trainer, evaluate)
  // For DocumentAnnotator trait
  /** Parse every sentence of the document in place and return the document. */
  def process(doc: Document) = { doc.sentences.foreach(process); doc }
  def prereqAttrs = Seq(classOf[Sentence], classOf[PennPosTag], classOf[lemma.WordNetTokenLemma]) // Sentence also includes Token
  def postAttrs = Seq(classOf[ParseTree])
  /** Render a token's parse annotation as "parentIndex<TAB>label" (parent index 1-based,
    * 0 meaning root); "_<TAB>_" when the sentence has no ParseTree. */
  override def tokenAnnotationString(token:Token): String = {
    val sentence = token.sentence
    val pt = if (sentence ne null) sentence.attr[ParseTree] else null
    if (pt eq null) "_\t_"
    else (pt.parentIndex(token.positionInSentence)+1).toString+"\t"+pt.label(token.positionInSentence).categoryValue
  }
  //override def tokenAnnotationString(token:Token): String = { val parse = token.parseParent; if (parse ne null) parse.positionInSentence+"\t"+token.parseLabel.categoryValue else "_\t_" }
def process(s: Sentence): Sentence = {
val parse = s.attr.getOrElseUpdate(new ParseTree(s))
new NonProjectiveShiftReduce(predict = classify).parse(s).zipWithIndex.map(dt => {
parse.setParent(dt._2, dt._1._1)
parse.label(dt._2).set(ParseTreeLabelDomain.index(dt._1._2))(null)
})
s
}
  /* Takes features and turns them into a parse decision using predict(ParseDecisionVariable => ParseDecision) */
  // Placeholder decision built from the default category; used to seed new ParseDecisionVariables.
  val defaultDecision = ParseDecision(defaultCategory)
  /** Non-projective list-based shift-reduce parser. Drives a ParseState with transitions
    * chosen by `predict` until the input is exhausted. */
  class NonProjectiveShiftReduce(val predict: ParseDecisionVariable => ParseDecision) {
    import ParserConstants._
    /** Parse a sentence. Returns, for each token (excluding the artificial root),
      * its (0-based head index, dependency label), or (-1, "") for tokens left headless. */
    def parse(s: Sentence) = {
      // ParseState(lambda, beta, reduceID, sentence)
      val state = new ParseState(0, 1, collection.mutable.HashSet[Int](), s)
      while(state.input < state.sentenceTokens.length) {
        if (state.stack < 0)
          noShift(state)
        else {
          val decision = new ParseDecisionVariable(defaultDecision, state)
          val label = predict(decision)
          val beta = state.inputToken(0)
          val lambda = state.stackToken(0)
          /* Debugging output: uncomment println calls here to trace each predicted transition
             together with the lambda/beta tokens and their head/dependent context. */
          if (label.leftOrRightOrNo == LEFT) {
            // Left arc lambda <- beta, unless lambda is the root or the arc would create a cycle.
            if (state.stack == ROOT_ID) noShift(state)
            else if (beta.isDescendentOf(lambda)) noPass(state)
            else if (label.shiftOrReduceOrPass == REDUCE) leftReduce(label.label, state)
            else leftPass(label.label, state)
          }
          else if (label.leftOrRightOrNo == RIGHT) {
            // Right arc lambda -> beta, unless the arc would create a cycle.
            if (lambda.isDescendentOf(beta)) noPass(state)
            else if (label.shiftOrReduceOrPass == SHIFT) rightShift(label.label, state)
            else rightPass(label.label, state)
          }
          else {
            // No arc: shift, reduce (only legal once lambda has a head), or pass.
            if (label.shiftOrReduceOrPass == SHIFT) noShift(state)
            else if (label.shiftOrReduceOrPass == REDUCE && lambda.hasHead) noReduce(state)
            else noPass(state)
          }
        }
      }
      // Drop the artificial root; report each token's 0-based head index and label.
      state.sentenceTokens.drop(1).map(dt => if (dt.hasHead) (dt.head.depToken.thisIdx-1, dt.head.label) else (-1,""))
    }
    /** Move the stack pointer left to the nearest not-yet-reduced token, or -1 if none remain. */
    private def passAux(state: ParseState): Unit = {
      var i = state.stack - 1
      while (i >= 0) {
        if (!state.reducedIds.contains(i)) {
          state.stack = i
          return
        }
        i -= 1
      }
      state.stack = i
    }
    // Elementary operations; the compound transitions below pair an arc with a stack action.
    private def leftArc(label: String, state: ParseState) { state.stackToken(0).setHead(state.inputToken(0), label) }
    private def rightArc(label: String, state: ParseState) { state.inputToken(0).setHead(state.stackToken(0), label) }
    private def shift(state: ParseState) { state.stack = state.input; state.input += 1 }
    private def reduce(state: ParseState) { state.reducedIds.add(state.stack); passAux(state) }
    private def pass(state: ParseState) { passAux(state: ParseState) }
    private def noShift(state: ParseState) { shift(state) }
    private def noReduce(state: ParseState) { reduce(state) }
    private def noPass(state: ParseState) { pass(state) }
    private def leftReduce(label: String, state: ParseState) { leftArc(label, state); reduce(state) }
    private def leftPass(label: String, state: ParseState) { leftArc(label, state); pass(state) }
    private def rightShift(label: String, state: ParseState) { rightArc(label, state); shift(state) }
    private def rightPass(label: String, state: ParseState) { rightArc(label, state); pass(state) }
  }
  /** Oracle mapping parse states to (gold or gold-informed) decisions, recording every
    * decision variable it produces in `instances` for later training. */
  trait NonProjectiveOracle {
    import ParserConstants._
    val sentence: Sentence
    def predict(state: ParseDecisionVariable): ParseDecision
    var instances = new ArrayBuffer[ParseDecisionVariable] { override val initialSize = 100 }
    // Gold (head, label) pairs; head indices shifted by +1 for the artificial root at index 0.
    def getSimpleDepArcs = sentence.parse.targetParents.map(_ + 1).zip(sentence.parse.labels.map(_.target.value.category))
    def getDepArcs = { Seq((-1, "<ROOT-ROOT>")) ++ getSimpleDepArcs.map { case (i: Int, l: String) => (i, l) } }
    val goldHeads = getDepArcs
    /** Derive the gold transition (direction, stack action, label) for the current state. */
    def getGoldDecision(state: ParseState): ParseDecision = {
      val shiftOrReduceOrPass =
        getGoldLRN(state) match {
          case LEFT => if (shouldGoldReduce(hasHead=true, state=state)) REDUCE else PASS
          case RIGHT => if (shouldGoldShift(state=state)) SHIFT else PASS
          case _ => {
            if (shouldGoldShift(state=state)) SHIFT
            else if (shouldGoldReduce(hasHead=false, state=state)) REDUCE
            else PASS
          }
        }
      new ParseDecision(getGoldLRN(state) + " " + shiftOrReduceOrPass + " " + getGoldLabel(state))
    }
    /** Gold label for an arc between stack and input tokens, or "N" if no gold arc joins them. */
    def getGoldLabel(state: ParseState): String = {
      if (goldHeads(state.stack)._1 == state.input) goldHeads(state.stack)._2
      else if (goldHeads(state.input)._1 == state.stack) goldHeads(state.input)._2
      else "N"
    }
    /** Gold arc direction between stack and input tokens: LEFT, RIGHT, or NO. */
    def getGoldLRN(state: ParseState): Int = {
      if (goldHeads(state.stack)._1 == state.input) LEFT
      else if (goldHeads(state.input)._1 == state.stack) RIGHT
      else NO
    }
    /** Shift is gold-safe iff the input token has no gold head/dependent left of the stack pointer
      * among unreduced tokens. */
    def shouldGoldShift(state: ParseState): Boolean = {
      if (goldHeads(state.input)._1 < state.stack) return false
      else
        for (i <- (state.stack - 1) until 0 by -1) if (!state.reducedIds.contains(i)) {
          if (goldHeads(i)._1 == state.input)
            return false
        }
      true
    }
    /** Reduce is gold-safe iff the stack token has (or just received) a head and no remaining
      * input token takes it as gold head. */
    def shouldGoldReduce(hasHead: Boolean, state: ParseState): Boolean = {
      if (!hasHead && !state.stackToken(0).hasHead)
        return false
      for (i <- (state.input + 1) until state.sentenceTokens.length)
        if (goldHeads(i)._1 == state.stack)
          return false
      true
    }
  }
  /** Oracle that always follows the gold decision (used in TRAINING mode). */
  class NonprojectiveGoldOracle(val sentence: Sentence) extends NonProjectiveOracle {
    def predict(decisionVariable: ParseDecisionVariable): ParseDecision = {
      val decision = getGoldDecision(decisionVariable.state)
      instances += new ParseDecisionVariable(decision, decisionVariable.state)
      decision
    }
  }
  /** Boosting oracle: records the gold decision as a training instance but follows the base
    * model's prediction, so recorded states match those encountered at prediction time. */
  class NonprojectiveBoostingOracle(val sentence: Sentence, basePredict: ParseDecisionVariable => ParseDecision) extends NonProjectiveOracle {
    def predict(decisionVariable: ParseDecisionVariable): ParseDecision = {
      val label = new ParseDecisionVariable(getGoldDecision(decisionVariable.state), decisionVariable.state)
      instances += label
      basePredict(label)
    }
  }
  /** Feature-template machinery: parses template strings such as "l+1:f" (location, offset, form)
    * into functions from ParseState to feature strings, loaded from a JSON spec. */
  object DependencyFeatures {
    val locationAbbrevs = collection.mutable.HashMap(
      "S_LAMBDA" -> "l",
      "S_STACK" -> "s",
      "S_BETA" -> "b",
      "R_H" -> "h", // head
      "R_LMD" -> "lmd", // left-most dependent
      "R_RMD" -> "rmd" // right-most dependent
    )
    val formAbbrevs = collection.mutable.HashMap(
      "F_FORM" -> "f",
      "F_LEMMA" -> "m",
      "F_POS" -> "p",
      "F_DEPREL" -> "d",
      "F_LNPL" -> "lnpl", // left-nearest punctuation of lambda
      "F_RNPL" -> "rnpl", // right-nearest punctuation of lambda
      "F_LNPB" -> "lnpb", // left-nearest punctuation of beta
      "F_RNPB" -> "rnpb" // right-nearest punctuation of beta
    )
    // Location selectors: abbreviation -> (offset -> (state -> token)). May yield null/nullToken.
    val locationFns: HashMap[String, (Int) => (ParseState) => DepToken] = mutable.HashMap(
      "b" -> ((offset: Int) => (state: ParseState) => state.inputToken(offset)),
      "l" -> ((offset: Int) => (state: ParseState) => state.lambdaToken(offset)),
      "s" -> ((offset: Int) => (state: ParseState) => state.stackToken(offset)),
      "l_h" -> ((_: Int) => (state: ParseState) => if (state.lambdaToken(0).hasHead) state.lambdaToken(0).head.depToken else null),
      "l_lmd" -> ((offset: Int) => (state: ParseState) => state.lambdaToken(offset).leftmostDependent),
      "l_rmd" -> ((offset: Int) => (state: ParseState) => state.lambdaToken(offset).rightmostDependent),
      "b_lmd" -> ((offset: Int) => (state: ParseState) => state.stackToken(offset).leftmostDependent),
      "b_rmd" -> ((offset: Int) => (state: ParseState) => state.stackToken(offset).rightmostDependent),
      // left-nearest sibling of stack
      "l_lns" -> ((offset: Int) => (state: ParseState) => state.lambdaToken(offset).leftNearestSibling),
      /* 3rd order features */
      // grand-head of lambda
      "l_h2" -> ((_: Int) => (state: ParseState) => if (state.lambdaToken(0).hasGrandHead) state.lambdaToken(0).grandHead.depToken else null),
      // 2nd left-most dependent of lambda
      "l_lmd2" -> ((offset: Int) => (state: ParseState) => state.lambdaToken(offset).leftmostDependent2),
      // 2nd right-most dependent of lambda
      "l_rmd2" -> ((offset: Int) => (state: ParseState) => state.lambdaToken(offset).rightmostDependent2),
      // 2nd left-most dependent of beta
      "b_lmd2" -> ((offset: Int) => (state: ParseState) => state.stackToken(offset).leftmostDependent2)
    )
    // TODO make this nicer
    // Form extractors: abbreviation -> (token -> feature string); null token yields "<NULL>".
    val formFns = HashMap(
      "f" -> ((t: DepToken) => "f:" + (if (t != null) t.form else "<NULL>")),
      "m" -> ((t: DepToken) => "m:" + (if (t != null) t.lemma else "<NULL>")),
      "p" -> ((t: DepToken) => "p:" + (if (t != null) t.pos else "<NULL>")),
      "d" -> ((t: DepToken) => "d:" + (if (t != null && t.hasHead) t.head.label else "<NULL>")),
      "b0" -> ((t: DepToken) => "lFirst:" + (if (t != null && t.thisIdx != -1) t.state.lambdaToken(0) eq t.state.sentenceTokens(1) else false).toString),
      "b1" -> ((t: DepToken) => "bLast:" + (if (t != null) t.state.stackToken(0) eq t.state.sentenceTokens.last else false).toString),
      "b2" -> ((t: DepToken) => "adjacent:" + (if (t != null) t.state.input - t.state.stack == 1 else false).toString)
    )
    /* Takes a string definition of a feature template and applies it to a ParseState to get
     * the string feature for the given ParseState */
    def generators(locationOffsetAndForm: String): (ParseState => String) = {
      // Pattern: location (letters/underscore), optional signed offset, ':', form abbreviation.
      val LocationOffsetAndForm = """([a-z_]*)[+]?([-0-9]*):([a-z]*[0-9]?)""".r
      locationOffsetAndForm match {
        case LocationOffsetAndForm(location, offset, form) => {
          val locationFn = locationFns(location)(if (offset == "") 0 else offset.toInt)
          (state: ParseState) => location + offset + ":" + formFns(form)(locationFn(state))
        }
        case _ => throw new Error("Couldn't parse location and form from feature generator string.")
      }
    }
    abstract class DependencyFeatureGenerator extends (ParseState => String)
    /** Generator for a single template string. */
    class SingletonDependencyFeatureGenerator(f: String) extends DependencyFeatureGenerator {
      lazy val featureFn = generators(f)
      def apply(s: ParseState): String = featureFn(s)
    }
    /** Conjunction of several generators, joined with '|'. */
    class CompositeDependencyFeatureGenerator(gens: Seq[DependencyFeatureGenerator]) extends DependencyFeatureGenerator {
      def apply(s: ParseState) = gens.map(_.apply(s)).mkString("|")
    }
    // '#' starts a comment; strip everything after it on each line before JSON parsing.
    private def stripJSONComments(s: String) = s.split("\n").map(_.split("#").head).mkString("\n")
    /** Build feature generators from a JSON spec of the form {"features": [[tmpl, ...], ...]}. */
    def fromJSON(source: String) = {
      val someJson = JSON.parseFull(stripJSONComments(source))
      val featureSpec = someJson match {
        case map: Some[Map[String, List[List[String]]] @unchecked] => map.get("features")
        case _ => throw new Error()
      }
      featureSpec.map(fs => {
        val fGens = fs.map(f => new SingletonDependencyFeatureGenerator(f))
        if (fGens.length > 1) new CompositeDependencyFeatureGenerator(fGens)
        else fGens.head
      })
    }
  }
  /** A token within a ParseState; thisIdx is its index in state.sentenceTokens
    * (0 = root, -1 = the shared null token). Head assignment also maintains the
    * state's leftmost/rightmost dependent tables. */
  class DepToken(val form: String, val lemma: String, val pos: String, val thisIdx: Int, val state: ParseState) {
    var head: DepArc = null
    def grandHead: DepArc = if(hasHead) head.depToken.head else null
    def hasHead: Boolean = head != null
    def hasGrandHead: Boolean = grandHead != null
    /** Attach this token to headToken with the given label and update the dependent tables. */
    def setHead(headToken: DepToken, label: String) {
      head = new DepArc(headToken, label)
      if(head.depToken.thisIdx != -1){
        // set left and rightmost dependencies
        if (thisIdx < head.depToken.thisIdx)
          state.leftmostDeps(head.depToken.thisIdx) = thisIdx
        else
          state.rightmostDeps(head.depToken.thisIdx) = thisIdx
      }
    }
    /** Leftmost dependent of this token, or state.nullToken if none. */
    def leftmostDependent: DepToken = {
      if (thisIdx == -1) state.nullToken
      else{
        val i = state.leftmostDeps(thisIdx)
        if (i == -1) state.nullToken
        else state.sentenceTokens(i)
      }
    }
    /** Rightmost dependent of this token, or state.nullToken if none. */
    def rightmostDependent: DepToken = {
      if (thisIdx == -1) state.nullToken
      else{
        val i = state.rightmostDeps(thisIdx)
        if (i == -1) state.nullToken
        else state.sentenceTokens(i)
      }
    }
    /** Leftmost dependent of this token's leftmost dependent (2nd order), or nullToken. */
    def leftmostDependent2: DepToken = {
      if (thisIdx == -1) state.nullToken
      else{
        val i = state.leftmostDeps(thisIdx)
        if (i == -1) state.nullToken
        else{
          val j = state.leftmostDeps(i)
          if (j == -1) state.nullToken
          else state.sentenceTokens(j)
        }
      }
    }
    /** Rightmost dependent of this token's rightmost dependent (2nd order), or nullToken. */
    def rightmostDependent2: DepToken = {
      if (thisIdx == -1) state.nullToken
      else{
        val i = state.rightmostDeps(thisIdx)
        if (i == -1) state.nullToken
        else{
          val j = state.rightmostDeps(i)
          if (j == -1) state.nullToken
          else state.sentenceTokens(j)
        }
      }
    }
    /** Nearest token to the left sharing this token's head, or nullToken. */
    def leftNearestSibling: DepToken = {
      if(hasHead){
        var i = thisIdx - 1
        var sib = state.nullToken
        while(i >= 0 && sib == state.nullToken){
          if (state.sentenceTokens(i).hasHead && state.sentenceTokens(i).head.depToken == head.depToken)
            sib = state.sentenceTokens(i)
          i -= 1
        }
        sib
      }
      else state.nullToken
    }
    /** Nearest token to the right sharing this token's head, or nullToken. */
    def rightNearestSibling: DepToken = {
      if(hasHead){
        var i = thisIdx + 1
        var sib = state.nullToken
        while(i < state.sentenceTokens.size && sib == state.nullToken){
          if (state.sentenceTokens(i).hasHead && state.sentenceTokens(i).head.depToken == head.depToken)
            sib = state.sentenceTokens(i)
          i += 1
        }
        sib
      }
      else state.nullToken
    }
    /** True iff `that` is an ancestor of this token along the current head chain. */
    @tailrec final def isDescendentOf(that: DepToken): Boolean = {
      if (!hasHead) false
      else if (this.head.depToken == that) true
      else this.head.depToken.isDescendentOf(that)
    }
  }
case class DepArc(depToken: DepToken, label: String)
class ParseState(var stack: Int, var input: Int, val reducedIds: collection.mutable.HashSet[Int], sentence: Sentence) {
private def depToken(token: Token, idx: Int, state: ParseState) = new DepToken(form = token.string, lemma = token.lemmaString, pos = token.posTag.categoryValue, thisIdx=idx, state=state)
val rootToken = new DepToken(form = "<ROOT>", lemma = "<ROOT>", pos = "<ROOT>", thisIdx = 0, state=this)
val nullToken = new DepToken(form = "<NULL>", lemma = "<NULL>", pos = "<NULL>", thisIdx = -1, state=this)
val sentenceTokens = (Seq(rootToken) ++ sentence.tokens.zipWithIndex.map(t => depToken(t._1, t._2+1, this))).toArray
val leftmostDeps = Array.fill[Int](sentenceTokens.size)(-1)
val rightmostDeps = Array.fill[Int](sentenceTokens.size)(-1)
def inputToken(offset: Int): DepToken = {
val i = input + offset
if (i < 0 || sentenceTokens.size - 1 < i) nullToken
else sentenceTokens(i)
}
def lambdaToken(offset: Int): DepToken = {
val i = stack + offset
if (i < 0 || sentenceTokens.size - 1 < i) nullToken
else sentenceTokens(i)
}
def stackToken(offset: Int): DepToken = {
if (offset == 0)
return sentenceTokens(stack)
var off = math.abs(offset)
var dir = if (offset < 0) -1 else 1
var i = stack + dir
while (0 < i && i < input) {
if (!reducedIds.contains(i)) {
off -= 1
if (off == 0)
return sentenceTokens(i)
}
i += dir
}
nullToken
}
}
}
// Pre-trained parser variants: model files are located on the classpath via ClasspathURL,
// keyed by the concrete class name plus the ".factorie" suffix.
class WSJTransitionBasedParser(url:java.net.URL) extends TransitionBasedParser(url)
object WSJTransitionBasedParser extends WSJTransitionBasedParser(cc.factorie.util.ClasspathURL[WSJTransitionBasedParser](".factorie"))
class OntonotesTransitionBasedParser(url:java.net.URL) extends TransitionBasedParser(url)
object OntonotesTransitionBasedParser extends OntonotesTransitionBasedParser(cc.factorie.util.ClasspathURL[OntonotesTransitionBasedParser](".factorie"))
/** Command-line options shared by the transition-based parser trainer, tester, and optimizer. */
class TransitionBasedParserArgs extends cc.factorie.util.DefaultCmdOptions with SharedNLPCmdOptions{
  val trainFiles = new CmdOption("train", Nil.asInstanceOf[List[String]], "FILENAME...", "")
  val testFiles = new CmdOption("test", Nil.asInstanceOf[List[String]], "FILENAME...", "")
  val trainDir = new CmdOption("trainDir", "", "FILENAME", "Directory containing training files.")
  val testDir = new CmdOption("testDir", "", "FILENAME", "Directory containing test files.")
  val devDir = new CmdOption("devDir", "", "FILENAME", "Directory containing dev files.")
  val devFiles = new CmdOption("dev", Nil.asInstanceOf[List[String]], "FILENAME...", "")
  val ontonotes = new CmdOption("onto", true, "BOOLEAN", "Whether data are in Ontonotes format or otherwise (WSJ or CoNLL)")
  val wsj = new CmdOption("wsj", false, "BOOLEAN", "Whether data are in WSJ format or otherwise (Ontonotes or CoNLL)")
  val cutoff = new CmdOption("cutoff", 0, "", "")
  val loadModel = new CmdOption("load", "", "", "")
  val nThreads = new CmdOption("nThreads", 1, "INT", "How many threads to use during training.")
  val useSVM = new CmdOption("use-svm", false, "BOOL", "Whether to use SVMs to train")
  val modelDir = new CmdOption("model", "model", "FILENAME", "File in which to save the trained model.")
  val bootstrapping = new CmdOption("bootstrap", 0, "INT", "The number of bootstrapping iterations to do. 0 means no bootstrapping.")
  val saveModel = new CmdOption("save-model", true,"BOOLEAN","whether to write out a model file or not")
  val l1 = new CmdOption("l1", 0.000001,"FLOAT","l1 regularization weight")
  val l2 = new CmdOption("l2", 0.00001,"FLOAT","l2 regularization weight")
  val rate = new CmdOption("rate", 1.0,"FLOAT","base learning rate")
  val maxIters = new CmdOption("max-iterations", 5, "INT", "iterations of training per round")
  val delta = new CmdOption("delta", 0.1,"FLOAT","learning rate decay")
}
/** Command-line trainer. Loads corpora, prunes/freezes the feature domain, trains
  * (optionally with boosting), optionally serializes the model, and returns test LAS
  * so it can drive hyperparameter search. */
object TransitionBasedParserTrainer extends cc.factorie.util.HyperparameterMain {
  def evaluateParameters(args: Array[String]) = {
    val opts = new TransitionBasedParserArgs
    implicit val random = new scala.util.Random(0)
    opts.parse(args)
    assert(opts.trainFiles.wasInvoked || opts.trainDir.wasInvoked)
    // Load the sentences
    def loadSentences(listOpt: opts.CmdOption[List[String]], dirOpt: opts.CmdOption[String]): Seq[Sentence] = {
      var fileList = Seq.empty[String]
      if (listOpt.wasInvoked) fileList = listOpt.value.toSeq
      if (dirOpt.wasInvoked) fileList ++= FileUtils.getFileListFromDir(dirOpt.value)
      fileList.flatMap(fname => {
        if(opts.wsj.value)
          load.LoadWSJMalt.fromFilename(fname, loadPos=load.AnnotationTypes.AUTO).head.sentences.toSeq
        else if (opts.ontonotes.value)
          load.LoadOntonotes5.fromFilename(fname, loadLemma=load.AnnotationTypes.AUTO, loadPos=load.AnnotationTypes.AUTO).head.sentences.toSeq
        else
          load.LoadConll2008.fromFilename(fname).head.sentences.toSeq
      })
    }
    val sentencesFull = loadSentences(opts.trainFiles, opts.trainDir)
    val devSentencesFull = loadSentences(opts.devFiles, opts.devDir)
    val testSentencesFull = loadSentences(opts.testFiles, opts.testDir)
    // Optionally train/test on only a leading fraction of each corpus.
    val trainPortionToTake = if(opts.trainPortion.wasInvoked) opts.trainPortion.value.toDouble else 1.0
    val testPortionToTake = if(opts.testPortion.wasInvoked) opts.testPortion.value.toDouble else 1.0
    val sentences = sentencesFull.take((trainPortionToTake*sentencesFull.length).floor.toInt)
    val testSentences = testSentencesFull.take((testPortionToTake*testSentencesFull.length).floor.toInt)
    val devSentences = devSentencesFull.take((testPortionToTake*devSentencesFull.length).floor.toInt)
    println("Total train sentences: " + sentences.size)
    println("Total test sentences: " + testSentences.size)
    def testSingle(c: TransitionBasedParser, ss: Seq[Sentence], extraText: String = ""): Unit = {
      if (ss.nonEmpty) {
        println(extraText + " " + c.testString(ss))
      }
    }
    def testAll(c: TransitionBasedParser, extraText: String = ""): Unit = {
      println("\n")
      testSingle(c, sentences, "Train " + extraText)
      testSingle(c, devSentences, "Dev " + extraText)
      testSingle(c, testSentences, "Test " + extraText)
    }
    // Load other parameters
    val numBootstrappingIterations = opts.bootstrapping.value.toInt
    val c = new TransitionBasedParser
    // Per-sentence regularization scaling, as in TransitionBasedParser.train.
    val l1 = 2*opts.l1.value / sentences.length
    val l2 = 2*opts.l2.value / sentences.length
    val optimizer = new AdaGradRDA(opts.rate.value, opts.delta.value, l1, l2)
    println(s"Initializing trainer (${opts.nThreads.value} threads)")
    val trainer = if (opts.useSVM.value) new SVMMulticlassTrainer(opts.nThreads.value)
    else new OnlineLinearMulticlassTrainer(optimizer=optimizer, useParallel=if (opts.nThreads.value > 1) true else false, nThreads=opts.nThreads.value, objective=OptimizableObjectives.hingeMulticlass, maxIterations=opts.maxIters.value)
    def evaluate(cls: LinearMulticlassClassifier) {
      println(cls.weights.value.toSeq.count(x => x == 0).toFloat/cls.weights.value.length +" sparsity")
      testAll(c, "iteration ")
    }
    // First decision pass only counts features so the domain can be pruned before training.
    c.featuresDomain.dimensionDomain.gatherCounts = true
    println("Generating decisions...")
    c.generateDecisions(sentences, c.ParserConstants.TRAINING, opts.nThreads.value)
    println("Before pruning # features " + c.featuresDomain.dimensionDomain.size)
    c.featuresDomain.dimensionDomain.trimBelowCount(2*opts.cutoff.value)
    c.featuresDomain.freeze()
    c.featuresDomain.dimensionDomain.gatherCounts = false
    println("After pruning # features " + c.featuresDomain.dimensionDomain.size)
    println("Training...")
    var trainingVs = c.generateDecisions(sentences, c.ParserConstants.TRAINING, opts.nThreads.value)
    /* Debugging: print generated training decisions and their active features for a few
       sentences here if needed (decode transitions via ParserConstants.getString). */
    c.trainFromVariables(trainingVs, trainer, evaluate)
    trainingVs = null // GC the old training labels
    for (i <- 0 until numBootstrappingIterations) {
      println("Boosting iteration " + i)
      c.boosting(sentences, nThreads=opts.nThreads.value, trainer=trainer, evaluate=evaluate)
    }
    //testSentences.par.foreach(c.process)
    //testSentences.foreach(c.process)
    if (opts.saveModel.value) {
      // Round-trip the model through serialization and re-test as a sanity check.
      val modelUrl: String = if (opts.modelDir.wasInvoked) opts.modelDir.value else opts.modelDir.defaultValue + System.currentTimeMillis().toString + ".factorie"
      c.serialize(new java.io.File(modelUrl))
      val d = new TransitionBasedParser
      d.deserialize(new java.io.File(modelUrl))
      testSingle(d, testSentences, "Post serialization accuracy ")
    }
    val testLAS = ParserEval.calcLas(testSentences.map(_.attr[ParseTree]))
    if(opts.targetAccuracy.wasInvoked) cc.factorie.assertMinimalAccuracy(testLAS,opts.targetAccuracy.value.toDouble)
    testLAS
  }
}
/** Command-line tester: loads a serialized or bundled model and prints accuracy on test data. */
object TransitionBasedParserTester {
  def main(args: Array[String]) {
    val opts = new TransitionBasedParserArgs
    opts.parse(args)
    assert(opts.testDir.wasInvoked || opts.testFiles.wasInvoked)
    // load model from file if given,
    // else if the wsj command line param was specified use wsj model,
    // otherwise ontonotes model
    val parser = {
      if(opts.modelDir.wasInvoked) new TransitionBasedParser(new File(opts.modelDir.value))
      else if(opts.wsj.value) WSJTransitionBasedParser
      else OntonotesTransitionBasedParser
    }
    assert(!(opts.testDir.wasInvoked && opts.testFiles.wasInvoked))
    val testFileList = if(opts.testDir.wasInvoked) FileUtils.getFileListFromDir(opts.testDir.value) else opts.testFiles.value.toSeq
    val testPortionToTake = if(opts.testPortion.wasInvoked) opts.testPortion.value else 1.0
    val testDocs = testFileList.map(fname => {
      if(opts.wsj.value)
        load.LoadWSJMalt.fromFilename(fname, loadLemma=load.AnnotationTypes.AUTO, loadPos=load.AnnotationTypes.AUTO).head
      else
        load.LoadOntonotes5.fromFilename(fname, loadLemma=load.AnnotationTypes.AUTO, loadPos=load.AnnotationTypes.AUTO).head
    })
    val testSentencesFull = testDocs.flatMap(_.sentences)
    val testSentences = testSentencesFull.take((testPortionToTake*testSentencesFull.length).floor.toInt)
    println(parser.testString(testSentences))
  }
}
/** Distributed hyperparameter search (l1, l2, rate, cutoff, max-iterations) over
  * TransitionBasedParserTrainer, then retrains and saves with the best configuration. */
object TransitionBasedParserOptimizer {
  def main(args: Array[String]) {
    val opts = new TransitionBasedParserArgs
    opts.parse(args)
    opts.saveModel.setValue(false)
    val l1 = cc.factorie.util.HyperParameter(opts.l1, new cc.factorie.util.LogUniformDoubleSampler(1e-10, 1e2))
    val l2 = cc.factorie.util.HyperParameter(opts.l2, new cc.factorie.util.LogUniformDoubleSampler(1e-10, 1e2))
    val rate = cc.factorie.util.HyperParameter(opts.rate, new cc.factorie.util.LogUniformDoubleSampler(1e-4, 1e4))
    //val delta = cc.factorie.util.HyperParameter(opts.delta, new cc.factorie.util.LogUniformDoubleSampler(1e-4, 1e4))
    val cutoff = cc.factorie.util.HyperParameter(opts.cutoff, new cc.factorie.util.SampleFromSeq[Int](Seq(0, 1, 2)))
    //val bootstrap = cc.factorie.util.HyperParameter(opts.bootstrapping, new cc.factorie.util.SampleFromSeq[Int](Seq(0, 1, 2)))
    val maxit = cc.factorie.util.HyperParameter(opts.maxIters, new cc.factorie.util.SampleFromSeq[Int](Seq(3, 5, 7)))
    /* An SSHActorExecutor could be configured here as an alternative to the QSub executor below. */
    val qs = new cc.factorie.util.QSubExecutor(48, "cc.factorie.app.nlp.parse.TransitionBasedParserTrainer")
    // 250 trials, accept up to 220 completions, 60-minute budget per trial.
    val optimizer = new cc.factorie.util.HyperParameterSearcher(opts, Seq(l1, l2, rate, cutoff, maxit), qs.execute, 250, 220, 60)
    val result = optimizer.optimize()
    println("Got results: " + result.mkString(" "))
    opts.saveModel.setValue(true)
    println("Running best configuration...")
    import scala.concurrent.duration._
    Await.result(qs.execute(opts.values.flatMap(_.unParse).toArray), 2.hours)
    println("Done")
  }
}
| iesl/fuse_ttl | src/factorie-factorie_2.11-1.1/src/main/scala/cc/factorie/app/nlp/parse/TransitionBasedParser.scala | Scala | apache-2.0 | 40,094 |
package mesosphere.marathon.core.flow.impl
import akka.actor.{ Cancellable, Actor, ActorLogging, Props }
import akka.event.{ EventStream, LoggingReceive }
import mesosphere.marathon.MarathonSchedulerDriverHolder
import mesosphere.marathon.core.base.Clock
import mesosphere.marathon.core.flow.ReviveOffersConfig
import mesosphere.marathon.core.flow.impl.ReviveOffersActor.OffersWanted
import mesosphere.marathon.event.{ SchedulerReregisteredEvent, SchedulerRegisteredEvent }
import mesosphere.marathon.state.Timestamp
import rx.lang.scala.{ Observable, Subscription }
import scala.annotation.tailrec
import scala.concurrent.duration._
private[flow] object ReviveOffersActor {
  /** Props factory for [[ReviveOffersActor]]. */
  def props(
    clock: Clock, conf: ReviveOffersConfig,
    marathonEventStream: EventStream,
    offersWanted: Observable[Boolean], driverHolder: MarathonSchedulerDriverHolder): Props = {
    Props(new ReviveOffersActor(clock, conf, marathonEventStream, offersWanted, driverHolder))
  }
  // Self-message to re-check whether a revive is now due.
  private[impl] case object TimedCheck
  // Wrapper for notifications from the offersWanted observable.
  private[impl] case class OffersWanted(wanted: Boolean)
}
/**
* Revive offers whenever interest is signaled but maximally every 5 seconds.
*/
private[impl] class ReviveOffersActor(
clock: Clock, conf: ReviveOffersConfig,
marathonEventStream: EventStream,
offersWanted: Observable[Boolean],
driverHolder: MarathonSchedulerDriverHolder) extends Actor with ActorLogging {
  // Subscription to the offersWanted observable, created in preStart, released in postStop.
  private[impl] var subscription: Subscription = _
  // Last known interest state from the OffersWanted notifications.
  private[impl] var offersCurrentlyWanted: Boolean = false
  // How many revive calls remain in the current series (see initiateNewSeriesOfRevives).
  private[impl] var revivesNeeded: Int = 0
  // When we last called reviveOffers on the driver; Timestamp(0) means never.
  private[impl] var lastRevive: Timestamp = Timestamp(0)
  // Cancellable for an already-scheduled future revive check, if any.
  private[impl] var nextReviveCancellableOpt: Option[Cancellable] = None
  /** Subscribe to the offersWanted observable and to scheduler (re)registration events. */
  override def preStart(): Unit = {
    subscription = offersWanted.map(OffersWanted(_)).subscribe(self ! _)
    marathonEventStream.subscribe(self, classOf[SchedulerRegisteredEvent])
    marathonEventStream.subscribe(self, classOf[SchedulerReregisteredEvent])
  }
  /** Release the observable subscription, cancel any pending revive check, and unsubscribe
    * from the event stream. */
  override def postStop(): Unit = {
    subscription.unsubscribe()
    nextReviveCancellableOpt.foreach(_.cancel())
    nextReviveCancellableOpt = None
    marathonEventStream.unsubscribe(self)
  }
@tailrec
private[this] def reviveOffers(): Unit = {
val now: Timestamp = clock.now()
val nextRevive = lastRevive + conf.minReviveOffersInterval().milliseconds
if (nextRevive <= now) {
log.info("=> revive offers NOW, canceling any scheduled revives")
nextReviveCancellableOpt.foreach(_.cancel())
nextReviveCancellableOpt = None
driverHolder.driver.foreach(_.reviveOffers())
lastRevive = now
revivesNeeded -= 1
if (revivesNeeded > 0) {
log.info("{} further revives still needed. Repeating reviveOffers according to --{} {}",
revivesNeeded, conf.reviveOffersRepetitions.name, conf.reviveOffersRepetitions())
reviveOffers()
}
}
else {
lazy val untilNextRevive = now until nextRevive
if (nextReviveCancellableOpt.isEmpty) {
log.info("=> Schedule next revive at {} in {}, adhering to --{} {} (ms)",
nextRevive, untilNextRevive, conf.minReviveOffersInterval.name, conf.minReviveOffersInterval())
nextReviveCancellableOpt = Some(schedulerCheck(untilNextRevive))
}
else if (log.isDebugEnabled) {
log.info("=> Next revive already scheduled at {} not yet due for {}", nextRevive, untilNextRevive)
}
}
}
override def receive: Receive = LoggingReceive {
Seq(
receiveOffersWantedNotifications,
receiveReviveOffersEvents
).reduceLeft[Receive](_.orElse[Any, Unit](_))
}
private[this] def receiveOffersWantedNotifications: Receive = {
case OffersWanted(true) =>
log.info("Received offers WANTED notification")
offersCurrentlyWanted = true
initiateNewSeriesOfRevives()
case OffersWanted(false) =>
log.info("Received offers NOT WANTED notification, canceling {} revives", revivesNeeded)
offersCurrentlyWanted = false
revivesNeeded = 0
nextReviveCancellableOpt.foreach(_.cancel())
nextReviveCancellableOpt = None
}
def initiateNewSeriesOfRevives(): Unit = {
revivesNeeded = conf.reviveOffersRepetitions()
reviveOffers()
}
private[this] def receiveReviveOffersEvents: Receive = {
case msg @ (_: SchedulerRegisteredEvent | _: SchedulerReregisteredEvent | OfferReviverDelegate.ReviveOffers) =>
if (offersCurrentlyWanted) {
log.info(s"Received reviveOffers notification: ${msg.getClass.getSimpleName}")
initiateNewSeriesOfRevives()
}
else {
log.info(s"Ignoring ${msg.getClass.getSimpleName} because no one is currently interested in offers")
}
case ReviveOffersActor.TimedCheck =>
log.info(s"Received TimedCheck")
if (revivesNeeded > 0) {
reviveOffers()
}
else {
log.info("=> no revives needed right now")
}
}
protected def schedulerCheck(duration: FiniteDuration): Cancellable = {
import context.dispatcher
context.system.scheduler.scheduleOnce(duration, self, ReviveOffersActor.TimedCheck)
}
}
| vivekjuneja/marathon | src/main/scala/mesosphere/marathon/core/flow/impl/ReviveOffersActor.scala | Scala | apache-2.0 | 5,112 |
package kornell.server.api
import javax.ws.rs._
import kornell.core.entity.CertificateDetails
import kornell.server.jdbc.repository.CertificateDetailsRepo
import kornell.server.util.AccessDeniedErr
import kornell.server.util.Conditional.toConditional
/**
 * JAX-RS resource exposing read/update/delete operations for the certificate
 * details entity identified by `uuid`. Every operation is gated by the same
 * authorization chain: the caller must be a platform admin, an institution
 * admin, or a publisher; otherwise an AccessDeniedErr is raised.
 *
 * NOTE(review): `requiring`/`or`/`get` come from the `toConditional` implicit
 * conversion imported above; `isPlatformAdmin` etc. are presumably
 * authorization predicates brought into scope elsewhere in this package —
 * confirm against the surrounding codebase.
 */
class CertificateDetailsResource(uuid: String) {

  /** Returns the certificate details for this UUID (requires authorization). */
  @GET
  @Produces(Array(CertificateDetails.TYPE))
  def get: CertificateDetails = {
    CertificateDetailsRepo(uuid).get
  }.requiring(isPlatformAdmin, AccessDeniedErr())
    .or(isInstitutionAdmin, AccessDeniedErr())
    .or(isPublisher, AccessDeniedErr())
    .get

  /** Persists the given details for this UUID and echoes the updated entity. */
  @PUT
  @Consumes(Array(CertificateDetails.TYPE))
  @Produces(Array(CertificateDetails.TYPE))
  def update(certificateDetails: CertificateDetails): CertificateDetails = {
    CertificateDetailsRepo(uuid).update(certificateDetails)
  }.requiring(isPlatformAdmin, AccessDeniedErr())
    .or(isInstitutionAdmin, AccessDeniedErr())
    .or(isPublisher, AccessDeniedErr())
    .get

  /** Deletes the details for this UUID, returning the removed entity. */
  @DELETE
  @Produces(Array(CertificateDetails.TYPE))
  def delete(): CertificateDetails = {
    CertificateDetailsRepo(uuid).delete
  }.requiring(isPlatformAdmin, AccessDeniedErr())
    .or(isInstitutionAdmin, AccessDeniedErr())
    .or(isPublisher, AccessDeniedErr())
    .get
}
object CertificateDetailsResource {
  /** Factory mirroring the class constructor, bound to the given certificate UUID. */
  def apply(uuid: String): CertificateDetailsResource = new CertificateDetailsResource(uuid)
}
| Craftware/Kornell | kornell-api/src/main/scala/kornell/server/api/CertificateDetailsResource.scala | Scala | apache-2.0 | 1,336 |
package at.forsyte.apalache.tla.imp
import at.forsyte.apalache.tla.imp.src.{SaveToStoreTracker, SourceLocation, SourceStore}
import at.forsyte.apalache.tla.lir._
import at.forsyte.apalache.tla.lir.oper.TlaFunOper
import at.forsyte.apalache.tla.lir.transformations.impl.IdleTracker
import at.forsyte.apalache.tla.lir.transformations.standard.ReplaceFixed
import tla2sany.semantic.{OpApplNode, OpDefNode}
/**
 * Translate an operator definition to a TlaOper.
 *
 * Three cases are handled: (1) a non-recursive operator whose body is a
 * `$RecursiveFcnSpec` application, i.e. a recursive *function* definition;
 * (2) a plain non-recursive operator; (3) a recursive operator, where the
 * body is translated in recursion mode.
 *
 * @author konnov
 */
class OpDefTranslator(sourceStore: SourceStore, context: Context) {
  /** Translates a SANY OpDefNode into a TlaOperDecl, registering source locations. */
  def translate(node: OpDefNode): TlaOperDecl = {
    val params = node.getParams.toList map FormalParamTranslator().translate
    // intern: node names are compared/stored frequently downstream
    val nodeName = node.getName.toString.intern()
    val isRecursive = node.getInRecursive

    if (!isRecursive) {
      node.getBody match {
        case app: OpApplNode if "$RecursiveFcnSpec" == app.getOperator.getName.toString =>
          // this is a definition of a recursive function, translate to recFunDef
          val body = ExprOrOpArgNodeTranslator(sourceStore, context, OutsideRecursion())
            .translate(node.getBody)
          val recFunRef = OperEx(TlaFunOper.recFunRef)
          // save the source location of the call to the recursive function, point to the definition
          sourceStore.addRec(recFunRef, SourceLocation(node.getBody.getLocation))
          // the body still can refer to the function by its name, replace it with recFunRef
          val replaced = ReplaceFixed(NameEx(nodeName), recFunRef, new SaveToStoreTracker(sourceStore))(body)
          // store the source location
          sourceStore.addRec(replaced, SourceLocation(node.getBody.getLocation))
          // return the operator whose body is a recursive function
          val operDecl = TlaOperDecl(nodeName, List(), replaced)
          operDecl.isRecursive = false
          operDecl

        case _ =>
          // non-recursive declarations are easy
          TlaOperDecl(nodeName, params,
            ExprOrOpArgNodeTranslator(sourceStore, context, OutsideRecursion())
              .translate(node.getBody))
      }
    } else {
      // in recursive declarations, the applications of recursive operators are replaced by calls to formal parameters
      val body = ExprOrOpArgNodeTranslator(sourceStore, context, InsideRecursion())
        .translate(node.getBody)
      val decl = TlaOperDecl(nodeName, params, body)
      decl.isRecursive = true
      decl
    }
  }
}
object OpDefTranslator {
  /** Convenience factory mirroring the class constructor. */
  def apply(sourceStore: SourceStore, context: Context): OpDefTranslator =
    new OpDefTranslator(sourceStore, context)
}
| konnov/apalache | tla-import/src/main/scala/at/forsyte/apalache/tla/imp/OpDefTranslator.scala | Scala | apache-2.0 | 2,626 |
package org.jetbrains.plugins.scala.lang.psi.stubs
import com.intellij.psi.stubs.StubElement
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScExtensionBody
trait ScExtensionBodyStub extends StubElement[ScExtensionBody]
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/stubs/ScExtensionBodyStub.scala | Scala | apache-2.0 | 233 |
/*
* The MIT License
*
* Copyright (c) 2020 Fulcrum Genomics
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
package com.fulcrumgenomics.util
import com.fulcrumgenomics.FgBioDef.PathToSequenceDictionary
import com.fulcrumgenomics.fasta.{SequenceDictionary, SequenceMetadata}
import com.fulcrumgenomics.testing.UnitSpec
class UpdateDelimitedFileContigNamesTest extends UnitSpec {

  /** Builds minimal (zero-length) sequence metadata carrying the given aliases. */
  private def toSequenceMetadata(name: String, alias: String*): SequenceMetadata = {
    SequenceMetadata(name=name, length=0, aliases=alias)
  }

  /** Dictionary mapping each "<n>-old" alias to the primary name "<n>-new". */
  private val dict: SequenceDictionary = {
    SequenceDictionary(
      toSequenceMetadata(name="1-new", "1-old"),
      toSequenceMetadata(name="2-new", "2-old"),
      toSequenceMetadata(name="3-new", "3-old")
    )
  }

  /** The dictionary above written to a temporary file, since the tool reads it from disk. */
  private val pathToSequenceDictionary: PathToSequenceDictionary = {
    val path = makeTempFile("test.", "in.dict")
    dict.write(path)
    path
  }

  /**
    * Writes `actual` to a temporary input file, runs [[UpdateDelimitedFileContigNames]]
    * with the given options, and asserts the output lines equal `expected` in order.
    */
  private def runTest(delimiter: Char,
                      columns: Seq[Int],
                      actual: Seq[String],
                      expected: Seq[String],
                      outputFirstNumLines: Int = 0,
                      comment: String = "#",
                      skipMissing: Boolean = false,
                      sortOrder: SortOrder = SortOrder.Unsorted,
                      contig: Option[Int] = None,
                      position: Option[Int] = None,
                      maxObjectsInRam: Int = 1e6.toInt): Unit = {
    val input  = makeTempFile("test.", "in.txt")
    val output = makeTempFile("test.", "out.txt")
    Io.writeLines(input, actual)
    val tool = new UpdateDelimitedFileContigNames(
      input               = input,
      dict                = pathToSequenceDictionary,
      columns             = columns,
      delimiter           = delimiter,
      comment             = comment,
      output              = output,
      outputFirstNumLines = outputFirstNumLines,
      skipMissing         = skipMissing,
      sortOrder           = sortOrder,
      contig              = contig,
      position            = position,
      maxObjectsInRam     = maxObjectsInRam
    )
    executeFgbioTool(tool)

    Io.readLines(output).toSeq should contain theSameElementsInOrderAs expected
  }

  "UpdateDelimitedFileContigNames" should "update a delimited file" in {
    Seq(',', '\\t', ':').foreach { delimiter =>
      runTest(
        delimiter = delimiter,
        columns   = Seq(2),
        actual    = Seq(Seq("na", "na", "1-old", "na", "na").mkString(delimiter.toString)),
        expected  = Seq(Seq("na", "na", "1-new", "na", "na").mkString(delimiter.toString)),
      )
    }
  }

  it should "update multiple columns in a delimited file" in {
    Seq(',', '\\t', ':').foreach { delimiter =>
      runTest(
        delimiter = delimiter,
        columns   = Seq(2, 4),
        actual    = Seq(Seq("3-old", "na", "1-old", "na", "2-old").mkString(delimiter.toString)),
        expected  = Seq(Seq("3-old", "na", "1-new", "na", "2-new").mkString(delimiter.toString))
      )
    }
  }

  it should "skip the first n-lines" in {
    Seq(0, 1, 2).foreach { outputFirstNumLines =>
      val actual = Seq(
        "1-old,na",
        "2-old,na",
        "3-old,na"
      )
      val allModified = Seq(
        "1-new,na",
        "2-new,na",
        "3-new,na"
      )
      // the first `outputFirstNumLines` lines pass through untouched
      val expected = actual.take(outputFirstNumLines) ++ allModified.drop(outputFirstNumLines)
      runTest(
        delimiter           = ',',
        columns             = Seq(0),
        actual              = actual,
        expected            = expected,
        outputFirstNumLines = outputFirstNumLines
      )
    }
  }

  it should "skip the comment lines" in {
    // Fixed: previously wrapped in `Seq(0, 1, 2).foreach { outputFirstNumLines => ... }`
    // (copy-paste from the test above) with the loop variable unused, which just ran
    // the identical test three times.
    val actual = Seq(
      "$1-old,na",
      "2-old,na",
      "$2-old,na",
      "3-old,na"
    )
    val expected = Seq(
      "$1-old,na",
      "2-new,na",
      "$2-old,na",
      "3-new,na"
    )
    runTest(
      delimiter = ',',
      columns   = Seq(0),
      actual    = actual,
      expected  = expected,
      comment   = "$"
    )
  }

  it should "fail if contig names are not found in the dict" in {
    val exception = intercept[Exception] {
      runTest(
        delimiter = '\\t',
        columns   = Seq(2),
        actual    = Seq(Seq("na", "na", "4-old", "na", "na").mkString("\\t")),
        expected  = Seq.empty,
      )
    }
    exception.getMessage.contains("Did not find contig") shouldBe true
  }

  it should "skip lines if not all contig names can be updated with --skip-missing" in {
    val actual = Seq(
      "1-old,4-old", // skipped, second can't be updated
      "2-old,3-old", // updated
      "4-old,5-old", // skipped, both can't be updated
      "5-old,1-old", // skipped, first can't be updated
    )
    val expected = Seq(
      "2-new,3-new"
    )
    runTest(
      delimiter   = ',',
      columns     = Seq(0, 1),
      actual      = actual,
      expected    = expected,
      skipMissing = true
    )
  }

  it should "sort by contig name only" in {
    val actual = Seq(
      "1-old,0",
      "3-old,4",
      "3-old,3",
      "2-old,2"
    )
    // ties on contig keep input order (stable by line number)
    val expected = Seq(
      "1-new,0",
      "2-new,2",
      "3-new,4",
      "3-new,3"
    )
    // --contig not specified
    runTest(
      delimiter = ',',
      columns   = Seq(0),
      actual    = actual,
      expected  = expected,
      sortOrder = SortOrder.ByContigOnly
    )
    // --contig specified
    runTest(
      delimiter = ',',
      columns   = Seq(0),
      actual    = actual,
      expected  = expected,
      sortOrder = SortOrder.ByContigOnly,
      contig    = Some(0)
    )
  }

  it should "sort by coordinate" in {
    val actual = Seq(
      "1-old,0,2-old,1",
      "3-old,4,2-old,2",
      "3-old,1,2-old,3",
      "2-old,2,2-old,4",
      "2-old,2,2-old,5"
    )
    // sorts by col0, col1, then line number (col3)s
    val expectedCol0 = Seq(
      "1-new,0,2-new,1",
      "2-new,2,2-new,4",
      "2-new,2,2-new,5",
      "3-new,1,2-new,3",
      "3-new,4,2-new,2"
    )
    // col2 has the same value for all, so sorting by position in col1
    val expectedCol2 = Seq(
      "1-new,0,2-new,1",
      "3-new,1,2-new,3",
      "2-new,2,2-new,4",
      "2-new,2,2-new,5",
      "3-new,4,2-new,2"
    )
    // --contig is not specified, so defaults to column 0
    runTest(
      delimiter = ',',
      columns   = Seq(0, 2),
      actual    = actual,
      expected  = expectedCol0,
      sortOrder = SortOrder.ByCoordinate,
      position  = Some(1)
    )
    // --contig is specified as column 0
    runTest(
      delimiter = ',',
      columns   = Seq(0, 2),
      actual    = actual,
      expected  = expectedCol0,
      sortOrder = SortOrder.ByCoordinate,
      contig    = Some(0),
      position  = Some(1)
    )
    // --contig is specified as column 2
    runTest(
      delimiter = ',',
      columns   = Seq(0, 2),
      actual    = actual,
      expected  = expectedCol2,
      sortOrder = SortOrder.ByCoordinate,
      contig    = Some(2),
      position  = Some(1)
    )
    // tests that when all values are the same, we sort by line number
    val clones = Seq.range(0, 100).map(_ => "1-new,0,2-new,1")
    runTest(
      delimiter = ',',
      columns   = Seq(0, 2),
      actual    = clones,
      expected  = clones,
      sortOrder = SortOrder.ByCoordinate,
      contig    = Some(2),
      position  = Some(1)
    )
  }

  it should "spill to disk when sorting" in {
    // maxObjectsInRam = 10 with 1000 records forces the external sorter to spill
    val actual   = Seq.range(0, 1000).map { i => f"1-old,0,2-old,$i" }
    val expected = Seq.range(0, 1000).map { i => f"1-new,0,2-new,$i" }
    runTest(
      delimiter       = ',',
      columns         = Seq(0, 2),
      actual          = actual,
      expected        = expected,
      sortOrder       = SortOrder.ByCoordinate,
      position        = Some(1),
      maxObjectsInRam = 10
    )
  }
}
| fulcrumgenomics/fgbio | src/test/scala/com/fulcrumgenomics/util/UpdateDelimitedFileContigNamesTest.scala | Scala | mit | 9,057 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset down to Scala code snippets matching specific criteria and retrieves a sample of them, giving a quick overview of the dataset's contents without deeper analysis.