| code (string, 5-1M chars) | repo_name (string, 5-109 chars) | path (string, 6-208 chars) | language (1 class) | license (15 classes) | size (int64, 5-1M) |
|---|---|---|---|---|---|
package io.cronit.utils
import org.joda.time.DateTimeZone
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}
class ConfigurationTest extends FlatSpec with Matchers with BeforeAndAfter {
after {
System.setProperty("env", "development")
}
"It" should " load 'development' environment variables when env did not defined " in {
val configuration = new Configuration {}
configuration.config.getString("environment") shouldEqual "development"
}
"It" should " load 'specific' environment variables when env is defined " in {
System.setProperty("env", "qa")
val configuration = new Configuration {}
configuration.config.getString("environment") shouldEqual "qa"
}
"It" should " return GMT timezone from config" in {
System.setProperty("env", "qa")
val configuration = new Configuration {}
configuration.jodaTimeZone shouldEqual DateTimeZone.forID("GMT")
}
"It" should " return path with root" in {
val configuration = new Configuration {}
configuration.urlFor("/path") shouldEqual "http://localhost:8888/path"
}
}
| 212data/cronit-service | src/test/scala/io/cronit/utils/ConfigurationTest.scala | Scala | apache-2.0 | 1,083 |
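The test above exercises a `Configuration` trait that is not included in this sample. A minimal sketch of what such a trait might look like, assuming Typesafe Config with one resource file per environment; the key names (`timezone`, `service.root`) are illustrative assumptions, not taken from cronit-service:

```scala
// A minimal sketch (not part of the dataset row) of the Configuration trait under test,
// assuming Typesafe Config with one resource file per environment.
import com.typesafe.config.{Config, ConfigFactory}
import org.joda.time.DateTimeZone

trait Configuration {
  // Falls back to "development" when the "env" system property is not set.
  private val env: String = sys.props.getOrElse("env", "development")

  // Loads e.g. development.conf or qa.conf from the classpath.
  val config: Config = ConfigFactory.load(env)

  // e.g. timezone = "GMT" in the loaded config (assumed key name).
  def jodaTimeZone: DateTimeZone = DateTimeZone.forID(config.getString("timezone"))

  // e.g. service.root = "http://localhost:8888" (assumed key name).
  def urlFor(path: String): String = config.getString("service.root") + path
}
```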
package io.udash.web.guide.components
import io.udash.bootstrap.utils.BootstrapStyles
import io.udash.bootstrap.utils.BootstrapStyles.Color
import io.udash.css.CssStyleName
object BootstrapUtils {
/**
* Wells component from bootstrap3 is absent in bootstrap4.
* These well-like styles make elements look like the good old bootstrap3 well.
*
* Source: https://getbootstrap.com/docs/3.3/components/#wells
*/
def wellStyles: Seq[CssStyleName] = Seq(
BootstrapStyles.Card.card,
BootstrapStyles.Card.body,
BootstrapStyles.Background.color(Color.Light),
)
}
| UdashFramework/udash-core | guide/guide/.js/src/main/scala/io/udash/web/guide/components/BootstrapUtils.scala | Scala | apache-2.0 | 594 |
package monocle.function
import monocle.function.fields._
import monocle.{Iso, Optional, Prism}
import scala.annotation.implicitNotFound
import scalaz.{Applicative, \/}
/**
 * Typeclass that defines a [[Prism]] between an `S` and its init `S` and last `A`
* @tparam S source of [[Prism]] and init of [[Prism]] target
* @tparam A last of [[Prism]] target, `A` is supposed to be unique for a given `S`
*/
@implicitNotFound("Could not find an instance of Snoc[${S},${A}], please check Monocle instance location policy to " +
"find out which import is necessary")
abstract class Snoc[S, A] extends Serializable {
def snoc: Prism[S, (S, A)]
def initOption: Optional[S, S] = snoc composeLens first
def lastOption: Optional[S, A] = snoc composeLens second
}
trait SnocFunctions {
final def snoc[S, A](implicit ev: Snoc[S, A]): Prism[S, (S, A)] = ev.snoc
final def initOption[S, A](implicit ev: Snoc[S, A]): Optional[S, S] = ev.initOption
final def lastOption[S, A](implicit ev: Snoc[S, A]): Optional[S, A] = ev.lastOption
/** append an element to the end */
final def _snoc[S, A](init: S, last: A)(implicit ev: Snoc[S, A]): S =
ev.snoc.reverseGet((init, last))
/** deconstruct an S between its init and last */
final def _unsnoc[S, A](s: S)(implicit ev: Snoc[S, A]): Option[(S, A)] =
ev.snoc.getOption(s)
}
object Snoc extends SnocFunctions {
/** lift an instance of [[Snoc]] using an [[Iso]] */
def fromIso[S, A, B](iso: Iso[S, A])(implicit ev: Snoc[A, B]): Snoc[S, B] = new Snoc[S, B] {
val snoc: Prism[S, (S, B)] =
iso composePrism ev.snoc composeIso iso.reverse.first
}
/************************************************************************************************/
/** Std instances */
/************************************************************************************************/
import scalaz.std.option._
implicit def listSnoc[A]: Snoc[List[A], A] = new Snoc[List[A], A]{
val snoc = Prism[List[A], (List[A], A)](
s => Applicative[Option].apply2(\/.fromTryCatchNonFatal(s.init).toOption, s.lastOption)((_,_))){
case (init, last) => init :+ last
}
}
implicit def streamSnoc[A]: Snoc[Stream[A], A] = new Snoc[Stream[A], A]{
val snoc = Prism[Stream[A], (Stream[A], A)]( s =>
for {
init <- if(s.isEmpty) None else Some(s.init)
last <- s.lastOption
} yield (init, last)){
case (init, last) => init :+ last
}
}
implicit val stringSnoc: Snoc[String, Char] = new Snoc[String, Char]{
val snoc =
Prism[String, (String, Char)](
s => if(s.isEmpty) None else Some((s.init, s.last))){
case (init, last) => init :+ last
}
}
implicit def vectorSnoc[A]: Snoc[Vector[A], A] = new Snoc[Vector[A], A]{
val snoc = Prism[Vector[A], (Vector[A], A)](
v => if(v.isEmpty) None else Some((v.init, v.last))){
case (xs, x) => xs :+ x
}
}
/************************************************************************************************/
/** Scalaz instances */
/************************************************************************************************/
import scalaz.IList
implicit def iListSnoc[A]: Snoc[IList[A], A] = new Snoc[IList[A], A]{
val snoc = Prism[IList[A], (IList[A], A)](
il => Applicative[Option].apply2(il.initOption, il.lastOption)((_,_))){
case (init, last) => init :+ last
}
}
}
| rperry/Monocle | core/shared/src/main/scala/monocle/function/Snoc.scala | Scala | mit | 3,570 |
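A short usage sketch for the `Snoc` instances defined above (illustrative, not part of the dataset row); it assumes the scalaz-based Monocle API shown in this sample:

```scala
// Usage sketch for the Snoc typeclass and its standard instances.
import monocle.function.Snoc._

object SnocUsage extends App {
  // _snoc appends an element; _unsnoc splits a collection into (init, last).
  assert(_snoc(List(1, 2), 3) == List(1, 2, 3))
  assert(_unsnoc(List(1, 2, 3)) == Some((List(1, 2), 3)))
  assert(_unsnoc(List.empty[Int]).isEmpty)

  // lastOption is an Optional focusing on the last element, here of a String.
  assert(lastOption[String, Char].getOption("abc") == Some('c'))
}
```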
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer (t3l@threelights.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.blaze.initializers
import edu.latrobe._
import edu.latrobe.blaze._
import scala.util.hashing._
/**
* Complex initializers that operate on the outputs of other initializers. This
* is usually used for calibration algorithms.
*/
abstract class DependentInitializer[TBuilder <: DependentInitializerBuilder[_]]
extends InitializerEx[TBuilder] {
final val source
: Initializer = builder.source.build(seed)
override def apply(module: Module,
reference: LabeledBufferReference,
weights: ValueTensor,
inputFanSize: Int,
outputFanSize: Int)
: Unit = source.apply(
module,
reference,
weights,
inputFanSize,
outputFanSize
)
// ---------------------------------------------------------------------------
// State management.
// ---------------------------------------------------------------------------
override def state: InstanceState = DependentInitializerState(
super.state, source.state
)
override def restoreState(state: InstanceState): Unit = {
super.restoreState(state.parent)
state match {
case state: DependentInitializerState =>
source.restoreState(state.baseInitializer)
case _ =>
throw new MatchError(state)
}
}
}
abstract class DependentInitializerBuilder[TThis <: DependentInitializerBuilder[_]]
extends InitializerExBuilder[TThis] {
final private var _source
: InitializerBuilder = GaussianDistributionBuilder()
final def source
: InitializerBuilder = _source
final def source_=(value: InitializerBuilder): Unit = {
require(value != null)
_source = value
}
final def setSource(value: InitializerBuilder): TThis = {
source_=(value)
repr
}
override def hashCode()
: Int = MurmurHash3.mix(super.hashCode(), _source.hashCode())
override protected def doEquals(other: Equatable)
: Boolean = super.doEquals(other) && (other match {
case other: DependentInitializerBuilder[TThis] =>
_source == other._source
case _ =>
false
})
override def copyTo(other: InstanceBuilder): Unit = {
super.copyTo(other)
other match {
case other: DependentInitializerBuilder[TThis] =>
other._source = _source
case _ =>
}
}
override protected def doPermuteSeeds(fn: BuilderSeed => BuilderSeed)
: Unit = {
super.doPermuteSeeds(fn)
_source.permuteSeeds(fn)
}
}
final case class DependentInitializerState(override val parent: InstanceState,
baseInitializer: InstanceState)
extends InstanceState {
}
| bashimao/ltudl | blaze/src/main/scala/edu/latrobe/blaze/initializers/DependentInitializer.scala | Scala | apache-2.0 | 3,357 |
package reo7sp.boardpp.ui.widgets.awt
import javax.swing.JTextField
import javax.swing.border.EmptyBorder
/**
* Created by reo7sp on 12/27/13 at 9:59 PM
*/
class FancyTextField extends JTextField {
setBorder(new EmptyBorder(8, 8, 8, 8))
}
| reo7sp/BoardPP | src/main/java/reo7sp/boardpp/ui/widgets/awt/FancyTextField.scala | Scala | apache-2.0 | 245 |
/*
* Copyright (C) 2016 DANS - Data Archiving and Networked Services (info@dans.knaw.nl)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.agreement
import nl.knaw.dans.lib.error._
import nl.knaw.dans.lib.logging.DebugEnhancedLogging
import nl.knaw.dans.lib.logging.servlet.{ LogResponseBodyOnError, PlainLogFormatter, ServletLogger }
import org.scalatra.{ InternalServerError, NotFound, Ok, ScalatraServlet }
class EasyDepositAgreementCreatorServlet(app: EasyDepositAgreementCreatorApp,
version: String)
extends ScalatraServlet
with ServletLogger
with PlainLogFormatter
with LogResponseBodyOnError
with DebugEnhancedLogging {
get("/") {
contentType = "text/plain"
Ok(s"EASY Deposit Agreement Creator Service running ($version)")
}
post("/create") {
contentType = "application/pdf"
val datasetId = params("datasetId")
val isSample = params.getAsOrElse("sample", false)
app.createAgreement(datasetId, isSample)(() => response.outputStream)
.map(_ => Ok())
.doIfFailure {
case e =>
contentType = "text/plain"
logger.error(e.getMessage, e)
}
.getOrRecover {
case e: LdapError => NotFound(e.getMessage)
case e: NoDatasetFoundException => NotFound(e.getMessage)
case e: FedoraUnavailableException => InternalServerError(e.getMessage)
case e: GeneratorError => InternalServerError(e.getMessage)
case e => InternalServerError(e.getMessage)
}
}
}
| DANS-KNAW/easy-license-creator | src/main/scala/nl/knaw/dans/easy/agreement/EasyDepositAgreementCreatorServlet.scala | Scala | apache-2.0 | 2,068 |
package com.eevolution.context.dictionary.infrastructure.repository
import java.util.UUID
import com.eevolution.context.dictionary.domain._
import com.eevolution.context.dictionary.domain.model.Image
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
import com.eevolution.utils.PaginatedSequence
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcSession
import scala.concurrent.{ExecutionContext, Future}
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/EmerisScala
* Created by emeris.hernandez@e-evolution.com , www.e-evolution.com on 07/11/17.
*/
/**
* Image Repository
* @param session
* @param executionContext
*/
class ImageRepository (session: JdbcSession)(implicit executionContext: ExecutionContext)
extends api.repository.ImageRepository[Image , Int]
with ImageMapping {
def getById(id: Int): Future[Image] = {
Future(run(queryImage.filter(_.imageId == lift(id))).headOption.get)
}
def getByUUID(uuid: UUID): Future[Image] = {
Future(run(queryImage.filter(_.uuid == lift(uuid.toString))).headOption.get)
}
def getByImageId(id : Int) : Future[List[Image]] = {
Future(run(queryImage.filter(_.imageId == lift(id))))
}
def getAll() : Future[List[Image]] = {
Future(run(queryImage))
}
def getAllByPage(page: Int, pageSize: Int): Future[PaginatedSequence[Image]] = {
val offset = page * pageSize
val limit = (page + 1) * pageSize
for {
count <- countImage()
elements <- if (offset > count) Future.successful(Nil)
else selectImage(offset, limit)
} yield {
PaginatedSequence(elements, page, pageSize, count)
}
}
private def countImage() = {
Future(run(queryImage.size).toInt)
}
private def selectImage(offset: Int, limit: Int): Future[Seq[Image]] = {
Future(run(queryImage).slice(offset, limit).toSeq)
}
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/ImageRepository.scala | Scala | gpl-3.0 | 2,621 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt.internal.util
object Types extends Types
trait Types extends TypeFunctions {
val :^: = KCons
type :+:[H, T <: HList] = HCons[H, T]
val :+: = HCons
}
| sbt/sbt | internal/util-collection/src/main/scala/sbt/internal/util/Types.scala | Scala | apache-2.0 | 315 |
package top.spoofer.hbrdd.unit
/**
 * HbRddReader provides an interface for converting an Array[Byte] into a value of type A
 * @tparam A the type parameter
*/
trait HbRddFormatsReader[A] extends Serializable {
def formatsRead(readData: Array[Byte]): A
}
/**
 * HbRddWriter provides an interface for converting a value of type A into an Array[Byte]
 * @tparam A the type parameter
*/
trait HbRddFormatsWriter[A] extends Serializable {
def formatsWrite(writeData: A): Array[Byte]
}
trait HbRddFormats[A] extends HbRddFormatsReader[A] with HbRddFormatsWriter[A]
| TopSpoofer/hbrdd | src/main/scala/top/spoofer/hbrdd/unit/HbRddFormats.scala | Scala | apache-2.0 | 506 |
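A small illustrative instance of the `HbRddFormats` typeclass above for `String` values (a sketch, assuming UTF-8 as the byte encoding; not part of the dataset row):

```scala
// Example HbRddFormats instance for String, assuming UTF-8 encoding.
import java.nio.charset.StandardCharsets

import top.spoofer.hbrdd.unit.HbRddFormats

object StringHbRddFormats extends HbRddFormats[String] {
  // Decode HBase bytes into a String.
  def formatsRead(readData: Array[Byte]): String = new String(readData, StandardCharsets.UTF_8)

  // Encode a String into the bytes stored in HBase.
  def formatsWrite(writeData: String): Array[Byte] = writeData.getBytes(StandardCharsets.UTF_8)
}
```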
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.system
import java.util
import java.util.ArrayDeque
import java.util.concurrent.TimeUnit
import java.util.Collections
import java.util.HashMap
import java.util.HashSet
import java.util.Queue
import java.util.Set
import scala.collection.JavaConverters._
import org.apache.samza.serializers.SerdeManager
import org.apache.samza.util.{Logging, TimerUtil}
import org.apache.samza.system.chooser.MessageChooser
import org.apache.samza.SamzaException
import org.apache.samza.config.TaskConfig
object SystemConsumers {
val DEFAULT_NO_NEW_MESSAGES_TIMEOUT = 10
val DEFAULT_DROP_SERIALIZATION_ERROR = false
}
/**
* The SystemConsumers class coordinates between all SystemConsumers, the
* MessageChooser, and the SamzaContainer. Its job is to poll each
* SystemConsumer for messages, update the
* {@link org.apache.samza.system.chooser.MessageChooser} with new incoming
* messages, poll the MessageChooser for the next message to process, and
* return that message to the SamzaContainer.
*/
class SystemConsumers (
/**
* The class that determines the order to process incoming messages.
*/
chooser: MessageChooser,
/**
* A map of SystemConsumers that should be polled for new messages.
*/
consumers: Map[String, SystemConsumer],
/**
* Provides a mapping from system name to a {@see SystemAdmin}.
*/
systemAdmins: SystemAdmins,
/**
* The class that handles deserialization of incoming messages.
*/
serdeManager: SerdeManager = new SerdeManager,
/**
* A helper class to hold all of SystemConsumers' metrics.
*/
metrics: SystemConsumersMetrics = new SystemConsumersMetrics,
/**
* If MessageChooser returns null when it's polled, SystemConsumers will
* poll each SystemConsumer with a timeout next time it tries to poll for
* messages. Setting the timeout to 0 means that SamzaContainer's main
* thread will sit in a tight loop polling every SystemConsumer over and
* over again if no new messages are available.
*/
noNewMessagesTimeout: Int = SystemConsumers.DEFAULT_NO_NEW_MESSAGES_TIMEOUT,
/**
* This parameter is to define how to deal with deserialization failure. If
* set to true, the task will drop the messages when deserialization fails.
* If set to false, the task will throw SamzaException and fail the container.
*/
dropDeserializationError: Boolean = SystemConsumers.DEFAULT_DROP_SERIALIZATION_ERROR,
/**
* <p>Defines an upper bound for how long the SystemConsumers will wait
* before polling systems for more data. The default setting is 50ms, which
* means that SystemConsumers will poll for new messages for all
* SystemStreamPartitions with empty buffers every 50ms. SystemConsumers
* will also poll for new messages any time that there are no available
* messages to process, or any time the MessageChooser returns a null
* IncomingMessageEnvelope.</p>
*
* <p>This parameter also implicitly defines how much latency is introduced
* by SystemConsumers. If a message is available for a SystemStreamPartition
* with no remaining unprocessed messages, the SystemConsumers will poll for
* it within 50ms of its availability in the stream system.</p>
*/
val pollIntervalMs: Int = TaskConfig.DEFAULT_POLL_INTERVAL_MS,
/**
* Clock can be used to inject a custom clock when mocking this class in
* tests. The default implementation returns the current system clock time.
*/
val clock: () => Long = () => System.nanoTime()) extends Logging with TimerUtil {
/**
* Mapping from the {@see SystemStreamPartition} to the registered offsets.
*/
private val sspToRegisteredOffsets = new HashMap[SystemStreamPartition, String]()
/**
* A buffer of incoming messages grouped by SystemStreamPartition. These
* messages are handed out to the MessageChooser as it needs them.
*/
private val unprocessedMessagesBySSP = new HashMap[SystemStreamPartition, Queue[IncomingMessageEnvelope]]()
/**
* Set of SSPs that are currently at end-of-stream.
*/
private val endOfStreamSSPs = new HashSet[SystemStreamPartition]()
/**
* A set of SystemStreamPartitions grouped by systemName. This is used as a
* cache to figure out which SystemStreamPartitions we need to poll from the
* underlying system consumer.
*/
private val emptySystemStreamPartitionsBySystem = new HashMap[String, Set[SystemStreamPartition]]()
/**
* Default timeout to noNewMessagesTimeout. Every time SystemConsumers
* receives incoming messages, it sets timeout to 0. Every time
* SystemConsumers receives no new incoming messages from the MessageChooser,
* it sets timeout to noNewMessagesTimeout again.
*/
var timeout = noNewMessagesTimeout
/**
* The last time that systems were polled for new messages.
*/
var lastPollNs = 0L
/**
* Total number of unprocessed messages in unprocessedMessagesBySSP.
*/
var totalUnprocessedMessages = 0
debug("Got stream consumers: %s" format consumers)
debug("Got no new message timeout: %s" format noNewMessagesTimeout)
metrics.setTimeout(() => timeout)
metrics.setNeededByChooser(() => emptySystemStreamPartitionsBySystem.size)
metrics.setUnprocessedMessages(() => totalUnprocessedMessages)
def start {
for ((systemStreamPartition, offset) <- sspToRegisteredOffsets.asScala) {
val consumer = consumers(systemStreamPartition.getSystem)
consumer.register(systemStreamPartition, offset)
}
debug("Starting consumers.")
emptySystemStreamPartitionsBySystem.asScala ++= unprocessedMessagesBySSP
.keySet
.asScala
.groupBy(_.getSystem)
.mapValues(systemStreamPartitions => new util.HashSet(systemStreamPartitions.toSeq.asJava))
consumers
.keySet
.foreach(metrics.registerSystem)
consumers
.values
.foreach(_.start)
chooser.start
refresh
}
def stop {
debug("Stopping consumers.")
consumers.values.foreach(_.stop)
chooser.stop
}
def register(systemStreamPartition: SystemStreamPartition, offset: String) {
debug("Registering stream: %s, %s" format (systemStreamPartition, offset))
if (IncomingMessageEnvelope.END_OF_STREAM_OFFSET.equals(offset)) {
info("Stream : %s is already at end of stream" format (systemStreamPartition))
endOfStreamSSPs.add(systemStreamPartition)
return
}
metrics.registerSystemStreamPartition(systemStreamPartition)
unprocessedMessagesBySSP.put(systemStreamPartition, new ArrayDeque[IncomingMessageEnvelope]())
chooser.register(systemStreamPartition, offset)
try {
val consumer = consumers(systemStreamPartition.getSystem)
val existingOffset = sspToRegisteredOffsets.get(systemStreamPartition)
val systemAdmin = systemAdmins.getSystemAdmin(systemStreamPartition.getSystem)
val offsetComparisonResult = systemAdmin.offsetComparator(existingOffset, offset)
if (existingOffset == null || (offsetComparisonResult != null && offsetComparisonResult > 0)) {
sspToRegisteredOffsets.put(systemStreamPartition, offset)
}
} catch {
case e: NoSuchElementException => throw new SystemConsumersException("can't register " + systemStreamPartition.getSystem + "'s consumer.", e)
}
}
def isEndOfStream(systemStreamPartition: SystemStreamPartition) = {
endOfStreamSSPs.contains(systemStreamPartition)
}
def choose (updateChooser: Boolean = true): IncomingMessageEnvelope = {
val envelopeFromChooser = chooser.choose
updateTimer(metrics.deserializationNs) {
if (envelopeFromChooser == null) {
trace("Chooser returned null.")
metrics.choseNull.inc
// Sleep for a while so we don't poll in a tight loop, but, don't do this when called from the RunLoop
// code because in that case the chooser will not get updated with a new message for an SSP until after a
// message is processed, See how updateChooser variable is used below. The RunLoop has its own way to
// block when there is no work to process.
timeout = if (updateChooser) noNewMessagesTimeout else 0
} else {
val systemStreamPartition = envelopeFromChooser.getSystemStreamPartition
if (envelopeFromChooser.isEndOfStream) {
info("End of stream reached for partition: %s" format systemStreamPartition)
endOfStreamSSPs.add(systemStreamPartition)
}
trace("Chooser returned an incoming message envelope: %s" format envelopeFromChooser)
// Ok to give the chooser a new message from this stream.
timeout = 0
metrics.choseObject.inc
metrics.systemStreamMessagesChosen(envelopeFromChooser.getSystemStreamPartition).inc
if (updateChooser) {
trace("Update chooser for " + systemStreamPartition.getPartition)
tryUpdate(systemStreamPartition)
}
}
}
updateTimer(metrics.pollNs) {
if (envelopeFromChooser == null || TimeUnit.NANOSECONDS.toMillis(clock() - lastPollNs) > pollIntervalMs) {
refresh
}
}
envelopeFromChooser
}
/**
* Poll all SystemStreamPartitions for which there are currently no new
* messages to process.
*/
private def poll(systemName: String) {
trace("Polling system consumer: %s" format systemName)
metrics.systemPolls(systemName).inc
trace("Getting fetch map for system: %s" format systemName)
val systemFetchSet : util.Set[SystemStreamPartition] =
if (emptySystemStreamPartitionsBySystem.containsKey(systemName)) {
val sspToFetch = new util.HashSet(emptySystemStreamPartitionsBySystem.get(systemName))
sspToFetch.removeAll(endOfStreamSSPs)
sspToFetch
} else {
Collections.emptySet()
}
// Poll when at least one SSP in this system needs more messages.
if (systemFetchSet != null && systemFetchSet.size > 0) {
val consumer = consumers(systemName)
trace("Fetching: %s" format systemFetchSet)
metrics.systemStreamPartitionFetchesPerPoll(systemName).inc(systemFetchSet.size)
val systemStreamPartitionEnvelopes = consumer.poll(systemFetchSet, timeout)
trace("Got incoming message envelopes: %s" format systemStreamPartitionEnvelopes)
metrics.systemMessagesPerPoll(systemName).inc
val sspAndEnvelopeIterator = systemStreamPartitionEnvelopes.entrySet.iterator
while (sspAndEnvelopeIterator.hasNext) {
val sspAndEnvelope = sspAndEnvelopeIterator.next
val systemStreamPartition = sspAndEnvelope.getKey
val envelopes = new ArrayDeque(sspAndEnvelope.getValue)
val numEnvelopes = envelopes.size
totalUnprocessedMessages += numEnvelopes
if (numEnvelopes > 0) {
unprocessedMessagesBySSP.put(systemStreamPartition, envelopes)
// Update the chooser if it needs a message for this SSP.
if (emptySystemStreamPartitionsBySystem.get(systemStreamPartition.getSystem).remove(systemStreamPartition)) {
tryUpdate(systemStreamPartition)
}
}
}
} else {
trace("Skipping polling for %s. Already have messages available for all registered SystemStreamPartitions." format systemName)
}
}
def tryUpdate(ssp: SystemStreamPartition) {
var updated = false
try {
updated = update(ssp)
} finally {
if (!updated) {
// if failed to update the chooser, add the ssp back into the emptySystemStreamPartitionBySystem map to ensure that we will poll for the next message
emptySystemStreamPartitionsBySystem.get(ssp.getSystem).add(ssp)
}
}
}
private def refresh {
trace("Refreshing chooser with new messages.")
// Update last poll time so we don't poll too frequently.
lastPollNs = clock()
// Poll every system for new messages.
consumers.keys.map(poll(_))
}
/**
* Tries to update the message chooser with an envelope from the supplied
* SystemStreamPartition if an envelope is available.
*/
private def update(systemStreamPartition: SystemStreamPartition) = {
var updated = false
val q = unprocessedMessagesBySSP.get(systemStreamPartition)
while (q.size > 0 && !updated) {
val rawEnvelope = q.remove
val deserializedEnvelope = try {
Some(serdeManager.fromBytes(rawEnvelope))
} catch {
case e: Throwable if !dropDeserializationError =>
throw new SystemConsumersException(
"Cannot deserialize an incoming message for %s"
.format(systemStreamPartition.getSystemStream.toString), e)
case ex: Throwable =>
debug("Cannot deserialize an incoming message for %s. Dropping the error message."
.format(systemStreamPartition.getSystemStream.toString), ex)
metrics.deserializationError.inc
None
}
if (deserializedEnvelope.isDefined) {
chooser.update(deserializedEnvelope.get)
updated = true
}
totalUnprocessedMessages -= 1
}
updated
}
}
/**
* When SystemConsumers registers consumers, there are situations the system cannot recover
* from, such as when a failed consumer is used in task.input and changelogs.
* SystemConsumersException is thrown to indicate such a hard failure.
*/
class SystemConsumersException(s: String, t: Throwable) extends SamzaException(s, t) {
def this(s: String) = this(s, null)
}
| Swrrt/Samza | samza-core/src/main/scala/org/apache/samza/system/SystemConsumers.scala | Scala | apache-2.0 | 14,301 |
package org.openguard.core
import java.util.Date
import com.sksamuel.scrimage.Image
/**
* Created by pbolle on 20.06.15.
*/
class Photo(var image: Image,var path: String, var date: Date = new Date()) {
}
| pbolle/openvideoguard | core/src/main/scala/org/openguard/core/Photo.scala | Scala | apache-2.0 | 210 |
package com.chrisomeara.pillar
import com.datastax.driver.core.querybuilder.QueryBuilder
import com.datastax.driver.core.{Metadata, Session}
import org.scalatest.matchers.ShouldMatchers
trait AcceptanceAssertions extends ShouldMatchers {
val session: Session
val keyspaceName: String
protected def assertEmptyAppliedMigrationsTable() {
session.execute(QueryBuilder.select().from(keyspaceName, "applied_migrations")).all().size() should equal(0)
}
protected def assertKeyspaceDoesNotExist() {
val metadata: Metadata = session.getCluster.getMetadata
metadata.getKeyspace(keyspaceName) should be(null)
}
}
| smr-co-uk/pillar | src/test/scala/com/chrisomeara/pillar/AcceptanceAssertions.scala | Scala | mit | 631 |
package cpup.mc.oldenMagic.content
import net.minecraft.item.ItemStack
import net.minecraft.world.World
import net.minecraft.entity.player.EntityPlayer
class ItemWand extends TItemBase {
override def onItemRightClick(stack: ItemStack, world: World, player: EntityPlayer) = {
mod.proxy.activateSpellCasting(player)
stack
}
}
| CoderPuppy/oldenmagic-mc | src/main/scala/cpup/mc/oldenMagic/content/ItemWand.scala | Scala | mit | 331 |
package reopp.common.guardedcommands.chocox
/**
* Created with IntelliJ IDEA.
*
* Created by jose on 06/02/13.
*/
import org.scalatest.FunSpec
import reopp.common.guardedcommands.dataconnectors.{GCADrain, GCWriter, GCMerger, GCFilter}
import reopp.common.{Buffer, Utils, Function, Predicate}
import reopp.common.Utils._
import reopp.common.guardedcommands.dataconnectors.ConnectorGen._
import reopp.common.guardedcommands.{Impl, Formula}
class TestChocoX extends FunSpec {
describe ("Simple connector") {
val func = Function("func") {
case x:String =>
println("~~~~ asked func applied to "+x)
"f-"+x
}
val pred = Predicate("pred") {
case x:String =>
println("~~~~ testing if f-aaa == "+x)
x == ("f-aaa")
}
val c2 =
writer("a",List("aaa")) ++
transf("a","b",func) ++
filter("b","c",pred) ++
reader("c",2)
val cs = c2.getConstraints.close
////
val a = mkVar("a")
val b = mkVar("b")
val c = mkVar("c")
val cs2 = Formula(
a :== "aaa",
(a :< pred) --> (b := a)
)
val res = ChocoX.solve(cs)
println("-----------\\n"+cs.commands.mkString("\\n"))
println("-----------")
if (res.isDefined) print("solved:\\n"+res.get)
else println("no solution")
// if (res.isDefined) println("partial eval: "+c.partialEval(res.get))
// it ("c should have a sol") {assert (res.isDefined)}
}
}
| joseproenca/ip-constraints | code/src/test/scala/reopp/reopp/common/guardedcommands/chocox/TestChocoX.scala | Scala | mit | 1,448 |
package com.twitter.finagle.client
import com.twitter.finagle._
import com.twitter.finagle.context
import com.twitter.finagle.context.Contexts
import com.twitter.finagle.factory.{
BindingFactory, RefcountedFactory, StatsFactoryWrapper, TimeoutFactory}
import com.twitter.finagle.filter.{ClearContextValueFilter, DtabStatsFilter, ExceptionSourceFilter, MonitorFilter}
import com.twitter.finagle.loadbalancer.LoadBalancerFactory
import com.twitter.finagle.param._
import com.twitter.finagle.service._
import com.twitter.finagle.stack.Endpoint
import com.twitter.finagle.stack.nilStack
import com.twitter.finagle.stats.{LoadedHostStatsReceiver, ClientStatsReceiver}
import com.twitter.finagle.tracing._
import com.twitter.finagle.transport.Transport
import com.twitter.finagle.util.Showable
import com.twitter.util.Future
import com.twitter.util.registry.GlobalRegistry
object StackClient {
/**
* Canonical roles for each client-related Stack module.
*/
object Role extends Stack.Role("StackClient") {
val pool = Stack.Role("Pool")
val requestDraining = Stack.Role("RequestDraining")
val prepFactory = Stack.Role("PrepFactory")
/** PrepConn is special in that it's the first role before the `Endpoint` role */
val prepConn = Stack.Role("PrepConn")
val protoTracing = Stack.Role("protoTracing")
}
/**
* A [[com.twitter.finagle.Stack]] representing an endpoint.
* Note that this is terminated by a [[com.twitter.finagle.service.FailingFactory]]:
* users are expected to terminate it with a concrete service factory.
*
* @see [[com.twitter.finagle.tracing.WireTracingFilter]]
* @see [[com.twitter.finagle.service.ExpiringService]]
* @see [[com.twitter.finagle.service.FailFastFactory]]
* @see [[com.twitter.finagle.service.PendingRequestFilter]]
* @see [[com.twitter.finagle.client.DefaultPool]]
* @see [[com.twitter.finagle.service.TimeoutFilter]]
* @see [[com.twitter.finagle.service.FailureAccrualFactory]]
* @see [[com.twitter.finagle.service.StatsServiceFactory]]
* @see [[com.twitter.finagle.service.StatsFilter]]
* @see [[com.twitter.finagle.filter.DtabStatsFilter]]
* @see [[com.twitter.finagle.tracing.ClientDestTracingFilter]]
* @see [[com.twitter.finagle.filter.MonitorFilter]]
* @see [[com.twitter.finagle.filter.ExceptionSourceFilter]]
* @see [[com.twitter.finagle.client.LatencyCompensation]]
*/
def endpointStack[Req, Rep]: Stack[ServiceFactory[Req, Rep]] = {
// Ensure that we have performed global initialization.
com.twitter.finagle.Init()
/**
* N.B. see the note in `newStack` regarding up / down orientation in the stack.
*/
val stk = new StackBuilder[ServiceFactory[Req, Rep]](nilStack[Req, Rep])
/**
* `prepConn` is the bottom of the stack by definition. This position represents
* the first module to handle newly connected [[Transport]]s and dispatchers.
*
* finagle-thrift uses this role to install session upgrading logic from
* vanilla Thrift to Twitter Thrift.
*/
stk.push(Role.prepConn, identity[ServiceFactory[Req, Rep]](_))
/**
* `WriteTracingFilter` annotates traced requests. Annotations are timestamped
* so this should be low in the stack to accurately delineate between wire time
* and handling time.
*/
stk.push(WireTracingFilter.module)
/**
* `ExpiringService` enforces an idle timeout and total ttl for connections.
* This module must be beneath the DefaultPool in order to apply per connection.
*
* N.B. the difference between this connection ttl and the `DefaultPool` ttl
* (via CachingPool) is that this applies to *all* connections and `DefaultPool`
* only expires connections above the low watermark.
*/
stk.push(ExpiringService.client)
/**
* `FailFastFactory` accumulates failures per connection, marking the endpoint
* as unavailable so that modules higher in the stack can dispatch requests
* around the failing endpoint.
*/
stk.push(FailFastFactory.module)
/**
* `PendingRequestFilter` enforces a limit on the number of pending requests
* for a single connection. It must be beneath the `DefaultPool` module so that
* its limits are applied per connection rather than per endpoint.
*/
stk.push(PendingRequestFilter.module)
/**
* `DefaultPool` configures connection pooling. Like the `LoadBalancerFactory`
* module it is a potentially aggregate [[ServiceFactory]] composed of multiple
* [[Service Services]] which represent a distinct session to the same endpoint.
*/
stk.push(DefaultPool.module)
/**
* `TimeoutFilter` enforces static request timeouts and broadcast request deadlines,
* sending a best-effort interrupt for expired requests.
* It must be beneath the `StatsFilter` so that timeouts are properly recorded.
*/
stk.push(TimeoutFilter.clientModule)
/**
* `ExceptionRemoteInfoFactory` fills in remote info (upstream addr/client id,
* downstream addr/client id, and trace id) in exceptions. This needs to be near the top
* of the stack so that failures anywhere lower in the stack have remote
* info added to them, but below the stats, tracing, and monitor filters so these filters
* see exceptions with remote info added.
*/
stk.push(ExceptionRemoteInfoFactory.module)
/**
* `FailureAccrualFactory` accrues request failures per endpoint updating its
* status so that modules higher in the stack may route around an unhealthy
* endpoint.
*
* It must be above `DefaultPool` to accumulate failures across all sessions
* to an endpoint.
* It must be above `TimeoutFilter` so that it can observe request timeouts.
* It must be above `PendingRequestFilter` so that it can observe client
* admission rejections.
*/
stk.push(FailureAccrualFactory.module)
/**
* `StatsServiceFactory` exports a gauge which reports the status of the stack
* beneath it. It must be above `FailureAccrualFactory` in order to record
* failure accrual's aggregate view of health over multiple requests.
*/
stk.push(StatsServiceFactory.module)
/**
* `StatsFilter` installs a (wait for it...) stats filter on active sessions.
* It must be above the `TimeoutFilter` so that it can record timeouts as failures.
* It has no other position constraint.
*/
stk.push(StatsFilter.module)
/**
* `DtabStatsFilter` exports dtab stats. It has no relative position constraints
* within the endpoint stack.
*/
stk.push(DtabStatsFilter.module)
/**
* `ClientDestTracingFilter` annotates the trace with the destination endpoint's
* socket address. It has no position constraints within the endpoint stack.
*/
stk.push(ClientDestTracingFilter.module)
/**
* `MonitorFilter` installs a configurable exception handler ([[Monitor]]) for
* client sessions. There is no specific position constraint but higher in the
* stack is preferable so it can wrap more application logic.
*/
stk.push(MonitorFilter.module)
/**
* `ExceptionSourceFilter` is the exception handler of last resort. It recovers
* application errors into failed [[Future Futures]] and attributes the failures to
* clients by client label. This needs to be at the top of the endpoint stack so that
* failures anywhere lower in the stack have endpoints attributed to them.
*/
stk.push(ExceptionSourceFilter.module)
/**
* `LatencyCompensation` configures latency compensation based on destination.
*
* It must appear above consumers of the c.t.f.client.Compensation param, so
* above `TimeoutFilter`.
*
* It is only evaluated at stack creation time.
*/
stk.push(LatencyCompensation.module)
stk.result
}
/**
* Creates a default finagle client [[com.twitter.finagle.Stack]].
* The stack can be configured via [[com.twitter.finagle.Stack.Param]]'s
* in the finagle package object ([[com.twitter.finagle.param]]) and specific
* params defined in the companion objects of the respective modules.
*
* @see [[com.twitter.finagle.client.StackClient#endpointStack]]
* @see [[com.twitter.finagle.loadbalancer.LoadBalancerFactory]]
* @see [[com.twitter.finagle.factory.StatsFactoryWrapper]]
* @see [[com.twitter.finagle.client.StatsScoping]]
* @see [[com.twitter.finagle.client.AddrMetadataExtraction]]
* @see [[com.twitter.finagle.factory.BindingFactory]]
* @see [[com.twitter.finagle.factory.RefcountedFactory]]
* @see [[com.twitter.finagle.factory.TimeoutFactory]]
* @see [[com.twitter.finagle.FactoryToService]]
* @see [[com.twitter.finagle.service.Retries]]
* @see [[com.twitter.finagle.tracing.ClientTracingFilter]]
* @see [[com.twitter.finagle.tracing.TraceInitializerFilter]]
*/
def newStack[Req, Rep]: Stack[ServiceFactory[Req, Rep]] = {
/*
* NB on orientation: we here speak of "up" / "down" or "above" /
* "below" in terms of a request's traversal of the stack---a
* request starts at the top and goes down, a response returns
* back up. This is opposite to how modules are written on the
* page; a request starts at the bottom of the `newStack` method
* and goes up.
*/
val stk = new StackBuilder(endpointStack[Req, Rep])
/*
* These modules balance requests across cluster endpoints and
* handle automatic requeuing of failed requests.
*
* * `LoadBalancerFactory` balances requests across the endpoints
* of a cluster given by the `LoadBalancerFactory.Dest`
* param. It must appear above the endpoint stack, and below
* `BindingFactory` in order to satisfy the
* `LoadBalancerFactory.Dest` param.
*
* * `StatsFactoryWrapper` tracks the service acquisition latency
* metric. It must appear above `LoadBalancerFactory` in order
* to track service acquisition from the load balancer, and
* below `FactoryToService` so that it is called on each
* service acquisition.
*
* * `Role.requestDraining` ensures that a service is not closed
* until all outstanding requests on it have completed. It must
* appear below `FactoryToService` so that services are not
* prematurely closed by `FactoryToService`. (However it is
* only effective for services which are called multiple times,
* which is never the case when `FactoryToService` is enabled.)
*
* * `TimeoutFactory` times out service acquisition from
* `LoadBalancerFactory`. It must appear above
* `LoadBalancerFactory` in order to time out service
* acquisition from the load balancer, and below
* `FactoryToService` so that it is called on each service
* acquisition.
*
* * `Role.prepFactory` is a hook used to inject codec-specific
* behavior; it is used in the HTTP codec to avoid closing a
* service while a chunked response is being read. It must
* appear below `FactoryToService` so that services are not
* prematurely closed by `FactoryToService`.
*
* * `FactoryToService` acquires a new endpoint service from the
* load balancer on each request (and closes it after the
* response completes).
*
* * `Retries` retries `RetryPolicy.RetryableWriteException`s
* automatically. It must appear above `FactoryToService` so
* that service acquisition failures are retried.
*
* * `ClearContextValueFilter` clears the configured Context key,
* `Retries`, in the request's Context. This module must
* come before `Retries` so that it doesn't clear the `Retries`
* set by this client. `Retries` is only meant to be propagated
* one hop from the client to the server. The client overwrites `Retries`
* in the `RequeueFilter` with its own value; however, if the client
* has removed `Retries` in its configuration, we want `Retries`
* to be cleared so the server doesn't see a value set by another client.
*/
stk.push(LoadBalancerFactory.module)
stk.push(StatsFactoryWrapper.module)
stk.push(Role.requestDraining, (fac: ServiceFactory[Req, Rep]) =>
new RefcountedFactory(fac))
stk.push(TimeoutFactory.module)
stk.push(Role.prepFactory, identity[ServiceFactory[Req, Rep]](_))
stk.push(FactoryToService.module)
stk.push(Retries.moduleRequeueable)
stk.push(ClearContextValueFilter.module(context.Retries))
/*
* These modules deal with name resolution and request
* distribution (when a name resolves to a `Union` of clusters).
*
* * `StatsScoping` modifies the `Stats` param based on the
* `AddrMetadata` and `Scoper` params; it permits stats further
* down the stack to be scoped according to the destination
* cluster. It must appear below `AddrMetadataExtraction` to
* satisfy the `AddrMetadata` param, and above
* `RequeuingFilter` (and everything below it) which must have
* stats scoped to the destination cluster.
*
* * `AddrMetadataExtraction` extracts `Addr.Metadata` from the
* `LoadBalancerFactory.Dest` param and puts it in the
* `AddrMetadata` param. (Arguably this should happen directly
* in `BindingFactory`.) It must appear below `BindingFactory`
* to satisfy the `LoadBalanceFactory.Dest param`, and above
* `StatsScoping` to provide the `AddrMetadata` param.
*
* * `EndpointRecorder` passes endpoint information to the
* `EndpointRegistry`. It must appear below `BindingFactory` so
* `BindingFactory` can set the `Name.Bound` `BindingFactory.Dest`
* param.
*
* * `BindingFactory` resolves the destination `Name` into a
* `NameTree`, and distributes requests to destination clusters
* according to the resolved `NameTree`. Cluster endpoints are
* passed down in the `LoadBalancerFactory.Dest` param. It must
* appear above 'AddrMetadataExtraction' and
* `LoadBalancerFactory` to provide the
* `LoadBalancerFactory.Dest` param.
*
* * `TimeoutFactory` times out name resolution, which happens in
* the service acquisition phase in `BindingFactory`; once the
* name is resolved, a service is acquired as soon as
* processing hits the `FactoryToService` further down the
* stack. It must appear above `BindingFactory` in order to
* time out name resolution, and below `FactoryToService` so
* that it is called on each service acquisition.
*
* * `FactoryToService` acquires a new service on each request
* (and closes it after the response completes). This has three
* purposes: first, so that the per-request `Dtab.local` may be
* taken into account in name resolution; second, so that each
* request is distributed across the `NameTree`; and third, so
* that name resolution and request distribution are included
* in the request trace span. (Both name resolution and request
* distribution are performed in the service acquisition
* phase.) It must appear above `BindingFactory` and below
* tracing setup.
*/
stk.push(StatsScoping.module)
stk.push(AddrMetadataExtraction.module)
stk.push(EndpointRecorder.module)
stk.push(BindingFactory.module)
stk.push(TimeoutFactory.module)
stk.push(FactoryToService.module)
/*
* These modules set up tracing for the request span:
*
* * `Role.protoTracing` is a hook for protocol-specific tracing
*
* * `ClientTracingFilter` traces request send / receive
* events. It must appear above all other modules except
* `TraceInitializerFilter` so it delimits all tracing in the
* course of a request.
*
* * `TraceInitializerFilter` allocates a new trace span per
* request. It must appear above all other modules so the
* request span encompasses all tracing in the course of a
* request.
*/
stk.push(Role.protoTracing, identity[ServiceFactory[Req, Rep]](_))
stk.push(Failure.module)
stk.push(ClientTracingFilter.module)
stk.push(TraceInitializerFilter.clientModule)
stk.push(RegistryEntryLifecycle.module)
stk.result
}
/**
* The default params used for client stacks.
*/
val defaultParams: Stack.Params =
Stack.Params.empty +
Stats(ClientStatsReceiver) +
LoadBalancerFactory.HostStats(LoadedHostStatsReceiver)
}
/**
* A [[com.twitter.finagle.Client Client]] that may have its
* [[com.twitter.finagle.Stack Stack]] transformed.
*
* A `StackBasedClient` is weaker than a `StackClient` in that the
* specific `Req`, `Rep` types of its stack are not exposed.
*/
trait StackBasedClient[Req, Rep] extends Client[Req, Rep]
with Stack.Parameterized[StackBasedClient[Req, Rep]]
with Stack.Transformable[StackBasedClient[Req, Rep]]
/**
* A [[com.twitter.finagle.Client Client]] that composes a
* [[com.twitter.finagle.Stack Stack]].
*/
trait StackClient[Req, Rep] extends StackBasedClient[Req, Rep]
with Stack.Parameterized[StackClient[Req, Rep]]
with Stack.Transformable[StackClient[Req, Rep]] {
/** The current stack. */
def stack: Stack[ServiceFactory[Req, Rep]]
/** The current parameter map. */
def params: Stack.Params
/** A new StackClient with the provided stack. */
def withStack(stack: Stack[ServiceFactory[Req, Rep]]): StackClient[Req, Rep]
def transformed(t: Stack.Transformer): StackClient[Req, Rep] =
withStack(t(stack))
// these are necessary to have the right types from Java
def withParams(ps: Stack.Params): StackClient[Req, Rep]
def configured[P: Stack.Param](p: P): StackClient[Req, Rep]
def configured[P](psp: (P, Stack.Param[P])): StackClient[Req, Rep]
}
/**
* The standard template implementation for
* [[com.twitter.finagle.client.StackClient]].
*
* @see The [[http://twitter.github.io/finagle/guide/Clients.html user guide]]
* for further details on Finagle clients and their configuration.
* @see [[StackClient.newStack]] for the default modules used by Finagle
* clients.
*/
trait StdStackClient[Req, Rep, This <: StdStackClient[Req, Rep, This]]
extends StackClient[Req, Rep]
with Stack.Parameterized[This]
with CommonParams[This]
with ClientParams[This]
with WithClientAdmissionControl[This]
with WithClientTransport[This]
with WithClientSession[This]
with WithSessionQualifier[This] { self =>
/**
* The type we write into the transport.
*/
protected type In
/**
* The type we read out of the transport.
*/
protected type Out
/**
* Defines a typed [[com.twitter.finagle.client.Transporter]] for this client.
* Concrete StackClient implementations are expected to specify this.
*/
protected def newTransporter(): Transporter[In, Out]
/**
* Defines a dispatcher, a function which reconciles the stream based
* `Transport` with a Request/Response oriented `Service`.
* Together with a `Transporter`, it forms the foundation of a
* finagle client. Concrete implementations are expected to specify this.
*
* @see [[com.twitter.finagle.dispatch.GenSerialServerDispatcher]]
*/
protected def newDispatcher(transport: Transport[In, Out]): Service[Req, Rep]
def withStack(stack: Stack[ServiceFactory[Req, Rep]]): This =
copy1(stack = stack)
/**
* Creates a new StackClient with `f` applied to `stack`.
*
* For expert users only.
*/
def transformed(f: Stack[ServiceFactory[Req, Rep]] => Stack[ServiceFactory[Req, Rep]]): This =
copy1(stack = f(stack))
/**
* Creates a new StackClient with parameter `p`.
*/
override def configured[P: Stack.Param](p: P): This =
withParams(params + p)
/**
* Creates a new StackClient with parameter `psp._1` and Stack Param type `psp._2`.
*/
override def configured[P](psp: (P, Stack.Param[P])): This = {
val (p, sp) = psp
configured(p)(sp)
}
/**
* Creates a new StackClient with `params` used to configure this StackClient's `stack`.
*/
def withParams(params: Stack.Params): This =
copy1(params = params)
/**
* Prepends `filter` to the top of the client. That is, after materializing
* the client (newClient/newService) `filter` will be the first element which
* requests flow through. This is a familiar chaining combinator for filters and
* is particularly useful for `StdStackClient` implementations that don't expose
* services but instead wrap the resulting service with a rich API.
*/
def filtered(filter: Filter[Req, Rep, Req, Rep]): This = {
val role = Stack.Role(filter.getClass.getSimpleName)
val stackable = Filter.canStackFromFac.toStackable(role, filter)
withStack(stackable +: stack)
}
/**
* A copy constructor in lieu of defining StackClient as a
* case class.
*/
protected def copy1(
stack: Stack[ServiceFactory[Req, Rep]] = this.stack,
params: Stack.Params = this.params): This { type In = self.In; type Out = self.Out }
/**
* A stackable module that creates new `Transports` (via transporter)
* when applied.
*/
protected def endpointer: Stackable[ServiceFactory[Req, Rep]] =
new Stack.Module[ServiceFactory[Req, Rep]] {
val role = Endpoint
val description = "Send requests over the wire"
val parameters = Seq(implicitly[Stack.Param[Transporter.EndpointAddr]])
def make(prms: Stack.Params, next: Stack[ServiceFactory[Req, Rep]]) = {
val Transporter.EndpointAddr(addr) = prms[Transporter.EndpointAddr]
val factory = addr match {
case com.twitter.finagle.exp.Address.ServiceFactory(sf: ServiceFactory[Req, Rep], _) => sf
case Address.Failed(e) => new FailingFactory[Req, Rep](e)
case Address.Inet(ia, _) =>
val endpointClient = copy1(params=prms)
val transporter = endpointClient.newTransporter()
// Export info about the transporter type so that we can query info
// about its implementation at runtime. This assumes that the `toString`
// of the implementation is sufficiently descriptive.
val transporterImplKey = Seq(
ClientRegistry.registryName,
endpointClient.params[ProtocolLibrary].name,
endpointClient.params[Label].label,
"Transporter")
GlobalRegistry.get.put(transporterImplKey, transporter.toString)
val mkFutureSvc: () => Future[Service[Req, Rep]] =
() => transporter(ia).map { trans =>
// we do not want to capture and request specific Locals
// that would live for the life of the session.
Contexts.letClear {
endpointClient.newDispatcher(trans)
}
}
ServiceFactory(mkFutureSvc)
}
Stack.Leaf(this, factory)
}
}
def newClient(dest: Name, label0: String): ServiceFactory[Req, Rep] = {
val Stats(stats) = params[Stats]
val Label(label1) = params[Label]
// For historical reasons, we have two sources for identifying
// a client. The most recently set `label0` takes precedence.
val clientLabel = (label0, label1) match {
case ("", "") => Showable.show(dest)
case ("", l1) => l1
case (l0, l1) => l0
}
val clientStack = stack ++ (endpointer +: nilStack)
val clientParams = params +
Label(clientLabel) +
Stats(stats.scope(clientLabel)) +
BindingFactory.Dest(dest)
clientStack.make(clientParams)
}
override def newService(dest: Name, label: String): Service[Req, Rep] = {
val client = copy1(
params = params + FactoryToService.Enabled(true)
).newClient(dest, label)
new FactoryToService[Req, Rep](client)
}
}
| adriancole/finagle | finagle-core/src/main/scala/com/twitter/finagle/client/StackClient.scala | Scala | apache-2.0 | 24,122 |
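For context, a protocol object built on `StackClient` (for example finagle-http's `Http.client`) is typically materialized as shown in this minimal sketch; it assumes finagle-http is on the classpath, and the destination and label are illustrative values:

```scala
// Materializing a StackClient-based protocol object into a Service.
import com.twitter.finagle.{Http, Service}
import com.twitter.finagle.http.{Request, Response}

object StackClientUsage {
  // Http.client is a StdStackClient; newService materializes the stack described above
  // (newStack plus the endpoint stack) into a Service.
  val client: Service[Request, Response] =
    Http.client
      .withLabel("example-client")   // sets the Label param used for stats scoping
      .newService("localhost:8080")  // destination address (illustrative)
}
```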
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.stream.table.stringexpr
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.expressions.utils.Func0
import org.apache.flink.table.utils.{TableTestBase, Top3WithMapView}
import org.junit.Test
class TableAggregateStringExpressionTest extends TableTestBase {
@Test
def testNonGroupedTableAggregate(): Unit = {
val util = streamTestUtil()
val t = util.addTable[(Int, Long, String)]('a, 'b, 'c)
val top3 = new Top3WithMapView
util.tableEnv.registerFunction("top3", top3)
util.tableEnv.registerFunction("Func0", Func0)
// Expression / Scala API
val resScala = t
.flatAggregate(top3('a))
.select(Func0('f0) as 'a, 'f1 as 'b)
// String / Java API
val resJava = t
.flatAggregate("top3(a)")
.select("Func0(f0) as a, f1 as b")
verifyTableEquals(resJava, resScala)
}
@Test
def testGroupedTableAggregate(): Unit = {
val util = streamTestUtil()
val t = util.addTable[(Int, Long, String)]('a, 'b, 'c)
val top3 = new Top3WithMapView
util.tableEnv.registerFunction("top3", top3)
util.tableEnv.registerFunction("Func0", Func0)
// Expression / Scala API
val resScala = t
.groupBy('b % 5)
.flatAggregate(top3('a))
.select(Func0('f0) as 'a, 'f1 as 'b)
// String / Java API
val resJava = t
.groupBy("b % 5")
.flatAggregate("top3(a)")
.select("Func0(f0) as a, f1 as b")
verifyTableEquals(resJava, resScala)
}
@Test
def testAliasNonGroupedTableAggregate(): Unit = {
val util = streamTestUtil()
val t = util.addTable[(Int, Long, String)]('a, 'b, 'c)
val top3 = new Top3WithMapView
util.tableEnv.registerFunction("top3", top3)
util.tableEnv.registerFunction("Func0", Func0)
// Expression / Scala API
val resScala = t
.flatAggregate(top3('a) as ('d, 'e))
.select('*)
// String / Java API
val resJava = t
.flatAggregate("top3(a) as (d, e)")
.select($"*")
verifyTableEquals(resJava, resScala)
}
@Test
def testAliasGroupedTableAggregate(): Unit = {
val util = streamTestUtil()
val t = util.addTable[(Int, Long, String)]('a, 'b, 'c)
val top3 = new Top3WithMapView
util.tableEnv.registerFunction("top3", top3)
util.tableEnv.registerFunction("Func0", Func0)
// Expression / Scala API
val resScala = t
.groupBy('b)
.flatAggregate(top3('a) as ('d, 'e))
.select('*)
// String / Java API
val resJava = t
.groupBy("b")
.flatAggregate("top3(a) as (d, e)")
.select($"*")
verifyTableEquals(resJava, resScala)
}
}
| jinglining/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/stream/table/stringexpr/TableAggregateStringExpressionTest.scala | Scala | apache-2.0 | 3,491 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.azure.documentdb.sink
import com.datamountaineer.streamreactor.connect.azure.documentdb.Json
import com.datamountaineer.streamreactor.connect.azure.documentdb.config.{DocumentDbConfig, DocumentDbConfigConstants, DocumentDbSinkSettings}
import com.microsoft.azure.documentdb._
import io.confluent.connect.avro.AvroData
import org.apache.kafka.connect.sink.{SinkRecord, SinkTaskContext}
import org.mockito.ArgumentMatchers.{any, eq => mockEq}
import org.mockito.MockitoSugar
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import scala.collection.JavaConverters._
class DocumentDbSinkTaskStructTest extends AnyWordSpec with Matchers with MockitoSugar with MatchingArgument {
private val connection = "https://accountName.documents.azure.com:443/"
private val avroData = new AvroData(4)
"DocumentDbSinkTask" should {
"handle STRUCT INSERTS with default consistency level" in {
val map = Map(
DocumentDbConfigConstants.DATABASE_CONFIG -> "database1",
DocumentDbConfigConstants.CONNECTION_CONFIG -> connection,
DocumentDbConfigConstants.MASTER_KEY_CONFIG -> "secret",
DocumentDbConfigConstants.KCQL_CONFIG -> "INSERT INTO coll1 SELECT * FROM topic1;INSERT INTO coll2 SELECT * FROM topic2"
).asJava
val documentClient = mock[DocumentClient]
val dbResource: ResourceResponse[Database] = mock[ResourceResponse[Database]]
when(dbResource.getResource).thenReturn(mock[Database])
Seq("dbs/database1/colls/coll1",
"dbs/database1/colls/coll2").foreach { c =>
val resource = mock[ResourceResponse[DocumentCollection]]
when(resource.getResource).thenReturn(mock[DocumentCollection])
when(documentClient.readCollection(mockEq(c), any(classOf[RequestOptions])))
.thenReturn(resource)
when(documentClient.readCollection(mockEq(c), mockEq(null)))
.thenReturn(resource)
}
when(documentClient.readDatabase(mockEq("dbs/database1"), mockEq(null)))
.thenReturn(dbResource)
// val task = new DocumentDbSinkTask(_ => documentClient)
// val context = mock[SinkTaskContext]
// when(context.configs()).thenReturn(map)
// task.initialize(context)
// task.start(map)
val json1 = scala.io.Source.fromFile(getClass.getResource(s"/transaction1.json").toURI.getPath).mkString
val tx1 = Json.fromJson[Transaction](json1)
val json2 = scala.io.Source.fromFile(getClass.getResource(s"/transaction2.json").toURI.getPath).mkString
val tx2 = Json.fromJson[Transaction](json2)
val sinkRecord1 = new SinkRecord("topic1", 0, null, null, Transaction.ConnectSchema, tx1.toStruct(), 1000)
val sinkRecord2 = new SinkRecord("topic2", 0, null, null, Transaction.ConnectSchema, tx2.toStruct(), 1000)
val doc1 = new Document(json1)
val r1 = mock[ResourceResponse[Document]]
when(r1.getResource).thenReturn(doc1)
when(documentClient.createDocument(mockEq("dbs/database1/colls/coll1"),
argThat {
argument: Document => argument != null && argument.toString == doc1.toString
}, argThat { argument: RequestOptions =>
argument.getConsistencyLevel == ConsistencyLevel.Session
}, mockEq(false)))
.thenReturn(r1)
val doc2 = new Document(json2)
val r2 = mock[ResourceResponse[Document]]
when(r2.getResource).thenReturn(doc2)
when(documentClient.createDocument(mockEq("dbs/database1/colls/coll2"),
argThat { argument: Document =>
argument != null && argument.toString == doc2.toString
}, argThat { argument: RequestOptions =>
argument.getConsistencyLevel == ConsistencyLevel.Session
}, mockEq(false)))
.thenReturn(r2)
val config = DocumentDbConfig(map)
val settings = DocumentDbSinkSettings(config)
val kcqlMap = settings.kcql.map(c => c.getSource -> c).toMap
val writer = new DocumentDbWriter(kcqlMap, settings, documentClient)
writer.write(Seq(sinkRecord1, sinkRecord2))
verify(documentClient).createDocument(mockEq("dbs/database1/colls/coll1"),
argThat { argument: Document => argument.toString == doc1.toString
}, argThat { argument: RequestOptions =>
argument.getConsistencyLevel == ConsistencyLevel.Session
}, mockEq(false))
verify(documentClient).createDocument(mockEq("dbs/database1/colls/coll2"),
argThat { argument: Document => doc2.toString == argument.toString
},
argThat { argument: RequestOptions =>
argument.getConsistencyLevel == ConsistencyLevel.Session
}
, mockEq(false))
}
"handle STRUCT INSERTS with Eventual consistency level" in {
val map = Map(
DocumentDbConfigConstants.DATABASE_CONFIG -> "database1",
DocumentDbConfigConstants.CONNECTION_CONFIG -> connection,
DocumentDbConfigConstants.MASTER_KEY_CONFIG -> "secret",
DocumentDbConfigConstants.CONSISTENCY_CONFIG -> ConsistencyLevel.Eventual.toString,
DocumentDbConfigConstants.KCQL_CONFIG -> "INSERT INTO coll1 SELECT * FROM topic1;INSERT INTO coll2 SELECT * FROM topic2"
).asJava
val documentClient = mock[DocumentClient]
val dbResource: ResourceResponse[Database] = mock[ResourceResponse[Database]]
when(dbResource.getResource).thenReturn(mock[Database])
Seq("dbs/database1/colls/coll1",
"dbs/database1/colls/coll2").foreach { c =>
val resource = mock[ResourceResponse[DocumentCollection]]
when(resource.getResource).thenReturn(mock[DocumentCollection])
when(documentClient.readCollection(mockEq(c), any(classOf[RequestOptions])))
.thenReturn(resource)
when(documentClient.readCollection(mockEq(c), mockEq(null)))
.thenReturn(resource)
}
when(documentClient.readDatabase(mockEq("dbs/database1"), mockEq(null)))
.thenReturn(dbResource)
// val task = new DocumentDbSinkTask(s => documentClient)
// val context = mock[SinkTaskContext]
// when(context.configs()).thenReturn(map)
// task.initialize(context)
// task.start(map)
val json1 = scala.io.Source.fromFile(getClass.getResource(s"/transaction1.json").toURI.getPath).mkString
val tx1 = Json.fromJson[Transaction](json1)
val json2 = scala.io.Source.fromFile(getClass.getResource(s"/transaction2.json").toURI.getPath).mkString
val tx2 = Json.fromJson[Transaction](json2)
val sinkRecord1 = new SinkRecord("topic1", 0, null, null, Transaction.ConnectSchema, tx1.toStruct(), 1000)
val sinkRecord2 = new SinkRecord("topic2", 0, null, null, Transaction.ConnectSchema, tx2.toStruct(), 1000)
val doc1 = new Document(json1)
val r1 = mock[ResourceResponse[Document]]
when(r1.getResource).thenReturn(doc1)
when(
documentClient.createDocument(mockEq("dbs/database1/colls/coll1"),
argThat { argument: Document =>
argument != null && argument.toString == doc1.toString
},
argThat { argument: RequestOptions =>
argument.getConsistencyLevel == ConsistencyLevel.Eventual
}, mockEq(false)))
.thenReturn(r1)
val doc2 = new Document(json2)
val r2 = mock[ResourceResponse[Document]]
when(r2.getResource).thenReturn(doc2)
when(
documentClient.createDocument(mockEq("dbs/database1/colls/coll2"),
argThat { argument: Document =>
argument != null && argument.toString == doc2.toString
},
argThat { argument: RequestOptions =>
argument.getConsistencyLevel == ConsistencyLevel.Eventual
}, mockEq(false)))
.thenReturn(r2)
val config = DocumentDbConfig(map)
val settings = DocumentDbSinkSettings(config)
val kcqlMap = settings.kcql.map(c => c.getSource -> c).toMap
val writer = new DocumentDbWriter(kcqlMap, settings, documentClient)
writer.write(Seq(sinkRecord1, sinkRecord2))
verify(documentClient)
.createDocument(mockEq("dbs/database1/colls/coll1"),
argThat { argument: Document =>
argument.toString == doc1.toString
},
argThat { argument: RequestOptions =>
argument.getConsistencyLevel == ConsistencyLevel.Eventual
},
mockEq(false))
verify(documentClient)
.createDocument(
mockEq("dbs/database1/colls/coll2"),
argThat { argument: Document =>
doc2.toString == argument.toString
},
argThat { argument: RequestOptions =>
argument.getConsistencyLevel == ConsistencyLevel.Eventual
},
mockEq(false))
}
"handle STRUCT UPSERT with Eventual consistency level" in {
val map = Map(
DocumentDbConfigConstants.DATABASE_CONFIG -> "database1",
DocumentDbConfigConstants.CONNECTION_CONFIG -> connection,
DocumentDbConfigConstants.MASTER_KEY_CONFIG -> "secret",
DocumentDbConfigConstants.CONSISTENCY_CONFIG -> ConsistencyLevel.Eventual.toString,
DocumentDbConfigConstants.KCQL_CONFIG -> "UPSERT INTO coll1 SELECT * FROM topic1 PK time"
).asJava
val documentClient = mock[DocumentClient]
val dbResource: ResourceResponse[Database] = mock[ResourceResponse[Database]]
when(dbResource.getResource).thenReturn(mock[Database])
val resource = mock[ResourceResponse[DocumentCollection]]
when(resource.getResource).thenReturn(mock[DocumentCollection])
when(documentClient.readCollection(mockEq("dbs/database1/colls/coll1"), any(classOf[RequestOptions])))
.thenReturn(resource)
when(documentClient.readDatabase(mockEq("dbs/database1"), mockEq(null)))
.thenReturn(dbResource)
val json1 = scala.io.Source.fromFile(getClass.getResource(s"/transaction1.json").toURI.getPath).mkString
val tx1 = Json.fromJson[Transaction](json1)
val json2 = scala.io.Source.fromFile(getClass.getResource(s"/transaction2.json").toURI.getPath).mkString
val tx2 = Json.fromJson[Transaction](json2)
val sinkRecord1 = new SinkRecord("topic1", 0, null, null, Transaction.ConnectSchema, tx1.toStruct(), 1000)
val sinkRecord2 = new SinkRecord("topic1", 0, null, null, Transaction.ConnectSchema, tx2.toStruct(), 1000)
val doc1 = new Document(json1)
doc1.setId(doc1.get("time").toString)
val r1 = mock[ResourceResponse[Document]]
when(r1.getResource).thenReturn(doc1)
when(
documentClient
.upsertDocument(
mockEq("dbs/database1/colls/coll1"),
argThat { argument: Document =>
argument != null && argument.toString == doc1.toString
},
argThat { argument: RequestOptions => argument.getConsistencyLevel == ConsistencyLevel.Eventual
}, mockEq(true)))
.thenReturn(r1)
val doc2 = new Document(json2)
doc2.setId(doc2.get("time").toString)
val r2 = mock[ResourceResponse[Document]]
when(r2.getResource).thenReturn(doc2)
when(
documentClient
.upsertDocument(
mockEq("dbs/database1/colls/coll1"),
argThat { argument: Document =>
argument != null && argument.toString == doc2.toString
},
argThat { argument: RequestOptions =>
argument.getConsistencyLevel == ConsistencyLevel.Eventual
}, mockEq(true)))
.thenReturn(r2)
val config = DocumentDbConfig(map)
val settings = DocumentDbSinkSettings(config)
val kcqlMap = settings.kcql.map(c => c.getSource -> c).toMap
val writer = new DocumentDbWriter(kcqlMap, settings, documentClient)
writer.write(Seq(sinkRecord1, sinkRecord2))
verify(documentClient)
.upsertDocument(
mockEq("dbs/database1/colls/coll1"),
argThat { argument: Document =>
argument.toString == doc1.toString
},
argThat { argument: RequestOptions =>
argument.getConsistencyLevel == ConsistencyLevel.Eventual
},
mockEq(true))
verify(documentClient)
.upsertDocument(
mockEq("dbs/database1/colls/coll1"),
argThat { argument: Document =>
doc2.toString == argument.toString
},
argThat { argument: RequestOptions =>
argument.getConsistencyLevel == ConsistencyLevel.Eventual
},
mockEq(true))
}
}
}
|
datamountaineer/stream-reactor
|
kafka-connect-azure-documentdb/src/test/scala/com/datamountaineer/streamreactor/connect/azure/documentdb/sink/DocumentDbSinkTaskStructTest.scala
|
Scala
|
apache-2.0
| 13,224
|
package org.jetbrains.plugins.hocon.formatting
import com.intellij.formatting._
import com.intellij.lang.ASTNode
import com.intellij.psi.TokenType
import com.intellij.psi.formatter.common.AbstractBlock
import org.jetbrains.plugins.hocon.lexer.HoconTokenSets
import org.jetbrains.plugins.hocon.parser.HoconElementType
import scala.collection.JavaConverters._
class HoconBlock(formatter: HoconFormatter, node: ASTNode, indent: Indent, wrap: Wrap, alignment: Alignment)
extends AbstractBlock(node, wrap, alignment) {
import org.jetbrains.plugins.hocon.CommonUtil._
// HoconFormatter needs these to be able to return exactly the same instances of Wrap and Alignment for
// children of this block
private val wrapCache = {
val pathValueSeparatorType =
if (node.getElementType == HoconElementType.BareObjectField)
node.childrenIterator.map(_.getElementType).find(HoconTokenSets.PathValueSeparator.contains)
else None
new formatter.WrapCache(pathValueSeparatorType)
}
private val alignmentCache = new formatter.AlignmentCache
override def getIndent = indent
override def getChildAttributes(newChildIndex: Int) =
new ChildAttributes(formatter.getChildIndent(node), formatter.getChildAlignment(alignmentCache, node))
def buildChildren() = children.asJava
def isLeaf =
formatter.getChildren(node).isEmpty
def getSpacing(child1: Block, child2: Block) =
if (child1 == null)
formatter.getFirstSpacing(node, child2.asInstanceOf[HoconBlock].getNode)
else
formatter.getSpacing(node, child1.asInstanceOf[HoconBlock].getNode, child2.asInstanceOf[HoconBlock].getNode)
lazy val children: Seq[Block] =
formatter.getChildren(node)
.filterNot(n => n.getTextLength == 0 || n.getElementType == TokenType.WHITE_SPACE)
.map(createChildBlock).toVector
private def createChildBlock(child: ASTNode) =
new HoconBlock(formatter, child,
formatter.getIndent(node, child),
formatter.getWrap(wrapCache, node, child),
formatter.getAlignment(alignmentCache, node, child))
override def toString =
s"${node.getElementType}[${node.getText.replaceAllLiterally("\\n", "\\\\n")}]${node.getTextRange}" + {
if (isLeaf) "" else children.mkString("\\n", "\\n", "").indent(" ")
}
}
|
triggerNZ/intellij-scala
|
src/org/jetbrains/plugins/hocon/formatting/HoconBlock.scala
|
Scala
|
apache-2.0
| 2,299
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
* file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
* to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package kafka.api
import java.{lang, util}
import org.apache.kafka.clients.producer.ProducerConfig
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.clients.consumer._
import org.apache.kafka.common.serialization.ByteArrayDeserializer
import org.apache.kafka.common.{PartitionInfo, TopicPartition}
import kafka.utils.{TestUtils, Logging}
import kafka.server.KafkaConfig
import java.util.ArrayList
import org.junit.Assert._
import scala.collection.JavaConversions._
import kafka.coordinator.ConsumerCoordinator
/**
* Integration tests for the new consumer that cover basic usage as well as server failures
*/
class ConsumerTest extends IntegrationTestHarness with Logging {
val producerCount = 1
val consumerCount = 2
val serverCount = 3
val topic = "topic"
val part = 0
val tp = new TopicPartition(topic, part)
val part2 = 1
val tp2 = new TopicPartition(topic, part2)
// configure the servers and clients
this.serverConfig.setProperty(KafkaConfig.ControlledShutdownEnableProp, "false") // speed up shutdown
this.serverConfig.setProperty(KafkaConfig.OffsetsTopicReplicationFactorProp, "3") // don't want to lose offset
this.serverConfig.setProperty(KafkaConfig.OffsetsTopicPartitionsProp, "1")
this.serverConfig.setProperty(KafkaConfig.ConsumerMinSessionTimeoutMsProp, "100") // set small enough session timeout
this.producerConfig.setProperty(ProducerConfig.ACKS_CONFIG, "all")
this.consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "my-test")
this.consumerConfig.setProperty(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 4096.toString)
this.consumerConfig.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
override def setUp() {
super.setUp()
// create the test topic with all the brokers as replicas
TestUtils.createTopic(this.zkClient, topic, 2, serverCount, this.servers)
}
def testSimpleConsumption() {
val numRecords = 10000
sendRecords(numRecords)
assertEquals(0, this.consumers(0).subscriptions.size)
this.consumers(0).subscribe(tp)
assertEquals(1, this.consumers(0).subscriptions.size)
this.consumers(0).seek(tp, 0)
consumeRecords(this.consumers(0), numRecords = numRecords, startingOffset = 0)
// check async commit callbacks
val commitCallback = new CountConsumerCommitCallback()
this.consumers(0).commit(CommitType.ASYNC, commitCallback)
// shouldn't make progress until poll is invoked
Thread.sleep(10)
assertEquals(0, commitCallback.count)
awaitCommitCallback(this.consumers(0), commitCallback)
}
def testCommitSpecifiedOffsets() {
sendRecords(5, tp)
sendRecords(7, tp2)
this.consumers(0).subscribe(tp)
this.consumers(0).subscribe(tp2)
// Need to poll to join the group
this.consumers(0).poll(50)
val pos1 = this.consumers(0).position(tp)
val pos2 = this.consumers(0).position(tp2)
this.consumers(0).commit(Map[TopicPartition,java.lang.Long]((tp, 3L)), CommitType.SYNC)
assertEquals(3, this.consumers(0).committed(tp))
intercept[NoOffsetForPartitionException] {
this.consumers(0).committed(tp2)
}
// positions should not change
assertEquals(pos1, this.consumers(0).position(tp))
assertEquals(pos2, this.consumers(0).position(tp2))
this.consumers(0).commit(Map[TopicPartition,java.lang.Long]((tp2, 5L)), CommitType.SYNC)
assertEquals(3, this.consumers(0).committed(tp))
assertEquals(5, this.consumers(0).committed(tp2))
// Using async should pick up the committed changes after commit completes
val commitCallback = new CountConsumerCommitCallback()
this.consumers(0).commit(Map[TopicPartition,java.lang.Long]((tp2, 7L)), CommitType.ASYNC, commitCallback)
awaitCommitCallback(this.consumers(0), commitCallback)
assertEquals(7, this.consumers(0).committed(tp2))
}
def testAutoOffsetReset() {
sendRecords(1)
this.consumers(0).subscribe(tp)
consumeRecords(this.consumers(0), numRecords = 1, startingOffset = 0)
}
def testSeek() {
val consumer = this.consumers(0)
val totalRecords = 50L
sendRecords(totalRecords.toInt)
consumer.subscribe(tp)
consumer.seekToEnd(tp)
assertEquals(totalRecords, consumer.position(tp))
assertFalse(consumer.poll(totalRecords).iterator().hasNext)
consumer.seekToBeginning(tp)
assertEquals(0, consumer.position(tp), 0)
consumeRecords(consumer, numRecords = 1, startingOffset = 0)
val mid = totalRecords / 2
consumer.seek(tp, mid)
assertEquals(mid, consumer.position(tp))
consumeRecords(consumer, numRecords = 1, startingOffset = mid.toInt)
}
def testGroupConsumption() {
sendRecords(10)
this.consumers(0).subscribe(topic)
consumeRecords(this.consumers(0), numRecords = 1, startingOffset = 0)
}
def testPositionAndCommit() {
sendRecords(5)
// committed() on a partition with no committed offset throws an exception
intercept[NoOffsetForPartitionException] {
this.consumers(0).committed(new TopicPartition(topic, 15))
}
// position() on a partition that we aren't subscribed to throws an exception
intercept[IllegalArgumentException] {
this.consumers(0).position(new TopicPartition(topic, 15))
}
this.consumers(0).subscribe(tp)
assertEquals("position() on a partition that we are subscribed to should reset the offset", 0L, this.consumers(0).position(tp))
this.consumers(0).commit(CommitType.SYNC)
assertEquals(0L, this.consumers(0).committed(tp))
consumeRecords(this.consumers(0), 5, 0)
assertEquals("After consuming 5 records, position should be 5", 5L, this.consumers(0).position(tp))
this.consumers(0).commit(CommitType.SYNC)
assertEquals("Committed offset should be returned", 5L, this.consumers(0).committed(tp))
sendRecords(1)
// another consumer in the same group should get the same position
this.consumers(1).subscribe(tp)
consumeRecords(this.consumers(1), 1, 5)
}
def testPartitionsFor() {
val numParts = 2
TestUtils.createTopic(this.zkClient, "part-test", numParts, 1, this.servers)
val parts = this.consumers(0).partitionsFor("part-test")
assertNotNull(parts)
assertEquals(2, parts.length)
assertNull(this.consumers(0).partitionsFor("non-exist-topic"))
}
def testListTopics() {
val numParts = 2
val topic1: String = "part-test-topic-1"
val topic2: String = "part-test-topic-2"
val topic3: String = "part-test-topic-3"
TestUtils.createTopic(this.zkClient, topic1, numParts, 1, this.servers)
TestUtils.createTopic(this.zkClient, topic2, numParts, 1, this.servers)
TestUtils.createTopic(this.zkClient, topic3, numParts, 1, this.servers)
val topics = this.consumers.head.listTopics()
assertNotNull(topics)
assertEquals(5, topics.size())
assertEquals(5, topics.keySet().size())
assertEquals(2, topics.get(topic1).length)
assertEquals(2, topics.get(topic2).length)
assertEquals(2, topics.get(topic3).length)
}
def testPartitionReassignmentCallback() {
val callback = new TestConsumerReassignmentCallback()
this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "100"); // timeout quickly to avoid slow test
val consumer0 = new KafkaConsumer(this.consumerConfig, callback, new ByteArrayDeserializer(), new ByteArrayDeserializer())
consumer0.subscribe(topic)
// the initial subscription should cause a callback execution
while(callback.callsToAssigned == 0)
consumer0.poll(50)
// get metadata for the topic
var parts = consumer0.partitionsFor(ConsumerCoordinator.OffsetsTopicName)
while(parts == null)
parts = consumer0.partitionsFor(ConsumerCoordinator.OffsetsTopicName)
assertEquals(1, parts.size)
assertNotNull(parts(0).leader())
// shutdown the coordinator
val coordinator = parts(0).leader().id()
this.servers(coordinator).shutdown()
// this should cause another callback execution
while(callback.callsToAssigned < 2)
consumer0.poll(50)
assertEquals(2, callback.callsToAssigned)
assertEquals(2, callback.callsToRevoked)
consumer0.close()
}
def testUnsubscribeTopic() {
val callback = new TestConsumerReassignmentCallback()
this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "100"); // timeout quickly to avoid slow test
val consumer0 = new KafkaConsumer(this.consumerConfig, callback, new ByteArrayDeserializer(), new ByteArrayDeserializer())
try {
consumer0.subscribe(topic)
// the initial subscription should cause a callback execution
while (callback.callsToAssigned == 0)
consumer0.poll(50)
consumer0.unsubscribe(topic)
assertEquals(0, consumer0.subscriptions.size())
} finally {
consumer0.close()
}
}
def testPartitionPauseAndResume() {
sendRecords(5)
this.consumers(0).subscribe(tp)
consumeRecords(this.consumers(0), 5, 0)
this.consumers(0).pause(tp)
sendRecords(5)
assertTrue(this.consumers(0).poll(0).isEmpty)
this.consumers(0).resume(tp)
consumeRecords(this.consumers(0), 5, 5)
}
def testPauseStateNotPreservedByRebalance() {
this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "100"); // timeout quickly to avoid slow test
val consumer0 = new KafkaConsumer(this.consumerConfig, null, new ByteArrayDeserializer(), new ByteArrayDeserializer())
sendRecords(5)
consumer0.subscribe(topic)
consumeRecords(consumer0, 5, 0)
consumer0.pause(tp)
// subscribe to a new topic to trigger a rebalance
consumer0.subscribe("topic2")
// after rebalance, our position should be reset and our pause state lost,
// so we should be able to consume from the beginning
consumeRecords(consumer0, 0, 5)
}
private class TestConsumerReassignmentCallback extends ConsumerRebalanceCallback {
var callsToAssigned = 0
var callsToRevoked = 0
def onPartitionsAssigned(consumer: Consumer[_,_], partitions: java.util.Collection[TopicPartition]) {
info("onPartitionsAssigned called.")
callsToAssigned += 1
}
def onPartitionsRevoked(consumer: Consumer[_,_], partitions: java.util.Collection[TopicPartition]) {
info("onPartitionsRevoked called.")
callsToRevoked += 1
}
}
private def sendRecords(numRecords: Int): Unit = {
sendRecords(numRecords, tp)
}
private def sendRecords(numRecords: Int, tp: TopicPartition) {
val futures = (0 until numRecords).map { i =>
this.producers(0).send(new ProducerRecord(tp.topic(), tp.partition(), i.toString.getBytes, i.toString.getBytes))
}
futures.map(_.get)
}
private def consumeRecords(consumer: Consumer[Array[Byte], Array[Byte]], numRecords: Int, startingOffset: Int) {
val records = new ArrayList[ConsumerRecord[Array[Byte], Array[Byte]]]()
val maxIters = numRecords * 300
var iters = 0
while (records.size < numRecords) {
for (record <- consumer.poll(50))
records.add(record)
if(iters > maxIters)
throw new IllegalStateException("Failed to consume the expected records after " + iters + " iterations.")
iters += 1
}
for (i <- 0 until numRecords) {
val record = records.get(i)
val offset = startingOffset + i
assertEquals(topic, record.topic())
assertEquals(part, record.partition())
assertEquals(offset.toLong, record.offset())
}
}
private def awaitCommitCallback(consumer: Consumer[Array[Byte], Array[Byte]], commitCallback: CountConsumerCommitCallback): Unit = {
val startCount = commitCallback.count
val started = System.currentTimeMillis()
while (commitCallback.count == startCount && System.currentTimeMillis() - started < 10000)
      consumer.poll(10000)
assertEquals(startCount + 1, commitCallback.count)
}
private class CountConsumerCommitCallback extends ConsumerCommitCallback {
var count = 0
override def onComplete(offsets: util.Map[TopicPartition, lang.Long], exception: Exception): Unit = count += 1
}
}
|
wayilau/kafka
|
core/src/test/scala/integration/kafka/api/ConsumerTest.scala
|
Scala
|
apache-2.0
| 13,012
|
/*
* Copyright (C) 2014 AyaIB Developers (http://github.com/fauu/ayaib)
*
* This software is licensed under the GNU General Public License
* (version 3 or later). See the COPYING file in this distribution.
*
* You should have received a copy of the GNU Library General Public License
* along with this software. If not, see <http://www.gnu.org/licenses/>.
*
* Authored by: Piotr Grabowski <fau999@gmail.com>
*/
package models
import anorm._
import anorm.SqlParser._
import play.api.db.DB
import play.api.Play.current
case class Board(
id: Pk[Int] = NotAssigned,
uri: String,
title: String,
subtitle: Option[String] = None,
numPages: Int)
object Board {
val TableName = "board"
private val LoadAll =
"SELECT * FROM {tableName}".replace("{tableName}", TableName)
private val LoadByUri =
"SELECT * FROM {tableName} WHERE uri = {uri}".replace("{tableName}", TableName)
private val boardParser = {
get[Pk[Int]]("id") ~
get[String]("uri") ~
get[String]("title") ~
get[Option[String]]("subtitle") ~
get[Int]("pages") map {
case id ~ uri ~ title ~ subtitle ~ numPages => Board(id, uri, title, subtitle, numPages)
}
}
def loadAll: List[Board] = {
DB.withConnection { implicit connection =>
SQL(LoadAll).as(boardParser *).toList
}
}
def loadByUri(uri: String): Option[Board] = {
DB.withConnection { implicit connection =>
SQL(LoadByUri).on('uri -> uri).as(boardParser.singleOpt)
}
}
}
|
fauu/AyaIB-old
|
app/models/Board.scala
|
Scala
|
gpl-3.0
| 1,504
|
/**
* Copyright (c) 2014, MoonGene. All rights reserved.
*
* This source code is licensed under the GPL license found in the
* LICENSE_GPL file in the root directory of this source tree. An alternative
* commercial license is also available upon request.
*/
package com.moongene.services.core
import concurrent.Future
import reactivemongo.core.commands.LastError
import com.moongene.Core
import com.moongene.models.messages.DBLoad
//Utility trait to catch results from DB queries and appropriately decrement the current load and check for errors
trait MetricsHelper extends ExecutionTrait {
import Implicit._
def dbCallMetric(call: Future[LastError], lastErrorCheck: Boolean = true, recoverCheck: Boolean = true) = {
Core.metricsLogger ! DBLoad(out = 1, cur = 1)
call.map({ lastError =>
Core.metricsLogger ! DBLoad(cur = -1, err = if (!lastError.ok && lastErrorCheck) 1 else 0)
}).recover {
case _ => if(recoverCheck) { Core.metricsLogger ! DBLoad(cur = -1, err = 1) }
}
}
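
  // Illustrative usage (added comment, not part of the original source): a DAO
  // mixing in this trait could wrap any reactivemongo write that returns a
  // Future[LastError], e.g. `dbCallMetric(collection.insert(document))`, so the
  // current-load gauge is decremented and failures are counted automatically.
  // The `collection` and `document` names here are hypothetical.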
}
|
MoonGene/Analytics
|
src/gene/src/main/scala/com/moongene/services/core/MetricsHelper.scala
|
Scala
|
gpl-3.0
| 1,013
|
package exerciseTwo
import akka.actor.ActorSystem
// ------------
// EXERCISE 2
// ------------
// Fill in the code necessary to handle receiving a new message to generate the
// properties for a random circle. For testing just println when the message is received.
// See video.imageUtils.CircleProperties and video.imageUtils.ImageUtils
class CircleGenerator extends akka.actor.Actor {
override def receive: Receive = ???
}
object CircleGenerator {
/**
* run:
* ./activator 'runMain exerciseTwo.CircleGenerator'
*
*/
def main(args: Array[String]): Unit = {
// ActorSystem represents the "engine" we run in, including threading configuration and concurrency semantics.
val system = ActorSystem()
// Fill in the code necessary to create the Actor in the ActorSystem and send it a message.
// TODO - Your code here.
}
}
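
// --- Illustrative sketch (not part of the original exercise) ---
// One possible shape of a solution, shown only to clarify the Actor receive
// pattern the exercise asks for. The message type `GenerateCircle` below is
// hypothetical; the real exercise uses video.imageUtils.CircleProperties and
// video.imageUtils.ImageUtils, whose APIs are not shown here.
case class GenerateCircle(maxRadius: Int)

class ExampleCircleGenerator extends akka.actor.Actor {
  override def receive: Receive = {
    case GenerateCircle(maxRadius) =>
      // For testing, just print when the message is received, as the exercise suggests.
      println(s"Received request for a random circle with radius up to $maxRadius")
  }
}

object ExampleCircleGeneratorMain {
  def main(args: Array[String]): Unit = {
    val system = ActorSystem()
    val generator = system.actorOf(akka.actor.Props[ExampleCircleGenerator], "circle-generator")
    generator ! GenerateCircle(maxRadius = 50)
  }
}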
|
retroryan/streams-workshop
|
src/exercises/exerciseTwo/CircleGenerator.scala
|
Scala
|
cc0-1.0
| 863
|
package doodle
package java2d
import doodle.core.Base64
import doodle.effect.Writer._
import doodle.syntax._
import minitest._
object Base64Spec extends SimpleTestSuite {
def base64Distance[A](b1: Base64[A], b2: Base64[A]): Double = {
import java.util.{Base64 => JBase64}
val d1 = JBase64.getDecoder().decode(b1.value)
val d2 = JBase64.getDecoder().decode(b2.value)
d1.zip(d2).foldLeft(0.0) { (accum, elts) =>
val (byte1, byte2) = elts
accum + Math.abs(byte1 - byte2)
}
}
val image = circle[Algebra, Drawing](20.0)
test("base64 should work with png") {
val (_, b1) = image.base64[Png]()
val (_, b2) = image.base64[Png]()
assert(base64Distance(b1, b2) <= (b1.value.length * 2))
}
test("base64 should work with gif") {
val (_, b1) = image.base64[Gif]()
val (_, b2) = image.base64[Gif]()
assert(base64Distance(b1, b2) <= (b1.value.length * 2))
}
test("base64 should work with jpg") {
val (_, b1) = image.base64[Jpg]()
val (_, b2) = image.base64[Jpg]()
assert(base64Distance(b1, b2) <= (b1.value.length * 2))
}
}
|
underscoreio/doodle
|
java2d/src/test/scala/doodle/java2d/Base64Spec.scala
|
Scala
|
apache-2.0
| 1,105
|
package com.sksamuel.elastic4s
import com.sksamuel.elastic4s.json.{JsonValue, XContentBuilder, XContentFactory}
import com.sksamuel.elastic4s.requests.count.CountRequest
/**
* A typeclass that is used to build the json bodies for requests.
*
* They accept a request instance, such as CountRequest or SearchRequest and return
* a [[JsonValue]] which models the json to be used.
*/
trait BodyBuilder[R] {
def toJson(req: R): JsonValue
}
object CountBodyBuilder {
def toJson(req: CountRequest): JsonValue = {
val builder = XContentFactory.jsonBuilder()
// req.query.map(QueryBuilderFn.apply).foreach(builder.rawField("query", _))
builder.value
}
}
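
// --- Illustrative sketch (not part of the original file) ---
// A minimal example of how a BodyBuilder instance might be provided and then
// summoned for CountRequest, mirroring CountBodyBuilder above. The names below
// are hypothetical and only demonstrate the typeclass pattern described in the
// scaladoc; they are not the library's real wiring.
object ExampleBodyBuilders {
  implicit val countBodyBuilder: BodyBuilder[CountRequest] = new BodyBuilder[CountRequest] {
    override def toJson(req: CountRequest): JsonValue = XContentFactory.jsonBuilder().value
  }

  // Summon the instance for any request type that has one in scope.
  def bodyFor[R](req: R)(implicit builder: BodyBuilder[R]): JsonValue = builder.toJson(req)
}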
|
sksamuel/elastic4s
|
elastic4s-handlers/src/main/scala/com/sksamuel/elastic4s/BodyBuilder.scala
|
Scala
|
apache-2.0
| 676
|
package com.binarymechanic.data
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.SparkContext
import org.apache.spark.mllib.linalg.{Vector => MLVector}
import org.apache.spark.sql.SQLContext
import org.bdgenomics.adam.rdd.ADAMContext._
/** Provides a location to create data conversion methods, loading, static data types */
object DataLib {
/** loads vcf data into parquet format
*
* @param sc SparkContext
* @param vcfPath File(s) to load
* @param destinationPath Where to save the data in parquet format
*/
def vcfToParqet(sc: SparkContext, vcfPath: String, destinationPath: String): Unit = {
val fs: FileSystem = FileSystem.get(sc.hadoopConfiguration)
fs.delete(new Path(destinationPath), true)
sc.loadGenotypes(vcfPath).adamParquetSave(destinationPath)
}
/**
* Panel contains columns for person ID (sample), population code (pop), super population code (super_pop), and gender.
* This will be our lookup panel for each person's population membership for our supervised learning model.
* The population code definitions can be found at http://www.1000genomes.org/cell-lines-and-dna-coriell.
*
* @param sqlContext SqlContext
* @param panelPath Path to panel file
*
* example data:
*
* sample pop super_pop gender
* HG00096 GBR EUR male
* HG00097 GBR EUR female
* HG00099 GBR EUR female
*/
def getPanel(sqlContext: SQLContext, panelPath: String) = {
sqlContext.read
.format("com.databricks.spark.csv")
.option("header", "true")
.option("inferSchema", "true")
.option("delimiter", "\\\\t")
.load(panelPath)
}
/**
* Static map of data panel country map to accepted visual map name of countries
*/
def getCountryMap(): Map[String,String] =
Map(
"ACB" -> "BRB",
"ASW" -> "USA",
"BEB" -> "BGD",
"CDX" -> "CHN",
"CHB" -> "CHN",
"CHS" -> "CHN",
"CLM" -> "COL",
"ESN" -> "NGA",
"FIN" -> "FIN",
"GBR" -> "GBR",
"GIH" -> "IND",
"GWD" -> "GMB",
"IBS" -> "ESP",
"ITU" -> "IND",
"JPT" -> "JPN",
"KHV" -> "VNM",
"LWK" -> "KEN",
"MSL" -> "SLE",
"MXL" -> "MEX",
"PEL" -> "PER",
"PJL" -> "PAK",
"PUR" -> "PRI",
"STU" -> "LKA",
"TSI" -> "ITA",
"YRI" -> "NGA"
)
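
  // Illustrative usage (added comment, not part of the original source); the
  // paths below are hypothetical:
  //   DataLib.vcfToParqet(sc, "hdfs:///data/genotypes.vcf", "hdfs:///data/genotypes.adam")
  //   val panel = DataLib.getPanel(sqlContext, "hdfs:///data/integrated_call_samples.panel")
  //   val country = DataLib.getCountryMap()("GBR")  // yields "GBR"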
/* An example of a variant
{
"variant": {
"variantErrorProbability": 100,
"contig": {
"contigName": "6",
"contigLength": null,
"contigMD5": null,
"referenceURL": null,
"assembly": null,
"species": null,
"referenceIndex": null
},
"start": 88942,
"end": 88943,
"referenceAllele": "G",
"alternateAllele": "A",
"svAllele": null,
"isSomatic": false
},
"variantCallingAnnotations": {
"variantIsPassing": true,
"variantFilters": [],
"downsampled": null,
"baseQRankSum": null,
"fisherStrandBiasPValue": null,
"rmsMapQ": null,
"mapq0Reads": null,
"mqRankSum": null,
"readPositionRankSum": null,
"genotypePriors": [],
"genotypePosteriors": [],
"vqslod": null,
"culprit": null,
"attributes": {}
},
"sampleId": "HG00253",
"sampleDescription": null,
"processingDescription": null,
"alleles": [
"Ref",
"Ref"
],
"expectedAlleleDosage": null,
"referenceReadDepth": null,
"alternateReadDepth": null,
"readDepth": null,
"minReadDepth": null,
"genotypeQuality": null,
"genotypeLikelihoods": [],
"nonReferenceLikelihoods": [],
"strandBiasComponents": [],
"splitFromMultiAllelic": false,
"isPhased": true,
"phaseSetId": null,
"phaseQuality": null
}
*/
}
|
binarymechanic/genomeanalysis
|
src/main/scala/com/binarymechanic/data/DataLib.scala
|
Scala
|
apache-2.0
| 4,011
|
package org.jetbrains.plugins.scala.lang.psi.impl.base.patterns
import com.intellij.lang.ASTNode
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScGivenPattern
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScGiven
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementImpl
import org.jetbrains.plugins.scala.lang.psi.types.result.TypeResult
final class ScGivenPatternImpl(node: ASTNode)
extends ScalaPsiElementImpl(node)
with ScPatternImpl
with ScGivenPattern
with TypedPatternLikeImpl {
override def typeElement: ScTypeElement = findChild[ScTypeElement].get
override def `type`(): TypeResult = typeElement.`type`()
override def isWildcard: Boolean = false
override def nameId: PsiElement = typeElement
override def name: String = ScGiven.generateAnonymousGivenName(typeElement)
}
|
JetBrains/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/impl/base/patterns/ScGivenPatternImpl.scala
|
Scala
|
apache-2.0
| 994
|
package uberset.basic_compiler
/*
Author: uberset
Date: 2015-11-09
Licence: GPL v2
*/
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
object Interpreter {
case class Status(
lines: Seq[Line],
var in: List[String],
var lineIndex: Int = 0,
var running: Boolean = true,
variables: mutable.Map[String, Int] = mutable.HashMap[String, Int](),
arrays: mutable.Map[String, Array[Int]] = mutable.HashMap[String, Array[Int]](),
stack: mutable.Stack[Int] = mutable.Stack[Int](),
out: ListBuffer[String] = ListBuffer[String]()
)
def run(p: Program, in: List[String]): Seq[String] = {
val s = Status(p.lines, in)
while(s.running) run(s)
s.out
}
def run(s: Status): Unit = {
val line = s.lines(s.lineIndex)
runStm(line.stm, s)
s.lineIndex += 1
if(s.lineIndex >= s.lines.length) s.running = false
}
def runStm(stm: Statement, s: Status): Unit = {
stm match {
case stm: Print => run(stm, s)
case stm: Goto => run(stm, s)
case stm: Gosub => run(stm, s)
case stm: Return => run(stm, s)
case stm: Let => run(stm, s)
case stm: If => run(stm, s)
case stm: Rem => ()
case stm: Input => run(stm, s)
case stm: Dim => run(stm, s)
case stm: For => run(stm, s)
case stm: Next => run(stm, s)
}
}
def run(nxt: Next, s: Status): Unit = {
val Next(id) = nxt
val jump = s.stack.pop()
val step = s.stack.pop()
val to = s.stack.pop()
val valu = s.variables(id) + step
if(step > 0 && valu <= to || step <0 && valu >= to || step == 0 && valu == to) {
setVariable(Variable(id), valu, s)
s.stack.push(to)
s.stack.push(step)
s.stack.push(jump)
s.lineIndex = jump
}
}
def run(f: For, s: Status): Unit = {
val For(id, from, to, step) = f
val fr = evalExpr(from, s)
val t = evalExpr(to,s)
val st = step match {
case Some(expr) => evalExpr(expr, s)
case None => 1
}
// initialize variable
setVariable(Variable(id), fr, s)
// push to, step and line index to stack
s.stack.push(t)
s.stack.push(st)
s.stack.push(s.lineIndex)
}
def run(dim: Dim, s: Status): Unit = {
val Dim(id, upper) = dim
s.arrays.get(id) match {
case Some(_) => fail(s"Array $id was already dimensioned.")
case None => s.arrays.put(id, new Array[Int](upper+1))
}
}
def run(input: Input, s: Status): Unit = {
val vari = input.variable
val string = s.in.head
val valu = string.toInt
s.in = s.in.tail
setVariable(vari, valu, s)
}
def evalCond(c: Condition, s: Status): Boolean = {
val Condition(e1, op, e2) = c
op match {
case LT() => evalExpr(e1, s) < evalExpr(e2, s)
case GT() => evalExpr(e1, s) > evalExpr(e2, s)
case EQ() => evalExpr(e1, s) == evalExpr(e2, s)
case NE() => evalExpr(e1, s) != evalExpr(e2, s)
case LE() => evalExpr(e1, s) <= evalExpr(e2, s)
case GE() => evalExpr(e1, s) >= evalExpr(e2, s)
}
}
def run(i: If, s: Status): Unit = {
val If(c, nr) = i
val v = evalCond(c, s)
if(v) goto(nr, s)
}
def run(l: Let, s: Status): Unit = {
val Let(vari, expr) = l
val valu = evalExpr(expr, s)
setVariable(vari, valu, s)
}
def run(p: Print, s: Status): Unit = {
    s.out.append(evalPrintArg(p.arg, s), "\n")
}
def evalPrintArg(arg: PrintArgument, s: Status): String = {
arg match {
case StringArg(s) => s
case expr: Expression => evalExpr(expr, s).toString
}
}
def evalExpr(expr: Expression, s: Status): Int = {
val Expression(neg, term, ops) = expr
var v = evalTerm(term, s)
if(neg) v = (-v)
for((op, t) <- ops) v = evalTerm(v, op, t, s)
v
}
def evalTerm(v: Int, op: AddOp, t: Term, s: Status): Int = {
val v2 = evalTerm(t, s)
op match {
case Add() => v + v2
case Sub() => v - v2
}
}
def evalTerm(term: Term, s: Status): Int = {
val Term(factor, ops) = term
var v = evalFactor(factor, s)
for((op, f) <- ops) v = evalFactor(v, op, f, s)
v
}
def evalFactor(v: Int, op: MulOp, f: Factor, s: Status): Int = {
val v2 = evalFactor(f, s)
op match {
case Mul() => v * v2
case Div() => v / v2
}
}
def evalFactor(factor: Factor, s: Status): Int = {
factor match {
case IntValue(i) => i
case v: Variable => evalVariable(v, s)
case expr: Expression => evalExpr(expr, s)
}
}
def evalVariable(v: Variable, s: Status): Int = {
val Variable(id, sub) = v
s.arrays.get(id) match {
// array found
case Some(values) =>
sub match {
case Some(expr) => values(evalExpr(expr, s))
case None => fail(s"Subscript for array $id expected.").asInstanceOf[Int] // the compiler complains without cast :(
}
// array not found
case None =>
sub match {
case Some(expr) => fail(s"Array $id must be dimensioned.").asInstanceOf[Int] // the compiler complains without cast :(
case None => s.variables.getOrElse(id, 0)
}
}
}
def setVariable(v: Variable, value: Int, s: Status): Unit = {
val Variable(id, sub) = v
s.arrays.get(id) match {
// array found
case Some(values) =>
sub match {
case Some(expr) => values(evalExpr(expr, s)) = value
case None => fail(s"Subscript for array $id expected.")
}
// array not found
case None =>
sub match {
case Some(expr) => fail(s"Array $id must be dimensioned.")
case None => s.variables.put(id, value)
}
}
}
def run(g: Return, s: Status): Unit = {
s.lineIndex = s.stack.pop()
}
def run(g: Gosub, s: Status): Unit = {
s.stack.push(s.lineIndex)
goto(g.nr, s)
}
def run(g: Goto, s: Status): Unit = goto(g.nr, s)
def goto(nr: Int, s: Status): Unit = {
val index = findLineIndex(nr, s.lines)
s.lineIndex = index - 1
}
def findLineIndex(nr: Int, lines: Seq[Line]): Int = {
for(i <- 0 until lines.length) {
if(lines(i).nr == Some(nr)) return i
}
throw new Exception(s"Line number $nr not found.")
}
def fail(msg: String): Null = {
throw new Exception(msg)
}
}
|
uberset/basic-compiler
|
src/main/scala/uberset/basic_compiler/Interpreter.scala
|
Scala
|
gpl-2.0
| 7,154
|
object ch2 {
  def slow_fib(n:Int) : Int = {
    // naive exponential-time recursion, kept as the "slow" reference implementation
    if(n<=1)
      1
    else
      slow_fib(n-1)+slow_fib(n-2)
  }
def fib(n:Int) : Int = {
@annotation.tailrec
def fib_tr(n:Int, f1:Int, f2:Int) : Int = {
if(n<=1)
f1
else
fib_tr(n-1,f1+f2,f1)
}
fib_tr(n,1,1)
}
  def isSorted[A](arr:Array[A], ordered:(A,A)=>Boolean):Boolean = {
    // compare adjacent elements; iterate only up to the last valid index
    // ("1 to arr.length" would read one element past the end of the array)
    for(idx <- 1 until arr.length) {
      if(!ordered(arr(idx-1),arr(idx)))
        return false;
    }
    return true;
  }
// this functions transforms a function of 2 args into a function of one arg, that returns a function taking the second arg
def curry[A,B,C](f:(A,B)=>C):A=>(B=>C) = {
(a:A) => ((b:B) => f(a,b));
}
// this reverses the currying
def uncurry[A,B,C](f:A=>(B=>C)):(A,B)=>C = {
(a:A,b:B) => f(a)(b)
}
def compose[A,B,C](f:B=>C, g:A=>B): A=>C = {
(a:A)=>f(g(a))
}
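
  // Illustrative usage (not part of the original file): quick checks that the
  // combinators above behave as the comments describe. All names are made up.
  def combinatorExamples(): Unit = {
    val add: (Int, Int) => Int = _ + _
    val curriedAdd: Int => Int => Int = curry(add)
    val uncurriedAdd: (Int, Int) => Int = uncurry(curriedAdd)
    val plusOneThenDouble: Int => Int = compose((b: Int) => b * 2, (a: Int) => a + 1)
    assert(curriedAdd(1)(2) == add(1, 2))
    assert(uncurriedAdd(1, 2) == add(1, 2))
    assert(plusOneThenDouble(3) == 8) // g runs first (3 + 1), then f (4 * 2)
  }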
}
|
okaram/scala
|
fpinscala/src/main/scala/ch2.scala
|
Scala
|
unlicense
| 831
|
package com.twitter.finagle.serverset2
import com.twitter.finagle.partitioning.zk.ZkMetadata
import com.twitter.finagle.{Addr, Address}
import com.twitter.util._
import java.net.InetSocketAddress
/**
 * The Stabilizer attempts to dampen address changes for the observers so that they
* are not affected by transient or flapping states from the external system which
* supplies the source addresses. It does so in two important ways:
*
* 1. Failure stabilization: In case of transient failures (flapping), we suppress failed resolution
* states when we have an existing good state until we get new information or sufficient time has
* passed. The previously known good state is returned indefinitely until a new update which puts
* the responsibility of managing stale data on the callers.
*
* 2. Update stabilization: In scenarios where the source is volatile or churning the
* underlying addresses, the stabilizer buffers (and batches) consecutive runs of
* bound addresses so that bursty changes to individual addresses are not propagated immediately.
*/
private object Stabilizer {
/**
* The state object used to coalesce updates.
*
* @param result represents the [[Addr]] which will be published to the event.
*
* @param buffer represents the buffer of [[Addr.Bound]] addresses
* which merges consecutive bound updates and eventually is flushed to `result`.
*
* @param last the most recently observed [[Addr.Bound]].
*/
private case class State(result: Addr, buffer: Addr.Bound, last: Addr.Bound)
private val EmptyBound: Addr.Bound = Addr.Bound(Set(), Addr.Metadata.empty)
private val InitState = State(Addr.Pending, EmptyBound, EmptyBound)
private def coalesce(addrOrEpoch: Event[Either[Addr, Unit]]): Event[Addr] = {
addrOrEpoch
.foldLeft(InitState) {
// new addr bound – merge it with our buffer.
case (State(result, buffer, _), Left(newBound: Addr.Bound)) =>
// if `result` is non-bound flush `newBound` immediately (buffer has been reset already)
val newResult = result match {
case _: Addr.Bound => result
case _ => newBound
}
// We propagate the metadata from `newBound` and replace the
// addresses with the merged set.
val newBuffer = newBound.copy(addrs = merge(buffer.addrs, newBound.addrs))
State(newResult, newBuffer, newBound)
// non-bound address
case (state, Left(nonBound)) =>
(state.result, nonBound) match {
// failure/pending: propagate stale state if we have a previous bound.
case (_: Addr.Bound, Addr.Failed(_) | Addr.Pending) => state
// This guarantees that `result` is never set to an [[Addr.Failed]].
case (_, Addr.Failed(_)) => state.copy(result = Addr.Neg)
case (_, Addr.Pending) => state.copy(result = Addr.Pending)
// All other non-bound state gets propagated immediately. The state is also reset here.
case (_, _) => State(nonBound, EmptyBound, EmptyBound)
}
// epoch turned – promote buffer.
case (State(_: Addr.Bound, buffer, last), Right(())) => State(buffer, last, last)
case (state, Right(())) => state
}.map {
case State(result, _, _) => result
}
}
/**
* Merge `next` with `prev` taking into account shard ids and weights, preferring
* `next` over `prev` when encountering duplicates. If shardIds are present eliminate
* duplicates using that, otherwise eliminate duplicates based on raw addresses. ShardId
* based de-duping is extremely important for observers working against partitioned clusters.
*/
private def merge(prev: Set[Address], next: Set[Address]): Set[Address] = {
var shards: Set[Int] = Set.empty
var inets: Set[InetSocketAddress] = Set.empty
// populate `shards` and `inets` with the data from `next`
val nextIter = next.iterator
while (nextIter.hasNext) {
nextIter.next() match {
case Address.Inet(inet, md) =>
inets = inets + inet
ZkMetadata.fromAddrMetadata(md) match {
case Some(ZkMetadata(Some(shardId), _)) =>
shards = shards + shardId
case _ => // nop
}
case _ => // nop
}
}
// the subset of `prev` to merge with `next`
val filteredPrev: Set[Address] = prev.filter {
case Address.Inet(inet, md) =>
ZkMetadata.fromAddrMetadata(md) match {
case Some(ZkMetadata(Some(shardId), _)) => !shards.contains(shardId)
case _ => !inets.contains(inet)
}
case _ => true
}
filteredPrev ++ next
}
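
  // For illustration (added comment, not in the original source): if `prev`
  // contains shard 3 at host A and `next` contains shard 3 at host B, the
  // merged set keeps only host B, because the shard id takes precedence over
  // the raw socket address when de-duplicating.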
def apply(va: Var[Addr], epoch: Epoch): Var[Addr] = {
Var.async[Addr](Addr.Pending) { updatableAddr =>
val addrOrEpoch: Event[Either[Addr, Unit]] =
if (epoch.period != Duration.Zero) {
va.changes.select(epoch.event)
} else {
// This is a special case for integration testing, we want to
// prevent the update dampening behavior by triggering
// epochs manually.
new Event[Either[Addr, Unit]] {
def register(s: Witness[Either[Addr, Unit]]): Closable = {
va.changes.respond { addr =>
s.notify(Left(addr))
s.notify(Right(()))
s.notify(Right(()))
}
}
}
}
coalesce(addrOrEpoch).register(Witness(updatableAddr))
}
}
}
|
twitter/finagle
|
finagle-serversets/src/main/scala/com/twitter/finagle/serverset2/Stabilizer.scala
|
Scala
|
apache-2.0
| 5,527
|
package net.arya.intern
/**
* Created by arya on 7/25/14.
*/
class InternTest extends org.scalatest.FunSuite {
import syntax._
test("two interned objects should be identical") {
val a = List(1,2,3).intern
val b = (1 :: 2 :: 3 :: Nil).intern
assert(a eq b)
}
test("internDeep interns Lists' insides") {
val a = List(1,2,3,4,5).internDeep
val b = List(99,2,3,4,5).internDeep
assert(a.tail eq b.tail)
}
}
|
refried/intern
|
src/test/scala/net/arya/intern/InternTest.scala
|
Scala
|
gpl-2.0
| 443
|
package com.sfxcode.nosql.mongo.gridfs
import java.io.OutputStream
import java.nio.{Buffer, ByteBuffer}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}
import com.typesafe.scalalogging.LazyLogging
import org.mongodb.scala.Observer
case class GridFSStreamObserver(outputStream: OutputStream) extends Observer[ByteBuffer] with LazyLogging {
val completed = new AtomicBoolean(false)
val resultLength = new AtomicLong(0)
override def onNext(buffer: ByteBuffer): Unit = {
val bytes = new Array[Byte](buffer.remaining())
resultLength.set(resultLength.get() + bytes.length)
buffer.get(bytes, 0, bytes.length)
buffer.asInstanceOf[Buffer].clear()
outputStream.write(bytes)
}
override def onError(e: Throwable): Unit = {
logger.error(e.getMessage, e)
outputStream.close()
resultLength.set(-1)
completed.set(true)
}
override def onComplete(): Unit = {
outputStream.close()
completed.set(true)
}
}
|
sfxcode/simple-mongo
|
src/main/scala/com/sfxcode/nosql/mongo/gridfs/GridFSStreamObserver.scala
|
Scala
|
apache-2.0
| 970
|
package org.bitcoins.core.protocol.ln
import org.bitcoins.core.config.NetworkParameters
import org.bitcoins.core.protocol.ln.LnParams._
import org.bitcoins.core.protocol.ln.currency.{LnCurrencyUnit, LnCurrencyUnits}
import org.bitcoins.core.util.Bech32HumanReadablePart
import scodec.bits.ByteVector
import scala.util.matching.Regex
import scala.util.{Failure, Success, Try}
sealed abstract class LnHumanReadablePart extends Bech32HumanReadablePart {
require(amount.isEmpty || amount.get.toBigInt > 0,
s"Invoice amount must be greater then 0, got $amount")
require(
amount.isEmpty || amount.get.toMSat <= LnPolicy.maxAmountMSat,
s"Invoice amount must not exceed ${LnPolicy.maxAmountMSat}, got ${amount.get.toMSat}")
def network: LnParams
def amount: Option[LnCurrencyUnit]
override lazy val chars: String = {
val amountEncoded = amount.map(_.toEncodedString).getOrElse("")
network.invoicePrefix + amountEncoded
}
lazy val bytes: ByteVector =
ByteVector.encodeAscii(chars) match {
case Left(exc) => throw exc
case Right(bytevec) => bytevec
}
override lazy val toString: String = chars
}
object LnHumanReadablePart {
/** Prefix for generating a LN invoice on the Bitcoin MainNet */
case class lnbc(override val amount: Option[LnCurrencyUnit])
extends LnHumanReadablePart {
override def network: LnParams = LnBitcoinMainNet
}
/** Prefix for generating a LN invoice on the Bitcoin TestNet3 */
case class lntb(override val amount: Option[LnCurrencyUnit])
extends LnHumanReadablePart {
override def network: LnParams = LnBitcoinTestNet
}
  /** Prefix for generating a LN invoice on the Bitcoin RegTest */
case class lnbcrt(override val amount: Option[LnCurrencyUnit])
extends LnHumanReadablePart {
def network: LnParams = LnBitcoinRegTest
}
/** Tries to construct a LN HRP with optional amount specified from the given string */
def apply(bech32: String): Try[LnHumanReadablePart] = fromString(bech32)
def apply(network: NetworkParameters): LnHumanReadablePart = {
val lnNetwork = LnParams.fromNetworkParameters(network)
LnHumanReadablePart.fromLnParams(lnNetwork)
}
def apply(
network: NetworkParameters,
amount: LnCurrencyUnit): LnHumanReadablePart = {
val lnNetwork = LnParams.fromNetworkParameters(network)
LnHumanReadablePart(lnNetwork, Some(amount))
}
def apply(network: LnParams): LnHumanReadablePart = {
fromLnParams(network)
}
/**
* Will return a [[org.bitcoins.core.protocol.ln.LnHumanReadablePart LnHumanReadablePart]]
* without a [[org.bitcoins.core.protocol.ln.currency.LnCurrencyUnit LnCurrencyUnit]] encoded in the invoice
*/
def fromLnParams(network: LnParams): LnHumanReadablePart = {
LnHumanReadablePart(network, None)
}
/**
* Will return a [[org.bitcoins.core.protocol.ln.LnHumanReadablePart LnHumanReadablePart]]
* with the provide [[org.bitcoins.core.protocol.ln.currency.LnCurrencyUnit LnCurrencyUnit]] encoded in the invoice
*/
def apply(
network: LnParams,
amount: Option[LnCurrencyUnit]): LnHumanReadablePart = {
fromParamsAmount(network, amount)
}
def fromParamsAmount(
network: LnParams,
amount: Option[LnCurrencyUnit]): LnHumanReadablePart = {
network match {
case LnParams.LnBitcoinMainNet => lnbc(amount)
case LnParams.LnBitcoinTestNet => lntb(amount)
case LnParams.LnBitcoinRegTest => lnbcrt(amount)
}
}
/**
* First two chars MUST be 'ln'
* Next chars must be the BIP173 currency prefixes. For more information, see
* [[https://github.com/lightningnetwork/lightning-rfc/blob/master/11-payment-encoding.md#human-readable-part BOLT11]]
* and
* [[https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki#Specification BIP173]]
*/
def fromString(bech32: String): Try[LnHumanReadablePart] = {
//Select all of the letters, until we hit a number, as the network
val networkPattern: Regex = "^[a-z]*".r
val networkStringOpt = networkPattern.findFirstIn(bech32)
val lnParamsOpt = networkStringOpt.flatMap(LnParams.fromPrefixString)
if (lnParamsOpt.isEmpty) {
Failure(
new IllegalArgumentException(
s"Could not parse a valid network prefix, got $bech32"))
} else {
val lnParams = lnParamsOpt.get
val prefixSize = lnParams.invoicePrefix.length
val amountString = bech32.slice(prefixSize, bech32.length)
val amount = LnCurrencyUnits.fromEncodedString(amountString).toOption
//If we are able to parse something as an amount, but are unable to convert it to a LnCurrencyUnit, we should fail.
if (amount.isEmpty && !amountString.isEmpty) {
Failure(
new IllegalArgumentException(
s"Parsed an amount, " +
s"but could not convert to a valid currency, got: $amountString"))
} else {
Success(LnHumanReadablePart(lnParams, amount))
}
}
}
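
  // Example (added comment, for illustration only): fromString("lnbc") should
  // yield a mainnet HRP with no amount, while fromString("lnbc2500u") parses
  // the "lnbc" prefix and then attempts to decode "2500u" as an LnCurrencyUnit.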
}
|
bitcoin-s/bitcoin-s-core
|
core/src/main/scala/org/bitcoins/core/protocol/ln/LnHumanReadablePart.scala
|
Scala
|
mit
| 5,014
|
package org.openmole.plugin.task.timing
import org.openmole.core.highlight.HighLight
import org.openmole.core.pluginregistry.PluginRegistry
import org.osgi.framework.{ BundleActivator, BundleContext }
class Activator extends BundleActivator {
override def stop(context: BundleContext): Unit =
PluginRegistry.unregister(this)
override def start(context: BundleContext): Unit = {
import org.openmole.core.highlight.HighLight._
val keyWords: Vector[HighLight] =
Vector(
TaskHighLight(objectName(TimingTask))
)
PluginRegistry.register(this, Vector(this.getClass.getPackage), highLight = keyWords)
}
}
|
openmole/openmole
|
openmole/plugins/org.openmole.plugin.task.timing/src/main/scala/org/openmole/plugin/task/timing/Activator.scala
|
Scala
|
agpl-3.0
| 645
|
package pimpathon.frills
import pimpathon.genTraversableLike.GTLGT
import scala.collection.{GenTraversable, GenTraversableLike}
import scala.collection.immutable.List
import scalaz.{NonEmptyList, \\/}
import pimpathon.frills.genTraversableLike.{GenTraversableLikeFrillsMixin, GenTraversableLikeOfDisjunctionFrillsMixin}
import pimpathon.list._
object list {
implicit class ListFrills[A](self: List[A]) extends GenTraversableLikeFrillsMixin[A, List] {
def toNel: Option[NonEmptyList[A]] = self.unconsC(None, head ⇒ tail ⇒ Some(NonEmptyList(head, tail: _*)))
protected def gtl: GTLGT[A] = self
protected def cc: List[A] = self
}
implicit class ListOfDisjunctionsFrills[L, R](self: List[L \\/ R]) extends GenTraversableLikeOfDisjunctionFrillsMixin[L, R] {
protected def gtl: GenTraversableLike[L \\/ R, GenTraversable[L \\/ R]] = self
}
}
|
raymanoz/pimpathon
|
src/main/scala/pimpathon/frills/list.scala
|
Scala
|
apache-2.0
| 869
|
package info.hupel.isabelle.api
import scala.collection.mutable.ListBuffer
// FIXME code mostly copied from xml.scala and yxml.scala
object XML {
private val X = '\u0005'
private val Y = '\u0006'
private def prettyEscape(string: String) = string
.replace("&", "&")
.replace("<", "<")
.replace(">", ">")
.replace("\"", """)
.replace("'", "'")
sealed abstract class Tree {
def toYXML: String = bodyToYXML(List(this))
def pretty(indent: Int): String
final def pretty: String = pretty(0)
def compact: String
def stripMarkup: String
}
final case class Elem(markup: Markup, body: Body) extends Tree {
def pretty(indent: Int) = {
val attrs = (if (markup._2.isEmpty) "" else " ") + markup._2.map { case (k, v) => s"$k='${prettyEscape(v)}'" }.mkString(" ")
if (body.isEmpty) {
" " * indent + "<" + markup._1 + attrs + " />"
}
else {
val head = " " * indent + "<" + markup._1 + attrs + ">"
val rows = body.map(_.pretty(indent + 2)).mkString("\n", "\n", "\n")
val foot = " " * indent + "</" + markup._1 + ">"
head + rows + foot
}
}
def compact = {
val (name, attrs) = markup
val compactAttrs = attrs.map { case (k, v) => s"""$k="$v"""" }.mkString(" ")
s"<$name $compactAttrs>${body.map(_.compact).mkString("")}</$name>"
}
def stripMarkup = body.map(_.stripMarkup).mkString(" ")
}
final case class Text(content: String) extends Tree {
def pretty(indent: Int) =
" " * indent + prettyEscape(content)
def compact = content
def stripMarkup = content
}
type Body = List[Tree]
@inline
def elem(markup: Markup, body: Body): Tree = Elem(markup, body)
@inline
def text(content: String): Tree = Text(content)
private def parse_attrib(source: CharSequence) = {
val s = source.toString
val i = s.indexOf('=')
if (i <= 0) sys.error("bad attribute")
(s.substring(0, i), s.substring(i + 1))
}
def fromYXML(source: String): Tree = bodyFromYXML(source) match {
case List(result) => result
case Nil => Text("")
case _ => sys.error("multiple results")
}
def bodyFromYXML(source: String): Body = {
def buffer(): ListBuffer[Tree] = new ListBuffer[Tree]
var stack: List[(Markup, ListBuffer[Tree])] = List((("", Nil), buffer()))
def add(x: Tree) = (stack: @unchecked) match {
case ((_, body) :: _) => body += x; ()
}
def push(name: String, atts: List[(String, String)])
{
if (name == "") sys.error("bad element")
else stack = ((name, atts), buffer()) :: stack
}
def pop()
{
(stack: @unchecked) match {
case ((("", Nil), _) :: _) => sys.error("unbalanced element")
case ((markup, body) :: pending) =>
stack = pending
add(Elem(markup, body.toList))
}
}
for (chunk <- source.split(X) if chunk.length != 0) {
if (chunk.length == 1 && chunk.charAt(0) == Y) pop()
else {
chunk.split(Y).toList match {
case ch :: name :: atts if ch.length == 0 =>
push(name.toString, atts.map(parse_attrib))
case txts => for (txt <- txts) add(Text(txt.toString))
}
}
}
(stack: @unchecked) match {
case List((("", Nil), body)) => body.toList
case ((name, _), _) :: _ => sys.error("unbalanced element")
}
}
def bodyToYXML(body: Body): String = {
val s = new StringBuilder
def attrib(p: (String, String)) = { s += Y; s ++= p._1; s += '='; s ++= p._2; () }
def tree(t: Tree): Unit =
t match {
case Elem((name, atts), ts) =>
s += X; s += Y; s ++= name; atts.foreach(attrib); s += X
ts.foreach(tree)
s += X; s += Y; s += X
()
case Text(text) =>
s ++= text
()
}
body.foreach(tree)
s.toString
}
}
|
larsrh/libisabelle
|
modules/pide-interface/src/main/scala/XML.scala
|
Scala
|
apache-2.0
| 3,916
|
package test;
object Main extends App {
class Global {
case class Template(x : Int, y : Int) {
Console.println("outer: " + Global.this);
}
}
trait Contexts { self: Analyzer =>
val xxx : global.Template = {
assert(globalInit0 != null);
globalInit0.Template(10, 20);
}
}
abstract class Analyzer extends Contexts {
type Global <: Main.Global;
final val global : Global = globalInit;
def globalInit : Global;
final def globalInit0 = globalInit.asInstanceOf[global.type];
}
object global0 extends Global {
object analyzer extends Analyzer {
type Global = global0.type;
override def globalInit = global0;
}
}
Console.println(global0.analyzer.xxx);
}
|
folone/dotty
|
tests/untried/neg/t783.scala
|
Scala
|
bsd-3-clause
| 734
|
/*******************************************************************************
Copyright (c) 2012-2013, S-Core, KAIST.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
***************************************************************************** */
package kr.ac.kaist.jsaf.tests
import junit.framework.Test
import junit.framework.TestSuite
import kr.ac.kaist.jsaf.compiler.Predefined
import kr.ac.kaist.jsaf.Shell
import kr.ac.kaist.jsaf.ShellParameters
import kr.ac.kaist.jsaf.analysis.typing.AddressManager
// class definition for eclipse JUnit runner
class TypingTAJSMicroJUTest
object TypingTAJSMicroJUTest {
val TESTS_DIR = "tests/typing_tests/TAJS_micro"
Shell.pred = new Predefined(new ShellParameters())
val EXCLUDE = Set(
"XXX",
"NYI"
)
def main(args: String*) = junit.textui.TestRunner.run(suite)
def suite(): Test = {
// Initialize AddressManager
AddressManager.reset()
val suite = new TestSuite("Typing TAJS Micro Test")
val testcases = collectTestcase(TESTS_DIR)
for (tc <- testcases) {
//$JUnit-BEGIN$
suite.addTest(new SemanticsTest(TESTS_DIR, tc, "dense"))
//$JUnit-END$
}
suite
}
private def collectTestcase(dirname: String) = {
val dir = FileTests.directoryAsFile(dirname)
val filtered = dir.list.toSeq.filter(fname =>
fname.endsWith(".js") &&
!EXCLUDE.exists(prefix => fname.startsWith(prefix)))
filtered.sorted
}
}
|
darkrsw/safe
|
src/main/scala/kr/ac/kaist/jsaf/tests/TypingTAJSMicroJUTest.scala
|
Scala
|
bsd-3-clause
| 1,539
|
/*
* Copyright (c) 2015, 2016 Alexey Kuzin <amkuzink@gmail.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
package sourepoheatmap.gui
import scalafx.geometry.{Insets, Pos}
import scalafx.scene.Scene
import scalafx.scene.control.{TextArea, Label}
import scalafx.scene.layout.VBox
import scalafx.scene.paint.Color
import scalafx.stage.{StageStyle, Window, Stage}
/** About stage contains information about creators.
*
* @author Alexey Kuzin <amkuzink@gmail.com>
*/
class AboutStage(owner: Window) extends Stage { about =>
private val mAppNameLabel = new Label("Sourepo Heatmap") {
alignment = Pos.BaselineLeft
padding = Insets(10)
style = "-fx-font-family: 'sans-serif';" +
"-fx-font-size: 24px"
}
private val mCreatorsTextArea = new TextArea {
editable = false
maxWidth = 340
maxHeight = 165
text = "Copyright (C) 2015. All rights reserved.\\n" +
"Licensed under 3-clause BSD License.\\n\\n" +
"Creator: Alexey Kuzin (amkuzink@gmail.com)"
}
initOwner(owner)
initStyle(StageStyle.UNDECORATED)
title = "About Sourepo Heatmap"
resizable = false
minWidth = 340
minHeight = 215
scene = new Scene {
fill = Color.Azure
content = new VBox {
alignmentInParent = Pos.TopLeft
style = "-fx-border-color: black;" +
"-fx-border-width: 2px;"
children = List(mAppNameLabel, mCreatorsTextArea)
}
}
focused.onChange((_, _, newValue) => {
if (!newValue) about.close()
})
}
|
leviathan941/sourepoheatmap
|
guiapp/src/main/scala/sourepoheatmap/gui/AboutStage.scala
|
Scala
|
bsd-3-clause
| 2,993
|
package sbtdocker
object Instructions {
trait Instruction {
this: Product =>
def arguments = productIterator.mkString(" ")
def instructionName = productPrefix.toUpperCase
override def toString = s"$instructionName $arguments"
@deprecated("Use toString instead.", "0.4.0")
def toInstructionString = toString
}
  private def escapeQuotationMarks(str: String) = str.replace("\"", "\\\"")
  private def escapeWhitespaces(str: String) = str.replace("\n", "\\n").replace("\t", "\\t")
trait SeqArguments {
this: Instruction =>
def args: Seq[String]
def shellFormat: Boolean
    private def execArguments = args.map(escapeQuotationMarks).map(escapeWhitespaces).mkString("[\"", "\", \"", "\"]")
private def wrapIfWhitespaces(argument: String) = {
if (argument.exists(_.isWhitespace)) {
'"' + argument + '"'
} else {
argument
}
}
private def shellArguments = args.map(escapeQuotationMarks).map(escapeWhitespaces).map(wrapIfWhitespaces).mkString(" ")
override def arguments = if (shellFormat) shellArguments else execArguments
}
case class From(image: String) extends Instruction
case class Maintainer(name: String) extends Instruction
object Run {
def shell(args: String*) = new Run(true, args: _*)
def apply(args: String*) = new Run(false, args: _*)
}
/**
* RUN instruction.
* @param shellFormat true if the command should be executed in a shell
* @param args command
*/
case class Run(shellFormat: Boolean, args: String*) extends Instruction with SeqArguments
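  // Illustrative sketch (not part of the original file): how SeqArguments renders the two
  // forms for Run; Cmd and EntryPoint below behave the same way.
  //   Run.shell("echo", "hello world").toString  // RUN echo "hello world"
  //   Run("echo", "hello world").toString        // RUN ["echo", "hello world"]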
object Cmd {
def shell(args: String*) = new Cmd(true, args: _*)
def apply(args: String*) = new Cmd(false, args: _*)
}
/**
* CMD instruction.
* @param shellFormat true if the command should be executed in a shell
* @param args command
*/
case class Cmd(shellFormat: Boolean, args: String*) extends Instruction with SeqArguments
case class Expose(ports: Int*) extends Instruction {
override def arguments = ports.mkString(" ")
}
case class Env(key: String, value: String) extends Instruction
case class Add(from: String, to: String) extends Instruction
case class Copy(from: String, to: String) extends Instruction
object EntryPoint {
def shell(args: String*) = new EntryPoint(true, args: _*)
def apply(args: String*) = new EntryPoint(false, args: _*)
}
/**
* ENTRYPOINT instruction.
* @param shellFormat true if the command should be executed in a shell
* @param args command
*/
case class EntryPoint(shellFormat: Boolean, args: String*) extends Instruction with SeqArguments
case class Volume(mountPoint: String) extends Instruction
case class User(username: String) extends Instruction
case class WorkDir(path: String) extends Instruction
case class OnBuild(instruction: Instruction) extends Instruction {
override def arguments = instruction.toString
}
}
|
Banno/sbt-docker
|
src/main/scala/sbtdocker/Instructions.scala
|
Scala
|
mit
| 2,951
|
package skuber
import akka.stream.KillSwitches
import akka.stream.scaladsl.{Keep, Sink}
import org.scalatest.Matchers
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.time.{Seconds, Span}
import skuber.apps.v1.{Deployment, DeploymentList}
import scala.concurrent.duration._
import scala.concurrent.Await
import scala.language.postfixOps
class WatchContinuouslySpec extends K8SFixture with Eventually with Matchers with ScalaFutures {
implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = Span(200, Seconds), interval = Span(5, Seconds))
behavior of "WatchContinuously"
it should "continuously watch changes on a resource - deployments" in { k8s =>
import skuber.api.client.EventType
val deploymentOneName = java.util.UUID.randomUUID().toString
val deploymentTwoName = java.util.UUID.randomUUID().toString
val deploymentOne = getNginxDeployment(deploymentOneName, "1.7.9")
val deploymentTwo = getNginxDeployment(deploymentTwoName, "1.7.9")
val stream = k8s.list[DeploymentList].map { l =>
k8s.watchAllContinuously[Deployment](Some(l.resourceVersion))
.viaMat(KillSwitches.single)(Keep.right)
.filter(event => event._object.name == deploymentOneName || event._object.name == deploymentTwoName)
.filter(event => event._type == EventType.ADDED || event._type == EventType.DELETED)
.toMat(Sink.collection)(Keep.both)
.run()
}
// Wait for watch to be confirmed before performing the actions that create new events to be watched
Await.result(stream, 5.seconds)
//Create first deployment and delete it.
k8s.create(deploymentOne).futureValue.name shouldBe deploymentOneName
eventually {
k8s.get[Deployment](deploymentOneName).futureValue.status.get.availableReplicas shouldBe 1
}
k8s.delete[Deployment](deploymentOneName).futureValue
/*
* Request times for request is defaulted to 30 seconds.
* The idle timeout is also defaulted to 60 seconds.
* This will ensure multiple requests are performed by
* the source including empty responses
*/
pause(62.seconds)
//Create second deployment and delete it.
k8s.create(deploymentTwo).futureValue.name shouldBe deploymentTwoName
eventually {
k8s.get[Deployment](deploymentTwoName).futureValue.status.get.availableReplicas shouldBe 1
}
k8s.delete[Deployment](deploymentTwoName).futureValue
// cleanup
stream.map { killSwitch =>
killSwitch._1.shutdown()
}
stream.futureValue._2.futureValue.toList.map { d =>
(d._type, d._object.name)
} shouldBe List(
(EventType.ADDED, deploymentOneName),
(EventType.DELETED, deploymentOneName),
(EventType.ADDED, deploymentTwoName),
(EventType.DELETED, deploymentTwoName)
)
}
it should "continuously watch changes on a named resource obj from the beginning - deployment" in { k8s =>
import skuber.api.client.EventType
val deploymentName = java.util.UUID.randomUUID().toString
val deployment = getNginxDeployment(deploymentName, "1.7.9")
k8s.create(deployment).futureValue.name shouldBe deploymentName
eventually {
k8s.get[Deployment](deploymentName).futureValue.status.get.availableReplicas shouldBe 1
}
val stream = k8s.get[Deployment](deploymentName).map { d =>
k8s.watchContinuously[Deployment](d)
.viaMat(KillSwitches.single)(Keep.right)
.filter(event => event._object.name == deploymentName)
.filter(event => event._type == EventType.ADDED || event._type == EventType.DELETED)
.toMat(Sink.collection)(Keep.both)
.run()
}
/*
* Request times for request is defaulted to 30 seconds.
* The idle timeout is also defaulted to 60 seconds.
* This will ensure multiple requests are performed by
* the source including empty responses
*/
pause(62.seconds)
k8s.delete[Deployment](deploymentName).futureValue
// cleanup
stream.map { killSwitch =>
killSwitch._1.shutdown()
}
val f1 = stream.futureValue
val f2 = f1._2.futureValue
f2.toList.map { d =>
(d._type, d._object.name)
} shouldBe List(
(EventType.ADDED, deploymentName),
(EventType.DELETED, deploymentName)
)
}
it should "continuously watch changes on a named resource from the beginning - deployment" in { k8s =>
import skuber.api.client.EventType
val deploymentName = java.util.UUID.randomUUID().toString
val deployment = getNginxDeployment(deploymentName, "1.7.9")
k8s.create(deployment).futureValue.name shouldBe deploymentName
eventually {
k8s.get[Deployment](deploymentName).futureValue.status.get.availableReplicas shouldBe 1
}
val stream = k8s.get[Deployment](deploymentName).map { d =>
k8s.watchContinuously[Deployment](deploymentName, None)
.viaMat(KillSwitches.single)(Keep.right)
.filter(event => event._object.name == deploymentName)
.filter(event => event._type == EventType.ADDED || event._type == EventType.DELETED)
.toMat(Sink.collection)(Keep.both)
.run()
}
/*
* Request times for request is defaulted to 30 seconds.
* This will ensure multiple requests are performed by
* the source including empty responses
*/
pause(62.seconds)
k8s.delete[Deployment](deploymentName).futureValue
// cleanup
stream.map { killSwitch =>
killSwitch._1.shutdown()
}
stream.futureValue._2.futureValue.toList.map { d =>
(d._type, d._object.name)
} shouldBe List(
(EventType.ADDED, deploymentName),
(EventType.DELETED, deploymentName)
)
}
it should "continuously watch changes on a named resource from a point in time - deployment" in { k8s =>
import skuber.api.client.EventType
val deploymentName = java.util.UUID.randomUUID().toString
val deployment = getNginxDeployment(deploymentName, "1.7.9")
k8s.create(deployment).futureValue.name shouldBe deploymentName
eventually {
k8s.get[Deployment](deploymentName).futureValue.status.get.availableReplicas shouldBe 1
}
val stream = k8s.get[Deployment](deploymentName).map { d =>
k8s.watchContinuously[Deployment](deploymentName, Some(d.resourceVersion))
.viaMat(KillSwitches.single)(Keep.right)
.filter(event => event._object.name == deploymentName)
.filter(event => event._type == EventType.ADDED || event._type == EventType.DELETED)
.toMat(Sink.collection)(Keep.both)
.run()
}
/*
* Request times for request is defaulted to 30 seconds.
* The idle timeout is also defaulted to 60 seconds.
* This will ensure multiple requests are performed by
* the source including empty responses
*/
pause(62.seconds)
k8s.delete[Deployment](deploymentName).futureValue
// cleanup
stream.map { killSwitch =>
killSwitch._1.shutdown()
}
stream.futureValue._2.futureValue.toList.map { d =>
(d._type, d._object.name)
} shouldBe List(
(EventType.DELETED, deploymentName)
)
}
def pause(length: Duration): Unit ={
Thread.sleep(length.toMillis)
}
def getNginxDeployment(name: String, version: String): Deployment = {
import LabelSelector.dsl._
val nginxContainer = getNginxContainer(version)
val nginxTemplate = Pod.Template.Spec.named("nginx").addContainer(nginxContainer).addLabel("app" -> "nginx")
Deployment(name).withTemplate(nginxTemplate).withLabelSelector("app" is "nginx")
}
def getNginxContainer(version: String): Container = Container(name = "nginx", image = "nginx:" + version).exposePort(80)
}
|
doriordan/skuber
|
client/src/it/scala/skuber/WatchContinuouslySpec.scala
|
Scala
|
apache-2.0
| 7,714
|
/*
* Copyright 2014 The Guardian
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package lib
import org.kohsuke.github.{GHOrganization, GHUser, GHIssue}
import collection.convert.wrapAll._
import Implicits._
import org.joda.time.DateTime
sealed trait StateUpdate {
val issueCanBeClosed: Boolean
}
case object UserHasLeftOrg extends StateUpdate {
override val issueCanBeClosed = true
}
case class MemberUserUpdate(oldProblems: Set[AccountRequirement],
currentProblems: Set[AccountRequirement],
terminationDate: DateTime,
orgMembershipWillBeConcealed: Boolean,
terminationWarning: Option[TerminationSchedule]) extends StateUpdate {
val isChange = oldProblems != currentProblems
val issueCanBeClosed = currentProblems.isEmpty
val userShouldReceiveFinalWarning = terminationWarning.isDefined
val worthyOfComment = issueCanBeClosed || isChange || orgMembershipWillBeConcealed || userShouldReceiveFinalWarning
val fixedRequirements = oldProblems -- currentProblems
}
case class MembershipTermination(problems: Set[AccountRequirement]) extends StateUpdate {
override val issueCanBeClosed = true
}
|
guardian/gu-who
|
app/lib/StateUpdate.scala
|
Scala
|
apache-2.0
| 1,743
|
package notebook.kernel
import java.io.{StringWriter, PrintWriter, ByteArrayOutputStream}
import java.net.{URLDecoder, JarURLConnection}
import java.util.ArrayList
import scala.collection.JavaConversions
import scala.collection.JavaConversions._
import scala.xml.{NodeSeq, Text}
import scala.util.control.NonFatal
import scala.util.Try
import tools.nsc.Settings
import tools.nsc.interpreter._
import tools.nsc.interpreter.Completion.{Candidates, ScalaCompleter}
import tools.nsc.interpreter.Results.{Incomplete => ReplIncomplete, Success => ReplSuccess, Error}
import tools.jline.console.completer.{ArgumentCompleter, Completer}
import org.apache.spark.repl._
import notebook.front.Widget
import notebook.util.Match
import notebook.kernel._
import notebook.kernel.repl.common._
class Repl(val compilerOpts: List[String], val jars:List[String]=Nil) extends ReplT {
val LOG = org.slf4j.LoggerFactory.getLogger(classOf[Repl])
def this() = this(Nil)
private lazy val stdoutBytes = new ReplOutputStream
private lazy val stdout = new PrintWriter(stdoutBytes)
private var loop:HackSparkILoop = _
private var _classServerUri:Option[String] = None
private var _initFinished: Boolean = false
private var _evalsUntilInitFinished: Int = 0
private var _needsDropOnReplay: Boolean = false
def setInitFinished(): Unit = {
_initFinished = true
_needsDropOnReplay = _evalsUntilInitFinished > 0
}
def classServerUri: Option[String] = {
_classServerUri
}
val interp:org.apache.spark.repl.SparkIMain = {
val settings = new Settings
settings.embeddedDefaults[Repl]
if (!compilerOpts.isEmpty) settings.processArguments(compilerOpts, false)
// fix for #52
settings.usejavacp.value = false
// fix for #52
val urls: IndexedSeq[String] = {
import java.net.URLClassLoader
import java.io.File
def urls(cl:ClassLoader, acc:IndexedSeq[String]=IndexedSeq.empty):IndexedSeq[String] = {
if (cl != null) {
val us = if (!cl.isInstanceOf[URLClassLoader]) {
acc
} else {
acc ++ (cl.asInstanceOf[URLClassLoader].getURLs map { u =>
val f = new File(u.getFile)
URLDecoder.decode(f.getAbsolutePath, "UTF8")
})
}
urls(cl.getParent, us)
} else {
acc
}
}
val loader = getClass.getClassLoader
val gurls = urls(loader).distinct//.filter(!_.contains("logback-classic"))//.filter(!_.contains("sbt/"))
gurls
}
val classpath = urls// map {_.toString}
settings.classpath.value = classpath.distinct.mkString(java.io.File.pathSeparator)
//bootclasspath → settings.classpath.isDefault = false → settings.classpath is used
settings.bootclasspath.value += scala.tools.util.PathResolver.Environment.javaBootClassPath
settings.bootclasspath.value += java.io.File.pathSeparator + settings.classpath.value
// LOG the classpath
// debug the classpath → settings.Ylogcp.value = true
//val i = new HackIMain(settings, stdout)
loop = new HackSparkILoop(stdout)
loop.addCps(jars)
loop.process(settings)
val i = {
val l:HackSparkILoop = loop.asInstanceOf[HackSparkILoop]
l.interpreter
}
//i.initializeSynchronous()
_classServerUri = Some(i.classServerUri)
i.asInstanceOf[org.apache.spark.repl.SparkIMain]
}
private lazy val completion = {
//new JLineCompletion(interp)
new SparkJLineCompletion(interp)
}
private def scalaToJline(tc: ScalaCompleter): Completer = new Completer {
def complete(_buf: String, cursor: Int, candidates: JList[CharSequence]): Int = {
val buf = if (_buf == null) "" else _buf
val Candidates(newCursor, newCandidates) = tc.complete(buf, cursor)
newCandidates foreach (candidates add _)
newCursor
}
}
private lazy val argCompletor = {
val arg = new ArgumentCompleter(new JLineDelimiter, scalaToJline(completion.completer()))
    // turns out this is a super important line
arg.setStrict(false)
arg
}
private lazy val stringCompletor = StringCompletorResolver.completor
private def getCompletions(line: String, cursorPosition: Int) = {
val candidates = new ArrayList[CharSequence]()
argCompletor.complete(line, cursorPosition, candidates)
candidates map { _.toString } toList
}
private def listDefinedTerms(request: interp.Request): List[NameDefinition] = {
request.handlers.flatMap { h =>
val maybeTerm = h.definesTerm.map(_.encoded)
val maybeType = h.definesType.map(_.encoded)
val references = h.referencedNames.toList.map(_.encoded)
(maybeTerm, maybeType) match {
case (Some(term), _) =>
val termType = getTypeNameOfTerm(term).getOrElse("<unknown>")
Some(TermDefinition(term, termType, references))
case (_, Some(tpe)) =>
Some(TypeDefinition(tpe, "type", references))
case _ => None
}
}
}
def getTypeNameOfTerm(termName: String): Option[String] = {
val tpe = try {
interp.typeOfTerm(termName).toString
} catch {
case exc: RuntimeException => println("Unable to get symbol type", exc); "<notype>"
}
tpe match {
case "<notype>" => // "<notype>" can be also returned by typeOfTerm
interp.classOfTerm(termName).map(_.getName)
case _ =>
// remove some crap
Some(
tpe
.replace("iwC$", "")
          .replaceAll("^\\(\\)", "") // 2.11 return types prefixed, like `()Person`
)
}
}
/**
* Evaluates the given code. Swaps out the `println` OutputStream with a version that
   * invokes the given `onPrintln` callback every time the given code somehow invokes a
* `println`.
*
* Uses compile-time implicits to choose a renderer. If a renderer cannot be found,
* then just uses `toString` on result.
*
* I don't think this is thread-safe (largely because I don't think the underlying
* IMain is thread-safe), it certainly isn't designed that way.
*
* @param code
* @param onPrintln
* @return result and a copy of the stdout buffer during the duration of the execution
*/
def evaluate( code: String,
onPrintln: String => Unit = _ => (),
onNameDefinion: NameDefinition => Unit = _ => ()
): (EvaluationResult, String) = {
stdout.flush()
stdoutBytes.reset()
// capture stdout if the code the user wrote was a println, for example
stdoutBytes.aop = onPrintln
val res = Console.withOut(stdoutBytes) {
interp.interpret(code)
}
stdout.flush()
stdoutBytes.aop = _ => ()
val result = res match {
case ReplSuccess =>
val request:interp.Request = interp.getClass.getMethods.find(_.getName == "prevRequestList").map(_.invoke(interp)).get.asInstanceOf[List[interp.Request]].last
//val request:Request = interp.prevRequestList.last
listDefinedTerms(request).foreach(onNameDefinion)
val lastHandler/*: interp.memberHandlers.MemberHandler*/ = request.handlers.last
try {
val lastStatementReturnsValue = listDefinedTerms(request).exists(_.name.matches("res[0-9]+"))
val evalValue = if (lastHandler.definesValue && lastStatementReturnsValue) {
// This is true for def's with no parameters, not sure that executing/outputting this is desirable
// CY: So for whatever reason, line.evalValue attemps to call the $eval method
// on the class...a method that does not exist. Not sure if this is a bug in the
// REPL or some artifact of how we are calling it.
// RH: The above comment may be going stale given the shenanigans I'm pulling below.
val line = request.lineRep
val renderObjectCode =
"""object $rendered {
| %s
| val rendered: _root_.notebook.front.Widget = { %s }
| %s
|}""".stripMargin.format(
request.importsPreamble,
request.fullPath(lastHandler.definesTerm.get),
request.importsTrailer
)
if (line.compile(renderObjectCode)) {
try {
val classLoader = interp.getClass.getMethods.find(_.getName == "classLoader").map(_.invoke(interp)).get.asInstanceOf[java.lang.ClassLoader]
val renderedClass2 = Class.forName(
line.pathTo("$rendered")+"$", true, classLoader
)
val o = renderedClass2.getDeclaredField(interp.global.nme.MODULE_INSTANCE_FIELD.toString).get()
def iws(o:Any):NodeSeq = {
val iw = o.getClass.getMethods.find(_.getName == "$iw")
val o2 = iw map { m =>
m.invoke(o)
}
o2 match {
case Some(o3) =>
iws(o3)
case None =>
val r = o.getClass.getDeclaredMethod("rendered").invoke(o)
val h = r.asInstanceOf[Widget].toHtml
h
}
}
iws(o)
} catch {
case NonFatal(e) =>
e.printStackTrace
LOG.error("Ooops, exception in the cell", e)
<span style="color:red;">Ooops, exception in the cell: {e.getMessage}</span>
}
} else {
// a line like println(...) is technically a val, but returns null for some reason
// so wrap it in an option in case that happens...
Option(line.call("$result")) map { result => Text(try { result.toString } catch { case NonFatal(e) => "Fail to `toString` the result: " + e.getMessage }) } getOrElse NodeSeq.Empty
}
} else {
NodeSeq.Empty
}
Success(evalValue)
}
catch {
case NonFatal(e) =>
val ex = new StringWriter()
e.printStackTrace(new PrintWriter(ex))
Failure(ex.toString)
}
case ReplIncomplete => Incomplete
case Error => Failure(stdoutBytes.toString)
}
if ( !_initFinished ) {
_evalsUntilInitFinished = _evalsUntilInitFinished + 1
}
(result, stdoutBytes.toString)
}
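  // Illustrative usage sketch (not part of the original file): evaluating a cell and
  // capturing whatever it printed. Constructing a Repl spins up a full Spark interpreter,
  // so the call is shown as a comment only.
  //   val repl = new Repl()
  //   val (result, captured) = repl.evaluate("""println("hi"); 1 + 1""")
  //   // on success, result is Success(<rendered NodeSeq>) and captured contains "hi"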
def addCp(newJars:List[String]) = {
val requests = interp.getClass.getMethods.find(_.getName == "prevRequestList").map(_.invoke(interp)).get.asInstanceOf[List[interp.Request]]
var prevCode = requests.map(_.originalLine).drop( _evalsUntilInitFinished )
interp.close() // this will close the repl class server, which is needed in order to reuse `-Dspark.replClassServer.port`!
val r = new Repl(compilerOpts, newJars:::jars)
(r, () => prevCode foreach (c => r.evaluate(c, _ => ())))
}
def complete(line: String, cursorPosition: Int): (String, Seq[Match]) = {
def literalCompletion(arg: String) = {
      val LiteralReg = """.*"([\w/]+)""".r
arg match {
case LiteralReg(literal) => Some(literal)
case _ => None
}
}
// CY: Don't ask to explain why this works. Look at JLineCompletion.JLineTabCompletion.complete.mkDotted
// The "regularCompletion" path is the only path that is (likely) to succeed
// so we want access to that parsed version to pull out the part that was "matched"...
// ...just...trust me.
val delim = argCompletor.getDelimiter
val list = delim.delimit(line, cursorPosition)
val bufferPassedToCompletion = list.getCursorArgument
val actCursorPosition = list.getArgumentPosition
val parsed = Parsed.dotted(bufferPassedToCompletion, actCursorPosition) // withVerbosity verbosity
val matchedText = bufferPassedToCompletion.takeRight(actCursorPosition - parsed.position)
literalCompletion(bufferPassedToCompletion) match {
case Some(literal) =>
// strip any leading quotes
stringCompletor.complete(literal)
case None =>
val candidates = getCompletions(line, cursorPosition)
(matchedText, if (candidates.size > 0 && candidates.head.isEmpty) {
List()
} else {
candidates.map(Match(_))
})
}
}
def objectInfo(line: String, position:Int): Seq[String] = {
// CY: The REPL is stateful -- it isn't until you ask to complete
// the thing twice does it give you the method signature (i.e. you
// hit tab twice). So we simulate that here... (nutty, I know)
getCompletions(line, position)
getCompletions(line, position)
}
def sparkContextAvailable: Boolean = {
interp.allImportedNames.exists(_.toString == "sparkContext")
}
def stop(): Unit = {
interp.close()
}
}
|
deanwampler/spark-notebook
|
modules/spark/src/main/scala_2.10/spark-last/notebook/kernel/Repl.scala
|
Scala
|
apache-2.0
| 12,840
|
package es.weso.computex
import org.rogach.scallop.Scallop
import com.typesafe.config.ConfigFactory
import org.rogach.scallop.ScallopConf
import org.rogach.scallop.exceptions.Help
import org.slf4j.LoggerFactory
import com.hp.hpl.jena.rdf.model.ModelFactory
import com.hp.hpl.jena.util.FileManager
import com.hp.hpl.jena.rdf.model.Model
import scala.io.Source
import es.weso.utils.JenaUtils._
import com.hp.hpl.jena.query.ResultSet
import scala.collection.mutable.ArrayBuffer
import play.api.libs.json._
import java.io.File
import java.io.BufferedWriter
import java.io.FileWriter
import java.io.FileOutputStream
import com.hp.hpl.jena.rdf.model.SimpleSelector
import com.hp.hpl.jena.rdf.model.RDFNode
import com.hp.hpl.jena.rdf.model.ResourceFactory
import com.hp.hpl.jena.rdf.model.Resource
import com.hp.hpl.jena.rdf.model.Literal
import com.hp.hpl.jena.rdf.model.Property
import PREFIXES._
import scala.collection.JavaConverters
class AddDatasetsOpts(arguments: Array[String],
onError: (Throwable, Scallop) => Nothing
) extends ScallopConf(arguments) {
banner("""| Generate Computation Datasets
| Options:
|""".stripMargin)
footer("Enjoy!")
version("0.1")
val fileName = opt[String]("file",
required=true,
descr = "Turtle file")
val output = opt[String]("out",
descr = "Output file")
val version = opt[Boolean]("version",
noshort = true,
descr = "Print version")
val help = opt[Boolean]("help",
noshort = true,
descr = "Show this message")
override protected def onError(e: Throwable) = onError(e, builder)
}
object AddDatasets extends App {
def imputedDatasets(m:Model) : Model = {
val newModel = ModelFactory.createDefaultModel()
val iter = m.listSubjectsWithProperty(rdf_type,qb_DataSet)
while (iter.hasNext) {
val dataset = iter.nextResource()
val newDataSet = newModel.createResource()
newModel.add(newDataSet,rdf_type,qb_DataSet)
val computation = newModel.createResource
newModel.add(computation,rdf_type,cex_ImputeDataSet)
newModel.add(computation,cex_method,cex_AvgGrowth2Missing)
newModel.add(computation,cex_method,cex_MeanBetweenMissing)
newModel.add(computation,cex_method,cex_CopyRaw)
newModel.add(computation,cex_dataSet,dataset)
newModel.add(newDataSet,cex_computation,computation)
newModel.add(newDataSet,sdmxAttribute_unitMeasure,dbpedia_Year)
newModel.add(newDataSet,qb_structure,wf_onto_DSD)
val iterSlices = m.listStatements(dataset,qb_slice,null : RDFNode)
while (iterSlices.hasNext) {
val slice = iterSlices.next.getObject().asResource()
val newSlice = newModel.createResource()
newModel.add(newSlice,rdf_type,qb_Slice)
newModel.add(newSlice,cex_indicator,findProperty_asResource(m,slice,cex_indicator))
newModel.add(newSlice,wf_onto_ref_year,findProperty_asLiteral(m,slice,wf_onto_ref_year))
newModel.add(newSlice,qb_sliceStructure,wf_onto_sliceByArea)
newModel.add(newDataSet,qb_slice,newSlice)
}
}
newModel.setNsPrefixes(PREFIXES.cexMapping)
newModel
}
def normalizedDatasets(m:Model) : Model = {
val newModel = ModelFactory.createDefaultModel()
val datasetsIter = m.listSubjectsWithProperty(rdf_type,qb_DataSet)
while (datasetsIter.hasNext) {
val dataset = datasetsIter.nextResource()
val computation = findProperty_asResource(m,dataset,cex_computation)
val typeComputation = findProperty(m,computation,rdf_type)
if (typeComputation == cex_ImputeDataSet) {
val newDataSet = newModel.createResource()
newModel.add(newDataSet,rdf_type,qb_DataSet)
val computation = newModel.createResource
newModel.add(computation,rdf_type,cex_NormalizeDataSet)
newModel.add(computation,cex_dataSet,dataset)
newModel.add(newDataSet,cex_computation,computation)
newModel.add(newDataSet,sdmxAttribute_unitMeasure,dbpedia_Year)
newModel.add(newDataSet,qb_structure,wf_onto_DSD)
val iterSlices = m.listStatements(dataset,qb_slice,null : RDFNode)
while (iterSlices.hasNext) {
val slice = iterSlices.next.getObject().asResource()
val newSlice = newModel.createResource()
newModel.add(newSlice,rdf_type,qb_Slice)
newModel.add(newSlice,cex_indicator,findProperty_asResource(m,slice,cex_indicator))
newModel.add(newSlice,wf_onto_ref_year,findProperty_asLiteral(m,slice,wf_onto_ref_year))
newModel.add(newSlice,qb_sliceStructure,wf_onto_sliceByArea)
newModel.add(newDataSet,qb_slice,newSlice)
}
}
}
newModel.setNsPrefixes(PREFIXES.cexMapping)
newModel
}
def adjustedDatasets(m:Model) : Model = {
val newModel = ModelFactory.createDefaultModel()
val datasetsIter = m.listSubjectsWithProperty(rdf_type,qb_DataSet)
while (datasetsIter.hasNext) {
val dataset = datasetsIter.nextResource()
val computation = findProperty_asResource(m,dataset,cex_computation)
val typeComputation = findProperty(m,computation,rdf_type)
if (typeComputation == cex_NormalizeDataSet) {
val newDataSet = newModel.createResource()
newModel.add(newDataSet,rdf_type,qb_DataSet)
val computation = newModel.createResource
newModel.add(computation,rdf_type,cex_AdjustDataSet)
newModel.add(computation,cex_dataSet,dataset)
newModel.add(newDataSet,cex_computation,computation)
newModel.add(newDataSet,sdmxAttribute_unitMeasure,dbpedia_Year)
newModel.add(newDataSet,qb_structure,wf_onto_DSD)
val iterSlices = m.listStatements(dataset,qb_slice,null : RDFNode)
while (iterSlices.hasNext) {
val slice = iterSlices.next.getObject().asResource()
val newSlice = newModel.createResource()
newModel.add(newSlice,rdf_type,qb_Slice)
newModel.add(newSlice,cex_indicator,findProperty_asResource(m,slice,cex_indicator))
newModel.add(newSlice,wf_onto_ref_year,findProperty_asLiteral(m,slice,wf_onto_ref_year))
newModel.add(newSlice,qb_sliceStructure,wf_onto_sliceByArea)
newModel.add(newDataSet,qb_slice,newSlice)
}
}
}
newModel.setNsPrefixes(PREFIXES.cexMapping)
newModel
}
def addDatasets(m: Model) : Model = {
m.add(imputedDatasets(m))
m.add(normalizedDatasets(m))
}
override def main(args: Array[String]) {
val logger = LoggerFactory.getLogger("Application")
val conf = ConfigFactory.load()
val opts = new AddDatasetsOpts(args,onError)
try {
val model = ModelFactory.createDefaultModel
val inputStream = FileManager.get.open(opts.fileName())
model.read(inputStream,"","TURTLE")
val newModel = addDatasets(model)
if (opts.output.get == None) newModel.write(System.out,"TURTLE")
else {
val fileOutput = opts.output()
newModel.write(new FileOutputStream(fileOutput),"TURTLE")
}
} catch {
      case e: Exception => println("\nException:\n" + e.getLocalizedMessage())
}
}
private def onError(e: Throwable, scallop: Scallop) = e match {
case Help(s) =>
println("Help: " + s)
scallop.printHelp
sys.exit(0)
case _ =>
println("Error: %s".format(e.getMessage))
scallop.printHelp
sys.exit(1)
}
}
|
weso/computex
|
app/es/weso/computex/AddDatasets.scala
|
Scala
|
apache-2.0
| 7,372
|
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala.gridfs
import java.util.concurrent.TimeUnit
import com.mongodb.reactivestreams.client.gridfs.GridFSFindPublisher
import org.mongodb.scala.{ BaseSpec, Document }
import org.reactivestreams.Publisher
import org.scalamock.scalatest.proxy.MockFactory
import scala.concurrent.duration.Duration
class GridFSFindObservableSpec extends BaseSpec with MockFactory {
val wrapper = mock[GridFSFindPublisher]
val gridFSFindObservable = GridFSFindObservable(wrapper)
"GridFSFindObservable" should "have the same methods as the wrapped GridFSFindPublisher" in {
val mongoPublisher: Set[String] = classOf[Publisher[Document]].getMethods.map(_.getName).toSet
val wrapped = classOf[GridFSFindPublisher].getMethods.map(_.getName).toSet -- mongoPublisher - "collation"
val local = classOf[GridFSFindObservable].getMethods.map(_.getName).toSet
wrapped.foreach((name: String) => {
val cleanedName = name.stripPrefix("get")
assert(local.contains(name) | local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name")
})
}
it should "call the underlying methods" in {
val batchSize = 20
val filter = Document("{a: 1}")
val limit = 10
    val maxTime = Duration(10, "second") // scalastyle:ignore
val skip = 5
val sort = Document("{_id: 1}")
wrapper.expects(Symbol("batchSize"))(batchSize).once()
wrapper.expects(Symbol("filter"))(filter).once()
wrapper.expects(Symbol("limit"))(limit).once()
wrapper.expects(Symbol("maxTime"))(maxTime.toMillis, TimeUnit.MILLISECONDS).once()
wrapper.expects(Symbol("noCursorTimeout"))(true).once()
wrapper.expects(Symbol("skip"))(skip).once()
wrapper.expects(Symbol("sort"))(sort).once()
gridFSFindObservable.batchSize(batchSize)
gridFSFindObservable.filter(filter)
gridFSFindObservable.limit(limit)
gridFSFindObservable.maxTime(maxTime)
gridFSFindObservable.noCursorTimeout(true)
gridFSFindObservable.skip(skip)
gridFSFindObservable.sort(sort)
}
}
|
rozza/mongo-java-driver
|
driver-scala/src/test/scala/org/mongodb/scala/gridfs/GridFSFindObservableSpec.scala
|
Scala
|
apache-2.0
| 2,633
|
package stepping
class StepFilters {
var mutableVar = "var"
val immutableVal = "val"
def foo(a: String, b: String): Unit = {}
def mainTest(): Unit = {
mutableVar // line 11
immutableVal
mutableVar = immutableVal
foo(mutableVar, immutableVal)
fors(); bridges()
}
def fors(): Unit = {
val lst = List("one", "two", "three")
for (n <- lst) { // line 25
debug.Helper.noop(immutableVal)
println(n)
}
}
def bridges(): Unit = {
val c: Base[Int] = new Concrete
c.base(10) // line 34
println(c.base(10))
2 + c.base(10)
debug.Helper.noop(null)
}
}
class Base[T] {
def base(x: T): Int = 0
}
class Concrete extends Base[Int] {
override def base(x: Int): Int = {
println(x) // line 49
x
}
}
object StepFilters {
def main(args: Array[String]): Unit = {
new StepFilters().mainTest()
}
}
|
stephenh/scala-ide
|
org.scala-ide.sdt.debug.tests/test-workspace/debug/src/stepping/StepFilters.scala
|
Scala
|
bsd-3-clause
| 902
|
package me.invkrh.raft.deploy
import java.nio.file.{Files, Paths}
import com.typesafe.config.{Config, ConfigFactory}
import me.invkrh.raft.exception.{RaftConfigDirectoryNotFoundException, RaftConfigFileNotFoundException}
trait ConfigHolder {
val config: Config
}
trait RaftConfig extends ConfigHolder {
override val config: Config = {
val confDir = System.getenv("RAFT_CONF_DIR")
if (confDir == null) {
throw RaftConfigDirectoryNotFoundException()
} else {
val configPath = Paths.get(confDir + "/raft.conf")
if (Files.exists(configPath)) {
ConfigFactory.parseFile(configPath.toFile)
} else {
throw RaftConfigFileNotFoundException(configPath.toString)
}
}
}
}
|
invkrh/akka-raft
|
src/main/scala/me/invkrh/raft/deploy/ConfigHolder.scala
|
Scala
|
mit
| 732
|
package common
import play.api.libs.concurrent.Akka
import play.api.Play.current
import scala.concurrent.ExecutionContext
object ExecutionContexts {
implicit val fastOps: ExecutionContext = play.api.libs.concurrent.Execution.Implicits.defaultContext
implicit val internetIOOps: ExecutionContext = Akka.system.dispatchers.lookup("contexts.internet-io-ops")
implicit val verySlowOps: ExecutionContext = Akka.system.dispatchers.lookup("contexts.very-slow-ops")
}
|
ubenzer/tezapp
|
app/common/ExecutionContexts.scala
|
Scala
|
agpl-3.0
| 468
|
/*                     __                                               *\
**     ________ ___   / /  ___     Scala API                            **
**    / __/ __// _ | / /  / _ |    (c) 2007-2013, LAMP/EPFL             **
**  __\ \/ /__/ __ |/ /__/ __ |    http://scala-lang.org/               **
** /____/\___/_/ |_/____/_/ | |                                         **
**                          |/                                          **
\*                                                                      */
package scala.swing
import scala.collection.mutable.Buffer
object SequentialContainer {
/**
* Utility trait for wrapping sequential containers.
*/
trait Wrapper extends SequentialContainer with Container.Wrapper {
override val contents: Buffer[Component] = new Content
//def contents_=(c: Component*) { contents.clear(); contents ++= c }
}
}
/**
* A container for which a sequential order of children makes sense, such as
* flow panels, or menus. Its contents are mutable.
*/
trait SequentialContainer extends Container {
/**
* The mutable child components of this container. The order matters and
* usually indicates the layout of the children.
*/
override def contents: Buffer[Component]
//def contents_=(c: Component*)
}
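// Illustrative usage sketch (not part of the original file): concrete wrappers such as
// FlowPanel or BoxPanel mix in SequentialContainer.Wrapper, so children are managed
// through the mutable `contents` buffer declared above, e.g.
//   val panel = new FlowPanel
//   panel.contents += new Button("Ok")
//   panel.contents ++= Seq(new Label("a"), new Label("b"))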
|
SethTisue/scala-swing
|
src/main/scala/scala/swing/SequentialContainer.scala
|
Scala
|
bsd-3-clause
| 1,294
|
package time
import java.lang.{Integer => JInt}
import java.util.{List => JList}
import java.util.Calendar
import java.util.Properties;
import java.io.StringReader;
import scala.util.Sorting.quickSort
import scala.collection.JavaConversions._
import scala.collection.mutable.Buffer
import edu.stanford.nlp.util.logging.Redwood.Util._
import edu.stanford.nlp.util.logging.StanfordRedwoodConfiguration;
import edu.stanford.nlp.util.CoreMap
import edu.stanford.nlp.util.ArrayCoreMap
import edu.stanford.nlp.ling.CoreLabel
import edu.stanford.nlp.ling.CoreAnnotation
import edu.stanford.nlp.ling.CoreAnnotations._
import edu.stanford.nlp.time.JodaTimeUtils
import edu.stanford.nlp.ie.NumberNormalizer
import edu.stanford.nlp.process.PTBTokenizer
import edu.stanford.nlp.process.CoreLabelTokenFactory
import edu.stanford.nlp.pipeline._
import org.joda.time.DateTime
import org.joda.time.Period
import org.joda.time.DateTimeZone
import org.goobs.database._
import org.goobs.stanford.CoreMapDatum
import org.goobs.testing.Dataset
import org.goobs.stanford.JavaNLP._
//------------------------------------------------------------------------------
// UTILITIES
//------------------------------------------------------------------------------
// -- ENUMERATIONS --
object Language extends Enumeration {
val english = Value
}
object NumberType extends Enumeration {
val NONE, ORDINAL, NUMBER, UNIT, REAL = Value
}
// -- AUX CLASSES --
@SerialVersionUID(-7775965489080356070L)
case class TimexMetaInfo(doc:String,sentence:Int,word:Int,tid:String)
// -- ANNOTATIONS --
class TimeExpressionsAnnotation extends CoreAnnotation[java.util.List[CoreMap]]{
def getType:Class[java.util.List[CoreMap]] = classOf[java.util.List[CoreMap]]
}
class TimeValueAnnotation extends CoreAnnotation[Array[String]]{
def getType:Class[Array[String]] = classOf[Array[String]]
}
class OriginalTimeMetaAnnotation extends CoreAnnotation[TimexMetaInfo]{
def getType:Class[TimexMetaInfo] = classOf[TimexMetaInfo]
}
class OriginalTimeTypeAnnotation extends CoreAnnotation[String]{
def getType:Class[String] = classOf[String]
}
class OriginalTimeValueAnnotation extends CoreAnnotation[String]{
def getType:Class[String] = classOf[String]
}
class TimeIdentifierAnnotation extends CoreAnnotation[String]{
def getType:Class[String] = classOf[String]
}
class IsTestAnnotation extends CoreAnnotation[Boolean]{
def getType:Class[Boolean] = classOf[Boolean]
}
class OriginalTokensAnnotation
extends CoreAnnotation[java.util.List[CoreLabel]] {
def getType:Class[java.util.List[CoreLabel]]
= classOf[java.util.List[CoreLabel]]
}
class OriginalBeginIndexAnnotation
extends CoreAnnotation[java.lang.Integer]{
def getType:Class[java.lang.Integer] = classOf[java.lang.Integer]
}
class OriginalEndIndexAnnotation
extends CoreAnnotation[java.lang.Integer]{
def getType:Class[java.lang.Integer] = classOf[java.lang.Integer]
}
//------------------------------------------------------------------------------
// DATA
//------------------------------------------------------------------------------
class TimeDataset(val data:Dataset[CoreMapDatum]) {
def slice(minInclusive:Int,maxExclusive:Int):TimeDataset
= new TimeDataset(data.slice(minInclusive,maxExclusive))
def slice(isTest:Boolean):TimeDataset = {
//(variables)
var minInc = 0
var maxExc = 0
var seenAny:Boolean = false
var seenOpposite:Boolean = false
var i=0
//(iterate)
data.iterator.foreach{ (doc:CoreMapDatum) =>
val docTest = doc.get[Boolean,IsTestAnnotation](classOf[IsTestAnnotation])
if(docTest == isTest){
//((case: in set))
assert(!seenAny || !seenOpposite,"Interleaved train/test")
if(!seenAny) {
minInc = i;
seenAny = true
}
maxExc = i + 1
} else {
//((case: not in set))
if(seenAny){
seenOpposite = true
}
}
//((increment))
i += 1
}
//(slice)
slice(minInc,maxExc)
}
def train:TimeDataset = slice(false)
def test:TimeDataset = slice(true)
private def tokens(docI:Int,sentI:Int):Buffer[CoreLabel] = {
data
.get(docI)
.get[JList[CoreMap],SentencesAnnotation](classOf[SentencesAnnotation])
.get(sentI)
.get[JList[CoreLabel],TokensAnnotation](classOf[TokensAnnotation])
}
def goldTimes:Iterable[(Int,Int,CoreMap)] = {
var lst = List[(Int,Int,CoreMap)]()
//--Each Document
data.zipWithIndex.foreach{ case (datum:CoreMapDatum,docI:Int) =>
//--Each Sentence
datum.get[JList[CoreMap],SentencesAnnotation](classOf[SentencesAnnotation])
.zipWithIndex
.foreach{ case (sent:CoreMap,sentI:Int) =>
//--Each Time Expression
sent.get[JList[CoreMap],TimeExpressionsAnnotation](classOf[TimeExpressionsAnnotation])
.foreach{ (exp:CoreMap) =>
lst = (docI,sentI,exp) :: lst
}
}
}
lst
}
def goldSpans(train:Boolean,index:Indexing=Indexing()
):Array[(TimeSent,Temporal,Time)] = {
goldTimes.map{ case (docI:Int,sentI:Int,expr:CoreMap) =>
//(get terms)
val gold =
DataLib.array2JodaTime(
expr.get[Array[String],TimeValueAnnotation](classOf[TimeValueAnnotation])
)
val ground = new Time(new DateTime(
data
.get(docI)
.get[Calendar,CalendarAnnotation](classOf[CalendarAnnotation])
.getTimeInMillis ))
//(create sentence)
( DataLib.mkTimeSent(expr,tokens(docI,sentI),index,train), gold, ground)
}.toArray
}
}
//------------------------------------------------------------------------------
// DATA Processing
//------------------------------------------------------------------------------
object DataLib {
val Tm = """^T([0-9]{1,2})([0-9]{1,2})?$""".r
val Year = """^([0-9]{2,4})$""".r
val YearMonth = """^([0-9]{4})-?([0-9]{1,2})$""".r
val YearMonthDayHourMin =
"""^([0-9]{4})-?([0-9]{1,2})-?([0-9]{1,2})T?([0-9]{1,2})?([0-9]{1,2})?$""".r
val YearMonthDayTime =
"""^([0-9]{4})-?([0-9]{1,2})-?([0-9]{1,2})T?(MO|AF|EV|NI)?$""".r
val TimeOfDay =
"""^T(MO|AF|EV|NI)$""".r
val TimeOfDayDuration =
"""^PT(MO|AF|EV|NI)$""".r
val YearWeekWE = """^([0-9]{4})-?W([0-9]{1,2})-?(WE)?$""".r
val YearQuarter = """^([0-9]{4})-?Q([1-4])$""".r
val YearHalf = """^([0-9]{4})-?H([1-2])$""".r
val YearSixth = """^([0-9]{4})-?B([1-6])$""".r
val YearSeason = """^([0-9]{4})-?(SP|SU|FA|WI)$""".r
val Period = """^P(([0-9]*|X)(D|W|M|Q|Y|E|C|L|H|S|T))+$""".r
val periodPattern = java.util.regex.Pattern.compile(
"""([0-9]*|X)(D|W|M|Q|Y|E|C|L|H|S|T)""")
val Unk = """^(.*X.*)$""".r
class IsTokenized extends CoreAnnotation[Boolean]{
def getType:Class[Boolean] = classOf[Boolean]
}
class IsNumbered extends CoreAnnotation[Boolean]{
def getType:Class[Boolean] = classOf[Boolean]
}
def timex2JodaTime(timex:String,ground:DateTime):Any = {
    val str = timex.trim.replaceAll("""\s+""","")
val pass1 = str match {
case Tm(hr,min) =>
val base = ground.withHourOfDay(hr.toInt).
withMinuteOfHour(if(min == null || min.equals("")) 0 else min.toInt)
(base,
if(min == null || min.equals("")){ base.plusHours(1) }
else{ base.plusMinutes(1) }
)
case Year(y) =>
val (yr,dur) = y.length match {
case 2 => (1900 + y.toInt, 1)
case 3 => (y.toInt*10, 10)
case 4 => (y.toInt, 1)
}
val base = new DateTime(yr.toInt,1,1,0,0,0,0)
(base, base.plusYears(dur))
case YearMonth(year,month) =>
val base = new DateTime(year.toInt, math.max(month.toInt,1), 1,
0,0,0,0)
(base, base.plusMonths(1))
case YearMonthDayHourMin(year,month,day,hour,min) =>
val hr = if(hour == null || hour.equals("")){ 0 }else{ hour.toInt-1 }
val mn = if(min == null || min.equals("")){ 0 }else{ min.toInt }
val base =
new DateTime(year.toInt,month.toInt,day.toInt,hr,mn,0,0)
(base,
if(mn != 0){
base.plusMinutes(1)
} else if(hr != 0){
base.plusHours(1)
} else {
base.plusDays(1)
}
)
case YearMonthDayTime(year,month,day,time) =>
val base = time match {
case "MO" => new DateTime(year.toInt,month.toInt,day.toInt,8,0,0,0)
case "AF" => new DateTime(year.toInt,month.toInt,day.toInt,12,0,0,0)
case "EV" => new DateTime(year.toInt,month.toInt,day.toInt,16,0,0,0)
case "NI" => new DateTime(year.toInt,month.toInt,day.toInt,20,0,0,0)
}
(base, base.plusHours(4))
case TimeOfDay(time) =>
val base = time match {
case "MO" => new DateTime(ground.getYear,ground.getMonthOfYear,ground.getDayOfMonth,8,0,0,0)
case "AF" => new DateTime(ground.getYear,ground.getMonthOfYear,ground.getDayOfMonth,12,0,0,0)
case "EV" => new DateTime(ground.getYear,ground.getMonthOfYear,ground.getDayOfMonth,16,0,0,0)
case "NI" => new DateTime(ground.getYear,ground.getMonthOfYear,ground.getDayOfMonth,20,0,0,0)
}
(base, base.plusHours(4))
case _ => null
}
val pass2 = if(pass1 != null) pass1 else str match {
case YearWeekWE(year,week,we) =>
val base = we match {
case "WE" =>
val b = new DateTime
b.withYear(year.toInt).
withWeekOfWeekyear(week.toInt).
withDayOfWeek(6).
withMillisOfDay(0)
case _ =>
val b = new DateTime
b.withYear(year.toInt).
withWeekOfWeekyear(week.toInt).
withDayOfWeek(1).
withMillisOfDay(0)
}
(base,
we match{
case "WE" => base.plusDays(2)
case _ => base.plusWeeks(1)
}
)
case YearQuarter(year,quarter) =>
val base = new DateTime(year.toInt,(quarter.toInt-1)*3+1,1,0,0,0,0)
(base,base.plusMonths(3))
case YearHalf(year,half) =>
val base = new DateTime(year.toInt,(half.toInt-1)*6+1,1,0,0,0,0)
(base,base.plusMonths(6))
case YearSixth(year,sixth) =>
val base = new DateTime(year.toInt,(sixth.toInt-1)*2+1,1,0,0,0,0)
(base,base.plusMonths(6))
case YearSeason(year,season) =>
val quarter = season match {
case "WI" => 4
case "SP" => 1
case "SU" => 2
case "FA" => 3
}
val base = new DateTime(year.toInt,(quarter.toInt)*3,1,0,0,0,0)
(base,base.plusMonths(3))
case _ => null
}
val pass3 = if(pass2 != null) pass2 else str match {
case Period(junk:String,_*) =>
val matcher = periodPattern.matcher(str)
var period:Period = new Period
var seenTime:Boolean = false
var fuzzy:Boolean = false
while(matcher.find){
val numString:String = matcher.group(1)
val unit:String = matcher.group(2)
if(numString.equals("")){
seenTime = true
} else if(fuzzy){
throw new IllegalArgumentException("Multiple fuzzy times")
} else {
val num:Int =
if(numString.equals("X")){ fuzzy = true; 1 }
else{ numString.toInt }
period = (unit,seenTime) match {
case ("L",_) => period.plusYears(1000*num)
case ("C",_) => period.plusYears(100*num)
case ("E",_) => period.plusYears(10*num)
case ("Y",_) => period.plusYears(1*num)
case ("Q",_) => period.plusMonths(3*num)
case ("M",false) => period.plusMonths(1*num)
case ("W",_) => period.plusWeeks(1*num)
case ("D",_) => period.plusDays(1*num)
case ("H",_) => period.plusHours(1*num)
case ("M",true) => period.plusMinutes(1*num)
case ("S",_) => period.plusSeconds(1*num)
}
}
}
(period,fuzzy)
case TimeOfDayDuration(time) =>
        // Joda-Time Period is immutable; keep the value returned by plusHours
        val period: Period = (new Period).plusHours(4)
        (period, false)
case "PAST_REF" => ("PAST",ground)
case "FUTURE_REF" => (ground,"FUTURE")
case "PRESENT_REF" => (ground,ground)
case Unk(x) => str
case "undef" => str
case _ =>
val base = new DateTime(str)
(base,base)
}
pass3
}
def jodaTime2Array(time:Any,timex:String):Array[String] = {
time match {
case (begin:DateTime,end:DateTime) =>
Array[String]("RANGE",begin.toString,end.toString)
case (begin:String,end:DateTime) =>
Array[String]("RANGE",begin,end.toString)
case (begin:DateTime,end:String) =>
Array[String]("RANGE",begin.toString,end)
case (begin:Period,fuzzy:Boolean) =>
val opts = new JodaTimeUtils.ConversionOptions
opts.approximate = true
def mkVal(i:Int) = if(fuzzy && i != 0) "x" else ""+i
Array[String]("PERIOD",
mkVal(begin.getYears),
mkVal(begin.getMonths),
mkVal(begin.getWeeks),
mkVal(begin.getDays),
mkVal(begin.getHours),
mkVal(begin.getMinutes),
mkVal(begin.getSeconds) )
case (s:String) => Array[String]("UNK",s)
case _ => throw new IllegalArgumentException("Unknown time: " + time)
}
}
def array2JodaTime(timeVal:Array[String]):Temporal = {
assert(timeVal.length > 0, "No time value for timex!")
val inType:String = timeVal(0).trim
inType match {
case "INSTANT" => {
//(case: instant time)
assert(timeVal.length == 2, "Instant has one element")
if(timeVal(1).trim == "NOW"){
Range(Duration.ZERO)
} else {
val rawTime = new DateTime(timeVal(1).trim)
val time:Time =
if(rawTime.equals(Time.DAWN_OF)){ Time.DAWN_OF }
else if(rawTime.equals(Time.END_OF)){ Time.END_OF }
else Time(rawTime)
Range(time)
}
}
case "RANGE" => {
//(case: range)
assert(timeVal.length == 3, "Range has two elements")
val begin:String = timeVal(1).trim
val end:String = timeVal(2).trim
val beginTime:Time
= if(begin.equals("PAST")) Time.DAWN_OF else Time(new DateTime(begin))
val endTime:Time
=if(end.equals("FUTURE")) Time.END_OF else Time(new DateTime(end))
Range(
{if(beginTime.equals(Time.DAWN_OF)){ Time.DAWN_OF }
else if(beginTime.equals(Time.END_OF)){ Time.END_OF }
else beginTime},
{if(endTime.equals(Time.DAWN_OF)){ Time.DAWN_OF }
else if(endTime.equals(Time.END_OF)){ Time.END_OF }
else endTime}
)
}
case "PERIOD" => {
//(case: duration)
assert(timeVal.length == 8, "Period has invalid element count")
var isApprox = false
def mkInt(str:String):Int = {
if(str.equalsIgnoreCase("x")){ isApprox = true; 1 }else{ str.toInt }
}
val rtn:Duration = Duration(new Period(
mkInt(timeVal(1)),
mkInt(timeVal(2)),
mkInt(timeVal(3)),
mkInt(timeVal(4)),
mkInt(timeVal(5)),
mkInt(timeVal(6)),
mkInt(timeVal(7)),
0
))
if(isApprox){ ~rtn } else { rtn }
}
case "UNK" => {
new UnkTime
}
case _ => throw new IllegalStateException("Unknown time: " +
inType + " for timex: " + timeVal.mkString(" "))
}
}
def copyTime(lbl:CoreLabel):CoreMap = {
val rtn = new ArrayCoreMap(6)
//(time)
rtn.set(classOf[OriginalTimeMetaAnnotation],
lbl.get[TimexMetaInfo,OriginalTimeMetaAnnotation]
(classOf[OriginalTimeMetaAnnotation]))
rtn.set(classOf[OriginalTimeTypeAnnotation],
lbl.get[String,OriginalTimeTypeAnnotation]
(classOf[OriginalTimeTypeAnnotation]))
rtn.set(classOf[OriginalTimeValueAnnotation],
lbl.get[String,OriginalTimeValueAnnotation]
(classOf[OriginalTimeValueAnnotation]))
rtn.set(classOf[TimeIdentifierAnnotation],
lbl.get[String,TimeIdentifierAnnotation]
(classOf[TimeIdentifierAnnotation]))
    rtn.set(classOf[TimeValueAnnotation],
      lbl.get[Array[String],TimeValueAnnotation]
        (classOf[TimeValueAnnotation]))
//(return)
rtn
}
def isTimex(lbl:CoreLabel):Boolean
= lbl.get[String,TimeIdentifierAnnotation](
classOf[TimeIdentifierAnnotation]) != null
def relinkTimexes(sent:CoreMap):Int = {
//--Variables
val origTokens:Seq[CoreLabel]
= sent.get[JList[CoreLabel],OriginalTokensAnnotation](
classOf[OriginalTokensAnnotation])
val tokens:Seq[CoreLabel]
= sent.get[JList[CoreLabel],TokensAnnotation](
classOf[TokensAnnotation])
//--Functions
def copyTime(lbl:CoreLabel,startIndex:Int):CoreMap = {
val rtn = DataLib.copyTime(lbl)
//(old begin)
rtn.set(classOf[OriginalBeginIndexAnnotation],
lbl.get[java.lang.Integer,TokenBeginAnnotation]
(classOf[TokenBeginAnnotation]))
//(new begin)
rtn.set(classOf[BeginIndexAnnotation],
new java.lang.Integer(startIndex))
//(return)
rtn
}
//--Relink
val (revTimexes,endedOnTimex,lastTid)
= tokens.zipWithIndex.foldLeft(List[CoreMap](),false,""){
case ((timexes:List[CoreMap],lastTimex:Boolean,lastTid:String),
(tok:CoreLabel,index:Int)) =>
val thisTid:String =
if(isTimex(tok)) tok.get[String,TimeIdentifierAnnotation](classOf[TimeIdentifierAnnotation])
else ""
if(!lastTimex && isTimex(tok)){
//(case: start timex)
(copyTime(tok,index) :: timexes, true, thisTid)
} else if(lastTimex && isTimex(tok) && lastTid == thisTid){
//(case: in timex)
(timexes, true, thisTid)
} else if(!lastTimex && !isTimex(tok)){
//(case: not in timex)
(timexes, false, thisTid)
} else if(lastTimex && (!isTimex(tok) || lastTid != thisTid)){
//(case: ended timex)
timexes.head.set(classOf[EndIndexAnnotation],
new java.lang.Integer(index))
timexes.head.set(classOf[OriginalEndIndexAnnotation],
tokens.get(index-1).get[java.lang.Integer,TokenEndAnnotation]
(classOf[TokenEndAnnotation]))
if (isTimex(tok)) {
//(case: immediately started a new one)
(copyTime(tok,index) :: timexes, true, thisTid)
} else {
(timexes, false, thisTid)
}
} else {
throw new IllegalStateException("impossible")
}
}
//(last timex)
if(endedOnTimex){
revTimexes.head.set(classOf[EndIndexAnnotation],
new java.lang.Integer(tokens.size))
revTimexes.head.set(classOf[OriginalEndIndexAnnotation],
tokens.get(tokens.length-1).get[java.lang.Integer,TokenEndAnnotation]
(classOf[TokenEndAnnotation]))
}
val timexes = revTimexes.reverse
//--Merge
//(collapse timexes with gaps in them)
val filtered = (1 until timexes.length).map{ (i:Int) =>
val lastTID = timexes(i-1).get[String,TimeIdentifierAnnotation](
classOf[TimeIdentifierAnnotation])
val thisTID = timexes(i).get[String,TimeIdentifierAnnotation](
classOf[TimeIdentifierAnnotation])
if(lastTID == thisTID){
timexes(i-1).set(classOf[EndIndexAnnotation],
timexes(i).get[java.lang.Integer,EndIndexAnnotation]
(classOf[EndIndexAnnotation]))
timexes(i-1).set(classOf[OriginalEndIndexAnnotation],
timexes(i).get[java.lang.Integer,OriginalEndIndexAnnotation]
(classOf[OriginalEndIndexAnnotation]))
log("collapsed a timex: " + timexes(i))
None
} else {
Some(timexes(i))
}
}.filter{_.isDefined}.map{ _.get }.toList
//(append first timex)
val rtn = if(timexes.length < 2){ timexes } else { timexes(0) :: filtered }
//--Set
//(error check)
val tids = rtn.map{
_.get[String,TimeIdentifierAnnotation](
classOf[TimeIdentifierAnnotation]) }
if(tids.length != Set(tids:_*).size){
throw new IllegalStateException(""+tids.length + " > " + Set(tids:_*).size)
}
//(set)
sent.set(classOf[TimeExpressionsAnnotation],
seqAsJavaList(rtn))
rtn.length
}
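  // Illustrative example (not in the original source): given retokenized tokens
  // ["June", "5", "-", "7"] where "June", "5" and "7" share one
  // TimeIdentifierAnnotation and the bare "-" carries none, the fold above first
  // builds two spans; the merge step then collapses them into a single
  // TimeExpressionsAnnotation covering token indices [0, 4), and the method
  // would return 1.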
private lazy val tokenFact =
PTBTokenizer.factory(new CoreLabelTokenFactory(),
PTBTokenizerAnnotator.DEFAULT_OPTIONS)
private def tokenize(orig:CoreLabel, str:String,offsetZero:Int,
index:Int):Array[CoreLabel] = {
val tokIter = tokenFact.getTokenizer(new StringReader(str))
var offset = offsetZero
tokIter.map{ (label:CoreLabel) =>
//(merge labels)
label.set(classOf[CharacterOffsetBeginAnnotation],
new java.lang.Integer(offset))
label.set(classOf[CharacterOffsetEndAnnotation],
new java.lang.Integer(offset+label.originalText.length))
offset += label.originalText.length
//(save token offsets)
val merged = CoreMaps.merge(orig,label)
val begin:Int = {
if(label.get[JInt,TokenBeginAnnotation](classOf[TokenBeginAnnotation]) != null){
label.get[JInt,TokenBeginAnnotation](classOf[TokenBeginAnnotation])
} else {
index
}
}
val end:Int = {
if(label.get[JInt,TokenEndAnnotation](classOf[TokenEndAnnotation]) != null){
label.get[JInt,TokenEndAnnotation](classOf[TokenEndAnnotation])
} else {
index+1
}
}
merged.set(classOf[TokenBeginAnnotation],new java.lang.Integer(begin))
merged.set(classOf[TokenEndAnnotation],new java.lang.Integer(end))
assert(isTimex(orig) == isTimex(merged),
"timex mismatch on tokenization")
merged
}.toArray
}
def retokSentence(sent:CoreMap):Unit = {
//--Retokenize Sentence
val origTokens=sent.get[java.util.List[CoreLabel],TokensAnnotation](TOKENS)
val origLength = origTokens.size
val retok = origTokens.zipWithIndex.foldRight(List[CoreLabel]()){
case ((word:CoreLabel,index:Int),soFar:List[CoreLabel]) =>
val orig = word.originalText
val baseOffset = word.beginPosition
val finalOffsetGold = word.endPosition
val (lastTerm,otherTerms,finalOffset)
= orig.toCharArray.foldLeft(
(new StringBuilder,List[CoreLabel](),baseOffset)){
case ((tok:StringBuilder,toks:List[CoreLabel],offset:Int),chr:Char) =>
chr match {
//(tokenize on -)
case '-' => (new StringBuilder,
if(tok.length > 0){
toks :::
tokenize(word,tok.toString,offset,index).toList :::
tokenize(word,"-",offset+tok.length,index).toList
} else {
toks ::: tokenize(word,"-",offset,index).toList
},
offset+tok.length+1)
//(tokenize on /)
case '/' => (new StringBuilder,
if(tok.length > 0){
toks :::
tokenize(word,tok.toString,offset,index).toList :::
tokenize(word,"/",offset+tok.length,index).toList
} else {
toks ::: tokenize(word,"/",offset,index).toList
},
offset+tok.length+1)
//(tokenize on :)
case ':' => (new StringBuilder,
if(tok.length > 0){
toks :::
tokenize(word,tok.toString,offset,index).toList :::
tokenize(word,":",offset+tok.length,index).toList
} else {
toks ::: tokenize(word,":",offset,index).toList
},
offset+tok.length+1)
//(part of a token)
case _ => (tok.append(chr),toks,offset)
}
}
      assert(finalOffset+lastTerm.length == finalOffsetGold,
        "Offset mismatch for word: " + word + " orig: " + orig)
val newTok
= if(lastTerm.length > 0) {
otherTerms :::
tokenize(word,lastTerm.toString,finalOffset,index).toList
} else { otherTerms }
newTok ::: soFar
}
//(set result)
sent.set(classOf[OriginalTokensAnnotation],origTokens)
val jRetok:java.util.List[CoreLabel] = retok
sent.set(classOf[TokensAnnotation],jRetok)
//(error check)
assert(retok(retok.length-1).get[JInt,TokenEndAnnotation](classOf[TokenEndAnnotation])
== origLength, "Lengths changed!")
retok.foldLeft(0){ case (last:Int,tok:CoreLabel) =>
val curr:Int = tok.get[JInt,TokenBeginAnnotation](classOf[TokenBeginAnnotation])
assert(curr == last || curr == last+1,
"Token offsets jump is fishy: " + last + " -> " + curr)
curr
}
//--Re-Link Timexes
DataLib.relinkTimexes(sent)
}
def retokenize(doc:CoreMap):Unit = {
if(doc.containsKey[Boolean,IsTokenized](classOf[IsTokenized])){
return
}
val sents=doc.get[java.util.List[CoreMap],SentencesAnnotation](SENTENCES)
//(for each sentence)
sents.zipWithIndex.foreach{ case (sent:CoreMap,i:Int) =>
//(retokenize sentence)
retokSentence(sent)
}
doc.set(classOf[IsTokenized], true)
}
def findNumbers(sent:CoreMap):Unit = {
//--Set Numbers
//(find numbers)
val nums:JList[CoreMap] = NumberNormalizer.findAndMergeNumbers(sent);
//(error check)
nums.foldLeft(-1){ case (lastEnd:Int,map:CoreMap) =>
val begin = map.get[JInt,TokenBeginAnnotation](classOf[TokenBeginAnnotation])
val end = map.get[JInt,TokenEndAnnotation](classOf[TokenEndAnnotation])
assert(lastEnd < 0 || begin <= lastEnd, "invalid jump: " + lastEnd + " to " + begin)
end
}
//(tweak tokens)
val tokens:JList[CoreLabel] = nums.map{ (map:CoreMap) =>
val subTokens
= map.get[JList[CoreLabel],TokensAnnotation](classOf[TokensAnnotation])
if(subTokens != null && isTimex(subTokens.get(0))) {
new CoreLabel(CoreMaps.merge(map,copyTime(subTokens.get(0))))
} else {
new CoreLabel(map)
}
}.toList
//(set tokens)
sent.set(classOf[TokensAnnotation], tokens )
//--Relink Timexes
DataLib.relinkTimexes(sent)
//--Set Annotation
tokens.foreach{ (num:CoreLabel) =>
if(num.originalText == null || num.originalText.equals("")){
num.setOriginalText(num.word)
}
}
}
def normalizeNumbers(doc:CoreMap):Unit = {
if(doc.containsKey[Boolean,IsNumbered](classOf[IsNumbered])){
return
}
val sents=doc.get[java.util.List[CoreMap],SentencesAnnotation](SENTENCES)
//(for each sentence)
sents.zipWithIndex.foreach{ case (sent:CoreMap,i:Int) =>
//(normalize numbers)
findNumbers(sent)
}
doc.set(classOf[IsNumbered], true)
}
private def numType(str:String):NumberType.Value = {
if(str != null){
str match {
case "ORDINAL" => NumberType.ORDINAL
case "NUMBER" => NumberType.NUMBER
case "UNIT" => NumberType.UNIT
case _ => NumberType.NONE
}
} else {
NumberType.NONE
}
}
def number(lbl:CoreLabel):(NumberType.Value,Int) = {
//(get annotation)
val numVal = lbl.get[Number,NumericCompositeValueAnnotation](
classOf[NumericCompositeValueAnnotation])
val t = numType(lbl.get[String,NumericCompositeTypeAnnotation](
classOf[NumericCompositeTypeAnnotation]))
assert(t == null || t == NumberType.NONE || numVal != null, "Null value but not type: " + t)
//(get number)
if(t == null || t == NumberType.NONE){
(NumberType.NONE,Int.MinValue)
} else if(math.floor(numVal.doubleValue) == numVal.doubleValue){
(t,numVal.doubleValue.toInt)
} else {
      assert(numVal != null, "Bad number value")
(NumberType.REAL,numVal.intValue)
}
}
private def words(span:Buffer[CoreLabel],index:Indexing,train:Boolean,maxNum:Int=Int.MaxValue
):Array[Int] = {
span.map{ (lbl:CoreLabel) =>
val (typ,num) = DataLib.number(lbl)
if(typ != NumberType.NONE && num <= maxNum){
index.NUM
} else {
if(train){
index.str2w(lbl.word,false)
} else {
index.str2wTest(lbl.word,false)
}
}
}.toArray
}
private def pos(span:Buffer[CoreLabel], index:Indexing,train:Boolean):Array[Int] = {
span.map{ (lbl:CoreLabel) =>
if(train){
index.str2pos(lbl.tag)
} else {
index.str2posTest(lbl.tag)
}
}.toArray
}
private def nums(span:Buffer[CoreLabel],maxValue:Int=Int.MaxValue):Array[Int] = {
span.map{ (lbl:CoreLabel) =>
val (typ,num) = DataLib.number(lbl)
if(num > maxValue){
Int.MinValue
} else {
num
}
}.toArray
}
private def numTypes(span:Buffer[CoreLabel]):Array[NumberType.Value] = {
span.map{ (lbl:CoreLabel) =>
val (typ,num) = DataLib.number(lbl)
typ
}.toArray
}
def mkTimeSent(expr:CoreMap,tokens:Buffer[CoreLabel],index:Indexing,train:Boolean
):TimeSent = {
//(get span)
val beginIndex:Int = expr.get[JInt,BeginIndexAnnotation](classOf[BeginIndexAnnotation])
val endIndex:Int = expr.get[JInt,EndIndexAnnotation](classOf[EndIndexAnnotation])
val span = tokens.slice(beginIndex,endIndex)
mkTimeSent(span,index,train)
}
def mkTimeSent(tokens:Buffer[CoreLabel],index:Indexing,train:Boolean,
maxNumber:Int=Int.MaxValue):TimeSent = {
TimeSent(
words(tokens,index,train,maxNumber),
pos(tokens,index,train),
nums(tokens,maxNumber),
numTypes(tokens),
index)
}
def patchAttribute(typ:String,value:String,ground:DateTime):String = {
def zeroPad(i:Int,padding:Int):String
= "0"*(padding-i.toString.length)+i.toString
typ match {
case "TIME" =>
        if(value.matches("T\\d{4}")){
"T" + value.substring(1,3) + ":" + value.substring(3,5)
} else if(value.matches("T[0-9]+:[0-9]+")) {
"" + ground.getYear + "-" +
zeroPad(ground.getMonthOfYear,2) + "-" +
zeroPad(ground.getDayOfMonth,2) + value
} else if(value.matches("T[0-9]+")) {
"" + ground.getYear + "-" +
zeroPad(ground.getMonthOfYear,2) + "-" +
zeroPad(ground.getDayOfMonth,2) + "T" +
value.substring(1) + ":00"
} else if(value.equals("T00:00")){
"" + ground.getYear + "-" +
zeroPad(ground.getMonthOfYear,2) + "-" +
zeroPad(ground.getDayOfMonth,2) + "T24"
} else {
value
}
case "DATE" =>
        if (value.matches("\\d{8}T.*")) {
value.substring(0,4) + "-" +
value.substring(4,6) + "-" +
value.substring(6);
        } else if (value.matches("\\d{8}")) {
value.substring(0,4) + "-" +
value.substring(4,6) + "-" +
value.substring(6,8);
        } else if (value.matches("\\d\\d\\d\\d..")) {
value.substring(0,4) + "-" +
value.substring(4,6);
} else if (value.matches("[0-9X]{4}W[0-9X ]{1,2}-?[^-]*")) {
val Form = """([0-9X]{4})W([0-9X ]{1,2})(-?)([^-]*)""".r
val Form(year,week,dash,rest) = value
year + "-W" +
{if(week.matches("X+")) week else zeroPad(week.trim.toInt,2)} +
{if(rest.length > 0){ "-" + rest } else { "" }}
        } else if(value.matches("\\d\\d\\dX")){
value.substring(0,3)
} else {
value
}
case "DURATION" => value
case _ => value
}
}
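  // Illustrative behaviour of patchAttribute (not part of the original file),
  // assuming ground = 2013-02-05:
  //   ("TIME", "T0830")    -> "T08:30"
  //   ("TIME", "T08:30")   -> "2013-02-05T08:30"
  //   ("DATE", "20130205") -> "2013-02-05"
  //   ("DATE", "201302")   -> "2013-02"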
}
object Data {
def main(args:Array[String]):Unit = {
val props = new Properties();
props.setProperty("log.neatExit", "true");
props.setProperty("log.collapse", "exact");
StanfordRedwoodConfiguration.apply(props);
DateTimeZone.setDefault(DateTimeZone.UTC);
if(args.length < 1){ err("No dataset given"); exit(1) }
args(0).toLowerCase match {
case "tempeval2" => TempEval2.normalize(args(1),args(2))
case "gigaword" => Gigaword.process(args.slice(1,args.length))
case _ => err("Invalid dataset: " + args(0)); exit(1)
}
}
}
|
gangeli/ParsingTime
|
src/time/Data.scala
|
Scala
|
lgpl-3.0
| 29,838
|
package core.database
import scala.concurrent._
import play.api.Logger
import ExecutionContext.Implicits.global
import org.mongodb.scala.Completed
trait DatabaseComponent {
def db = MongoDb.getDatabase();
def dropAllCollections(): Future[Seq[Seq[Completed]]] = {
Logger.debug("Dropping all collections")
val namesFutures = db.listCollectionNames().toFuture()
namesFutures.flatMap { names =>
val collectionFutures:Seq[Future[Seq[Completed]]] = names map { name =>
Logger.debug("Creating a future to drop collection " + name)
db.getCollection(name).drop().toFuture()
}
Future.sequence(collectionFutures)
}
}
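  // Illustrative usage (not part of the original file): typically awaited in a
  // test setup before reseeding fixtures, e.g.
  //   Await.result(dropAllCollections(), 10.seconds)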
}
|
Vladislav-Zolotaryov/EchoCave
|
app/core/database/DatabaseComponent.scala
|
Scala
|
gpl-3.0
| 689
|
package com.twitter.finagle
import com.twitter.conversions.storage._
import com.twitter.finagle.client._
import com.twitter.finagle.http.{HttpClientTraceInitializer, HttpServerTraceInitializer, HttpTransport, Request, Response}
import com.twitter.finagle.http.codec.{HttpClientDispatcher, HttpServerDispatcher}
import com.twitter.finagle.http.filter.{DtabFilter, HttpNackFilter}
import com.twitter.finagle.netty3._
import com.twitter.finagle.param.{ProtocolLibrary, Stats}
import com.twitter.finagle.server._
import com.twitter.finagle.ssl.Ssl
import com.twitter.finagle.tracing._
import com.twitter.finagle.transport.Transport
import com.twitter.util.{Future, StorageUnit}
import java.net.{InetSocketAddress, SocketAddress}
import org.jboss.netty.channel.Channel
/**
* A rich client with a *very* basic URL fetcher. (It does not handle
* redirects, does not have a cookie jar, etc.)
*/
trait HttpRichClient { self: Client[Request, Response] =>
def fetchUrl(url: String): Future[Response] = fetchUrl(new java.net.URL(url))
def fetchUrl(url: java.net.URL): Future[Response] = {
val addr = {
val port = if (url.getPort < 0) url.getDefaultPort else url.getPort
new InetSocketAddress(url.getHost, port)
}
val req = http.RequestBuilder().url(url).buildGet()
val service = newService(Name.bound(addr), "")
service(req) ensure {
service.close()
}
}
}
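// Illustrative usage of the rich client above (not part of the original file):
//   val rsp: Future[Response] = Http.fetchUrl("http://example.com/")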
/**
* Http protocol support, including client and server.
*/
object Http extends Client[Request, Response] with HttpRichClient
with Server[Request, Response] {
object param {
case class MaxRequestSize(size: StorageUnit)
implicit object MaxRequestSize extends Stack.Param[MaxRequestSize] {
val default = MaxRequestSize(5.megabytes)
}
case class MaxResponseSize(size: StorageUnit)
implicit object MaxResponseSize extends Stack.Param[MaxResponseSize] {
val default = MaxResponseSize(5.megabytes)
}
case class Streaming(enabled: Boolean)
implicit object Streaming extends Stack.Param[Streaming] {
val default = Streaming(false)
}
case class Decompression(enabled: Boolean)
implicit object Decompression extends Stack.Param[Decompression] {
val default = Decompression(enabled = true)
}
case class CompressionLevel(level: Int)
implicit object CompressionLevel extends Stack.Param[CompressionLevel] {
val default = CompressionLevel(-1)
}
private[Http] def applyToCodec(
params: Stack.Params, codec: http.Http): http.Http =
codec
.maxRequestSize(params[MaxRequestSize].size)
.maxResponseSize(params[MaxResponseSize].size)
.streaming(params[Streaming].enabled)
.decompressionEnabled(params[Decompression].enabled)
.compressionLevel(params[CompressionLevel].level)
}
object Client {
val stack: Stack[ServiceFactory[Request, Response]] =
StackClient.newStack
.replace(TraceInitializerFilter.role, new HttpClientTraceInitializer[Request, Response])
}
case class Client(
stack: Stack[ServiceFactory[Request, Response]] = Client.stack,
params: Stack.Params = StackClient.defaultParams + ProtocolLibrary("http")
) extends StdStackClient[Request, Response, Client] {
protected type In = Any
protected type Out = Any
protected def newTransporter(): Transporter[Any, Any] = {
val com.twitter.finagle.param.Label(label) = params[com.twitter.finagle.param.Label]
val codec = param.applyToCodec(params, http.Http())
.client(ClientCodecConfig(label))
val Stats(stats) = params[Stats]
val newTransport = (ch: Channel) => codec.newClientTransport(ch, stats)
Netty3Transporter(
codec.pipelineFactory,
params + Netty3Transporter.TransportFactory(newTransport))
}
protected def copy1(
stack: Stack[ServiceFactory[Request, Response]] = this.stack,
params: Stack.Params = this.params
): Client = copy(stack, params)
protected def newDispatcher(transport: Transport[Any, Any]): Service[Request, Response] =
new HttpClientDispatcher(transport)
def withTls(cfg: Netty3TransporterTLSConfig): Client =
configured(Transport.TLSClientEngine(Some(cfg.newEngine)))
.configured(Transporter.TLSHostname(cfg.verifyHost))
.transformed { stk => http.TlsFilter.module +: stk }
def withTls(hostname: String): Client =
withTls(new Netty3TransporterTLSConfig({
case inet: InetSocketAddress => Ssl.client(hostname, inet.getPort)
case _ => Ssl.client()
}, Some(hostname)))
def withTlsWithoutValidation(): Client =
configured(Transport.TLSClientEngine(Some({
case inet: InetSocketAddress => Ssl.clientWithoutCertificateValidation(inet.getHostName, inet.getPort)
case _ => Ssl.clientWithoutCertificateValidation()
})))
def withMaxRequestSize(size: StorageUnit): Client =
configured(param.MaxRequestSize(size))
def withMaxResponseSize(size: StorageUnit): Client =
configured(param.MaxResponseSize(size))
def withStreaming(enabled: Boolean): Client =
configured(param.Streaming(enabled))
def withDecompression(enabled: Boolean): Client =
configured(param.Decompression(enabled))
def withCompressionLevel(level: Int): Client =
configured(param.CompressionLevel(level))
}
val client = Client()
def newService(dest: Name, label: String): Service[Request, Response] =
client.newService(dest, label)
def newClient(dest: Name, label: String): ServiceFactory[Request, Response] =
client.newClient(dest, label)
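  // Illustrative sketch (not in the original source): composing the
  // configuration methods defined on Client above; host names are hypothetical.
  //   val tweaked = Http.client
  //     .withTls("api.example.com")
  //     .withMaxResponseSize(10.megabytes)
  //   val svc = tweaked.newService(
  //     Name.bound(new InetSocketAddress("api.example.com", 443)), "example")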
object Server {
val stack: Stack[ServiceFactory[Request, Response]] =
StackServer.newStack
.replace(TraceInitializerFilter.role, new HttpServerTraceInitializer[Request, Response])
.replace(
StackServer.Role.preparer,
(next: ServiceFactory[Request, Response]) => (new HttpNackFilter).andThen(next))
}
case class Server(
stack: Stack[ServiceFactory[Request, Response]] = Server.stack,
params: Stack.Params = StackServer.defaultParams + ProtocolLibrary("http")
) extends StdStackServer[Request, Response, Server] {
protected type In = Any
protected type Out = Any
// This override allows java callers to use this method, working around https://issues.scala-lang.org/browse/SI-8905
override def configured[P](psp: (P, Stack.Param[P])): StackServer[Request, Response] = super.configured[P](psp)
protected def newListener(): Listener[Any, Any] = {
val com.twitter.finagle.param.Label(label) = params[com.twitter.finagle.param.Label]
val httpPipeline =
param.applyToCodec(params, http.Http())
.server(ServerCodecConfig(label, new SocketAddress{}))
.pipelineFactory
Netty3Listener(httpPipeline, params)
}
protected def newDispatcher(transport: Transport[In, Out],
service: Service[Request, Response]) = {
val dtab = new DtabFilter.Finagle[Request]
val Stats(stats) = params[Stats]
new HttpServerDispatcher(new HttpTransport(transport), dtab andThen service, stats.scope("dispatch"))
}
protected def copy1(
stack: Stack[ServiceFactory[Request, Response]] = this.stack,
params: Stack.Params = this.params
): Server = copy(stack, params)
def withTls(cfg: Netty3ListenerTLSConfig): Server =
configured(Transport.TLSServerEngine(Some(cfg.newEngine)))
def withMaxRequestSize(size: StorageUnit): Server =
configured(param.MaxRequestSize(size))
def withMaxResponseSize(size: StorageUnit): Server =
configured(param.MaxResponseSize(size))
def withStreaming(enabled: Boolean): Server =
configured(param.Streaming(enabled))
def withDecompression(enabled: Boolean): Server =
configured(param.Decompression(enabled))
def withCompressionLevel(level: Int): Server =
configured(param.CompressionLevel(level))
}
val server = Server()
def serve(addr: SocketAddress, service: ServiceFactory[Request, Response]): ListeningServer =
server.serve(addr, service)
}
|
rojanu/finagle
|
finagle-http/src/main/scala/com/twitter/finagle/Http.scala
|
Scala
|
apache-2.0
| 8,133
|
package spark.transitobh.util;
import _root_.java.math.MathContext
object FuzzyQueryUtil {
def fuzzyQuery(a:String,b:String) = SimilarityTool.compareStrings(a,b)
}
/// This class implements string comparison algorithm
/// based on character pair similarity
/// Source: http://www.catalysoft.com/articles/StrikeAMatch.html
object SimilarityTool{
def compareStrings(str1:String,str2:String):Double = {
val pairs1 = letterPairs(str1.toUpperCase())
val pairs2 = letterPairs(str2.toUpperCase())
    val intersection = pairs1.filter(pairs2.contains(_)).size.toFloat + pairs2.filter(pairs1.contains(_)).size.toFloat
    val union = (pairs1.size + pairs2.size).toFloat
    (intersection / union) * 100.0
}
def letterPairs(str:String) = for( i <- 0 to str.length-2) yield str.substring(i,i+2);
}
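// Illustrative worked example (not part of the original file): for "FRANCE" and
// "FRENCH", letterPairs gives [FR, RA, AN, NC, CE] and [FR, RE, EN, NC, CH];
// FR and NC occur on both sides, so intersection = 4 (counted once from each
// list), union = 10, and compareStrings returns 40.0.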
|
transitoAgora/sample-realtime-spark
|
src/main/scala/transitobh/scala/spark/transitobh/util/FuzzyQueryUtil.scala
|
Scala
|
mit
| 833
|
package dbtarzan.gui.orderby
import dbtarzan.db.{Field, OrderByDirection, OrderByField, OrderByFields}
import dbtarzan.gui.TControlBuilder
import dbtarzan.gui.util.{JFXUtil, OrderedListView, TComboStrategy}
import dbtarzan.localization.Localization
import scalafx.Includes._
import scalafx.beans.property.BooleanProperty
import scalafx.collections.ObservableBuffer
import scalafx.event.ActionEvent
import scalafx.geometry.{Insets, Pos}
import scalafx.scene.Parent
import scalafx.scene.control.{Button, Label}
import scalafx.scene.image.{Image, ImageView}
import scalafx.scene.layout.{BorderPane, HBox, Priority, Region}
import scalafx.scene.paint.Color
/**
to change the order by columns. A list of order by columns with a panel on the side to change it.
*/
class OrderByEditor(
possibleOrderByFields: List[Field],
currentOrderBys : Option[OrderByFields],
onSave : OrderByFields => Unit,
onCancel : () => Unit,
localization : Localization
) extends TControlBuilder {
private var saveButtonDisabled = BooleanProperty(true)
val upIcon: Image = JFXUtil.loadIcon("up.png")
val downIcon: Image = JFXUtil.loadIcon("down.png")
private val currentOrderByFields = currentOrderBys.map(_.fields).getOrElse(List.empty[OrderByField])
private val showField: Option[OrderByField] => BorderPane = (value: Option[OrderByField]) => new BorderPane {
center = new Label {
alignmentInParent = Pos.CenterLeft
textFill = Color.Black
text = value.map(v => v.field.name).getOrElse("")
}
right = new ImageView(iconFromDirection(value))
padding = Insets(0,20,0, 0)
}
private val comboStrategy = new TComboStrategy[OrderByField] {
override def removeFromCombo(comboBuffer: ObservableBuffer[OrderByField], item: OrderByField): Unit =
comboBuffer --= fieldInBothDirections(item.field)
override def addToCombo(comboBuffer: ObservableBuffer[OrderByField], item: OrderByField): Unit =
comboBuffer ++= fieldInBothDirections(item.field)
}
private def fieldInBothDirections(field: Field) = {
OrderByDirection.directions().map(d => OrderByField(field, d))
}
private var list = new OrderedListView[OrderByField](localization.add, showField, comboStrategy)
list.setListData(currentOrderByFields)
list.setComboData(possibleOrderByFields.flatMap(f => OrderByDirection.directions().map(d => OrderByField(f, d))))
list.onChange(data =>
saveButtonDisabled.value = data.isEmpty
)
private def iconFromDirection(value: Option[OrderByField]): Image = {
value.map(v =>
v.direction match {
case OrderByDirection.ASC => upIcon
case OrderByDirection.DESC => downIcon
}).orNull
}
private val layout = new BorderPane {
center = list.control
bottom = saveCancelButtons()
}
private def saveCancelButtons() : HBox = {
new HBox {
children = List(buttonSave(), new Region() { hgrow = Priority.Always }, buttonCancel() )
padding = Insets(10)
spacing = 10
}
}
private def buttonCancel() = new Button {
text = localization.cancel
alignmentInParent = Pos.CenterRight
onAction = (event: ActionEvent) => onCancel()
}
private def buttonSave() = new Button {
text = localization.save
alignmentInParent = Pos.CenterRight
disable <==> saveButtonDisabled
onAction = (_: ActionEvent) => {
if(list.listData().nonEmpty || JFXUtil.areYouSure(localization.unorderedQueryResults, localization.saveOrder))
onSave(OrderByFields(list.listData()))
}
}
def control : Parent = layout
}
|
aferrandi/dbtarzan
|
src/main/scala/dbtarzan/gui/orderby/OrderByEditor.scala
|
Scala
|
apache-2.0
| 3,559
|
class A { val x = """\\u2
|
lrytz/scala
|
test/files/neg/t4584.scala
|
Scala
|
apache-2.0
| 25
|
/*
* ScalaRay - Ray tracer based on pbrt (see http://pbrt.org) written in Scala
* Copyright (C) 2009, 2010, 2011 Jesper de Jong
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.jesperdj.scalaray
import java.io.File
import javax.imageio.ImageIO
import org.jesperdj.scalaray.camera._
import org.jesperdj.scalaray.filter._
import org.jesperdj.scalaray.integrator._
import org.jesperdj.scalaray.raster._
import org.jesperdj.scalaray.renderer._
import org.jesperdj.scalaray.sampler._
import org.jesperdj.scalaray.common._
import org.jesperdj.scalaray.vecmath._
class Timer (val name: String) {
private var totalTime: Long = 0L
private var startTime: Long = 0L
def start() { startTime = System.nanoTime }
def stop() { totalTime += System.nanoTime - startTime }
def time[T](block: => T): T = { start(); val result: T = block; stop(); result }
def total = totalTime
override def toString = "%s: %g seconds" format (name, totalTime / 1e9)
}
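// Illustrative usage of Timer (not part of the original file):
//   val timer = new Timer("sort")
//   val sorted = timer.time { data.sorted }
//   println(timer) // prints something like "sort: 0.0123000 seconds"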
object Main {
def main(args: Array[String]) {
println("ScalaRay - Ray tracer based on pbrt (see http://pbrt.org) written in Scala")
println("Copyright (C) 2009, 2010, 2011 Jesper de Jong")
// println("Press Enter to start")
// new java.util.Scanner(System.in).nextLine()
println()
println("Setup...")
val scene = Scene01.createScene()
val rect = new Rectangle(800, 600)
val samplePatternSpecs = new ListBuilder[SamplePatternSpec]
val surfaceIntegratorBuilder = new DirectLightingSurfaceIntegratorBuilder().withSamplePatternSpecs(samplePatternSpecs)
val volumeIntegratorBuilder = VacuumVolumeIntegratorBuilder
val integrator = new Integrator(scene, surfaceIntegratorBuilder, volumeIntegratorBuilder)
val sampler: Sampler = new StratifiedSampler(rect, 16384, 2, 2, true, samplePatternSpecs.build())
val filter: Filter = new BoxFilter
// val dir = Vector(1.0, -2.0, -3.0)
// val look = scene.boundingBox.centroid
// val pos = look - 3.0 * dir
// val camTr = Transform.lookAt(pos, look, Vector.YAxis).inverse
val camTr = Transform.translate(0.0, 0.75, 0.0)
val camera: Camera = new PerspectiveCamera(camTr, π / 4.0, rect.width, rect.height)
val renderer: Renderer = new SamplerRenderer(sampler, filter, camera, integrator)
// println("- Surface integrator: " + surfaceIntegrator)
// println("- Volume integrator: " + volumeIntegrator)
println("- Sampler: " + sampler)
println("- Filter: " + filter)
println("- Renderer: " + renderer)
println("- Camera: " + camera)
println()
println("Rendering...")
val timer = new Timer("Total rendering time")
val pixelBuffer = timer.time { renderer.render() }
println(timer.toString)
ImageIO.write(pixelBuffer.toImage, "png", new File("output.png"))
println()
println("Finished")
}
}
|
jesperdj/scalaray
|
src/main/scala/org/jesperdj/scalaray/Main.scala
|
Scala
|
gpl-3.0
| 3,440
|
/*
* Copyright (C) 2017 Vincibean <Andre Bessi>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.vincibean.scala.impatient.chapter10.exercise7
/**
* Construct an example where a class needs to be recompiled when one of the
* mixins changes. Start with class SavingsAccount extends Account with ConsoleLogger.
* Put each class and trait in a separate source file. Add a field to Account. In Main
* (also in a separate source file), construct a SavingsAccount and access the new
* field. Recompile all files except for SavingsAccount and verify that the program works.
* Now add a field to ConsoleLogger and access it in Main. Again, recompile
* all files except for SavingsAccount. What happens? Why?
*/
class Account {
var balance = 0.0
val bank = "Scala Bank"
}
|
Vincibean/ScalaForTheImpatient-Solutions
|
src/main/scala/org/vincibean/scala/impatient/chapter10/exercise7/Account.scala
|
Scala
|
gpl-3.0
| 1,414
|
import java.awt.event.InputEvent
import java.awt.event.KeyEvent.{VK_META => VK_COMMAND, _}
import java.awt.Robot
trait Robotic {
val robot: Robot
}
trait Commands {
this: Robotic =>
def switchWindow = {
robot keyPress VK_COMMAND
robot keyPress VK_TAB
robot keyRelease VK_COMMAND
robot keyRelease VK_TAB
Thread sleep 100
    Command()
}
case class Command() {
def -(c: Command): Command = this
}
def click = {
robot.mousePress(InputEvent.BUTTON1_MASK)
robot.mouseRelease(InputEvent.BUTTON1_MASK)
Command()
}
def doubleClick = click - click
def tripleClick = click - click - click
def press(key: Int) = {
robot keyPress key
robot keyRelease key
}
def cut = {
robot keyPress VK_COMMAND
press(VK_X)
robot keyRelease VK_COMMAND
Command()
}
def paste = {
robot keyPress VK_COMMAND
robot keyPress VK_V
robot keyRelease VK_V
robot keyRelease VK_COMMAND
Command()
}
def copy = {
robot keyPress VK_COMMAND
robot keyPress VK_C
robot keyRelease VK_C
robot keyRelease VK_COMMAND
Command()
}
}
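// Illustrative wiring of the traits above (not part of the original file):
//   object Macro extends Robotic with Commands {
//     val robot = new Robot()
//   }
//   Macro.copy
//   Macro.switchWindow
//   Macro.paste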
|
siderakis/dragon-dance
|
src/main/scala/Commands.scala
|
Scala
|
apache-2.0
| 1,128
|
/*
* Copyright 2012 Atlassian PTY LTD
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kadai
package config
import java.io.{ File, Serializable }
import scala.annotation.implicitNotFound
import scala.collection.JavaConverters.{ asScalaBufferConverter, asScalaSetConverter, mapAsJavaMapConverter }
import scala.util.control.{ Exception, NonFatal }
import org.joda.time.DateTime
import com.typesafe.config.{ Config, ConfigFactory, ConfigObject, ConfigParseOptions, ConfigResolveOptions }
import log.Logging
import Logging._
import scalaz.syntax.id._
/**
* Simple user-friendly wrapper around Config.
*
* Provides a single type-safe apply method for getting values out from the configuration.
*
* Usage:
* {{{
* val config = Configuration.load("filename.conf").get[Configuration]("objectName")
* val intThing = config[Int]("intPropertyName")
* val strThing = config[String]("stringPropertyName")
* }}}
* Note that formatting or other problems will throw exceptions.
*
* You can also optionally find correct config items or validate and check their correctness (with Either):
* {{{
* val intOption:Option[Int] = config.option[Int]("intPropertyName")
* val strThing: Either[Throwable, String] = config.valid[String]("stringPropertyName")
* }}}
*
* The Accessor type-classes implement the glue to get the specific type configuration item.
*
* Details on the underlying configuration file specification can be found here:
* https://github.com/typesafehub/config/blob/master/HOCON.md
*/
trait ConfigurationInstances {
val failIfMissing =
ConfigParseOptions.defaults.setAllowMissing(false)
def apply(c: Config) =
new Configuration(c)
def from(s: String) =
Configuration(ConfigFactory.parseString(s).resolve)
def from(f: File) =
Configuration {
ConfigFactory.defaultOverrides.withFallback(ConfigFactory.parseFile(f, failIfMissing).resolve)
}
/** The path is always relative and on the classpath. */
def load(path: String) =
Configuration {
ConfigFactory.defaultOverrides.withFallback {
ConfigFactory.load(path, failIfMissing, ConfigResolveOptions.defaults) // by default resolves
}
}
/** The type-class that is used to extract a config item of a particular type. */
@implicitNotFound(msg = "Cannot find Configuration.Accessor for type ${A} – it is needed to extract config text and instantiate an element of that type")
trait Accessor[A] extends ((Config, String) => A)
object Accessor {
def apply[A: Accessor]: Accessor[A] =
implicitly[Accessor[A]]
}
//
// standard type-class instances
//
implicit object IntAccessor extends Accessor[Int] {
def apply(c: Config, s: String) = c getInt s
}
implicit object StringAccessor extends Accessor[String] {
def apply(c: Config, s: String) = c getString s
}
implicit object SeqStringAccessor extends Accessor[Seq[String]] {
def apply(c: Config, s: String) = c getString s split ","
}
implicit object ListStringAccessor extends Accessor[List[String]] {
def apply(c: Config, s: String) = c.getStringList(s).asScala.toList
}
implicit object LongAccessor extends Accessor[Long] {
def apply(c: Config, s: String) = c getLong s
}
implicit object BooleanAccessor extends Accessor[Boolean] {
def apply(c: Config, s: String) = c getBoolean s
}
import org.joda.time.DateTime
implicit object DateTimeAccessor extends Accessor[DateTime] {
def apply(c: Config, s: String) = new DateTime(c getMilliseconds s)
}
implicit object FileAccessor extends Accessor[File] {
def apply(c: Config, s: String) = new File(c getString s)
}
implicit object ConfigAccessor extends Accessor[Config] {
def apply(c: Config, s: String) = c getConfig s
}
implicit object ConfigurationAccessor extends Accessor[Configuration] {
def apply(c: Config, s: String) = Configuration(c getConfig s)
}
implicit object ConfigObjectAccessor extends Accessor[ConfigObject] {
def apply(c: Config, s: String) = c getObject s
}
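  // Illustrative only (not part of the original file): additional accessors can
  // be supplied the same way, e.g. for java.net.URI:
  //   implicit object UriAccessor extends Accessor[java.net.URI] {
  //     def apply(c: Config, s: String) = new java.net.URI(c getString s)
  //   }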
implicit def ClassAccessor[T: Manifest] = new Accessor[Class[T]] {
def apply(c: Config, s: String): Class[T] =
Class.forName(c getString s).asInstanceOf[Class[T]] <| { cls =>
manifest[T].runtimeClass |> { expect =>
if (!(expect isAssignableFrom cls))
throw new ClassCastException("%s must be a subclass of %s (found [%s])".format(s, expect, cls))
}
}
}
implicit def ConfigReaderAccessor[A: ConfigReader]: Accessor[A] = new Accessor[A] {
def apply(c: Config, s: String) =
Configuration(c)[Configuration](s) |> { c => ConfigReader.run(c) }
}
def asString(c: Configuration): String = c.toConfig.root.render
// utils
private[config] val catcher = Exception.nonFatalCatch
}
object Configuration extends ConfigurationInstances {
private[kadai] class SerializationProxy(s: String) extends Serializable {
def readResolve: Object = Configuration from s
}
}
class Configuration protected[config] (val c: Config) extends Logging with Serializable {
import Configuration._
import Logging._
def apply[A: Accessor](s: String): A =
implicitly[Accessor[A]].apply(c, s)
def get[A: Accessor](s: String): A =
implicitly[Accessor[A]].apply(c, s)
def option[A: Accessor](s: String): Option[A] =
catcher.opt {
implicitly[Accessor[A]].apply(c, s)
}
def valid[A: Accessor](s: String): Either[Throwable, A] =
catcher.either {
implicitly[Accessor[A]].apply(c, s)
}
def keys(s: String): Iterable[String] =
implicitly[Accessor[ConfigObject]].apply(c, s).keySet.asScala
private[kadai] def config(s: String): Config =
apply[Config](s)
def toConfig: Config = c
def overriding(as: (String, String)*) =
Configuration {
import collection.JavaConverters._
ConfigFactory.parseMap(as.toMap.asJava).withFallback(c)
}
def withFallback(other: Configuration) = Configuration(c withFallback other.c)
override def toString = c.root.toString
override def equals(a: Any) =
if (!a.isInstanceOf[Configuration]) false
else c == a.asInstanceOf[Configuration].toConfig
override def hashCode = c.hashCode
private def access[A](s: String)(implicit accessor: Accessor[A]): A =
try accessor.apply(c, s)
catch {
case NonFatal(e) =>
error(c.toString + "")
throw e
}
private def writeReplace: Object = new SerializationProxy(asString(this))
}
|
simpleenergy/kadai
|
config/src/main/scala/kadai/config/Configuration.scala
|
Scala
|
apache-2.0
| 6,979
|
package org.elasticmq
import java.math.MathContext
import org.scalatest.EitherValues
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import scala.util.Random
class LimitsTest extends AnyWordSpec with Matchers with EitherValues {
"Validation of batch size limits in strict mode" should {
"pass if the size of the batch is less than the limit (10)" in {
Limits.verifyBatchSize(5, StrictSQSLimits) shouldBe Right(())
}
"fail if the size of the batch is greater than the limit (10)" in {
val error = Limits.verifyBatchSize(15, StrictSQSLimits).left.value
error shouldBe "AWS.SimpleQueueService.TooManyEntriesInBatchRequest"
}
}
"Validation of batch size limits in relaxed mode" should {
"always pass" in {
Limits.verifyBatchSize(-5, RelaxedSQSLimits) shouldBe Right(())
Limits.verifyBatchSize(5, RelaxedSQSLimits) shouldBe Right(())
Limits.verifyBatchSize(15, RelaxedSQSLimits) shouldBe Right(())
}
}
"Validation of number of messages from parameters in strict mode" should {
"pass if the number is between the limits (1-10)" in {
Limits.verifyNumberOfMessagesFromParameters(1, StrictSQSLimits) shouldBe Right(())
Limits.verifyNumberOfMessagesFromParameters(5, StrictSQSLimits) shouldBe Right(())
Limits.verifyNumberOfMessagesFromParameters(10, StrictSQSLimits) shouldBe Right(())
}
"fail the validation if the number is less than the lower bound" in {
val error = Limits.verifyNumberOfMessagesFromParameters(0, StrictSQSLimits).left.value
error shouldBe "ReadCountOutOfRange"
}
"fail the validation if the number is greater than the upper bound" in {
val error = Limits.verifyNumberOfMessagesFromParameters(15, StrictSQSLimits).left.value
error shouldBe "ReadCountOutOfRange"
}
}
"Validation of number of messages from parameters in relaxed mode" should {
"always pass the validation" in {
Limits.verifyNumberOfMessagesFromParameters(-5, RelaxedSQSLimits) shouldBe Right(())
Limits.verifyNumberOfMessagesFromParameters(0, RelaxedSQSLimits) shouldBe Right(())
Limits.verifyNumberOfMessagesFromParameters(5, RelaxedSQSLimits) shouldBe Right(())
Limits.verifyNumberOfMessagesFromParameters(15, RelaxedSQSLimits) shouldBe Right(())
}
}
"Validate number of message attributes" should {
val attributesLimit = 10
"fail on exceeded sqs limit in strict mode" in {
Limits.verifyMessageAttributesNumber(attributesLimit, StrictSQSLimits) shouldBe Right(())
Limits.verifyMessageAttributesNumber(attributesLimit + 1, StrictSQSLimits) shouldBe Left(
"Number of message attributes [11] exceeds the allowed maximum [10]."
)
}
"pass on exceeded sqs limit in relaxed mode" in {
Limits.verifyMessageAttributesNumber(attributesLimit, RelaxedSQSLimits) shouldBe Right(())
Limits.verifyMessageAttributesNumber(attributesLimit + 1, RelaxedSQSLimits) shouldBe Right(())
}
}
"Validation of message string attribute in strict mode" should {
"pass if string attribute contains only allowed characters" in {
val testString = List(0x9, 0xa, 0xd, 0x21, 0xe005, 0x10efff).map(_.toChar).mkString
Limits.verifyMessageStringAttribute("attribute1", testString, StrictSQSLimits) shouldBe Right(())
}
"fail if the string is empty" in {
Limits.verifyMessageStringAttribute("attribute1", "", StrictSQSLimits) shouldBe Left(
"Attribute 'attribute1' must contain a non-empty value of type 'String'"
)
}
"fail if string contains any not allowed character" in {
val testString = List(0x9, 0xa, 0xd, 0x21, 0xe005, 0x19, 0x10efff).map(_.toChar).mkString
val error = Limits.verifyMessageStringAttribute("attribute1", testString, StrictSQSLimits).left.value
error shouldBe "InvalidMessageContents"
}
}
"Validation of message string attribute in relaxed mode" should {
"pass if string attribute contains only allowed characters" in {
val testString = List(0x9, 0xa, 0xd, 0x21, 0xe005, 0x10efff).map(_.toChar).mkString
Limits.verifyMessageStringAttribute("attribute1", testString, RelaxedSQSLimits) shouldBe Right(())
}
"pass if the string is empty" in {
Limits.verifyMessageStringAttribute("attribute1", "", RelaxedSQSLimits) shouldBe Right(())
}
"pass if string contains any not allowed character" in {
val testString = List(0x9, 0xa, 0xd, 0x21, 0xe005, 0x19, 0x10efff).map(_.toChar).mkString
Limits.verifyMessageStringAttribute("attribute1", testString, RelaxedSQSLimits) shouldBe Right(())
}
}
"Validation of message number attribute in strict mode" should {
"pass if the number is between the limits (-10^128 - 10^126)" in {
Limits.verifyMessageNumberAttribute(
BigDecimal(10).pow(126).toString(),
"numAttribute",
StrictSQSLimits
) shouldBe Right(())
Limits.verifyMessageNumberAttribute(
(-BigDecimal(10).pow(128)).toString(),
"numAttribute",
StrictSQSLimits
) shouldBe Right(())
Limits.verifyMessageNumberAttribute(BigDecimal(0).toString(), "numAttribute", StrictSQSLimits) shouldBe Right(())
Limits.verifyMessageNumberAttribute(
BigDecimal(Random.nextDouble()).toString(),
"numAttribute",
StrictSQSLimits
) shouldBe Right(
()
)
}
"fail if the number is an empty string" in {
val emptyStringNumber = ""
val error = Limits.verifyMessageNumberAttribute(emptyStringNumber, "numAttribute", StrictSQSLimits)
error shouldBe Left("Attribute 'numAttribute' must contain a non-empty value of type 'Number'")
}
"fail if the number is bigger than the upper bound" in {
val overUpperBound = BigDecimal(10, MathContext.UNLIMITED).pow(126) + BigDecimal(0.1)
val error = Limits.verifyMessageNumberAttribute(overUpperBound.toString, "numAttribute", StrictSQSLimits)
error shouldBe Left(s"Number attribute value $overUpperBound should be in range (-10**128..10**126)")
}
"fail if the number is below the lower bound" in {
val belowLowerBound = -BigDecimal(10, MathContext.UNLIMITED).pow(128) - BigDecimal(0.1)
val error =
Limits.verifyMessageNumberAttribute(belowLowerBound.toString, "numAttribute", StrictSQSLimits)
error shouldBe Left(s"Number attribute value $belowLowerBound should be in range (-10**128..10**126)")
}
"fail if the number can't be parsed" in {
val error = Limits.verifyMessageNumberAttribute("12312312a", "numAttribute", StrictSQSLimits).left.value
error shouldBe s"Number attribute value 12312312a should be in range (-10**128..10**126)"
}
}
"Validation of message number attribute in relaxed mode" should {
"always pass the validation" in {
val belowLowerBound = -BigDecimal(10).pow(128) - BigDecimal(0.1)
val overUpperBound = BigDecimal(10).pow(126) + BigDecimal(0.1)
Limits.verifyMessageNumberAttribute(belowLowerBound.toString, "numAttribute", RelaxedSQSLimits) shouldBe Right(())
Limits.verifyMessageNumberAttribute(
BigDecimal(10).pow(126).toString(),
"numAttribute",
RelaxedSQSLimits
) shouldBe Right(())
Limits.verifyMessageNumberAttribute(
(-BigDecimal(10).pow(128)).toString(),
"numAttribute",
RelaxedSQSLimits
) shouldBe Right(())
Limits.verifyMessageNumberAttribute(overUpperBound.toString, "numAttribute", RelaxedSQSLimits) shouldBe Right(())
Limits.verifyMessageNumberAttribute("12312312a", "numAttribute", RelaxedSQSLimits) shouldBe Right(())
}
}
"Validation of message wait time in strict mode" should {
"pass if the wait time is between the limit range (0-20)" in {
Limits.verifyMessageWaitTime(0, StrictSQSLimits) shouldBe Right(())
Limits.verifyMessageWaitTime(13, StrictSQSLimits) shouldBe Right(())
Limits.verifyMessageWaitTime(20, StrictSQSLimits) shouldBe Right(())
}
"fail if the number is below the lower bound" in {
val error = Limits.verifyMessageWaitTime(-1, StrictSQSLimits).left.value
error shouldBe "InvalidParameterValue"
}
"fail if the number is above the upper bound" in {
val error = Limits.verifyMessageWaitTime(21, StrictSQSLimits).left.value
error shouldBe "InvalidParameterValue"
}
}
"Validation of message wait time in relaxed mode" should {
"pass if the wait time is bigger than 0" in {
Limits.verifyMessageWaitTime(0, RelaxedSQSLimits) shouldBe Right(())
Limits.verifyMessageWaitTime(13, RelaxedSQSLimits) shouldBe Right(())
Limits.verifyMessageWaitTime(20, RelaxedSQSLimits) shouldBe Right(())
Limits.verifyMessageWaitTime(25, RelaxedSQSLimits) shouldBe Right(())
}
"fail if the wait time is lower than 0" in {
val error = Limits.verifyMessageWaitTime(-1, StrictSQSLimits).left.value
error shouldBe "InvalidParameterValue"
}
}
"Validation of message length in strict mode" should {
"pass if the length is smaller than the limit (262144)" in {
Limits.verifyMessageLength(-5, StrictSQSLimits) shouldBe Right(())
Limits.verifyMessageLength(0, StrictSQSLimits) shouldBe Right(())
Limits.verifyMessageLength(100, StrictSQSLimits) shouldBe Right(())
Limits.verifyMessageLength(262144, StrictSQSLimits) shouldBe Right(())
}
"fail if the length is bigger than the limit" in {
val error = Limits.verifyMessageLength(300000, StrictSQSLimits).left.value
error shouldBe "MessageTooLong"
}
}
"Validation of message length in relaxed mode" should {
"always pass" in {
Limits.verifyMessageLength(-5, RelaxedSQSLimits) shouldBe Right(())
Limits.verifyMessageLength(0, RelaxedSQSLimits) shouldBe Right(())
Limits.verifyMessageLength(100, RelaxedSQSLimits) shouldBe Right(())
Limits.verifyMessageLength(262143, RelaxedSQSLimits) shouldBe Right(())
Limits.verifyMessageLength(262144, RelaxedSQSLimits) shouldBe Right(())
Limits.verifyMessageLength(300000, RelaxedSQSLimits) shouldBe Right(())
}
}
"Validation of queue name in strict mode" should {
"pass if queue name is made of alphanumeric characters and has length smaller than 80" in {
Limits.verifyQueueName("abc123.-_", isFifo = false, StrictSQSLimits) shouldBe Right(())
}
"fail if queue name contains invalid characters" in {
val error = Limits.verifyQueueName("invalid#characters&.fifo", isFifo = true, StrictSQSLimits).left.value
error shouldBe "InvalidParameterValue"
}
"fail if normal queue name exceeds 80 characters limit cap" in {
val error = Limits
.verifyQueueName(
"over80CharactersOver80CharactersOver80CharactersOver80CharactersOver80Characterss",
isFifo = false,
StrictSQSLimits
)
.left
.value
error shouldBe "InvalidParameterValue"
}
}
"Validation of queue name in relaxed mode" should {
"pass when queue name is made of alphanumeric characters" in {
Limits.verifyQueueName("abc123.-_", isFifo = false, RelaxedSQSLimits) shouldBe Right(())
}
"pass when normal queue name exceeds 80 characters limit cap" in {
Limits.verifyQueueName(
"over80CharactersOver80CharactersOver80CharactersOver80CharactersOver80Characterss",
isFifo = false,
RelaxedSQSLimits
) shouldBe Right(())
}
"fail if queue name contains invalid characters" in {
val error =
Limits.verifyQueueName("invalid#characters&.fifo", isFifo = true, RelaxedSQSLimits).left.value
error shouldBe "InvalidParameterValue"
}
}
"Validation of message body in strict mode" should {
"pass if string attribute contains only allowed characters" in {
val testString = List(0x9, 0xa, 0xd, 0x21, 0xe005, 0x10efff).map(_.toChar).mkString
Limits.verifyMessageBody(testString, StrictSQSLimits) shouldBe Right(())
}
"fail if the string is empty" in {
Limits.verifyMessageBody("", StrictSQSLimits) shouldBe Left(
"The request must contain the parameter MessageBody."
)
}
"fail if string contains any not allowed character" in {
val testString = List(0x9, 0xa, 0xd, 0x21, 0xe005, 0x19, 0x10efff).map(_.toChar).mkString
val error = Limits.verifyMessageBody(testString, StrictSQSLimits).left.value
error shouldBe "InvalidMessageContents"
}
}
"Validation of message body in relaxed mode" should {
"pass if string attribute contains only allowed characters" in {
val testString = List(0x9, 0xa, 0xd, 0x21, 0xe005, 0x10efff).map(_.toChar).mkString
Limits.verifyMessageBody(testString, RelaxedSQSLimits) shouldBe Right(())
}
"pass if the string is empty" in {
Limits.verifyMessageBody("", RelaxedSQSLimits) shouldBe Right(())
}
"pass if string contains any not allowed character" in {
val testString = List(0x9, 0xa, 0xd, 0x21, 0xe005, 0x19, 0x10efff).map(_.toChar).mkString
Limits.verifyMessageBody(testString, RelaxedSQSLimits) shouldBe Right(())
}
}
}
|
adamw/elasticmq
|
core/src/test/scala/org/elasticmq/LimitsTest.scala
|
Scala
|
apache-2.0
| 13,233
|
package org.jetbrains.plugins.scala.codeInspection.booleans
import com.intellij.codeInspection.{ProblemHighlightType, ProblemsHolder}
import com.intellij.openapi.project.Project
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.codeInspection.booleans.SimplifyBooleanUtil.isOfBooleanType
import org.jetbrains.plugins.scala.codeInspection.{AbstractFixOnPsiElement, AbstractInspection}
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.base.ScBooleanLiteral
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns._
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.createExpressionFromText
import scala.language.implicitConversions
class SimplifyBooleanMatchInspection extends AbstractInspection("Trivial match can be simplified") {
override protected def actionFor(implicit holder: ProblemsHolder): PartialFunction[PsiElement, Any] = {
case stmt: ScMatchStmt if stmt.isValid && SimpleBooleanMatchUtil.isSimpleBooleanMatchStmt(stmt) =>
val toHighlight = Option(stmt.findFirstChildByType(ScalaTokenTypes.kMATCH)).getOrElse(stmt)
holder.registerProblem(toHighlight, getDisplayName, ProblemHighlightType.GENERIC_ERROR_OR_WARNING, new SimplifyBooleanMatchToIfStmtQuickFix(stmt))
case _ =>
}
}
class SimplifyBooleanMatchToIfStmtQuickFix(stmt: ScMatchStmt) extends AbstractFixOnPsiElement("Simplify match to if statement", stmt) {
override protected def doApplyFix(scStmt: ScMatchStmt)
(implicit project: Project): Unit = {
if (SimpleBooleanMatchUtil.isSimpleBooleanMatchStmt(scStmt)) {
scStmt.replaceExpression(SimpleBooleanMatchUtil.simplifyMatchStmt(scStmt), removeParenthesis = false)
}
}
}
object SimpleBooleanMatchUtil {
def isSimpleBooleanMatchStmt(stmt: ScMatchStmt): Boolean = {
if (stmt.expr.isEmpty || !isOfBooleanType(stmt.expr.get)) return false
if (!stmt.caseClauses.forall(_.expr.isDefined)) return false
stmt.caseClauses.size match {
case 1 => getFirstBooleanClauseAndValue(stmt).isDefined
case 2 => isValidClauses(stmt)
case _ => false
}
}
def simplifyMatchStmt(stmt: ScMatchStmt): ScExpression = {
if (!isSimpleBooleanMatchStmt(stmt) || stmt.expr.isEmpty) return stmt
stmt.caseClauses.size match {
case 1 => simplifySingleBranchedStmt(stmt)
case 2 => simplifyDualBranchedStmt(stmt)
case _ => stmt
}
}
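  // Illustrative transformations (not part of the original file):
  //   x match { case true => a; case false => b }  ==>  if (x) { a } else { b }
  //   x match { case true => a }                   ==>  if (x) { a }
  //   x match { case false => a }                  ==>  if (!x) { a }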
private def simplifySingleBranchedStmt(stmt: ScMatchStmt): ScExpression = {
getFirstBooleanClauseAndValue(stmt) match {
case None => stmt
case Some((clause, value)) =>
val exprText = if (value) stmt.expr.get.getText else "!" + getParenthesisedText(stmt.expr.get)
createExpressionFromText(s"if ($exprText){ ${getTextWithoutBraces(clause)} }")(stmt.projectContext)
}
}
def simplifyDualBranchedStmt(stmt: ScMatchStmt): ScExpression = {
getPartitionedClauses(stmt) match {
case Some((trueClause, falseClause)) if trueClause.expr.nonEmpty && falseClause.expr.nonEmpty =>
val exprText = stmt.expr.get.getText
createExpressionFromText(
s"""
|if ($exprText) {
|${getTextWithoutBraces(trueClause)}
|} else {
|${getTextWithoutBraces(falseClause)}
|}
""".stripMargin)(stmt.projectContext)
case _ => stmt
}
}
private def getPartitionedClauses(stmt: ScMatchStmt): Option[(ScCaseClause, ScCaseClause)] = {
if (isSimpleClauses(stmt)) {
val parts = stmt.caseClauses.partition {
case ScCaseClause((Some(p: ScPattern), _, _)) => booleanConst(p).get
}
parts match {
case (Seq(trueClause), Seq(falseClause)) => Some(trueClause, falseClause)
case _ => None
}
} else {
(getFirstBooleanClauseAndValue(stmt), getFirstWildcardClause(stmt)) match {
case (Some((booleanClause, value)), Some(wildcardClause)) =>
if (value) Some((booleanClause, wildcardClause))
else Some((wildcardClause, booleanClause))
case _ => None
}
}
}
private def getFirstBooleanClauseAndValue(stmt: ScMatchStmt): Option[(ScCaseClause, Boolean)] = stmt.caseClauses.collectFirst {
case clause@ScCaseClause(Some(p: ScPattern), _, _) if booleanConst(p).isDefined => (clause, booleanConst(p).get)
}
private def getFirstWildcardClause(stmt: ScMatchStmt): Option[ScCaseClause] = stmt.caseClauses.collectFirst {
case clause@ScCaseClause(Some(_: ScWildcardPattern), _, _) => clause
}
private def isSimpleClauses(stmt: ScMatchStmt): Boolean = stmt.caseClauses.forall {
case ScCaseClause(Some(p: ScPattern), None, _) => booleanConst(p).isDefined
case _ => false
}
  private val BracedBlockRegex = """(?ms)\{(.+)\}""".r
private def getTextWithoutBraces(clause: ScCaseClause): String = clause.expr match {
case Some(block: ScBlock) =>
block.getText match {
case BracedBlockRegex(code) => code.trim
case _ => block.getText
}
case Some(t) => t.getText
case _ => clause.getText
}
private def getParenthesisedText(expr: ScExpression): String = {
expr match {
case e: ScInfixExpr => e match {
case ScParenthesisedExpr(expr: ScExpression) => expr.getText
case _ => s"(${e.getText})"
}
case _ => expr.getText
}
}
private def isValidClauses(stmt: ScMatchStmt): Boolean = getPartitionedClauses(stmt).nonEmpty
private def booleanConst(expr: ScPattern): Option[Boolean] = expr match {
case pattern: ScLiteralPattern => pattern.getLiteral match {
case ScBooleanLiteral(value) => Some(value)
case _ => None
}
case _ => None
}
}
|
jastice/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/codeInspection/booleans/SimplifyBooleanMatchInspection.scala
|
Scala
|
apache-2.0
| 5,819
|
/**
* Copyright (C) 2014 Kaj Magnus Lindberg (born 1979)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package debiki
import com.debiki.core._
import com.debiki.core.Prelude._
import play.api.libs.json._
case class AnySetting(
val name: String,
val assignedValue: Option[Any],
val default: Any,
val section: Option[SettingsTarget]) {
def value: Any = assignedValue getOrElse default
def valueIsTrue = value == "T" || value == true
}
/** If rawSettings is e.g. List(X, Y, Z), then settings in
* X override settings in Y, Z, and settings in Y override Z,
* Example: X is a sub forum, Y is a forum and Z is the website settings.
*/
case class SettingsChain(rawSettings: Seq[RawSettings]) {
def deriveSetting(name: String, default: Any): AnySetting = {
var anyAssignedValue: Option[Any] = None
var anySection: Option[SettingsTarget] = None
var i = 0
while (i < rawSettings.size) {
val settings = rawSettings(i)
i += 1
val anyValue: Option[Any] = settings.valuesBySettingName.get(name)
anyValue foreach { value =>
anyAssignedValue = Some(value)
anySection = Some(settings.target)
i = 999999 // break loop, value found
}
}
new AnySetting(name, anyAssignedValue, default = default, anySection)
}
/** Simply remaps Some("T"/"F") to Some(true/false).
*/
def deriveBoolSetting(name: String, default: Boolean): AnySetting = {
val anySetting = deriveSetting(name, default)
val boolSetting = anySetting.assignedValue match {
case None => anySetting
case Some(true) => anySetting
case Some(false) => anySetting
case Some("T") => anySetting.copy(assignedValue = Some(true))
case Some("F") => anySetting.copy(assignedValue = Some(false))
case Some(bad) =>
assErr("DwE77GHF4", s"Bad bool setting value: `$bad', for setting: `$name'")
}
boolSetting
}
}
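  // Illustrative behaviour (not part of the original file): with
  //   SettingsChain(Seq(subForumSettings, forumSettings, websiteSettings))
  // deriveSetting("title", "(no title)") returns the first assigned value found
  // while scanning sub forum -> forum -> website, falling back to the default;
  // deriveBoolSetting additionally remaps raw "T"/"F" values to true/false.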
case class Settings(settingsChain: SettingsChain) {
val title = derive("title", "(no title)")
val description = derive("description", "(no description)")
val horizontalComments = derive("horizontalComments", false)
val logoUrlOrHtml = derive("logoUrlOrHtml", """<span>Home</span>""")
val companyDomain = derive("companyDomain", "www.example.com")
val companyFullName = derive("companyFullName", "Unnamed Company Full Name")
val companyShortName = derive("companyShortName", "Unnamed Company")
val googleUniversalAnalyticsTrackingId = derive("googleUniversalAnalyticsTrackingId", "")
private def derive(settingName: String, default: Any) =
settingsChain.deriveSetting(settingName, default)
def toJson =
Json.obj(
"title" -> jsonFor(title),
"description" -> jsonFor(description),
"horizontalComments" -> jsonFor(horizontalComments),
"logoUrlOrHtml" -> jsonFor(logoUrlOrHtml),
"companyDomain" -> jsonFor(companyDomain),
"companyFullName" -> jsonFor(companyFullName),
"companyShortName" -> jsonFor(companyShortName),
"googleUniversalAnalyticsTrackingId" -> jsonFor(googleUniversalAnalyticsTrackingId))
private def jsonFor(setting: AnySetting): JsObject = {
var jsObject = Json.obj("defaultValue" -> anyToJsValue(setting.default))
setting.assignedValue foreach { value =>
jsObject += "anyAssignedValue" -> anyToJsValue(value)
}
jsObject
}
private def anyToJsValue(value: Any): JsValue = value match {
case x: String => JsString(x)
case x: Int => JsNumber(x)
case x: Long => JsNumber(x)
case x: Float => JsNumber(x)
case x: Double => JsNumber(x)
case x: Boolean => JsBoolean(x)
}
}
|
debiki/debiki-server-old
|
app/debiki/settings.scala
|
Scala
|
agpl-3.0
| 4,272
|
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.boot.layer
import com.ibm.spark.boot.KernelBootstrap
import com.ibm.spark.interpreter.Interpreter
import com.ibm.spark.kernel.protocol.v5.KMBuilder
import com.ibm.spark.kernel.protocol.v5.kernel.ActorLoader
import com.ibm.spark.utils.LogLike
import com.typesafe.config.Config
import org.apache.spark.{SparkConf, SparkContext}
import org.mockito.ArgumentCaptor
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{BeforeAndAfter, FunSpec, Matchers}
class StandardComponentInitializationSpec extends FunSpec with Matchers
with MockitoSugar with BeforeAndAfter
{
private val TestAppName = "test app"
private var mockConfig: Config = _
private var mockActorLoader: ActorLoader = _
private var mockSparkContext: SparkContext = _
private var mockInterpreter: Interpreter = _
private var spyComponentInitialization: StandardComponentInitialization = _
private class TestComponentInitialization
extends StandardComponentInitialization with LogLike
before {
mockConfig = mock[Config]
mockActorLoader = mock[ActorLoader]
mockSparkContext = mock[SparkContext]
mockInterpreter = mock[Interpreter]
spyComponentInitialization = spy(new TestComponentInitialization())
}
describe("StandardComponentInitialization") {
describe("when spark.master is set in config") {
it("should set spark.master in SparkConf") {
val expected = "some value"
doReturn(expected).when(mockConfig).getString("spark.master")
doReturn("").when(mockConfig).getString("spark_configuration")
// Stub out other helper methods to avoid long init process and to
// avoid failure when creating SparkContext
doReturn(mockSparkContext).when(spyComponentInitialization)
.reallyInitializeSparkContext(
any[Config], any[ActorLoader], any[KMBuilder], any[SparkConf])
doNothing().when(spyComponentInitialization)
.updateInterpreterWithSparkContext(
any[Config], any[SparkContext], any[Interpreter])
// Provide stub for interpreter classServerURI since it is also invoked
doReturn("").when(mockInterpreter).classServerURI
val sparkContext = spyComponentInitialization.initializeSparkContext(
mockConfig, TestAppName, mockActorLoader, mockInterpreter)
val sparkConf = {
val sparkConfCaptor = ArgumentCaptor.forClass(classOf[SparkConf])
verify(spyComponentInitialization).reallyInitializeSparkContext(
any[Config], any[ActorLoader], any[KMBuilder],
sparkConfCaptor.capture()
)
sparkConfCaptor.getValue
}
sparkConf.get("spark.master") should be (expected)
}
it("should not add ourselves as a jar if spark.master is not local") {
doReturn("local[*]").when(mockConfig).getString("spark.master")
spyComponentInitialization.updateInterpreterWithSparkContext(
mockConfig, mockSparkContext, mockInterpreter)
verify(mockSparkContext, never()).addJar(anyString())
}
it("should add ourselves as a jar if spark.master is not local") {
doReturn("notlocal").when(mockConfig).getString("spark.master")
// TODO: This is going to be outdated when we determine a way to
// re-include all jars
val expected =
com.ibm.spark.SparkKernel.getClass.getProtectionDomain
.getCodeSource.getLocation.getPath
spyComponentInitialization.updateInterpreterWithSparkContext(
mockConfig, mockSparkContext, mockInterpreter)
verify(mockSparkContext).addJar(expected)
}
}
}
}
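// Hedged sketch (plain Mockito, not part of this spec): the spy + doReturn pattern used
// above stubs selected members of a real object while the remaining methods keep their
// original behaviour, which is what lets initializeSparkContext run without a real
// SparkContext. Greeter and its members are illustrative names, not kernel code.
object SpyStubSketch {
  import org.mockito.Mockito.{doReturn, spy}

  class Greeter {
    def greeting: String = "hello"
    def greet(name: String): String = s"$greeting, $name"
  }

  def main(args: Array[String]): Unit = {
    val spied = spy(new Greeter)
    doReturn("hi").when(spied).greeting // stub one member; greet still runs its real code
    println(spied.greet("world")) // prints "hi, world"
  }
}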
|
Drooids/spark-kernel
|
kernel/src/test/scala/com/ibm/spark/boot/layer/StandardComponentInitializationSpec.scala
|
Scala
|
apache-2.0
| 4,311
|
/*
* Copyright 2008-2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb {
package jpademo {
package snippet {
import _root_.java.text.{ParseException,SimpleDateFormat}
import _root_.scala.xml.{NodeSeq,Text}
import _root_.net.liftweb.http.{RequestVar,S,SHtml}
import _root_.net.liftweb.util.{Helpers}
import _root_.net.liftweb.common.{Box,Empty,Full,Loggable}
import S._
import Helpers._
import _root_.net.liftweb.jpademo.model._
import Model._
import _root_.javax.persistence.{EntityExistsException,PersistenceException}
// Make an object so that other pages can access (ie Authors)
object BookOps {
// Object to hold search results
object resultVar extends RequestVar[List[Book]](Nil)
}
class BookOps extends Loggable {
val formatter = new _root_.java.text.SimpleDateFormat("yyyyMMdd")
def list (xhtml : NodeSeq) : NodeSeq = {
val books = Model.createNamedQuery[Book]("findAllBooks").getResultList()
books.flatMap(book =>
bind("book", xhtml,
"title" -> Text(book.title),
"published" -> Text(formatter.format(book.published)),
"genre" -> Text(if (book.genre != null) book.genre.toString else ""),
"author" -> Text(book.author.name),
"edit" -> SHtml.link("add.html", () => bookVar(book), Text(?("Edit")))))
}
// Set up a requestVar to track the book object for edits and adds
object bookVar extends RequestVar(new Book())
def book = bookVar.is
// Utility methods for processing a submitted form
def is_valid_Book_? (toCheck : Book) : Boolean =
List((if (toCheck.title.length == 0) { S.error("You must provide a title"); false } else true),
(if (toCheck.published == null) { S.error("You must provide a publish date"); false } else true),
(if (toCheck.genre == null) { S.error("You must select a genre"); false } else true),
(if (toCheck.author == null) { S.error("You must select an author"); false } else true)).forall(_ == true)
def setDate (input : String, toSet : Book) {
try {
toSet.published = formatter.parse(input)
} catch {
case pe : ParseException => S.error("Error parsing the date")
}
}
// The add snippet method
def add (xhtml : NodeSeq) : NodeSeq = {
def doAdd () =
if (is_valid_Book_?(book)) {
try {
Model.mergeAndFlush(book)
redirectTo("list.html")
} catch {
case ee : EntityExistsException => error("That book already exists.")
case pe : PersistenceException => error("Error adding book"); logger.error("Book add failed", pe)
}
}
// Hold a val here so that the closure holds it when we re-enter this method
val current = book
val authors = Model.createNamedQuery[Author]("findAllAuthors").getResultList()
val choices = authors.map(author => (author.id.toString -> author.name)).toList
val default = if (book.author != null) { Full(book.author.id.toString) } else { Empty }
bind("book", xhtml,
"id" -> SHtml.hidden(() => bookVar(current)),
"title" -> SHtml.text(book.title, book.title = _),
"published" -> SHtml.text(formatter.format(book.published), setDate(_, book)) % ("id" -> "published"),
"genre" -> SHtml.select(Genre.getNameDescriptionList, (Box.legacyNullTest(book.genre).map(_.toString) or Full("")), choice => book.genre = Genre.valueOf(choice).getOrElse(null)),
"author" -> SHtml.select(choices, default, {authId : String => book.author = Model.getReference(classOf[Author], authId.toLong)}),
"save" -> SHtml.submit(?("Save"), doAdd))
}
def searchResults (xhtml : NodeSeq) : NodeSeq = BookOps.resultVar.is.flatMap(result =>
bind("result", xhtml, "title" -> Text(result.title), "author" -> Text(result.author.name)))
def search (xhtml : NodeSeq) : NodeSeq = {
var title = ""
def doSearch () = {
BookOps.resultVar(Model.createNamedQuery[Book]("findBooksByTitle", "title" -> ("%" + title.toLowerCase + "%")).getResultList().toList)
}
bind("search", xhtml,
"title" -> SHtml.text(title, x => title = x),
"run" -> SHtml.submit(?("Search"), doSearch _))
}
}
}
}
}
|
wsaccaco/lift
|
examples/JPADemo/JPADemo-web/src/main/scala/net/liftweb/jpademo/snippet/Books.scala
|
Scala
|
apache-2.0
| 4,570
|
package se.lu.nateko.cp.meta.ingestion
import java.net.URI
import scala.concurrent.Future
import org.eclipse.rdf4j.model.IRI
import org.eclipse.rdf4j.model.ValueFactory
import org.eclipse.rdf4j.rio.helpers.ContextStatementCollector
import org.eclipse.rdf4j.rio.turtle.TurtleParser
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpMethods
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.model.MediaTypes
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.headers
import akka.stream.scaladsl.StreamConverters
import se.lu.nateko.cp.meta.utils.rdf4j.EnrichedJavaUri
import akka.stream.Materializer
import se.lu.nateko.cp.meta.api.CloseableIterator
class RemoteRdfGraphIngester(endpoint: URI, rdfGraph: URI)(implicit system: ActorSystem, m: Materializer) extends Ingester{
import system.dispatcher
override def getStatements(factory: ValueFactory): Ingestion.Statements = {
makeQuery().flatMap(
resp => resp.status match {
case StatusCodes.OK =>
val inputStr = resp.entity.dataBytes.runWith(StreamConverters.asInputStream())
val graphUri: IRI = rdfGraph.toRdf(factory)
val collector = new ContextStatementCollector(factory, graphUri)
val parser = new TurtleParser(factory)
parser.setRDFHandler(collector)
Future{
parser.parse(inputStr, rdfGraph.toString)
import scala.jdk.CollectionConverters.IteratorHasAsScala
new CloseableIterator.Wrap(collector.getStatements.iterator().asScala, () => ())
}
case _ =>
resp.discardEntityBytes()
Future.failed(new Exception(s"Got ${resp.status} from the server"))
}
)
}
private def makeQuery(): Future[HttpResponse] = {
Http().singleRequest(
HttpRequest(
method = HttpMethods.POST,
uri = endpoint.toString,
headers = headers.Accept(MediaTypes.`text/plain`) :: Nil,//expecting RDF Turtle in response
entity = constructQuery
)
)
}
private def constructQuery: String = s"""
|construct {?s ?p ?o}
|from <$rdfGraph>
|where {?s ?p ?o}""".stripMargin
}
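/** Hedged sketch (not part of this ingester): the stripMargin template above renders to a
 * plain SPARQL CONSTRUCT over the named graph; for rdfGraph = http://example.org/graph it
 * yields "construct {?s ?p ?o} from <http://example.org/graph> where {?s ?p ?o}".
 */
object ConstructQuerySketch {
  def main(args: Array[String]): Unit = {
    val rdfGraph = new java.net.URI("http://example.org/graph")
    val query = s"""
      |construct {?s ?p ?o}
      |from <$rdfGraph>
      |where {?s ?p ?o}""".stripMargin
    println(query)
  }
}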
|
ICOS-Carbon-Portal/meta
|
src/main/scala/se/lu/nateko/cp/meta/ingestion/RemoteSparqlConstructIngester.scala
|
Scala
|
gpl-3.0
| 2,148
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.benchmarks.module.regular
import com.bwsw.sj.common.engine.StreamingValidator
/**
* @author Pavel Tomskikh
*/
class Validator extends StreamingValidator
|
bwsw/sj-platform
|
contrib/benchmarks/sj-regular-performance-benchmark/src/main/scala/com/bwsw/sj/benchmarks/module/regular/Validator.scala
|
Scala
|
apache-2.0
| 987
|
package lert.elasticsearch.restclient
import java.io.InputStream
import com.fasterxml.jackson.databind.ObjectMapper
import lert.core.config.Source
import org.apache.http.{Header, HttpEntity}
import scala.reflect.ClassTag
trait RestClient {
def performRequest(method: String, endpoint: String, params: Map[String, String], entity: HttpEntity, headers: Header*): Response
def supports(source: Source): Boolean
}
object RestClient {
def apply(source: Source): RestClient = {
Seq(
new AWSRestClient(source),
new ElasticSearchRestClient(source)
).find(_.supports(source))
.getOrElse(throw new IllegalStateException(s"Couldn't find a suitable RestClient for $source"))
}
}
case class Response(body: Array[Byte], status: Int) {
def to[T: ClassTag](implicit objectMapper: ObjectMapper): T = {
objectMapper.readValue(body, implicitly[reflect.ClassTag[T]].runtimeClass.asInstanceOf[Class[T]])
}
}
object Response {
def apply(body: InputStream, status: Int): Response = try {
Response(Stream.continually(body.read).takeWhile(_ != -1).map(_.toByte).toArray, status)
} finally {
body.close()
}
}
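/** Hedged usage sketch (not part of lert): Response.apply drains the stream into a byte
 * array and always closes it, so callers can hand over a raw InputStream from an HTTP
 * entity without further cleanup. The JSON body below is illustrative only.
 */
object ResponseSketch {
  def main(args: Array[String]): Unit = {
    val body = new java.io.ByteArrayInputStream(
      """{"ok":true}""".getBytes(java.nio.charset.StandardCharsets.UTF_8))
    val response = Response(body, status = 200)
    println(new String(response.body, java.nio.charset.StandardCharsets.UTF_8) + " -> " + response.status)
  }
}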
|
l3rt/l3rt
|
elasticsearch-input/src/main/scala/lert/elasticsearch/restclient/RestClient.scala
|
Scala
|
apache-2.0
| 1,146
|
package idv.brianhsu.maidroid.plurk.fragment
import idv.brianhsu.maidroid.plurk._
import idv.brianhsu.maidroid.plurk.activity._
import idv.brianhsu.maidroid.plurk.TypedResource._
import idv.brianhsu.maidroid.plurk.adapter._
import idv.brianhsu.maidroid.plurk.dialog._
import idv.brianhsu.maidroid.plurk.util._
import idv.brianhsu.maidroid.plurk.view._
import idv.brianhsu.maidroid.ui.util.AsyncUI._
import idv.brianhsu.maidroid.ui.util.CallbackConversions._
import android.app.Activity
import android.app.AlertDialog
import android.app.ProgressDialog
import android.widget.ArrayAdapter
import android.content.DialogInterface
import android.os.Bundle
import android.net.Uri
import android.support.v4.app.Fragment
import android.view.LayoutInflater
import android.view.ViewGroup
import android.view.View
import android.view.Menu
import android.view.MenuItem
import android.view.MenuInflater
import android.widget.AdapterView
import android.widget.Toast
import android.webkit.WebViewClient
import android.webkit.WebView
import android.support.v4.app.FragmentActivity
import android.support.v7.widget.SearchView
import android.support.v4.view.MenuItemCompat
import org.bone.soplurk.api._
import org.bone.soplurk.api.PlurkAPI._
import org.bone.soplurk.model._
import scala.concurrent._
import scala.util.Try
class FanListFragment extends Fragment {
private implicit def activity = getActivity.asInstanceOf[FragmentActivity]
private def listViewHolder = Option(getView).map(_.findView(TR.userListListView))
private def loadingIndicatorHolder = Option(getView).map(_.findView(TR.userListLoadingIndicator))
private def errorNoticeHolder = Option(getView).map(_.findView(TR.userListErrorNotice))
private def emptyNoticeHolder = Option(getView).map(_.findView(TR.userListEmptyNotice))
private def retryButtonHolder = Option(getView).map(_.findView(TR.moduleErrorNoticeRetryButton))
private def plurkAPI = PlurkAPIHelper.getPlurkAPI(activity)
private val userID = PlurkAPIHelper.plurkUserID
private var fanList: Option[Vector[User]] = None
private lazy val searchView = new SearchView(activity)
private def getFanList: Vector[User] = {
fanList match {
case Some(list) => list
case None =>
var batch = plurkAPI.FriendsFans.getFansByOffset(userID, 100).get
var allFans: Vector[User] = batch.toVector.map(_.basicInfo)
while (batch != Nil) {
batch = plurkAPI.FriendsFans.getFansByOffset(userID, 100, offset = Some(allFans.size)).get
allFans = allFans ++ batch.toVector.map(_.basicInfo)
}
val distinctUser = allFans.distinct
fanList = Some(distinctUser)
distinctUser
}
}
private def showErrorNotice(message: String) {
loadingIndicatorHolder.foreach(_.hide())
errorNoticeHolder.foreach(_.setVisibility(View.VISIBLE))
errorNoticeHolder.foreach { errorNotice =>
errorNotice.setMessageWithRetry(message) { retryButton =>
retryButton.setEnabled(false)
errorNoticeHolder.foreach(_.setVisibility(View.GONE))
loadingIndicatorHolder.foreach(_.show())
updateList()
}
}
}
private def removeFan(adapter: UserListAdapter, user: User) {
val dialogBuilder = new AlertDialog.Builder(activity)
val displayName = (
user.displayName.filterNot(_.trim.isEmpty) orElse
Option(user.fullName).filterNot(_.trim.isEmpty) orElse
Option(user.nickname).filterNot(_.trim.isEmpty)
).getOrElse(user.id)
val confirmDialog =
dialogBuilder.setTitle(R.string.fragmentFanListBlockTitle)
.setMessage(activity.getString(R.string.fragmentFanListBlockMessage).format(displayName))
.setPositiveButton(R.string.ok, null)
.setNegativeButton(R.string.cancel, null)
.create()
confirmDialog.setOnShowListener(new DialogInterface.OnShowListener() {
override def onShow(dialog: DialogInterface) {
val okButton = confirmDialog.getButton(DialogInterface.BUTTON_POSITIVE)
okButton.setOnClickListener { view: View =>
val progressDialog = ProgressDialog.show(
activity,
activity.getString(R.string.pleaseWait),
activity.getString(R.string.fragmentFanListBlocking),
true, false
)
val future = Future { plurkAPI.Blocks.block(user.id).get }
future.onSuccessInUI { status =>
if (activity != null) {
adapter.removeUser(user.id)
progressDialog.dismiss()
confirmDialog.dismiss()
}
}
future.onFailureInUI { case e: Exception =>
if (activity != null) {
Toast.makeText(activity, R.string.fragmentFanListBlockFailed, Toast.LENGTH_LONG).show()
progressDialog.dismiss()
confirmDialog.dismiss()
}
}
}
}
})
confirmDialog.show()
}
def updateList() {
val future = Future { getFanList }
future.onSuccessInUI { allFans =>
if (activity != null) {
val adapter = new UserListAdapter(activity, allFans)
listViewHolder.foreach { listView =>
listView.setAdapter(adapter)
searchView.setOnQueryTextListener(new SearchView.OnQueryTextListener() {
override def onQueryTextChange(newText: String) = {
adapter.getFilter.filter(newText)
false
}
override def onQueryTextSubmit(text: String) = {
adapter.getFilter.filter(text)
true
}
})
emptyNoticeHolder.foreach(view => listView.setEmptyView(view))
listView.setOnItemClickListener(new AdapterView.OnItemClickListener() {
override def onItemClick(parent: AdapterView[_], view: View, position: Int, id: Long) {
val user = adapter.getItem(position).asInstanceOf[User]
UserTimelineActivity.startActivity(activity, user)
}
})
listView.setOnItemLongClickListener(new AdapterView.OnItemLongClickListener() {
override def onItemLongClick(parent: AdapterView[_], view: View, position: Int, id: Long): Boolean = {
val dialog = new AlertDialog.Builder(activity)
val itemList = Array(
activity.getString(R.string.fragmentFanListViewTimeline),
activity.getString(R.string.fragmentFanListBlock)
)
val itemAdapter = new ArrayAdapter(activity, android.R.layout.select_dialog_item, itemList)
val onClickListener = new DialogInterface.OnClickListener {
override def onClick(dialog: DialogInterface, which: Int) {
val user = adapter.getItem(position)
which match {
case 0 => UserTimelineActivity.startActivity(activity, user)
case 1 => removeFan(adapter, user)
}
}
}
dialog.setTitle(R.string.fragmentFanListAction)
.setAdapter(itemAdapter, onClickListener)
.show()
true
}
})
}
loadingIndicatorHolder.foreach(_.setVisibility(View.GONE))
}
}
future.onFailureInUI { case e: Exception =>
if (activity != null) {
showErrorNotice(activity.getString(R.string.fragmentFanFetchFailure))
}
}
}
override def onCreateOptionsMenu(menu: Menu, inflater: MenuInflater) {
inflater.inflate(R.menu.fragment_user_list, menu)
val searchItem = menu.findItem(R.id.userListSearch)
if (searchView.getParent != null) {
searchView.getParent.asInstanceOf[ViewGroup].removeView(searchView)
}
MenuItemCompat.setActionView(searchItem, searchView)
searchView.setIconified(true)
super.onCreateOptionsMenu(menu, inflater)
}
override def onOptionsItemSelected(item: MenuItem) = item.getItemId match {
case _ => super.onOptionsItemSelected(item)
}
override def onCreateView(inflater: LayoutInflater, container: ViewGroup,
savedInstanceState: Bundle): View = {
val view = inflater.inflate(R.layout.fragment_user_list, container, false)
updateList()
setHasOptionsMenu(true)
view
}
}
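/** Hedged sketch (standard library only, not part of this fragment): getFanList above pages
 * through the API in batches of 100 until a fetch comes back empty; this is the same loop
 * shape with a stubbed page-fetching function.
 */
object PaginationSketch {
  def fetchAll[A](fetchPage: Int => Vector[A]): Vector[A] = {
    var all = fetchPage(0)
    var batch = all
    while (batch.nonEmpty) {
      batch = fetchPage(all.size) // offset = number of items fetched so far
      all = all ++ batch
    }
    all.distinct
  }

  def main(args: Array[String]): Unit = {
    val data = Vector.tabulate(250)(i => s"user$i")
    println(fetchAll(offset => data.slice(offset, offset + 100)).size) // 250
  }
}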
|
brianhsu/MaidroidPlurk
|
src/main/scala/fragment/FanListFragment.scala
|
Scala
|
gpl-3.0
| 8,383
|
/*
* Copyright 2015 Geeoz Software
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package pawl.web
import org.openqa.selenium.By
/** List of locator types.
*/
trait Locators {
/** Title locator type. */
lazy val title = "title"
/** Search by ID. */
lazy val id = new LocatorType {
override def by(id: String): By = By.id(id)
}
/** Search by name attribute. */
lazy val name = new LocatorType {
override def by(name: String): By = By.name(name)
}
/** Search by CSS. */
lazy val css = new LocatorType {
override def by(css: String): By = By.cssSelector(css)
}
/** Search by XPath. */
lazy val xpath = new LocatorType {
override def by(xpath: String): By = By.xpath(xpath)
}
}
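/** Hedged usage sketch (not part of pawl): each strategy above simply delegates to the
 * corresponding Selenium By factory, relying on the By import at the top of this file.
 * The element names are illustrative only.
 */
object LocatorsSketch {
  def main(args: Array[String]): Unit = {
    val byId: By = By.id("login-button")
    val byCss: By = By.cssSelector("#login-button")
    val byXpath: By = By.xpath("//button[@id='login-button']")
    println(Seq(byId, byCss, byXpath).mkString("\n"))
  }
}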
|
geeoz/pawl
|
pawl-scalatest/src/main/scala/pawl/web/Locators.scala
|
Scala
|
apache-2.0
| 1,239
|
package improbable.launcher
import scala.sys.process._
object SpatialOSLauncher extends App {
"spatial build gsim".!
"spatial local start default_launch.pb.json".!
}
|
timtroendle/spatial-cimo
|
workers/gsim/src/main/scala/improbable/launcher/SpatialOSLauncher.scala
|
Scala
|
mit
| 171
|
/*
* Util.scala
* Utility functions for atomic discrete elements.
*
* Created By: Avi Pfeffer (apfeffer@cra.com)
* Creation Date: Feb 25, 2011
*
* Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email figaro@cra.com for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.library.atomic.discrete
import com.cra.figaro.util._
import scala.math.{ ceil, log }
object Util {
/**
* Generate a geometrically distributed random variable (the number of trials up to and
* including the first success) via inverse transform sampling: for U ~ Uniform(0,1) and
* q = probFail, ceil(log(U) / log(q)) satisfies P(X >= k) = q^(k-1).
*/
def generateGeometric(probFail: Double) =
ceil(log(random.nextDouble()) / log(probFail)).toInt
/**
* Density of the given number of positive outcomes under a binomial random variable with the given number of trials.
* Computing a binomial coefficient exactly can be very expensive for a large number of trials, so this method uses
* an approximation algorithm when the number of trials is sufficiently large.
*/
def binomialDensity(numTrials: Int, probSuccess: Double, numPositive: Int): Double = {
val q = 1 - probSuccess
if (numTrials > 10) {
val logNFact = JSci.maths.ExtraMath.logFactorial(numTrials)
val logKFact = JSci.maths.ExtraMath.logFactorial(numPositive)
val logNMinusKFact = JSci.maths.ExtraMath.logFactorial(numTrials-numPositive)
val logBinomialCoefficient = logNFact - (logKFact + logNMinusKFact)
val result = logBinomialCoefficient + (numPositive*Math.log(probSuccess) + ((numTrials-numPositive)*Math.log(q)))
Math.exp(result)
} else {
JSci.maths.ExtraMath.binomial(numTrials, numPositive) * math.pow(probSuccess, numPositive) * math.pow(q, numTrials - numPositive)
}
}
}
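/** Hedged sketch (standard library only, not Figaro code): the same log-space idea as
 * binomialDensity above, so the numerics can be sanity-checked without JSci. logFactorial
 * is computed by summing logs, which is adequate for a demonstration.
 */
object BinomialDensitySketch {
  private def logFactorial(n: Int): Double = (2 to n).map(i => log(i.toDouble)).sum

  def density(numTrials: Int, probSuccess: Double, numPositive: Int): Double = {
    val logCoeff = logFactorial(numTrials) - logFactorial(numPositive) - logFactorial(numTrials - numPositive)
    scala.math.exp(logCoeff + numPositive * log(probSuccess) + (numTrials - numPositive) * log(1 - probSuccess))
  }

  def main(args: Array[String]): Unit =
    println(density(4, 0.5, 2)) // P(X = 2) for Binomial(n = 4, p = 0.5) is 6 * 0.0625 = 0.375
}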
|
jyuhuan/figaro
|
Figaro/src/main/scala/com/cra/figaro/library/atomic/discrete/Util.scala
|
Scala
|
bsd-3-clause
| 1,748
|
package com.twitter.jaqen.ntuple
import scala.language.experimental.macros
import scala.language.implicitConversions
import NTupleMacros._
/**
* A tuple where fields can be accessed by name
*
* @author Julien Le Dem
*/
trait NTuple[+T <: NTuple[T]] {
type Type = T
/**
* returns the field named 'key' with the proper type
* if key does not exist, a compilation error will occur
* @param key a literal or a Symbol
* example:
* <code>
* val tuple = t('a -> 1, 'b -> "2")
* tuple('a)
* => 1 (of type Int)
* tuple('b)
* => "2" (of type String)
* </code>
*/
def get(key: Any) = macro applyImp[T]
/** @see get */
def apply(key: Any) = macro applyImp[T]
/**
* adds a pair key -> value to the tuple
* if key already exists, a compilation error will occur
* key must be a literal or a symbol.
* example: <code>
* val tuple = t('a -> 1, 'b -> 2)
* tuple + ('c -> 3)
* => t('a -> 1, 'b -> 2, 'c -> 3)
* </code>
*/
def add(pair: (Any, Any)) = macro plusImpl[T]
/** @see add */
def +(pair: (Any, Any)) = macro plusImpl[T]
/**
* concats another NTuple to this one
* if a key is defined in both tuples a compilation error will occur
* <code>
* val tuple1 = t('a -> 1, 'b -> 2)
* val tuple2 = t('c -> 3, 'd -> 4)
* tuple1 ++ tuple2
* => t('a -> 1, 'b -> 2, 'c -> 3, 'd -> 4)
* </code>
*/
def concat[T2 <: NTuple[T2]](t: T2) = macro plusplusImpl[T,T2]
/** @see concat */
def ++[T2 <: NTuple[T2]](t: T2) = macro plusplusImpl[T,T2]
/**
* removes a key from the tuple
* if key does not exist, a compilation error will occur
* <code>
* val tuple = t('a -> 1, 'b -> 2)
* tuple - 'a
* => t('b -> 2)
* </code>
*/
def remove(key: Any) = macro minusImpl[T]
/** @see remove */
def -(key: Any) = macro minusImpl[T]
/**
* removes a list of keys from the tuple
* if a key does not exist, a compilation error will occur
* <code>
* val tuple = t('a -> 1, 'b -> 2, 'c -> 3)
* tuple.discard('a, 'c)
* => t('b -> 2)
* </code>
*/
def discard(keysToRemove: Any*) = macro discardImpl[T]
/**
* keeps only entries for the list of keys
* if a key does not exist, a compilation error will occur
* <code>
* val tuple = t('a -> 1, 'b -> 2, 'c -> 3)
* tuple.project('a, 'c)
* => t('a -> 1, 'c -> 3)
* </code>
*/
def project(keysToKeep: Any*) = macro projectImpl[T]
/**
* takes a key -> value pair and replaces the existing key with the given value
* if key does not exist, a compilation error will occur
* example:
* <code>
* val tuple = t('a -> 1, 'b -> 2)
* tuple -+ ('a -> 3)
* => t('a -> 3, 'b -> 2)
* </code>
*/
def replace(pair: (Any, Any)) = macro replaceImpl[T]
/** @see replace */
def -+(pair: (Any, Any)) = macro replaceImpl[T]
/**
* prefixes all the key names with the given prefix.
* useful to concatenate 2 tuples
* example:
* <code>
* t('a -> 1, 'b -> 2).prefix("t")
* => t('ta -> 1, 'tb -> 2)
* </code>
*/
def prefix(prefix: String) = macro prefixImpl[T]
/**
* takes a pair (inputs -> output) and a function
* inputs: a tuple of the keys of the values to pass to the function
* output: the key to set with the result
* @return the resulting tuple with the output key set to the result of the function
* example:
* <code>
* val tuple = t('a -> 1, 'b -> 2)
* tuple.map(('a, 'b) -> 'c) { (a: Int, b: Int) => a + b }
* => t('a -> 1, 'b -> 2, 'c -> 3)
* </code>
*/
def map(pair: Any)(f: Any) = macro mapImpl[T]
/**
* @return a string representation of this tuple
* example:
* <code>
* t('a -> 1, 'b -> 2).mkString
* (a -> 1, b -> 2)
* </code>
*/
def mkString = macro mkStringImpl[T]
/**
* converts this tuple to a Map.
* @return an immutable Map
*/
def toMap = macro toMapImpl[T]
}
object NTuple {
/**
* creates a new NTuple from a list of key -> value pairs
* the types of the values are preserved and will be returned accordingly when apply is called
* if a key is defined twice a compilation error will occur
* <code>
* val tuple1 = t('a -> 1, 'b -> "2")
* </code>
*/
def t(pairs: Any*) = macro newTupleImpl
/**
* provides a way to use NTuple types in method signatures
* The following will compile:
* val tupleType = typeOf[(String, Int)]('a, 'b)
* val a: String = "Foo"
* val b: Int = 1
* val tuple: tupleType.Type = t('a -> a, 'b -> b)
* R is a tuple of n types
* keys is a list of n field names
* @return an NTupleType[NTuple{n}[K1,V1,K2,V2,...]] with
* @param R the types of the fields: (V1, V2, ...)
* @param keys the names of the fields: K1, K2, ...
*/
def typeOf[R](keys: Any*) = macro typeOfImpl[R]
implicit def nTupleToString[T <: NTuple[T]](ntuple: T): String = macro nTupleToStringImpl[T]
implicit def listOfNTupleToRichList[T <: NTuple[T]](list: List[T]) = RichList[T](list)
}
final class NTupleType[T <: NTuple[T]]() {
type Type = T
}
case class RichList[T] (val list: List[T]) extends AnyVal {
def nmap(pair: Any)(f: Any) = macro listMapImpl[T]
}
|
twitter/jaqen
|
jaqen-ntuple/src/main/scala/com/twitter/jaqen/ntuple/NTuple.scala
|
Scala
|
apache-2.0
| 5,167
|
package com.typesafe.slick.testkit.tests
import org.junit.Assert._
import scala.slick.ast.Dump
import com.typesafe.slick.testkit.util.{TestkitTest, TestDB}
class MapperTest(val tdb: TestDB) extends TestkitTest {
import tdb.profile.simple._
override val reuseInstance = true
def testMappedEntity {
case class User(id: Option[Int], first: String, last: String)
object Users extends Table[User]("users") {
def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
def first = column[String]("first")
def last = column[String]("last")
def * = id.? ~: baseProjection <> (User, User.unapply _)
def baseProjection = first ~ last
def forInsert = baseProjection <>
({ (f, l) => User(None, f, l) }, { u:User => Some((u.first, u.last)) })
val findByID = createFinderBy(_.id)
}
Users.ddl.create
Users.baseProjection.insert("Homer", "Simpson")
/* Using Users.forInsert so that we don't put a NULL value into the ID
* column. H2 and SQLite allow this but PostgreSQL doesn't. */
Users.forInsert.insertAll(
User(None, "Marge", "Bouvier"),
User(None, "Carl", "Carlson"),
User(None, "Lenny", "Leonard")
)
val lastNames = Set("Bouvier", "Ferdinand")
assertEquals(1, Query(Users).where(_.last inSet lastNames).list.size)
val updateQ = Users.where(_.id === 2.bind).map(_.forInsert)
println("Update: "+updateQ.updateStatement)
updateQ.update(User(None, "Marge", "Simpson"))
assertTrue(Query(Users.where(_.id === 1).exists).first)
Users.where(_.id between(1, 2)).foreach(println)
println("ID 3 -> " + Users.findByID.first(3))
assertEquals(
Set(User(Some(1), "Homer", "Simpson"), User(Some(2), "Marge", "Simpson")),
Users.where(_.id between(1, 2)).list.toSet
)
assertEquals(
User(Some(3), "Carl", "Carlson"),
Users.findByID.first(3)
)
}
def testUpdate {
case class Data(a: Int, b: Int)
object Ts extends Table[Data]("T") {
def a = column[Int]("A")
def b = column[Int]("B")
def * = a ~ b <> (Data, Data.unapply _)
}
Ts.ddl.create
Ts.insertAll(new Data(1, 2), new Data(3, 4), new Data(5, 6))
val updateQ = Ts.where(_.a === 1)
Dump(updateQ, "updateQ: ")
println("Update: "+updateQ.updateStatement)
updateQ.update(Data(7, 8))
val updateQ2 = Ts.where(_.a === 3).map(identity)
Dump(updateQ2, "updateQ2: ")
println("Update2: "+updateQ2.updateStatement)
updateQ2.update(Data(9, 10))
assertEquals(
Set(Data(7, 8), Data(9, 10), Data(5, 6)),
Query(Ts).list.toSet
)
}
def testMappedType {
sealed trait Bool
case object True extends Bool
case object False extends Bool
implicit val boolTypeMapper = MappedColumnType.base[Bool, Int](
{ b =>
assertNotNull(b)
if(b == True) 1 else 0
}, { i =>
assertNotNull(i)
if(i == 1) True else False
}
)
object T extends Table[(Int, Bool, Option[Bool])]("t2") {
def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
def b = column[Bool]("b")
def c = column[Option[Bool]]("c")
def * = id ~ b ~ c
}
T.ddl.create
(T.b ~ T.c).insertAll((False, None), (True, Some(True)))
assertEquals(Query(T).list.toSet, Set((1, False, None), (2, True, Some(True))))
assertEquals(T.where(_.b === (True:Bool)).list.toSet, Set((2, True, Some(True))))
assertEquals(T.where(_.b === (False:Bool)).list.toSet, Set((1, False, None)))
}
def testMappedRefType {
sealed trait Bool
case object True extends Bool
case object False extends Bool
implicit val boolTypeMapper = MappedColumnType.base[Bool, String](
{ b =>
assertNotNull(b)
if(b == True) "y" else "n"
}, { i =>
assertNotNull(i)
if(i == "y") True else False
}
)
object T extends Table[(Int, Bool, Option[Bool])]("t3") {
def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
def b = column[Bool]("b")
def c = column[Option[Bool]]("c")
def * = id ~ b ~ c
}
T.ddl.create
(T.b ~ T.c).insertAll((False, None), (True, Some(True)))
assertEquals(Query(T).list.toSet, Set((1, False, None), (2, True, Some(True))))
assertEquals(T.where(_.b === (True:Bool)).list.toSet, Set((2, True, Some(True))))
assertEquals(T.where(_.b === (False:Bool)).list.toSet, Set((1, False, None)))
}
/*
def testGetOr {
object T extends Table[Option[Int]]("t4") {
def year = column[Option[Int]]("YEAR")
def * = year
}
T.ddl.create
T.insertAll(Some(2000), None)
val q = T.map(t => (t.year.getOr(2000), (t.year.getOr(2000)-0)))
println(q.selectStatement)
q.foreach(println)
}
*/
}
|
boldradius/slick
|
slick-testkit/src/main/scala/com/typesafe/slick/testkit/tests/MapperTest.scala
|
Scala
|
bsd-2-clause
| 4,767
|
package kartograffel.server.infrastructure.doobie.repository
import eu.timepit.refined._
import kartograffel.server.ArbitraryInstances._
import kartograffel.server.domain.model.Radius.LengthRange
import kartograffel.server.domain.model._
import kartograffel.server.infrastructure.doobie.DbSpecification._
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
class DbTagRepositoryTest extends AnyFunSuite with Matchers {
test("create") {
val graffel = sampleOf[Graffel]
val tag = sampleOf[Tag].copy(graffelId = graffel.id)
val q = for {
_ <- DbGraffelRepository.create(graffel)
_ <- DbTagRepository.create(tag)
t <- DbTagRepository.find(tag.name, tag.graffelId)
} yield t
val result = runQuery(q).unsafeRunSync()
result should be(defined)
result should contain(tag)
}
test("find") {
val graffel = sampleOf[Graffel]
val tag = sampleOf[Tag].copy(graffelId = graffel.id)
val q = for {
_ <- DbGraffelRepository.create(graffel)
_ <- DbTagRepository.create(tag)
t <- DbTagRepository.find(tag.name, tag.graffelId)
} yield t
val result = runQuery(q).unsafeRunSync()
result should be(defined)
result should contain(tag)
}
test("findTagsByPosition") {
val graffel = sampleOf[Graffel]
val tag = sampleOf[Tag].copy(graffelId = graffel.id)
val q = for {
_ <- DbGraffelRepository.create(graffel)
_ <- DbTagRepository.create(tag)
t <- DbTagRepository.findTagsByPosition(
graffel.position,
Radius(refineMV[LengthRange](1), kilometer)
)
} yield t
val result = runQuery(q).unsafeRunSync()
result should not be empty
result.headOption should contain((tag, graffel))
}
}
|
fthomas/kartograffel
|
modules/server/jvm/src/test/scala/kartograffel/server/infrastructure/doobie/repository/DbTagRepositoryTest.scala
|
Scala
|
apache-2.0
| 1,766
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.batch.sql
import org.apache.flink.api.common.typeinfo.BasicTypeInfo.{INT_TYPE_INFO, LONG_TYPE_INFO, STRING_TYPE_INFO}
import org.apache.flink.api.common.typeinfo.LocalTimeTypeInfo.{LOCAL_DATE, LOCAL_DATE_TIME, LOCAL_TIME}
import org.apache.flink.api.common.typeinfo.PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO
import org.apache.flink.api.common.typeinfo.SqlTimeTypeInfo.{DATE, TIME, TIMESTAMP}
import org.apache.flink.api.common.typeinfo.Types
import org.apache.flink.api.common.typeinfo.Types.INSTANT
import org.apache.flink.api.java.typeutils._
import org.apache.flink.api.scala._
import org.apache.flink.table.api.ValidationException
import org.apache.flink.table.api.config.ExecutionConfigOptions
import org.apache.flink.table.dataformat.DataFormatConverters.{LocalDateConverter}
import org.apache.flink.table.dataformat.{Decimal, SqlTimestamp}
import org.apache.flink.table.planner.expressions.utils.{RichFunc1, RichFunc2, RichFunc3, SplitUDF}
import org.apache.flink.table.planner.plan.rules.physical.batch.BatchExecSortRule
import org.apache.flink.table.planner.runtime.utils.BatchTableEnvUtil.parseFieldNames
import org.apache.flink.table.planner.runtime.utils.BatchTestBase.row
import org.apache.flink.table.planner.runtime.utils.TestData._
import org.apache.flink.table.planner.runtime.utils.UserDefinedFunctionTestUtils._
import org.apache.flink.table.planner.runtime.utils.{BatchTableEnvUtil, BatchTestBase, UserDefinedFunctionTestUtils}
import org.apache.flink.table.planner.utils.DateTimeTestUtil
import org.apache.flink.table.planner.utils.DateTimeTestUtil._
import org.apache.flink.table.runtime.functions.SqlDateTimeUtils.unixTimestampToLocalDateTime
import org.apache.flink.types.Row
import org.junit.Assert.assertEquals
import org.junit._
import java.nio.charset.StandardCharsets
import java.sql.{Date, Time, Timestamp}
import java.time.{LocalDate, LocalDateTime, ZoneId}
import java.util
import scala.collection.Seq
class CalcITCase extends BatchTestBase {
@Before
override def before(): Unit = {
super.before()
registerCollection("Table3", data3, type3, "a, b, c", nullablesOfData3)
registerCollection("NullTable3", nullData3, type3, "a, b, c", nullablesOfData3)
registerCollection("SmallTable3", smallData3, type3, "a, b, c", nullablesOfData3)
registerCollection("testTable", buildInData, buildInType, "a,b,c,d,e,f,g,h,i,j")
}
@Test
def testSelectStar(): Unit = {
checkResult(
"SELECT * FROM Table3 where a is not null",
data3)
}
@Test
def testSimpleSelectAll(): Unit = {
checkResult(
"SELECT a, b, c FROM Table3",
data3)
}
@Test
def testManySelectWithFilter(): Unit = {
val data = Seq(
(true, 1, 2, 3, 4, 5, 6, 7),
(false, 1, 2, 3, 4, 5, 6, 7)
)
BatchTableEnvUtil.registerCollection(tEnv, "MyT", data, "a, b, c, d, e, f, g, h")
checkResult(
"""
|SELECT
| a, b, c, d, e, f, g, h,
| a, b, c, g, d, e, f, h,
| h, g, f, e, d, c, b, a,
| h, f, e, d, g, c, b, a,
| c, a, b, g, f, e, h, d,
| a, b, c, d, e, f, g, h,
| a, b, c, g, d, e, f, h,
| h, g, f, e, d, c, b, a,
| h, f, e, d, g, c, b, a,
| c, a, b, g, f, e, h, d
|FROM MyT WHERE a
""".stripMargin,
Seq(row(
true, 1, 2, 3, 4, 5, 6, 7, true, 1, 2, 6, 3, 4, 5, 7, 7, 6, 5, 4, 3, 2, 1,
true, 7, 5, 4, 3, 6, 2, 1, true, 2, true, 1, 6, 5, 4, 7, 3, true, 1, 2, 3,
4, 5, 6, 7, true, 1, 2, 6, 3, 4, 5, 7, 7, 6, 5, 4, 3, 2, 1, true, 7, 5, 4,
3, 6, 2, 1, true, 2, true, 1, 6, 5, 4, 7, 3
)))
}
@Test
def testManySelect(): Unit = {
registerCollection(
"ProjectionTestTable",
projectionTestData, projectionTestDataType,
"a, b, c, d, e, f, g, h",
nullablesOfProjectionTestData)
checkResult(
"""
|SELECT
| a, b, c, d, e, f, g, h,
| a, b, c, g, d, e, f, h,
| h, g, f, e, d, c, b, a,
| h, f, e, d, g, c, b, a,
| c, a, b, g, f, e, h, d,
| a, b, c, d, e, f, g, h,
| a, b, c, g, d, e, f, h,
| h, g, f, e, d, c, b, a,
| h, f, e, d, g, c, b, a,
| c, a, b, g, f, e, h, d
|FROM ProjectionTestTable
""".stripMargin,
Seq(
row(
1, 10, 100, "1", "10", "100", 1000, "1000",
1, 10, 100, 1000, "1", "10", "100", "1000",
"1000", 1000, "100", "10", "1", 100, 10, 1,
"1000", "100", "10", "1", 1000, 100, 10, 1,
100, 1, 10, 1000, "100", "10", "1000", "1",
1, 10, 100, "1", "10", "100", 1000, "1000",
1, 10, 100, 1000, "1", "10", "100", "1000",
"1000", 1000, "100", "10", "1", 100, 10, 1,
"1000", "100", "10", "1", 1000, 100, 10, 1,
100, 1, 10, 1000, "100", "10", "1000", "1"),
row(
2, 20, 200, "2", "20", "200", 2000, "2000",
2, 20, 200, 2000, "2", "20", "200", "2000",
"2000", 2000, "200", "20", "2", 200, 20, 2,
"2000", "200", "20", "2", 2000, 200, 20, 2,
200, 2, 20, 2000, "200", "20", "2000", "2",
2, 20, 200, "2", "20", "200", 2000, "2000",
2, 20, 200, 2000, "2", "20", "200", "2000",
"2000", 2000, "200", "20", "2", 200, 20, 2,
"2000", "200", "20", "2", 2000, 200, 20, 2,
200, 2, 20, 2000, "200", "20", "2000", "2"),
row(
3, 30, 300, "3", "30", "300", 3000, "3000",
3, 30, 300, 3000, "3", "30", "300", "3000",
"3000", 3000, "300", "30", "3", 300, 30, 3,
"3000", "300", "30", "3", 3000, 300, 30, 3,
300, 3, 30, 3000, "300", "30", "3000", "3",
3, 30, 300, "3", "30", "300", 3000, "3000",
3, 30, 300, 3000, "3", "30", "300", "3000",
"3000", 3000, "300", "30", "3", 300, 30, 3,
"3000", "300", "30", "3", 3000, 300, 30, 3,
300, 3, 30, 3000, "300", "30", "3000", "3")
))
}
@Test
def testSelectWithNaming(): Unit = {
checkResult(
"SELECT `1-_./Ü`, b, c FROM (SELECT a as `1-_./Ü`, b, c FROM Table3)",
data3)
}
@Test(expected = classOf[ValidationException])
def testInvalidFields(): Unit = {
checkResult(
"SELECT a, foo FROM Table3",
data3)
}
@Test
def testAllRejectingFilter(): Unit = {
checkResult(
"SELECT * FROM Table3 WHERE false",
Seq())
}
@Test
def testAllPassingFilter(): Unit = {
checkResult(
"SELECT * FROM Table3 WHERE true",
data3)
}
@Test
def testFilterOnString(): Unit = {
checkResult(
"SELECT * FROM Table3 WHERE c LIKE '%world%'",
Seq(
row(3, 2L, "Hello world"),
row(4, 3L, "Hello world, how are you?")
))
}
@Test
def testFilterOnInteger(): Unit = {
checkResult(
"SELECT * FROM Table3 WHERE MOD(a,2)=0",
Seq(
row(2, 2L, "Hello"),
row(4, 3L, "Hello world, how are you?"),
row(6, 3L, "Luke Skywalker"),
row(8, 4L, "Comment#2"),
row(10, 4L, "Comment#4"),
row(12, 5L, "Comment#6"),
row(14, 5L, "Comment#8"),
row(16, 6L, "Comment#10"),
row(18, 6L, "Comment#12"),
row(20, 6L, "Comment#14")
))
}
@Test
def testDisjunctivePredicate(): Unit = {
checkResult(
"SELECT * FROM Table3 WHERE a < 2 OR a > 20",
Seq(
row(1, 1L, "Hi"),
row(21, 6L, "Comment#15")
))
}
@Test
def testFilterWithAnd(): Unit = {
checkResult(
"SELECT * FROM Table3 WHERE MOD(a,2)<>0 AND MOD(b,2)=0",
Seq(
row(3, 2L, "Hello world"),
row(7, 4L, "Comment#1"),
row(9, 4L, "Comment#3"),
row(17, 6L, "Comment#11"),
row(19, 6L, "Comment#13"),
row(21, 6L, "Comment#15")
))
}
@Test
def testAdvancedDataTypes(): Unit = {
val data = Seq(
row(
localDate("1984-07-12"),
localTime("14:34:24"),
localDateTime("1984-07-12 14:34:24")))
registerCollection(
"MyTable", data, new RowTypeInfo(LOCAL_DATE, LOCAL_TIME, LOCAL_DATE_TIME), "a, b, c")
checkResult(
"SELECT a, b, c, DATE '1984-07-12', TIME '14:34:24', " +
"TIMESTAMP '1984-07-12 14:34:24' FROM MyTable",
Seq(
row(
localDate("1984-07-12"),
localTime("14:34:24"),
localDateTime("1984-07-12 14:34:24"),
localDate("1984-07-12"),
localTime("14:34:24"),
localDateTime("1984-07-12 14:34:24"))))
checkResult(
"SELECT a, b, c, DATE '1984-07-12', TIME '14:34:24', " +
"TIMESTAMP '1984-07-12 14:34:24' FROM MyTable " +
"WHERE a = '1984-07-12' and b = '14:34:24' and c = '1984-07-12 14:34:24'",
Seq(
row(
localDate("1984-07-12"),
localTime("14:34:24"),
localDateTime("1984-07-12 14:34:24"),
localDate("1984-07-12"),
localTime("14:34:24"),
localDateTime("1984-07-12 14:34:24"))))
checkResult(
"SELECT a, b, c, DATE '1984-07-12', TIME '14:34:24', " +
"TIMESTAMP '1984-07-12 14:34:24' FROM MyTable " +
"WHERE '1984-07-12' = a and '14:34:24' = b and '1984-07-12 14:34:24' = c",
Seq(
row(
localDate("1984-07-12"),
localTime("14:34:24"),
localDateTime("1984-07-12 14:34:24"),
localDate("1984-07-12"),
localTime("14:34:24"),
localDateTime("1984-07-12 14:34:24"))))
}
@Test
def testUserDefinedScalarFunction(): Unit = {
registerFunction("hashCode", MyHashCode)
val data = Seq(row("a"), row("b"), row("c"))
registerCollection("MyTable", data, new RowTypeInfo(STRING_TYPE_INFO), "text")
checkResult(
"SELECT hashCode(text), hashCode('22') FROM MyTable",
Seq(row(97,1600), row(98,1600), row(99,1600)
))
}
@Test
def testDecimalReturnType(): Unit = {
registerFunction("myNegative", MyNegative)
checkResult("SELECT myNegative(5.1)",
Seq(row(new java.math.BigDecimal("-5.100000000000000000"))
))
}
@Test
def testUDFWithInternalClass(): Unit = {
registerFunction("func", BinaryStringFunction)
val data = Seq(row("a"), row("b"), row("c"))
registerCollection("MyTable", data, new RowTypeInfo(STRING_TYPE_INFO), "text")
checkResult(
"SELECT func(text) FROM MyTable",
Seq(row("a"), row("b"), row("c")
))
}
@Test
def testTimestampSemantics(): Unit = {
// If the timestamp literal '1969-07-20 16:17:39' is inserted in Washington D.C.
// and then queried from Paris, it might be shown in the following ways based
// on timestamp semantics:
// TODO: Add ZonedDateTime/OffsetDateTime
val new_york = ZoneId.of("America/New_York")
val ldt = localDateTime("1969-07-20 16:17:39")
val data = Seq(row(
ldt,
ldt.toInstant(new_york.getRules.getOffset(ldt))
))
registerCollection("T", data, new RowTypeInfo(LOCAL_DATE_TIME, INSTANT), "a, b")
val paris = ZoneId.of("Europe/Paris")
tEnv.getConfig.setLocalTimeZone(paris)
checkResult(
"SELECT CAST(a AS VARCHAR), b, CAST(b AS VARCHAR) FROM T",
Seq(row("1969-07-20 16:17:39.000", "1969-07-20T20:17:39Z", "1969-07-20 21:17:39.000"))
)
}
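// Hedged sketch (plain java.time, independent of the Flink API; not invoked by any test):
// the LOCAL_DATE_TIME and INSTANT columns registered above differ only in whether a zone
// offset is attached, which is why casting the Instant in the Paris session shifts the
// wall-clock value by the Paris offset.
private def timestampSemanticsSketch(): Unit = {
  val newYork = ZoneId.of("America/New_York")
  val paris = ZoneId.of("Europe/Paris")
  val ldt = localDateTime("1969-07-20 16:17:39")
  val instant = ldt.atZone(newYork).toInstant // 1969-07-20T20:17:39Z
  println(instant.atZone(paris).toLocalDateTime) // 1969-07-20T21:17:39, the Paris wall clock
}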
@Test
def testTimeUDF(): Unit = {
val data = Seq(row(
localDate("1984-07-12"),
Date.valueOf("1984-07-12"),
DateTimeTestUtil.localTime("08:03:09"),
Time.valueOf("08:03:09"),
localDateTime("2019-09-19 08:03:09"),
Timestamp.valueOf("2019-09-19 08:03:09"),
Timestamp.valueOf("2019-09-19 08:03:09").toInstant))
registerCollection("MyTable", data,
new RowTypeInfo(LOCAL_DATE, DATE, LOCAL_TIME, TIME, LOCAL_DATE_TIME, TIMESTAMP, INSTANT),
"a, b, c, d, e, f, g")
tEnv.registerFunction("dateFunc", DateFunction)
tEnv.registerFunction("localDateFunc", LocalDateFunction)
tEnv.registerFunction("timeFunc", TimeFunction)
tEnv.registerFunction("localTimeFunc", LocalTimeFunction)
tEnv.registerFunction("timestampFunc", TimestampFunction)
tEnv.registerFunction("datetimeFunc", DateTimeFunction)
tEnv.registerFunction("instantFunc", InstantFunction)
val v1 = "1984-07-12"
val v2 = "08:03:09"
val v3 = "2019-09-19 08:03:09.0"
val v4 = "2019-09-19T08:03:09"
checkResult(
"SELECT" +
" dateFunc(a), localDateFunc(a), dateFunc(b), localDateFunc(b)," +
" timeFunc(c), localTimeFunc(c), timeFunc(d), localTimeFunc(d)," +
" timestampFunc(e), datetimeFunc(e), timestampFunc(f), datetimeFunc(f)," +
" CAST(instantFunc(g) AS TIMESTAMP), instantFunc(g)" +
" FROM MyTable",
Seq(row(
v1, v1, v1, v1,
v2, v2, v2, v2,
v3, v4, v3, v4,
localDateTime("2019-09-19 08:03:09"),
Timestamp.valueOf("2019-09-19 08:03:09").toInstant)))
}
@Test
def testBinary(): Unit = {
val data = Seq(row(1, 2, "hehe".getBytes(StandardCharsets.UTF_8)))
registerCollection(
"MyTable",
data,
new RowTypeInfo(INT_TYPE_INFO, INT_TYPE_INFO, BYTE_PRIMITIVE_ARRAY_TYPE_INFO),
"a, b, c")
checkResult(
"SELECT a, b, c FROM MyTable",
data)
}
@Test
def testUserDefinedScalarFunctionWithParameter(): Unit = {
registerFunction("RichFunc2", new RichFunc2)
UserDefinedFunctionTestUtils.setJobParameters(env, Map("string.value" -> "ABC"))
checkResult(
"SELECT c FROM SmallTable3 where RichFunc2(c)='ABC#Hello'",
Seq(row("Hello"))
)
}
@Test
def testUserDefinedScalarFunctionWithDistributedCache(): Unit = {
val words = "Hello\nWord"
val filePath = UserDefinedFunctionTestUtils.writeCacheFile("test_words", words)
env.registerCachedFile(filePath, "words")
registerFunction("RichFunc3", new RichFunc3)
checkResult(
"SELECT c FROM SmallTable3 where RichFunc3(c)=true",
Seq(row("Hello"))
)
}
@Test
def testMultipleUserDefinedScalarFunctions(): Unit = {
registerFunction("RichFunc1", new RichFunc1)
registerFunction("RichFunc2", new RichFunc2)
UserDefinedFunctionTestUtils.setJobParameters(env, Map("string.value" -> "Abc"))
checkResult(
"SELECT c FROM SmallTable3 where RichFunc2(c)='Abc#Hello' or RichFunc1(a)=3 and b=2",
Seq(row("Hello"), row("Hello world"))
)
}
@Test
def testExternalTypeFunc1(): Unit = {
registerFunction("func1", RowFunc)
registerFunction("rowToStr", RowToStrFunc)
registerFunction("func2", ListFunc)
registerFunction("func3", StringFunc)
val data = Seq(row("a"), row("b"), row("c"))
registerCollection("MyTable", data, new RowTypeInfo(STRING_TYPE_INFO), "text")
checkResult(
"SELECT rowToStr(func1(text)), func2(text), func3(text) FROM MyTable",
Seq(
row("a", util.Arrays.asList("a"), "a"),
row("b", util.Arrays.asList("b"), "b"),
row("c", util.Arrays.asList("c"), "c")
))
}
@Test
def testExternalTypeFunc2(): Unit = {
registerFunction("func1", RowFunc)
registerFunction("rowToStr", RowToStrFunc)
registerFunction("func2", ListFunc)
registerFunction("func3", StringFunc)
val data = Seq(row("a"), row("b"), row("c"))
registerCollection("MyTable", data, new RowTypeInfo(STRING_TYPE_INFO), "text")
// go through the shuffle so the serializer is exercised
checkResult(
"SELECT text, count(*), rowToStr(func1(text)), func2(text), func3(text) " +
"FROM MyTable group by text",
Seq(
row("a", 1, "a", util.Arrays.asList("a"), "a"),
row("b", 1, "b", util.Arrays.asList("b"), "b"),
row("c", 1, "c", util.Arrays.asList("c"), "c")
))
}
@Test
def testPojoField(): Unit = {
val data = Seq(
row(new MyPojo(5, 105)),
row(new MyPojo(6, 11)),
row(new MyPojo(7, 12)))
registerCollection(
"MyTable",
data,
new RowTypeInfo(TypeExtractor.createTypeInfo(classOf[MyPojo])),
"a")
checkResult(
"SELECT a FROM MyTable",
Seq(
row(row(5, 105)),
row(row(6, 11)),
row(row(7, 12))
))
}
@Test
def testPojoFieldUDF(): Unit = {
val data = Seq(
row(new MyPojo(5, 105)),
row(new MyPojo(6, 11)),
row(new MyPojo(7, 12)))
registerCollection(
"MyTable",
data,
new RowTypeInfo(TypeExtractor.createTypeInfo(classOf[MyPojo])),
"a")
//1. external type for udf parameter
registerFunction("pojoFunc", MyPojoFunc)
registerFunction("toPojoFunc", MyToPojoFunc)
checkResult(
"SELECT pojoFunc(a) FROM MyTable",
Seq(row(105), row(11), row(12)))
//2. external type return in udf
checkResult(
"SELECT toPojoFunc(pojoFunc(a)) FROM MyTable",
Seq(
row(row(11, 11)),
row(row(12, 12)),
row(row(105, 105))))
}
// TODO
// @Test
// def testUDFWithGetResultTypeFromLiteral(): Unit = {
// registerFunction("hashCode0", LiteralHashCode)
// registerFunction("hashCode1", LiteralHashCode)
// val data = Seq(row("a"), row("b"), row("c"))
// tEnv.registerCollection("MyTable", data, new RowTypeInfo(STRING_TYPE_INFO), "text")
// checkResult(
// "SELECT hashCode0(text, 'int') FROM MyTable",
// Seq(row(97), row(98), row(99)
// ))
//
// checkResult(
// "SELECT hashCode1(text, 'string') FROM MyTable",
// Seq(row("str97"), row("str98"), row("str99")
// ))
// }
@Test
def testInSmallValues(): Unit = {
checkResult(
"SELECT a FROM Table3 WHERE a in (1, 2)",
Seq(row(1), row(2)))
checkResult(
"SELECT a FROM Table3 WHERE a in (1, 2) and b = 2",
Seq(row(2)))
}
@Test
def testInLargeValues(): Unit = {
checkResult(
"SELECT a FROM Table3 WHERE a in (1, 2, 3, 4, 5)",
Seq(row(1), row(2), row(3), row(4), row(5)))
checkResult(
"SELECT a FROM Table3 WHERE a in (1, 2, 3, 4, 5) and b = 2",
Seq(row(2), row(3)))
checkResult(
"SELECT c FROM Table3 WHERE c in ('Hi', 'H2', 'H3', 'H4', 'H5')",
Seq(row("Hi")))
}
@Test
def testComplexInLargeValues(): Unit = {
checkResult(
"SELECT c FROM Table3 WHERE substring(c, 0, 2) in ('Hi', 'H2', 'H3', 'H4', 'H5')",
Seq(row("Hi")))
checkResult(
"SELECT c FROM Table3 WHERE a = 1 and " +
"(b = 1 or (c = 'Hello' and substring(c, 0, 2) in ('Hi', 'H2', 'H3', 'H4', 'H5')))",
Seq(row("Hi")))
checkResult(
"SELECT c FROM Table3 WHERE a = 1 and " +
"(b = 1 or (c = 'Hello' and (" +
"substring(c, 0, 2) = 'Hi' or substring(c, 0, 2) = 'H2' or " +
"substring(c, 0, 2) = 'H3' or substring(c, 0, 2) = 'H4' or " +
"substring(c, 0, 2) = 'H5')))",
Seq(row("Hi")))
}
@Test
def testNotInLargeValues(): Unit = {
checkResult(
"SELECT a FROM SmallTable3 WHERE a not in (2, 3, 4, 5)",
Seq(row(1)))
checkResult(
"SELECT a FROM SmallTable3 WHERE a not in (2, 3, 4, 5) or b = 2",
Seq(row(1), row(2), row(3)))
checkResult(
"SELECT c FROM SmallTable3 WHERE c not in ('Hi', 'H2', 'H3', 'H4')",
Seq(row("Hello"), row("Hello world")))
}
@Test
def testComplexNotInLargeValues(): Unit = {
checkResult(
"SELECT c FROM SmallTable3 WHERE substring(c, 0, 2) not in ('Hi', 'H2', 'H3', 'H4', 'H5')",
Seq(row("Hello"), row("Hello world")))
checkResult(
"SELECT c FROM SmallTable3 WHERE a = 1 or " +
"(b = 1 and (c = 'Hello' or substring(c, 0, 2) not in ('Hi', 'H2', 'H3', 'H4', 'H5')))",
Seq(row("Hi")))
checkResult(
"SELECT c FROM SmallTable3 WHERE a = 1 or " +
"(b = 1 and (c = 'Hello' or (" +
"substring(c, 0, 2) <> 'Hi' and substring(c, 0, 2) <> 'H2' and " +
"substring(c, 0, 2) <> 'H3' and substring(c, 0, 2) <> 'H4' and " +
"substring(c, 0, 2) <> 'H5')))",
Seq(row("Hi")))
}
@Test
def testRowType(): Unit = {
// literals
checkResult(
"SELECT ROW(1, 'Hi', true) FROM SmallTable3",
Seq(
row(row(1, "Hi", true)),
row(row(1, "Hi", true)),
row(row(1, "Hi", true))
)
)
// primitive type
checkResult(
"SELECT ROW(1, a, b) FROM SmallTable3",
Seq(
row(row(1, 1, 1L)),
row(row(1, 2, 2L)),
row(row(1, 3, 2L))
)
)
}
@Test
def testRowTypeWithDecimal(): Unit = {
val d = Decimal.castFrom(2.0002, 5, 4).toBigDecimal
checkResult(
"SELECT ROW(CAST(2.0002 AS DECIMAL(5, 4)), a, c) FROM SmallTable3",
Seq(
row(d, 1, "Hi"),
row(d, 2, "Hello"),
row(d, 3, "Hello world")
)
)
}
@Test
def testArrayType(): Unit = {
// literals
checkResult(
"SELECT ARRAY['Hi', 'Hello', 'How are you'] FROM SmallTable3",
Seq(
row("[Hi, Hello, How are you]"),
row("[Hi, Hello, How are you]"),
row("[Hi, Hello, How are you]")
)
)
// primitive type
checkResult(
"SELECT ARRAY[b, 30, 10, a] FROM SmallTable3",
Seq(
row("[1, 30, 10, 1]"),
row("[2, 30, 10, 2]"),
row("[2, 30, 10, 3]")
)
)
// non-primitive type
checkResult(
"SELECT ARRAY['Test', c] FROM SmallTable3",
Seq(
row("[Test, Hi]"),
row("[Test, Hello]"),
row("[Test, Hello world]")
)
)
}
@Test
def testMapType(): Unit = {
// literals
checkResult(
"SELECT MAP[1, 'Hello', 2, 'Hi'] FROM SmallTable3",
Seq(
row("{1=Hello, 2=Hi}"),
row("{1=Hello, 2=Hi}"),
row("{1=Hello, 2=Hi}")
)
)
// primitive type
checkResult(
"SELECT MAP[b, 30, 10, a] FROM SmallTable3",
Seq(
row("{1=30, 10=1}"),
row("{2=30, 10=2}"),
row("{2=30, 10=3}")
)
)
// non-primitive type
checkResult(
"SELECT MAP[a, c] FROM SmallTable3",
Seq(
row("{1=Hi}"),
row("{2=Hello}"),
row("{3=Hello world}")
)
)
}
@Test
def testValueConstructor(): Unit = {
val data = Seq(row("foo", 12, localDateTime("1984-07-12 14:34:24.001")))
BatchTableEnvUtil.registerCollection(
tEnv, "MyTable", data,
new RowTypeInfo(Types.STRING, Types.INT, Types.LOCAL_DATE_TIME),
Some(parseFieldNames("a, b, c")), None, None)
val table = parseQuery("SELECT ROW(a, b, c), ARRAY[12, b], MAP[a, c] FROM MyTable " +
"WHERE (a, b, c) = ('foo', 12, TIMESTAMP '1984-07-12 14:34:24.001')")
val result = executeQuery(table)
val baseRow = result.head.getField(0).asInstanceOf[Row]
assertEquals(data.head.getField(0), baseRow.getField(0))
assertEquals(data.head.getField(1), baseRow.getField(1))
assertEquals(data.head.getField(2), baseRow.getField(2))
val arr = result.head.getField(1).asInstanceOf[Array[Integer]]
assertEquals(12, arr(0))
assertEquals(data.head.getField(1), arr(1))
val hashMap = result.head.getField(2).asInstanceOf[util.HashMap[String, Timestamp]]
assertEquals(data.head.getField(2),
hashMap.get(data.head.getField(0).asInstanceOf[String]))
}
@Test
def testSelectStarFromNestedTable(): Unit = {
val table = BatchTableEnvUtil.fromCollection(tEnv, Seq(
((0, 0), "0"),
((1, 1), "1"),
((2, 2), "2")
))
tEnv.registerTable("MyTable", table)
checkResult(
"SELECT * FROM MyTable",
Seq(
row(row(0, 0), "0"),
row(row(1, 1), "1"),
row(row(2, 2), "2")
)
)
}
@Test
def testSelectStarFromNestedValues(): Unit = {
val table = BatchTableEnvUtil.fromCollection(tEnv, Seq(
(0L, "0"),
(1L, "1"),
(2L, "2")
), "a, b")
tEnv.registerTable("MyTable", table)
checkResult(
"select * from (select MAP[a,b], a from MyTable)",
Seq(
row("{0=0}", 0),
row("{1=1}", 1),
row("{2=2}", 2)
)
)
checkResult(
"select * from (select ROW(a, a), b from MyTable)",
Seq(
row(row(0, 0), "0"),
row(row(1, 1), "1"),
row(row(2, 2), "2")
)
)
}
@Test
def testSelectStarFromNestedValues2(): Unit = {
val table = BatchTableEnvUtil.fromCollection(tEnv, Seq(
(0L, "0"),
(1L, "1"),
(2L, "2")
), "a, b")
tEnv.registerTable("MyTable", table)
checkResult(
"select * from (select ARRAY[a,cast(b as BIGINT)], a from MyTable)",
Seq(
row("[0, 0]", 0),
row("[1, 1]", 1),
row("[2, 2]", 2)
)
)
}
@Ignore // TODO support Unicode
@Test
def testFunctionWithUnicodeParameters(): Unit = {
val data = List(
("a\u0001b", "c\"d", "e\"\u0004f"), // uses Java/Scala escaping
("x\u0001y", "y\"z", "z\"\u0004z")
)
val splitUDF0 = new SplitUDF(deterministic = true)
val splitUDF1 = new SplitUDF(deterministic = false)
registerFunction("splitUDF0", splitUDF0)
registerFunction("splitUDF1", splitUDF1)
val t1 = BatchTableEnvUtil.fromCollection(tEnv, data, "a, b, c")
tEnv.registerTable("T1", t1)
// uses SQL escaping (be aware that even Scala multi-line strings parse backslash!)
checkResult(
s"""
|SELECT
| splitUDF0(a, U&'${'\\'}0001', 0) AS a0,
| splitUDF1(a, U&'${'\\'}0001', 0) AS a1,
| splitUDF0(b, U&'"', 1) AS b0,
| splitUDF1(b, U&'"', 1) AS b1,
| splitUDF0(c, U&'${'\\'}${'\\'}"${'\\'}0004', 0) AS c0,
| splitUDF1(c, U&'${'\\'}"#0004' UESCAPE '#', 0) AS c1
|FROM T1
|""".stripMargin,
Seq(
row("a", "a", "d", "d", "e", "e"),
row("x", "x", "z", "z", "z", "z"))
)
}
@Test
def testCast(): Unit = {
checkResult(
"SELECT CAST(a AS VARCHAR(10)) FROM Table3 WHERE CAST(a AS VARCHAR(10)) = '1'",
Seq(row(1)))
}
@Test
def testLike(): Unit = {
checkResult(
"SELECT a FROM NullTable3 WHERE c LIKE '%llo%'",
Seq(row(2), row(3), row(4)))
checkResult(
"SELECT a FROM NullTable3 WHERE CAST(a as VARCHAR(10)) LIKE CAST(b as VARCHAR(10))",
Seq(row(1), row(2)))
checkResult(
"SELECT a FROM NullTable3 WHERE c NOT LIKE '%Comment%' AND c NOT LIKE '%Hello%'",
Seq(row(1), row(5), row(6), row(null), row(null)))
checkResult(
"SELECT a FROM NullTable3 WHERE c LIKE 'Comment#%' and c LIKE '%2'",
Seq(row(8), row(18)))
checkResult(
"SELECT a FROM NullTable3 WHERE c LIKE 'Comment#12'",
Seq(row(18)))
checkResult(
"SELECT a FROM NullTable3 WHERE c LIKE '%omm%nt#12'",
Seq(row(18)))
}
@Test
def testLikeWithEscape(): Unit = {
val rows = Seq(
(1, "ha_ha"),
(2, "ffhaha_hahaff"),
(3, "aaffhaha_hahaffaa"),
(4, "aaffhaaa_aahaffaa"),
(5, "a%_ha")
)
BatchTableEnvUtil.registerCollection(tEnv, "MyT", rows, "a, b")
checkResult(
"SELECT a FROM MyT WHERE b LIKE '%ha?_ha%' ESCAPE '?'",
Seq(row(1), row(2), row(3)))
checkResult(
"SELECT a FROM MyT WHERE b LIKE '%ha?_ha' ESCAPE '?'",
Seq(row(1)))
checkResult(
"SELECT a FROM MyT WHERE b LIKE 'ha?_ha%' ESCAPE '?'",
Seq(row(1)))
checkResult(
"SELECT a FROM MyT WHERE b LIKE 'ha?_ha' ESCAPE '?'",
Seq(row(1)))
checkResult(
"SELECT a FROM MyT WHERE b LIKE '%affh%ha?_ha%' ESCAPE '?'",
Seq(row(3)))
checkResult(
"SELECT a FROM MyT WHERE b LIKE 'a?%?_ha' ESCAPE '?'",
Seq(row(5)))
checkResult(
"SELECT a FROM MyT WHERE b LIKE 'h_?_ha' ESCAPE '?'",
Seq(row(1)))
}
@Test
def testChainLike(): Unit = {
// special case to test CHAIN_PATTERN.
checkResult(
"SELECT a FROM NullTable3 WHERE c LIKE '% /sys/kvengine/KVServerRole/kvengine/kv_server%'",
Seq())
// special case to test CHAIN_PATTERN.
checkResult(
"SELECT a FROM NullTable3 WHERE c LIKE '%Tuple%%'",
Seq(row(null), row(null)))
// special case to test CHAIN_PATTERN.
checkResult(
"SELECT a FROM NullTable3 WHERE c LIKE '%/order/inter/touch/backwayprice.do%%'",
Seq())
}
@Test
def testEqual(): Unit = {
checkResult(
"SELECT a FROM Table3 WHERE c = 'Hi'",
Seq(row(1)))
checkResult(
"SELECT c FROM Table3 WHERE c <> 'Hello' AND b = 2",
Seq(row("Hello world")))
}
@Test
def testSubString(): Unit = {
checkResult(
"SELECT SUBSTRING(c, 6, 13) FROM Table3 WHERE a = 6",
Seq(row("Skywalker")))
}
@Test
def testConcat(): Unit = {
checkResult(
"SELECT CONCAT(c, '-haha') FROM Table3 WHERE a = 1",
Seq(row("Hi-haha")))
checkResult(
"SELECT CONCAT_WS('-x-', c, 'haha') FROM Table3 WHERE a = 1",
Seq(row("Hi-x-haha")))
}
@Test
def testStringAgg(): Unit = {
checkResult(
"SELECT MIN(c) FROM NullTable3",
Seq(row("Comment#1")))
checkResult(
"SELECT SUM(b) FROM NullTable3 WHERE c = 'NullTuple' OR c LIKE '%Hello world%' GROUP BY c",
Seq(row(1998), row(2), row(3)))
}
@Test
def testStringUdf(): Unit = {
registerFunction("myFunc", MyStringFunc)
checkResult(
"SELECT myFunc(c) FROM Table3 WHERE a = 1",
Seq(row("Hihaha")))
}
@Test
def testNestUdf(): Unit = {
registerFunction("func", MyStringFunc)
checkResult(
"SELECT func(func(func(c))) FROM SmallTable3",
Seq(row("Hello worldhahahahahaha"), row("Hellohahahahahaha"), row("Hihahahahahaha")))
}
@Test
def testCurrentDate(): Unit = {
    // Multiple evaluations within one query should return the same value
checkResult("SELECT CURRENT_DATE = CURRENT_DATE FROM testTable WHERE a = TRUE",
Seq(row(true)))
val d0 = LocalDateConverter.INSTANCE.toInternal(
unixTimestampToLocalDateTime(System.currentTimeMillis()).toLocalDate)
val table = parseQuery("SELECT CURRENT_DATE FROM testTable WHERE a = TRUE")
val result = executeQuery(table)
val d1 = LocalDateConverter.INSTANCE.toInternal(
result.toList.head.getField(0).asInstanceOf[LocalDate])
Assert.assertTrue(d0 <= d1 && d1 - d0 <= 1)
}
@Test
def testCurrentTimestamp(): Unit = {
    // Multiple evaluations within one query should return the same value
checkResult("SELECT CURRENT_TIMESTAMP = CURRENT_TIMESTAMP FROM testTable WHERE a = TRUE",
Seq(row(true)))
// CURRENT_TIMESTAMP should return the current timestamp
val ts0 = System.currentTimeMillis()
val table = parseQuery("SELECT CURRENT_TIMESTAMP FROM testTable WHERE a = TRUE")
val result = executeQuery(table)
val ts1 = SqlTimestamp.fromLocalDateTime(
result.toList.head.getField(0).asInstanceOf[LocalDateTime]).getMillisecond
val ts2 = System.currentTimeMillis()
Assert.assertTrue(ts0 <= ts1 && ts1 <= ts2)
}
@Test
def testCurrentTime(): Unit = {
    // Multiple evaluations within one query should return the same value
checkResult("SELECT CURRENT_TIME = CURRENT_TIME FROM testTable WHERE a = TRUE",
Seq(row(true)))
}
def testTimestampCompareWithDate(): Unit = {
checkResult("SELECT j FROM testTable WHERE j < DATE '2017-11-11'",
Seq(row(true)))
}
/**
* TODO Support below string timestamp format to cast to timestamp:
* yyyy
* yyyy-[m]m
* yyyy-[m]m-[d]d
* yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]
* yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z
* yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m
* yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m
* yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]
* yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z
* yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m
* yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m
* [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]
* [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z
* [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m
* [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m
* T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]
* T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z
* T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m
* T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m
*/
@Test
def testTimestampCompareWithDateString(): Unit = {
//j 2015-05-20 10:00:00.887
checkResult("SELECT j FROM testTable WHERE j < '2017-11-11'",
Seq(row(localDateTime("2015-05-20 10:00:00.887"))))
}
@Test
def testDateCompareWithDateString(): Unit = {
checkResult("SELECT h FROM testTable WHERE h <= '2017-12-12'",
Seq(
row(localDate("2017-12-12")),
row(localDate("2017-12-12"))
))
}
@Test
def testDateEqualsWithDateString(): Unit = {
checkResult("SELECT h FROM testTable WHERE h = '2017-12-12'",
Seq(
row(localDate("2017-12-12")),
row(localDate("2017-12-12"))
))
}
@Test
def testDateFormat(): Unit = {
//j 2015-05-20 10:00:00.887
checkResult("SELECT j, " +
" DATE_FORMAT(j, 'yyyy/MM/dd HH:mm:ss')," +
" DATE_FORMAT('2015-05-20 10:00:00.887', 'yyyy/MM/dd HH:mm:ss')" +
" FROM testTable WHERE a = TRUE",
Seq(
row(localDateTime("2015-05-20 10:00:00.887"),
"2015/05/20 10:00:00",
"2015/05/20 10:00:00")
))
}
@Test
def testYear(): Unit = {
checkResult("SELECT j, YEAR(j) FROM testTable WHERE a = TRUE",
Seq(row(localDateTime("2015-05-20 10:00:00.887"), "2015")))
}
@Test
def testQuarter(): Unit = {
checkResult("SELECT j, QUARTER(j) FROM testTable WHERE a = TRUE",
Seq(row(localDateTime("2015-05-20 10:00:00.887"), "2")))
}
@Test
def testMonth(): Unit = {
checkResult("SELECT j, MONTH(j) FROM testTable WHERE a = TRUE",
Seq(row(localDateTime("2015-05-20 10:00:00.887"), "5")))
}
@Test
def testWeek(): Unit = {
checkResult("SELECT j, WEEK(j) FROM testTable WHERE a = TRUE",
Seq(row(localDateTime("2015-05-20 10:00:00.887"), "21")))
}
@Test
def testDayOfYear(): Unit = {
checkResult("SELECT j, DAYOFYEAR(j) FROM testTable WHERE a = TRUE",
Seq(row(localDateTime("2015-05-20 10:00:00.887"), "140")))
}
@Test
def testDayOfMonth(): Unit = {
checkResult("SELECT j, DAYOFMONTH(j) FROM testTable WHERE a = TRUE",
Seq(row(localDateTime("2015-05-20 10:00:00.887"), "20")))
}
@Test
def testDayOfWeek(): Unit = {
checkResult("SELECT j, DAYOFWEEK(j) FROM testTable WHERE a = TRUE",
Seq(row(localDateTime("2015-05-20 10:00:00.887"), "4")))
}
@Test
def testHour(): Unit = {
checkResult("SELECT j, HOUR(j) FROM testTable WHERE a = TRUE",
Seq(row(localDateTime("2015-05-20 10:00:00.887"), "10")))
}
@Test
def testMinute(): Unit = {
checkResult("SELECT j, MINUTE(j) FROM testTable WHERE a = TRUE",
Seq(row(localDateTime("2015-05-20 10:00:00.887"), "0")))
}
@Test
def testSecond(): Unit = {
checkResult("SELECT j, SECOND(j) FROM testTable WHERE a = TRUE",
Seq(row(localDateTime("2015-05-20 10:00:00.887"), "0")))
}
@Test
def testToDate(): Unit = {
checkResult("SELECT" +
" TO_DATE(CAST(null AS VARCHAR))," +
" TO_DATE('2016-12-31')," +
" TO_DATE('2016-12-31', 'yyyy-MM-dd')",
Seq(row(null, localDate("2016-12-31"), localDate("2016-12-31"))))
}
@Test
def testToTimestamp(): Unit = {
checkResult("SELECT" +
" TO_TIMESTAMP(CAST(null AS VARCHAR))," +
" TO_TIMESTAMP('2016-12-31 00:12:00')," +
" TO_TIMESTAMP('2016-12-31', 'yyyy-MM-dd')",
Seq(row(null, localDateTime("2016-12-31 00:12:00"), localDateTime("2016-12-31 00:00:00"))))
}
@Test
def testCalcBinary(): Unit = {
registerCollection(
"BinaryT",
nullData3.map((r) => row(r.getField(0), r.getField(1),
r.getField(2).toString.getBytes(StandardCharsets.UTF_8))),
new RowTypeInfo(INT_TYPE_INFO, LONG_TYPE_INFO, BYTE_PRIMITIVE_ARRAY_TYPE_INFO),
"a, b, c",
nullablesOfNullData3)
checkResult(
"select a, b, c from BinaryT where b < 1000",
nullData3.map((r) => row(r.getField(0), r.getField(1),
r.getField(2).toString.getBytes(StandardCharsets.UTF_8)))
)
}
@Test(expected = classOf[UnsupportedOperationException])
def testOrderByBinary(): Unit = {
registerCollection(
"BinaryT",
nullData3.map((r) => row(r.getField(0), r.getField(1),
r.getField(2).toString.getBytes(StandardCharsets.UTF_8))),
new RowTypeInfo(INT_TYPE_INFO, LONG_TYPE_INFO, BYTE_PRIMITIVE_ARRAY_TYPE_INFO),
"a, b, c",
nullablesOfNullData3)
conf.getConfiguration.setInteger(
ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, 1)
conf.getConfiguration.setBoolean(
BatchExecSortRule.TABLE_EXEC_SORT_RANGE_ENABLED, true)
checkResult(
"select * from BinaryT order by c",
nullData3.sortBy((x : Row) =>
x.getField(2).asInstanceOf[String]).map((r) =>
row(r.getField(0), r.getField(1),
r.getField(2).toString.getBytes(StandardCharsets.UTF_8))),
isSorted = true
)
}
@Test
def testGroupByBinary(): Unit = {
registerCollection(
"BinaryT2",
nullData3.map((r) => row(r.getField(0),
r.getField(1).toString.getBytes(StandardCharsets.UTF_8), r.getField(2))),
new RowTypeInfo(INT_TYPE_INFO, BYTE_PRIMITIVE_ARRAY_TYPE_INFO, STRING_TYPE_INFO),
"a, b, c",
nullablesOfNullData3)
checkResult(
"select sum(sumA) from (select sum(a) as sumA, b, c from BinaryT2 group by c, b) group by b",
Seq(row(1), row(111), row(15), row(34), row(5), row(65), row(null))
)
}
}
|
bowenli86/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/runtime/batch/sql/CalcITCase.scala
|
Scala
|
apache-2.0
| 38,530
|
package com.alanrodas.test
/*
import org.scalatest._
package object fronttier {
abstract class UnitSpec extends FlatSpec with Matchers
}
*/
|
alanrodas/Fronttier
|
src/test/scala/com/alanrodas/test/fronttier/package.scala
|
Scala
|
apache-2.0
| 143
|
package org.mith.metaHathi.utils
import scala.concurrent._
import ExecutionContext.Implicits.global
import scalaz._, Scalaz._
import argonaut._, Argonaut._
import java.io.File
import java.io.ByteArrayInputStream
import java.util.ArrayList
// Using apache http utils directly instead of dispatch because
// multipart file upload is currently not supported.
import org.apache.http.entity.mime.MultipartEntityBuilder
import org.apache.http.client.methods.{HttpPost, HttpGet}
import org.apache.http.impl.client.HttpClientBuilder
import org.apache.http.util.EntityUtils
import java.util.zip.{ZipFile, ZipEntry}
object HTTPUtils {
private def encode(params:Map[String, String]) : String = {
val p = params.view map {
case (k, v) => k + "=" + v
} mkString ("", "&", "")
"?" + p
}
def createPostRequest(refineHost:String, command:String, params:Map[String, String] = null) : HttpPost = {
val paramStr = if (params != null) {
encode(params)
} else ""
new HttpPost("http://%s/%s%s".format(refineHost, command, paramStr))
}
def createGetRequest(refineHost:String, command:String, params:Map[String, String] = null) : HttpGet = {
val paramStr = if (params != null) {
encode(params)
} else ""
new HttpGet("http://%s/%s%s".format(refineHost, command, paramStr))
}
}
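// Illustrative usage sketch (not part of the original file): the host, project id and command
// strings below are assumptions for the example only, not values taken from this code base.
//
//   val metaReq = HTTPUtils.createGetRequest("127.0.0.1:3333", "command/core/get-all-project-metadata")
//   val rowsReq = HTTPUtils.createGetRequest(
//     "127.0.0.1:3333", "command/core/get-rows",
//     Map("project" -> "1234567890", "start" -> "0", "limit" -> "1"))
//   // Both requests can then be executed with a client built via HttpClientBuilder.create().build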
class OpenRefineClient(refineHost:String) {
val client = HttpClientBuilder.create().build
def getAllProjectMetadataForUser(user:String): Map[String, String] = {
val request = HTTPUtils.createGetRequest(refineHost, "command/core/get-all-project-metadata")
val response = client.execute(request)
val metaJson: Json = Parse.parseOption(EntityUtils.toString(response.getEntity())).getOrElse(jEmptyObject)
val projects = (metaJson.hcursor --\\ "projects")
projects.focus.get.obj.get.fieldSet.filter{ case p =>
(projects --\\ p --\\ "name").focus.get.as[String].value.get == user
}.map{case p =>
val date = (projects --\\ p --\\ "modified").focus.get.as[String].value.get
val rs = new com.github.nscala_time.time.RichString(date)
// The time is parsed as GMT, so we need to re-add the zone time diff
// Should use the library to do so properly, but for now we just add the missing 4 hours
(p, rs.toDateTime.plusHours(4).toString("MM/dd/y 'at' hh:mma"))
// I may also want to add some sorting by date here. Not urgent as OpenRefine seems to
// return them in chronological order anyway.
}.toMap
}
}
class OpenRefineProjectClient(refineHost:String, projectId:String, refineData:String){
implicit def ColumnDecodeJson: DecodeJson[Column] =
DecodeJson(c => for {
idx <- (c --\\ "cellIndex").as[Int]
name <- (c --\\ "originalName").as[String]
} yield Column(idx, name))
val client = HttpClientBuilder.create().build
def close() = {
client.close()
}
def getAllChanges(): Map[String, List[Change]] = {
getHistory().flatMap( h => getChangesForOperation(h) ).groupBy(_.url)
}
// Get history of operations (ignoring future ones)
// command/core/get-history
// return list of past operation ids
def getHistory(): List[String] = {
val request = HTTPUtils.createGetRequest(refineHost, "command/core/get-history", Map("project" -> projectId))
val response = client.execute(request)
val histJson: Json = Parse.parseOption(EntityUtils.toString(response.getEntity())).getOrElse(jEmptyObject)
val past: List[Json] = (histJson.hcursor --\\ "past").focus.get.arrayOrEmpty
past.map( p => p.fieldOrZero("id").toString )
}
// get field for row (generic)
// If the field is key, look up rows until found
def getFieldforRow(row:String, field: String, key: Boolean = false): Json = {
// First, find out the column index of record - id
val idx: Int = getModels().find(_.name == field).get.idx
val request = HTTPUtils.createGetRequest(
refineHost, "command/core/get-rows",
Map("project" -> projectId,
"start" -> row,
"limit" -> "1"
)
)
val response = client.execute(request)
val rowJson: Json = Parse.parseOption(EntityUtils.toString(response.getEntity())).getOrElse(jEmptyObject)
(((rowJson.hcursor --\\ "rows" \\\\).any --\\ "cells" =\\ idx).any --\\ "v").focus.getOrElse(
// Recursing to first row with non-null field
getFieldforRow((row.toInt - 1).toString, field, key)
) // NB cannot convert to string here because of recursion.
}
// command/core/get-models
// it would be good to store the results of this somewhere to avoid
// multiple HTTP requests (at least two per change, see getChangesForOperation)
def getModels() : List[Column] = {
val request = HTTPUtils.createGetRequest(refineHost, "command/core/get-models",
Map("project" -> projectId))
val response = client.execute(request)
val modelJson: Json = Parse.parseOption(EntityUtils.toString(response.getEntity()))
.getOrElse(jEmptyObject)
(modelJson.hcursor --\\ "columnModel" --\\ "columns").focus.get.arrayOrEmpty.map(
c => c.as[Column].value.get
)
}
// get changes for operation id
// this goes to the refineData folder, unzips the right project file, reads change data for operation id
def getChangesForOperation(opId: String) = {
def getZipEntryInputStream(zipFile: ZipFile)(entry: ZipEntry) = zipFile.getInputStream(entry)
val ChangeData = """(?s).*row=(\\d+)\\ncell=(\\d+)\\nold=(\\{.*?\\}\\n)new=(\\{.*?\\}\\n).*?""".r
val zipFile = new ZipFile("%s/%s.project/history/%s.change.zip".format(refineData, projectId, opId))
val zis = getZipEntryInputStream(zipFile)(new ZipEntry("change.txt"))
    // Convert InputStream to String, assuming the file won't be huge.
val changesRaw = scala.io.Source.fromInputStream(zis).mkString
zipFile.close()
val changes: List[Change] = changesRaw.split("/ec/").toList.map( c => c match {
case ChangeData(row, cell, old, nw) =>
val pold : String = Parse.parseWith(old, _.field("v").getOrElse(jEmptyString).as[String].value.get,
err => err)
val pnew : String = Parse.parseWith(nw, _.field("v").getOrElse(jEmptyString).as[String].value.get,
err => err)
Some(Change(
getFieldforRow(row, "record - url", true).as[String].value.get,
getModels().find(_.idx == cell.toInt).get.name,
pold,
pnew
))
case _ => None
}).flatten
changes
}
}
class OpenRefineImporter(refineHost:String, userEmail: String) {
// The user's email is used to distinguish projects in an OR instance used by multiple users
val client = HttpClientBuilder.create().build
def createImportingJob() : String = {
// Create job
val request = HTTPUtils.createPostRequest(refineHost, "command/core/create-importing-job")
request.addHeader("Content-Type", "application/json")
val response = client.execute(request)
val jobJson = EntityUtils.toString(response.getEntity())
Parse.parseWith(jobJson, _.field("jobID").getOrElse(jEmptyString).toString, err => err)
}
def checkStatus (jobId:String) = {
// Check job status
val request = HTTPUtils.createPostRequest(refineHost, "command/core/get-importing-job-status",
Map("jobID" -> jobId))
val response = client.execute(request)
EntityUtils.toString(response.getEntity())
}
def sendData(data:List[Json], path:List[String]) = {
import org.apache.http.entity.ContentType
import java.nio.charset.StandardCharsets
// Set importer and send data
val jobId = createImportingJob()
val params = Map("controller" -> "core/default-importing-controller",
"jobID" -> jobId,
"subCommand" -> "load-raw-data")
val request = HTTPUtils.createPostRequest(refineHost, "command/core/importing-controller", params)
val entity = MultipartEntityBuilder.create()
val out = new java.io.PrintWriter(new java.io.FileWriter("/tmp/full.json"))
for ( d <- data ) {
out.println(d.toString)
val dataStream = new ByteArrayInputStream(d.toString.getBytes(StandardCharsets.UTF_8))
entity.addBinaryBody("f", dataStream, ContentType APPLICATION_JSON, "f")
}
request.setEntity(entity.build())
client.execute(request)
// Finalize the import
val fin = finalize(jobId, path)
// In order to return the id of the finalized project, we must wait on OpenRefine to
// complete the import. So we return a Future of the project id.
// Future {
    // Before proceeding, make sure the project creation is complete (NB it doesn't guarantee that the import is done)
// This could be handled more natively, perhaps with another Future.
if (Parse.parseWith(fin, _.field("message").getOrElse(jEmptyString).as[String].value.get,
err => err) == "done" ) {
// Check status until a project ID appears (which is introduced together with state : created-project)
def getAsyncProjectId() : (String, String) = {
val status: Json = Parse.parseOption(checkStatus(jobId)).get
val cursor = status.hcursor
val pid = (cursor --\\ "job" --\\ "config" --\\ "projectID")
val value = pid.focus.getOrElse( getAsyncProjectId() ).toString
client.close()
(value, userEmail)
}
getAsyncProjectId()
}
else None
// }
}
def finalize (jobId:String, path:List[String]) : String = {
import org.apache.http.NameValuePair
import org.apache.http.client.entity.UrlEncodedFormEntity
import org.apache.http.message.BasicNameValuePair
// choose field, set options, complete project creation
val options =
Json(
"recordPath" := path,
"limit" := -1,
"trimStrings" := jFalse,
"guessCellValueTypes" := jFalse,
"storeEmptyStrings" := jTrue,
"includeFileSources" := jFalse,
"projectName" := userEmail
)
val params =
Map(
"controller" -> "core/default-importing-controller",
"jobID" -> jobId,
"subCommand" -> "create-project"
)
val request = HTTPUtils.createPostRequest(refineHost, "command/core/importing-controller", params)
val nameValuePairs = new ArrayList[NameValuePair](1)
nameValuePairs.add(new BasicNameValuePair("format", "text/json"))
nameValuePairs.add(new BasicNameValuePair("options", options.toString))
request.setEntity(new UrlEncodedFormEntity(nameValuePairs))
val response = client.execute(request)
EntityUtils.toString(response.getEntity())
}
}
|
umd-mith/metaHathi
|
src/main/scala/org/mith/metaHathi/utils/openRefine.scala
|
Scala
|
apache-2.0
| 10,710
|
case class Wrap(i: Int) {
override def hashCode = i * 0x9e3775cd
}
|
yusuke2255/dotty
|
tests/pending/run/ctries-old/Wrap.scala
|
Scala
|
bsd-3-clause
| 75
|
package is.hail.annotations
import is.hail.utils._
object RegionPool {
private lazy val thePool: ThreadLocal[RegionPool] = new ThreadLocal[RegionPool]() {
override def initialValue(): RegionPool = RegionPool()
}
def get: RegionPool = thePool.get()
def apply(strictMemoryCheck: Boolean = false): RegionPool = {
val thread = Thread.currentThread()
new RegionPool(strictMemoryCheck, thread.getName, thread.getId)
}
}
final class RegionPool private(strictMemoryCheck: Boolean, threadName: String, threadID: Long) extends AutoCloseable {
log.info(s"RegionPool: initialized for thread $threadID: $threadName")
protected[annotations] val freeBlocks: Array[ArrayBuilder[Long]] = Array.fill[ArrayBuilder[Long]](4)(new ArrayBuilder[Long])
protected[annotations] val regions = new ArrayBuilder[RegionMemory]()
private val freeRegions = new ArrayBuilder[RegionMemory]()
private val blocks: Array[Long] = Array(0L, 0L, 0L, 0L)
private var totalAllocatedBytes: Long = 0L
private var allocationEchoThreshold: Long = 256 * 1024
private var numJavaObjects: Long = 0L
private var maxNumJavaObjects: Long = 0L
def addJavaObject(): Unit = {
numJavaObjects += 1
}
def removeJavaObjects(n: Int): Unit = {
numJavaObjects -= n
}
def getTotalAllocatedBytes: Long = totalAllocatedBytes
private def incrementAllocatedBytes(toAdd: Long): Unit = {
totalAllocatedBytes += toAdd
if (totalAllocatedBytes >= allocationEchoThreshold) {
report("REPORT_THRESHOLD")
allocationEchoThreshold *= 2
}
}
protected[annotations] def reclaim(memory: RegionMemory): Unit = {
freeRegions += memory
}
protected[annotations] def getBlock(size: Int): Long = {
val pool = freeBlocks(size)
if (pool.size > 0) {
pool.pop()
} else {
blocks(size) += 1
val blockByteSize = Region.SIZES(size)
incrementAllocatedBytes(blockByteSize)
Memory.malloc(blockByteSize)
}
}
protected[annotations] def getChunk(size: Long): Long = {
incrementAllocatedBytes(size)
Memory.malloc(size)
}
protected[annotations] def freeChunks(ab: ArrayBuilder[Long], totalSize: Long): Unit = {
while (ab.size > 0) {
val addr = ab.pop()
Memory.free(addr)
}
totalAllocatedBytes -= totalSize
}
protected[annotations] def getMemory(size: Int): RegionMemory = {
if (freeRegions.size > 0) {
val rm = freeRegions.pop()
rm.initialize(size)
rm
} else {
val rm = new RegionMemory(this)
rm.initialize(size)
regions += rm
rm
}
}
def getRegion(): Region = getRegion(Region.REGULAR)
def getRegion(size: Int): Region = {
val r = new Region(size, this)
r.memory = getMemory(size)
r
}
def numRegions(): Int = regions.size
def numFreeRegions(): Int = freeRegions.size
def numFreeBlocks(): Int = freeBlocks.map(_.size).sum
def logStats(context: String): Unit = {
val pool = RegionPool.get
val nFree = pool.numFreeRegions()
val nRegions = pool.numRegions()
val nBlocks = pool.numFreeBlocks()
val freeBlockCounts = freeBlocks.map(_.size)
val usedBlockCounts = blocks.zip(freeBlockCounts).map { case (tot, free) => tot - free }
info(
s"""Region count for $context
| regions: $nRegions active, $nFree free
| blocks: $nBlocks
| free: ${ freeBlockCounts.mkString(", ") }
| used: ${ usedBlockCounts.mkString(", ") }""".stripMargin)
}
def report(context: String): Unit = {
var inBlocks = 0L
var i = 0
while (i < 4) {
inBlocks += blocks(i) * Region.SIZES(i)
i += 1
}
log.info(s"RegionPool: $context: ${readableBytes(totalAllocatedBytes)} allocated (${readableBytes(inBlocks)} blocks / " +
s"${readableBytes(totalAllocatedBytes - inBlocks)} chunks), regions.size = ${regions.size}, " +
s"$numJavaObjects current java objects, $maxNumJavaObjects max java objects, thread $threadID: $threadName")
// log.info("-----------STACK_TRACES---------")
// val stacks: String = regions.result().toIndexedSeq.flatMap(r => r.stackTrace.map((r.getTotalChunkMemory(), _))).foldLeft("")((a: String, b) => a + "\\n" + b.toString())
// log.info(stacks)
// log.info("---------------END--------------")
}
override def finalize(): Unit = close()
def close(): Unit = {
report("FREE")
var i = 0
while (i < regions.size) {
regions(i).freeMemory()
i += 1
}
i = 0
while (i < 4) {
val blockSize = Region.SIZES(i)
val blocks = freeBlocks(i)
while (blocks.size > 0) {
Memory.free(blocks.pop())
totalAllocatedBytes -= blockSize
}
i += 1
}
if (totalAllocatedBytes != 0) {
val msg = s"RegionPool: total allocated bytes not 0 after closing! total allocated: " +
s"$totalAllocatedBytes (${ readableBytes(totalAllocatedBytes) })"
if (strictMemoryCheck)
fatal(msg)
else
warn(msg)
}
}
}
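// Illustrative usage sketch (not part of the original file), using only members defined above:
// obtain the thread-local pool, allocate a region from it, and release all native memory on close.
//
//   val pool = RegionPool.get
//   val region = pool.getRegion()        // a Region backed by pooled blocks
//   pool.logStats("after allocation")    // logs active/free region and block counts
//   pool.close()                         // frees blocks and warns (or fails) on leaked bytes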
|
cseed/hail
|
hail/src/main/scala/is/hail/annotations/RegionPool.scala
|
Scala
|
mit
| 5,023
|
/*
* Copyright (c) <2015-2016>, see CONTRIBUTORS
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package ch.usi.inf.l3.sana.oberon0.l3
import ch.usi.inf.l3.sana
import sana.tiny.dsl._
import sana.tiny.core._
import sana.tiny.core.Implicits._
import sana.tiny.symbols.Symbol
import sana.tiny.ast.{Tree, NoTree}
import sana.oberon0.Nodes
import sana.oberon0.namers._
import sana.primj.namers.{IdentNamerComponent => _, MethodDefNamerComponent => _, _}
import sana.ooj.namers.TemplateNamerComponent
// import sana.arrayj.namers.{ArrayAccessNamerComponent}
trait NamerFamilyApi
extends TransformationFamily[Tree, Tree] {
self =>
override def default = { case s: Tree => s }
def components: List[PartialFunction[Tree, Tree]] =
generateComponents[Tree, Tree](
"Program,MethodDef,Binary,Unary,Literal,While,If,MethodDef,Block,ValDef,TypeUse,Apply,Ident,Assign",
"NamerComponent", "name", "")
// "Ident,TypeUse,Assign,Ternary,Apply,Return,Binary,Literal")
def name: Tree => Tree = family
}
case class NamerFamily(compiler: CompilerInterface)
extends NamerFamilyApi
|
amanjpro/languages-a-la-carte
|
oberon0/src/main/scala/submodules/l3/NamerFamily.scala
|
Scala
|
bsd-3-clause
| 2,573
|
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc.doc
package doclet
import scala.collection._
/** Custom Scaladoc generators must implement the `Generator` class. A custom generator can be selected in Scaladoc
* using the `-doc-generator` command line option.
* The `Generator` class does not provide data about the documented code. A number of data provider traits can be used
* to configure what data is actually available to the generator:
* - A `Universer` provides a `Universe` data structure representing the interfaces and comments of the documented
* program.
* To implement this class only requires defining method `generateImpl`. */
abstract class Generator {
/** A series of tests that must be true before generation can be done. This is used by data provider traits to
* confirm that they have been correctly initialised before allowing generation to proceed. */
protected val checks: mutable.Set[() => Boolean] =
mutable.Set.empty[() => Boolean]
/** Outputs documentation (as a side effect). */
def generate(): Unit = {
assert(checks forall { check => check() })
generateImpl()
}
/** Outputs documentation (as a side effect). This method is called only if all `checks` are true. */
protected def generateImpl(): Unit
}
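// A minimal sketch of a custom generator (not part of the original file). It assumes the
// `Universer` data-provider trait mentioned above exposes a `universe: Universe` member;
// the class and method names here are illustrative only.
//
//   abstract class ConsoleSummaryGenerator extends Generator with Universer {
//     checks += { () => universe != null }
//     protected def generateImpl(): Unit =
//       println(s"Documented root package: ${universe.rootPackage.name}")
//   }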
|
lrytz/scala
|
src/scaladoc/scala/tools/nsc/doc/doclet/Generator.scala
|
Scala
|
apache-2.0
| 1,549
|
/*
* Copyright (c) 2013, Scodec
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package scodec
package codecs
import java.io.ByteArrayInputStream
import java.security.KeyPairGenerator
import java.security.cert.{CertificateFactory, X509Certificate}
import java.util.Date
import org.bouncycastle.asn1.x500.X500Name
import org.bouncycastle.cert.jcajce.*
import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder
class CertificateCodecTest extends CodecSuite:
private val keyPair =
val keyGen = KeyPairGenerator.getInstance("RSA").nn
keyGen.initialize(1024)
keyGen.generateKeyPair.nn
val aCert: X509Certificate =
val issuer = new X500Name("CN=Test")
val serialNum = BigInt(1).bigInteger
val notBefore = new Date(System.currentTimeMillis - 1000000)
val notAfter = new Date()
val subject = issuer
val bldr = new JcaX509v3CertificateBuilder(
issuer,
serialNum,
notBefore,
notAfter,
subject,
keyPair.getPublic
)
val signer = new JcaContentSignerBuilder("SHA1withRSA").build(keyPair.getPrivate)
val holder = bldr.build(signer).nn
CertificateFactory
.getInstance("X.509")
.generateCertificate(new ByteArrayInputStream(holder.getEncoded))
.asInstanceOf[X509Certificate]
test("roundtrip") {
roundtrip(x509Certificate, aCert)
}
|
scodec/scodec
|
unitTests/src/test/scala/scodec/codecs/CertificateCodecTest.scala
|
Scala
|
bsd-3-clause
| 2,840
|
package issue10
@pkg.identity
@pkg.placebo
class C
|
xeno-by/paradise
|
tests/reflect/src/test/scala/compile/issue10/Test2.scala
|
Scala
|
bsd-3-clause
| 52
|
package gangstead
//case class Enthusiast(level: Int)
case class Admirer(esteem: Int)
class BetterAdmirer(h : Admirer){
def boosted = h.esteem + 1
}
object Helper1 {
implicit def AdmirerHelper(h: Admirer) = new BetterAdmirer(h)
}
object Helper2 {
implicit class BestAdmirer(h : Admirer) {
def bested = h.esteem * 100
}
}
object ImplicitClassUseCase extends App{
import Helper1._
import Helper2._
val scalaHuman = Admirer(11)
println("Esteem: " + scalaHuman.esteem)
println("Boosted: " + scalaHuman.boosted)
println("Bested: " + scalaHuman.bested)
}
|
gangstead/implicitly-yours
|
src/main/scala/gangstead/ImplicitClass.scala
|
Scala
|
gpl-2.0
| 583
|
/*
* Copyright 2013 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.vertx.scala.core.streams
import org.vertx.java.core.streams.{ Pump => JPump }
import org.vertx.scala.Self
/**
* Pumps data from a [[org.vertx.scala.core.streams.ReadStream]] to a
* [[org.vertx.scala.core.streams.WriteStream]] and performs flow control where necessary to
* prevent the write stream buffer from getting overfull.<p>
* Instances of this class read bytes from a [[org.vertx.scala.core.streams.ReadStream]]
* and write them to a [[org.vertx.scala.core.streams.WriteStream]]. If data
* can be read faster than it can be written this could result in the write
* queue of the [[org.vertx.scala.core.streams.WriteStream]] growing
* without bound, eventually causing it to exhaust all available RAM.<p>
* To prevent this, after each write, instances of this class check whether the write queue of the
* [[org.vertx.scala.core.streams.WriteStream]] is full, and if so, the
* [[org.vertx.scala.core.streams.ReadStream]] is paused, and a `drainHandler` is set on the
* [[org.vertx.scala.core.streams.WriteStream]]. When the [[org.vertx.scala.core.streams.WriteStream]]
* has processed half of its backlog, the `drainHandler` will be
* called, which results in the pump resuming the [[org.vertx.scala.core.streams.ReadStream]].<p>
* This class can be used to pump from any [[org.vertx.scala.core.streams.ReadStream]]
* to any [[org.vertx.scala.core.streams.WriteStream]], e.g. from an
* [[org.vertx.scala.core.http.HttpServerRequest]] to an [[org.vertx.scala.core.file.AsyncFile]],
* or from [[org.vertx.scala.core.net.NetSocket]] to a [[org.vertx.scala.core.http.WebSocket]].<p>
*
* Instances of this class are not thread-safe.<p>
*
* @author <a href="http://tfox.org">Tim Fox</a>
* @author swilliams
* @author <a href="http://www.campudus.com/">Joern Bernhardt</a>
*/
final class Pump private[scala] (val asJava: JPump) extends Self {
/**
* Set the write queue max size to `maxSize`
*/
def setWriteQueueMaxSize(maxSize: Int): Pump = wrap(asJava.setWriteQueueMaxSize(maxSize))
/**
* Start the Pump. The Pump can be started and stopped multiple times.
*/
def start(): Pump = wrap(asJava.start())
/**
* Stop the Pump. The Pump can be started and stopped multiple times.
*/
def stop(): Pump = wrap(asJava.stop())
/**
* Return the total number of bytes pumped by this pump.
*/
def bytesPumped(): Int = asJava.bytesPumped()
}
object Pump {
/**
* Create a new `Pump` with the given `ReadStream` and `WriteStream`
*/
def apply[A <: ReadStream, B <: WriteStream](rs: ReadStream, ws: WriteStream) = createPump(rs, ws)
/**
* Create a new `Pump` with the given `ReadStream` and `WriteStream` and
* `writeQueueMaxSize`
*/
def apply[A <: ReadStream, B <: WriteStream](rs: ReadStream, ws: WriteStream, writeQueueMaxSize: Int) =
createPump(rs, ws, writeQueueMaxSize)
/**
* Create a new `Pump` with the given `ReadStream` and `WriteStream`
*/
def createPump[A <: ReadStream, B <: WriteStream](rs: ReadStream, ws: WriteStream) =
new Pump(JPump.createPump(rs.asJava, ws.asJava))
/**
* Create a new `Pump` with the given `ReadStream` and `WriteStream` and
* `writeQueueMaxSize`
*/
def createPump[A <: ReadStream, B <: WriteStream](rs: ReadStream, ws: WriteStream, writeQueueMaxSize: Int) =
new Pump(JPump.createPump(rs.asJava, ws.asJava, writeQueueMaxSize))
}
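// Illustrative usage sketch (not part of the original file): pumping an HTTP upload into a file
// while flow control pauses the request whenever the file's write queue fills up. `request` and
// `asyncFile` stand for an HttpServerRequest and an AsyncFile obtained elsewhere; 8192 is an
// example write queue max size.
//
//   val pump = Pump.createPump(request, asyncFile, 8192)
//   pump.start()
//   // ... once the request has ended:
//   pump.stop()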
|
galderz/mod-lang-scala
|
src/main/scala/org/vertx/scala/core/streams/Pump.scala
|
Scala
|
apache-2.0
| 4,008
|
package util
import java.io.{File, IOException, PrintWriter}
import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Path}
import java.time.format.DateTimeFormatter
import java.time.{ZoneId, ZonedDateTime}
import java.util.Locale
import javax.xml.transform.stream.StreamSource
import javax.xml.validation.{Schema, SchemaFactory, Validator}
import org.xml.sax.ErrorHandler
import scala.io.{BufferedSource, Source}
import scala.xml.{SAXException, SAXParseException}
/**
* @author K.Sakamoto
* Created on 2016/10/28
*/
class XmlSchema(schemaPath: File) {
def isValid: Boolean = {
val factory: SchemaFactory = SchemaFactory.newInstance("http://www.w3.org/2001/XMLSchema")
val schema: Schema = factory.newSchema(schemaPath)
val validator: Validator = schema.newValidator
validator.setErrorHandler(new MyErrorHandler())
val source: StreamSource = new StreamSource(schemaPath.toString.split(File.separator).last)
try {
validator.validate(source)
true
} catch {
case e: SAXException =>
println(e.getMessage)
false
}
}
}
/**
* @author K.Sakamoto
*/
object MyLogFile {
private val logFile: File = createLogFile
private def createLogFile: File = {
new File("%1$s%2$s%3$s%2$s%4$s" format (
System.getProperty("user.home"),
File.separator,
".essay_qa",
"log")
)
}
def getLogFile: File = logFile
def clearLogFile() {
logFile.deleteOnExit()
}
def getLog: String = {
try {
if (logFile.canRead) {
val builder: StringBuilder = new StringBuilder()
val source: BufferedSource = Source.fromFile(logFile)
for (line <- source.getLines) {
builder.
append(line).
append('\\n')
}
Option(builder.result) match {
case Some(result) =>
result
case None =>
""
}
} else {
val logParentPath: Path = logFile.toPath.getParent
if (!logParentPath.toFile.canRead) {
Files.createDirectory(logParentPath)
}
logFile.createNewFile()
""
}
} catch {
case e: IOException =>
e.printStackTrace()
""
}
}
}
/**
* @author K.Sakamoto
*/
class MyErrorHandler extends ErrorHandler {
private val logPath: Path = MyLogFile.getLogFile.toPath
private def getLogFormat(errorCode: String, lineNumber: Int, columnNumber: Int, message: String) = {
"%s :: [%s] line %d, column %d, %s" format(
//new SimpleDateFormat("EEE, d MMM yyyy HH:mm:ss Z", Locale.US) format new Date(Calendar.
// getInstance(TimeZone getTimeZone "Asia/Tokyo", Locale.US).getTimeInMillis),
DateTimeFormatter.ofPattern("EEE, d MMM yyyy HH:mm:ss Z", Locale.US).
format(ZonedDateTime.now(ZoneId.of("Asia/Tokyo"))),
errorCode, lineNumber, columnNumber, message)
}
@throws(classOf[SAXException])
override def warning(e: SAXParseException) {
val writer: PrintWriter = new PrintWriter(Files.newBufferedWriter(logPath, StandardCharsets.UTF_8))
try {
val message: String = getLogFormat("warning", e.getLineNumber, e.getColumnNumber, e.toString)
writer.println(message)
println(message)
} catch {
case e: IOException =>
e.printStackTrace(writer)
} finally {
try {
writer.close()
} catch {
case e: IOException =>
e.printStackTrace(writer)
}
}
}
@throws(classOf[SAXException])
override def error(e: SAXParseException) {
val writer: PrintWriter = new PrintWriter(Files.newBufferedWriter(logPath, StandardCharsets.UTF_8))
try {
val message: String = getLogFormat("error", e.getLineNumber, e.getColumnNumber, e.toString)
writer.println(message)
println(message)
} catch {
case e: IOException =>
e.printStackTrace(writer)
} finally {
try {
writer.close()
} catch {
case e: IOException =>
e.printStackTrace(writer)
}
}
}
@throws(classOf[SAXException])
override def fatalError(e: SAXParseException) {
val writer: PrintWriter = new PrintWriter(Files.newBufferedWriter(logPath, StandardCharsets.UTF_8))
try {
val message: String = getLogFormat("fatal error", e.getLineNumber, e.getColumnNumber, e.toString)
writer.println(message)
println(message)
} catch {
case e: IOException =>
e.printStackTrace(writer)
} finally {
try {
writer.close()
} catch {
case e: IOException =>
e.printStackTrace(writer)
}
}
}
}
|
ktr-skmt/FelisCatusZero
|
src/main/scala/util/XmlSchema.scala
|
Scala
|
apache-2.0
| 4,645
|
package sampleclean.activeml
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import sampleclean.crowd.context.{GroupLabelingContext, PointLabelingContext}
import sampleclean.crowd.{CrowdTaskConfiguration, CrowdTask}
/**
* Class for using active selection criteria to pick points for labeling during active learning.
* @tparam M the type of model used by the selector
* @tparam C the point labeling context used by unlabeled points.
*/
abstract class ActivePointSelector[M, C <: PointLabelingContext] extends Serializable {
/**
* Splits points into two sets: the n points to label next, and all others.
* @param input an RDD of unlabeled points in the form (id, feature vector, labeling context).
* @param nPoints The number of points to select.
* @param model A trained model.
* @return Two RDDs in the same format as input, one consisting of points close to the margin, the other consisting
* of the remaining points.
*/
def selectPoints(input: RDD[(String, Vector, C)],
nPoints: Int,
model: M): (RDD[(String, Vector, C)], RDD[(String, Vector, C)])
}
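/**
 * A minimal sketch of a selector (illustrative, not part of the original file): it ignores the
 * trained model and samples the next batch uniformly at random, a common baseline to compare
 * against margin- or uncertainty-based selection. It mirrors the randomSplit-based bootstrap
 * sampling used in ActiveLearningAlgorithm.train below.
 */
class RandomPointSelector[M, C <: PointLabelingContext] extends ActivePointSelector[M, C] {
  override def selectPoints(input: RDD[(String, Vector, C)],
                            nPoints: Int,
                            model: M): (RDD[(String, Vector, C)], RDD[(String, Vector, C)]) = {
    // randomSplit normalizes the weights, so these give roughly nPoints selected points
    val remaining = math.max(input.count().toDouble - nPoints, 0.0)
    val splits = input.randomSplit(Array(nPoints.toDouble, remaining))
    (splits(0), splits(1))
  }
}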
/**
* Parameters to run the active learning framework
* @param budget maximum number of labels to acquire during training.
* @param batchSize number of new labels to acquire in each iteration.
* @param bootstrapSize number of labeled points required to train the initial model.
*/
case class ActiveLearningParameters(budget: Int=10,
batchSize: Int=5,
bootstrapSize: Int=10)
/**
* An algorithm that uses active learning to iteratively train models on new labeled data.
* @tparam M model trained at each iteration
* @tparam A parameters for the model
* @tparam C point context for labeling new data
* @tparam G group context for labeling new data
*/
abstract class ActiveLearningAlgorithm[M, A, C <: PointLabelingContext, G <: GroupLabelingContext] extends Serializable {
// TODO(dhaas): these methods would go away if we had a generic Model interface a la MLI.
/**
* Trains a new model instance on the available data.
* @param data training data for the model.
* @param parameters parameters for model training.
* @return the trained model.
*/
def trainModel(data: RDD[LabeledPoint], parameters:A): M
/**
* Uses the model to predict a point's label.
* @param model the trained model.
* @param point the point to predict.
* @return the predicted label.
*/
def predict(model:M, point: Vector): Double
/**
* Trains a series of models, using active learning to label new points for each new model.
*
* Specifically, this function:
* - bootstraps enough labels to train an initial model
* - selects a batch of new points to get labels for
* - gets labels for the points asynchronously by sending them to the crowd
* - trains a new model with each new batch of labels.
*
* @param labeledInput already labeled points. Each point is an
* (id, [[org.apache.spark.mllib.regression.LabeledPoint]]) tuple. Pass an empty RDD if there are
* no labeled points.
* @param unlabeledInput points without labels. Each point is an (id, feature vector, labeling context) tuple.
* @param groupContext context needed for labeling shared among all points.
* @param algParams parameters for training individual models.
* @param frameworkParams parameters for the active learning framework.
* @param crowdTask crowd task for getting labels for unlabeled data.
* @param crowdTaskConfig configuration settings for running the crowd task.
* @param pointSelector point selector for picking the next points to label at each iteration.
* @return an [[ActiveLearningTrainingFuture]], a future-like object with callbacks whenever new models are trained
* or new data is labeled.
*/
def train(
labeledInput: RDD[(String, LabeledPoint)],
unlabeledInput: RDD[(String, Vector, C)],
groupContext: G,
algParams: A,
frameworkParams: ActiveLearningParameters,
crowdTask: CrowdTask[C, G, Double],
crowdTaskConfig: CrowdTaskConfiguration,
pointSelector: ActivePointSelector[M, C]): ActiveLearningTrainingFuture[M] = {
// helper function that does the actual work of training.
def runTraining(trainingState: ActiveLearningTrainingState[M]): Unit = {
var spent = 0
var labeledInputLocal = labeledInput
var unlabeledInputLocal = unlabeledInput
var numLabeled = labeledInputLocal.count()
var numUnlabeled = unlabeledInputLocal.count()
//println(numLabeled + " labeled points given. " + numUnlabeled + " unlabeled points given.")
// bootstrap labels if necessary
if (numLabeled < frameworkParams.bootstrapSize) {
val numLabelsMissing = (frameworkParams.bootstrapSize - numLabeled).toDouble
//println("not enough labeled points--bootstrapping with a random sample.")
//println(numLabelsMissing + " missing labels.")
// split points to get the set that needs labeling
val newLabelSplit = unlabeledInputLocal.randomSplit(Array(numLabelsMissing, numUnlabeled))
val pointsToLabel = newLabelSplit(0)
unlabeledInputLocal = newLabelSplit(1)
//println("split unlabeled points: " + pointsToLabel.count() + " to label, " + unlabeledInputLocal.count() + " still unlabeled")
// get the labels
val newLabeledPoints = getLabelsBlocking(pointsToLabel, crowdTask, groupContext, crowdTaskConfig)
labeledInputLocal = labeledInputLocal.union(newLabeledPoints).cache()
numLabeled = labeledInputLocal.count()
numUnlabeled = unlabeledInputLocal.count()
spent += pointsToLabel.count().toInt
//println("After bootstrap: " + numLabeled + " labeled points, " + numUnlabeled + " unlabeled points.")
//println("Remaining budget: " + (frameworkParams.budget - spent))
// update the training state with the new labeled data
trainingState.addLabeledData(newLabeledPoints.map(p => (p._1, p._2.label)))
}
// train an initial model
System.out.flush()
//println("Training initial model...")
var model = trainModel(labeledInputLocal.map(p => p._2), algParams)
//println("Done. Train Error=" + trainError(model, labeledInputLocal.map(p => p._2), numLabeled))
// update the training state with the new model
trainingState.addModel(model, numLabeled)
while (spent < frameworkParams.budget) {
// decide on the next points to label
var batchSize = math.min(frameworkParams.batchSize, frameworkParams.budget - spent)
//println("Getting labels for " + batchSize + " new points...")
val nextPoints = pointSelector.selectPoints(unlabeledInputLocal, batchSize, model)
// get the labels
val newLabeledPoints = getLabelsBlocking(nextPoints._1, crowdTask, groupContext, crowdTaskConfig)
labeledInputLocal = labeledInputLocal.union(newLabeledPoints).cache()
unlabeledInputLocal = nextPoints._2
numLabeled = labeledInputLocal.count()
numUnlabeled = unlabeledInputLocal.count()
spent += batchSize
//println("Done. Now " + numLabeled + " labeled points, " + numUnlabeled + " unlabeled points.")
// update the training state with the new labeled data
trainingState.addLabeledData(newLabeledPoints.map(p => (p._1, p._2.label)))
// retrain the model
//println("Retraining model...")
model = trainModel(labeledInputLocal.map(p => p._2), algParams)
//println("Done. Train Error=" + trainError(model, labeledInputLocal.map(p => p._2), numLabeled))
//println(frameworkParams.budget - spent + " labels left in budget.")
// update the training state with the new model
trainingState.addModel(model, numLabeled)
}
}
// return the asynchronous model training context
new ActiveLearningTrainingFuture[M](runTraining)
}
/**
* Trains a series of models, using uncertainty sampling to label new points for each new model.
*
* Specifically, this function:
* - bootstraps enough labels to train an initial model
* - selects a batch of new points to get labels for using uncertainty sampling
* - gets labels for the points asynchronously by sending them to the crowd
* - trains a new model with each new batch of labels.
*
* Uses default values for active learning framework parameters.
*
* @param labeledInput already labeled points. Each point is an
* (id, [[org.apache.spark.mllib.regression.LabeledPoint]]) tuple. Pass an empty RDD if there are
* no labeled points.
* @param unlabeledInput points without labels. Each point is an (id, feature vector, labeling context) tuple.
* @param groupContext context needed for labeling shared among all points.
* @param algParams parameters for training individual models.
* @param crowdTask crowd task for getting labels for unlabeled data.
* @param crowdTaskConfig configuration settings for running the crowd task.
* @param pointSelector point selector for picking the next points to label at each iteration.
* @return an [[ActiveLearningTrainingFuture]], a future-like object with callbacks whenever new models are trained
* or new data is labeled.
*/
def train(labeledInput: RDD[(String, LabeledPoint)],
unlabeledInput: RDD[(String, Vector, C)],
groupContext: G,
algParams: A,
crowdTask: CrowdTask[C, G, Double],
crowdTaskConfig: CrowdTaskConfiguration,
pointSelector: ActivePointSelector[M, C]): ActiveLearningTrainingFuture[M] = {
train(labeledInput, unlabeledInput, groupContext, algParams, new ActiveLearningParameters(),
crowdTask, crowdTaskConfig, pointSelector)
}
/**
* Runs the crowd task to add labels to an RDD of unlabeled points.
* Blocks until all points are labeled.
* @param pointsToLabel an RDD of unlabeled points.
* @param crowdTask the crowd task that will provide labels for the points (as Doubles).
* @param groupContext the group context for all of the points to show the crowd.
* @param crowdTaskConfig configuration for the crowd task.
* @return the original points with crowd labels added instead of context.
*/
def getLabelsBlocking(pointsToLabel: RDD[(String, Vector, C)],
crowdTask:CrowdTask[C, G, Double],
groupContext: G,
crowdTaskConfig: CrowdTaskConfiguration): RDD[(String, LabeledPoint)] = {
val labels = crowdTask.processBlocking(pointsToLabel map { p => p._1 -> p._3}, groupContext, crowdTaskConfig)
val newLabeledPoints = crowdTask.joinResults(pointsToLabel map { p => p._1 -> p._2}, labels) map { point =>
point._2._2 match {
case Some(d) => point._1 -> LabeledPoint(d, point._2._1)
case _ => throw new RuntimeException("Crowd claimed to label all points, but a label was missing!")
}
}
newLabeledPoints
}
/**
* Classification Error on a training set.
* @param model a trained model.
* @param trainingData an RDD of [[org.apache.spark.mllib.regression.LabeledPoint]]s used to train the model.
* @param trainN the size of trainingData.
* @return the fraction of training examples incorrectly classified by the model.
*/
def trainError(model: M, trainingData: RDD[LabeledPoint], trainN: Long): Double = {
val labelAndPreds = trainingData.map { point =>
val prediction = predict(model, point.features)
(point.label, prediction)
}
labelAndPreds.filter(r => r._1 != r._2).count().toDouble / trainN
}
}
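/**
 * A minimal sketch of a concrete algorithm (illustrative, not part of the original file),
 * backed by MLlib's SVMWithSGD. The parameter type A is taken here to be the number of SGD
 * iterations; any real implementation would likely carry richer training parameters.
 */
class SVMActiveLearner[C <: PointLabelingContext, G <: GroupLabelingContext]
  extends ActiveLearningAlgorithm[org.apache.spark.mllib.classification.SVMModel, Int, C, G] {

  // train an SVM on the currently labeled points
  def trainModel(data: RDD[LabeledPoint], numIterations: Int): org.apache.spark.mllib.classification.SVMModel =
    org.apache.spark.mllib.classification.SVMWithSGD.train(data, numIterations)

  // predict a binary label for a single feature vector
  def predict(model: org.apache.spark.mllib.classification.SVMModel, point: Vector): Double =
    model.predict(point)
}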
|
sjyk/sampleclean-async
|
src/main/scala/sampleclean/activeml/ActiveLearningAlgorithm.scala
|
Scala
|
apache-2.0
| 11,928
|
package spinoco.protocol.rtp
import scodec.bits.ByteVector
import scodec.{Attempt, Codec, Err}
import scodec.codecs._
/**
* Created by pach on 27/02/17.
*/
package object codec {
val version: Codec[RTPVersion.Value] = enumerated(uint(2), RTPVersion)
val wordSizeCodec: Codec[Int] = uint16.exmap(
szWords => Attempt.successful(szWords*4)
, sz =>
if (sz % 4 == 0) Attempt.successful(sz/4)
else Attempt.failure(Err(s"Expected size in words (%4 == 0) but got $sz"))
)
// helper to properly do padding. Padding index can be 0 .. 3
val paddingMapBytes = Map(
4 -> ByteVector.empty
, 3 -> ByteVector.view(Array[Byte](1))
, 2 -> ByteVector.view(Array[Byte](0, 2))
, 1 -> ByteVector.view(Array[Byte](0, 0, 3))
, 0 -> ByteVector.empty
)
val paddingMapBits = paddingMapBytes.mapValues(_.bits)
}
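// Illustrative round trip (not part of the original file): wordSizeCodec stores lengths in
// 32-bit words, so a byte size of 8 encodes as the 16-bit value 2 and decodes back to 8,
// while a size that is not a multiple of 4 fails to encode with the error above.
//
//   wordSizeCodec.encode(8)   // same bits as uint16.encode(2)
//   wordSizeCodec.encode(10)  // Attempt.failure(Err("Expected size in words (%4 == 0) but got 10"))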
|
Spinoco/protocol
|
rtp/src/main/scala/spinoco/protocol/rtp/codec/codec.scala
|
Scala
|
mit
| 849
|
package com.ubirch.user.model.db
import com.ubirch.util.date.DateUtil
import com.ubirch.util.uuid.UUIDUtil
import org.joda.time.DateTime
/**
*
* @param id unique user identifier
 * @param displayName name displayed for the user
 * @param providerId identifier of the authentication provider
 * @param externalId sha512 hashed email
 * @param locale locale preferred by the user
* @param activeUser if the user is active or not
* @param email email address
* @param hashedEmail sha256 hashed email
* @param action if user is to become de-/activated
* @param executionDate when the user shall become de-/activated
* @param created date of creation
* @param updated date of last update
*/
case class User(id: String = UUIDUtil.uuidStr,
displayName: String,
providerId: String,
externalId: String,
locale: String,
activeUser: Option[Boolean] = Some(false),
email: Option[String] = None,
hashedEmail: Option[String] = None,
action: Option[Action] = None,
executionDate: Option[DateTime] = None,
created: DateTime = DateUtil.nowUTC,
updated: DateTime = DateUtil.nowUTC
)
sealed trait Action
case object Activate extends Action
case object Deactivate extends Action
object Action {
def unsafeFromString(value: String): Action = value.toUpperCase match {
case "ACTIVATE" => Activate
case "DEACTIVATE" => Deactivate
}
def toFormattedString(status: Action): String = status match {
case Activate => "ACTIVATE"
case Deactivate => "DEACTIVATE"
}
}
|
ubirch/ubirch-user-service
|
model-db/src/main/scala/com/ubirch/user/model/db/User.scala
|
Scala
|
apache-2.0
| 1,632
|
/*
* Copyright (C) 2016 Christopher Batey and Dogan Narinc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scassandra.codec.messages
import org.scassandra.codec.{CodecSpec, ProtocolVersion, Startup}
import scodec.Codec
class StartupSpec extends CodecSpec {
"Startup.codec" when {
withProtocolVersions { (protocolVersion: ProtocolVersion) =>
implicit val p = protocolVersion
implicit val codec = Codec[Startup]
"encode and decode empty options map" in {
encodeAndDecode(Startup())
}
"encode and decode present map" in {
encodeAndDecode(Startup(Map("hello" -> "world", "goodbye" -> "stars")))
}
}
}
}
|
mikefero/cpp-driver
|
gtests/src/integration/scassandra/server/codec/src/test/scala/org/scassandra/codec/messages/StartupSpec.scala
|
Scala
|
apache-2.0
| 1,192
|
package name.abhijitsarkar.akka
/**
* @author Abhijit Sarkar
*/
import java.io._
import java.net.URL
import java.time.temporal.ChronoField._
import java.time.temporal.ChronoUnit.SECONDS
import java.time.{Instant, LocalDateTime}
import akka.Done
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{FileIO, Flow, Keep, Sink, Source}
import akka.util.ByteString
import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.util.{Random, Try}
/**
* @author Abhijit Sarkar
*/
object Downloader {
val megabytes = 1024 * 1024
val BASE_URL = "ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/daily/by_year/"
val numYears = 1
val currentYear = LocalDateTime.now().get(YEAR)
val previousCentury = currentYear - 100
val randomYears = new Random().shuffle(previousCentury to currentYear).toSeq.take(numYears)
val extractYear: String => Int = s => Try(s.takeWhile(_ != '.').trim.toInt).getOrElse(-1)
val urlSrc = Source.fromIterator(() => io.Source.fromURL(BASE_URL).getLines)
val outFiles = Flow[String]
.filter(_.contains("csv.gz"))
.mapConcat(_.split("\\\\s").takeRight(1).toStream) // takeRight is empty safe :)
.filter(s => randomYears.exists(_ == extractYear(s)))
def download(url: URL, out: File)(implicit ec: ExecutionContext, materializer: ActorMaterializer) = {
if (!out.exists) {
out.mkdirs
} else if (out.isFile || !out.canRead || !out.canExecute) {
throw new IllegalArgumentException(s"${out.getAbsolutePath} is not a directory or no rx permissions.")
}
val file = url.getFile
val outfile = new File(out, file.substring(file.lastIndexOf('/') + 1))
outfile.delete
println("Beginning download...")
val start = Instant.now
// import scala.language.postfixOps
// avoid deprecation warning
// val exitStatus = url #> outfile !
val buffer = new Array[Byte](4096)
val downloadResult = Source.unfoldResourceAsync[ByteString, InputStream](
() => Future(url.openConnection().getInputStream),
is => {
is.read(buffer) match {
case x if (x > -1) => Future(Some(ByteString.fromArray(buffer, 0, x)))
case _ => Future(None)
}
},
is => {
is.close()
Future(Done)
}
)
.runWith(FileIO.toPath(outfile.toPath))
import scala.concurrent.duration._
Await.result(downloadResult, 2.minutes)
val timeTaken = java.time.temporal.ChronoUnit.SECONDS.between(start, Instant.now)
println(s"File size: ${outfile.length() / megabytes} MB.")
println(s"Time taken: $timeTaken seconds.")
outfile
}
def extractFilename(in: File) = {
val parentDir = in.getParent
val filename = in.getName
filename.substring(0, filename.lastIndexOf('.'))
}
def extract(in: File) = {
if (!in.isFile || !in.canRead) {
throw new IllegalArgumentException(s"${in.getAbsolutePath} not a file or no read permission.")
}
val extractedFilename = extractFilename(in)
val outfile = new File(in.getParent, extractedFilename)
val fin = new FileInputStream(in)
val bin = new BufferedInputStream(fin)
val out = new FileOutputStream(outfile)
val gzIn = new GzipCompressorInputStream(bin)
val buffer = new Array[Byte](4096)
println("Beginning extraction...")
val start = Instant.now
Stream.continually(gzIn.read(buffer))
.takeWhile(_ != -1)
.foreach(out.write(buffer, 0, _))
out.close
gzIn.close
in.delete
val timeTaken = SECONDS.between(start, Instant.now)
println(s"Successfully extracted: ${in.getAbsolutePath} to: ${outfile.getAbsolutePath}.")
println(s"File size: ${outfile.length() / megabytes} MB.")
println(s"Time taken: $timeTaken seconds.")
outfile.getAbsolutePath
}
def downloadAndExtract(inDir: String)(implicit ec: ExecutionContext, materializer: ActorMaterializer) =
urlSrc.via(outFiles)
.toMat(Sink.foreachParallel(4)(x => extract(download(new URL(s"${BASE_URL}$x"), new File(inDir)))))(Keep.right)
}
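// A minimal usage sketch (not part of the original file): it wires up an actor
// system and materializer and runs the graph returned by downloadAndExtract.
// The "downloader-sketch" system name and the "./data" target directory are
// illustrative assumptions; actually running this fetches NOAA files over FTP.
object DownloaderSketch {
  def main(args: Array[String]): Unit = {
    implicit val system = akka.actor.ActorSystem("downloader-sketch")
    implicit val materializer = ActorMaterializer()
    implicit val ec: ExecutionContext = system.dispatcher
    import scala.concurrent.duration._
    // Materialize the download-and-extract graph and block until it completes.
    val done: Future[Done] = Downloader.downloadAndExtract("./data").run()
    Await.result(done, 1.hour)
    system.terminate()
  }
}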
|
asarkar/akka
|
akka-streams-learning/weather-streaming/src/main/scala/name/abhijitsarkar/akka/Downloader.scala
|
Scala
|
gpl-3.0
| 4,124
|
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.config
import java.io.File
import java.nio.file.Path
import scala.collection.breakOut
import org.ensime.api._
import org.ensime.util.file._
package object richconfig {
implicit class RichEnsimeConfig(val c: EnsimeConfig) extends AnyVal {
// doesn't do the transitive lookups
def classpath: List[File] =
(c.projects.flatMap(_.targets) ::: c.projects.flatMap(_.libraryJars)).distinct
def targets: List[File] =
c.projects.flatMap(_.targets)
def referenceSourceJars: Set[File] =
(c.javaSources ++ c.projects.flatMap(_.librarySources))(breakOut)
def lookup(id: EnsimeProjectId) =
c.projects.find(_.id == id).get
def allDocJars: Set[File] =
c.projects.flatMap(_.libraryDocs)(breakOut)
def scalaLibrary: Option[File] =
c.projects.flatMap(_.libraryJars).find { f =>
val name = f.getName
name.startsWith("scala-library") && name.endsWith(".jar")
}
def findProject(path: Path): Option[EnsimeProjectId] = {
// should use NIO relations instead of string comparison...
// needs https://github.com/ensime/ensime-server/issues/1791
c.projects collectFirst {
case project if project.sources.exists(f => path.startsWith(f.toPath)) => project.id
}
}
def findProject(file: EnsimeFile): Option[EnsimeProjectId] = file match {
case RawFile(file) => findProject(file)
case ArchiveFile(jar, _) => findProject(jar)
}
def findProject(file: SourceFileInfo): Option[EnsimeProjectId] = findProject(file.file)
}
implicit class RichEnsimeProject(val p: EnsimeProject) extends AnyVal {
def dependencies(implicit config: EnsimeConfig): List[EnsimeProject] =
p.depends.map(config.lookup)
def classpath(implicit config: EnsimeConfig): List[File] = {
// may not agree with the build tool (e.g. could put all targets first)
p.targets.toList ::: p.libraryJars ::: dependencies.flatMap(_.classpath)
}
def scalaSourceFiles: Set[RawFile] = for {
root <- p.sources
file <- root.tree // should use NIO
if file.isFile && file.isScala
} yield RawFile(file.toPath)
}
}
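// A minimal usage sketch (not part of the original sources): given an already
// loaded EnsimeConfig (how it is obtained is out of scope here), the implicit
// enrichments from the package object let callers query derived information
// such as the aggregate classpath and the Scala sources of each project.
object RichConfigUsageSketch {
  import richconfig._

  def describe(config: EnsimeConfig): String = {
    val classpathEntries = config.classpath.size                        // RichEnsimeConfig.classpath
    val scalaSources = config.projects.map(_.scalaSourceFiles.size).sum // RichEnsimeProject.scalaSourceFiles
    s"$classpathEntries classpath entries, $scalaSources Scala source files"
  }
}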
|
hzenginx/ensime-server
|
core/src/main/scala/org/ensime/config/richconfig.scala
|
Scala
|
gpl-3.0
| 2,300
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.tree
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.tree.impurity.ImpurityCalculator
import org.apache.spark.mllib.tree.model.{ImpurityStats,
InformationGainStats => OldInformationGainStats, Node => OldNode, Predict => OldPredict}
/**
* :: DeveloperApi ::
* Decision tree node interface.
*/
@DeveloperApi
sealed abstract class Node extends Serializable {
// TODO: Add aggregate stats (once available). This will happen after we move the DecisionTree
// code into the new API and deprecate the old API. SPARK-3727
/** Prediction a leaf node makes, or which an internal node would make if it were a leaf node */
def prediction: Double
/** Impurity measure at this node (for training data) */
def impurity: Double
/**
* Statistics aggregated from training data at this node, used to compute prediction, impurity,
* and probabilities.
* For classification, the array of class counts must be normalized to a probability distribution.
*/
private[ml] def impurityStats: ImpurityCalculator
/** Recursive prediction helper method */
private[ml] def predictImpl(features: Vector): LeafNode
/**
* Get the number of nodes in tree below this node, including leaf nodes.
* E.g., if this is a leaf, returns 0. If both children are leaves, returns 2.
*/
private[tree] def numDescendants: Int
/**
* Recursive print function.
* @param indentFactor The number of spaces to add to each level of indentation.
*/
private[tree] def subtreeToString(indentFactor: Int = 0): String
/**
* Get depth of tree from this node.
* E.g.: Depth 0 means this is a leaf node. Depth 1 means 1 internal and 2 leaf nodes.
*/
private[tree] def subtreeDepth: Int
/**
* Create a copy of this node in the old Node format, recursively creating child nodes as needed.
* @param id Node ID using old format IDs
*/
private[ml] def toOld(id: Int): OldNode
/**
* Trace down the tree, and return the largest feature index used in any split.
* @return Max feature index used in a split, or -1 if there are no splits (single leaf node).
*/
private[ml] def maxSplitFeatureIndex(): Int
/** Returns a deep copy of the subtree rooted at this node. */
private[tree] def deepCopy(): Node
}
private[ml] object Node {
/**
* Create a new Node from the old Node format, recursively creating child nodes as needed.
*/
def fromOld(oldNode: OldNode, categoricalFeatures: Map[Int, Int]): Node = {
if (oldNode.isLeaf) {
// TODO: Once the implementation has been moved to this API, then include sufficient
// statistics here.
new LeafNode(prediction = oldNode.predict.predict,
impurity = oldNode.impurity, impurityStats = null)
} else {
val gain = if (oldNode.stats.nonEmpty) {
oldNode.stats.get.gain
} else {
0.0
}
new InternalNode(prediction = oldNode.predict.predict, impurity = oldNode.impurity,
gain = gain, leftChild = fromOld(oldNode.leftNode.get, categoricalFeatures),
rightChild = fromOld(oldNode.rightNode.get, categoricalFeatures),
split = Split.fromOld(oldNode.split.get, categoricalFeatures), impurityStats = null)
}
}
}
/**
* :: DeveloperApi ::
* Decision tree leaf node.
* @param prediction Prediction this node makes
* @param impurity Impurity measure at this node (for training data)
*/
@DeveloperApi
class LeafNode private[ml] (
override val prediction: Double,
override val impurity: Double,
override private[ml] val impurityStats: ImpurityCalculator) extends Node {
override def toString: String =
s"LeafNode(prediction = $prediction, impurity = $impurity)"
override private[ml] def predictImpl(features: Vector): LeafNode = this
override private[tree] def numDescendants: Int = 0
override private[tree] def subtreeToString(indentFactor: Int = 0): String = {
val prefix: String = " " * indentFactor
prefix + s"Predict: $prediction\\n"
}
override private[tree] def subtreeDepth: Int = 0
override private[ml] def toOld(id: Int): OldNode = {
new OldNode(id, new OldPredict(prediction, prob = impurityStats.prob(prediction)),
impurity, isLeaf = true, None, None, None, None)
}
override private[ml] def maxSplitFeatureIndex(): Int = -1
override private[tree] def deepCopy(): Node = {
new LeafNode(prediction, impurity, impurityStats)
}
}
/**
* :: DeveloperApi ::
* Internal Decision Tree node.
* @param prediction Prediction this node would make if it were a leaf node
* @param impurity Impurity measure at this node (for training data)
* @param gain Information gain value.
* Values < 0 indicate missing values; this quirk will be removed with future updates.
* @param leftChild Left-hand child node
* @param rightChild Right-hand child node
* @param split Information about the test used to split to the left or right child.
*/
@DeveloperApi
class InternalNode private[ml] (
override val prediction: Double,
override val impurity: Double,
val gain: Double,
val leftChild: Node,
val rightChild: Node,
val split: Split,
override private[ml] val impurityStats: ImpurityCalculator) extends Node {
override def toString: String = {
s"InternalNode(prediction = $prediction, impurity = $impurity, split = $split)"
}
override private[ml] def predictImpl(features: Vector): LeafNode = {
if (split.shouldGoLeft(features)) {
leftChild.predictImpl(features)
} else {
rightChild.predictImpl(features)
}
}
override private[tree] def numDescendants: Int = {
2 + leftChild.numDescendants + rightChild.numDescendants
}
override private[tree] def subtreeToString(indentFactor: Int = 0): String = {
val prefix: String = " " * indentFactor
prefix + s"If (${InternalNode.splitToString(split, left = true)})\\n" +
leftChild.subtreeToString(indentFactor + 1) +
prefix + s"Else (${InternalNode.splitToString(split, left = false)})\\n" +
rightChild.subtreeToString(indentFactor + 1)
}
override private[tree] def subtreeDepth: Int = {
1 + math.max(leftChild.subtreeDepth, rightChild.subtreeDepth)
}
override private[ml] def toOld(id: Int): OldNode = {
assert(id.toLong * 2 < Int.MaxValue, "Decision Tree could not be converted from new to old API"
+ " since the old API does not support deep trees.")
new OldNode(id, new OldPredict(prediction, prob = impurityStats.prob(prediction)), impurity,
isLeaf = false, Some(split.toOld), Some(leftChild.toOld(OldNode.leftChildIndex(id))),
Some(rightChild.toOld(OldNode.rightChildIndex(id))),
Some(new OldInformationGainStats(gain, impurity, leftChild.impurity, rightChild.impurity,
new OldPredict(leftChild.prediction, prob = 0.0),
new OldPredict(rightChild.prediction, prob = 0.0))))
}
override private[ml] def maxSplitFeatureIndex(): Int = {
math.max(split.featureIndex,
math.max(leftChild.maxSplitFeatureIndex(), rightChild.maxSplitFeatureIndex()))
}
override private[tree] def deepCopy(): Node = {
new InternalNode(prediction, impurity, gain, leftChild.deepCopy(), rightChild.deepCopy(),
split, impurityStats)
}
}
private object InternalNode {
/**
* Helper method for [[Node.subtreeToString()]].
* @param split Split to print
* @param left Indicates whether this is the part of the split going to the left,
* or that going to the right.
*/
private def splitToString(split: Split, left: Boolean): String = {
val featureStr = s"feature ${split.featureIndex}"
split match {
case contSplit: ContinuousSplit =>
if (left) {
s"$featureStr <= ${contSplit.threshold}"
} else {
s"$featureStr > ${contSplit.threshold}"
}
case catSplit: CategoricalSplit =>
val categoriesStr = catSplit.leftCategories.mkString("{", ",", "}")
if (left) {
s"$featureStr in $categoriesStr"
} else {
s"$featureStr not in $categoriesStr"
}
}
}
}
/**
* Version of a node used in learning. This uses vars so that we can modify nodes as we split the
* tree by adding children, etc.
*
* For now, we use node IDs. These will be kept internal since we hope to remove node IDs
* in the future, or at least change the indexing (so that we can support much deeper trees).
*
* This node can either be:
* - a leaf node, with leftChild, rightChild, split set to null, or
* - an internal node, with all values set
*
* @param id We currently use the same indexing as the old implementation in
* [[org.apache.spark.mllib.tree.model.Node]], but this will change later.
* @param isLeaf Indicates whether this node will definitely be a leaf in the learned tree,
* so that we do not need to consider splitting it further.
* @param stats Impurity statistics for this node.
*/
private[tree] class LearningNode(
var id: Int,
var leftChild: Option[LearningNode],
var rightChild: Option[LearningNode],
var split: Option[Split],
var isLeaf: Boolean,
var stats: ImpurityStats) extends Serializable {
/**
* Convert this [[LearningNode]] to a regular [[Node]], and recurse on any children.
*/
def toNode: Node = {
if (leftChild.nonEmpty) {
assert(rightChild.nonEmpty && split.nonEmpty && stats != null,
"Unknown error during Decision Tree learning. Could not convert LearningNode to Node.")
new InternalNode(stats.impurityCalculator.predict, stats.impurity, stats.gain,
leftChild.get.toNode, rightChild.get.toNode, split.get, stats.impurityCalculator)
} else {
if (stats.valid) {
new LeafNode(stats.impurityCalculator.predict, stats.impurity,
stats.impurityCalculator)
} else {
// Here we want to keep same behavior with the old mllib.DecisionTreeModel
new LeafNode(stats.impurityCalculator.predict, -1.0, stats.impurityCalculator)
}
}
}
/**
* Get the node index corresponding to this data point.
* This function mimics prediction, passing an example from the root node down to a leaf
* or unsplit node; that node's index is returned.
*
* @param binnedFeatures Binned feature vector for data point.
* @param splits possible splits for all features, indexed (numFeatures)(numSplits)
* @return Leaf index if the data point reaches a leaf.
* Otherwise, last node reachable in tree matching this example.
* Note: This is the global node index, i.e., the index used in the tree.
* This index is different from the index used during training a particular
* group of nodes on one call to
* [[org.apache.spark.ml.tree.impl.RandomForest.findBestSplits()]].
*/
def predictImpl(binnedFeatures: Array[Int], splits: Array[Array[Split]]): Int = {
if (this.isLeaf || this.split.isEmpty) {
this.id
} else {
val split = this.split.get
val featureIndex = split.featureIndex
val splitLeft = split.shouldGoLeft(binnedFeatures(featureIndex), splits(featureIndex))
if (this.leftChild.isEmpty) {
// Not yet split. Return next layer of nodes to train
if (splitLeft) {
LearningNode.leftChildIndex(this.id)
} else {
LearningNode.rightChildIndex(this.id)
}
} else {
if (splitLeft) {
this.leftChild.get.predictImpl(binnedFeatures, splits)
} else {
this.rightChild.get.predictImpl(binnedFeatures, splits)
}
}
}
}
}
private[tree] object LearningNode {
/** Create a node with some of its fields set. */
def apply(
id: Int,
isLeaf: Boolean,
stats: ImpurityStats): LearningNode = {
new LearningNode(id, None, None, None, false, stats)
}
/** Create an empty node with the given node index. Values must be set later on. */
def emptyNode(nodeIndex: Int): LearningNode = {
new LearningNode(nodeIndex, None, None, None, false, null)
}
// The below indexing methods were copied from spark.mllib.tree.model.Node
/**
* Return the index of the left child of this node.
*/
def leftChildIndex(nodeIndex: Int): Int = nodeIndex << 1
/**
* Return the index of the right child of this node.
*/
def rightChildIndex(nodeIndex: Int): Int = (nodeIndex << 1) + 1
/**
* Get the parent index of the given node, or 0 if it is the root.
*/
def parentIndex(nodeIndex: Int): Int = nodeIndex >> 1
/**
* Return the level of a tree which the given node is in.
*/
def indexToLevel(nodeIndex: Int): Int = if (nodeIndex == 0) {
throw new IllegalArgumentException(s"0 is not a valid node index.")
} else {
java.lang.Integer.numberOfTrailingZeros(java.lang.Integer.highestOneBit(nodeIndex))
}
/**
* Returns true if this is a left child.
* Note: Returns false for the root.
*/
def isLeftChild(nodeIndex: Int): Boolean = nodeIndex > 1 && nodeIndex % 2 == 0
/**
* Return the maximum number of nodes which can be in the given level of the tree.
* @param level Level of tree (0 = root).
*/
def maxNodesInLevel(level: Int): Int = 1 << level
/**
* Return the index of the first node in the given level.
* @param level Level of tree (0 = root).
*/
def startIndexInLevel(level: Int): Int = 1 << level
/**
* Traces down from a root node to get the node with the given node index.
* This assumes the node exists.
*/
def getNode(nodeIndex: Int, rootNode: LearningNode): LearningNode = {
var tmpNode: LearningNode = rootNode
var levelsToGo = indexToLevel(nodeIndex)
while (levelsToGo > 0) {
if ((nodeIndex & (1 << levelsToGo - 1)) == 0) {
tmpNode = tmpNode.leftChild.get
} else {
tmpNode = tmpNode.rightChild.get
}
levelsToGo -= 1
}
tmpNode
}
}
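/**
 * A small worked example (not part of the original Spark sources) illustrating
 * the 1-based, heap-style node indexing used by [[LearningNode]]: the root is
 * index 1, its children are 2 and 3, index 5 sits on level 2, and so on.
 */
private[tree] object LearningNodeIndexingExample {
  def demo(): Unit = {
    assert(LearningNode.leftChildIndex(1) == 2)
    assert(LearningNode.rightChildIndex(1) == 3)
    assert(LearningNode.parentIndex(5) == 2)
    assert(LearningNode.indexToLevel(5) == 2)     // node 5 is on level 2 (root = level 0)
    assert(!LearningNode.isLeftChild(1))          // the root is treated as "not a left child"
    assert(LearningNode.maxNodesInLevel(3) == 8)  // level 3 can hold at most 2^3 nodes
    assert(LearningNode.startIndexInLevel(3) == 8)
  }
}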
|
xieguobin/Spark_2.0.0_cn1
|
ml/tree/Node.scala
|
Scala
|
apache-2.0
| 14,826
|
package com.gilesc
package mynab
package service
import com.gilesc.mynab.testkit.TestCase
import com.gilesc.mynab.repository._
import cats.effect.IO
import com.gilesc.arrow._
class CategoryServiceSpec extends TestCase {
val filter = new Filter[IO, (String, String), Category, CreateCategoryContext, Category] {
override def run(
request: (String, String),
service: Service[IO, CreateCategoryContext, Category]
): IO[Category] = {
val (major, minor) = request
val ctx = CreateCategoryContext(
UserId(1),
CategoryName(major),
CategoryName(minor))
service(ctx)
}
}
val service = new Service[IO, CreateCategoryContext, Category] {
override def run(req: CreateCategoryContext): IO[Category] = {
val group = CategoryGroup(CategoryGroupId(1), UserId(1), CategoryName(req.major.value))
val category = Category(CategoryId(1), UserId(1), group, CategoryName(req.minor.value))
IO.pure(category)
}
}
behavior of "Category Service"
it should "allow me to create a new category" in {
val food = CategoryName("Food")
val diningOut = CategoryName("Dining Out")
val ctx = CreateCategoryContext(UserId(1), food, diningOut)
val result = service(ctx).unsafeRunSync()
result.group.name.value should be(food.value)
result.name.value should be(diningOut.value)
val pipeline: Service[IO, (String, String), Category] = filter andThen service
val pipelineResult = pipeline((food.value, diningOut.value)).unsafeRunSync()
pipelineResult.group.name.value should be(food.value)
pipelineResult.name.value should be(diningOut.value)
}
}
|
CraigGiles/mynab
|
service/src/test/scala/com/gilesc/mynab/service/CategoryServiceSpec.scala
|
Scala
|
mit
| 1,670
|
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.testsuite.scalalib
import org.junit.Test
import org.junit.Assert._
import org.scalajs.testsuite.utils.AssertThrows._
import org.scalajs.testsuite.utils.Platform._
class EnumerationTest {
@Test def should_use_explicit_naming_for_enumerated_values_issue_38(): Unit = {
object HelpLevel extends Enumeration {
type HelpLevel = Value
val None = Value("None")
val Basic = Value("Basic")
val Medium = Value("Medium")
val Full = Value("Full")
}
val h = HelpLevel.None
assertEquals("None", h.toString)
}
@Test def should_allow_implicit_naming_for_values(): Unit = {
object HelpLevel extends Enumeration {
type HelpLevel = Value
val None, Basic, Medium, Full = Value
val Special = Value(100)
val / = Value
}
val h = HelpLevel.Medium
assertEquals("Medium", h.toString)
assertEquals("Special", HelpLevel.Special.toString)
assertEquals("$div", HelpLevel./.toString)
}
@Test def should_give_a_pseudo_toString_to_unnamed_values(): Unit = {
if (!executingInJVM) {
object Test extends Enumeration {
private val nullStr: String = null
val A = Value(nullStr) // Circumvent compiler replacement and warning
}
assertTrue(Test.A.toString.startsWith(
"<Unknown name for enum field #0 of class "))
}
}
@Test def should_give_a_graceful_error_message_upon_name_based_query_when_unnamed_fields_are_present(): Unit = {
object Test extends Enumeration {
private val nullStr: String = null
val A = Value(nullStr) // Circumvent compiler replacement and warning
}
if (!executingInJVM) {
// In the JVM the exception thrown is a ClassCastException
val ex = expectThrows(classOf[NoSuchElementException], Test.withName("A"))
val subMsg = "Couldn't find enum field with name A.\\n" +
"However, there were the following unnamed fields:"
assertTrue(ex.getMessage.contains(subMsg))
}
}
@Test def should_respond_to_toString(): Unit = {
assertEquals("FooBarEnum", FooBarEnum.toString)
}
@Test def should_respond_to_values(): Unit = {
assertEquals("FooBarEnum.ValueSet(A, B, C, D, E, F)",
FooBarEnum.values.toString)
}
@Test def should_allow_setting_nextName(): Unit = {
object Test extends Enumeration {
nextName = Iterator("x","y","z")
val a, b, c = Value
}
assertEquals("x|y|z", Test.values.mkString("|"))
}
/** Object is here due to issues with Enumeration.toString inside closures */
object FooBarEnum extends Enumeration {
val A, B, C, D, E, F = Value
}
}
|
SebsLittleHelpers/scala-js
|
test-suite/shared/src/test/scala/org/scalajs/testsuite/scalalib/EnumerationTest.scala
|
Scala
|
apache-2.0
| 2,913
|
package trending.util
import scala.concurrent._
import ExecutionContext.Implicits.global
import play.api.libs.functional.syntax._
import play.api.libs.json._
import play.api.libs.json.Reads._
import play.api.libs.ws._
object Location {
implicit def readLocation: Reads[(Double, Double)] =
(__ \\ "latitude").read[Double] and
(__ \\ "longitude").read[Double] tupled
def getCoordinates(ip: String): Future[(Double, Double)] = {
val default = (37.789404, -122.401042)
WS.url("http://freegeoip.net/json/" + ip)
.get()
.map(
r => r.status match {
case 200 => r.json.asOpt[(Double, Double)].getOrElse(default)
case e => default
}
)
}
}
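// A minimal sketch (not part of the original file) showing how the implicit
// readLocation Reads can be exercised on its own; the JSON payload below is an
// illustrative example of what a freegeoip-style response looks like.
object LocationReadsSketch {
  import Location.readLocation

  def parseExample(): Option[(Double, Double)] =
    Json.parse("""{"latitude": 48.8566, "longitude": 2.3522, "city": "Paris"}""")
      .asOpt[(Double, Double)]
}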
|
fbessadok/trendingvenues
|
app/util/Location.scala
|
Scala
|
mit
| 711
|
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package top
import com.intellij.lang.PsiBuilder
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
/**
* @author Alexander Podkhalyuzin
* Date: 06.02.2008
*/
/*
QualId ::= id {. id}
*/
object Qual_Id {
def parse(builder: ScalaPsiBuilder): Boolean = {
val qualMarker = builder.mark
return parse(builder,qualMarker)
}
def parse(builder: ScalaPsiBuilder, qualMarker: PsiBuilder.Marker): Boolean = {
//parsing the identifier
builder.getTokenType match {
case ScalaTokenTypes.tIDENTIFIER =>
builder.advanceLexer //Ate identifier
//Look for dot
builder.getTokenType match {
case ScalaTokenTypes.tDOT => {
val newMarker = qualMarker.precede
qualMarker.done(ScalaElementTypes.REFERENCE)
builder.advanceLexer //Ate dot
//recursively parse qualified identifier
Qual_Id parse(builder, newMarker)
return true
}
case _ => {
//It's OK, let's close marker
qualMarker.done(ScalaElementTypes.REFERENCE)
return true
}
}
case _ =>
builder error ScalaBundle.message("wrong.qual.identifier")
qualMarker.drop
return true
}
}
}
|
ilinum/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/parser/parsing/top/Qual_Id.scala
|
Scala
|
apache-2.0
| 1,423
|
/* Copyright (c) 2015 Lucas Satabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package lingua
package fst
import scala.collection.immutable.Queue
import scala.annotation.tailrec
/** A p-subsequential finite-state transducer. */
class PSubFst[In, Out] private[fst] (
states: Set[State],
val initial: State,
finals: Map[State, Set[Seq[Out]]],
val transitions: Map[(State, In), State],
val defaultTransitions: Map[State, State],
val outputs: Map[(State, In), Seq[Out]],
val defaultOutputs: Map[State, Seq[Out]])
extends Fst[PSubFst, In, Out](states, Set(initial), finals) {
/** The value of p */
def p =
finals.map(_._2.size).max
/** Returns the next state when reading `in` in state `state`.
* If no transition exists, returns `None`.
*/
def delta(state: State, in: In): Option[State] =
transitions.get((state, in)).orElse(defaultTransitions.get(state))
/** Returns the state reached when reading word `ins` from state `state`.
* If at some point no transition can be found, returns `None`.
*/
def delta(state: State, ins: Seq[In]): Option[State] =
if (ins.isEmpty)
Some(state)
else
delta(state, ins.head).flatMap(s => delta(s, ins.tail))
/** Returns the output sequence encountered when reading `in` in state `state`. */
def sigma(origin: State, in: In): Seq[Out] =
outputs.get((origin, in)).orElse(defaultOutputs.get(origin)).getOrElse(Seq.empty)
/** Returns the output sequence encountered when reading `ins` from state `state`. */
def sigma(state: State, ins: Seq[In]): Seq[Out] =
if (ins.isEmpty)
Seq.empty[Out]
else
delta(state, ins.head) match {
case Some(q) => sigma(state, ins.head) ++ sigma(q, ins.tail)
case None => Seq.empty[Out]
}
/** Returns the set of extra output associated to the state `state`. */
def phi(state: State): Set[Seq[Out]] =
finals.getOrElse(state, Set.empty)
def toDot: String = {
val trans = for {
((s1, in), s2) <- transitions
out = outputs.getOrElse((s1, in), Seq()).mkString
} yield f"""q$s1->q$s2[label="$in:$out"]"""
toDot(trans)
}
/** pushes common prefixes in front when possible. */
def push: PSubFst[In, Out] = {
import scala.collection.{ mutable => mu }
val prefixes = mu.Map.empty[State, Seq[Out]]
val sigma2 = mu.Map.empty[(State, In), Seq[Out]] ++ outputs
val phi2 = mu.Map.empty[State, Set[Seq[Out]]] ++ finals
def computePrefix(state: State, seen: Set[State]): Unit =
if (!prefixes.contains(state) && !seen.contains(state)) {
// first compute the prefixes of next states
for (((`state`, _), q) <- transitions)
computePrefix(q, seen + state)
val outs =
if (isFinal(state))
phi(state)
else
Set.empty[Seq[Out]]
val prefix =
lcp(outs ++ (for (((`state`, i), q) <- transitions) yield sigma(state, i) ++ (if (q != state) prefixes(q) else Seq.empty)))
prefixes(state) = prefix
// push it in front of all incoming edges
if (isFinal(state))
phi2(state) = for (o <- phi(state)) yield o.drop(prefix.size)
else if (isInitial(state))
for (((`state`, i), o) <- outputs) sigma2((state, i)) = (o ++ prefixes(transitions((state, i))))
else
for (((`state`, i), o) <- outputs) sigma2((state, i)) = (o ++ prefixes(transitions((state, i)))).drop(prefix.size)
}
computePrefix(initial, Set.empty[State])
new PSubFst(states, initial, phi2.toMap, transitions, defaultTransitions, sigma2.toMap, defaultOutputs)
}
}
object PSubFst {
object Builder {
def fromEntries[In <% Seq[I]: Ordering, Out <% Seq[O], I: Ordering, O](entries: Seq[(In, Out)]): PSubFst[I, O] =
fromEntries[I, O](entries.map { case (i, o) => (i.toSeq, o.toSeq) })
def fromEntries[In: Ordering, Out](entries: Seq[(Seq[In], Seq[Out])]): PSubFst[In, Out] = {
var _id = 0
def nextId = {
_id += 1
_id - 1
}
case class State(transitions: Map[In, (Seq[Out], State)] = Map.empty[In, (Seq[Out], State)], isFinal: Boolean = false, stateOutput: Set[Seq[Out]] = Set.empty[Seq[Out]]) {
private val id: Int = nextId
def setTransition(c: In, tgt: State): State =
copy(transitions = transitions.updated(c, (transitions.get(c).map(_._1).getOrElse(Seq.empty), tgt)))
def setOutput(c: In, out: Seq[Out]): State =
copy(transitions = transitions.updated(c, (out, transitions(c)._2)))
def setFinal(f: Boolean): State =
copy(isFinal = f)
def setStateOutput(out: Set[Seq[Out]]): State =
copy(stateOutput = out)
def showTransitions(sb: StringBuilder): Unit = {
transitions.foreach {
case (in, (out, target)) =>
sb.append(f" q$id->q${target.id}[label=<$in:$out>]\\n")
target.showTransitions(sb)
}
if (isFinal) {
sb.append(f" q$id->end[label=<${stateOutput.mkString("\\n")}>]\\n")
}
}
}
// entries are sorted by inputs
val sorted = entries.sortBy(_._1)
// the input alphabet and maximal word size
val (alphabet, maxWordSize) =
entries.foldLeft((Set.empty[In], 0)) {
case ((alphabet, maxWordSize), (in, _)) =>
(alphabet ++ in, math.max(maxWordSize, in.size))
}
val firstIn = alphabet.min
val lastIn = alphabet.max
def getStateId(st: State, nextStateId: Int, current: Map[State, Int]): (Int, Int, Map[State, Int]) =
current.get(st) match {
case Some(id) => (id, nextStateId, current)
case None => (nextStateId, nextStateId + 1, current.updated(st, nextStateId))
}
@tailrec
def mkPSubFst(nextStateId: Int, current: Map[State, Int], treated: Set[State], toProcess: Queue[State], states: Set[Int], finals: Map[Int, Set[Seq[Out]]], transitions: Map[(Int, In), Int], outputs: Map[(Int, In), Seq[Out]]): PSubFst[In, Out] =
toProcess.dequeueOption match {
case Some((st, rest)) =>
val (id, nextStateId1, current1) = getStateId(st, nextStateId, current)
val states1 = states + id
val finals1 =
if (st.isFinal)
finals.updated(id, st.stateOutput)
else
finals
val (rest1, nextStateId2, current2, transitions1, outputs1) =
st.transitions.foldLeft((rest, nextStateId1, current1, transitions, outputs)) {
case ((rest, nextStateId, current, transitions, outputs), (in, (out, nxt))) =>
val (nxtId, nextStateId1, current1) = getStateId(nxt, nextStateId, current)
val rest1 =
if (treated.contains(nxt))
rest
else
rest.enqueue(nxt)
(rest1, nextStateId1, current1, transitions.updated((id, in), nxtId), outputs.updated((id, in), out))
}
mkPSubFst(nextStateId2, current2, treated + st, rest1, states1, finals1, transitions1, outputs1)
case None =>
new PSubFst(states, 0, finals, transitions, Map.empty, outputs, Map.empty)
}
@tailrec
def loop(entries: Seq[(Seq[In], Seq[Out])], previous: Seq[In], tempStates: Vector[State], minTransducerStates: Set[State]): PSubFst[In, Out] = {
def findMinimized(st: State): (State, Set[State]) =
minTransducerStates.find(_ == st) match {
case Some(st) => (st, minTransducerStates)
case None => (st, minTransducerStates + st)
}
entries match {
case Seq() =>
val (tempStates1, minTransducerStates1) =
(previous.size to 1 by -1).foldLeft((tempStates, minTransducerStates)) {
case ((tempStates, minTransducerStates), i) =>
val (st, minTransducerStates1) = findMinimized(tempStates(i))
(tempStates.updated(i - 1, tempStates(i - 1).setTransition(previous(i - 1), st)), minTransducerStates1)
}
mkPSubFst(0, Map.empty, Set.empty, Queue(tempStates1(0)), Set.empty, Map.empty, Map.empty, Map.empty)
case Seq((current, currentOut), rest @ _*) =>
val prefixSizePlus1 = previous.zip(current).takeWhile(p => p._1 == p._2).size + 1
// minimize states from the suffix of the previous word
val (tempStates1, minTransducerStates1) =
(previous.size to prefixSizePlus1 by -1).foldLeft((tempStates, minTransducerStates)) {
case ((tempStates, minTransducerStates), i) =>
val (st, minTransducerStates1) = findMinimized(tempStates(i))
(tempStates.updated(i - 1, tempStates(i - 1).setTransition(previous(i - 1), st)), minTransducerStates1)
}
// initialize tail states for current word
val tempStates2 =
(prefixSizePlus1 to current.size).foldLeft(tempStates1) { (tempStates, i) =>
val tempStates1 = tempStates.updated(i, State())
tempStates1.updated(i - 1, tempStates1(i - 1).setTransition(current(i - 1), tempStates1(i)))
}
// mark the last state as final if this is not the same word as the previous one
val tempStates3 =
if (current != previous)
tempStates2.updated(current.size, tempStates2(current.size).setFinal(true))
else
tempStates2
val (currentOut1, tempStates4) =
(1 until prefixSizePlus1).foldLeft((currentOut, tempStates3)) {
case ((currentOut, tempStates), i) =>
val tOut = tempStates(i - 1).transitions(current(i - 1))._1
val commonPrefix = lcp(tOut, currentOut)
val wordSuffix = tOut.drop(commonPrefix.size)
val tempStates1 = tempStates.updated(i - 1, tempStates(i - 1).setOutput(current(i - 1), commonPrefix))
// push the suffix to the end
val tempStates2 =
tempStates1(i).transitions.keySet.foldLeft(tempStates1) { (tempStates, in) =>
val st = tempStates(i)
val out = st.transitions(in)._1
tempStates.updated(i, st.setOutput(in, wordSuffix ++ out))
}
val tempStates3 =
if (tempStates2(i).isFinal) {
val stateOutput1 =
tempStates2(i).stateOutput.map(out => wordSuffix ++ out)
tempStates2.updated(i, tempStates2(i).setStateOutput(stateOutput1))
} else {
tempStates2
}
val currentOut1 = currentOut.drop(commonPrefix.size)
(currentOut1, tempStates3)
}
val tempStates5 =
if (current == previous) {
val st = tempStates4(current.size)
tempStates4.updated(current.size, st.setStateOutput(st.stateOutput + currentOut1))
} else {
val idx = math.min(prefixSizePlus1, current.size)
val st = tempStates4(idx - 1)
tempStates4.updated(idx - 1, st.setOutput(current(idx - 1), currentOut1))
}
loop(rest, current, tempStates5, minTransducerStates1)
}
}
loop(sorted, Seq.empty, Vector.fill(maxWordSize + 1)(State()), Set.empty)
}
}
}
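// A minimal usage sketch (not part of the original file): given an already
// built transducer (for instance one produced by PSubFst.Builder.fromEntries),
// read a word from the initial state and combine the output emitted along the
// path with the extra p-subsequential outputs of the reached state.
object PSubFstSketch {
  def translate[In, Out](fst: PSubFst[In, Out], word: Seq[In]): Set[Seq[Out]] = {
    val emitted = fst.sigma(fst.initial, word)      // output collected along the path
    fst.delta(fst.initial, word) match {
      case Some(state) => fst.phi(state).map(extra => emitted ++ extra) // append final outputs
      case None        => Set.empty
    }
  }
}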
|
satabin/lingua
|
fst/src/main/scala/lingua/fst/PSubFst.scala
|
Scala
|
apache-2.0
| 12,199
|
// Ported from https://www.hboehm.info/gc/gc_bench/GCBench.java
//
// This is adapted from a benchmark written by John Ellis and Pete Kovac
// of Post Communications.
// It was modified by Hans Boehm of Silicon Graphics.
//
// This is no substitute for real applications. No actual application
// is likely to behave in exactly this way. However, this benchmark was
// designed to be more representative of real applications than other
// Java GC benchmarks of which we are aware.
// It attempts to model those properties of allocation requests that
// are important to current GC techniques.
// It is designed to be used either to obtain a single overall performance
// number, or to give a more detailed estimate of how collector
// performance varies with object lifetimes. It prints the time
// required to allocate and collect balanced binary trees of various
// sizes. Smaller trees result in shorter object lifetimes. Each cycle
// allocates roughly the same amount of memory.
// Two data structures are kept around during the entire process, so
// that the measured performance is representative of applications
// that maintain some live in-memory data. One of these is a tree
// containing many pointers. The other is a large array containing
// double precision floating point numbers. Both should be of comparable
// size.
//
// The results are only really meaningful together with a specification
// of how much memory was used. It is possible to trade memory for
// better time performance. This benchmark should be run in a 32 MB
// heap, though we don't currently know how to enforce that uniformly.
//
// Unlike the original Ellis and Kovac benchmark, we do not attempt
// to measure pause times. This facility should eventually be added back
// in. There are several reasons for omitting it for now. The original
// implementation depended on assumptions about the thread scheduler
// that don't hold uniformly. The results really measure both the
// scheduler and GC. Pause time measurements tend to not fit well with
// current benchmark suites. As far as we know, none of the current
// commercial Java implementations seriously attempt to minimize GC pause
// times.
package gcbench
import scala.{Int, Double, Boolean, Unit, Array}
import java.lang.String
class Node(var left: Node, var right: Node, var i: Int, var j: Int)
object GCBenchBenchmark extends communitybench.Benchmark {
val inputOutput: (String, String) = ("", "true")
def run(input: String): Boolean = {
val (node, doubles) = GCBenchBenchmark.start()
node != null && doubles(1000) == 1.0 / 1000
}
val kStretchTreeDepth: Int = 18 // about 16Mb
val kLongLivedTreeDepth: Int = 16 // about 4Mb
val kArraySize: Int = 500000 // about 4Mb
val kMinTreeDepth: Int = 4
val kMaxTreeDepth: Int = 16
// Nodes used by a tree of a given size
def treeSize(i: Int): Int = {
return ((1 << (i + 1)) - 1)
}
// Number of iterations to use for a given tree depth
def numIters(i: Int): Int =
2 * treeSize(kStretchTreeDepth) / treeSize(i)
// Build tree top down, assigning to older objects.
def populate(iDepth: Int, thisNode: Node): Unit =
if (iDepth > 0) {
thisNode.left = new Node(null, null, 0, 0)
thisNode.right = new Node(null, null, 0, 0)
populate(iDepth - 1, thisNode.left)
populate(iDepth - 1, thisNode.right)
}
// Build tree bottom-up
def makeTree(iDepth: Int): Node =
if (iDepth <= 0) {
new Node(null, null, 0, 0)
} else {
new Node(makeTree(iDepth - 1), makeTree(iDepth - 1), 0, 0)
}
def construction(depth: Int): Unit = {
var root: Node = null
var tempTree: Node = null
val iNumIter: Int = numIters(depth)
var i = 0;
while (i < iNumIter) {
tempTree = new Node(null, null, 0, 0)
populate(depth, tempTree)
tempTree = null
i += 1
}
i = 0
while (i < iNumIter) {
tempTree = makeTree(depth)
tempTree = null
i += 1
}
}
def start(): (Node, Array[Double]) = {
var root: Node = null
var longLivedTree: Node = null
var tempTree: Node = null
// Stretch the memory space quickly
tempTree = makeTree(kStretchTreeDepth)
tempTree = null;
// Create a long lived object
longLivedTree = new Node(null, null, 0, 0);
populate(kLongLivedTreeDepth, longLivedTree)
// Create long-lived array, filling half of it
val array = new Array[Double](kArraySize)
var i = 0
while (i < kArraySize / 2) {
array(i) = 1.0 / i
i += 1
}
i = kMinTreeDepth
while (i <= kMaxTreeDepth) {
construction(i)
i += 2
}
(longLivedTree, array)
}
}
|
sjrd/scalajs-benchmarks
|
gcbench/src/main/scala/gcbench/GCBenchBenchmark.scala
|
Scala
|
bsd-3-clause
| 4,806
|
/* __ __ *\
* / /____ ___ ____ ___ ___ _/ / lasius *
* / __/ -_) _ `/ _ \/ _ \/ _ `/ / contributed by tegonal *
* \__/\__/\_, /\___/_//_/\_,_/_/ http://tegonal.com/ *
* /___/ *
* *
* This program is free software: you can redistribute it and/or modify it *
* under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the License, *
* or (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, but *
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY *
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for *
* more details. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program. If not, see http://www.gnu.org/licenses/ *
* *
\* */
package services
import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import models.UserId
object UserService {
case class StopUserView(userId: UserId)
case class StartUserTimeBookingView(userId: UserId)
}
abstract class UserService[C] extends Actor with ActorLogging {
import UserService._
/**
* Implicit conversion from the userId object model to the string-based representation used in the akka system
*/
implicit def userId2String(userId: UserId): String = userId.value
protected def findOrCreate(id: UserId): ActorRef =
context.child(id) getOrElse create(id)
/**
* Processes an aggregate command.
* Creates the aggregate (if not already created) and handles command caching while the aggregate is being killed.
*
* @param aggregateId Aggregate id
* @param command Command that should be passed to aggregate
*/
def processAggregateCommand(aggregateId: UserId, command: C) = {
val maybeChild = context child aggregateId
log.debug(s"processAgregateCommand -> addregateId:$aggregateId, child:$maybeChild")
maybeChild match {
case Some(child) =>
child forward command
case None =>
val child = create(aggregateId)
log.debug(s"forwardCommand to $child")
child forward command
}
}
def processCommand: Receive
def removeUserView(userId: UserId) = {
val maybeChild = context child userId
maybeChild match {
case Some(child) =>
context stop child
case _ =>
}
}
override def receive = {
case StopUserView(userId) =>
log.debug(s"StopUserView:$userId")
removeUserView(userId)
case c =>
log.debug(s"processCommand:$c")
processCommand(c)
}
protected def create(id: UserId): ActorRef = {
val agg = context.actorOf(aggregateProps(id), id)
context watch agg
agg
}
def aggregateProps(id: UserId): Props
}
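/**
 * A minimal sketch (not part of the original sources) of a concrete service:
 * the command type and the per-user view actor below are hypothetical
 * placeholders, only meant to show how processCommand and aggregateProps are
 * expected to be wired together.
 */
object SketchUserViewService {
  case class SketchCommand(userId: UserId, payload: String)

  class SketchUserView(id: UserId) extends Actor with ActorLogging {
    override def receive: Receive = {
      case cmd => log.debug(s"view for user $id received $cmd")
    }
  }
}

class SketchUserViewService extends UserService[SketchUserViewService.SketchCommand] {
  import SketchUserViewService._

  // Route every command to the per-user aggregate identified by its userId.
  override def processCommand: Receive = {
    case cmd: SketchCommand => processAggregateCommand(cmd.userId, cmd)
  }

  override def aggregateProps(id: UserId): Props = Props(classOf[SketchUserView], id)
}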
|
tegonal/lasius
|
app/services/UserService.scala
|
Scala
|
gpl-3.0
| 3,470
|
package com.twitter.finagle.ssl
import java.util.concurrent.atomic.AtomicBoolean
import java.util.logging.{Level, Logger}
import javax.net.ssl._
import collection.mutable.{Map => MutableMap}
/*
* Creates APR/OpenSSL SSLEngines on behalf of the Ssl singleton
*
* You need to have the appropriate shared libraries on your java.library.path
*/
object OpenSSL {
type MapOfStrings = java.util.Map[java.lang.String, java.lang.String]
private[this] val log = Logger.getLogger(getClass.getName)
// For flagging global initialization of APR and OpenSSL
private[this] val initializedLibrary = new AtomicBoolean(false)
private[this] var mallocPool: AnyRef = null
private[this] var bufferPool: AnyRef = null
private[this] val defaultCiphers =
"AES128-SHA:RC4:AES:!ADH:!aNULL:!DH:!EDH:!PSK:!ECDH:!eNULL:!LOW:!SSLv2:!EXP:!NULL"
/*
* Deal with initialization of the native library
*/
class Linker {
private[this] def classNamed(name: String): Class[_] =
Class.forName("org.apache.tomcat.jni." + name)
val aprClass = classNamed("Library")
val aprInitMethod = aprClass.getMethod("initialize", classOf[String])
val poolClass = classNamed("Pool")
val poolCreateMethod = poolClass.getMethod("create", classOf[Long])
val sslClass = classNamed("SSL")
val sslInitMethod = sslClass.getMethod("initialize", classOf[String])
// OpenSSLEngine-specific configuration classes
val bufferPoolClass = classNamed("ssl.DirectBufferPool")
val bufferPoolCtor = bufferPoolClass.getConstructor(classOf[Int])
val configurationClass = classNamed("ssl.SSLConfiguration")
val configurationCtor = configurationClass.getConstructor(classOf[MapOfStrings])
val contextHolderClass = classNamed("ssl.SSLContextHolder")
val contextHolderCtor = contextHolderClass.getConstructor(classOf[Long], configurationClass)
val sslEngineClass = classNamed("ssl.OpenSSLEngine")
val sslEngineCtor = sslEngineClass.getConstructor(contextHolderClass, bufferPoolClass)
if (initializedLibrary.compareAndSet(false, true)) {
aprInitMethod.invoke(aprClass, null)
sslInitMethod.invoke(sslClass, null)
mallocPool = poolCreateMethod.invoke(poolClass, 0L.asInstanceOf[AnyRef]).asInstanceOf[AnyRef]
// We need to know how many workers might need buffers simultaneously, and to allocate a large
// enough pool.
val capacity = Runtime.getRuntime().availableProcessors() * 2
bufferPool = bufferPoolCtor.newInstance(capacity.asInstanceOf[AnyRef]).asInstanceOf[AnyRef]
}
}
private[this] val contextHolderCache: MutableMap[String, Object] = MutableMap.empty
private[this] var linker: Linker = null
/**
* Get a server
*/
def server(certificatePath: String,
keyPath: String,
caPath: String,
ciphers: String,
nextProtos: String,
useCache: Boolean = true): Option[Engine] = {
try {
synchronized {
if (null == linker) linker = new Linker()
}
} catch {
case e: Exception =>
// This is a warning rather than a Throwable because we fall back to JSSE
log.log(Level.FINEST,
"APR/OpenSSL could not be loaded: " +
e.getClass().getName() + ": " + e.getMessage())
return None
}
def makeContextHolder = {
val configMap = new java.util.HashMap[java.lang.String, java.lang.String]
configMap.put("ssl.cert_path", certificatePath)
configMap.put("ssl.key_path", keyPath)
configMap.put("ssl.cipher_spec", Option(ciphers).getOrElse { defaultCiphers })
if (caPath != null)
configMap.put("ssl.ca_path", caPath)
if (nextProtos != null)
configMap.put("ssl.next_protos", nextProtos)
val config = linker.configurationCtor.newInstance(configMap.asInstanceOf[MapOfStrings])
log.finest("OpenSSL context instantiated for certificate '%s'".format(certificatePath))
linker.contextHolderCtor.newInstance(mallocPool, config.asInstanceOf[AnyRef]).asInstanceOf[AnyRef]
}
val contextHolder = synchronized {
if (useCache)
contextHolderCache.getOrElseUpdate(certificatePath, makeContextHolder)
else
makeContextHolder
}
val engine: SSLEngine = linker.sslEngineCtor.newInstance(
contextHolder,
bufferPool
).asInstanceOf[SSLEngine]
Some(new Engine(engine, true))
}
}
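/*
 * A minimal usage sketch (not part of the original file). The certificate and
 * key paths are illustrative placeholders; the call only yields Some(engine)
 * when the tomcat-native (APR/OpenSSL) shared libraries are present on
 * java.library.path, otherwise it returns None and callers fall back to JSSE.
 */
object OpenSSLUsageSketch {
  def serverEngine(): Option[Engine] =
    OpenSSL.server(
      certificatePath = "/etc/ssl/certs/server.crt",
      keyPath = "/etc/ssl/private/server.key",
      caPath = null,
      ciphers = null, // null falls back to the built-in default cipher list
      nextProtos = null)
}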
|
LithiumTD/finagle
|
finagle-core/src/main/scala/com/twitter/finagle/ssl/OpenSSL.scala
|
Scala
|
apache-2.0
| 4,456
|