code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package no.vestein.purplerain.graphics
import com.badlogic.gdx.graphics.{Pixmap, Texture}
import com.badlogic.gdx.graphics.g2d.Sprite
/**
 * A sprite for a single rain drop, backed by the shared 1x10 purple texture
 * held in the companion object (one texture for all drops).
 *
 * @param size scale factor applied to the base drop dimensions
 */
class RainDropSprite(size: Float) extends Sprite(RainDropSprite.texture) {
  // Base (unscaled) drop dimensions in world units; the texture itself is 1x10 px.
  val dWidth = 0.125f
  val dHeight = 2.5f

  // Scale the sprite up to the requested drop size at construction time.
  setSize(dWidth * size, dHeight * size)
  //TODO set origin if it is necessary
}
/**
 * Companion holding the single texture shared by all [[RainDropSprite]] instances.
 *
 * The texture is a solid purple (#8A2BE2) 1x10 pixel column, stretched by each
 * sprite to its display size.
 */
private object RainDropSprite {
  // Texture dimensions in pixels.
  val width: Int = 1
  val height: Int = 10

  /** Shared, mip-mapped drop texture; built once at first access. */
  val texture: Texture = {
    val pm = pixmap
    val tex = new Texture(pm, true)
    // The Texture copies the pixel data to the GPU on upload and does NOT take
    // ownership of the pixmap, so its native memory must be freed here to
    // avoid a leak (libGDX pixmaps are backed by off-heap memory).
    pm.dispose()
    tex
  }

  /** Builds the solid purple RGBA pixmap used as the texture source. */
  private def pixmap: PixmapPR = {
    val pixmap: PixmapPR = new PixmapPR(width, height, com.badlogic.gdx.graphics.Pixmap.Format.RGBA8888)
    // Disable blending so fillRectangle writes the color verbatim.
    Pixmap.setBlending(Pixmap.Blending.None)
    pixmap.setColor(0x8A/255.0f, 0x2B/255.0f, 0xE2/255.0f, 0xff/255.0f)
    pixmap.fillRectangle(0, 0, width, height)
    pixmap
  }
}
| WoodStone/PurpleRain | core/src/main/scala/no/vestein/purplerain/graphics/RainDropSprite.scala | Scala | gpl-3.0 | 786 |
package mesosphere.marathon.api.v2.json
import com.wix.accord.dsl._
import mesosphere.marathon.Features
import mesosphere.marathon.Protos.Constraint
import mesosphere.marathon.api.v2.Validation._
import mesosphere.marathon.core.readiness.ReadinessCheck
import mesosphere.marathon.health.HealthCheck
import mesosphere.marathon.state._
import scala.collection.immutable.Seq
import scala.concurrent.duration.FiniteDuration
/**
 * A partial update to an existing app definition.
 *
 * Every field is optional: `None` means "leave the corresponding value of the
 * target [[mesosphere.marathon.state.AppDefinition]] untouched", while
 * `Some(x)` replaces (or, for a few fields, falls back to) the current value.
 * See `apply(app)` for the exact merge rule of each field.
 */
case class AppUpdate(
    id: Option[PathId] = None,
    cmd: Option[String] = None,
    args: Option[Seq[String]] = None,
    user: Option[String] = None,
    env: Option[Map[String, EnvVarValue]] = None,
    instances: Option[Int] = None,
    cpus: Option[Double] = None,
    mem: Option[Double] = None,
    disk: Option[Double] = None,
    executor: Option[String] = None,
    constraints: Option[Set[Constraint]] = None,
    fetch: Option[Seq[FetchUri]] = None,
    storeUrls: Option[Seq[String]] = None,
    portDefinitions: Option[Seq[PortDefinition]] = None,
    requirePorts: Option[Boolean] = None,
    backoff: Option[FiniteDuration] = None,
    backoffFactor: Option[Double] = None,
    maxLaunchDelay: Option[FiniteDuration] = None,
    container: Option[Container] = None,
    healthChecks: Option[Set[HealthCheck]] = None,
    readinessChecks: Option[Seq[ReadinessCheck]] = None,
    taskKillGracePeriod: Option[FiniteDuration] = None,
    dependencies: Option[Set[PathId]] = None,
    upgradeStrategy: Option[UpgradeStrategy] = None,
    labels: Option[Map[String, String]] = None,
    acceptedResourceRoles: Option[Set[String]] = None,
    version: Option[Timestamp] = None,
    ipAddress: Option[IpAddress] = None,
    residency: Option[Residency] = None,
    secrets: Option[Map[String, Secret]] = None) {

  // Supplying 'version' means "roll back to that stored version"; combining it
  // with any other change (except 'id') would be ambiguous, so reject it early.
  require(version.isEmpty || onlyVersionOrIdSet, "The 'version' field may only be combined with the 'id' field.")

  // True when every defined (Some) field of this product is either 'version' or 'id'.
  protected[api] def onlyVersionOrIdSet: Boolean = productIterator forall {
    case x @ Some(_) => x == version || x == id
    case _ => true
  }

  /** An update describes a resident app iff it explicitly supplies a residency. */
  def isResident: Boolean = residency.isDefined

  /** All persistent volumes declared on this update's container (empty when no container is given). */
  def persistentVolumes: Iterable[PersistentVolume] = {
    container.fold(Seq.empty[Volume])(_.volumes).collect{ case vol: PersistentVolume => vol }
  }

  /**
   * Creates a fresh AppDefinition for `appId` and applies this update to it.
   * Residency and upgrade strategy defaults are derived from the declared volumes.
   */
  def empty(appId: PathId): AppDefinition = {
    def volumes: Iterable[Volume] = container.fold(Seq.empty[Volume])(_.volumes)
    def externalVolumes: Iterable[ExternalVolume] = volumes.collect { case vol: ExternalVolume => vol }
    // Persistent volumes imply resident tasks, which need a default residency.
    val residency = if (persistentVolumes.nonEmpty) Some(Residency.defaultResidency) else None
    // Resident tasks and external volumes require the conservative resident-task
    // upgrade strategy instead of the default one.
    val upgradeStrategy = if (residency.isDefined || isResident
      || externalVolumes.nonEmpty) UpgradeStrategy.forResidentTasks
    else UpgradeStrategy.empty
    apply(AppDefinition(appId, residency = residency, upgradeStrategy = upgradeStrategy))
  }

  /**
   * Returns the supplied [[mesosphere.marathon.state.AppDefinition]]
   * after updating its members with respect to this update request.
   */
  def apply(app: AppDefinition): AppDefinition = app.copy(
    // The app id is never updatable through an AppUpdate.
    id = app.id,
    cmd = cmd.orElse(app.cmd),
    args = args.orElse(app.args),
    user = user.orElse(app.user),
    env = env.getOrElse(app.env),
    instances = instances.getOrElse(app.instances),
    cpus = cpus.getOrElse(app.cpus),
    mem = mem.getOrElse(app.mem),
    disk = disk.getOrElse(app.disk),
    executor = executor.getOrElse(app.executor),
    constraints = constraints.getOrElse(app.constraints),
    fetch = fetch.getOrElse(app.fetch),
    storeUrls = storeUrls.getOrElse(app.storeUrls),
    portDefinitions = portDefinitions.getOrElse(app.portDefinitions),
    requirePorts = requirePorts.getOrElse(app.requirePorts),
    backoff = backoff.getOrElse(app.backoff),
    backoffFactor = backoffFactor.getOrElse(app.backoffFactor),
    maxLaunchDelay = maxLaunchDelay.getOrElse(app.maxLaunchDelay),
    // An explicitly empty container is treated as "not supplied" and falls back
    // to the app's current container.
    container = container.filterNot(_ == Container.Empty).orElse(app.container),
    healthChecks = healthChecks.getOrElse(app.healthChecks),
    readinessChecks = readinessChecks.getOrElse(app.readinessChecks),
    // Dependency paths are canonicalized relative to the app's own id.
    dependencies = dependencies.map(_.map(_.canonicalPath(app.id))).getOrElse(app.dependencies),
    upgradeStrategy = upgradeStrategy.getOrElse(app.upgradeStrategy),
    labels = labels.getOrElse(app.labels),
    acceptedResourceRoles = acceptedResourceRoles.orElse(app.acceptedResourceRoles),
    ipAddress = ipAddress.orElse(app.ipAddress),
    // The versionInfo may never be overridden by an AppUpdate.
    // Setting the version in AppUpdate means that the user wants to revert to that version. In that
    // case, we do not update the current AppDefinition but revert completely to the specified version.
    // For all other updates, the GroupVersioningUtil will determine a new version if the AppDefinition
    // has really changed.
    versionInfo = app.versionInfo,
    residency = residency.orElse(app.residency),
    secrets = secrets.getOrElse(app.secrets),
    taskKillGracePeriod = taskKillGracePeriod.orElse(app.taskKillGracePeriod)
  )

  /** Resolves the update's id and dependency paths against `base`. */
  def withCanonizedIds(base: PathId = PathId.empty): AppUpdate = copy(
    id = id.map(_.canonicalPath(base)),
    dependencies = dependencies.map(_.map(_.canonicalPath(base)))
  )
}
object AppUpdate {
  /**
   * Accord validator for incoming app updates. Most rules wrap the field's
   * validator in `optional`, so they only fire when that field is actually
   * supplied in the update.
   */
  implicit val appUpdateValidator = validator[AppUpdate] { appUp =>
    appUp.id is valid
    appUp.dependencies is valid
    appUp.upgradeStrategy is valid
    appUp.storeUrls is optional(every(urlCanBeResolvedValidator))
    appUp.portDefinitions is optional(PortDefinitions.portDefinitionsValidator)
    appUp.fetch is optional(every(fetchUriIsValid))
    appUp.container.each is valid
    appUp.residency is valid
    // Resource amounts and instance counts must be non-negative when given.
    appUp.mem should optional(be >= 0.0)
    appUp.cpus should optional(be >= 0.0)
    appUp.instances should optional(be >= 0)
    appUp.disk should optional(be >= 0.0)
    appUp.env is optional(valid(EnvVarValue.envValidator))
    appUp.secrets is optional(valid(Secret.secretsValidator))
    // Non-empty secrets are only accepted when the "secrets" feature flag is enabled.
    appUp.secrets is optional(empty) or featureEnabled(Features.SECRETS)
  }
}
| yp-engineering/marathon | src/main/scala/mesosphere/marathon/api/v2/json/AppUpdate.scala | Scala | apache-2.0 | 6,019 |
/* Copyright 2009-2011 Jay Conrod
*
* This file is part of Tungsten.
*
* Tungsten is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation, either version 2 of
* the License, or (at your option) any later version.
*
* Tungsten is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Tungsten. If not, see
* <http://www.gnu.org/licenses/>.
*/
package tungsten
import org.junit.Test
import org.junit.Assert._
import Utilities._
/**
 * Tests for Definition.getLocation: a definition's source location is taken
 * from its tungsten.Location annotation when present, and falls back to a
 * symbol-based location otherwise.
 */
class DefinitionTest {
  // NOTE: procedure syntax (`def f { }`) is deprecated; test methods now use
  // the explicit `(): Unit =` form, which compiles to the same JVM signature
  // JUnit invokes by reflection.

  /** A tungsten.Location(file, beginLine, beginCol, endLine, endCol) annotation yields a FileLocation. */
  @Test
  def extractFileLocation(): Unit = {
    val defn = Global("foo", UnitType, None,
                      List(AnnotationValue("tungsten.Location",
                                           List(StringValue("foo.w"),
                                                IntValue(1, 32),
                                                IntValue(2, 32),
                                                IntValue(3, 32),
                                                IntValue(4, 32)))))
    assertEquals(FileLocation("foo.w", 1, 2, 3, 4), defn.getLocation)
  }

  /** Without a location annotation, getLocation falls back to the definition's own symbol. */
  @Test
  def extractSymbolLocation(): Unit = {
    val sym = Symbol("foo")
    val defn = Global("foo", UnitType, None)
    assertEquals(SymbolLocation(sym), defn.getLocation)
  }
}
| jayconrod/tungsten | core/src/test/scala/tungsten/DefinitionTest.scala | Scala | gpl-2.0 | 1,577 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.adapter
import com.twitter.zipkin.gen
import com.twitter.zipkin.query.{TraceCombo, TraceTimeline, TimelineAnnotation}
import com.twitter.zipkin.common.Trace
/**
 * Bidirectional converters between the query-layer domain model
 * (TimelineAnnotation, TraceTimeline, TraceCombo, Trace) and their
 * scrooge-generated Thrift counterparts in the `gen` package.
 *
 * Each pair of overloaded `apply` methods is a straight field-by-field
 * mapping; nested types are delegated to ThriftAdapter or recursively to
 * this adapter.
 */
object ThriftQueryAdapter extends QueryAdapter {
  type timelineAnnotationType = gen.TimelineAnnotation
  type traceTimelineType = gen.TraceTimeline
  type traceComboType = gen.TraceCombo
  type traceType = gen.Trace

  /* TimelineAnnotation from Thrift */
  def apply(t: timelineAnnotationType): TimelineAnnotation = {
    TimelineAnnotation(
      t.`timestamp`,
      t.`value`,
      ThriftAdapter(t.`host`),
      t.`spanId`,
      t.`parentId`,
      t.`serviceName`,
      t.`spanName`)
  }

  /* TimelineAnnotation to Thrift */
  def apply(t: TimelineAnnotation): timelineAnnotationType = {
    gen.TimelineAnnotation(
      t.timestamp,
      t.value,
      ThriftAdapter(t.host),
      t.spanId,
      t.parentId,
      t.serviceName,
      t.spanName)
  }

  /* TraceTimeline from Thrift */
  def apply(t: traceTimelineType): TraceTimeline = {
    // NOTE: thrift `rootMostSpanId` maps onto the domain field `rootSpanId`.
    TraceTimeline(
      t.`traceId`,
      t.`rootMostSpanId`,
      t.`annotations`.map { ThriftQueryAdapter(_) },
      t.`binaryAnnotations`.map { ThriftAdapter(_) })
  }

  /* TraceTimeline to Thrift */
  def apply(t: TraceTimeline): traceTimelineType = {
    gen.TraceTimeline(
      t.traceId,
      t.rootSpanId,
      t.annotations.map { ThriftQueryAdapter(_) },
      t.binaryAnnotations.map { ThriftAdapter(_) })
  }

  /* TraceCombo from Thrift */
  def apply(t: traceComboType): TraceCombo = {
    TraceCombo(
      ThriftQueryAdapter(t.`trace`),
      t.`summary`.map(ThriftAdapter(_)),
      t.`timeline`.map(ThriftQueryAdapter(_)),
      t.`spanDepths`.map(_.toMap))
  }

  /* TraceCombo to Thrift */
  def apply(t: TraceCombo): traceComboType = {
    gen.TraceCombo(
      ThriftQueryAdapter(t.trace),
      t.traceSummary.map(ThriftAdapter(_)),
      t.traceTimeline.map(ThriftQueryAdapter(_)),
      t.spanDepths)
  }

  /* Trace from Thrift */
  def apply(t: traceType): Trace = {
    Trace(t.`spans`.map(ThriftAdapter(_)))
  }

  /* Trace to Thrift */
  def apply(t: Trace): traceType = {
    gen.Trace(t.spans.map(ThriftAdapter(_)))
  }
}
| rodzyn0688/zipkin | zipkin-scrooge/src/main/scala/com/twitter/zipkin/adapter/ThriftQueryAdapter.scala | Scala | apache-2.0 | 2,799 |
package slate
package bench
import japgolly.scalajs.benchmark._
import japgolly.scalajs.benchmark.gui._
import qq.data.{FilterAST, QQDSL}
import qq.protocol.FilterProtocol
import scodec.bits.BitVector
/**
 * Benchmarks for encoding/decoding QQ filter programs with the scodec-based
 * FilterProtocol, parameterized by the size of the generated program.
 */
object SerializationBench {
  // Builds a filter of the given size from repeated selectKey path steps
  // (presumably `i` composed copies — confirm against Util.composeBuildRec).
  val selectKeyBuilder: Benchmark.Builder[Int, FilterAST] =
    Benchmark.setup[Int, FilterAST](i => Util.composeBuildRec(i, QQDSL.getPathS(QQDSL.selectKey("key"))))

  // Builds a filter of the given size from composed identity filters.
  val idBuilder: Benchmark.Builder[Int, FilterAST] =
    Benchmark.setup[Int, FilterAST](i => Util.composeBuildRec(i, QQDSL.id))

  /** Pre-encodes a builder's filter so the decode benchmarks measure decoding only. */
  def preEncode(b: Benchmark.Builder[Int, FilterAST]): Benchmark.Builder[Int, BitVector] =
    new Benchmark.Builder[Int, BitVector](b.prepare.andThen(FilterProtocol.filterCodec.encode).andThen(_.require))

  /** GUI suite covering encode and decode of both program shapes at sizes 5, 10 and 50. */
  val serializationBenchSuite: GuiSuite[Int] = GuiSuite(
    Suite("QQ Program Serialization Benchmarks")(
      selectKeyBuilder("encode fix select key")(FilterProtocol.filterCodec.encode(_).require),
      idBuilder("encode fix compose with id")(FilterProtocol.filterCodec.encode(_).require),
      preEncode(selectKeyBuilder)("decode fix select key")(FilterProtocol.filterCodec.decode(_).require.value),
      preEncode(idBuilder)("decode fix compose with id")(FilterProtocol.filterCodec.decode(_).require.value)
    ), GuiParams.one(GuiParam.int("filter size", 5, 10, 50))
  )
}
| edmundnoble/slate | uibench/src/main/scala/slate/bench/SerializationBench.scala | Scala | mit | 1,318 |
/*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.index
import com.netflix.atlas.core.model.Query
import com.netflix.atlas.core.model.Tag
import com.netflix.atlas.core.model.TagKey
/**
 * A query over tags, optionally restricted to a single tag key, with
 * pagination expressed as an opaque `offset` string and a result `limit`.
 *
 * @param query  optional tag-value query expression
 * @param key    if set, restricts results to this tag key
 * @param offset pagination cursor: either "key" or "key,value"
 * @param limit  maximum number of results to return
 */
case class TagQuery(
    query: Option[Query],
    key: Option[String] = None,
    offset: String = "",
    limit: Int = Integer.MAX_VALUE) {

  /** Parse the offset string to a tag object. Without a comma, the whole offset is the key. */
  lazy val offsetTag: Tag = {
    val comma = offset.indexOf(",")
    if (comma == -1)
      Tag(offset, "", Integer.MAX_VALUE)
    else
      Tag(offset.substring(0, comma), offset.substring(comma + 1), Integer.MAX_VALUE)
  }

  /** Parse the offset string to a tag key object. */
  lazy val offsetTagKey: TagKey = {
    TagKey(offset, Integer.MAX_VALUE)
  }

  /** Check if key matches: true when no key restriction is set, or the keys are equal. */
  def checkKey(k: String): Boolean = key.forall(_ == k)

  /** Extend the limit if possible without overflowing; saturates at Integer.MAX_VALUE. */
  def extendedLimit(amount: Int): Int = {
    val newLimit = limit + amount
    // Int overflow wraps negative; treat that as "unbounded".
    if (newLimit < 0) Integer.MAX_VALUE else newLimit
  }
}
| jasimmk/atlas | atlas-core/src/main/scala/com/netflix/atlas/core/index/TagQuery.scala | Scala | apache-2.0 | 1,610 |
package scala
/**
* Created by hqdvista on 5/9/16.
*/
// Placeholder suite: no interprocedural analysis tests have been written yet.
class InterproceduralTest {
}
| flankerhqd/JAADAS | jade/test/scala/InterproceduralTest.scala | Scala | gpl-3.0 | 90 |
import scala.collection.mutable._
import scala.collection.script._
// #4461
/**
 * Regression test for SI-4461: an ObservableBuffer must publish a mutation
 * event for every kind of operation, including the bulk (`++=`), insert
 * (`insertAll`), prepend (`+=:`, `++=:`) and removal forms.
 */
object Test {
  def main(args: Array[String]): Unit = {
    val buf = new ArrayBuffer[Int] with ObservableBuffer[Int]
    // Echo every published event; the test harness compares stdout to the check file.
    buf.subscribe(new Subscriber[Message[Int], ObservableBuffer[Int]] {
      def notify(pub: ObservableBuffer[Int], event: Message[Int]): Unit = println(event)
    })
    buf += 1 // works
    buf ++= Array(2) // works
    buf ++= ArrayBuffer(3, 4) // works
    buf ++= List(5) // works
    buf ++= collection.immutable.Vector(6, 7) // works
    buf.insertAll(7, List(8, 9, 10))
    0 +=: buf
    List(-2, -1) ++=: buf
    buf remove 0
  }
}
| felixmulder/scala | test/files/run/t4461.scala | Scala | bsd-3-clause | 626 |
package com.twitter.algebird
import org.scalatest.{ PropSpec, Matchers, WordSpec }
import org.scalatest.prop.{ GeneratorDrivenPropertyChecks, PropertyChecks }
import org.scalacheck.{ Gen, Arbitrary }
import CMSHasherImplicits._
import scala.util.Random
/** Property checks asserting that CMS[K] forms a monoid for each supported key type. */
class CmsLaws extends PropSpec with PropertyChecks with Matchers {
  import BaseProperties._

  // Sketch parameters shared by every property below.
  val DELTA = 1E-8
  val EPS = 0.005
  val SEED = 1

  /** Arbitrary that lifts small non-negative integers into single-item sketches. */
  private def createArbitrary[K: Numeric](cmsMonoid: CMSMonoid[K]): Arbitrary[CMS[K]] = {
    val num = implicitly[Numeric[K]]
    Arbitrary(Gen.choose(0, 10000).map(v => cmsMonoid.create(num.fromInt(v))))
  }

  property("CountMinSketch[Short] is a Monoid") {
    implicit val cmsMonoid = CMS.monoid[Short](EPS, DELTA, SEED)
    implicit val cmsArb = createArbitrary[Short](cmsMonoid)
    monoidLaws[CMS[Short]]
  }

  property("CountMinSketch[Int] is a Monoid") {
    implicit val cmsMonoid = CMS.monoid[Int](EPS, DELTA, SEED)
    implicit val cmsArb = createArbitrary[Int](cmsMonoid)
    monoidLaws[CMS[Int]]
  }

  property("CountMinSketch[Long] is a Monoid") {
    implicit val cmsMonoid = CMS.monoid[Long](EPS, DELTA, SEED)
    implicit val cmsArb = createArbitrary[Long](cmsMonoid)
    monoidLaws[CMS[Long]]
  }

  property("CountMinSketch[BigInt] is a Monoid") {
    implicit val cmsMonoid = CMS.monoid[BigInt](EPS, DELTA, SEED)
    implicit val cmsArb = createArbitrary[BigInt](cmsMonoid)
    monoidLaws[CMS[BigInt]]
  }
}
/** Property checks asserting that TopPctCMS[K] forms a monoid for each supported key type. */
class TopPctCmsLaws extends PropSpec with PropertyChecks with Matchers {
  import BaseProperties._

  // Sketch parameters shared by every property below.
  val DELTA = 1E-8
  val EPS = 0.005
  val SEED = 1
  val HEAVY_HITTERS_PCT = 0.1

  /** Arbitrary that lifts small non-negative integers into single-item top-% sketches. */
  private def createArbitrary[K: Numeric](cmsMonoid: TopPctCMSMonoid[K]): Arbitrary[TopCMS[K]] = {
    val num = implicitly[Numeric[K]]
    Arbitrary(Gen.choose(0, 10000).map(v => cmsMonoid.create(num.fromInt(v))))
  }

  property("TopPctCms[Short] is a Monoid") {
    implicit val cmsMonoid = TopPctCMS.monoid[Short](EPS, DELTA, SEED, HEAVY_HITTERS_PCT)
    implicit val cmsArb = createArbitrary[Short](cmsMonoid)
    monoidLaws[TopCMS[Short]]
  }

  property("TopPctCms[Int] is a Monoid") {
    implicit val cmsMonoid = TopPctCMS.monoid[Int](EPS, DELTA, SEED, HEAVY_HITTERS_PCT)
    implicit val cmsArb = createArbitrary[Int](cmsMonoid)
    monoidLaws[TopCMS[Int]]
  }

  property("TopPctCms[Long] is a Monoid") {
    implicit val cmsMonoid = TopPctCMS.monoid[Long](EPS, DELTA, SEED, HEAVY_HITTERS_PCT)
    implicit val cmsArb = createArbitrary[Long](cmsMonoid)
    monoidLaws[TopCMS[Long]]
  }

  property("TopPctCms[BigInt] is a Monoid") {
    implicit val cmsMonoid = TopPctCMS.monoid[BigInt](EPS, DELTA, SEED, HEAVY_HITTERS_PCT)
    implicit val cmsArb = createArbitrary[BigInt](cmsMonoid)
    monoidLaws[TopCMS[BigInt]]
  }
}
// Concrete instantiations of the generic CMSTest suite, one per supported key type.
class CMSShortTest extends CMSTest[Short]
class CMSIntTest extends CMSTest[Int]
class CMSLongTest extends CMSTest[Long]
class CMSBigIntTest extends CMSTest[BigInt]
abstract class CMSTest[K: Ordering: CMSHasher: Numeric] extends WordSpec with Matchers with GeneratorDrivenPropertyChecks {
import TestImplicits._
  // Sketch parameters shared by all tests in this suite.
  val DELTA = 1E-10
  val EPS = 0.001
  val SEED = 1

  // We use TopPctCMS for testing CMSCounting functionality. We argue that because TopPctCMS[K] encapsulates CMS[K]
  // and uses it for all its counting/querying functionality (like an adapter) we can test CMS[K] indirectly through
  // testing TopPctCMS[K].
  val COUNTING_CMS_MONOID = {
    val ANY_HEAVY_HITTERS_PCT = 0.1 // heavy hitters functionality is not relevant for the tests using this monoid
    TopPctCMS.monoid[K](EPS, DELTA, SEED, ANY_HEAVY_HITTERS_PCT)
  }

  // Shared RNG for test data generation (not seeded; data differs between runs).
  val RAND = new scala.util.Random
/**
* Returns the exact frequency of {x} in {data}.
*/
def exactFrequency(data: Seq[K], x: K): Long = data.count(_ == x)
/**
* Returns the exact inner product between two data streams, when the streams
* are viewed as count vectors.
*/
def exactInnerProduct(data1: Seq[K], data2: Seq[K]): Long = {
val counts1 = data1.groupBy(x => x).mapValues(_.size)
val counts2 = data2.groupBy(x => x).mapValues(_.size)
(counts1.keys.toSet & counts2.keys.toSet).toSeq.map { k => counts1(k) * counts2(k) }.sum
}
/**
* Returns the elements in {data} that appear at least heavyHittersPct * data.size times.
*/
def exactHeavyHitters(data: Seq[K], heavyHittersPct: Double): Set[K] = {
val counts = data.groupBy(x => x).mapValues(_.size)
val totalCount = counts.values.sum
counts.filter { _._2 >= heavyHittersPct * totalCount }.keys.toSet
}
/**
* Creates a random data stream.
*
* @param size Number of stream elements.
* @param range Elements are randomly drawn from [0, range).
* @return
*/
def createRandomStream(size: Int, range: Int, rnd: Random = RAND): Seq[K] = {
require(size > 0)
(1 to size).map { _ => rnd.nextInt(range) }.toK[K]
}
"A Count-Min sketch implementing CMSCounting" should {
"count total number of elements in a stream" in {
val totalCount = 1243
val range = 234
val data = createRandomStream(totalCount, range)
val cms = COUNTING_CMS_MONOID.create(data)
cms.totalCount should be(totalCount)
}
"estimate frequencies" in {
val totalCount = 5678
val range = 897
val data = createRandomStream(totalCount, range)
val cms = COUNTING_CMS_MONOID.create(data)
(0 to 100).foreach { _ =>
val x = RAND.nextInt(range).toK[K]
val exact = exactFrequency(data, x)
val approx = cms.frequency(x).estimate
val estimationError = approx - exact
val maxError = approx - cms.frequency(x).min
val beWithinTolerance = be >= 0L and be <= maxError
approx should be >= exact
estimationError should beWithinTolerance
}
}
"exactly compute frequencies in a small stream" in {
val one = COUNTING_CMS_MONOID.create(1.toK[K])
one.frequency(1.toK[K]).estimate should be(1)
one.frequency(2.toK[K]).estimate should be(0)
val two = COUNTING_CMS_MONOID.create(2.toK[K])
two.frequency(1.toK[K]).estimate should be(0)
two.frequency(2.toK[K]).estimate should be(1)
val cms = COUNTING_CMS_MONOID.plus(COUNTING_CMS_MONOID.plus(one, two), two)
cms.frequency(0.toK[K]).estimate should be(0)
cms.frequency(1.toK[K]).estimate should be(1)
cms.frequency(2.toK[K]).estimate should be(2)
val three = COUNTING_CMS_MONOID.create(Seq(1, 1, 1).toK[K])
three.frequency(1.toK[K]).estimate should be(3)
val four = COUNTING_CMS_MONOID.create(Seq(1, 1, 1, 1).toK[K])
four.frequency(1.toK[K]).estimate should be(4)
val cms2 = COUNTING_CMS_MONOID.plus(four, three)
cms2.frequency(1.toK[K]).estimate should be(7)
}
"estimate inner products" in {
val totalCounts = Gen.choose(1, 10000)
val ranges = Gen.choose(100, 2000)
forAll((totalCounts, "totalCount"), (ranges, "range"), minSuccessful(50)) { (totalCount: Int, range: Int) =>
val data1 = createRandomStream(totalCount, range)
val data2 = createRandomStream(totalCount, range)
val cms1 = COUNTING_CMS_MONOID.create(data1)
val cms2 = COUNTING_CMS_MONOID.create(data2)
val approxA = cms1.innerProduct(cms2)
val approx = approxA.estimate
val exact = exactInnerProduct(data1, data2)
val estimationError = approx - exact
val maxError = approx - approxA.min
val beWithinTolerance = be >= 0L and be <= maxError
// We do not support negative counts, hence the lower limit of a frequency is 0 but never negative.
approxA.min should be >= 0L
approx should be(cms2.innerProduct(cms1).estimate)
approx should be >= exact
estimationError should beWithinTolerance
}
}
"exactly compute inner product of small streams" in {
// Nothing in common.
val a1 = List(1, 2, 3).toK[K]
val a2 = List(4, 5, 6).toK[K]
COUNTING_CMS_MONOID.create(a1).innerProduct(COUNTING_CMS_MONOID.create(a2)).estimate should be(0)
// One element in common.
val b1 = List(1, 2, 3).toK[K]
val b2 = List(3, 5, 6).toK[K]
COUNTING_CMS_MONOID.create(b1).innerProduct(COUNTING_CMS_MONOID.create(b2)).estimate should be(1)
// Multiple, non-repeating elements in common.
val c1 = List(1, 2, 3).toK[K]
val c2 = List(3, 2, 6).toK[K]
COUNTING_CMS_MONOID.create(c1).innerProduct(COUNTING_CMS_MONOID.create(c2)).estimate should be(2)
// Multiple, repeating elements in common.
val d1 = List(1, 2, 2, 3, 3).toK[K]
val d2 = List(2, 3, 3, 6).toK[K]
COUNTING_CMS_MONOID.create(d1).innerProduct(COUNTING_CMS_MONOID.create(d2)).estimate should be(6)
}
"work as an Aggregator when created from a single, small stream" in {
val data1 = Seq(1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5).toK[K]
val cms = CMS.aggregator[K](EPS, DELTA, SEED).apply(data1)
cms.frequency(1.toK[K]).estimate should be(1L)
cms.frequency(2.toK[K]).estimate should be(2L)
cms.frequency(3.toK[K]).estimate should be(3L)
cms.frequency(4.toK[K]).estimate should be(4L)
cms.frequency(5.toK[K]).estimate should be(5L)
val topPctCMS = {
val anyHeavyHittersPct = 0.1 // exact setting not relevant for this test
TopPctCMS.aggregator[K](EPS, DELTA, SEED, anyHeavyHittersPct).apply(data1)
}
topPctCMS.frequency(1.toK[K]).estimate should be(1L)
topPctCMS.frequency(2.toK[K]).estimate should be(2L)
topPctCMS.frequency(3.toK[K]).estimate should be(3L)
topPctCMS.frequency(4.toK[K]).estimate should be(4L)
topPctCMS.frequency(5.toK[K]).estimate should be(5L)
val topNCMS = {
val anyHeavyHittersN = 1 // exact setting not relevant for this test
TopNCMS.aggregator[K](EPS, DELTA, SEED, anyHeavyHittersN).apply(data1)
}
topNCMS.frequency(1.toK[K]).estimate should be(1L)
topNCMS.frequency(2.toK[K]).estimate should be(2L)
topNCMS.frequency(3.toK[K]).estimate should be(3L)
topNCMS.frequency(4.toK[K]).estimate should be(4L)
topNCMS.frequency(5.toK[K]).estimate should be(5L)
}
}
"A Top-% Count-Min sketch implementing CMSHeavyHitters" should {
"create correct sketches out of a single item" in {
forAll{ (x: Int) =>
val data = x.toK[K]
val cmsMonoid = {
val anyHeavyHittersPct = 0.1 // exact setting not relevant for this test
TopPctCMS.monoid[K](EPS, DELTA, SEED, anyHeavyHittersPct)
}
val topCms = cmsMonoid.create(data)
topCms.totalCount should be(1)
topCms.cms.totalCount should be(1)
topCms.frequency(x.toK[K]).estimate should be(1)
// Poor man's way to come up with an item that is not x and that is very unlikely to hash to the same slot.
val otherItem = x + 1
topCms.frequency(otherItem.toK[K]).estimate should be(0)
// The following assert indirectly verifies whether the counting table is not all-zero (cf. GH-393).
topCms.innerProduct(topCms).estimate should be(1)
}
}
"create correct sketches out of a single-item stream" in {
forAll{ (x: Int) =>
val data = Seq(x).toK[K]
val cmsMonoid = {
val anyHeavyHittersPct = 0.1 // exact setting not relevant for this test
TopPctCMS.monoid[K](EPS, DELTA, SEED, anyHeavyHittersPct)
}
val topCms = cmsMonoid.create(data)
topCms.totalCount should be(1)
topCms.cms.totalCount should be(1)
topCms.frequency(x.toK[K]).estimate should be(1)
// Poor man's way to come up with an item that is not x and that is very unlikely to hash to the same slot.
val otherItem = x + 1
topCms.frequency(otherItem.toK[K]).estimate should be(0)
// The following assert indirectly verifies whether the counting table is not all-zero (cf. GH-393).
topCms.innerProduct(topCms).estimate should be(1)
}
}
"estimate heavy hitters" in {
// Simple way of making some elements appear much more often than others.
val data1 = (1 to 3000).map { _ => RAND.nextInt(3) }.toK[K]
val data2 = (1 to 3000).map { _ => RAND.nextInt(10) }.toK[K]
val data3 = (1 to 1450).map { _ => -1 }.toK[K] // element close to being a 20% heavy hitter
val data = data1 ++ data2 ++ data3
// Find elements that appear at least 20% of the time.
val heavyHittersPct = 0.2
val cms = TopPctCMS.monoid[K](EPS, DELTA, SEED, 0.2).create(data)
val trueHhs = exactHeavyHitters(data, heavyHittersPct)
val estimatedHhs = cms.heavyHitters
// All true heavy hitters must be claimed as heavy hitters.
trueHhs.intersect(estimatedHhs) should be(trueHhs)
// It should be very unlikely that any element with count less than
// (heavyHittersPct - eps) * totalCount is claimed as a heavy hitter.
val minHhCount = (heavyHittersPct - cms.eps) * cms.totalCount
val infrequent = data.groupBy { x => x }.mapValues { _.size }.filter { _._2 < minHhCount }.keys.toSet
infrequent.intersect(estimatedHhs) should be('empty)
}
"(when adding CMS instances) drop old heavy hitters when new heavy hitters replace them" in {
val monoid = TopPctCMS.monoid[K](EPS, DELTA, SEED, 0.3)
val cms1 = monoid.create(Seq(1, 2, 2).toK[K])
cms1.heavyHitters should be(Set(1, 2))
val cms2 = cms1 ++ monoid.create(2.toK[K])
cms2.heavyHitters should be(Set(2))
val cms3 = cms2 ++ monoid.create(1.toK[K])
cms3.heavyHitters should be(Set(1, 2))
val cms4 = cms3 ++ monoid.create(Seq(0, 0, 0, 0, 0, 0).toK[K])
cms4.heavyHitters should be(Set(0))
}
"(when adding individual items) drop old heavy hitters when new heavy hitters replace them" in {
val monoid = TopPctCMS.monoid[K](EPS, DELTA, SEED, 0.3)
val cms1 = monoid.create(Seq(1, 2, 2).toK[K])
cms1.heavyHitters should be(Set(1, 2))
val cms2 = cms1 + 2.toK[K]
cms2.heavyHitters should be(Set(2))
val cms3 = cms2 + 1.toK[K]
cms3.heavyHitters should be(Set(1, 2))
val heaviest = 0.toK[K]
val cms4 = cms3 + heaviest + heaviest + heaviest + heaviest + heaviest + heaviest
cms4.heavyHitters should be(Set(heaviest))
}
"(when adding CMS instances) merge heavy hitters correctly [GH-353 regression test]" in {
// See https://github.com/twitter/algebird/issues/353
val monoid = TopPctCMS.monoid(EPS, DELTA, SEED, 0.1)
val data1 = Seq(1, 1, 1, 2, 2, 3).toK[K]
val data2 = Seq(3, 4, 4, 4, 5, 5).toK[K]
val data3 = Seq(3, 6, 6, 6, 7, 7).toK[K]
val data4 = Seq(3, 8, 8, 8, 9, 9).toK[K]
val singleData = data1 ++ data2 ++ data3 ++ data4
/*
Data sets from above shown in tabular view
Item 1 2 3 4 total (= singleData)
----------------------------------------
A (1) 3 - - - 3
B (2) 2 - - - 2
C (3) 1 1 1 1 4 <<< C is global top 1 heavy hitter
D (4) - 3 - - 3
E (5) - 2 - - 2
F (6) - - 3 - 3
G (7) - - 2 - 2
H (8) - - - 3 3
I (9) - - - 2 2
*/
val cms1 = monoid.create(data1)
val cms2 = monoid.create(data2)
val cms3 = monoid.create(data3)
val cms4 = monoid.create(data4)
val aggregated = cms1 ++ cms2 ++ cms3 ++ cms4
val single = monoid.create(singleData)
aggregated.heavyHitters should be(single.heavyHitters)
aggregated.heavyHitters contains (3.toK[K]) // C=3 is global top 1 heavy hitter
}
"exactly compute heavy hitters when created from a single, small stream" in {
val data1 = Seq(1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5).toK[K]
val cms1 = TopPctCMS.monoid[K](EPS, DELTA, SEED, 0.01).create(data1)
cms1.heavyHitters should be(Set(1, 2, 3, 4, 5))
val cms2 = TopPctCMS.monoid[K](EPS, DELTA, SEED, 0.1).create(data1)
cms2.heavyHitters should be(Set(2, 3, 4, 5))
val cms3 = TopPctCMS.monoid[K](EPS, DELTA, SEED, 0.3).create(data1)
cms3.heavyHitters should be(Set(5))
val cms4 = TopPctCMS.monoid[K](EPS, DELTA, SEED, 0.9).create(data1)
cms4.heavyHitters should be(Set[K]())
}
"work as an Aggregator when created from a single, small stream" in {
val data1 = Seq(1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5).toK[K]
val cms1 = TopPctCMS.aggregator[K](EPS, DELTA, SEED, 0.01).apply(data1)
cms1.heavyHitters should be(Set(1, 2, 3, 4, 5))
val cms2 = TopPctCMS.aggregator[K](EPS, DELTA, SEED, 0.1).apply(data1)
cms2.heavyHitters should be(Set(2, 3, 4, 5))
val cms3 = TopPctCMS.aggregator[K](EPS, DELTA, SEED, 0.3).apply(data1)
cms3.heavyHitters should be(Set(5))
val cms4 = TopPctCMS.aggregator[K](EPS, DELTA, SEED, 0.9).apply(data1)
cms4.heavyHitters should be(Set[K]())
}
}
"A Top-N Count-Min sketch implementing CMSHeavyHitters" should {
// Note: As described in https://github.com/twitter/algebird/issues/353, a top-N CMS is, in general, not able to
// merge heavy hitters correctly. This is because merging top-N based heavy hitters is not an associative
// operation.
"create correct sketches out of a single item" in {
forAll{ (x: Int) =>
val data = x.toK[K]
val cmsMonoid = {
val anyHeavyHittersN = 2 // exact setting not relevant for this test
TopNCMS.monoid[K](EPS, DELTA, SEED, anyHeavyHittersN)
}
val topCms = cmsMonoid.create(data)
topCms.totalCount should be(1)
topCms.cms.totalCount should be(1)
topCms.frequency(x.toK[K]).estimate should be(1)
// Poor man's way to come up with an item that is not x and that is very unlikely to hash to the same slot.
val otherItem = x + 1
topCms.frequency(otherItem.toK[K]).estimate should be(0)
// The following assert indirectly verifies whether the counting table is not all-zero (cf. GH-393).
topCms.innerProduct(topCms).estimate should be(1)
}
}
"create correct sketches out of a single-item stream" in {
forAll{ (x: Int) =>
val data = Seq(x).toK[K]
val cmsMonoid = {
val anyHeavyHittersN = 2 // exact setting not relevant for this test
TopNCMS.monoid[K](EPS, DELTA, SEED, anyHeavyHittersN)
}
val topCms = cmsMonoid.create(data)
topCms.totalCount should be(1)
topCms.cms.totalCount should be(1)
topCms.frequency(x.toK[K]).estimate should be(1)
// Poor man's way to come up with an item that is not x and that is very unlikely to hash to the same slot.
val otherItem = x + 1
topCms.frequency(otherItem.toK[K]).estimate should be(0)
// The following assert indirectly verifies whether the counting table is not all-zero (cf. GH-393).
topCms.innerProduct(topCms).estimate should be(1)
}
}
// This test involves merging of top-N CMS instances, which is not an associative operation. This means that the
// success or failure of this test depends on the merging order and/or the test data characteristics.
"(when adding CMS instances) drop old heavy hitters when new heavy hitters replace them, if merge order matches data" in {
val heavyHittersN = 2
val monoid = TopNCMS.monoid[K](EPS, DELTA, SEED, heavyHittersN)
val cms1 = monoid.create(Seq(1, 2, 2).toK[K])
cms1.heavyHitters should be(Set(1, 2))
val cms2 = cms1 ++ monoid.create(Seq(3, 3, 3).toK[K])
cms2.heavyHitters should be(Set(2, 3))
val cms3 = cms2 ++ monoid.create(Seq(1, 1, 1).toK[K])
cms3.heavyHitters should be(Set(3, 1))
val cms4 = cms3 ++ monoid.create(Seq(6, 6, 6, 6, 6, 6).toK[K])
cms4.heavyHitters should be(Set(1, 6))
}
"(when adding individual items) drop old heavy hitters when new heavy hitters replace them" in {
val monoid = TopPctCMS.monoid[K](EPS, DELTA, SEED, 0.3)
val cms1 = monoid.create(Seq(1, 2, 2).toK[K])
cms1.heavyHitters should be(Set(1, 2))
val cms2 = cms1 + 2.toK[K]
cms2.heavyHitters should be(Set(2))
val cms3 = cms2 + 1.toK[K]
cms3.heavyHitters should be(Set(1, 2))
val heaviest = 0.toK[K]
val cms4 = cms3 + heaviest + heaviest + heaviest + heaviest + heaviest + heaviest
cms4.heavyHitters should be(Set(heaviest))
}
// This test documents the order bias of top-N CMS, i.e. it's a negative test case.
// See https://github.com/twitter/algebird/issues/353
"(when adding CMS instances) generally FAIL to merge heavy hitters correctly due to order bias" in {
val topN = 2
val monoid = TopNCMS.monoid(EPS, DELTA, SEED, topN)
val data1 = Seq(1, 1, 1, 2, 2, 3).toK[K]
val data2 = Seq(3, 4, 4, 4, 5, 5).toK[K]
val data3 = Seq(3, 6, 6, 6, 7, 7).toK[K]
val data4 = Seq(3, 8, 8, 8, 9, 9).toK[K]
val singleData = data1 ++ data2 ++ data3 ++ data4
/*
Data sets from above shown in tabular view
Item 1 2 3 4 total (= singleData)
----------------------------------------
A (1) 3 - - - 3
B (2) 2 - - - 2
C (3) 1 1 1 1 4 <<< C is global top 1 heavy hitter
D (4) - 3 - - 3
E (5) - 2 - - 2
F (6) - - 3 - 3
G (7) - - 2 - 2
H (8) - - - 3 3
I (9) - - - 2 2
*/
val cms1 = monoid.create(data1)
val cms2 = monoid.create(data2)
val cms3 = monoid.create(data3)
val cms4 = monoid.create(data4)
val aggregated = cms1 ++ cms2 ++ cms3 ++ cms4
val single = monoid.create(singleData)
aggregated.heavyHitters shouldNot be(single.heavyHitters)
aggregated.heavyHitters shouldNot contain(3.toK[K]) // C=3 is global top 1 heavy hitter
}
// Compared to adding top-N CMS instances, which is generally unsafe because of order bias (see test cases above),
// adding individual items to a top-N CMS is a safe operation.
// See https://github.com/twitter/algebird/issues/353
"(when adding individual items) merge heavy hitters correctly [GH-353 regression test]" in {
val topN = 2
val monoid = TopNCMS.monoid(EPS, DELTA, SEED, topN)
val data1 = Seq(1, 1, 1, 2, 2, 3).toK[K]
val data2 = Seq(3, 4, 4, 4, 5, 5).toK[K]
val data3 = Seq(3, 6, 6, 6, 7, 7).toK[K]
val data4 = Seq(3, 8, 8, 8, 9, 9).toK[K]
val singleData = data1 ++ data2 ++ data3 ++ data4
/*
Data sets from above shown in tabular view
Item 1 2 3 4 total (= singleData)
----------------------------------------
A (1) 3 - - - 3
B (2) 2 - - - 2
C (3) 1 1 1 1 4 <<< C is global top 1 heavy hitter
D (4) - 3 - - 3
E (5) - 2 - - 2
F (6) - - 3 - 3
G (7) - - 2 - 2
H (8) - - - 3 3
I (9) - - - 2 2
*/
val cms1 = monoid.create(data1)
val cms2 = cms1 + 3.toK[K] + 4.toK[K] + 4.toK[K] + 4.toK[K] + 5.toK[K] + 5.toK[K] // effectively "++ data2"
val cms3 = cms2 + 3.toK[K] + 6.toK[K] + 6.toK[K] + 6.toK[K] + 7.toK[K] + 7.toK[K] // "++ data3"
val aggregated = cms3 + 3.toK[K] + 8.toK[K] + 8.toK[K] + 8.toK[K] + 9.toK[K] + 9.toK[K] // "++ data4"
val single = monoid.create(singleData)
aggregated.heavyHitters should be(single.heavyHitters)
aggregated.heavyHitters should contain(3.toK[K]) // C=3 is global top 1 heavy hitter
}
"exactly compute heavy hitters when created a from single, small stream" in {
val data1 = Seq(1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5).toK[K]
val cms1 = TopNCMS.monoid[K](EPS, DELTA, SEED, 5).create(data1)
cms1.heavyHitters should be(Set(1, 2, 3, 4, 5))
val cms2 = TopNCMS.monoid[K](EPS, DELTA, SEED, 4).create(data1)
cms2.heavyHitters should be(Set(2, 3, 4, 5))
val cms3 = TopNCMS.monoid[K](EPS, DELTA, SEED, 3).create(data1)
cms3.heavyHitters should be(Set(3, 4, 5))
val cms4 = TopNCMS.monoid[K](EPS, DELTA, SEED, 2).create(data1)
cms4.heavyHitters should be(Set(4, 5))
val cms5 = TopNCMS.monoid[K](EPS, DELTA, SEED, 1).create(data1)
cms5.heavyHitters should be(Set(5))
}
"work as an Aggregator when created from a single, small stream" in {
val data1 = Seq(1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5).toK[K]
val cms1 = TopNCMS.aggregator[K](EPS, DELTA, SEED, 5).apply(data1)
cms1.heavyHitters should be(Set(1, 2, 3, 4, 5))
val cms2 = TopNCMS.aggregator[K](EPS, DELTA, SEED, 4).apply(data1)
cms2.heavyHitters should be(Set(2, 3, 4, 5))
val cms3 = TopNCMS.aggregator[K](EPS, DELTA, SEED, 3).apply(data1)
cms3.heavyHitters should be(Set(3, 4, 5))
val cms4 = TopNCMS.aggregator[K](EPS, DELTA, SEED, 2).apply(data1)
cms4.heavyHitters should be(Set(4, 5))
val cms5 = TopNCMS.aggregator[K](EPS, DELTA, SEED, 1).apply(data1)
cms5.heavyHitters should be(Set(5))
}
}
}
/**
 * Verifies the conversion helpers in CMSFunctions that translate between the
 * accuracy parameters (eps, delta) and the sketch dimensions (width, depth).
 */
class CMSFunctionsSpec extends PropSpec with PropertyChecks with Matchers {

  property("roundtrips width->eps->width") {
    forAll { (i: Int) =>
      whenever(i > 0) {
        CMSFunctions.width(CMSFunctions.eps(i)) should be(i)
      }
    }
  }

  property("roundtrips depth->delta->depth for common depth values") {
    // For all i > 709 this test break because of precision limits:  For all i > 709 will return 0.0, which is not the
    // mathematically correct value but rather the asymptote of delta.
    val maxI = 709
    forAll((Gen.choose(1, maxI), "depth")) { (i: Int) =>
      CMSFunctions.depth(CMSFunctions.delta(i)) should be(i)
    }
  }

  // Documents a precision error that is exposed by all depths > 709.
  // For all i > 709, CMSFunctions.delta(i) will return 0.0, which is not the mathematically correct value but rather
  // the asymptote of the delta function.
  property("throw IAE when deriving delta from invalid depth values") {
    val maxValidDelta = 709
    forAll(Gen.choose(maxValidDelta + 1, 10000)) { (invalidDepth: Int) =>
      val exception = intercept[IllegalArgumentException] {
        CMSFunctions.delta(invalidDepth)
      }
      exception.getMessage should fullyMatch regex
        """requirement failed: depth must be smaller as it causes precision errors when computing delta \(\d+ led to an invalid delta of 0.0\)"""
    }
  }

  property("throw IAE when deriving depth from invalid delta values") {
    // deltas at or below ~1E-330 underflow to 0.0 and must be rejected
    val invalidDeltas = Table("invalidDelta", 0.0, 1E-330, 1E-400)
    forAll(invalidDeltas) { (invalidDelta: Double) =>
      val exception = intercept[IllegalArgumentException] {
        CMSFunctions.depth(invalidDelta)
      }
      exception.getMessage should be("requirement failed: delta must lie in (0, 1)")
    }
  }

}
/**
 * Verifies the precondition checks of the CMSParams constructor:
 * eps/delta must lie in (0, 1) and enough pairwise independent hashes must be supplied.
 */
class CMSParamsSpec extends PropSpec with PropertyChecks with Matchers {

  val AnyEps = 0.001
  val AnyDelta = 1E-5
  // any valid hash family; the concrete seed is irrelevant for these tests
  val AnyHashes = {
    val AnySeed = 1
    CMSFunctions.generateHashes[Long](AnyEps, AnyDelta, AnySeed)
  }

  property("throw IAE for invalid eps values") {
    val invalidEpsilons = Table("invalidEps", 0.0, 1.0, 2.0, 100.0)
    forAll(invalidEpsilons) { (invalidEps: Double) =>
      val exception = intercept[IllegalArgumentException] {
        CMSParams(AnyHashes, invalidEps, AnyDelta)
      }
      exception.getMessage should be("requirement failed: eps must lie in (0, 1)")
    }
  }

  property("throw IAE for invalid delta values") {
    // includes subnormal/underflowing values that collapse to 0.0
    val invalidDeltas = Table("invalidDelta", 0.0, 1.0, 2.0, 100.0, 1E-330, 1E-400)
    forAll(invalidDeltas) { (invalidDelta: Double) =>
      val exception = intercept[IllegalArgumentException] {
        CMSParams(AnyHashes, AnyEps, invalidDelta)
      }
      exception.getMessage should be("requirement failed: delta must lie in (0, 1)")
    }
  }

  property("throw IAE when we do not have enough hashes") {
    val tooFewHashes = Seq.empty[CMSHash[Long]]
    val exception = intercept[IllegalArgumentException] {
      CMSParams(tooFewHashes, AnyEps, AnyDelta)
    }
    exception.getMessage should fullyMatch regex """requirement failed: we require at least (\d+) hash functions"""
  }

}
// Concrete hasher specs for every numeric type that ships with a CMSHasher instance.
class CMSHasherShortSpec extends CMSHasherSpec[Short]
class CMSHasherIntSpec extends CMSHasherSpec[Int]
class CMSHasherLongSpec extends CMSHasherSpec[Long]
class CMSHasherBigIntSpec extends CMSHasherSpec[BigInt]

/**
 * Property-checks that a CMSHash always maps items into valid, non-negative
 * slot indices for any (a, b) coefficients and any positive table width.
 */
abstract class CMSHasherSpec[K: CMSHasher: Numeric] extends PropSpec with PropertyChecks with Matchers {

  import TestImplicits._

  property("returns positive hashes (i.e. slots) only") {
    forAll { (a: Int, b: Int, width: Int, x: Int) =>
      whenever (width > 0) {
        val hash = CMSHash[K](a, b, width)
        hash(x.toK[K]) should be >= 0
      }
    }
  }

}
/**
 * This spec verifies that we provide legacy types for the CMS and CountMinSketchMonoid classes we had in Algebird
 * versions < 0.8.1.  Note that this spec is not meant to verify their actual functionality.
 */
class LegacyCMSSpec extends WordSpec with Matchers {

  import legacy.CountMinSketchMonoid

  val DELTA = 1E-10
  val EPS = 0.001
  val SEED = 1

  val CMS_MONOID: CountMinSketchMonoid = CountMinSketchMonoid(EPS, DELTA, SEED)

  "The legacy package" should {

    "provide a legacy type for the CMS implementation in Algebird versions < 0.8.1" in {
      val cms: legacy.CMS = CMS_MONOID.create(Seq(0L, 0L))
      cms.frequency(0L).estimate should be (2)
      cms.heavyHitters should be(Set(0L))
    }

    "provide a legacy type for the CMS monoid implementation in Algebird versions < 0.8.1" in {
      // construction with an explicit heavy-hitters percentage must still compile and work
      val cmsMonoid: CountMinSketchMonoid = {
        val eps = 0.001
        val delta = 1E-5
        val seed = 1
        val heavyHittersPct = 0.1
        CountMinSketchMonoid(eps, delta, seed, heavyHittersPct)
      }
      val cms = cmsMonoid.create(Seq(0L, 0L))
      cms.frequency(0L).estimate should be (2)
      cms.heavyHitters should be(Set(0L))
    }

  }

}
object TestImplicits {

  // Explicit conversions from `Int` (and collections of `Int`) to the concrete `K` type
  // under test.  We deliberately prefer explicit over implicit conversions
  // (cf. JavaConverters vs. JavaConversions), and the name `toK` both clarifies the
  // intent and avoids clashing with the standard `to[Col]` collection method.
  implicit class IntCast(i: Int) {
    def toK[A](implicit num: Numeric[A]): A = num.fromInt(i)
  }

  implicit class SeqCast(ints: Seq[Int]) {
    def toK[A: Numeric]: Seq[A] = ints.map(_.toK[A])
  }

  implicit class SetCast(ints: Set[Int]) {
    def toK[A: Numeric]: Set[A] = ints.map(_.toK[A])
  }

}
/** Copyright 2015, Metreta Information Technology s.r.l. */
package com.metreta.spark.orientdb.connector.rdd
import scala.collection.JavaConversions._
import java.util.Date
import java.util.Calendar
/**
 * Base class for records read from OrientDB.  Exposes typed accessors over the parallel
 * `columnNames` / `columnValues` sequences, either by positional index or by
 * (case-insensitive) column name.
 *
 * @param className    OrientDB class of the record
 * @param rid          record id (e.g. "#12:0")
 * @param columnNames  column names, positionally aligned with `columnValues`
 * @param columnValues raw column values
 */
abstract class OrientEntry(val className: String,
                           val rid: String,
                           val columnNames: IndexedSeq[String],
                           val columnValues: IndexedSeq[Any]) extends Serializable {

  /**
   * Returns the raw value stored at `index`.
   * @throws OrientDocumentException if the index is out of bounds.
   */
  private def valueAt(index: Int): Any =
    try {
      columnValues(index)
    } catch {
      case e1: IndexOutOfBoundsException =>
        throw OrientDocumentException("Index not found, please use indices between 0 and " + (columnNames.size - 1))
    }

  /**
   * Resolves a column name (case-insensitively) to its positional index.
   * Shared by all name-based accessors, which previously duplicated this lookup.
   * @throws OrientDocumentException if no column carries that name.
   */
  private def indexForName(name: String): Int = {
    val target = name.toLowerCase()
    val index = columnNames.indexWhere(_.toLowerCase() == target)
    if (index < 0)
      throw OrientDocumentException(s"Column name not found, please use one of the following names: [${columnNames.mkString("|")}]")
    index
  }

  // ---------- accessors by positional index ----------

  /** Returns the value at `index` cast to String (ClassCastException if it is not one). */
  def getString(index: Int): String = valueAt(index).asInstanceOf[String]

  def getInt(index: Int): Int = getString(index).toInt

  def getDouble(index: Int): Double = getString(index).toDouble

  def getFloat(index: Int): Float = getString(index).toFloat

  def getBoolean(index: Int): Boolean = getString(index).toBoolean

  def getShort(index: Int): Short = getString(index).toShort

  def getLong(index: Int): Long = getString(index).toLong

  def getByte(index: Int): Byte = getString(index).toByte

  // NOTE(review): relies on the deprecated java.util.Date(String) parser; the accepted
  // date format therefore depends on the JDK -- consider a DateFormat. Kept for compatibility.
  def getDate(index: Int): Date = new Date(getString(index))

  /** Returns the value at `index` cast to `T` (unchecked at runtime due to type erasure). */
  def getAs[T](index: Int): T = valueAt(index).asInstanceOf[T]

  // ---------- accessors by (case-insensitive) column name ----------

  /** Returns the `toString` of the value in column `name`. */
  def getString(name: String): String = valueAt(indexForName(name)).toString

  def getDouble(name: String): Double = getString(name).toDouble

  def getInt(name: String): Int = getString(name).toInt

  def getFloat(name: String): Float = getString(name).toFloat

  def getBoolean(name: String): Boolean = getString(name).toBoolean

  def getShort(name: String): Short = getString(name).toShort

  def getLong(name: String): Long = getString(name).toLong

  def getByte(name: String): Byte = getString(name).toByte

  // NOTE(review): same deprecated Date(String) parsing caveat as getDate(index).
  def getDate(name: String): Date = new Date(getString(name))

  /** Returns the value in column `name` cast to `T` (unchecked at runtime due to type erasure). */
  def getAs[T](name: String): T = valueAt(indexForName(name)).asInstanceOf[T]

}
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.data.tables
import java.nio.ByteBuffer
import java.util.Date
import java.util.Map.Entry
import com.google.common.base.Charsets
import com.google.common.collect.ImmutableSet
import com.google.common.primitives.{Bytes, Longs, Shorts}
import com.vividsolutions.jts.geom.Point
import org.apache.accumulo.core.client.BatchDeleter
import org.apache.accumulo.core.client.admin.TableOperations
import org.apache.accumulo.core.conf.Property
import org.apache.accumulo.core.data.{Key, Mutation, Range => aRange, Value}
import org.apache.hadoop.io.Text
import org.joda.time.{DateTime, Seconds, Weeks}
import org.locationtech.geomesa.accumulo.data.AccumuloFeatureWriter.{FeatureToMutations, FeatureToWrite}
import org.locationtech.geomesa.accumulo.data.EMPTY_TEXT
import org.locationtech.geomesa.accumulo.index.QueryPlanners._
import org.locationtech.geomesa.curve.Z3SFC
import org.locationtech.geomesa.features.kryo.KryoFeatureSerializer
import org.locationtech.geomesa.features.nio.{AttributeAccessor, LazySimpleFeature}
import org.locationtech.geomesa.filter.function.{BasicValues, Convert2ViewerFunction}
import org.locationtech.geomesa.utils.geotools.Conversions._
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
import org.opengis.feature.`type`.GeometryDescriptor
import org.opengis.feature.simple.SimpleFeatureType
import scala.collection.JavaConversions._
/**
 * Accumulo table definition for the Z3 index: point geometries plus a required date
 * attribute, keyed by [2-byte epoch week | 8-byte z3(x, y, secondsInWeek) | feature id].
 */
object Z3Table extends GeoMesaTable {

  val EPOCH = new DateTime(0) // min value we handle - 1970-01-01T00:00:00.000
  val EPOCH_END = EPOCH.plusSeconds(Int.MaxValue) // max value we can calculate - 2038-01-18T22:19:07.000
  // space-filling curve used to interleave (x, y, t) into a single long
  val SFC = new Z3SFC
  val FULL_CF = new Text("F")
  val BIN_CF = new Text("B")
  val EMPTY_BYTES = Array.empty[Byte]
  val EMPTY_VALUE = new Value(EMPTY_BYTES)

  // seconds elapsed within the week containing dtg (after subtracting whole epoch weeks)
  def secondsInCurrentWeek(dtg: DateTime, weeks: Weeks) =
    Seconds.secondsBetween(EPOCH, dtg).getSeconds - weeks.toStandardSeconds.getSeconds

  // number of whole weeks between the epoch and dtg (becomes the row-key prefix)
  def epochWeeks(dtg: DateTime) = Weeks.weeksBetween(EPOCH, new DateTime(dtg))

  // z3 requires a recent-enough schema version, point geometries, and a date attribute
  override def supports(sft: SimpleFeatureType): Boolean =
    sft.getSchemaVersion > 4 &&
      sft.getGeometryDescriptor.getType.getBinding == classOf[Point] &&
      sft.getDtgField.isDefined

  override val suffix: String = "z3"

  // z3 always needs a separate table since we don't include the feature name in the row key
  override def formatTableName(prefix: String, sft: SimpleFeatureType): String =
    GeoMesaTable.formatSoloTableName(prefix, suffix, sft)

  override def writer(sft: SimpleFeatureType): FeatureToMutations = {
    val dtgIndex = sft.getDtgIndex.getOrElse(throw new RuntimeException("Z3 writer requires a valid date"))
    // optionally also write a compact "bin" representation when a track id is configured
    val binWriter: (FeatureToWrite, Mutation) => Unit = sft.getBinTrackId match {
      case Some(trackId) =>
        val geomIndex = sft.getGeomIndex
        val trackIndex = sft.indexOf(trackId)
        (fw: FeatureToWrite, m: Mutation) => {
          val (lat, lon) = {
            val geom = fw.feature.getAttribute(geomIndex).asInstanceOf[Point]
            (geom.getY.toFloat, geom.getX.toFloat)
          }
          val dtg = fw.feature.getAttribute(dtgIndex).asInstanceOf[Date].getTime
          val trackId = Option(fw.feature.getAttribute(trackIndex)).map(_.toString).getOrElse("")
          val encoded = Convert2ViewerFunction.encodeToByteArray(BasicValues(lat, lon, dtg, trackId))
          val value = new Value(encoded)
          m.put(BIN_CF, EMPTY_TEXT, fw.columnVisibility, value)
        }
      case _ => (fw: FeatureToWrite, m: Mutation) => {}
    }
    if (sft.getSchemaVersion > 5) {
      // we know the data is kryo serialized in version 6+
      (fw: FeatureToWrite) => {
        val mutation = new Mutation(getRowKey(fw, dtgIndex))
        binWriter(fw, mutation)
        mutation.put(FULL_CF, EMPTY_TEXT, fw.columnVisibility, fw.dataValue)
        Seq(mutation)
      }
    } else {
      // we always want to use kryo - reserialize the value to ensure it
      val writer = new KryoFeatureSerializer(sft)
      (fw: FeatureToWrite) => {
        val mutation = new Mutation(getRowKey(fw, dtgIndex))
        val payload = new Value(writer.serialize(fw.feature))
        binWriter(fw, mutation)
        mutation.put(FULL_CF, EMPTY_TEXT, fw.columnVisibility, payload)
        Seq(mutation)
      }
    }
  }

  override def remover(sft: SimpleFeatureType): FeatureToMutations = {
    val dtgIndex = sft.getDtgIndex.getOrElse(throw new RuntimeException("Z3 writer requires a valid date"))
    (fw: FeatureToWrite) => {
      // delete both column families written by `writer`
      val mutation = new Mutation(getRowKey(fw, dtgIndex))
      mutation.putDelete(BIN_CF, EMPTY_TEXT, fw.columnVisibility)
      mutation.putDelete(FULL_CF, EMPTY_TEXT, fw.columnVisibility)
      Seq(mutation)
    }
  }

  override def deleteFeaturesForType(sft: SimpleFeatureType, bd: BatchDeleter): Unit = {
    // an unbounded range covers the whole table (z3 tables are per-feature-type)
    bd.setRanges(Seq(new aRange()))
    bd.delete()
  }

  // 2-byte epoch-week prefix followed by the 8-byte z3 index for the given point/time
  def getRowPrefix(x: Double, y: Double, time: Long): Array[Byte] = {
    val dtg = new DateTime(time)
    val weeks = epochWeeks(dtg)
    val prefix = Shorts.toByteArray(weeks.getWeeks.toShort)
    val secondsInWeek = secondsInCurrentWeek(dtg, weeks)
    val z3 = SFC.index(x, y, secondsInWeek)
    val z3idx = Longs.toByteArray(z3.z)
    Bytes.concat(prefix, z3idx)
  }

  // full row key = prefix + feature id bytes; falls back to "now" when the date is null
  private def getRowKey(ftw: FeatureToWrite, dtgIndex: Int): Array[Byte] = {
    val geom = ftw.feature.point
    val x = geom.getX
    val y = geom.getY
    val dtg = ftw.feature.getAttribute(dtgIndex).asInstanceOf[Date]
    val time = if (dtg == null) System.currentTimeMillis() else dtg.getTime
    val prefix = getRowPrefix(x, y, time)
    val idBytes = ftw.feature.getID.getBytes(Charsets.UTF_8)
    Bytes.concat(prefix, idBytes)
  }

  // decodes entries lazily; the feature id is the row suffix after the 10-byte (week + z3) prefix
  def adaptZ3Iterator(sft: SimpleFeatureType): FeatureFunction = {
    val accessors = AttributeAccessor.buildSimpleFeatureTypeAttributeAccessors(sft)
    (e: Entry[Key, Value]) => {
      val k = e.getKey
      val row = k.getRow.getBytes
      val idbytes = row.slice(10, Int.MaxValue)
      val id = new String(idbytes)
      new LazySimpleFeature(id, sft, accessors, ByteBuffer.wrap(e.getValue.get()))
      // TODO visibility
    }
  }

  def adaptZ3KryoIterator(sft: SimpleFeatureType): FeatureFunction = {
    val kryo = new KryoFeatureSerializer(sft)
    (e: Entry[Key, Value]) => {
      // TODO lazy features if we know it's read-only?
      kryo.deserialize(e.getValue.get())
    }
  }

  def configureTable(sft: SimpleFeatureType, table: String, tableOps: TableOperations): Unit = {
    tableOps.setProperty(table, Property.TABLE_SPLIT_THRESHOLD.getKey, "128M")
    tableOps.setProperty(table, Property.TABLE_BLOCKCACHE_ENABLED.getKey, "true")
    // one locality group per indexed attribute, plus the bin and full column families
    val indexedAttributes = getAttributesToIndex(sft)
    val localityGroups: Map[Text, Text] =
      indexedAttributes.map { case (name, _) => (name, name) }.toMap.+((BIN_CF, BIN_CF)).+((FULL_CF, FULL_CF))
    tableOps.setLocalityGroups(table, localityGroups.map { case (k, v) => (k.toString, ImmutableSet.of(v)) } )
  }

  // all non-geometry attributes, as (column family text, attribute index) pairs
  private def getAttributesToIndex(sft: SimpleFeatureType) =
    sft.getAttributeDescriptors
      .filterNot { d => d.isInstanceOf[GeometryDescriptor] }
      .map { d => (new Text(d.getLocalName.getBytes(Charsets.UTF_8)), sft.indexOf(d.getName)) }
}
| drackaer/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/data/tables/Z3Table.scala | Scala | apache-2.0 | 7,720 |
/** This file is part of TextCompose, a program for producing PDF from text files.
* Copyright 2014 Jesper S Villadsen <jeschvi@gmail.com>
* License: GNU Affero General Public License version 3 or later.
* For full license text see LICENSE.txt or <http://www.gnu.org/licenses/>.
*/
package textcompose.core
import java.io.File
import textcompose.{ editor, storage }
/**
 * Derives the file names/paths for a compilation run and can launch the PDF viewer.
 *
 * @param internal           true => use the built-in previewer, false => delegate to the OS
 * @param providedSourceName source file name as given by the user (absolute or relative)
 * @param temporaryLocation  whether the PDF lives in a temporary location
 * @param openPDFViewer      when to open the PDF viewer after compiling
 */
class Arguments(
  internal: Boolean,
  providedSourceName: String,
  temporaryLocation: Boolean,
  openPDFViewer: PreviewType.Value) {

  var SourceFileName = "" // The file name part of providedSourceName
  var SourceFileDirectory = "" // The path part of providedSourceName
  var sourceFullFileName = ""
  var PDFDefaultTitle = ""
  var PDFFileName = ""
  var VariablesFileName = ""
  var caretPostionForPreview = 0
  var previewPageNumber = 0

  checkAndSplitSourceFileName()
  determinePDFFileName()
  LatestExtensions.addFileName(providedSourceName)
  LatestExtensions.addExtension("self")

  private def checkAndSplitSourceFileName() {
    val fileHandle = new File(providedSourceName)
    if (fileHandle.exists) {
      if (fileHandle.isDirectory) {
        editor.DialogBox.error("Please specify a file name. '" + providedSourceName + "' is a directory")
      }
      if (fileHandle.canRead) {
        SourceFileName = fileHandle.getName
        // BUG FIX: compare Strings by value (!=), not by reference (ne). The old `ne`
        // comparison only worked for bare file names by accident (String.substring(0)
        // returning `this`); whenever getName produced a fresh-but-equal String,
        // getParent (null for a bare name) would yield a directory of "null/".
        if (SourceFileName != providedSourceName) {
          SourceFileDirectory = fileHandle.getParent + Environment.fileSeparator
        }
        sourceFullFileName = fileHandle.getAbsolutePath()
        // the variables file is a hidden dot-file next to the source
        VariablesFileName = SourceFileDirectory + "." + SourceFileName
      } else {
        editor.DialogBox.error("Cannot read file " + providedSourceName)
      }
    } else {
      editor.DialogBox.error("There is no file named '" + providedSourceName + "'")
    }
  }

  private def determinePDFFileName() {
    // PDF title = source file name without its extension; PDF sits next to the source
    PDFDefaultTitle = storage.FileMethods.splitFileNameAtLastPeriod(SourceFileName)._1
    PDFFileName = SourceFileDirectory + PDFDefaultTitle + ".pdf"
  }

  /** Resolves `givenPath` against the source directory unless it is already absolute. */
  def pathToReachablePath(givenPath: String): String = {
    if (Environment.PathIsAbsolute(givenPath)) {
      givenPath
    } else {
      SourceFileDirectory + givenPath
    }
  }

  def isTemporaryLocation: Boolean = temporaryLocation

  /** Opens the generated PDF if the preview setting (and the error count) allows it. */
  def maybeLaunchPDFViewer(errorCount: Int) {
    // parentheses added for clarity; && already bound tighter than || here
    if (openPDFViewer == PreviewType.Yes || (openPDFViewer == PreviewType.IfNoErrors && errorCount == 0)) {
      editor.DesktopInteraction.OpenPDF(
        PDFFileName,
        internal, // true => previewer, false => ask OS
        if (temporaryLocation) PDFDefaultTitle + ".pdf" else PDFFileName,
        previewPageNumber)
    }
  }
}
| jvilladsen/TextCompose | src/main/scala/core/Arguments.scala | Scala | agpl-3.0 | 2,631 |
/*
* Copyright (c) 2013-2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.kinesis.producer
// Java
import java.nio.ByteBuffer
// Amazon
import com.amazonaws.AmazonServiceException
import com.amazonaws.auth.{
BasicAWSCredentials,
ClasspathPropertiesFileCredentialsProvider
}
// Scalazon (for Kinesis interaction)
import io.github.cloudify.scala.aws.kinesis.Client
import io.github.cloudify.scala.aws.kinesis.Client.ImplicitExecution._
import io.github.cloudify.scala.aws.kinesis.Definitions.{Stream,PutResult}
import io.github.cloudify.scala.aws.kinesis.KinesisDsl._
// Config
import com.typesafe.config.Config
// SnowPlow Utils
import com.snowplowanalytics.util.Tap._
// Concurrent libraries.
import scala.concurrent.{Future,Await,TimeoutException}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
// Thrift.
import org.apache.thrift.TSerializer
/**
* The core logic for the Kinesis event producer
*/
/**
 * The core logic for the Kinesis event producer.
 *
 * Creates the configured stream if necessary and writes example records to it,
 * either as plain strings or as Thrift-serialized payloads.
 */
case class StreamProducer(config: Config) {

  // Grab all the configuration variables one-time
  private object ProducerConfig {
    private val producer = config.getConfig("producer")
    val logging = producer.getBoolean("logging")

    private val aws = producer.getConfig("aws")
    val awsAccessKey = aws.getString("access-key")
    val awsSecretKey = aws.getString("secret-key")

    private val stream = producer.getConfig("stream")
    val streamName = stream.getString("name")
    val streamSize = stream.getInt("size")
    val streamDataType = stream.getString("data-type")

    private val events = producer.getConfig("events")
    val eventsOrdered = events.getBoolean("ordered")
    // a configured limit of 0 means "no limit", i.e. an infinite stream
    val eventsLimit = {
      val l = events.getInt("limit")
      if (l == 0) None else Some(l)
    }

    private val ap = producer.getConfig("active-polling")
    val apDuration = ap.getInt("duration")
    val apInterval = ap.getInt("interval")
  }

  // Initialize
  private implicit val kinesis = createKinesisClient(ProducerConfig.awsAccessKey, ProducerConfig.awsSecretKey)
  private var stream: Option[Stream] = None
  // TSerializer is not thread-safe; writeExampleThriftRecord synchronizes around its use
  private val thriftSerializer = new TSerializer()

  /**
   * Creates a new stream if one doesn't exist.
   * Arguments are optional - defaults to the values
   * provided in the ProducerConfig if not provided.
   *
   * @param name The name of the stream to create
   * @param size The number of shards to support for this stream
   * @param duration How long to keep checking if the stream became active,
   *        in seconds
   * @param interval How frequently to check if the stream has become active,
   *        in seconds (currently unused; retained for interface compatibility)
   *
   * @return a Boolean, where:
   *         1. true means the stream was successfully created or already exists
   *         2. false means an error occurred
   */
  def createStream(
      name: String = ProducerConfig.streamName,
      size: Int = ProducerConfig.streamSize,
      duration: Int = ProducerConfig.apDuration,
      interval: Int = ProducerConfig.apInterval): Boolean = {

    if (ProducerConfig.logging) println(s"Checking streams for $name.")
    val streamListFuture = for {
      s <- Kinesis.streams.list
    } yield s
    val streamList: Iterable[String] =
      Await.result(streamListFuture, Duration(duration, SECONDS))
    for (stream <- streamList) {
      if (stream == name) {
        if (ProducerConfig.logging) println(s"Stream $name already exists.")
        return true
      }
    }

    if (ProducerConfig.logging) println(s"Stream $name doesn't exist.")
    if (ProducerConfig.logging) println(s"Creating stream $name of size $size.")
    val createStream = for {
      s <- Kinesis.streams.create(name)
    } yield s

    // BUG FIX: previously the try/catch expression's value was discarded, so the
    // `false` produced in the catch clause had no effect and the method printed
    // "Successfully created stream." and returned true even after a timeout.
    // The success path and return value now live inside the try expression.
    try {
      stream = Some(Await.result(createStream, Duration(duration, SECONDS)))
      Await.result(stream.get.waitActive.retrying(duration),
        Duration(duration, SECONDS))
      if (ProducerConfig.logging) println("Successfully created stream.")
      true
    } catch {
      case _: TimeoutException =>
        if (ProducerConfig.logging) println("Error: Timed out.")
        false
    }
  }

  /**
   * Produces an (in)finite stream of events.
   *
   * @param name The name of the stream to produce events for
   * @param ordered Whether the sequence numbers of the events should
   *        always be ordered
   * @param limit How many events to produce in this stream.
   *        Use None for an infinite stream
   */
  def produceStream(
      name: String = ProducerConfig.streamName,
      ordered: Boolean = ProducerConfig.eventsOrdered,
      limit: Option[Int] = ProducerConfig.eventsLimit) {

    if (stream.isEmpty) {
      stream = Some(Kinesis.stream(name))
    }

    // select the record writer once, based on configuration (val: never reassigned)
    val writeExampleRecord: (String, Long) => PutResult =
      if (ProducerConfig.streamDataType == "string") {
        writeExampleStringRecord
      } else if (ProducerConfig.streamDataType == "thrift") {
        writeExampleThriftRecord
      } else {
        throw new RuntimeException("data-type configuration must be 'string' or 'thrift'.")
      }
    def write() = writeExampleRecord(name, System.currentTimeMillis())

    (ordered, limit) match {
      case (false, None)    => while (true) { write() }
      case (true,  None)    => throw new RuntimeException("Ordered stream support not yet implemented") // TODO
      case (false, Some(c)) => (1 to c).foreach(_ => write())
      case (true,  Some(c)) => throw new RuntimeException("Ordered stream support not yet implemented") // TODO
    }
  }

  /**
   * Creates a new Kinesis client from provided AWS access key and secret
   * key. If both are set to "cpf", then authenticate using the classpath
   * properties file.
   *
   * @return the initialized AmazonKinesisClient
   */
  private[producer] def createKinesisClient(
      accessKey: String, secretKey: String): Client =
    if (isCpf(accessKey) && isCpf(secretKey)) {
      Client.fromCredentials(new ClasspathPropertiesFileCredentialsProvider())
    } else if (isCpf(accessKey) || isCpf(secretKey)) {
      throw new RuntimeException("access-key and secret-key must both be set to 'cpf', or neither of them")
    } else {
      Client.fromCredentials(accessKey, secretKey)
    }

  /**
   * Writes an example record to the given stream.
   * Uses the supplied timestamp to make the record identifiable.
   *
   * @param stream The name of the stream to write the record to
   * @param timestamp When this record was created
   *
   * @return A PutResult containing the ShardId and SequenceNumber
   *         of the record written to.
   */
  private[producer] def writeExampleStringRecord(
      stream: String, timestamp: Long): PutResult = {
    if (ProducerConfig.logging) println(s"Writing String record.")
    val stringData = s"example-record-$timestamp"
    val stringKey = s"partition-key-${timestamp % 100000}"
    if (ProducerConfig.logging) println(s" + data: $stringData")
    if (ProducerConfig.logging) println(s" + key: $stringKey")
    val result = writeRecord(
      data = ByteBuffer.wrap(stringData.getBytes),
      key = stringKey
    )
    if (ProducerConfig.logging) println(s"Writing successful.")
    if (ProducerConfig.logging) println(s" + ShardId: ${result.shardId}")
    if (ProducerConfig.logging) println(s" + SequenceNumber: ${result.sequenceNumber}")
    result
  }

  /**
   * Writes an example Thrift-serialized record to the given stream.
   * Synchronizes around the shared (non-thread-safe) TSerializer.
   *
   * @param stream The name of the stream to write the record to
   * @param timestamp When this record was created
   *
   * @return A PutResult containing the ShardId and SequenceNumber
   *         of the record written to.
   */
  private[producer] def writeExampleThriftRecord(
      stream: String, timestamp: Long): PutResult = {
    if (ProducerConfig.logging) println(s"Writing Thrift record.")
    val dataName = "example-record"
    val dataTimestamp = timestamp % 100000
    val streamData = new generated.StreamData(dataName, dataTimestamp)
    val stringKey = s"partition-key-${timestamp % 100000}"
    if (ProducerConfig.logging) println(s" + data.name: $dataName")
    if (ProducerConfig.logging) println(s" + data.timestamp: $dataTimestamp")
    if (ProducerConfig.logging) println(s" + key: $stringKey")
    val result = this.synchronized{
      writeRecord(
        data = ByteBuffer.wrap(thriftSerializer.serialize(streamData)),
        key = stringKey
      )
    }
    if (ProducerConfig.logging) println(s"Writing successful.")
    if (ProducerConfig.logging) println(s" + ShardId: ${result.shardId}")
    if (ProducerConfig.logging) println(s" + SequenceNumber: ${result.sequenceNumber}")
    result
  }

  /**
   * Writes a record to the given stream
   *
   * @param data The data for this record
   * @param key The partition key for this record
   * @param duration Time in seconds to wait to put the data.
   *
   * @return A PutResult containing the ShardId and SequenceNumber
   *         of the record written to.
   */
  private[producer] def writeRecord(data: ByteBuffer, key: String,
      duration: Int = ProducerConfig.apDuration): PutResult = {
    // precondition: `stream` has been set by createStream or produceStream
    val putData = for {
      p <- stream.get.put(data, key)
    } yield p
    val putResult = Await.result(putData, Duration(duration, SECONDS))
    putResult
  }

  /**
   * Is the access/secret key set to the special value "cpf" i.e. use
   * the classpath properties file for credentials.
   *
   * @param key The key to check
   * @return true if key is cpf, false otherwise
   */
  private[producer] def isCpf(key: String): Boolean = (key == "cpf")
}
| snowplow/kinesis-example-scala-producer | src/main/scala/com/snowplowanalytics/kinesis/producer/StreamProducer.scala | Scala | apache-2.0 | 9,805 |
/*
* Copyright (c) 2016 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless.examples
import java.util.UUID
import shapeless._
import shapeless.ops.hlist._
// Spoiler alert - don't look!
/** Poly1 that composes a pair of composable functions into a single function. */
object compose extends Poly1 {
  // Given a tuple (f1: A => B, f2: B => C), produce f1 andThen f2 (an A => C).
  implicit def cases[A, B, C] = at[(A => B, B => C)] {
    case (f1, f2) => f1 andThen f2
  }
}
/**
* Examples for ZipApply
*
* @author Jeremy Smith
*/
object ZipApplyExamples extends App {
  // Some functions over various types
  def intFunction(i: Int): String = (i * i).toString
  def stringFunction(s: String): Boolean = s startsWith "4"
  def longFunction(l: Long): Int = (l & 0xFFFFFFFF).toInt
  def uuidFunction(u: UUID): Long = u.getLeastSignificantBits ^ u.getMostSignificantBits
  def booleanFunction(b: Boolean): String = if(b) "Yes" else "No"
  // Just to illustrate the types of the HLists - you don't actually need to define type aliases at all.
  type Functions = (Int => String) :: (String => Boolean) :: (Long => Int) :: (UUID => Long) :: HNil
  type Args = Int :: String :: Long :: UUID :: HNil
  type Results = String :: Boolean :: Int :: Long :: HNil
  // Some sample values
  val anInt = 22
  val aString = "foo"
  val aLong = 33L
  val aUUID = UUID.fromString("deadbeef-dead-dead-beef-deaddeadbeef")
  // Example values of those types
  val functions1 = (intFunction _) :: (stringFunction _) :: (longFunction _) :: (uuidFunction _) :: HNil
  val args1 = anInt :: aString :: aLong :: aUUID :: HNil
  val results1 = intFunction(anInt) :: stringFunction(aString) :: longFunction(aLong) :: uuidFunction(aUUID) :: HNil
  // Just to show that the types line up
  implicitly[functions1.type <:< Functions]
  implicitly[args1.type <:< Args]
  implicitly[results1.type <:< Results]
  // A different type of HList of functions (it's specially planned - wait for the big twist!)
  val functions2 = (stringFunction _) :: (booleanFunction _) :: (intFunction _) :: (longFunction _) :: HNil
  /**
   * [[ZipApply]] allows application of an [[HList]] of functions to an [[HList]] of their corresponding arguments,
   * resulting in an [[HList]] of the results of the applications. Here's an example.
   */
  def zipApplyExample() = {
    // At a minimum, ZipApply requires the argument types to be known
    val zipApply = ZipApply[Functions, Args]
    val results = zipApply(functions1, args1)
    // The result type lines up
    implicitly[results.type <:< Results]
    // The results are what's expected
    assert(results == results1)
    /**
     * The result type can also be inferred at a function call site, using ZipApply.Aux — this allows you to use other
     * ops with type inference in between. Here's a neat function that will take two functions of HLists and compose
     * each element. It also takes an HLists of arguments, which must be compatible with the first HList of functions.
     * The composed functions are applied to the arguments, and we get an HList of the results of those. If you try
     * to call this function on two HLists of functions that won't compose, or with arguments that don't map to the
     * first HList of functions, you'll get a compile time error at that call site. Isn't it great?
     *
     * The arguments:
     * @param f1 An HList of functions
     * @param f2 Another HList of functions — each element must be composable with the corresponding element of f1
     * @param a An HList of arguments — each element must be of the input type of the corresponding function from f1
     *
     * The implicits used:
     * @param zip A [[Zip]] instance which allows us to zip f1 and f2, giving an HList with the corresponding
     *            elements from each f1 and f2 as a [[Tuple2]] - i.e.
     *            (f1.head, f2.head) :: (f1.drop(1).head, f2.drop(1).head) ... :: HNil
     * @param mapCompose A [[Mapper]] instance which allows us to map the output of `zip` with the [[compose]] poly
     *                   function (defined at the bottom of this file)
     * @param zipApply A [[ZipApply]] instance which will perform the application of the composed functions. Note the
     *                 use of `Aux` to infer the result type.
     *
     * And the type parameters:
     * @tparam Functions1 The type of the first [[HList]] of functions. This can be inferred because `inferenceExample`
     *                    takes an argument of this type.
     * @tparam Functions2 The type of the second [[HList]] of functions. This can be inferred for the same reason.
     * @tparam Arguments The type of the [[HList]] of arguments. Again, this can be inferred since it must be concrete
     *                   at the call site.
     * @tparam FCombined The type of the [[HList]] of pairs zipped from [[Functions1]] and [[Functions2]]. This gets
     *                   inferred thanks to the use of [[Zip.Aux]].
     * @tparam FComposed The type of the [[HList]] of composed functions from [[Functions1]] and [[Functions2]]. This
     *                   gets inferred thanks to the use of [[Mapper.Aux]].
     * @tparam Output The type of the [[HList]] of results from applying [[FComposed]] to [[Arguments]]. This gets
     *                inferred thanks to the use of [[ZipApply.Aux]].
     *
     * @return The results of applying the composed functions to the corresponding arguments, as an HList.
     */
    def inferenceExample[
      Functions1 <: HList,
      Functions2 <: HList,
      Arguments <: HList,
      FCombined <: HList,
      FComposed <: HList,
      Output <: HList
    ](
      f1: Functions1,
      f2: Functions2,
      a: Arguments)(implicit
      zip: Zip.Aux[Functions1 :: Functions2 :: HNil, FCombined],
      mapCompose: Mapper.Aux[compose.type, FCombined, FComposed],
      zipApply: ZipApply.Aux[FComposed, Arguments, Output]
    ): Output = zipApply(mapCompose(zip(f1 :: f2 :: HNil)), a)
    /**
     * An example invocation of [[inferenceExample]] — note that all of the type arguments are inferred.
     * Here's the big plot twist! That innocent looking [[HList]], [[functions2]], composes with [[functions1]]!
     * (cue Dramatic Hamster)
     */
    val inferenceResults = inferenceExample(functions1, functions2, args1)
    // We can show that the result type was correctly inferred
    implicitly[inferenceResults.type <:< (Boolean :: String :: String :: Int :: HNil)]
    // and that the results are what's expected
    val expected =
      stringFunction(intFunction(anInt)) ::
      booleanFunction(stringFunction(aString)) ::
      intFunction(longFunction(aLong)) ::
      longFunction(uuidFunction(aUUID)) ::
      HNil
    assert(inferenceResults == expected)
    // the following would not compile, because functions1 doesn't compose with itself
    // val inferenceResults2 = inferenceExample(functions1, functions1, args1)
    // the following would not compile, because the argument types are wrong
    // val inferenceResults3 = inferenceExample(functions1, functions2, "hey" :: "hi" :: "how ya" :: "doin'" :: HNil)
  }
  // Actually run the example: without this call, running the App would only
  // evaluate the vals above and none of the runtime assertions would execute.
  zipApplyExample()
}
| isaka/shapeless | examples/src/main/scala/shapeless/examples/zipapply.scala | Scala | apache-2.0 | 7,664 |
package org.jetbrains.plugins.scala
package codeInspection.prefix
import org.jetbrains.plugins.scala.codeInspection.ScalaLightInspectionFixtureTestAdapter
import com.intellij.codeInspection.LocalInspectionTool
import org.jetbrains.plugins.scala.codeInspection.prefixMutableCollections.{ReferenceMustBePrefixedInspection, AddPrefixFix}
/**
* Nikolay.Tropin
* 2/25/14
*/
/**
 * Fixture tests for [[ReferenceMustBePrefixedInspection]]: each test marks a
 * reference between START/END, then verifies that applying [[AddPrefixFix]]
 * rewrites the reference to its package-prefixed form and adds the needed import.
 */
class ReferenceMustBePrefixedInspectionTest extends ScalaLightInspectionFixtureTestAdapter {
  override protected def annotation: String = ReferenceMustBePrefixedInspection.displayName
  override protected def classOfInspection: Class[_ <: LocalInspectionTool] = classOf[ReferenceMustBePrefixedInspection]
  // Convenience overload: always applies the AddPrefixFix quick-fix hint.
  def testFix(text: String, result: String): Unit = testFix(text, result, AddPrefixFix.hint)
  // `selected` checks the inspection highlights the marked range;
  // `text` -> `result` checks the quick-fix transformation at the caret.
  def doTest(selected: String, text: String, result: String) = {
    check(selected)
    testFix(text, result)
  }
  // Reference used as a type annotation.
  def testType() = doTest (
    s"""import java.util.List
      |object AAA {
      |  val list: ${START}List$END[Int] = null
      |}""",
    s"""import java.util.List
      |object AAA {
      |  val list: ${CARET_MARKER}List[Int] = null
      |}""",
    """import java.util
      |import java.util.List
      |object AAA {
      |  val list: util.List[Int] = null
      |}"""
  )
  // Reference used in an extends clause.
  def testExtends() = doTest(
    s"""import scala.collection.mutable.Seq
      |object AAA extends ${START}Seq$END[Int]""",
    s"""import scala.collection.mutable.Seq
      |object AAA extends ${CARET_MARKER}Seq[Int]""",
    """import scala.collection.mutable
      |import scala.collection.mutable.Seq
      |object AAA extends mutable.Seq[Int]"""
  )
  // Reference used as an apply() call (companion factory).
  def testApply() = doTest (
    s"""import scala.collection.mutable.Seq
      |object AAA {
      |  val s = ${START}Seq$END(0, 1)
      |}""",
    s"""import scala.collection.mutable.Seq
      |object AAA {
      |  val s = ${CARET_MARKER}Seq(0, 1)
      |}""",
    """import scala.collection.mutable
      |import scala.collection.mutable.Seq
      |object AAA {
      |  val s = mutable.Seq(0, 1)
      |}"""
  )
  // Reference used in a pattern-match type check.
  def testUnapply() = doTest (
    s"""import scala.collection.mutable.HashMap
      |object AAA {
      |  Map(1 -> "a") match {
      |    case hm: ${START}HashMap$END =>
      |  }
      |}""",
    s"""import scala.collection.mutable.HashMap
      |object AAA {
      |  Map(1 -> "a") match {
      |    case hm: ${CARET_MARKER}HashMap =>
      |  }
      |}""",
    """import scala.collection.mutable
      |import scala.collection.mutable.HashMap
      |object AAA {
      |  Map(1 -> "a") match {
      |    case hm: mutable.HashMap =>
      |  }
      |}"""
  )
  // The package import already exists: the fix must reuse it, not add a duplicate.
  def testHaveImport() = doTest(
    s"""import scala.collection.mutable.HashMap
      |import scala.collection.mutable
      |object AAA {
      |  val hm: ${START}HashMap$END = null
      |}""",
    s"""import scala.collection.mutable.HashMap
      |import scala.collection.mutable
      |object AAA {
      |  val hm: ${CARET_MARKER}HashMap = null
      |}""",
    """import scala.collection.mutable.HashMap
      |import scala.collection.mutable
      |object AAA {
      |  val hm: mutable.HashMap = null
      |}"""
  )
}
| consulo/consulo-scala | test/org/jetbrains/plugins/scala/codeInspection/prefix/ReferenceMustBePrefixedInspectionTest.scala | Scala | apache-2.0 | 3,210 |
import sbt.Keys._
import sbt._
import scala.sys.process.Process
import Path.rebase
object FrontEnd extends AutoPlugin {
  // Keys exposed automatically to every build that enables this plugin.
  object autoImport {
    // Mappings (file -> path inside the package) for the built front-end assets.
    val frontendFiles = taskKey[Seq[(File, String)]]("Front-end files")
  }
  import autoImport._
  // Enable this plugin on all projects without requiring an explicit enablePlugins.
  override def trigger = allRequirements
override def projectSettings = Seq[Setting[_]](
frontendFiles := {
val s = streams.value
s.log.info("Building front-end ...")
s.log.info("npm install")
Process("npm" :: "install" :: Nil, baseDirectory.value / "www") ! s.log
s.log.info("npm run build")
Process("npm" :: "run" :: "build" :: Nil, baseDirectory.value / "www") ! s.log
val dir = baseDirectory.value / "www" / "dist"
dir.**(AllPassFilter) pair rebase(dir, "www")
})
} | CERT-BDF/Cortex | project/FrontEnd.scala | Scala | agpl-3.0 | 772 |
package com.datastax.driver.spark.types
import com.datastax.driver.spark.util.SerializationUtil
import org.junit.Assert._
import org.junit.Test
/**
 * Tests for the custom CanBuildFrom instances: each builder must produce the
 * expected set, both when used directly and after a serialization round-trip
 * (builders are shipped to Spark workers, so they must survive serialization).
 */
class CanBuildFromTest {
  // Baseline: the set builder works without any serialization involved.
  @Test
  def testBuild() {
    val bf = CanBuildFrom.setCanBuildFrom[Int]
    val builder = bf.apply()
    builder += 1
    builder += 2
    builder += 3
    assertEquals(Set(1,2,3), builder.result())
  }
  // The factory itself must still build correctly after being serialized
  // and deserialized.
  @Test
  def testSerializeAndBuild() {
    val bf = CanBuildFrom.setCanBuildFrom[Int]
    val bf2 = SerializationUtil.serializeAndDeserialize(bf)
    val builder = bf2.apply()
    builder += 1
    builder += 2
    builder += 3
    assertEquals(Set(1,2,3), builder.result())
  }
  // Same round-trip check for a builder that captures an implicit Ordering
  // (TreeSet); the ordering must survive serialization too.
  @Test
  def testSerializeAndBuildWithOrdering() {
    val bf = CanBuildFrom.treeSetCanBuildFrom[Int]
    val bf2 = SerializationUtil.serializeAndDeserialize(bf)
    val builder = bf2.apply()
    builder += 1
    builder += 2
    builder += 3
    assertEquals(Set(1,2,3), builder.result())
  }
}
| bovigny/cassandra-driver-spark | src/test/scala/com/datastax/driver/spark/types/CanBuildFromTest.scala | Scala | apache-2.0 | 958 |
package de.mips.Node
import java.io.Serializable
import de.mips.geometric.Ball
/** A serializable node that exposes a [[Ball]]. */
trait Node extends Serializable {
  // The node's associated ball — presumably its bounding volume
  // (TODO confirm against Ball's definition in de.mips.geometric).
  def ball: Ball
}
| felixmaximilian/mips | src/main/scala-2.11/de/mips/Node/Node.scala | Scala | apache-2.0 | 136 |
package com.github.vonnagy.service.container.metrics.reporting
import java.util.concurrent.TimeUnit
import akka.actor.ActorSystem
import com.typesafe.config.Config
import org.slf4j.LoggerFactory
/**
 * A [[ScheduledReporter]] that forwards metric snapshots to an SLF4J logger
 * via the codahale `Slf4jReporter`.
 */
class Slf4jReporter(implicit val system: ActorSystem, val config: Config) extends ScheduledReporter {

  // Built lazily so the "logger" config key is only read on first use.
  lazy val reporter = getReporter

  /**
   * This is the method that gets called so that the metrics
   * reporting can occur. It pushes a snapshot of every registered metric
   * (gauges, counters, histograms, meters and timers) to the SLF4J reporter.
   */
  def report(): Unit = {
    val registry = metrics.metricRegistry
    reporter.report(
      registry.getGauges,
      registry.getCounters,
      registry.getHistograms,
      registry.getMeters,
      registry.getTimers)
  }

  // Builds the codahale SLF4J reporter: rates reported in events/second,
  // durations in milliseconds, output sent to the logger named by the
  // "logger" configuration key.
  private[reporting] def getReporter: com.codahale.metrics.Slf4jReporter =
    com.codahale.metrics.Slf4jReporter.forRegistry(metrics.metricRegistry)
      .outputTo(LoggerFactory.getLogger(config.getString("logger")))
      .convertRatesTo(TimeUnit.SECONDS)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .build
}
| vonnagy/service-container | service-container/src/main/scala/com/github/vonnagy/service/container/metrics/reporting/Slf4jReporter.scala | Scala | apache-2.0 | 1,027 |
package breeze.integrate
import org.scalatest.funsuite.AnyFunSuite
import breeze.integrate
import breeze.linalg._
import breeze.numerics._
/**
*
* @author chrismedrela
**/
class SimpsonIntegrationTest extends AnyFunSuite {
  // Test integrands: a linear and a quadratic function. Simpson's rule is
  // exact for polynomials up to degree 3, so even the coarsest node counts
  // should reproduce the analytic integrals below.
  val f = (x: Double) => 2 * x
  val f2 = (x: Double) => x * x
  test("basics") {
    // ∫₀¹ 2x dx = 1 and ∫₀¹ x² dx = 1/3, regardless of using 2 or 3 nodes.
    assert(closeTo(integrate.simpson(f, 0, 1, 2), 1.0))
    assert(closeTo(integrate.simpson(f, 0, 1, 3), 1.0))
    assert(closeTo(integrate.simpson(f2, 0, 1, 2), 0.33333333333333))
    assert(closeTo(integrate.simpson(f2, 0, 1, 3), 0.33333333333333))
  }
  test("not enough nodes") {
    // Simpson's rule needs at least 2 nodes; fewer must be rejected.
    intercept[Exception] {
      integrate.simpson(f, 0, 1, 1)
    }
  }
}
| scalanlp/breeze | math/src/test/scala/breeze/integrate/SimpsonIntegrationTest.scala | Scala | apache-2.0 | 673 |
package shredzzz.kirkwood.junit.tests.vector.booleans
import org.junit.Test
import breeze.linalg.DenseVector
import shredzzz.kirkwood.cumath.tensor.CuVector
import shredzzz.kirkwood.cumath.Sentinel
import shredzzz.kirkwood.cumath.tensor.operations.ConversionImplicits._
import shredzzz.kirkwood.junit._
import com.simple.simplespec.Matchers
/**
 * Tests for boolean [[CuVector]]s: construction from arrays / Breeze vectors,
 * zeros/ones factories, boolean-to-int conversion, and element-wise
 * equality/inequality reductions. Each test runs inside a CUDA context
 * provided by withCuContext and disposes device memory explicitly.
 */
class CuVectorTest extends BooleanCuVectorTester with Matchers
{
  @Test def testData() {
    val size = 3
    val arr = Array(false, true, false)
    val zeros = Array(false, false, false)
    val ones = Array(true, true, true)
    // Fresh copy per construction so device-side writes can't alias the fixture.
    def data = arr.clone()
    withCuContext(
      implicit ctx => {
        // Round-trip from a plain array.
        {
          val x = CuVector(data)
          x.size must be(size)
          x.fetch() must matchers.be(data)
          x.fetchBreeze() must matchers.be(DenseVector(data))
          x.dispose()
        }
        // Round-trip from a Breeze DenseVector.
        {
          val x = CuVector(DenseVector(data))
          x.size must be(size)
          x.fetch() must matchers.be(data)
          x.fetchBreeze() must matchers.be(DenseVector(data))
          x.dispose()
        }
        // zeros factory: all elements false.
        {
          val x = CuVector.zeros[Boolean](size)
          x.size must be(size)
          x.fetch() must matchers.be(zeros)
          x.fetchBreeze() must matchers.be(DenseVector(zeros))
          x.dispose()
        }
        // ones factory: all elements true.
        {
          val x = CuVector.ones[Boolean](size)
          x.size must be(size)
          x.fetch() must matchers.be(ones)
          x.fetchBreeze() must matchers.be(DenseVector(ones))
          x.dispose()
        }
        // Boolean -> Int conversion: true maps to 1 ...
        {
          val x = CuVector.ones[Boolean](size)
          val intX = x.asInt()
          intX.size must be(size)
          intX.fetch() must matchers.be(Array(1, 1, 1))
          x.dispose()
        }
        // ... and false maps to 0.
        {
          val x = CuVector.zeros[Boolean](size)
          val intX = x.asInt()
          intX.size must be(size)
          intX.fetch() must matchers.be(Array(0, 0, 0))
          x.dispose()
        }
      }
    )
  }
  // Vectors differing in one element are not equal under the != reduction.
  @Test def test_!=() {
    withCuContext(
      implicit ctx => {
        testReduceCompAlloc(
          xArr = Array(true, true, false),
          yArr = Array(false, true, false),
          expected = false,
          (x: CuVector[Boolean], y: CuVector[Boolean]) => {implicit sentinel: Sentinel => x != y}
        )
      }
    )
  }
  // Identical vectors compare equal under the == reduction.
  @Test def test_==() {
    withCuContext(
      implicit ctx => {
        testReduceCompAlloc(
          xArr = Array(true, false, false),
          yArr = Array(true, false, false),
          expected = true,
          (x: CuVector[Boolean], y: CuVector[Boolean]) => {implicit sentinel: Sentinel => x == y}
        )
      }
    )
  }
}
| shredzzz/kirkwood | src/test/scala/shredzzz/kirkwood/junit/tests/vector/booleans/CuVectorTest.scala | Scala | apache-2.0 | 2,657 |
package com.wavesplatform.transaction.serialization.impl
import java.nio.ByteBuffer
import com.google.common.primitives.{Bytes, Longs}
import com.wavesplatform.protobuf.transaction.PBOrders
import com.wavesplatform.protobuf.utils.PBUtils
import com.wavesplatform.serialization.ByteBufferOps
import com.wavesplatform.transaction.Proofs
import com.wavesplatform.transaction.assets.exchange.{AssetPair, Order, OrderType}
import play.api.libs.json.{JsObject, Json}
import scala.util.Try
/**
 * JSON and binary (de)serialization for exchange [[Order]]s.
 *
 * Binary layouts are version-specific and consensus-critical: V1 has no
 * version byte and a single signature; V2 adds a leading version byte and a
 * proofs block; V3 additionally appends the matcher-fee asset; V4+ delegates
 * to the protobuf representation.
 */
object OrderSerializer {
  // JSON view of an order; "matcherFeeAssetId" is only emitted from V3 on,
  // since earlier versions always charge the fee in Waves.
  def toJson(order: Order): JsObject = {
    import order._
    Json.obj(
      "version"          -> version,
      "id"               -> idStr(),
      "sender"           -> senderPublicKey.toAddress,
      "senderPublicKey"  -> senderPublicKey,
      "matcherPublicKey" -> matcherPublicKey,
      "assetPair"        -> assetPair.json,
      "orderType"        -> orderType.toString,
      "amount"           -> amount,
      "price"            -> price,
      "timestamp"        -> timestamp,
      "expiration"       -> expiration,
      "matcherFee"       -> matcherFee,
      "signature"        -> proofs.toSignature.toString,
      "proofs"           -> proofs.proofs.map(_.toString)
    ) ++ (if (version >= Order.V3) Json.obj("matcherFeeAssetId" -> matcherFeeAssetId) else JsObject.empty)
  }
  // The byte representation that is signed (no proofs/signature included).
  def bodyBytes(order: Order): Array[Byte] = {
    import order._
    version match {
      case Order.V1 =>
        // V1: no version byte.
        Bytes.concat(
          senderPublicKey.arr,
          matcherPublicKey.arr,
          assetPair.bytes,
          orderType.bytes,
          Longs.toByteArray(price),
          Longs.toByteArray(amount),
          Longs.toByteArray(timestamp),
          Longs.toByteArray(expiration),
          Longs.toByteArray(matcherFee)
        )
      case Order.V2 =>
        // V2: identical to V1 but prefixed with the version byte.
        Bytes.concat(
          Array(version),
          senderPublicKey.arr,
          matcherPublicKey.arr,
          assetPair.bytes,
          orderType.bytes,
          Longs.toByteArray(price),
          Longs.toByteArray(amount),
          Longs.toByteArray(timestamp),
          Longs.toByteArray(expiration),
          Longs.toByteArray(matcherFee)
        )
      case Order.V3 =>
        // V3: V2 layout plus the matcher-fee asset id at the end.
        Bytes.concat(
          Array(version),
          senderPublicKey.arr,
          matcherPublicKey.arr,
          assetPair.bytes,
          orderType.bytes,
          Longs.toByteArray(price),
          Longs.toByteArray(amount),
          Longs.toByteArray(timestamp),
          Longs.toByteArray(expiration),
          Longs.toByteArray(matcherFee),
          matcherFeeAssetId.byteRepr
        )
      case _ =>
        // V4+: protobuf body; proofs are cleared because the body is what gets signed.
        PBUtils.encodeDeterministic(PBOrders.protobuf(order.copy(proofs = Proofs.empty)))
    }
  }
  // Full wire representation: signed body followed by signature (V1) or proofs (V2/V3).
  def toBytes(ord: Order): Array[Byte] = {
    import ord._
    (version: @unchecked) match {
      case Order.V1            => Bytes.concat(this.bodyBytes(ord), proofs.toSignature.arr)
      case Order.V2 | Order.V3 => Bytes.concat(this.bodyBytes(ord), proofs.bytes())
    }
  }
  // Inverse of toBytes for V1-V3; V4+ is protobuf-only and is rejected here.
  def parseBytes(version: Order.Version, bytes: Array[Byte]): Try[Order] = Try {
    // Fields shared by all binary versions, in wire order.
    def parseCommonPart(buf: ByteBuffer): Order = {
      val sender     = buf.getPublicKey
      val matcher    = buf.getPublicKey
      val assetPair  = AssetPair(buf.getAsset, buf.getAsset)
      val orderType  = OrderType(buf.get())
      val price      = buf.getLong
      val amount     = buf.getLong
      val timestamp  = buf.getLong
      val expiration = buf.getLong
      val matcherFee = buf.getLong
      Order(version, sender, matcher, assetPair, orderType, amount, price, timestamp, expiration, matcherFee)
    }
    version match {
      case Order.V1 =>
        val buf = ByteBuffer.wrap(bytes)
        parseCommonPart(buf).copy(proofs = Proofs(buf.getSignature))
      case Order.V2 =>
        require(bytes(0) == version, "order version mismatch")
        val buf = ByteBuffer.wrap(bytes, 1, bytes.length - 1)
        parseCommonPart(buf).copy(proofs = buf.getProofs)
      case Order.V3 =>
        require(bytes(0) == version, "order version mismatch")
        val buf = ByteBuffer.wrap(bytes, 1, bytes.length - 1)
        parseCommonPart(buf).copy(matcherFeeAssetId = buf.getAsset, proofs = buf.getProofs)
      case _ =>
        throw new IllegalArgumentException(s"Unsupported order version: $version")
    }
  }
}
| wavesplatform/Waves | node/src/main/scala/com/wavesplatform/transaction/serialization/impl/OrderSerializer.scala | Scala | mit | 4,314 |
package com.github.lab24.sbtmongodb
import sbt._
import Keys._
import Process._
import Project.Initialize
import com.mongodb.ServerAddress
import com.mongodb.Mongo
import com.mongodb.util.JSON
import com.mongodb.BasicDBObject
import com.mongodb.DBObject
import java.io.File
import scala.io.Source
import java.io.InputStream
import java.io.FileInputStream
/**
 * sbt plugin exposing MongoDB tasks: dropping a database and loading JSON
 * fixture files (one collection per file, named after the file) into it.
 */
object MongodbPlugin extends Plugin {

  val Mongodb = config("mongodb") extend (Runtime)
  val Test = config("test") extend (Runtime)

  val drop = TaskKey[Unit]("drop", "Drop Mongodb Database")
  val fixturesLoad = TaskKey[Unit]("fixtures-load", "Load Fixtures Data into Database")

  val mongoHost = SettingKey[String]("mongo-host")
  val mongoPort = SettingKey[Int]("mongo-port")
  val mongoDb = SettingKey[String]("mongo-db")
  val mongoUser = SettingKey[String]("mongo-db-user")
  val mongoPassword = SettingKey[String]("mongo-db-password")
  val mongoFixturesPath = SettingKey[String]("mongo-fixtures-path")

  lazy val mongodbSettings: Seq[Setting[_]] =
    inConfig(Mongodb)(Seq(
      drop <<= dropTask,
      fixturesLoad <<= fixturesLoadTask))

  /** Drops the configured database, always closing the client connection. */
  private def dropTask: Initialize[Task[Unit]] =
    (state, mongoHost, mongoPort, mongoDb) map { (s, mHost, mPort, mDb) =>
      val log = s.log
      val mongo = new Mongo(mHost, mPort)
      try {
        log.info("drop Database " + mDb)
        mongo.getDB(mDb).dropDatabase()
      } finally mongo.close() // previously leaked on every invocation
      s
    }

  /**
   * Loads every fixture file under the fixtures path. Each file is treated as
   * a JSON array of documents inserted into a collection named after the file.
   *
   * Documents are extracted by tracking '{'/'}' nesting depth character by
   * character (note: braces inside string values are not handled — a
   * pre-existing limitation of the format).
   */
  private def fixturesLoadTask: Initialize[Task[Unit]] =
    (state, mongoHost, mongoPort, mongoDb, mongoFixturesPath) map { (s, mHost, mPort, mDb, mFixturesPath) =>
      val log = s.log
      val mongo = new Mongo(mHost, mPort)
      try {
        val db = mongo.getDB(mDb)
        for (collection <- recursiveListFiles(new File(mFixturesPath))) {
          val coll = db.getCollection(collection.getName())
          // Close the stream even if parsing/insertion fails (was leaked before).
          val fis = new FileInputStream(collection)
          try {
            var docCounter = 0
            var openCounter = 0
            val buff = new StringBuilder()
            while (fis.available() > 0) {
              val c = fis.read().asInstanceOf[Char]
              if (c == '{') {
                buff.append(c)
                openCounter += 1
              } else if (c == '}') {
                buff.append(c)
                openCounter -= 1
                if (openCounter == 0) {
                  // A complete top-level document has been accumulated.
                  JSON.parse(buff.toString()) match {
                    case doc: DBObject =>
                      coll.insert(doc)
                      docCounter += 1
                    case other =>
                      // Previously a MatchError; warn and continue instead.
                      log.warn("Skipping fixture fragment that is not a JSON object: " + other)
                  }
                  buff.setLength(0)
                }
              } else if (openCounter != 0) {
                // Only buffer characters inside a document — this also drops
                // the surrounding '[' ']' of the array.
                buff.append(c)
              }
            }
            log.info(docCounter + " documents inserted into collection " + collection.getName())
          } finally fis.close()
        }
      } finally mongo.close()
      s
    }

  /**
   * Recursively collects regular files under `f`. Directories are descended
   * into but never returned (the old version returned them, which made
   * `new FileInputStream(dir)` throw for nested fixture layouts); a missing
   * path yields an empty result instead of an NPE.
   */
  private def recursiveListFiles(f: File): Array[File] = {
    val entries = Option(f.listFiles).getOrElse(Array.empty[File])
    entries.filter(_.isFile) ++ entries.filter(_.isDirectory).flatMap(recursiveListFiles)
  }
}
| lab-24/xsbt-mongodb | src/main/scala/com/github/lab24/sbtmongodb/MongodbPlugin.scala | Scala | bsd-3-clause | 3,094 |
/**
* Copyright (C) 2012 Inria, University Lille 1.
*
* This file is part of PowerAPI.
*
* PowerAPI is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* PowerAPI is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with PowerAPI. If not, see <http://www.gnu.org/licenses/>.
*
* Contact: powerapi-user-list@googlegroups.com.
*/
package fr.inria.powerapi.formula.cpu.max
import scala.collection.mutable
import fr.inria.powerapi.core.Energy
import fr.inria.powerapi.core.TickSubscription
import fr.inria.powerapi.formula.cpu.api.CpuFormulaMessage
import fr.inria.powerapi.sensor.cpu.api.CpuSensorMessage
/**
* CpuFormula configuration part.
*/
trait Configuration extends fr.inria.powerapi.core.Configuration {
  /**
   * CPU Thermal Design Power (TDP) value.
   * Read from the "powerapi.cpu.tdp" key; falls back to 0 — presumably when
   * the key is absent (TODO confirm `load`'s fallback semantics).
   *
   * @see http://en.wikipedia.org/wiki/Thermal_design_power
   */
  lazy val tdp = load { _.getInt("powerapi.cpu.tdp") }(0)
  /**
   * CPU Thermal Design Power (TDP) factor.
   * Not required but 0.7 as default [1].
   *
   * @see [1], JouleSort: A Balanced Energy-Efficiency Benchmark, by Rivoire et al.
   */
  lazy val tdpFactor = load (_.getDouble("powerapi.cpu.tdp-factor"), false) (0.7)
}
/**
* Implements a CpuFormula in making the ratio between maximum CPU power (obtained by multiplying
* its Thermal Design Power (TDP) value by a specific factor) and the process CPU usage obtained from
* the received CpuSensorMessage.
*
* @see http://en.wikipedia.org/wiki/Thermal_design_power
*/
class CpuFormula extends fr.inria.powerapi.formula.cpu.api.CpuFormula with Configuration {
  // Maximum CPU power estimate: TDP scaled by the configured factor (default 0.7).
  lazy val power = tdp * tdpFactor
  // Energy estimate for one sensor message: max power weighted by the
  // process's CPU usage percentage from the sensor.
  def compute(now: CpuSensorMessage) = {
    Energy.fromPower(power * now.processPercent.percent)
  }
  // Publishes the computed energy, tagged with the originating tick.
  def process(cpuSensorMessage: CpuSensorMessage) {
    publish(CpuFormulaMessage(compute(cpuSensorMessage), cpuSensorMessage.tick))
  }
} | abourdon/powerapi-akka | formulae/formula-cpu-max/src/main/scala/fr/inria/powerapi/formula/cpu/max/CpuFormula.scala | Scala | agpl-3.0 | 2,313 |
package org.qirx.cms.evolution
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import org.qirx.cms.machinery.~>
import org.qirx.cms.construction.Store
import play.api.libs.json.JsObject
import play.api.libs.json.Json.obj
/**
* A store that wraps another store. It will add a version to the saved documents
* which is used (during retrieval) to determine which evolutions should be applied.
* After the evolutions have been applied the version is removed from the retrieved
* documents.
*/
class EvolvingStore(
  store: Store ~> Future,
  evolutions: Map[String, Evolutions])(
    implicit ec: ExecutionContext) extends (Store ~> Future) {

  // Name of the hidden field that records the evolution version of a document.
  private val VERSION = "_version"

  import Store._

  def transform[x] = {
    case List(metaId, fieldSet) =>
      // Fetch the version alongside the requested fields so evolutions can run.
      val list = List(metaId, addVersion(fieldSet))
      store(list).map(_ map transformDocumentFor(metaId))
    case Get(metaId, id, fieldSet) =>
      val get = Get(metaId, id, addVersion(fieldSet))
      store(get).map(_ map transformDocumentFor(metaId))
    case Save(metaId, id, document) =>
      // Stamp saved documents with the latest known version for their type.
      val version = obj(VERSION -> latestVersionFor(metaId))
      store(Save(metaId, id, document ++ version))
    case other => store(other)
  }

  // An empty field set means "all fields", so the version comes back anyway;
  // only extend non-empty selections.
  private def addVersion(fieldSet: Set[String]) =
    if (fieldSet.isEmpty) fieldSet
    else fieldSet + VERSION

  private def evolutionsFor(metaId: String) = evolutions.get(metaId)

  // Retrieval pipeline: apply pending evolutions, then strip the version field.
  private def transformDocumentFor(metaId: String) =
    applyEvolutionsFor(metaId) andThen removeVersion

  private def applyEvolutionsFor(metaId: String): JsObject => JsObject = { document =>
    val documentVersion = (document \ VERSION).as[Int]
    evolutionsFor(metaId).fold(document)(_.applyEvolutions(document, documentVersion))
  }

  // Documents with no registered evolutions are considered version 0.
  private def latestVersionFor(metaId: String) =
    evolutionsFor(metaId).map(_.latestVersion).getOrElse(0)

  private val removeVersion: JsObject => JsObject = _ - VERSION
}
| EECOLOR/play-cms | cms/src/main/scala/org/qirx/cms/evolution/EvolvingStore.scala | Scala | mit | 1,935 |
/**
* This file is part of the TA Buddy project.
* Copyright (c) 2013-2015 Alexey Aksenov ezh@ezh.msk.ru
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Affero General Global License version 3
* as published by the Free Software Foundation with the addition of the
* following permission added to Section 15 as permitted in Section 7(a):
* FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED
* BY Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS»,
* Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS» DISCLAIMS
* THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero General Global License for more details.
* You should have received a copy of the GNU Affero General Global License
* along with this program; if not, see http://www.gnu.org/licenses or write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA, 02110-1301 USA, or download the license from the following URL:
* http://www.gnu.org/licenses/agpl.html
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU Affero General Global License.
*
* In accordance with Section 7(b) of the GNU Affero General Global License,
* you must retain the producer line in every report, form or document
* that is created or manipulated using TA Buddy.
*
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the TA Buddy software without
* disclosing the source code of your own applications.
* These activities include: offering paid services to customers,
* serving files in a web or/and network application,
* shipping TA Buddy with a closed source product.
*
* For more information, please contact Digimead Team at this
* address: ezh@ezh.msk.ru
*/
package org.digimead.tabuddy.desktop.logic.operation.graph
import org.digimead.digi.lib.aop.log
import org.digimead.digi.lib.api.XDependencyInjection
import org.digimead.digi.lib.log.api.XLoggable
import org.digimead.tabuddy.desktop.core.definition.Operation
import org.digimead.tabuddy.desktop.core.support.App
import org.digimead.tabuddy.desktop.core.ui.UI
import org.digimead.tabuddy.desktop.logic.operation.graph.api.XOperationGraphClose
import org.digimead.tabuddy.desktop.logic.payload.marker.GraphMarker
import org.digimead.tabuddy.model.Model
import org.digimead.tabuddy.model.graph.Graph
import org.eclipse.core.runtime.{ IAdaptable, IProgressMonitor }
import org.eclipse.swt.SWT
import org.eclipse.swt.widgets.MessageBox
/**
* 'Close graph' operation.
*/
class OperationGraphClose extends XOperationGraphClose[GraphMarker.Generic] with XLoggable {
  /**
   * Close graph.
   *
   * Runs a retry loop: each iteration takes the global write lock, and either
   * closes the graph immediately (clean, forced, or headless) or releases the
   * lock and asks the user about unsaved changes before retrying. The UI
   * dialog is shown OUTSIDE the lock to avoid blocking other graph operations.
   *
   * @param graph graph to close
   * @param force close graph without saving
   * @return the same marker or read only marker if current one is deleted
   */
  def apply(graph: Graph[_ <: Model.Like], force: Boolean): GraphMarker.Generic = {
    // Becomes true once saving was either performed or explicitly declined.
    @volatile var closeApproved = force
    val marker = GraphMarker(graph)
    while (true) {
      GraphMarker.globalRWL.writeLock().lock()
      // `shell` is Some(...) only when we must ask the user before closing.
      val shell = try {
        marker.safeUpdate { state ⇒
          log.info(s"Close $graph, force is $force")
          if (!marker.graphIsOpen())
            throw new IllegalStateException(s"$graph is already closed")
          val shell = if (marker.graphIsDirty() && !closeApproved && App.isUIAvailable) {
            UI.getActiveShell match {
              case Some(shell) ⇒
                Some(shell)
              case None ⇒
                // No shell to ask the user with: fall through and close unsaved.
                log.warn("There is no active shell")
                log.info(s"Close modified $graph without saving")
                None
            }
          } else {
            None
          }
          if (shell.isEmpty) {
            // Close graph
            if (marker.graphStored == GraphMarker.TimestampNil) {
              // There is no local copy: the graph was created but never saved,
              // so its marker is useless — close and delete it, returning the
              // read-only marker produced by the deletion.
              marker.graphClose()
              log.info(s"$graph is closed. Delete unused marker")
              return GraphMarker.deleteFromWorkspace(marker)
            } else {
              // Returns the same marker
              marker.graphClose()
              log.info(s"$graph is closed")
              return marker
            }
          }
          shell
        }
      } finally GraphMarker.globalRWL.writeLock().unlock()
      // Lock released: now it is safe to block on the UI thread for an answer.
      shell.foreach { shell ⇒
        closeApproved = App.execNGet {
          val style = SWT.ICON_QUESTION | SWT.YES | SWT.NO
          val dialog = new MessageBox(shell, style)
          dialog.setText(s"Graph '${marker.graphModelId.name}' is modified")
          dialog.setMessage("Do you want to save modifications?")
          if (dialog.open() == SWT.YES) {
            // save modified model if user permits
            OperationGraphSave.operation(graph, false)
            false // marker.graphIsDirty() should be false
          } else {
            log.info(s"Close modified $graph without saving")
            true
          }
        }(App.LongRunnable)
      }
      // Loop again: the next iteration re-checks dirtiness under the lock
      // (the save above may have cleared it) and performs the actual close.
    }
    throw new IllegalStateException("This code is unreachable")
  }
  /**
   * Create 'Close graph' operation.
   *
   * @param graph graph to close
   * @param force close graph without saving
   * @return 'Close graph' operation
   */
  def operation(graph: Graph[_ <: Model.Like], force: Boolean) =
    new Implemetation(graph, force: Boolean)
  /**
   * Checks that this class can be subclassed.
   * <p>
   * The API class is intended to be subclassed only at specific,
   * controlled point. This method enforces this rule
   * unless it is overridden.
   * </p><p>
   * <em>IMPORTANT:</em> By providing an implementation of this
   * method that allows a subclass of a class which does not
   * normally allow subclassing to be created, the implementer
   * agrees to be fully responsible for the fact that any such
   * subclass will likely fail.
   * </p>
   */
  override protected def checkSubclass() {}
  // Eclipse operation wrapper: single-shot (canExecute flips to false after a
  // successful run), with no undo/redo support.
  class Implemetation(graph: Graph[_ <: Model.Like], force: Boolean)
    extends OperationGraphClose.Abstract(graph, force) with XLoggable {
    @volatile protected var allowExecute = true
    override def canExecute() = allowExecute
    override def canRedo() = false
    override def canUndo() = false
    protected def execute(monitor: IProgressMonitor, info: IAdaptable): Operation.Result[GraphMarker.Generic] = {
      require(canExecute, "Execution is disabled")
      try {
        val result = Option(OperationGraphClose.this(graph, force))
        allowExecute = false
        Operation.Result.OK(result)
      } catch {
        case e: Throwable ⇒
          Operation.Result.Error(s"Unable to close $graph: " + e.getMessage(), e)
      }
    }
    protected def redo(monitor: IProgressMonitor, info: IAdaptable): Operation.Result[GraphMarker.Generic] =
      throw new UnsupportedOperationException
    protected def undo(monitor: IProgressMonitor, info: IAdaptable): Operation.Result[GraphMarker.Generic] =
      throw new UnsupportedOperationException
  }
}
object OperationGraphClose extends XLoggable {
  /** Stable identifier with OperationGraphClose DI */
  lazy val operation = DI.operation.asInstanceOf[OperationGraphClose]
  /**
   * Build a new 'Close graph' operation.
   *
   * @param graph graph to close
   * @param force close graph without saving
   * @return 'Close graph' operation
   */
  @log
  def apply(graph: Graph[_ <: Model.Like], force: Boolean): Option[Abstract] =
    Some(operation.operation(graph, force))
  /** Bridge between abstract XOperation[Unit] and concrete Operation[Unit] */
  abstract class Abstract(val graph: Graph[_ <: Model.Like], val force: Boolean)
    extends Operation[GraphMarker.Generic](s"Close $graph") {
    this: XLoggable ⇒
  }
  /**
   * Dependency injection routines.
   */
  private object DI extends XDependencyInjection.PersistentInjectable {
    // Fall back to the default implementation when no XOperationGraphClose is bound.
    lazy val operation = injectOptional[XOperationGraphClose[_]] getOrElse new OperationGraphClose
  }
}
| digimead/digi-TABuddy-desktop | part-logic/src/main/scala/org/digimead/tabuddy/desktop/logic/operation/graph/OperationGraphClose.scala | Scala | agpl-3.0 | 8,492 |
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.kudu.spark.tools
import java.math.BigDecimal
import java.math.BigInteger
import java.nio.charset.StandardCharsets
import org.apache.kudu.Schema
import org.apache.kudu.Type
import org.apache.kudu.client.PartialRow
import org.apache.kudu.client.SessionConfiguration
import org.apache.kudu.spark.kudu.KuduContext
import org.apache.kudu.spark.kudu.KuduWriteOptions
import org.apache.kudu.spark.kudu.RowConverter
import org.apache.kudu.spark.kudu.SparkUtil
import org.apache.kudu.spark.tools.DistributedDataGeneratorOptions._
import org.apache.kudu.util.DataGenerator
import org.apache.kudu.util.DateUtil
import org.apache.spark.sql.Row
import org.apache.spark.sql.SparkSession
import org.apache.spark.util.LongAccumulator
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.yetus.audience.InterfaceAudience
import org.apache.yetus.audience.InterfaceStability
import org.slf4j.Logger
import org.slf4j.LoggerFactory
import scopt.OptionParser
import scala.collection.JavaConverters._
/** Spark accumulators tracking totals (rows written, key collisions) across all generator tasks. */
case class GeneratorMetrics(rowsWritten: LongAccumulator, collisions: LongAccumulator)
object GeneratorMetrics {
  /** Registers the two named accumulators against the given SparkContext. */
  def apply(sc: SparkContext): GeneratorMetrics = {
    val written = sc.longAccumulator("rows_written")
    val collided = sc.longAccumulator("row_collisions")
    GeneratorMetrics(written, collided)
  }
}
object DistributedDataGenerator {
  val log: Logger = LoggerFactory.getLogger(getClass)
  /**
   * Runs the generator: creates one Spark task per `numTasks`, generates rows
   * (sequential or random), optionally repartitions them to match the table's
   * partitioning, and inserts them into Kudu while counting writes and
   * primary-key collisions.
   *
   * @param options generator configuration
   * @param ss active Spark session
   * @return accumulated rows-written / collision counters
   */
  def run(options: DistributedDataGeneratorOptions, ss: SparkSession): GeneratorMetrics = {
    log.info(s"Running a DistributedDataGenerator with options: $options")
    val sc = ss.sparkContext
    val context = new KuduContext(options.masterAddresses, sc)
    val metrics = GeneratorMetrics(sc)
    // Generate the Inserts.
    var rdd = sc
      .parallelize(0 until options.numTasks, numSlices = options.numTasks)
      .mapPartitions(
        { taskNumIter =>
          // We know there is only 1 task per partition because numSlices = options.numTasks above.
          val taskNum = taskNumIter.next()
          val generator = new DataGenerator.DataGeneratorBuilder()
          // Add taskNum to the seed otherwise each task will try to generate the same rows.
            .random(new java.util.Random(options.seed + taskNum))
            .stringLength(options.stringLength)
            .binaryLength(options.binaryLength)
            .build()
          val table = context.syncClient.openTable(options.tableName)
          val schema = table.getSchema
          // NOTE(review): integer division — when numRows is not a multiple of numTasks
          // the remainder rows are never generated; confirm this is acceptable.
          val numRows = options.numRows / options.numTasks
          val startRow: Long = numRows * taskNum
          new GeneratedRowIterator(generator, options.generatorType, schema, startRow, numRows)
        },
        true
      )
    if (options.repartition) {
      val table = context.syncClient.openTable(options.tableName)
      val sparkSchema = SparkUtil.sparkSchema(table.getSchema)
      rdd = context
        .repartitionRows(rdd, options.tableName, sparkSchema, KuduWriteOptions(ignoreNull = true))
    }
    // Write the rows to Kudu.
    // TODO: Use context.writeRows while still tracking inserts/collisions.
    rdd.foreachPartition { rows =>
      val kuduClient = context.syncClient
      val table = kuduClient.openTable(options.tableName)
      val kuduSchema = table.getSchema
      val sparkSchema = SparkUtil.sparkSchema(kuduSchema)
      val converter = new RowConverter(kuduSchema, sparkSchema, ignoreNull = true)
      val session = kuduClient.newSession()
      // Inserts are buffered and flushed in the background; errors surface only at flush.
      session.setFlushMode(SessionConfiguration.FlushMode.AUTO_FLUSH_BACKGROUND)
      var rowsWritten = 0
      rows.foreach { row =>
        val insert = table.newInsert()
        val partialRow = converter.toPartialRow(row)
        insert.setRow(partialRow)
        session.apply(insert)
        rowsWritten += 1
      }
      // Synchronously flush after the last record is written.
      session.flush()
      // Track the collisions.
      var collisions = 0
      for (error <- session.getPendingErrors.getRowErrors) {
        if (error.getErrorStatus.isAlreadyPresent) {
          // Because we can't check for collisions every time, but instead
          // only when the rows are flushed, we subtract any rows that may
          // have failed from the counter.
          rowsWritten -= 1
          collisions += 1
        } else {
          // Any non-collision write error is fatal for the task.
          throw new RuntimeException("Kudu write error: " + error.getErrorStatus.toString)
        }
      }
      metrics.rowsWritten.add(rowsWritten)
      metrics.collisions.add(collisions)
      session.close()
    }
    metrics
  }
  /**
   * Entry point for testing. SparkContext is a singleton,
   * so tests must create and manage their own.
   */
  @InterfaceAudience.LimitedPrivate(Array("Test"))
  def testMain(args: Array[String], ss: SparkSession): GeneratorMetrics = {
    DistributedDataGeneratorOptions.parse(args) match {
      case None => throw new IllegalArgumentException("Could not parse arguments")
      case Some(config) => run(config, ss)
    }
  }
  /** CLI entry point: parses args, runs the generator, and logs the final counters. */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("DistributedDataGenerator")
    val ss = SparkSession.builder().config(conf).getOrCreate()
    val metrics = testMain(args, ss)
    log.info(s"Rows written: ${metrics.rowsWritten.value}")
    log.info(s"Collisions: ${metrics.collisions.value}")
  }
}
/**
 * Iterator producing `numRows` Spark rows for one generator task, starting at
 * logical row number `startRow`. Rows are either sequential (every column set
 * to the row number) or fully randomized, depending on `generatorType`.
 */
private class GeneratedRowIterator(
    generator: DataGenerator,
    generatorType: String,
    schema: Schema,
    startRow: Long,
    numRows: Long)
    extends Iterator[Row] {

  // Spark-side schema derived from the Kudu schema.
  val sparkSchema = SparkUtil.sparkSchema(schema)
  // ignoreNull values so unset/defaulted rows can be passed through.
  val converter = new RowConverter(schema, sparkSchema, ignoreNull = true)

  var currentRow: Long = startRow
  var rowsGenerated: Long = 0

  override def hasNext: Boolean = rowsGenerated < numRows

  override def next(): Row = {
    if (!hasNext) {
      throw new IllegalStateException("Already generated all of the rows.")
    }
    val partialRow = schema.newPartialRow()
    generatorType match {
      case SequentialGenerator => fillRow(partialRow, currentRow)
      case RandomGenerator => generator.randomizeRow(partialRow)
      case _ =>
        throw new IllegalArgumentException(s"Generator type of $generatorType is unsupported")
    }
    currentRow += 1
    rowsGenerated += 1
    converter.toRow(partialRow)
  }

  /**
   * Sets all the columns in the passed row to the passed value.
   * TODO(ghenke): Consider failing when value doesn't fit into the type.
   */
  private def fillRow(row: PartialRow, value: Long): Unit = {
    val rowSchema = row.getSchema
    for ((col, idx) <- rowSchema.getColumns.asScala.zipWithIndex) {
      col.getType match {
        case Type.BOOL => row.addBoolean(idx, value % 2 == 1)
        case Type.INT8 => row.addByte(idx, value.toByte)
        case Type.INT16 => row.addShort(idx, value.toShort)
        case Type.INT32 => row.addInt(idx, value.toInt)
        case Type.INT64 => row.addLong(idx, value)
        case Type.UNIXTIME_MICROS => row.addLong(idx, value)
        case Type.DATE => row.addDate(idx, DateUtil.epochDaysToSqlDate(value.toInt))
        case Type.FLOAT => row.addFloat(idx, value.toFloat)
        case Type.DOUBLE => row.addDouble(idx, value.toDouble)
        case Type.DECIMAL =>
          row.addDecimal(idx, new BigDecimal(BigInteger.valueOf(value), col.getTypeAttributes.getScale))
        case Type.VARCHAR => row.addVarchar(idx, String.valueOf(value))
        case Type.STRING => row.addString(idx, String.valueOf(value))
        case Type.BINARY =>
          row.addBinary(idx, String.valueOf(value).getBytes(StandardCharsets.UTF_8))
        case _ =>
          throw new UnsupportedOperationException("Unsupported type " + col.getType)
      }
    }
  }
}
@InterfaceAudience.Private
@InterfaceStability.Unstable
/**
 * Parsed command-line configuration for the generator.
 * Defaults mirror the constants in the companion object.
 */
case class DistributedDataGeneratorOptions(
    tableName: String,
    masterAddresses: String,
    generatorType: String = DistributedDataGeneratorOptions.DefaultGeneratorType,
    numRows: Long = DistributedDataGeneratorOptions.DefaultNumRows,
    numTasks: Int = DistributedDataGeneratorOptions.DefaultNumTasks,
    stringLength: Int = DistributedDataGeneratorOptions.DefaultStringLength,
    // Fixed: previously defaulted to DefaultStringLength (copy-paste error).
    // Both constants are 128 today, so observable behavior is unchanged.
    binaryLength: Int = DistributedDataGeneratorOptions.DefaultBinaryLength,
    seed: Long = System.currentTimeMillis(),
    repartition: Boolean = DistributedDataGeneratorOptions.DefaultRepartition)
@InterfaceAudience.Private
@InterfaceStability.Unstable
object DistributedDataGeneratorOptions {
  // Defaults shared between the case class and the CLI help text below.
  val DefaultNumRows: Long = 10000
  val DefaultNumTasks: Int = 1
  val DefaultStringLength: Int = 128
  val DefaultBinaryLength: Int = 128
  val RandomGenerator: String = "random"
  val SequentialGenerator: String = "sequential"
  val DefaultGeneratorType: String = SequentialGenerator
  val DefaultRepartition: Boolean = false
  /** scopt parser describing the generator's command line. */
  // NOTE(review): 'seed' and 'repartition' omit .optional(); scopt opts appear to be
  // optional by default, making the other .optional() calls redundant — confirm.
  private val parser: OptionParser[DistributedDataGeneratorOptions] =
    new OptionParser[DistributedDataGeneratorOptions]("LoadRandomData") {
      arg[String]("table-name")
        .action((v, o) => o.copy(tableName = v))
        .text("The table to load with random data")
      arg[String]("master-addresses")
        .action((v, o) => o.copy(masterAddresses = v))
        .text("Comma-separated addresses of Kudu masters")
      opt[String]("type")
        .action((v, o) => o.copy(generatorType = v))
        .text(s"The type of data generator. Must be one of 'random' or 'sequential'. " +
          s"Default: ${DefaultGeneratorType}")
        .optional()
      opt[Long]("num-rows")
        .action((v, o) => o.copy(numRows = v))
        .text(s"The total number of unique rows to generate. Default: ${DefaultNumRows}")
        .optional()
      opt[Int]("num-tasks")
        .action((v, o) => o.copy(numTasks = v))
        .text(s"The total number of Spark tasks to use when generating data. " +
          s"Default: ${DefaultNumTasks}")
        .optional()
      opt[Int]("string-length")
        .action((v, o) => o.copy(stringLength = v))
        .text(s"The length of generated string fields. Default: ${DefaultStringLength}")
        .optional()
      opt[Int]("binary-length")
        .action((v, o) => o.copy(binaryLength = v))
        .text(s"The length of generated binary fields. Default: ${DefaultBinaryLength}")
        .optional()
      opt[Long]("seed")
        .action((v, o) => o.copy(seed = v))
        .text(s"The seed to use in the random data generator. " +
          s"Default: `System.currentTimeMillis()`")
      opt[Boolean]("repartition")
        .action((v, o) => o.copy(repartition = v))
        .text(s"Repartition the data to ensure each spark task talks to a minimal " +
          s"set of tablet servers.")
    }
  /** Parses `args` into options, returning None when they are invalid. */
  def parse(args: Seq[String]): Option[DistributedDataGeneratorOptions] = {
    parser.parse(args, DistributedDataGeneratorOptions("", ""))
  }
}
| helifu/kudu | java/kudu-spark-tools/src/main/scala/org/apache/kudu/spark/tools/DistributedDataGenerator.scala | Scala | apache-2.0 | 11,851 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.algebird.util
import com.twitter.algebird._
import com.twitter.util.{ Await, Future }
import scala.util.Random
object TunnelMonoidProperties {
  /**
   * Generic associativity check for "tunnel"-style monoids: the same collection
   * of tunnels is combined in two independently randomized association orders,
   * and the futures produced by `collapseFinalValues` must agree for every
   * probe input.
   *
   * @param makeRandomInput builds a random value of the underlying type from an Int seed
   * @param makeTunnel lifts an input value into the tunnel type
   * @param collapseFinalValues feeds `toFeed` through the combined tunnel and
   *        extracts one future result per original tunnel
   */
  def testTunnelMonoid[I, V](makeRandomInput: Int => I,
    makeTunnel: I => V,
    collapseFinalValues: (V, Seq[V], I) => Seq[Future[I]])(implicit monoid: Monoid[I],
      superMonoid: Monoid[V]) = {
    val r = new Random
    val numbers = (1 to 40).map { _ => makeRandomInput(r.nextInt) }
    def helper(seeds: Seq[I], toFeed: I) = {
      val tunnels = seeds.map(makeTunnel)
      // Repeatedly merges a randomly chosen adjacent pair of tunnels until one
      // remains, so each invocation exercises a different association order.
      @annotation.tailrec
      def process(tunnels: Seq[V]): V = {
        val size = tunnels.size
        if (size > 2) {
          val (tun1, tun2) = tunnels.splitAt(r.nextInt(size - 2))
          val (of2, rest) = tun2.splitAt(2)
          process(tun1 ++ (Monoid.plus(of2.head, of2.tail.head) +: rest))
        } else if (size == 2) {
          Monoid.plus(tunnels.head, tunnels.tail.head)
        } else {
          tunnels.head
        }
      }
      collapseFinalValues(process(tunnels), tunnels, toFeed)
    }
    // Two independent helper runs use different random association orders;
    // associativity requires their paired results to be equal.
    numbers.forall { _ =>
      val toFeed = makeRandomInput(r.nextInt)
      val finalResults = helper(numbers, toFeed) zip helper(numbers, toFeed) map {
        case (f1, f2) => for {
          b1 <- f1
          b2 <- f2
        } yield b1 == b2
      }
      Await.result(Future.collect(finalResults).map { _.forall(identity) })
    }
  }
}
// NOTE(review): the class name 'TunnelMonoidPropertiesextends' looks like a typo —
// "extends" was fused into the identifier; the intended name is presumably
// 'TunnelMonoidProperties'. Renaming would change which suite the test framework
// discovers, so it should be confirmed and fixed in a dedicated change.
class TunnelMonoidPropertiesextends extends CheckProperties {
  import TunnelMonoidProperties._
  // Hand-rolled Int addition monoid used as the underlying value monoid.
  implicit val monoid = new Monoid[Int] {
    val zero = 0
    def plus(older: Int, newer: Int): Int = older + newer
  }
  property("associative") {
    def makeTunnel(seed: Int) = Tunnel.toIncrement(seed)
    // Feed the probe through the combined tunnel and collect each tunnel's future.
    def collapseFinalValues(finalTunnel: Tunnel[Int], tunnels: Seq[Tunnel[Int]], toFeed: Int) =
      finalTunnel(toFeed) +: tunnels.map { _.future }
    testTunnelMonoid[Int, Tunnel[Int]](identity, makeTunnel, collapseFinalValues)
  }
} | nvoron23/algebird | algebird-util/src/test/scala/com/twitter/algebird/util/TunnelMonoidProperties.scala | Scala | apache-2.0 | 2,509 |
package chandu0101.scalajs.react.components.demo.components.reacttable
import chandu0101.scalajs.react.components.demo.components.CodeExample
import chandu0101.scalajs.react.components.demo.util.SampleData
import chandu0101.scalajs.react.components.tables.ReactTable
import chandu0101.scalajs.react.components.util.JsonUtil
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.prefix_<^._
/**
* Created by chandrasekharkode .
*/
/** Demo page showing a ReactTable with a custom cell renderer for the 'fname' column. */
object ReactTableCustomCell {
  // Source snippet rendered verbatim in the demo UI next to the live example.
  // It duplicates the vals below and must be kept in sync with them by hand.
  val code =
    """
      | val data: Vector[Map[String, Any]] = JsonUtil.jsonArrayToMap(SampleData.personJson)
      | val columns: List[String] = List("fname", "lname", "email", "country")
      | def customFname = (fname : Any) => { val name = fname.toString ; if(name.startsWith("J")) <.span(^.backgroundColor := "grey")(name).render else <.span(name).render }
      | //config is a List of touple4 (String, Option[(Any) => ReactElement], Option[(Model, Model) => Boolean],Option[Double])
      | /**
      | * ._1 : String = column name
      | * ._2 : Option[Any => ReactElement] = custom cell
      | * ._3 : Option[(Model,Model) => Boolean] = sorting function
      | * ._4 : Option[Double] = column width interms of flex property
      | */
      | val config = List(("fname",Some(customFname),None,None))
      | ReactTable( data = data ,columns = columns , config = config)
      |
    """.stripMargin
  // data/columns/config are referenced only inside the render closure below,
  // so their later declaration position in this object is safe.
  val component = ReactComponentB[Unit]("plain")
    .render(P => {
    <.div(
      <.h2(^.cls := "mui-font-style-headline")("Custom Cell Factory"),
      CodeExample(code)(
        ReactTable( data = data ,columns = columns , config = config)
      )
    )
  }).buildU
  val data: Vector[Map[String, Any]] = JsonUtil.jsonArrayToMap(SampleData.personJson)
  val columns: List[String] = List("fname", "lname", "email", "country")
  //config is a List of touple4 (String, Option[(Any) => ReactElement], Option[(Model, Model) => Boolean],Option[Double])
  // ._1 : columnname you want to config
  // ._2 : custom render function (custom cell factory)
  // ._3 : Sorting function
  // ._4 : column width (flex := width)
  // let say if i want to turn all fnames to grey that starts with J (you can return any ReactElement(buttons,well another ReactTable if you want!)
  def customFname = (fname : Any) => { val name = fname.toString ; if(name.startsWith("J")) <.span(^.backgroundColor := "grey")(name).render else <.span(name).render }
  val config = List(("fname",Some(customFname),None,None))
  def apply() = component()
}
| coreyauger/scalajs-react-components | demo/src/main/scala/chandu0101/scalajs/react/components/demo/components/reacttable/ReactTableCustomCell.scala | Scala | apache-2.0 | 2,539 |
package sigmastate
import org.ergoplatform.ErgoAddressEncoder.TestnetNetworkPrefix
import org.ergoplatform.ErgoScriptPredef
import org.scalatest.Matchers
import sigmastate.Values.{SValue, Value, SigmaPropValue, ErgoTree, SigmaBoolean}
import sigmastate.eval.IRContext
import sigmastate.interpreter.Interpreter
import sigmastate.interpreter.Interpreter.ScriptEnv
import sigmastate.lang.{TransformingSigmaBuilder, SigmaCompiler, CompilerSettings}
import sigmastate.lang.Terms.ValueOps
import sigmastate.serialization.ValueSerializer
import spire.syntax.all.cfor
import scala.util.DynamicVariable
trait TestsBase extends Matchers {
  // All script (activation) versions from 0 up to the max supported interpreter version.
  val activatedVersions: Seq[Byte] =
    (0 to Interpreter.MaxSupportedScriptVersion).map(_.toByte).toArray[Byte]
  // Dynamically scoped current activated version; set by the forEach* helpers below.
  private[sigmastate] val _currActivatedVersion = new DynamicVariable[Byte](0)
  def activatedVersionInTests: Byte = _currActivatedVersion.value
  /** Checks if the current activated script version used in tests corresponds to v4.x. */
  def isActivatedVersion4: Boolean = activatedVersionInTests < 2
  // All ErgoTree versions from 0 up to the max supported interpreter version.
  val ergoTreeVersions: Seq[Byte] =
    (0 to Interpreter.MaxSupportedScriptVersion).map(_.toByte).toArray[Byte]
  // Dynamically scoped current ErgoTree version; set by the forEach* helpers below.
  private[sigmastate] val _currErgoTreeVersion = new DynamicVariable[Byte](0)
  /** Current ErgoTree version assigned dynamically using [[CrossVersionProps]]. */
  def ergoTreeVersionInTests: Byte = _currErgoTreeVersion.value
  /** Current ErgoTree header flags assigned dynamically using [[CrossVersionProps]] and
    * ergoTreeVersionInTests.
    */
  def ergoTreeHeaderInTests: Byte = ErgoTree.headerWithVersion(ergoTreeVersionInTests)
  /** Executes the given block for each combination of _currActivatedVersion and
    * _currErgoTreeVersion assigned to dynamic variables.
    */
  def forEachScriptAndErgoTreeVersion
      (activatedVers: Seq[Byte], ergoTreeVers: Seq[Byte])
      (block: => Unit): Unit = {
    cfor(0)(_ < activatedVers.length, _ + 1) { i =>
      val activatedVersion = activatedVers(i)
      // setup each activated version
      _currActivatedVersion.withValue(activatedVersion) {
        // Note the loop condition: only tree versions <= the activated version are used.
        cfor(0)(
          i => i < ergoTreeVers.length && ergoTreeVers(i) <= activatedVersion,
          _ + 1) { j =>
          val treeVersion = ergoTreeVers(j)
          // for each tree version up to currently activated, set it up and execute block
          _currErgoTreeVersion.withValue(treeVersion)(block)
        }
      }
    }
  }
  /** Helper method which executes the given `block` once for each `activatedVers`.
    * The method sets the dynamic variable activatedVersionInTests with is then available
    * in the block.
    */
  def forEachActivatedScriptVersion(activatedVers: Seq[Byte])(block: => Unit): Unit = {
    cfor(0)(_ < activatedVers.length, _ + 1) { i =>
      val activatedVersion = activatedVers(i)
      _currActivatedVersion.withValue(activatedVersion)(block)
    }
  }
  /** Helper method which executes the given `block` once for each `ergoTreeVers`.
    * The method sets the dynamic variable ergoTreeVersionInTests with is then available
    * in the block.
    */
  def forEachErgoTreeVersion(ergoTreeVers: Seq[Byte])(block: => Unit): Unit = {
    cfor(0)(_ < ergoTreeVers.length, _ + 1) { i =>
      val version = ergoTreeVers(i)
      _currErgoTreeVersion.withValue(version)(block)
    }
  }
  /** Obtains [[ErgoTree]] which corresponds to True proposition using current
    * ergoTreeHeaderInTests. */
  def TrueTree: ErgoTree = ErgoScriptPredef.TrueProp(ergoTreeHeaderInTests)
  /** Obtains [[ErgoTree]] which corresponds to False proposition using current
    * ergoTreeHeaderInTests. */
  def FalseTree: ErgoTree = ErgoScriptPredef.FalseProp(ergoTreeHeaderInTests)
  /** Transform proposition into [[ErgoTree]] using current ergoTreeHeaderInTests. */
  def mkTestErgoTree(prop: SigmaPropValue): ErgoTree =
    ErgoTree.fromProposition(ergoTreeHeaderInTests, prop)
  /** Transform sigma proposition into [[ErgoTree]] using current ergoTreeHeaderInTests. */
  def mkTestErgoTree(prop: SigmaBoolean): ErgoTree =
    ErgoTree.fromSigmaBoolean(ergoTreeHeaderInTests, prop)
  // Dynamically scoped flag backing lowerMethodCallsInTests.
  protected val _lowerMethodCalls = new DynamicVariable[Boolean](true)
  /** Returns true if MethodCall nodes should be lowered by TypeChecker to the
    * corresponding ErgoTree nodes. E.g. xs.map(f) --> MapCollection(xs, f).
    * NOTE: The value of the flag is assigned dynamically using _lowerMethodCalls
    * DynamicVariable. */
  def lowerMethodCallsInTests: Boolean = _lowerMethodCalls.value
  /** If true, then all suite properties are executed with _lowerMethodCalls set to false.
    * This allow to test execution of MethodCall nodes in ErgoTree.
    */
  val okRunTestsWithoutMCLowering: Boolean = false
  // Baseline compiler settings; lowerMethodCalls is overridden per-test below.
  val defaultCompilerSettings: CompilerSettings = CompilerSettings(
    TestnetNetworkPrefix, TransformingSigmaBuilder,
    lowerMethodCalls = true
  )
  /** Compiler settings reflecting the current dynamic lowerMethodCallsInTests flag. */
  def compilerSettingsInTests: CompilerSettings =
    defaultCompilerSettings.copy(lowerMethodCalls = lowerMethodCallsInTests)
  // A fresh compiler is built on each access so it picks up the current settings.
  def compiler = SigmaCompiler(compilerSettingsInTests)
  /** Asserts that serializing and then deserializing `v` yields an equal tree. */
  def checkSerializationRoundTrip(v: SValue): Unit = {
    val compiledTreeBytes = ValueSerializer.serialize(v)
    withClue(s"(De)Serialization roundtrip failed for the tree:") {
      ValueSerializer.deserialize(compiledTreeBytes) shouldEqual v
    }
  }
  /** Compiles `code` without costing and without the round-trip check. */
  def compileWithoutCosting(env: ScriptEnv, code: String): Value[SType] =
    compiler.compileWithoutCosting(env, code)
  /** Compiles `code` and verifies its serialization round-trip. */
  def compile(env: ScriptEnv, code: String)(implicit IR: IRContext): Value[SType] = {
    val tree = compiler.compile(env, code)
    checkSerializationRoundTrip(tree)
    tree
  }
  /** Compiles `code`, asserts the result equals `expected`, and wraps it into an
    * ErgoTree with the current test header. Returns both the tree and the proposition. */
  def compileAndCheck(env: ScriptEnv, code: String, expected: SValue)
      (implicit IR: IRContext): (ErgoTree, SigmaPropValue) = {
    val prop = compile(env, code).asSigmaProp
    prop shouldBe expected
    val tree = mkTestErgoTree(prop)
    (tree, prop)
  }
}
| ScorexFoundation/sigmastate-interpreter | sigmastate/src/test/scala/sigmastate/TestsBase.scala | Scala | mit | 5,901 |
package com.thinkbiganalytics.spark.metadata
import com.thinkbiganalytics.discovery.schema.QueryResultColumn
import com.thinkbiganalytics.spark.SparkContextService
import com.thinkbiganalytics.spark.dataprofiler.Profiler
import com.thinkbiganalytics.spark.rest.model.TransformResponse
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.slf4j.LoggerFactory
/** Wraps a transform script into a function that can be evaluated.
*
* @param destination the name of the destination Hive table
* @param sqlContext the Spark SQL context
*/
abstract class TransformScript16(destination: String, profiler: Profiler, sqlContext: SQLContext, sparkContextService: SparkContextService) extends TransformScript(destination, profiler) {
  // NOTE(review): logger is keyed on the parent class TransformScript, not
  // TransformScript16 — confirm this is deliberate (shared log category).
  private[this] val log = LoggerFactory.getLogger(classOf[TransformScript])
  /** Evaluates this transform script and stores the result in a Hive table. */
  def run(): QueryResultCallable = {
    new QueryResultCallable16
  }
  /** Evaluates the transform script.
    *
    * @return the transformation result
    */
  protected[metadata] def dataFrame: DataFrame
  /** Fetches or re-generates the results of the parent transformation, if available.
    *
    * @return the parent results
    */
  protected def parent: DataFrame = {
    try {
      sqlContext.read.table(parentTable)
    }
    catch {
      // Fall back to regenerating the parent data frame when the table cannot be read.
      // NOTE(review): any Exception (not only "table not found") triggers the fallback.
      case e: Exception =>
        log.trace("Exception reading parent table: {}", e.toString)
        log.debug("Parent table not found: {}", parentTable)
        parentDataFrame
    }
  }
  // Subclasses that have a parent transformation must override this; the default
  // makes a missing override an explicit failure rather than a silent one.
  protected override def parentDataFrame: DataFrame = {
    throw new UnsupportedOperationException
  }
  /** Stores the `DataFrame` results in a [[QueryResultColumn]] and returns the object. */
  private class QueryResultCallable16 extends QueryResultCallable {
    override def call(): TransformResponse = {
      // Cache data frame
      val cache = dataFrame.cache
      // Register under the destination name so later queries can reference it.
      cache.registerTempTable(destination)
      // Build response object
      toResponse(sparkContextService.toDataSet(cache))
    }
  }
}
| rashidaligee/kylo | integrations/spark/spark-shell-client/spark-shell-client-v1/src/main/scala/com/thinkbiganalytics/spark/metadata/TransformScript16.scala | Scala | apache-2.0 | 2,194 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail.internal
import cats.effect.Sync
import cats.syntax.all._
import monix.execution.internal.collection.ChunkedArrayStack
import monix.tail.Iterant
import monix.tail.Iterant.{Concat, Halt, Last, Next, NextBatch, NextCursor, Scope, Suspend}
import monix.tail.batches.BatchCursor
private[tail] object IterantCompleteL {
  /**
   * Implementation for `Iterant#completedL`
   */
  final def apply[F[_], A](source: Iterant[F, A])(implicit F: Sync[F]): F[Unit] = {
    F.suspend(new Loop[F, A]().apply(source))
  }
  /**
   * Visitor that drains the iterant, discarding every emitted element, and
   * completes with `F.unit` — or raises the iterant's terminal error.
   * Concat nodes are handled iteratively via an explicit stack of pending
   * right-hand sides to avoid unbounded nesting.
   */
  private final class Loop[F[_], A](implicit F: Sync[F]) extends Iterant.Visitor[F, A, F[Unit]] {
    //-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    // Used in visit(Concat)
    // Lazily allocated stack of pending Concat right-hand sides.
    private[this] var stackRef: ChunkedArrayStack[F[Iterant[F, A]]] = _
    private def stackPush(item: F[Iterant[F, A]]): Unit = {
      if (stackRef == null) stackRef = ChunkedArrayStack()
      stackRef.push(item)
    }
    // Returns null (not an Option) when the stack is empty/unallocated,
    // avoiding allocation on this hot path.
    private def stackPop(): F[Iterant[F, A]] = {
      if (stackRef != null) stackRef.pop()
      else null.asInstanceOf[F[Iterant[F, A]]]
    }
    // After a Concat's left side completes, resume with the next pending
    // right-hand side, or finish when the stack is exhausted.
    private[this] val concatContinue: (Unit => F[Unit]) =
      _ =>
        stackPop() match {
          case null => F.unit
          case xs => xs.flatMap(this)
        }
    //-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    def visit(ref: Next[F, A]): F[Unit] =
      ref.rest.flatMap(this)
    def visit(ref: NextBatch[F, A]): F[Unit] =
      processCursor(ref.batch.cursor(), ref.rest)
    def visit(ref: NextCursor[F, A]): F[Unit] =
      processCursor(ref.cursor, ref.rest)
    def visit(ref: Suspend[F, A]): F[Unit] =
      ref.rest.flatMap(this)
    def visit(ref: Concat[F, A]): F[Unit] = {
      // Defer the right side; process the left, then continue from the stack.
      stackPush(ref.rh)
      ref.lh.flatMap(this).flatMap(concatContinue)
    }
    def visit[S](ref: Scope[F, S, A]): F[Unit] =
      ref.runFold(this)
    def visit(ref: Last[F, A]): F[Unit] =
      F.unit
    def visit(ref: Halt[F, A]): F[Unit] =
      ref.e match {
        case None => F.unit
        case Some(e) => F.raiseError(e)
      }
    def fail(e: Throwable): F[Unit] =
      F.raiseError(e)
    // Exhausts the cursor (elements are discarded) and continues with the rest.
    private def processCursor(cursor: BatchCursor[A], rest: F[Iterant[F, A]]) = {
      while (cursor.hasNext()) cursor.next()
      rest.flatMap(this)
    }
  }
}
| monifu/monix | monix-tail/shared/src/main/scala/monix/tail/internal/IterantCompleteL.scala | Scala | apache-2.0 | 2,980 |
package com.ntsdev.service
import java.util.Collections
import org.neo4j.ogm.session.Session
import org.slf4j.LoggerFactory
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.context.annotation.Profile
import org.springframework.stereotype.Service
import org.springframework.transaction.annotation.{Propagation, Transactional}
import scala.collection.JavaConversions._
import scala.io.{Codec, Source}
@Service
@Profile(Array("!cloud"))
@Transactional(propagation = Propagation.REQUIRED)
/**
 * Loads a Cypher test fixture into the local (non-cloud) Neo4j instance.
 * Active only outside the "cloud" Spring profile.
 */
class TestDataService {
  private val log = LoggerFactory.getLogger(getClass)
  // Tracks whether the fixture has been loaded at least once.
  private var loaded = false

  // Injected by Spring; null initializer is the standard pattern for @Autowired Scala vals.
  @Autowired
  val session: Session = null

  /** Wipes the database and (re)loads the Cypher fixture from the classpath. */
  def loadTestData() = {
    session.purgeDatabase()
    log.info("Loading test data...")
    val emptyMap = mapAsJavaMap[String, AnyRef](Collections.emptyMap[String, AnyRef]())
    session.query(loadDataFromFile("testdata.cql"), emptyMap)
    log.info("Test data loaded.")
    loaded = true
  }

  /**
   * Reads a classpath resource as UTF-8.
   * Fixed: the Source is now closed in a finally block — the previous
   * implementation leaked the underlying stream.
   */
  private def loadDataFromFile(fileName: String): String = {
    val source = Source.fromURL(getClass.getResource("/" + fileName))(Codec.UTF8)
    try source.mkString
    finally source.close()
  }
}
| neilshannon/atlanta-scala-microservice | src/main/scala/com/ntsdev/service/TestDataService.scala | Scala | mit | 1,127 |
package com
import java.time.Instant
import com.wavesplatform.block.Block
import com.wavesplatform.lang.ValidationError
import com.wavesplatform.mining.Miner
import com.wavesplatform.settings.WavesSettings
import com.wavesplatform.state.Blockchain
import com.wavesplatform.transaction.BlockchainUpdater
import com.wavesplatform.transaction.TxValidationError.GenericError
import com.wavesplatform.utils.ScorexLogging
package object wavesplatform extends ScorexLogging {
  // Appends the genesis block when the chain is empty; otherwise verifies that the
  // stored block at height 1 has the same id as the configured genesis and, on a
  // match, (re)schedules mining.
  private def checkOrAppend(block: Block, blockchainUpdater: Blockchain with BlockchainUpdater, miner: Miner): Either[ValidationError, Unit] =
    if (blockchainUpdater.isEmpty) {
      blockchainUpdater.processBlock(block, block.header.generationSignature).map { _ =>
        val genesisHeader = blockchainUpdater.blockHeader(1).get
        log.info(s"Genesis block ${genesisHeader.id()} (generated at ${Instant.ofEpochMilli(genesisHeader.header.timestamp)}) has been added to the state")
      }
    } else blockchainUpdater.blockHeader(1).map(_.id()) match {
      case Some(id) if id == block.id() =>
        miner.scheduleMining()
        Right(())
      case _ =>
        Left(GenericError("Mismatched genesis blocks in configuration and blockchain"))
    }
  /**
   * Builds the genesis block from settings and ensures the blockchain starts from it.
   * On construction failure or genesis mismatch the error is logged and the node is
   * force-stopped.
   */
  def checkGenesis(settings: WavesSettings, blockchainUpdater: Blockchain with BlockchainUpdater, miner: Miner): Unit = {
    Block
      .genesis(settings.blockchainSettings.genesisSettings)
      .flatMap { genesis =>
        log.trace(s"Genesis block json: ${genesis.json()}")
        checkOrAppend(genesis, blockchainUpdater, miner)
      }
      .left
      .foreach { e =>
        log.error("INCORRECT NODE CONFIGURATION!!! NODE STOPPED BECAUSE OF THE FOLLOWING ERROR:")
        log.error(e.toString)
        com.wavesplatform.utils.forceStopApplication()
      }
  }
}
| wavesplatform/Waves | node/src/main/scala/com/wavesplatform/package.scala | Scala | mit | 1,813 |
/*
Fixing this seems to require a better understanding of the algorithm due to the
extensive use of symbols. Since it is not being used anywhere, I am commenting
it out for now. [Giselle]
package at.logic.gapt.language.hol.algorithms.unification
import at.logic.calculi.lk.base.types.FSequent
import at.logic.gapt.language.hol.{Formula}
import at.logic.gapt.expr.{VariableStringSymbol, VariableSymbolA}
import at.logic.parsing.language.simple.SimpleFOLParser
import at.logic.parsing.readers.StringReader
import at.logic.gapt.algorithms.diophantine.{LankfordSolver, Vector}
import at.logic.calculi.lk.base.FSequent
import at.logic.gapt.language.hol.logicSymbols.{ConstantStringSymbol, ConstantSymbolA}
import at.logic.gapt.language.fol._
import at.logic.gapt.language.fol.Eq
import at.logic.gapt.expr.substitutions.Substitution
import collection.immutable.Stream.Cons
import at.logic.calculi.lk.base.FSequent
import at.logic.gapt.expr.typedLambdaCalculus.Normalization
import scala.collection.mutable
package types {
class Equation(val left: FOLTerm, val right : FOLTerm) {
def toFormula() = Eq(left, right)
}
}
object Equation {
def apply(left : FOLTerm , right : FOLTerm ) = new types.Equation(left, right)
implicit def equation2formula(e : types.Equation) = e.toFormula()
}
abstract class EequalityA {
// the set of rewrite rules is empty in a pure equational theory
final override def rewrite_rules() = Set[Tuple2[FOLFormula, FOLFormula]]()
override def reequal_to(s : FOLFormula, t : FOLFormula) : Boolean =
reequal_to_(s, t)
//Normalization(s,0,"x", s.symbols.map(_.toString).toSet)._1,
//Normalization(t,0,"x", t.symbols.map(_.toString).toSet)._1)
private def reequal_to_(s : FOLFormula, t : FOLFormula) : Boolean = {
def tuples_equals(el : Tuple2[FOLTerm, FOLTerm] ) : Boolean = (word_equalsto(el._1, el._2))
(s,t) match {
case (Atom( sym1, args1: List[FOLTerm]), Atom( sym2, args2: List[FOLTerm])) =>
(sym1 == sym2) &&
(args1.length == args2.length) &&
( (args1 zip args2) forall (tuples_equals))
case (Neg(f1), Neg(f2)) =>
reequal_to_(f1,f2)
case (And(f1,f2), And(g1,g2)) =>
reequal_to_(f1,g1) && reequal_to_(f2,g2)
case (Or(f1,f2), Or(g1,g2)) =>
reequal_to_(f1,g1) && reequal_to_(f2,g2)
// these two rules work only if the variables are canonically renamed in both formulas
case (All(x1,t1), All(x2,t2)) =>
(x1 == x2) && reequal_to_(t1,t2)
case (Ex(x1,t1), Ex(x2,t2)) =>
(x1 == x2) && reequal_to_(t1,t2)
case default => false
}
}
def word_equalsto(s : FOLTerm, t : FOLTerm) : Boolean
def unifies_with(s : FOLTerm, t : FOLTerm) : Option[Substitution[FOLTerm]]
}
object ACUnification {
var algorithms = Map[ConstantSymbolA, FinitaryUnification[FOLTerm]]()
def unify(f:ConstantSymbolA, term1:FOLTerm, term2:FOLTerm) : Seq[Substitution[FOLTerm]] = {
algorithms.get(f) match {
case Some(alg) =>
alg.unify(term1, term2)
case None =>
val alg = new ACUnification(f)
algorithms = algorithms + ((f, alg ))
alg.unify(term1,term2)
}
}
def unify(f:ConstantSymbolA, terms : List[FOLTerm]) : Seq[Substitution[FOLTerm]] = {
// this is very inefficient
terms match {
case Nil => Seq(Substitution[FOLTerm]())
case _::Nil => Seq(Substitution[FOLTerm]())
case x::y::rest =>
val subst_rest : Seq[Substitution[FOLTerm]] = unify(f, y::rest)
val alternatives : Seq[FOLTerm] = subst_rest map (_.apply(y))
val possible_substs : Seq[Seq[Substitution[FOLTerm]]] = (alternatives map (unify(f,x,_)))
val without_nonunifiables : Seq[(Substitution[FOLTerm], Seq[Substitution[FOLTerm]])] = (subst_rest zip possible_substs) filter (! _._2.isEmpty)
// this is nonfunctional, but probably easier to understand
var result : List[Substitution[FOLTerm]] = List[Substitution[FOLTerm]]()
for ( pair <- without_nonunifiables ) {
val sigma = pair._1
for (tau <- pair._2)
result = (sigma compose tau) :: result
}
result
}
}
val debuglevel = 0 // 0... no output, 1 ... show unifiers after unification 2--- also before unification 3... maximum
def debug(level: Int, msg: String) = if (debuglevel >= level) println("DEBUG: " + msg + " \\\\\\\\")
}
class ACUnification(val f:ConstantSymbolA) extends FinitaryUnification[FOLTerm] {
import ACUnification.debug
import ACUtils._
import ListUtils._
import MathUtils._
import TermUtils._
import Vector._
type TermCount = (FOLTerm, Int)
type ListEntry = (Int, Vector, List[Vector])
type MapEntry = (Int, List[Vector])
type ArrayEntry = (Vector, MapEntry)
def unify(term1:FOLTerm, term2:FOLTerm) : List[Substitution[FOLTerm]] = unify(f,term1,term2)
def unify(function: ConstantSymbolA, term1: FOLTerm, term2: FOLTerm): List[Substitution[FOLTerm]] = {
unify(function, List((term1, term2)), List(Substitution[FOLTerm]()))
}
def unify(function: ConstantSymbolA,
terms: List[(FOLTerm, FOLTerm)],
substs: List[Substitution[FOLTerm]]): List[Substitution[FOLTerm]] = {
debug(3, "unifying " + terms + " substitutions are: " + substs)
terms match {
case (term1, term2) :: rest =>
term1 match {
// comparing constant to sthg else
case FOLConst(c1) =>
term2 match {
// if the two constants are equal, the substitution is not changed, else not unifiable
case FOLConst(c2) =>
if (c1 == c2) collect(substs, ((s: Substitution[FOLTerm]) => unify(function, rest, List(s))))
else Nil
// second one is a var => flip & variable elimination
case FOLVar(v) =>
val ve = Substitution[FOLTerm](term2.asInstanceOf[FOLVar], term1)
val newterms = rest map makesubstitute_pair(ve)
collect(substs, (s: Substitution[FOLTerm]) => unify(function, newterms, List(ve compose s))) //TODO:check, ok
// anything else is not unifiable
case _ =>
Nil
}
// comparing function symbol to sthg else
case Function(f1, args1) =>
term2 match {
// decomposition or ac unification, if the function symbols are the same, else not unifiable
case Function(f2, args2) =>
if (f1 != f2) {
Nil
} else if (args1.length != args2.length) {
throw new Exception("function symbols both named " + f1 + " but with different arity "
+ args1.length + " and " + args2.length + "encountered!")
} else if (f1 == function) {
//ac unification
val acunivs = ac_unify(function, term1, term2)
collect(acunivs, ((acu: Substitution[FOLTerm]) =>
collect(substs, ((subst: Substitution[FOLTerm]) =>
unify(function, rest map makesubstitute_pair(subst), List((acu compose subst)))) //TODO:check, ok
)))
} else {
//non ac unification => decomposition
collect(substs, (s: Substitution[FOLTerm]) => unify(function, (args1 zip args2) ::: rest, List(s)))
}
// variable as second term: flip & variable elimination
case FOLVar(v) =>
//occurs check
if (occurs(term2.asInstanceOf[FOLVar], term1)) {
Nil
} else {
val ve = Substitution[FOLTerm](term2.asInstanceOf[FOLVar], term1)
val newterms = rest map makesubstitute_pair(ve)
collect(substs, (s: Substitution[FOLTerm]) => unify(function, newterms, List((ve compose s)))) //TODO:check, ok
}
// anything else is not unifiable
case _ =>
Nil
}
//variable elimination
case FOLVar(v) =>
term2 match {
case FOLVar(w) =>
if (v == w) {
collect(substs, (s: Substitution[FOLTerm]) => unify(function, rest, List(s)))
} else {
val ve = Substitution[FOLTerm](term1.asInstanceOf[FOLVar], term2)
val newterms = rest map makesubstitute_pair(ve)
collect(substs, (s: Substitution[FOLTerm]) => unify(function, newterms, List((ve compose s).asInstanceOf[Substitution[FOLTerm]]))) //TODO:check, ok
}
case _ =>
//occurs check
if (occurs(term1.asInstanceOf[FOLVar], term2)) {
Nil
} else {
val ve = Substitution[FOLTerm](term1.asInstanceOf[FOLVar], term2)
val newterms = rest map makesubstitute_pair(ve)
collect(substs, (s: Substitution[FOLTerm]) => unify(function, newterms, List[Substitution[FOLTerm]]((ve compose s)))) //TODO:check, ok
}
}
//this should be empty in the end
case _ =>
throw new Exception("there should be only variables, constants and functions in first order terms!")
}
case Nil =>
substs
}
}
def ac_unify(function: ConstantSymbolA, term1: FOLTerm, term2: FOLTerm): List[Substitution[FOLTerm]] = {
debug(1, "=== unifying " + flatten(function, term1) + " with " + flatten(function, term2) + "===")
val counted_symbols = countSymbols(nestedFunctions_toList(function, term1), nestedFunctions_toList(function, term2))
val (ts1, count1) = counted_symbols.unzip
val (lhs, rhs) = (counted_symbols partition (_._2 > 0)) // this works because countSymbols only returns values != 0
val vlhs = Vector(lhs map (_._2))
val vrhs = Vector(rhs map (_._2 * -1))
val (unifiable_invariant, unifiable_condition) = createConstantFilter((lhs map (_._1)) ::: (rhs map (_._1)))
val vlhs_length = vlhs.length
val vrhs_length = vrhs.length
if ((lhs == Nil) && (rhs == Nil)) {
List(Substitution[FOLTerm]())
} else if ((lhs == Nil) || (rhs == Nil)) {
Nil
} else {
val basis = LankfordSolver solve (vlhs, vrhs) sortWith Vector.lex_<
//val sums = calculateSums_new(basis, vlhs, vrhs, unifiable_invariant)
val sums = calculateSums_new_efficient(basis, vlhs, vrhs, unifiable_invariant)
//debug(1,"difference :"+(sums-sums2)+ " and "+(sums2-sums))
var results: List[Vector] = Nil
// filter vectors
for (v <- sums.toList) {
if (gzero(v._1))
results = v._1 :: results
}
results = results.filter(unifiable_condition)
// remove vectors which are subsumed by smaller vectors
results = removeSubsumedVectors_new(results, Vector(vlhs.vector ::: vrhs.vector))
//debug(1,"number of solutions: "+results.size)
// associate every base vector to a fresh logical variable
var varmap = Map[Vector, VariableSymbolA]()
debug(1, "basis:")
for (b <- basis) {
val v: VariableSymbolA = generator.getFreshVariable()
debug(1, "$" + v + "<-" + b + "$")
varmap = varmap + ((b, v))
}
for (s <- sums.toList.sortWith((x: (Vector, List[(Int, List[Vector])]), y: (Vector, List[(Int, List[Vector])])) => Vector.lex_<(x._1, y._1)))
debug(1, "whole sums " + s)
for (s <- results)
debug(1, "sum $" + s + "$ with representative $" + sums(s).map(_._2.map(varmap(_))))
// debug(1,"sum $"+s+"$ with representative $"+sums(s).head._2.map(varmap(_))+"$ sum in map=$"+sums(s).head._1+"$")
//some helper functions, could be factored out
def extract(pair: (Int, List[Vector])): List[Vector] = pair._2
def ntimes[A](x: A, n: Int): List[A] = if (n <= 0) Nil else x :: ntimes(x, n - 1)
def nfilter[A](l: List[A], count: (A => Int)): List[A] = {
l match {
case x :: xs => ntimes(x, count(x)) ::: nfilter(xs, count)
case Nil => Nil
}
}
def createVectors(mapping: Map[Vector, VariableSymbolA], v: List[Vector]): List[FOLTerm] = {
//val len = v.length
val len = if (v == Nil) 0 else v(0).length - 1
//println("create vectors length="+len+" v="+v)
val expanded: List[(Int, List[Vector])] = ((0 to len) map ((_, v))).toList //pair vector with every index of a component
val filtered: List[List[VariableSymbolA]] =
expanded map (x =>
(nfilter(x._2, ((v: Vector) => v.vector(x._1)))) //take the vector the number of times of the actual component
map (mapping(_)) //and convert them to VariableSymbols
)
val ltt: List[VariableSymbolA] => FOLTerm = listToTerm(function, _)
filtered map ltt
}
debug(2, "" + results.length + " ac unification preresults:" + results)
//convert results to list of terms
var converted: List[List[FOLTerm]] = Nil
for (r <- results) {
for (i <- sums(r).map(extract))
//val i = sums(r).map(extract).head //one representative is enough
converted = createVectors(varmap, i) :: converted
}
debug(1, "$" + converted.length + "$ ac unification results: $" + converted + "$")
var unified_terms: List[List[FOLTerm]] = Nil
var unifiers: List[Substitution[FOLTerm]] = Nil
for (c <- converted) {
val zc = ts1 zip c
//println("finding unifier for: "+zc)
val us = unify(function, zc, List(Substitution[FOLTerm]()))
for (unifier <- us) {
val uterm: List[FOLTerm] = ts1 map ((x: FOLTerm) => unifier.apply(x))
//println("yay found one:" + uterm)
//unifiers = subst :: unifiers
unifiers = unifier :: unifiers
unified_terms = uterm :: unified_terms
}
}
val term_context = (ts1 map ((x: FOLTerm) => getVariableContext(x))) reduceLeft (_ ++ _)
//remove variables not in the original term from the substitution
debug(2, "and the unifiers are:")
var reduced_unifiers: List[Substitution[FOLTerm]] = Nil
val base_variables = varmap.values.toList.map(FOLVar(_))
for (u <- unifiers) {
debug(2, "$" + u + "$")
//val splitted : Tuple2[List[(FOLVar,FOLTerm)], List[(FOLVar,FOLTerm)]] = (u.mapFOL.partition(term_context contains _._1)).asInstanceOf[Tuple2[List[(FOLVar,FOLTerm)], List[(FOLVar,FOLTerm)]]]
// val umap = (u.map.elements.toList).asInstanceOf[List[(FOLVar, FOLTerm)]]
val umap = u.map.toList.asInstanceOf[List[(FOLVar, FOLTerm)]]
val in_term = umap.filter((x: (FOLVar, FOLTerm)) => (term_context contains x._1))
debug(3, "variables in term: " + in_term)
//apply substitutions of z_i<-t to the rest of the substituted terms, since z_i is free
val not_in_term = Substitution[FOLTerm](umap.filter((x: (FOLVar, FOLTerm)) => !(term_context contains x._1)))
val in_term_reduced = in_term map ((x: (FOLVar, FOLTerm)) => (x._1, not_in_term.apply(x._2)))
//if a variable from the original term is renamed to a base variable, switch the renaming
var renamed = in_term_reduced
var switch_candidates = renamed filter ((x: (FOLVar, FOLTerm)) => (base_variables contains x._2))
while (switch_candidates.length > 0) {
val candidate = switch_candidates.head
val subst = Substitution[FOLTerm]((candidate._2.asInstanceOf[FOLVar]), candidate._1)
renamed = (renamed filter (_ != candidate)) map ((x: (FOLVar, FOLTerm)) => (x._1, subst apply x._2))
switch_candidates = renamed filter ((x: (FOLVar, FOLTerm)) => (base_variables contains x._2))
}
reduced_unifiers = Substitution[FOLTerm](renamed) :: reduced_unifiers
}
reduced_unifiers
}
}
def calculateSums(basis: List[Vector], vlhs: Vector, vrhs: Vector, invariant: (Vector => Boolean)) = {
var sums = Map[Vector, List[(Int, List[Vector])]]()
var oldnewest: List[(Int, Vector, List[Vector])] = Nil
var newest: List[(Int, Vector, List[Vector])] = Nil
for (b <- basis) {
val weight = vector_weight(vlhs, b)
sums = sums + ((b, List((weight, List(b)))))
newest = (weight, b, List(b)) :: newest
}
val maxweight = calculateMaxWeight(vlhs, vrhs)
debug(1, "upper bound to sum of vectors: " + maxweight)
while (newest.size > 0) {
oldnewest = newest
newest = Nil
for (v <- oldnewest) {
val candidates = basis map (x => (vector_weight(vlhs, x) + v._1, x + v._2, x :: v._3))
for (candidate <- candidates) {
val (weight, sum, vectors) = candidate
val entry: (Int, List[Vector]) = (weight, vectors sortWith Vector.lex_<)
val newest_entry: (Int, Vector, List[Vector]) = (weight, sum, entry._2)
if (weight <= maxweight) { //drop any sum that is too large
if (sums.contains(sum)) {
// if the linear combination was already generated, add it to the list
val l: List[(Int, List[Vector])] = sums(sum)
if (!l.contains(entry))
sums = sums + ((sum,entry :: l))
} else {
// else create a new entry and calculate possible new linear combinations
sums = sums + ((sum, List(entry)))
//if (weight < maxweight && sum.anyeqzero && invariant)
if (invariant(sum)) //TODO: check if the anyeqzero is correct, the invariant has to be true anyway
newest = newest_entry :: newest
}
}
}
}
}
sums
}
// this is rather inefficient, but generates fewer solutions
def calculateSums_new(basis: List[Vector], vlhs: Vector, vrhs: Vector, invariant: (Vector => Boolean)) = {
var sums = Map[Vector, List[(Int, List[Vector])]]()
val maxweight = calculateMaxWeight(vlhs, vrhs)
debug(1, "upper bound to sum of vectors: " + maxweight)
val zero = basis(0).zero
val ps = powerset(basis)
val pswithsums = ps map ((x: List[Vector]) => {val sum = vectorSum(x, zero); (sum, x, vector_weight(vlhs, sum))})
var solutions = 0
for (i <- pswithsums) {
debug(1, "fullsum " + i._1 + " weight=" + i._3 + " list=" + i._2)
solutions += i._2.length
}
debug(1, "# of solutions " + solutions)
val ps_inv = pswithsums filter ((x: (Vector, List[Vector], Int)) => invariant(x._1) && (x._3 <= maxweight) && (x._3 > 0))
for (p <- ps_inv) {
val (sum, vs, weight) = p
sums.get(sum) match {
case Some(list) => sums = sums + ((sum, (weight, vs) :: list))
case None => sums = sums + ((sum, List((weight, vs))))
}
}
sums
}
def calculateSums_new_efficient(basis: List[Vector], vlhs: Vector, vrhs: Vector, invariant: (Vector => Boolean)) :
Map[Vector, List[(Int, List[Vector])]] = {
var sums = Map[Vector, List[(Int, List[Vector])]]()
val maxweight = calculateMaxWeight(vlhs, vrhs)
val zero = basis(0).zero
val invariant_ = (x: Vector) => invariant(x) && (vector_weight(vlhs, x) <= maxweight)
val fpowerset = filterpowerset((zero, Nil: List[Vector]), basis, invariant_)
for (s <- fpowerset) {
val (sum, vectors) = s
sums.get(sum) match {
case None =>
sums = sums + ((sum, List((vector_weight(vlhs, sum), vectors))))
case Some(entry) =>
val new_entry = (vector_weight(vlhs, sum), vectors)
if (!entry.contains(new_entry))
sums = sums + ((sum, new_entry :: entry))
}
}
sums
}
def filterpowerset(in: (Vector, List[Vector]), still_left: List[Vector], invariant: (Vector => Boolean)): List[(Vector, List[Vector])] = {
still_left match {
case Nil => List(in)
case _ => rflattenLists(still_left map ((x: Vector) => filterpowerset(in, dropuntil(x, still_left), invariant))) :::
rflattenLists(still_left map ((x: Vector) => {
val in_new = (in._1 + x, x :: in._2)
if (invariant(in_new._1))
filterpowerset(in_new, dropuntil(x, still_left), invariant)
else
Nil
}))
}
}
// convert list of variable symbols to a term f(x_1,f(x_2, ...))
def listToTerm(function: ConstantSymbolA, terms: List[VariableSymbolA]): FOLTerm = {
terms match {
case x :: Nil => FOLVar(x)
case x :: xs => Function(function, List(FOLVar(x), listToTerm(function, xs)))
case Nil =>
throw new Exception("cannot convert empty list to term, there is no unit element!")
}
}
def composable_by(v: Vector, vs: List[Vector]): Boolean = {
vs match {
case Nil => false
case _ =>
val reduced = (vs map (_ - v))
if (reduced contains v.zero)
true
else {
composable_by(v, reduced filter gzero)
}
}
}
def vector_weight(vlhs: Vector, v: Vector): Int = vlhs * Vector(v.vector slice (0, vlhs.length))
def calculateMaxWeight(l: Vector, r: Vector): Int = {
var maxab = 0
var lcm_act = 0
for (i <- l.vector)
for (j <- r.vector) {
lcm_act = lcm(i, j)
if (lcm_act > maxab)
maxab = lcm_act
}
return max(l.length, r.length) * maxab
}
// counts the number of symbols; those in terms1 count positively, those in terms2 negatively
def countSymbols(terms1: List[FOLTerm], terms2: List[FOLTerm]): List[TermCount] = {
var result: List[TermCount] = Nil
for (t <- terms1) {
result = insertTerm(t, result, 1)
}
for (t <- terms2) {
result = insertTerm(t, result, -1)
}
result filter (_._2 != 0)
}
// finds term in list and increases its counter
def insertTerm(term: FOLTerm, list: List[TermCount],i:Int): List[TermCount] = {
list match {
case Nil => List((term, i))
case (lterm, count) :: rest =>
if (term == lterm)
(lterm, count + i) :: rest
else
(lterm, count) :: insertTerm(term, rest, i)
}
}
// creates a function that applies a given substitution to a pair of terms
def makesubstitute_pair(subst: Substitution[FOLTerm]): (((FOLTerm, FOLTerm)) => (FOLTerm, FOLTerm)) =
(x: (FOLTerm, FOLTerm)) => (subst.apply(x._1), subst.apply(x._2))
// occurs check : true iff term contains v
def occurs(v: FOLVar, term: FOLTerm): Boolean = {
term match {
case FOLVar(w) => v == term
case FOLConst(_) => false
case Function(_, args) => args.foldLeft(false)(_ || occurs(v, _))
}
}
// creates a function, which checks if a vector is <= 1 at the given indices
def makeLTEQ1Filters(ns: List[Int]): (Vector => Boolean) = (v: Vector) =>
(ns map (v.vector(_) <= 1)).foldLeft(true)(_ && _)
// creates a function, which checks if a vector is == 1 at the given indices
def makeEQ1Filters(ns: List[Int]): (Vector => Boolean) = (v: Vector) =>
(ns map (v.vector(_) == 1)).foldLeft(true)(_ && _)
// creates two filters that checks if the number of terms that later has to be unified with a constant or
// function term does not exceed 1. the first function is true as long as the corresponding components are <= 1,
// the second is true as long the corresponding components are exactly 1.
// the first function is intended to be checked while generating solutions, the second is to be checked after
// all solutions have been generated
def createConstantFilter(symbols: List[FOLTerm]): ((Vector => Boolean), (Vector => Boolean)) = {
var i: Int = 0
var indices: List[Int] = Nil
for (s <- symbols) {
s match {
case FOLVar(_) =>; //do nothing
case FOLConst(_) => indices = i :: indices
case Function(_, _) => indices = i :: indices
case _ => throw new Exception("unhandled term type " + s.getClass + " of term " + s)
}
i += 1
}
(makeLTEQ1Filters(indices), makeEQ1Filters(indices))
}
}
object ACUtils {
import ACUnification.debug
import TermUtils.term_<
def structural_fold(fun : (FOLTerm => FOLTerm), formula: FOLFormula): FOLFormula =
formula match {
case Atom(p, args) => Atom(p, args map ((x:FOLTerm) => fun(x)))
case Neg(l) => Neg(structural_fold(fun,l))
case All(q,l) => All(q,structural_fold(fun,l))
case Ex(q,l) => Ex(q,structural_fold(fun,l))
case And(l,r) => And(structural_fold(fun,l), structural_fold(fun,r))
case Or(l,r) => Or(structural_fold(fun,l), structural_fold(fun,r))
case Imp(l,r) => Imp(structural_fold(fun,l), structural_fold(fun,r))
case _ => throw new Exception("Unkonwn operator during structrual folding of formula!")
}
//performs the flattening operation below on formulas
def flatten(f: ConstantSymbolA, formula: FOLFormula): FOLFormula = structural_fold((x:FOLTerm) => flatten(f,x), formula )
// performs the rewrite rule f(s1, ... , f(t1, ... ,tm), ...sn) -> f(s1, ... ,t1, ... ,tm, ...sn) on the
// given term (see also: Lincoln 89 "Adventures in Associative-Commutative Unification") and sorts the
// the argument list lexicographically
def flatten(f: ConstantSymbolA, term: FOLTerm): FOLTerm = {
term match {
case FOLVar(_) => term
case FOLConst(_) => term
case Function(fun, args) =>
if (f == fun) {
Function(fun, ((args map ((x: FOLTerm) => stripFunctionSymbol(f, x))).reduceRight(_ ::: _)
map ((x: FOLTerm) => flatten(f, x))) sortWith term_<)
} else {
Function(fun, args map ((x: FOLTerm) => flatten(f, x)))
}
}
}
// flatten but removes the neutral element, i.e. f(x) = x, f() = e
def flatten_andfiltersymbol(f: ConstantSymbolA, e:ConstantSymbolA, formula: FOLFormula): FOLFormula =
structural_fold((x:FOLTerm) => flatten_andfiltersymbol(f,e,x), formula )
def flatten_andfiltersymbol(f: ConstantSymbolA, e:ConstantSymbolA, term: FOLTerm): FOLTerm =
sortargsof_in(f, flatten_andfiltersymbol_withoutsorting(f,e,term) )
def flatten_andfiltersymbol_withoutsorting(f: ConstantSymbolA, e:ConstantSymbolA, term: FOLTerm): FOLTerm = {
term match {
case FOLVar(_) => term
case FOLConst(_) => term
case Function(fun, args) =>
if (f == fun) {
val c = FOLConst(e)
val args_ = (((args map ((x: FOLTerm) => stripFunctionSymbol(f, x))).reduceRight(_ ::: _) map
((x: FOLTerm) => flatten_andfiltersymbol_withoutsorting(f, e, x)))
sortWith term_<) filterNot (_ == c)
args_ match {
case Nil => FOLConst(e)
case List(t) => t
case _ => Function(fun,args_)
}
} else {
Function(fun, args map ((x: FOLTerm) => flatten_andfiltersymbol_withoutsorting (f, e, x)))
}
}
}
def sortargsof_in(f : ConstantSymbolA, t : FOLTerm) : FOLTerm = t match {
case Function(sym, args) =>
val args_ = args map (sortargsof_in(f,_))
if (f == sym)
Function(sym, args_ sortWith term_< )
else
Function(sym, args_)
case _ => t
}
def sortargsof_in(fs : List[ConstantSymbolA], t : FOLTerm) : FOLTerm = t match {
case Function(sym, args) =>
val args_ = args map (sortargsof_in(fs,_))
if (fs contains sym)
Function(sym, args_ sortWith term_< )
else
Function(sym, args_)
case _ => t
}
// removes the nesting of f in a term to a list - since the term f(g(f(x,y),z) should rewrite to
// f(x,y,z) instead of f(f(x,y),z), it is preferred to use flatten
def stripFunctionSymbol(f: ConstantSymbolA, term: FOLTerm): List[FOLTerm] = {
term match {
case Function(fun, args) =>
if (f == fun)
(args map ((x: FOLTerm) => stripFunctionSymbol(f, x))).reduceRight(_ ::: _)
else
List(term)
case _ => List(term)
}
}
//TODO: refactor
def nestedFunctions_toList(function: ConstantSymbolA, term: FOLTerm): List[FOLTerm] = {
term match {
case v: FOLVar => List(v)
//case c : FOLConst => List(c)
case Function(name, args) =>
if (name == function) {
val join = ((x: List[FOLTerm], y: List[FOLTerm]) => x ++ y)
args.map(nestedFunctions_toList(function, _)) reduceLeft join
} else {
List(Function(name, args))
}
case _ =>
Nil
}
}
def removeSubsumedVectors_new(arg: List[Vector], weight: Vector): List[Vector] = {
var removed: List[Vector] = Nil
val sortedarg = arg sortWith (_ * weight < _ * weight)
debug(1, "sorted list by " + weight + " is " + sortedarg)
for (v <- sortedarg) {
if (!linearlydependent_on(v, removed)) {
removed = v :: removed
debug(1, "adding " + v + " to result list")
} else {
debug(1, "throwing away " + v)
}
}
removed
}
def linearlydependent_on(v: Vector, list: List[Vector]): Boolean = {
var changed = true
var vs: List[Vector] = List(v)
while (changed) {
changed = false
var newones: List[Vector] = Nil
for (i <- vs)
newones = newones ::: (list map (i - _))
debug(4, "newones=" + newones)
if (newones contains v.zero) {
debug(4, "" + v + " is linearly dependent on " + list)
return true
}
val newonesgz = newones filter Vector.geqzero
if (newonesgz.length > 0) {
changed = true
vs = newonesgz
debug(3, ("v=" + v + " vs=" + vs))
}
}
return false
}
}
class ACUEquality(val function_symbol : ConstantSymbolA, val zero_symbol : ConstantSymbolA) extends EequalityA {
import ACUtils.flatten
private class Parser(input : String) extends StringReader(input) with SimpleFOLParser
private def parse(s:String) = (new Parser(s)).formula.asInstanceOf[FOLTerm]
private val zero = FOLConst(zero_symbol)
private def f(s:FOLTerm, t:FOLTerm) = Function(function_symbol, List(s,t))
override def equational_rules() : Set[types.Equation] = {
val x = FOLVar(new VariableStringSymbol("x"))
val y = FOLVar(new VariableStringSymbol("y"))
val z = FOLVar(new VariableStringSymbol("z"))
val assoc = Equation( f(x, f(y,z)), f(f(x,y),z))
val comm = Equation( f(x, y), f(y, x))
val unit = Equation( f(x, zero), x)
Set(assoc, comm, unit)
}
override def word_equalsto(s : FOLTerm, t : FOLTerm) : Boolean = {
(flatten (function_symbol, s)) syntaxEquals (flatten (function_symbol, t))
}
//todo: implementation
override def unifies_with(s : FOLTerm, t : FOLTerm) : Option[Substitution[FOLTerm]] = None
}
class MulACEquality(val function_symbols : List[ConstantSymbolA]) extends EequalityA {
import ACUEquality._
def f(sym:ConstantSymbolA, x:FOLTerm, y:FOLTerm) = Function(sym,List(x,y))
def flatten(f : FOLFormula) = function_symbols.foldLeft(f)( (formula : FOLFormula, sym:ConstantSymbolA) => ACUtils.flatten(sym, formula) )
override def equational_rules() : Set[types.Equation] = {
val x = FOLVar(new VariableStringSymbol("x"))
val y = FOLVar(new VariableStringSymbol("y"))
val z = FOLVar(new VariableStringSymbol("z"))
val assoc = function_symbols map( fs => Equation( f(fs,x, f(fs,y,z)), f(fs,f(fs,x,y),z)))
val comm = function_symbols map( fs => Equation( f(fs, x, y), f(fs, y, x)) )
(assoc ++ comm) toSet
}
override def word_equalsto(s:FOLTerm, t:FOLTerm) : Boolean = fold_flatten(function_symbols,s) syntaxEquals fold_flatten(function_symbols,t)
//todo: implementation
override def unifies_with(s : FOLTerm, t : FOLTerm) : Option[Substitution[FOLTerm]] = None
}
class MulACUEquality(override val function_symbols : List[ConstantSymbolA], val zero_symbols : List[ConstantSymbolA]) extends MulACEquality(function_symbols) {
require { function_symbols.length == zero_symbols.length }
import ACUEquality._
val fzsymbols = function_symbols zip zero_symbols
override def equational_rules() : Set[types.Equation] = {
val x = FOLVar(new VariableStringSymbol("x"))
val acrules : Set[types.Equation] = super.equational_rules()
val urules = fzsymbols map ((i : (ConstantSymbolA, ConstantSymbolA)) => { Equation( f(i._1, x, FOLConst(i._2)), x) })
acrules ++ urules.toSet
}
override def flatten(f : FOLFormula) = fzsymbols.foldLeft(f)( (formula : FOLFormula, sym:(ConstantSymbolA, ConstantSymbolA)) => ACUtils.flatten_andfiltersymbol(sym._1, sym._2, formula) )
override def word_equalsto(s:FOLTerm, t:FOLTerm) : Boolean = fold_flatten_filter(function_symbols, zero_symbols, s) syntaxEquals fold_flatten_filter(function_symbols, zero_symbols, t)
//todo: implementation
override def unifies_with(s : FOLTerm, t : FOLTerm) : Option[Substitution[FOLTerm]] = None
}
object ACUEquality {
import ACUtils.{flatten, flatten_andfiltersymbol_withoutsorting, sortargsof_in}
def fold_flatten(fs : List[ConstantSymbolA], s:FOLTerm) = fs.foldLeft(s)( (term : FOLTerm, f : ConstantSymbolA) => flatten(f, term) )
def fold_flatten_filter(fs : List[ConstantSymbolA], cs : List[ConstantSymbolA], s:FOLTerm) : FOLTerm =
sortargsof_in(fs, (fs zip cs).foldLeft(s)(
(term : FOLTerm, el : ( ConstantSymbolA, ConstantSymbolA) ) => flatten_andfiltersymbol_withoutsorting(el._1, el._2, term) )
)
def factor_clause(e : EequalityA, clause : FSequent) : FSequent = {
var antecedent : Seq[FOLFormula] = clause._1.asInstanceOf[Seq[FOLFormula]]
var succedent : Seq[FOLFormula] = clause._2.asInstanceOf[Seq[FOLFormula]]
var ant : Seq[FOLFormula] = Nil
while (antecedent.nonEmpty ) {
ant = ant.+:(antecedent.head)
antecedent = antecedent filterNot ((g:FOLFormula) => e.reequal_to(antecedent head,g))
}
var succ : Seq[FOLFormula] = Nil
while (succedent.nonEmpty ) {
succ = succ.+:(succedent.head)
succedent = succedent filterNot ((g:FOLFormula) => e.reequal_to(succedent head,g))
}
FSequent(ant, succ)
}
def tautology_removal(theory : EequalityA, clauses : List[FSequent]) : List[FSequent] = {
clauses.foldLeft (List[FSequent]()) ( (done : List[FSequent], s : FSequent) =>
if (s._1.exists( (pos : Formula) => s._2.exists( (neg : Formula) => theory.reequal_to(pos.asInstanceOf[FOLFormula], neg.asInstanceOf[FOLFormula]) )))
done
else
done.+:(s)
)
}
//private because this only works on factorized formulas
private def clause_restricted_subsumed_in(theory : EequalityA, clause : FSequent, list : List[FSequent]) = list.exists( (s : FSequent) =>
clause._1.length == s._1.length &&
clause._2.length == s._2.length &&
clause._1.forall((f:Formula) => s._1.exists((g:Formula) => theory.reequal_to(f.asInstanceOf[FOLFormula], g.asInstanceOf[FOLFormula]) )) &&
clause._2.forall((f:Formula) => s._2.exists((g:Formula) => theory.reequal_to(f.asInstanceOf[FOLFormula], g.asInstanceOf[FOLFormula]) ))
)
//returns true if clause is reequal some element of list modulo the theory, where clause may be weakened (i.e. have additional literals)
def clause_restricted_subsumed_in2(theory : EequalityA, clause : FSequent, list : List[FSequent]) = list.exists( (s : FSequent) =>
s._1.forall((f:Formula) => clause._1.exists((g:Formula) => theory.reequal_to(f.asInstanceOf[FOLFormula], g.asInstanceOf[FOLFormula]) )) &&
s._2.forall((f:Formula) => clause._2.exists((g:Formula) => theory.reequal_to(f.asInstanceOf[FOLFormula], g.asInstanceOf[FOLFormula]) ))
)
def restricted_subsumption(theory : EequalityA, clauses : List[FSequent]) : List[FSequent] =
apply_subsumptionalgorithm_to( clause_restricted_subsumed_in2(theory,_,_), clauses )
def apply_subsumptionalgorithm_to( subsumes : (FSequent, List[FSequent]) => Boolean, clauses : List[FSequent] ) =
apply_subsumptionalgorithm_to_(subsumes, Nil, clauses)
private def apply_subsumptionalgorithm_to_(subsumed_by : (FSequent, List[FSequent]) => Boolean, clauses : List[FSequent], remaining : List[FSequent]) : List[FSequent] = {
remaining match {
case x::xs => if (subsumed_by(x, clauses))
apply_subsumptionalgorithm_to_(subsumed_by, clauses, xs)
else
apply_subsumptionalgorithm_to_(subsumed_by, (clauses filterNot ((s:FSequent) => subsumed_by(s, List(x) )) ).+:(x), xs)
case Nil=> clauses
}
}
def tautology_deletion(seqs : List[FSequent], e: EequalityA) = {
import at.logic.gapt.language.hol._
seqs.filter(_ match {
case FSequent(_, succedent) => succedent.exists(
(f: Formula) =>
f match {
case Atom(ConstantStringSymbol("="), List(x,y)) => e.word_equalsto(x.asInstanceOf[FOLTerm],y.asInstanceOf[FOLTerm])
case _ => false
}
)
case _ => true
} )
}
}
*/
| gisellemnr/gapt | src/main/scala/at/logic/gapt/language/hol/algorithms/unification/ACUnification.scala | Scala | gpl-3.0 | 37,016 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.prop
import org.scalatest.FunSpec
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.SharedHelpers.thisLineNumber
/**
 * Verifies that `TableDrivenPropertyCheckFailedException` reports an accurate
 * failure location (file name and line number) and correctly exposes its
 * cause and self-equality.
 *
 * NOTE(review): the location assertions use `thisLineNumber - 5`, which
 * counts back exactly five source lines to the failing `forAll` call. Do not
 * insert or remove lines between a `forAll` and its matching
 * `thisLineNumber` expression, or the expected offset breaks.
 */
class TableDrivenPropertyCheckFailedExceptionSpec extends FunSpec with ShouldMatchers with TableDrivenPropertyChecks {

  describe("The TableDrivenPropertyCheckFailedException") {

    it("should give the proper line on a table-driven property check") {
      // Row (6, 5) violates a < b, so the forAll below is guaranteed to fail.
      val examples =
        Table(
          ("a", "b"),
          (1, 2),
          (3, 4),
          (6, 5),
          (7, 8)
        )
      try {
        forAll (examples) { (a, b) => a should be < b }
      }
      catch {
        case e: TableDrivenPropertyCheckFailedException =>
          e.failedCodeFileNameAndLineNumberString match {
            case Some(s) => s should equal ("TableDrivenPropertyCheckFailedExceptionSpec.scala:" + (thisLineNumber - 5)) // must point at the forAll line
            case None => fail("A table-driven property check didn't produce a file name and line number string", e)
          }
        case e => // any other exception type means the wrong exception was produced
          fail("forAll (examples) { (a, b) => a should be < b } didn't produce a TableDrivenPropertyCheckFailedException", e)
      }
    }

    // Same check nested one describe level deeper, to ensure nesting does not
    // affect the reported location.
    describe("even when it is nested in another describe") {
      it("should give the proper line on a table-driven property check") {
        val examples =
          Table(
            ("a", "b"),
            (1, 2),
            (3, 4),
            (6, 5),
            (7, 8)
          )
        try {
          forAll (examples) { (a, b) => a should be < b }
        }
        catch {
          case e: TableDrivenPropertyCheckFailedException =>
            e.failedCodeFileNameAndLineNumberString match {
              case Some(s) => s should equal ("TableDrivenPropertyCheckFailedExceptionSpec.scala:" + (thisLineNumber - 5)) // must point at the forAll line
              case None => fail("A table-driven property check didn't produce a file name and line number string", e)
            }
          case e => // any other exception type means the wrong exception was produced
            fail("forAll (examples) { (a, b) => a should be < b } didn't produce a TableDrivenPropertyCheckFailedException", e)
        }
      }
    }

    it("should return the cause in both cause and getCause") {
      val theCause = new IllegalArgumentException("howdy")
      val tfe = new TableDrivenPropertyCheckFailedException(sde => "doody", Some(theCause), sde => 3, "howdy", List(1, 2, 3), List("a", "b", "c"), 7)
      assert(tfe.cause.isDefined)
      assert(tfe.cause.get === theCause)
      assert(tfe.getCause == theCause)
    }

    it("should return None in cause and null in getCause if no cause") {
      // getCause is the Java-interop accessor; None maps to null there.
      val tfe = new TableDrivenPropertyCheckFailedException(sde => "doody", None, sde => 3, "howdy", List(1, 2, 3), List("a", "b", "c"), 7)
      assert(tfe.cause.isEmpty)
      assert(tfe.getCause == null)
    }

    it("should be equal to itself") {
      val tfe = new TableDrivenPropertyCheckFailedException(sde => "doody", None, sde => 3, "howdy", List(1, 2, 3), List("a", "b", "c"), 7)
      assert(tfe equals tfe)
    }
  }
}
| yyuu/scalatest | src/test/scala/org/scalatest/prop/TableDrivenPropertyCheckFailedExceptionSpec.scala | Scala | apache-2.0 | 3,601 |
package com.olvind.crud
package frontend
import autowire._
import chandu0101.scalajs.react.components.materialui.{DeterminateIndeterminate, MuiCircularProgress}
import japgolly.scalajs.react._
import japgolly.scalajs.react.extra._
import japgolly.scalajs.react.extra.router.RouterCtl
import japgolly.scalajs.react.vdom.prefix_<^._
import upickle.default
import scala.concurrent.Future
import scalacss.ScalaCssReact._
/**
 * Things every Editor has: the authenticated user, the editor description,
 * a callback for reporting timed results, optionally pre-fetched cached
 * data, and the router control used for navigation.
 */
case class EditorBaseProps(
  userInfo: UserInfo,
  editorDesc: EditorDesc,
  onResult: TimedRes ~=> Callback,
  cachedDataOpt: Option[CachedData],
  ctl: RouterCtl[Route]
)
/**
 * An Editor that shows multiple rows: its data is a sequence of rows.
 */
trait EditorBaseMultipleRows extends EditorBase {
  override final type Data = Seq[StrTableRow]
}
/**
 * An editor that shows one single row: its data is exactly one row.
 */
trait EditorBaseSingleRow extends EditorBase {
  override final type Data = StrTableRow
}
/**
 * An Editor that is able to update cells and maintains the state of the
 * data it has fetched itself (it is the "primary" owner of that data).
 */
trait EditorBaseUpdaterPrimary extends EditorBaseUpdater with EditorBasePrimary {

  /** Backend combining update handling with primary data ownership. */
  trait BackendBUP[P <: PropsB, S <: StateBP[S]] extends BackendBU[P, S] with BackendBP[P, S]{

    /** Splice the freshly re-fetched row into the locally held data. */
    def patchRow(id: StrRowId, row: StrTableRow): Callback

    /** Called when the refetch after an update finds no row for `id`. */
    def handleNoRowFoundOnUpdate(id: StrRowId): Callback

    // After a successful update, re-read the row from the server and either
    // patch it into local state or handle its disappearance.
    final override def handleUpdated(id: StrRowId): Callback =
      async((user, remote) => remote.readRow(user, id).call(), None){
        case XSuccess(rowOpt) => rowOpt.fold(handleNoRowFoundOnUpdate(id))(row => patchRow(id, row))
      }
  }
}
/**
 * An Editor that is able to update cells, but is not responsible for
 * maintaining its own state: a parent owns the data and exposes `reload`.
 */
trait EditorBaseUpdaterLinked extends EditorBaseUpdater {

  /** Props for a linked editor: exposes the parent-owned reload action. */
  trait PropsBUL extends PropsB {
    def reload: Callback
  }

  trait BackendBUL[P <: PropsBUL, S <: StateB[S]] extends BackendBU[P, S]{
    // Since this editor does not own its data, both update and delete simply
    // ask the parent to reload.
    final override def handleUpdated(id: StrRowId): Callback =
      $.props.flatMap(_.reload)

    final override def handleDeleted(id: StrRowId): Callback =
      $.props.flatMap(_.reload)
  }
}
/**
 * An Editor that is able to update cells (and delete rows) via the remote
 * Editor endpoint.
 */
trait EditorBaseUpdater extends EditorBase {

  trait BackendBU[P <: PropsB, S <: StateB[S]] extends BackendB[P, S]{

    /** Reaction to a successfully applied update of row `id`. */
    def handleUpdated(id: StrRowId): Callback

    /** Reaction to a successful deletion of row `id`. */
    def handleDeleted(id: StrRowId): Callback

    // Record validation errors for the given row in component state.
    final def handleUpdateFailed(row: StrRowId, vs: Seq[ValidationError]): Callback =
      $.modState(_.withAddedValidationFails(Some(row), vs))

    // Send a single-cell update to the server; on validation failure the
    // errors are stored, on success the row is refreshed via handleUpdated.
    final def updateValue(id: StrRowId)(c: ColumnRef)(v: StrValue): Callback =
      async((user, remote) => remote.update(user, id, c, v).call(), s"Update row $id to ${v.value}".some){
        case XValidation(_, vs) => handleUpdateFailed(id, vs)
        case XSuccess(ok) => handleUpdated(id)
      }

    // Delete a row on the server and propagate the result.
    final def deleteRow(id: StrRowId): Callback =
      async((user, remote) => remote.delete(user, id).call(), s"Delete row $id".some){
        case XSuccess(()) => handleDeleted(id)
      }
  }
}
/**
 * An Editor which is responsible for fetching data from the remote endpoint
 * itself, tracking the fetch lifecycle (initial / loaded / error) in state.
 */
trait EditorBasePrimary extends EditorBase {

  // Three-state fetch lifecycle; `opt` yields the data when present.
  sealed trait DataState{def opt: Option[Data] = None}
  case object InitialState extends DataState
  case class HasDataState(data: Data) extends DataState{override def opt = data.some}
  case class ErrorState(msg: XUserMsg) extends DataState

  /** State that additionally carries the current DataState. */
  trait StateBP[S <: StateBP[S]] extends StateB[S]{
    def data: DataState
    def withDataState(data: DataState): S
  }

  trait BackendBP[P <: PropsB, S <: StateBP[S]] extends BackendB[P, S]{

    /** Kick off the initial remote fetch. */
    def loadInitialData: Callback

    /** Render the successfully fetched data. */
    def renderData(P: P, S: S, t: EditorDesc, data: Data): ReactElement

    def setData(data: DataState, cb: Callback): Callback =
      $.modState(_.withDataState(data), cb)

    // Re-initialization resets to InitialState and refetches.
    override final abstract def reInit: Callback =
      $.modState(_.withDataState(InitialState)) >> loadInitialData

    // Only fetch on init when nothing has been loaded yet.
    override final abstract def init: Callback = {
      super.init >> $.state.map(_.data).flatMap{
        case InitialState ⇒ loadInitialData
        case _ ⇒ Callback.empty
      }
    }

    // Spinner shown while the fetch is in flight.
    final lazy val renderWaiting: ReactElement =
      MuiCircularProgress(
        size = 2.0,
        mode = DeterminateIndeterminate.indeterminate
      )()

    // Dispatch rendering on the current fetch state.
    override final def render(P: P, S: S): ReactElement = {
      val content: ReactElement = S.data match {
        case InitialState ⇒ renderWaiting
        case ErrorState(fail) ⇒ <.pre(<.code(fail.value))
        case HasDataState(row) ⇒ renderData(P, S, P.base.editorDesc, row)
      }
      <.div(
        TableStyle.centered,
        content
      )
    }
  }
}
/**
 * Common base for all editors: props/state/backend scaffolding, cached-data
 * handling, validation-error bookkeeping, and the `async` helper used to run
 * remote calls with timing and result reporting.
 */
trait EditorBase {

  // The concrete data shape (one row or many) is fixed by subtraits.
  type Data

  trait PropsB {
    def base: EditorBaseProps
    final def editorDesc: EditorDesc =
      base.editorDesc
  }

  trait StateB[S <: StateB[S]] {
    def cachedDataOpt: Option[CachedData]
    // Validation errors keyed by row; key None holds row-independent errors.
    def validationFails: Map[Option[StrRowId], Seq[ValidationError]]
    def withCachedData(cd: CachedData): S
    def withValidationFails(rowOpt: Option[StrRowId], ves: Seq[ValidationError]): S
    // Append to any errors already recorded for the row.
    final def withAddedValidationFails(rowOpt: Option[StrRowId], ves: Seq[ValidationError]): S =
      withValidationFails(rowOpt, validationFails.get(rowOpt).fold(ves)(_ ++ ves))
  }

  trait BackendB[P <: PropsB, S <: StateB[S]] {
    def $: BackendScope[P, S]
    implicit def r: Reusability[P]

    // Seed state with cached data from props, when provided.
    def init: Callback =
      $.props.map(p => p.base.cachedDataOpt) flatMap {
        case Some(cd) => $.modState(_.withCachedData(cd))
        case None => Callback.empty
      }

    // Overridden by editors that can meaningfully re-initialize.
    def reInit: Callback =
      Callback.empty

    def render(P: P, S: S): ReactElement

    def clearValidationFail(idOpt: Option[StrRowId])(c: ColumnRef): Callback =
      $.modState(s => s.withValidationFails(idOpt, Seq.empty))

    // Px memoizes derived values until props change.
    lazy val fromProps: Px[FromProps] =
      Px.cbA($.props).map(FromProps)

    /** Values derived from current props: async call info and navigation. */
    sealed case class FromProps(P: P){
      val asyncInfo: AsyncCallback =
        AsyncCallback(
          P.base.userInfo,
          P.base.onResult,
          P.base.editorDesc.editorId
        )
      val showSingleRow: RouterCtl[StrRowId] =
        P.base.ctl.contramap[StrRowId](
          id ⇒ RouteEditorRow(P.base.editorDesc, id)
        )
      val showAllRows: Callback =
        P.base.ctl.set(RouteEditor(P.editorDesc))
    }

    /**
     * Run a remote call `f`, time it with Clock, report the (possibly
     * failed) result through `onResult`, and then run the matching case of
     * `handle` (if any). Technical failures are converted to XTechnicalMsg
     * rather than escaping the Future.
     */
    def async[R](f: (UserInfo, ClientProxy[Editor, String, default.Reader, default.Writer]) => Future[XRes[R]],
        logOpt: Option[String])
        (handle: PartialFunction[XRes[R], Callback]): Callback = {
      val fp = fromProps.value
      val remote = AjaxCall.forEditor(fp.asyncInfo.editorId)[Editor]
      def coord = Coordinate(fp.asyncInfo.editorId, rowOpt = None, colOpt = None)

      Callback.future(
        Clock{c =>
          val ff: Future[XRes[R]] =
            f(fp.asyncInfo.user, remote) recover {
              case th => XTechnicalMsg(th)
            }
          ff.map[Callback]{
            res ⇒
              // Always report the timed result, then run the handler if the
              // partial function is defined for this outcome.
              fp.asyncInfo.onResult(c.timed((coord, res, logOpt))) >>
                (handle.lift(res) getOrElse Callback.empty)
          }
        }
      )
    }
  }
}
| elacin/slick-crud | crud/js/src/main/scala/com/olvind/crud/frontend/EditorBase.scala | Scala | apache-2.0 | 7,134 |
package jp.que.ti.sv
import org.slf4j.LoggerFactory
import jp.que.ti.sv.util.MessageResource
import jp.que.ti.sv.util.StringUtil
import jp.que.ti.sv.util.TrimIsEmptySupport
/**
 * Base interface for single-field validators: an interface that receives a
 * single value and validates it.
 */
trait Validator1ParamIF extends Validator[Option[String]]
/**
 * Base abstract class for single-field validators: receives a single value
 * and validates it.
 *
 * @param messageKey resource key used to look up the validation message
 */
abstract class Validator1Param(val messageKey: String)
  extends Validator1ParamIF
  with ValidatorImpl[Option[String]]
  with MessageResourceSupport
  with TrimIsEmptySupport {
}
/**
 * Base class for optional-field validators: a missing or blank value passes
 * automatically, and only non-blank input is actually validated.
 *
 * @param messageKey resource key used to look up the validation message
 */
abstract class NotRequiredBase(messageKey: String)
  extends Validator1Param(messageKey) {

  /**
   * Validates the value. `None` and values that are empty after trimming
   * are accepted without running the check; everything else is handed to
   * [[isValidInputed]].
   *
   * @return true when the check passes, false otherwise
   */
  def isValid(paramValue: Option[String]): Boolean =
    paramValue.map(v => trim(v)).forall(value => value.isEmpty || isValidInputed(value))

  /** Validation logic applied only to non-blank input. */
  def isValidInputed(paramValue: String): Boolean
}
/** Decorator base for wrapping an existing single-field validator. */
abstract class Validator1ParamDecorator(validator: Validator1ParamIF)
  extends ValidatorDecorator[Option[String], Validator1ParamIF](validator)
| yangiYA/simple-validator | simple-valid/src/main/scala/jp/que/ti/sv/Validator1Param.scala | Scala | mit | 1,455 |
package scala.controllers.bam
import org.scalatestplus.play._
import play.api.libs.json._
import play.api.test.Helpers._
import play.api.test._
import scala.setup.WithToken
/**
 * Test JsonController.
 * The test bam has coordinates in range chr1:761997-762551,
 * 579 paired-end reads of length 101.
 *
 * Each test issues an (authorized) request and checks the status, content
 * type and the number/content of the returned reads.
 */
class JsonSpec extends PlaySpec with OneAppPerSuite with WithToken {
  // Request body selecting the test sample; auth header built from the
  // test token provided by WithToken.
  val body: JsValue = Json.parse(s"""{"sample": "$testSample1"}""")
  val headers = (AUTHORIZATION -> s"Bearer $auth0Token")
  // Total number of reads in the test bam.
  val nreads = 579

  "JsonController" should {

    "provide reads in JSON format if a region is given (POST)" in {
      val request = FakeAuthorizedRequest(POST, "/bam/json?region=chr1:761997-762551").withJsonBody(body)
      val response = route(app, request).get
      status(response) mustBe OK
      contentType(response) must be(Some(JSON))
      // A sub-region must return some, but not all, reads.
      val n = contentAsJson(response).as[JsArray].value.size
      n must be > 1
      n must be < nreads
      // Verify all fields of the first returned read against known values.
      val content = contentAsJson(response)
      val read1 = content.head
      (read1 \\ "name").as[String] must equal("HISEQ:206:C8E95ANXX:3:2113:2451:6639")
      (read1 \\ "flag").as[Int] must equal(99)
      (read1 \\ "chrom").as[String] must equal("chr1")
      (read1 \\ "start").as[Int] must equal(761997)
      (read1 \\ "end").as[Int] must equal(762097) // + 100
      (read1 \\ "mapq").as[Int] must equal(50)
      (read1 \\ "cigar").as[String] must equal("101M")
      (read1 \\ "rnext").as[String] must equal("=")
      (read1 \\ "pnext").as[Int] must equal(762179)
      (read1 \\ "tlen").as[Int] must equal(283)
      (read1 \\ "seq").as[String] must equal("CTACTGACGGTCAAGGCCTCCTCATTGTATTCTGTCCTCCATATCTCTGCTGATTCCCATTTTGTCTATTTCCATTTACCCCACTACTGCTTGCTCAGGTC")
      (read1 \\ "qual").as[String] must equal("AB<B@G>FAF=E@BHFFFAEFAF@?>G><?=FAG=EFAEF@><>EAEAGFAG>>=EFF@>===G=EA<>==EF>>==<FFCF@FA;FAAFA=GFAD?B6;C")
    }

    "provide reads in JSON format if a region is given (GET)" in {
      val request = FakeAuthorizedRequest(GET, s"/bam/json/$testSample1?region=chr1:761997-762200")
      val response = route(app, request).get
      status(response) mustBe OK
      contentType(response) must be(Some(JSON))
      val n = contentAsJson(response).as[JsArray].value.size
      n must be > 1
      n must be < nreads
    }

    // The token may alternatively be passed as a query parameter.
    "provide reads in JSON format if a region is given, with token in URL (GET)" in {
      val request = FakeRequest(GET, s"/bam/json/$testSample1?token=$auth0Token&region=chr1:761997-762200")
      val response = route(app, request).get
      status(response) mustBe OK
      contentType(response) must be(Some(JSON))
    }

    "return all reads if the region is very wide (GET)" in {
      val request = FakeAuthorizedRequest(GET, s"/bam/json/$testSample1?region=chr1:0-10000000")
      val response = route(app, request).get
      status(response) mustBe OK
      contentType(response) must be(Some(JSON))
      contentAsJson(response).as[JsArray].value.size must equal(nreads)
    }

    "return an empty array if the region is outside of scope (GET)" in {
      val request = FakeAuthorizedRequest(GET, s"/bam/json/$testSample1?region=chr1:99761997-99762551")
      val response = route(app, request).get
      status(response) mustBe OK
      contentType(response) must be(Some(JSON))
      contentAsJson(response).as[JsArray].value.size must equal(0)
    }
  }
}
} | chuv-ssrc/bam-server-scala | test/scala/controllers/bam/JsonSpec.scala | Scala | gpl-3.0 | 3,369 |
package com.twitter.finagle.http.codec
import com.twitter.conversions.time._
import com.twitter.finagle.context.{Contexts, Deadline, Retries}
import com.twitter.finagle.http.{Message, Method, Request, Version}
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
// Verifies that broadcast context values (Deadline, Retries) survive a
// round-trip through HTTP message headers via HttpContext.write/read, and
// that malformed or absent headers leave the context empty.
class HttpContextTest extends FunSuite {

  // Fresh GET request used as the carrier message in each test.
  def newMsg(): Message = Request(Version.Http11, Method.Get, "/")

  test("written request deadline matches read request deadline") {
    val m = newMsg()
    val writtenDeadline = Deadline.ofTimeout(5.seconds)
    Contexts.broadcast.let(Deadline, writtenDeadline) {
      HttpContext.write(m)
      // Clear Deadline in the context
      Contexts.broadcast.letClear(Deadline) {
        // Ensure the Deadline was cleared
        assert(Deadline.current == None)
        HttpContext.read(m) {
          val readDeadline = Deadline.current.get
          assert(writtenDeadline == readDeadline)
        }
      }
    }
  }

  test("written request retries matches read request retries") {
    val m = newMsg()
    val writtenRetries = Retries(5)
    Contexts.broadcast.let(Retries, writtenRetries) {
      HttpContext.write(m)
      // Clear Retries in the Context
      Contexts.broadcast.letClear(Retries) {
        // Ensure the Retries was cleared
        assert(Retries.current == None)
        HttpContext.read(m) {
          val readRetries = Retries.current.get
          assert(writtenRetries == readRetries)
        }
      }
    }
  }

  test("invalid context header value causes context to not be set") {
    val m = newMsg()
    // ",,," is not valid marshalled context data.
    m.headers.set("Finagle-Ctx-com.twitter.finagle.foo", ",,,")
    HttpContext.read(m) {
      assert(Contexts.broadcast.marshal.isEmpty)
    }
  }

  test("when there are no context headers, reading returns an empty iterator") {
    val m = newMsg()
    HttpContext.read(m) {
      assert(Contexts.broadcast.marshal.isEmpty)
    }
  }

  test("removing deadline") {
    val m = newMsg()
    val deadlineKey = "Finagle-Ctx-com.twitter.finagle.Deadline"
    Contexts.broadcast.let(Deadline, Deadline.ofTimeout(5.seconds)) {
      HttpContext.write(m)
      assert(m.headerMap.contains(deadlineKey))
      HttpContext.removeDeadline(m)
      assert(!m.headerMap.contains(deadlineKey))
    }
  }
}
| adriancole/finagle | finagle-http/src/test/scala/com/twitter/finagle/http/codec/HttpContextTest.scala | Scala | apache-2.0 | 2,327 |
/*
Facsimile: A Discrete-Event Simulation Library
Copyright © 2004-2020, Michael J Allen.
This file is part of Facsimile.
Facsimile is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later
version.
Facsimile is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with Facsimile. If not, see
http://www.gnu.org/licenses/lgpl.
The developers welcome all comments, suggestions and offers of assistance. For further information, please visit the
project home page at:
http://facsim.org/
Thank you for your interest in the Facsimile project!
IMPORTANT NOTE: All patches (modifications to existing files and/or the addition of new files) submitted for inclusion
as part of the official Facsimile code base, must comply with the published Facsimile Coding Standards. If your code
fails to comply with the standard, then your patches will be rejected. For further information, please visit the coding
standards at:
http://facsim.org/Documentation/CodingStandards/
========================================================================================================================
Scala source file from the org.facsim.anim.cell package.
*/
package org.facsim.anim.cell
import org.facsim.LibResource
import scala.annotation.tailrec
/**
Abstract class for ''AutoMod® cell'' set primitives that own child
primitives.

The number of children is read from the cell data stream at construction
time, followed by the definition of each child in turn.

@see [[http://facsim.org/Documentation/Resources/Sets.htmls Sets]] for further
information.

@constructor Construct a new set primitive from the data stream.

@param scene Reference to the CellScene of which this cell is a part.

@param parent Parent set of this cell primitive. If this value is `None`, then
this cell is the scene's root cell.

@throws org.facsim.anim.cell.IncorrectFormatException if the file supplied is
not an ''AutoMod® cell'' file.

@throws org.facsim.anim.cell.ParsingErrorException if errors are encountered
during parsing of the file.

@see [[http://facsim.org/Documentation/Resources/AutoModCellFile/Sets.html
Sets]]
*/
private[cell] abstract class SetWithChildren(scene: CellScene,
parent: Option[Set])
extends Set(scene, parent) {

/**
Child cells belonging to this set, in the order they appear in the stream.
*/
  private val childCells = readChildren()

/**
Read this set's children from the data stream and return them.

The child count (which must be non-negative) is read first; each child cell
definition is then read sequentially.

@return List of children belonging to the set. This may be empty if no
children are defined.

@throws org.facsim.anim.cell.IncorrectFormatException if the file supplied is
not an ''AutoMod® cell'' file.

@throws org.facsim.anim.cell.ParsingErrorException if errors are encountered
during parsing of the file.
*/
  private final def readChildren() = {

/*
Read the number of children from the data stream; it must be >= 0.
*/
    val childCount = scene.readInt(_ >= 0, LibResource
    ("anim.cell.Set.readChildren"))

/*
List.fill evaluates its element expression childCount times in order, so the
children are read from the stream sequentially - first child first - exactly
as the previous tail-recursive build-and-reverse implementation did.
*/
    List.fill(childCount)(scene.readNextCell(Some(this), false))
  }

/*
@see [[org.facsim.anim.cell.Set!.getChildren]]
*/
  protected[cell] override def getChildren = childCells
}
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.reactive.Observable
import scala.concurrent.duration._
// Tests Observable.dropByTimespan: the source ticks every 500ms, and
// dropByTimespan(2300.millis) discards everything emitted before 2300ms —
// i.e. the first 5 ticks (at 0, 500, 1000, 1500, 2000 ms). Hence the source
// is extended by 5 elements (take(sourceCount + 5)) and the expected sum
// shifts each index by 5.
object DropByTimespanSuite extends BaseOperatorSuite {
  // First surviving element arrives at 2500ms; subsequent ones every 500ms.
  val waitFirst = 2500.millis
  val waitNext = 500.millis

  // No user code is invoked by this operator, so no broken-user-code sample.
  def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = None

  // Surviving elements are 5, 6, ..., sourceCount + 4.
  def sum(sourceCount: Int) =
    (0 until sourceCount).map(_ + 5).sum

  def count(sourceCount: Int) =
    sourceCount

  def createObservable(sourceCount: Int) = Some {
    require(sourceCount > 0, "sourceCount should be strictly positive")
    val o = Observable
      .intervalAtFixedRate(500.millis)
      .take(sourceCount.toLong + 5)
      .dropByTimespan(2300.millis)
    Sample(o, count(sourceCount), sum(sourceCount), waitFirst, waitNext)
  }

  // Same stream, but the source terminates with an error after its elements.
  def observableInError(sourceCount: Int, ex: Throwable) = {
    require(sourceCount > 0, "sourceCount should be strictly positive")
    Some {
      val source = Observable
        .intervalAtFixedRate(500.millis)
        .take(sourceCount.toLong + 5)
      val o = createObservableEndingInError(source, ex)
        .dropByTimespan(2300.millis)
      Sample(o, count(sourceCount), sum(sourceCount), waitFirst, waitNext)
    }
  }

  // An unbounded source, used to verify the subscription can be cancelled.
  override def cancelableObservables(): Seq[Sample] = {
    val o = Observable
      .intervalAtFixedRate(500.millis)
      .dropByTimespan(2300.millis)
    Seq(Sample(o, 0, 0, 0.seconds, 0.seconds))
  }
}
| monix/monix | monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/DropByTimespanSuite.scala | Scala | apache-2.0 | 2,068 |
import org.json4s.JsonDSL._
import org.json4s._
import org.json4s.jackson.Serialization.write
/**
 * Demonstrates the json4s JSON DSL: building objects with `~`, tuples with
 * `->`, and arrays from sequences; each value is serialized with
 * `write` and printed. (Originally created by felipe.almeida@vtex.com.br,
 * 07/06/16.)
 */
object DSL extends App {
  // Needed by json4s `write` for serialization.
  implicit val formats = DefaultFormats

  // connect tuples with "~" to create a json object
  val obj1: JObject = ("foo", "bar") ~ ("baz", "quux")
  println(write(obj1))
  // {"foo":"bar","baz":"quux"}

  // you can also use "->" notation to create tuples
  val obj2: JObject = ("foo" -> "bar") ~ ("baz" -> "quux")
  println(write(obj2))
  // {"foo":"bar","baz":"quux"}

  // use any sequence to make an array of json elements
  val array1: JArray = Seq(obj1,obj2)
  println(write(array1))
  // [{"foo":"bar","baz":"quux"},{"foo":"bar","baz":"quux"}]

  val array2: JArray = Seq("foo","bar")
  println(write(array2))
  // ["foo","bar"]
}
| queirozfcom/scala-sandbox | json4s/src/main/scala/DSL.scala | Scala | mit | 823 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.mv.datamap
import java.util
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.sql.{CarbonEnv, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.CatalogTable
import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeReference, Cast, Expression, NamedExpression, ScalaUDF, SortOrder}
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LogicalPlan, Project}
import org.apache.spark.sql.execution.command.{Field, TableModel, TableNewProcessor}
import org.apache.spark.sql.execution.command.table.CarbonCreateTableCommand
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.parser.CarbonSpark2SqlParser
import org.apache.carbondata.core.datamap.DataMapStoreManager
import org.apache.carbondata.core.metadata.schema.table.{DataMapSchema, RelationIdentifier}
import org.apache.carbondata.mv.plans.modular.{GroupBy, Matchable, ModularPlan, Select}
import org.apache.carbondata.mv.rewrite.{MVPlanWrapper, QueryRewrite}
import org.apache.carbondata.spark.util.CommonUtil
/**
* Utility for MV datamap operations.
*/
object MVHelper {
/**
 * Creates an MV datamap: analyzes the datamap query, derives the backing
 * table's schema from the query output, creates the (hidden) datamap table,
 * and registers the datamap schema with its parent tables.
 *
 * @param sparkSession  session used to analyze the query and create the table
 * @param dataMapSchema schema object to be populated and persisted
 * @param queryString   the MV's defining SQL query
 * @param ifNotExistsSet whether CREATE ... IF NOT EXISTS semantics apply
 */
def createMVDataMap(sparkSession: SparkSession,
    dataMapSchema: DataMapSchema,
    queryString: String,
    ifNotExistsSet: Boolean = false): Unit = {
  val dmProperties = dataMapSchema.getProperties.asScala
  // Inject the dummy "preAgg" function so the parser accepts the query,
  // then drop that column from the analyzed plan.
  val updatedQuery = new CarbonSpark2SqlParser().addPreAggFunction(queryString)
  val logicalPlan = sparkSession.sql(updatedQuery).drop("preAgg").queryExecution.analyzed
  // Decide up front whether this MV can only be rebuilt fully.
  val fullRebuild = isFullReload(logicalPlan)
  // Derive one table field per output attribute of the query.
  val fields = logicalPlan.output.map { attr =>
    val name = updateColumnName(attr)
    val rawSchema = '`' + name + '`' + ' ' + attr.dataType.typeName
    if (attr.dataType.typeName.startsWith("decimal")) {
      // Decimal types carry precision/scale that must be preserved.
      val (precision, scale) = CommonUtil.getScaleAndPrecision(attr.dataType.catalogString)
      Field(column = name,
        dataType = Some(attr.dataType.typeName),
        name = Some(name),
        children = None,
        precision = precision,
        scale = scale,
        rawSchema = rawSchema)
    } else {
      Field(column = name,
        dataType = Some(attr.dataType.typeName),
        name = Some(name),
        children = None,
        rawSchema = rawSchema)
    }
  }
  val tableProperties = mutable.Map[String, String]()
  dmProperties.foreach(t => tableProperties.put(t._1, t._2))

  val selectTables = getTables(logicalPlan)

  // TODO inherit the table properties like sort order, sort scope and block size from parent
  // tables to mv datamap table
  // TODO Use a proper DB
  // The backing table is named "<datamap>_table" in the first parent's DB.
  val tableIdentifier =
  TableIdentifier(dataMapSchema.getDataMapName + "_table",
    selectTables.head.identifier.database)

  // prepare table model of the collected tokens
  val tableModel: TableModel = new CarbonSpark2SqlParser().prepareTableModel(
    ifNotExistPresent = ifNotExistsSet,
    new CarbonSpark2SqlParser().convertDbNameToLowerCase(tableIdentifier.database),
    tableIdentifier.table.toLowerCase,
    fields,
    Seq(),
    tableProperties,
    None,
    isAlterFlow = false,
    isPreAggFlow = false,
    None)

  // Table path may be overridden via the "path" datamap property.
  val tablePath = if (dmProperties.contains("path")) {
    dmProperties("path")
  } else {
    CarbonEnv.getTablePath(tableModel.databaseNameOp, tableModel.tableName)(sparkSession)
  }
  // The datamap table is created invisible (isVisible = false).
  CarbonCreateTableCommand(TableNewProcessor(tableModel),
    tableModel.ifNotExistsSet, Some(tablePath), isVisible = false).run(sparkSession)

  dataMapSchema.setCtasQuery(queryString)
  dataMapSchema
    .setRelationIdentifier(new RelationIdentifier(tableIdentifier.database.get,
      tableIdentifier.table,
      ""))

  // Register all parent tables so dependency tracking works.
  val parentIdents = selectTables.map { table =>
    new RelationIdentifier(table.database, table.identifier.table, "")
  }
  dataMapSchema.setParentTables(new util.ArrayList[RelationIdentifier](parentIdents.asJava))
  dataMapSchema.getProperties.put("full_refresh", fullRebuild.toString)
  DataMapStoreManager.getInstance().saveDataMapSchema(dataMapSchema)
}
/**
 * Derives a column name that is safe for the MV table schema: characters
 * Spark emits for expression-derived names ('(', ')', spaces, '=') are
 * replaced or stripped, and the attribute's qualifier (when present) is
 * prepended with an underscore.
 */
def updateColumnName(attr: Attribute): String = {
  val sanitized = attr.name
    .replace("(", "_")
    .replace(")", "")
    .replace(" ", "_")
    .replace("=", "")
  attr.qualifier.fold(sanitized)(qualifier => qualifier + "_" + sanitized)
}
/**
 * Collects the catalog tables of every base relation referenced by the
 * plan. Note: assumes each LogicalRelation carries a catalog table
 * (`catalogTable.get` throws otherwise) — same behaviour as before.
 */
def getTables(logicalPlan: LogicalPlan): Seq[CatalogTable] =
  logicalPlan.collect {
    case relation: LogicalRelation => relation.catalogTable.get
  }
/**
 * Strips the synthetic "preAgg" expressions (injected while parsing the
 * datamap query) from every Project and Aggregate node in the plan.
 * The method name's typo is kept for API compatibility.
 */
def dropDummFuc(plan: LogicalPlan): LogicalPlan =
  plan transform {
    case Project(projectList, child) =>
      Project(dropDummyExp(projectList), child)
    case Aggregate(groupingExps, aggregateExps, child) =>
      Aggregate(groupingExps, dropDummyExp(aggregateExps), child)
  }
/**
 * Drops the injected "preAgg" marker expressions from a projection list.
 * An expression is dropped when it is either an Alias over a ScalaUDF named
 * "preAgg" or an AttributeReference named "preAgg"; all other expressions
 * are kept unchanged, in order.
 *
 * Cleanup: replaced the `map(...).filter(_.isDefined).map(_.get)` pipeline
 * with an equivalent `flatMap` over Option, and removed unused pattern
 * bindings.
 */
private def dropDummyExp(exps: Seq[NamedExpression]) = {
  exps.flatMap {
    case Alias(_: ScalaUDF, name) if name.equalsIgnoreCase("preAgg") => None
    case attr: AttributeReference if attr.name.equalsIgnoreCase("preAgg") => None
    case other => Some(other)
  }
}
/**
 * Check if we can do incremental load on the mv table. Some cases like
 * aggregation functions which are present inside other expressions like
 * sum(a)+sum(b) cannot be incrementally loaded, so a full rebuild is needed.
 *
 * Alias and AggregateExpression nodes are matched first so that a direct
 * aggregate (or an aliased one) does NOT trigger a full rebuild; only an
 * aggregate nested inside some other expression does. Cast is likewise
 * shadowed from the generic case: a cast over an aggregate stays
 * incrementally loadable (the previous code expressed this with a `find`
 * whose every branch returned false — dead code that could never match; it
 * has been removed without changing behaviour).
 */
private def isFullReload(logicalPlan: LogicalPlan): Boolean = {
  var isFullReload = false
  logicalPlan.transformAllExpressions {
    case a: Alias => a
    case agg: AggregateExpression => agg
    case c: Cast =>
      // Shadow the generic Expression case: a Cast itself never forces a
      // full rebuild. Its children are still visited by this rule, so an
      // aggregate buried deeper inside a non-cast expression is detected
      // by the case below.
      c
    case exp: Expression =>
      // Check any aggregation function present inside other expression.
      isFullReload = exp.find {
        case agg: AggregateExpression => true
        case _ => false
      }.isDefined || isFullReload
      exp
  }
  isFullReload
}
/**
 * Builds a mapping from the subsumed plan's expressions to the subsumer's
 * output expressions (positionally zipped). For each pair, entries are added
 * for the expression itself, any AttributeReferences it contains, and — when
 * it is an Alias — its child, so lookups succeed at several granularities.
 *
 * @throws UnsupportedOperationException when the two lists differ in length
 */
def getAttributeMap(subsumer: Seq[NamedExpression],
    subsume: Seq[NamedExpression]): Map[AttributeKey, NamedExpression] = {
  if (subsumer.length == subsume.length) {
    subsume.zip(subsumer).flatMap { case (left, right) =>
      // Every attribute inside the subsumed expression maps to the
      // corresponding subsumer output (re-aliased with the attribute name).
      var tuples = left collect {
        case attr: AttributeReference =>
          (AttributeKey(attr), createAttrReference(right, attr.name))
      }
      // For an alias, also map its child expression.
      left match {
        case a: Alias =>
          tuples = Seq((AttributeKey(a.child), createAttrReference(right, a.name))) ++ tuples
        case _ =>
      }
      // Finally map the whole expression. Note: later duplicate keys win in
      // toMap, so the whole-expression entries take precedence.
      Seq((AttributeKey(left), createAttrReference(right, left.name))) ++ tuples
    }.toMap
  } else {
    throw new UnsupportedOperationException("Cannot create mapping with unequal sizes")
  }
}
/** Wraps `ref` in an Alias named `name`, reusing `ref`'s exprId (and no
  * qualifier) so downstream attribute resolution still matches. */
def createAttrReference(ref: NamedExpression, name: String): Alias = {
  Alias(ref, name)(exprId = ref.exprId, qualifier = None)
}
/**
 * Map key wrapping an Expression so that lookups use semantic equality
 * (ignoring cosmetic differences such as exprIds) rather than reference or
 * structural equality.
 */
case class AttributeKey(exp: Expression) {

  override def equals(other: Any): Boolean = other match {
    case attrKey: AttributeKey =>
      exp.semanticEquals(attrKey.exp)
    case _ => false
  }

  // Basically we want to use it as simple linked list so hashcode is hardcoded.
  // All keys land in one hash bucket, forcing every lookup through the
  // semantic `equals` above (at the cost of O(n) map operations).
  override def hashCode: Int = 1
}
/**
 * Updates the expressions as per the subsumer output expressions. It is
 * needed to update the expressions as per the datamap table relation.
 *
 * @param expressions expressions which are needed to update
 * @param attrMap mapping from subsumed expressions to subsumer outputs
 * @param aliasName table alias name (qualifier applied to rewritten
 *                  aggregate attributes)
 * @param keepAlias when true, rewritten attributes keep the original
 *                  attribute's qualifier
 * @return Updated expressions
 */
def updateSubsumeAttrs(
    expressions: Seq[Expression],
    attrMap: Map[AttributeKey, NamedExpression],
    aliasName: Option[String],
    keepAlias: Boolean = false): Seq[Expression] = {

  // Unwraps a mapped expression: for an alias over an aggregate, pulls out
  // the first attribute inside the aggregate function (re-qualified with
  // aliasName); for any other alias, its child; otherwise the expression
  // itself.
  def getAttribute(exp: Expression) = {
    exp match {
      case Alias(agg: AggregateExpression, name) =>
        agg.aggregateFunction.collect {
          case attr: AttributeReference =>
            AttributeReference(attr.name, attr.dataType, attr.nullable, attr
              .metadata)(attr.exprId,
              aliasName,
              attr.isGenerated)
        }.head
      case Alias(child, name) =>
        child
      case other => other
    }
  }

  expressions.map {
    // Aliased aggregate: look up by the aggregate, keep the original alias
    // identity (exprId/qualifier/metadata) on the rewritten expression.
    case alias@Alias(agg: AggregateExpression, name) =>
      attrMap.get(AttributeKey(agg)).map { exp =>
        Alias(getAttribute(exp), name)(alias.exprId,
          alias.qualifier,
          alias.explicitMetadata,
          alias.isGenerated)
      }.getOrElse(alias)
    // Bare attribute: replace with the mapped output; with keepAlias the
    // original qualifier is retained on a rebuilt AttributeReference.
    case attr: AttributeReference =>
      val uattr = attrMap.get(AttributeKey(attr)).map{a =>
        if (keepAlias) {
          AttributeReference(a.name, a.dataType, a.nullable, a.metadata)(a.exprId,
            attr.qualifier,
            a.isGenerated)
        } else {
          a
        }
      }.getOrElse(attr)
      uattr
    // Alias over any other expression: look up by the child expression.
    case alias@Alias(expression: Expression, name) =>
      attrMap.get(AttributeKey(expression)).map { exp =>
        Alias(getAttribute(exp), name)(alias.exprId,
          alias.qualifier,
          alias.explicitMetadata,
          alias.isGenerated)
      }.getOrElse(alias)
    // Anything else: direct lookup, falling back to the original.
    case expression: Expression =>
      val uattr = attrMap.get(AttributeKey(expression))
      uattr.getOrElse(expression)
  }
}
/**
 * Rewrites the subsumer's output list against the datamap table relation and
 * re-aliases each (possibly rewritten) attribute with the original output's
 * name and exprId, so parent operators keep resolving their references.
 *
 * Cleanup: `outputSel` was a `var` that was never reassigned — now a `val`;
 * unused pattern bindings removed. Behaviour unchanged.
 *
 * @param subsumerOutputList the subsumer's original outputs
 * @param dataMapRltn        the datamap table relation providing the alias
 * @param aliasMap           mapping from subsumed to subsumer expressions
 * @param keepAlias          whether rewritten attributes keep qualifiers
 */
def updateOutPutList(
    subsumerOutputList: Seq[NamedExpression],
    dataMapRltn: Select,
    aliasMap: Map[AttributeKey, NamedExpression],
    keepAlias: Boolean): Seq[NamedExpression] = {
  val outputSel =
    updateSubsumeAttrs(
      subsumerOutputList,
      aliasMap,
      Some(dataMapRltn.aliasMap.values.head),
      keepAlias).asInstanceOf[Seq[NamedExpression]]
  // Re-alias with the original name/exprId so existing references stay valid.
  outputSel.zip(subsumerOutputList).map { case (updated, original) =>
    updated match {
      case attr: AttributeReference =>
        Alias(attr, original.name)(original.exprId, None)
      case Alias(attr: AttributeReference, _) =>
        Alias(attr, original.name)(original.exprId, None)
      case other => other
    }
  }
}
/**
 * Rewrites attribute references inside the given predicates using `attrMap`,
 * so filters resolve against the datamap table columns.
 */
def updateSelectPredicates(
    predicates: Seq[Expression],
    attrMap: Map[AttributeKey, NamedExpression],
    keepAlias: Boolean): Seq[Expression] = {
  predicates.map { predicate =>
    predicate transform {
      case attr: AttributeReference =>
        attrMap.get(AttributeKey(attr)) match {
          case Some(mapped) =>
            if (keepAlias) {
              // Preserve the original qualifier, take identity/type from the mapping.
              AttributeReference(mapped.name, mapped.dataType, mapped.nullable, mapped.metadata)(
                mapped.exprId,
                attr.qualifier,
                mapped.isGenerated)
            } else {
              mapped
            }
          case None => attr
        }
    }
  }
}
/**
 * Update the modular plan as per the datamap table relation inside it.
 *
 * @param subsumer plan to be updated
 * @param rewrite query rewrite context
 * @return Updated modular plan.
 */
def updateDataMap(subsumer: ModularPlan, rewrite: QueryRewrite): ModularPlan = {
  subsumer match {
    case s: Select if s.dataMapTableRelation.isDefined =>
      // Plain select over the datamap table: expose the relation's columns,
      // re-aliased to the subsumer's names where they differ.
      val relation =
        s.dataMapTableRelation.get.asInstanceOf[MVPlanWrapper].plan.asInstanceOf[Select]
      val mappings = s.outputList zip relation.outputList
      val oList = for ((o1, o2) <- mappings) yield {
        if (o1.name != o2.name) Alias(o2, o1.name)(exprId = o1.exprId) else o2
      }
      relation.copy(outputList = oList).setRewritten()
    case g: GroupBy if g.dataMapTableRelation.isDefined =>
      // Group-by over the datamap table: aggregates are re-applied on top of
      // the pre-aggregated MV columns.
      val relation =
        g.dataMapTableRelation.get.asInstanceOf[MVPlanWrapper].plan.asInstanceOf[Select]
      val in = relation.asInstanceOf[Select].outputList
      val mappings = g.outputList zip relation.outputList
      val oList = for ((left, right) <- mappings) yield {
        left match {
          case Alias(agg@AggregateExpression(fun@Sum(child), _, _, _), name) =>
            val uFun = fun.copy(child = right)
            Alias(agg.copy(aggregateFunction = uFun), left.name)(exprId = left.exprId)
          case Alias(agg@AggregateExpression(fun@Max(child), _, _, _), name) =>
            val uFun = fun.copy(child = right)
            Alias(agg.copy(aggregateFunction = uFun), left.name)(exprId = left.exprId)
          case Alias(agg@AggregateExpression(fun@Min(child), _, _, _), name) =>
            val uFun = fun.copy(child = right)
            Alias(agg.copy(aggregateFunction = uFun), left.name)(exprId = left.exprId)
          case Alias(agg@AggregateExpression(fun@Count(Seq(child)), _, _, _), name) =>
            // Counts are already materialized in the MV; summing the partial
            // counts yields the overall count.
            val uFun = Sum(right)
            Alias(agg.copy(aggregateFunction = uFun), left.name)(exprId = left.exprId)
          case _ =>
            if (left.name != right.name) Alias(right, left.name)(exprId = left.exprId) else right
        }
      }
      // Rewrite grouping predicates to reference the mapped MV columns.
      val updatedPredicates = g.predicateList.map { f =>
        mappings.find{ case (k, y) =>
          k match {
            case a: Alias if f.isInstanceOf[Alias] =>
              a.child.semanticEquals(f.children.head)
            case a: Alias => a.child.semanticEquals(f)
            case other => other.semanticEquals(f)
          }
        } match {
          case Some(r) => r._2
          case _ => f
        }
      }
      g.copy(outputList = oList,
        inputList = in,
        predicateList = updatedPredicates,
        child = relation,
        dataMapTableRelation = None).setRewritten()
    case select: Select =>
      select.children match {
        case Seq(g: GroupBy) if g.dataMapTableRelation.isDefined =>
          val relation =
            g.dataMapTableRelation.get.asInstanceOf[MVPlanWrapper].plan.asInstanceOf[Select]
          val aliasMap = getAttributeMap(relation.outputList, g.outputList)
          // Fix the sort orders in the select's flagspec against the MV columns.
          val updatedFlagSpec: Seq[Seq[ArrayBuffer[SortOrder]]] = updateSortOrder(
            keepAlias = false,
            select,
            relation,
            aliasMap)
          if (isFullRefresh(g.dataMapTableRelation.get.asInstanceOf[MVPlanWrapper])) {
            // Full refresh: MV rows are final, so no re-aggregation is needed.
            val mappings = g.outputList zip relation.outputList
            val oList = for ((o1, o2) <- mappings) yield {
              if (o1.name != o2.name) Alias(o2, o1.name)(exprId = o1.exprId) else o2
            }
            val outList = select.outputList.map{ f =>
              oList.find(_.name.equals(f.name)).get
            }
            // Directly keep the relation as child.
            select.copy(
              outputList = outList,
              children = Seq(relation),
              aliasMap = relation.aliasMap,
              flagSpec = updatedFlagSpec).setRewritten()
          } else {
            // Incremental: rewrite the group-by (recursively) and stack the
            // select over it with a remapped output list.
            val outputSel =
              updateOutPutList(select.outputList, relation, aliasMap, keepAlias = false)
            val child = updateDataMap(g, rewrite).asInstanceOf[Matchable]
            // TODO Remove the unnecessary columns from selection.
            // Only keep columns which are required by parent.
            val inputSel = child.outputList
            select.copy(
              outputList = outputSel,
              inputList = inputSel,
              flagSpec = updatedFlagSpec,
              children = Seq(child)).setRewritten()
          }
        case _ => select
      }
    case other => other
  }
}
/**
 * Updates the flagspec (sort orders) of the given select plan so that its
 * attributes reference the datamap relation's output, via `aliasMap`.
 */
private def updateSortOrder(keepAlias: Boolean,
    select: Select,
    relation: Select,
    aliasMap: Map[AttributeKey, NamedExpression]) = {
  val updatedFlagSpec = select.flagSpec.map { f =>
    f.map {
      // NOTE(review): the ArrayBuffer[SortOrder] match is unchecked due to
      // type erasure; it relies on flagSpec only ever holding SortOrder buffers.
      case list: ArrayBuffer[SortOrder] =>
        list.map { s =>
          // Remap each sort key through the alias map; `head` assumes a
          // single-expression result from updateOutPutList.
          val expressions =
            updateOutPutList(
              Seq(s.child.asInstanceOf[Attribute]),
              relation,
              aliasMap,
              keepAlias = false)
          SortOrder(expressions.head, s.direction, s.sameOrderExpressions)
        }
    }
  }
  updatedFlagSpec
}
/**
 * Checks whether a full refresh of the datamap table is required, i.e. the MV
 * rows are final and no aggregation/group-by has to be re-applied on top.
 */
private def isFullRefresh(mvPlanWrapper: MVPlanWrapper): Boolean = {
  // The "full_refresh" property is optional; a missing property means
  // incremental refresh (false).
  Option(mvPlanWrapper.dataMapSchema.getProperties.get("full_refresh"))
    .exists(_.toBoolean)
}
/**
 * Rewrite the updated mv query with corresponding MV table.
 */
def rewriteWithMVTable(rewrittenPlan: ModularPlan, rewrite: QueryRewrite): ModularPlan = {
  // Only do the extra pass when at least one node was actually rewritten.
  if (rewrittenPlan.find(_.rewritten).isDefined) {
    // Swap each Select/GroupBy for its datamap-table equivalent.
    val updatedDataMapTablePlan = rewrittenPlan transform {
      case s: Select =>
        MVHelper.updateDataMap(s, rewrite)
      case g: GroupBy =>
        MVHelper.updateDataMap(g, rewrite)
    }
    // TODO Find a better way to set the rewritten flag, it may fail in some conditions.
    // Pair nodes of the old and new plans positionally and carry the
    // `rewritten` flag over; assumes both traversals visit nodes in the same order.
    val mapping =
      rewrittenPlan.collect { case m: ModularPlan => m } zip
      updatedDataMapTablePlan.collect { case m: ModularPlan => m }
    mapping.foreach(f => if (f._1.rewritten) f._2.setRewritten())
    updatedDataMapTablePlan
  } else {
    rewrittenPlan
  }
}
}
| jatin9896/incubator-carbondata | datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVHelper.scala | Scala | apache-2.0 | 18,370 |
package com.rainysoft.copula.distribution
import org.apache.commons.math3.distribution.{UniformRealDistribution, RealDistribution}
import org.apache.commons.math3.util.FastMath
/** Copula functions for the Clayton copula.
 *
 * Created by mikael.ohman on 21/12/14.
 */
object Clayton {

  val unif = new UniformRealDistribution()

  /** Conditional copula function for the Clayton copula.
   *
   * @param u2 Conditioned uniform.
   * @param u1 Conditioning uniform.
   * @param a Copula parameter.
   * @return Conditional copula quantile.
   */
  def conf(u2: Double, u1: Double, a: Double) = {
    val inner = FastMath.pow(u1, -a) + FastMath.pow(u2, -a) - 1
    FastMath.pow(u1, -(1 + a)) * FastMath.pow(inner, -(1 + a) / a)
  }

  /** Inverse conditional copula function for the Clayton copula.
   *
   * @param v Quantile to find uniform from.
   * @param u1 Conditioning uniform.
   * @param a Copula parameter.
   * @return Conditioned uniform.
   */
  def invCond(v: Double, u1: Double, a: Double) = {
    val inner = 1 + FastMath.pow(u1, -a) * (FastMath.pow(v, -a / (1 + a)) - 1)
    FastMath.pow(inner, -1 / a)
  }

  /** Samples from a Clayton copula with the given marginal distributions and parameter.
   *
   * @param marginal1 The first marginal.
   * @param marginal2 The second marginal.
   * @param a The Clayton parameter.
   * @return Sample from the bivariate distribution.
   */
  def sample(marginal1: RealDistribution, marginal2: RealDistribution, a: Double): (Double, Double) = {
    // Draw the conditioning uniform first, then derive the conditioned one
    // from a second independent uniform via the inverse conditional copula.
    val u1 = unif.sample()
    val u2 = invCond(unif.sample(), u1, a)
    // Push both uniforms through the marginal quantile functions.
    (marginal1.inverseCumulativeProbability(u1),
      marginal2.inverseCumulativeProbability(u2))
  }
}
| MikaelUmaN/Copula | src/main/scala/com/rainysoft/copula/distribution/Clayton.scala | Scala | mit | 1,751 |
import java.io.File
import testgen.TestSuiteBuilder._
import testgen._
object RobotSimulatorTestGenerator {

  /** Generates the robot-simulator test suite from its canonical JSON data. */
  def main(args: Array[String]): Unit = {
    val file = new File("src/main/resources/robot-simulator.json")

    // Reads the robot position from the test-case input.
    def getPosition(labeledTest: LabeledTest): (Int, Int) = {
      val inputMap = fromInputMap(labeledTest.result, "position")
        .asInstanceOf[Map[String, Any]]
      getPosition2(inputMap)
    }

    def getPosition2(positionMap: Map[String, Any]): (Int, Int) =
      (positionMap("x").asInstanceOf[Int], positionMap("y").asInstanceOf[Int])

    def getDirection(labeledTest: LabeledTest): String =
      fromInputMap(labeledTest.result, "direction").toString

    def toPositionArgs(position: (Int, Int)): String =
      s"${position._1}, ${position._2}"

    def getExpectedMap(labeledTest: LabeledTest): Map[String, Any] =
      labeledTest.result("expected").asInstanceOf[Map[String, Any]]

    // Maps a JSON direction string to the generated Bearing enum reference.
    // Fails fast with a descriptive error on unknown input (instead of a MatchError).
    def directionToBearing(direction: String): String =
      direction.toLowerCase match {
        case "north" => "Bearing.North"
        case "south" => "Bearing.South"
        case "east" => "Bearing.East"
        case "west" => "Bearing.West"
        case other => throw new IllegalArgumentException(s"Unknown direction: $other")
      }

    // Expression constructing the robot under test.
    def toCreateSutCall(labeledTest: LabeledTest): String = {
      val bearing = directionToBearing(getDirection(labeledTest))
      s"Robot($bearing, (${toPositionArgs(getPosition(labeledTest))}))"
    }

    // Expression constructing the expected robot.
    def toCreateExpected(labeledTest: LabeledTest): String = {
      val robotMap = getExpectedMap(labeledTest)
      val bearing = directionToBearing(robotMap("direction").toString)
      val positionArgs = toPositionArgs(getPosition2(robotMap("position")
        .asInstanceOf[Map[String, Int]]))
      s"Robot($bearing, ($positionArgs))"
    }

    // Turn tests assert on the bearing, advance tests on the coordinates.
    def toTurnFunction(map: Map[String, Any]) =
      map.get("direction") match {
        case Some(_) => "bearing"
        case None => "coordinates"
      }

    def toTurnSutCall(labeledTest: LabeledTest): String = {
      val property = labeledTest.property
      val expected = getExpectedMap(labeledTest)
      s"${toCreateSutCall(labeledTest)}.$property.${toTurnFunction(expected)}"
    }

    def toTurnExpected(labeledTest: LabeledTest): String = {
      val expected = getExpectedMap(labeledTest)
      expected.get("direction") match {
        case Some(s: String) => directionToBearing(s)
        case None => {
          val positionMap = expected("position").asInstanceOf[Map[String, Int]]
          s"(${toPositionArgs(getPosition2(positionMap))})"
        }
      }
    }

    def toInstructSutCall(labeledTest: LabeledTest): String = {
      val instructions = fromInputMap(labeledTest.result, "instructions")
      s"""${toCreateSutCall(labeledTest)}.simulate("$instructions")"""
    }

    def toInstructExpected(labeledTest: LabeledTest): String =
      toCreateExpected(labeledTest)

    // Dispatches on the canonical-data property to build (call, expected) pairs.
    def fromLabeledTest(argNames: String*): ToTestCaseData =
      withLabeledTest { sut =>
        labeledTest =>
          val property = labeledTest.property
          val (sutCall, expected) = property match {
            case "create" =>
              (toCreateSutCall(labeledTest),
                toCreateExpected(labeledTest))
            case "turnRight" | "turnLeft" | "advance" =>
              (toTurnSutCall(labeledTest),
                toTurnExpected(labeledTest))
            case "instructions" =>
              (toInstructSutCall(labeledTest),
                toInstructExpected(labeledTest))
            case _ => throw new IllegalStateException()
          }
          TestCaseData(labeledTest.parentDescriptions.mkString(" - ") + " - " + labeledTest.description,
            sutCall, expected)
      }

    val code =
      TestSuiteBuilder.build(file, fromLabeledTest())
    println(s"-------------")
    println(code)
    println(s"-------------")
  }
}
| ricemery/xscala | testgen/src/main/scala/RobotSimulatorTestGenerator.scala | Scala | mit | 3,886 |
package models
/** Parameters for a job that clones an existing document set.
  *
  * @param ownerEmail          email of the user who owns the clone
  * @param sourceDocumentSetId id of the document set to copy
  */
case class CloneImportJob(
  ownerEmail: String,
  sourceDocumentSetId: Long
)
| overview/overview-server | web/app/models/CloneImportJob.scala | Scala | agpl-3.0 | 95 |
package scuff
import scala.reflect.{ ClassTag, NameTransformer }
object Enum {
  /**
   * Base trait for values of an [[Enum]].
   * The self-type constrains mixers to be Values of some Enum instance.
   */
  trait Value {
    value: Enum[Enum.Value]#Value =>
    def id: Int
    // `name` is the decoded toString below; final so it stays consistent.
    final def name: String = this.toString
    // Decode compiler-encoded symbolic characters in the generated name.
    override def toString = NameTransformer decode super.toString
  }
}
/**
* Parametric `scala.Enumeration` extension.
* @tparam V Sealed trait enum type
*/
/**
 * Parametric `scala.Enumeration` extension.
 * @tparam V Sealed trait enum type
 */
class Enum[V <: Enum.Value: ClassTag] extends Enumeration {
  type Value = V

  /** All declared values of type V, in enumeration order. */
  lazy val list: List[V] = this.values.toList.collect {
    case v: V => v
  }

  /** First value matching the predicate, if any. */
  def find(find: V => Boolean): Option[V] = list.find(find)

  /** Value with the given (decoded) name, if any. */
  def get(name: String): Option[V] = list.find(_.name == name)

  /** Value with the given id; callers must pass a valid id. */
  def id(id: Int): V = this.apply(id).asInstanceOf[V]

  /** Value with the given name; throws with a descriptive message otherwise. */
  def apply(name: String): V = {
    get(name) match {
      case Some(value) => value
      case _ =>
        val valuesStr = this.list.map(v => s"'$v'").mkString(", ")
        throw new NoSuchElementException(s"No value found for '$name'; available: $valuesStr")
    }
  }

  /** First value matching the predicate; throws with a descriptive message otherwise. */
  def apply(find: V => Boolean): V = {
    list.find(find) match {
      case Some(value) => value
      case _ =>
        // Include the available values in the message, consistent with apply(name).
        val valuesStr = this.list.map(v => s"'$v'").mkString(", ")
        throw new NoSuchElementException(s"No value matched the predicate; available: $valuesStr")
    }
  }
}
| nilskp/scuff | src/main/scala/scuff/Enum.scala | Scala | mit | 1,133 |
package parser
import org.jsoup.nodes.Document
import scala.collection.JavaConversions._
import org.apache.http.client.utils.URIUtils
import java.net.URI
import org.apache.commons.lang3.StringUtils
import org.apache.commons.validator.routines.UrlValidator
import scala._
import scala.collection.mutable.ListBuffer
import url.URLCanonicalizer
/**
 * This extractor using for get image and text from a html fragment.
 * Basically it just use Jsoup parser but for more convenient and more functionally.
 *
 * @author Nguyen Duc Dung
 * @since 6/16/13 1:43 PM
 *
 */
class HtmlExtractor {

  // Valid http/https image URLs collected by extract().
  val images = new ListBuffer[String]
  // Plain text content of the document; populated by extract().
  var text: String = ""

  val urlValidator = new UrlValidator(Array("http", "https"))

  /**
   * Extracts image URLs and text content from `doc`. Relative image `src`
   * attributes are canonicalized against the document's base URI and rewritten
   * in the DOM.
   */
  def extract(doc: Document) {
    val images = doc.select("img")
    images.foreach(img => {
      val src = img.attr("src")
      if (StringUtils.isNotBlank(src)) {
        if (StringUtils.isNotBlank(doc.baseUri)) {
          val baseUrl = URIUtils.extractHost(new URI(doc.baseUri)).toURI
          if (baseUrl != null) {
            val fixedUrl = URLCanonicalizer.getCanonicalURL(src, baseUrl)
            if (StringUtils.isNotBlank(fixedUrl) && urlValidator.isValid(fixedUrl)) {
              img.attr("src", fixedUrl)
            }
          }
        }
        // NOTE(review): only the original `src` is collected here, so a
        // relative URL that was just canonicalized above never reaches
        // `this.images` — confirm whether `fixedUrl` should be added instead.
        if (urlValidator.isValid(src)) {
          this.images += src
        }
      }
    })
    this.text = doc.text()
  }
}
| SunriseSoftVN/hayhayblog | app/parser/HtmlExtractor.scala | Scala | gpl-2.0 | 1,401 |
package com.glowingavenger.plan.util
import org.sat4j.scala.Logic._
import scala.Some
object Model {
  // Dummy symbol injected to force the solver to enumerate models for
  // trivially-true clauses (see retrieveModels).
  private val ExtraLiteral = 'ExtraLiteral

  /**
   * Checks which symbols in the model are actually inferred from the clause.
   * Symbols absent from the clause's merged models are mapped to None (unknown).
   */
  def retrieveSymbols(model: Map[Symbol, Option[Boolean]], clause: BoolExp): Option[Map[Symbol, Option[Boolean]]] = {
    retrieveModels(clause) match {
      case None => None
      case Some(clauseModel) => Some(model map {
        p => (p._1, if (clauseModel contains p._1) clauseModel(p._1) else None)
      })
    }
  }

  /** Like [[retrieveModels]] but throws if the clause is unsatisfiable. */
  def retrieveModelsOrThrow(clause: BoolExp): Map[Symbol, Option[Boolean]] = {
    Model.retrieveModels(clause) match {
      case Some(models) => models
      case None => throw new IllegalArgumentException("Clause is not satisfiable: " + clause)
    }
  }

  /**
   * Enumerates all SAT models of `clause` and merges them: a symbol maps to
   * Some(b) only when every model agrees on b, otherwise None.
   */
  def retrieveModels(clause: BoolExp, extraLiteral: Option[Symbol] = None): Option[Map[Symbol, Option[Boolean]]] = {
    // Strips the injected helper literal from a model before merging.
    def withoutExtra(model: Map[Symbol, Boolean]) = {
      extraLiteral match {
        case Some(l) => model - l
        case _ => model
      }
    }
    allSat[Symbol](clause) match {
      case (true, models) if models.isDefined => Some(parseModels(models.get.map(withoutExtra)))
      // For the case of trivial clause such as 'L & 'L allSat doesn't produce a single model
      case (false, Some(_)) =>
        retrieveModels(clause & ExtraLiteral, Some(ExtraLiteral))
      case _ => None
    }
  }

  // Merges the per-model assignments: a symbol is determined only if all
  // models assign it the same value.
  private def parseModels(models: List[Map[Symbol, Boolean]]): Map[Symbol, Option[Boolean]] = {
    val list = for (model <- models; pair <- model) yield pair
    list groupBy (_._1) map (p => {
      val set = p._2.unzip._2.toSet
      // size, not count(_ => true): same result, clearer intent.
      (p._1, if (set.size > 1) None else Some(set.head))
    })
  }

  /** Converts a merged model back into a boolean expression. */
  def model2BoolExp(model: Map[Symbol, Option[Boolean]]): BoolExp = (True.asInstanceOf[BoolExp] /: model)((e, p) => e & (p match {
    case (s, None) => s | ~s
    case (s, Some(true)) => toProp(s)
    case (s, Some(false)) => ~s
  }))
}
| dreef3/glowing-avenger | src/main/scala/com/glowingavenger/plan/util/Model.scala | Scala | mit | 1,995 |
package dhg.ccg.parse.gfl
import dhg.gfl.Fudg._
import dhg.util._
import dhg.util.viz.VizTree
import scalaz._
import Scalaz._
import dhg.ccg.parse.pcfg._
import dhg.ccg.parse._
import dhg.ccg.cat._
import dhg.ccg.rule._
import dhg.ccg.tagdict._
import dhg.ccg.prob.DefaultedLogProbabilityDistribution
import dhg.ccg.prob._
object Gfl {

  /**
   * Demo entry point: builds CCG guide charts for an example sentence, using a
   * GFL annotation to constrain parsing, and draws the k-best parses.
   * Note: opens GUI windows via TreeViz.
   */
  def main(args: Array[String]): Unit = {

    // Guide-chart builder using forward/backward application only.
    val gcb = new SimpleCfgGuideChartBuilder(Vector(FA, BA), allowTerminalDeletion = false)
    // PCFG parser with uniform distributions; probabilities only rank parses here.
    val parser = new PcfgParser(
      new UniformDefaultLogProbabilityDistribution[Cat](LogDouble(0.01)),
      new UnconditionalWrappingConditionalLogProbabilityDistribution[Cat, Prod](new UniformDefaultLogProbabilityDistribution(LogDouble(0.01))))

    // Atomic categories.
    val s: NonPuncCat = cat"S".asInstanceOf[NonPuncCat]
    val np: NonPuncCat = cat"NP".asInstanceOf[NonPuncCat]
    val n: NonPuncCat = cat"N".asInstanceOf[NonPuncCat]
    val pp: NonPuncCat = cat"PP".asInstanceOf[NonPuncCat]

    // Category sets per part of speech.
    val Det: Set[Cat] = Set(
      np / n)
    val Adj: Set[Cat] = Set(
      n / n)
    val IV: Set[Cat] = Set(
      s \\ np,
      (s \\ np) / pp)
    val TV: Set[Cat] = Set(
      (s \\ np) / np,
      ((s \\ np) / pp) / np,
      (((s \\ np) / pp) / pp) / np)
    val N: Set[Cat] = Set(
      n)
    val NNP: Set[Cat] = Set(
      np,
      np / pp,
      (np / pp) / pp)
    val Prep: Set[Cat] = Set(
      pp / np)

    // Tag dictionary for the toy lexicon.
    val tagdict = SimpleTagDictionary.apply(
      Map[String, Set[Cat]](
        "the" -> Det,
        "big" -> Adj,
        "man" -> N,
        "dog" -> N,
        "dogs" -> N,
        "cats" -> N,
        "telescope" -> N,
        "saw" -> (IV | TV),
        "walked" -> (IV | TV),
        "chase" -> TV,
        "run" -> IV,
        "ran" -> IV,
        "John" -> NNP,
        "Mary" -> NNP,
        "with" -> Prep),
      "<S>", cat"<S>", "<E>", cat"<E>")

    //    "the dogs walked",
    //    "the man walked the dog",
    //    "dogs chase cats",
    //    "big dogs run",
    //    "the big dogs run",
    //    "John saw Mary with the dog",
    //    "John saw Mary with the telescope",
    //    "John saw Mary with the dog with the telescope")

    // Disabled example: same sentence with a full dependency annotation.
    if (false) {
      val text = "John saw Mary with the telescope"
      val annotation = """ John > saw < (Mary < (with the telescope)) """
      val sentence = fromGfl(text, annotation).getOrElseThrow()
      dhg.util.viz.TreeViz.drawTree(sentence.fudgTree)
      val supertagSetSentence: Vector[(String, Set[Cat])] = text.splitWhitespace.mapToVal(Set.empty[Cat])
      val gc = gcb.buildFromSupertagSetSentence(supertagSetSentence, Some(sentence), tagdict)
      gc.get.draw()
      val parses = parser.parseAndProbKBestFromGuideChart(gc.get, 100).map(_._1)
      for (p <- parses) dhg.util.viz.TreeViz.drawTree(p)
    }

    // Active example: annotation lists the tokens with no dependency constraints.
    {
      val text = "John saw Mary with the telescope"
      val annotation = """ John saw Mary with the telescope """
      val sentence = fromGfl(text, annotation).getOrElseThrow()
      dhg.util.viz.TreeViz.drawTree(sentence.fudgTree)
      val supertagSetSentence: Vector[(String, Set[Cat])] = text.splitWhitespace.mapToVal(Set.empty[Cat])
      val gc = gcb.buildFromSupertagSetSentence(supertagSetSentence, Some(sentence), tagdict)
      gc.get.draw()
      val parses = parser.parseAndProbKBestFromGuideChart(gc.get, 100).map(_._1)
      for (p <- parses) dhg.util.viz.TreeViz.drawTree(p)
    }
  }
}
| dhgarrette/2015-ccg-parsing | src/main/scala/dhg/ccg/parse/gfl/Gfl.scala | Scala | apache-2.0 | 3,393 |
package com.sksamuel.scapegoat.io
import java.util.UUID
/** @author Stephen Samuel */
class FindbugsReportWriter {
  // NOTE(review): apart from `projectName` and the timestamps, the report
  // below is hard-coded sample data (fixed bug instances, hashes, statistics)
  // — presumably a placeholder; confirm before relying on its output.
  def xml(projectName: String) =
    <bugcollection analysistimestamp={ System.currentTimeMillis.toString } release=" " sequence="0" timestamp={ System.currentTimeMillis.toString } version="1.3.9">
      <project projectname={ projectName }>
      </project>
      <buginstance abbrev="CN" category="BAD_PRACTICE" instancehash={ UUID.randomUUID.toString } instanceoccurrencemax="0" instanceoccurrencenum="0" priority="2" type="CN_IMPLEMENTS_CLONE_BUT_NOT_CLONEABLE">
        <shortmessage>Class defines clone() but doesn't implement Cloneable</shortmessage>
        <longmessage>org.sprunck.bee.Bee defines clone() but doesn't implement Cloneable</longmessage>
        <class classname="org.sprunck.bee.Bee" primary="true">
          <sourceline classname="org.sprunck.bee.Bee" end="31" start="6">
            <message>At 'unknown':[lines 6-31]</message>
          </sourceline>
          <message>In class org.sprunck.bee.Bee</message>
        </class>
        <method classname="org.sprunck.bee.Bee" isstatic="false" name="clone" primary="true" signature="()Ljava/lang/Object;">
          <sourceline classname="org.sprunck.bee.Bee" end="31" endbytecode="25" start="31" startbytecode="0"></sourceline>
          <message>In method org.sprunck.bee.Bee.clone()</message>
        </method>
        <sourceline classname="org.sprunck.bee.Bee" end="31" endbytecode="25" start="31" startbytecode="0" synthetic="true">
          <message>At 'unknown' :[line 31]</message>
        </sourceline>
      </buginstance>
      <errors errors="0" missingclasses="0"></errors>
      <findbugssummary alloc_mbytes="0" clock_seconds="0" cpu_seconds="0" gc_seconds="0" num_packages="0" peak_mbytes="0" priority_1="1" priority_2="3" referenced_classes="0" timestamp="Fri, 23 Jul 2010 20:35:05 +0200" total_bugs="0" total_classes="0" total_size="0" vm_version="0">
        <filestats bugcount="3" bughash="df1120c1c4b7708412d471df8e18b310" path="org/sprunck/bee/" size="11"></filestats>
        <filestats bugcount="1" bughash="2838bfeb77300e43a82da09454a1d353" path="org/sprunck/foo/" size="12"></filestats>
        <filestats bugcount="0" path="org/sprunck/tests/Unknown" size="22"></filestats>
        <packagestats package="org.sprunck.bee" priority_1="1" priority_2="2" total_bugs="3" total_size="11" total_types="1">
          <classstats bugs="3" class="org.sprunck.bee.Bee" interface="false" priority_1="1" priority_2="2" size="11" sourcefile="Unknown"></classstats>
        </packagestats>
        <packagestats package="org.sprunck.foo" priority_2="1" total_bugs="1" total_size="12" total_types="1">
          <classstats bugs="1" class="org.sprunck.foo.Foo" interface="false" priority_2="1" size="12" sourcefile="Unknown"></classstats>
        </packagestats>
        <packagestats package="org.sprunck.tests" total_bugs="0" total_size="22" total_types="2">
          <classstats bugs="0" class="org.sprunck.tests.BeeTest" interface="false" size="9" sourcefile="Unknown"></classstats>
          <classstats bugs="0" class="org.sprunck.tests.FooTest" interface="false" size="13" sourcefile="Unknown"></classstats>
        </packagestats>
      </findbugssummary>
    </bugcollection>
}
| pwwpche/scalac-scapegoat-plugin | src/main/scala/com/sksamuel/scapegoat/io/FindbugsReportWriter.scala | Scala | apache-2.0 | 3,297 |
/*
* Copyright 2015 LG CNS.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package scouter.server.tagcnt.first
import scouter.lang.value.Value
/** Immutable record of a tag's first-seen count at a given time. */
class FirstTCData(_objType: String, _time: Long, _tagKey: Long, _tagValue: Value, _cnt: Float) {
  val objType = _objType;
  val time = _time;
  val tagKey = _tagKey;
  val tagValue = _tagValue;
  val cnt = _cnt;
}
package teleporter.integration.utils
import org.apache.logging.log4j.scala.Logging
import scala.concurrent.{ExecutionContext, Future}
/**
* Author: kui.dai
* Date: 2015/11/26.
*/
/**
 * Resource-management helpers: run a function over an AutoCloseable and
 * always close it afterwards, logging (not propagating) close failures.
 */
trait Use extends Logging {

  type Closable = AutoCloseable

  /** Runs `f` on `resource`, closing it afterwards; close errors are logged, not thrown. */
  def using[R <: Closable, A](resource: R)(f: R ⇒ A): A = {
    try {
      f(resource)
    } finally {
      quietClose(resource)
    }
  }

  /** Closes `resource`, logging any exception raised by close() instead of throwing. */
  def quietClose[R <: Closable](resource: R): Unit = {
    try {
      resource.close()
    } catch {
      case e: Exception ⇒ logger.error(e.getLocalizedMessage, e)
    }
  }

  /** Async variant of [[using]]: closes `resource` once the future completes (success or failure). */
  def futureUsing[R <: Closable, A](resource: R)(f: R => Future[A])(implicit ec: ExecutionContext): Future[A] = {
    // Close via quietClose (consistent with `using`) so a failing close() is
    // logged instead of being silently reported to the executor by andThen.
    f(resource) andThen { case _ => quietClose(resource) }
  }
}
} | huanwuji/teleporter | src/main/scala/teleporter/integration/utils/Use.scala | Scala | agpl-3.0 | 748 |
/*
* Copyright 2015 The SIRIS Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* The SIRIS Project is a cooperation between Beuth University, Berlin and the
* HCI Group at the University of Würzburg. The project is funded by the German
* Federal Ministry of Education and Research (grant no. 17N4409).
*/
package simx.components.ai.mipro.supervisedlearning.helper
import simplex3d.math.floatx.{Vec3f, ConstVec3f}
import simx.core.entity.description.SVal
/**
* Created by chrisz on 10/06/15.
*/
object Conversion {

  // Concatenates one or more flattened feature vectors into a single row list.
  def createMatrix(x: List[Double], xs: List[Double]*) = x ::: xs.flatten.toList

  // Unwraps an SVal-wrapped ConstVec3f; the ConstVec3f payload is then turned
  // into a List[Double] by the implicit toList below.
  implicit def constVec3fsValToList(sVal: SVal.SValType[ConstVec3f]): List[Double] = sVal.value

  //  implicit def toDenseMat(data: ConstVec3f):DenseMatrix[Double]={
  //    DenseMatrix{(data.x.toDouble, data.y.toDouble, data.z.toDouble)}
  //  }

  // Flattens a constant 3-vector into [x, y, z].
  implicit def toList(data: ConstVec3f): List[Double] = {
    data.x.toDouble :: data.y.toDouble :: data.z.toDouble :: Nil
  }

  // Flattens a mutable 3-vector into [x, y, z].
  implicit def toList(data: Vec3f): List[Double] = {
    data.x.toDouble :: data.y.toDouble :: data.z.toDouble :: Nil
  }

  // Wraps an Int as a single-element feature list.
  implicit def toList(int: Int): List[Double] = {
    int :: Nil
  }

  // Encodes a Boolean as 1.0/0.0 in a single-element feature list.
  implicit def toList(b: Boolean): List[Double] = {
    (if(b) 1.0 else 0.0) :: Nil
  }

  // Builds a flat prediction input from one or more vectors; relies on the
  // implicit ConstVec3f->List conversions above to make `:::` and `flatten` work.
  def toPredictionX(x: ConstVec3f, xs: ConstVec3f*) = {
    x ::: xs.flatten.toList
  }
}
| simulator-x/feature | src/simx/components/ai/mipro/supervisedlearning/helper/Conversion.scala | Scala | apache-2.0 | 1,900 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.sql.analyzer
/** A graph vertex, identified by its name. */
trait Node {
  def name: String
}

/** A directed edge from `src` to `dest`. */
case class Edge(src: Node, dest: Node) {
  override def toString = s"${src}->${dest}"
}

/**
 * An immutable directed graph. Nodes are canonicalized by name: adding an edge
 * reuses any already-known node with the same name.
 */
case class Graph(nodes: Set[Node], edges: Set[Edge]) {

  /** Returns a graph with `n` added to the node set. */
  def +(n: Node): Graph = copy(nodes = nodes + n)

  /** Returns a graph with `e` added, endpoints canonicalized by name. */
  def +(e: Edge): Graph = {
    val canonicalSrc = findNode(e.src)
    val canonicalDest = findNode(e.dest)
    copy(
      nodes = nodes + canonicalSrc + canonicalDest,
      edges = edges + Edge(canonicalSrc, canonicalDest))
  }

  /** The known node with the same name as `n`, or `n` itself if unknown. */
  def findNode(n: Node): Node =
    nodes.find(_.name == n.name).getOrElse(n)

  override def toString: String = {
    s"""nodes: ${nodes.mkString(", ")}
       |edges: ${edges.mkString(", ")}""".stripMargin
  }
}

/** Orders edges lexicographically by source name, then destination name. */
case object EdgeOrdering extends Ordering[Edge] {
  override def compare(x: Edge, y: Edge): Int = {
    val bySrc = x.src.name.compareTo(y.src.name)
    if (bySrc == 0) x.dest.name.compareTo(y.dest.name) else bySrc
  }
}
| wvlet/airframe | airframe-sql/src/main/scala/wvlet/airframe/sql/analyzer/Graph.scala | Scala | apache-2.0 | 1,439 |
/**
* Copyright (c) 2014 Marco Sarti <marco.sarti at gmail.com>
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
package com.elogiclab.guardbee.core.authprovider
import org.specs2.mutable.Specification
import play.api.test.FakeRequest
import play.api.test.WithApplication
import com.elogiclab.guardbee.core.Password
import org.mindrot.jbcrypt.BCrypt
import com.elogiclab.guardbee.core.User
import org.joda.time.DateTime
import com.elogiclab.guardbee.core.UsernamePasswordAuthenticationToken
/**
 * @author Marco Sarti
 *
 */
object LocalAccountAuthenticatorSpec extends Specification {

  "LocalAccountAuthenticator" should {
    "should authenticate" in new WithApplication {
      // Stub plugin: bcrypt-hashed fixed password and a static user lookup.
      val plugin = new LocalAccountAuthenticatorPlugin(app) {
        override def obtainPassword(username: String) = Some(Password("bcrypt", BCrypt.hashpw("password", BCrypt.gensalt(10)), None))
        override def getByUsername(u: String) = Some(new User {
          def username: String = u
          def fullName: String = "fullname"
          def email: String = "test@example.org"
          def enabled: Boolean = true
          def expirationDate: Option[DateTime] = None
        })
        override def matchPassword(candidate: String, pwd: Password) = BCrypt.checkpw(candidate, pwd.password)
      }
      val token = UsernamePasswordAuthenticationToken("username", "password", None)
      val auth = plugin.authenticate(token)
      // A Right result means authentication succeeded.
      auth.isRight should beTrue
    }
  }
}
package org.opencoin.issuer
import org.opencoin.core.token.{Blind,Coin}
import org.opencoin.core.util.BigIntSerializer
import org.opencoin.core.util.BigIntDeserializer
import org.opencoin.core.util.JacksonWrapper._
import org.opencoin.core.REST.RequestValidationREST
import org.opencoin.core.REST.RequestRenewalREST
import org.opencoin.core.REST.RequestInvalidationREST
import java.util.{NoSuchElementException => NoSuchElement}
import com.twitter.util.Future
import com.twitter.finagle.Service
import com.twitter.finagle.http.Version.Http11
import com.twitter.finagle.http.{Http, RichHttp, Request, Response}
import com.twitter.finagle.http.path._
import com.twitter.finagle.http.Status._
import org.jboss.netty.handler.codec.http.HttpMethod._
import org.jboss.netty.handler.codec.http.HttpHeaders.Names._
import org.jboss.netty.handler.codec.http.HttpHeaders.Values._
import org.eintr.loglady.Logging
/*import org.codehaus.jackson.map.module.SimpleModule
import org.codehaus.jackson.Version
import org.codehaus.jackson.map.SerializationConfig
import org.codehaus.jackson.map.DeserializationConfig
import org.codehaus.jackson.map.ObjectWriter
import org.codehaus.jackson.map.ObjectMapper
*/
import com.fasterxml.jackson.databind.module.SimpleModule
//import com.fasterxml.jackson.databind.cfg.DatabindVersion
import com.fasterxml.jackson.databind.SerializationConfig
import com.fasterxml.jackson.databind.DeserializationConfig
import com.fasterxml.jackson.databind.ObjectWriter
import com.fasterxml.jackson.databind.ObjectMapper
/**
* This is the web service itself. It processes the requests, calls the Methods class and
* sends the response. This class contains the REST API but also calls the Messages class
* for the message-based API. Open the issuer in a web browser for further information.
**/
/**
 * HTTP front-end of the issuer. Routes REST requests (and the message-based
 * API under /message-api) to the [[Methods]] implementation and serializes
 * results as JSON via the Jackson wrapper.
 *
 * @param methods    business logic the routes delegate to
 * @param prefixPath path segment mounted under the server root
 */
class Respond(methods: Methods, prefixPath: String) extends Service[Request, Response] with Logging {

  val basePath = Root / prefixPath

  def apply(request: Request) = {
    try {
      request.method -> Path(request.path) match {
        // Serve the static explanatory webpage.
        case GET -> Root => Future.value {
          val source = scala.io.Source.fromFile("static/index.html", "UTF-8")
          val data = source.mkString
          source.close()
          log.debug("Static index file has been served.")
          Responses.html(data, acceptsGzip(request))
        }
        case GET -> `basePath` / "cdds" / "latest" => Future.value {
          log.debug("GET -> %s/cdds/latest has been called." format basePath)
          val data = serialize(methods.getLatestCdd) // Generate JSON syntax from object
          log.debug("data: %s" format data)
          Responses.json(data, acceptsGzip(request))
        }
        case GET -> `basePath` / "cdds" / "serial" / serial => Future.value {
          log.debug("GET -> %s/cdds/serial/ has been called." format basePath)
          val data = serialize(methods.getCdd(serial.toInt)) // Generate JSON syntax from object
          log.debug("data: %s" format data)
          Responses.json(data, acceptsGzip(request))
        }
        case GET -> `basePath` / "mintkeys" / "denomination" / denom => Future.value {
          log.debug("GET -> %s/mintkeys/denomination/<denom.> has been called." format basePath)
          val data = serialize(methods.getMintKeys(denom.toInt)) // Generate JSON syntax from object
          log.debug("data: %s" format data)
          Responses.json(data, acceptsGzip(request))
        }
        case GET -> `basePath` / "mintkeys" / "id" / id => Future.value {
          log.debug("GET -> %s/mintkeys/id/<id> has been called." format basePath)
          val data = serialize(methods.getMintKeysById(List(BigInt(id)))) // Generate JSON syntax from object
          log.debug("data: %s" format data)
          Responses.json(data, acceptsGzip(request))
        }
        case GET -> `basePath` / "mintkeys" => Future.value {
          log.debug("GET -> %s/mintkeys/ has been called." format basePath)
          val data = serialize(methods.getAllMintKeys) // Generate JSON syntax from object
          log.debug("data: %s" format data)
          Responses.json(data, acceptsGzip(request))
        }
        case POST -> `basePath` / "validate" => Future.value {
          log.debug("POST -> %s/validate has been called." format basePath)
          val content = request.contentString
          log.debug("request: %s" format content)
          val p = deserialize[RequestValidationREST](content) // Parse JSON syntax to object
          val data = serialize(methods.validate(p.authorization, p.blinds)) // Generate JSON syntax from object
          log.debug("data: %s" format data)
          Responses.json(data, acceptsGzip(request))
        }
        case POST -> `basePath` / "renew" => Future.value {
          log.debug("POST -> %s/renew has been called." format basePath)
          val content = request.contentString
          log.debug("request: %s" format content)
          val p = deserialize[RequestRenewalREST](content) // Parse JSON syntax to object
          //val (coins, blind) = p partition (_.isInstanceOf[Coin]) //TODO test!
          val data = serialize(methods.renew(p.coins, p.blinds)) // Generate JSON syntax from object
          log.debug("data: %s" format data)
          Responses.json(data, acceptsGzip(request))
        }
        case POST -> `basePath` / "invalidate" => Future.value {
          log.debug("POST -> %s/invalidate has been called." format basePath)
          val content = request.contentString
          log.debug("request: %s" format content)
          val p = deserialize[RequestInvalidationREST](content) // Parse JSON syntax to object
          val data = serialize(methods.invalidate(p.authorization, p.coins)) // Generate JSON syntax from object
          log.debug("data: %s" format data)
          Responses.json(data, acceptsGzip(request))
        }
        // NOTE(review): this route is registered for GET but reads a request
        // body, which is unusual for GET — confirm whether it should be POST.
        case GET -> `basePath` / "resume" => Future.value {
          // BUG FIX: log message previously said "POST" for this GET route.
          log.debug("GET -> %s/resume has been called." format basePath)
          val content = request.contentString
          log.debug("request: %s" format content)
          val p = deserialize[String](content) // Parse JSON syntax to object
          val data = serialize(methods.resume(p)) // Generate JSON syntax from object
          log.debug("data: %s" format data)
          Responses.json(data, acceptsGzip(request))
        }
        case GET -> `basePath` => Future.value {
          val source = scala.io.Source.fromFile("static/gulden.html", "UTF-8")
          val data = source.mkString
          source.close()
          log.debug("data: %s" format data)
          Responses.html(data, acceptsGzip(request))
        }
        // This is the message-based API.
        case POST -> `basePath` / "message-api" => Future.value {
          log.debug("POST -> %s/message-api has been called." format basePath)
          val content = request.contentString
          log.debug("request: %s" format content)
          val data = serialize(Messages.process(methods, content)) // Generate JSON syntax from object
          log.debug("data: %s" format data)
          Responses.json(data, acceptsGzip(request))
        }
        case _ => {
          log.debug("Error: URL not found: " + Path(request.path).toString)
          Future value Responses.notFoundError("Error: URL not found.", acceptsGzip(request))
        }
      }
    } catch {
      case e: NoSuchElement => Future value Response(Http11, NotFound)
      case e: Exception => Future.value {
        val message = Option(e.getMessage) getOrElse "Something went wrong."
        // BUG FIX: this used sys.error(...), which itself throws a
        // RuntimeException, so the Responses.error(...) line below was dead
        // code and the client never received an error response. Log instead.
        log.error("Error Message: %s\\nStack trace:\\n%s"
          .format(message, e.getStackTraceString))
        Responses.error(message, acceptsGzip(request))
      }
    }
  }

  /**
   * True when the client advertises gzip support in Accept-Encoding.
   * Tokens are trimmed so headers like "deflate, gzip" are recognized
   * (BUG FIX: the untrimmed token " gzip" previously failed the match).
   * NOTE(review): quality values such as "gzip;q=0" are still not parsed.
   */
  def acceptsGzip(request: Request) =
    if (request.containsHeader(ACCEPT_ENCODING))
      request.getHeader(ACCEPT_ENCODING).split(",").map(_.trim).contains(GZIP)
    else false
}
| OpenCoin/opencoin-issuer-scala | src/main/scala/org/opencoin/issuer/Respond.scala | Scala | gpl-3.0 | 7,589 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command
import java.util.regex.Pattern
import scala.collection.mutable
import scala.util.control.NonFatal
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.EliminateSubqueryAliases
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.internal.HiveSerDe
import org.apache.spark.sql.sources.InsertableRelation
import org.apache.spark.sql.types._
/**
* A command used to create a data source table.
*
* Note: This is different from [[CreateTableCommand]]. Please check the syntax for difference.
* This is not intended for temporary tables.
*
* The syntax of using this command in SQL is:
* {{{
* CREATE TABLE [IF NOT EXISTS] [db_name.]table_name
* [(col1 data_type [COMMENT col_comment], ...)]
* USING format OPTIONS ([option1_name "option1_value", option2_name "option2_value", ...])
* }}}
*/
case class CreateDataSourceTableCommand(
    tableIdent: TableIdentifier,
    userSpecifiedSchema: Option[StructType],
    provider: String,
    options: Map[String, String],
    partitionColumns: Array[String],
    bucketSpec: Option[BucketSpec],
    ignoreIfExists: Boolean,
    managedIfNoPath: Boolean)
  extends RunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    // Since we are saving metadata to metastore, we need to check if metastore supports
    // the table name and database name we have for this query. MetaStoreUtils.validateName
    // is the method used by Hive to check if a table name or a database name is valid for
    // the metastore.
    if (!CreateDataSourceTableUtils.validateName(tableIdent.table)) {
      throw new AnalysisException(s"Table name ${tableIdent.table} is not a valid name for " +
        s"metastore. Metastore only accepts table name containing characters, numbers and _.")
    }
    if (tableIdent.database.isDefined &&
      !CreateDataSourceTableUtils.validateName(tableIdent.database.get)) {
      throw new AnalysisException(s"Database name ${tableIdent.database.get} is not a valid name " +
        s"for metastore. Metastore only accepts database name containing " +
        s"characters, numbers and _.")
    }

    // If the table already exists, either silently succeed (IF NOT EXISTS) or fail.
    val sessionState = sparkSession.sessionState
    if (sessionState.catalog.tableExists(tableIdent)) {
      if (ignoreIfExists) {
        return Seq.empty[Row]
      } else {
        throw new AnalysisException(s"Table ${tableIdent.unquotedString} already exists.")
      }
    }

    // The table is managed (non-external) only when the user supplied no explicit
    // "path" option AND managedIfNoPath is set; the data then lives under the
    // catalog's default location for this table.
    var isExternal = true
    val optionsWithPath =
      if (!new CaseInsensitiveMap(options).contains("path") && managedIfNoPath) {
        isExternal = false
        options + ("path" -> sessionState.catalog.defaultTablePath(tableIdent))
      } else {
        options
      }

    // Create the relation to validate the arguments before writing the metadata to the metastore.
    // checkPathExist = false because the path may legitimately not exist yet for a new table.
    DataSource(
      sparkSession = sparkSession,
      userSpecifiedSchema = userSpecifiedSchema,
      className = provider,
      bucketSpec = None,
      options = optionsWithPath).resolveRelation(checkPathExist = false)

    CreateDataSourceTableUtils.createDataSourceTable(
      sparkSession = sparkSession,
      tableIdent = tableIdent,
      userSpecifiedSchema = userSpecifiedSchema,
      partitionColumns = partitionColumns,
      bucketSpec = bucketSpec,
      provider = provider,
      options = optionsWithPath,
      isExternal = isExternal)

    Seq.empty[Row]
  }
}
}
/**
* A command used to create a data source table using the result of a query.
*
* Note: This is different from [[CreateTableAsSelectLogicalPlan]]. Please check the syntax for
* difference. This is not intended for temporary tables.
*
* The syntax of using this command in SQL is:
* {{{
* CREATE TABLE [IF NOT EXISTS] [db_name.]table_name
* USING format OPTIONS ([option1_name "option1_value", option2_name "option2_value", ...])
* AS SELECT ...
* }}}
*/
case class CreateDataSourceTableAsSelectCommand(
    tableIdent: TableIdentifier,
    provider: String,
    partitionColumns: Array[String],
    bucketSpec: Option[BucketSpec],
    mode: SaveMode,
    options: Map[String, String],
    query: LogicalPlan)
  extends RunnableCommand {

  override protected def innerChildren: Seq[LogicalPlan] = Seq(query)

  override def run(sparkSession: SparkSession): Seq[Row] = {
    // Since we are saving metadata to metastore, we need to check if metastore supports
    // the table name and database name we have for this query. MetaStoreUtils.validateName
    // is the method used by Hive to check if a table name or a database name is valid for
    // the metastore.
    if (!CreateDataSourceTableUtils.validateName(tableIdent.table)) {
      throw new AnalysisException(s"Table name ${tableIdent.table} is not a valid name for " +
        s"metastore. Metastore only accepts table name containing characters, numbers and _.")
    }
    if (tableIdent.database.isDefined &&
      !CreateDataSourceTableUtils.validateName(tableIdent.database.get)) {
      throw new AnalysisException(s"Database name ${tableIdent.database.get} is not a valid name " +
        s"for metastore. Metastore only accepts database name containing " +
        s"characters, numbers and _.")
    }

    val sessionState = sparkSession.sessionState
    val db = tableIdent.database.getOrElse(sessionState.catalog.getCurrentDatabase)
    val tableIdentWithDB = tableIdent.copy(database = Some(db))
    val tableName = tableIdentWithDB.unquotedString

    var createMetastoreTable = false
    var isExternal = true
    // A missing "path" option means the table is managed and stored at the
    // catalog's default location.
    val optionsWithPath =
      if (!new CaseInsensitiveMap(options).contains("path")) {
        isExternal = false
        options + ("path" -> sessionState.catalog.defaultTablePath(tableIdent))
      } else {
        options
      }

    var existingSchema = Option.empty[StructType]
    // Pass a table identifier with database part, so that `tableExists` won't check temp views
    // unexpectedly.
    if (sparkSession.sessionState.catalog.tableExists(tableIdentWithDB)) {
      // Check if we need to throw an exception or just return.
      mode match {
        case SaveMode.ErrorIfExists =>
          throw new AnalysisException(s"Table $tableName already exists. " +
            s"If you are using saveAsTable, you can set SaveMode to SaveMode.Append to " +
            s"insert data into the table or set SaveMode to SaveMode.Overwrite to overwrite" +
            s"the existing data. " +
            s"Or, if you are using SQL CREATE TABLE, you need to drop $tableName first.")
        case SaveMode.Ignore =>
          // Since the table already exists and the save mode is Ignore, we will just return.
          return Seq.empty[Row]
        case SaveMode.Append =>
          // Check if the specified data source match the data source of the existing table.
          val dataSource = DataSource(
            sparkSession = sparkSession,
            userSpecifiedSchema = Some(query.schema.asNullable),
            partitionColumns = partitionColumns,
            bucketSpec = bucketSpec,
            className = provider,
            options = optionsWithPath)
          // TODO: Check that options from the resolved relation match the relation that we are
          // inserting into (i.e. using the same compression).

          // Pass a table identifier with database part, so that `tableExists` won't check temp
          // views unexpectedly.
          EliminateSubqueryAliases(sessionState.catalog.lookupRelation(tableIdentWithDB)) match {
            case l @ LogicalRelation(_: InsertableRelation | _: HadoopFsRelation, _, _) =>
              // check if the file formats match
              l.relation match {
                case r: HadoopFsRelation if r.fileFormat.getClass != dataSource.providingClass =>
                  throw new AnalysisException(
                    s"The file format of the existing table $tableName is " +
                      s"`${r.fileFormat.getClass.getName}`. It doesn't match the specified " +
                      s"format `$provider`")
                case _ =>
              }
              // Appending requires the query to produce the same number of columns
              // as the existing table.
              if (query.schema.size != l.schema.size) {
                throw new AnalysisException(
                  s"The column number of the existing schema[${l.schema}] " +
                    s"doesn't match the data schema[${query.schema}]'s")
              }
              existingSchema = Some(l.schema)
            case s: SimpleCatalogRelation if DDLUtils.isDatasourceTable(s.metadata) =>
              existingSchema = DDLUtils.getSchemaFromTableProperties(s.metadata)
            case o =>
              throw new AnalysisException(s"Saving data in ${o.toString} is not supported.")
          }
        case SaveMode.Overwrite =>
          sessionState.catalog.dropTable(tableIdentWithDB, ignoreIfNotExists = true)
          // Need to create the table again.
          createMetastoreTable = true
      }
    } else {
      // The table does not exist. We need to create it in metastore.
      createMetastoreTable = true
    }

    val data = Dataset.ofRows(sparkSession, query)
    val df = existingSchema match {
      // If we are inserting into an existing table, just use the existing schema.
      case Some(s) => data.selectExpr(s.fieldNames: _*)
      case None => data
    }

    // Create the relation based on the data of df.
    val dataSource = DataSource(
      sparkSession,
      className = provider,
      partitionColumns = partitionColumns,
      bucketSpec = bucketSpec,
      options = optionsWithPath)

    val result = try {
      dataSource.write(mode, df)
    } catch {
      case ex: AnalysisException =>
        logError(s"Failed to write to table $tableName in $mode mode", ex)
        throw ex
    }
    if (createMetastoreTable) {
      // We will use the schema of resolved.relation as the schema of the table (instead of
      // the schema of df). It is important since the nullability may be changed by the relation
      // provider (for example, see org.apache.spark.sql.parquet.DefaultSource).
      CreateDataSourceTableUtils.createDataSourceTable(
        sparkSession = sparkSession,
        tableIdent = tableIdent,
        userSpecifiedSchema = Some(result.schema),
        partitionColumns = partitionColumns,
        bucketSpec = bucketSpec,
        provider = provider,
        options = optionsWithPath,
        isExternal = isExternal)
    }

    // Refresh the cache of the table in the catalog.
    sessionState.catalog.refreshTable(tableIdentWithDB)
    Seq.empty[Row]
  }
}
/**
 * Helpers shared by the CREATE TABLE commands above: property-key constants used
 * to persist data-source metadata in the Hive metastore, Hive-style name
 * validation, and the logic that decides whether a table can be stored in a
 * Hive-compatible format or only in the Spark SQL specific format.
 */
object CreateDataSourceTableUtils extends Logging {

  val DATASOURCE_PREFIX = "spark.sql.sources."
  val DATASOURCE_PROVIDER = DATASOURCE_PREFIX + "provider"
  val DATASOURCE_WRITEJOBUUID = DATASOURCE_PREFIX + "writeJobUUID"
  val DATASOURCE_OUTPUTPATH = DATASOURCE_PREFIX + "output.path"
  val DATASOURCE_SCHEMA = DATASOURCE_PREFIX + "schema"
  val DATASOURCE_SCHEMA_PREFIX = DATASOURCE_SCHEMA + "."
  val DATASOURCE_SCHEMA_NUMPARTS = DATASOURCE_SCHEMA_PREFIX + "numParts"
  val DATASOURCE_SCHEMA_NUMPARTCOLS = DATASOURCE_SCHEMA_PREFIX + "numPartCols"
  val DATASOURCE_SCHEMA_NUMSORTCOLS = DATASOURCE_SCHEMA_PREFIX + "numSortCols"
  val DATASOURCE_SCHEMA_NUMBUCKETS = DATASOURCE_SCHEMA_PREFIX + "numBuckets"
  val DATASOURCE_SCHEMA_NUMBUCKETCOLS = DATASOURCE_SCHEMA_PREFIX + "numBucketCols"
  val DATASOURCE_SCHEMA_PART_PREFIX = DATASOURCE_SCHEMA_PREFIX + "part."
  val DATASOURCE_SCHEMA_PARTCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "partCol."
  val DATASOURCE_SCHEMA_BUCKETCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "bucketCol."
  val DATASOURCE_SCHEMA_SORTCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "sortCol."

  // PERF FIX: the pattern was previously compiled on every validateName call.
  // java.util.regex.Pattern instances are immutable and thread-safe, so one
  // shared compiled instance is safe. (The `_` inside the class is redundant
  // since `\w` already matches it, but is kept for byte-compatibility with the
  // Hive-side behavior this mirrors.)
  private val validNamePattern = Pattern.compile("[\\w_]+")

  /**
   * Checks if the given name conforms the Hive standard ("[a-zA-z_0-9]+"),
   * i.e. if this name only contains characters, numbers, and _.
   *
   * This method is intended to have the same behavior of
   * org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName.
   */
  def validateName(name: String): Boolean = {
    validNamePattern.matcher(name).matches()
  }

  /**
   * Persists a data source table's metadata into the metastore, preferring a
   * Hive-compatible layout when possible and falling back to the Spark SQL
   * specific property-based format otherwise.
   */
  def createDataSourceTable(
      sparkSession: SparkSession,
      tableIdent: TableIdentifier,
      userSpecifiedSchema: Option[StructType],
      partitionColumns: Array[String],
      bucketSpec: Option[BucketSpec],
      provider: String,
      options: Map[String, String],
      isExternal: Boolean): Unit = {
    val tableProperties = new mutable.HashMap[String, String]
    tableProperties.put(DATASOURCE_PROVIDER, provider)

    // Saves optional user specified schema. Serialized JSON schema string may be too long to be
    // stored into a single metastore SerDe property. In this case, we split the JSON string and
    // store each part as a separate SerDe property.
    userSpecifiedSchema.foreach { schema =>
      val threshold = sparkSession.sessionState.conf.schemaStringLengthThreshold
      val schemaJsonString = schema.json
      // Split the JSON string.
      val parts = schemaJsonString.grouped(threshold).toSeq
      tableProperties.put(DATASOURCE_SCHEMA_NUMPARTS, parts.size.toString)
      parts.zipWithIndex.foreach { case (part, index) =>
        tableProperties.put(s"$DATASOURCE_SCHEMA_PART_PREFIX$index", part)
      }
    }

    if (userSpecifiedSchema.isDefined && partitionColumns.length > 0) {
      tableProperties.put(DATASOURCE_SCHEMA_NUMPARTCOLS, partitionColumns.length.toString)
      partitionColumns.zipWithIndex.foreach { case (partCol, index) =>
        tableProperties.put(s"$DATASOURCE_SCHEMA_PARTCOL_PREFIX$index", partCol)
      }
    }

    if (userSpecifiedSchema.isDefined && bucketSpec.isDefined) {
      val BucketSpec(numBuckets, bucketColumnNames, sortColumnNames) = bucketSpec.get

      tableProperties.put(DATASOURCE_SCHEMA_NUMBUCKETS, numBuckets.toString)
      tableProperties.put(DATASOURCE_SCHEMA_NUMBUCKETCOLS, bucketColumnNames.length.toString)
      bucketColumnNames.zipWithIndex.foreach { case (bucketCol, index) =>
        tableProperties.put(s"$DATASOURCE_SCHEMA_BUCKETCOL_PREFIX$index", bucketCol)
      }

      if (sortColumnNames.nonEmpty) {
        tableProperties.put(DATASOURCE_SCHEMA_NUMSORTCOLS, sortColumnNames.length.toString)
        sortColumnNames.zipWithIndex.foreach { case (sortCol, index) =>
          tableProperties.put(s"$DATASOURCE_SCHEMA_SORTCOL_PREFIX$index", sortCol)
        }
      }
    }

    if (userSpecifiedSchema.isEmpty && partitionColumns.length > 0) {
      // The table does not have a specified schema, which means that the schema will be inferred
      // when we load the table. So, we are not expecting partition columns and we will discover
      // partitions when we load the table. However, if there are specified partition columns,
      // we simply ignore them and provide a warning message.
      logWarning(
        s"The schema and partitions of table $tableIdent will be inferred when it is loaded. " +
          s"Specified partition columns (${partitionColumns.mkString(",")}) will be ignored.")
    }

    val tableType = if (isExternal) {
      tableProperties.put("EXTERNAL", "TRUE")
      CatalogTableType.EXTERNAL
    } else {
      tableProperties.put("EXTERNAL", "FALSE")
      CatalogTableType.MANAGED
    }

    val maybeSerDe = HiveSerDe.sourceToSerDe(provider, sparkSession.sessionState.conf)
    val dataSource =
      DataSource(
        sparkSession,
        userSpecifiedSchema = userSpecifiedSchema,
        partitionColumns = partitionColumns,
        bucketSpec = bucketSpec,
        className = provider,
        options = options)

    // Spark SQL specific format: empty schema in the catalog; the real schema
    // lives in the table properties written above.
    def newSparkSQLSpecificMetastoreTable(): CatalogTable = {
      CatalogTable(
        identifier = tableIdent,
        tableType = tableType,
        schema = Nil,
        storage = CatalogStorageFormat(
          locationUri = None,
          inputFormat = None,
          outputFormat = None,
          serde = None,
          compressed = false,
          serdeProperties = options
        ),
        properties = tableProperties.toMap)
    }

    // Hive compatible format: only valid for non-partitioned, non-bucketed
    // relations with a single input path (asserted below / matched by caller).
    def newHiveCompatibleMetastoreTable(
        relation: HadoopFsRelation,
        serde: HiveSerDe): CatalogTable = {
      assert(partitionColumns.isEmpty)
      assert(relation.partitionSchema.isEmpty)

      CatalogTable(
        identifier = tableIdent,
        tableType = tableType,
        storage = CatalogStorageFormat(
          locationUri = Some(relation.location.paths.map(_.toUri.toString).head),
          inputFormat = serde.inputFormat,
          outputFormat = serde.outputFormat,
          serde = serde.serde,
          compressed = false,
          serdeProperties = options
        ),
        schema = relation.schema.map { f =>
          CatalogColumn(f.name, f.dataType.catalogString)
        },
        properties = tableProperties.toMap,
        viewText = None)
    }

    // TODO: Support persisting partitioned data source relations in Hive compatible format
    val qualifiedTableName = tableIdent.quotedString
    val skipHiveMetadata = options.getOrElse("skipHiveMetadata", "false").toBoolean
    val resolvedRelation = dataSource.resolveRelation(checkPathExist = false)
    // Decide whether a Hive-compatible catalog entry can be produced; every
    // fallback case yields (None, explanatory log message).
    val (hiveCompatibleTable, logMessage) = (maybeSerDe, resolvedRelation) match {
      case _ if skipHiveMetadata =>
        val message =
          s"Persisting partitioned data source relation $qualifiedTableName into " +
            "Hive metastore in Spark SQL specific format, which is NOT compatible with Hive."
        (None, message)

      case (Some(serde), relation: HadoopFsRelation) if relation.location.paths.length == 1 &&
        relation.partitionSchema.isEmpty && relation.bucketSpec.isEmpty =>
        val hiveTable = newHiveCompatibleMetastoreTable(relation, serde)
        val message =
          s"Persisting data source relation $qualifiedTableName with a single input path " +
            s"into Hive metastore in Hive compatible format. Input path: " +
            s"${relation.location.paths.head}."
        (Some(hiveTable), message)

      case (Some(serde), relation: HadoopFsRelation) if relation.partitionSchema.nonEmpty =>
        val message =
          s"Persisting partitioned data source relation $qualifiedTableName into " +
            "Hive metastore in Spark SQL specific format, which is NOT compatible with Hive. " +
            "Input path(s): " + relation.location.paths.mkString("\n", "\n", "")
        (None, message)

      case (Some(serde), relation: HadoopFsRelation) if relation.bucketSpec.nonEmpty =>
        val message =
          s"Persisting bucketed data source relation $qualifiedTableName into " +
            "Hive metastore in Spark SQL specific format, which is NOT compatible with Hive. " +
            "Input path(s): " + relation.location.paths.mkString("\n", "\n", "")
        (None, message)

      case (Some(serde), relation: HadoopFsRelation) =>
        val message =
          s"Persisting data source relation $qualifiedTableName with multiple input paths into " +
            "Hive metastore in Spark SQL specific format, which is NOT compatible with Hive. " +
            s"Input paths: " + relation.location.paths.mkString("\n", "\n", "")
        (None, message)

      case (Some(serde), _) =>
        val message =
          s"Data source relation $qualifiedTableName is not a " +
            s"${classOf[HadoopFsRelation].getSimpleName}. Persisting it into Hive metastore " +
            "in Spark SQL specific format, which is NOT compatible with Hive."
        (None, message)

      case _ =>
        val message =
          s"Couldn't find corresponding Hive SerDe for data source provider $provider. " +
            s"Persisting data source relation $qualifiedTableName into Hive metastore in " +
            s"Spark SQL specific format, which is NOT compatible with Hive."
        (None, message)
    }

    (hiveCompatibleTable, logMessage) match {
      case (Some(table), message) =>
        // We first try to save the metadata of the table in a Hive compatible way.
        // If Hive throws an error, we fall back to save its metadata in the Spark SQL
        // specific way.
        try {
          logInfo(message)
          sparkSession.sessionState.catalog.createTable(table, ignoreIfExists = false)
        } catch {
          case NonFatal(e) =>
            val warningMessage =
              s"Could not persist $qualifiedTableName in a Hive compatible way. Persisting " +
                s"it into Hive metastore in Spark SQL specific format."
            logWarning(warningMessage, e)
            val table = newSparkSQLSpecificMetastoreTable()
            sparkSession.sessionState.catalog.createTable(table, ignoreIfExists = false)
        }

      case (None, message) =>
        logWarning(message)
        val table = newSparkSQLSpecificMetastoreTable()
        sparkSession.sessionState.catalog.createTable(table, ignoreIfExists = false)
    }
  }
}
| gioenn/xSpark | sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala | Scala | apache-2.0 | 21,894 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import org.apache.spark.sql.DataFrame
/**
* An interface for systems that can collect the results of a streaming query. In order to preserve
* exactly once semantics a sink must be idempotent in the face of multiple attempts to add the same
* batch.
*/
trait Sink extends BaseStreamingSink {
  /**
   * Adds a batch of data to this sink. The data for a given `batchId` is deterministic and if
   * this method is called more than once with the same batchId (which will happen in the case of
   * failures), then `data` should only be added once.
   *
   * Note 1: You cannot apply any operators on `data` except consuming it (e.g., `collect/foreach`).
   * Otherwise, you may get a wrong result.
   *
   * Note 2: The method is supposed to be executed synchronously, i.e. the method should only return
   * after data is consumed by sink successfully.
   *
   * @param batchId identifier of the batch; the same id may be delivered again after a
   *                failure, and implementations must deduplicate (see class-level contract)
   * @param data    the batch contents; consume only (e.g. `collect`/`foreach`), do not transform
   */
  def addBatch(batchId: Long, data: DataFrame): Unit
}
| bravo-zhang/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/Sink.scala | Scala | apache-2.0 | 1,771 |
package cn.edu.sjtu.omnilab.kalin.hz
import java.util.Random
import org.apache.spark._
import org.joda.time.DateTime
/**
* Sample users with specific ratio.
*/
object SampleUserJob {

  def main(args: Array[String]) {
    // Expected arguments: sampling ratio in [0, 1], input path, output path.
    if (args.length != 3){
      println("Usage: SampleUserJob <RATIO> <MOVDATA> <SAMPLE>")
      sys.exit(0)
    }

    val conf = new SparkConf()
      .setAppName("SampleUser")
    val sc = new SparkContext(conf)

    val samplingRatio = args(0).toDouble
    val input = args(1)
    val output = args(2)

    val logs = sc.textFile(input)
    // NOTE(review): this single Random is captured by the filter closure below
    // and serialized to every Spark task, so each task starts from an identical
    // PRNG state — per-user sampling decisions are therefore not independent
    // across partitions. Consider deriving the decision from a hash of `uid`
    // instead; verify whether reproducible sampling is desired here.
    val rand = new Random(System.currentTimeMillis());

    // Group records by user id (field 0), keep each user with probability
    // samplingRatio, then flatten the kept users' records back out unchanged.
    logs.map(_.split(",")).groupBy(_(0))
      .filter { case (uid, movdata) => {
        rand.nextDouble() <= samplingRatio
      }}.flatMap { case (uid, movdata) => movdata }
      .map(line => line.mkString(","))
      .saveAsTextFile(output)

    sc.stop()
  }

  /**
   * Parse date value as file name from recording time.
   * @param milliSecond epoch timestamp in milliseconds
   * @return a name of the form SETmmdd (month and day, zero-padded)
   */
  // NOTE(review): this helper is not called anywhere in this file; presumably
  // used by other jobs in the package — confirm before removing.
  def parseDay(milliSecond: Long): String = {
    val datetime = new DateTime(milliSecond)
    return "SET%02d%02d".format(datetime.getMonthOfYear, datetime.getDayOfMonth)
  }
}
| caesar0301/MDMS | kalin-etl/src/main/scala/cn/edu/sjtu/omnilab/kalin/hz/SampleUserJob.scala | Scala | apache-2.0 | 1,172 |
package org.mauritania.photosync
import java.io.File
import java.nio.file.{Paths, Files}
import java.nio.charset.StandardCharsets
/**
 * Small helpers for creating throwaway files and directories in tests.
 * All created paths are registered with deleteOnExit().
 */
object TestHelper {

  /**
   * Creates an empty file named `filename` under `parent`, creating any
   * intermediate directories first.
   *
   * @param parent   directory the file is created in
   * @param filename relative name (may contain path separators)
   * @return the created [[File]]
   */
  def touchFile(parent: File, filename: String): File = {
    val file = new File(parent, filename)
    file.getParentFile.mkdirs()
    file.createNewFile()
    file.deleteOnExit()
    file
  }

  /**
   * Creates a temporary file of exactly `size` bytes (filled with spaces).
   *
   * Note: `size` is truncated to Int because the content is built as an
   * in-memory string; intended for small test fixtures only.
   */
  def createTmpFile(prefix: String, size: Long): File = {
    val file = File.createTempFile(prefix, "tmp")
    file.deleteOnExit()
    Files.write(Paths.get(file.getAbsolutePath), (" " * size.toInt).getBytes(StandardCharsets.UTF_8))
    file
  }

  /**
   * Creates a temporary directory whose name starts with `prefix`.
   *
   * BUG FIX: the `prefix` parameter was previously ignored and the directory
   * was always named "photosync-tmp...".
   */
  def createTmpDir(prefix: String): File = {
    val path = Files.createTempDirectory(prefix)
    val file = path.toFile
    file.deleteOnExit()
    file
  }
}
| mauriciojost/olympus-photosync | src/test/scala/org/mauritania/photosync/TestHelper.scala | Scala | apache-2.0 | 766 |
// IntelliJ parameter-info test fixture: the code below is intentionally
// incomplete — the /*caret*/ marker and the trailing expectation comment
// are consumed by the test harness. Do not "fix" the syntax.
object test {
  case class AAA(i: Int)
}

object test2 {
  import test.{AAA => BBB}
  val x = BBB(1)
  x match {
    case BBB(/*caret*/)
  }
}
//i: Int | whorbowicz/intellij-scala | testdata/parameterInfo/patternParameterInfo/caseClasses/AliasedPattern.scala | Scala | apache-2.0 | 152 |
package com.scala.exercises.impatient.chapter5
import com.scala.exercises.ScalaExercise
import com.scala.interfaces.Exercise
/**
* Created by Who on 2014/7/7.
*/
/**
 * Chapter 5 exercises ("Scala for the Impatient"). Each addT registers a
 * demo task and each addQ registers a question's answer as a thunk; both are
 * provided by the ScalaExercise base class.
 */
class Exercise5 extends ScalaExercise with Exercise {
  override def getName: String = "Chapter5"

  addT(
    () => {
      val myCounter = new Counter
      myCounter.increment()
      print(myCounter.current)
    }
  )
  addT(
    () => {
      val person = new Person("Xu ZHANG")
      person.age = 100
      print(person.age)
    }
  )
  addT(
    () => {
      val chatter = new Network
      val myFace = new Network
      val fred = chatter.join("Fred")
      val wilma = chatter.join("Wilma")
      fred.contacts += wilma
      val barney = myFace.join("Barney")
      fred.contacts += barney
    }
  )
  addT(
    () => {
      val testOuter = (new Network).getTestOuter.getTestOuterNumber
      print(testOuter)
    }
  )

  // Q1: a counter that never wraps past Int.MaxValue.
  addQ(
    () => {
      class Counter {
        private var value = 0
        def increment() =
          if (value != Int.MaxValue) {
            value += 1
          }
        def getValue = value
      }
    }
  )

  // Q2: a bank account with read-only balance and deposit/withdraw.
  addQ(
    () => {
      class BankAccount {
        private var balanceValue = 0d
        def balance = balanceValue
        def deposit(value: Double) = {
          if (value > 0) {
            balanceValue += value
          }
        }
        def withdraw(value: Double) = {
          // BUG FIX: previously only `value <= balanceValue` was checked, so a
          // negative "withdrawal" would INCREASE the balance. Reject
          // non-positive amounts, mirroring the guard in deposit.
          if (value > 0 && value <= balanceValue) {
            balanceValue -= value
          }
        }
      }
      val bankAccount = new BankAccount
      bankAccount.deposit(1000)
      println("\\n" + bankAccount.balance)
      bankAccount.withdraw(499)
      println(bankAccount.balance)
    }
  )

  // Q3: Time compared field by field.
  addQ(
    () => {
      class Time(val hours: Int, val minutes: Int) {
        if (hours < 0 || hours > 23 || minutes < 0 || minutes > 59) {
          throw new Exception("Wrong format of the Time.")
        }
        def before(other: Time): Boolean = {
          if (hours < other.hours) {
            true
          }
          else if (hours > other.hours) {
            false
          }
          else {
            if (minutes < other.minutes) {
              true
            }
            else {
              false
            }
          }
        }
      }
      val time1 = new Time(22, 22)
      val time2 = new Time(21, 22)
      val time3 = new Time(22, 21)
      println("\\n" + time1.before(time2))
      println(time1.before(time3))
      println(time2.before(time1))
      println(time3.before(time1))
      println(time2.before(time3))
    }
  )

  // Q4: Time re-implemented with a single minutes-since-midnight field.
  addQ(
    () => {
      class Time(val hours: Int, val minutes: Int) {
        private var minutesFormat = 0
        if (hours < 0 || hours > 23 || minutes < 0 || minutes > 59) {
          throw new Exception("Wrong format of the Time.")
        }
        // The "- 1" offset is applied uniformly to both operands of the
        // comparison in before(), so it cancels out and does not change the
        // result — kept as-is for compatibility with the original answer.
        minutesFormat = hours * 60 + minutes - 1
        def before(other: Time): Boolean = {
          minutesFormat < other.minutesFormat
        }
      }
      val time1 = new Time(22, 22)
      val time2 = new Time(21, 22)
      val time3 = new Time(22, 21)
      println("\\n" + time1.before(time2))
      println(time1.before(time3))
      println(time2.before(time1))
      println(time3.before(time1))
      println(time2.before(time3))
    }
  )

  // Q5: intentionally left without an answer in this chapter.
  addQ()

  // Q6: clamp a negative age to zero in the primary constructor.
  addQ(
    () => {
      class Person(private var age: Int) {
        if (age < 0) {
          age = 0
        }
        def getAge = age
      }
      val p = new Person(-10)
      println("\\n" + p.getAge)
    }
  )

  // Q7: split a "First Last" name in the primary constructor.
  addQ(
    () => {
      class Person(name: String) {
        println("\\n" + "My name is " + name.split(" ")(0) + " " + name.split(" ")(1))
      }
      val p = new Person("Xu ZHANG")
    }
  )

  // Q8: auxiliary constructors.
  // NOTE(review): mixing overloaded constructors WITH default arguments (the
  // third `this`) makes calls like `new Car("a", "b")` ambiguous — this
  // compiles as a declaration but is a known Scala pitfall; confirm intent.
  addQ(
    () => {
      class Car {
        def this(producer: String, model: String) {
          this()
        }
        def this(producer: String, model: String, year: Int) {
          this(producer, model)
        }
        def this(producer: String, model: String, year: Int = -1, number: String = "") {
          this(producer, model, year)
        }
      }
    }
  )

  // Q9: intentionally left without an answer in this chapter.
  addQ()

  // Q10: primary-constructor-less Employee with an auxiliary constructor.
  addQ(
    () => {
      class Employee {
        val name = "John Q. Public"
        var salary = 0.0
        def this(name: String, salary: Double) {
          this()
        }
      }
    }
  )
}
| laonawuli/scalalearning | src/com/scala/exercises/impatient/chapter5/Exercise5.scala | Scala | mit | 4,310 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.metadata
import org.apache.flink.table.planner.plan.nodes.calcite.{LogicalExpand, LogicalRank}
import org.apache.flink.table.planner.plan.utils.ExpandUtil
import org.apache.flink.table.runtime.operators.rank.{ConstantRankRange, RankType}
import com.google.common.collect.ImmutableList
import org.apache.calcite.rel.`type`.RelDataTypeFieldImpl
import org.apache.calcite.rel.{RelCollations, RelNode}
import org.apache.calcite.sql.`type`.SqlTypeName.{BIGINT, VARCHAR}
import org.apache.calcite.util.ImmutableBitSet
import org.junit.Assert._
import org.junit.Test
import scala.collection.JavaConversions._
/**
 * Tests for the column-uniqueness metadata handler: verifies the results of
 * `mq.areColumnsUnique` (tri-state: TRUE / FALSE / null = unknown) for each
 * supported RelNode kind, using the shared plan fixtures (scans, projects,
 * aggregates, joins, set ops, ...) declared in [[FlinkRelMdHandlerTestBase]].
 *
 * NOTE(review): several method names below contain a stray "Count"
 * (e.g. testAreColumnsUniqueCountOnSort) — presumably copy-paste residue from
 * a column-null-count test; consider renaming for clarity.
 */
class FlinkRelMdColumnUniquenessTest extends FlinkRelMdHandlerTestBase {

  // Scans: the student fixture exposes a unique column 0; the emp fixture has
  // no key information, so every answer is unknown (null).
  @Test
  def testAreColumnsUniqueOnTableScan(): Unit = {
    Array(studentLogicalScan, studentFlinkLogicalScan, studentBatchScan, studentStreamScan)
      .foreach { scan =>
        assertFalse(mq.areColumnsUnique(scan, ImmutableBitSet.of()))
        assertTrue(mq.areColumnsUnique(scan, ImmutableBitSet.of(0)))
        (1 until scan.getRowType.getFieldCount).foreach { idx =>
          assertFalse(mq.areColumnsUnique(scan, ImmutableBitSet.of(idx)))
        }
        assertTrue(mq.areColumnsUnique(scan, ImmutableBitSet.of(0, 1)))
        assertTrue(mq.areColumnsUnique(scan, ImmutableBitSet.of(0, 2)))
        assertFalse(mq.areColumnsUnique(scan, ImmutableBitSet.of(1, 2)))
      }

    Array(empLogicalScan, empFlinkLogicalScan, empBatchScan, empStreamScan).foreach {
      scan =>
        (0 until scan.getRowType.getFieldCount).foreach { idx =>
          assertNull(mq.areColumnsUnique(scan, ImmutableBitSet.of(idx)))
        }
    }
  }

  // Values: uniqueness is derived from the literal tuples; an empty Values is
  // trivially unique on every column.
  @Test
  def testAreColumnsUniqueOnValues(): Unit = {
    assertTrue(mq.areColumnsUnique(logicalValues, ImmutableBitSet.of(0)))
    assertFalse(mq.areColumnsUnique(logicalValues, ImmutableBitSet.of(1)))
    assertTrue(mq.areColumnsUnique(logicalValues, ImmutableBitSet.of(2)))
    assertTrue(mq.areColumnsUnique(logicalValues, ImmutableBitSet.of(3)))
    assertTrue(mq.areColumnsUnique(logicalValues, ImmutableBitSet.of(4)))
    assertTrue(mq.areColumnsUnique(logicalValues, ImmutableBitSet.of(5)))
    assertFalse(mq.areColumnsUnique(logicalValues, ImmutableBitSet.of(6)))
    assertTrue(mq.areColumnsUnique(logicalValues, ImmutableBitSet.of(7)))

    (0 until emptyValues.getRowType.getFieldCount).foreach { idx =>
      assertTrue(mq.areColumnsUnique(emptyValues, ImmutableBitSet.of(idx)))
    }
  }

  // Project: uniqueness propagates through simple field references; plain
  // casts only count when ignoreNulls (3rd argument) is true.
  @Test
  def testAreColumnsUniqueOnProject(): Unit = {
    assertTrue(mq.areColumnsUnique(logicalProject, ImmutableBitSet.of(0)))
    assertFalse(mq.areColumnsUnique(logicalProject, ImmutableBitSet.of(1)))
    assertNull(mq.areColumnsUnique(logicalProject, ImmutableBitSet.of(2)))
    assertNull(mq.areColumnsUnique(logicalProject, ImmutableBitSet.of(3)))
    assertNull(mq.areColumnsUnique(logicalProject, ImmutableBitSet.of(4)))
    assertNull(mq.areColumnsUnique(logicalProject, ImmutableBitSet.of(5)))
    assertFalse(mq.areColumnsUnique(logicalProject, ImmutableBitSet.of(6)))
    assertNull(mq.areColumnsUnique(logicalProject, ImmutableBitSet.of(7)))
    assertNull(mq.areColumnsUnique(logicalProject, ImmutableBitSet.of(8)))
    assertNull(mq.areColumnsUnique(logicalProject, ImmutableBitSet.of(9)))
    assertNull(mq.areColumnsUnique(logicalProject, ImmutableBitSet.of(10)))
    assertNull(mq.areColumnsUnique(logicalProject, ImmutableBitSet.of(11)))
    assertTrue(mq.areColumnsUnique(logicalProject, ImmutableBitSet.of(0, 1)))
    assertTrue(mq.areColumnsUnique(logicalProject, ImmutableBitSet.of(0, 2)))
    assertFalse(mq.areColumnsUnique(logicalProject, ImmutableBitSet.of(1, 2)))
    assertNull(mq.areColumnsUnique(logicalProject, ImmutableBitSet.of(2, 3)))

    // project: id, cast(id as long not null), name, cast(name as varchar not null)
    relBuilder.push(studentLogicalScan)
    val exprs = List(
      relBuilder.field(0),
      relBuilder.cast(relBuilder.field(0), BIGINT),
      relBuilder.field(1),
      relBuilder.cast(relBuilder.field(1), VARCHAR)
    )
    val project = relBuilder.project(exprs).build()
    assertTrue(mq.areColumnsUnique(project, ImmutableBitSet.of(0)))
    assertNull(mq.areColumnsUnique(project, ImmutableBitSet.of(1)))
    assertTrue(mq.areColumnsUnique(project, ImmutableBitSet.of(1), true))
    assertFalse(mq.areColumnsUnique(project, ImmutableBitSet.of(2)))
    assertNull(mq.areColumnsUnique(project, ImmutableBitSet.of(3)))
    assertTrue(mq.areColumnsUnique(project, ImmutableBitSet.of(1, 2), true))
    assertTrue(mq.areColumnsUnique(project, ImmutableBitSet.of(1, 3), true))
  }

  // Filter: passes uniqueness of its input through unchanged.
  @Test
  def testAreColumnsUniqueOnFilter(): Unit = {
    assertFalse(mq.areColumnsUnique(logicalFilter, ImmutableBitSet.of()))
    assertTrue(mq.areColumnsUnique(logicalFilter, ImmutableBitSet.of(0)))
    (1 until logicalFilter.getRowType.getFieldCount).foreach { idx =>
      assertFalse(mq.areColumnsUnique(logicalFilter, ImmutableBitSet.of(idx)))
    }
    assertTrue(mq.areColumnsUnique(logicalFilter, ImmutableBitSet.of(0, 1)))
    assertTrue(mq.areColumnsUnique(logicalFilter, ImmutableBitSet.of(0, 2)))
    assertFalse(mq.areColumnsUnique(logicalFilter, ImmutableBitSet.of(1, 2)))
  }

  // Calc: same expectations as the equivalent Project (Calc = Project+Filter).
  @Test
  def testAreColumnsUniqueOnCalc(): Unit = {
    assertTrue(mq.areColumnsUnique(logicalCalc, ImmutableBitSet.of(0)))
    assertFalse(mq.areColumnsUnique(logicalCalc, ImmutableBitSet.of(1)))
    assertNull(mq.areColumnsUnique(logicalCalc, ImmutableBitSet.of(2)))
    assertNull(mq.areColumnsUnique(logicalCalc, ImmutableBitSet.of(3)))
    assertNull(mq.areColumnsUnique(logicalCalc, ImmutableBitSet.of(4)))
    assertNull(mq.areColumnsUnique(logicalCalc, ImmutableBitSet.of(5)))
    assertFalse(mq.areColumnsUnique(logicalCalc, ImmutableBitSet.of(6)))
    assertNull(mq.areColumnsUnique(logicalCalc, ImmutableBitSet.of(7)))
    assertNull(mq.areColumnsUnique(logicalCalc, ImmutableBitSet.of(8)))
    assertNull(mq.areColumnsUnique(logicalCalc, ImmutableBitSet.of(9)))
    assertNull(mq.areColumnsUnique(logicalCalc, ImmutableBitSet.of(10)))
    assertNull(mq.areColumnsUnique(logicalCalc, ImmutableBitSet.of(11)))
    assertTrue(mq.areColumnsUnique(logicalCalc, ImmutableBitSet.of(0, 1)))
    assertTrue(mq.areColumnsUnique(logicalCalc, ImmutableBitSet.of(0, 2)))
    assertFalse(mq.areColumnsUnique(logicalCalc, ImmutableBitSet.of(1, 2)))
    assertNull(mq.areColumnsUnique(logicalCalc, ImmutableBitSet.of(2, 3)))
  }

  // Expand: a column set is only unique together with the expand_id column
  // (index 7 in these fixtures), and only if the set covers a unique key in
  // every expand projection.
  @Test
  def testAreColumnsUniqueOnExpand(): Unit = {
    Array(logicalExpand, flinkLogicalExpand, batchExpand, streamExpand).foreach {
      expand =>
        assertFalse(mq.areColumnsUnique(expand, ImmutableBitSet.of()))
        (0 until expand.getRowType.getFieldCount).foreach { idx =>
          assertFalse(mq.areColumnsUnique(expand, ImmutableBitSet.of(idx)))
        }
        assertTrue(mq.areColumnsUnique(expand, ImmutableBitSet.of(0, 7)))
        (1 until expand.getRowType.getFieldCount - 1).foreach { idx =>
          assertFalse(mq.areColumnsUnique(expand, ImmutableBitSet.of(idx, 7)))
        }
    }

    // Expand whose projections null out column 0 in some branches: then even
    // (0, expand_id) is no longer unique.
    val expandOutputType = ExpandUtil.buildExpandRowType(
      cluster.getTypeFactory, studentLogicalScan.getRowType, Array.empty[Integer])
    val expandProjects = ExpandUtil.createExpandProjects(
      studentLogicalScan.getCluster.getRexBuilder,
      studentLogicalScan.getRowType,
      expandOutputType,
      ImmutableBitSet.of(0, 3, 5),
      ImmutableList.of(
        ImmutableBitSet.of(0, 3, 5),
        ImmutableBitSet.of(3, 5),
        ImmutableBitSet.of(3)),
      Array.empty[Integer])
    val logicalExpand2 = new LogicalExpand(cluster, studentLogicalScan.getTraitSet,
      studentLogicalScan, expandOutputType, expandProjects, 7)
    (0 until logicalExpand2.getRowType.getFieldCount - 1).foreach { idx =>
      assertFalse(mq.areColumnsUnique(logicalExpand2, ImmutableBitSet.of(idx, 7)))
    }
  }

  // Exchange: pure redistribution, passes uniqueness through unchanged.
  @Test
  def testAreColumnsUniqueOnExchange(): Unit = {
    Array(batchExchange, streamExchange).foreach {
      exchange =>
        assertFalse(mq.areColumnsUnique(exchange, ImmutableBitSet.of()))
        assertTrue(mq.areColumnsUnique(exchange, ImmutableBitSet.of(0)))
        (1 until exchange.getRowType.getFieldCount).foreach { idx =>
          assertFalse(mq.areColumnsUnique(exchange, ImmutableBitSet.of(idx)))
        }
        assertTrue(mq.areColumnsUnique(exchange, ImmutableBitSet.of(0, 1)))
        assertTrue(mq.areColumnsUnique(exchange, ImmutableBitSet.of(0, 2)))
        assertFalse(mq.areColumnsUnique(exchange, ImmutableBitSet.of(1, 2)))
    }
  }

  // Rank: input keys stay unique; for ROW_NUMBER the rank column (alone or
  // with the partition key) is unique as well.
  @Test
  def testAreColumnsUniqueOnRank(): Unit = {
    Array(logicalRank, flinkLogicalRank, batchLocalRank, batchGlobalRank, streamRank,
      logicalRankWithVariableRange, flinkLogicalRankWithVariableRange, streamRankWithVariableRange)
      .foreach {
        rank =>
          assertTrue(mq.areColumnsUnique(rank, ImmutableBitSet.of(0)))
          (1 until rank.getRowType.getFieldCount).foreach { idx =>
            assertFalse(mq.areColumnsUnique(rank, ImmutableBitSet.of(idx)))
          }
          assertTrue(mq.areColumnsUnique(rank, ImmutableBitSet.of(0, 1)))
          assertTrue(mq.areColumnsUnique(rank, ImmutableBitSet.of(0, 2)))
          assertFalse(mq.areColumnsUnique(rank, ImmutableBitSet.of(1, 2)))
      }

    Array(logicalRowNumber, flinkLogicalRowNumber, streamRowNumber).foreach {
      rank =>
        assertTrue(mq.areColumnsUnique(rank, ImmutableBitSet.of(0)))
        val rankFunColumn = rank.getRowType.getFieldCount - 1
        (1 until rankFunColumn).foreach { idx =>
          assertFalse(mq.areColumnsUnique(rank, ImmutableBitSet.of(idx)))
        }
        assertTrue(mq.areColumnsUnique(rank, ImmutableBitSet.of(rankFunColumn)))
        assertTrue(mq.areColumnsUnique(rank, ImmutableBitSet.of(0, rankFunColumn)))
        assertTrue(mq.areColumnsUnique(rank, ImmutableBitSet.of(0, 1)))
        assertTrue(mq.areColumnsUnique(rank, ImmutableBitSet.of(0, 2)))
        assertFalse(mq.areColumnsUnique(rank, ImmutableBitSet.of(1, 2)))
        assertTrue(mq.areColumnsUnique(rank, ImmutableBitSet.of(1, rankFunColumn)))
        assertTrue(mq.areColumnsUnique(rank, ImmutableBitSet.of(2, rankFunColumn)))
    }

    // ROW_NUMBER partitioned by column 6: row number alone is NOT unique,
    // only together with the partition key.
    val rowNumber = new LogicalRank(
      cluster,
      logicalTraits,
      studentLogicalScan,
      ImmutableBitSet.of(6),
      RelCollations.of(4),
      RankType.ROW_NUMBER,
      new ConstantRankRange(3, 6),
      new RelDataTypeFieldImpl("rn", 7, longType),
      outputRankNumber = true
    )
    assertTrue(mq.areColumnsUnique(rowNumber, ImmutableBitSet.of(0)))
    (1 until rowNumber.getRowType.getFieldCount).foreach { idx =>
      assertFalse(mq.areColumnsUnique(rowNumber, ImmutableBitSet.of(idx)))
    }
    assertTrue(mq.areColumnsUnique(rowNumber, ImmutableBitSet.of(0, 7)))
    assertFalse(mq.areColumnsUnique(rowNumber, ImmutableBitSet.of(1, 7)))
    // partition key and row number
    assertTrue(mq.areColumnsUnique(rowNumber, ImmutableBitSet.of(6, 7)))
  }

  // Sort/Limit variants: order/row-count changes do not affect uniqueness.
  @Test
  def testAreColumnsUniqueCountOnSort(): Unit = {
    Array(logicalSort, flinkLogicalSort, batchSort, streamSort,
      logicalLimit, flinkLogicalLimit, batchLimit, batchLocalLimit, batchGlobalLimit, streamLimit,
      logicalSortLimit, flinkLogicalSortLimit, batchSortLimit, batchLocalSortLimit,
      batchGlobalSortLimit, streamSortLimit).foreach {
      sort =>
        assertFalse(mq.areColumnsUnique(sort, ImmutableBitSet.of()))
        assertTrue(mq.areColumnsUnique(sort, ImmutableBitSet.of(0)))
        (1 until sort.getRowType.getFieldCount).foreach { idx =>
          assertFalse(mq.areColumnsUnique(sort, ImmutableBitSet.of(idx)))
        }
        assertTrue(mq.areColumnsUnique(sort, ImmutableBitSet.of(0, 1)))
        assertTrue(mq.areColumnsUnique(sort, ImmutableBitSet.of(0, 2)))
        assertFalse(mq.areColumnsUnique(sort, ImmutableBitSet.of(1, 2)))
    }
  }

  // Deduplicate: the dedup key (column 1 for first-row fixtures) becomes
  // unique; supersets that include non-key columns stay non-unique.
  @Test
  def testAreColumnsUniqueCountOnStreamExecDeduplicate(): Unit = {
    assertTrue(mq.areColumnsUnique(streamProcTimeDeduplicateFirstRow, ImmutableBitSet.of(1)))
    assertFalse(mq.areColumnsUnique(streamProcTimeDeduplicateFirstRow, ImmutableBitSet.of(0, 2)))
    assertFalse(mq.areColumnsUnique(streamProcTimeDeduplicateFirstRow, ImmutableBitSet.of(0, 1, 2)))
    assertFalse(mq.areColumnsUnique(streamProcTimeDeduplicateLastRow, ImmutableBitSet.of(0)))
    assertFalse(mq.areColumnsUnique(streamProcTimeDeduplicateLastRow, ImmutableBitSet.of(0, 1)))
    assertFalse(mq.areColumnsUnique(streamProcTimeDeduplicateLastRow, ImmutableBitSet.of(0, 1, 2)))

    assertTrue(mq.areColumnsUnique(streamRowTimeDeduplicateFirstRow, ImmutableBitSet.of(1)))
    assertFalse(mq.areColumnsUnique(streamRowTimeDeduplicateFirstRow, ImmutableBitSet.of(0, 2)))
    assertFalse(mq.areColumnsUnique(streamRowTimeDeduplicateFirstRow, ImmutableBitSet.of(0, 1, 2)))
    assertFalse(mq.areColumnsUnique(streamRowTimeDeduplicateLastRow, ImmutableBitSet.of(0)))
    assertFalse(mq.areColumnsUnique(streamRowTimeDeduplicateLastRow, ImmutableBitSet.of(0, 1)))
    assertFalse(mq.areColumnsUnique(streamRowTimeDeduplicateLastRow, ImmutableBitSet.of(0, 1, 2)))
  }

  // ChangelogNormalize: unique exactly on its (composite) upsert key {0, 1}.
  @Test
  def testAreColumnsUniqueCountOnStreamExecChangelogNormalize(): Unit = {
    assertTrue(mq.areColumnsUnique(streamChangelogNormalize, ImmutableBitSet.of(0, 1)))
    assertTrue(mq.areColumnsUnique(streamChangelogNormalize, ImmutableBitSet.of(1, 0)))
    assertFalse(mq.areColumnsUnique(streamChangelogNormalize, ImmutableBitSet.of(1)))
    assertFalse(mq.areColumnsUnique(streamChangelogNormalize, ImmutableBitSet.of(2)))
    assertFalse(mq.areColumnsUnique(streamChangelogNormalize, ImmutableBitSet.of(1, 2)))
  }

  // DropUpdateBefore: pass-through node, input uniqueness is preserved.
  @Test
  def testAreColumnsUniqueCountOnStreamExecDropUpdateBefore(): Unit = {
    assertFalse(mq.areColumnsUnique(streamDropUpdateBefore, ImmutableBitSet.of()))
    assertTrue(mq.areColumnsUnique(streamDropUpdateBefore, ImmutableBitSet.of(0)))
    assertTrue(mq.areColumnsUnique(streamDropUpdateBefore, ImmutableBitSet.of(0, 1)))
    assertTrue(mq.areColumnsUnique(streamDropUpdateBefore, ImmutableBitSet.of(0, 2)))
    assertFalse(mq.areColumnsUnique(streamDropUpdateBefore, ImmutableBitSet.of(1, 2)))
  }

  // Aggregate: any set containing all group keys (column 0 here) is unique.
  @Test
  def testAreColumnsUniqueOnAggregate(): Unit = {
    Array(logicalAgg, flinkLogicalAgg).foreach { agg =>
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0)))
      val fieldCnt = agg.getRowType.getFieldCount
      (1 until fieldCnt).foreach { idx =>
        assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(idx)))
      }
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 1)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 2)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(1, 2)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 1, 2)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.range(1, fieldCnt)))
    }

    Array(logicalAggWithAuxGroup, flinkLogicalAggWithAuxGroup).foreach { agg =>
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0)))
      val fieldCnt = agg.getRowType.getFieldCount
      (1 until fieldCnt).foreach { idx =>
        assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(idx)))
      }
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 1)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 2)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(1, 2)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 1, 2)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.range(1, fieldCnt)))
    }
  }

  // Batch aggregates: global aggs behave like logical aggs; local (partial)
  // aggs have not grouped completely yet, so uniqueness is unknown.
  @Test
  def testAreColumnsUniqueOnBatchExecAggregate(): Unit = {
    Array(batchGlobalAggWithLocal, batchGlobalAggWithoutLocal).foreach { agg =>
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0)))
      val fieldCnt = agg.getRowType.getFieldCount
      (1 until fieldCnt).foreach { idx =>
        assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(idx)))
      }
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 1)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(1, 2)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.range(1, fieldCnt)))
    }

    Array(batchGlobalAggWithLocalWithAuxGroup, batchGlobalAggWithoutLocalWithAuxGroup)
      .foreach { agg =>
        assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0)))
        val fieldCnt = agg.getRowType.getFieldCount
        (1 until fieldCnt).foreach { idx =>
          assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(idx)))
        }
        assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 1)))
        assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(1, 2)))
        assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.range(1, fieldCnt)))
      }

    // always return null for local agg
    Array(batchLocalAgg, batchLocalAggWithAuxGroup).foreach { agg =>
      (0 until agg.getRowType.getFieldCount).foreach { idx =>
        assertNull(mq.areColumnsUnique(agg, ImmutableBitSet.of(idx)))
      }
    }
  }

  // Stream aggregates: same pattern as the batch case above.
  @Test
  def testAreColumnsUniqueOnStreamExecAggregate(): Unit = {
    Array(streamGlobalAggWithLocal, streamGlobalAggWithoutLocal).foreach { agg =>
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0)))
      val fieldCnt = agg.getRowType.getFieldCount
      (1 until fieldCnt).foreach { idx =>
        assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(idx)))
      }
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 1)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(1, 2)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.range(1, fieldCnt)))
    }

    // always return null for local agg
    (0 until streamLocalAgg.getRowType.getFieldCount).foreach { idx =>
      assertNull(mq.areColumnsUnique(streamLocalAgg, ImmutableBitSet.of(idx)))
    }
  }

  // Window aggregates: group keys are unique only together with a window
  // property column (start/end/rowtime/proctime); local window aggs → null.
  @Test
  def testAreColumnsUniqueOnWindowAgg(): Unit = {
    Array(logicalWindowAgg, flinkLogicalWindowAgg, batchGlobalWindowAggWithLocalAgg,
      batchGlobalWindowAggWithoutLocalAgg, streamWindowAgg).foreach { agg =>
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 1)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 2)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 3)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 1, 2)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 1, 3)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 1, 4)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 1, 5)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 1, 6)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 1, 3, 4, 5, 6)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 2, 3)))
    }
    assertNull(mq.areColumnsUnique(batchLocalWindowAgg, ImmutableBitSet.of(0, 1)))
    assertNull(mq.areColumnsUnique(batchLocalWindowAgg, ImmutableBitSet.of(0, 1, 3)))

    Array(logicalWindowAgg2, flinkLogicalWindowAgg2, batchGlobalWindowAggWithLocalAgg2,
      batchGlobalWindowAggWithoutLocalAgg2, streamWindowAgg2).foreach { agg =>
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 1)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 2)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 3)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 4)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 5)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 2, 3, 4, 5)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(1, 2)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(1, 3)))
    }
    assertNull(mq.areColumnsUnique(batchLocalWindowAgg2, ImmutableBitSet.of(0, 1)))
    assertNull(mq.areColumnsUnique(batchLocalWindowAgg2, ImmutableBitSet.of(0, 2)))

    Array(logicalWindowAggWithAuxGroup, flinkLogicalWindowAggWithAuxGroup,
      batchGlobalWindowAggWithLocalAggWithAuxGroup, batchGlobalWindowAggWithoutLocalAggWithAuxGroup
    ).foreach { agg =>
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 1)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 2)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 1, 2)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 3)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 4)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 5)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 6)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 3, 4, 5, 6)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(1, 3)))
    }
    assertNull(mq.areColumnsUnique(batchLocalWindowAggWithAuxGroup, ImmutableBitSet.of(0, 1)))
    assertNull(mq.areColumnsUnique(batchLocalWindowAggWithAuxGroup, ImmutableBitSet.of(0, 3)))
  }

  // Over aggregate: input columns keep their uniqueness; over-agg output
  // columns (5+) alone are unknown, but become unique with a unique input key.
  @Test
  def testAreColumnsUniqueOnOverAgg(): Unit = {
    Array(flinkLogicalOverAgg, batchOverAgg).foreach { agg =>
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(1)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(2)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(3)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(4)))
      assertNull(mq.areColumnsUnique(agg, ImmutableBitSet.of(5)))
      assertNull(mq.areColumnsUnique(agg, ImmutableBitSet.of(6)))
      assertNull(mq.areColumnsUnique(agg, ImmutableBitSet.of(7)))
      assertNull(mq.areColumnsUnique(agg, ImmutableBitSet.of(8)))
      assertNull(mq.areColumnsUnique(agg, ImmutableBitSet.of(9)))
      assertNull(mq.areColumnsUnique(agg, ImmutableBitSet.of(10)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 1)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 2)))
      assertFalse(mq.areColumnsUnique(agg, ImmutableBitSet.of(1, 2)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 5)))
      assertTrue(mq.areColumnsUnique(agg, ImmutableBitSet.of(0, 10)))
      assertNull(mq.areColumnsUnique(agg, ImmutableBitSet.of(5, 10)))
    }

    assertTrue(mq.areColumnsUnique(streamOverAgg, ImmutableBitSet.of(0)))
    assertFalse(mq.areColumnsUnique(streamOverAgg, ImmutableBitSet.of(1)))
    assertFalse(mq.areColumnsUnique(streamOverAgg, ImmutableBitSet.of(2)))
    assertFalse(mq.areColumnsUnique(streamOverAgg, ImmutableBitSet.of(3)))
    assertFalse(mq.areColumnsUnique(streamOverAgg, ImmutableBitSet.of(4)))
    assertNull(mq.areColumnsUnique(streamOverAgg, ImmutableBitSet.of(5)))
    assertNull(mq.areColumnsUnique(streamOverAgg, ImmutableBitSet.of(6)))
    assertNull(mq.areColumnsUnique(streamOverAgg, ImmutableBitSet.of(7)))
    assertTrue(mq.areColumnsUnique(streamOverAgg, ImmutableBitSet.of(0, 1)))
    assertTrue(mq.areColumnsUnique(streamOverAgg, ImmutableBitSet.of(0, 2)))
    assertFalse(mq.areColumnsUnique(streamOverAgg, ImmutableBitSet.of(1, 2)))
    assertTrue(mq.areColumnsUnique(streamOverAgg, ImmutableBitSet.of(0, 5)))
    assertTrue(mq.areColumnsUnique(streamOverAgg, ImmutableBitSet.of(0, 7)))
    assertNull(mq.areColumnsUnique(streamOverAgg, ImmutableBitSet.of(5, 7)))
  }

  // Joins: uniqueness depends on join type and which side may produce nulls
  // (outer sides lose per-side uniqueness); semi/anti keep the left side only.
  @Test
  def testAreColumnsUniqueOnJoin(): Unit = {
    // inner join
    assertFalse(mq.areColumnsUnique(logicalInnerJoinOnUniqueKeys, ImmutableBitSet.of(0)))
    assertTrue(mq.areColumnsUnique(logicalInnerJoinOnUniqueKeys, ImmutableBitSet.of(1)))
    assertFalse(mq.areColumnsUnique(logicalInnerJoinOnUniqueKeys, ImmutableBitSet.of(2)))
    assertTrue(mq.areColumnsUnique(logicalInnerJoinOnUniqueKeys, ImmutableBitSet.of(5)))
    assertFalse(mq.areColumnsUnique(logicalInnerJoinOnUniqueKeys, ImmutableBitSet.of(6)))
    assertTrue(mq.areColumnsUnique(logicalInnerJoinOnUniqueKeys, ImmutableBitSet.of(0, 1)))
    assertFalse(mq.areColumnsUnique(logicalInnerJoinOnUniqueKeys, ImmutableBitSet.of(0, 2)))
    assertTrue(mq.areColumnsUnique(logicalInnerJoinOnUniqueKeys, ImmutableBitSet.of(1, 2)))
    assertFalse(mq.areColumnsUnique(logicalInnerJoinOnUniqueKeys, ImmutableBitSet.of(0, 5)))
    assertTrue(mq.areColumnsUnique(logicalInnerJoinOnUniqueKeys, ImmutableBitSet.of(1, 5)))
    assertFalse(mq.areColumnsUnique(logicalInnerJoinOnUniqueKeys, ImmutableBitSet.of(0, 6)))
    assertFalse(mq.areColumnsUnique(logicalInnerJoinOnUniqueKeys, ImmutableBitSet.of(1, 6)))
    assertTrue(mq.areColumnsUnique(logicalInnerJoinOnUniqueKeys, ImmutableBitSet.of(5, 6)))
    assertTrue(mq.areColumnsUnique(logicalInnerJoinOnUniqueKeys, ImmutableBitSet.of(0, 1, 5, 6)))

    // left join
    assertFalse(mq.areColumnsUnique(logicalLeftJoinOnUniqueKeys, ImmutableBitSet.of(0)))
    assertTrue(mq.areColumnsUnique(logicalLeftJoinOnUniqueKeys, ImmutableBitSet.of(1)))
    assertFalse(mq.areColumnsUnique(logicalLeftJoinOnUniqueKeys, ImmutableBitSet.of(2)))
    assertFalse(mq.areColumnsUnique(logicalLeftJoinOnUniqueKeys, ImmutableBitSet.of(5)))
    assertFalse(mq.areColumnsUnique(logicalLeftJoinOnUniqueKeys, ImmutableBitSet.of(6)))
    assertTrue(mq.areColumnsUnique(logicalLeftJoinOnUniqueKeys, ImmutableBitSet.of(0, 1)))
    assertFalse(mq.areColumnsUnique(logicalLeftJoinOnUniqueKeys, ImmutableBitSet.of(0, 2)))
    assertTrue(mq.areColumnsUnique(logicalLeftJoinOnUniqueKeys, ImmutableBitSet.of(1, 5)))
    assertFalse(mq.areColumnsUnique(logicalLeftJoinOnUniqueKeys, ImmutableBitSet.of(1, 6)))
    assertFalse(mq.areColumnsUnique(logicalLeftJoinOnUniqueKeys, ImmutableBitSet.of(5, 6)))
    assertTrue(mq.areColumnsUnique(logicalLeftJoinOnUniqueKeys, ImmutableBitSet.of(0, 1, 5, 6)))

    // right join
    assertFalse(mq.areColumnsUnique(logicalRightJoinOnUniqueKeys, ImmutableBitSet.of(0)))
    assertFalse(mq.areColumnsUnique(logicalRightJoinOnUniqueKeys, ImmutableBitSet.of(1)))
    assertFalse(mq.areColumnsUnique(logicalRightJoinOnUniqueKeys, ImmutableBitSet.of(2)))
    assertTrue(mq.areColumnsUnique(logicalRightJoinOnUniqueKeys, ImmutableBitSet.of(5)))
    assertFalse(mq.areColumnsUnique(logicalRightJoinOnUniqueKeys, ImmutableBitSet.of(6)))
    assertFalse(mq.areColumnsUnique(logicalRightJoinOnUniqueKeys, ImmutableBitSet.of(0, 1)))
    assertFalse(mq.areColumnsUnique(logicalRightJoinOnUniqueKeys, ImmutableBitSet.of(0, 2)))
    assertTrue(mq.areColumnsUnique(logicalRightJoinOnUniqueKeys, ImmutableBitSet.of(1, 5)))
    assertFalse(mq.areColumnsUnique(logicalRightJoinOnUniqueKeys, ImmutableBitSet.of(1, 6)))
    assertTrue(mq.areColumnsUnique(logicalRightJoinOnUniqueKeys, ImmutableBitSet.of(5, 6)))
    assertTrue(mq.areColumnsUnique(logicalRightJoinOnUniqueKeys, ImmutableBitSet.of(0, 1, 5, 6)))

    // full join
    assertFalse(mq.areColumnsUnique(logicalFullJoinOnUniqueKeys, ImmutableBitSet.of(0)))
    assertFalse(mq.areColumnsUnique(logicalFullJoinOnUniqueKeys, ImmutableBitSet.of(1)))
    assertFalse(mq.areColumnsUnique(logicalFullJoinOnUniqueKeys, ImmutableBitSet.of(2)))
    assertFalse(mq.areColumnsUnique(logicalFullJoinOnUniqueKeys, ImmutableBitSet.of(5)))
    assertFalse(mq.areColumnsUnique(logicalFullJoinOnUniqueKeys, ImmutableBitSet.of(6)))
    assertFalse(mq.areColumnsUnique(logicalFullJoinOnUniqueKeys, ImmutableBitSet.of(0, 1)))
    assertFalse(mq.areColumnsUnique(logicalFullJoinOnUniqueKeys, ImmutableBitSet.of(0, 2)))
    assertTrue(mq.areColumnsUnique(logicalFullJoinOnUniqueKeys, ImmutableBitSet.of(1, 5)))
    assertFalse(mq.areColumnsUnique(logicalFullJoinOnUniqueKeys, ImmutableBitSet.of(1, 6)))
    assertFalse(mq.areColumnsUnique(logicalFullJoinOnUniqueKeys, ImmutableBitSet.of(5, 6)))
    assertTrue(mq.areColumnsUnique(logicalFullJoinOnUniqueKeys, ImmutableBitSet.of(0, 1, 5, 6)))

    // semi/anti join
    Array(logicalSemiJoinOnUniqueKeys, logicalSemiJoinNotOnUniqueKeys,
      logicalSemiJoinOnDisjointKeys, logicalAntiJoinOnUniqueKeys, logicalAntiJoinNotOnUniqueKeys,
      logicalAntiJoinOnDisjointKeys).foreach { join =>
      assertFalse(mq.areColumnsUnique(join, ImmutableBitSet.of(0)))
      assertTrue(mq.areColumnsUnique(join, ImmutableBitSet.of(1)))
      assertFalse(mq.areColumnsUnique(join, ImmutableBitSet.of(2)))
      assertFalse(mq.areColumnsUnique(join, ImmutableBitSet.of(3)))
      assertFalse(mq.areColumnsUnique(join, ImmutableBitSet.of(4)))
      assertTrue(mq.areColumnsUnique(join, ImmutableBitSet.of(0, 1)))
      assertFalse(mq.areColumnsUnique(join, ImmutableBitSet.of(0, 2)))
    }
  }

  // Lookup join: the dimension side is external, so everything is unknown.
  @Test
  def testAreColumnsUniqueOnLookupJoin(): Unit = {
    Array(batchLookupJoin, streamLookupJoin).foreach { join =>
      assertFalse(mq.areColumnsUnique(join, ImmutableBitSet.of()))
      assertNull(mq.areColumnsUnique(join, ImmutableBitSet.of(0)))
      assertNull(mq.areColumnsUnique(join, ImmutableBitSet.of(1)))
      assertNull(mq.areColumnsUnique(join, ImmutableBitSet.of(2)))
      assertNull(mq.areColumnsUnique(join, ImmutableBitSet.of(3)))
      assertNull(mq.areColumnsUnique(join, ImmutableBitSet.of(4)))
      assertNull(mq.areColumnsUnique(join, ImmutableBitSet.of(5)))
      assertNull(mq.areColumnsUnique(join, ImmutableBitSet.of(6)))
      assertNull(mq.areColumnsUnique(join, ImmutableBitSet.of(7)))
      assertNull(mq.areColumnsUnique(join, ImmutableBitSet.of(8)))
      assertNull(mq.areColumnsUnique(join, ImmutableBitSet.of(9)))
      assertNull(mq.areColumnsUnique(join, ImmutableBitSet.of(0, 1)))
      assertNull(mq.areColumnsUnique(join, ImmutableBitSet.of(1, 2)))
      assertNull(mq.areColumnsUnique(join, ImmutableBitSet.of(0, 7)))
      assertNull(mq.areColumnsUnique(join, ImmutableBitSet.of(1, 7)))
      assertNull(mq.areColumnsUnique(join, ImmutableBitSet.of(0, 8)))
      assertNull(mq.areColumnsUnique(join, ImmutableBitSet.of(7, 8)))
      assertNull(mq.areColumnsUnique(join, ImmutableBitSet.of(8, 9)))
    }
  }

  // Union: UNION ALL never guarantees uniqueness; UNION (distinct) is unique
  // on the full column set only.
  @Test
  def testAreColumnsUniqueOnUnion(): Unit = {
    val fieldCnt = logicalUnionAll.getRowType.getFieldCount
    (0 until fieldCnt).foreach { idx =>
      assertFalse(mq.areColumnsUnique(logicalUnionAll, ImmutableBitSet.of(idx)))
    }
    assertFalse(mq.areColumnsUnique(logicalUnionAll, ImmutableBitSet.range(fieldCnt)))
    (0 until fieldCnt).foreach { idx =>
      assertFalse(mq.areColumnsUnique(logicalUnion, ImmutableBitSet.of(idx)))
    }
    assertTrue(mq.areColumnsUnique(logicalUnion, ImmutableBitSet.range(fieldCnt)))
  }

  // Intersect: rows come from the left input, so its uniqueness is kept.
  @Test
  def testAreColumnsUniqueOnIntersect(): Unit = {
    assertFalse(mq.areColumnsUnique(logicalIntersectAll, ImmutableBitSet.of(0)))
    assertTrue(mq.areColumnsUnique(logicalIntersectAll, ImmutableBitSet.of(1)))
    assertFalse(mq.areColumnsUnique(logicalIntersectAll, ImmutableBitSet.of(2)))
    assertTrue(mq.areColumnsUnique(logicalIntersectAll, ImmutableBitSet.of(1, 2)))
    assertFalse(mq.areColumnsUnique(logicalIntersectAll, ImmutableBitSet.of(0, 2)))
    // NOTE(review): exact duplicate of the (1, 2) assertion two lines above —
    // probably a different column set (e.g. (0, 1)) was intended.
    assertTrue(mq.areColumnsUnique(logicalIntersectAll, ImmutableBitSet.of(1, 2)))

    assertFalse(mq.areColumnsUnique(logicalIntersect, ImmutableBitSet.of(0)))
    assertTrue(mq.areColumnsUnique(logicalIntersect, ImmutableBitSet.of(1)))
    assertFalse(mq.areColumnsUnique(logicalIntersect, ImmutableBitSet.of(2)))
    assertTrue(mq.areColumnsUnique(logicalIntersect, ImmutableBitSet.of(1, 2)))
    assertFalse(mq.areColumnsUnique(logicalIntersect, ImmutableBitSet.of(0, 2)))
    // NOTE(review): duplicate of the (1, 2) assertion above, same as in the
    // IntersectAll section.
    assertTrue(mq.areColumnsUnique(logicalIntersect, ImmutableBitSet.of(1, 2)))
    assertTrue(mq.areColumnsUnique(logicalIntersect,
      ImmutableBitSet.range(logicalIntersect.getRowType.getFieldCount)))
  }

  // Minus: rows come from the left input, so its uniqueness is kept; when the
  // left input has no key info, results are unknown except the full set for
  // MINUS (distinct).
  @Test
  def testAreColumnsUniqueOnMinus(): Unit = {
    assertFalse(mq.areColumnsUnique(logicalMinusAll, ImmutableBitSet.of(0)))
    assertTrue(mq.areColumnsUnique(logicalMinusAll, ImmutableBitSet.of(1)))
    assertFalse(mq.areColumnsUnique(logicalMinusAll, ImmutableBitSet.of(2)))
    assertTrue(mq.areColumnsUnique(logicalMinusAll, ImmutableBitSet.of(1, 2)))
    assertFalse(mq.areColumnsUnique(logicalMinusAll, ImmutableBitSet.of(0, 2)))
    // NOTE(review): duplicate of the (1, 2) assertion two lines above.
    assertTrue(mq.areColumnsUnique(logicalMinusAll, ImmutableBitSet.of(1, 2)))

    assertFalse(mq.areColumnsUnique(logicalMinus, ImmutableBitSet.of(0)))
    assertTrue(mq.areColumnsUnique(logicalMinus, ImmutableBitSet.of(1)))
    assertFalse(mq.areColumnsUnique(logicalMinus, ImmutableBitSet.of(2)))
    assertTrue(mq.areColumnsUnique(logicalMinus, ImmutableBitSet.of(1, 2)))
    assertFalse(mq.areColumnsUnique(logicalMinus, ImmutableBitSet.of(0, 2)))
    // NOTE(review): duplicate of the (1, 2) assertion two lines above.
    assertTrue(mq.areColumnsUnique(logicalMinus, ImmutableBitSet.of(1, 2)))
    assertTrue(mq.areColumnsUnique(logicalMinus,
      ImmutableBitSet.range(logicalMinus.getRowType.getFieldCount)))

    // SELECT * FROM MyTable2 MINUS SELECT * FROM MyTable1
    val logicalMinus2: RelNode = relBuilder
      .scan("MyTable2")
      .scan("MyTable1")
      .minus(false).build()
    assertNull(mq.areColumnsUnique(logicalMinus2, ImmutableBitSet.of(0)))
    assertNull(mq.areColumnsUnique(logicalMinus2, ImmutableBitSet.of(1)))
    assertNull(mq.areColumnsUnique(logicalMinus2, ImmutableBitSet.of(2)))
    assertNull(mq.areColumnsUnique(logicalMinus2, ImmutableBitSet.of(1, 2)))
    assertNull(mq.areColumnsUnique(logicalMinus2, ImmutableBitSet.of(0, 2)))
    // NOTE(review): duplicate of the (1, 2) assertion two lines above.
    assertNull(mq.areColumnsUnique(logicalMinus2, ImmutableBitSet.of(1, 2)))
    assertTrue(mq.areColumnsUnique(logicalMinus2,
      ImmutableBitSet.range(logicalMinus2.getRowType.getFieldCount)))
  }

  // NOTE(review): the method name says "ColumnNullCount" but the body tests
  // areColumnsUnique on an unhandled node — likely a copy-paste name; should
  // be something like testAreColumnsUniqueOnDefault.
  @Test
  def testGetColumnNullCountOnDefault(): Unit = {
    (0 until testRel.getRowType.getFieldCount).foreach { idx =>
      assertNull(mq.areColumnsUnique(testRel, ImmutableBitSet.of(idx)))
    }
  }

}
| aljoscha/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/metadata/FlinkRelMdColumnUniquenessTest.scala | Scala | apache-2.0 | 34,287 |
package com.sksamuel.elastic4s.requests.searches
import com.sksamuel.exts.OptionImplicits._
import scala.concurrent.duration.FiniteDuration
/** DSL entry points for building scroll-related requests. */
trait ScrollApi {

  def searchScroll(id: String, keepAlive: String): SearchScrollRequest =
    SearchScrollRequest(id, keepAlive = Some(keepAlive))

  def searchScroll(id: String): SearchScrollRequest = SearchScrollRequest(id)

  def clearScroll(first: String, rest: String*): ClearScrollRequest =
    clearScroll(first +: rest)

  def clearScroll(ids: Iterable[String]): ClearScrollRequest =
    ClearScrollRequest(ids.toSeq)
}

/**
 * A request to continue fetching hits from an open scroll cursor.
 *
 * @param id        the scroll id returned by the previous search or scroll call
 * @param keepAlive how long the scroll context should stay open, e.g. "1m"
 */
case class SearchScrollRequest(id: String, keepAlive: Option[String] = None) {

  /** Returns a copy with the keep-alive set from a time string such as "30s". */
  def keepAlive(keepAlive: String): SearchScrollRequest =
    copy(keepAlive = Some(keepAlive))

  /** Returns a copy with the keep-alive rendered as whole seconds ("90s"). */
  def keepAlive(duration: FiniteDuration): SearchScrollRequest =
    copy(keepAlive = Some(s"${duration.toSeconds}s"))
}

/** A request to release the server-side resources held by the given scroll ids. */
case class ClearScrollRequest(ids: Seq[String])
| stringbean/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/searches/ScrollApi.scala | Scala | apache-2.0 | 902 |
package Scheduler
import Master.{GameMode, TournamentMode}
import Master.Types.{Game, Round}
import Master.UeberActor.{FinishedSchedule, MakeSchedule}
import akka.actor.{Actor, Props}
import org.joda.time.DateTime
import org.joda.time.format.DateTimeFormat
import com.github.nscala_time.time.Imports._
/**
* This actor gets a List of Rounds and returns a List of Slots
*/
object Scheduler {
val name = "scheduler"
val props = Props(new Scheduler)
}
class Scheduler extends Actor with Log2 with FormatHelpers{
def receive: Receive = {
case MakeSchedule(rounds, mode) => sender ! FinishedSchedule(roundsToSchedule(mode, rounds))
}
def roundsToSchedule(mode: TournamentMode, rounds: List[Round]): List[String] = {
var results: List[String] = initStringForTournament(mode) :: Nil
var acc = ""
val format = DateTimeFormat.forPattern("hh:mm")
var time = DateTime.parse(mode.startTime, format)
// Groups them in to the number of available fields (with/without)
// formats each slot for .csv
//todo check if team is playing twice in one slot: ERROR
for (slots <- mode.gameMode match {
case GameMode.RoundRobin => rounds.flatten.grouped(mode.fields) //without empty fields per slot
case _ => rounds.flatMap(_.grouped(mode.fields)) //with empty fields per slot
}) {
acc = timeStringForSlot(time, mode)
for (game@(gameId, (t1, t2)) <- slots) {
acc += gameToString(game)
}
for(empty <- 0 until mode.fields-slots.size){
acc += ",,--- : ---,"
}
time += mode.gameTime.minutes + mode.pauseTime.minutes
results = acc :: results
}
results.reverse
}
}
trait FormatHelpers {
def gameToString(game: Game): String = game match {
case g@(gameId, (t1, t2)) =>
s",$gameId,${
t1.name match {
case "" => s"Team ${t1.id}"
case name => name
}
}: ${
t2.name match {
case "" => s"Team ${t2.id}"
case name => name
}
},"
}
def initStringForTournament(mode: TournamentMode):String = {
var acc = "Time,"
for (nr <- 1 to mode.fields) {
acc += s",,Field $nr ,"
}
acc
}
def timeStringForSlot(time:DateTime,mode:TournamentMode) = {
val format = DateTimeFormat.forPattern("hh:mm")
s"${format.print(time)}-${format.print(time + mode.gameTime.minutes)},"
}
}
trait Log2 {
val lnOf2 = scala.math.log(2)
// natural log of 2
def log2(x: Int): Int = (scala.math.log(x.toDouble) / lnOf2).toInt
}
| yannick-cw/tournament_planer | TournamentScheduler/src/main/scala/Scheduler/Scheduler.scala | Scala | mit | 2,548 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.physical.rdbms
import slamdata.Predef._
import quasar.connector.{DefaultAnalyzeModule, BackendModule}
import quasar.contrib.pathy.{AFile, APath}
import quasar.contrib.scalaz.MonadReader_
import quasar.effect.uuid.GenUUID
import quasar.effect.{KeyValueStore, MonotonicSeq}
import quasar.fp.{:/:, :\\:}
import quasar.fp.free._
import quasar.fs.MonadFsErr
import quasar.fs.ReadFile.ReadHandle
import quasar.fs.mount.BackendDef.{DefErrT, DefinitionError}
import quasar.fs.mount.ConnectionUri
import quasar.physical.rdbms.fs._
import quasar.qscript.{::/::, ::\\::, EquiJoin, ExtractPath, Injectable, Optimize, QScriptCore, QScriptTotal, ShiftedRead, Unicoalesce, Unirewrite}
import quasar.fs.WriteFile.WriteHandle
import quasar.physical.rdbms.common.{Config, TablePath}
import quasar.physical.rdbms.jdbc.JdbcConnectionInfo
import quasar.{RenderTree, RenderTreeT, fp}
import quasar.qscript.analysis._
import scala.Predef.implicitly
import doobie.hikari.hikaritransactor.HikariTransactor
import doobie.imports.ConnectionIO
import matryoshka.{BirecursiveT, Delay, EqualT, RecursiveT, ShowT}
import matryoshka.data._
import scalaz._
import Scalaz._
import scalaz.concurrent.Task
trait Rdbms extends BackendModule with RdbmsReadFile with RdbmsWriteFile with RdbmsManageFile with RdbmsQueryFile with Interpreter with DefaultAnalyzeModule {
type Eff[A] = (
ConnectionIO :\\:
MonotonicSeq :\\:
GenUUID :\\:
KeyValueStore[ReadHandle, SqlReadCursor, ?] :/:
KeyValueStore[WriteHandle, TablePath, ?]
)#M[A]
type QS[T[_[_]]] = QScriptCore[T, ?] :\\: EquiJoin[T, ?] :/: Const[ShiftedRead[AFile], ?]
type Repr = String // TODO define best Repr for a SQL query (Doobie Fragment?)
type M[A] = Free[Eff, A]
type Config = common.Config
implicit class LiftEffBackend[F[_], A](m: F[A])(implicit I: F :<: Eff) {
val liftB: Backend[A] = lift(m).into[Eff].liftB
}
import Cost._
import Cardinality._
def CardinalityQSM: Cardinality[QSM[Fix, ?]] = Cardinality[QSM[Fix, ?]]
def CostQSM: Cost[QSM[Fix, ?]] = Cost[QSM[Fix, ?]]
def FunctorQSM[T[_[_]]] = Functor[QSM[T, ?]]
def TraverseQSM[T[_[_]]] = Traverse[QSM[T, ?]]
def DelayRenderTreeQSM[T[_[_]]: BirecursiveT: EqualT: ShowT: RenderTreeT] =
implicitly[Delay[RenderTree, QSM[T, ?]]]
def ExtractPathQSM[T[_[_]]: RecursiveT] = ExtractPath[QSM[T, ?], APath]
def QSCoreInject[T[_[_]]] = implicitly[QScriptCore[T, ?] :<: QSM[T, ?]]
def MonadM = Monad[M]
def UnirewriteT[T[_[_]]: BirecursiveT: EqualT: ShowT: RenderTreeT] = implicitly[Unirewrite[T, QS[T]]]
def UnicoalesceCap[T[_[_]]: BirecursiveT: EqualT: ShowT: RenderTreeT] = Unicoalesce.Capture[T, QS[T]]
implicit def qScriptToQScriptTotal[T[_[_]]]: Injectable.Aux[
QSM[T, ?],
QScriptTotal[T, ?]] =
::\\::[QScriptCore[T, ?]](::/::[T, EquiJoin[T, ?], Const[ShiftedRead[AFile], ?]])
override def optimize[T[_[_]]: BirecursiveT: EqualT: ShowT]: QSM[T, T[QSM[T, ?]]] => QSM[T, T[QSM[T, ?]]] = {
val O = new Optimize[T]
O.optimize(fp.reflNT[QSM[T, ?]])
}
def parseConfig(uri: ConnectionUri): DefErrT[Task, Config] =
EitherT(Task.delay(parseConnectionUri(uri).map(Config.apply)))
def compile(cfg: Config): DefErrT[Task, (M ~> Task, Task[Unit])] = {
val xa = HikariTransactor[Task](
cfg.connInfo.driverClassName,
cfg.connInfo.url,
cfg.connInfo.userName,
cfg.connInfo.password.getOrElse("")
)
val close = xa.flatMap(_.configure(_.close()))
(interp(xa) ∘ (i => (foldMapNT[Eff, Task](i), close))).liftM[DefErrT]
}
lazy val MR = MonadReader_[Backend, Config]
lazy val ME = MonadFsErr[Backend]
def plan[T[_[_]]: BirecursiveT: EqualT: ShowT: RenderTreeT](
cp: T[QSM[T, ?]]): Backend[Repr] = {
???
} // TODO
def parseConnectionUri(uri: ConnectionUri): \\/[DefinitionError, JdbcConnectionInfo]
}
| drostron/quasar | rdbms/src/main/scala/quasar/physical/rdbms/Rdbms.scala | Scala | apache-2.0 | 4,640 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.hbase.tools.export
import com.beust.jcommander.Parameters
import org.locationtech.geomesa.hbase.data.HBaseDataStore
import org.locationtech.geomesa.hbase.tools.HBaseDataStoreCommand
import org.locationtech.geomesa.hbase.tools.HBaseDataStoreCommand.{HBaseParams, ToggleRemoteFilterParam}
import org.locationtech.geomesa.hbase.tools.export.HBasePlaybackCommand.HBasePlaybackParams
import org.locationtech.geomesa.tools.export.PlaybackCommand
import org.locationtech.geomesa.tools.export.PlaybackCommand.PlaybackParams
class HBasePlaybackCommand extends PlaybackCommand[HBaseDataStore] with HBaseDataStoreCommand {
override val params = new HBasePlaybackParams
}
object HBasePlaybackCommand {
@Parameters(commandDescription = "Playback features from a GeoMesa data store, based on the feature date")
class HBasePlaybackParams extends PlaybackParams with HBaseParams with ToggleRemoteFilterParam
}
| elahrvivaz/geomesa | geomesa-hbase/geomesa-hbase-tools/src/main/scala/org/locationtech/geomesa/hbase/tools/export/HBasePlaybackCommand.scala | Scala | apache-2.0 | 1,401 |
package com.indix.petstore.api
import com.indix.petstore.models.User
import org.scalatest.{ FlatSpec, Matchers }
import spray.json._
import com.indix.petstore.api.JsonFormats._
class JsonFormatsSpec extends FlatSpec with Matchers {
"JsonFormats#User" should "format user to json" in {
val user = """{
| "name" : "foo",
| "first_name" : "foo",
| "last_name" : "bar",
| "email" : "foo@bar.com",
| "password" : ""
|}""".stripMargin.parseJson.convertTo[User]
user should be(User(None, "foo", Some("foo"), Some("bar"), Some("foo@bar.com"), Some(""), None))
}
}
| codingnirvana/scala-microservices-template | src/test/scala/com/indix/petstore/api/JsonFormatsSpec.scala | Scala | apache-2.0 | 620 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.carbondata.spark.testsuite.createTable
import org.apache.spark.sql.common.util.QueryTest
import org.scalatest.BeforeAndAfterAll
/**
* test functionality related space in column names for create table
*/
class TestCreateTableWithSpaceInColumnName extends QueryTest with BeforeAndAfterAll {
override def beforeAll {
sql("use default")
sql("drop database if exists dbColumnSpace cascade")
}
test("test create table space in column names") {
// this test case will test the creation of table for different case for database name.
// In hive dbName folder is always created with small case in HDFS. Carbon should behave
// the same way. If table creation fails during second time creation it means in HDFS
// separate folders are created for the matching case in commands executed.
sql("create database dbColumnSpace")
sql("use dbColumnSpace")
sql("create table carbonTable(`my id` int, `full name` string)stored by 'carbondata'")
sql("drop table carbonTable")
sql("use default")
sql("use dbColumnSpace")
try {
// table creation should be successful
sql("create table carbonTable(`my id` int, `full name` string)stored by 'carbondata'")
assert(true)
} catch {
case ex: Exception =>
assert(false)
}
}
override def afterAll {
sql("use default")
sql("drop database if exists dbColumnSpace cascade")
}
}
| ksimar/incubator-carbondata | integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithSpaceInColumnName.scala | Scala | apache-2.0 | 2,244 |
package repositories
import com.byteslounge.slickrepo.repository.Repository
import javax.inject.Inject
import model.{PaginatedResult, Pagination, PaginationParams, Post}
import model.PostTable.PostTable
import slick.ast.BaseTypedType
import slick.jdbc.JdbcProfile
import scala.concurrent.ExecutionContext
class PostRepository @Inject()(override val driver: JdbcProfile)(implicit executionContext: ExecutionContext)
extends Repository[Post, Int](driver)
with Pagination {
import driver.api._
val pkType = implicitly[BaseTypedType[Int]]
val tableQuery = TableQuery[PostTable]
type TableType = PostTable
def getPaginatedObjectsList(paginationParams: PaginationParams): DBIO[PaginatedResult[Post]] = {
val (offset, limit) = (paginationParams.offset, paginationParams.limit)
val paginatedQuery = withPagination(tableQuery, paginationParams)
for {
totalCount <- tableQuery.size.result
paginatedResult <- paginatedQuery.result
} yield
PaginatedResult(
totalCount = totalCount,
entities = paginatedResult.toList,
hasNextPage = (totalCount - (offset + limit)) > 0
)
}
}
| sysgears/apollo-universal-starter-kit | modules/post/server-scala/src/main/scala/repositories/PostRepository.scala | Scala | mit | 1,147 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.cloudml.zen.ml.clustering.algorithm
import breeze.linalg.{DenseVector => BDV, SparseVector => BSV, sum}
import com.github.cloudml.zen.ml.clustering.LDADefines._
import com.github.cloudml.zen.ml.util.Concurrent._
import org.apache.spark.graphx2.impl.EdgePartition
import scala.concurrent.Future
abstract class LDATrainerByWord(numTopics: Int, numThreads: Int)
extends LDATrainer(numTopics, numThreads) {
override def isByDoc: Boolean = false
override def initEdgePartition(ep: EdgePartition[TA, _]): EdgePartition[TA, Int] = {
val totalSize = ep.size
val srcSize = ep.indexSize
val lcSrcIds = ep.localSrcIds
val zeros = new Array[Int](ep.vertexAttrs.length)
val srcInfos = new Array[(Int, Int, Int)](srcSize)
implicit val es = initExecutionContext(numThreads)
val all = Future.traverse(ep.index.iterator.zipWithIndex) { case ((_, startPos), ii) => withFuture {
val si = lcSrcIds(startPos)
var pos = startPos
while (pos < totalSize && lcSrcIds(pos) == si) {
pos += 1
}
srcInfos(ii) = (si, startPos, pos)
}}
withAwaitReadyAndClose(all)
val newLcSrcIds = srcInfos.toSeq.sorted.flatMap(t => Iterator(t._1, t._2, t._3)).toArray
new EdgePartition(newLcSrcIds, ep.localDstIds, ep.data, null, ep.global2local, ep.local2global, zeros, None)
}
override def countPartition(ep: EdgePartition[TA, Int]): Iterator[NvkPair] = {
val lcSrcIds = ep.localSrcIds
val lcDstIds = ep.localDstIds
val l2g = ep.local2global
val useds = ep.vertexAttrs
val data = ep.data
val vertSize = useds.length
val results = new Array[NvkPair](vertSize)
implicit val es = initExecutionContext(numThreads)
val all0 = Future.traverse(Range(0, numThreads).iterator) { thid => withFuture {
var i = thid
while (i < vertSize) {
val vid = l2g(i)
val used = useds(i)
val counter: Nvk = if (isTermId(vid) && used >= dscp) {
new BDV(new Array[Count](numTopics))
} else {
val len = math.max(used >>> 1, 2)
new BSV(new Array[Int](len), new Array[Count](len), 0, numTopics)
}
results(i) = (vid, counter)
i += numThreads
}
}}
withAwaitReady(all0)
val all = Future.traverse(lcSrcIds.indices.by(3).iterator) { lsi => withFuture {
val si = lcSrcIds(lsi)
val startPos = lcSrcIds(lsi + 1)
val endPos = lcSrcIds(lsi + 2)
val termTopics = results(si)._2
var pos = startPos
while (pos < endPos) {
val docTopics = results(lcDstIds(pos))._2
val topic = data(pos)
termTopics(topic) += 1
docTopics.synchronized {
docTopics(topic) += 1
}
pos += 1
}
termTopics match {
case v: BDV[Count] =>
val used = v.data.count(_ > 0)
if (used < dscp) {
results(si) = (l2g(si), toBSV(v, used))
}
case _ =>
}
}}
withAwaitReadyAndClose(all)
results.iterator
}
override def perplexPartition(topicCounters: BDV[Count],
numTokens: Long,
numTerms: Int,
alpha: Double,
alphaAS: Double,
beta: Double)
(ep: EdgePartition[TA, Nvk]): (Double, Double, Double) = {
val alphaSum = alpha * numTopics
val betaSum = beta * numTerms
val alphaRatio = calc_alphaRatio(alphaSum, numTokens, alphaAS)
val alphaks = calc_alphaks(topicCounters, alphaAS, alphaRatio)
val denoms = calc_denoms(topicCounters, betaSum)
val alphak_denoms = calc_alphak_denoms(denoms, alphaAS, betaSum, alphaRatio)
val lcSrcIds = ep.localSrcIds
val lcDstIds = ep.localDstIds
val vattrs = ep.vertexAttrs
val data = ep.data
val vertSize = vattrs.length
val docNorms = new Array[Double](vertSize)
@volatile var llhs = 0.0
@volatile var wllhs = 0.0
@volatile var dllhs = 0.0
val abDenseSum = sum_abDense(alphak_denoms, beta)
implicit val es = initExecutionContext(numThreads)
val all = Future.traverse(lcSrcIds.indices.by(3).iterator) { lsi => withFuture {
val si = lcSrcIds(lsi)
val startPos = lcSrcIds(lsi + 1)
val endPos = lcSrcIds(lsi + 2)
val termTopics = vattrs(si)
val waSparseSum = sum_waSparse(alphak_denoms, termTopics)
val sum12 = abDenseSum + waSparseSum
var llhs_th = 0.0
var wllhs_th = 0.0
var dllhs_th = 0.0
val denseTermTopics = toBDV(termTopics)
var pos = startPos
while (pos < endPos) {
val di = lcDstIds(pos)
val docTopics = vattrs(di).asInstanceOf[Ndk]
var docNorm = docNorms(di)
if (docNorm == 0.0) {
docNorm = 1.0 / (sum(docTopics) + alphaSum)
docNorms(di) = docNorm
}
val dwbSparseSum = sum_dwbSparse(denoms, denseTermTopics, docTopics, beta)
llhs_th += Math.log((sum12 + dwbSparseSum) * docNorm)
val topic = data(pos)
wllhs_th += Math.log((denseTermTopics(topic) + beta) * denoms(topic))
dllhs_th += Math.log((docTopics(topic) + alphaks(topic)) * docNorm)
pos += 1
}
llhs += llhs_th
wllhs += wllhs_th
dllhs += dllhs_th
}}
withAwaitReadyAndClose(all)
(llhs, wllhs, dllhs)
}
def sum_waSparse(alphak_denoms: BDV[Double],
termTopics: Nwk): Double = termTopics match {
case v: BDV[Count] =>
var sum = 0.0
var i = 0
while (i < numTopics) {
val cnt = v(i)
if (cnt > 0) {
sum += alphak_denoms(i) * cnt
}
i += 1
}
sum
case v: BSV[Count] =>
val used = v.used
val index = v.index
val data = v.data
var sum = 0.0
var i = 0
while (i < used) {
sum += alphak_denoms(index(i)) * data(i)
i += 1
}
sum
}
def sum_dwbSparse(denoms: BDV[Double],
denseTermTopics: BDV[Count],
docTopics: Ndk,
beta: Double): Double = {
val used = docTopics.used
val index = docTopics.index
val data = docTopics.data
var sum = 0.0
var i = 0
while (i < used) {
val topic = index(i)
sum += (denseTermTopics(topic) + beta) * data(i) * denoms(topic)
i += 1
}
sum
}
def sum_dwbSparse_wOpt(termBeta_denoms: BDV[Double],
docTopics: Ndk): Double = {
val used = docTopics.used
val index = docTopics.index
val data = docTopics.data
var sum = 0.0
var i = 0
while (i < used) {
sum += termBeta_denoms(index(i)) * data(i)
i += 1
}
sum
}
}
| witgo/zen | ml/src/main/scala/com/github/cloudml/zen/ml/clustering/algorithm/LDATrainerByWord.scala | Scala | apache-2.0 | 7,329 |
package ui.mail
import org.openqa.selenium.WebDriver
import org.subethamail.wiser.Wiser
import play.api.test.{FakeApplication, Helpers, WebDriverFactory, WithBrowser}
abstract class WithMailServerAndBrowser(
webDriver: WebDriver = WebDriverFactory(Helpers.HTMLUNIT),
port: Int = Helpers.testServerPort,
val mailServer: Wiser with SecurityMessages)
extends WithBrowser(
webDriver = webDriver, app = FakeApplication(
additionalConfiguration = Map(
"smtp.host" -> mailServer.getServer.getHostName,
"smtp.port" -> mailServer.getServer.getPort)),
port = port)
| timothygordon32/reactive-todolist | it/ui/mail/WithMailServerAndBrowser.scala | Scala | mit | 595 |
import sbt._
import sbt.Keys._
import sbtassembly.Plugin._
import AssemblyKeys._
object DropSyncBuild extends Build {
lazy val dropsync = Project(
id = "dropsync",
base = file("."),
settings = Project.defaultSettings ++ assemblySettings ++ Seq(
name := "dropsync",
organization := "com.github.tanacasino",
version := "0.1-SNAPSHOT",
scalaVersion := "2.11.2",
// add other settings here
libraryDependencies ++= Seq(
"com.dropbox.core" % "dropbox-core-sdk" % "[1.7,1.8)",
"com.typesafe" % "config" % "1.2.1",
"org.scalatest" % "scalatest_2.11" % "2.2.1" % "test"
)
)
)
}
| tanacasino/dropsync | project/DropSyncBuild.scala | Scala | mit | 659 |
/*
* UnaryOp.scala
* (FScape)
*
* Copyright (c) 2001-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.fscape
package graph
import de.sciss.fscape.Graph.{ProductReader, RefMapIn}
import de.sciss.fscape.UGen.Adjunct
import de.sciss.fscape.UGenSource.unwrap
import de.sciss.fscape.stream.{BufD, BufI, BufL, StreamIn, StreamOut}
import de.sciss.numbers.{DoubleFunctions => rd, DoubleFunctions2 => rd2, IntFunctions => ri, IntFunctions2 => ri2, LongFunctions => rl, LongFunctions2 => rl2}
import scala.annotation.switch
import scala.collection.immutable.{IndexedSeq => Vec}
object UnaryOp extends ProductReader[UnaryOp] {
object Op {
def apply(id: Int): Op = (id: @switch) match {
case Neg .id => Neg
case Not .id => Not
case BitNot .id => BitNot
case Abs .id => Abs
case ToDouble .id => ToDouble
case ToInt .id => ToInt
case Ceil .id => Ceil
case Floor .id => Floor
case Frac .id => Frac
case Signum .id => Signum
case Squared .id => Squared
case Cubed .id => Cubed
case Sqrt .id => Sqrt
case Exp .id => Exp
case Reciprocal .id => Reciprocal
case MidiCps .id => MidiCps
case CpsMidi .id => CpsMidi
case MidiRatio .id => MidiRatio
case RatioMidi .id => RatioMidi
case DbAmp .id => DbAmp
case AmpDb .id => AmpDb
case OctCps .id => OctCps
case CpsOct .id => CpsOct
case Log .id => Log
case Log2 .id => Log2
case Log10 .id => Log10
case Sin .id => Sin
case Cos .id => Cos
case Tan .id => Tan
case Asin .id => Asin
case Acos .id => Acos
case Atan .id => Atan
case Sinh .id => Sinh
case Cosh .id => Cosh
case Tanh .id => Tanh
// case Distort .id => Distort
// case Softclip .id => Softclip
// case Ramp .id => Ramp
// case Scurve .id => Scurve
case IsNaN .id => IsNaN
case NextPowerOfTwo .id => NextPowerOfTwo
case ToLong .id => ToLong
}
}
sealed trait Op {
op =>
def id: Int
def funDD: Double => Double
// def apply(a: Double): Double
/** The default converts to `Double`, but specific operators
* may better preserve semantics and precision for other types such as `Int` and `Long`.
*/
def apply(a: Constant): Constant = ConstantD(funDD(a.doubleValue))
def name: String = plainName.capitalize
final def make(a: GE): GE = a match {
case v: Constant => apply(v)
case _ => UnaryOp(op.id, a)
}
private def plainName: String = {
val cn = getClass.getName
val sz = cn.length
val i = cn.indexOf('$') + 1
cn.substring(i, if (cn.charAt(sz - 1) == '$') sz - 1 else sz)
}
}
sealed trait OpII extends Op {
def funII: Int => Int
override def apply(a: Constant): Constant = a match {
case ConstantI(i) => ConstantI(funII(i))
case _ => ConstantD(funDD(a.doubleValue))
}
}
sealed trait OpLL extends Op {
def funLL: Long => Long
override def apply(a: Constant): Constant = a match {
case ConstantL(n) => ConstantL(funLL(n))
case _ => ConstantD(funDD(a.doubleValue))
}
}
sealed trait OpDI extends Op {
def funDI: Double => Int
}
sealed trait OpDL extends Op {
def funDL: Double => Long
}
sealed trait OpID extends Op {
def funID: Int => Double
}
sealed trait OpLD extends Op {
def funLD: Long => Double
}
sealed trait OpIL extends Op {
def funIL: Int => Long
}
sealed trait OpLI extends Op {
def funLI: Long => Int
}
sealed trait OpSame extends Op with OpII with OpLL {
override def apply(a: Constant): Constant = a match {
case ConstantD(d) => ConstantD(funDD(d))
case ConstantI(i) => ConstantI(funII(i))
case ConstantL(n) => ConstantL(funLL(n))
}
}
case object Neg extends OpSame {
final val id = 0
val funDD: Double => Double = -_
val funII: Int => Int = -_
val funLL: Long => Long = -_
}
case object Not extends OpII {
final val id = 1
val funDD: Double => Double = a => if (a == 0.0 ) 1.0 else 0.0
val funII: Int => Int = a => if (a == 0 ) 1 else 0
}
// case object IsNil extends Op( 2 )
// case object NotNil extends Op( 3 )
case object BitNot extends OpSame {
final val id = 4
val funDD: Double => Double = a => (~a.toLong).toDouble
val funII: Int => Int = a => ~a
val funLL: Long => Long = a => ~a
}
case object Abs extends OpSame {
final val id = 5
val funDD: Double => Double = a => rd.abs(a)
val funII: Int => Int = a => ri.abs(a)
val funLL: Long => Long = a => rl.abs(a)
}
case object ToDouble extends OpID with OpLD {
final val id = 6
val funDD: Double => Double = a => a
val funID: Int => Double = a => a.toDouble
val funLD: Long => Double = a => a.toDouble
override def apply(a: Constant): Constant = a match {
case ConstantD(d) => ConstantD(funDD(d))
case ConstantI(i) => ConstantD(funID(i))
case ConstantL(n) => ConstantD(funLD(n))
}
}
case object ToInt extends OpDI with OpLI with OpII {
final val id = 7
val funDD: Double => Double = a => a.toInt.toDouble
val funDI: Double => Int = a => a.toInt
val funLI: Long => Int = a => a.toInt
val funII: Int => Int = a => a
override def apply(a: Constant): Constant = a match {
case ConstantD(d) => ConstantI(funDI(d))
case ConstantI(i) => ConstantI(funII(i))
case ConstantL(n) => ConstantI(funLI(n))
}
}
case object Ceil extends Op {
final val id = 8
val funDD: Double => Double = a => rd.ceil(a)
}
case object Floor extends Op {
final val id = 9
val funDD: Double => Double = a => rd.floor(a)
}
case object Frac extends Op {
final val id = 10
val funDD: Double => Double = a => rd.frac(a)
}
case object Signum extends OpSame {
final val id = 11
val funDD: Double => Double = a => rd.signum(a)
val funII: Int => Int = a => ri.signum(a)
val funLL: Long => Long = a => rl.signum(a)
}
case object Squared extends Op with OpIL with OpLL {
final val id = 12
val funDD: Double => Double = a => rd.squared(a)
val funIL: Int => Long = a => ri.squared(a)
val funLL: Long => Long = a => rl.squared(a)
override def apply(a: Constant): Constant = a match {
case ConstantD(d) => ConstantD(funDD(d))
case ConstantI(i) => ConstantL(funIL(i))
case ConstantL(n) => ConstantL(funLL(n))
}
}
case object Cubed extends Op with OpIL with OpLL {
final val id = 13
val funDD: Double => Double = a => rd2.cubed(a)
val funIL: Int => Long = a => ri2.cubed(a)
val funLL: Long => Long = a => rl2.cubed(a)
override def apply(a: Constant): Constant = a match {
case ConstantD(d) => ConstantD(funDD(d))
case ConstantI(i) => ConstantL(funIL(i))
case ConstantL(n) => ConstantL(funLL(n))
}
}
case object Sqrt extends Op {
final val id = 14
val funDD: Double => Double = a => rd.sqrt(a)
}
case object Exp extends Op {
final val id = 15
val funDD: Double => Double = a => rd.exp(a)
}
case object Reciprocal extends Op with OpID with OpLD {
final val id = 16
val funDD: Double => Double = a => rd2.reciprocal(a)
val funID: Int => Double = a => rd2.reciprocal(a.toDouble)
val funLD: Long => Double = a => rd2.reciprocal(a.toDouble)
}
case object MidiCps extends Op {
final val id = 17
val funDD: Double => Double = a => rd.midiCps(a)
}
case object CpsMidi extends Op {
final val id = 18
val funDD: Double => Double = a => rd.cpsMidi(a)
}
case object MidiRatio extends Op {
final val id = 19
val funDD: Double => Double = a => rd.midiRatio(a)
}
case object RatioMidi extends Op {
final val id = 20
val funDD: Double => Double = a => rd.ratioMidi(a)
}
case object DbAmp extends Op {
final val id = 21
val funDD: Double => Double = a => rd.dbAmp(a)
}
case object AmpDb extends Op {
final val id = 22
val funDD: Double => Double = a => rd.ampDb(a)
}
case object OctCps extends Op {
final val id = 23
val funDD: Double => Double = a => rd.octCps(a)
}
case object CpsOct extends Op {
final val id = 24
val funDD: Double => Double = a => rd.cpsOct(a)
}
case object Log extends Op {
final val id = 25
val funDD: Double => Double = a => rd.log(a)
}
case object Log2 extends Op {
final val id = 26
val funDD: Double => Double = a => rd.log2(a)
}
case object Log10 extends Op {
final val id = 27
val funDD: Double => Double = a => rd.log10(a)
}
case object Sin extends Op {
final val id = 28
val funDD: Double => Double = a => rd.sin(a)
}
case object Cos extends Op {
final val id = 29
val funDD: Double => Double = a => rd.cos(a)
}
case object Tan extends Op {
final val id = 30
val funDD: Double => Double = a => rd.tan(a)
}
case object Asin extends Op {
final val id = 31
val funDD: Double => Double = a => rd.asin(a)
}
case object Acos extends Op {
final val id = 32
val funDD: Double => Double = a => rd.acos(a)
}
case object Atan extends Op {
final val id = 33
val funDD: Double => Double = a => rd.atan(a)
}
case object Sinh extends Op {
final val id = 34
val funDD: Double => Double = a => rd.sinh(a)
}
case object Cosh extends Op {
final val id = 35
val funDD: Double => Double = a => rd.cosh(a)
}
case object Tanh extends Op {
final val id = 36
val funDD: Double => Double = a => rd.tanh(a)
}
// class Rand extends Op( 37 )
// class Rand2 extends Op( 38 )
// class Linrand extends Op( 39 )
// class Bilinrand extends Op( 40 )
// class Sum3rand extends Op( 41 )
// case object Distort extends Op {
// final val id = 42
// def apply(a: Double): Double = rd2.distort(a)
// }
//
// case object Softclip extends Op {
// final val id = 43
// def apply(a: Double): Double = rd2.softClip(a)
// }
// class Coin extends Op( 44 )
// case object DigitValue extends Op( 45 )
// case object Silence extends Op( 46 )
// case object Thru extends Op( 47 )
// case object RectWindow extends Op( 48 )
// case object HanWindow extends Op( 49 )
// case object WelWindow extends Op( 50 )
// case object TriWindow extends Op( 51 )
// case object Ramp extends Op {
// final val id = 52
// def apply(a: Double): Double = rd2.ramp(a)
// }
//
// case object Scurve extends Op {
// final val id = 53
// def apply(a: Double): Double = rd2.sCurve(a)
// }
case object IsNaN extends Op with OpDI {
final val id = 100
val funDD: Double => Double = a => if (java.lang.Double.isNaN(a)) 1.0 else 0.0
val funDI: Double => Int = a => if (java.lang.Double.isNaN(a)) 1 else 0
override def apply(a: Constant): Constant =
ConstantI(funDI(a.doubleValue))
}
case object NextPowerOfTwo extends OpSame {
final val id = 101
val funDD: Double => Double = a => ri.nextPowerOfTwo(Math.ceil(a).toInt).toDouble
val funII: Int => Int = a => ri.nextPowerOfTwo(a)
val funLL: Long => Long = a => rl.nextPowerOfTwo(a)
}
case object ToLong extends OpDL with OpLL with OpIL {
final val id = 200
val funDD: Double => Double = a => a.toInt.toDouble
val funDL: Double => Long = a => a.toLong
val funLL: Long => Long = a => a
val funIL: Int => Long = a => a.toLong
override def apply(a: Constant): Constant = a match {
case ConstantD(d) => ConstantL(funDL(d))
case ConstantI(i) => ConstantL(funIL(i))
case ConstantL(n) => ConstantL(funLL(n))
}
}
override def read(in: RefMapIn, key: String, arity: Int): UnaryOp = {
require (arity == 2)
val _op = in.readInt()
val _in = in.readGE()
new UnaryOp(_op, _in)
}
}
final case class UnaryOp(op: Int, in: GE) extends UGenSource.SingleOut {
protected def makeUGens(implicit b: UGenGraph.Builder): UGenInLike =
unwrap(this, Vector(in.expand))
protected def makeUGen(args: Vec[UGenIn])(implicit b: UGenGraph.Builder): UGenInLike =
UGen.SingleOut(this, inputs = args, adjuncts = Adjunct.Int(op) :: Nil)
private[fscape] def makeStream(args: Vec[StreamIn])(implicit b: stream.Builder): StreamOut = {
val Vec(in) = args: @unchecked
val op0 = UnaryOp.Op(op)
if (in.isDouble) {
op0 match {
case opDI: UnaryOp.OpDI =>
stream.UnaryOp[Double , BufD, Int , BufI](op0.name, opDI.funDI, in = in.toDouble): StreamOut
case opDL: UnaryOp.OpDL =>
stream.UnaryOp[Double , BufD, Long , BufL](op0.name, opDL.funDL, in = in.toDouble): StreamOut
case _ =>
stream.UnaryOp[Double , BufD, Double, BufD](op0.name, op0 .funDD, in = in.toDouble): StreamOut
}
} else if (in.isInt) {
op0 match {
case opII: UnaryOp.OpII =>
stream.UnaryOp[Int , BufI, Int , BufI](op0.name, opII.funII, in = in.toInt ): StreamOut
case opIL: UnaryOp.OpIL =>
stream.UnaryOp[Int , BufI, Long , BufL](op0.name, opIL.funIL, in = in.toInt ): StreamOut
case opID: UnaryOp.OpID =>
stream.UnaryOp[Int , BufI, Double, BufD](op0.name, opID.funID, in = in.toInt ): StreamOut
case _ =>
stream.UnaryOp[Double , BufD, Double, BufD](op0.name, op0 .funDD, in = in.toDouble): StreamOut
}
} else /*if (in.isLong)*/ {
op0 match {
case opLI: UnaryOp.OpLI =>
stream.UnaryOp[Long , BufL, Int , BufI](op0.name, opLI.funLI, in = in.toLong ): StreamOut
case opLL: UnaryOp.OpLL =>
stream.UnaryOp[Long , BufL, Long , BufL](op0.name, opLL.funLL, in = in.toLong ): StreamOut
case opLD: UnaryOp.OpLD =>
stream.UnaryOp[Long , BufL, Double, BufD](op0.name, opLD.funLD, in = in.toLong ): StreamOut
case _ =>
stream.UnaryOp[Double , BufD, Double, BufD](op0.name, op0 .funDD, in = in.toDouble): StreamOut
}
}
}
} | Sciss/FScape-next | core/shared/src/main/scala/de/sciss/fscape/graph/UnaryOp.scala | Scala | agpl-3.0 | 14,962 |
package definiti.scalamodel.builder.typeVerification
import definiti.common.ast._
import definiti.scalamodel.ScalaAST
import definiti.scalamodel.builder.ScalaModelBuilder
import definiti.scalamodel.model.AliasOrDefinedType
import definiti.scalamodel.utils.{ListUtils, StringUtils}
trait AtomicVerificationBuilder {
  self: ScalaModelBuilder =>
  /**
   * Generates the public `verification` member for an alias or defined type:
   * a `def` when the type is parameterized (by values or generics), otherwise a `val`.
   * The body first declares the objects backing the type's own atomic
   * verifications, then the combined verification expression.
   */
  def generatePublicAtomicVerification(aliasOrDefinedType: AliasOrDefinedType): ScalaAST.Statement = {
    val body = ScalaAST.Block(
      generateAtomicInternalVerificationObjects(aliasOrDefinedType) ++
        generateVerification(aliasOrDefinedType)
    ).simplify
    if (aliasOrDefinedType.parameters.nonEmpty || aliasOrDefinedType.genericTypes.nonEmpty) {
      ScalaAST.Def1(
        name = "verification",
        typ = ScalaAST.Type("Verification", generateType(aliasOrDefinedType.internal)),
        generics = aliasOrDefinedType.genericTypes,
        parameters = aliasOrDefinedType.parameters.map(generateParameter),
        body = body,
        property = None
      )
    } else {
      ScalaAST.ClassVal(
        name = "verification",
        typ = ScalaAST.Type("Verification", generateType(aliasOrDefinedType.internal)),
        body = body
      )
    }
  }
  /**
   * Combines attribute, inherited, and internal atomic verifications into a
   * single expression: `Verification.none` when there is nothing to check,
   * otherwise `Verification.all(...)`.
   */
  private def generateVerification(aliasOrDefinedType: AliasOrDefinedType): ScalaAST.Statement = {
    val attributeVerifications = aliasOrDefinedType.internal match {
      case definedType: DefinedType => definedType.attributes.flatMap(generateAttributeVerifications(_, definedType))
      case _ => Seq.empty
    }
    val inheritedVerifications = generateInheritedVerifications(aliasOrDefinedType)
    val internalVerifications = generateAtomicInternalVerifications(aliasOrDefinedType)
    val verifications = attributeVerifications ++ inheritedVerifications ++ internalVerifications
    verifications match {
      case Nil => ScalaAST.CallMethod("Verification", "none", generics = generateType(aliasOrDefinedType))
      case list => ScalaAST.CallMethod("Verification", "all", list)
    }
  }
  /**
   * Builds the verification for a single attribute of a defined type: the
   * attribute type's own verification, verifications referenced directly on the
   * attribute, and a deep verification for container types, all grouped and
   * lifted onto the owning type via `.from(accessor, attributeName)`.
   * Returns None when the attribute needs no check at all.
   */
  private def generateAttributeVerifications(attributeDefinition: AttributeDefinition, definedType: DefinedType): Option[ScalaAST.Expression] = {
    val typeVerification = generateTypeVerificationCall(attributeDefinition.typeDeclaration)
    val directVerifications = attributeDefinition.verifications.map(generateVerificationCall(_, attributeDefinition.typeDeclaration))
    val deepVerification = generateDeepVerification(attributeDefinition.typeDeclaration)
    val verifications = typeVerification ++ directVerifications ++ deepVerification
    val groupVerificationOpt = generateGroupVerification(attributeDefinition.typeDeclaration, verifications)
    val verificationFromType = groupVerificationOpt.map { groupVerification =>
      ScalaAST.CallMethod(
        target = groupVerification,
        name = s"from[${generateType(definedType).toCode}]",
        arguments = Seq(
          ScalaAST.SimpleExpression(s"_.${attributeDefinition.name}"),
          ScalaAST.StringExpression(attributeDefinition.name)
        )
      )
    }
    verificationFromType
  }
  /**
   * References the `verification` member of an alias or defined type:
   * a method call when the declaration carries parameters, a plain attribute
   * access otherwise. Native/unknown types yield None.
   */
  private def generateTypeVerificationCall(typeDeclaration: TypeDeclaration): Option[ScalaAST.Expression] = {
    library.typesMap.get(typeDeclaration.typeName) match {
      case Some(_: AliasType | _: DefinedType) =>
        if (typeDeclaration.parameters.nonEmpty) {
          Some(ScalaAST.CallMethod(
            target = ScalaAST.SimpleExpression(StringUtils.lastPart(typeDeclaration.typeName)),
            name = s"verification${generateGenericTypes(typeDeclaration.genericTypes)}",
            arguments = typeDeclaration.parameters.map(generateExpression)
          ))
        } else {
          Some(ScalaAST.CallAttribute(
            target = ScalaAST.SimpleExpression(StringUtils.lastPart(typeDeclaration.typeName)),
            name = s"verification${generateGenericTypes(typeDeclaration.genericTypes)}"
          ))
        }
      case _ => None
    }
  }
  /**
   * Wraps the element verification of a List/Option declaration into the
   * matching container verification.
   *
   * NOTE(review): the List/Option branches yield a `ScalaAST.New` where the
   * declared result type is `Option[ScalaAST.Expression]` — this presumably
   * relies on an implicit widening not visible in this file; confirm before
   * restructuring this method.
   */
  private def generateDeepVerification(typeDeclaration: TypeDeclaration): Option[ScalaAST.Expression] = {
    val innerVerification = typeDeclaration.genericTypes.flatMap(generateTypeVerificationCall)
    if (innerVerification.nonEmpty) {
      if (isList(typeDeclaration)) {
        ScalaAST.New(s"ListVerification", Seq.empty, innerVerification)
      } else if (isOption(typeDeclaration)) {
        ScalaAST.New(s"OptionVerification", Seq.empty, innerVerification)
      } else {
        None
      }
    } else {
      None
    }
  }
  // Instantiates every verification the type inherits (declared with its definition).
  private def generateInheritedVerifications(aliasOrDefinedType: AliasOrDefinedType): Seq[ScalaAST.Expression] = {
    verificationsFromType(aliasOrDefinedType.internal)
      .map(generateVerificationCall(_, aliasOrDefinedType))
  }
  // Instantiation of a referenced verification, resolving generics against the
  // enclosing alias/defined type (see replaceGenerics).
  private def generateVerificationCall(verificationReference: VerificationReference, aliasOrDefinedType: AliasOrDefinedType): ScalaAST.Expression = {
    val verification = library.verificationsMap(verificationReference.verificationName)
    ScalaAST.New(
      name = verification.fullName,
      generics = replaceGenerics(verification.function.genericTypes, aliasOrDefinedType),
      arguments = verificationReference.parameters.map(generateExpression)
    )
  }
  // For alias types, the generics come from the aliased declaration; otherwise
  // the verification's generic names are positionally replaced by the type's own.
  private def replaceGenerics(generics: Seq[String], aliasOrDefinedType: AliasOrDefinedType): Seq[String] = {
    aliasOrDefinedType.internal match {
      case aliasType: AliasType =>
        aliasType.alias.genericTypes.map(_.readableString)
      case _ =>
        ListUtils.replaceOrdered(generics, aliasOrDefinedType.genericTypes)
    }
  }
  // Variant of generateVerificationCall resolving generics against a type declaration.
  private def generateVerificationCall(verificationReference: VerificationReference, typeDeclaration: TypeDeclaration): ScalaAST.Expression = {
    val verification = library.verificationsMap(verificationReference.verificationName)
    ScalaAST.New(
      name = verification.fullName,
      generics = ListUtils.replaceOrdered(verification.function.genericTypes, typeDeclaration.genericTypes.map(_.readableString)),
      arguments = verificationReference.parameters.map(generateExpression)
    )
  }
  // References the generated objects (one per atomic verification, named
  // "<TypeName><index>") produced by generateAtomicInternalVerificationObjects below.
  // The two methods must keep using the same naming/indexing scheme.
  private def generateAtomicInternalVerifications(aliasOrDefinedType: AliasOrDefinedType): Seq[ScalaAST.Expression] = {
    aliasOrDefinedType.verifications
      .collect { case (v: AtomicTypeVerification) => v }
      .indices
      .map { index => ScalaAST.SimpleExpression(s"${aliasOrDefinedType.name}${index}") }
  }
  // Declares one object per atomic verification of the type, named "<TypeName><index>".
  private def generateAtomicInternalVerificationObjects(aliasOrDefinedType: AliasOrDefinedType): Seq[ScalaAST.Statement] = {
    aliasOrDefinedType.verifications
      .collect { case (v: AtomicTypeVerification) => v }
      .zipWithIndex
      .map { case (typeVerification, index) =>
        generateTypeVerificationObject(s"${aliasOrDefinedType.name}${index}", typeVerification)
      }
  }
} | definiti/definiti-scala-model | src/main/scala/definiti/scalamodel/builder/typeVerification/AtomicVerificationBuilder.scala | Scala | mit | 6,780 |
package tastytest
/** Regression suite checking that `TypeRefIns.b` resolves to the expected constant. */
object TestTypeRefIns extends Suite("TestTypeRefIns") {
  private val expectedB = 1
  test(assert(TypeRefIns.b == expectedB))
}
| lrytz/scala | test/tasty/run/src-2/tastytest/TestTypeRefIns.scala | Scala | apache-2.0 | 113 |
package geostat.kriging.model
/**
* Gaussian Model
*
* @param c sill
* @param a range
*/
class GaussianModel(val c: Double, a: Double) extends SemiVariogramModel {
  import scala.math._
  // Fail fast on invalid model parameters, with messages that identify
  // which precondition failed and the offending value.
  require(c > 0.0, s"sill must be strictly positive, got $c")
  require(a > 1e-8, s"range must be greater than 1e-8, got $a")
  /**
   * Semi-variogram value at lag distance `h`: c * (1 - exp(-h^2 / a^2)).
   *
   * @param h lag distance; must be non-negative
   * @return the semi-variance at lag `h`, in [0, c)
   */
  def variogram(h: Double): Double = {
    require(h >= 0.0, s"lag distance must be non-negative, got $h")
    c * (1.0 - exp(-h * h / (a * a)))
  }
}
| alessandroadamo/geostat | src/main/scala/geostat/kriging/model/GaussianModel.scala | Scala | lgpl-3.0 | 344 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import Matchers._
class ShellSuite extends Suite {

  /**
   * Asserts that the given shell reports exactly the specified five
   * configuration flags. Centralizes the `have` property check that was
   * previously copy-pasted for every shell expression; named parameters at
   * the call sites keep each expectation table readable.
   */
  private def assertConfig(
    shell: Shell,
    color: Boolean,
    durations: Boolean,
    shortstacks: Boolean,
    fullstacks: Boolean,
    stats: Boolean
  ) {
    shell should have (
      'colorPassed (color),
      'durationsPassed (durations),
      'shortstacksPassed (shortstacks),
      'fullstacksPassed (fullstacks),
      'statsPassed (stats)
    )
  }

  def testDefaults() {
    // From default values: only color is on by default.
    assertConfig(org.scalatest.color,       color = true,  durations = false, shortstacks = false, fullstacks = false, stats = false)
    assertConfig(org.scalatest.durations,   color = true,  durations = true,  shortstacks = false, fullstacks = false, stats = false)
    assertConfig(org.scalatest.shortstacks, color = true,  durations = false, shortstacks = true,  fullstacks = false, stats = false)
    assertConfig(org.scalatest.fullstacks,  color = true,  durations = false, shortstacks = false, fullstacks = true,  stats = false)
    assertConfig(org.scalatest.stats,       color = true,  durations = false, shortstacks = false, fullstacks = false, stats = true)
    assertConfig(org.scalatest.nocolor,     color = false, durations = false, shortstacks = false, fullstacks = false, stats = false)
    assertConfig(org.scalatest.nodurations, color = true,  durations = false, shortstacks = false, fullstacks = false, stats = false)
    assertConfig(org.scalatest.nostacks,    color = true,  durations = false, shortstacks = false, fullstacks = false, stats = false)
    assertConfig(org.scalatest.nostats,     color = true,  durations = false, shortstacks = false, fullstacks = false, stats = false)
  }

  def testFromColor() {
    // color is already the default, so results match testDefaults.
    assertConfig(org.scalatest.color.color,       color = true,  durations = false, shortstacks = false, fullstacks = false, stats = false)
    assertConfig(org.scalatest.color.durations,   color = true,  durations = true,  shortstacks = false, fullstacks = false, stats = false)
    assertConfig(org.scalatest.color.shortstacks, color = true,  durations = false, shortstacks = true,  fullstacks = false, stats = false)
    assertConfig(org.scalatest.color.fullstacks,  color = true,  durations = false, shortstacks = false, fullstacks = true,  stats = false)
    assertConfig(org.scalatest.color.stats,       color = true,  durations = false, shortstacks = false, fullstacks = false, stats = true)
    assertConfig(org.scalatest.color.nocolor,     color = false, durations = false, shortstacks = false, fullstacks = false, stats = false)
    assertConfig(org.scalatest.color.nodurations, color = true,  durations = false, shortstacks = false, fullstacks = false, stats = false)
    assertConfig(org.scalatest.color.nostacks,    color = true,  durations = false, shortstacks = false, fullstacks = false, stats = false)
    assertConfig(org.scalatest.color.nostats,     color = true,  durations = false, shortstacks = false, fullstacks = false, stats = false)
  }

  def testFromDurations() {
    // durations stays on unless explicitly turned off via nodurations.
    assertConfig(org.scalatest.durations.color,       color = true,  durations = true,  shortstacks = false, fullstacks = false, stats = false)
    assertConfig(org.scalatest.durations.durations,   color = true,  durations = true,  shortstacks = false, fullstacks = false, stats = false)
    assertConfig(org.scalatest.durations.shortstacks, color = true,  durations = true,  shortstacks = true,  fullstacks = false, stats = false)
    assertConfig(org.scalatest.durations.fullstacks,  color = true,  durations = true,  shortstacks = false, fullstacks = true,  stats = false)
    assertConfig(org.scalatest.durations.stats,       color = true,  durations = true,  shortstacks = false, fullstacks = false, stats = true)
    assertConfig(org.scalatest.durations.nocolor,     color = false, durations = true,  shortstacks = false, fullstacks = false, stats = false)
    assertConfig(org.scalatest.durations.nodurations, color = true,  durations = false, shortstacks = false, fullstacks = false, stats = false)
    assertConfig(org.scalatest.durations.nostacks,    color = true,  durations = true,  shortstacks = false, fullstacks = false, stats = false)
    assertConfig(org.scalatest.durations.nostats,     color = true,  durations = true,  shortstacks = false, fullstacks = false, stats = false)
  }

  def testFromShortstacks() {
    // shortstacks persists except when replaced by fullstacks or cleared by nostacks.
    assertConfig(org.scalatest.shortstacks.color,       color = true,  durations = false, shortstacks = true,  fullstacks = false, stats = false)
    assertConfig(org.scalatest.shortstacks.durations,   color = true,  durations = true,  shortstacks = true,  fullstacks = false, stats = false)
    assertConfig(org.scalatest.shortstacks.shortstacks, color = true,  durations = false, shortstacks = true,  fullstacks = false, stats = false)
    assertConfig(org.scalatest.shortstacks.fullstacks,  color = true,  durations = false, shortstacks = false, fullstacks = true,  stats = false)
    assertConfig(org.scalatest.shortstacks.stats,       color = true,  durations = false, shortstacks = true,  fullstacks = false, stats = true)
    assertConfig(org.scalatest.shortstacks.nocolor,     color = false, durations = false, shortstacks = true,  fullstacks = false, stats = false)
    assertConfig(org.scalatest.shortstacks.nodurations, color = true,  durations = false, shortstacks = true,  fullstacks = false, stats = false)
    assertConfig(org.scalatest.shortstacks.nostacks,    color = true,  durations = false, shortstacks = false, fullstacks = false, stats = false)
    assertConfig(org.scalatest.shortstacks.nostats,     color = true,  durations = false, shortstacks = true,  fullstacks = false, stats = false)
  }

  def testFromFullstacks() {
    // fullstacks persists except when replaced by shortstacks or cleared by nostacks.
    assertConfig(org.scalatest.fullstacks.color,       color = true,  durations = false, shortstacks = false, fullstacks = true,  stats = false)
    assertConfig(org.scalatest.fullstacks.durations,   color = true,  durations = true,  shortstacks = false, fullstacks = true,  stats = false)
    assertConfig(org.scalatest.fullstacks.shortstacks, color = true,  durations = false, shortstacks = true,  fullstacks = false, stats = false)
    assertConfig(org.scalatest.fullstacks.fullstacks,  color = true,  durations = false, shortstacks = false, fullstacks = true,  stats = false)
    assertConfig(org.scalatest.fullstacks.stats,       color = true,  durations = false, shortstacks = false, fullstacks = true,  stats = true)
    assertConfig(org.scalatest.fullstacks.nocolor,     color = false, durations = false, shortstacks = false, fullstacks = true,  stats = false)
    assertConfig(org.scalatest.fullstacks.nodurations, color = true,  durations = false, shortstacks = false, fullstacks = true,  stats = false)
    assertConfig(org.scalatest.fullstacks.nostacks,    color = true,  durations = false, shortstacks = false, fullstacks = false, stats = false)
    assertConfig(org.scalatest.fullstacks.nostats,     color = true,  durations = false, shortstacks = false, fullstacks = true,  stats = false)
  }

  def testFromStats() {
    // stats stays on unless explicitly turned off via nostats.
    assertConfig(org.scalatest.stats.color,       color = true,  durations = false, shortstacks = false, fullstacks = false, stats = true)
    assertConfig(org.scalatest.stats.durations,   color = true,  durations = true,  shortstacks = false, fullstacks = false, stats = true)
    assertConfig(org.scalatest.stats.shortstacks, color = true,  durations = false, shortstacks = true,  fullstacks = false, stats = true)
    assertConfig(org.scalatest.stats.fullstacks,  color = true,  durations = false, shortstacks = false, fullstacks = true,  stats = true)
    assertConfig(org.scalatest.stats.stats,       color = true,  durations = false, shortstacks = false, fullstacks = false, stats = true)
    assertConfig(org.scalatest.stats.nocolor,     color = false, durations = false, shortstacks = false, fullstacks = false, stats = true)
    assertConfig(org.scalatest.stats.nodurations, color = true,  durations = false, shortstacks = false, fullstacks = false, stats = true)
    assertConfig(org.scalatest.stats.nostacks,    color = true,  durations = false, shortstacks = false, fullstacks = false, stats = true)
    assertConfig(org.scalatest.stats.nostats,     color = true,  durations = false, shortstacks = false, fullstacks = false, stats = false)
  }
}
| travisbrown/scalatest | src/test/scala/org/scalatest/ShellSuite.scala | Scala | apache-2.0 | 12,066 |
/*******************************************************************************/
/* */
/* Copyright (C) 2017 by Max Lv <max.c.lv@gmail.com> */
/* Copyright (C) 2017 by Mygod Studio <contact-shadowsocks-android@mygod.be> */
/* */
/* This program is free software: you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation, either version 3 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* */
/*******************************************************************************/
package com.github.shadowsocks
import android.content.{ComponentName, Context, Intent, ServiceConnection}
import android.os.{RemoteException, IBinder}
import com.github.shadowsocks.aidl.{IShadowsocksServiceCallback, IShadowsocksService}
import com.github.shadowsocks.utils.Action
import com.github.shadowsocks.ShadowsocksApplication.app
/**
* @author Mygod
*/
/**
 * Mixin managing the bind/unbind lifecycle against the background Shadowsocks
 * service (NAT or VPN flavour, chosen at attach time). Keeps a handle to the
 * AIDL interface in `bgService` and optionally registers a callback for
 * state/bandwidth updates. Not thread-safe; intended for main-thread use.
 */
trait ServiceBoundContext extends Context with IBinder.DeathRecipient {
  private class ShadowsocksServiceConnection extends ServiceConnection {
    override def onServiceConnected(name: ComponentName, service: IBinder) {
      binder = service
      // Get notified (binderDied) if the service process dies while bound.
      service.linkToDeath(ServiceBoundContext.this, 0)
      bgService = IShadowsocksService.Stub.asInterface(service)
      // Register the callback exactly once per connection; a dead binder is
      // tolerated silently (RemoteException swallowed by design).
      if (callback != null && !callbackRegistered) try {
        bgService.registerCallback(callback)
        callbackRegistered = true
        if (listeningForBandwidth) bgService.startListeningForBandwidth(callback)
      } catch {
        case _: RemoteException => // Nothing
      }
      ServiceBoundContext.this.onServiceConnected()
    }
    override def onServiceDisconnected(name: ComponentName) {
      // Unregister before notifying subclasses, then drop the stale handles.
      unregisterCallback()
      ServiceBoundContext.this.onServiceDisconnected()
      bgService = null
      binder = null
    }
  }
  // Toggles bandwidth listening; remembers the desired state so it can be
  // re-applied on the next (re)connection.
  protected def setListeningForBandwidth(value: Boolean) {
    if (listeningForBandwidth != value && bgService != null && callback != null)
      if (value) bgService.startListeningForBandwidth(callback) else bgService.stopListeningForBandwidth(callback)
    listeningForBandwidth = value
  }
  // Best-effort unregistration; always resets the local bookkeeping flags.
  private def unregisterCallback() {
    if (bgService != null && callback != null && callbackRegistered) try bgService.unregisterCallback(callback) catch {
      case _: RemoteException =>
    }
    listeningForBandwidth = false
    callbackRegistered = false
  }
  /** Hook invoked after the service connection is established. */
  protected def onServiceConnected(): Unit = ()
  /**
   * Different from Android framework, this method will be called even when you call `detachService`.
   */
  protected def onServiceDisconnected(): Unit = ()
  override def binderDied(): Unit = ()
  private var callback: IShadowsocksServiceCallback.Stub = _
  private var connection: ShadowsocksServiceConnection = _
  private var callbackRegistered: Boolean = _
  private var listeningForBandwidth: Boolean = _
  // Variables
  private var binder: IBinder = _
  var bgService: IShadowsocksService = _
  /**
   * Binds to the background service if not already bound, picking the NAT or
   * VPN implementation from the app configuration.
   *
   * @param callback optional AIDL callback registered once connected
   */
  protected def attachService(callback: IShadowsocksServiceCallback.Stub = null) {
    this.callback = callback
    if (bgService == null) {
      val s = if (app.isNatEnabled) classOf[ShadowsocksNatService] else classOf[ShadowsocksVpnService]
      val intent = new Intent(this, s)
      intent.setAction(Action.SERVICE)
      connection = new ShadowsocksServiceConnection()
      bindService(intent, connection, Context.BIND_AUTO_CREATE)
    }
  }
  /**
   * Tears everything down: unregisters the callback, fires the disconnect
   * hook, unbinds (ignoring "not bound" errors) and releases the death link.
   */
  protected def detachService() {
    unregisterCallback()
    onServiceDisconnected()
    callback = null
    if (connection != null) {
      try unbindService(connection) catch {
        case _: IllegalArgumentException => // ignore
      }
      connection = null
    }
    if (binder != null) {
      binder.unlinkToDeath(this, 0)
      binder = null
    }
    bgService = null
  }
}
| hangox/shadowsocks-android | mobile/src/main/scala/com/github/shadowsocks/ServiceBoundContext.scala | Scala | gpl-3.0 | 4,796 |
package scavlink.link.fence
import scavlink.link.{VehicleInfo, VehicleType}
import com.typesafe.config.Config
import scala.collection.JavaConversions._
/**
 * Associates a named [[Fence]] with an enforcement mode and a predicate
 * selecting which vehicles the binding applies to.
 */
case class FenceBinding(name: String, fence: Fence, mode: FenceMode.Value, applyTo: VehicleInfo => Boolean)
/**
* Binds defined fences to vehicles by name or type with a predicate,
* and assigns the type of fence enforcement for the binding (stay in, stay out, or report).
*/
object FenceBinding {
  val ConfigVehicleType = "vehicle-type"
  val ConfigVehicleTypes = "vehicle-types"
  val ConfigVehicleName = "vehicle-name"
  val ConfigVehiclePrefix = "vehicle-name-prefix"
  /**
   * Parses all fence bindings from the "bind" section of the configuration.
   * Returns an empty set when no fences are defined or no "bind" section exists.
   */
  def apply(fences: Map[String, Fence], config: Config): Set[FenceBinding] = {
    if (fences.nonEmpty && config.hasPath("bind")) {
      val list = config.getConfigList("bind")
      list.map(parseBinding(fences)).toSet
    } else {
      Set.empty
    }
  }
  /**
   * Parses one binding entry: the referenced fence (must exist in `fences`,
   * otherwise this throws NoSuchElementException), the enforcement mode, and
   * the vehicle-selection predicate.
   */
  def parseBinding(fences: Map[String, Fence])(config: Config): FenceBinding = {
    val name = config.getString("fence")
    val fence = fences(name)
    val mode = FenceMode.withName(config.getString("mode"))
    val apply = parsePredicate(config)
    FenceBinding(name, fence, mode, apply)
  }
  /**
   * Parse a predicate that determines which vehicles the binding will apply to.
   * The first matching config key wins (type, types, exact name, name prefix);
   * with none present the binding applies to every vehicle.
   */
  def parsePredicate(config: Config): (VehicleInfo => Boolean) = {
    if (config.hasPath(ConfigVehicleType)) {
      info: VehicleInfo => VehicleType(info.vehicleType).is(config.getString(ConfigVehicleType))
    } else if (config.hasPath(ConfigVehicleTypes)) {
      info: VehicleInfo => VehicleType(info.vehicleType).is(config.getStringList(ConfigVehicleTypes))
    } else if (config.hasPath(ConfigVehicleName)) {
      info: VehicleInfo => info.id.address == config.getString(ConfigVehicleName)
    } else if (config.hasPath(ConfigVehiclePrefix)) {
      info: VehicleInfo => info.id.address.startsWith(config.getString(ConfigVehiclePrefix))
    } else {
      info => true
    }
  }
  /**
   * Filter fences for a particular vehicle.
   * @param fences full fence set
   * @param vehicleInfo vehicle
   * @return filtered fence set
   *
   * NOTE(review): the sortWith predicate below is not a consistent ordering —
   * bindings on different fences compare false in both directions, and for
   * longer sequences TimSort may reject such comparators at runtime. Also,
   * `f1.mode > f2.mode` places higher-valued modes FIRST, which appears to
   * contradict the "most restrictive modes last" comment, and the subsequent
   * toMap is keyed on the whole FenceBinding (mode included), so it only
   * collapses bindings identical in every field. Confirm the intended
   * precedence semantics before changing or relying on this.
   */
  def filter(fences: Set[FenceBinding], vehicleInfo: VehicleInfo): Set[FenceBinding] = {
    val applicableFences = fences.filter(_.applyTo(vehicleInfo)).toVector
    // predicates may result in multiple fence modes for the same vehicle,
    // so we sort to put the most restrictive modes last
    val sorted = applicableFences.sortWith {
      (f1, f2) => f1.fence == f2.fence && f1.mode > f2.mode
    }
    // when fences are collected into a map, most restrictive mode per vehicle wins
    sorted.map(fence => fence -> fence.mode).toMap.keySet
  }
}
| nickolasrossi/scavlink | src/main/scala/scavlink/link/fence/FenceBinding.scala | Scala | mit | 2,695 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.sql.Date
import java.util.concurrent.TimeUnit
import org.apache.spark.sql.catalyst.plans.logical.{EventTimeTimeout, ProcessingTimeTimeout}
import org.apache.spark.sql.catalyst.util.IntervalUtils
import org.apache.spark.sql.errors.QueryExecutionErrors
import org.apache.spark.sql.execution.streaming.GroupStateImpl._
import org.apache.spark.sql.streaming.{GroupState, GroupStateTimeout}
import org.apache.spark.unsafe.types.UTF8String
/**
* Internal implementation of the [[GroupState]] interface. Methods are not thread-safe.
*
* @param optionalValue Optional value of the state
* @param batchProcessingTimeMs Processing time of current batch, used to calculate timestamp
* for processing time timeouts
* @param timeoutConf Type of timeout configured. Based on this, different operations will
* be supported.
* @param hasTimedOut Whether the key for which this state wrapped is being created is
* getting timed out or not.
*/
private[sql] class GroupStateImpl[S] private(
    optionalValue: Option[S],
    batchProcessingTimeMs: Long,
    eventTimeWatermarkMs: Long,
    timeoutConf: GroupStateTimeout,
    override val hasTimedOut: Boolean,
    watermarkPresent: Boolean) extends GroupState[S] {
  // Current state value; only meaningful when `defined` is true.
  private var value: S = optionalValue.getOrElse(null.asInstanceOf[S])
  private var defined: Boolean = optionalValue.isDefined
  private var updated: Boolean = false // whether value has been updated (but not removed)
  private var removed: Boolean = false // whether value has been removed
  // Requested timeout, as an absolute timestamp in ms; NO_TIMESTAMP when unset.
  private var timeoutTimestamp: Long = NO_TIMESTAMP
  // ========= Public API =========
  override def exists: Boolean = defined
  // Throws when the state is undefined or was removed — callers must check
  // `exists` first or use `getOption`.
  override def get: S = {
    if (defined) {
      value
    } else {
      throw QueryExecutionErrors.stateNotDefinedOrAlreadyRemovedError()
    }
  }
  override def getOption: Option[S] = {
    if (defined) {
      Some(value)
    } else {
      None
    }
  }
  // Replaces the state value; null is rejected so that `remove()` stays the
  // only way to express absence. Clears any pending removal.
  override def update(newValue: S): Unit = {
    if (newValue == null) {
      throw new IllegalArgumentException("'null' is not a valid state value")
    }
    value = newValue
    defined = true
    updated = true
    removed = false
  }
  // Marks the state for removal; also cancels a pending update.
  override def remove(): Unit = {
    defined = false
    updated = false
    removed = true
  }
  // Only legal with ProcessingTimeTimeout; the deadline is relative to the
  // current batch's processing time, not the wall clock at call time.
  override def setTimeoutDuration(durationMs: Long): Unit = {
    if (timeoutConf != ProcessingTimeTimeout) {
      throw QueryExecutionErrors.cannotSetTimeoutDurationError()
    }
    if (durationMs <= 0) {
      throw new IllegalArgumentException("Timeout duration must be positive")
    }
    timeoutTimestamp = durationMs + batchProcessingTimeMs
  }
  override def setTimeoutDuration(duration: String): Unit = {
    setTimeoutDuration(parseDuration(duration))
  }
  // Only legal with EventTimeTimeout; the timestamp must be positive and not
  // behind the current watermark (when a watermark is set).
  override def setTimeoutTimestamp(timestampMs: Long): Unit = {
    checkTimeoutTimestampAllowed()
    if (timestampMs <= 0) {
      throw new IllegalArgumentException("Timeout timestamp must be positive")
    }
    if (eventTimeWatermarkMs != NO_TIMESTAMP && timestampMs < eventTimeWatermarkMs) {
      throw new IllegalArgumentException(
        s"Timeout timestamp ($timestampMs) cannot be earlier than the " +
          s"current watermark ($eventTimeWatermarkMs)")
    }
    timeoutTimestamp = timestampMs
  }
  override def setTimeoutTimestamp(timestampMs: Long, additionalDuration: String): Unit = {
    checkTimeoutTimestampAllowed()
    setTimeoutTimestamp(parseDuration(additionalDuration) + timestampMs)
  }
  override def setTimeoutTimestamp(timestamp: Date): Unit = {
    checkTimeoutTimestampAllowed()
    setTimeoutTimestamp(timestamp.getTime)
  }
  override def setTimeoutTimestamp(timestamp: Date, additionalDuration: String): Unit = {
    checkTimeoutTimestampAllowed()
    setTimeoutTimestamp(timestamp.getTime + parseDuration(additionalDuration))
  }
  // Only available when the query actually defines a watermark.
  override def getCurrentWatermarkMs(): Long = {
    if (!watermarkPresent) {
      throw QueryExecutionErrors.cannotGetEventTimeWatermarkError()
    }
    eventTimeWatermarkMs
  }
  override def getCurrentProcessingTimeMs(): Long = {
    batchProcessingTimeMs
  }
  override def toString: String = {
    s"GroupState(${getOption.map(_.toString).getOrElse("<undefined>")})"
  }
  // ========= Internal API =========
  /** Whether the state has been marked for removing */
  def hasRemoved: Boolean = removed
  /** Whether the state has been updated */
  def hasUpdated: Boolean = updated
  /** Return timeout timestamp or `TIMEOUT_TIMESTAMP_NOT_SET` if not set */
  def getTimeoutTimestamp: Long = timeoutTimestamp
  // Parses a calendar-interval string (e.g. "1 hour") into milliseconds,
  // rejecting negative intervals.
  private def parseDuration(duration: String): Long = {
    val cal = IntervalUtils.stringToInterval(UTF8String.fromString(duration))
    if (IntervalUtils.isNegative(cal)) {
      throw new IllegalArgumentException(s"Provided duration ($duration) is negative")
    }
    IntervalUtils.getDuration(cal, TimeUnit.MILLISECONDS)
  }
  private def checkTimeoutTimestampAllowed(): Unit = {
    if (timeoutConf != EventTimeTimeout) {
      throw QueryExecutionErrors.cannotSetTimeoutTimestampError()
    }
  }
}
private[sql] object GroupStateImpl {
  /** Sentinel long value representing the absence of a valid timestamp. */
  val NO_TIMESTAMP = -1L

  /** Builds a group state wrapper for streaming execution. */
  def createForStreaming[S](
      optionalValue: Option[S],
      batchProcessingTimeMs: Long,
      eventTimeWatermarkMs: Long,
      timeoutConf: GroupStateTimeout,
      hasTimedOut: Boolean,
      watermarkPresent: Boolean): GroupStateImpl[S] =
    new GroupStateImpl[S](
      optionalValue = optionalValue,
      batchProcessingTimeMs = batchProcessingTimeMs,
      eventTimeWatermarkMs = eventTimeWatermarkMs,
      timeoutConf = timeoutConf,
      hasTimedOut = hasTimedOut,
      watermarkPresent = watermarkPresent)

  /**
   * Builds a group state wrapper for batch execution: empty prior state,
   * current wall-clock time as processing time, and no event-time watermark.
   */
  def createForBatch(
      timeoutConf: GroupStateTimeout,
      watermarkPresent: Boolean): GroupStateImpl[Any] =
    new GroupStateImpl[Any](
      optionalValue = None,
      batchProcessingTimeMs = System.currentTimeMillis(),
      eventTimeWatermarkMs = NO_TIMESTAMP,
      timeoutConf = timeoutConf,
      hasTimedOut = false,
      watermarkPresent = watermarkPresent)
}
| maropu/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/GroupStateImpl.scala | Scala | apache-2.0 | 6,839 |
package fr.hmil.roshttp.node.net
import scala.scalajs.js
// Scala.js facade for the options object accepted by Node.js sockets.
// Abstract vals map one-to-one onto the JavaScript object's fields.
private[roshttp] trait SocketOptions extends js.Object {
  // val fd: FileDescriptor - not implemented here
  val allowHalfOpen: Boolean
  val readable: Boolean
  val writable: Boolean
}
private[roshttp] object SocketOptions {
  /**
   * Creates a [[SocketOptions]] literal, writing only the fields that were
   * actually supplied so that absent options stay `undefined` on the JS side.
   */
  def apply(
      allowHalfOpen: js.UndefOr[Boolean],
      readable: js.UndefOr[Boolean],
      writable: js.UndefOr[Boolean]
  ): SocketOptions = {
    val options = js.Dynamic.literal()
    allowHalfOpen.foreach(flag => options.allowHalfOpen = flag)
    readable.foreach(flag => options.readable = flag)
    writable.foreach(flag => options.writable = flag)
    options.asInstanceOf[SocketOptions]
  }
}
| hmil/RosHTTP | js/src/main/scala/fr/hmil/roshttp/node/net/SocketOptions.scala | Scala | mit | 626 |
package com.mesosphere.cosmos.rpc.v1.model
import io.circe.Decoder
import io.circe.Encoder
import io.circe.JsonObject
import io.circe.generic.semiauto.deriveDecoder
import io.circe.generic.semiauto.deriveEncoder
/**
 * Error payload returned by RPC endpoints.
 *
 * @param type    machine-readable error type identifier
 * @param message human-readable description of the failure
 * @param data    optional structured details about the error
 */
case class ErrorResponse(
  `type`: String,
  message: String,
  data: Option[JsonObject] = None
)
object ErrorResponse {
  // circe JSON codecs derived automatically from the case-class structure.
  implicit val encode: Encoder[ErrorResponse] = deriveEncoder
  implicit val decode: Decoder[ErrorResponse] = deriveDecoder
}
| takirala/cosmos | cosmos-common/src/main/scala/com/mesosphere/cosmos/rpc/v1/model/ErrorResponse.scala | Scala | apache-2.0 | 463 |
package freedomandy.data
/**
 * Container descriptor — presumably a storage container; confirm against callers.
 *
 * @param containerName optional name of the container
 * @param containerUri  optional URI at which the container is reachable
 *
 * Created by andy on 10/06/2017.
 */
case class ContainerInfo(containerName: Option[String], containerUri: Option[String])
package scodec
package codecs
import scodec.bits.BitVector
/** Codec for a pair: the two components are encoded/decoded in sequence. */
final class TupleCodec[A, B](A: Codec[A], B: Codec[B]) extends Codec[(A, B)] {

  override def sizeBound: SizeBound =
    A.sizeBound + B.sizeBound

  override def encode(t: (A, B)) =
    Codec.encodeBoth(A, B)(t._1, t._2)

  override def decode(buffer: BitVector) =
    Codec.decodeBoth(A, B)(buffer)

  /** Appends a third component codec. */
  def ~~[C](C: Codec[C]): Tuple3Codec[A,B,C] = new Tuple3Codec(A,B,C)

  /** Views this codec as a codec of `X` via a total widening and a partial narrowing. */
  def widenAs[X](to: (A,B) => X, from: X => Option[(A,B)]): Codec[X] =
    this.widenOpt(to.tupled, from)

  override def toString = s"($A, $B)"
}
/** Codec for a triple: components are encoded/decoded in order, left to right. */
final class Tuple3Codec[A,B,C](A: Codec[A],
                               B: Codec[B],
                               C: Codec[C]) extends Codec[(A,B,C)] {

  /** Appends a fourth component codec. */
  def ~~[D](D: Codec[D]) = new Tuple4Codec(A,B,C,D)

  override def sizeBound: SizeBound =
    A.sizeBound + B.sizeBound + C.sizeBound

  // Decode each component from the remainder left by the previous one.
  override def decode(bits: BitVector) =
    A.decode(bits).flatMap { a =>
      B.decode(a.remainder).flatMap { b =>
        C.decode(b.remainder).map { c =>
          DecodeResult((a.value, b.value, c.value), c.remainder)
        }
      }
    }

  // Concatenate the three component encodings.
  override def encode(abc: (A,B,C)) =
    A.encode(abc._1).flatMap { bitsA =>
      B.encode(abc._2).flatMap { bitsB =>
        C.encode(abc._3).map(bitsC => bitsA ++ bitsB ++ bitsC)
      }
    }

  /** Views this codec as a codec of `X` via a total widening and a partial narrowing. */
  def widenAs[X](to: (A,B,C) => X, from: X => Option[(A,B,C)]): Codec[X] =
    this.widenOpt(to.tupled, from)

  override def toString = s"($A, $B, $C)"
}
/** Codec for 4-element tuples; components are encoded and decoded left to right. */
final class Tuple4Codec[A,B,C,D](A: Codec[A],
                                 B: Codec[B],
                                 C: Codec[C],
                                 D: Codec[D]) extends Codec[(A,B,C,D)] {
  /** Appends a fifth component codec. */
  def ~~[E](E: Codec[E]) = new Tuple5Codec(A,B,C,D,E)
  override def sizeBound: SizeBound =
    A.sizeBound + B.sizeBound + C.sizeBound + D.sizeBound
  override def decode(bits: BitVector) =
    for {
      a <- A.decode(bits)
      b <- B.decode(a.remainder)
      c <- C.decode(b.remainder)
      d <- D.decode(c.remainder)
    } yield DecodeResult((a.value,b.value,c.value,d.value), d.remainder)
  override def encode(abcd: (A,B,C,D)) =
    for {
      bits <- A.encode(abcd._1)
      bits2 <- B.encode(abcd._2)
      bits3 <- C.encode(abcd._3)
      bits4 <- D.encode(abcd._4)
    } yield bits ++ bits2 ++ bits3 ++ bits4
  /** Maps this codec to type `X` via a total mapping in and a partial mapping out. */
  def widenAs[X](to: (A,B,C,D) => X, from: X => Option[(A,B,C,D)]): Codec[X] =
    this.widenOpt(to.tupled, from)
  // Fix: toString was missing here, unlike every other TupleNCodec in this file.
  override def toString = s"($A, $B, $C, $D)"
}
/** Codec for 5-element tuples; components are encoded and decoded left to right. */
final class Tuple5Codec[A,B,C,D,E](A: Codec[A], B: Codec[B], C: Codec[C], D: Codec[D], E: Codec[E])
  extends Codec[(A,B,C,D,E)] {

  /** Appends a sixth component codec. */
  def ~~[F](F: Codec[F]) = new Tuple6Codec(A,B,C,D,E,F)

  // Total size is the sum of the component size bounds.
  override def sizeBound: SizeBound =
    Seq(A.sizeBound, B.sizeBound, C.sizeBound, D.sizeBound, E.sizeBound).reduce(_ + _)

  override def decode(bits: BitVector) =
    for {
      ra <- A.decode(bits)
      rb <- B.decode(ra.remainder)
      rc <- C.decode(rb.remainder)
      rd <- D.decode(rc.remainder)
      re <- E.decode(rd.remainder)
    } yield DecodeResult((ra.value, rb.value, rc.value, rd.value, re.value), re.remainder)

  override def encode(abcde: (A,B,C,D,E)) = {
    val (a, b, c, d, e) = abcde
    for {
      encA <- A.encode(a)
      encB <- B.encode(b)
      encC <- C.encode(c)
      encD <- D.encode(d)
      encE <- E.encode(e)
    } yield encA ++ encB ++ encC ++ encD ++ encE
  }

  /** Maps this codec to type `X` via a total mapping in and a partial mapping out. */
  def widenAs[X](to: (A,B,C,D,E) => X, from: X => Option[(A,B,C,D,E)]): Codec[X] =
    widenOpt(to.tupled, from)

  override def toString = s"($A, $B, $C, $D, $E)"
}
/** Codec for 6-element tuples; components are encoded and decoded left to right. */
final class Tuple6Codec[A,B,C,D,E,F](A: Codec[A], B: Codec[B], C: Codec[C], D: Codec[D],
                                     E: Codec[E], F: Codec[F])
  extends Codec[(A,B,C,D,E,F)] {

  /** Appends a seventh component codec. */
  def ~~[G](G: Codec[G]) = new Tuple7Codec(A,B,C,D,E,F,G)

  // Total size is the sum of the component size bounds.
  override def sizeBound: SizeBound =
    Seq(A.sizeBound, B.sizeBound, C.sizeBound, D.sizeBound, E.sizeBound, F.sizeBound).reduce(_ + _)

  override def decode(bits: BitVector) =
    for {
      ra <- A.decode(bits)
      rb <- B.decode(ra.remainder)
      rc <- C.decode(rb.remainder)
      rd <- D.decode(rc.remainder)
      re <- E.decode(rd.remainder)
      rf <- F.decode(re.remainder)
    } yield DecodeResult((ra.value, rb.value, rc.value, rd.value, re.value, rf.value), rf.remainder)

  override def encode(abcdef: (A,B,C,D,E,F)) = {
    val (a, b, c, d, e, f) = abcdef
    for {
      encA <- A.encode(a)
      encB <- B.encode(b)
      encC <- C.encode(c)
      encD <- D.encode(d)
      encE <- E.encode(e)
      encF <- F.encode(f)
    } yield encA ++ encB ++ encC ++ encD ++ encE ++ encF
  }

  /** Maps this codec to type `X` via a total mapping in and a partial mapping out. */
  def widenAs[X](to: (A,B,C,D,E,F) => X, from: X => Option[(A,B,C,D,E,F)]): Codec[X] =
    widenOpt(to.tupled, from)

  override def toString = s"($A, $B, $C, $D, $E, $F)"
}
/** Codec for 7-element tuples; components are encoded and decoded left to right. */
final class Tuple7Codec[A,B,C,D,E,F,G](A: Codec[A], B: Codec[B], C: Codec[C], D: Codec[D],
                                       E: Codec[E], F: Codec[F], G: Codec[G])
  extends Codec[(A,B,C,D,E,F,G)] {

  /** Appends an eighth component codec. */
  def ~~[H](H: Codec[H]) = new Tuple8Codec(A,B,C,D,E,F,G,H)

  // Total size is the sum of the component size bounds.
  override def sizeBound: SizeBound =
    Seq(A.sizeBound, B.sizeBound, C.sizeBound, D.sizeBound,
        E.sizeBound, F.sizeBound, G.sizeBound).reduce(_ + _)

  override def decode(bits: BitVector) =
    for {
      ra <- A.decode(bits)
      rb <- B.decode(ra.remainder)
      rc <- C.decode(rb.remainder)
      rd <- D.decode(rc.remainder)
      re <- E.decode(rd.remainder)
      rf <- F.decode(re.remainder)
      rg <- G.decode(rf.remainder)
    } yield DecodeResult(
      (ra.value, rb.value, rc.value, rd.value, re.value, rf.value, rg.value), rg.remainder)

  override def encode(abcdefg: (A,B,C,D,E,F,G)) = {
    val (a, b, c, d, e, f, g) = abcdefg
    for {
      encA <- A.encode(a)
      encB <- B.encode(b)
      encC <- C.encode(c)
      encD <- D.encode(d)
      encE <- E.encode(e)
      encF <- F.encode(f)
      encG <- G.encode(g)
    } yield encA ++ encB ++ encC ++ encD ++ encE ++ encF ++ encG
  }

  /** Maps this codec to type `X` via a total mapping in and a partial mapping out. */
  def widenAs[X](to: (A,B,C,D,E,F,G) => X, from: X => Option[(A,B,C,D,E,F,G)]): Codec[X] =
    widenOpt(to.tupled, from)

  override def toString = s"($A, $B, $C, $D, $E, $F, $G)"
}
/** Codec for 8-element tuples; components are encoded and decoded left to right. */
final class Tuple8Codec[A,B,C,D,E,F,G,H](A: Codec[A], B: Codec[B], C: Codec[C], D: Codec[D],
                                         E: Codec[E], F: Codec[F], G: Codec[G], H: Codec[H])
  extends Codec[(A,B,C,D,E,F,G,H)] {

  /** Appends a ninth component codec. */
  def ~~[I](I: Codec[I]) = new Tuple9Codec(A,B,C,D,E,F,G,H,I)

  // Total size is the sum of the component size bounds.
  override def sizeBound: SizeBound =
    Seq(A.sizeBound, B.sizeBound, C.sizeBound, D.sizeBound,
        E.sizeBound, F.sizeBound, G.sizeBound, H.sizeBound).reduce(_ + _)

  override def decode(bits: BitVector) =
    for {
      ra <- A.decode(bits)
      rb <- B.decode(ra.remainder)
      rc <- C.decode(rb.remainder)
      rd <- D.decode(rc.remainder)
      re <- E.decode(rd.remainder)
      rf <- F.decode(re.remainder)
      rg <- G.decode(rf.remainder)
      rh <- H.decode(rg.remainder)
    } yield DecodeResult(
      (ra.value, rb.value, rc.value, rd.value, re.value, rf.value, rg.value, rh.value),
      rh.remainder)

  override def encode(abcdefgh: (A,B,C,D,E,F,G,H)) = {
    val (a, b, c, d, e, f, g, h) = abcdefgh
    for {
      encA <- A.encode(a)
      encB <- B.encode(b)
      encC <- C.encode(c)
      encD <- D.encode(d)
      encE <- E.encode(e)
      encF <- F.encode(f)
      encG <- G.encode(g)
      encH <- H.encode(h)
    } yield encA ++ encB ++ encC ++ encD ++ encE ++ encF ++ encG ++ encH
  }

  /** Maps this codec to type `X` via a total mapping in and a partial mapping out. */
  def widenAs[X](to: (A,B,C,D,E,F,G,H) => X, from: X => Option[(A,B,C,D,E,F,G,H)]): Codec[X] =
    widenOpt(to.tupled, from)

  override def toString = s"($A, $B, $C, $D, $E, $F, $G, $H)"
}
/** Codec for 9-element tuples; components are encoded and decoded left to right. */
final class Tuple9Codec[A,B,C,D,E,F,G,H,I](A: Codec[A], B: Codec[B], C: Codec[C], D: Codec[D],
                                           E: Codec[E], F: Codec[F], G: Codec[G], H: Codec[H],
                                           I: Codec[I])
  extends Codec[(A,B,C,D,E,F,G,H,I)] {

  /** Appends a tenth component codec. */
  def ~~[J](J: Codec[J]) = new Tuple10Codec(A,B,C,D,E,F,G,H,I,J)

  // Total size is the sum of the component size bounds.
  override def sizeBound: SizeBound =
    Seq(A.sizeBound, B.sizeBound, C.sizeBound, D.sizeBound, E.sizeBound,
        F.sizeBound, G.sizeBound, H.sizeBound, I.sizeBound).reduce(_ + _)

  override def decode(bits: BitVector) =
    for {
      ra <- A.decode(bits)
      rb <- B.decode(ra.remainder)
      rc <- C.decode(rb.remainder)
      rd <- D.decode(rc.remainder)
      re <- E.decode(rd.remainder)
      rf <- F.decode(re.remainder)
      rg <- G.decode(rf.remainder)
      rh <- H.decode(rg.remainder)
      ri <- I.decode(rh.remainder)
    } yield DecodeResult(
      (ra.value, rb.value, rc.value, rd.value, re.value, rf.value, rg.value, rh.value, ri.value),
      ri.remainder)

  override def encode(abcdefghi: (A,B,C,D,E,F,G,H,I)) = {
    val (a, b, c, d, e, f, g, h, i) = abcdefghi
    for {
      encA <- A.encode(a)
      encB <- B.encode(b)
      encC <- C.encode(c)
      encD <- D.encode(d)
      encE <- E.encode(e)
      encF <- F.encode(f)
      encG <- G.encode(g)
      encH <- H.encode(h)
      encI <- I.encode(i)
    } yield encA ++ encB ++ encC ++ encD ++ encE ++ encF ++ encG ++ encH ++ encI
  }

  /** Maps this codec to type `X` via a total mapping in and a partial mapping out. */
  def widenAs[X](to: (A,B,C,D,E,F,G,H,I) => X, from: X => Option[(A,B,C,D,E,F,G,H,I)]): Codec[X] =
    widenOpt(to.tupled, from)

  override def toString = s"($A, $B, $C, $D, $E, $F, $G, $H, $I)"
}
/** Codec for 10-element tuples; components are encoded and decoded left to right. */
final class Tuple10Codec[A,B,C,D,E,F,G,H,I,J](A: Codec[A], B: Codec[B], C: Codec[C], D: Codec[D],
                                              E: Codec[E], F: Codec[F], G: Codec[G], H: Codec[H],
                                              I: Codec[I], J: Codec[J])
  extends Codec[(A,B,C,D,E,F,G,H,I,J)] {

  /** Appends an eleventh component codec. */
  def ~~[K](K: Codec[K]) = new Tuple11Codec(A,B,C,D,E,F,G,H,I,J,K)

  // Total size is the sum of the component size bounds.
  override def sizeBound: SizeBound =
    Seq(A.sizeBound, B.sizeBound, C.sizeBound, D.sizeBound, E.sizeBound,
        F.sizeBound, G.sizeBound, H.sizeBound, I.sizeBound, J.sizeBound).reduce(_ + _)

  override def decode(bits: BitVector) =
    for {
      ra <- A.decode(bits)
      rb <- B.decode(ra.remainder)
      rc <- C.decode(rb.remainder)
      rd <- D.decode(rc.remainder)
      re <- E.decode(rd.remainder)
      rf <- F.decode(re.remainder)
      rg <- G.decode(rf.remainder)
      rh <- H.decode(rg.remainder)
      ri <- I.decode(rh.remainder)
      rj <- J.decode(ri.remainder)
    } yield DecodeResult(
      (ra.value, rb.value, rc.value, rd.value, re.value,
       rf.value, rg.value, rh.value, ri.value, rj.value),
      rj.remainder)

  override def encode(abcdefghij: (A,B,C,D,E,F,G,H,I,J)) = {
    val (a, b, c, d, e, f, g, h, i, j) = abcdefghij
    for {
      encA <- A.encode(a)
      encB <- B.encode(b)
      encC <- C.encode(c)
      encD <- D.encode(d)
      encE <- E.encode(e)
      encF <- F.encode(f)
      encG <- G.encode(g)
      encH <- H.encode(h)
      encI <- I.encode(i)
      encJ <- J.encode(j)
    } yield encA ++ encB ++ encC ++ encD ++ encE ++ encF ++ encG ++ encH ++ encI ++ encJ
  }

  /** Maps this codec to type `X` via a total mapping in and a partial mapping out. */
  def widenAs[X](to: (A,B,C,D,E,F,G,H,I,J) => X, from: X => Option[(A,B,C,D,E,F,G,H,I,J)]): Codec[X] =
    widenOpt(to.tupled, from)

  override def toString = s"($A, $B, $C, $D, $E, $F, $G, $H, $I, $J)"
}
/** Codec for 11-element tuples; components are encoded and decoded left to right. */
final class Tuple11Codec[A,B,C,D,E,F,G,H,I,J,K](A: Codec[A], B: Codec[B], C: Codec[C], D: Codec[D],
                                                E: Codec[E], F: Codec[F], G: Codec[G], H: Codec[H],
                                                I: Codec[I], J: Codec[J], K: Codec[K])
  extends Codec[(A,B,C,D,E,F,G,H,I,J,K)] {

  /** Appends a twelfth component codec. */
  def ~~[L](L: Codec[L]) = new Tuple12Codec(A,B,C,D,E,F,G,H,I,J,K,L)

  // Total size is the sum of the component size bounds.
  override def sizeBound: SizeBound =
    Seq(A.sizeBound, B.sizeBound, C.sizeBound, D.sizeBound, E.sizeBound, F.sizeBound,
        G.sizeBound, H.sizeBound, I.sizeBound, J.sizeBound, K.sizeBound).reduce(_ + _)

  override def decode(bits: BitVector) =
    for {
      ra <- A.decode(bits)
      rb <- B.decode(ra.remainder)
      rc <- C.decode(rb.remainder)
      rd <- D.decode(rc.remainder)
      re <- E.decode(rd.remainder)
      rf <- F.decode(re.remainder)
      rg <- G.decode(rf.remainder)
      rh <- H.decode(rg.remainder)
      ri <- I.decode(rh.remainder)
      rj <- J.decode(ri.remainder)
      rk <- K.decode(rj.remainder)
    } yield DecodeResult(
      (ra.value, rb.value, rc.value, rd.value, re.value, rf.value,
       rg.value, rh.value, ri.value, rj.value, rk.value),
      rk.remainder)

  override def encode(abcdefghijk: (A,B,C,D,E,F,G,H,I,J,K)) = {
    val (a, b, c, d, e, f, g, h, i, j, k) = abcdefghijk
    for {
      encA <- A.encode(a)
      encB <- B.encode(b)
      encC <- C.encode(c)
      encD <- D.encode(d)
      encE <- E.encode(e)
      encF <- F.encode(f)
      encG <- G.encode(g)
      encH <- H.encode(h)
      encI <- I.encode(i)
      encJ <- J.encode(j)
      encK <- K.encode(k)
    } yield encA ++ encB ++ encC ++ encD ++ encE ++ encF ++ encG ++ encH ++ encI ++ encJ ++ encK
  }

  /** Maps this codec to type `X` via a total mapping in and a partial mapping out. */
  def widenAs[X](to: (A,B,C,D,E,F,G,H,I,J,K) => X, from: X => Option[(A,B,C,D,E,F,G,H,I,J,K)]): Codec[X] =
    widenOpt(to.tupled, from)

  override def toString = s"($A, $B, $C, $D, $E, $F, $G, $H, $I, $J, $K)"
}
/** Codec for 12-element tuples (the largest arity supported); encoded left to right. */
final class Tuple12Codec[A,B,C,D,E,F,G,H,I,J,K,L](A: Codec[A], B: Codec[B], C: Codec[C],
                                                  D: Codec[D], E: Codec[E], F: Codec[F],
                                                  G: Codec[G], H: Codec[H], I: Codec[I],
                                                  J: Codec[J], K: Codec[K], L: Codec[L])
  extends Codec[(A,B,C,D,E,F,G,H,I,J,K,L)] {

  // Total size is the sum of the component size bounds.
  override def sizeBound: SizeBound =
    Seq(A.sizeBound, B.sizeBound, C.sizeBound, D.sizeBound, E.sizeBound, F.sizeBound,
        G.sizeBound, H.sizeBound, I.sizeBound, J.sizeBound, K.sizeBound, L.sizeBound).reduce(_ + _)

  override def decode(bits: BitVector) =
    for {
      ra <- A.decode(bits)
      rb <- B.decode(ra.remainder)
      rc <- C.decode(rb.remainder)
      rd <- D.decode(rc.remainder)
      re <- E.decode(rd.remainder)
      rf <- F.decode(re.remainder)
      rg <- G.decode(rf.remainder)
      rh <- H.decode(rg.remainder)
      ri <- I.decode(rh.remainder)
      rj <- J.decode(ri.remainder)
      rk <- K.decode(rj.remainder)
      rl <- L.decode(rk.remainder)
    } yield DecodeResult(
      (ra.value, rb.value, rc.value, rd.value, re.value, rf.value,
       rg.value, rh.value, ri.value, rj.value, rk.value, rl.value),
      rl.remainder)

  override def encode(abcdefghijkl: (A,B,C,D,E,F,G,H,I,J,K,L)) = {
    val (a, b, c, d, e, f, g, h, i, j, k, l) = abcdefghijkl
    for {
      encA <- A.encode(a)
      encB <- B.encode(b)
      encC <- C.encode(c)
      encD <- D.encode(d)
      encE <- E.encode(e)
      encF <- F.encode(f)
      encG <- G.encode(g)
      encH <- H.encode(h)
      encI <- I.encode(i)
      encJ <- J.encode(j)
      encK <- K.encode(k)
      encL <- L.encode(l)
    } yield encA ++ encB ++ encC ++ encD ++ encE ++ encF ++
      encG ++ encH ++ encI ++ encJ ++ encK ++ encL
  }

  /** Maps this codec to type `X` via a total mapping in and a partial mapping out. */
  def widenAs[X](to: (A,B,C,D,E,F,G,H,I,J,K,L) => X, from: X => Option[(A,B,C,D,E,F,G,H,I,J,K,L)]): Codec[X] =
    widenOpt(to.tupled, from)

  override def toString = s"($A, $B, $C, $D, $E, $F, $G, $H, $I, $J, $K, $L)"
}
| reactific/scodec | shared/src/main/scala/scodec/codecs/TupleCodec.scala | Scala | bsd-3-clause | 17,125 |
/*
* Copyright (C) 2014 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.plugin.task.statistic
import org.openmole.core.tools.math.Stat
/** Named aggregation functions over samples, delegating to [[Stat]]. */
trait StatisticMethods {

  /** Arithmetic mean of the sample. */
  lazy val average = new StatisticalAggregation[Double] {
    def apply(values: Seq[Double]): Double = Stat.average(values)
  }

  /** Confidence interval of the sample at the given level. */
  def confidenceInterval(level: Double) = new StatisticalAggregation[Double] {
    def apply(values: Seq[Double]): Double = Stat.confidenceInterval(values, level)
  }

  /** Mean squared error of the sample. */
  lazy val meanSquareError = new StatisticalAggregation[Double] {
    def apply(values: Seq[Double]): Double = Stat.meanSquareError(values)
  }

  /** Median absolute deviation, a robust dispersion measure. */
  lazy val medianAbsoluteDeviation = new StatisticalAggregation[Double] {
    def apply(values: Seq[Double]): Double = Stat.medianAbsoluteDeviation(values)
  }

  /** Median of the sample. */
  lazy val median = new StatisticalAggregation[Double] {
    def apply(values: Seq[Double]): Double = Stat.median(values)
  }

  /** Plain sum of the sample. */
  lazy val sum = new StatisticalAggregation[Double] {
    def apply(values: Seq[Double]): Double = values.sum
  }
}
| ISCPIF/PSEExperiments | openmole-src/openmole/plugins/org.openmole.plugin.task.statistic/src/main/scala/org/openmole/plugin/task/statistic/StatisticMethods.scala | Scala | agpl-3.0 | 1,653 |
package akka.contrib.persistence.mongodb
import akka.actor.ActorSystem
import com.typesafe.config.{Config, ConfigFactory}
import scala.concurrent.duration._
/**
 * Verifies that [[CasbahDriverSettings]] picks up the reference.conf defaults
 * and honours user overrides of individual Casbah/MongoDB driver options.
 */
class CasbahDriverSettingsSpec extends BaseUnitTest{
  // Builds a CasbahDriverSettings from the given config and passes it to the test body.
  def fixture[A](config: Config)(testCode: CasbahDriverSettings => A): A = {
    testCode(CasbahDriverSettings(new ActorSystem.Settings(getClass.getClassLoader, config, "settings name")))
  }
  // Defaults shipped in reference.conf (re-loaded per call).
  def reference = ConfigFactory.load()
  // Overrides three settings; everything else should keep its default.
  def overriden = ConfigFactory.parseString(
    """
      |akka.contrib.persistence.mongodb.casbah.socketkeepalive = true
      |akka.contrib.persistence.mongodb.casbah.maxpoolsize = 5
      |akka.contrib.persistence.mongodb.casbah.heartbeatfrequency = 500ms
    """.stripMargin)
  "A settings object" should "correctly load the defaults" in fixture(reference){ s =>
    s.MinConnectionsPerHost shouldBe 0
    s.ConnectionsPerHost shouldBe 100
    s.ThreadsAllowedToBlockforConnectionMultiplier shouldBe 5
    s.ServerSelectionTimeout shouldBe 30.seconds
    s.MaxWaitTime shouldBe 2.minutes
    s.MaxConnectionLifeTime shouldBe 0.seconds
    s.MaxConnectionIdleTime shouldBe 0.seconds
    s.ConnectTimeout shouldBe 10.seconds
    s.SocketTimeout shouldBe 0.millis
    s.SocketKeepAlive shouldBe false
    s.SslEnabled shouldBe false
    s.SslInvalidHostNameAllowed shouldBe false
    s.HeartbeatFrequency shouldBe 10.seconds
    s.MinHeartbeatFrequency shouldBe 500.millis
    s.HeartbeatConnectTimeout shouldBe 20.seconds
    s.HeartbeatSocketTimeout shouldBe 20.seconds
  }
  it should "correctly load overriden values" in fixture(overriden){s =>
    s.SocketKeepAlive shouldBe true
    s.ConnectionsPerHost shouldBe 5
    s.HeartbeatFrequency shouldBe 500.millis
  }
}
| alari/akka-persistence-mongo | casbah/src/test/scala/akka/contrib/persistence/mongodb/CasbahDriverSettingsSpec.scala | Scala | apache-2.0 | 1,725 |
/**
* Copyright 2013 Bayes Technologies
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package olympian.party
/**
 * Mixed into resources that carry an identifier from an external system.
 *
 * Declared as a `var` so implementations can assign the reference after
 * construction.
 *
 * @author Kevin Bayes
 * @since 1.0
 */
trait ExternalRef {
  var externalRef: String
}
package org.modelfun.arrangers
import org.modelfun.Model
/**
 * Arranger intended to place models on a circle.
 * NOTE(review): still a stub — it carries no state or behaviour beyond [[Model]].
 */
// Fix: case classes require a parameter list; the paren-less form is deprecated
// and rejected by Scala 2.11+.
case class OnCircle() extends Model
package codeGen.template
/** Strategy for merging generated content into a textual template. */
trait TemplateWriter {
  /**
   * Writes a string into a template.
   *
   * @param template Template text to fill in
   * @param contents Contents to be written in the template
   * @return the template with the contents applied
   */
  def write(template: String, contents: String): String
}
| sergioifg94/GenDOMan | src/main/java/codeGen/template/TemplateWriter.scala | Scala | gpl-3.0 | 252 |
package tap.engine
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.spark._
import org.apache.spark.SparkContext._
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.stat.{MultivariateStatisticalSummary, Statistics}
import scala.util.Try
import spray.json._
import spray.json.DefaultJsonProtocol._
import spark.jobserver._
/**
 * spark-jobserver job that computes column summary statistics over a named RDD
 * of MLlib vectors registered by an upstream job.
 */
object SummaryStats extends SparkJob with NamedRddSupport {

  /** Local smoke-test entry point; production runs go through spark-jobserver. */
  def main(args: Array[String]): Unit = {
    // Fix: the app name previously said "WordCountExample" (copy-paste leftover).
    val sc = new SparkContext("local[4]", "SummaryStats")
    // NOTE(review): an empty config makes runJob fail on the missing
    // "SummaryStats.input0" key; supply it for a meaningful local run.
    val config = ConfigFactory.parseString("")
    val results = runJob(sc, config)
    println("Result is " + results)
  }

  /** Accepts the job only when the required SummaryStats.input0 key is present. */
  override def validate(sc: SparkContext, config: Config): SparkJobValidation = {
    Try(config.getString("SummaryStats.input0"))
      .map(x => SparkJobValid)
      .getOrElse(SparkJobInvalid("No SummaryStats.input0 config param"))
  }

  /**
   * Looks up the shared RDD named by SummaryStats.input0 and returns its
   * column statistics (count/max/min/mean/numNonzeros/variance) in a map.
   */
  override def runJob(sc: SparkContext, config: Config): Any = {
    val input0Name = config.getString("SummaryStats.input0")
    // .get assumes the upstream job has registered the named RDD; validate()
    // only checks the config key, not the RDD's existence.
    val observations = namedRdds.get[org.apache.spark.mllib.linalg.Vector](input0Name).get
    val summary: MultivariateStatisticalSummary = Statistics.colStats(observations)
    val result = Map(
      "input0" -> input0Name,
      "count" -> summary.count,
      "max" -> summary.max,
      "min" -> summary.min,
      "mean" -> summary.mean,
      "numNonzeros" -> summary.numNonzeros,
      "variance" -> summary.variance
    )
    result
  }
}
| solma/tap_public | tap-engines/src/tap.engine/SummaryStats.scala | Scala | apache-2.0 | 1,468 |
package org.chaomai.paraten.tensor
/**
 * Marker trait for tensors whose shape is tracked at the type level through
 * the phantom type parameter `Shape`. Extends Serializable so instances can
 * be shipped in serialized closures — presumably for distributed execution;
 * TODO confirm against callers.
 */
trait TypedTensor[Shape] extends Serializable
| ChaoMai/ParaTen | src/main/scala/org/chaomai/paraten/tensor/TypedTensor.scala | Scala | apache-2.0 | 129 |
package com.sksamuel.elastic4s.requests.indexes
import com.sksamuel.elastic4s.requests.admin.UpdateIndexLevelSettingsRequest
import com.sksamuel.elastic4s.json.{XContentBuilder, XContentFactory}
/** Builds the JSON body for an update-index-settings request. */
object UpdateIndexLevelSettingsBuilder {

  /**
   * Serialises the request into `{"index": {...}}`, emitting only the
   * settings that were explicitly provided.
   */
  def apply(d: UpdateIndexLevelSettingsRequest): XContentBuilder = {
    val builder = XContentFactory.jsonBuilder().startObject("index")
    d.numberOfReplicas.foreach(builder.field("number_of_replicas", _))
    d.autoExpandReplicas.foreach(builder.field("auto_expand_replicas", _))
    d.refreshInterval.foreach(builder.field("refresh_interval", _))
    d.maxResultWindow.foreach(builder.field("max_result_window", _))
    // Closes both the "index" object and the enclosing document.
    builder.endObject().endObject()
    builder
  }
}
| stringbean/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/indexes/UpdateIndexLevelSettingsBuilder.scala | Scala | apache-2.0 | 710 |
package com.twitter.bijection.macros
import org.scalatest.matchers.should.Matchers
import _root_.java.io.{
ByteArrayOutputStream,
ByteArrayInputStream,
Externalizable,
ObjectInput,
ObjectOutput,
ObjectInputStream,
ObjectOutputStream
}
import _root_.java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}
/** Fixture types used by the macro tests: case classes and their tuple shapes. */
object MacroCaseClasses extends java.io.Serializable {
  // Fully-recursive tuple shapes of the case classes below.
  type Atup = (Int, String)
  type Btup = (Atup, Atup, String)
  type Ctup = (Atup, Btup, Atup, Btup, Btup)
  // These are a single-level unpacking into tuples
  // of the case classes below (nested case classes stay as-is).
  type Atupnr = (Int, String)
  type Btupnr = (SampleClassA, SampleClassA, String)
  type Ctupnr = (SampleClassA, SampleClassB, SampleClassA, SampleClassB, SampleClassB)
  case class SampleClassA(x: Int, y: String)
  case class SampleClassB(a1: SampleClassA, a2: SampleClassA, y: String)
  case class SampleClassC(
    a: SampleClassA,
    b: SampleClassB,
    c: SampleClassA,
    d: SampleClassB,
    e: SampleClassB
  )
  class SampleClassD // Non-case class
}
object Externalizer {
  /** Wraps `t` in a freshly constructed [[Externalizer]]. */
  def apply[T](t: T): Externalizer[T] = {
    val wrapper = new Externalizer[T]
    wrapper.set(t)
    wrapper
  }
}
/**
 * This is a simplified version of com.twitter.chill.Externalizer which only does Java
 * serialization.
 *
 * Instances are mutable because java.io.Externalizable requires a no-arg
 * constructor plus read/write hooks; use the companion `apply` to construct.
 */
class Externalizer[T] extends Externalizable {
  // Either points to a result or a delegate Externalizer to fulfil that result.
  private var item: Either[Externalizer[T], Option[T]] = Right(None)
  // Memoized result of the Java round-trip probe; None = not yet probed.
  @transient private val doesJavaWork = new AtomicReference[Option[Boolean]](None)
  // Guards against re-entrant probing (writeObject below re-enters this class).
  @transient private val testing = new AtomicBoolean(false)
  // No vals or var's below this line!
  // Resolves through any delegate chain to the wrapped value.
  def getOption: Option[T] =
    item match {
      case Left(e) => e.getOption
      case Right(i) => i
    }
  def get: T = getOption.get // This should never be None when get is called
  /**
   * Unfortunately, Java serialization requires mutable objects if you are going to control how the
   * serialization is done. Use the companion object to creat new instances of this
   */
  def set(it: T): Unit = {
    item match {
      case Left(e) => e.set(it)
      case Right(x) =>
        assert(x.isEmpty, "Tried to call .set on an already constructed Externalizer")
        item = Right(Some(it))
    }
  }
  // 1 here is 1 thread, since we will likely only serialize once
  // this should not be a val because we don't want to capture a reference
  def javaWorks: Boolean =
    doesJavaWork.get match {
      case Some(v) => v
      case None => probeJavaWorks
    }
  /**
   * Try to round-trip and see if it works without error
   */
  private def probeJavaWorks: Boolean = {
    // If another probe is in flight, optimistically report success to avoid recursion.
    if (!testing.compareAndSet(false, true)) return true
    try {
      val baos = new ByteArrayOutputStream()
      val oos = new ObjectOutputStream(baos)
      oos.writeObject(getOption)
      val bytes = baos.toByteArray
      val testInput = new ByteArrayInputStream(bytes)
      val ois = new ObjectInputStream(testInput)
      ois.readObject // this may throw
      doesJavaWork.set(Some(true))
      true
    } catch {
      case t: Throwable =>
        t.printStackTrace
        doesJavaWork.set(Some(false))
        false
    } finally {
      testing.set(false)
    }
  }
  override def readExternal(in: ObjectInput) = readJava(in)
  // Restores the payload written by writeJava.
  private def readJava(in: ObjectInput) {
    item = Right(in.readObject.asInstanceOf[Option[T]])
  }
  // Writes the payload only if the probe shows Java serialization works.
  protected def writeJava(out: ObjectOutput): Boolean =
    javaWorks && {
      out.writeObject(getOption)
      true
    }
  override def writeExternal(out: ObjectOutput) = writeJava(out)
}
/** Test mixin asserting values survive an [[Externalizer]] Java round trip. */
trait MacroTestHelper extends Matchers {
  // Fix: replaced deprecated procedure syntax with an explicit `: Unit =`.
  def canExternalize(t: AnyRef): Unit = { Externalizer(t).javaWorks shouldBe true }
}
| twitter/bijection | bijection-macros/src/test/scala/com/twitter/bijection/macros/TestHelpers.scala | Scala | apache-2.0 | 3,727 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.util.{Date, UUID}
import scala.collection.mutable
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.io.{FileCommitProtocol, SparkHadoopWriterUtils}
import org.apache.spark.internal.io.FileCommitProtocol.TaskCommitMessage
import org.apache.spark.shuffle.FetchFailedException
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, ExternalCatalogUtils}
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.catalyst.expressions.{UnsafeProjection, _}
import org.apache.spark.sql.catalyst.plans.physical.HashPartitioning
import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, DateTimeUtils}
import org.apache.spark.sql.execution.{SortExec, SparkPlan, SQLExecution}
import org.apache.spark.sql.types.StringType
import org.apache.spark.util.{SerializableConfiguration, Utils}
/** A helper object for writing FileFormat data out to a location. */
object FileFormatWriter extends Logging {
  /**
   * Max number of files a single task writes out due to file size. In most cases the number of
   * files written should be very small. This is just a safe guard to protect some really bad
   * settings, e.g. maxRecordsPerFile = 1. A task that exceeds this bound is misconfigured.
   */
  private val MAX_FILE_COUNTER = 1000 * 1000
  /**
   * Describes how output files should be placed in the filesystem.
   *
   * @param outputPath base directory for the write job
   * @param customPartitionLocations explicit locations for specific partitions,
   *                                 overriding the default path layout
   * @param outputColumns all columns (data + partition) to be written
   */
  case class OutputSpec(
    outputPath: String,
    customPartitionLocations: Map[TablePartitionSpec, String],
    outputColumns: Seq[Attribute])
  /**
   * A shared job description for all the write tasks. Serializable because it
   * is captured by the executor-side closure in `write`.
   */
  private class WriteJobDescription(
      val uuid: String, // prevent collision between different (appending) write jobs
      val serializableHadoopConf: SerializableConfiguration,
      val outputWriterFactory: OutputWriterFactory,
      val allColumns: Seq[Attribute],
      val dataColumns: Seq[Attribute],
      val partitionColumns: Seq[Attribute],
      val bucketIdExpression: Option[Expression],
      val path: String,
      val customPartitionLocations: Map[TablePartitionSpec, String],
      val maxRecordsPerFile: Long,
      val timeZoneId: String,
      val statsTrackers: Seq[WriteJobStatsTracker])
    extends Serializable {

    // Invariant: the full column list is exactly partition columns + data columns.
    assert(AttributeSet(allColumns) == AttributeSet(partitionColumns ++ dataColumns),
      s"""
         |All columns: ${allColumns.mkString(", ")}
         |Partition columns: ${partitionColumns.mkString(", ")}
         |Data columns: ${dataColumns.mkString(", ")}
       """.stripMargin)
  }
  /** The result of a successful write task: the commit message plus write stats/partitions. */
  private case class WriteTaskResult(commitMsg: TaskCommitMessage, summary: ExecutedWriteSummary)
  /**
   * Basic work flow of this command is:
   * 1. Driver side setup, including output committer initialization and data source specific
   *    preparation work for the write job to be issued.
   * 2. Issues a write job consists of one or more executor side tasks, each of which writes all
   *    rows within an RDD partition.
   * 3. If no exception is thrown in a task, commits that task, otherwise aborts that task;  If any
   *    exception is thrown during task commitment, also aborts that task.
   * 4. If all tasks are committed, commit the job, otherwise aborts the job;  If any exception is
   *    thrown during job commitment, also aborts the job.
   * 5. If the job is successfully committed, perform post-commit operations such as
   *    processing statistics.
   * @return The set of all partition paths that were updated during this write job.
   */
  def write(
      sparkSession: SparkSession,
      plan: SparkPlan,
      fileFormat: FileFormat,
      committer: FileCommitProtocol,
      outputSpec: OutputSpec,
      hadoopConf: Configuration,
      partitionColumns: Seq[Attribute],
      bucketSpec: Option[BucketSpec],
      statsTrackers: Seq[WriteJobStatsTracker],
      options: Map[String, String])
    : Set[String] = {

    val job = Job.getInstance(hadoopConf)
    job.setOutputKeyClass(classOf[Void])
    job.setOutputValueClass(classOf[InternalRow])
    FileOutputFormat.setOutputPath(job, new Path(outputSpec.outputPath))

    // Data columns are everything that is not a partition column.
    val partitionSet = AttributeSet(partitionColumns)
    val dataColumns = outputSpec.outputColumns.filterNot(partitionSet.contains)

    val bucketIdExpression = bucketSpec.map { spec =>
      val bucketColumns = spec.bucketColumnNames.map(c => dataColumns.find(_.name == c).get)
      // Use `HashPartitioning.partitionIdExpression` as our bucket id expression, so that we can
      // guarantee the data distribution is same between shuffle and bucketed data source, which
      // enables us to only shuffle one side when join a bucketed table and a normal one.
      HashPartitioning(bucketColumns, spec.numBuckets).partitionIdExpression
    }
    val sortColumns = bucketSpec.toSeq.flatMap {
      spec => spec.sortColumnNames.map(c => dataColumns.find(_.name == c).get)
    }

    val caseInsensitiveOptions = CaseInsensitiveMap(options)

    // Note: prepareWrite has side effect. It sets "job".
    val outputWriterFactory =
      fileFormat.prepareWrite(sparkSession, job, caseInsensitiveOptions, dataColumns.toStructType)

    // Immutable description shared with every executor-side write task.
    val description = new WriteJobDescription(
      uuid = UUID.randomUUID().toString,
      serializableHadoopConf = new SerializableConfiguration(job.getConfiguration),
      outputWriterFactory = outputWriterFactory,
      allColumns = outputSpec.outputColumns,
      dataColumns = dataColumns,
      partitionColumns = partitionColumns,
      bucketIdExpression = bucketIdExpression,
      path = outputSpec.outputPath,
      customPartitionLocations = outputSpec.customPartitionLocations,
      maxRecordsPerFile = caseInsensitiveOptions.get("maxRecordsPerFile").map(_.toLong)
        .getOrElse(sparkSession.sessionState.conf.maxRecordsPerFile),
      timeZoneId = caseInsensitiveOptions.get(DateTimeUtils.TIMEZONE_OPTION)
        .getOrElse(sparkSession.sessionState.conf.sessionLocalTimeZone),
      statsTrackers = statsTrackers
    )

    // We should first sort by partition columns, then bucket id, and finally sorting columns.
    val requiredOrdering = partitionColumns ++ bucketIdExpression ++ sortColumns
    // the sort order doesn't matter
    val actualOrdering = plan.outputOrdering.map(_.child)
    // Detect whether the child plan already satisfies the required ordering so we
    // can skip the extra sort below.
    val orderingMatched = if (requiredOrdering.length > actualOrdering.length) {
      false
    } else {
      requiredOrdering.zip(actualOrdering).forall {
        case (requiredOrder, childOutputOrder) =>
          requiredOrder.semanticEquals(childOutputOrder)
      }
    }

    SQLExecution.checkSQLExecutionId(sparkSession)

    // This call shouldn't be put into the `try` block below because it only initializes and
    // prepares the job, any exception thrown from here shouldn't cause abortJob() to be called.
    committer.setupJob(job)

    try {
      val rdd = if (orderingMatched) {
        plan.execute()
      } else {
        // SPARK-21165: the `requiredOrdering` is based on the attributes from analyzed plan, and
        // the physical plan may have different attribute ids due to optimizer removing some
        // aliases. Here we bind the expression ahead to avoid potential attribute ids mismatch.
        val orderingExpr = requiredOrdering
          .map(SortOrder(_, Ascending))
          .map(BindReferences.bindReference(_, outputSpec.outputColumns))
        SortExec(
          orderingExpr,
          global = false,
          child = plan).execute()
      }

      // SPARK-23271 If we are attempting to write a zero partition rdd, create a dummy single
      // partition rdd to make sure we at least set up one write task to write the metadata.
      val rddWithNonEmptyPartitions = if (rdd.partitions.length == 0) {
        sparkSession.sparkContext.parallelize(Array.empty[InternalRow], 1)
      } else {
        rdd
      }

      val ret = new Array[WriteTaskResult](rddWithNonEmptyPartitions.partitions.length)
      sparkSession.sparkContext.runJob(
        rddWithNonEmptyPartitions,
        (taskContext: TaskContext, iter: Iterator[InternalRow]) => {
          executeTask(
            description = description,
            sparkStageId = taskContext.stageId(),
            sparkPartitionId = taskContext.partitionId(),
            sparkAttemptNumber = taskContext.attemptNumber(),
            committer,
            iterator = iter)
        },
        rddWithNonEmptyPartitions.partitions.indices,
        (index, res: WriteTaskResult) => {
          committer.onTaskCommit(res.commitMsg)
          ret(index) = res
        })

      val commitMsgs = ret.map(_.commitMsg)

      committer.commitJob(job, commitMsgs)
      logInfo(s"Job ${job.getJobID} committed.")
      processStats(description.statsTrackers, ret.map(_.summary.stats))
      logInfo(s"Finished processing stats for job ${job.getJobID}.")

      // return a set of all the partition paths that were updated during this job
      ret.map(_.summary.updatedPartitions).reduceOption(_ ++ _).getOrElse(Set.empty)
    } catch { case cause: Throwable =>
      logError(s"Aborting job ${job.getJobID}.", cause)
      committer.abortJob(job)
      throw new SparkException("Job aborted.", cause)
    }
  }
/** Writes data out in a single Spark task. */
/**
 * Writes all rows of one RDD partition out in a single Spark task.
 *
 * Builds a Hadoop task-attempt context, registers the attempt with the commit
 * protocol, picks the appropriate write-task implementation, runs it, and then
 * commits (or, on failure, aborts) the task attempt.
 *
 * @param description immutable description of the write job (columns, trackers, conf)
 * @param sparkStageId stage id, used to derive a unique Hadoop job id
 * @param sparkPartitionId partition index, used as the Hadoop map-task id
 * @param sparkAttemptNumber attempt number of this task
 * @param committer commit protocol coordinating per-task and per-job commits
 * @param iterator the rows to write
 * @return the committer's task commit message plus a summary of what was written
 */
private def executeTask(
    description: WriteJobDescription,
    sparkStageId: Int,
    sparkPartitionId: Int,
    sparkAttemptNumber: Int,
    committer: FileCommitProtocol,
    iterator: Iterator[InternalRow]): WriteTaskResult = {
  val jobId = SparkHadoopWriterUtils.createJobID(new Date, sparkStageId)
  val taskId = new TaskID(jobId, TaskType.MAP, sparkPartitionId)
  val taskAttemptId = new TaskAttemptID(taskId, sparkAttemptNumber)
  // Set up the attempt context required to use in the output committer.
  val taskAttemptContext: TaskAttemptContext = {
    // Set up the configuration object
    val hadoopConf = description.serializableHadoopConf.value
    hadoopConf.set("mapreduce.job.id", jobId.toString)
    hadoopConf.set("mapreduce.task.id", taskAttemptId.getTaskID.toString)
    hadoopConf.set("mapreduce.task.attempt.id", taskAttemptId.toString)
    hadoopConf.setBoolean("mapreduce.task.ismap", true)
    hadoopConf.setInt("mapreduce.task.partition", 0)
    new TaskAttemptContextImpl(hadoopConf, taskAttemptId)
  }
  committer.setupTask(taskAttemptContext)
  val writeTask =
    if (sparkPartitionId != 0 && !iterator.hasNext) {
      // In case of empty job, leave first partition to save meta for file format like parquet.
      new EmptyDirectoryWriteTask(description)
    } else if (description.partitionColumns.isEmpty && description.bucketIdExpression.isEmpty) {
      new SingleDirectoryWriteTask(description, taskAttemptContext, committer)
    } else {
      new DynamicPartitionWriteTask(description, taskAttemptContext, committer)
    }
  try {
    Utils.tryWithSafeFinallyAndFailureCallbacks(block = {
      // Execute the task to write rows out and commit the task.
      val summary = writeTask.execute(iterator)
      writeTask.releaseResources()
      WriteTaskResult(committer.commitTask(taskAttemptContext), summary)
    })(catchBlock = {
      // If there is an error, release resource and then abort the task
      try {
        writeTask.releaseResources()
      } finally {
        committer.abortTask(taskAttemptContext)
        logError(s"Job $jobId aborted.")
      }
    })
  } catch {
    // Re-throw fetch failures unwrapped so they keep their original type.
    case e: FetchFailedException =>
      throw e
    case t: Throwable =>
      throw new SparkException("Task failed while writing rows.", t)
  }
}
/**
 * Dispatches the per-task [[WriteTaskStats]] back to the [[WriteJobStatsTracker]] that
 * produced them: tracker i receives the i-th stats object collected from every task.
 */
private def processStats(
    statsTrackers: Seq[WriteJobStatsTracker],
    statsPerTask: Seq[Seq[WriteTaskStats]])
  : Unit = {

  val numStatsTrackers = statsTrackers.length
  assert(statsPerTask.forall(_.length == numStatsTrackers),
    s"""Every WriteTask should have produced one `WriteTaskStats` object for every tracker.
       |There are $numStatsTrackers statsTrackers, but some task returned
       |${statsPerTask.find(_.length != numStatsTrackers).get.length} results instead.
     """.stripMargin)

  // Regroup the task-major stats into tracker-major order. With no tasks there is
  // nothing to transpose, so each tracker simply gets an empty sequence.
  val statsPerTracker =
    if (statsPerTask.isEmpty) statsTrackers.map(_ => Seq.empty)
    else statsPerTask.transpose

  statsTrackers.indices.foreach { i =>
    statsTrackers(i).processStats(statsPerTracker(i))
  }
}
/**
 * A simple trait for writing out data in a single Spark task, without any concerns about how
 * to commit or abort tasks. Exceptions thrown by the implementation of this trait will
 * automatically trigger task aborts.
 */
private trait ExecuteWriteTask {
  /**
   * Writes data out to files, and then returns the summary of relative information which
   * includes the list of partition strings written out. The list of partitions is sent back
   * to the driver and used to update the catalog. Other information will be sent back to the
   * driver too and used to e.g. update the metrics in UI.
   */
  def execute(iterator: Iterator[InternalRow]): ExecutedWriteSummary

  /**
   * Closes any open writer held by this task. The implementations in this file null out
   * their writer after closing, so calling this more than once is safe (it is invoked both
   * on the normal path and again from the task-abort path).
   */
  def releaseResources(): Unit
}
/**
 * Write task used for empty, non-first partitions: it writes no files and only reports
 * the final (empty) statistics from each tracker.
 */
private class EmptyDirectoryWriteTask(description: WriteJobDescription)
  extends ExecuteWriteTask {

  // One per-task tracker instance for each job-level tracker.
  val statsTrackers: Seq[WriteTaskStatsTracker] =
    description.statsTrackers.map(_.newTaskInstance())

  override def execute(iter: Iterator[InternalRow]): ExecutedWriteSummary =
    ExecutedWriteSummary(
      updatedPartitions = Set.empty,
      stats = statsTrackers.map(_.getFinalStats()))

  // Nothing to release: this task never opens a writer.
  override def releaseResources(): Unit = ()
}
/** Writes data to a single directory (used for non-dynamic-partition writes). */
private class SingleDirectoryWriteTask(
    description: WriteJobDescription,
    taskAttemptContext: TaskAttemptContext,
    committer: FileCommitProtocol) extends ExecuteWriteTask {

  // Writer for the file currently being written; null when no file is open.
  private[this] var currentWriter: OutputWriter = _

  /** Per-task instances of the job-level statistics trackers. */
  val statsTrackers: Seq[WriteTaskStatsTracker] =
    description.statsTrackers.map(_.newTaskInstance())

  /**
   * Opens a new output file and points `currentWriter` at it.
   *
   * @param fileCounter ordinal of the file within this task; keeps file names unique when
   *                    `maxRecordsPerFile` forces a roll-over to a new file.
   */
  private def newOutputWriter(fileCounter: Int): Unit = {
    val ext = description.outputWriterFactory.getFileExtension(taskAttemptContext)
    val currentPath = committer.newTaskTempFile(
      taskAttemptContext,
      None,
      f"-c$fileCounter%03d" + ext)

    currentWriter = description.outputWriterFactory.newInstance(
      path = currentPath,
      dataSchema = description.dataColumns.toStructType,
      context = taskAttemptContext)

    // Notify every tracker of the new file. Use foreach (not map): newFile() is called
    // purely for its side effect, matching DynamicPartitionWriteTask.newOutputWriter.
    statsTrackers.foreach(_.newFile(currentPath))
  }

  override def execute(iter: Iterator[InternalRow]): ExecutedWriteSummary = {
    var fileCounter = 0
    var recordsInFile: Long = 0L
    newOutputWriter(fileCounter)

    while (iter.hasNext) {
      if (description.maxRecordsPerFile > 0 && recordsInFile >= description.maxRecordsPerFile) {
        // Per-file record limit reached: close the current file and roll over to a new one.
        fileCounter += 1
        assert(fileCounter < MAX_FILE_COUNTER,
          s"File counter $fileCounter is beyond max value $MAX_FILE_COUNTER")
        recordsInFile = 0
        releaseResources()
        newOutputWriter(fileCounter)
      }

      val internalRow = iter.next()
      currentWriter.write(internalRow)
      statsTrackers.foreach(_.newRow(internalRow))
      recordsInFile += 1
    }
    releaseResources()
    ExecutedWriteSummary(
      updatedPartitions = Set.empty,
      stats = statsTrackers.map(_.getFinalStats()))
  }

  override def releaseResources(): Unit = {
    if (currentWriter != null) {
      try {
        currentWriter.close()
      } finally {
        // Null out so a second call (e.g. from the abort path) is a no-op.
        currentWriter = null
      }
    }
  }
}
/**
 * Writes data using dynamic partition writes, meaning this single write task can write to
 * multiple directories (partitions) or files (bucketing).
 *
 * The loop in `execute` opens a fresh writer whenever the (partitionValues, bucketId) key of
 * the incoming row differs from the previous row's key, so it assumes the input iterator is
 * clustered by that key (the caller inserts a SortExec when the child's ordering does not
 * already match the required ordering).
 */
private class DynamicPartitionWriteTask(
    desc: WriteJobDescription,
    taskAttemptContext: TaskAttemptContext,
    committer: FileCommitProtocol) extends ExecuteWriteTask {

  /** Flag saying whether or not the data to be written out is partitioned. */
  val isPartitioned = desc.partitionColumns.nonEmpty

  /** Flag saying whether or not the data to be written out is bucketed. */
  val isBucketed = desc.bucketIdExpression.isDefined

  assert(isPartitioned || isBucketed,
    s"""DynamicPartitionWriteTask should be used for writing out data that's either
       |partitioned or bucketed. In this case neither is true.
       |WriteJobDescription: ${desc}
     """.stripMargin)

  // currentWriter is initialized whenever we see a new key (partitionValues + BucketId)
  private var currentWriter: OutputWriter = _

  /** Trackers for computing various statistics on the data as it's being written out. */
  private val statsTrackers: Seq[WriteTaskStatsTracker] =
    desc.statsTrackers.map(_.newTaskInstance())

  /** Extracts the partition values out of an input row. */
  private lazy val getPartitionValues: InternalRow => UnsafeRow = {
    val proj = UnsafeProjection.create(desc.partitionColumns, desc.allColumns)
    row => proj(row)
  }

  /** Expression that given partition columns builds a path string like: col1=val/col2=val/... */
  private lazy val partitionPathExpression: Expression = Concat(
    desc.partitionColumns.zipWithIndex.flatMap { case (c, i) =>
      // Each column renders as an escaped "name=value" component; path separators are
      // placed between components, never before the first one.
      val partitionName = ScalaUDF(
        ExternalCatalogUtils.getPartitionPathString _,
        StringType,
        Seq(Literal(c.name), Cast(c, StringType, Option(desc.timeZoneId))))
      if (i == 0) Seq(partitionName) else Seq(Literal(Path.SEPARATOR), partitionName)
    })

  /** Evaluates the `partitionPathExpression` above on a row of `partitionValues` and returns
   * the partition string. */
  private lazy val getPartitionPath: InternalRow => String = {
    val proj = UnsafeProjection.create(Seq(partitionPathExpression), desc.partitionColumns)
    row => proj(row).getString(0)
  }

  /** Given an input row, returns the corresponding `bucketId` */
  private lazy val getBucketId: InternalRow => Int = {
    val proj = UnsafeProjection.create(desc.bucketIdExpression.toSeq, desc.allColumns)
    row => proj(row).getInt(0)
  }

  /** Returns the data columns to be written given an input row */
  private val getOutputRow = UnsafeProjection.create(desc.dataColumns, desc.allColumns)

  /**
   * Opens a new OutputWriter given a partition key and/or a bucket id.
   * If bucket id is specified, we will append it to the end of the file name, but before the
   * file extension, e.g. part-r-00009-ea518ad4-455a-4431-b471-d24e03814677-00002.gz.parquet
   *
   * @param partitionValues the partition which all tuples being written by this `OutputWriter`
   *                        belong to
   * @param bucketId the bucket which all tuples being written by this `OutputWriter` belong to
   * @param fileCounter the number of files that have been written in the past for this specific
   *                    partition. This is used to limit the max number of records written for a
   *                    single file. The value should start from 0.
   * @param updatedPartitions the set of updated partition paths, we should add the new partition
   *                          path of this writer to it.
   */
  private def newOutputWriter(
      partitionValues: Option[InternalRow],
      bucketId: Option[Int],
      fileCounter: Int,
      updatedPartitions: mutable.Set[String]): Unit = {
    val partDir = partitionValues.map(getPartitionPath(_))
    partDir.foreach(updatedPartitions.add)

    val bucketIdStr = bucketId.map(BucketingUtils.bucketIdToString).getOrElse("")
    // This must be in a form that matches our bucketing format. See BucketingUtils.
    val ext = f"$bucketIdStr.c$fileCounter%03d" +
      desc.outputWriterFactory.getFileExtension(taskAttemptContext)

    // Partitions with a user-specified location are written via newTaskTempFileAbsPath
    // instead of the default task temp directory.
    val customPath = partDir.flatMap { dir =>
      desc.customPartitionLocations.get(PartitioningUtils.parsePathFragment(dir))
    }
    val currentPath = if (customPath.isDefined) {
      committer.newTaskTempFileAbsPath(taskAttemptContext, customPath.get, ext)
    } else {
      committer.newTaskTempFile(taskAttemptContext, partDir, ext)
    }

    currentWriter = desc.outputWriterFactory.newInstance(
      path = currentPath,
      dataSchema = desc.dataColumns.toStructType,
      context = taskAttemptContext)

    statsTrackers.foreach(_.newFile(currentPath))
  }

  override def execute(iter: Iterator[InternalRow]): ExecutedWriteSummary = {
    // If anything below fails, we should abort the task.
    var recordsInFile: Long = 0L
    var fileCounter = 0
    val updatedPartitions = mutable.Set[String]()
    // NOTE(review): "Partion" is a typo for "Partition"; the name is kept unchanged here.
    var currentPartionValues: Option[UnsafeRow] = None
    var currentBucketId: Option[Int] = None

    for (row <- iter) {
      val nextPartitionValues = if (isPartitioned) Some(getPartitionValues(row)) else None
      val nextBucketId = if (isBucketed) Some(getBucketId(row)) else None

      if (currentPartionValues != nextPartitionValues || currentBucketId != nextBucketId) {
        // See a new partition or bucket - write to a new partition dir (or a new bucket file).
        if (isPartitioned && currentPartionValues != nextPartitionValues) {
          // copy() the projected row before storing it — presumably because the projection
          // reuses its backing buffer across rows; NOTE(review): confirm against
          // UnsafeProjection semantics.
          currentPartionValues = Some(nextPartitionValues.get.copy())
          statsTrackers.foreach(_.newPartition(currentPartionValues.get))
        }
        if (isBucketed) {
          currentBucketId = nextBucketId
          statsTrackers.foreach(_.newBucket(currentBucketId.get))
        }

        recordsInFile = 0
        fileCounter = 0

        releaseResources()
        newOutputWriter(currentPartionValues, currentBucketId, fileCounter, updatedPartitions)
      } else if (desc.maxRecordsPerFile > 0 &&
          recordsInFile >= desc.maxRecordsPerFile) {
        // Exceeded the threshold in terms of the number of records per file.
        // Create a new file by increasing the file counter.
        recordsInFile = 0
        fileCounter += 1
        assert(fileCounter < MAX_FILE_COUNTER,
          s"File counter $fileCounter is beyond max value $MAX_FILE_COUNTER")

        releaseResources()
        newOutputWriter(currentPartionValues, currentBucketId, fileCounter, updatedPartitions)
      }

      val outputRow = getOutputRow(row)
      currentWriter.write(outputRow)
      statsTrackers.foreach(_.newRow(outputRow))
      recordsInFile += 1
    }
    releaseResources()

    ExecutedWriteSummary(
      updatedPartitions = updatedPartitions.toSet,
      stats = statsTrackers.map(_.getFinalStats()))
  }

  override def releaseResources(): Unit = {
    if (currentWriter != null) {
      try {
        currentWriter.close()
      } finally {
        // Null out so a second call (e.g. from the abort path) is a no-op.
        currentWriter = null
      }
    }
  }
}
}
/**
 * Wrapper class for the metrics of writing data out.
 *
 * @param updatedPartitions the partitions updated during writing data out. Only valid
 *                          for dynamic partition.
 * @param stats one `WriteTaskStats` object for every `WriteJobStatsTracker` that the job had.
 */
case class ExecutedWriteSummary(
  updatedPartitions: Set[String],
  stats: Seq[WriteTaskStats])
| brad-kaiser/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormatWriter.scala | Scala | apache-2.0 | 24,843 |
/*
* Copyright 2009-2010 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package textapp
import org.specs.Specification
import ccf.tree.operation._
import ccf.tree.indexing._
// Specification for TextDocument: checks that the text content and its MD5 hash stay
// consistent as tree operations (insert / delete / no-op) are applied.
object TextDocumentSpec extends Specification {
  "Empty TextDocument" should {
    val doc = new TextDocument("")
    "be empty initially" in {
      doc.text must equalTo("")
    }
    "calculate initial MD5 hash correctly" in {
      // d41d8cd98f00b204e9800998ecf8427e is the well-known MD5 digest of the empty string.
      doc.hash must equalTo("d41d8cd98f00b204e9800998ecf8427e")
    }
    "ignore no-ops" in {
      doc.applyOp(NoOperation())
      doc.text must equalTo("")
    }
  }
  "TextDocument with operations applied" should {
    val doc = new TextDocument("")
    doBefore {
      // Build "abcd" one character at a time, then delete the leading 'a', leaving "bcd".
      doc.applyOp(new InsertOperation(TreeIndex(0), Elem('a')))
      doc.applyOp(new InsertOperation(TreeIndex(1), Elem('b')))
      doc.applyOp(new InsertOperation(TreeIndex(2), Elem('c')))
      doc.applyOp(new InsertOperation(TreeIndex(3), Elem('d')))
      doc.applyOp(new DeleteOperation(TreeIndex(0)))
    }
    "have correct text" in {
      doc.text must equalTo("bcd")
    }
    "calculate MD5 hash correctly" in {
      // presumably the MD5 digest of "bcd" — confirm against the hashing implementation.
      doc.hash must equalTo("d4b7c284882ca9e208bb65e8abd5f4c8")
    }
  }
}
| akisaarinen/ccf | app/src/test/scala/textapp/TextDocumentSpec.scala | Scala | apache-2.0 | 1,733 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.tree.impurity
import org.apache.spark.annotation.{DeveloperApi, Experimental}
/**
 * :: Experimental ::
 * Object for computing [[http://en.wikipedia.org/wiki/Binary_entropy_function entropy]] as
 * the impurity measure during classification.
 */
@Experimental
object Entropy extends Impurity {

  // Base-2 logarithm, used so entropy is measured in bits.
  private[tree] def log2(x: Double) = scala.math.log(x) / scala.math.log(2)

  /**
   * :: DeveloperApi ::
   * Entropy calculation for multiclass classification.
   *
   * @param counts label counts, one entry per class
   * @param totalCount sum of all entries of `counts`
   * @return entropy value, or 0 if totalCount = 0
   */
  @DeveloperApi
  override def calculate(counts: Array[Double], totalCount: Double): Double = {
    if (totalCount == 0) {
      0.0
    } else {
      // Accumulate -p * log2(p) over the classes that actually occur; classes with a
      // zero count contribute nothing.
      counts.foldLeft(0.0) { (impurity, classCount) =>
        if (classCount == 0) {
          impurity
        } else {
          val freq = classCount / totalCount
          impurity - freq * log2(freq)
        }
      }
    }
  }

  /**
   * :: DeveloperApi ::
   * Variance calculation — not defined for the entropy impurity, which is
   * classification-only, so this always throws.
   *
   * @param count number of instances
   * @param sum sum of labels
   * @param sumSquares summation of squares of the labels
   */
  @DeveloperApi
  override def calculate(count: Double, sum: Double, sumSquares: Double): Double =
    throw new UnsupportedOperationException("Entropy.calculate")

  /**
   * Get this impurity instance.
   * This is useful for passing impurity parameters to a Strategy in Java.
   */
  def instance: this.type = this
}
/**
 * Updates flat arrays of sufficient statistics (per-class label counts) from which entropy
 * can later be computed. Instances never own the data: they only read and write contiguous
 * slices of externally-allocated stat arrays.
 *
 * @param numClasses number of label classes; also the per-(node, feature, bin) stats size
 */
private[tree] class EntropyAggregator(numClasses: Int)
  extends ImpurityAggregator(numClasses) with Serializable {

  /**
   * Adds `instanceWeight` to the count for `label` within the stats slice that starts at
   * `offset` in `allStats`.
   *
   * @param allStats flat stats array; the (node, feature, bin) slice is contiguous
   * @param offset start index of the slice for this (node, feature, bin)
   */
  def update(allStats: Array[Double], offset: Int, label: Double, instanceWeight: Double): Unit = {
    // Reject labels outside [0, numClasses) before touching the array.
    if (label >= statsSize) {
      throw new IllegalArgumentException(s"EntropyAggregator given label $label" +
        s" but requires label < numClasses (= $statsSize).")
    } else if (label < 0) {
      throw new IllegalArgumentException(s"EntropyAggregator given label $label" +
        s"but requires label is non-negative.")
    }
    allStats(offset + label.toInt) += instanceWeight
  }

  /**
   * Builds an [[EntropyCalculator]] over a copy of the stats slice that starts at `offset`.
   *
   * @param allStats flat stats array; the (node, feature, bin) slice is contiguous
   * @param offset start index of the slice for this (node, feature, bin)
   */
  def getCalculator(allStats: Array[Double], offset: Int): EntropyCalculator = {
    new EntropyCalculator(allStats.slice(offset, offset + statsSize))
  }
}
/**
 * Holds and interprets the sufficient statistics of a single (node, feature, bin).
 * Unlike [[EntropyAggregator]], an instance owns its `stats` array.
 *
 * @param stats per-class label counts for one (node, feature, bin)
 */
private[tree] class EntropyCalculator(stats: Array[Double]) extends ImpurityCalculator(stats) {

  /** Deep copy: the stats array is cloned so the two calculators evolve independently. */
  def copy: EntropyCalculator = new EntropyCalculator(stats.clone())

  /** Entropy of the stored label distribution. */
  def calculate(): Double = Entropy.calculate(stats, stats.sum)

  /** Total (weighted) number of data points behind these statistics. */
  def count: Long = stats.sum.toLong

  /** Most frequent label, or 0 when no data has been seen. */
  def predict: Double =
    if (count == 0) 0 else indexOfLargestArrayElement(stats)

  /** Probability of `label` under the stored distribution; 0 when there is no data. */
  override def prob(label: Double): Double = {
    val lbl = label.toInt
    require(lbl < stats.length,
      s"EntropyCalculator.prob given invalid label: $lbl (should be < ${stats.length}")
    require(lbl >= 0, "Entropy does not support negative labels")
    count match {
      case 0L => 0
      case cnt => stats(lbl) / cnt
    }
  }

  override def toString: String = s"EntropyCalculator(stats = [${stats.mkString(", ")}])"
}
| andrewor14/iolap | mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Entropy.scala | Scala | apache-2.0 | 5,481 |
package io.transwarp.midas.constant.midas.params.data
/**
 * Parameter-name constants for the over-sampling operator.
 */
object OverSampleParams {
  // Sampling threshold parameter key.
  val Threshold = "threshold"
  // Key for the name of the dependent (label) column.
  val DependentColName = "dependentColName"
  // Key for the class to be over-sampled.
  val PrimaryClass = "primaryClass"
}
| transwarpio/rapidminer | api-driver/src/main/scala/io/transwarp/midas/constant/midas/params/data/OverSampleParams.scala | Scala | gpl-3.0 | 235 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets that match specific criteria, giving a basic overview of the dataset's contents without deeper analysis.