code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package org.flowpaint.util
import org.flowpaint.model2.raster.Change
import org.flowpaint.model2.{Operation, Picture}
/**
* A command to run a rendering operation.
*/
class OperationCommand(operation: Operation) extends Command[Picture](
// Human-readable description of this command (taken from the operation).
operation.description,
// Action
(picture: Picture) => {
// NOTE(review): the actual rendering step is commented out below, so the
// forward action currently only records an undo snapshot -- confirm whether
// rendering is performed elsewhere before relying on this command.
// val tiles = operation.getAffectedTiles(picture)
// operation.renderToTiles(picture, tiles)
picture.takeUndoSnapshot()
},
// Undo action
(picture: Picture, undoData: Object) => {
// undoData is expected to be the Change produced by the forward action.
val change = undoData.asInstanceOf[Change]
change.undo(picture)
// Return the change so it can be reused as redo data.
change
},
// Redo action
(picture: Picture, redoData: Object) => {
// redoData is the same Change returned by the undo action above.
val change = redoData.asInstanceOf[Change]
change.redo(picture)
change
},
// Can undo
(picture: Picture) => {true}
) {
} | zzorn/flowpaint | src/main/scala/org/flowpaint/util/OperationCommand.scala | Scala | gpl-2.0 | 792 |
package jp.co.dzl.example.akka.api.di
import com.typesafe.config.{ ConfigFactory, Config }
import scaldi.Module
// Scaldi DI module exposing Typesafe Config values as injectable bindings.
// The Config itself is loaded once (application.conf / reference.conf chain)
// and the individual keys are re-exposed as typed, identified bindings.
class ConfigModule extends Module {
// Root configuration object; all other bindings read from it.
bind[Config] to ConfigFactory.load()
// HTTP server listen address and port.
bind[String] identifiedBy "http.listen.host" to inject[Config].getString("http.listen.host")
bind[Int] identifiedBy "http.listen.port" to inject[Config].getInt("http.listen.port")
// Upstream GitHub service endpoint and request timeout.
bind[String] identifiedBy "services.github.host" to inject[Config].getString("services.github.host")
bind[Int] identifiedBy "services.github.port" to inject[Config].getInt("services.github.port")
bind[Int] identifiedBy "services.github.timeout" to inject[Config].getInt("services.github.timeout")
}
| dazzle-lab/akka-api-gateway-example | src/main/scala/jp/co/dzl/example/akka/api/di/ConfigModule.scala | Scala | mit | 680 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.cluster.scheduler
import akka.actor.{Actor, ActorRef}
import org.apache.gearpump.Time.MilliSeconds
import org.apache.gearpump.cluster.MasterToWorker.{UpdateResourceFailed, UpdateResourceSucceed, WorkerRegistered}
import org.apache.gearpump.cluster.WorkerToMaster.ResourceUpdate
import org.apache.gearpump.cluster.master.Master.WorkerTerminated
import org.apache.gearpump.cluster.scheduler.Scheduler.ApplicationFinished
import org.apache.gearpump.cluster.worker.WorkerId
import org.apache.gearpump.util.LogUtil
import org.slf4j.Logger
import scala.collection.mutable
/**
* Scheduler schedule resource for different applications.
*/
/**
 * Base class for resource schedulers. Tracks the resources most recently
 * reported by each registered worker and delegates the actual allocation
 * policy to subclasses via [[allocateResource]] / [[doneApplication]].
 */
abstract class Scheduler extends Actor {
  val LOG: Logger = LogUtil.getLogger(getClass)

  // workerId -> (worker actor ref, resource the worker last reported)
  protected var resources = new mutable.HashMap[WorkerId, (ActorRef, Resource)]

  /** Handles worker registration, resource updates, and worker/application termination. */
  def handleScheduleMessage: Receive = {
    case WorkerRegistered(id, _) =>
      // Start tracking a newly registered worker with an empty resource.
      if (!resources.contains(id)) {
        LOG.info(s"Worker $id added to the scheduler")
        resources.put(id, (sender, Resource.empty))
      }
    case update @ ResourceUpdate(worker, workerId, resource) =>
      LOG.info(s"$update...")
      if (resources.contains(workerId)) {
        // The contains check above guarantees the key is present, so index
        // directly instead of the previous resources.get(workerId).get._2.
        val resourceReturned = resource > resources(workerId)._2
        resources.update(workerId, (worker, resource))
        if (resourceReturned) {
          // Some resource was freed; retry allocation for pending requests.
          allocateResource()
        }
        sender ! UpdateResourceSucceed
      }
      else {
        sender ! UpdateResourceFailed(
          s"ResourceUpdate failed! The worker $workerId has not been registered into master")
      }
    case WorkerTerminated(workerId) =>
      // Drop all bookkeeping for a dead worker.
      if (resources.contains(workerId)) {
        resources -= workerId
      }
    case ApplicationFinished(appId) =>
      doneApplication(appId)
  }

  /** Attempts to allocate currently available resources; policy defined by subclasses. */
  def allocateResource(): Unit

  /** Releases all scheduler state held for the given application. */
  def doneApplication(appId: Int): Unit
}
object Scheduler {
// A resource request that could not be fully satisfied yet, together with the
// time it was received so allocation can be retried or prioritised later.
case class PendingRequest(
appId: Int, appMaster: ActorRef, request: ResourceRequest, timeStamp: MilliSeconds)
// Notifies the scheduler that an application has finished so that its
// resources and pending requests can be released.
case class ApplicationFinished(appId: Int)
} | manuzhang/incubator-gearpump | core/src/main/scala/org/apache/gearpump/cluster/scheduler/Scheduler.scala | Scala | apache-2.0 | 2,856 |
package sbt
import java.io.File
import Def.Classpath
import scala.annotation.implicitNotFound
import sbt.internal.util.Attributed
import Def.Initialize
import reflect.internal.annotations.compileTimeOnly
/**
 * Typeclasses backing the sbt settings DSL operators `+=` ([[Append.Value]]:
 * append one element) and `++=` ([[Append.Values]]: append a collection).
 *
 * All instances are public implicits, so each definition carries an explicit
 * result type (inferred result types on public implicits are fragile for
 * implicit resolution and binary compatibility).
 */
object Append {
  @implicitNotFound(
    msg = "No implicit for Append.Value[${A}, ${B}] found,\\n so ${B} cannot be appended to ${A}")
  trait Value[A, B] {
    /** Returns `a` with the single element `b` appended. */
    def appendValue(a: A, b: B): A
  }
  @implicitNotFound(
    msg = "No implicit for Append.Values[${A}, ${B}] found,\\n so ${B} cannot be appended to ${A}")
  trait Values[A, -B] {
    /** Returns `a` with every element of `b` appended. */
    def appendValues(a: A, b: B): A
  }
  /** A type supporting both single-element (`+=`) and bulk (`++=`) appends. */
  trait Sequence[A, -B, T] extends Value[A, T] with Values[A, B]
  implicit def appendSeq[T, V <: T]: Sequence[Seq[T], Seq[V], V] =
    new Sequence[Seq[T], Seq[V], V] {
      def appendValues(a: Seq[T], b: Seq[V]): Seq[T] = a ++ b
      def appendValue(a: Seq[T], b: V): Seq[T] = a :+ b
    }
  /** Appends values convertible to `T` (via the implicit view `ev`) to a `Seq[T]`. */
  implicit def appendSeqImplicit[T, V](implicit ev: V => T): Sequence[Seq[T], Seq[V], V] =
    new Sequence[Seq[T], Seq[V], V] {
      def appendValues(a: Seq[T], b: Seq[V]): Seq[T] =
        a ++ (b map { x =>
          (x: T)
        })
      def appendValue(a: Seq[T], b: V): Seq[T] = a :+ (b: T)
    }
  // The two instances below exist only so that `+=` type-checks in build
  // definitions; the call is presumably rewritten at compile time (hence
  // @compileTimeOnly), so the runtime body is intentionally unimplemented.
  @compileTimeOnly("This can be used in += only.")
  implicit def appendTaskValueSeq[T, V <: T]: Value[Seq[Task[T]], Initialize[Task[V]]] =
    new Value[Seq[Task[T]], Initialize[Task[V]]] {
      def appendValue(a: Seq[Task[T]], b: Initialize[Task[V]]): Seq[Task[T]] = ???
    }
  @compileTimeOnly("This can be used in += only.")
  implicit def appendTaskKeySeq[T, V <: T]: Value[Seq[Task[T]], TaskKey[V]] =
    new Value[Seq[Task[T]], TaskKey[V]] {
      def appendValue(a: Seq[Task[T]], b: TaskKey[V]): Seq[Task[T]] = ???
    }
  implicit def appendList[T, V <: T]: Sequence[List[T], List[V], V] =
    new Sequence[List[T], List[V], V] {
      def appendValues(a: List[T], b: List[V]): List[T] = a ::: b
      def appendValue(a: List[T], b: V): List[T] = a :+ b
    }
  implicit def appendListImplicit[T, V](implicit ev: V => T): Sequence[List[T], List[V], V] =
    new Sequence[List[T], List[V], V] {
      def appendValues(a: List[T], b: List[V]): List[T] =
        a ::: (b map { x =>
          (x: T)
        })
      def appendValue(a: List[T], b: V): List[T] = a :+ (b: T)
    }
  implicit def appendVectorImplicit[T, V](implicit ev: V => T): Sequence[Vector[T], Seq[V], V] =
    new Sequence[Vector[T], Seq[V], V] {
      def appendValues(a: Vector[T], b: Seq[V]): Vector[T] =
        a ++ (b map { x =>
          (x: T)
        })
      def appendValue(a: Vector[T], b: V): Vector[T] = a :+ (b: T)
    }
  implicit def appendString: Value[String, String] = new Value[String, String] {
    def appendValue(a: String, b: String) = a + b
  }
  // Explicit result types added on the three numeric instances below; they
  // previously relied on inferred structural result types.
  implicit def appendInt: Value[Int, Int] = new Value[Int, Int] {
    def appendValue(a: Int, b: Int) = a + b
  }
  implicit def appendLong: Value[Long, Long] = new Value[Long, Long] {
    def appendValue(a: Long, b: Long) = a + b
  }
  implicit def appendDouble: Value[Double, Double] = new Value[Double, Double] {
    def appendValue(a: Double, b: Double) = a + b
  }
  implicit def appendClasspath: Sequence[Classpath, Seq[File], File] =
    new Sequence[Classpath, Seq[File], File] {
      def appendValues(a: Classpath, b: Seq[File]): Classpath = a ++ Attributed.blankSeq(b)
      def appendValue(a: Classpath, b: File): Classpath = a :+ Attributed.blank(b)
    }
  implicit def appendSet[T, V <: T]: Sequence[Set[T], Set[V], V] =
    new Sequence[Set[T], Set[V], V] {
      def appendValues(a: Set[T], b: Set[V]): Set[T] = a ++ b
      def appendValue(a: Set[T], b: V): Set[T] = a + b
    }
  implicit def appendMap[A, B, X <: A, Y <: B]: Sequence[Map[A, B], Map[X, Y], (X, Y)] =
    new Sequence[Map[A, B], Map[X, Y], (X, Y)] {
      def appendValues(a: Map[A, B], b: Map[X, Y]): Map[A, B] = a ++ b
      def appendValue(a: Map[A, B], b: (X, Y)): Map[A, B] = a + b
    }
  /** Appending an `Option` appends its value when defined; `None` is a no-op. */
  implicit def appendOption[T]: Sequence[Seq[T], Option[T], Option[T]] =
    new Sequence[Seq[T], Option[T], Option[T]] {
      def appendValue(a: Seq[T], b: Option[T]): Seq[T] = b.fold(a)(a :+ _)
      def appendValues(a: Seq[T], b: Option[T]): Seq[T] = b.fold(a)(a :+ _)
    }
}
| Duhemm/sbt | main-settings/src/main/scala/sbt/Append.scala | Scala | bsd-3-clause | 4,139 |
package lila.tournament
import org.specs2.mutable.Specification
object ColorHistoryTest {
  /**
   * Replays a string of color picks ('W' = White, 'B' = Black) into a
   * ColorHistory. Any other character throws a MatchError, as in the
   * original — test input is expected to contain only 'W'/'B'.
   */
  def apply(s: String): ColorHistory =
    s.foldLeft(ColorHistory(0, 0)) {
      case (history, 'W') => history.inc(chess.White)
      case (history, 'B') => history.inc(chess.Black)
    }

  /** Projects a history onto its (strike, balance) pair. */
  def toTuple2(history: ColorHistory): (Int, Int) =
    (history.strike, history.balance)

  /** Replays `s` and returns the resulting (strike, balance). */
  def unpack(s: String): (Int, Int) = toTuple2(apply(s))

  /** True when the two replayed histories may be paired under `maxStreak`. */
  def couldPlay(s1: String, s2: String, maxStreak: Int): Boolean =
    apply(s1).couldPlay(apply(s2), maxStreak)

  /** True when both replayed histories end on the same color pattern. */
  def sameColors(s1: String, s2: String): Boolean =
    apply(s1).sameColors(apply(s2))

  /** True when the first player should get White; tie-break always answers true. */
  def firstGetsWhite(s1: String, s2: String): Boolean =
    apply(s1).firstGetsWhite(apply(s2))(() => true)
}
// specs2 suite for ColorHistory: verifies the packed (strike, balance)
// encoding and the pairing/color-assignment predicates.
class ColorHistoryTest extends Specification {
import ColorHistoryTest.{ apply, couldPlay, firstGetsWhite, sameColors, unpack }
"arena tournament color history" should {
// Expected (strike, balance) after replaying each color sequence.
"hand tests" in {
unpack("WWW") must be equalTo ((3, 3))
unpack("WWWB") must be equalTo ((-1, 2))
unpack("BBB") must be equalTo ((-3, -3))
unpack("BBBW") must be equalTo ((1, -2))
unpack("WWWBBB") must be equalTo ((-3, 0))
}
// Players may not be paired if it would extend either streak past maxStreak.
"couldPlay" in {
couldPlay("WWW", "WWW", 3) must beFalse
couldPlay("BBB", "BBB", 3) must beFalse
couldPlay("BB", "BB", 3) must beTrue
}
"sameColors" in {
sameColors("WWW", "W") must beTrue
sameColors("BBB", "B") must beTrue
}
// The player with the weaker claim to White (longer White streak) yields it.
"firstGetsWhite" in {
firstGetsWhite("WWW", "WW") must beFalse
firstGetsWhite("WW", "WWW") must beTrue
firstGetsWhite("BB", "B") must beTrue
firstGetsWhite("B", "BB") must beFalse
firstGetsWhite("WW", "BWW") must beFalse
firstGetsWhite("BB", "WBB") must beTrue
}
// Equality depends only on the packed (strike, balance) state, so histories
// with different prefixes but the same outcome compare equal.
"equals" in {
apply("") must be equalTo apply("")
apply("WBW") must be equalTo apply("W")
}
}
}
| luanlv/lila | modules/tournament/src/test/ColorHistoryTest.scala | Scala | mit | 1,950 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail
import cats.laws._
import cats.laws.discipline._
import monix.eval.{Coeval, Task}
import monix.execution.exceptions.DummyException
import monix.execution.internal.Platform
import monix.tail.batches.BatchCursor
import org.scalacheck.Test
import org.scalacheck.Test.Parameters
import scala.annotation.tailrec
// Property-based tests for Iterant.dropWhile: equivalence with
// List.dropWhile, error propagation from broken batches/cursors/user code,
// and preservation of the earlyStop (guarantee) finalizer.
object IterantDropWhileSuite extends BaseTestSuite {
// Use a smaller generator size on JS to keep test runtime reasonable.
override lazy val checkConfig: Parameters = {
if (Platform.isJVM)
Test.Parameters.default.withMaxSize(256)
else
Test.Parameters.default.withMaxSize(32)
}
// Reference implementation: List semantics of dropWhile, written
// tail-recursively so large generated lists cannot overflow the stack.
@tailrec
def dropFromList(p: Int => Boolean)(list: List[Int]): List[Int] =
list match {
case x :: xs =>
if (p(x)) dropFromList(p)(xs)
else list
case Nil =>
Nil
}
test("Iterant.dropWhile equivalence with List.dropWhile") { implicit s =>
check3 { (list: List[Int], idx: Int, p: Int => Boolean) =>
val iter = arbitraryListToIterant[Coeval, Int](list, math.abs(idx) + 1, allowErrors = false)
// Append a non-empty suffix so the stream never ends exactly at the drop point.
val stream = iter ++ Iterant[Coeval].of(1, 2, 3)
val received = stream.dropWhile(p).toListL.runTry()
val expected = stream.toListL.map(dropFromList(p)).runTry()
received <-> expected
}
}
// A batch that throws on access must surface its error as a halt.
test("Iterant.dropWhile protects against broken batches") { implicit s =>
check1 { (iter: Iterant[Task, Int]) =>
val dummy = DummyException("dummy")
val suffix = Iterant[Task].nextBatchS[Int](new ThrowExceptionBatch(dummy), Task.now(Iterant[Task].empty))
val stream = iter.onErrorIgnore ++ suffix
val received = stream.dropWhile(_ => true)
received <-> Iterant[Task].haltS[Int](Some(dummy))
}
}
// Same for a cursor that throws on access.
test("Iterant.dropWhile protects against broken cursors") { implicit s =>
check1 { (iter: Iterant[Task, Int]) =>
val dummy = DummyException("dummy")
val suffix = Iterant[Task].nextCursorS[Int](new ThrowExceptionCursor(dummy), Task.now(Iterant[Task].empty))
val stream = iter.onErrorIgnore ++ suffix
val received = stream.dropWhile(_ => true)
received <-> Iterant[Task].haltS[Int](Some(dummy))
}
}
// An exception thrown by the user predicate must also halt the stream.
test("Iterant.dropWhile protects against user code") { implicit s =>
check1 { (iter: Iterant[Task, Int]) =>
val dummy = DummyException("dummy")
val suffix = Iterant[Task].nextCursorS[Int](BatchCursor(1, 2, 3), Task.now(Iterant[Task].empty))
val stream = iter.onErrorIgnore ++ suffix
val received = stream.dropWhile(_ => throw dummy)
received <-> Iterant[Task].haltS[Int](Some(dummy))
}
}
// The guarantee finalizer must run exactly once even when all items are dropped.
test("Iterant.dropWhile preserves the source earlyStop") { implicit s =>
var effect = 0
val stop = Coeval.eval(effect += 1)
val source =
Iterant[Coeval].nextCursorS(BatchCursor(1, 2, 3), Coeval.now(Iterant[Coeval].empty[Int])).guarantee(stop)
val stream = source.dropWhile(_ => true)
stream.completedL.value()
assertEquals(effect, 1)
}
}
| monix/monix | monix-tail/shared/src/test/scala/monix/tail/IterantDropWhileSuite.scala | Scala | apache-2.0 | 3,583 |
package org.jetbrains.plugins.scala.actions
import java.util
import java.util.Collections
import com.intellij.openapi.actionSystem.{ActionPromoter, AnAction, DataContext}
/**
* User: Dmitry.Naydanov
* Date: 28.02.17.
*/
abstract class SingleActionPromoterBase extends ActionPromoter {
  /** Decides whether the given action should take precedence over the others. */
  def shouldPromote(anAction: AnAction): Boolean

  /**
   * Returns a singleton list containing the first action accepted by
   * [[shouldPromote]], or an empty list when no action matches.
   */
  override def promote(actions: util.List[AnAction], context: DataContext): util.List[AnAction] = {
    val total = actions.size()
    var index = 0
    while (index < total) {
      val candidate = actions.get(index)
      if (shouldPromote(candidate)) return util.Arrays.asList(candidate)
      index += 1
    }
    Collections.emptyList()
  }
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/actions/SingleActionPromoterBase.scala | Scala | apache-2.0 | 626 |
package com.sksamuel.elastic4s.search.aggs
import com.sksamuel.elastic4s.RefreshPolicy
import com.sksamuel.elastic4s.testkit.DockerTests
import org.scalatest.{FreeSpec, Matchers}
import scala.util.Try
// Integration test (Docker-backed Elasticsearch) for the extended_stats
// aggregation, mirroring the example from the extended stats documentation.
class ExtendedStatsAggregationHttpTest extends FreeSpec with DockerTests with Matchers {
// Best-effort cleanup; wrapped in Try because the index may not exist yet.
Try {
client.execute {
deleteIndex("extendedstatsagg")
}.await
}
client.execute {
createIndex("extendedstatsagg") mappings {
mapping("sales_per_month") fields(
dateField("month"),
doubleField("sales").stored(true)
)
}
}.await
// based on the example from extended stats agg documentation
client.execute(
bulk(
indexInto("extendedstatsagg/sales_per_month") fields("month" -> "2017-01-01", "sales" -> 550.0),
indexInto("extendedstatsagg/sales_per_month") fields("month" -> "2017-02-01", "sales" -> 60.0),
indexInto("extendedstatsagg/sales_per_month") fields("month" -> "2017-03-01", "sales" -> 375.0)
).refresh(RefreshPolicy.Immediate)
).await
"extended stats agg" - {
"should return the expected stats" in {
val resp = client.execute {
search("extendedstatsagg").matchAllQuery().aggs {
extendedStatsAgg("agg1", "sales")
}
}.await.result
resp.totalHits shouldBe 3
val agg = resp.aggs.extendedStats("agg1")
agg.count shouldBe 3
agg.min shouldBe 60.0
agg.max shouldBe 550.0
// Floating-point aggregates are compared with a tolerance rather than exactly.
math.abs(agg.avg - 328.333) < 0.1 shouldBe true
agg.sum shouldBe 985.0
agg.sumOfSquares shouldBe 446725.0
math.abs(agg.variance - 41105.555) < 0.1 shouldBe true
math.abs(agg.stdDeviation - 202.745) < 0.1 shouldBe true
}
}
}
| Tecsisa/elastic4s | elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/search/aggs/ExtendedStatsAggregationHttpTest.scala | Scala | apache-2.0 | 1,692 |
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.tailhq.dynaml.kernels
import breeze.linalg.{DenseMatrix, DenseVector, eig, max, min}
import org.apache.log4j.{Logger, Priority}
/**
* @author tailhq
*
*/
// Dense kernel (Gram) matrix with an eigen-decomposition used to build
// approximate feature maps.
class SVMKernelMatrix(
override protected val kernel: DenseMatrix[Double],
private val dimension: Long)
extends KernelMatrix[DenseMatrix[Double]]
with Serializable {
private val logger = Logger.getLogger(this.getClass)
/**
 * Calculates the approximate eigen-decomposition of the
 * kernel matrix
 *
 * @param dimensions The effective number of dimensions
 * to be calculated in the feature map
 *
 * @return A Scala [[Tuple2]] containing the eigenvalues
 * and eigenvectors.
 *
 * */
override def eigenDecomposition(dimensions: Int = this.dimension.toInt):
(DenseVector[Double], DenseMatrix[Double]) = {
// NOTE(review): the `dimensions` parameter is never used below -- the full
// decomposition is always computed. Confirm whether truncation was intended.
// NOTE(review): log message mentions JBlas, but `eig` here is Breeze's.
logger.log(Priority.INFO, "Eigenvalue decomposition of the kernel matrix using JBlas.")
val decomp = eig(this.kernel)
// Log the eigenvalue range as a quick conditioning diagnostic.
logger.log(Priority.INFO, "Eigenvalue stats: "
+min(decomp.eigenvalues)
+" =< lambda =< "
+max(decomp.eigenvalues)
)
(decomp.eigenvalues, decomp.eigenvectors)
}
}
| mandar2812/DynaML | dynaml-core/src/main/scala/io/github/tailhq/dynaml/kernels/SVMKernelMatrix.scala | Scala | apache-2.0 | 1,938 |
package app.v1.api
import java.util.UUID.fromString
import app.db.NoteDatabase
import app.support.EmbeddedCassandraConnector
import app.v1.model.Note
import app.v1.service.{ CassandraNoteService, UUIDService }
import com.twitter.finagle.http.Status
import io.circe.generic.auto._
import io.finch.Input
import io.finch.circe._
import org.cassandraunit.dataset.cql.ClassPathCQLDataSet
import org.joda.time.DateTime
import org.scalamock.scalatest.MockFactory
// Test wiring: NoteApi backed by an embedded Cassandra instance, with the
// UUID generator stubbed so created note ids are deterministic in tests.
trait CassandraMock
extends NoteApi
with CassandraNoteService
with UUIDService
with MockFactory {
// Route all database access through the embedded Cassandra connector.
override def database: NoteDatabase = new NoteDatabase(EmbeddedCassandraConnector.connector)
// ScalaMock stub; individual tests configure the UUID it returns.
override val noteUUID: NoteUUID = stub[NoteUUID]
}
// End-to-end tests of the note CRUD endpoints against embedded Cassandra.
// Each test instantiates a fresh CassandraMock wiring; fixture rows come
// from insert_notes.cql when the test needs pre-existing data.
class NoteApiTest extends EmbeddedCassandra {
val basePath = "/api/v1/notes"
// Fixture ids that match the rows in insert_notes.cql.
val someNoteUUID = fromString("4a5d0831-4630-4e82-b3bb-80fe8a7dc9bd")
val someUserUUID = fromString("05cd3079-a9ea-4cff-ba22-b4211a95d1be")
behavior of "getNoteById endpoint"
it should " return 200 status code if was successfully executed " in new CassandraMock {
loadData(new ClassPathCQLDataSet("insert_notes.cql"))
override val noteApi: DefaultNoteApi = new DefaultNoteApi
val input = Input.get(basePath + "/" + someNoteUUID)
// sut
val result = noteApi.getNoteById(input)
// Verify result
result.awaitOutputUnsafe().map(_.status).get should be(Status.Ok)
result.awaitOutputUnsafe().map(_.value).get.id should be(someNoteUUID)
result.awaitOutputUnsafe().map(_.value).get.userid should be(fromString("05cd3079-a9ea-4cff-ba22-b4211a95d1be"))
result.awaitOutputUnsafe().map(_.value).get.created.toString should be("2016-12-10T19:06:21.000Z")
result.awaitOutputUnsafe().map(_.value).get.content should be("some content 2")
}
behavior of "createNote endpoint"
it should " return 201 if a note has been created succesfully " in new CassandraMock {
val someNote = Note(someNoteUUID, someUserUUID, DateTime.parse("2012-08-16T07:22:05Z"), "some content 2")
import Note._
val input = Input.post(basePath).withBody(someNote)
// configure stubs
(noteUUID.getUUID _).when().returns(someNoteUUID)
// sut
val result = noteApi.createNote(input)
// Verify result
result.awaitOutputUnsafe().map(_.status).get should be(Status.Created)
// Verify expectations met
(noteUUID.getUUID _).verify()
}
behavior of "updateNote endpoint"
it should " return 200 if a note content has been updated succesfully " in new CassandraMock {
loadData(new ClassPathCQLDataSet("insert_notes.cql"))
val someNote = Note(someNoteUUID, someUserUUID, DateTime.parse("2012-08-16T07:22:05Z"), "some new content 2")
import Note._
val input = Input.patch(basePath + "/" + someNoteUUID).withBody(someNote)
// sut
val result = noteApi.patchNote(input)
// Verify result
result.awaitOutputUnsafe().map(_.status).get should be(Status.Ok)
}
// No fixture loaded here, so the note id is absent and the patch must 404.
it should " return 404 if a note uuid doesn't exist" in new CassandraMock {
val someNote = Note(someNoteUUID, someUserUUID, DateTime.parse("2012-08-16T07:22:05Z"), "some new content 2")
import Note._
val input = Input.patch(basePath + "/" + someNoteUUID).withBody(someNote)
// sut
val result = noteApi.patchNote(input)
// Verify result
result.awaitOutputUnsafe().map(_.status).get should be(Status.NotFound)
}
behavior of "deleteNote endpoint"
it should " return 200 if a note has been deleted successfully " in new CassandraMock {
loadData(new ClassPathCQLDataSet("insert_notes.cql"))
val input = Input.delete(basePath + "/" + someNoteUUID)
// sut
val result = noteApi.deleteNote(input)
// Verify result
result.awaitOutputUnsafe().map(_.status).get should be(Status.Ok)
}
it should " return 404 if a note doesn't exist " in new CassandraMock {
val input = Input.delete(basePath + "/" + someNoteUUID)
// sut
val result = noteApi.deleteNote(input)
// Verify result
result.awaitOutputUnsafe().map(_.status).get should be(Status.NotFound)
}
behavior of "getAllNotes endpoint"
it should " return a list of notes" in new CassandraMock {
loadData(new ClassPathCQLDataSet("insert_notes.cql"))
val input = Input.get(basePath)
// sut
val result = noteApi.getAllNotes(input)
// Verify result: all fixture notes come back, in the expected order.
result.awaitOutputUnsafe().map(_.status).get should be(Status.Ok)
result.awaitOutputUnsafe().map(_.value).get(0).id should be(fromString("3e6ea370-e09a-4c82-b413-f557f4baf3e3"))
result.awaitOutputUnsafe().map(_.value).get(1).id should be(fromString("4a5d0831-4630-4e82-b3bb-80fe8a7dc9bd"))
result.awaitOutputUnsafe().map(_.value).get(2).id should be(fromString("c8f727b3-f31d-41a3-9d25-e0d282dd82cd"))
result.awaitOutputUnsafe().map(_.value).get(3).id should be(fromString("05db40e8-0eb6-4166-9a97-aece071237fd"))
}
}
} | PScopelliti/ProjectTracker | note-service/src/test/scala/app/v1/api/NoteApiTest.scala | Scala | apache-2.0 | 4,905 |
/** Copyright 2015 Dropbox, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package djinni
import djinni.ast._
import djinni.generatorTools._
import djinni.meta._
class CWrapperMarshal(spec: Spec) extends Marshal(spec) { // modeled(pretty much copied) after CppMarshal, not fully C-like
// Delegate used for the C++-side spellings of types.
val cppMarshal = new CppMarshal(spec)
// Marker comments appended to generated lines so the Python generator can
// filter them out of cdef / setsource sections.
val pythonCdefIgnore = " // python_cdef_ignore"
val pythonSetSourceIgnore = " // python_setsource_ignore"
// Prefix for the generated C wrapper struct of an interface.
val djinniWrapper = "DjinniWrapper"
val cw = "cw__" // prefix for c wrapper files
val dh = "dh__" // prefix for c files containing djinni helpers for records
// Opaque handle type used for lists/sets/maps crossing the C boundary.
val djinniObjectHandle = "DjinniObjectHandle"
// C type name for a resolved type expression; forHeader controls whether
// C header spelling (with struct tags) is emitted.
def ctypename(tm: MExpr, forHeader: Boolean): String =
cParamType(tm, forHeader)
override def typename(tm: MExpr): String = cParamType(tm, false)
// C++-side name for a named user-defined type (enum / interface / record).
def typename(name: String, ty: TypeDef): String = ty match {
case e: Enum => idCpp.enumType(name)
case i: Interface => idCpp.ty(name)
case r: Record => idCpp.ty(name)
}
// Fully-qualified variants are not needed by this generator.
override def fqTypename(tm: MExpr): String = throw new NotImplementedError()
def cParamType(tm: MExpr, forHeader: Boolean = false): String =
toCParamType(tm, forHeader)
def cParamType(tm: TypeRef, forHeader: Boolean): String =
cParamType(tm.resolved, forHeader)
override def paramType(tm: MExpr): String = cParamType(tm, false)
override def fqParamType(tm: MExpr): String = throw new NotImplementedError()
// Return type for a method; None maps to C "void".
def cReturnType(ret: Option[TypeRef], forHeader: Boolean = false): String = {
if (ret.isEmpty) return "void"
return toCType(ret.get, forHeader)
}
override def returnType(ret: Option[TypeRef]): String =
cReturnType(ret, false)
override def fqReturnType(ret: Option[TypeRef]): String =
throw new NotImplementedError()
// Field types reuse the plain C type spelling.
def cFieldType(tm: MExpr, forHeader: Boolean) = ctypename(tm, forHeader)
override def fieldType(tm: MExpr): String = ctypename(tm, false)
override def fqFieldType(tm: MExpr): String = throw new NotImplementedError()
// override def toCpp(tm: MExpr, expr: String): String = throw new AssertionError("cpp to cpp conversion")
// override def fromCpp(tm: MExpr, expr: String): String = throw new AssertionError("cpp to cpp conversion")
// Include/import references required by a given meta type in the generated
// C wrapper sources. The python_*_ignore suffixes let the Python generator
// drop headers that are meaningless in cdef/setsource sections.
// NOTE(review): the `exclude` parameter is unused in this body -- confirm
// whether callers rely on it elsewhere.
def references(m: Meta, exclude: String): Seq[SymbolReference] = m match {
case p: MPrimitive =>
p.idlName match {
case "i8" | "i16" | "i32" | "i64" =>
List(ImportRef("<stdint.h>" + pythonCdefIgnore))
case "bool" => List(ImportRef("<stdbool.h>" + pythonCdefIgnore))
case _ => List()
}
case MDate => List(ImportRef("<chrono>" + pythonSetSourceIgnore))
case MBinary =>
List(
ImportRef("<vector>" + pythonSetSourceIgnore),
ImportRef("<stdint.h>" + pythonCdefIgnore)
)
case MOptional => List(ImportRef(spec.cppOptionalHeader))
case d: MDef =>
d.defType match {
// Interfaces pull in their C wrapper header, records their helper header.
case DInterface => List(ImportRef(q(cw + d.name + ".hpp")))
case DRecord => List(ImportRef(q(dh + d.name + ".hpp")))
case DEnum =>
List(
ImportRef(q(d.name + ".hpp")),
ImportRef(q(dh + d.name + ".hpp"))
)
}
case e: MExtern => throw new NotImplementedError()
case _ => List()
}
// Types that need RAII will be placed in unique pointers at acquisition time, and released when the language
// boundary is crossed; this function helps inform that decision in the generator
def needsRAII(ty: TypeRef): Boolean = needsRAII(ty.resolved)
// True for types whose C representation owns a heap allocation and therefore
// must be released when crossing the language boundary.
def needsRAII(tm: MExpr): Boolean = tm.base match {
case MString | MBinary => true
case MList | MSet | MMap => true
case d: MDef =>
d.defType match {
case DRecord => true
case DInterface => true
case DEnum => false // we pass as ints
}
case MOptional =>
tm.args.head.base match {
// Optionals of primitives/dates are boxed, so they also need release.
case mp: MPrimitive => true
case MDate => true
case _ => needsRAII(tm.args.head)
}
case e: MExtern => throw new NotImplementedError()
case _ => false
}
// True when the RAII wrapper can be a plain std::unique_ptr (simple boxed
// values); composite handles use custom deleters instead.
def canRAIIUseStandardUniquePtr(tm: MExpr): Boolean = tm.base match {
case MString | MBinary => true
case MOptional =>
tm.args.head.base match {
case mp: MPrimitive => true
case MString | MBinary => true
case MDate => true
case _ => false
}
case e: MExtern => throw new NotImplementedError()
case _ => false
}
private def toCType(ty: TypeRef, forHeader: Boolean): String =
toCType(ty.resolved, forHeader)
// Maps a resolved type expression to its C spelling. When forHeader is true,
// struct types carry an explicit "struct " tag for use in C headers.
private def toCType(tm: MExpr, forHeader: Boolean): String = {
def base(m: Meta): String = {
val structPrefix = if (forHeader) "struct " else ""
m match {
case p: MPrimitive => p.cName
// Dates cross the boundary as millisecond-style integer timestamps.
case MDate => "uint64_t"
case MString | MBinary =>
val idlName = idCpp.ty(m.asInstanceOf[MOpaque].idlName)
structPrefix + "Djinni" + idlName + " *"
case MList | MSet | MMap => structPrefix + djinniObjectHandle + " *"
case MOptional =>
// Optionals get a dedicated boxed/optional handle per payload kind.
tm.args.head.base match {
case m @ (MPrimitive(_, _, _, _, _, _, _, _, _, _) | MDate) =>
val idlName = m.asInstanceOf[MOpaque].idlName
structPrefix + "DjinniBoxed" + idCpp.ty(idlName) + " *"
case MList | MSet | MMap =>
structPrefix + "DjinniOptionalObjectHandle *"
case d: MDef =>
d.defType match {
case DRecord => structPrefix + "DjinniOptionalRecordHandle *"
case _ => base(tm.args.head.base)
}
case _ => base(tm.args.head.base)
}
case d: MDef =>
d.defType match {
case DEnum => "int"
case DRecord => structPrefix + "DjinniRecordHandle *"
case DInterface =>
structPrefix + djinniWrapper + idCpp.ty(d.name) + " *"
}
case p: MParam => idCpp.typeParam(p.name)
case e: MExtern => throw new NotImplementedError()
}
}
def expr(tm: MExpr): String = {
base(tm.base)
}
expr(tm)
}
// getting the idl name for a type
// useful for determining file names for container helpers ex: list_set_string
// useful also in the marshaler (allows writing less code)
def getExprIdlName(tm: MExpr): String = toCIdlType(tm)
// Name of the generated C function that releases the C representation of
// the given type (paired with needsRAII above).
def getReleaseMethodName(tm: MExpr): String = tm.base match {
case MString | MBinary => "delete_djinni_" + idCpp.local(getExprIdlName(tm))
case MList | MSet | MMap => getExprIdlName(tm) + "___delete"
case d: MDef =>
d.defType match {
// Interfaces are ref-counted rather than deleted outright.
case DInterface => idCpp.method(d.name) + "___wrapper_dec_ref"
case _ => idCpp.method(d.name) + "___delete"
}
case MOptional =>
tm.args.head.base match {
case MString | MBinary => getReleaseMethodName(tm.args.head)
case mp: MPrimitive =>
"delete_djinni_boxed_" + idCpp.method(
mp.asInstanceOf[MOpaque].idlName
)
case MDate => "delete_djinni_boxed_date"
case d: MDef =>
d.defType match {
case DInterface => getReleaseMethodName(tm.args.head)
case _ => "optional_" + getReleaseMethodName(tm.args.head)
}
case _ => "optional_" + getReleaseMethodName(tm.args.head)
}
case _ => throw new NotImplementedError()
}
private def toCIdlType(ty: TypeRef): String = toCIdlType(ty.resolved)
// IDL-style name for a type expression; containers fold their element type
// names into the result (e.g. list_set_string), which is used to name the
// generated container helper files.
private def toCIdlType(tm: MExpr): String = {
def baseToIdl(m: Meta): String = m match {
case p: MPrimitive => p.cName
case d: MDef =>
d.defType match {
case DRecord => "record_" + d.name
case DInterface => "interface_" + d.name
case DEnum => "enum_" + d.name
}
case p: MParam => idCpp.typeParam(p.name)
case e: MExtern => "extern"
case MOptional =>
// Optional primitives are "boxed"; all other optionals are "optional".
tm.args.head.base match {
case mp: MPrimitive => "boxed"
case _ => "optional"
}
case _ => m.asInstanceOf[MOpaque].idlName
}
def exprIdlName(tm: MExpr): String = {
val baseTy = baseToIdl(tm.base)
baseTy match {
case "boxed" | "optional" => baseTy + "_" + toCIdlType(tm.args.head)
// for list, set, map we return the name of the helper
case "list" | "set" =>
idCpp.local(
baseTy + (if (tm.args.isEmpty) ""
else "_" + toCIdlType(tm.args.head))
)
case "map" =>
idCpp.local(
baseTy + (if (tm.args.isEmpty) ""
else "_" + toCIdlType(tm.args.head)) + "_" + toCIdlType(
tm.args(1)
)
)
case _ => baseTy
}
}
exprIdlName(tm)
}
// this can be used in c++ generation to know whether a const& should be applied to the parameter or not
// Parameter spelling for a C type. refType (const &) is computed but, as the
// match below shows, every case currently resolves to the by-value spelling;
// the distinction is kept for the fall-through branches.
private def toCParamType(tm: MExpr, forHeader: Boolean): String = {
val cType = toCType(tm, forHeader)
val refType = "const " + cType + " &"
val valueType = cType
def toType(expr: MExpr): String = expr.base match {
case p: MPrimitive => valueType
case MString => valueType // DjinniString
case MBinary => valueType // DjinniBinary
case MDate => valueType // uint64_t
case MList => valueType
case MSet => valueType
case MMap => valueType
case MOptional => valueType
case d: MDef =>
d.defType match {
case DEnum => valueType
case DInterface => valueType
case DRecord => valueType
case _ => refType
}
case e: MExtern => throw new NotImplementedError()
case _ => refType
}
toType(tm)
}
/**
 * Strips a trailing " *" pointer suffix from a C type string, e.g.
 * "struct DjinniRecordHandle *" -> "struct DjinniRecordHandle".
 * Non-pointer strings are returned unchanged. Guards against empty input:
 * the previous version indexed s(s.length - 1) and threw on "".
 */
def removePointer(s: String): String =
  if (s.nonEmpty && s.last == '*') s.dropRight(2) else s
/**
 * Drops a trailing "//" comment from a generated line, together with the
 * single separator character preceding the marker (mirroring removePointer's
 * convention). Lines without a comment are returned unchanged.
 */
def removeComment(s: String): String = {
  val markerAt = s.indexOf("//")
  if (markerAt == -1) s else s.slice(0, markerAt - 1)
}
/** Name of the static callback holder for the given identifier. */
def callback(s: String): String = s"s_callback_$s"
/** Name of the C wrapper struct for an interface. */
def wrappedName(s: String): String = s"$djinniWrapper$s" // del
// Get to data from within C structure
def convertTo(
cppExpr: String,
ty: TypeRef,
tempExpr: Boolean = false
): String = convertTo(cppExpr, ty.resolved, tempExpr)
  /**
   * Builds the C++ source expression that converts `cppExpr` (a value in the C
   * wrapper representation) into its C++ counterpart for type `ty`.
   *
   * @param tempExpr when true, `cppExpr` is used as-is; otherwise it is wrapped
   *                 in `std::move(...)` so the conversion may steal the value.
   */
  def convertTo(cppExpr: String, ty: MExpr, tempExpr: Boolean): String = {
    val exprArg = if (tempExpr) { cppExpr }
    else {
      "std::move" + p(cppExpr)
    } // Move only when it wouldn't be pessimizing
    ty.base match {
      case MOptional => {
        ty.args.head.base match {
          case MPrimitive(_, _, _, _, _, _, _, _, _, _) | MDate =>
            val idlName = ty.args.head.base.asInstanceOf[MOpaque].idlName
            "DjinniBoxed" + idCpp.ty(idlName) + "::toCpp" + p(exprArg)
          case MString | MBinary =>
            val idlName = ty.args.head.base.asInstanceOf[MOpaque].idlName
            "DjinniOptional" + idCpp.ty(idlName) + "::toCpp" + p(exprArg)
          case MList | MMap | MSet =>
            "Djinni" + idCpp.ty(toCIdlType(ty.args.head)) + "::toCpp" + p(
              exprArg
            )
          case d: MDef =>
            d.defType match {
              case DRecord =>
                "Djinni" + idCpp.ty(d.name) + "::toCpp" + p(exprArg)
              case DEnum =>
                // Note: uses the raw expression (no std::move); presumably the
                // boxed enum travels as a plain int32 — confirm against codegen.
                "get_boxed_enum_" + idCpp.method(d.name) + "_from_int32" + p(
                  cppExpr
                )
              case _ => convertTo(cppExpr, ty.args.head, tempExpr)
            }
          case _ => convertTo(cppExpr, ty.args.head, tempExpr)
        }
      }
      case MDate => "DjinniDate::toCpp" + p(cppExpr)
      case MString | MBinary | MList | MMap | MSet =>
        "Djinni" + idCpp.ty(toCIdlType(ty)) + "::toCpp" + p(exprArg)
      case d: MDef =>
        d.defType match {
          case DInterface =>
            djinniWrapper + idCpp.ty(d.name) + "::get" + p(exprArg)
          case DRecord => "Djinni" + idCpp.ty(d.name) + "::toCpp" + p(exprArg)
          case DEnum =>
            // Enums are cast straight from their integral representation.
            "static_cast<" + withCppNs(idCpp.enumType(d.name)) + ">" + p(
              cppExpr
            )
        }
      case e: MExtern => throw new NotImplementedError()
      case _ => cppExpr // MParam <- didn't need to do anything here
    }
  }
// Pack data into C structure (for returning C structure)
  /** Convenience overload: resolves the TypeRef and delegates to the MExpr variant. */
  def convertFrom(
      cppExpr: String,
      ty: TypeRef,
      tempExpr: Boolean = false
  ): String = convertFrom(cppExpr, ty.resolved, tempExpr)
  /**
   * Builds the C++ source expression that converts `cppExpr` (a C++ value) into
   * its C wrapper representation for type `ty` — the inverse of `convertTo`.
   *
   * @param tempExpr when true, interface values are wrapped without std::move.
   */
  def convertFrom(cppExpr: String, ty: MExpr, tempExpr: Boolean): String = {
    ty.base match {
      case MOptional => {
        ty.args.head.base match {
          case MPrimitive(_, _, _, _, _, _, _, _, _, _) | MDate =>
            val idlName = ty.args.head.base.asInstanceOf[MOpaque].idlName
            "DjinniBoxed" + idCpp.ty(idlName) + "::fromCpp" + p(cppExpr)
          case MString | MBinary =>
            val idlName = ty.args.head.base.asInstanceOf[MOpaque].idlName
            "DjinniOptional" + idCpp.ty(idlName) + "::fromCpp" + p(cppExpr)
          case MList | MMap | MSet =>
            "Djinni" + idCpp.ty(toCIdlType(ty.args.head)) + "::fromCpp" + p(
              cppExpr
            )
          // NOTE(review): unlike convertTo, there is no dedicated boxed-enum
          // branch here; optional enums fall through to the recursive default —
          // confirm this asymmetry is intentional.
          case _ => convertFrom(cppExpr, ty.args.head, tempExpr)
        }
      }
      case MString | MBinary | MDate | MList | MSet | MMap =>
        val idlName = idCpp.ty(toCIdlType(ty))
        "Djinni" + idlName + "::fromCpp" + p(cppExpr)
      case d: MDef =>
        d.defType match {
          case DInterface => (
            djinniWrapper + idCpp.ty(d.name) + "::wrap"
            + p(if (tempExpr) { cppExpr }
            else { "std::move" + p(cppExpr) })
          ) // Move only when it wouldn't be pessimizing
          case DRecord => "Djinni" + idCpp.ty(d.name) + "::fromCpp" + p(cppExpr)
          case DEnum => "int32_from_enum_" + idCpp.method(d.name) + p(cppExpr)
        }
      case e: MExtern => throw new NotImplementedError()
      case _ => cppExpr // TODO: MParam <- didn't need to do anything here
    }
  }
def checkForException(s: String) = "lib.check_for_exception" + p(s)
def cArgDecl(args: Seq[String]) = {
if (args.isEmpty) {
// CWrapper headers need to be parsed as C. `()` in C means "unspecified args" and triggers
// -Wstrict-prototypes. `(void)` means no args in C. In C++ the two forms are equivalent.
"(void)"
} else {
args.mkString("(", ", ", ")")
}
}
def cArgVals(args: Seq[String]) = {
args.mkString("(", ", ", ")")
}
}
| cross-language-cpp/djinni-generator | src/main/scala/djinni/CWrapperMarshal.scala | Scala | apache-2.0 | 15,204 |
package org.scalaide.core.internal.launching
import org.scalaide.core.IScalaPlugin
import org.scalaide.core.IScalaProject
import org.eclipse.core.resources.IResource
import org.eclipse.core.runtime.IStatus
import org.eclipse.core.runtime.Status
import org.scalaide.ui.internal.handlers.CompilerLaunchErrorHandler
import org.scalaide.core.SdtConstants
import org.scalaide.util.eclipse.EclipseUtils
object MainClassVerifier {
  // Suffix the Scala compiler appends to the classfile holding an `object`'s
  // implementation (the "module class"), e.g. `Foo$` for `object Foo`.
  private final val ModuleClassSuffix = "$"
}
class MainClassVerifier {
  /**
   * Performs the following checks:
   *
   * 1) If the classfile of the fully-qualified `mainTypeName` can be found in the `project`'s output folders, a matching companion classfile
   * (which is expected to contain the main method) is also found. If it can't be found, an error is reported (this is done because it means
   * the user is trying to run a plain `class`, instead of an `object`).
   *
   * 2) If the class of the fully-qualified `mainTypeName` cannot be found, it reports an error if the `project` has build errors. Otherwise, it
   * trusts the user's configuration and returns ok (this is the case for instance when the `mainTypeName` comes from a classfile in a JAR).
   *
   * @param project The scala project containing the main type.
   * @param mainTypeName The fully-qualified main type name.
   * @param hasBuildErrors True if the passed `project` has build errors, false otherwise.
   */
  def execute(project: IScalaProject, mainTypeName: String, hasBuildErrors: Boolean): IStatus =
    canRunMain(project, mainTypeName, hasBuildErrors)

  private def canRunMain(project: IScalaProject, mainTypeName: String, hasBuildErrors: Boolean): IStatus = {
    val plainClass = findClassFile(project, mainTypeName)
    // Deliberately a `def`: only looked up when the plain classfile exists.
    def moduleClass = findClassFile(project, mainTypeName + MainClassVerifier.ModuleClassSuffix)

    if (plainClass.isDefined && moduleClass.isEmpty)
      new Status(IStatus.ERROR, SdtConstants.PluginId, s"${mainTypeName} needs to be an `object` (it is currently a `class`).")
    else if (hasBuildErrors)
      new Status(IStatus.ERROR, SdtConstants.PluginId, CompilerLaunchErrorHandler.STATUS_CODE_LAUNCH_ERROR,
        s"Project ${project.underlying.getName} contains build errors.", null)
    else
      new Status(IStatus.OK, SdtConstants.PluginId, "")
  }

  /** First workspace resource matching `mainTypeName`'s classfile in any of the project's output folders. */
  private def findClassFile(project: IScalaProject, mainTypeName: String): Option[IResource] = {
    val relativePath = mainTypeName.replace('.', '/') + ".class"
    project.outputFolders.iterator
      .map(folder => EclipseUtils.workspaceRoot.findMember(folder.append(relativePath)))
      .find(_ != null)
  }
}
| Kwestor/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/core/internal/launching/MainClassVerifier.scala | Scala | bsd-3-clause | 2,786 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.physical.batch
import org.apache.flink.table.api.TableException
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.plan.`trait`.{FlinkRelDistribution, FlinkRelDistributionTraitDef}
import org.apache.flink.table.planner.plan.cost.{FlinkCost, FlinkCostFactory}
import org.apache.flink.table.planner.plan.nodes.calcite.Rank
import org.apache.flink.table.planner.plan.nodes.exec.{InputProperty, ExecNode}
import org.apache.flink.table.planner.plan.nodes.exec.batch.BatchExecRank
import org.apache.flink.table.planner.plan.rules.physical.batch.BatchPhysicalJoinRuleBase
import org.apache.flink.table.planner.plan.utils.{FlinkRelOptUtil, RelExplainUtil}
import org.apache.flink.table.runtime.operators.rank.{ConstantRankRange, RankRange, RankType}
import org.apache.calcite.plan._
import org.apache.calcite.rel.RelDistribution.Type
import org.apache.calcite.rel.RelDistribution.Type.{HASH_DISTRIBUTED, SINGLETON}
import org.apache.calcite.rel._
import org.apache.calcite.rel.`type`.RelDataTypeField
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.calcite.util.{ImmutableBitSet, ImmutableIntList, Util}
import java.util
import scala.collection.JavaConversions._
/**
* Batch physical RelNode for [[Rank]].
*
* This node supports two-stage(local and global) rank to reduce data-shuffling.
*/
class BatchPhysicalRank(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    inputRel: RelNode,
    partitionKey: ImmutableBitSet,
    orderKey: RelCollation,
    rankType: RankType,
    rankRange: RankRange,
    rankNumberType: RelDataTypeField,
    outputRankNumber: Boolean,
    val isGlobal: Boolean)
  extends Rank(
    cluster,
    traitSet,
    inputRel,
    partitionKey,
    orderKey,
    rankType,
    rankRange,
    rankNumberType,
    outputRankNumber)
  with BatchPhysicalRel {
  // Batch execution currently only implements the RANK function.
  require(rankType == RankType.RANK, "Only RANK is supported now")
  // Constant bounds of the rank range; non-constant ranges are rejected.
  val (rankStart, rankEnd) = rankRange match {
    case r: ConstantRankRange => (r.getRankStart, r.getRankEnd)
    case o => throw new TableException(s"$o is not supported now")
  }
  // Standard Calcite copy: same attributes, new traits/inputs.
  override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
    new BatchPhysicalRank(
      cluster,
      traitSet,
      inputs.get(0),
      partitionKey,
      orderKey,
      rankType,
      rankRange,
      rankNumberType,
      outputRankNumber,
      isGlobal
    )
  }
  override def explainTerms(pw: RelWriter): RelWriter = {
    val inputRowType = inputRel.getRowType
    pw.input("input", getInput)
      .item("rankType", rankType)
      .item("rankRange", rankRange.toString(inputRowType.getFieldNames))
      .item("partitionBy", RelExplainUtil.fieldToString(partitionKey.toArray, inputRowType))
      .item("orderBy", RelExplainUtil.collationToString(orderKey, inputRowType))
      .item("global", isGlobal)
      .item("select", getRowType.getFieldNames.mkString(", "))
  }
  override def computeSelfCost(planner: RelOptPlanner, mq: RelMetadataQuery): RelOptCost = {
    // sort is done in the last sort operator, only need to compare between agg column.
    val inputRowCnt = mq.getRowCount(getInput())
    val cpuCost = FlinkCost.FUNC_CPU_COST * inputRowCnt
    val memCost: Double = mq.getAverageRowSize(this)
    val rowCount = mq.getRowCount(this)
    val costFactory = planner.getCostFactory.asInstanceOf[FlinkCostFactory]
    costFactory.makeCost(rowCount, cpuCost, 0, 0, memCost)
  }
  override def satisfyTraits(requiredTraitSet: RelTraitSet): Option[RelNode] = {
    if (isGlobal) {
      satisfyTraitsOnGlobalRank(requiredTraitSet)
    } else {
      satisfyTraitsOnLocalRank(requiredTraitSet)
    }
  }
  // A required distribution can only be satisfied by the global rank when it is
  // keyed on the partition keys: exactly, as a prefix, or (when the optimizer's
  // partial-key shuffling is enabled) as a subset of them.
  private def satisfyTraitsOnGlobalRank(requiredTraitSet: RelTraitSet): Option[RelNode] = {
    val requiredDistribution = requiredTraitSet.getTrait(FlinkRelDistributionTraitDef.INSTANCE)
    val canSatisfy = requiredDistribution.getType match {
      case SINGLETON => partitionKey.cardinality() == 0
      case HASH_DISTRIBUTED =>
        val shuffleKeys = requiredDistribution.getKeys
        val partitionKeyList = ImmutableIntList.of(partitionKey.toArray: _*)
        if (requiredDistribution.requireStrict) {
          shuffleKeys == partitionKeyList
        } else if (Util.startsWith(shuffleKeys, partitionKeyList)) {
          // If required distribution is not strict, Hash[a] can satisfy Hash[a, b].
          // so return true if shuffleKeys(Hash[a, b]) start with partitionKeyList(Hash[a])
          true
        } else {
          // If partialKey is enabled, try to use partial key to satisfy the required distribution
          val tableConfig = FlinkRelOptUtil.getTableConfigFromContext(this)
          val partialKeyEnabled = tableConfig.getConfiguration.getBoolean(
            BatchPhysicalJoinRuleBase.TABLE_OPTIMIZER_SHUFFLE_BY_PARTIAL_KEY_ENABLED)
          partialKeyEnabled && partitionKeyList.containsAll(shuffleKeys)
        }
      case _ => false
    }
    if (!canSatisfy) {
      return None
    }
    val inputRequiredDistribution = requiredDistribution.getType match {
      case SINGLETON => requiredDistribution
      case HASH_DISTRIBUTED =>
        val shuffleKeys = requiredDistribution.getKeys
        val partitionKeyList = ImmutableIntList.of(partitionKey.toArray: _*)
        if (requiredDistribution.requireStrict) {
          FlinkRelDistribution.hash(partitionKeyList)
        } else if (Util.startsWith(shuffleKeys, partitionKeyList)) {
          // Hash[a] can satisfy Hash[a, b]
          FlinkRelDistribution.hash(partitionKeyList, requireStrict = false)
        } else {
          // use partial key to satisfy the required distribution
          FlinkRelDistribution.hash(shuffleKeys.map(partitionKeyList(_)), requireStrict = false)
        }
    }
    // sort by partition keys + orderby keys
    val providedFieldCollations = partitionKey.toArray.map {
      k => FlinkRelOptUtil.ofRelFieldCollation(k)
    }.toList ++ orderKey.getFieldCollations
    val providedCollation = RelCollations.of(providedFieldCollations)
    val requiredCollation = requiredTraitSet.getTrait(RelCollationTraitDef.INSTANCE)
    val newProvidedTraitSet = if (providedCollation.satisfies(requiredCollation)) {
      getTraitSet.replace(requiredDistribution).replace(requiredCollation)
    } else {
      getTraitSet.replace(requiredDistribution)
    }
    val newInput = RelOptRule.convert(getInput, inputRequiredDistribution)
    Some(copy(newProvidedTraitSet, Seq(newInput)))
  }
  private def satisfyTraitsOnLocalRank(requiredTraitSet: RelTraitSet): Option[RelNode] = {
    val requiredDistribution = requiredTraitSet.getTrait(FlinkRelDistributionTraitDef.INSTANCE)
    requiredDistribution.getType match {
      case Type.SINGLETON =>
        val inputRequiredDistribution = requiredDistribution
        // sort by orderby keys
        val providedCollation = orderKey
        val requiredCollation = requiredTraitSet.getTrait(RelCollationTraitDef.INSTANCE)
        val newProvidedTraitSet = if (providedCollation.satisfies(requiredCollation)) {
          getTraitSet.replace(requiredDistribution).replace(requiredCollation)
        } else {
          getTraitSet.replace(requiredDistribution)
        }
        val inputRequiredTraits = getInput.getTraitSet.replace(inputRequiredDistribution)
        val newInput = RelOptRule.convert(getInput, inputRequiredTraits)
        Some(copy(newProvidedTraitSet, Seq(newInput)))
      case Type.HASH_DISTRIBUTED =>
        val shuffleKeys = requiredDistribution.getKeys
        if (outputRankNumber) {
          // rank function column is the last one
          val rankColumnIndex = getRowType.getFieldCount - 1
          // NOTE(review): this guard rejects the case where the rank column is
          // *absent* from the shuffle keys, while the comment below speaks of
          // keys not coming from the input — verify the condition's polarity
          // against the upstream Flink sources.
          if (!shuffleKeys.contains(rankColumnIndex)) {
            // Cannot satisfy required distribution if some keys are not from input
            return None
          }
        }
        val inputRequiredDistributionKeys = shuffleKeys
        val inputRequiredDistribution = FlinkRelDistribution.hash(
          inputRequiredDistributionKeys, requiredDistribution.requireStrict)
        // sort by partition keys + orderby keys
        val providedFieldCollations = partitionKey.toArray.map {
          k => FlinkRelOptUtil.ofRelFieldCollation(k)
        }.toList ++ orderKey.getFieldCollations
        val providedCollation = RelCollations.of(providedFieldCollations)
        val requiredCollation = requiredTraitSet.getTrait(RelCollationTraitDef.INSTANCE)
        val newProvidedTraitSet = if (providedCollation.satisfies(requiredCollation)) {
          getTraitSet.replace(requiredDistribution).replace(requiredCollation)
        } else {
          getTraitSet.replace(requiredDistribution)
        }
        val inputRequiredTraits = getInput.getTraitSet.replace(inputRequiredDistribution)
        val newInput = RelOptRule.convert(getInput, inputRequiredTraits)
        Some(copy(newProvidedTraitSet, Seq(newInput)))
      case _ => None
    }
  }
  override def translateToExecNode(): ExecNode[_] = {
    // No partition keys means all rows must be processed by a single task;
    // otherwise the input is hash-distributed on the partition keys.
    val requiredDistribution = if (partitionKey.length() == 0) {
      InputProperty.SINGLETON_DISTRIBUTION
    } else {
      InputProperty.hashDistribution(partitionKey.toArray)
    }
    new BatchExecRank(
      partitionKey.toArray,
      orderKey.getFieldCollations.map(_.getFieldIndex).toArray,
      rankStart,
      rankEnd,
      outputRankNumber,
      InputProperty.builder().requiredDistribution(requiredDistribution).build(),
      FlinkTypeFactory.toLogicalRowType(getRowType),
      getRelDetailedDescription
    )
  }
}
| godfreyhe/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchPhysicalRank.scala | Scala | apache-2.0 | 10,386 |
/*
* Copyright 2012 Tumblr Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.storage.redis
import com.twitter.finagle.redis.Client
import com.twitter.finagle.redis.protocol.ZRangeResults
import com.twitter.util.{Duration, Future}
import com.twitter.zipkin.common.{AnnotationType, BinaryAnnotation, Span}
import com.twitter.zipkin.storage.{Index, IndexedTraceId, TraceIdDuration}
import java.nio.ByteBuffer
import org.jboss.netty.buffer.{ChannelBuffer, ChannelBuffers}
import com.twitter.zipkin.Constants
/**
 * Redis-backed implementation of the zipkin [[Index]]: maintains the secondary
 * indexes (service/span names, annotations, trace time ranges) used to look up
 * trace ids and durations.
 */
trait RedisIndex extends Index {
  // Redis connection; released by close().
  val database: Client
  // Optional expiry applied to every index entry; also bounds query windows.
  val ttl: Option[Duration]
  /**
   * A special data structure for dealing with keys which are treated differently if different
   * arguments are passed in.
   */
  case class OptionSortedSetMap(_client: Client, firstPrefix: String, secondPrefix: String) {
    lazy val firstSetMap = new RedisSortedSetMap(_client, firstPrefix, ttl)
    lazy val secondSetMap = new RedisSortedSetMap(_client, secondPrefix, ttl)
    // With a secondary key, entries are kept under "<primary>:<secondary>" in the
    // first set; without one, under "<primary>" in the second set.
    def get(primaryKey: String,
      secondaryKey: Option[String],
      start: Option[Double],
      stop: Double,
      count: Int) = secondaryKey match {
      case Some(contents) =>
        firstSetMap.get(redisJoin(primaryKey, contents), start.getOrElse(0), stop, count)
      case None => secondSetMap.get(primaryKey, start.getOrElse(0), stop, count)
    }
    // Writes go to both sets (the first only when a secondary key is present).
    def put(primaryKey: String, secondaryKey: Option[String], score: Double, value: ChannelBuffer) = {
      val first = secondaryKey map { secondValue =>
        firstSetMap.add(redisJoin(primaryKey, secondValue), score, value)
      }
      val second = secondSetMap.add(primaryKey, score, value)
      Future.join(Seq(first.getOrElse(Future.Unit), second))
    }
  }
  /**
   * A singleton RedisSet
   */
  case class RedisSet(client: Client, key: String) {
    val _underlying = new RedisSetMap(client, "singleton", ttl)
    def get() = _underlying.get(key)
    def add(bytes: ChannelBuffer) = _underlying.add(key, bytes)
  }
  // Secondary indexes, each under its own Redis key prefix.
  lazy val annotationsListMap = new RedisSortedSetMap(database, "annotations", ttl)
  lazy val binaryAnnotationsListMap = new RedisSortedSetMap(database, "binary_annotations", ttl)
  lazy val serviceSpanMap = OptionSortedSetMap(database, "service", "service:span")
  lazy val spanMap = new RedisSetMap(database, "span", ttl)
  lazy val serviceArray = new RedisSet(database, "services")
  lazy val traceHash = new RedisHash(database, "ttlMap")
  override def close() = {
    database.release()
  }
  // Converts a Redis ZRANGE result into IndexedTraceIds (score carries the timestamp).
  private[this] def zRangeResultsToSeqIds(arr: ZRangeResults): Seq[IndexedTraceId] =
    arr.asTuples map (tup => IndexedTraceId(tup._1, tup._2.toLong))
  private[redis] def redisJoin(items: String*) = items.mkString(":")
  override def getTraceIdsByName(serviceName: String, span: Option[String],
    endTs: Long, limit: Int): Future[Seq[IndexedTraceId]] =
    serviceSpanMap.get(
      serviceName,
      span,
      ttl map (dur => endTs - dur.inMicroseconds),
      endTs,
      limit) map zRangeResultsToSeqIds
  override def getTraceIdsByAnnotation(serviceName: String, annotation: String, value: Option[ByteBuffer],
    endTs: Long, limit: Int): Future[Seq[IndexedTraceId]] = (value match {
    case Some(anno) =>
      binaryAnnotationsListMap.get(
        redisJoin(serviceName, annotation, ChannelBuffers.copiedBuffer(anno)),
        (ttl map (dur => (endTs - dur.inMicroseconds).toDouble)).getOrElse(0.0),
        endTs,
        limit)
    case None =>
      annotationsListMap.get(
        redisJoin(serviceName, annotation),
        (ttl map (dur => (endTs - dur.inMicroseconds).toDouble)).getOrElse(0.0),
        endTs,
        limit)
  }) map zRangeResultsToSeqIds
  override def getTracesDuration(traceIds: Seq[Long]): Future[Seq[TraceIdDuration]] = Future.collect(
    traceIds map (getTraceDuration(_))
  ) map (_ flatten)
  // Duration = end - start of the TimeRange stored for this trace (None if absent).
  private[this] def getTraceDuration(traceId: Long): Future[Option[TraceIdDuration]] =
    traceHash.get(traceId) map {
      _ flatMap { bytes =>
        val TimeRange(start, end) = decodeStartEnd(bytes)
        Some(TraceIdDuration(traceId, end - start, start))
      }
    }
  override def getServiceNames: Future[Set[String]] = serviceArray.get() map (serviceNames =>
    (serviceNames map (new String(_))).toSet
  )
  override def getSpanNames(service: String): Future[Set[String]] = spanMap.get(service) map (
    strings => (strings map (new String(_))).toSet
  )
  override def indexTraceIdByServiceAndName(span: Span) : Future[Unit] = Future.join(
    (span.serviceNames toSeq) map { serviceName =>
      (span.lastAnnotation map { last =>
        serviceSpanMap.put(serviceName, Some(span.name), last.timestamp, span.traceId)
      }).getOrElse(Future.Unit)
    }
  )
  override def indexSpanByAnnotations(span: Span) : Future[Unit] = {
    // Renders a binary annotation's value as the string used inside the Redis key.
    def encodeAnnotation(bin: BinaryAnnotation): String = bin.annotationType match {
      case AnnotationType.Bool => (if (bin.value.get() != 0) true else false).toString
      case AnnotationType.Double => bin.value.getDouble.toString
      case AnnotationType.I16 => bin.value.getShort.toString
      case AnnotationType.I32 => bin.value.getInt.toString
      case AnnotationType.I64 => bin.value.getLong.toString
      case _ => ChannelBuffers.copiedBuffer(bin.value)
    }
    def binaryAnnoStringify(bin: BinaryAnnotation, service: String): String =
      redisJoin(service, bin.key, encodeAnnotation(bin))
    span.lastAnnotation.map {
      lastAnnotation =>
        Future.join({
          val time = lastAnnotation.timestamp
          val binaryAnnos: Seq[Future[Unit]] = span.serviceNames.toSeq flatMap {
            serviceName =>
              span.binaryAnnotations map {
                binaryAnno =>
                  binaryAnnotationsListMap.add(
                    binaryAnnoStringify(binaryAnno, serviceName),
                    time,
                    span.traceId
                  )
              }
          }
          // Core (client/server send/receive) annotations are not indexed.
          val annos = for (serviceName <- span.serviceNames toSeq;
            anno <- span.annotations if (!Constants.CoreAnnotations.contains(anno.value)))
            yield annotationsListMap.add(redisJoin(serviceName, anno.value), time, span.traceId)
          annos ++ binaryAnnos
        }
      )
    } getOrElse {
      Future.Unit
    }
  }
  override def indexServiceName(span: Span): Future[Unit] = Future.join(
    span.serviceNames.toSeq collect {case name if name != "" => serviceArray.add(name)}
  )
  override def indexSpanNameByService(span: Span): Future[Unit] =
    if (span.name != "")
      Future.join(
        for (serviceName <- span.serviceNames.toSeq
          if serviceName != "")
          yield spanMap.add(serviceName, span.name)
      )
    else
      Future.Unit
  // Stores the span's time range keyed by trace id; an existing range is merged
  // (widen presumably expands it to cover the new span — see TimeRange).
  override def indexSpanDuration(span: Span): Future[Unit] = (traceHash.get(span.traceId) map {
    case None => TimeRange.fromSpan(span) map { timeRange =>
      traceHash.put(span.traceId, timeRange)
    }
    case Some(bytes) => indexNewStartAndEnd(span, bytes)
  }).unit
  private[this] def indexNewStartAndEnd(span: Span, buf: ChannelBuffer) =
    TimeRange.fromSpan(span) map { timeRange =>
      traceHash.put(span.traceId, timeRange.widen(buf))
    }
}
| chang2394/zipkin | zipkin-redis/src/main/scala/com/twitter/zipkin/storage/redis/RedisIndex.scala | Scala | apache-2.0 | 7,824 |
/* From Sereni, PhD thesis 2006 */
import stainless.lang._
object ToChurch {
  /** Function composition: compose(f, g)(x) == f(g(x)). */
  def compose[T,U,V](f: U => V, g: T => U): T => V = {
    (x: T) => f(g(x))
  }
  /** Identity function. */
  def id[T](x: T) = x
  /** Successor on BigInt. */
  def succ(x: BigInt) = x + 1
  // Church encoding of n: the n-fold composition of f (f applied n times).
  def toChurch(n: BigInt, f: BigInt => BigInt): BigInt => BigInt = {
    require(n >= 0)
    if (n == 0) id[BigInt] _
    else compose(f, toChurch(n - 1, f))
  }
  // For x >= 0, applying succ x times to 0 yields x; negative inputs return 0.
  def main(x: BigInt): BigInt = {
    if (x >= 0) toChurch(x, succ)(0)
    else 0
  }
}
| epfl-lara/stainless | frontends/benchmarks/termination/valid/ToChurch.scala | Scala | apache-2.0 | 468 |
/*
* Copyright 2014 Frugal Mechanic (http://frugalmechanic.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fm.http.server
import com.frugalmechanic.optparse.OptParse
import fm.common.{Crypto, DigestUtils, Logging, StacklessException}
import fm.common.Implicits._
import fm.http.{Headers, Status}
import io.netty.handler.codec.http.HttpHeaderNames
import java.security.SecureRandom
import scala.util.matching.Regex
import scala.concurrent.{ExecutionContext, Future}
object DigestAuth {
  // Matches a "Digest <params>" Authorization header, capturing the parameter list.
  private val DigestAuthHeader: Regex = """Digest (.+)""".r
  // Matches one key=value pair; the value is either quoted (group 2) or bare (group 3).
  private val DigestAuthParam: Regex = """(\\w+)=(?:"([^"]+)"|([^,]+)),?""".r
  // Control-flow marker thrown when the nonce has expired, so the caller can
  // re-challenge with stale=true (letting the browser retry without prompting).
  private case object Stale extends StacklessException
  /** Computes MD5(username + ":" + realm + ":" + password) — the "HA1" of HTTP Digest auth. */
  def digestHash(realm: String, username: String, password: String): String = {
    DigestUtils.md5Hex(username+":"+realm+":"+password)
  }
  // Command-line flags for the htdigest-style generator below.
  object CLIOptions extends OptParse {
    val realm = StrOpt(desc="Realm")
    val user = StrOpt(desc="Username")
    val pass = StrOpt(desc="Pass")
  }
  /**
   * For generating DIGEST hashes (like Apache's htdigest utility)
   */
  def main(args: Array[String]): Unit = {
    CLIOptions.parse(args)
    val realm: String = CLIOptions.realm.getOrElse {
      print("Enter Realm: ")
      System.console.readLine().trim()
    }
    val user: String = CLIOptions.user.getOrElse {
      print("Enter User: ")
      System.console.readLine().trim()
    }
    val pass: String = CLIOptions.pass.getOrElse {
      print("Enter Password: ")
      val pw1: String = new String(System.console.readPassword())
      print("Again: ")
      val pw2: String = new String(System.console.readPassword())
      require(pw1 === pw2, "Passwords do not match!")
      pw1
    }
    val hash: String = digestHash(realm=realm, username=user, password=pass)
    println("Realm: "+realm)
    println("User: "+user)
    println("Digest Hash: "+hash)
  }
}
/**
* Implements DIGEST HTTP Authentication
*
* Mostly used the Wikipedia pages as references:
* http://en.wikipedia.org/wiki/Digest_access_authentication
*
* NOTE: Use at your own risk. We make no claim that any of this Crypto code is correct.
*/
final case class DigestAuth(
/**
* The realm to use
*/
realm: String,
/**
* Map of Usernames -> (Plaintext passwords or hashes based on the digestHash method in the DigestAuth object)
*/
users: Map[String, String],
/**
* A value to prefix to the nonce. Can be anything.
*/
noncePrefix: String,
/**
* The Base64 encoded 256-bit encryption key to use for encrypting the opaque data. You
* can use fm.common.Crypto.makeRandomKeyBase64() to generate a key.
*/
base64EncryptionKey: String,
/**
* How long is the nonce good for? After this amount of seconds the client's browser
* will automatically re-authenticate using the updated nonce from the server.
*/
expirationSeconds: Int = 300 // 5 minutes
) extends Auth with Logging {
  import DigestAuth._
  // Prefix this onto the nonce
  private[this] val DigestNoncePrefix: String = noncePrefix
  // Encrypt/Sign the opaque data using this
  private[this] val DigestOpaqueCrypto = Crypto.authenticatedCipherForBase64Key(base64EncryptionKey)
  // The nonce expires after this number of seconds. After the expiration the "stale" flag is passed
  // back to the client which should allow the browser to automatically retry the request with
  // the updated nonce without prompting the user for their username/password.
  private[this] val DigestExpirationSeconds: Int = expirationSeconds
  // Source of randomness used when generating nonces.
  private[this] val secureRandom: SecureRandom = new SecureRandom()
  if (logger.isDebugEnabled) logger.debug(s"DigestAuth: realm: $realm, users: $users")
  /**
   * Runs `action` only if the request carries valid Digest credentials;
   * otherwise replies with a 401 challenge. An expired nonce (signalled via the
   * Stale exception) produces a challenge flagged stale=true.
   */
  override protected def requireAuthImpl(request: Request)(action: => Future[Response])(implicit executor: ExecutionContext): Future[Response] = try {
    if (isValid(request)) action else response(isStale = false)
  } catch {
    case Stale => response(isStale = true)
  }
  /**
   * Builds the 401 challenge. The nonce is MD5(prefix + now + random); the
   * opaque value is the encrypted/authenticated pair "<millis>:<nonce>", which
   * lets isValid later verify the nonce and detect expiration without server
   * state.
   */
  private def response(isStale: Boolean): Future[Response] = {
    val nonce: String = DigestUtils.md5Hex(DigestNoncePrefix+System.currentTimeMillis.toString+secureRandom.nextLong.toString)
    val opaque: String = DigestOpaqueCrypto.encryptBase64String(System.currentTimeMillis+":"+nonce)
    val stale: Seq[(String,String)] = if (isStale) Seq("stale" -> "true") else Seq()
    val params: Seq[(String,String)] = Seq(
      "realm" -> realm,
      "qop" -> "auth",
      "nonce" -> nonce,
      "opaque" -> opaque
    )++stale
    val wwwAuthenticateValue: String = "Digest "+params.map{case (k,v) => k+"=\\""+v+"\\""}.mkString(", ")
    Future.successful(Response(Status.UNAUTHORIZED, Headers(HttpHeaderNames.WWW_AUTHENTICATE -> wwwAuthenticateValue)))
  }
  /** Extracts the Digest Authorization header, parses its parameters and validates them. */
  def isValid(request: Request): Boolean = {
    val auth: String = request.headers.authorization.getOrElse{ return false }
    auth match {
      case DigestAuthHeader(paramsStr) =>
        val params: Map[String, String] = Map(DigestAuthParam.findAllIn(paramsStr).matchData.map{ m =>
          val k: String = m.group(1)
          // group(2) holds a quoted value, group(3) a bare (unquoted) value.
          val v: String = if (null != m.group(2)) m.group(2) else m.group(3)
          (k,v)
        }.toSeq: _*)
        if (logger.isDebugEnabled) logger.debug(s"Request: ${request.method} ${request.uri} Params: $params")
        isValid(request, params)
      case _ => false
    }
  }
  /**
   * Verifies the client's Digest response: checks user/realm/uri, authenticates
   * the opaque value (proving it was issued by this server), matches the nonce,
   * enforces nonce expiration (throwing Stale), and finally compares the digest
   * computed from either a pre-computed HA1 or a plaintext password.
   */
  def isValid(request: Request, params: Map[String, String]): Boolean = {
    val pUsername: String = params.getOrElse("username", "")
    val pRealm: String = params.getOrElse("realm", "")
    val pNonce: String = params.getOrElse("nonce", "")
    val pClientNonce: String = params.getOrElse("cnonce", "")
    val pCount: String = params.getOrElse("nc", "")
    val pUri: String = params.getOrElse("uri", "")
    val pResponse: String = params.getOrElse("response", "")
    val pOpaque: String = params.getOrElse("opaque", "")
    val pQop: String = params.getOrElse("qop", "")
    // Expected "response" value for a given HA1 (see Digest auth's response formula).
    def computeResponse(ha1: String): String = {
      val ha2: String = DigestUtils.md5Hex(request.method.name+":"+pUri)
      pQop match {
        case "auth" => DigestUtils.md5Hex(ha1+":"+pNonce+":"+pCount+":"+pClientNonce+":auth:"+ha2)
        case "" => DigestUtils.md5Hex(ha1+":"+pNonce+":"+ha2)
        case _ => ""
      }
    }
    // Some basic checks to ignore any obviously invalid requests
    // Note - the stripTrailing("?") exists because Netty seems to convert /foo? to /foo in HttpRequest
    if (!users.contains(pUsername) || realm != pRealm || pUri.stripTrailing("?") != request.uri.stripTrailing("?")) {
      if (logger.isDebugEnabled) logger.debug(s"Invalid Auth. user, realm or uri mismatch. user: $pUsername, realm: $realm, pRealm: $pRealm, pUri: $pUri, request.uri: ${request.uri}")
      return false
    }
    // Verify the opaque value was encrypted/signed by us
    val opaqueMsg: String = DigestOpaqueCrypto.tryDecryptBase64String(pOpaque).getOrElse{
      if (logger.isDebugEnabled) logger.debug("Auth Failed. Cannot decode pOpaque")
      return false
    }
    // The opaque payload is "<issue-millis>:<nonce>" as built in response().
    val Array(time,nonce) = opaqueMsg.split(':')
    // If the nonce doesn't match then return
    if (nonce != pNonce) {
      if (logger.isDebugEnabled) logger.debug(s"Auth Failed. nonce mismatch. nonce: $nonce, pNonce: $pNonce")
      return false
    }
    // Force re-authorization after a certain amount of time
    val expirationTimeMillis = time.toLong+(DigestExpirationSeconds*1000)
    val isStale: Boolean = System.currentTimeMillis > expirationTimeMillis
    if (isStale) throw Stale
    // Allow the users map to work with both pre-computed hashes and plaintext passwords
    val ha1Precomputed: String = users(pUsername)
    val ha1Plaintext: String = digestHash(realm=realm, username=pUsername, password=users(pUsername))
    val res: Boolean = pResponse === computeResponse(ha1Precomputed) || pResponse === computeResponse(ha1Plaintext)
    if (!res && logger.isDebugEnabled) logger.debug("Response mismatch - Expected: "+computeResponse(ha1Precomputed)+" OR "+computeResponse(ha1Plaintext)+" Got: "+pResponse)
    res
  }
} | er1c/fm-http | src/main/scala/fm/http/server/DigestAuth.scala | Scala | apache-2.0 | 8,734 |
// scalastyle:off line.size.limit
/*
* Ported by Alistair Johnson from
* https://github.com/gwtproject/gwt/blob/master/user/test/com/google/gwt/emultest/java/math/BigIntegerCompareTest.java
*/
// scalastyle:on line.size.limit
package org.scalajs.testsuite.javalib.math
import java.math.BigInteger
import org.scalajs.jasminetest.JasmineTest
object BigIntegerCompareTest extends JasmineTest {

  /** Asserts that `result` serializes (via `toByteArray`) to exactly
   *  `rBytes` and reports the expected `signum`.
   */
  private def expectBigInteger(result: BigInteger, rBytes: Array[Byte], signum: Int): Unit = {
    val resBytes = result.toByteArray()
    for (i <- 0 until resBytes.length) {
      expect(resBytes(i)).toEqual(rBytes(i))
    }
    expect(result.signum()).toEqual(signum)
  }

  describe("BigIntegerCompareTest") {

    it("testAbsNegative") {
      val aNumber = new BigInteger(-1, Array[Byte](1, 2, 3, 4, 5, 6, 7))
      expectBigInteger(aNumber.abs(), Array[Byte](1, 2, 3, 4, 5, 6, 7), 1)
    }

    it("testAbsPositive") {
      val aNumber = new BigInteger(1, Array[Byte](1, 2, 3, 4, 5, 6, 7))
      expectBigInteger(aNumber.abs(), Array[Byte](1, 2, 3, 4, 5, 6, 7), 1)
    }

    it("testCompareNegNeg2") {
      // Both negative; a has the smaller magnitude, so a > b.
      val aNumber = new BigInteger(-1, Array[Byte](10, 20, 30, 40, 50, 60, 70, 10, 20, 30))
      val bNumber = new BigInteger(-1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      expect(aNumber.compareTo(bNumber)).toEqual(1)
    }

    it("testCompareToDiffSigns1") {
      val aNumber = new BigInteger(1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      val bNumber = new BigInteger(-1, Array[Byte](10, 20, 30, 40, 50, 60, 70, 10, 20, 30))
      expect(aNumber.compareTo(bNumber)).toEqual(1)
    }

    it("testCompareToDiffSigns2") {
      val aNumber = new BigInteger(-1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      val bNumber = new BigInteger(1, Array[Byte](10, 20, 30, 40, 50, 60, 70, 10, 20, 30))
      expect(aNumber.compareTo(bNumber)).toEqual(-1)
    }

    it("testCompareToEqualNeg") {
      val bytes = Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91)
      expect(new BigInteger(-1, bytes).compareTo(new BigInteger(-1, bytes))).toEqual(0)
    }

    it("testCompareToEqualPos") {
      val bytes = Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91)
      expect(new BigInteger(1, bytes).compareTo(new BigInteger(1, bytes))).toEqual(0)
    }

    it("testCompareToNegNeg1") {
      val aNumber = new BigInteger(-1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      val bNumber = new BigInteger(-1, Array[Byte](10, 20, 30, 40, 50, 60, 70, 10, 20, 30))
      expect(aNumber.compareTo(bNumber)).toEqual(-1)
    }

    it("testCompareToNegZero") {
      val aNumber = new BigInteger(-1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      expect(aNumber.compareTo(BigInteger.ZERO)).toEqual(-1)
    }

    it("testCompareToPosPos1") {
      val aNumber = new BigInteger(1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      val bNumber = new BigInteger(1, Array[Byte](10, 20, 30, 40, 50, 60, 70, 10, 20, 30))
      expect(aNumber.compareTo(bNumber)).toEqual(1)
    }

    it("testCompareToPosPos2") {
      val aNumber = new BigInteger(1, Array[Byte](10, 20, 30, 40, 50, 60, 70, 10, 20, 30))
      val bNumber = new BigInteger(1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      expect(aNumber.compareTo(bNumber)).toEqual(-1)
    }

    it("testCompareToPosZero") {
      val aNumber = new BigInteger(1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      expect(aNumber.compareTo(BigInteger.ZERO)).toEqual(1)
    }

    it("testCompareToZeroNeg") {
      val bNumber = new BigInteger(-1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      expect(BigInteger.ZERO.compareTo(bNumber)).toEqual(1)
    }

    it("testCompareToZeroPos") {
      val bNumber = new BigInteger(1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      expect(BigInteger.ZERO.compareTo(bNumber)).toEqual(-1)
    }

    it("testCompareToZeroZero") {
      expect(BigInteger.ZERO.compareTo(BigInteger.ZERO)).toEqual(0)
    }

    it("testEqualsBigIntegerFalse") {
      val aNumber = new BigInteger(1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      val bNumber = new BigInteger(1, Array[Byte](45, 91, 3, -15, 35, 26, 3, 91))
      expect(aNumber == bNumber).toBeFalsy()
    }

    it("testEqualsBigIntegerTrue") {
      val bytes = Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91)
      expect(new BigInteger(1, bytes) == new BigInteger(1, bytes)).toBeTruthy()
    }

    it("testEqualsNull") {
      val aNumber = new BigInteger(1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      expect(aNumber == null).toBeFalsy()
    }

    it("testEqualsObject") {
      val aNumber = new BigInteger(1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      expect(aNumber == new AnyRef()).toBeFalsy()
    }

    it("testMaxEqual") {
      val bytes = Array[Byte](45, 91, 3, -15, 35, 26, 3, 91)
      expectBigInteger(new BigInteger(1, bytes).max(new BigInteger(1, bytes)), bytes, 1)
    }

    it("testMaxGreater") {
      val aNumber = new BigInteger(1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      val bNumber = new BigInteger(1, Array[Byte](45, 91, 3, -15, 35, 26, 3, 91))
      expectBigInteger(aNumber.max(bNumber), Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91), 1)
      // max must return the greater value regardless of which operand is the receiver
      expect(bNumber.max(aNumber) == aNumber).toBeTruthy()
    }

    it("testMaxLess") {
      val aNumber = new BigInteger(1, Array[Byte](45, 91, 3, -15, 35, 26, 3, 91))
      val bNumber = new BigInteger(1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      expectBigInteger(aNumber.max(bNumber), Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91), 1)
    }

    it("testMaxNegZero") {
      val aNumber = new BigInteger(-1, Array[Byte](45, 91, 3, -15, 35, 26, 3, 91))
      // max(negative, 0) is 0, whose byte representation is a single zero byte
      expectBigInteger(aNumber.max(BigInteger.ZERO), Array[Byte](0), 0)
    }

    it("testMinEqual") {
      val bytes = Array[Byte](45, 91, 3, -15, 35, 26, 3, 91)
      expectBigInteger(new BigInteger(1, bytes).min(new BigInteger(1, bytes)), bytes, 1)
    }

    it("testMinGreater") {
      val aNumber = new BigInteger(1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      val bNumber = new BigInteger(1, Array[Byte](45, 91, 3, -15, 35, 26, 3, 91))
      expectBigInteger(aNumber.min(bNumber), Array[Byte](45, 91, 3, -15, 35, 26, 3, 91), 1)
    }

    it("testMinLess") {
      val aNumber = new BigInteger(1, Array[Byte](45, 91, 3, -15, 35, 26, 3, 91))
      val bNumber = new BigInteger(1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      // The expected bytes were previously an untyped Array (inferred Array[Int]);
      // they must be Array[Byte] to match the element type of toByteArray.
      expectBigInteger(aNumber.min(bNumber), Array[Byte](45, 91, 3, -15, 35, 26, 3, 91), 1)
    }

    it("testMinPosZero") {
      val aNumber = new BigInteger(1, Array[Byte](45, 91, 3, -15, 35, 26, 3, 91))
      expectBigInteger(aNumber.min(BigInteger.ZERO), Array[Byte](0), 0)
    }

    it("testNegateNegative") {
      val aNumber = new BigInteger(-1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      expectBigInteger(aNumber.negate(), Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91), 1)
    }

    it("testNegatePositive") {
      val aNumber = new BigInteger(1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      expectBigInteger(aNumber.negate(), Array[Byte](-13, -57, -101, 1, 75, -90, -46, -92, -4, 14, -36, -27, -4, -91), -1)
    }

    it("testNegateZero") {
      expectBigInteger(BigInteger.ZERO.negate(), Array[Byte](0), 0)
    }

    it("testSignumNegative") { // test name typo fixed (was "tassestSignumNegative")
      val aNumber = new BigInteger(-1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      expect(aNumber.signum()).toEqual(-1)
    }

    it("testSignumPositive") {
      val aNumber = new BigInteger(1, Array[Byte](12, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, 3, 91))
      expect(aNumber.signum()).toEqual(1)
    }

    it("testSignumZero") {
      expect(BigInteger.ZERO.signum()).toEqual(0)
    }
  }
}
| CapeSepias/scala-js | test-suite/src/test/scala/org/scalajs/testsuite/javalib/math/BigIntegerCompareTest.scala | Scala | bsd-3-clause | 14,412 |
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Outworkers Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.example.basics
import com.datastax.driver.core.Session
import com.websudos.phantom.connectors._
import com.websudos.phantom.zookeeper.ZkContactPointLookup
trait KeyspaceDefinition {
  // Single keyspace shared by all the connectors defined in this file.
  implicit val space = KeySpace("phantom_example")
}
object Defaults extends KeyspaceDefinition {
  // Connector against a local Cassandra node, bound to the shared keyspace.
  val connector = ContactPoint.local.keySpace(space.name)
}
/**
* This is an example of how to connect to Cassandra in the easiest possible way.
* The SimpleCassandraConnector is designed to get you up and running immediately, with almost 0 effort.
*
 * What you have to do now is to tell phantom what keyspace you will be using in Cassandra. This connector will automatically try to connect to localhost:9042.
* If you want to tell the connector to use a different host:port combination, simply override the address inside it.
*
* Otherwise, simply mixing this connector in will magically inject a database session for all your queries and you can immediately run them.
*/
trait ExampleConnector extends Defaults.connector.Connector
/**
* Now you might ask yourself how to use service discovery with phantom. The Datastax Java Driver can automatically connect to multiple clusters.
* Using some underlying magic, phantom can also help you painlessly connect to a series of nodes in a Cassandra cluster via ZooKeeper.
*
* Once again, all you need to tell phantom is what your keyspace is. Phantom will make a series of assumptions about which path you are using in ZooKeeper.
* By default, it will try to connect to localhost:2181, fetch the "/cassandra" path and parse ports found in a "host:port, host1:port1,
* .." sequence. All these settings are trivial to override in the below connector and you can adjust all the settings to fit your environment.
*/
object ZkDefaults extends KeyspaceDefinition {
  // Connector that discovers the Cassandra cluster through a local ZooKeeper instance.
  val connector = ZkContactPointLookup.local.keySpace(space.name)
}
trait DefaultZookeeperConnector extends RootConnector {
  // Lazy so the ZooKeeper lookup only happens on first use of the session.
  override implicit lazy val session: Session = ZkDefaults.connector.session
}
/**
* This is an example of how to connect to a custom set of hosts and ports.
* First, we need to obtain a connector and keep a singleton reference to it.
* It's really important to guarantee we are using a singleton here, otherwise
* we will end up spawning a cluster on every call.
*/
object RemoteConnector extends KeyspaceDefinition {
  // Simply specify the list of hosts followed by the keyspace.
  // Now the connector object will automatically create the Database connection for us and initialise it.
  // Uses `space.name` from KeyspaceDefinition (like `Defaults` does) instead of
  // repeating the keyspace literal, so every connector in this file stays in sync.
  val connector = ContactPoints(Seq("docker.local")).keySpace(space.name)
}
trait DockerConnector extends RemoteConnector.connector.Connector | levinson/phantom | phantom-example/src/main/scala/com/websudos/phantom/example/basics/ExampleConnector.scala | Scala | bsd-2-clause | 4,200 |
package opjj
object ListUtil {

  /** Returns a new list containing the elements of `list` in reverse order.
    *
    * Unlike the naive `reverse(tail) :+ head` formulation (which is O(n^2)
    * because appending to a List is linear, and which grows the call stack),
    * this prepends onto an accumulator: O(n) time, constant stack space.
    */
  def reverse[A](list: List[A]): List[A] = {
    @annotation.tailrec
    def loop(remaining: List[A], acc: List[A]): List[A] = remaining match {
      case head :: tail => loop(tail, head :: acc)
      case _            => acc
    }
    loop(list, Nil)
  }
}
| mbezjak/opjj-testing | 1-reverse-list/scala/src/main/scala/opjj/ListUtil.scala | Scala | mit | 174 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.consumers
import monix.execution.Callback
import monix.execution.Ack.{Continue, Stop}
import monix.execution.{Ack, Scheduler}
import monix.execution.cancelables.AssignableCancelable
import scala.util.control.NonFatal
import monix.reactive.Consumer
import monix.reactive.observers.Subscriber
/** Implementation for [[monix.reactive.Consumer.foreach]]. */
private[reactive] final class ForeachConsumer[A](f: A => Unit) extends Consumer.Sync[A, Unit] {

  def createSubscriber(cb: Callback[Throwable, Unit], s: Scheduler): (Subscriber.Sync[A], AssignableCancelable) = {
    val out = new Subscriber.Sync[A] {
      implicit val scheduler = s
      // Guards the final callback: `cb` must be signaled at most once,
      // either with success (onComplete) or with an error.
      private[this] var isDone = false

      def onNext(elem: A): Ack = {
        try {
          f(elem)
          Continue
        } catch {
          case ex if NonFatal(ex) =>
            // A throwing callback terminates the stream: report the error
            // via onError and tell upstream to stop emitting.
            onError(ex)
            Stop
        }
      }

      def onComplete(): Unit =
        if (!isDone) {
          isDone = true
          cb.onSuccess(())
        }

      def onError(ex: Throwable): Unit =
        if (!isDone) {
          isDone = true
          cb.onError(ex)
        }
    }

    // There is nothing to release on cancellation, hence the dummy cancelable.
    (out, AssignableCancelable.dummy)
  }
}
| alexandru/monifu | monix-reactive/shared/src/main/scala/monix/reactive/internal/consumers/ForeachConsumer.scala | Scala | apache-2.0 | 1,879 |
package com.alvin.niagara.service
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import com.alvin.niagara.dao.UserDAO.User
import com.alvin.niagara.dao.{CassandraDAO, UserDAO}
import com.alvin.niagara.model.RichPost
import spray.json.DefaultJsonProtocol

import scala.concurrent.ExecutionContext.Implicits.global
/**
* Created by JINC4 on 6/4/2016.
*
 * Routes contains a bunch of directives calling Cassandra DAO queries
*
*/
// Spray-JSON (de)serializers for the payload types exposed by the routes.
trait AkkaJSONProtocol extends DefaultJsonProtocol with CassandraDAO {
  // Formats derived from the case-class apply methods (4 fields / 6 fields).
  implicit val postFormat = jsonFormat4(RichPost.apply)
  implicit val userFormat = jsonFormat6(User.apply)
}
trait Routes extends AkkaJSONProtocol {

  /** Post routes: lookup/delete by id, insert, search by title, count by type. */
  val route =
    path("postid" / LongNumber) { id =>
      get {
        onSuccess(queryPostById(id)) {
          case result: List[RichPost] =>
            complete(result)
        }
      } ~ delete {
        onSuccess(deletePostById(id)) {
          case result: String =>
            complete(result)
        }
      }
    } ~
      path("post") {
        (post & entity(as[RichPost])) { p =>
          onSuccess(insertPost(p)) {
            case result: String =>
              complete(result)
          }
        }
      } ~
      path("title" / Segment) { title =>
        get {
          onSuccess(queryByTitle(title)) {
            case result: List[RichPost] =>
              complete(result)
          }
        }
      } ~
      path("count" / Segment) { typ =>
        get {
          onSuccess(countByType(typ)) {
            case result: Long =>
              // No JSON marshaller is configured for Long, so render it as text.
              complete(result.toString)
          }
        }
      }

  /** User routes: query by email, create a user, lookup by id. */
  val authRoute = path("users" / Segment) { email =>
    get {
      onSuccess(UserDAO.queryUsersByEmail(email)) {
        case result: Seq[User] =>
          complete(result)
      }
    }
  } ~
    path("user") {
      (post & entity(as[User])) { user =>
        onSuccess(UserDAO.insertUser(user.email, user.username, user.password)) {
          // Previously only `Some` was matched, so a `None` from the DAO
          // crashed the route with a MatchError; handle it explicitly.
          case Some(created) => complete(created)
          case None => complete(StatusCodes.InternalServerError)
        }
      }
    } ~
    path("user" / Segment) { userid =>
      get {
        onSuccess(UserDAO.queryUserById(userid)) {
          case Some(found) => complete(found)
          case None => complete(StatusCodes.NotFound)
        }
      }
    }
}
| AlvinCJin/Niagara | src/main/scala/com/alvin/niagara/service/Routes.scala | Scala | apache-2.0 | 2,374 |
package org.jetbrains.plugins.scala.annotator
import com.intellij.lang.annotation.AnnotationHolder
import org.jetbrains.plugins.scala.lang.psi.api.base.ScMethodLike
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScFunctionExpr
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScClassParameter, ScParameter, ScParameters}
/**
* Pavel.Fatin, 15.06.2010
*/
trait ParametersAnnotator {

  /** Validates parameter clauses as a whole: a repeated ("*") parameter must
    * come last in its clause, and a clause containing one may not also
    * declare default arguments.
    */
  def annotateParameters(parameters: ScParameters, holder: AnnotationHolder): Unit = {
    def checkRepeatedParams() {
      parameters.clauses.foreach { cl =>
        // Every parameter except the last must not be repeated.
        cl.parameters.dropRight(1).foreach {
          case p if p.isRepeatedParameter => holder.createErrorAnnotation(p, "*-parameter must come last")
          case _ =>
        }
        // A clause ending in a repeated parameter cannot mix in defaults.
        cl.parameters.lastOption match {
          case Some(p) if p.isRepeatedParameter && cl.parameters.exists(_.isDefaultParam) =>
            holder.createErrorAnnotation(cl, "Parameter section with *-parameter cannot have default arguments")
          case _ =>
        }
      }
    }

    checkRepeatedParams()
  }

  /** Validates a single parameter. Method parameters require an explicit
    * type annotation; function-literal parameters may omit it only when the
    * type is inferable from the expected type of the literal.
    */
  def annotateParameter(parameter: ScParameter, holder: AnnotationHolder): Unit = {
    parameter.owner match {
      case null =>
        holder.createErrorAnnotation(parameter, "Parameter hasn't owner: " + parameter.name)
      case _: ScMethodLike =>
        parameter.typeElement match {
          case None =>
            holder.createErrorAnnotation(parameter, "Missing type annotation for parameter: " + parameter.name)
          case _ =>
        }
        if (parameter.isCallByNameParameter)
          annotateCallByNameParameter(parameter, holder: AnnotationHolder)
      case _: ScFunctionExpr =>
        parameter.typeElement match {
          case None =>
            // No explicit type: only an error when no expected type is
            // available from the surrounding context either.
            parameter.expectedParamType match {
              case None =>
                holder.createErrorAnnotation(parameter, "Missing parameter type: " + parameter.name)
              case _ =>
            }
          case _ =>
        }
    }
  }

  // Flags parameter kinds that may not be declared call-by-name:
  // `val`/`var` class parameters, case class fields and implicit parameters.
  private def annotateCallByNameParameter(parameter: ScParameter, holder: AnnotationHolder): Any = {
    def errorWithMessageAbout(topic: String) = {
      val message = s"$topic parameters may not be call-by-name"
      holder.createErrorAnnotation(parameter, message)
    }
    // more specific class-parameter checks first, generic implicit check last
    parameter match {
      case cp: ScClassParameter if cp.isVal => errorWithMessageAbout("\\'val\\'")
      case cp: ScClassParameter if cp.isVar => errorWithMessageAbout("\\'var\\'")
      case cp: ScClassParameter if cp.isCaseClassVal => errorWithMessageAbout("case class")
      case p if p.isImplicitParameter => errorWithMessageAbout("implicit")
      case _ =>
    }
  }
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/annotator/ParametersAnnotator.scala | Scala | apache-2.0 | 2,654 |
package com.ambrosoft.bloom
import java.nio.ByteBuffer
import java.security.MessageDigest
import scala.collection.mutable
import scala.io.Source
/**
* Created by jacek on 3/4/16.
*/
/** Simple implementation of a mutable Bloom Filter as specified in
* http://codekata.com/kata/kata05-bloom-filters/
*
* Strings can be added to the set but not removed!
*
* Scala allows for an extremely concise expression
* at the cost of some (small) runtime inefficiencies
*
* @param setSize size of the filter's storage in bits
* @param multiHashFuns sequence of String hashing functions returning Seqs of integer hash values
*/
class BloomFilter[-T](setSize: Int, multiHashFuns: Seq[T => Seq[Int]]) {

  // Backing bit storage; a set bit means "some added word hashed here".
  private val bits = new mutable.BitSet(setSize)

  // Maps a word to its bit positions: apply every hash function, then clamp
  // each hash to a non-negative index below setSize.
  private def bitPositions(word: T): Seq[Int] =
    for {
      hashFun <- multiHashFuns
      hash    <- hashFun(word)
    } yield (hash & 0x7fffffff) % setSize

  /** Adds the given word to the set represented by this filter.
    * Words can be added but never removed.
    */
  def add(word: T) {
    bitPositions(word).foreach(bits.add)
  }

  /** Tests whether `word` may belong to the set of added words.
    *
    * @return false iff the word was definitely never added;
    *         a true result might be a false positive
    */
  def contains(word: T): Boolean =
    bitPositions(word).forall(bits.contains)

  /** Adds every word produced by the iterator to the filter. */
  def addAll(words: Iterator[T]) {
    words.foreach(add)
  }

  /** Resets the filter to the empty set. */
  def clear() {
    bits.clear()
  }
}
// test a filter
// Demo entry point: fills a filter from a bundled word list and probes a few words.
object BloomFilter extends App {

  // MD5 value: array of 16 bytes (128 bits)
  private def md5(word: String): Array[Byte] =
    MessageDigest.getInstance("MD5").digest(word.getBytes)

  // from 16-byte array extract 4 integers into a list
  private def toInts(array: Array[Byte]) = {
    val buffer = ByteBuffer.wrap(array)
    Seq(buffer.getInt, buffer.getInt, buffer.getInt, buffer.getInt)
  }

  // 8 Mib of bit storage
  private val filterSize = 1024 * 1024 * 8

  // create a filter to use the supplied hashing functions
  // (4 ints derived from MD5, plus the JVM String hash = 5 hash values per word)
  val filter = new BloomFilter[String](filterSize, Seq(
    (word: String) => toInts(md5(word)),
    (word: String) => Seq(word.hashCode)
  ))

  private val wordlistURL = getClass.getResource("/wordlist.txt")
  // NOTE(review): the Sources opened below are never closed; acceptable for a
  // demo, but real code should close them (e.g. via scala.util.Using).
  filter.addAll(Source.fromURL(wordlistURL).getLines())

  // Prints whether `word` tests as a member (true may be a false positive).
  def containsTest(word: String) {
    println(s"\\'$word\\' is member? ${filter.contains(word)}")
  }

  containsTest("hurry")
  containsTest("atom")
  containsTest("atox") // shouldn't be there
  containsTest("atomm") // shouldn't be there

  // Every word that was added must test positive (no false negatives).
  println("test all")
  println(Source.fromURL(wordlistURL).getLines().forall(filter.contains))
}
| JacekAmbroziak/Ambrosoft | src/main/scala/com/ambrosoft/bloom/BloomFilter.scala | Scala | apache-2.0 | 2,862 |
/*
* Copyright 2017-2022 Viktor Lövgren
*
* SPDX-License-Identifier: MIT
*/
package ciris
import cats.{Eq, Show}
import cats.implicits._
import java.nio.charset.Charset
import java.nio.file.Path
/**
* Provides a description of a key used for loading configuration values.
*
* @example {{{
* scala> val apiKey = ConfigKey.env("API_KEY")
* apiKey: ConfigKey = ConfigKey(environment variable API_KEY)
*
* scala> apiKey.description
* res0: String = environment variable API_KEY
* }}}
*/
sealed abstract class ConfigKey {

  /**
   * Returns a description of a key used for loading
   * configuration values.
   *
   * Instances are created via the companion's `apply`,
   * `env`, `file`, and `prop` factories.
   */
  def description: String
}
/**
* @groupname Create Creating Instances
* @groupprio Create 0
*
* @groupname Instances Type Class Instances
* @groupprio Instances 1
*/
object ConfigKey {

  /**
   * Returns a new [[ConfigKey]] with the specified description.
   *
   * The description is taken by-name and re-evaluated on
   * every access.
   *
   * @group Create
   */
  final def apply(description: => String): ConfigKey = {
    def describe: String = description

    new ConfigKey {
      override final def description: String =
        describe

      override final def hashCode: Int =
        description.hashCode

      override final def equals(that: Any): Boolean =
        that match {
          case other: ConfigKey => configKeyEq.eqv(this, other)
          case _                => false
        }

      override final def toString: String =
        s"ConfigKey($description)"
    }
  }

  /**
   * Returns a new [[ConfigKey]] for the specified environment variable.
   *
   * @example {{{
   * scala> val apiKey = ConfigKey.env("API_KEY")
   * apiKey: ConfigKey = ConfigKey(environment variable API_KEY)
   *
   * scala> apiKey.description
   * res0: String = environment variable API_KEY
   * }}}
   *
   * @group Create
   */
  final def env(name: String): ConfigKey =
    apply(s"environment variable $name")

  /**
   * Returns a new [[ConfigKey]] for the specified path and charset.
   *
   * @group Create
   */
  final def file(path: Path, charset: Charset): ConfigKey =
    apply(s"file at $path with charset $charset")

  /**
   * Returns a new [[ConfigKey]] for the specified system property.
   *
   * @example {{{
   * scala> val apiKey = ConfigKey.prop("api.key")
   * apiKey: ConfigKey = ConfigKey(system property api.key)
   *
   * scala> apiKey.description
   * res0: String = system property api.key
   * }}}
   *
   * @group Create
   */
  final def prop(name: String): ConfigKey =
    apply(s"system property $name")

  /**
   * Extracts the description of the specified [[ConfigKey]],
   * enabling pattern matching on keys.
   *
   * @example {{{
   * scala> val apiKey = ConfigKey.env("API_KEY")
   * apiKey: ConfigKey = ConfigKey(environment variable API_KEY)
   *
   * scala> apiKey match { case ConfigKey(description) => description }
   * res0: String = environment variable API_KEY
   * }}}
   *
   * @group Create
   */
  final def unapply(key: ConfigKey): Some[String] =
    Some(key.description)

  /**
   * @group Instances
   */
  implicit final val configKeyEq: Eq[ConfigKey] =
    Eq.by(_.description)

  /**
   * @group Instances
   */
  implicit final val configKeyShow: Show[ConfigKey] =
    Show.fromToString
}
| vlovgr/ciris | modules/core/src/main/scala/ciris/ConfigKey.scala | Scala | mit | 3,321 |
package com.rocketfuel.sdbc.cassandra
import com.datastax.oss.driver.api.core.`type`.DataType
import com.datastax.oss.driver.api.core.data.UdtValue
import com.datastax.oss.driver.api.core.metadata.token.Token
import com.rocketfuel.sdbc.base
import java.lang
import java.math.BigInteger
import java.net.InetAddress
import java.nio.ByteBuffer
import java.time.{Instant, LocalDate, LocalTime}
import java.util.UUID
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
import scodec.bits.ByteVector
import shapeless.HList
import shapeless.ops.hlist.{Mapper, ToTraversable}
import shapeless.ops.product.ToHList
trait ParameterValue
extends base.ParameterValue
with base.CompiledParameterizedQuery {
  // Binding target: the Datastax v4 driver's bound-statement builder.
  override type PreparedStatement = com.datastax.oss.driver.api.core.cql.BoundStatementBuilder

  // Binds NULL at the given position (used when a parameter value is absent).
  override protected def setNone(preparedStatement: PreparedStatement, parameterIndex: Int): PreparedStatement = {
    preparedStatement.setToNull(parameterIndex)
    preparedStatement
  }

  implicit val BooleanParameter: Parameter[Boolean] = {
    (value: Boolean) => (statement: PreparedStatement, ix: Int) =>
      statement.setBoolean(ix, value)
  }

  implicit val BoxedBooleanParameter: Parameter[lang.Boolean] = Parameter.derived[lang.Boolean, Boolean]

  //We're using ByteVectors, since they're much more easily testable than Array[Byte].
  //IE equality actually works. Also, they're immutable.
  implicit val ByteVectorParameter: Parameter[ByteVector] = {
    (value: ByteVector) =>
      // Converted once up front, so every bind of this value reuses the buffer.
      val bufferValue = value.toByteBuffer
      (statement: PreparedStatement, parameterIndex: Int) =>
        statement.setByteBuffer(parameterIndex, bufferValue)
  }

  // The remaining byte-sequence types are routed through ByteVector (see note above).
  implicit val ByteBufferParameter: Parameter[ByteBuffer] = Parameter.converted[ByteBuffer, ByteVector](ByteVector(_))

  implicit val ArrayByteParameter: Parameter[Array[Byte]] = Parameter.converted[Array[Byte], ByteVector](ByteVector(_))

  implicit val SeqByteParameter: Parameter[Seq[Byte]] = Parameter.converted[Seq[Byte], ByteVector](ByteVector(_))

  implicit val LocalDateParameter: Parameter[LocalDate] = {
    (value: LocalDate) => (statement: PreparedStatement, parameterIndex: Int) =>
      statement.setLocalDate(parameterIndex, value)
  }

  implicit val LocalTimeParameter: Parameter[LocalTime] = {
    (value: LocalTime) => (statement: PreparedStatement, parameterIndex: Int) =>
      statement.setLocalTime(parameterIndex, value)
  }

  implicit val InstantParameter: Parameter[Instant] = {
    (value: Instant) => (statement: PreparedStatement, parameterIndex: Int) =>
      statement.setInstant(parameterIndex, value)
  }

  implicit val JavaBigDecimalParameter: Parameter[java.math.BigDecimal] = {
    (value: java.math.BigDecimal) => (statement: PreparedStatement, parameterIndex: Int) =>
      statement.setBigDecimal(parameterIndex, value)
  }

  // Scala BigDecimal is bound via its underlying java.math.BigDecimal.
  implicit val BigDecimalParameter: Parameter[BigDecimal] =
    Parameter.converted[BigDecimal, java.math.BigDecimal](_.underlying())

  implicit val DoubleParameter: Parameter[Double] = {
    (value: Double) => (statement: PreparedStatement, ix: Int) =>
      statement.setDouble(ix, value)
  }

  implicit val BoxedDoubleParameter: Parameter[lang.Double] = Parameter.derived[lang.Double, Double]

  implicit val FloatParameter: Parameter[Float] = {
    (value: Float) => (statement: PreparedStatement, ix: Int) =>
      statement.setFloat(ix, value)
  }

  implicit val BoxedFloatParameter: Parameter[lang.Float] = Parameter.derived[lang.Float, Float]
implicit val InetAddressParameter: Parameter[InetAddress] = {
(value: InetAddress) => (statement: PreparedStatement, ix: Int) =>
statement.setInetAddress(ix, value)
}
implicit val IntParameter: Parameter[Int] = {
(value: Int) => (statement: PreparedStatement, ix: Int) =>
statement.setInt(ix, value)
}
implicit val BoxedIntParameter: Parameter[Integer] = Parameter.derived[Integer, Int]
implicit def JavaSeqParameter[T](implicit c: ClassTag[T]): Parameter[java.util.List[T]] = {
(value: java.util.List[T]) => (statement: PreparedStatement, ix: Int) =>
statement.setList[T](ix, value, c.runtimeClass.asInstanceOf[Class[T]])
}
implicit def SeqParameter[T](implicit c: ClassTag[T]): Parameter[Seq[T]] =
Parameter.converted[Seq[T], java.util.List[T]](_.asJava)
implicit val LongParameter: Parameter[Long] = {
(value: Long) => (statement: PreparedStatement, ix: Int) =>
statement.setLong(ix, value)
}
implicit val BoxedLongParameter: Parameter[lang.Long] = Parameter.derived[lang.Long, Long]
implicit def JavaMapParameter[Key, Value](implicit k: ClassTag[Key], v: ClassTag[Value]): Parameter[java.util.Map[Key, Value]] = {
(value: java.util.Map[Key, Value]) => (statement: PreparedStatement, ix: Int) =>
statement.setMap[Key, Value](ix, value, k.runtimeClass.asInstanceOf[Class[Key]], v.runtimeClass.asInstanceOf[Class[Value]])
}
implicit def MapParameter[Key, Value](implicit k: ClassTag[Key], v: ClassTag[Value]): Parameter[Map[Key, Value]] =
Parameter.converted[Map[Key, Value], java.util.Map[Key, Value]](_.asJava)
implicit def JavaSetParameter[T](implicit c: ClassTag[T]): Parameter[java.util.Set[T]] = {
(value: java.util.Set[T]) => (statement: PreparedStatement, ix: Int) =>
statement.setSet[T](ix, value, c.runtimeClass.asInstanceOf[Class[T]])
}
implicit def SetParameter[T](implicit c: ClassTag[T]): Parameter[Set[T]] =
Parameter.converted[Set[T], java.util.Set[T]](_.asJava)
implicit val StringParameter: Parameter[String] = {
(value: String) => (statement: PreparedStatement, ix: Int) =>
statement.setString(ix, value)
}
implicit val UUIDParameter: Parameter[UUID] = {
(value: UUID) => (statement: PreparedStatement, ix: Int) =>
statement.setUuid(ix, value)
}
implicit val TokenParameter: Parameter[Token] = {
(value: Token) => (statement: PreparedStatement, ix: Int) =>
statement.setToken(ix, value)
}
implicit val TupleValueParameter: Parameter[TupleValue] = {
(value: TupleValue) => (statement: PreparedStatement, ix: Int) =>
statement.setTupleValue(ix, value.underlying)
}
implicit val UDTValueParameter: Parameter[UdtValue] = {
(value: UdtValue) => (statement: PreparedStatement, ix: Int) =>
statement.setUdtValue(ix, value)
}
implicit val BigIntegerParameter: Parameter[BigInteger] = {
(value: BigInteger) => (statement: PreparedStatement, ix: Int) =>
statement.setBigInteger(ix, value)
}
implicit def hlistParameterValue[
H <: HList,
ListH <: HList,
MappedTypesH <: HList,
MappedValuesH <: HList
](h: H
)(implicit dataTypeMapper: Mapper.Aux[TupleDataType.ToDataType.type, H, MappedTypesH],
dataTypeList: ToTraversable.Aux[MappedTypesH, Seq, DataType],
dataValueMapper: Mapper.Aux[TupleDataType.ToDataValue.type, H, MappedValuesH],
dataValueList: ToTraversable.Aux[MappedValuesH, Seq, AnyRef]
): ParameterValue = {
TupleValue.hlistTupleValue(h)
}
implicit def productParameterValue[
P,
H <: HList,
ListH <: HList,
MappedTypesH <: HList,
MappedValuesH <: HList
](p: P
)(implicit toHList: ToHList.Aux[P, H],
dataTypeMapper: Mapper.Aux[TupleDataType.ToDataType.type, H, MappedTypesH],
dataTypeList: ToTraversable.Aux[MappedTypesH, Seq, DataType],
dataValueMapper: Mapper.Aux[TupleDataType.ToDataValue.type, H, MappedValuesH],
dataValueList: ToTraversable.Aux[MappedValuesH, Seq, AnyRef],
toParameterValue: TupleValue => ParameterValue
): ParameterValue = {
val asTupleValue = TupleValue.productTupleValue(p)
toParameterValue(asTupleValue)
}
}
| rocketfuel/sdbc | cassandra/src/main/scala/com/rocketfuel/sdbc/cassandra/ParameterValue.scala | Scala | bsd-3-clause | 7,664 |
package com.twitter.finagle.kestrel.unit
import _root_.java.net.{InetSocketAddress, SocketAddress}
import _root_.java.nio.charset.Charset
import _root_.java.util.concurrent.{BlockingDeque, ExecutorService, Executors, LinkedBlockingDeque}
import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.twitter.concurrent.{Broker, Spool}
import com.twitter.conversions.time._
import com.twitter.finagle.builder.{ClientBuilder, ClientConfig, Cluster}
import com.twitter.finagle.kestrel._
import com.twitter.finagle.kestrel.protocol.{Command, Response, Set}
import com.twitter.finagle.{Addr, ClientConnection, Service, ServiceFactory}
import com.twitter.io.Buf
import com.twitter.util._
import org.junit.runner.RunWith
import org.mockito.Mockito
import org.mockito.Mockito.{times, verify, when}
import org.scalatest.FunSuite
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
import scala.language.postfixOps
import scala.collection.immutable.{Set => ISet}
import scala.collection.mutable.{ArrayBuffer, Set => MSet}
@RunWith(classOf[JUnitRunner])
class MultiReaderTest extends FunSuite with MockitoSugar with Eventually with IntegrationPatience {
  /**
   * A fake [[ReadHandle]] backed by two Brokers, so tests can push messages and
   * errors into it directly via `_messages` and `_error`. Instances are wrapped
   * with `Mockito.spy` so `close()` invocations can be verified.
   */
  class MockHandle extends ReadHandle {
    val _messages = new Broker[ReadMessage]
    val _error = new Broker[Throwable]
    val messages = _messages.recv
    val error = _error.recv
    def close() {} // to spy on!
  }
  /**
   * Fixture for the "static cluster" tests: N spied MockHandles exposed through
   * a constant `Var` of the handle set, as `MultiReaderHelper.merge` expects.
   */
  trait MultiReaderHelper {
    val queueName = "the_queue"
    val queueNameBuf = Buf.Utf8(queueName)
    val N = 3
    val handles = (0 until N) map { _ => Mockito.spy(new MockHandle) }
    val va: Var[Return[ISet[ReadHandle]]] = Var.value(Return(handles.toSet))
  }
trait AddrClusterHelper {
val queueName = "the_queue"
val queueNameBuf = Buf.Utf8(queueName)
val N = 3
val hosts = 0 until N map { i =>
InetSocketAddress.createUnresolved("10.0.0.%d".format(i), 22133)
}
val executor = Executors.newCachedThreadPool()
def newKestrelService(
executor: Option[ExecutorService],
queues: LoadingCache[Buf, BlockingDeque[Buf]]
): Service[Command, Response] = {
val interpreter = new Interpreter(queues)
new Service[Command, Response] {
def apply(request: Command) = {
val promise = new Promise[Response]()
executor match {
case Some(exec) =>
exec.submit(new Runnable {
def run() {
promise.setValue(interpreter(request))
}
})
case None => promise.setValue(interpreter(request))
}
promise
}
}
}
val hostQueuesMap = hosts.map { host =>
val queues = CacheBuilder.newBuilder()
.build(new CacheLoader[Buf, BlockingDeque[Buf]] {
def load(k: Buf) = new LinkedBlockingDeque[Buf]
})
(host, queues)
}.toMap
lazy val mockClientBuilder = {
val result = mock[ClientBuilder[Command, Response, Nothing, ClientConfig.Yes, ClientConfig.Yes]]
hosts.foreach { host =>
val mockHostClientBuilder =
mock[ClientBuilder[Command, Response, ClientConfig.Yes, ClientConfig.Yes, ClientConfig.Yes]]
when(result.hosts(host)) thenReturn mockHostClientBuilder
val queues = hostQueuesMap(host)
val factory = new ServiceFactory[Command, Response] {
// use an executor so readReliably doesn't block waiting on an empty queue
def apply(conn: ClientConnection) =
Future.value(newKestrelService(Some(executor), queues))
def close(deadline: Time) = Future.Done
override def toString() = "ServiceFactory for %s".format(host)
}
when(mockHostClientBuilder.buildFactory()) thenReturn factory
}
result
}
val services = hosts.map { host =>
val queues = hostQueuesMap(host)
// no executor here: this one is used for writing to the queues
newKestrelService(None, queues)
}
def configureMessageReader(handle: ReadHandle): MSet[String] = {
val messages = MSet[String]()
val UTF8 = Charset.forName("UTF-8")
handle.messages foreach { msg =>
val Buf.Utf8(str) = msg.bytes
messages += str
msg.ack.sync()
}
messages
}
}
  /**
   * Fixture for the deprecated Cluster-based tests: same in-memory kestrel setup as
   * AddrClusterHelper, but membership changes flow through a hand-rolled
   * [[DynamicCluster]] instead of a Var[Addr].
   *
   * NOTE(review): most members duplicate AddrClusterHelper; a shared base trait
   * would remove the copy/paste.
   */
  trait DynamicClusterHelper {
    /** Minimal mutable Cluster: membership changes are published as a Spool of Change events. */
    class DynamicCluster[U](initial: Seq[U]) extends Cluster[U] {
      def this() = this(Seq[U]())
      var set = initial.toSet
      var s = new Promise[Spool[Cluster.Change[U]]]
      def add(f: U) = {
        set += f
        performChange(Cluster.Add(f))
      }
      def del(f: U) = {
        set -= f
        performChange(Cluster.Rem(f))
      }
      // Appends the change to the spool and swaps in a fresh tail promise.
      private[this] def performChange(change: Cluster.Change[U]) = synchronized {
        val newTail = new Promise[Spool[Cluster.Change[U]]]
        s() = Return(change *:: newTail)
        s = newTail
      }
      def snap = (set.toSeq, s)
    }
    val N = 3
    val hosts = 0 until N map { i => InetSocketAddress.createUnresolved("10.0.0.%d".format(i), 22133) }
    val executor = Executors.newCachedThreadPool()
    // In-memory kestrel service; async when an executor is given (see AddrClusterHelper).
    def newKestrelService(
      executor: Option[ExecutorService],
      queues: LoadingCache[Buf, BlockingDeque[Buf]]
    ): Service[Command, Response] = {
      val interpreter = new Interpreter(queues)
      new Service[Command, Response] {
        def apply(request: Command) = {
          val promise = new Promise[Response]()
          executor match {
            case Some(exec) =>
              exec.submit(new Runnable {
                def run() {
                  promise.setValue(interpreter(request))
                }
              })
            case None => promise.setValue(interpreter(request))
          }
          promise
        }
      }
    }
    val hostQueuesMap = hosts.map { host =>
      val queues = CacheBuilder.newBuilder()
        .build(new CacheLoader[Buf, BlockingDeque[Buf]] {
        def load(k: Buf) = new LinkedBlockingDeque[Buf]
      })
      (host, queues)
    }.toMap
    lazy val mockClientBuilder = {
      val result = mock[ClientBuilder[Command, Response, Nothing, ClientConfig.Yes, ClientConfig.Yes]]
      hosts.foreach { host =>
        val mockHostClientBuilder =
          mock[ClientBuilder[Command, Response, ClientConfig.Yes, ClientConfig.Yes, ClientConfig.Yes]]
        when(result.hosts(host)) thenReturn mockHostClientBuilder
        val queues = hostQueuesMap(host)
        val factory = new ServiceFactory[Command, Response] {
          // use an executor so readReliably doesn't block waiting on an empty queue
          def apply(conn: ClientConnection) = Future(newKestrelService(Some(executor), queues))
          def close(deadline: Time) = Future.Done
          override def toString() = "ServiceFactory for %s".format(host)
        }
        when(mockHostClientBuilder.buildFactory()) thenReturn factory
      }
      result
    }
    val services = hosts.map { host =>
      val queues = hostQueuesMap(host)
      // no executor here: this one is used for writing to the queues
      newKestrelService(None, queues)
    }
    // Drains the handle into a set of decoded strings, acking each message.
    def configureMessageReader(handle: ReadHandle): MSet[String] = {
      val messages = MSet[String]()
      handle.messages foreach { msg =>
        val Buf.Utf8(str) = msg.bytes
        messages += str
        msg.ack.sync()
      }
      messages
    }
  }
  // ---- Static (fixed-membership) ReadHandle cluster behavior ----
  test("static ReadHandle cluster should always grab the first available message") {
    new MultiReaderHelper {
      val handle = MultiReaderHelper.merge(va)
      val messages = new ArrayBuffer[ReadMessage]
      handle.messages foreach { messages += _ }
      // stripe some messages across
      val sentMessages = 0 until N * 100 map { _ => mock[ReadMessage] }
      assert(messages.size == 0)
      sentMessages.zipWithIndex foreach { case (m, i) =>
        handles(i % handles.size)._messages ! m
      }
      assert(messages == sentMessages)
    }
  }
  test("static ReadHandle cluster should round robin from multiple available queues") {
    // We use frozen time for deterministic randomness.
    new MultiReaderHelper {
      Time.withTimeAt(Time.epoch + 1.seconds) { _ =>
        // stuff the queues beforehand
        val ms = handles map { h =>
          val m = mock[ReadMessage]
          h._messages ! m
          m
        }
        val handle = MultiReaderHelper.merge(va)
        // Three reads should see one message from each of the three handles.
        assert(
          ISet((handle.messages ??), (handle.messages ??), (handle.messages ??)) ==
            ISet(ms(0), ms(1), ms(2))
        )
      }
    }
  }
  test("static ReadHandle cluster should propagate closes") {
    new MultiReaderHelper {
      handles foreach { h => verify(h, times(0)).close() }
      val handle = MultiReaderHelper.merge(va)
      handle.close()
      handles foreach { h => verify(h).close() }
    }
  }
  test("static ReadHandle cluster should propagate errors when everything's errored out") {
    new MultiReaderHelper {
      val handle = MultiReaderHelper.merge(va)
      val e = handle.error.sync()
      // The merged handle only fails once every underlying handle has failed.
      handles foreach { h =>
        assert(e.isDefined == false)
        h._error ! new Exception("sad panda")
      }
      assert(e.isDefined == true)
      assert(Await.result(e) == AllHandlesDiedException)
    }
  }
  // ---- Var[Addr]-based cluster behavior (membership driven by an updatable Var) ----
  test("Var[Addr]-based cluster should read messages from a ready cluster") {
    new AddrClusterHelper {
      val va = Var(Addr.Bound(hosts: _*))
      val handle = MultiReader(va, queueName).clientBuilder(mockClientBuilder).build()
      val messages = configureMessageReader(handle)
      val sentMessages = 0 until N * 10 map { i => "message %d".format(i) }
      assert(messages.size == 0)
      sentMessages.zipWithIndex foreach { case (m, i) =>
        Await.result(services(i % services.size).apply(Set(queueNameBuf, Time.now, Buf.Utf8(m))))
      }
      eventually {
        assert(messages == sentMessages.toSet)
      }
    }
  }
  test("Var[Addr]-based cluster should read messages as cluster hosts are added") {
    new AddrClusterHelper {
      // Start with a single host, then grow to the full set mid-test.
      val va = Var(Addr.Bound(hosts.head))
      val handle = MultiReader(va, queueName).clientBuilder(mockClientBuilder).build()
      val messages = configureMessageReader(handle)
      val sentMessages = 0 until N * 10 map { i => "message %d".format(i) }
      assert(messages.size == 0)
      sentMessages.zipWithIndex foreach { case (m, i) =>
        Await.result(services(i % services.size).apply(Set(queueNameBuf, Time.now, Buf.Utf8(m))))
      }
      // 0, 3, 6 ...
      eventually {
        assert(messages == sentMessages.grouped(N).map { _.head }.toSet)
      }
      messages.clear()
      va.update(Addr.Bound(hosts: _*))
      // 1, 2, 4, 5, ...
      eventually {
        assert(messages == sentMessages.grouped(N).map { _.tail }.flatten.toSet)
      }
    }
  }
  test("Var[Addr]-based cluster should read messages as cluster hosts are removed") {
    new AddrClusterHelper {
      var mutableHosts: Seq[SocketAddress] = hosts
      val va = Var(Addr.Bound(mutableHosts: _*))
      val rest = hosts.tail.reverse
      val handle = MultiReader(va, queueName).clientBuilder(mockClientBuilder).build()
      val messages = configureMessageReader(handle)
      val sentMessages = 0 until N * 10 map { i => "message %d".format(i) }
      assert(messages.size == 0)
      sentMessages.zipWithIndex foreach { case (m, i) =>
        Await.result(services(i % services.size).apply(Set(queueNameBuf, Time.now, Buf.Utf8(m))))
      }
      eventually {
        assert(messages == sentMessages.toSet)
      }
      // Drop one host at a time and verify only the remaining hosts are read from.
      rest.zipWithIndex.foreach { case (host, hostIndex) =>
        messages.clear()
        mutableHosts = (mutableHosts.toSet - host).toSeq
        va.update(Addr.Bound(mutableHosts: _*))
        // write to all 3
        sentMessages.zipWithIndex foreach { case (m, i) =>
          Await.result(services(i % services.size).apply(Set(queueNameBuf, Time.now, Buf.Utf8(m))))
        }
        // expect fewer to be read on each pass
        val expectFirstN = N - hostIndex - 1
        eventually {
          assert(messages == sentMessages.grouped(N).map { _.take(expectFirstN) }.flatten.toSet)
        }
      }
    }
  }
  test("Var[Addr]-based cluster should wait for cluster to become ready before snapping initial hosts") {
    new AddrClusterHelper {
      val va = Var(Addr.Bound())
      val handle = MultiReader(va, queueName).clientBuilder(mockClientBuilder).build()
      val messages = configureMessageReader(handle)
      val error = handle.error.sync()
      val sentMessages = 0 until N * 10 map { i => "message %d".format(i) }
      assert(messages.size == 0)
      sentMessages.zipWithIndex foreach { case (m, i) =>
        Await.result(services(i % services.size).apply(Set(queueNameBuf, Time.now, Buf.Utf8(m))))
      }
      assert(messages.size == 0) // cluster not ready
      assert(error.isDefined == false)
      va.update(Addr.Bound(hosts: _*))
      eventually {
        assert(messages == sentMessages.toSet)
      }
    }
  }
  test("Var[Addr]-based cluster should report an error if all hosts are removed") {
    new AddrClusterHelper {
      val va = Var(Addr.Bound(hosts: _*))
      val handle = MultiReader(va, queueName).clientBuilder(mockClientBuilder).build()
      val error = handle.error.sync()
      va.update(Addr.Bound())
      assert(error.isDefined == true)
      assert(Await.result(error) == AllHandlesDiedException)
    }
  }
  test("Var[Addr]-based cluster should propagate exception if cluster fails") {
    new AddrClusterHelper {
      val ex = new Exception("uh oh")
      val va: Var[Addr] with Updatable[Addr] = Var(Addr.Bound(hosts: _*))
      val handle = MultiReader(va, queueName).clientBuilder(mockClientBuilder).build()
      val error = handle.error.sync()
      va.update(Addr.Failed(ex))
      assert(error.isDefined == true)
      assert(Await.result(error) == ex)
    }
  }
  // ---- Cluster[SocketAddress]-based behavior (legacy dynamic cluster API) ----
  test("dynamic SocketAddress cluster should read messages from a ready cluster") {
    new DynamicClusterHelper {
      val cluster = new DynamicCluster[SocketAddress](hosts)
      val handle = MultiReader(cluster, "the_queue").clientBuilder(mockClientBuilder).build()
      val messages = configureMessageReader(handle)
      val sentMessages = 0 until N * 10 map { i => "message %d".format(i) }
      assert(messages.size == 0)
      sentMessages.zipWithIndex foreach { case (m, i) =>
        Await.result(services(i % services.size).apply(Set(Buf.Utf8("the_queue"), Time.now, Buf.Utf8(m))))
      }
      eventually {
        assert(messages == sentMessages.toSet)
      }
    }
  }
  test("dynamic SocketAddress cluster should read messages as cluster hosts are added") {
    new DynamicClusterHelper {
      val (host, rest) = (hosts.head, hosts.tail)
      val cluster = new DynamicCluster[SocketAddress](List(host))
      val handle = MultiReader(cluster, "the_queue").clientBuilder(mockClientBuilder).build()
      val messages = configureMessageReader(handle)
      val sentMessages = 0 until N * 10 map { i => "message %d".format(i) }
      assert(messages.size == 0)
      sentMessages.zipWithIndex foreach { case (m, i) =>
        Await.result(services(i % services.size).apply(Set(Buf.Utf8("the_queue"), Time.now, Buf.Utf8(m))))
      }
      // 0, 3, 6 ...
      eventually {
        assert(messages == sentMessages.grouped(N).map { _.head }.toSet)
      }
      messages.clear()
      rest.foreach { host => cluster.add(host) }
      // 1, 2, 4, 5, ...
      eventually {
        assert(messages == sentMessages.grouped(N).map { _.tail }.flatten.toSet)
      }
    }
  }
  test("dynamic SocketAddress cluster should read messages as cluster hosts are removed") {
    new DynamicClusterHelper {
      val cluster = new DynamicCluster[SocketAddress](hosts)
      val rest = hosts.tail
      val handle = MultiReader(cluster, "the_queue").clientBuilder(mockClientBuilder).build()
      val messages = configureMessageReader(handle)
      val sentMessages = 0 until N * 10 map { i => "message %d".format(i) }
      assert(messages.size == 0)
      sentMessages.zipWithIndex foreach { case (m, i) =>
        Await.result(services(i % services.size).apply(Set(Buf.Utf8("the_queue"), Time.now, Buf.Utf8(m))))
      }
      eventually {
        assert(messages == sentMessages.toSet)
      }
      // Remove hosts one at a time; fewer messages should be read each pass.
      rest.reverse.zipWithIndex.foreach { case (host, hostIndex) =>
        messages.clear()
        cluster.del(host)
        // write to all 3
        sentMessages.zipWithIndex foreach { case (m, i) =>
          Await.result(services(i % services.size).apply(Set(Buf.Utf8("the_queue"), Time.now, Buf.Utf8(m))))
        }
        // expect fewer to be read on each pass
        val expectFirstN = N - hostIndex - 1
        eventually {
          assert(messages == sentMessages.grouped(N).map { _.take(expectFirstN) }.flatten.toSet)
        }
      }
    }
  }
  test("dynamic SocketAddress cluster should wait " +
    "for cluster to become ready before snapping initial hosts") {
    new DynamicClusterHelper {
      val cluster = new DynamicCluster[SocketAddress](Seq())
      val handle = MultiReader(cluster, "the_queue").clientBuilder(mockClientBuilder).build()
      val messages = configureMessageReader(handle)
      val errors = (handle.error ?)
      val sentMessages = 0 until N * 10 map { i => "message %d".format(i) }
      assert(messages.size == 0)
      sentMessages.zipWithIndex foreach { case (m, i) =>
        Await.result(services(i % services.size).apply(Set(Buf.Utf8("the_queue"), Time.now, Buf.Utf8(m))))
      }
      assert(messages.size == 0) // cluster not ready
      assert(errors.isDefined == false)
      hosts.foreach { host => cluster.add(host) }
      eventually {
        assert(messages == sentMessages.toSet)
      }
    }
  }
  test("dynamic SocketAddress cluster should report an error if all hosts are removed") {
    new DynamicClusterHelper {
      val cluster = new DynamicCluster[SocketAddress](hosts)
      val handle = MultiReader(cluster, "the_queue").clientBuilder(mockClientBuilder).build()
      val e = (handle.error ?)
      hosts.foreach { host => cluster.del(host) }
      assert(e.isDefined == true)
      assert(Await.result(e) == AllHandlesDiedException)
    }
  }
  test("dynamic SocketAddress cluster should silently" +
    " handle the removal of a host that was never added") {
    new DynamicClusterHelper {
      val cluster = new DynamicCluster[SocketAddress](hosts)
      val handle = MultiReader(cluster, "the_queue").clientBuilder(mockClientBuilder).build()
      val messages = configureMessageReader(handle)
      val sentMessages = 0 until N * 10 map { i => "message %d".format(i) }
      sentMessages.zipWithIndex foreach { case (m, i) =>
        Await.result(services(i % services.size).apply(Set(Buf.Utf8("the_queue"), Time.now, Buf.Utf8(m))))
      }
      eventually {
        assert(messages == sentMessages.toSet)
      }
      messages.clear()
      // Deleting an unknown host must not disturb reads from the real members.
      cluster.del(InetSocketAddress.createUnresolved("10.0.0.100", 22133))
      sentMessages.zipWithIndex foreach { case (m, i) =>
        Await.result(services(i % services.size).apply(Set(Buf.Utf8("the_queue"), Time.now, Buf.Utf8(m))))
      }
      eventually {
        assert(messages == sentMessages.toSet)
      }
    }
  }
}
| liamstewart/finagle | finagle-kestrel/src/test/scala/com/twitter/finagle/kestrel/unit/MultiReaderTest.scala | Scala | apache-2.0 | 19,323 |
package spray.json
import scala.util.Try
import scalaz._
import scalaz.Scalaz._
import spray.json.Scalaz._
import spray.json.DefaultJsonProtocol._
/**
 * spray-json formats for common scalaz data types. All formats delegate to the
 * corresponding DefaultJsonProtocol format and convert via `xmap` (invariant map,
 * provided by the `spray.json.Scalaz._` import).
 */
trait Formats {
  /** [[Maybe]] via the standard Option format. */
  implicit def maybeFormat[T: JsonFormat]: JsonFormat[Maybe[T]] =
    optionFormat[T].xmap(Maybe.fromOption, _.toOption)
  /** [[LazyOption]] via the standard Option format. */
  implicit def lazyOptionFormat[T: JsonFormat]: JsonFormat[LazyOption[T]] =
    optionFormat[T].xmap(LazyOption.fromOption, _.toOption)
  /** scalaz disjunction via the standard Either format. */
  implicit def disjunctionFormat[A: JsonFormat, B: JsonFormat]: JsonFormat[A \\/ B] =
    eitherFormat[A, B].xmap(\\/.fromEither, _.toEither)
  /** [[LazyEither]] via the standard Either format. */
  implicit def lazyEitherFormat[A: JsonFormat, B: JsonFormat]: JsonFormat[A LazyEither B] =
    eitherFormat[A, B].xmap(_.fold(x => LazyEither.lazyLeft(x), x => LazyEither.lazyRight(x)), _.toEither)
  /**
   * These (`\\&/`): `Both` is written as an (A, B) tuple, `This`/`That` as the bare value.
   * Reading tries (A, B), then A, then B, in that order; note that if A and B have
   * overlapping JSON representations the first successful decoding wins.
   */
  implicit def theseFormat[A: JsonFormat, B: JsonFormat]: JsonFormat[A \\&/ B] = new JsonFormat[A \\&/ B] {
    def write(obj: A \\&/ B): JsValue =
      obj.fold(_.toJson, _.toJson, (a: A, b: B) => (a, b).toJson)
    def read(json: JsValue): A \\&/ B =
      Try(json.convertTo[(A, B)].both)
        .orElse(Try(json.convertTo[A].wrapThis[B]))
        .orElse(Try(json.convertTo[B].wrapThat[A]))
        .getOrElse(deserializationError("invalid json"))
  }
  /**
   * [[NonEmptyList]] as a JSON array.
   *
   * Fix: the previous implementation called `xs.head`/`xs.tail` unconditionally, so an
   * empty JSON array escaped as a raw NoSuchElementException instead of the protocol's
   * DeserializationException. An empty array is now reported as a deserialization error.
   */
  implicit def nelFormat[T: JsonFormat]: JsonFormat[NonEmptyList[T]] =
    (listFormat[T]: JsonFormat[List[T]]).xmap(
      (xs: List[T]) =>
        if (xs.isEmpty) deserializationError("Expected a non-empty JSON array for NonEmptyList")
        else NonEmptyList(xs.head, xs.tail: _*),
      _.list
    )
}
| msimav/spray-contrib-scalaz | src/main/scala/spray/json/Formats.scala | Scala | mit | 1,394 |
package com.itszuvalex.itszulib.testing
import com.itszuvalex.itszulib.core.TileContainer
import net.minecraft.block.material.Material
import net.minecraft.tileentity.TileEntity
import net.minecraft.world.World
/**
 * Created by Christopher Harris (Itszuvalex) on 8/3/15.
 *
 * Iron-material test block whose sole job is to host a [[TileLocTrackerTest]]
 * tile entity. The obfuscated parameter names come from the MCP mappings
 * (world, metadata) and are kept to match the overridden signature.
 */
class BlockLocTrackerTest extends TileContainer(Material.iron) {
  // A fresh tile entity is created per placement; arguments (world, metadata) are unused here.
  override def createNewTileEntity(p_149915_1_ : World, p_149915_2_ : Int): TileEntity = new TileLocTrackerTest
}
| BlockWorker/ItszuLib | src/main/scala/com/itszuvalex/itszulib/testing/BlockLocTrackerTest.scala | Scala | gpl-2.0 | 457 |
package plugins
import _root_.services.HubNodeSubscriptionService
import play.api.{ Logger, Play, Application }
import play.api.Play.current
import models.{ OrganizationConfiguration, HubNode, Role }
import scala.collection.immutable.ListMap
import scala.util.matching.Regex
import play.api.mvc.Handler
import org.bson.types.ObjectId
import util.{ OrganizationConfigurationResourceHolder, OrganizationConfigurationHandler }
import core.services.MemoryServices
import core._
import node.{ NodeDirectoryService, NodeRegistrationService, NodeSubscriptionService }
/**
 * CultureHub plugin that provides hub-node administration: menu entries, CRUD routes,
 * and automatic registration of this hub's own node on configuration load.
 *
 * @author Manuel Bernhardt <bernhardt.manuel@gmail.com>
 */
class HubNodePlugin(app: Application) extends CultureHubPlugin(app) {
  val pluginKey: String = "hubNode"
  // Admin menu: list + create entries, restricted to organization owners.
  override def organizationMenuEntries(configuration: OrganizationConfiguration, lang: String, roles: Seq[String]): Seq[MainMenuEntry] = Seq(
    MainMenuEntry(
      key = "hubNode",
      titleKey = "hubnode.HubNodes",
      roles = Seq(Role.OWN),
      items = Seq(
        MenuElement("/admin/hubNode", "hubnode.ListHubNodes"),
        MenuElement("/admin/hubNode/add", "hubnode.CreateHubNode")
      )
    )
  )
  private val hubNodeController = new controllers.organization.HubNodes()(HubModule)
  // CRUD + membership routes; path args captured by the regex are forwarded positionally.
  override val routes: ListMap[(String, Regex), (List[String], Map[String, String]) => Handler] = ListMap(
    ("GET", """^/admin/hubNode""".r) -> {
      (pathArgs: List[String], queryString: Map[String, String]) => hubNodeController.list
    },
    ("GET", """^/admin/hubNode/add""".r) -> {
      (pathArgs: List[String], queryString: Map[String, String]) => hubNodeController.hubNode(None)
    },
    ("GET", """^/admin/hubNode/([A-Za-z0-9-_]+)/update""".r) -> {
      (pathArgs: List[String], queryString: Map[String, String]) => hubNodeController.hubNode(Some(new ObjectId(pathArgs(0))))
    },
    ("POST", """^/admin/hubNode/submit""".r) -> {
      (pathArgs: List[String], queryString: Map[String, String]) => hubNodeController.submit
    },
    ("POST", """^/admin/hubNode/([A-Za-z0-9-_]+)/addMember""".r) -> {
      (pathArgs: List[String], queryString: Map[String, String]) => hubNodeController.addMember(new ObjectId(pathArgs(0)))
    },
    ("DELETE", """^/admin/hubNode/([A-Za-z0-9-_]+)/removeMember""".r) -> {
      (pathArgs: List[String], queryString: Map[String, String]) => hubNodeController.removeMember(new ObjectId(pathArgs(0)))
    },
    ("DELETE", """^/admin/hubNode/([A-Za-z0-9-_]+)/remove""".r) -> {
      (pathArgs: List[String], queryString: Map[String, String]) => hubNodeController.delete(new ObjectId(pathArgs(0)))
    }
  )
  // note: we abuse the concept of resource holder here and use instead the notification for new organizations
  val hubNodes = new OrganizationConfigurationResourceHolder[OrganizationConfiguration, HubNode]("hubNodes") {
    lazy val nodeDirectoryServiceLocator = HubModule.inject[DomainServiceLocator[NodeDirectoryService]](name = None)
    lazy val nodeRegistrationServiceLocator = HubModule.inject[DomainServiceLocator[NodeRegistrationService]](name = None)
    protected def resourceConfiguration(configuration: OrganizationConfiguration): OrganizationConfiguration = configuration
    // Called when a configuration appears: ensures this hub's own node exists locally
    // and is registered with the node directory. Returns the (possibly new) HubNode,
    // or None on registration failure / inconsistent state.
    protected def onAdd(resourceConfiguration: OrganizationConfiguration): Option[HubNode] = {
      implicit val configuration = resourceConfiguration
      if (HubNode.dao.findOne(configuration.node.nodeId).isEmpty) {
        val registered = nodeDirectoryServiceLocator.byDomain.findOneById(configuration.node.nodeId)
        if (registered.isEmpty) {
          val hubNode = HubNode(
            nodeId = configuration.node.nodeId,
            name = configuration.node.name,
            orgId = configuration.node.orgId
          )
          try {
            info("Attempting to create and register node '%s' for hub".format(configuration.node.nodeId))
            nodeRegistrationServiceLocator.byDomain.registerNode(hubNode, "system")
            HubNode.dao.insert(hubNode)
            info("Node '%s' registered successfully".format(configuration.node.nodeId))
            Some(hubNode)
          } catch {
            // NOTE(review): catching Throwable also swallows fatal errors (OOM, etc.);
            // consider narrowing to NonFatal.
            case t: Throwable =>
              error("Cannot register node for hub", t)
              None
          }
        } else {
          error("System is in inconsistent state: node '%s' for hub is registered, but no local HubNode can be found".format(configuration.node.nodeId))
          None
        }
      } else {
        HubNode.dao.findOne(configuration.node.nodeId)
      }
    }
    protected def onRemove(removed: HubNode) {}
  }
  override def onStart() {
    OrganizationConfigurationHandler.registerResourceHolder(hubNodes)
    // In test/dev mode the in-memory service loses its state between restarts,
    // so re-register all known nodes on startup.
    if (Play.isTest || Play.isDev) {
      OrganizationConfigurationHandler.getAllCurrentConfigurations.foreach { implicit organizationConfiguration =>
        val service = HubServices.nodeRegistrationServiceLocator.byDomain(organizationConfiguration)
        if (service.isInstanceOf[MemoryServices]) {
          HubNode.dao.findAll.foreach { node =>
            service.registerNode(node, "system")
          }
        }
      }
    }
  }
  /**
   * Service instances this plugin provides
   */
  override def services: Seq[Any] = Seq(HubNodePlugin.hubNodeConnectionService)
}
object HubNodePlugin {
lazy val hubNodeConnectionService: NodeSubscriptionService = new HubNodeSubscriptionService()(HubModule)
} | delving/culture-hub | modules/hubNode/app/plugins/HubNodePlugin.scala | Scala | apache-2.0 | 5,376 |
package momijikawa.lacquer
import akka.actor.{ ActorRef, ActorRefFactory, Props }
import akka.io.Tcp.{ Aborted, Closed }
import momijikawa.lacquer.KanColleWebSocketServer.Push
import spray.can.websocket
import spray.can.websocket.FrameCommandFailed
import spray.can.websocket.frame.{ BinaryFrame, TextFrame }
import spray.http.HttpRequest
import spray.routing.HttpServiceActor
object KanColleWebSocketWorker {
  /** Props for a worker bound to the given spray-can server connection actor. */
  def props(serverConnection: ActorRef) = Props(classOf[KanColleWebSocketWorker], serverConnection)
}
/**
 * Per-connection WebSocket worker. Starts in plain-HTTP mode (serving static
 * resources) and switches to `businessLogic` once the WebSocket handshake
 * completes (behavior supplied by [[websocket.WebSocketServerWorker]]).
 *
 * @param serverConnection the underlying spray-can connection actor for this client
 */
class KanColleWebSocketWorker(val serverConnection: ActorRef) extends HttpServiceActor with websocket.WebSocketServerWorker {
  // Before the upgrade: handle handshake requests, fall back to plain HTTP, then close handling.
  override def receive = handshaking orElse businessLogicNoUpgrade orElse closeLogic
  /** Post-handshake behavior: echo frames, push server-initiated messages, stop on close. */
  def businessLogic: Receive = {
    // Echo any binary/text frame back to the peer.
    case x @ (_: BinaryFrame | _: TextFrame) ⇒
      sender ! x
    case Push(msg) ⇒
      // Fix: the log template had no "{}" placeholder, so akka's LoggingAdapter never
      // interpolated `msg` (it appended a "WARNING arguments left" marker instead).
      log.info("Worker is sending data: {}", msg)
      send(TextFrame(msg))
    case x: FrameCommandFailed ⇒
      // Fix: same missing-placeholder issue as above; include the failure in the message.
      log.error("frame command failed: {}", x)
    // Stray HttpRequests after the upgrade are deliberately ignored.
    case x: HttpRequest ⇒
    case Closed | Aborted ⇒
      context.stop(self)
    case unknown ⇒ log.warning("Worker has received unknown message: " + unknown.toString)
  }
  /** Fallback HTTP behavior (no WebSocket upgrade): serve files from the webapp resource dir. */
  def businessLogicNoUpgrade: Receive = {
    implicit val refFactory: ActorRefFactory = context
    runRoute {
      getFromResourceDirectory("webapp")
    }
  }
}
| windymelt/lacquer | src/main/scala/momijikawa/lacquer/KanColleWebSocketWorker.scala | Scala | bsd-3-clause | 1,353 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.calcite
import org.apache.flink.table.types.logical.{DecimalType, DoubleType, LogicalType}
import org.apache.flink.table.typeutils.TypeCheckUtils
import org.apache.calcite.rel.`type`.{RelDataType, RelDataTypeFactory, RelDataTypeSystemImpl}
import org.apache.calcite.sql.`type`.SqlTypeName
/**
  * Custom type system for Flink: overrides Calcite's defaults for numeric
  * precision/scale, string lengths, timestamp precision, and the result types
  * of AVG/SUM aggregates (delegated to Flink's own derivation rules).
  */
class FlinkTypeSystem extends RelDataTypeSystemImpl {
  // set the maximum precision of a NUMERIC or DECIMAL type to DecimalType.MAX_PRECISION.
  override def getMaxNumericPrecision: Int = DecimalType.MAX_PRECISION
  // the max scale can't be greater than precision
  override def getMaxNumericScale: Int = DecimalType.MAX_PRECISION
  override def getDefaultPrecision(typeName: SqlTypeName): Int = typeName match {
    // Calcite will limit the length of the VARCHAR field to 65536
    case SqlTypeName.VARCHAR | SqlTypeName.VARBINARY =>
      Int.MaxValue
    // we currently support only timestamps with milliseconds precision
    case SqlTypeName.TIMESTAMP =>
      3
    case _ =>
      super.getDefaultPrecision(typeName)
  }
  // Lift the length cap for all character/binary types, fixed-length included.
  override def getMaxPrecision(typeName: SqlTypeName): Int = typeName match {
    case SqlTypeName.VARCHAR | SqlTypeName.CHAR | SqlTypeName.VARBINARY | SqlTypeName.BINARY =>
      Int.MaxValue
    case _ =>
      super.getMaxPrecision(typeName)
  }
  // when union a number of CHAR types of different lengths, we should cast to a VARCHAR
  // this fixes the problem of CASE WHEN with different length string literals but get wrong
  // result with additional space suffix
  override def shouldConvertRaggedUnionTypesToVarying(): Boolean = true
  // AVG result type is derived by Flink's rules; nullability follows the argument.
  override def deriveAvgAggType(
      typeFactory: RelDataTypeFactory, argType: RelDataType): RelDataType = {
    val argTypeInfo = FlinkTypeFactory.toLogicalType(argType)
    val avgType = FlinkTypeSystem.deriveAvgAggType(argTypeInfo)
    typeFactory.asInstanceOf[FlinkTypeFactory].createFieldTypeFromLogicalType(
      avgType.copy(argType.isNullable))
  }
  // SUM result type is derived by Flink's rules; nullability follows the argument.
  override def deriveSumType(
      typeFactory: RelDataTypeFactory, argType: RelDataType): RelDataType = {
    val argTypeInfo = FlinkTypeFactory.toLogicalType(argType)
    val sumType = FlinkTypeSystem.deriveSumType(argTypeInfo)
    typeFactory.asInstanceOf[FlinkTypeFactory].createFieldTypeFromLogicalType(
      sumType.copy(argType.isNullable))
  }
}
/**
 * Shared precision/scale derivation rules for Flink SQL aggregates and
 * decimal arithmetic, modelled after SQL Server's documented behaviour.
 */
object FlinkTypeSystem {

  /** Result type of AVG(arg): decimals stay decimal, other numeric types widen to DOUBLE. */
  def deriveAvgAggType(argType: LogicalType): LogicalType = argType match {
    case decimal: DecimalType =>
      val avg = inferAggAvgType(decimal.getScale)
      new DecimalType(avg.getPrecision, avg.getScale)
    case numeric if TypeCheckUtils.isNumeric(numeric) =>
      new DoubleType()
    case other =>
      throw new RuntimeException("Unsupported argType for AVG(): " + other)
  }

  /** Result type of SUM(arg): decimals widen to maximum precision, other numerics keep their type. */
  def deriveSumType(argType: LogicalType): LogicalType = argType match {
    case decimal: DecimalType =>
      val sum = inferAggSumType(decimal.getScale)
      new DecimalType(sum.getPrecision, sum.getScale)
    case numeric if TypeCheckUtils.isNumeric(numeric) =>
      numeric
    case other =>
      throw new RuntimeException("Unsupported argType for SUM(): " + other)
  }

  /**
   * Division result type, following
   * https://docs.microsoft.com/en-us/sql/t-sql/data-types/precision-scale-and-length-transact-sql.
   */
  def inferDivisionType(
      precision1: Int, scale1: Int, precision2: Int, scale2: Int): DecimalType = {
    // note: magic numbers are used directly here, because it's not really a general algorithm.
    val rawScale = Math.max(6, scale1 + precision2 + 1)
    val rawPrecision = precision1 - scale1 + scale2 + rawScale
    if (rawPrecision > 38) {
      // Cap precision at 38 and shrink the scale accordingly, but never below 6.
      new DecimalType(38, Math.max(6, 38 - (rawPrecision - rawScale)))
    } else {
      new DecimalType(rawPrecision, rawScale)
    }
  }

  /** Integral division: result scale is always zero, precision capped at 38. */
  def inferIntDivType(precision1: Int, scale1: Int, scale2: Int): DecimalType =
    new DecimalType(Math.min(38, precision1 - scale1 + scale2), 0)

  /**
   * SUM aggregate type, following
   * https://docs.microsoft.com/en-us/sql/t-sql/functions/sum-transact-sql.
   */
  def inferAggSumType(scale: Int): DecimalType = new DecimalType(38, scale)

  /**
   * AVG aggregate type, following
   * https://docs.microsoft.com/en-us/sql/t-sql/functions/avg-transact-sql
   * however, we count by LONG, therefore divide by Decimal(20,0),
   * but the end result is actually the same, which is Decimal(38, max(6,s)).
   */
  def inferAggAvgType(scale: Int): DecimalType = inferDivisionType(38, scale, 20, 0)

  /** Return type of ROUND(DECIMAL(p,s), r). */
  def inferRoundType(precision: Int, scale: Int, r: Int): DecimalType =
    if (r >= scale) {
      new DecimalType(precision, scale)
    } else if (r < 0) {
      new DecimalType(Math.min(38, 1 + precision - scale), 0)
    } else {
      // 0 <= r < scale; rounding may add one digit, hence the +1 on precision.
      new DecimalType(1 + precision - scale + r, r)
    }

  // Decimal type used when only "a decimal" is known, without explicit precision/scale.
  val DECIMAL_SYSTEM_DEFAULT = new DecimalType(DecimalType.MAX_PRECISION, 18)
}
| shaoxuan-wang/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/calcite/FlinkTypeSystem.scala | Scala | apache-2.0 | 5,687 |
package katas.scala.sieve
import org.junit.Test
import org.scalatest.Matchers
import scala.collection.mutable.ListBuffer
/** Sieve of Eratosthenes kata: two lazy formulations of the endless prime sequence. */
class SOE5 extends Matchers {
  @Test def `find prime numbers`() {
    val expected = Seq(2, 3, 5, 7, 11, 13, 17, 19, 23, 29)
    primes().take(10) should equal(expected)
    iteratorOfPrimes().take(10).toList should equal(expected)
    iteratorOfPrimes().toStream.take(10) should equal(expected)
  }

  // "Unfaithful sieve": each produced prime n filters its multiples out of the rest of the stream.
  private def primes(): Stream[Int] = {
    def from(n: Int): Stream[Int] = n #:: from(n + 1).filter(_ % n != 0)
    from(2)
  }

  // Trial division against every previously produced prime; the iterator never ends.
  private def iteratorOfPrimes(): Iterator[Int] = new Iterator[Int] {
    private var candidate = 2
    private val found = ListBuffer[Int]()
    override def hasNext = true
    override def next() = {
      // Advance until the candidate is divisible by none of the known primes.
      while (found.exists(candidate % _ == 0)) candidate += 1
      found += candidate
      candidate
    }
  }
}
package effectful.examples.adapter.cats
import cats.{Applicative, Functor, Monoid}
import cats.data.WriterT
import effectful.Capture
import effectful.augments._
package object writer {
  // Writer transformer that accumulates a List[LogEntry] log alongside values in F.
  type LogWriterT[F[_],A] = WriterT[F,List[LogEntry],A]
  object LogWriterT {
    // Wrap an already-built F of (log, value) into a LogWriterT without any lifting.
    def construct[F[_],A](run: F[(List[LogEntry],A)]) : LogWriterT[F,A] =
      WriterT(run)
    // Lift a pure value into LogWriterT with an empty log.
    def apply[F[_],A](a: A)(implicit
      F:Applicative[F]
    ) : LogWriterT[F,A] =
      WriterT.put(a)(Nil)
    // Lift a value together with an initial log of entries.
    def apply[F[_],A](
      entries: List[LogEntry],
      a: A
    )(implicit
      F:Applicative[F]
    ) : LogWriterT[F,A] =
      WriterT.put[F,List[LogEntry],A](a)(entries)
  }
  // The implicit defs below lift augment capabilities (Par/Delay/Exceptions/Capture)
  // through WriterT whenever the base effect F provides them.
  implicit def par_WriterT[F[_],L](implicit
    P:Par[F],
    F:Functor[F],
    L:Monoid[L]
  ) = new WriterTPar[F,L]
  implicit def delay_WriterT[F[_],L](implicit
    D:Delay[F],
    F:Functor[F],
    L:Monoid[L]
  ) = new WriterTDelay[F,L]
  implicit def exceptions_WriterT[F[_],L](implicit
    X:Exceptions[F],
    F:Functor[F],
    L:Monoid[L]
  ) = new WriterTExceptions[F,L]
  implicit def capture_LogWriterT[F[_],L](implicit
    F:Capture[F],
    L:Monoid[L]
  ) = new WriterTCapture[F,L]()
}
| S-Mach/effectful | src/test/scala/effectful/examples/adapter/cats/writer/package.scala | Scala | mit | 1,158 |
import org.scalajs.sbtplugin.ScalaJSPlugin.autoImport._
import sbt._
object Dependencies {
  //libs for testing
  lazy val testing: Def.Initialize[Seq[ModuleID]] = Def.setting(Seq(
    "org.scalatest" %%% "scalatest" % Versions.scalaTest % Test
  ))
  //akka-related libs
  lazy val akka = Def.setting(Seq(
    "org.denigma" %%% "akka-http-extensions" % Versions.akkaHttpExtensions))
  //templating libs: ScalaCSS core plus its Scalatags integration
  lazy val templates = Def.setting(Seq(
    "com.github.japgolly.scalacss" %%% "core" % Versions.scalaCSS,
    "com.github.japgolly.scalacss" %%% "ext-scalatags" % Versions.scalaCSS))
  //minimal deps needed to build Scala.js facades
  lazy val facadeDependencies = Def.setting(Seq(
    "org.scala-js" %%% "scalajs-dom" % Versions.dom,
    "org.querki" %%% "querki-jsext" % Versions.jsext //useful scalajs extensions
  ))
  //scalajs libs
  lazy val sjsLibs = Def.setting(Seq(
    "org.scala-js" %%% "scalajs-dom" % Versions.dom,
    "org.querki" %%% "jquery-facade" % Versions.jqueryFacade, //scalajs facade for jQuery + jQuery extensions
    "org.querki" %%% "querki-jsext" % Versions.jsext, //useful scalajs extensions
    "org.denigma" %%% "codemirror-facade" % Versions.codemirrorFacade,
    "org.denigma" %%% "binding" % Versions.binding))
  //dependencies on javascript libs
  lazy val webjars = Def.setting(Seq(
    "org.webjars" % "jquery" % Versions.jquery,
    "org.webjars" % "Semantic-UI" % Versions.semanticUI, //css theme, similar to bootstrap
    "org.webjars" % "codemirror" % Versions.codemirror,
    "org.webjars" % "three.js" % Versions.threeJs))
  //common purpose libs
  lazy val commonShared: Def.Initialize[Seq[ModuleID]] = Def.setting(Seq( //"com.softwaremill.quicklens" %%% "quicklens" % Versions.quicklens//, //nice lenses for case classes
  ))
}
| antonkulaga/threejs-facade | project/Dependencies.scala | Scala | mpl-2.0 | 1,730 |
package mesosphere.marathon
package core.storage.backup
import java.net.URI
import akka.Done
import akka.actor.ActorSystem
import akka.stream.Materializer
import com.typesafe.scalalogging.StrictLogging
import mesosphere.marathon.core.storage.backup.impl.PersistentStoreBackupImpl
import mesosphere.marathon.core.storage.store.PersistenceStore
import scala.concurrent.{ExecutionContext, Future}
/**
 * Backup & Restore functionality for configured persistent store and backup location.
 * Instances are created via the companion object's `apply`.
 */
trait PersistentStoreBackup {
  /**
   * Backup the state of the configured persistent store.
   * @param to the location to write this backup to.
   * @return A future which succeeds, if the backup is written completely.
   */
  def backup(to: URI): Future[Done]
  /**
   * Restore the state from a given backup.
   * @param from the location to read this backup from.
   * @return a future which succeeds, if the backup is restored completely.
   */
  def restore(from: URI): Future[Done]
}
object PersistentStoreBackup extends StrictLogging {
  /** Creates the default backup implementation backed by the given persistence store. */
  def apply(
    store: PersistenceStore[_, _, _]
  )(implicit materializer: Materializer, actorSystem: ActorSystem, ec: ExecutionContext): PersistentStoreBackup = {
    new PersistentStoreBackupImpl(store)
  }
}
| mesosphere/marathon | src/main/scala/mesosphere/marathon/core/storage/backup/PersistentStoreBackup.scala | Scala | apache-2.0 | 1,268 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.io.{InterruptedIOException, IOException, UncheckedIOException}
import java.nio.channels.ClosedByInterruptException
import java.util.UUID
import java.util.concurrent.{CountDownLatch, ExecutionException, TimeoutException, TimeUnit}
import java.util.concurrent.atomic.AtomicReference
import java.util.concurrent.locks.ReentrantLock
import scala.collection.JavaConverters._
import scala.collection.mutable.{Map => MutableMap}
import scala.util.control.NonFatal
import com.google.common.util.concurrent.UncheckedExecutionException
import org.apache.hadoop.fs.Path
import org.apache.spark.{SparkContext, SparkException}
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes._
import org.apache.spark.sql.connector.catalog.{SupportsWrite, Table}
import org.apache.spark.sql.connector.read.streaming.{Offset => OffsetV2, ReadLimit, SparkDataStream}
import org.apache.spark.sql.connector.write.{LogicalWriteInfoImpl, SupportsTruncate, Write}
import org.apache.spark.sql.execution.command.StreamingExplainCommand
import org.apache.spark.sql.execution.datasources.v2.StreamWriterCommitProgress
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.connector.SupportsStreamingUpdateAsAppend
import org.apache.spark.sql.streaming._
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.{Clock, UninterruptibleThread, Utils}
/** States for [[StreamExecution]]'s lifecycle. */
trait State
// Query object created; the stream thread has not finished initialization yet.
case object INITIALIZING extends State
// The stream thread is running batches.
case object ACTIVE extends State
// The stream has stopped, either cleanly or with an error.
case object TERMINATED extends State
// NOTE(review): not referenced in this file; presumably used by continuous
// processing while restarting with a new configuration — confirm in subclasses.
case object RECONFIGURING extends State
/**
* Manages the execution of a streaming Spark SQL query that is occurring in a separate thread.
* Unlike a standard query, a streaming query executes repeatedly each time new data arrives at any
* [[Source]] present in the query plan. Whenever new data arrives, a [[QueryExecution]] is created
* and the results are committed transactionally to the given [[Sink]].
*
* @param deleteCheckpointOnStop whether to delete the checkpoint if the query is stopped without
* errors. Checkpoint deletion can be forced with the appropriate
* Spark configuration.
*/
abstract class StreamExecution(
    override val sparkSession: SparkSession,
    override val name: String,
    val resolvedCheckpointRoot: String,
    val analyzedPlan: LogicalPlan,
    val sink: Table,
    val trigger: Trigger,
    val triggerClock: Clock,
    val outputMode: OutputMode,
    deleteCheckpointOnStop: Boolean)
  extends StreamingQuery with ProgressReporter with Logging {
  import org.apache.spark.sql.streaming.StreamingQueryListener._
  protected val pollingDelayMs: Long = sparkSession.sessionState.conf.streamingPollingDelay
  protected val minLogEntriesToMaintain: Int = sparkSession.sessionState.conf.minBatchesToRetain
  require(minLogEntriesToMaintain > 0, "minBatchesToRetain has to be positive")
  /**
   * A lock used to wait/notify when batches complete. Use a fair lock to avoid thread starvation.
   */
  protected val awaitProgressLock = new ReentrantLock(true)
  protected val awaitProgressLockCondition = awaitProgressLock.newCondition()
  // Latches gating the lifecycle: released once fields are initialized, once the
  // thread has started (and QueryStartedEvent posted), and once the query terminates.
  private val initializationLatch = new CountDownLatch(1)
  private val startLatch = new CountDownLatch(1)
  private val terminationLatch = new CountDownLatch(1)
  def logicalPlan: LogicalPlan
  /**
   * Tracks how much data we have processed and committed to the sink or state store from each
   * input source.
   * Only the scheduler thread should modify this field, and only in atomic steps.
   * Other threads should make a shallow copy if they are going to access this field more than
   * once, since the field's value may change at any time.
   */
  @volatile
  var committedOffsets = new StreamProgress
  /**
   * Tracks the offsets that are available to be processed, but have not yet be committed to the
   * sink.
   * Only the scheduler thread should modify this field, and only in atomic steps.
   * Other threads should make a shallow copy if they are going to access this field more than
   * once, since the field's value may change at any time.
   */
  @volatile
  var availableOffsets = new StreamProgress
  /**
   * Tracks the latest offsets for each input source.
   * Only the scheduler thread should modify this field, and only in atomic steps.
   * Other threads should make a shallow copy if they are going to access this field more than
   * once, since the field's value may change at any time.
   */
  @volatile
  var latestOffsets = new StreamProgress
  // Progress of the most recent sink commit, if the sink reported one.
  @volatile
  var sinkCommitProgress: Option[StreamWriterCommitProgress] = None
  /** The current batchId or -1 if execution has not yet been initialized. */
  protected var currentBatchId: Long = -1
  /** Metadata associated with the whole query */
  protected val streamMetadata: StreamMetadata = {
    val metadataPath = new Path(checkpointFile("metadata"))
    val hadoopConf = sparkSession.sessionState.newHadoopConf()
    // Reuse the persisted metadata (stable query id across restarts) or create it once.
    StreamMetadata.read(metadataPath, hadoopConf).getOrElse {
      val newMetadata = new StreamMetadata(UUID.randomUUID.toString)
      StreamMetadata.write(newMetadata, metadataPath, hadoopConf)
      newMetadata
    }
  }
  /** Metadata associated with the offset seq of a batch in the query. */
  protected var offsetSeqMetadata = OffsetSeqMetadata(
    batchWatermarkMs = 0, batchTimestampMs = 0, sparkSession.conf)
  /**
   * A map of current watermarks, keyed by the position of the watermark operator in the
   * physical plan.
   *
   * This state is 'soft state', which does not affect the correctness and semantics of watermarks
   * and is not persisted across query restarts.
   * The fault-tolerant watermark state is in offsetSeqMetadata.
   */
  protected val watermarkMsMap: MutableMap[Int, Long] = MutableMap()
  // id is stable across restarts (persisted in streamMetadata); runId is fresh per run.
  override val id: UUID = UUID.fromString(streamMetadata.id)
  override val runId: UUID = UUID.randomUUID
  /**
   * Pretty identified string of printing in logs. Format is
   * If name is set "queryName [id = xyz, runId = abc]" else "[id = xyz, runId = abc]"
   */
  protected val prettyIdString =
    Option(name).map(_ + " ").getOrElse("") + s"[id = $id, runId = $runId]"
  /**
   * A list of unique sources in the query plan. This will be set when generating logical plan.
   */
  @volatile protected var uniqueSources: Map[SparkDataStream, ReadLimit] = Map.empty
  /** Defines the internal state of execution */
  protected val state = new AtomicReference[State](INITIALIZING)
  // The most recent IncrementalExecution; read by explainInternal().
  @volatile
  var lastExecution: IncrementalExecution = _
  /** Holds the most recent input data for each source. */
  protected var newData: Map[SparkDataStream, LogicalPlan] = _
  // The fatal error that stopped the stream, if any; surfaced via exception/await APIs.
  @volatile
  protected var streamDeathCause: StreamingQueryException = null
  /* Get the call site in the caller thread; will pass this into the micro batch thread */
  private val callSite = Utils.getCallSite()
  /** Used to report metrics to coda-hale. This uses id for easier tracking across restarts. */
  lazy val streamMetrics = new MetricsReporter(
    this, s"spark.streaming.${Option(name).getOrElse(id)}")
  /** Isolated spark session to run the batches with. */
  private val sparkSessionForStream = sparkSession.cloneSession()
  /**
   * The thread that runs the micro-batches of this stream. Note that this thread must be
   * [[org.apache.spark.util.UninterruptibleThread]] to workaround KAFKA-1894: interrupting a
   * running `KafkaConsumer` may cause endless loop.
   */
  val queryExecutionThread: QueryExecutionThread =
    new QueryExecutionThread(s"stream execution thread for $prettyIdString") {
      override def run(): Unit = {
        // To fix call site like "run at <unknown>:0", we bridge the call site from the caller
        // thread to this micro batch thread
        sparkSession.sparkContext.setCallSite(callSite)
        runStream()
      }
    }
  /**
   * A write-ahead-log that records the offsets that are present in each batch. In order to ensure
   * that a given batch will always consist of the same data, we write to this log *before* any
   * processing is done. Thus, the Nth record in this log indicated data that is currently being
   * processed and the N-1th entry indicates which offsets have been durably committed to the sink.
   */
  val offsetLog = new OffsetSeqLog(sparkSession, checkpointFile("offsets"))
  /**
   * A log that records the batch ids that have completed. This is used to check if a batch was
   * fully processed, and its output was committed to the sink, hence no need to process it again.
   * This is used (for instance) during restart, to help identify which batch to run next.
   */
  val commitLog = new CommitLog(sparkSession, checkpointFile("commits"))
  /** Whether all fields of the query have been initialized */
  private def isInitialized: Boolean = state.get != INITIALIZING
  /** Whether the query is currently active or not */
  override def isActive: Boolean = state.get != TERMINATED
  /** Returns the [[StreamingQueryException]] if the query was terminated by an exception. */
  override def exception: Option[StreamingQueryException] = Option(streamDeathCause)
  /** Returns the path of a file with `name` in the checkpoint directory. */
  protected def checkpointFile(name: String): String =
    new Path(new Path(resolvedCheckpointRoot), name).toString
  /** All checkpoint file operations should be performed through `CheckpointFileManager`. */
  private val fileManager = CheckpointFileManager.create(new Path(resolvedCheckpointRoot),
    sparkSession.sessionState.newHadoopConf)
  /**
   * Starts the execution. This returns only after the thread has started and [[QueryStartedEvent]]
   * has been posted to all the listeners.
   */
  def start(): Unit = {
    logInfo(s"Starting $prettyIdString. Use $resolvedCheckpointRoot to store the query checkpoint.")
    queryExecutionThread.setDaemon(true)
    queryExecutionThread.start()
    startLatch.await() // Wait until thread started and QueryStart event has been posted
  }
  /**
   * Run the activated stream until stopped.
   */
  protected def runActivatedStream(sparkSessionForStream: SparkSession): Unit
  /**
   * Activate the stream and then wrap a callout to runActivatedStream, handling start and stop.
   *
   * Note that this method ensures that [[QueryStartedEvent]] and [[QueryTerminatedEvent]] are
   * posted such that listeners are guaranteed to get a start event before a termination.
   * Furthermore, this method also ensures that [[QueryStartedEvent]] event is posted before the
   * `start()` method returns.
   */
  private def runStream(): Unit = {
    try {
      sparkSession.sparkContext.setJobGroup(runId.toString, getBatchDescriptionString,
        interruptOnCancel = true)
      sparkSession.sparkContext.setLocalProperty(StreamExecution.QUERY_ID_KEY, id.toString)
      if (sparkSession.sessionState.conf.streamingMetricsEnabled) {
        sparkSession.sparkContext.env.metricsSystem.registerSource(streamMetrics)
      }
      // `postEvent` does not throw non fatal exception.
      val startTimestamp = triggerClock.getTimeMillis()
      postEvent(new QueryStartedEvent(id, runId, name, formatTimestamp(startTimestamp)))
      // Unblock starting thread
      startLatch.countDown()
      // While active, repeatedly attempt to run batches.
      sparkSessionForStream.withActive {
        // Adaptive execution can change num shuffle partitions, disallow
        sparkSessionForStream.conf.set(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key, "false")
        // Disable cost-based join optimization as we do not want stateful operations
        // to be rearranged
        sparkSessionForStream.conf.set(SQLConf.CBO_ENABLED.key, "false")
        updateStatusMessage("Initializing sources")
        // force initialization of the logical plan so that the sources can be created
        logicalPlan
        offsetSeqMetadata = OffsetSeqMetadata(
          batchWatermarkMs = 0, batchTimestampMs = 0, sparkSessionForStream.conf)
        if (state.compareAndSet(INITIALIZING, ACTIVE)) {
          // Unblock `awaitInitialization`
          initializationLatch.countDown()
          runActivatedStream(sparkSessionForStream)
          updateStatusMessage("Stopped")
        } else {
          // `stop()` is already called. Let `finally` finish the cleanup.
        }
      }
    } catch {
      case e if isInterruptedByStop(e, sparkSession.sparkContext) =>
        // interrupted by stop()
        updateStatusMessage("Stopped")
      case e: IOException if e.getMessage != null
        && e.getMessage.startsWith(classOf[InterruptedException].getName)
        && state.get == TERMINATED =>
        // This is a workaround for HADOOP-12074: `Shell.runCommand` converts `InterruptedException`
        // to `new IOException(ie.toString())` before Hadoop 2.8.
        updateStatusMessage("Stopped")
      case e: Throwable =>
        streamDeathCause = new StreamingQueryException(
          toDebugString(includeLogicalPlan = isInitialized),
          s"Query $prettyIdString terminated with exception: ${e.getMessage}",
          e,
          committedOffsets.toOffsetSeq(sources, offsetSeqMetadata).toString,
          availableOffsets.toOffsetSeq(sources, offsetSeqMetadata).toString)
        logError(s"Query $prettyIdString terminated with error", e)
        updateStatusMessage(s"Terminated with exception: ${e.getMessage}")
        // Rethrow the fatal errors to allow the user using `Thread.UncaughtExceptionHandler` to
        // handle them
        if (!NonFatal(e)) {
          throw e
        }
    } finally queryExecutionThread.runUninterruptibly {
      // The whole `finally` block must run inside `runUninterruptibly` to avoid being interrupted
      // when a query is stopped by the user. We need to make sure the following codes finish
      // otherwise it may throw `InterruptedException` to `UncaughtExceptionHandler` (SPARK-21248).
      // Release latches to unblock the user codes since exception can happen in any place and we
      // may not get a chance to release them
      startLatch.countDown()
      initializationLatch.countDown()
      try {
        stopSources()
        state.set(TERMINATED)
        currentStatus = status.copy(isTriggerActive = false, isDataAvailable = false)
        // Update metrics and status
        sparkSession.sparkContext.env.metricsSystem.removeSource(streamMetrics)
        // Notify others
        sparkSession.streams.notifyQueryTermination(StreamExecution.this)
        postEvent(
          new QueryTerminatedEvent(id, runId, exception.map(_.cause).map(Utils.exceptionString)))
        // Delete the temp checkpoint when either force delete enabled or the query didn't fail
        if (deleteCheckpointOnStop &&
            (sparkSession.sessionState.conf
              .getConf(SQLConf.FORCE_DELETE_TEMP_CHECKPOINT_LOCATION) || exception.isEmpty)) {
          val checkpointPath = new Path(resolvedCheckpointRoot)
          try {
            logInfo(s"Deleting checkpoint $checkpointPath.")
            fileManager.delete(checkpointPath)
          } catch {
            case NonFatal(e) =>
              // Deleting temp checkpoint folder is best effort, don't throw non fatal exceptions
              // when we cannot delete them.
              logWarning(s"Cannot delete $checkpointPath", e)
          }
        }
      } finally {
        awaitProgressLock.lock()
        try {
          // Wake up any threads that are waiting for the stream to progress.
          awaitProgressLockCondition.signalAll()
        } finally {
          awaitProgressLock.unlock()
        }
        terminationLatch.countDown()
      }
    }
  }
  // True only when the query was already moved to TERMINATED by stop() and the
  // throwable is one of the known interruption shapes.
  private def isInterruptedByStop(e: Throwable, sc: SparkContext): Boolean = {
    if (state.get == TERMINATED) {
      StreamExecution.isInterruptionException(e, sc)
    } else {
      false
    }
  }
  /** Posts the event to all registered [[StreamingQueryListener]]s of this session. */
  override protected def postEvent(event: StreamingQueryListener.Event): Unit = {
    sparkSession.streams.postListenerEvent(event)
  }
  /** Stops all streaming sources safely. */
  protected def stopSources(): Unit = {
    uniqueSources.foreach { case (source, _) =>
      try {
        source.stop()
      } catch {
        case NonFatal(e) =>
          logWarning(s"Failed to stop streaming source: $source. Resources may have leaked.", e)
      }
    }
  }
  /**
   * Interrupts the query execution thread and awaits its termination until until it exceeds the
   * timeout. The timeout can be set on "spark.sql.streaming.stopTimeout".
   *
   * @throws TimeoutException If the thread cannot be stopped within the timeout
   */
  @throws[TimeoutException]
  protected def interruptAndAwaitExecutionThreadTermination(): Unit = {
    val timeout = math.max(
      sparkSession.sessionState.conf.getConf(SQLConf.STREAMING_STOP_TIMEOUT), 0)
    queryExecutionThread.interrupt()
    queryExecutionThread.join(timeout)
    if (queryExecutionThread.isAlive) {
      // Attach the thread's current stack trace as the cause so users can see
      // what the stuck thread was doing.
      val stackTraceException = new SparkException("The stream thread was last executing:")
      stackTraceException.setStackTrace(queryExecutionThread.getStackTrace)
      val timeoutException = new TimeoutException(
        s"Stream Execution thread for stream $prettyIdString failed to stop within $timeout " +
          s"milliseconds (specified by ${SQLConf.STREAMING_STOP_TIMEOUT.key}). See the cause on " +
          s"what was being executed in the streaming query thread.")
      timeoutException.initCause(stackTraceException)
      throw timeoutException
    }
  }
  /**
   * Blocks the current thread until processing for data from the given `source` has reached at
   * least the given `Offset`. This method is intended for use primarily when writing tests.
   */
  private[sql] def awaitOffset(sourceIndex: Int, newOffset: OffsetV2, timeoutMs: Long): Unit = {
    assertAwaitThread()
    def notDone = {
      val localCommittedOffsets = committedOffsets
      if (sources == null) {
        // sources might not be initialized yet
        false
      } else {
        val source = sources(sourceIndex)
        !localCommittedOffsets.contains(source) || localCommittedOffsets(source) != newOffset
      }
    }
    while (notDone) {
      awaitProgressLock.lock()
      try {
        awaitProgressLockCondition.await(timeoutMs, TimeUnit.MILLISECONDS)
        if (streamDeathCause != null) {
          throw streamDeathCause
        }
      } finally {
        awaitProgressLock.unlock()
      }
    }
    logDebug(s"Unblocked at $newOffset for ${sources(sourceIndex)}")
  }
  /** A flag to indicate that a batch has completed with no new data available. */
  @volatile protected var noNewData = false
  /**
   * Assert that the await APIs should not be called in the stream thread. Otherwise, it may cause
   * dead-lock, e.g., calling any await APIs in `StreamingQueryListener.onQueryStarted` will block
   * the stream thread forever.
   */
  private def assertAwaitThread(): Unit = {
    if (queryExecutionThread eq Thread.currentThread) {
      throw new IllegalStateException(
        "Cannot wait for a query state from the same thread that is running the query")
    }
  }
  /**
   * Await until all fields of the query have been initialized.
   */
  def awaitInitialization(timeoutMs: Long): Unit = {
    assertAwaitThread()
    require(timeoutMs > 0, "Timeout has to be positive")
    if (streamDeathCause != null) {
      throw streamDeathCause
    }
    initializationLatch.await(timeoutMs, TimeUnit.MILLISECONDS)
    if (streamDeathCause != null) {
      throw streamDeathCause
    }
  }
  override def processAllAvailable(): Unit = {
    assertAwaitThread()
    if (streamDeathCause != null) {
      throw streamDeathCause
    }
    if (!isActive) return
    awaitProgressLock.lock()
    try {
      noNewData = false
      while (true) {
        // Re-check periodically; a signal is also sent on every batch completion.
        awaitProgressLockCondition.await(10000, TimeUnit.MILLISECONDS)
        if (streamDeathCause != null) {
          throw streamDeathCause
        }
        if (noNewData || !isActive) {
          return
        }
      }
    } finally {
      awaitProgressLock.unlock()
    }
  }
  override def awaitTermination(): Unit = {
    assertAwaitThread()
    terminationLatch.await()
    if (streamDeathCause != null) {
      throw streamDeathCause
    }
  }
  override def awaitTermination(timeoutMs: Long): Boolean = {
    assertAwaitThread()
    require(timeoutMs > 0, "Timeout has to be positive")
    terminationLatch.await(timeoutMs, TimeUnit.MILLISECONDS)
    if (streamDeathCause != null) {
      throw streamDeathCause
    } else {
      !isActive
    }
  }
  /** Expose for tests */
  def explainInternal(extended: Boolean): String = {
    if (lastExecution == null) {
      "No physical plan. Waiting for data."
    } else {
      val explain = StreamingExplainCommand(lastExecution, extended = extended)
      sparkSession.sessionState.executePlan(explain).executedPlan.executeCollect()
        .map(_.getString(0)).mkString("\\n")
    }
  }
  override def explain(extended: Boolean): Unit = {
    // scalastyle:off println
    println(explainInternal(extended))
    // scalastyle:on println
  }
  override def explain(): Unit = explain(extended = false)
  override def toString: String = {
    s"Streaming Query $prettyIdString [state = $state]"
  }
  // Multi-line description used as the message of StreamingQueryException.
  private def toDebugString(includeLogicalPlan: Boolean): String = {
    val debugString =
      s"""|=== Streaming Query ===
          |Identifier: $prettyIdString
          |Current Committed Offsets: $committedOffsets
          |Current Available Offsets: $availableOffsets
          |
          |Current State: $state
          |Thread State: ${queryExecutionThread.getState}""".stripMargin
    if (includeLogicalPlan) {
      debugString + s"\\n\\nLogical Plan:\\n$logicalPlan"
    } else {
      debugString
    }
  }
  /** Job-group description shown in the Spark UI for jobs of the current batch. */
  protected def getBatchDescriptionString: String = {
    val batchDescription = if (currentBatchId < 0) "init" else currentBatchId.toString
    s"""|${Option(name).getOrElse("")}
        |id = $id
        |runId = $runId
        |batch = $batchDescription""".stripMargin
  }
  /** Builds a V2 [[Write]] for the sink table, honoring the query's output mode. */
  protected def createWrite(
      table: SupportsWrite,
      options: Map[String, String],
      inputPlan: LogicalPlan): Write = {
    val info = LogicalWriteInfoImpl(
      queryId = id.toString,
      inputPlan.schema,
      new CaseInsensitiveStringMap(options.asJava))
    val writeBuilder = table.newWriteBuilder(info)
    outputMode match {
      case Append =>
        writeBuilder.build()
      case Complete =>
        // TODO: we should do this check earlier when we have capability API.
        require(writeBuilder.isInstanceOf[SupportsTruncate],
          table.name + " does not support Complete mode.")
        writeBuilder.asInstanceOf[SupportsTruncate].truncate().build()
      case Update =>
        require(writeBuilder.isInstanceOf[SupportsStreamingUpdateAsAppend],
          table.name + " does not support Update mode.")
        writeBuilder.asInstanceOf[SupportsStreamingUpdateAsAppend].build()
    }
  }
  /** Removes offset/commit log entries whose batch id is below `threshold`. */
  protected def purge(threshold: Long): Unit = {
    logDebug(s"Purging metadata at threshold=$threshold")
    offsetLog.purge(threshold)
    commitLog.purge(threshold)
  }
}
object StreamExecution {
  // Local property key carrying the query id into tasks.
  val QUERY_ID_KEY = "sql.streaming.queryId"
  val IS_CONTINUOUS_PROCESSING = "__is_continuous_processing"
  @scala.annotation.tailrec
  def isInterruptionException(e: Throwable, sc: SparkContext): Boolean = e match {
    // InterruptedIOException - thrown when an I/O operation is interrupted
    // ClosedByInterruptException - thrown when an I/O operation upon a channel is interrupted
    case _: InterruptedException | _: InterruptedIOException | _: ClosedByInterruptException =>
      true
    // The cause of the following exceptions may be one of the above exceptions:
    //
    // UncheckedIOException - thrown by codes that cannot throw a checked IOException, such as
    //                        BiFunction.apply
    // ExecutionException - thrown by codes running in a thread pool and these codes throw an
    //                      exception
    // UncheckedExecutionException - thrown by codes that cannot throw a checked
    //                               ExecutionException, such as BiFunction.apply
    case e2 @ (_: UncheckedIOException | _: ExecutionException | _: UncheckedExecutionException)
      if e2.getCause != null =>
      isInterruptionException(e2.getCause, sc)
    case se: SparkException =>
      // A job cancelled as part of this query's job group also counts as an interruption;
      // recognize it by the cancellation message mentioning the current job group id.
      val jobGroup = sc.getLocalProperty("spark.jobGroup.id")
      if (jobGroup == null) return false
      val errorMsg = se.getMessage
      if (errorMsg.contains("cancelled") && errorMsg.contains(jobGroup) && se.getCause == null) {
        true
      } else if (se.getCause != null) {
        isInterruptionException(se.getCause, sc)
      } else {
        false
      }
    case _ =>
      false
  }
  /** Whether the path contains special chars that will be escaped when converting to a `URI`. */
  def containsSpecialCharsInPath(path: Path): Boolean = {
    path.toUri.getPath != new Path(path.toUri.toString).toUri.getPath
  }
}
/**
 * A dedicated thread type for running a stream query. Some code paths must execute on
 * this thread and verify that at runtime via `classOf[QueryExecutionThread]` checks, so
 * the concrete type (not just the thread name) is significant.
 */
abstract class QueryExecutionThread(name: String) extends UninterruptibleThread(name)
| shaneknapp/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala | Scala | apache-2.0 | 26,380 |
package org.bitcoins.core.script.flag
import org.bitcoins.testkitcore.util.BitcoinSUnitTest
/** Created by chris on 4/6/16.
*/
/** Unit tests for `ScriptFlagUtil.requiresStrictDerEncoding`. */
class ScriptFlagUtilTest extends BitcoinSUnitTest {

  "ScriptFlagUtil" must "check if strict der encoding check is required" in {
    // Either of these flags on its own should trigger strict DER encoding checks.
    val derFlagSets = Seq(Seq(ScriptVerifyDerSig), Seq(ScriptVerifyStrictEnc))
    derFlagSets.foreach { flags =>
      ScriptFlagUtil.requiresStrictDerEncoding(flags) must be(true)
    }
  }

  it must "return false if strict der encoding check is not required" in {
    ScriptFlagUtil.requiresStrictDerEncoding(Seq.empty) must be(false)
    // A full complement of unrelated flags must not imply strict DER encoding.
    val unrelatedFlags = Seq(
      ScriptVerifyCheckLocktimeVerify,
      ScriptVerifyCheckSequenceVerify,
      ScriptVerifyCleanStack,
      ScriptVerifyDiscourageUpgradableNOPs,
      ScriptVerifyLowS,
      ScriptVerifyMinimalData,
      ScriptVerifyNone,
      ScriptVerifyNullDummy,
      ScriptVerifyP2SH,
      ScriptVerifySigPushOnly
    )
    ScriptFlagUtil.requiresStrictDerEncoding(unrelatedFlags) must be(false)
  }
}
| bitcoin-s/bitcoin-s | core-test/src/test/scala/org/bitcoins/core/script/flag/ScriptFlagUtilTest.scala | Scala | mit | 1,016 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.io.NotSerializableException
import org.apache.spark.{SparkContext, SparkException, SparkFunSuite, TaskContext}
import org.apache.spark.LocalSparkContext._
import org.apache.spark.partial.CountEvaluator
import org.apache.spark.rdd.RDD
/**
 * End-to-end tests for ClosureCleaner: verifies that cleaned closures drop references to
 * unneeded outer objects (so they serialize), that closures containing top-level `return`
 * statements are rejected at cleaning time, and that every user-facing RDD/SparkContext
 * API actually cleans the closures it is given.
 */
class ClosureCleanerSuite extends SparkFunSuite {
  test("closures inside an object") {
    assert(TestObject.run() === 30) // 6 + 7 + 8 + 9
  }
  test("closures inside a class") {
    val obj = new TestClass
    assert(obj.run() === 30) // 6 + 7 + 8 + 9
  }
  test("closures inside a class with no default constructor") {
    val obj = new TestClassWithoutDefaultConstructor(5)
    assert(obj.run() === 30) // 6 + 7 + 8 + 9
  }
  test("closures that don't use fields of the outer class") {
    val obj = new TestClassWithoutFieldAccess
    assert(obj.run() === 30) // 6 + 7 + 8 + 9
  }
  test("nested closures inside an object") {
    assert(TestObjectWithNesting.run() === 96) // 4 * (1+2+3+4) + 4 * (1+2+3+4) + 16 * 1
  }
  test("nested closures inside a class") {
    val obj = new TestClassWithNesting(1)
    assert(obj.run() === 96) // 4 * (1+2+3+4) + 4 * (1+2+3+4) + 16 * 1
  }
  test("toplevel return statements in closures are identified at cleaning time") {
    intercept[ReturnStatementInClosureException] {
      TestObjectWithBogusReturns.run()
    }
  }
  test("return statements from named functions nested in closures don't raise exceptions") {
    val result = TestObjectWithNestedReturns.run()
    assert(result === 1)
  }
  test("user provided closures are actually cleaned") {
    // We use return statements as an indication that a closure is actually being cleaned
    // We expect closure cleaner to find the return statements in the user provided closures
    def expectCorrectException(body: => Unit): Unit = {
      try {
        body
      } catch {
        case rse: ReturnStatementInClosureException => // Success!
        case e @ (_: NotSerializableException | _: SparkException) =>
          fail(s"Expected ReturnStatementInClosureException, but got $e.\n" +
            "This means the closure provided by user is not actually cleaned.")
      }
    }
    withSpark(new SparkContext("local", "test")) { sc =>
      val rdd = sc.parallelize(1 to 10)
      val pairRdd = rdd.map { i => (i, i) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testMap(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testFlatMap(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testFilter(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testSortBy(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testGroupBy(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testKeyBy(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testMapPartitions(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testMapPartitionsWithIndex(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testZipPartitions2(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testZipPartitions3(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testZipPartitions4(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testForeach(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testForeachPartition(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testReduce(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testTreeReduce(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testFold(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testAggregate(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testTreeAggregate(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testCombineByKey(pairRdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testAggregateByKey(pairRdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testFoldByKey(pairRdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testReduceByKey(pairRdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testReduceByKeyLocally(pairRdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testMapValues(pairRdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testFlatMapValues(pairRdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testForeachAsync(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testForeachPartitionAsync(rdd) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testRunJob1(sc) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testRunJob2(sc) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testRunApproximateJob(sc) }
      expectCorrectException { TestUserClosuresActuallyCleaned.testSubmitJob(sc) }
    }
  }
  test("createNullValue") {
    new TestCreateNullValue().run()
  }
}
// Intentionally NOT Serializable. Closure-cleaner tests create instances of this class
// inside closures to prove that references the closure does not actually need are
// dropped before serialization. Equality and hashing are based solely on `id`.
class NonSerializable(val id: Int = -1) {
  override def hashCode(): Int = id
  override def equals(other: Any): Boolean = other match {
    case that: NonSerializable => this.id == that.id
    case _ => false
  }
}
/** Fixture: a closure defined in an object captures `x` and must not capture `nonSer`. */
object TestObject {
  def run(): Int = {
    // Deliberately unused by the closure: it must not be captured, or serialization fails.
    var nonSer = new NonSerializable
    val x = 5
    withSpark(new SparkContext("local", "test")) { sc =>
      val nums = sc.parallelize(Array(1, 2, 3, 4))
      nums.map(_ + x).reduce(_ + _)
    }
  }
}
/** Fixture: the closure calls `getX`, so it needs a reference to this (serializable) class. */
class TestClass extends Serializable {
  var x = 5
  def getX: Int = x
  def run(): Int = {
    // Deliberately unused by the closure: it must not be captured.
    var nonSer = new NonSerializable
    withSpark(new SparkContext("local", "test")) { sc =>
      val nums = sc.parallelize(Array(1, 2, 3, 4))
      nums.map(_ + getX).reduce(_ + _)
    }
  }
}
/** Fixture: like [[TestClass]], but the enclosing class has no zero-arg constructor. */
class TestClassWithoutDefaultConstructor(x: Int) extends Serializable {
  def getX: Int = x
  def run(): Int = {
    // Deliberately unused by the closure: it must not be captured.
    var nonSer = new NonSerializable
    withSpark(new SparkContext("local", "test")) { sc =>
      val nums = sc.parallelize(Array(1, 2, 3, 4))
      nums.map(_ + getX).reduce(_ + _)
    }
  }
}
// This class is not serializable, but we aren't using any of its fields in our
// closures, so they won't have a $outer pointing to it and should still work.
class TestClassWithoutFieldAccess {
  var nonSer = new NonSerializable
  def run(): Int = {
    // Only `x` is referenced by the closure; neither NonSerializable may be captured.
    var nonSer2 = new NonSerializable
    var x = 5
    withSpark(new SparkContext("local", "test")) { sc =>
      val nums = sc.parallelize(Array(1, 2, 3, 4))
      nums.map(_ + x).reduce(_ + _)
    }
  }
}
/** Fixture: a closure containing a top-level `return`, which cleaning must reject. */
object TestObjectWithBogusReturns {
  def run(): Int = {
    withSpark(new SparkContext("local", "test")) { sc =>
      val nums = sc.parallelize(Array(1, 2, 3, 4))
      // this return is invalid since it will transfer control outside the closure
      nums.map {x => return 1 ; x * 2}
      1
    }
  }
}
/** Fixture: a `return` inside a named function nested in the closure is legal. */
object TestObjectWithNestedReturns {
  def run(): Int = {
    withSpark(new SparkContext("local", "test")) { sc =>
      val nums = sc.parallelize(Array(1, 2, 3, 4))
      nums.map {x =>
        // this return is fine since it will not transfer control outside the closure
        def foo(): Int = { return 5; 1 }
        foo()
      }
      1
    }
  }
}
/** Fixture: closures created in a loop capture the loop-local `x` and the outer `y`. */
object TestObjectWithNesting {
  def run(): Int = {
    // Both NonSerializable vars are deliberately unused by the closures.
    var nonSer = new NonSerializable
    var answer = 0
    withSpark(new SparkContext("local", "test")) { sc =>
      val nums = sc.parallelize(Array(1, 2, 3, 4))
      var y = 1
      for (i <- 1 to 4) {
        var nonSer2 = new NonSerializable
        var x = i
        answer += nums.map(_ + x + y).reduce(_ + _)
      }
      answer
    }
  }
}
/** Fixture: nested closures that also call `getY` on the (serializable) enclosing class. */
class TestClassWithNesting(val y: Int) extends Serializable {
  def getY: Int = y
  def run(): Int = {
    // Both NonSerializable vars are deliberately unused by the closures.
    var nonSer = new NonSerializable
    var answer = 0
    withSpark(new SparkContext("local", "test")) { sc =>
      val nums = sc.parallelize(Array(1, 2, 3, 4))
      for (i <- 1 to 4) {
        var nonSer2 = new NonSerializable
        var x = i
        answer += nums.map(_ + x + getY).reduce(_ + _)
      }
      answer
    }
  }
}
/**
* Test whether closures passed in through public APIs are actually cleaned.
*
* We put a return statement in each of these closures as a mechanism to detect whether the
* ClosureCleaner actually cleaned our closure. If it did, then it would throw an appropriate
* exception explicitly complaining about the return statement. Otherwise, we know the
* ClosureCleaner did not actually clean our closure, in which case we should fail the test.
*/
private object TestUserClosuresActuallyCleaned {
  // Every closure below contains a top-level `return`; an API that actually cleans its
  // closure will surface this as a ReturnStatementInClosureException.
  def testMap(rdd: RDD[Int]): Unit = { rdd.map { _ => return; 0 }.count() }
  def testFlatMap(rdd: RDD[Int]): Unit = { rdd.flatMap { _ => return; Seq() }.count() }
  def testFilter(rdd: RDD[Int]): Unit = { rdd.filter { _ => return; true }.count() }
  def testSortBy(rdd: RDD[Int]): Unit = { rdd.sortBy { _ => return; 1 }.count() }
  def testKeyBy(rdd: RDD[Int]): Unit = { rdd.keyBy { _ => return; 1 }.count() }
  def testGroupBy(rdd: RDD[Int]): Unit = { rdd.groupBy { _ => return; 1 }.count() }
  def testMapPartitions(rdd: RDD[Int]): Unit = { rdd.mapPartitions { it => return; it }.count() }
  def testMapPartitionsWithIndex(rdd: RDD[Int]): Unit = {
    rdd.mapPartitionsWithIndex { (_, it) => return; it }.count()
  }
  def testZipPartitions2(rdd: RDD[Int]): Unit = {
    rdd.zipPartitions(rdd) { case (it1, it2) => return; it1 }.count()
  }
  def testZipPartitions3(rdd: RDD[Int]): Unit = {
    rdd.zipPartitions(rdd, rdd) { case (it1, it2, it3) => return; it1 }.count()
  }
  def testZipPartitions4(rdd: RDD[Int]): Unit = {
    rdd.zipPartitions(rdd, rdd, rdd) { case (it1, it2, it3, it4) => return; it1 }.count()
  }
  def testForeach(rdd: RDD[Int]): Unit = { rdd.foreach { _ => return } }
  def testForeachPartition(rdd: RDD[Int]): Unit = { rdd.foreachPartition { _ => return } }
  def testReduce(rdd: RDD[Int]): Unit = { rdd.reduce { case (_, _) => return; 1 } }
  def testTreeReduce(rdd: RDD[Int]): Unit = { rdd.treeReduce { case (_, _) => return; 1 } }
  def testFold(rdd: RDD[Int]): Unit = { rdd.fold(0) { case (_, _) => return; 1 } }
  def testAggregate(rdd: RDD[Int]): Unit = {
    rdd.aggregate(0)({ case (_, _) => return; 1 }, { case (_, _) => return; 1 })
  }
  def testTreeAggregate(rdd: RDD[Int]): Unit = {
    rdd.treeAggregate(0)({ case (_, _) => return; 1 }, { case (_, _) => return; 1 })
  }
  // Test pair RDD functions
  def testCombineByKey(rdd: RDD[(Int, Int)]): Unit = {
    rdd.combineByKey(
      { _ => return; 1 }: Int => Int,
      { case (_, _) => return; 1 }: (Int, Int) => Int,
      { case (_, _) => return; 1 }: (Int, Int) => Int
    ).count()
  }
  def testAggregateByKey(rdd: RDD[(Int, Int)]): Unit = {
    rdd.aggregateByKey(0)({ case (_, _) => return; 1 }, { case (_, _) => return; 1 }).count()
  }
  def testFoldByKey(rdd: RDD[(Int, Int)]): Unit = { rdd.foldByKey(0) { case (_, _) => return; 1 } }
  def testReduceByKey(rdd: RDD[(Int, Int)]): Unit = { rdd.reduceByKey { case (_, _) => return; 1 } }
  def testReduceByKeyLocally(rdd: RDD[(Int, Int)]): Unit = {
    rdd.reduceByKeyLocally { case (_, _) => return; 1 }
  }
  def testMapValues(rdd: RDD[(Int, Int)]): Unit = { rdd.mapValues { _ => return; 1 } }
  def testFlatMapValues(rdd: RDD[(Int, Int)]): Unit = { rdd.flatMapValues { _ => return; Seq() } }
  // Test async RDD actions
  def testForeachAsync(rdd: RDD[Int]): Unit = { rdd.foreachAsync { _ => return } }
  def testForeachPartitionAsync(rdd: RDD[Int]): Unit = { rdd.foreachPartitionAsync { _ => return } }
  // Test SparkContext runJob
  def testRunJob1(sc: SparkContext): Unit = {
    val rdd = sc.parallelize(1 to 10, 10)
    sc.runJob(rdd, { (ctx: TaskContext, iter: Iterator[Int]) => return; 1 } )
  }
  def testRunJob2(sc: SparkContext): Unit = {
    val rdd = sc.parallelize(1 to 10, 10)
    sc.runJob(rdd, { iter: Iterator[Int] => return; 1 } )
  }
  def testRunApproximateJob(sc: SparkContext): Unit = {
    val rdd = sc.parallelize(1 to 10, 10)
    val evaluator = new CountEvaluator(1, 0.5)
    sc.runApproximateJob(
      rdd, { (ctx: TaskContext, iter: Iterator[Int]) => return; 1L }, evaluator, 1000)
  }
  def testSubmitJob(sc: SparkContext): Unit = {
    val rdd = sc.parallelize(1 to 10, 10)
    sc.submitJob(
      rdd,
      { _ => return; 1 }: Iterator[Int] => Int,
      Seq.empty,
      { case (_, _) => return }: (Int, Int) => Unit,
      { return }
    )
  }
}
/**
 * Fixture for null-value creation: brings one local of every primitive type into the
 * nested closure so each becomes a closure-constructor parameter that the cleaner must
 * be able to fill with a correctly-typed default value.
 */
class TestCreateNullValue {
  var x = 5
  def getX: Int = x
  def run(): Unit = {
    val bo: Boolean = true
    val c: Char = '1'
    val b: Byte = 1
    val s: Short = 1
    val i: Int = 1
    val l: Long = 1
    val f: Float = 1
    val d: Double = 1
    // Bring in all primitive types into the closure such that they become
    // parameters of the closure constructor. This allows us to test whether
    // null values are created correctly for each type.
    val nestedClosure = () => {
      // scalastyle:off println
      if (s.toString == "123") { // Don't really output them to avoid noisy
        println(bo)
        println(c)
        println(b)
        println(s)
        println(i)
        println(l)
        println(f)
        println(d)
      }
      val closure = () => {
        println(getX)
      }
      // scalastyle:on println
      ClosureCleaner.clean(closure)
    }
    nestedClosure()
  }
}
| Panos-Bletsos/spark-cost-model-optimizer | core/src/test/scala/org/apache/spark/util/ClosureCleanerSuite.scala | Scala | apache-2.0 | 14,347 |
/*
package.scala
Shared code
*/
/** Shared rendering helpers and model definitions for the reaction-diffusion examples. */
package object rd {
  import smfsb._
  import breeze.linalg.{Vector => BVec, _}
  import breeze.numerics._
  import scalafx.scene.image.WritableImage
  import scalafx.scene.paint._
  // Renders a 2-species state grid: species index 1 drives the red channel, species
  // index 0 the blue channel, both normalised by the global maximum over all cells.
  // Assumes cell values are non-negative — TODO confirm.
  def toSfxI(im: PMatrix[DenseVector[Double]]): WritableImage = {
    val wi = new WritableImage(im.c, im.r)
    val pw = wi.pixelWriter
    // Global maximum across every cell and species, used as the normalisation factor.
    val m = im.data.aggregate(0.0)((acc,v) => math.max(acc,max(v)), math.max(_,_))
    val rsi = im map (_ / m)
    (0 until im.c).par foreach (i =>
      (0 until im.r).par foreach (j =>
        pw.setColor(i, j, Color.rgb((rsi(i,j)(1)*255).toInt, 0, (rsi(i,j)(0)*255).toInt))
      ))
    wi
  }
  // Integer-state variant: converts to Double and delegates to toSfxI.
  def toSfxIi(im: PMatrix[DenseVector[Int]]): WritableImage =
    toSfxI(im map (v => v map (_.toDouble)))
  // SIR epidemic model as an unmarked SPN: S + I -> 2I at rate p(0) * S * I,
  // and I -> R at rate p(1) * I (per the pre/post matrices and hazard below).
  def sir[S: State](p: DenseVector[Double] = DenseVector(0.1, 0.5)): Spn[S] =
    UnmarkedSpn[S](
      List("S", "I", "R"),
      DenseMatrix((1, 1, 0), (0, 1, 0)),
      DenseMatrix((0, 2, 0), (0, 0, 1)),
      (x, t) => {
        val xd = x.toDvd
        DenseVector(
          xd(0) * xd(1) * p(0), xd(1) * p(1)
        )}
    )
  // Three-species variant of toSfxI: species 1 -> red, species 0 -> green, species 2 -> blue.
  def toSfxI3(im: PMatrix[DenseVector[Double]]): WritableImage = {
    val wi = new WritableImage(im.c, im.r)
    val pw = wi.pixelWriter
    val m = im.data.aggregate(0.0)((acc,v) => math.max(acc,max(v)), math.max(_,_))
    val rsi = im map (_ / m)
    (0 until im.c).par foreach (i =>
      (0 until im.r).par foreach (j =>
        pw.setColor(i, j, Color.rgb((rsi(i,j)(1)*255).toInt, (rsi(i,j)(0)*255).toInt, (rsi(i,j)(2)*255).toInt))
      ))
    wi
  }
}
// eof
| darrenjw/blog | reaction-diffusion/src/main/scala/rd/package.scala | Scala | apache-2.0 | 1,547 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v2
import uk.gov.hmrc.ct.box.{Calculated, CtBoxIdentifier, CtInteger}
import uk.gov.hmrc.ct.computations.HmrcAccountingPeriod
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
import uk.gov.hmrc.ct.ct600.v2.calculations.CorporationTaxCalculator
/** CT600 (version 2) box B43: the financial year the return relates to. */
case class B43(value: Int) extends CtBoxIdentifier("Financial Year") with CtInteger
/** Derives B43 from the accounting period boundaries (boxes CP1 and CP2). */
object B43 extends CorporationTaxCalculator with Calculated[B43, ComputationsBoxRetriever] {
  override def calculate(fieldValueRetriever: ComputationsBoxRetriever): B43 =
    financialYear1(
      HmrcAccountingPeriod(fieldValueRetriever.retrieveCP1(), fieldValueRetriever.retrieveCP2())
    )
}
| ahudspith-equalexperts/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v2/B43.scala | Scala | apache-2.0 | 1,284 |
import scala.io.Source
object AppendAndDelete {

  /**
   * Returns true when `s` can be transformed into `t` using EXACTLY `k` operations,
   * where an operation either appends one character or deletes the last character
   * (deleting from an empty string is a no-op).
   *
   * Feasibility conditions:
   *  - k >= |s| + |t|: delete everything (padding with no-op deletes on the empty
   *    string), then append all of `t`; or
   *  - required <= k and (k - required) is even, where `required` is the minimal
   *    number of operations (delete down to the common prefix, then append the rest
   *    of `t`): spare operations must pair up as delete+append round trips.
   *
   * The original implementation demanded `required == k` (and `k >= 2 * |t|` when
   * s == t), rejecting valid cases such as s = "aba", t = "aba", k = 7.
   */
  def canConvert(s: String, t: String, k: Int): Boolean = {
    val prefixLength = s.zip(t).takeWhile { case (a, b) => a == b }.length
    val required = (s.length - prefixLength) + (t.length - prefixLength)
    if (k >= s.length + t.length) true
    else required <= k && (k - required) % 2 == 0
  }

  /** Reads s, t and k from stdin and prints "Yes"/"No" per the judge's format. */
  def main(args: Array[String]): Unit = {
    val lines = Source.stdin.getLines().toList
    val s = lines(0)
    val t = lines(1)
    val k = lines(2).toInt
    println(if (canConvert(s, t, k)) "Yes" else "No")
  }
}
package controllers
import play.api.mvc._
import play.api.libs.concurrent.Akka
import actors.UserChannelsActor
import akka.actor.{ Props }
import scala.concurrent.duration._
import play.api.Play.current
import models.{ StartSearch, SearchFeed }
import play.api.libs.EventSource
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.ExecutionContext
import ExecutionContext.Implicits.global
import models.StartSearch
/**
 * HTTP entry points: a few static pages plus a Server-Sent-Events search feed driven
 * by the actor selection at "/user/channels".
 */
object Application extends Controller {
  // Upper bound for the `ask` in `search` below.
  implicit val timeout = Timeout(5 seconds)
  // Assumes an actor (presumably a UserChannelsActor) was registered at this path
  // during application startup — TODO confirm.
  val channels = Akka.system.actorSelection("/user/channels")
  def home = Action {
    Ok(views.html.home())
  }
  // Starts a search via the channels actor and streams the returned enumerator to the
  // client as Server-Sent Events. NOTE(review): any reply other than SearchFeed fails
  // the Future with a MatchError.
  def search(searchString: String) = Action.async {
    (channels ? StartSearch(searchString = searchString)).map {
      case SearchFeed(out) => Ok.chunked(out &> EventSource()).as("text/event-stream")
    }
  }
  def real = Action {
    Ok(views.html.real())
  }
  def mongofs = Action {
    Ok(views.html.mongofs())
  }
  def cookbook = Action {
    Ok(views.html.cookbook("Cookbook"))
  }
}
package de.mukis
import akka.actor.{ ActorSystem, Props, Actor, ActorLogging }
import scala.concurrent.duration._
import scala.language.postfixOps
/** Replies "Pong" to every "Ping" message it receives, logging each one. */
class Pong extends Actor with ActorLogging {
  def receive = {
    case "Ping" =>
      log.info( "Received Ping" )
      sender ! "Pong"
  }
}
/**
 * Sends itself a "Tick" every second (after an initial 500 ms delay) and plays
 * ping-pong with a child [[Pong]] actor on each tick.
 */
class Ping extends Actor with ActorLogging {
  import context.dispatcher
  val pong = context.actorOf( Props[Pong] )
  // NOTE(review): this schedule is never cancelled; consider cancelling it in postStop()
  // so ticks stop when the actor does.
  val tick = context.system.scheduler.schedule( 500 millis, 1 second, self, "Tick" )
  // Logs the TestApp.myConfig setting from the actor system's configuration at startup.
  log.info( "Configuration value: " + context.system.settings.config.getConfig( "TestApp" ).getString( "myConfig" ) )
  def receive = {
    case "Tick" =>
      log.info( "Received Tick")
      pong ! "Ping"
    case "Pong" =>
      log.info( "Received Pong")
  }
}
package build.unstable.sonicd.source
import akka.actor.ActorContext
import build.unstable.sonic.model.{DataSource, Query, RequestContext}
import build.unstable.sonicd.SonicdLogging
import build.unstable.sonicd.source.SonicdSource._
import build.unstable.sonicd.system.actor.SonicdController._
import spray.json.JsonFormat
import build.unstable.sonic.JsonProtocol._
abstract class SonicdSource(query: Query, actorContext: ActorContext, context: RequestContext)
  extends DataSource(query, actorContext, context) with SonicdLogging {

  /**
   * Resolves a mandatory key from the query's sonicd config.
   * @throws SonicdSource.MissingConfigurationException if the key is absent or null.
   */
  def getConfig[T: JsonFormat](key: String): T = {
    val resolved = query.sonicdConfig.fields.get(key).flatMap(_.convertTo[Option[T]])
    val value = resolved match {
      case Some(v) => v
      case None => throw new MissingConfigurationException(key)
    }
    log.debug("getConfig({})={}", key, value)
    value
  }

  /** Resolves an optional key from the query's sonicd config; None if absent or null. */
  def getOption[T: JsonFormat](key: String): Option[T] = {
    val resolved = query.sonicdConfig.fields.get(key).flatMap(_.convertTo[Option[T]])
    log.debug("getOption({})={}", key, resolved)
    resolved
  }
}
object SonicdSource {
  /** Signals that a required key was not present in the query's sonicd config. */
  class MissingConfigurationException(missing: String) extends Exception(s"config is missing '$missing' field")
}
| ernestrc/sonicd | server/src/main/scala/build/unstable/sonicd/source/SonicdSource.scala | Scala | mit | 1,137 |
package dsl
import ch.epfl.yinyang._
import ch.epfl.yinyang.typetransformers._
import scala.language.experimental.macros
import scala.reflect.macros.blackbox.Context
package object la {
  // DSL entry point: `la { ... }` deep-embeds the block at compile time via the
  // Yin-Yang macro transformation below.
  def la[T](block: => T): T = macro implementations.liftRep[T]
  object implementations {
    // Rewrites the AST of `block` against the "dsl.la.rep.VectorDSL" deep embedding,
    // lifting types through the generic type transformer's "R" IR type constructor.
    // The options map disables shallow embedding and feature analysis, and enables
    // function/val virtualization — presumably matching VectorDSL's expectations;
    // confirm against the Yin-Yang documentation.
    def liftRep[T](c: Context)(block: c.Expr[T]): c.Expr[T] =
      YYTransformer[c.type, T](c)(
        "dsl.la.rep.VectorDSL",
        new GenericTypeTransformer[c.type](c) {
          override val IRType = "R"
        },
        None, None,
        Map(
          "shallow" -> false,
          "virtualizeFunctions" -> true,
          "virtualizeVal" -> true,
          "debug" -> 0,
          "featureAnalysing" -> false,
          "ascriptionTransforming" -> false))(block)
  }
}
package $organization$.$app_prefix$.lib
import org.scalatest.{FunSpec, Matchers}
// giter8 template source: the dollar-delimited placeholders are substituted when the
// template is applied, yielding a FunSpec test for the generated library object.
class $app_prefix;format="Camel"$LibraryTest extends FunSpec with Matchers {
  describe($app_prefix;format="Camel"$Library.getClass.getSimpleName){
    it("should create message"){
      val message = $app_prefix;format="Camel"$Library createMessageFor "earthlings"
      message should be("hello, earthlings!")
    }
  }
}
| x7c1/Slate | slate-template/src/main/g8/$app_prefix$-lib/src/test/scala/$organization__packaged$/$app_prefix__camel$/lib/$app_prefix__Camel$LibraryTest.scala | Scala | mit | 409 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.logging
import java.io._
import java.util.EnumSet
import java.util.concurrent.{ScheduledExecutorService, TimeUnit}
import org.apache.commons.io.FileUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, FSDataOutputStream, Path}
import org.apache.hadoop.fs.permission.FsPermission
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream
import org.apache.log4j.{FileAppender => Log4jFileAppender, _}
import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.util.{ThreadUtils, Utils}
/**
 * Mirrors the driver's log4j output into a local file and, once `startSync` is called,
 * continuously uploads that file to the DFS directory configured via DRIVER_LOG_DFS_DIR.
 * `stop()` detaches the appender, flushes remaining bytes and deletes the local copy.
 */
private[spark] class DriverLogger(conf: SparkConf) extends Logging {
  // Bytes copied to DFS per write while draining the local file.
  private val UPLOAD_CHUNK_SIZE = 1024 * 1024
  private val UPLOAD_INTERVAL_IN_SECS = 5
  private val DEFAULT_LAYOUT = "%d{yy/MM/dd HH:mm:ss.SSS} %t %p %c{1}: %m%n"
  // Octal 770: readable/writable only by owner and group.
  private val LOG_FILE_PERMISSIONS = new FsPermission(Integer.parseInt("770", 8).toShort)
  private val localLogFile: String = FileUtils.getFile(
    Utils.getLocalDir(conf),
    DriverLogger.DRIVER_LOG_DIR,
    DriverLogger.DRIVER_LOG_FILE).getAbsolutePath()
  private var writer: Option[DfsAsyncWriter] = None
  // Start capturing driver logs locally as soon as this object is constructed.
  addLogAppender()
  // Attaches a log4j FileAppender writing to `localLogFile`. Layout preference:
  // explicit DRIVER_LOG_LAYOUT config, else the root logger's first appender's layout,
  // else DEFAULT_LAYOUT.
  private def addLogAppender(): Unit = {
    val appenders = LogManager.getRootLogger().getAllAppenders()
    val layout = if (conf.contains(DRIVER_LOG_LAYOUT)) {
      new PatternLayout(conf.get(DRIVER_LOG_LAYOUT).get)
    } else if (appenders.hasMoreElements()) {
      appenders.nextElement().asInstanceOf[Appender].getLayout()
    } else {
      new PatternLayout(DEFAULT_LAYOUT)
    }
    val fa = new Log4jFileAppender(layout, localLogFile)
    fa.setName(DriverLogger.APPENDER_NAME)
    LogManager.getRootLogger().addAppender(fa)
    logInfo(s"Added a local log appender at: ${localLogFile}")
  }
  // Begins the periodic local-file -> DFS copy; failures are logged, not rethrown.
  def startSync(hadoopConf: Configuration): Unit = {
    try {
      // Setup a writer which moves the local file to hdfs continuously
      val appId = Utils.sanitizeDirName(conf.getAppId)
      writer = Some(new DfsAsyncWriter(appId, hadoopConf))
    } catch {
      case e: Exception =>
        logError(s"Could not persist driver logs to dfs", e)
    }
  }
  // Detaches the appender, flushes/closes the DFS writer, then deletes the local log dir.
  def stop(): Unit = {
    try {
      val fa = LogManager.getRootLogger.getAppender(DriverLogger.APPENDER_NAME)
      LogManager.getRootLogger().removeAppender(DriverLogger.APPENDER_NAME)
      Utils.tryLogNonFatalError(fa.close())
      writer.foreach(_.closeWriter())
    } catch {
      case e: Exception =>
        logError(s"Error in persisting driver logs", e)
    } finally {
      Utils.tryLogNonFatalError {
        JavaUtils.deleteRecursively(FileUtils.getFile(localLogFile).getParentFile())
      }
    }
  }
  // Visible for testing
  private[spark] class DfsAsyncWriter(appId: String, hadoopConf: Configuration) extends Runnable
    with Logging {
    private var streamClosed = false
    private var inStream: InputStream = null
    private var outputStream: FSDataOutputStream = null
    private val tmpBuffer = new Array[Byte](UPLOAD_CHUNK_SIZE)
    private var threadpool: ScheduledExecutorService = _
    init()
    // Opens the local file for reading and the DFS file for writing, then schedules this
    // Runnable to drain local -> DFS every UPLOAD_INTERVAL_IN_SECS seconds.
    private def init(): Unit = {
      val rootDir = conf.get(DRIVER_LOG_DFS_DIR).get
      val fileSystem: FileSystem = new Path(rootDir).getFileSystem(hadoopConf)
      if (!fileSystem.exists(new Path(rootDir))) {
        throw new RuntimeException(s"${rootDir} does not exist." +
          s" Please create this dir in order to persist driver logs")
      }
      val dfsLogFile: String = FileUtils.getFile(rootDir, appId
        + DriverLogger.DRIVER_LOG_FILE_SUFFIX).getAbsolutePath()
      try {
        inStream = new BufferedInputStream(new FileInputStream(localLogFile))
        outputStream = SparkHadoopUtil.createFile(fileSystem, new Path(dfsLogFile),
          conf.get(DRIVER_LOG_ALLOW_EC))
        fileSystem.setPermission(new Path(dfsLogFile), LOG_FILE_PERMISSIONS)
      } catch {
        case e: Exception =>
          // Close whichever stream was opened before the failure.
          JavaUtils.closeQuietly(inStream)
          JavaUtils.closeQuietly(outputStream)
          throw e
      }
      threadpool = ThreadUtils.newDaemonSingleThreadScheduledExecutor("dfsSyncThread")
      threadpool.scheduleWithFixedDelay(this, UPLOAD_INTERVAL_IN_SECS, UPLOAD_INTERVAL_IN_SECS,
        TimeUnit.SECONDS)
      logInfo(s"Started driver log file sync to: ${dfsLogFile}")
    }
    // Copies all currently-available bytes from the local file to DFS, then flushes
    // (hsync with all flags for HDFS streams, hflush otherwise).
    def run(): Unit = {
      if (streamClosed) {
        return
      }
      try {
        var remaining = inStream.available()
        val hadData = remaining > 0
        while (remaining > 0) {
          val read = inStream.read(tmpBuffer, 0, math.min(remaining, UPLOAD_CHUNK_SIZE))
          outputStream.write(tmpBuffer, 0, read)
          remaining -= read
        }
        if (hadData) {
          outputStream match {
            case hdfsStream: HdfsDataOutputStream =>
              hdfsStream.hsync(EnumSet.allOf(classOf[HdfsDataOutputStream.SyncFlag]))
            case other =>
              other.hflush()
          }
        }
      } catch {
        case e: Exception => logError("Failed writing driver logs to dfs", e)
      }
    }
    // Drains any remaining bytes, then closes both streams exactly once.
    private def close(): Unit = {
      if (streamClosed) {
        return
      }
      try {
        // Write all remaining bytes
        run()
      } finally {
        try {
          streamClosed = true
          inStream.close()
          outputStream.close()
        } catch {
          case e: Exception =>
            logError("Error in closing driver log input/output stream", e)
        }
      }
    }
    // Runs close() on the sync thread (so it serializes with in-flight copies), then
    // shuts the scheduler down, waiting up to a minute for completion.
    def closeWriter(): Unit = {
      try {
        threadpool.execute(() => DfsAsyncWriter.this.close())
        threadpool.shutdown()
        threadpool.awaitTermination(1, TimeUnit.MINUTES)
      } catch {
        case e: Exception =>
          logError("Error in shutting down threadpool", e)
      }
    }
  }
}
private[spark] object DriverLogger extends Logging {
  val DRIVER_LOG_DIR = "__driver_logs__"
  val DRIVER_LOG_FILE = "driver.log"
  val DRIVER_LOG_FILE_SUFFIX = "_" + DRIVER_LOG_FILE
  val APPENDER_NAME = "_DriverLogAppender"

  /**
   * Creates a DriverLogger when driver-log persistence is enabled and the application
   * runs in client mode; otherwise returns None, warning when the DFS directory is the
   * missing piece and logging errors when construction itself fails.
   */
  def apply(conf: SparkConf): Option[DriverLogger] = {
    val enabled = conf.get(DRIVER_LOG_PERSISTTODFS) && Utils.isClientMode(conf)
    if (!enabled) {
      None
    } else if (!conf.contains(DRIVER_LOG_DFS_DIR)) {
      logWarning(s"Driver logs are not persisted because" +
        s" ${DRIVER_LOG_DFS_DIR.key} is not configured")
      None
    } else {
      try {
        Some(new DriverLogger(conf))
      } catch {
        case e: Exception =>
          logError("Could not add driver logger", e)
          None
      }
    }
  }
}
| maropu/spark | core/src/main/scala/org/apache/spark/util/logging/DriverLogger.scala | Scala | apache-2.0 | 7,498 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.quantized
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.Table
import com.intel.analytics.bigdl.utils.serializer._
import com.intel.analytics.bigdl.utils.serializer.converters.TensorConverter
import com.intel.analytics.bigdl.serialization.Bigdl.{AttrValue, BigDLModule}
import scala.reflect.ClassTag
/**
 * Shared (de)serialization skeleton for quantized modules. Concrete serializers must
 * supply the weight handling; bias and "other attributes" handling get default
 * implementations here (bias is optional in both directions).
 */
trait QuantSerializer extends ModuleSerializable {
  def serializeWeight[T: ClassTag](context: SerializeContext[T],
    modelBuilder: BigDLModule.Builder)(implicit ev: TensorNumeric[T]): Unit
  // Copies the module's bias tensor (when present and non-null) into the protobuf builder.
  def serializeBias[T: ClassTag](context: SerializeContext[T],
    modelBuilder: BigDLModule.Builder)(implicit ev: TensorNumeric[T]): Unit = {
    val moduleData = context.moduleData
    val paramTable : Table = moduleData.module.getParametersTable()
    val moduleName = moduleData.module.getName()
    if (paramTable != null && paramTable.contains(moduleName)) {
      val modulePramTable: Table = paramTable(moduleName)
      val bias: Tensor[T] = if (modulePramTable.contains("bias")) {
        modulePramTable("bias")
      } else {
        null
      }
      if (bias != null) {
        val biasAttr = AttrValue.newBuilder
        TensorConverter.setAttributeValue(context, biasAttr, bias)
        modelBuilder.setBias(biasAttr.getTensorValue)
      }
    }
  }
  // Hook for serializing extra attributes; no-op by default.
  def serializeOthers[T: ClassTag](context: SerializeContext[T],
    modelBuilder: BigDLModule.Builder)(implicit ev: TensorNumeric[T]): Unit = {
  }
  def loadWeight[T: ClassTag](context: DeserializeContext,
    module: ModuleData[T])(implicit ev: TensorNumeric[T]): Unit
  // Restores the bias tensor (when the module declares one) from the protobuf message,
  // copying the deserialized values into the module's existing bias tensor.
  def loadBias[T: ClassTag](context: DeserializeContext,
    moduleData: ModuleData[T])(implicit ev: TensorNumeric[T]): Unit = {
    val moduleName = moduleData.module.getName()
    val paramTable : Table = moduleData.module.getParametersTable
    if (paramTable != null && paramTable.contains(moduleName)) {
      val modulePramTable : Table = paramTable(moduleName)
      val bias : Tensor[T] = if (modulePramTable.contains("bias")) {
        modulePramTable("bias")
      } else {
        null
      }
      if (bias != null) {
        val attrValue = AttrValue.newBuilder
        attrValue.setTensorValue(context.bigdlModule.getBias)
        // NOTE(review): this local `bias` shadows the outer `bias` checked above.
        val bias = TensorConverter.getAttributeValue(context, attrValue.build)
        modulePramTable("bias").asInstanceOf[Tensor[T]].copy(bias.asInstanceOf[Tensor[T]])
      }
    }
  }
  // Hook for loading extra attributes; no-op by default.
  def loadOthers[T: ClassTag](context: DeserializeContext,
    module: ModuleData[T])(implicit ev: TensorNumeric[T]): Unit = {
  }
  // Serialization pipeline: weight, then bias, then extras (ProtoStorageType only).
  override protected def copyFromBigDL[T: ClassTag](context: SerializeContext[T],
    modelBuilder: BigDLModule.Builder)(implicit ev: TensorNumeric[T]): Unit = {
    val storageType = context.storageType
    if (storageType == ProtoStorageType) {
      serializeWeight(context, modelBuilder)
      serializeBias(context, modelBuilder)
      serializeOthers(context, modelBuilder)
    } else {
      throw new IllegalArgumentException(s"$storageType not supported!")
    }
  }
  // Deserialization pipeline mirroring copyFromBigDL: weight, then bias, then extras.
  override protected def copy2BigDL[T: ClassTag](context: DeserializeContext, module: ModuleData[T])
    (implicit ev: TensorNumeric[T]): Unit = {
    loadWeight(context, module)
    loadBias(context, module)
    loadOthers(context, module)
  }
}
| wzhongyuan/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/quantized/QuantSerializer.scala | Scala | apache-2.0 | 3,976 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.planner.logical.plans
import org.neo4j.cypher.internal.frontend.v2_3.ast.Expression
import org.neo4j.cypher.internal.compiler.v2_3.planner.{CardinalityEstimation, PlannerQuery}
/**
 * Leaf logical plan that fetches directed relationships directly by id
 * (the `seekable` id expressions), binding the relationship identifier and
 * its start/end node identifiers.
 */
case class DirectedRelationshipByIdSeek(idName: IdName,
                                        relIds: SeekableArgs,
                                        startNode: IdName,
                                        endNode: IdName,
                                        argumentIds: Set[IdName])(val solved: PlannerQuery with CardinalityEstimation)
  extends LogicalLeafPlan {

  // Symbols made available downstream: the incoming arguments plus the
  // relationship identifier and both endpoint identifiers bound here.
  def availableSymbols: Set[IdName] = argumentIds ++ Set(idName, startNode, endNode)

  // Rewrites the seekable id expressions; `f` receives the argument ids in scope.
  override def mapExpressions(f: (Set[IdName], Expression) => Expression): LogicalPlan =
    copy(relIds = relIds.mapValues(f(argumentIds, _)))(solved)
}
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/planner/logical/plans/DirectedRelationshipByIdSeek.scala | Scala | apache-2.0 | 1,645 |
package kornell.server.content
import java.io.InputStream
import java.util.logging.Logger

import com.amazonaws.HttpMethod
import com.amazonaws.auth.BasicAWSCredentials
import com.amazonaws.services.s3.AmazonS3Client
import com.amazonaws.services.s3.model.{DeleteObjectsRequest, GeneratePresignedUrlRequest, ObjectMetadata}

import kornell.core.entity.ContentRepository
import kornell.core.util.StringUtils._
import kornell.server.jdbc.repository.{ContentRepositoriesRepo, InstitutionRepo}

import org.joda.time.DateTime

import scala.collection.JavaConverters._
import scala.io.{BufferedSource, Source}
import scala.util.Try
import scala.util.control.NonFatal
/**
 * [[SyncContentManager]] backed by an Amazon S3 bucket, configured by the
 * given [[ContentRepository]] (bucket name, region, optional credentials,
 * key prefix).
 */
class S3ContentManager(repo: ContentRepository)
  extends SyncContentManager {
  val logger: Logger = Logger.getLogger(classOf[S3ContentManager].getName)

  // Use explicit credentials when the repository carries them; otherwise fall
  // back to the AWS SDK's default credential provider chain.
  lazy val s3: AmazonS3Client = if (isSome(repo.getAccessKeyId))
    new AmazonS3Client(new BasicAWSCredentials(repo.getAccessKeyId, repo.getSecretAccessKey))
  else
    new AmazonS3Client

  /** Reads the object at `keys` as a UTF-8 text source. */
  def source(keys: String*): Try[BufferedSource] =
    inputStream(keys: _*).map { Source.fromInputStream(_, "UTF-8") }

  /** Opens a stream over the object at `keys` (repository prefix applied by `url`). */
  def inputStream(keys: String*): Try[InputStream] = Try {
    val fqkn = url(keys: _*)
    logger.finest(s"loading key [ ${fqkn} ]")
    try {
      s3.getObject(repo.getBucketName, fqkn).getObjectContent
    } catch {
      case NonFatal(e) =>
        // Log a ready-to-run AWS CLI command to ease debugging, then rethrow
        // so the failure is still captured by the surrounding Try.
        val cmd = s"aws s3api get-object --bucket ${repo.getBucketName} --key ${fqkn} --region ${repo.getRegion} file.out"
        logger.warning("Could not load object. Try [ " + cmd + " ]")
        throw e
    }
  }

  /**
   * Uploads `value` under `keys` with the given content headers and user metadata.
   * NOTE(review): `contentLength` is currently unused (kept for interface
   * compatibility); the SDK buffers the stream to compute the length itself.
   */
  def put(value: InputStream, contentLength: Int, contentType: String, contentDisposition: String, metadataMap: Map[String, String], keys: String*): Unit = {
    val metadata = new ObjectMetadata()
    metadata.setUserMetadata(metadataMap.asJava)
    Option(contentType).foreach { metadata.setContentType }
    Option(contentDisposition).foreach { metadata.setContentDisposition }
    s3.putObject(repo.getBucketName, url(keys: _*), value, metadata)
  }

  /** Deletes a single object. */
  def delete(keys: String*): Unit = {
    // keys we support delete for already have repo prefix appended
    logger.info("Trying to delete object [ " + mkurl("", keys: _*) + " ]")
    s3.deleteObject(repo.getBucketName, mkurl("", keys: _*))
  }

  /**
   * Best-effort recursive delete of every object under the given folder key.
   * NOTE(review): `listObjects` returns at most one page of results; folders
   * with more keys than one listing page may not be fully deleted — verify.
   */
  def deleteFolder(keys: String*): Unit = {
    val path = url(keys: _*)
    logger.info("Trying to delete folder object [ " + path + " ]")
    val objects = s3.listObjects(repo.getBucketName, path)
    val keyPaths = objects.getObjectSummaries.asScala.map(f => f.getKey)
    val deleteRequest = new DeleteObjectsRequest(repo.getBucketName).withKeys(keyPaths: _*)
    try {
      s3.deleteObjects(deleteRequest)
    } catch {
      // Deliberately best-effort, but only swallow recoverable errors: the
      // previous bare Throwable handler would also have hidden fatal VM errors.
      case NonFatal(_) =>
        logger.info("Could not delete folder object. [ " + path + " ]")
    }
  }

  /** Returns a presigned PUT URL for `path`, valid for one minute. */
  def getUploadUrl(path: String, contentType: String): String = {
    logger.fine(path)
    val presignedRequest = new GeneratePresignedUrlRequest(repo.getBucketName, path)
    presignedRequest.setMethod(HttpMethod.PUT)
    presignedRequest.setExpiration(new DateTime().plusMinutes(1).toDate)
    presignedRequest.setContentType(contentType)
    s3.generatePresignedUrl(presignedRequest).toString
  }

  /** Key prefix under which this repository stores its objects. */
  def getPrefix: String = repo.getPrefix
}
object S3ContentManager {
  /**
   * Builds an [[AmazonS3Client]] for the assets repository of the given
   * institution: explicit credentials when the repository has an access key,
   * otherwise the SDK's default provider chain.
   */
  def getAmazonS3Client(institutionUUID: String): AmazonS3Client = {
    val institution = InstitutionRepo(institutionUUID).get
    val repo = ContentRepositoriesRepo.firstRepository(institution.getAssetsRepositoryUUID).get
    if (isSome(repo.getAccessKeyId))
      new AmazonS3Client(new BasicAWSCredentials(repo.getAccessKeyId, repo.getSecretAccessKey))
    else
      new AmazonS3Client
  }
}
| Craftware/Kornell | kornell-api/src/main/scala/kornell/server/content/S3ContentManager.scala | Scala | apache-2.0 | 3,706 |
package net.sansa_stack.query.spark.ontop.kryo
import com.esotericsoftware.kryo.io.{Input, Output}
import com.esotericsoftware.kryo.{Kryo, Serializer}
import it.unibz.inf.ontop.model.`type`.TypeFactory
import net.sansa_stack.query.spark.ontop.OntopConnection
/**
 * Kryo serializer for Ontop [[TypeFactory]] instances. A TypeFactory is not
 * actually serialized: only its class is written, and on read the factory is
 * re-fetched from the local [[OntopConnection]] session cache keyed by
 * `ontopSessionID`.
 */
class TypeFactorySerializer(ontopSessionID: String)
  extends Serializer[TypeFactory](false, true) {

  // Only the class token is written; the factory itself lives in the session cache.
  override def write(kryo: Kryo, output: Output, obj: TypeFactory): Unit = {
    kryo.writeClass(output, obj.getClass)
  }

  // Consumes the class token, then resolves the factory from the session cache.
  // Falls back to the first cached session when `ontopSessionID` is null.
  // NOTE(review): the trailing `.get` and `configs.head` throw if the cache is
  // empty or the session id is unknown — presumably the cache is always
  // populated before deserialization; verify against callers.
  override def read(kryo: Kryo, input: Input, `type`: Class[TypeFactory]): TypeFactory = {
    kryo.readClass(input)
    OntopConnection.configs.get(Option(ontopSessionID).getOrElse(OntopConnection.configs.head._1)).get.getTypeFactory
  }
}
| SANSA-Stack/SANSA-RDF | sansa-query/sansa-query-spark/src/main/scala/net/sansa_stack/query/spark/ontop/kryo/TypeFactorySerializer.scala | Scala | apache-2.0 | 730 |
package bubblewrap
import org.scalatest.FlatSpec
import org.scalatest.Matchers.{be, convertToAnyShouldWrapper}
import TestUtils.{readAsBytes, readAsString}
// Exercises Content's charset handling and gzip decompression across
// combinations of contentType / contentCharset / contentEncoding headers.
class ContentSpec extends FlatSpec{
  "Content" should "use the content encoding to encode the bytes back to string" in {
    // Korean text round-tripped through a non-UTF-8 charset (EUC-KR).
    val koreanString = "동서게임 MS정품유선컨트롤러/PCGTA5호환 - 11번가"
    val content = Content(WebUrl("http://www.example.com/dummy"),koreanString.getBytes("EUC-KR"),contentCharset = Some("EUC-KR"))
    content.asString should be(koreanString)
  }

  it should "use UTF-8 incase no contentCharset is given" in {
    val koreanString = "동서게임 MS정품유선컨트롤러/PCGTA5호환 - 11번가"
    val content = Content(WebUrl("http://www.example.com/dummy"),koreanString.getBytes("UTF-8"))
    content.asString should be(koreanString)
  }

  it should "read Gzipped content (with UTF-8 encoding & text/html content type) to string" in {
    // gzip signalled via the contentEncoding header.
    val encoding = "UTF-8"
    val gzipped = readAsBytes("/fixtures/1-gzipped.html")
    val ungzipped = readAsBytes("/fixtures/1-ungzipped.html")
    val content = Content(WebUrl("http://www.example.com/dummy"), gzipped, contentType = Some("text/html; charset=utf-8"), contentCharset = Some(encoding), contentEncoding = Some("gzip"))
    content.asString should be (new String(ungzipped, encoding))
  }

  it should "read Gzipped content (with UTF-8 encoding & gzip content type) to string" in {
    // gzip signalled via the contentType header instead (headers swapped by some servers).
    val encoding = "UTF-8"
    val gzipped = readAsBytes("/fixtures/1-gzipped.html")
    val ungzipped = readAsBytes("/fixtures/1-ungzipped.html")
    val content = Content(WebUrl("http://www.example.com/dummy"), gzipped, contentType = Some("gzip"), contentCharset = Some(encoding), contentEncoding = Some("text/html; charset=UTF-8"))
    content.asString should be (new String(ungzipped, encoding))
  }

  it should "read Gzipped content (with ISO-8859-1 encoding) to string" in {
    val encoding = "ISO-8859-1"
    val gzipped = readAsBytes("/fixtures/2-gzipped-iso8859-1.html")
    val ungzipped = readAsBytes("/fixtures/2-ungzipped-iso8859-1.html")
    val content = Content(WebUrl("http://www.example.com/dummy"), gzipped, contentType = Some("application/gzip; charset=iso-8859-1"), contentCharset = Some(encoding), contentEncoding = None)
    content.asString should be (new String(ungzipped, encoding))
  }

  it should "read Gzipped content (with unknown encoding) to string" in {
    // No charset anywhere: decoding must fall back to the platform/UTF-8 default.
    val gzipped = readAsBytes("/fixtures/3-gzipped.html")
    val ungzipped = readAsBytes("/fixtures/3-ungzipped.html")
    val content = Content(WebUrl("http://www.example.com/dummy"), gzipped, contentType = Some("gzip"), contentCharset = None, contentEncoding = Some("text/html"))
    content.asString should be (new String(ungzipped))
  }
}
| ind9/bubblewrap | src/test/scala/bubblewrap/ContentSpec.scala | Scala | apache-2.0 | 2,784 |
package com.socrata.datacoordinator.id
import com.rojoma.json.v3.codec.{DecodeError, JsonDecode, JsonEncode}
import com.rojoma.json.v3.ast.{JValue, JNumber}
/** Value class wrapping a row-version number (no runtime allocation). */
class RowVersion(val underlying: Long) extends AnyVal {
  override def toString = s"RowVersion($underlying)"
}

object RowVersion {
  /** JSON codec: a RowVersion is (de)serialized as a bare JSON number. */
  implicit val jCodec = new JsonDecode[RowVersion] with JsonEncode[RowVersion] {
    def encode(version: RowVersion) = JNumber(version.underlying)
    def decode(json: JValue) = json match {
      case num: JNumber => Right(new RowVersion(num.toLong))
      case nonNumber => Left(DecodeError.InvalidType(JNumber, nonNumber.jsonType))
    }
  }

  /** Orders row versions by their underlying Long value. */
  implicit val ordering: Ordering[RowVersion] = Ordering.by(_.underlying)
}
| socrata-platform/data-coordinator | coordinatorlib/src/main/scala/com/socrata/datacoordinator/id/RowVersion.scala | Scala | apache-2.0 | 790 |
package scalacookbook.chapter09
import com.typesafe.scalalogging.slf4j.Logger
import org.slf4j.LoggerFactory
/**
* Created by liguodong on 2016/7/23.
*/
/** Small demo of slf4j logging plus a few expression-oriented Scala idioms. */
object Main extends App{
  val logger = Logger(LoggerFactory.getLogger(Main.getClass))

  logger.info("This is very convenient...")

  val a = 0
  val b = 11
  // `if` is an expression: pick the larger of the two values.
  val greater = if (a > b) a else b

  val aString = "213"
  // Parse defensively, but only catch the failure we expect: the previous
  // bare `case _ =>` also swallowed fatal errors such as OutOfMemoryError.
  val result = try {
    aString.toInt
  } catch {
    case _: NumberFormatException => 0
  }
  println("result:"+result)
}
| liguodongIOT/java-scala-mix-sbt | src/main/scala/scalacookbook/chapter09/Main.scala | Scala | apache-2.0 | 479 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples
import org.apache.spark.{SparkConf, SparkContext}
/**
 * Usage: BroadcastTest [slices] [numElem] [broadcastAlgo] [blockSize]
 * Measures how long it takes to broadcast a large array to executors,
 * for a configurable broadcast implementation and block size.
 */
object BroadcastTest {
  def main(args: Array[String]) {

    val bcName = if (args.length > 2) args(2) else "Http"
    val blockSize = if (args.length > 3) args(3) else "4096" // 4 MB broadcast block size

    val sparkConf = new SparkConf().setAppName("Broadcast Test").setMaster("local")
      .set("spark.broadcast.factory", s"org.apache.spark.broadcast.${bcName}BroadcastFactory")
      .set("spark.broadcast.blockSize", blockSize)
    val sc = new SparkContext(sparkConf)

    val slices = if (args.length > 0) args(0).toInt else 2
    val num = if (args.length > 1) args(1).toInt else 1000000

    val arr1 = (0 until num).toArray

    for (i <- 0 until 3) {
      println("Iteration " + i)
      println("===========")
      val startTime = System.nanoTime // current value of the system timer, in nanoseconds
      val barr1 = sc.broadcast(arr1)
      val observedSizes = sc.parallelize(1 to 10, slices).map(_ => barr1.value.size)
      // Collect the small RDD so we can print the observed sizes locally.
      observedSizes.collect().foreach(i => println(i))
      // 1E6 nanoseconds == 1 millisecond
      println("Iteration %d took %.0f milliseconds(毫秒)".format(i, (System.nanoTime - startTime) / 1E6))
    }

    sc.stop()
  }
}
// scalastyle:on println | tophua/spark1.52 | examples/src/main/scala/org/apache/spark/examples/BroadcastTest.scala | Scala | apache-2.0 | 2,361 |
/*
* Copyright (C) 2017 Michael Dippery <michael@monkey-robot.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mipadi.jupiter.math
import scala.collection.immutable.NumericRange
import scala.math
/** Contains an implicit conversion for longs and adds some useful
* methods to `long`. Since Scala's ints can be promoted to longs,
* these extension methods are available to ints as well. The implicit
* class `[[com.mipadi.jupiter.math.Numeric.RichLong RichLong]]` can
* be imported with:
*
* {{{
* import com.mipadi.jupiter.math.Numeric._
* val isPrime = 100.isPrime
* }}}
*
* @since 1.1
*/
object Numeric {

  /** An implicit conversion from longs that adds some useful methods to
    * `long`. Since ints can be promoted to longs, these methods are
    * available to ints as well. The implicit class can be imported with:
    *
    * {{{
    * import com.mipadi.jupiter.math.Numeric._
    * val isPrime = 100.isPrime
    * }}}
    *
    * @since 1.1
    */
  implicit class RichLong(self: Long) {

    /** The wrapped long's proper divisors (1 up to self / 2, exclusive of self). */
    lazy val divisors: Seq[Long] = (1L to self / 2).filter(_ divides self)

    /** The wrapped long's individual digits, most significant first.
      * Only meaningful for non-negative values (a '-' sign is not a digit).
      */
    lazy val digits: Seq[Int] = self.toString.map(_.toString).map(_.toInt)

    /** `true` if the wrapped long is prime. Values below 2 — including 0,
      * 1 and all negatives — are never prime. (Without the explicit guard,
      * the trial-division range is empty for them and they would wrongly
      * report as prime.)
      */
    lazy val isPrime: Boolean = self match {
      case n if n < 2 => false
      case 2 => true
      case _ => !(2L to math.sqrt(self).toLong).exists(_ divides self)
    }

    /** Factorial of the wrapped long (1 for values below 1, the empty product). */
    lazy val factorial: Long = (1L to self).product

    /** Returns `true` if the wrapped long divides evenly into `b`. This
      * is equivalent to the mathematical notation `a|b` (where `a` is the
      * wrapped long).
      *
      * @param b
      *   The dividend
      * @return
      *   `true` if the wrapped long divides evenly into `b`
      */
    def divides(b: Long): Boolean = b % self == 0

    /** Returns a range using the wrapped long as an upper bound to the
      * specified lower bound.
      *
      * @param lower
      *   Lower bound of the range
      * @return
      *   Range from the wrapped long down to the specified lower bound
      */
    def downto(lower: Long): NumericRange[Long] = (self to lower by -1)
  }
}
| mdippery/jupiter | src/main/scala/com/mipadi/jupiter/math/Numeric.scala | Scala | apache-2.0 | 2,793 |
package com.philipborg.mummu.image.scaling
import java.util.concurrent.Callable
import com.philipborg.mummu.image.Image
/** A unit of image-scaling work: per the [[java.util.concurrent.Callable]]
  * contract, `call()` produces the scaled [[Image]] (e.g. on an executor).
  */
trait Scaler extends Callable[Image] {
} | philipborg/Mummu | src/main/scala/com/philipborg/mummu/image/scaling/Scaler.scala | Scala | agpl-3.0 | 164 |
package org.scalaide.debug.internal.model
import org.eclipse.debug.core.DebugPlugin
import org.junit.Assert._
import org.junit.Before
import org.junit.Ignore
import org.junit.Test
import org.mockito.Mockito._
import com.sun.jdi.ThreadReference
import com.sun.jdi.ThreadGroupReference
import com.sun.jdi.StringReference
import com.sun.jdi.ArrayReference
import com.sun.jdi.IntegerValue
import com.sun.jdi.Value
/**
* More tests related of the ScalaDebugModelPresentation are in ScalaDebugComputeDetailTest.
*/
@Ignore("TODO - this test apparently uses some UI elements which are disabled in headless mode")
class ScalaDebugModelPresentationTest {

  // Presenter under test: renders threads, stack frames and values for the debug UI.
  val modelPres = new ScalaDebugModelPresentation

  @Before
  def initializeDebugPlugin(): Unit = {
    // The presenter relies on the Eclipse DebugPlugin singleton; create it
    // lazily in case the (headless) test run has not initialized it yet.
    if (DebugPlugin.getDefault == null) {
      new DebugPlugin
    }
  }

  // Builds a ScalaThread around a mocked JDI thread; no debug target needed.
  private def createThread(jdiThread: ThreadReference): ScalaThread =
    ScalaThread(null, jdiThread)

  @Test
  def scalaThreadName(): Unit = {
    val jdiThread = mock(classOf[ThreadReference])

    when(jdiThread.name).thenReturn("thread name")
    val jdiThreadGroup = mock(classOf[ThreadGroupReference])
    when(jdiThread.threadGroup).thenReturn(jdiThreadGroup)
    when(jdiThreadGroup.name).thenReturn("not system")

    val scalaThread = createThread(jdiThread)

    assertEquals("Bad display name for Scala thread", "Thread [thread name]", modelPres.getText(scalaThread))
  }

  @Test
  def scalaThreadNameForSystemThread(): Unit = {
    val jdiThread = mock(classOf[ThreadReference])

    when(jdiThread.name).thenReturn("system thread name")
    val jdiThreadGroup = mock(classOf[ThreadGroupReference])
    when(jdiThread.threadGroup).thenReturn(jdiThreadGroup)
    // A "system" thread group changes the rendered label.
    when(jdiThreadGroup.name).thenReturn("system")

    val scalaThread = createThread(jdiThread)

    assertEquals("Bad display name for Scala system thread", "Daemon System Thread [system thread name]", modelPres.getText(scalaThread))
  }

  @Test
  def scalaStackFrame(): Unit = {
    val scalaThread = mock(classOf[ScalaThread])

    import ScalaStackFrameTest._
    // JVM-style signature: some.package.TypeName.methodName(a.b.ParamType1, a.b.ParamType2): Unit
    val jdiStackFrame = createJDIStackFrame("Lsome/package/TypeName;", "methodName", "(La/b/ParamType1;La/b/ParamType2;)V")

    val location = jdiStackFrame.location
    when(location.lineNumber).thenReturn(42)

    val scalaStackFrame = ScalaStackFrame(scalaThread, jdiStackFrame)

    assertEquals("Bad display name for Scala stack frame", "TypeName.methodName(ParamType1, ParamType2) line: 42", modelPres.getText(scalaStackFrame))
  }

  @Test
  def scalaStackFrameLineNotAvailable(): Unit = {
    val scalaThread = mock(classOf[ScalaThread])

    import ScalaStackFrameTest._
    val jdiStackFrame = createJDIStackFrame("Lsome/package/TypeName;", "methodName", "()V")

    val location = jdiStackFrame.location
    // JDI reports -1 when no line number information is available.
    when(location.lineNumber).thenReturn(-1)

    val scalaStackFrame = ScalaStackFrame(scalaThread, jdiStackFrame)

    assertEquals("Bad display name for Scala stack frame", "TypeName.methodName() line: not available", modelPres.getText(scalaStackFrame))
  }

  @Test
  def computeDetailNull(): Unit = {
    val scalaValue = mock(classOf[ScalaNullValue])

    val computedDetail = ScalaDebugModelPresentation.computeDetail(scalaValue)

    assertEquals("Bad return value for computeDetail", "null", computedDetail)
  }

  @Test
  def computeDetailPrimitiveNotString(): Unit = {
    val scalaValue = new ScalaPrimitiveValue(null, "a value", null, null)

    val computedDetail = ScalaDebugModelPresentation.computeDetail(scalaValue)

    assertEquals("Bad return value for computeDetail", "a value", computedDetail)
  }

  @Test
  def computeDetailString(): Unit = {
    val stringReference = mock(classOf[StringReference])

    when(stringReference.value).thenReturn("a string value")

    val computedDetail = ScalaDebugModelPresentation.computeDetail(new ScalaStringReference(stringReference, null))

    assertEquals("Bad return value for computeDetail", "a string value", computedDetail)
  }

  @Test
  def computeDetailArrayOfPrimitive(): Unit = {
    val arrayReference = mock(classOf[ArrayReference])

    import scala.collection.JavaConverters._
    val values = List(createIntValue(1), createIntValue(2), createIntValue(4)).asJava

    when(arrayReference.length).thenReturn(3)
    when(arrayReference.getValues).thenReturn(values)

    val computedDetail = ScalaDebugModelPresentation.computeDetail(new ScalaArrayReference(arrayReference, null))

    assertEquals("Bad return value for computeDetail", "Array(1, 2, 4)", computedDetail)
  }

  /**
   * There is a bug in the JDT implementation of JDI.
   * ArrayReference#getValues() return an IndexOutOfBoundsException when called on an empty array.
   */
  @Test
  def computeDetailEmptyArrayJDIBug(): Unit = {
    // simulate JDT/JDI bug
    val arrayReference = mock(classOf[ArrayReference])

    when(arrayReference.length).thenReturn(0)
    when(arrayReference.getValues).thenThrow(new IndexOutOfBoundsException)

    val computedDetail = ScalaDebugModelPresentation.computeDetail(new ScalaArrayReference(arrayReference, null))

    assertEquals("Bad return value for computeDetail", "Array()", computedDetail)
  }

  // -----

  // Helper: mocked JDI IntegerValue wrapping `i`.
  def createIntValue(i: Int): Value = {
    val value = mock(classOf[IntegerValue])
    when(value.value).thenReturn(i)
    value
  }

}
| scala-ide/scala-ide | org.scala-ide.sdt.debug.tests/src/org/scalaide/debug/internal/model/ScalaDebugModelPresentationTest.scala | Scala | bsd-3-clause | 5,310 |
package security
import play.api.Application
import play.api.data.Form
import play.api.mvc.Request
import play.api.templates.Html
import securesocial.controllers.DefaultTemplatesPlugin
/** Overrides SecureSocial's default templates with this application's own views. */
class DefaultTemplates(application: Application) extends DefaultTemplatesPlugin(application)
{
  /**
   * Renders the HTML for the login page.
   *
   * @param request the current (implicit) HTTP request
   * @param form the username/password login form to render
   * @param msg an optional status/error message to display
   * @tparam A the request body type
   * @return the rendered login page
   */
  override def getLoginPage[A](implicit request: Request[A], form: Form[(String, String)],
    msg: Option[String] = None): Html =
  {
    views.html.securitytemplates.login(form, msg)
  }
} | lukaszbudnik/hackaton-portal | app/security/DefaultTemplates.scala | Scala | apache-2.0 | 600 |
/*
* Copyright (c) <2013>, Amanj Sherwany <http://www.amanj.me>
* All rights reserved.
* */
package ch.usi.inf.l3.piuma.plugin
import transformers._
import ch.usi.inf.l3.piuma.util._
import scala.tools.nsc.transform.TypingTransformers
import scala.tools.nsc.ast.TreeDSL
import scala.tools.nsc.transform.Transform
import scala.tools.nsc.plugins.PluginComponent
import scala.language.implicitConversions
import scala.reflect.runtime.universe._
/**
 * Base class for Piuma compiler-plugin phases. Mixes the various transformer
 * "cakes" (rename, tree generation, duplication, modification, traversal,
 * extraction) over one concrete compiler instance shared with the plugin.
 */
abstract class TransformerPluginComponent(val plgn: PiumaPlugin)
  extends PluginComponent
  with PiumaImplicitsCake
  with Transform
  with TypingTransformers
  with TreeDSL
  with RenameTransformerCake
  with TreeGenTransformerCake
  with TreeDuplicatorCake
  with TreeModifiersCake
  with TreeTraversersCake
  with ExtractorTransformerCake
  with TreeTransformersCake {
  import plgn._
  // Path-dependent type: this component must use the exact same Global
  // instance as the plugin, so their Tree/Symbol types are compatible.
  val global: plgn.global.type = plgn.global
  import plgn.global._

  /** Factory for the per-compilation-unit transformer of this phase. */
  def newTransformer(unit: CompilationUnit): Transformer

  /**
   * The plugin framework should have a refactoring mode:
   *
   * 1- If you rename a field all its setters and getters (and of course uses
   * shall be renamed).
   * 2- If you add/remove a param in a method or constructor, the framework
   * should automatically pass defaults/drop the arg for that param.
   */
  abstract class TransformerComponent(val unit: CompilationUnit)
    extends TypingTransformer(unit)
    with PiumaImplicits
    with RenameTransformer
    with TreeDSL
    with TreeGenTransformer
    with TreeDuplicator
    with TreeModifiers
    with TreeTraversers
    with ExtractorTransformer
    with TreeTransformers {
    val global: plgn.global.type = plgn.global

    /**
     * Types a tree and returns the typed tree
     *
     * @param tree the tree to be typed
     *
     * @return a typed tree
     */
    protected def typed(tree: Tree): Tree = localTyper.typed(tree)

    /**
     * Returns a fresh and unique TermName based on a given TermName
     *
     * @param base the base TermName
     *
     * @return a fresh and unique TermName
     */
    def freshName(base: TermName): TermName = {
      unit.freshTermName(base.toString)
    }
  }
}
| amanjpro/piuma | piuma/src/main/scala/ch/usi/inf/l3/piuma/plugin/TransformerPluginComponent.scala | Scala | bsd-3-clause | 2,212 |
// Copyright (C) 2009 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.gimd.jgit
import org.eclipse.jgit.lib.Repository
/**
* <p>Class that stores information about branch which consists of name of branch and repository
* where that branch is stored.</p>
*
* @throws InvalidJGitBranchNameException if supplied <code>name</code> does not pass
* <code>Repository.isValidRefName</code> test.
*/
@throws(classOf[InvalidJGitBranchNameException])
final case class JGitBranch(repository: Repository, name: String) {
  // Validate eagerly at construction time: the name must satisfy JGit's
  // Repository.isValidRefName rules, otherwise the instance never exists.
  if (!Repository.isValidRefName(name))
    throw new InvalidJGitBranchNameException(repository, name)
}
| gkossakowski/gimd | src/main/scala/com/google/gimd/jgit/JGitBranch.scala | Scala | apache-2.0 | 1,189 |
/*
* MUSIT is a museum database to archive natural and cultural history data.
* Copyright (C) 2016 MUSIT Norway, part of www.uio.no (University of Oslo)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License,
* or any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package no.uio.musit.security.crypto
import no.uio.musit.test.MusitSpecWithAppPerSuite
class MusitCryptoSpec extends MusitSpecWithAppPerSuite {

  // Crypto service under test, resolved from the running application's instance cache.
  val crypto = fromInstanceCache[MusitCrypto]

  // Fixed UUID-shaped token used as the plaintext in every example below.
  private val plainToken = "27b9c7bc-06c3-4cc5-8c83-b34125377dd6"

  "MusitCrypto" should {
    "successfully encrypt a token" in {
      val encrypted = crypto.encryptAES(plainToken)
      encrypted must not be plainToken
      encrypted must not include " "
    }

    "successfully decrypt an encrypted token" in {
      val roundTripped = crypto.decryptAES(crypto.encryptAES(plainToken))
      roundTripped mustBe plainToken
    }
  }
}
| kpmeen/musit | musit-service/src/test/scala/no/uio/musit/security/crypto/MusitCryptoSpec.scala | Scala | gpl-2.0 | 1,510 |
package org.apache.spark.repl
import scala.reflect._
import scala.reflect.api.{Mirror, Universe, TypeCreator}
import scala.tools.nsc.{io, Properties, Settings, interpreter}
import scala.tools.nsc.interpreter._
import scala.tools.nsc.util.ScalaClassLoader._
import scala.reflect.api.{Mirror, TypeCreator, Universe => ApiUniverse}
import scala.concurrent.{ ExecutionContext, Await, Future, future }
import ExecutionContext.Implicits._
import java.io.File
import scala.tools.nsc.interpreter._
/**
 * A SparkILoop variant whose startup is driven externally: the Spark
 * initialization and welcome banner are suppressed (handled via metadata and
 * init.sc instead), and `process` performs only interpreter setup — it does
 * NOT enter the interactive read-eval loop.
 */
class HackSparkILoop(out:JPrintWriter, val outputDir:File) extends org.apache.spark.repl.SparkILoop(None, out) {

  // note:
  // the creation of SecurityManager has to be lazy so SPARK_YARN_MODE is set if needed
  /*val classServer = {
    val s = org.apache.spark.Boot.classServer(outputDir)
    s.start
    s
  }*/

  // Suppressed on purpose: Spark is initialized externally.
  override def initializeSpark(): Unit = {
    // done using the metadata and init.sc
  }

  // Suppressed on purpose: no banner wanted.
  override def printWelcome(): Unit = {
    //
  }

  // Sets up the interpreter and loads files, then returns without looping.
  override def process(settings: Settings): Boolean = savingContextLoader {
    this.settings = settings
    createInterpreter()

    // sets in to some kind of reader depending on environmental cues
    in = chooseReader(settings)// in0.fold(chooseReader(settings))(r => SimpleReader(r, out, interactive = true))
    // Initialize the interpreter off-thread and bind $intp so user code can
    // reach the IMain instance; the future yields false if reporting errored.
    val globalFuture = Future {
      intp.initializeSynchronous()
      import scala.tools.nsc.interpreter.IMain
      import scala.tools.nsc.interpreter.StdReplTags.tagOfIMain
      intp.quietBind(NamedParam[IMain]("$intp", intp)(tagOfIMain, classTag[IMain]))
      !intp.reporter.hasErrors
    }
    import scala.concurrent.duration._
    // Block until the interpreter is ready (bounded wait).
    Await.ready(globalFuture, 1 minute)
    //printWelcome()
    //initializeSpark()
    loadFiles(settings)
    /**
    try loop()
    catch AbstractOrMissingHandler()
    finally closeInterpreter()
    */
    true
  }
}
| andypetrella/spark-notebook | modules/spark/src/main/scala_2.11/spark-last/HackSparkILoop.scala | Scala | apache-2.0 | 1,832 |
package pl.gigiel.seldon
object Const {

  /** Alias that makes coordinate-valued longs explicit at use sites. */
  type Coordinate = Long

  /** Wraps a raw long as an optional [[Coordinate]]. Always yields `Some`,
    * since a primitive `Long` can never be null.
    */
  def coordinate(a: Long): Option[Const.Coordinate] = Some(a)
}
| tomaszacer/seldon | src/main/scala/pl/gigiel/seldon/Const.scala | Scala | apache-2.0 | 138 |
package java.io
/** Minimal re-implementation of `java.io.OutputStream`: every write funnels
  * down to the abstract single-byte `write(Int)`.
  */
abstract class OutputStream extends Object with Closeable with Flushable {
  /** Closing a bare output stream is a no-op; subclasses may override. */
  def close() {}

  /** Flushing a bare output stream is a no-op; subclasses may override. */
  def flush() {}

  /** Writes every byte of `b`, delegating to the range overload. */
  def write(b: Array[Byte]) {
    write(b, 0, b.length)
  }

  /** Writes `len` bytes of `b` starting at offset `off`, one byte at a time. */
  def write(b: Array[Byte], off: Int, len: Int) {
    var i = 0
    while (i < len) {
      write(b(off + i))
      i += 1
    }
  }

  /** Writes a single byte (the low 8 bits of `b`). */
  def write(b: Int): Unit
}
| swhgoon/scala-js | javalib/source/src/java/io/OutputStream.scala | Scala | bsd-3-clause | 373 |
package controllers.s_care_you_provide
import controllers.ClaimScenarioFactory
import org.specs2.mutable._
import utils.WithJsBrowser
import utils.pageobjects.PageObjects
import utils.pageobjects.s_care_you_provide.GTheirPersonalDetailsPage
// Integration tests for validation errors on the "their personal details"
// page: blank submission and the various malformed-address cases.
class GTheirPersonalDetailsErrorSpec extends Specification {
  section("integration", models.domain.AboutYou.id)
  "Contact Details" should {
    "contain 6 errors on invalid blank submission" in new WithJsBrowser with PageObjects {
      val theirDetailsPage = GTheirPersonalDetailsPage(context)
      theirDetailsPage goToThePage()
      val submittedPage = theirDetailsPage submitPage()

      submittedPage must beAnInstanceOf[GTheirPersonalDetailsPage]
      submittedPage.listErrors.size mustEqual 6
    }

    "contain complete-address error if address not filled in" in new WithJsBrowser with PageObjects {
      val page = GTheirPersonalDetailsPage(context)
      val claim = ClaimScenarioFactory.s4CareYouProvide(hours35 = false)
      // Empty address triggers the "complete the first two lines" message.
      claim.AboutTheCareYouProvideAddressPersonCareFor = ""
      page goToThePage()
      page fillPageWith claim

      val errors = page.submitPage().listErrors

      errors.size mustEqual 1
      errors(0) must contain("Enter the address of the person you care for. You must complete the first two lines")
    }

    "contain complete-invalid-address error if address has bad chars line1 and empty line2" in new WithJsBrowser with PageObjects {
      val page = GTheirPersonalDetailsPage(context)
      val claim = ClaimScenarioFactory.s4CareYouProvide(hours35 = false)
      // '&' separates address lines in the scenario DSL; '$' is the bad character.
      claim.AboutTheCareYouProvideAddressPersonCareFor = "Dollar bad char $ in line1&"
      page goToThePage()
      page fillPageWith claim

      val errors = page.submitPage().listErrors

      errors.size mustEqual 1
      errors(0) must contain("Enter the address of the person you care for. You must complete the first two lines and you must only use letters and numbers")
    }

    "contain invalid-address error if address has bad char line1 and line2" in new WithJsBrowser with PageObjects {
      val page = GTheirPersonalDetailsPage(context)
      val claim = ClaimScenarioFactory.s4CareYouProvide(hours35 = false)
      claim.AboutTheCareYouProvideAddressPersonCareFor = "Dollar bad char $ in line1&Dollar bad char $ in line2"
      page goToThePage()
      page fillPageWith claim

      val errors = page.submitPage().listErrors

      errors.size mustEqual 1
      errors(0) must contain("Enter a valid address for the person you care for, only using letters and numbers")
    }

    "contain invalid-address error if address has bad char line3" in new WithJsBrowser with PageObjects {
      val page = GTheirPersonalDetailsPage(context)
      val claim = ClaimScenarioFactory.s4CareYouProvide(hours35 = false)
      claim.AboutTheCareYouProvideAddressPersonCareFor = "Good line1&Good line2&Bad $ line3"
      page goToThePage()
      page fillPageWith claim

      val errors = page.submitPage().listErrors

      errors.size mustEqual 1
      errors(0) must contain("Enter a valid address for the person you care for, only using letters and numbers")
    }
  }
  section("integration", models.domain.AboutYou.id)
}
| Department-for-Work-and-Pensions/ClaimCapture | c3/test/controllers/s_care_you_provide/GTheirPersonalDetailsErrorSpec.scala | Scala | mit | 3,187 |
package com.philipborg.mummu.io
import java.io.InputStream
import java.io.OutputStream
import org.apache.commons.vfs2.FileObject
import org.apache.commons.vfs2.VFS
/** Resolves user-supplied relative paths against a WebDAV share via Commons VFS.
  *
  * NOTE(review): the credentials are embedded in the connection URL, so they
  * will appear anywhere that URL is logged or displayed; consider passing them
  * through `FileSystemOptions`-based authentication instead.
  */
class WebDavResolver(username: String, password: String, hostname: String, port: Int) extends PathResolver {
  protected val fsMan = VFS.getManager
  // Base URL of the share; validated user paths are appended verbatim.
  private val webDav = s"webdav://$username:$password@$hostname:$port/"
  /** A user path is accepted iff it is non-empty, contains only letters,
    * digits and '.', and both starts and ends with a letter or digit.
    * Because '/' is rejected, traversal outside the share root is impossible.
    */
  def allowedUserPath(path: String): Boolean =
    path.nonEmpty &&
      path.forall(c => c.isLetterOrDigit || c == '.') &&
      path.head.isLetterOrDigit &&
      path.last.isLetterOrDigit
  /** Resolves a validated user path to a VFS [[FileObject]].
    * @throws IllegalArgumentException if the path fails [[allowedUserPath]]
    */
  protected def resolveFileObject(path: String): FileObject = {
    if (!allowedUserPath(path))
      throw new IllegalArgumentException("Only alphanumerical and . is allowed for filepaths, it must also start and end with an alphanumerical character.")
    fsMan.resolveFile(webDav + path)
  }
  /** Opens an output stream writing to the given user path.
    * @param append when true, bytes are appended rather than truncating the file
    */
  def resolvePathToOutputstream(path: String, append: Boolean = false): OutputStream =
    resolveFileObject(path).getContent.getOutputStream(append)
  /** Opens an input stream reading the given user path. */
  def resolvePathToInputstream(path: String): InputStream =
    resolveFileObject(path).getContent.getInputStream
} | philipborg/Mummu | src/main/scala/com/philipborg/mummu/io/WebDavResolver.scala | Scala | agpl-3.0 | 1,296 |
package mesosphere.raml.backend.treehugger
import mesosphere.raml.backend.{PlayJson, PlayJsNumber, PlayJsValue, PlayJsString, PLAY_JSON_FORMAT, PLAY_JSON_RESULT}
import mesosphere.raml.ir.{StringT, UnionT, NumberT}
import treehugger.forest._
import definitions._
import treehuggerDSL._
/** Emits treehugger ASTs for a RAML union type:
  *  - a trait named after the union (extending RamlGenerated/Product/Serializable),
  *  - a companion carrying an implicit Play-JSON `Format` that delegates to the
  *    children's formats, and
  *  - for primitive children ([[NumberT]]/[[StringT]]), a wrapper case class with
  *    a single `value` field, its own Play-JSON format, and a Jackson serializer
  *    that writes the unwrapped primitive.
  */
object UnionVisitor {
  def visit(unionT: UnionT): GeneratedFile = {
    // Destructure the IR node: union name, its alternatives, and RAML comments.
    val UnionT(name, childTypes, comments) = unionT
    // The union itself is a plain marker trait documented with the RAML comments.
    val base = (TRAITDEF(name) withParents ("RamlGenerated", "Product", "Serializable")).tree.withDoc(comments)
    // One `json.validate[Child]` expression per alternative; `reads` tries them in order.
    val childJson: Seq[GenericApply] = childTypes.map { child =>
      REF("json") DOT s"validate" APPLYTYPE (child.name)
    }
    // Companion object with the implicit Format for the union.
    // NOTE(review): `childJson.reduce` below throws on an empty union — assumes
    // every UnionT has at least one child type; confirm upstream invariant.
    val obj = OBJECTDEF(name) := BLOCK(
      OBJECTDEF("playJsonFormat") withParents PLAY_JSON_FORMAT(name) withFlags Flags.IMPLICIT := BLOCK(
        DEF("reads", PLAY_JSON_RESULT(name)) withParams PARAM("json", PlayJsValue) := BLOCK(
          childJson.reduce((acc, next) => acc DOT "orElse" APPLY next)
        ),
        // `writes` pattern-matches on the concrete child and delegates to its format.
        DEF("writes", PlayJsValue) withParams PARAM("o", name) := BLOCK(
          REF("o") MATCH
            childTypes.map { child =>
              CASE(REF(s"f:${child.name}")) ==> (REF(PlayJson) DOT "toJson" APPLY REF("f") APPLY (REF(child.name) DOT "playJsonFormat"))
            }
        )
      )
    )
    val children: Seq[GeneratedObject] = childTypes.flatMap {
      // A numeric alternative becomes `case class Name(value: Double = <default>)`.
      case s: NumberT => {
        val caseClass =
          CASECLASSDEF(s.name) withParents name withParams s.defaultValue.fold(PARAM("value", DoubleClass).tree) { defaultValue =>
            PARAM("value", DoubleClass) := LIT(defaultValue.toDouble)
          }
        // Companion: Format reading a bare JSON number into the wrapper and writing
        // it back out; plus a `DefaultValue` val when a RAML default exists
        // (the mapped `defaultValue` is only used as a presence test — the
        // generated `Name()` relies on the defaulted constructor parameter).
        val objectDef =
          OBJECTDEF(s.name) := BLOCK(
            Seq(
              OBJECTDEF("playJsonFormat") withParents PLAY_JSON_FORMAT(s.name) withFlags Flags.IMPLICIT := BLOCK(
                DEF("reads", PLAY_JSON_RESULT(s.name)) withParams PARAM("json", PlayJsValue) := BLOCK(
                  REF("json") DOT "validate" APPLYTYPE DoubleClass DOT "map" APPLY (REF(s.name) DOT "apply")
                ),
                DEF("writes", PlayJsValue) withParams PARAM("o", s.name) := BLOCK(
                  REF(PlayJsNumber) APPLY (REF("o") DOT "value")
                )
              )
            ) ++ s.defaultValue.map { defaultValue =>
              VAL("DefaultValue") withType (s.name) := REF(s.name) APPLY ()
            }
          )
        // Jackson StdSerializer emitting the unwrapped Double, referenced by symbol
        // so callers can register it.
        val jacksonSerializerSym = RootClass.newClass(s.name + "Serializer")
        val jacksonSerializer = OBJECTDEF(jacksonSerializerSym).withParents(
          "com.fasterxml.jackson.databind.ser.std.StdSerializer[" + s.name + "](classOf[" + s.name + "])"
        ) := BLOCK(
          DEF("serialize", UnitClass) withFlags Flags.OVERRIDE withParams (PARAM("value", s.name),
            PARAM("gen", "com.fasterxml.jackson.core.JsonGenerator"),
            PARAM("provider", "com.fasterxml.jackson.databind.SerializerProvider")) := BLOCK(
            (REF("gen") DOT "writeNumber" APPLY (REF("value") DOT "value"))
          )
        )
        Seq(GeneratedObject(s.name, Seq(caseClass, objectDef, jacksonSerializer), Some(jacksonSerializerSym)))
      }
      // A string alternative: identical structure to NumberT, with String/JsString
      // and `writeString` substituted.
      case s: StringT => {
        val caseClass =
          CASECLASSDEF(s.name) withParents name withParams s.defaultValue.fold(PARAM("value", StringClass).tree) { defaultValue =>
            PARAM("value", StringClass) := LIT(defaultValue)
          }
        val objectDef =
          OBJECTDEF(s.name) := BLOCK(
            Seq(
              OBJECTDEF("playJsonFormat") withParents PLAY_JSON_FORMAT(s.name) withFlags Flags.IMPLICIT := BLOCK(
                DEF("reads", PLAY_JSON_RESULT(s.name)) withParams PARAM("json", PlayJsValue) := BLOCK(
                  REF("json") DOT "validate" APPLYTYPE StringClass DOT "map" APPLY (REF(s.name) DOT "apply")
                ),
                DEF("writes", PlayJsValue) withParams PARAM("o", s.name) := BLOCK(
                  REF(PlayJsString) APPLY (REF("o") DOT "value")
                )
              )
            ) ++ s.defaultValue.map { defaultValue =>
              VAL("DefaultValue") withType (s.name) := REF(s.name) APPLY ()
            }
          )
        val jacksonSerializerSym = RootClass.newClass(s.name + "Serializer")
        val jacksonSerializer = OBJECTDEF(jacksonSerializerSym).withParents(
          "com.fasterxml.jackson.databind.ser.std.StdSerializer[" + s.name + "](classOf[" + s.name + "])"
        ) := BLOCK(
          DEF("serialize", UnitClass) withFlags Flags.OVERRIDE withParams (PARAM("value", s.name),
            PARAM("gen", "com.fasterxml.jackson.core.JsonGenerator"),
            PARAM("provider", "com.fasterxml.jackson.databind.SerializerProvider")) := BLOCK(
            (REF("gen") DOT "writeString" APPLY (REF("value") DOT "value"))
          )
        )
        Seq(GeneratedObject(s.name, Seq(caseClass, objectDef, jacksonSerializer), Some(jacksonSerializerSym)))
      }
      // Non-primitive alternatives are generated by the generic visitor;
      // only their generated objects are folded into this file.
      case t => Visitor.visit(t).objects
    }
    // Children first, then the union trait together with its companion.
    GeneratedFile(
      children
        ++
        Seq(GeneratedObject(name, Seq(base) ++ Seq(obj), Option.empty))
    )
  }
}
| mesosphere/marathon | type-generator/src/main/scala/mesosphere/raml/backend/treehugger/UnionVisitor.scala | Scala | apache-2.0 | 5,086 |
package org.lolhens.renderengine.vector
/** A 3-component vector of `Long`s.
  *
  * Every arithmetic operation short-circuits to `this` when it would be an
  * identity, avoiding needless allocations.
  */
final case class Vector3l private(override val x: Long,
                                  override val y: Long,
                                  override val z: Long) extends Vector3[Long](x, y, z) {
  override type Self = Vector3l

  /** Factory used by the generic base class; reuses `this` when unchanged. */
  override def Vector3(x: Long, y: Long, z: Long): Vector3l =
    if (x != this.x || y != this.y || z != this.z) Vector3l(x, y, z) else this

  override def isZero: Boolean = this.x == 0 && this.y == 0 && this.z == 0
  override def isOne: Boolean = this.x == 1 && this.y == 1 && this.z == 1

  override def unary_- : Vector3l = Vector3l(-x, -y, -z)

  // Identity-element shortcuts: adding/subtracting the zero vector, or
  // multiplying/dividing by the one vector, returns this very instance.
  override def +(x: Long, y: Long, z: Long): Vector3l =
    if (x != 0 || y != 0 || z != 0) Vector3l(this.x + x, this.y + y, this.z + z) else this
  override def -(x: Long, y: Long, z: Long): Vector3l =
    if (x != 0 || y != 0 || z != 0) Vector3l(this.x - x, this.y - y, this.z - z) else this
  override def *(x: Long, y: Long, z: Long): Vector3l =
    if (x != 1 || y != 1 || z != 1) Vector3l(this.x * x, this.y * y, this.z * z) else this
  override def /(x: Long, y: Long, z: Long): Vector3l =
    if (x != 1 || y != 1 || z != 1) Vector3l(this.x / x, this.y / y, this.z / z) else this

  /** Squared euclidean length; exact, no floating point involved. */
  override def `length²`: Long = x * x + y * y + z * z

  /** Euclidean length, truncated to a `Long`. */
  override def length: Long = Math.sqrt(`length²`.toDouble).toLong
}
object Vector3l {
  // Shared instances for the two most common vectors.
  val Zero = new Vector3l(0, 0, 0)
  val One = new Vector3l(1, 1, 1)
  // Unit vectors along each axis.
  val X = new Vector3l(1, 0, 0)
  val Y = new Vector3l(0, 1, 0)
  val Z = new Vector3l(0, 0, 1)

  /** Builds a vector, handing back the shared `Zero`/`One` where possible. */
  def apply(x: Long, y: Long, z: Long): Vector3l = (x, y, z) match {
    case (0, 0, 0) => Zero
    case (1, 1, 1) => One
    case _         => new Vector3l(x, y, z)
  }
}
| LolHens/LibRenderEngine | src/main/scala/org/lolhens/renderengine/vector/Vector3l.scala | Scala | gpl-2.0 | 1,645 |
package at.forsyte.apalache.tla.lir.storage
import at.forsyte.apalache.tla.lir.NameEx
import at.forsyte.apalache.tla.lir.convenience.tla
import org.junit.runner.RunWith
import org.scalatest.{BeforeAndAfterEach, FunSuite}
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class TestChangeListener extends FunSuite with BeforeAndAfterEach {
  // Fresh listener per test so recorded transformations never leak between cases.
  private var listener = new ChangeListener

  override protected def beforeEach(): Unit = {
    listener = new ChangeListener
  }

  test("track 'x' to 'y'") {
    val x = NameEx("x")
    val y = NameEx("y")
    listener.onTransformation(x, y)
    assertResult(x.ID)(listener.traceBack(y.ID))
  }

  test("track {'x'} to {'y'}") {
    val x = NameEx("x")
    val y = NameEx("y")
    listener.onTransformation(x, y)
    val xSet = tla.enumSet(x)
    val ySet = tla.enumSet(y)
    listener.onTransformation(xSet, ySet)
    assertResult(xSet.ID)(listener.traceBack(ySet.ID))
    // The set-level transformation leaves the member-level record intact.
    assertResult(x.ID)(listener.traceBack(y.ID))
  }

  test("track {'x'} to {'y'} without 'x' to 'y'") {
    val x = NameEx("x")
    val y = NameEx("y")
    val xSet = tla.enumSet(x)
    val ySet = tla.enumSet(y)
    // Only the set transformation is recorded; nothing relates x to y directly.
    listener.onTransformation(xSet, ySet)
    assertResult(xSet.ID)(listener.traceBack(ySet.ID))
    // With no record for y itself, tracing resolves to the origin of the
    // smallest enclosing expression that has source info — the origin set.
    assertResult(xSet.ID)(listener.traceBack(y.ID))
  }

  test("track {{'x'}} to {{'y'}} without {'x'} to {'y'} but 'x' to 'y'") {
    val x = NameEx("x")
    val y = NameEx("y")
    val xSet = tla.enumSet(x)
    val xSetSet = tla.enumSet(xSet)
    val ySet = tla.enumSet(y)
    val ySetSet = tla.enumSet(ySet)
    // The leaf transformation is known...
    listener.onTransformation(x, y)
    // ...and so is the outermost one, but the middle layer is never recorded.
    listener.onTransformation(xSetSet, ySetSet)
    assertResult(xSetSet.ID)(listener.traceBack(ySetSet.ID))
    // The untracked middle set therefore resolves to the outermost origin.
    assertResult(xSetSet.ID)(listener.traceBack(ySet.ID))
    // The leaf mapping, however, is still in place.
    assertResult(x.ID)(listener.traceBack(y.ID))
  }
}
| konnov/apalache | tlair/src/test/scala/at/forsyte/apalache/tla/lir/storage/TestChangeListener.scala | Scala | apache-2.0 | 2,619 |
package moveRefactoring.container
import moveRefactoring.bar.O.m1
object ObjUser {
  /** Entry point; exercises `m1`, imported from `moveRefactoring.bar.O`. */
  def main(args: Array[String]): Unit = {
    m1()
  }
} | whorbowicz/intellij-scala | testdata/move/scl4621/after/moveRefactoring/container/ObjUser.scala | Scala | apache-2.0 | 133 |
package org.psliwa.idea.composerJson.intellij.codeAssist.composer
import com.intellij.codeInsight.intention.IntentionAction
import com.intellij.json.psi._
import com.intellij.lang.annotation.{AnnotationHolder, Annotator, HighlightSeverity}
import com.intellij.openapi.application.ApplicationManager
import com.intellij.openapi.project.Project
import com.intellij.patterns.PlatformPatterns._
import com.intellij.patterns.StandardPatterns._
import com.intellij.patterns.{PatternCondition, PsiElementPattern, StringPattern}
import com.intellij.psi.PsiElement
import com.intellij.util.ProcessingContext
import org.psliwa.idea.composerJson._
import org.psliwa.idea.composerJson.composer.InstalledPackages
import org.psliwa.idea.composerJson.composer.model.PackageName
import org.psliwa.idea.composerJson.composer.model.version._
import org.psliwa.idea.composerJson.intellij.Patterns._
import org.psliwa.idea.composerJson.intellij.PsiElements._
import org.psliwa.idea.composerJson.intellij.codeAssist.problem.ProblemDescriptor
import org.psliwa.idea.composerJson.intellij.codeAssist.{
QuickFixIntentionActionAdapter,
QuickFixIntentionActionAdapterWithPriority,
SetPropertyValueQuickFix
}
import org.psliwa.idea.composerJson.json.SString
import org.psliwa.idea.composerJson.settings.ProjectSettings
/** IntelliJ annotator for package version constraints in composer.json.
  *
  * It inspects the string value of every property whose name contains '/'
  * (i.e. a "vendor/package" entry) and reports three kinds of problems, each
  * with quick fixes: unbounded constraints, wildcard-plus-comparison combos,
  * and constraints that have a shorter equivalent notation.
  */
class PackageVersionAnnotator extends Annotator {
  import org.psliwa.idea.composerJson.intellij.codeAssist.composer.PackageVersionAnnotator._

  // In unit-test mode plain INFORMATION is used instead of the custom
  // SUGGESTION severity — presumably because tests can only observe the
  // built-in severities; confirm against the test fixtures.
  val suggestionHighlightSeverity: HighlightSeverity =
    if (ApplicationManager.getApplication.isUnitTestMode) HighlightSeverity.INFORMATION
    else `HighlightSeverity.SUGGESTION`

  // Optional problem message (None = silent suggestion) plus its quick fixes.
  private type QuickFixGroup = (Option[String], Seq[IntentionAction])

  override def annotate(element: PsiElement, annotations: AnnotationHolder): Unit = {
    // Version literals following ':' whose property name looks like
    // "vendor/package" and is not excluded via project settings.
    val pattern = psiElement()
      .and(PackageVersionAnnotator.pattern)
      .withParent(psiElement().withName(and(stringContains("/"), not(excluded(element.getProject)))))

    if (pattern.accepts(element)) {
      val problemDescriptors = for {
        version <- getStringValue(element).toList
        packageName <- ensureJsonProperty(element.getParent).map(_.getName).toList
        (message, quickFixes) <- detectProblemsInVersion(PackageName(packageName), version, element)
      } yield ProblemDescriptor(element, message, quickFixes)

      problemDescriptors.foreach(problem => {
        val maybeAnnotation = problem.message match {
          case Some(message) =>
            Some(annotations.createWarningAnnotation(problem.element.getContext, message))
          case None if annotations.isBatchMode =>
            None // skip annotation when there are only quick fixes without message in batch mode
          case None =>
            Some(
              annotations.createAnnotation(suggestionHighlightSeverity, problem.element.getContext.getTextRange, null)
            )
        }
        // Attach every quick fix to the annotation that was actually created.
        maybeAnnotation.foreach(annotation => problem.quickFixes.foreach(fix => annotation.registerFix(fix)))
      })
    }
  }

  // Runs all three detectors over the (memoized) parsed constraint.
  private def detectProblemsInVersion(packageName: PackageName,
                                      version: String,
                                      element: PsiElement): Seq[QuickFixGroup] = {
    val versionConstraint = parseVersion(version)

    detectUnboundedVersionProblem(versionConstraint, packageName, element) ++
      detectWildcardAndOperatorCombo(versionConstraint, packageName, element) ++
      detectEquivalents(versionConstraint, packageName, element)
  }

  // Flags constraints with no upper bound. Quick fixes offered: concrete
  // versions derived from the currently installed package, `~`/`^` rewrites
  // of the constraint, and "exclude this package / this vendor" actions.
  private def detectUnboundedVersionProblem(version: Option[Constraint],
                                            packageName: PackageName,
                                            element: PsiElement): Seq[QuickFixGroup] = {
    version
      .filter(!_.isBounded)
      .map(versionConstraint => {
        val installedPackages = InstalledPackages.forFile(element.getContainingFile.getVirtualFile)
        // Suggestions built from the installed version (replaced packages are
        // skipped); the index doubles as the fix priority.
        def createChangeVersionQuickFixes(jsonObject: JsonObject): List[IntentionAction] = {
          installedPackages
            .get(packageName)
            .toList
            .filter(_.replacedBy.isEmpty)
            .map(_.version)
            .flatMap(VersionSuggestions.suggestionsForVersion(_, "", mostSignificantFirst = false))
            .zipWithIndex
            .map {
              case (version, priority) =>
                val message = ComposerBundle.message("inspection.quickfix.setPackageVersion", version)
                changePackageVersionQuickFix(packageName, version, jsonObject, message, Some(priority))
            }
        }

        (
          Some(ComposerBundle.message("inspection.version.unboundVersion")),
          createQuickFixes(element, createChangeVersionQuickFixes) ++
            versionQuickFixes(getUnboundVersionFixers)(packageName, versionConstraint, element) ++
            List(new ExcludePatternAction(packageName.presentation)) ++
            packageVendorPattern(packageName).map(new ExcludePatternAction(_)).toList
        )
      })
      .toList
  }

  // Applies each fixer to the constraint and turns every *changed* result
  // into a "set version to ..." quick fix.
  private def versionQuickFixes(fixers: Seq[Constraint => Option[Constraint]])(
      packageName: PackageName,
      version: Constraint,
      element: PsiElement
  ): Seq[IntentionAction] = {
    def create(jsonObject: JsonObject) = {
      fixers
        .map(version.replace)
        .filter(_ != version)
        .map(fixedVersion => changePackageVersionQuickFix(packageName, fixedVersion.presentation, jsonObject))
    }

    createQuickFixes(element, create)
  }

  // Fixes are created only when the element sits inside a JSON property of a
  // JSON object (the "require"/"require-dev" map).
  private def createQuickFixes(element: PsiElement,
                               createFix: JsonObject => Seq[IntentionAction]): Seq[IntentionAction] = {
    for {
      property <- ensureJsonProperty(element.getParent).toList
      jsonObject <- ensureJsonObject(property.getParent).toList
      fix <- createFix(jsonObject)
    } yield fix
  }

  // For each of `~` and `^`: rewrite ">= x" to "~x"/"^x", and "> x" to the
  // same with x's last version segment incremented (so the bound stays strict).
  private def getUnboundVersionFixers: Seq[Constraint => Option[Constraint]] =
    List(ConstraintOperator.~, ConstraintOperator.^).flatMap(operator => {
      List(
        (c: Constraint) =>
          c match {
            case OperatorConstraint(ConstraintOperator.>=, constraint, separator) =>
              Some(OperatorConstraint(operator, constraint, separator))
            case _ => None
          },
        (c: Constraint) =>
          c match {
            case OperatorConstraint(ConstraintOperator.>, constraint, separator) =>
              Some(OperatorConstraint(operator, constraint.replace {
                case SemanticConstraint(version) => Some(SemanticConstraint(version.incrementLast))
                case _ => None
              }, separator))
            case _ => None
          }
      )
    })

  // Flags combinations like ">=1.*" where a comparison operator is applied
  // to a wildcard version.
  private def detectWildcardAndOperatorCombo(version: Option[Constraint],
                                             packageName: PackageName,
                                             element: PsiElement): Seq[QuickFixGroup] = {
    version
      .filter(_ contains wildcardAndOperatorCombination)
      .map(
        versionConstraint =>
          (
            Some(ComposerBundle.message("inspection.version.wildcardAndComparison")),
            versionQuickFixes(getWildcardAndOperatorComboFixers)(packageName, versionConstraint, element)
          )
      )
      .toList
  }

  // True when an operator is applied to a wildcard, possibly wrapped
  // (e.g. in whitespace or a prefix/suffix).
  private def wildcardAndOperatorCombination(constraint: Constraint) = constraint match {
    case OperatorConstraint(_, WildcardConstraint(_), _) => true
    case OperatorConstraint(_, WrappedConstraint(WildcardConstraint(_), _, _), _) => true
    case _ => false
  }

  // Drops the wildcard while keeping the operator: ">=1.*" becomes ">=1",
  // for both bare and wrapped wildcards. Bare "*" (WildcardConstraint(None))
  // is intentionally left alone — there is nothing sensible to rewrite to.
  private def getWildcardAndOperatorComboFixers: Seq[Constraint => Option[Constraint]] = {
    List(
      (c: Constraint) =>
        c match {
          case OperatorConstraint(operator, WildcardConstraint(Some(constraint)), separator) =>
            Some(OperatorConstraint(operator, constraint, separator))
          case _ => None
        },
      (c: Constraint) =>
        c match {
          case OperatorConstraint(operator,
                                  WrappedConstraint(WildcardConstraint(Some(constraint)), prefix, suffix),
                                  separator) =>
            Some(OperatorConstraint(operator, WrappedConstraint(constraint, prefix, suffix), separator))
          case _ => None
        }
    )
  }

  // Offers equivalent notations as message-less (silent) suggestions.
  def detectEquivalents(version: Option[Constraint],
                        packageName: PackageName,
                        element: PsiElement): Seq[QuickFixGroup] = {
    version.toList
      .flatMap(VersionEquivalents.equivalentsFor)
      .map(
        equivalentVersion =>
          createQuickFixes(element,
                           jsonObject =>
                             List(changeEquivalentPackageVersionQuickFix(packageName, equivalentVersion, jsonObject)))
      )
      .map(quickFix => (None, quickFix))
  }

  // Convenience overload with the default "set package version" message.
  private def changePackageVersionQuickFix(packageName: PackageName,
                                           fixedVersion: String,
                                           jsonObject: JsonObject): IntentionAction = {
    changePackageVersionQuickFix(packageName,
                                 fixedVersion,
                                 jsonObject,
                                 ComposerBundle.message("inspection.quickfix.setPackageVersion", fixedVersion))
  }

  // Variant used by the equivalents detector, with its own message key.
  private def changeEquivalentPackageVersionQuickFix(packageName: PackageName,
                                                     fixedVersion: Constraint,
                                                     jsonObject: JsonObject): IntentionAction = {
    changePackageVersionQuickFix(
      packageName,
      fixedVersion.presentation,
      jsonObject,
      ComposerBundle.message("inspection.quickfix.setPackageEquivalentVersion", fixedVersion.presentation)
    )
  }

  // Builds the actual IntentionAction; with a priority it is wrapped in the
  // priority-aware adapter so suggestions keep their derived ordering.
  private def changePackageVersionQuickFix(packageName: PackageName,
                                           newVersion: String,
                                           jsonObject: JsonObject,
                                           message: String,
                                           maybePriority: Option[Int] = None): IntentionAction = {
    val quickFix = new SetPropertyValueQuickFix(jsonObject, packageName.presentation, SString(), newVersion) {
      override def getText: String = message
    }
    maybePriority match {
      case Some(priority) =>
        new QuickFixIntentionActionAdapterWithPriority(quickFix, priority)
      case None =>
        new QuickFixIntentionActionAdapter(quickFix)
    }
  }

  // "vendor/*" exclude pattern, when the package name carries a vendor part.
  private def packageVendorPattern(packageName: PackageName): Option[String] =
    packageName.vendor.map(vendor => s"$vendor/*")

  // Pattern matching package names excluded via the project-level settings
  // of the unbounded-version inspection.
  private def excluded(project: Project): StringPattern = {
    string().`with`(new PatternCondition[String]("matches") {
      override def accepts(t: String, context: ProcessingContext): Boolean = {
        ProjectSettings(project).getUnboundedVersionInspectionSettings.isExcluded(t)
      }
    })
  }
}
private object PackageVersionAnnotator {
  import org.psliwa.idea.composerJson.util.Funcs._

  // Constraint parsing is pure; memoize the 40 most recent parses since the
  // annotator re-runs on every edit.
  val parseVersion: String => Option[Constraint] = memorize(40)(Parser.parse)

  // Matches the string literal that directly follows the ':' of a property,
  // i.e. the version value, not the package-name key.
  val pattern: PsiElementPattern.Capture[JsonStringLiteral] = packageElement.afterLeaf(":")

  // Custom severity sitting numerically just below WEAK_WARNING, reusing the
  // INFORMATION name — used for message-less "equivalent version" suggestions.
  val `HighlightSeverity.SUGGESTION` =
    new HighlightSeverity(HighlightSeverity.INFORMATION.myName, HighlightSeverity.WEAK_WARNING.myVal - 2)
}
| psliwa/idea-composer-plugin | src/main/scala/org/psliwa/idea/composerJson/intellij/codeAssist/composer/PackageVersionAnnotator.scala | Scala | mit | 11,332 |
/*
* Copyright 2015 ligaDATA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ligadata.pmml.transforms.printers.scala.common
import scala.collection.mutable._
import scala.math._
import scala.collection.immutable.StringLike
import scala.util.control.Breaks._
import com.ligadata.pmml.runtime._
import org.apache.logging.log4j.{ Logger, LogManager }
import com.ligadata.kamanja.metadata._
import com.ligadata.pmml.compiler._
import com.ligadata.pmml.support._
import com.ligadata.pmml.traits._
import com.ligadata.pmml.syntaxtree.cooked.common._
class SimpleSetPredicateCodePrinter(ctx : PmmlContext) extends CodePrinter with com.ligadata.pmml.compiler.LogTrait {

  /**
   * Answer a string (code representation) for the supplied node.
   * @param node the PmmlExecNode (expected to be an xSimpleSetPredicate)
   * @param generator the CodePrinterDispatch to use should recursion to child nodes be required.
   * @param kind the kind of code fragment to generate...any
   * {VARDECL, VALDECL, FUNCCALL, DERIVEDCLASS, RULECLASS, RULESETCLASS , MININGFIELD, MAPVALUE, AGGREGATE, USERFUNCTION}
   * @param traversalOrder the order to traverse this node...any {INORDER, PREORDER, POSTORDER}
   *
   * @return some string representation of this node, or "" when the node is
   *         absent or of the wrong type
   */
  def print(node : Option[PmmlExecNode]
          , generator : CodePrinterDispatch
          , kind : CodeFragment.Kind
          , traversalOrder : Traversal.Order) : String = {

    // Unwrap and down-cast; null signals "absent or wrong type".
    val xnode : xSimpleSetPredicate = node match {
      case Some(node) => {
        if (node.isInstanceOf[xSimpleSetPredicate]) node.asInstanceOf[xSimpleSetPredicate] else null
      }
      case _ => null
    }

    val printThis = if (xnode != null) {
      codeGenerator(xnode, generator, kind, traversalOrder)
    } else {
      // NOTE(review): in this branch xnode is always null, so the interpolation
      // below (xnode.qName / xnode.getClass) will throw a NullPointerException
      // when logging; the intended receiver was probably node.get — confirm.
      if (node != null) {
        PmmlError.logError(ctx, s"For ${xnode.qName}, expecting an xSimpleSetPredicate... got a ${xnode.getClass.getName}... check CodePrinter dispatch map initialization")
      }
      ""
    }
    printThis
  }

  // Renders the predicate as "<opFcn>(child1, child2, ...)\n"; only PREORDER
  // traversal produces output, the other orders yield "".
  private def codeGenerator(node : xSimpleSetPredicate
              , generator : CodePrinterDispatch
              , kind : CodeFragment.Kind
              , traversalOrder : Traversal.Order) : String = {

    val fcnBuffer : StringBuilder = new StringBuilder()

    val simplePredStr : String = traversalOrder match {
      case Traversal.INORDER => { "" }
      case Traversal.POSTORDER => { "" }
      case Traversal.PREORDER => {
        // Map the PMML boolean operator onto its Scala builtin function name.
        val opFcn : String = PmmlTypes.scalaBuiltinNameFcnSelector(node.booleanOperator)
        val sPred = s"$opFcn("
        fcnBuffer.append(sPred)
        // cnt tracks position so a ", " separator is emitted between children.
        var cnt = 0
        node.Children.foreach((child : PmmlExecNode) => {
          // NOTE(review): casting a PmmlExecNode to Option[PmmlExecNode] will
          // fail with a ClassCastException at runtime; Some(child) was almost
          // certainly intended — verify against CodePrinterDispatch.generate.
          generator.generate(child.asInstanceOf[Option[PmmlExecNode]], fcnBuffer, CodeFragment.FUNCCALL)
          cnt += 1
          if (cnt < node.Children.length) {
            fcnBuffer.append(", ")
          }
        })
        val closingParen : String = s")\n"
        fcnBuffer.append(closingParen)
        fcnBuffer.toString
      }
    }
    simplePredStr
  }
}
| traytonwhite/Kamanja | trunk/Pmml/PmmlCompiler/src/main/scala/com/ligadata/pmml/transforms/printers/scala/common/SimpleSetPredicateCodePrinter.scala | Scala | apache-2.0 | 3,369 |
package lambdacart.util.typealigned
import scalaz.Leibniz
import scalaz.Leibniz.===
/**
* Isomorphic to `AOption[λ[(α, β) => APair[F[α, ?], G[?, β]]], A, B]`,
* but avoids allocating an `APair` instance.
*/
sealed abstract class AOption2[F[_, _], G[_, _], A, B]

// The present case: an F-step from A into some hidden pivot type X, followed
// by a G-step from X out to B.
case class ASome2[F[_, _], G[_, _], A, X, B](f: F[A, X], g: G[X, B]) extends AOption2[F, G, A, B] {
  type Pivot = X
}

// The empty case. Emptiness carries the evidence that A and B are the same
// type, exposed through Leibniz-style substitution in both directions.
abstract case class ANone2[F[_, _], G[_, _], A, B]() extends AOption2[F, G, A, B] {
  def subst[H[_]](ha: H[A]): H[B]
  def unsubst[H[_]](hb: H[B]): H[A]
  def leibniz: A === B = subst[A === ?](Leibniz.refl)
}
object AOption2 {
  // The single shared None instance is cast to any [F, G, A, A]; this is safe
  // because ANone2 only ever witnesses A = A and carries no F/G values.
  def empty[F[_, _], G[_, _], A](): AOption2[F, G, A, A] = None.asInstanceOf[AOption2[F, G, A, A]]
  def some[F[_, _], G[_, _], A, X, B](f: F[A, X], g: G[X, B]): AOption2[F, G, A, B] = ASome2(f, g)

  def apply[F[_, _], G[_, _], A](): AOption2[F, G, A, A] = empty[F, G, A]
  def apply[F[_, _], G[_, _], A, X, B](f: F[A, X], g: G[X, B]): AOption2[F, G, A, B] = some(f, g)

  // Partial type application helper: `AOption2.of[F, G](f, g)` lets the
  // remaining type parameters be inferred from the arguments.
  def of[F[_, _], G[_, _]]: MkAOption2[F, G] = new MkAOption2[F, G]

  // Value class (zero allocation); `dummy` exists only because AnyVal needs
  // a constructor parameter.
  final class MkAOption2[F[_, _], G[_, _]](private val dummy: Boolean = false) extends AnyVal {
    def apply[A, X, B](f: F[A, X], g: G[X, B]): AOption2[F, G, A, B] = some(f, g)
    def apply[A](): AOption2[F, G, A, A] = empty[F, G, A]
  }

  private val None = none[Nothing, Nothing, Nothing]

  // Both substitution directions are identities, matching A = A.
  private def none[F[_, _], G[_, _], A]: AOption2[F, G, A, A] = new ANone2[F, G, A, A] {
    def subst[H[_]](ha: H[A]): H[A] = ha
    def unsubst[H[_]](hb: H[A]): H[A] = hb
  }
}
| TomasMikula/LambdaCart | src/main/scala/lambdacart/util/typealigned/AOption2.scala | Scala | apache-2.0 | 1,539 |
package me.heaton.profun.week2
import scala.annotation.tailrec
/** Course exercise (Functional Programming Principles, week 2): contrasts a
  * plainly recursive factorial with a tail-recursive one.
  */
object TailRecursion {

  /** Euclid's algorithm; tail-recursive, so it runs in constant stack space. */
  @tailrec
  def gcd(a: Int, b: Int): Int = b match {
    case 0 => a
    case _ => gcd(b, a % b)
  }

  /** Naive factorial. Deliberately NOT tail-recursive: the multiplication
    * happens after the recursive call returns.
    */
  def factorial(n: Int): Int =
    if (n == 0) 1
    else n * factorial(n - 1)

  /** Factorial with an accumulator, putting the recursion in tail position
    * so the compiler turns it into a loop.
    */
  def tailFactorial(n: Int): Int = {
    @tailrec
    def go(remaining: Int, acc: Int): Int =
      if (remaining == 0) acc
      else go(remaining - 1, acc * remaining)
    go(n, 1)
  }
} // | heaton/hello-scala | src/main/scala/me/heaton/profun/week2/TailRecursion.scala | Scala | mit | 406 |
package sgl.geometry
/** an AABB Rect.
*
* This class is mutable, and several of its methods modify the state instead
* of just returning a new Rect. This is very much not idiomatic Scala (which
* favors immutable objects), but this is also a trade-off necessary for games
* to avoid generating too much garbage to collect. Time will tell if this
* design decision was good or bad.
**/
class Rect(var left: Float, var top: Float, var width: Float, var height: Float) {

  def right: Float = left + width
  def bottom: Float = top + height

  def centerX: Float = left + width / 2
  def centerY: Float = top + height / 2
  def center: Point = Point(centerX, centerY)

  /** A translated copy; this instance is left untouched. */
  def +(m: Vec): Rect = new Rect(left + m.x, top + m.y, width, height)
  def -(m: Vec): Rect = new Rect(left - m.x, top - m.y, width, height)

  // Corner accessors. As the original author noted, the top/bottom naming
  // reads inverted against plain (x, y) coordinates.
  def topLeft: Point = Point(left, top)
  def topRight: Point = Point(right, top)
  def bottomLeft: Point = Point(left, bottom)
  def bottomRight: Point = Point(right, bottom)
  def vertices: Set[Point] = Set(topLeft, topRight, bottomLeft, bottomRight)

  /** Point-in-rect test, inclusive of all four edges. */
  def intersect(x: Float, y: Float): Boolean =
    left <= x && x <= right && top <= y && y <= bottom
  def intersect(point: Point): Boolean = intersect(point.x, point.y)
  def intersect(rect: Rect): Boolean = Collisions.aabbWithAabb(this, rect)
  def intersect(circle: Circle): Boolean = Collisions.circleWithAabb(circle, this)

  override def toString: String = s"Rect(left=$left, top=$top, width=$width, height=$height)"

  override def clone: Rect = new Rect(left, top, width, height)
}
object Rect {
  def apply(left: Float, top: Float, width: Float, height: Float): Rect =
    new Rect(left, top, width, height)

  /** Builds a rect from its four edges instead of origin + size. */
  def fromBoundingBox(left: Float, top: Float, right: Float, bottom: Float): Rect =
    new Rect(left, top, right - left, bottom - top)
}
| regb/scala-game-library | core/src/main/scala/sgl/geometry/Rect.scala | Scala | mit | 1,979 |
/*!
* Copyright 2013-2014 Dennis Hörsch.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.dennishoersch.util.json
import scala.util.Success
import org.junit.Test
import org.scalatest.Matchers
import org.scalatest.junit.AssertionsForJUnit
import de.dennishoersch.dropwizard.blog.domain.Account
import de.dennishoersch.util.test.CustomMatchers
import de.dennishoersch.dropwizard.blog.domain.Author
class JsonTest extends AssertionsForJUnit with Matchers with CustomMatchers {

  // Subject under test, shared across test methods.
  val json = new Json

  @Test
  def test_simple_stringify() {
    // Only `name` and `pwd` appear in the expected output: the account id and
    // the Author reference are evidently not serialised — confirm against
    // Json.stringify's behaviour.
    val obj = Account(2, "Denn", "Mang", Author(1, "", ""))
    val expected = """{
                     |  "name" : "Denn",
                     |  "pwd" : "Mang"
                     |}""".stripMargin

    // stringify evidently returns a Try (hence the Success matcher below).
    val stringified = json.stringify(obj)

    stringified should be (anInstanceOf[Success[String]])
    stringified.get should equal (expected)
  }
}
| dhs3000/dropwizard-scala | src/test/scala/de/dennishoersch/util/json/JsonTest.scala | Scala | apache-2.0 | 1,428 |
package scala.collection.scalatest.mutable
import org.scalatest._
// Shared FlatSpec behaviours for mutable Bag implementations (the `Bag` here
// is this library's multiset type, not part of the standard library).
trait MutableBagBehaviours {
  this: FlatSpec =>

  // NOTE: `bag` is a by-name parameter — every reference below builds a fresh
  // bag. `b` is the single instance that gets mutated, while fresh copies
  // supply the original elements and multiplicities to iterate/query against.
  def mutableBagBehaviour[A](bag: => scala.collection.mutable.Bag[A]) {
    it should "support removing single elements with -=" in {
      val b = bag
      var expectedSize = b.size
      // A fresh bag yields each element once per occurrence, so the size of
      // `b` must drop by exactly one per removal.
      for (elem <- bag) {
        b -= elem
        expectedSize = expectedSize - 1
        assert(b.size == expectedSize)
      }
    }
    it should "support removing a fixed number of elements with -= and multiplicities" in {
      val b = bag
      var expectedSize = b.size
      // Removing (elem -> multiplicity) must delete every occurrence at once.
      for (elem <- bag.distinct) {
        b -= (elem -> bag.multiplicity(elem))
        expectedSize = expectedSize - bag.multiplicity(elem)
        assert(b.size == expectedSize)
        assert(!b.contains(elem))
      }
    }
    it should "support removing single elements with remove" in {
      val b = bag
      var expectedSize = b.size
      for (elem <- bag) {
        b.remove(elem, 1)
        expectedSize = expectedSize - 1
        assert(b.size == expectedSize)
      }
    }
    it should "support removing all elements with removeAll" in {
      val b = bag
      var expectedSize = b.size
      for (elem <- bag.distinct) {
        b.removeAll(elem)
        expectedSize = expectedSize - bag.multiplicity(elem)
        assert(b.size == expectedSize)
        assert(!b.contains(elem))
      }
    }
    it should "be unchanged when removing elements that do not exist" in {
      val b = bag
      val elem = b.head
      // First make sure `elem` is truly absent, then verify that every removal
      // flavour is a no-op on a missing element.
      b.removeAll(elem)
      val expectedSize = b.size
      b -= elem
      assert(b.size == expectedSize)
      b -= (elem, 1)
      assert(b.size == expectedSize)
      b.remove(elem, 1)
      assert(b.size == expectedSize)
      b.removeAll(elem)
      assert(b.size == expectedSize)
    }
  }
}
| sageserpent-open/multisets | src/test/scala/scala/collection/scalatest/mutable/MutableBagBehaviours.scala | Scala | bsd-3-clause | 1,819 |
package thurloe.service
import org.broadinstitute.dsde.rawls.model.{RawlsUserEmail, RawlsUserSubjectId}
import org.broadinstitute.dsde.rawls.model.UserModelJsonSupport.{RawlsUserSubjectIdFormat, RawlsUserEmailFormat}
import spray.json.DefaultJsonProtocol
import scala.language.postfixOps
// spray-json wire formats for the API case classes defined below; import or
// mix in this object to bring the implicit formats into scope.
object ApiDataModelsJsonProtocol extends DefaultJsonProtocol {
  implicit val keyValuePairFormat = jsonFormat2(KeyValuePair)
  implicit val userKeyValuePairFormat = jsonFormat2(UserKeyValuePair)
  implicit val userKeyValuePairsFormat = jsonFormat2(UserKeyValuePairs)
  // jsonFormat7 mirrors Notification's seven constructor fields.
  implicit val notificationFormat = jsonFormat7(Notification)
}
object ThurloeQuery {
  private val UserIdParam = "userId"
  private val KeyParam = "key"
  private val ValueParam = "value"
  // Sentinel bucket for parameters that are none of the allowed ones; the odd
  // capital 'Z' keeps it from colliding with a real query-parameter name.
  private val UnrecognizedParams = "unrecogniZed"
  private val AllowedKeys = Seq(UserIdParam, KeyParam, ValueParam)

  // Project (key, value) pairs onto their keys, preserving absence.
  private def getKeys(maybeStrings: Option[Seq[(String, String)]]): Option[Seq[String]] =
    maybeStrings.map(_.map(_._1))

  // Project (key, value) pairs onto their values, preserving absence.
  private def getValues(maybeStrings: Option[Seq[(String, String)]]): Option[Seq[String]] =
    maybeStrings.map(_.map(_._2))

  // Normalise a parameter name to its canonical allowed form
  // (case-insensitively), or to the unrecognised-parameters bucket.
  private def validKeyOrUnrecognized(maybeKey: String): String =
    AllowedKeys.find(_.equalsIgnoreCase(maybeKey)).getOrElse(UnrecognizedParams)

  /** Parses raw query parameters into a [[ThurloeQuery]]; unknown parameter
    * names are collected (by key) into `unrecognizedFilters`.
    */
  def apply(params: Seq[(String, String)]): ThurloeQuery = {
    val grouped = params.groupBy { case (k, _) => validKeyOrUnrecognized(k) }
    ThurloeQuery(
      getValues(grouped.get(UserIdParam)),
      getValues(grouped.get(KeyParam)),
      getValues(grouped.get(ValueParam)),
      getKeys(grouped.get(UnrecognizedParams)))
  }
}
/** Parsed filter parameters of a Thurloe key/value lookup. */
final case class ThurloeQuery(userId: Option[Seq[String]], key: Option[Seq[String]], value: Option[Seq[String]], unrecognizedFilters: Option[Seq[String]]) {
  /** True when none of the recognised filters (userId, key, value) was
    * supplied; unrecognised filters are deliberately ignored here.
    */
  def isEmpty: Boolean = Seq(userId, key, value).forall(_.isEmpty)
}
// A single key/value datum.
case class KeyValuePair(key: String, value: String)
// A key/value pair together with its storage id; None presumably means
// "not yet persisted" — confirm against the DAO layer.
case class KeyValuePairWithId(id: Option[Int], keyValuePair: KeyValuePair)
case class UserKeyValuePairWithId(id: Option[Int], userKeyValuePair: UserKeyValuePair)
// A key/value pair owned by a specific user.
case class UserKeyValuePair(userId: String, keyValuePair: KeyValuePair)
// A batch of key/value pairs for one user.
case class UserKeyValuePairs(userId: String, keyValuePairs: Seq[KeyValuePair]) {
  // Flattens the batch into per-pair records carrying the shared userId.
  def toKeyValueSeq: Seq[UserKeyValuePair] = keyValuePairs.map(UserKeyValuePair(userId, _))
}
// Notification request: recipient (by subject id and/or email), optional
// reply-to subjects, the template id, and substitution maps — plain string
// substitutions plus two maps whose values are subject ids to be resolved to
// emails/names downstream.
case class Notification(userId: Option[RawlsUserSubjectId], userEmail: Option[RawlsUserEmail], replyTos: Option[Set[RawlsUserSubjectId]], notificationId: String, substitutions: Map[String, String], emailLookupSubstitutions: Map[String, RawlsUserSubjectId], nameLookupSubstitution: Map[String, RawlsUserSubjectId])
| broadinstitute/thurloe | src/main/scala/thurloe/service/ApiDataModels.scala | Scala | bsd-3-clause | 2,743 |
package uk.gov.dvla.vdl.report.pdf
import java.io.InputStream
import java.util.Date
import org.scalatest.{FlatSpec, Matchers}
import uk.gov.dvla.vdl.report.JsonReport
import uk.gov.dvla.vdl.report.exception.NoCompiledTemplateException
import scala.io.Source.fromFile
/** Tests for [[Generator]]: template compilation and PDF / PDF/A generation
  * from a JSON data source.
  */
class GeneratorTest extends FlatSpec with Matchers {

  // Base name shared by the report template (reports/sample.jrxml) and its
  // JSON data fixture (data/sample.json).
  val reportName: String = "sample"
  val generator = new Generator

  behavior of "PDF generator"

  it should "throw exception if template is missing" in {
    val report = new JsonReport(null, fromFile(resourcePath("data/sample.json")).mkString)
    intercept[IllegalArgumentException] {
      generator.generate(report)
    }
  }

  it should "throw exception if trying generate report without prior template compilation" in {
    intercept[NoCompiledTemplateException] {
      generator.generate(
        new JsonReport(reportName, fromFile(resourcePath(s"data/$reportName.json")).mkString)
      )
    }
  }

  it should "generate PDF from data source and parameters" in {
    generator.compile(reportName, resourceAsStream(s"reports/$reportName.jrxml"))
    val printout: Array[Byte] = generator.generate(
      new JsonReport(reportName, fromFile(resourcePath(s"data/$reportName.json")).mkString,
        "GENERATED_BY" -> "Kainos Software Ltd",
        "GENERATION_DATE" -> new Date()
      )
    )
    printout.length should not be 0
  }

  it should "generate PDF/A 1A from data source and parameters" in {
    generator.compile(reportName, resourceAsStream(s"reports/$reportName.jrxml"))
    val printout: Array[Byte] = generator.generate(
      new JsonReport(reportName, fromFile(resourcePath(s"data/$reportName.json")).mkString,
        "GENERATED_BY" -> "Kainos Software Ltd",
        "GENERATION_DATE" -> new Date()
      ),
      A1AConformance
    )
    printout.length should not be 0
  }

  it should "generate PDF/A 1B from data source and parameters" in {
    generator.compile(reportName, resourceAsStream(s"reports/$reportName.jrxml"))
    val printout: Array[Byte] = generator.generate(
      new JsonReport(reportName, fromFile(resourcePath(s"data/$reportName.json")).mkString,
        "GENERATED_BY" -> "Kainos Software Ltd",
        "GENERATION_DATE" -> new Date()
      ),
      A1BConformance
    )
    printout.length should not be 0
  }

  /** Absolute filesystem path of a classpath resource.
    *
    * Bug fix: the previous implementation used `URL.getPath`, which returns a
    * percent-encoded path (e.g. spaces become `%20`), so `Source.fromFile`
    * failed whenever the build directory contained special characters.
    * Decoding through the URI/Path API yields a usable native path.
    */
  private def resourcePath(resource: String): String = {
    java.nio.file.Paths.get(getClass.getResource(s"/$resource").toURI).toString
  }

  /** The given classpath resource opened as an InputStream. */
  private def resourceAsStream(resource: String): InputStream = {
    getClass.getResourceAsStream(s"/$resource")
  }
}
| dvla/pdf-generator | src/test/scala/uk/gov/dvla/vdl/report/pdf/GeneratorTest.scala | Scala | mit | 2,539 |
package com.github.agaro1121.marshalling
import io.circe.{Decoder, Encoder, HCursor}
import io.circe.generic.semiauto.{deriveDecoder, deriveEncoder}
import io.circe.syntax._
import com.github.agaro1121.models.pvpmatches.{PvpMatch, PvpMatches}
/** circe encoders/decoders for the PvP match model, mixed into components that
  * need to (de)serialize [[PvpMatch]] / [[PvpMatches]].
  */
trait PvpMatchesMarshalling {
  implicit protected lazy val PvpMatchDecoder: Decoder[PvpMatch] = deriveDecoder[PvpMatch]
  implicit protected lazy val PvpMatchEncoder: Encoder[PvpMatch] = deriveEncoder[PvpMatch]
  // A PvpMatches value is represented on the wire as a bare JSON array of
  // matches (no wrapper object), as the encoder below shows.
  implicit protected lazy val PvpMatchesDecoder: Decoder[PvpMatches] =
    (c: HCursor) => c.as[List[PvpMatch]].map(PvpMatches)
  implicit protected lazy val PvpMatchesEncoder: Encoder[PvpMatches] =
    (a: PvpMatches) => a.pvpMatches.asJson
}
| agaro1121/PathOfExileApiClient | src/main/scala/com/github/agaro1121/marshalling/PvpMatchesMarshalling.scala | Scala | mit | 705 |
package org.jetbrains.plugins.scala.lang.psi.types.api
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.NonValueType
import org.jetbrains.plugins.scala.lang.psi.types.{ScType, ScUndefinedSubstitutor}
import org.jetbrains.plugins.scala.project.ProjectContext
/**
* Use this type if you want to resolve generics.
* In conformance using ScUndefinedSubstitutor you can accumulate information
* about possible generic type.
*/
case class UndefinedType(parameterType: TypeParameterType, var level: Int = 0) extends NonValueType {
  override implicit def projectContext: ProjectContext = parameterType.projectContext
  override def visitType(visitor: TypeVisitor): Unit = visitor.visitUndefinedType(this)
  // An undefined type resolves to its underlying type parameter.
  def inferValueType: TypeParameterType = parameterType
  // Equivalence check that accumulates bounds for the underlying type parameter
  // in the substitutor; `falseUndef` disables undefined-type matching and makes
  // the check fail.
  override def equivInner(`type`: ScType, substitutor: ScUndefinedSubstitutor, falseUndef: Boolean): (Boolean, ScUndefinedSubstitutor) = {
    val result = `type` match {
      case _ if falseUndef => substitutor
      // Same nesting level: nothing new to record.
      case UndefinedType(_, thatLevel) if thatLevel == level => substitutor
      // Deeper undefined type: constrain it from above by this one.
      case UndefinedType(thatParameterType, thatLevel) if thatLevel > level =>
        substitutor.addUpper(thatParameterType.nameAndId, this)
      // Shallower undefined type: it becomes an upper bound for our parameter.
      case that: UndefinedType if that.level < level =>
        substitutor.addUpper(parameterType.nameAndId, that)
      // A concrete type: pin the parameter to it as both lower and upper bound.
      case that =>
        val name = parameterType.nameAndId
        substitutor.addLower(name, that).addUpper(name, that)
    }
    (!falseUndef, result)
  }
}
| loskutov/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/types/api/UndefinedType.scala | Scala | apache-2.0 | 1,485 |
/* Title: Pure/System/interrupt.scala
Author: Makarius
Support for POSIX interrupts (bypassed on Windows).
*/
package isabelle
import sun.misc.{Signal, SignalHandler}
object POSIX_Interrupt
{
  /** Runs `e` with `h` installed as the SIGINT handler; the previous handler
    * is restored afterwards, even when `e` throws. */
  def handler[A](h: => Unit)(e: => A): A =
  {
    val sigint = new Signal("INT")
    val replacement: SignalHandler =
      new SignalHandler { def handle(s: Signal) { h } }
    val previous = Signal.handle(sigint, replacement)
    try e finally Signal.handle(sigint, previous)
  }

  /** Runs `e` such that a SIGINT received meanwhile interrupts the calling
    * thread (raising InterruptedException at the next interruption point)
    * instead of triggering the default signal behaviour. */
  def exception[A](e: => A): A =
  {
    val self = Thread.currentThread
    handler { self.interrupt } { e }
  }
}
| MerelyAPseudonym/isabelle | src/Pure/System/posix_interrupt.scala | Scala | bsd-3-clause | 610 |
package utils
import javax.inject.Inject
import play.api.http.HttpFilters
import play.api.mvc.EssentialFilter
import play.filters.csrf.CSRFFilter
import play.filters.headers.SecurityHeadersFilter
/**
* Provides filters.
*/
class Filters @Inject()(csrfFilter: CSRFFilter, securityHeadersFilter: SecurityHeadersFilter) extends HttpFilters {
  // Filter chain: CSRF protection first, then the security-headers filter.
  override def filters: Seq[EssentialFilter] = Seq(csrfFilter, securityHeadersFilter)
}
| agoetschm/linkmanager | server/app/utils/Filters.scala | Scala | gpl-3.0 | 432 |
package sbt
import org.scalacheck._
import Gen.{listOf}
import Prop.forAll
import Tags._
object TagsTest extends Properties("Tags")
{
	// Generators: a TagMap associates each Tag with an arbitrary Int count.
	def tagMap: Gen[TagMap] = for(ts <- listOf(tagAndFrequency)) yield ts.toMap
	def tagAndFrequency: Gen[(Tag, Int)] = for(t <- tag; count <- Arbitrary.arbitrary[Int]) yield (t, count)
	def tag: Gen[Tag] = for(s <- Arbitrary.arbitrary[String]) yield Tag(s)
	implicit def aTagMap = Arbitrary(tagMap)
	implicit def aTagAndFrequency = Arbitrary(tagAndFrequency)
	implicit def aTag = Arbitrary(tag)
	property("exclusive allows all groups without the exclusive tag") = forAll { (tm: TagMap, tag: Tag) =>
		excl(tag)(tm - tag)
	}
	property("exclusive only allows a group with an excusive tag when the size is one") = forAll { (tm: TagMap, size: Int, etag: Tag) =>
		val tm2: TagMap = tm.updated(etag, math.abs(size))
		excl(etag)(tm2) == (size <= 1)
	}
	property("exclusive always allows a group of size one") = forAll { (etag: Tag, mapTag: Tag) =>
		val tm: TagMap = Map(mapTag -> 1)
		excl(etag)(tm)
	}
	// The predicate produced by declaring `tag` exclusive, applied to a TagMap.
	private[this] def excl(tag: Tag): TagMap => Boolean = predicate(exclusive(tag) :: Nil)
}
| harrah/xsbt | main/src/test/scala/TagsTest.scala | Scala | bsd-3-clause | 1,126 |
/* __ *\\
** ________ ___ / / ___ __ ____ PhantomJS support for Scala.js **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013-2017, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ https://www.scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.jsenv.phantomjs.sbtplugin
import sbt._
import sbt.Keys._
import java.net.URLClassLoader
import org.scalajs.sbtplugin.ScalaJSPlugin
import org.scalajs.sbtplugin.ScalaJSPlugin.autoImport._
import org.scalajs.jsenv._
import org.scalajs.jsenv.phantomjs._
/** An sbt plugin that simplifies the setup of `PhantomJSEnv`s.
*
* There is no need to use `enablePlugins(PhantomJSEnvPlugin)`, as this plugin
* is automatically triggered by Scala.js projects.
*
* Usually, one only needs to use the
* [[PhantomJSEnvPlugin.autoImport PhantomJSEnv]] method.
*/
object PhantomJSEnvPlugin extends AutoPlugin {
  // Triggered automatically for every project that enables ScalaJSPlugin.
  override def requires: Plugins = ScalaJSPlugin
  override def trigger: PluginTrigger = allRequirements
  object autoImport {
    /** Class loader for PhantomJSEnv, used to load jetty8.
     *
     *  Usually, you should not need to use `scalaJSPhantomJSClassLoader`
     *  directly. Instead, use the `PhantomJSEnv()` function.
     */
    val scalaJSPhantomJSClassLoader: TaskKey[ClassLoader] = {
      TaskKey[ClassLoader](
          "scalaJSPhantomJSClassLoader",
          "Private class loader to load jetty8 without polluting the " +
          "classpath. Only use this as the `jettyClassLoader` argument of " +
          "a PhantomJSEnv.",
          KeyRanks.Invisible)
    }
    /** A `Def.Initialize` for a `PhantomJSEnv`.
     *
     *  Use this to specify in your build that you would like to run and/or
     *  test a project with PhantomJS:
     *
     *  {{{
     *  jsEnv := PhantomJSEnv(...).value
     *  }}}
     *
     *  The specified `Config` is augmented with an appropriate Jetty class
     *  loader (through `withJettyClassLoader`).
     *
     *  Note that the resulting `Setting` is not scoped at all, but must be
     *  scoped in a project that has the ScalaJSPlugin enabled to work
     *  properly. Therefore, either put the upper line in your project settings
     *  (common case) or scope it manually, using `sbt.ProjectExtra.inScope`.
     */
    def PhantomJSEnv(
        config: org.scalajs.jsenv.phantomjs.PhantomJSEnv.Config
    ): Def.Initialize[Task[PhantomJSEnv]] = Def.task {
      val loader = scalaJSPhantomJSClassLoader.value
      new PhantomJSEnv(config.withJettyClassLoader(loader))
    }
    /** A `Def.Initialize` for a `PhantomJSEnv` with the
     *  default configuration.
     *
     *  This is equivalent to
     *  {{{
     *  PhantomJSEnv(org.scalajs.jsenv.phantomjs.PhantomJSEnv.Config())
     *  }}}
     */
    def PhantomJSEnv(): Def.Initialize[Task[PhantomJSEnv]] =
      PhantomJSEnv(org.scalajs.jsenv.phantomjs.PhantomJSEnv.Config())
  }
  import autoImport._
  // Jetty 8 artifacts that PhantomJSEnv loads in an isolated class loader
  // (see scalaJSPhantomJSClassLoader below).
  val phantomJSJettyModules: Seq[ModuleID] = Seq(
      "org.eclipse.jetty" % "jetty-websocket" % "8.1.16.v20140903",
      "org.eclipse.jetty" % "jetty-server" % "8.1.16.v20140903"
  )
  /* Since sbt 1, a `config()` must be assigned to a `val` starting with an
   * uppercase letter, which will become the "id" of the configuration.
   */
  val PhantomJSJetty: Configuration = config("phantom-js-jetty").hide
  override def projectSettings: Seq[Setting[_]] = Seq(
      /* Depend on jetty artifacts in a dummy configuration to be able to inject
       * them into the PhantomJS runner if necessary.
       * See scalaJSPhantomJSClassLoader.
       */
      ivyConfigurations += PhantomJSJetty,
      libraryDependencies ++= phantomJSJettyModules.map(_ % "phantom-js-jetty"),
      scalaJSPhantomJSClassLoader := {
        val report = update.value
        val jars = report.select(configurationFilter("phantom-js-jetty"))
        // Parent class loader is null so the jetty jars stay isolated from
        // the build's own classpath.
        val jettyLoader =
          new URLClassLoader(jars.map(_.toURI.toURL).toArray, null)
        new PhantomJettyClassLoader(jettyLoader, getClass.getClassLoader)
      }
  )
}
| sjrd/scala-js-env-phantomjs | phantomjs-sbt-plugin/src/main/scala/org/scalajs/jsenv/phantomjs/sbtplugin/PhantomJSEnvPlugin.scala | Scala | bsd-3-clause | 4,298 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.graphframes.lib
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.DataTypes
import org.graphframes.{GraphFrameTestSparkContext, GraphFrame, SparkFunSuite, TestUtils}
class StronglyConnectedComponentsSuite extends SparkFunSuite with GraphFrameTestSparkContext {
  test("Island Strongly Connected Components") {
    // Five isolated vertices and no edges: every vertex is its own component.
    val vertices = sqlContext.createDataFrame(Seq(
      (1L, "a"),
      (2L, "b"),
      (3L, "c"),
      (4L, "d"),
      (5L, "e"))).toDF("id", "value")
    val edges = sqlContext.createDataFrame(Seq.empty[(Long, Long)]).toDF("src", "dst")
    val graph = GraphFrame(vertices, edges)
    val c = graph.stronglyConnectedComponents.maxIter(5).run()
    // Result must preserve the input schema and add a Long `component` column.
    TestUtils.testSchemaInvariants(graph, c)
    TestUtils.checkColumnType(c.schema, "component", DataTypes.LongType)
    // With no edges, each vertex's component id equals its own id.
    for (Row(id: Long, component: Long, _)
      <- c.select("id", "component", "value").collect()) {
      assert(id === component)
    }
  }
}
| graphframes/graphframes | src/test/scala/org/graphframes/lib/StronglyConnectedComponentsSuite.scala | Scala | apache-2.0 | 1,760 |
package com.twitter.finagle.netty3.http
import org.jboss.netty.handler.codec.http.{DefaultHttpResponse, HttpResponseStatus, HttpVersion}
// Shared singleton Netty 3 response with status `100 Continue` (HTTP/1.1).
// NOTE(review): presumably sent in reply to `Expect: 100-continue` requests —
// confirm at call sites.
private[finagle] object OneHundredContinueResponse
    extends DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.CONTINUE)
| mkhq/finagle | finagle-netty3-http/src/main/scala/com/twitter/finagle/netty3/http/OneHundredContinueResponse.scala | Scala | apache-2.0 | 273 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
package exceptions
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.prop.TableDrivenPropertyChecks
class StackDepthExceptionSpec extends FunSpec with ShouldMatchers with TableDrivenPropertyChecks {
  // Concrete StackDepthException subclass exercising the function-based
  // (lazily computed message / stack depth) constructor.
  class FunException(
    messageFun: StackDepthException => Option[String],
    cause: Option[Throwable],
    failedCodeStackDepthFun: StackDepthException => Int
  ) extends StackDepthException(messageFun, cause, failedCodeStackDepthFun) {
    def severedAtStackDepth: FunException = {
      val truncated = getStackTrace.drop(failedCodeStackDepth)
      val e = new FunException(messageFun, cause, e => 0)
      e.setStackTrace(truncated)
      e
    }
  }
  // Concrete subclass exercising the plain-value constructor.
  class NoFunException(
    message: Option[String],
    cause: Option[Throwable],
    failedCodeStackDepth: Int
  ) extends StackDepthException(message, cause, failedCodeStackDepth) {
    def severedAtStackDepth: NoFunException = {
      val truncated = getStackTrace.drop(failedCodeStackDepth)
      val e = new NoFunException(message, cause, 0)
      e.setStackTrace(truncated)
      e
    }
  }
  // Invalid argument combinations: any null, or Some(null), must be rejected
  // by the constructors with a NullPointerException.
  val invalidFunCombos =
    Table[StackDepthException => Option[String], Option[Throwable], StackDepthException => Int](
      ("messageFun", "cause", "failedCodeStackDepthFun"),
      (null, Some(new Exception), e => 17),
      (e => Some("hi"), null, e => 17),
      (e => Some("hi"), Some(null), e => 17),
      (e => Some("hi"), Some(new Exception), null)
    )
  val invalidNoFunCombos =
    Table(
      ("message", "cause"),
      (null, Some(new Exception)),
      (Some(null), Some(new Exception)),
      (Some("hi"), null),
      (Some("hi"), Some(null))
    )
  describe("A StackDepthException") {
    it should behave like aStackDepthExceptionWhenGivenNulls(
      (message, cause, failedCodeStackDepth) => new NoFunException(message, cause, failedCodeStackDepth),
      (messageFun, cause, failedCodeStackDepthFun) => new FunException(messageFun, cause, failedCodeStackDepthFun)
    )
  }
  describe("A TestFailedException") {
    it should behave like aStackDepthExceptionWhenGivenNulls(
      (message, cause, failedCodeStackDepth) => new TestFailedException(message, cause, failedCodeStackDepth),
      (messageFun, cause, failedCodeStackDepthFun) => new TestFailedException(messageFun, cause, failedCodeStackDepthFun)
    )
  }
  // Shared-behaviour function: given factories for the two constructor styles,
  // registers tests asserting null rejection and message/cause accessors.
  def aStackDepthExceptionWhenGivenNulls(
    newSDE: (Option[String], Option[Throwable], Int) => StackDepthException,
    newFunSDE: (StackDepthException => Option[String], Option[Throwable], StackDepthException => Int) => StackDepthException
  ) {
    it("should throw NPE if passed nulls or Some(null)s") {
      forAll (invalidFunCombos) { (msgFun, cause, fcsdFun) =>
        evaluating {
          newFunSDE(msgFun, cause, fcsdFun)
        } should produce [NullPointerException]
      }
      forAll (invalidNoFunCombos) { (msg, cause) =>
        evaluating {
          newSDE(msg, cause, 17)
        } should produce [NullPointerException]
      }
    }
    it("should produce the Some(message) from getMessage, or null if message was None") {
      val eDefined = newSDE(Some("howdy!"), None, 17)
      eDefined.getMessage should be ("howdy!")
      val eEmpty = newSDE(None, None, 17)
      eEmpty.getMessage should be (null)
    }
    it("should produce the Some(cause) from getCause, or null if cause was None") {
      val e = new Exception
      val eDefined = newSDE(Some("howdy!"), Some(e), 17)
      eDefined.getCause should be (e)
      val eEmpty = newSDE(Some("howdy!"), None, 17)
      eEmpty.getCause should be (null)
    }
    it("should produce the Some(message) from message, or None if message was None") {
      val eDefined = newSDE(Some("howdy!"), None, 17)
      eDefined.message should be (Some("howdy!"))
      val eEmpty = newSDE(None, None, 17)
      eEmpty.message should be (None)
    }
    it("should produce the Some(cause) from cause, or None if cause was None") {
      val e = new Exception
      val eDefined = newSDE(Some("howdy!"), Some(e), 17)
      eDefined.cause should be (Some(e))
      val eEmpty = newSDE(Some("howdy!"), None, 17)
      eEmpty.cause should be (None)
    }
  }
}
| travisbrown/scalatest | src/test/scala/org/scalatest/exceptions/StackDepthExceptionSpec.scala | Scala | apache-2.0 | 4,905 |
// Test that we don't track variables which is referred in another closure.
object VarRef {
  // NOTE: this is a negative compiler test for flow-typing of nullable vars;
  // the `// error` markers below are load-bearing and must not be moved.
  locally {
    var x: String|Null = ???
    val y = {
      if (x != null) {
        val _: String = x // ok: y doesn't create closure
      }
    }
    if (x != null) {
      val a: String = x // ok
    }
  }
  locally {
    var x: String|Null = ???
    var y = {
      if (x != null) {
        val _: String = x // ok: y doesn't create closure
      }
    }
    if (x != null) {
      val a: String = x // ok
    }
  }
  locally {
    var x: String|Null = ???
    lazy val y = {
      if (x != null) {
        x = null
      }
      x
    }
    if (x != null) {
      val a: String = x // error: x exists in closure, no longer trackable
    }
  }
  locally {
    var x: String|Null = ???
    def y = {
      if (x != null) {
        x = null
      }
      x
    }
    if (x != null) {
      val a: String = x // error: x exists in closure, no longer trackable
    }
  }
  locally {
    var x: String|Null = ???
    lazy val y = {
      if (x != null) {
        val a: String = x // error: x exists in closure, no longer trackable
      }
      x
    }
  }
  locally {
    var x: String|Null = ???
    def y = {
      if (x != null) {
        val a: String = x // error: x exists in closure, no longer trackable
      }
      x
    }
  }
  lazy val lazyblock = {
    var x: String|Null = ???
    lazy val y = {
      if (x != null) {
        // The enclosingMethods of the x definition and this x reference are the same
        val a: String = x // error: x exists in closure, no longer trackable
      }
      x
    }
  }
  abstract class F {
    def get(): String | Null
  }
  locally {
    var x: String|Null = ???
    val y: F = new F {
      def get() = {
        if (x != null) x = null
        x
      }
    }
    if (x != null) {
      val a: String = x // error: x exists in closure, no longer trackable
    }
  }
  locally {
    var x: String|Null = ???
    val y: F = new F {
      def get() = {
        if (x != null) {
          val a: String = x // error: x exists in closure, no longer trackable
        }
        x
      }
    }
  }
  def f(x: => String | Null): F = new F {
    def get() = x
  }
  locally {
    var x: String|Null = ???
    val y: F = f {
      if (x != null) {
        x = null
      }
      x
    }
    if (x != null) {
      val a: String = x // error: x exists in closure, no longer trackable
    }
  }
  // TODO: not working now
  // locally {
  //   var x: String|Null = ???
  //   val y: F = f {
  //     if (x != null) {
  //       val a: String = x // err: x exists in closure, no longer trackable
  //     }
  //     x
  //   }
  // }
  locally {
    var x: String|Null = ???
    val y: String => String|Null = s => {
      if (x != null) {
        val a: String = x // error: x exists in closure, no longer trackable
      }
      x
    }
  }
  locally {
    val x: String|Null = ???
    if (x != null) {
      def f = {
        val y: String = x // ok, x is a value definition
        y
      }
    }
  }
  locally {
    var x: String|Null = ???
    if (x != null) {
      def f = {
        val y: String = x // error: the use of x is out of order
        y
      }
    }
  }
}
| som-snytt/dotty | tests/explicit-nulls/neg/var-ref-in-closure.scala | Scala | apache-2.0 | 3,227 |
package java.io
/** Scala.js re-declaration of the `java.io.DataInput` interface: sequential
 *  readers of binary-encoded primitive values. Semantics are intended to
 *  match the JDK `DataInput` contract.
 */
trait DataInput {
  def readBoolean(): Boolean
  def readByte(): Byte
  def readChar(): Char
  def readDouble(): Double
  def readFloat(): Float
  // Fill the whole array / the given (off, len) slice.
  def readFully(b: Array[Byte]): Unit
  def readFully(b: Array[Byte], off: Int, len: Int): Unit
  def readInt(): Int
  def readLine(): String
  def readLong(): Long
  def readShort(): Short
  // Unsigned variants widen to Int, since the JVM has no unsigned primitives.
  def readUnsignedByte(): Int
  def readUnsignedShort(): Int
  def readUTF(): String
  def skipBytes(n: Int): Int
}
| colinrgodsey/scala-js | javalib/src/main/scala/java/io/DataInput.scala | Scala | bsd-3-clause | 468 |
package re.infrastructure
/** An application version postfix, e.g. the "-SNAPSHOT" marker. */
case class AppPostfix(str: String) {
  import AppPostfix._
  /** True when this postfix is exactly the snapshot marker. */
  def isSnapshot = str.equals(snapshotStr)
  // Exclusive upper bound on an acceptable postfix length.
  private val maxPostfixLength = 100
  /** A postfix is valid when it is shorter than the maximum length. */
  def validate = str.length < maxPostfixLength
}
object AppPostfix {
val snapshotStr = "-SNAPSHOT"
} | Vlad187/sandbox | src/main/scala/re/infrastructure/AppPostfix.scala | Scala | apache-2.0 | 342 |
package com.getjenny.starchat.services.esclient
/**
* Created by Angelo Leto <angelo@getjenny.com> on 14/11/17.
*/
/** Elasticsearch client for the system instance-registry index; all values
  * below come from the application configuration or are fixed resource paths.
  */
object InstanceRegistryElasticClient extends SystemElasticClient {
  override val indexSuffix: String = config.getString("es.system_instance_registry_suffix")
  // decision table changes awareness per index mechanism
  val instanceRegistryDeleteFrequency : Int = config.getInt("es.instance_registry_delete_frequency")
  // JSON index mapping specs bundled as classpath resources.
  override val mappingPath = "/index_management/json_index_spec/system/instance_registry.json"
  override val updateMappingPath = "/index_management/json_index_spec/system/update/instance_registry.json"
}
| GetJenny/starchat | src/main/scala/com/getjenny/starchat/services/esclient/InstanceRegistryElasticClient.scala | Scala | gpl-2.0 | 644 |
/*
* Copyright 2012-2013 Stephane Godbillon (@sgodbillon)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package play.modules.reactivemongo.json
import play.api.libs.json._
import play.modules.reactivemongo.ReactiveMongoPluginException
import reactivemongo.bson._
import reactivemongo.bson.utils.Converters
import scala.math.BigDecimal.{
double2bigDecimal,
int2bigDecimal,
long2bigDecimal
}
object `package` extends ImplicitBSONHandlers {
  // Helpers for reading optional values out of a JsLookupResult, treating
  // JSON null (and a missing path) as None rather than a read failure.
  object readOpt {
    implicit def optionReads[T](implicit r: Reads[T]): Reads[Option[T]] = Reads.optionWithNull[T]
    def apply[T](lookup: JsLookupResult)(implicit r: Reads[T]): JsResult[Option[T]] = lookup.toOption.fold[JsResult[Option[T]]](JsSuccess(None))(_.validate[Option[T]])
  }
}
// Concrete entry point for the BSON <-> JSON formats defined below.
object BSONFormats extends BSONFormats
/**
* JSON Formats for BSONValues.
*/
sealed trait BSONFormats extends LowerImplicitBSONHandlers {
trait PartialFormat[T <: BSONValue] extends Format[T] {
def partialReads: PartialFunction[JsValue, JsResult[T]]
def partialWrites: PartialFunction[BSONValue, JsValue]
def writes(t: T): JsValue = partialWrites(t)
def reads(json: JsValue) = partialReads.lift(json).getOrElse(JsError(s"unhandled json value: $json"))
}
implicit object BSONDoubleFormat extends PartialFormat[BSONDouble] {
val partialReads: PartialFunction[JsValue, JsResult[BSONDouble]] = {
case JsNumber(f) => JsSuccess(BSONDouble(f.toDouble))
case DoubleValue(value) => JsSuccess(BSONDouble(value))
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case double: BSONDouble => JsNumber(double.value)
}
private object DoubleValue {
def unapply(obj: JsObject): Option[Double] =
(obj \ "$double").asOpt[JsNumber].map(_.value.toDouble)
}
}
implicit object BSONStringFormat extends PartialFormat[BSONString] {
val partialReads: PartialFunction[JsValue, JsResult[BSONString]] = {
case JsString(str) => JsSuccess(BSONString(str))
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case str: BSONString => JsString(str.value)
}
}
class BSONDocumentFormat(toBSON: JsValue => JsResult[BSONValue], toJSON: BSONValue => JsValue) extends PartialFormat[BSONDocument] {
val partialReads: PartialFunction[JsValue, JsResult[BSONDocument]] = {
case obj: JsObject =>
try {
JsSuccess(bson(obj))
} catch {
case e: Throwable => JsError(e.getMessage)
}
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case doc: BSONDocument => json(doc)
}
// UNSAFE - FOR INTERNAL USE
private[json] def bson(obj: JsObject): BSONDocument = BSONDocument(
obj.fields.map { tuple =>
tuple._1 -> (toBSON(tuple._2) match {
case JsSuccess(bson, _) => bson
case JsError(err) => throw new ReactiveMongoPluginException(err.toString)
})
})
// UNSAFE - FOR INTERNAL USE
private[json] def json(bson: BSONDocument): JsObject =
JsObject(bson.elements.map(elem => elem._1 -> toJSON(elem._2)))
}
implicit object BSONDocumentFormat extends BSONDocumentFormat(toBSON, toJSON)
class BSONArrayFormat(toBSON: JsValue => JsResult[BSONValue], toJSON: BSONValue => JsValue) extends PartialFormat[BSONArray] {
val partialReads: PartialFunction[JsValue, JsResult[BSONArray]] = {
case arr: JsArray =>
try {
JsSuccess(BSONArray(arr.value.map { value =>
toBSON(value) match {
case JsSuccess(bson, _) => bson
case JsError(err) => throw new ReactiveMongoPluginException(err.toString)
}
}))
} catch {
case e: Throwable => JsError(e.getMessage)
}
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case array: BSONArray => JsArray(array.values.map(toJSON))
}
}
implicit object BSONArrayFormat extends BSONArrayFormat(toBSON, toJSON)
implicit object BSONObjectIDFormat extends PartialFormat[BSONObjectID] {
val partialReads: PartialFunction[JsValue, JsResult[BSONObjectID]] = {
case OidValue(oid) => JsSuccess(BSONObjectID(oid))
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case oid: BSONObjectID => Json.obj("$oid" -> oid.stringify)
}
private object OidValue {
def unapply(obj: JsObject): Option[String] =
if (obj.fields.size != 1) None else (obj \ "$oid").asOpt[String]
}
}
implicit object BSONBooleanFormat extends PartialFormat[BSONBoolean] {
val partialReads: PartialFunction[JsValue, JsResult[BSONBoolean]] = {
case JsBoolean(v) => JsSuccess(BSONBoolean(v))
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case boolean: BSONBoolean => JsBoolean(boolean.value)
}
}
implicit object BSONDateTimeFormat extends PartialFormat[BSONDateTime] {
val partialReads: PartialFunction[JsValue, JsResult[BSONDateTime]] = {
case DateValue(value) => JsSuccess(BSONDateTime(value))
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case dt: BSONDateTime => Json.obj("$date" -> dt.value)
}
private object DateValue {
def unapply(obj: JsObject): Option[Long] = (obj \ "$date").asOpt[Long]
}
}
implicit object BSONTimestampFormat extends PartialFormat[BSONTimestamp] {
val partialReads: PartialFunction[JsValue, JsResult[BSONTimestamp]] = {
case TimeValue((time, i)) => JsSuccess(BSONTimestamp(time, i))
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case ts: BSONTimestamp => Json.obj(
"$time" -> (ts.value >>> 32), "$i" -> ts.value.toInt)
}
private object TimeValue {
def unapply(obj: JsObject): Option[(Long, Int)] = for {
time <- (obj \ "$time").asOpt[Long]
i <- (obj \ "$i").asOpt[Int]
} yield (time, i)
}
}
implicit object BSONRegexFormat extends PartialFormat[BSONRegex] {
val partialReads: PartialFunction[JsValue, JsResult[BSONRegex]] = {
case js: JsObject if js.values.size == 1 && js.fields.head._1 == "$regex" =>
js.fields.head._2.asOpt[String].
map(rx => JsSuccess(BSONRegex(rx, ""))).
getOrElse(JsError(__ \ "$regex", "string expected"))
case js: JsObject if js.value.size == 2 && js.value.exists(_._1 == "$regex") && js.value.exists(_._1 == "$options") =>
val rx = (js \ "$regex").asOpt[String]
val opts = (js \ "$options").asOpt[String]
(rx, opts) match {
case (Some(rx), Some(opts)) => JsSuccess(BSONRegex(rx, opts))
case (None, Some(_)) => JsError(__ \ "$regex", "string expected")
case (Some(_), None) => JsError(__ \ "$options", "string expected")
case _ => JsError(__ \ "$regex", "string expected") ++ JsError(__ \ "$options", "string expected")
}
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case rx: BSONRegex =>
if (rx.flags.isEmpty)
Json.obj("$regex" -> rx.value)
else Json.obj("$regex" -> rx.value, "$options" -> rx.flags)
}
}
implicit object BSONNullFormat extends PartialFormat[BSONNull.type] {
val partialReads: PartialFunction[JsValue, JsResult[BSONNull.type]] = {
case JsNull => JsSuccess(BSONNull)
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case BSONNull => JsNull
}
}
implicit object BSONIntegerFormat extends PartialFormat[BSONInteger] {
val partialReads: PartialFunction[JsValue, JsResult[BSONInteger]] = {
case JsNumber(i) => JsSuccess(BSONInteger(i.toInt))
case IntValue(value) => JsSuccess(BSONInteger(value))
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case int: BSONInteger => JsNumber(int.value)
}
private object IntValue {
def unapply(obj: JsObject): Option[Int] =
(obj \ "$int").asOpt[JsNumber].map(_.value.toInt)
}
}
implicit object BSONLongFormat extends PartialFormat[BSONLong] {
val partialReads: PartialFunction[JsValue, JsResult[BSONLong]] = {
case JsNumber(long) => JsSuccess(BSONLong(long.toLong))
case LongValue(value) => JsSuccess(BSONLong(value))
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case long: BSONLong => JsNumber(long.value)
}
private object LongValue {
def unapply(obj: JsObject): Option[Long] =
(obj \ "$long").asOpt[JsNumber].map(_.value.toLong)
}
}
implicit object BSONBinaryFormat extends PartialFormat[BSONBinary] {
val partialReads: PartialFunction[JsValue, JsResult[BSONBinary]] = {
case JsString(str) => try {
JsSuccess(BSONBinary(Converters.str2Hex(str), Subtype.UserDefinedSubtype))
} catch {
case e: Throwable => JsError(s"error deserializing hex ${e.getMessage}")
}
case obj: JsObject if obj.fields.exists {
case (str, _: JsString) if str == "$binary" => true
case _ => false
} => try {
JsSuccess(BSONBinary(Converters.str2Hex((obj \ "$binary").as[String]), Subtype.UserDefinedSubtype))
} catch {
case e: Throwable => JsError(s"error deserializing hex ${e.getMessage}")
}
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case binary: BSONBinary =>
val remaining = binary.value.readable()
Json.obj(
"$binary" -> Converters.hex2Str(binary.value.slice(remaining).readArray(remaining)),
"$type" -> Converters.hex2Str(Array(binary.subtype.value.toByte)))
}
}
implicit object BSONSymbolFormat extends PartialFormat[BSONSymbol] {
val partialReads: PartialFunction[JsValue, JsResult[BSONSymbol]] = {
case SymbolValue(value) => JsSuccess(BSONSymbol(value))
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case BSONSymbol(s) => Json.obj("$symbol" -> s)
}
private object SymbolValue {
def unapply(obj: JsObject): Option[String] =
if (obj.fields.size != 1) None else (obj \ "$symbol").asOpt[String]
}
}
val numberReads: PartialFunction[JsValue, JsResult[BSONValue]] = {
case JsNumber(n) if !n.ulp.isWhole => JsSuccess(BSONDouble(n.toDouble))
case JsNumber(n) if n.isValidInt => JsSuccess(BSONInteger(n.toInt))
case JsNumber(n) if n.isValidLong => JsSuccess(BSONLong(n.toLong))
}
  /**
   * Converts an arbitrary JSON value into its BSON counterpart by trying each
   * partial reader in turn. The chain order is significant: the special
   * extended-JSON object forms ($oid, $date, $time, $binary, $regex, $symbol)
   * must be attempted before the generic document reader, which would happily
   * absorb any JsObject. Returns a JsError (rather than throwing) when no
   * reader is defined for the input.
   */
  def toBSON(json: JsValue): JsResult[BSONValue] =
    BSONStringFormat.partialReads.
      orElse(BSONObjectIDFormat.partialReads).
      orElse(BSONDateTimeFormat.partialReads).
      orElse(BSONTimestampFormat.partialReads).
      orElse(BSONBinaryFormat.partialReads).
      orElse(BSONRegexFormat.partialReads).
      orElse(numberReads).
      orElse(BSONBooleanFormat.partialReads).
      orElse(BSONNullFormat.partialReads).
      orElse(BSONSymbolFormat.partialReads).
      orElse(BSONArrayFormat.partialReads).
      orElse(BSONDocumentFormat.partialReads).
      lift(json).getOrElse(JsError(s"unhandled json value: $json"))
  /**
   * Converts a BSON value into JSON by trying each partial writer in turn.
   * Unlike [[toBSON]], an unhandled value here is a programming error and
   * therefore throws a ReactiveMongoPluginException instead of returning an
   * error value.
   */
  def toJSON(bson: BSONValue): JsValue = BSONObjectIDFormat.partialWrites.
    orElse(BSONDateTimeFormat.partialWrites).
    orElse(BSONTimestampFormat.partialWrites).
    orElse(BSONBinaryFormat.partialWrites).
    orElse(BSONRegexFormat.partialWrites).
    orElse(BSONDoubleFormat.partialWrites).
    orElse(BSONIntegerFormat.partialWrites).
    orElse(BSONLongFormat.partialWrites).
    orElse(BSONBooleanFormat.partialWrites).
    orElse(BSONNullFormat.partialWrites).
    orElse(BSONStringFormat.partialWrites).
    orElse(BSONSymbolFormat.partialWrites).
    orElse(BSONArrayFormat.partialWrites).
    orElse(BSONDocumentFormat.partialWrites).
    lift(bson).getOrElse(throw new ReactiveMongoPluginException(s"Unhandled json value: $bson"))
}
object Writers {
  /** Pimps [[JsPath]] with a MongoDB-flavoured writer. */
  implicit class JsPathMongo(val jp: JsPath) extends AnyVal {
    /**
     * Builds an OWrites that serializes a value and re-keys the result under
     * this path rendered in MongoDB dot notation (e.g. `a.b.0`, `$.k` for a
     * recursive search node). Object results get every field prefixed with
     * the dotted path; any other JSON value is wrapped as a single field.
     */
    def writemongo[A](implicit writer: Writes[A]): OWrites[A] = OWrites[A] { value =>
      // Render the JsPath as a dot-separated Mongo key.
      val dotted = jp.path.map {
        case KeyPathNode(key) => key
        case RecursiveSearch(key) => s"$$.$key"
        case IdxPathNode(idx) => s"$idx"
      }.mkString(".")
      writer.writes(value) match {
        case JsObject(fields) =>
          JsObject(fields.map { case (name, json) => (s"$dotted.$name", json) })
        case other => JsObject(Seq(dotted -> other))
      }
    }
  }
}
/**
 * SerializationPack implementation backed by Play JSON: documents are
 * JsObject, readers/writers are Play's Reads/OWrites, and the wire format is
 * produced by converting through the BSON formats above.
 */
object JSONSerializationPack extends reactivemongo.api.SerializationPack {
  import reactivemongo.bson.buffer.{
    DefaultBufferHandler,
    ReadableBuffer,
    WritableBuffer
  }
  type Value = JsValue
  type ElementProducer = (String, Json.JsValueWrapper)
  type Document = JsObject
  type Writer[A] = OWrites[A]
  type Reader[A] = Reads[A]
  // Reader that succeeds only when the JSON value already is an object.
  object IdentityReader extends Reader[Document] {
    def reads(js: JsValue): JsResult[Document] = js match {
      case o: JsObject => JsSuccess(o)
      case v => JsError(s"object is expected: $v")
    }
  }
  // Writer that passes a document through unchanged.
  object IdentityWriter extends Writer[Document] {
    def writes(document: Document): Document = document
  }
  def serialize[A](a: A, writer: Writer[A]): Document = writer.writes(a)
  // Deserializes or fails hard: a JsError is turned into a RuntimeException
  // via sys.error, with all error messages joined.
  def deserialize[A](document: Document, reader: Reader[A]): A =
    reader.reads(document) match {
      case JsError(msg) => sys.error(msg mkString ", ")
      case JsSuccess(v, _) => v
    }
  // Converts the JsObject to a BSONDocument and writes it to the buffer.
  // NOTE(review): the `.get` throws if the JSON cannot be converted to a
  // BSON document — failures surface as an exception, not an error value.
  def writeToBuffer(buffer: WritableBuffer, document: Document): WritableBuffer = {
    BSONDocument.write(BSONFormats.toBSON(document).flatMap[BSONDocument] {
      case d: BSONDocument => JsSuccess(d)
      case v => JsError(s"document is expected: $v")
    }.get, buffer)
    buffer
  }
  // Reads a BSONDocument from the buffer and converts it back to a JsObject.
  def readFromBuffer(buffer: ReadableBuffer): Document =
    BSONFormats.toJSON(BSONDocument.read(buffer)).as[Document]
  def writer[A](f: A => Document): Writer[A] = new OWrites[A] {
    def writes(input: A): Document = f(input)
  }
  def isEmpty(document: Document): Boolean = document.values.isEmpty
}
import play.api.libs.json.{ JsObject, JsValue }
import reactivemongo.bson.{
BSONDocument,
BSONDocumentReader,
BSONDocumentWriter
}
/** Importable module exposing the handlers defined by the trait below. */
object ImplicitBSONHandlers extends ImplicitBSONHandlers
/**
* Implicit BSON Handlers (BSONDocumentReader/BSONDocumentWriter for JsObject)
*/
sealed trait ImplicitBSONHandlers extends BSONFormats {
  // JsObject -> BSONDocument, delegating to the document format's bson().
  implicit object JsObjectWriter extends BSONDocumentWriter[JsObject] {
    def write(obj: JsObject): BSONDocument =
      BSONFormats.BSONDocumentFormat.bson(obj)
  }
  // BSONDocument -> JsObject; `.as[JsObject]` throws if the conversion does
  // not yield a JSON object.
  implicit object JsObjectReader extends BSONDocumentReader[JsObject] {
    def read(document: BSONDocument) =
      BSONFormats.BSONDocumentFormat.writes(document).as[JsObject]
  }
  // Lets a BSONDocument be used where the JSON pack expects a writer.
  implicit object BSONDocumentWrites
    extends JSONSerializationPack.Writer[BSONDocument] {
    def writes(bson: BSONDocument): JsObject =
      BSONFormats.BSONDocumentFormat.json(bson)
  }
  implicit object JsObjectDocumentWriter // Identity writer
    extends JSONSerializationPack.Writer[JsObject] {
    def writes(obj: JsObject): JSONSerializationPack.Document = obj
  }
}
// Lower-priority implicits so the more specific handlers above win during
// implicit resolution.
sealed trait LowerImplicitBSONHandlers {
  import reactivemongo.bson.{ BSONElement, Producer }
  // Generic JsValue -> BSONValue writer. NOTE(review): `.get` throws when
  // toBSON returns a JsError, and the asInstanceOf cast to B is unchecked —
  // a mismatched target type fails at runtime, not compile time.
  implicit def jsWriter[A <: JsValue, B <: BSONValue] = new BSONWriter[A, B] {
    def write(js: A): B = BSONFormats.toBSON(js).get.asInstanceOf[B]
  }
  // Allows ("name" -> jsValue) pairs wherever a BSONElement producer is expected.
  implicit def JsFieldBSONElementProducer[T <: JsValue](jsField: (String, T)): Producer[BSONElement] = Producer.nameValue2Producer(jsField)
  implicit object BSONValueReads extends Reads[BSONValue] {
    def reads(js: JsValue) = BSONFormats.toBSON(js)
  }
  implicit object BSONValueWrites extends Writes[BSONValue] {
    def writes(bson: BSONValue) = BSONFormats.toJSON(bson)
  }
}
| fr3akX/Play-ReactiveMongo | src/main/scala/play/modules/reactivemongo/json.scala | Scala | apache-2.0 | 16,333 |
package scoverage
import org.scalatest.{BeforeAndAfter, FunSuite, OneInstancePerTest}
/** @author Stephen Samuel */
class CoverageTest extends FunSuite with BeforeAndAfter with OneInstancePerTest {

  /**
   * Builds a minimal statement fixture. All tests previously repeated the
   * same verbose Statement/Location construction inline; only the statement
   * id and its invocation count ever vary, so they are the only parameters.
   */
  private def testStatement(id: Int, invocationCount: Int): Statement =
    Statement("", Location("", "", "", ClassType.Object, "", ""), id, 2, 3, 4, "", "", "", false, invocationCount)

  test("coverage for no statements is 1") {
    val coverage = Coverage()
    assert(1.0 === coverage.statementCoverage)
  }

  test("coverage for no invoked statements is 0") {
    val coverage = Coverage()
    coverage.add(testStatement(1, 0))
    assert(0 === coverage.statementCoverage)
  }

  test("coverage for invoked statements") {
    val coverage = Coverage()
    // One of four statements invoked => 25% statement coverage.
    coverage.add(testStatement(1, 3))
    coverage.add(testStatement(2, 0))
    coverage.add(testStatement(3, 0))
    coverage.add(testStatement(4, 0))
    assert(0.25 === coverage.statementCoverage)
  }
}
| rorygraves/scalac-scoverage-plugin | scalac-scoverage-plugin/src/test/scala/scoverage/CoverageTest.scala | Scala | apache-2.0 | 1,156 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.streams.scala
package kstream
import org.apache.kafka.streams.kstream.internals.KTableImpl
import org.apache.kafka.streams.scala.serialization.Serdes
import org.apache.kafka.streams.kstream.{
SessionWindows,
SlidingWindows,
Window,
Windows,
KGroupedStream => KGroupedStreamJ,
KTable => KTableJ
}
import org.apache.kafka.streams.scala.FunctionsCompatConversions.{
AggregatorFromFunction,
InitializerFromFunction,
ReducerFromFunction,
ValueMapperFromFunction
}
/**
* Wraps the Java class KGroupedStream and delegates method calls to the underlying Java object.
*
* @tparam K Type of keys
* @tparam V Type of values
* @param inner The underlying Java abstraction for KGroupedStream
* @see `org.apache.kafka.streams.kstream.KGroupedStream`
*/
class KGroupedStream[K, V](val inner: KGroupedStreamJ[K, V]) {

  /**
   * Count the number of records in this stream by the grouped key.
   * The result is written into a local `KeyValueStore` (which is basically an ever-updating materialized view)
   * provided by the given `materialized`.
   *
   * @param materialized an instance of `Materialized` used to materialize a state store.
   * @return a [[KTable]] that contains "update" records with unmodified keys and `Long` values that
   *         represent the latest (rolling) count (i.e., number of records) for each key
   * @see `org.apache.kafka.streams.kstream.KGroupedStream#count`
   */
  def count()(implicit materialized: Materialized[K, Long, ByteArrayKeyValueStore]): KTable[K, Long] = {
    val javaCountTable: KTableJ[K, java.lang.Long] =
      inner.count(materialized.asInstanceOf[Materialized[K, java.lang.Long, ByteArrayKeyValueStore]])
    val tableImpl = javaCountTable.asInstanceOf[KTableImpl[K, ByteArrayKeyValueStore, java.lang.Long]]
    new KTable(
      javaCountTable.mapValues[Long](
        ((l: java.lang.Long) => Long2long(l)).asValueMapper,
        Materialized.`with`[K, Long, ByteArrayKeyValueStore](tableImpl.keySerde(), Serdes.longSerde)
      )
    )
  }

  /**
   * Count the number of records in this stream by the grouped key.
   * The result is written into a local `KeyValueStore` (which is basically an ever-updating materialized view)
   * provided by the given `materialized`.
   *
   * @param named a [[Named]] config used to name the processor in the topology
   * @param materialized an instance of `Materialized` used to materialize a state store.
   * @return a [[KTable]] that contains "update" records with unmodified keys and `Long` values that
   *         represent the latest (rolling) count (i.e., number of records) for each key
   * @see `org.apache.kafka.streams.kstream.KGroupedStream#count`
   */
  def count(named: Named)(implicit materialized: Materialized[K, Long, ByteArrayKeyValueStore]): KTable[K, Long] = {
    val javaCountTable: KTableJ[K, java.lang.Long] =
      inner.count(named, materialized.asInstanceOf[Materialized[K, java.lang.Long, ByteArrayKeyValueStore]])
    val tableImpl = javaCountTable.asInstanceOf[KTableImpl[K, ByteArrayKeyValueStore, java.lang.Long]]
    new KTable(
      javaCountTable.mapValues[Long](
        ((l: java.lang.Long) => Long2long(l)).asValueMapper,
        Materialized.`with`[K, Long, ByteArrayKeyValueStore](tableImpl.keySerde(), Serdes.longSerde)
      )
    )
  }

  /**
   * Combine the values of records in this stream by the grouped key.
   *
   * @param reducer a function `(V, V) => V` that computes a new aggregate result.
   * @param materialized an instance of `Materialized` used to materialize a state store.
   * @return a [[KTable]] that contains "update" records with unmodified keys, and values that represent the
   *         latest (rolling) aggregate for each key
   * @see `org.apache.kafka.streams.kstream.KGroupedStream#reduce`
   */
  def reduce(reducer: (V, V) => V)(implicit materialized: Materialized[K, V, ByteArrayKeyValueStore]): KTable[K, V] =
    new KTable(inner.reduce(reducer.asReducer, materialized))

  /**
   * Combine the values of records in this stream by the grouped key.
   *
   * @param reducer a function `(V, V) => V` that computes a new aggregate result.
   * @param named a [[Named]] config used to name the processor in the topology
   * @param materialized an instance of `Materialized` used to materialize a state store.
   * @return a [[KTable]] that contains "update" records with unmodified keys, and values that represent the
   *         latest (rolling) aggregate for each key
   * @see `org.apache.kafka.streams.kstream.KGroupedStream#reduce`
   */
  def reduce(reducer: (V, V) => V, named: Named)(implicit
    materialized: Materialized[K, V, ByteArrayKeyValueStore]
  ): KTable[K, V] =
    // Fix: forward `named` to the underlying Java API; it was previously
    // accepted but silently ignored, so the processor name never applied.
    new KTable(inner.reduce(reducer.asReducer, named, materialized))

  /**
   * Aggregate the values of records in this stream by the grouped key.
   *
   * @param initializer an `Initializer` that computes an initial intermediate aggregation result
   * @param aggregator an `Aggregator` that computes a new aggregate result
   * @param materialized an instance of `Materialized` used to materialize a state store.
   * @return a [[KTable]] that contains "update" records with unmodified keys, and values that represent the
   *         latest (rolling) aggregate for each key
   * @see `org.apache.kafka.streams.kstream.KGroupedStream#aggregate`
   */
  def aggregate[VR](initializer: => VR)(aggregator: (K, V, VR) => VR)(implicit
    materialized: Materialized[K, VR, ByteArrayKeyValueStore]
  ): KTable[K, VR] =
    new KTable(inner.aggregate((() => initializer).asInitializer, aggregator.asAggregator, materialized))

  /**
   * Aggregate the values of records in this stream by the grouped key.
   *
   * @param initializer an `Initializer` that computes an initial intermediate aggregation result
   * @param aggregator an `Aggregator` that computes a new aggregate result
   * @param named a [[Named]] config used to name the processor in the topology
   * @param materialized an instance of `Materialized` used to materialize a state store.
   * @return a [[KTable]] that contains "update" records with unmodified keys, and values that represent the
   *         latest (rolling) aggregate for each key
   * @see `org.apache.kafka.streams.kstream.KGroupedStream#aggregate`
   */
  def aggregate[VR](initializer: => VR, named: Named)(aggregator: (K, V, VR) => VR)(implicit
    materialized: Materialized[K, VR, ByteArrayKeyValueStore]
  ): KTable[K, VR] =
    new KTable(inner.aggregate((() => initializer).asInitializer, aggregator.asAggregator, named, materialized))

  /**
   * Create a new [[TimeWindowedKStream]] instance that can be used to perform windowed aggregations.
   *
   * @param windows the specification of the aggregation `Windows`
   * @return an instance of [[TimeWindowedKStream]]
   * @see `org.apache.kafka.streams.kstream.KGroupedStream#windowedBy`
   */
  def windowedBy[W <: Window](windows: Windows[W]): TimeWindowedKStream[K, V] =
    new TimeWindowedKStream(inner.windowedBy(windows))

  /**
   * Create a new [[TimeWindowedKStream]] instance that can be used to perform sliding windowed aggregations.
   *
   * @param windows the specification of the aggregation `SlidingWindows`
   * @return an instance of [[TimeWindowedKStream]]
   * @see `org.apache.kafka.streams.kstream.KGroupedStream#windowedBy`
   */
  def windowedBy(windows: SlidingWindows): TimeWindowedKStream[K, V] =
    new TimeWindowedKStream(inner.windowedBy(windows))

  /**
   * Create a new [[SessionWindowedKStream]] instance that can be used to perform session windowed aggregations.
   *
   * @param windows the specification of the aggregation `SessionWindows`
   * @return an instance of [[SessionWindowedKStream]]
   * @see `org.apache.kafka.streams.kstream.KGroupedStream#windowedBy`
   */
  def windowedBy(windows: SessionWindows): SessionWindowedKStream[K, V] =
    new SessionWindowedKStream(inner.windowedBy(windows))

  /**
   * Create a new [[CogroupedKStream]] from this grouped KStream to allow cogrouping other [[KGroupedStream]] to it.
   *
   * @param aggregator an `Aggregator` that computes a new aggregate result
   * @return an instance of [[CogroupedKStream]]
   * @see `org.apache.kafka.streams.kstream.KGroupedStream#cogroup`
   */
  def cogroup[VR](aggregator: (K, V, VR) => VR): CogroupedKStream[K, VR] =
    new CogroupedKStream(inner.cogroup(aggregator.asAggregator))
}
| guozhangwang/kafka | streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/KGroupedStream.scala | Scala | apache-2.0 | 9,239 |
/*
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.scalding.service
import com.twitter.summingbird.scalding._
/** A service that never has a value: every key looks up to None. */
class EmptyService[K, V] extends ExternalService[K, V] {
  /** Pairs each value with an absent service result, leaving timestamps and keys untouched. */
  def lookup[W](getKeys: PipeFactory[(K, W)]): PipeFactory[(K, (W, Option[V]))] =
    getKeys.map { flowToPipe =>
      flowToPipe.map { pipe =>
        pipe.map { case (time, (key, value)) =>
          (time, (key, (value, None: Option[V])))
        }
      }
    }
}
| nvoron23/summingbird | summingbird-scalding/src/main/scala/com/twitter/summingbird/scalding/service/EmptyService.scala | Scala | apache-2.0 | 894 |
package org.jetbrains.plugins.scala
package codeInspection.collections
import org.jetbrains.plugins.scala.codeInspection.InspectionBundle
import org.jetbrains.plugins.scala.codeInspection.collections.OperationOnCollectionsUtil._
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScMethodCall, ScExpression}
import org.jetbrains.plugins.scala.lang.psi.types.result.Success
import org.jetbrains.plugins.scala.lang.psi.types.ScFunctionType
import com.intellij.openapi.project.Project
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.config.ScalaVersionUtil
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
/**
* Nikolay.Tropin
* 2014-05-05
*/
/** Inspection entry point: registers the single map/getOrElse simplification. */
class MapGetOrElseInspection extends OperationOnCollectionInspection {
  override def possibleSimplificationTypes: Array[SimplificationType] =
    Array(new MapGetOrElse(this))
}
/**
 * Suggests replacing `option.map(f).getOrElse(default)` call chains with the
 * equivalent `option.fold(default)(f)`.
 */
class MapGetOrElse(inspection: OperationOnCollectionInspection) extends SimplificationType(inspection) {

  def hint = InspectionBundle.message("map.getOrElse.hint")

  override def getSimplification(last: MethodRepr, second: MethodRepr): List[Simplification] = {
    (last.optionalMethodRef, second.optionalMethodRef) match {
      // Match `.map(...)` (second) followed by `.getOrElse(...)` (last),
      // both resolving to Option-like classes, on a Scala version with fold.
      case (Some(lastRef), Some(secondRef)) if lastRef.refName == "getOrElse" &&
              secondRef.refName == "map" &&
              checkScalaVersion(lastRef) &&
              checkResolve(lastRef, likeOptionClasses) &&
              checkResolve(secondRef, likeOptionClasses) &&
              checkTypes(second.optionalBase, second.args, last.args) =>
        createSimplification(second, last.itself, "fold", last.args, second.args)
      case _ => Nil
    }
  }

  // Option.fold does not exist in Scala 2.9, so the quick-fix must not be
  // offered there.
  def checkScalaVersion(elem: PsiElement): Boolean =
    !ScalaVersionUtil.isGeneric(elem, false, ScalaVersionUtil.SCALA_2_9)

  /**
   * Checks that the rewrite to `fold` would type-check: the result type of
   * the `map` argument must conform to the expected type of `fold`'s first
   * argument list. Both calls must take exactly one argument and a base
   * expression must be present.
   */
  def checkTypes(optionalBase: Option[ScExpression], mapArgs: Seq[ScExpression], getOrElseArgs: Seq[ScExpression]): Boolean = {
    (optionalBase, mapArgs, getOrElseArgs) match {
      case (Some(baseExpr), Seq(mapArg), Seq(getOrElseArg)) =>
        mapArg.getType() match {
          case Success(ScFunctionType(mapArgRetType, _), _) =>
            val firstArgText = stripped(getOrElseArg).getText
            val secondArgText = stripped(mapArg).getText
            // Synthesize `base.fold {default}{f}` and inspect its inferred types.
            val newExprText = s"${baseExpr.getText}.fold {$firstArgText}{$secondArgText}"
            ScalaPsiElementFactory.createExpressionFromText(newExprText, baseExpr.getContext) match {
              case ScMethodCall(ScMethodCall(_, Seq(firstArg)), _) =>
                mapArgRetType.conforms(firstArg.getType().getOrNothing)
              case _ => false
            }
          case _ => false
        }
      case _ => false
    }
  }
}
/*
* Copyright University of Basel, Graphics and Vision Research Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalismo.faces.render
import scalismo.geometry.{EuclideanVector, _3D}
/** a bidirectional reflectance distribution function to describe reflectance models */
trait BRDF[A] {
  /**
   * Evaluates the reflectance for the given pair of directions.
   *
   * @param lightDirection direction towards the light source
   * @param viewDirection  direction towards the viewer
   * @return the reflectance value of type `A`
   */
  def apply(lightDirection: EuclideanVector[_3D], viewDirection: EuclideanVector[_3D]): A
}
package ru.mipt.acsl.geotarget.web
import scala.scalajs.js.JSApp
import scala.scalajs.js.Dynamic.{global => g}
/**
* @author Artem Shein
*/
// Scala.js entry point: prints a smoke-test message to the console and pops
// a browser alert via the dynamically-typed global scope.
object GeoTargetWebJsApp extends JSApp {
  def main(): Unit = {
    println("Hello, JS world!")
    g.alert("Scala.JS works fine, dude!")
  }
}
| acsl-mipt/geo-target | geo-target-web/src/main/scala/ru/mipt/acsl/geotarget/web/GeoTargetWebJsApp.scala | Scala | mit | 290 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset down to a sample of code snippets matching specific criteria, offering a quick overview of the dataset's contents without performing deeper analysis.