code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
import java.lang.reflect.Parameter
import scala.reflect.{ClassTag, classTag}
import scala.reflect.NameTransformer
// Fixture class for the parameter-name reflection test below: the constructor
// and method deliberately use exotic parameter names (underscore, symbolic,
// backquoted, dotted, slashed) so the test can verify that NameTransformer-encoded
// names are emitted into the classfile.  Do not rename any parameter.
class C(a: Int, `_`: String, *** : Long, `unary_!` : Float, ABC: Double, `scala.lang`: Boolean, `a/b`: Boolean) {
  // Same parameter-name shapes on an instance method; returns a fixed value
  // checked by the caller at the bottom of the file.
  def f(a: Int, `_`: String, *** : Long, `unary_!` : Float, ABC: Double, `scala.lang`: Boolean, `a/b`: Boolean) = 42
}
class D
// Companion of D: same exotic parameter names on an object method, so the
// test covers the static-forwarder / module-method case too.
object D {
  def f(a: Int, `_`: String, *** : Long, `unary_!` : Float, ABC: Double, `scala.lang`: Boolean, `a/b`: Boolean) = 42
}
object Test extends App {
  // Reflectively fetch the parameters of A's first constructor.
  def constrParams[A: ClassTag] = classTag[A].runtimeClass.getConstructors.head.getParameters
  // Reflectively fetch the parameters of A's first declared method.
  def methodParams[A: ClassTag] = classTag[A].runtimeClass.getDeclaredMethods.head.getParameters
  // For each expected source-level name, the reflected Parameter must:
  //  - report its name as present (i.e. names were written to the classfile),
  //  - not be synthetic,
  //  - equal the NameTransformer-encoded form of the source name.
  def verifyParams(expected: Seq[String])(params: Array[Parameter]) =
    expected.zip(params).foreach {
      case (expect, actual) =>
        assert(actual.isNamePresent, s"name $expect should be present")
        assert(!actual.isSynthetic, s"name $expect should not be synthetic")
        val encoded = NameTransformer.encode(expect)
        assert(encoded == actual.getName, s"expected name $expect ($encoded) but was ${actual.getName}")
    }
  val expected = List("a", "_", "***", "unary_!", "ABC", "scala.lang", "a/b")
  verifyParams(expected)(constrParams[C])
  verifyParams(expected)(methodParams[C])
  verifyParams(expected)(methodParams[D])
  // Also exercise named arguments using the exotic names at call sites.
  val c = new C(a = 1, `_` = "2", *** = 3L, `unary_!` = 4.0f, ABC = 5.0, `scala.lang` = true, `a/b` = false)
  val res = c.f(a = 1, `_` = "2", *** = 3L, `unary_!` = 4.0f, ABC = 5.0, `scala.lang` = true, `a/b` = false)
  assert(res == 42, s"bad result $res")
}
| scala/scala | test/files/run/t9437a.scala | Scala | apache-2.0 | 1,621 |
// Solution-5.scala
// Solution to Exercise 5 in "Testing"
import com.atomicscala.AtomicTest._
/** Area of a square with side length `x`. */
def squareArea(x: Int): Int = x * x
// NOTE(review): computes x * x and ignores y, which is not a rectangle's
// area.  This appears intentional: the OUTPUT_SHOULD_BE block at the bottom
// expects the rectangleArea(5, 4) check to fail (25 reported vs expected 20),
// i.e. the exercise demonstrates a test catching a bug.  Confirm before "fixing".
def rectangleArea(x:Int, y:Int):Int = {
x * x
}
// NOTE(review): h/2 is integer division (h, x, y are Int), so odd heights
// truncate — h = 1 yields 0 and the whole area becomes 0.0.  The expected
// output below lists exactly this failure (0.0 vs 3.5), so the defect seems
// to be a deliberate part of the exercise.  Confirm before "fixing".
def trapezoidArea(x:Int, y:Int, h:Int):Double = {
h/2 * (x + y)
}
// Exercise the three area functions with AtomicTest's `is`, which prints the
// actual value and reports an [Error] line when it differs from the expected
// one (see the OUTPUT_SHOULD_BE comment below).
squareArea(1) is 1
squareArea(2) is 4
squareArea(5) is 25
rectangleArea(2, 2) is 4
rectangleArea(5, 4) is 20
trapezoidArea(2, 2, 4) is 8
trapezoidArea(3, 4, 1) is 3.5
/* OUTPUT_SHOULD_BE
1
4
25
4
25
[Error] expected:
20
8.0
0.0
[Error] expected:
3.5
*/
| P7h/ScalaPlayground | Atomic Scala/atomic-scala-solutions/15_Testing/Solution-5.scala | Scala | apache-2.0 | 513 |
package com.github.diegopacheco.sandbox.scala.others
// Demonstrates assert vs require.  Execution is expected to fail at runtime
// on the second require (IllegalArgumentException: requirement failed) once
// y has been reassigned to 11, so the final println never runs.
object OthersScala extends App {
  val x = 10
  var y = 3 // mutable on purpose: reassigned below to make require fail
  assert( x > 0 )
  assert( y > 0)
  println("x and y are bigger than ZERO!")
  require( x > y )
  println("X is bigger than Y")
  y = 11
  require( x > y ) // throws IllegalArgumentException here
  println("X is bigger than Y ? ")
} | diegopacheco/scala-playground | type-system-scala/src/main/scala/com/github/diegopacheco/sandbox/scala/others/OthersScala.scala | Scala | unlicense | 362 |
package lib
import play.api.Play
import org.mindrot.jbcrypt.BCrypt
import traits.Authenticator
import com.google.inject.Inject
import reactivemongo.api.DefaultDB
import reactivemongo.bson.{BSONInteger, BSONString, BSONDocument}
import reactivemongo.bson.handlers.DefaultBSONHandlers._
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import concurrent.ExecutionContext.Implicits.global
/**
 * Authenticator backed by a MongoDB "users" collection, with a simple
 * lockout counter (`retryCount`) that blocks accounts after 5 failures.
 *
 * NOTE(review): `Await.result(..., Duration.Inf)` blocks the calling thread
 * indefinitely if the database never responds — consider a finite timeout.
 * NOTE(review): the `users.update(...)` future below is fire-and-forget; a
 * failed lockout-counter write is silently dropped.
 */
class MongoBackedAuthenticator @Inject()(db: DefaultDB) extends Authenticator {
  // Read a mandatory key from the Play configuration (.get throws if absent).
  private def config(key: String) = Play.current.configuration.getString(key).get
  private val users = db.collection("users")

  /**
   * Check `password` against the stored bcrypt hash for `username`.
   * Returns false for unknown users and for accounts whose retryCount
   * exceeds 5; otherwise resets the counter on success and increments it
   * on failure.
   */
  def authenticate(username: String, password: String): Boolean = {
    val query = BSONDocument("username" -> BSONString(username))
    val maybeUser = Await.result(users.find(query).headOption, Duration.Inf)
    maybeUser map { user =>
      // Missing retryCount is treated as zero failures.
      val lockout = user.getAs[BSONInteger]("retryCount").map(_.value).getOrElse(0)
      if(lockout > 5) {
        false
      } else {
        // NOTE(review): .get assumes every user document has a "password"
        // field — a malformed document would throw here.
        val authn = user.getAs[BSONString]("password").map(_.value).get
        val valid = BCrypt.checkpw(password, authn)
        // increment or reset the lockout
        users.update(
          BSONDocument("_id" -> user.get("_id").get),
          BSONDocument(
            "$set" -> BSONDocument(
              "retryCount" -> BSONInteger(if(valid) 0 else { lockout +1 })
            )
          )
        )
        valid
      }
    } getOrElse false
  }

  // Check a password against the statically configured credential.
  def authenticate(password: String): Boolean =
    BCrypt.checkpw(password, config("credentials.password"))
}
| DFID/aid-platform-beta | src/platform/modules/admin/app/lib/MongoBackedAuthenticator.scala | Scala | mit | 1,573 |
package net.sansa_stack.owl.spark.writers
import java.io.{BufferedWriter, ByteArrayOutputStream, OutputStreamWriter, PrintWriter}
import java.util.Collections
import net.sansa_stack.owl.spark.rdd.OWLAxiomsRDD
import org.obolibrary.obo2owl.OWLAPIOwl2Obo
import org.obolibrary.oboformat.writer.OBOFormatWriter
import org.semanticweb.owlapi.apibinding.OWLManager
import scala.collection.JavaConverters._
/**
 * Saves an RDD of OWL axioms in OBO format: each partition is rendered to a
 * single string and the result is written with `saveAsTextFile`.
 */
object OBOWriter extends OWLWriterBase {
  /**
   * Serialise every axiom of `owlAxioms` to OBO text under `filePath`.
   * Each axiom is wrapped in its own single-axiom ontology, converted with
   * OWLAPIOwl2Obo, and all of its typedef/term/instance frames are written.
   *
   * NOTE(review): a fresh OWLOntologyManager is created per axiom inside the
   * loop — looks expensive; verify whether per-axiom isolation is required.
   */
  override def save(filePath: String, owlAxioms: OWLAxiomsRDD): Unit =
    owlAxioms.mapPartitions(partition => if (partition.hasNext) {
      val os = new ByteArrayOutputStream()
      val osWriter = new OutputStreamWriter(os)
      val buffWriter = new PrintWriter(new BufferedWriter(osWriter))
      val translator = new OWLAPIOwl2Obo(OWLManager.createOWLOntologyManager())
      val oboWriter = new OBOFormatWriter
      partition.foreach(axiom => {
        val ont = OWLManager.createOWLOntologyManager().createOntology(Seq(axiom).asJava)
        val translation = translator.convert(ont)
        val nameProvider = new OBOFormatWriter.OBODocNameProvider(translation)
        // Emit every frame kind of the translated OBO document.
        translation.getTypedefFrames.asScala.foreach(oboWriter.write(_, buffWriter, nameProvider))
        translation.getTermFrames.asScala.foreach(oboWriter.write(_, buffWriter, nameProvider))
        translation.getInstanceFrames.asScala.foreach(oboWriter.write(_, buffWriter, nameProvider))
      })
      buffWriter.flush()
      // `nl` presumably comes from OWLWriterBase (newline constant) — not visible here.
      Collections.singleton(os.toString("UTF-8").trim + nl).iterator().asScala
    } else {
      // Empty partition: contribute no output line.
      Iterator()
    }).saveAsTextFile(filePath)
}
| SANSA-Stack/SANSA-RDF | sansa-owl/sansa-owl-spark/src/main/scala/net/sansa_stack/owl/spark/writers/OBOWriter.scala | Scala | apache-2.0 | 1,595 |
package org.jetbrains.plugins.cbt.runner
/** Callback interface for observing a running CBT process. */
trait CbtProcessListener {
  /** Called when the process emits output; `stderr` is true for the error stream. */
  def onTextAvailable(text: String, stderr: Boolean): Unit
  /** Called once the process has finished. */
  def onComplete(): Unit
}
object CbtProcessListener {
  /** A listener that silently discards every process event. */
  val Dummy: CbtProcessListener = new CbtProcessListener {
    override def onTextAvailable(text: String, stderr: Boolean): Unit = {}
    override def onComplete(): Unit = {}
  }
}
| triplequote/intellij-scala | cbt/src/org/jetbrains/plugins/cbt/runner/CbtProcessListener.scala | Scala | apache-2.0 | 347 |
final class Not2[T] private ()
trait LowPriorityNot2 {
  /** A fallback method used to emulate negation in Scala 2.
   *  Being inherited (low priority), it only applies when the ambiguous
   *  `amb1`/`amb2` pair in the companion does not, i.e. when no implicit
   *  `T` is available.
   */
  implicit def default[T]: Not2[T] = Not2.value.asInstanceOf[Not2[T]]
}
object Not2 extends LowPriorityNot2 {
  /** A value of type `Not` to signal a successful search for `Not[C]` (i.e. a failing
   * search for `C`). A reference to this value will be explicitly constructed by
   * Dotty's implicit search algorithm
   */
  def value: Not2[Nothing] = new Not2[Nothing]()
  /** One of two ambiguous methods used to emulate negation in Scala 2 */
  implicit def amb1[T](implicit ev: T): Not2[T] = ???
  /** One of two ambiguous methods used to emulate negation in Scala 2 */
  implicit def amb2[T](implicit ev: T): Not2[T] = ???
}
object Test {
  class Foo
  class Bar
  implicit def foo: Foo = ???
  implicitly[Foo]
  // Foo has an implicit instance, so amb1/amb2 both apply and the search is
  // ambiguous; the "// error" marker is the expectation of this negative test.
  implicitly[Not2[Foo]] // error
  // Bar has no implicit instance, so the low-priority default supplies Not2[Bar].
  implicitly[Not2[Bar]]
}
| som-snytt/dotty | tests/neg/i5234b.scala | Scala | apache-2.0 | 902 |
/* sbt -- Simple Build Tool
* Copyright 2010 Mark Harrah
*/
package sbt
import std._
import xsbt.api.{Discovered,Discovery}
import inc.Analysis
import TaskExtra._
import Types._
import xsbti.api.Definition
import ConcurrentRestrictions.Tag
import org.scalatools.testing.{AnnotatedFingerprint, Fingerprint, Framework, SubclassFingerprint}
import java.io.File
sealed trait TestOption
/**
 * Helpers for configuring, discovering and running tests through the
 * test-interface `Framework` API: option interpretation, task construction,
 * result aggregation and reporting.
 */
object Tests
{
  // (overall result, individual results)
  type Output = (TestResult.Value, Map[String,TestResult.Value])

  /** Run `setup` with the test ClassLoader before tests execute. */
  final case class Setup(setup: ClassLoader => Unit) extends TestOption
  def Setup(setup: () => Unit) = new Setup(_ => setup())
  /** Run `cleanup` with the test ClassLoader after tests execute. */
  final case class Cleanup(cleanup: ClassLoader => Unit) extends TestOption
  def Cleanup(setup: () => Unit) = new Cleanup(_ => setup())
  /** Exclude the named tests from execution. */
  final case class Exclude(tests: Iterable[String]) extends TestOption
  /** Report listeners notified of test events. */
  final case class Listeners(listeners: Iterable[TestReportListener]) extends TestOption
  /** Keep only tests whose name satisfies `filterTest`. */
  final case class Filter(filterTest: String => Boolean) extends TestOption
  // args for all frameworks
  def Argument(args: String*): Argument = Argument(None, args.toList)
  // args for a particular test framework
  def Argument(tf: TestFramework, args: String*): Argument = Argument(Some(tf), args.toList)
  // None means apply to all, Some(tf) means apply to a particular framework only.
  final case class Argument(framework: Option[TestFramework], args: List[String]) extends TestOption

  /** Full execution configuration: options, parallelism flag and concurrency tags. */
  final case class Execution(options: Seq[TestOption], parallel: Boolean, tags: Seq[(Tag, Int)])

  /**
   * Interpret `config.options` (filters, excludes, listeners, setup/cleanup
   * hooks, per-framework arguments) and build the task that runs the
   * filtered `discovered` tests with the given loader and frameworks.
   */
  def apply(frameworks: Map[TestFramework, Framework], testLoader: ClassLoader, discovered: Seq[TestDefinition], config: Execution, log: Logger): Task[Output] =
  {
    import collection.mutable.{HashSet, ListBuffer, Map, Set}
    val testFilters = new ListBuffer[String => Boolean]
    val excludeTestsSet = new HashSet[String]
    val setup, cleanup = new ListBuffer[ClassLoader => Unit]
    val testListeners = new ListBuffer[TestReportListener]
    val testArgsByFramework = Map[Framework, ListBuffer[String]]()
    val undefinedFrameworks = new ListBuffer[String]
    def frameworkArgs(framework: Framework, args: Seq[String]): Unit =
      testArgsByFramework.getOrElseUpdate(framework, new ListBuffer[String]) ++= args
    def frameworkArguments(framework: TestFramework, args: Seq[String]): Unit =
      (frameworks get framework) match {
        case Some(f) => frameworkArgs(f, args)
        case None => undefinedFrameworks += framework.implClassName
      }
    for(option <- config.options)
    {
      option match
      {
        case Filter(include) => testFilters += include
        case Exclude(exclude) => excludeTestsSet ++= exclude
        case Listeners(listeners) => testListeners ++= listeners
        case Setup(setupFunction) => setup += setupFunction
        case Cleanup(cleanupFunction) => cleanup += cleanupFunction
        /**
          * There are two cases here.
          * The first handles TestArguments in the project file, which
          * might have a TestFramework specified.
          * The second handles arguments to be applied to all test frameworks.
          *   -- arguments from the project file that didn't have a framework specified
          *   -- command line arguments (ex: test-only someClass -- someArg)
          *      (currently, command line args must be passed to all frameworks)
          */
        case Argument(Some(framework), args) => frameworkArguments(framework, args)
        case Argument(None, args) => frameworks.values.foreach { f => frameworkArgs(f, args) }
      }
    }

    if(excludeTestsSet.size > 0)
      log.debug(excludeTestsSet.mkString("Excluding tests: \\n\\t", "\\n\\t", ""))
    if(undefinedFrameworks.size > 0)
      log.warn("Arguments defined for test frameworks that are not present:\\n\\t" + undefinedFrameworks.mkString("\\n\\t"))
    def includeTest(test: TestDefinition) = !excludeTestsSet.contains(test.name) && testFilters.forall(filter => filter(test.name))
    val tests = discovered.filter(includeTest).toSet.toSeq
    val arguments = testArgsByFramework.map { case (k,v) => (k, v.toList) } toMap;
    testTask(frameworks.values.toSeq, testLoader, tests, setup.readOnly, cleanup.readOnly, log, testListeners.readOnly, arguments, config)
  }

  /**
   * Assemble the task graph: user + framework setup first, then the test
   * runnables (parallel or serial per `config`), then user + framework
   * cleanup, yielding the aggregated Output.
   */
  def testTask(frameworks: Seq[Framework], loader: ClassLoader, tests: Seq[TestDefinition],
    userSetup: Iterable[ClassLoader => Unit], userCleanup: Iterable[ClassLoader => Unit],
    log: Logger, testListeners: Seq[TestReportListener], arguments: Map[Framework, Seq[String]], config: Execution): Task[Output] =
  {
    // fork-join a collection of side-effecting actions into one Task[Unit]
    def fj(actions: Iterable[() => Unit]): Task[Unit] = nop.dependsOn( actions.toSeq.fork( _() ) : _*)
    // partially apply the test ClassLoader to each user hook
    def partApp(actions: Iterable[ClassLoader => Unit]) = actions.toSeq map {a => () => a(loader) }

    val (frameworkSetup, runnables, frameworkCleanup) =
      TestFramework.testTasks(frameworks, loader, tests, log, testListeners, arguments)

    val setupTasks = fj(partApp(userSetup) :+ frameworkSetup)
    val mainTasks =
      if(config.parallel)
        makeParallel(runnables, setupTasks, config.tags).toSeq.join
      else
        makeSerial(runnables, setupTasks, config.tags)
    val taggedMainTasks = mainTasks.tagw(config.tags : _*)
    taggedMainTasks map processResults flatMap { results =>
      // framework cleanup receives the overall result
      val cleanupTasks = fj(partApp(userCleanup) :+ frameworkCleanup(results._1))
      cleanupTasks map { _ => results }
    }
  }

  // A named, deferred test execution producing its result.
  type TestRunnable = (String, () => TestResult.Value)

  /** One task per test, each depending on setup and carrying the concurrency tags. */
  def makeParallel(runnables: Iterable[TestRunnable], setupTasks: Task[Unit], tags: Seq[(Tag,Int)]) =
    runnables map { case (name, test) => task { (name, test()) } dependsOn setupTasks named name tagw(tags : _*) }

  /** A single task that runs all tests sequentially after setup. */
  def makeSerial(runnables: Iterable[TestRunnable], setupTasks: Task[Unit], tags: Seq[(Tag,Int)]) =
    task { runnables map { case (name, test) => (name, test()) } } dependsOn(setupTasks)

  /** Combine per-test results into (overall result, name -> result map). */
  def processResults(results: Iterable[(String, TestResult.Value)]): (TestResult.Value, Map[String, TestResult.Value]) =
    (overall(results.map(_._2)), results.toMap)
  /** Merge several Output tasks; keeps the worst (highest id) overall result. */
  def foldTasks(results: Seq[Task[Output]], parallel: Boolean): Task[Output] =
    if (parallel)
      reduced(results.toIndexedSeq, {
        case ((v1, m1), (v2, m2)) => (if (v1.id < v2.id) v2 else v1, m1 ++ m2)
      })
    else {
      // run tasks one after another, accumulating their outputs in order
      def sequence(tasks: List[Task[Output]], acc: List[Output]): Task[List[Output]] = tasks match {
        case Nil => task(acc.reverse)
        case hd::tl => hd flatMap { out => sequence(tl, out::acc) }
      }
      sequence(results.toList, List()) map { ress =>
        val (rs, ms) = ress.unzip
        (overall(rs), ms reduce (_ ++ _))
      }
    }
  /** Worst result wins: higher TestResult id dominates (Passed < Failed < Error). */
  def overall(results: Iterable[TestResult.Value]): TestResult.Value =
    (TestResult.Passed /: results) { (acc, result) => if(acc.id < result.id) result else acc }
  /** Discover tests and main classes using the frameworks' fingerprints. */
  def discover(frameworks: Seq[Framework], analysis: Analysis, log: Logger): (Seq[TestDefinition], Set[String]) =
    discover(frameworks flatMap TestFramework.getTests, allDefs(analysis), log)

  /** All top-level API definitions from the incremental-compilation analysis. */
  def allDefs(analysis: Analysis) = analysis.apis.internal.values.flatMap(_.api.definitions).toSeq
  /**
   * Match compiled definitions against subclass/annotation fingerprints,
   * returning the discovered test definitions and the names of classes
   * with a main method.
   */
  def discover(fingerprints: Seq[Fingerprint], definitions: Seq[Definition], log: Logger): (Seq[TestDefinition], Set[String]) =
  {
    val subclasses = fingerprints collect { case sub: SubclassFingerprint => (sub.superClassName, sub.isModule, sub) };
    val annotations = fingerprints collect { case ann: AnnotatedFingerprint => (ann.annotationName, ann.isModule, ann) };
    log.debug("Subclass fingerprints: " + subclasses)
    log.debug("Annotation fingerprints: " + annotations)

    def firsts[A,B,C](s: Seq[(A,B,C)]): Set[A] = s.map(_._1).toSet
    // `IsModule` is uppercase deliberately: it is a stable identifier in the
    // pattern, so only entries with a matching isModule flag are kept.
    def defined(in: Seq[(String,Boolean,Fingerprint)], names: Set[String], IsModule: Boolean): Seq[Fingerprint] =
      in collect { case (name, IsModule, print) if names(name) => print }

    def toFingerprints(d: Discovered): Seq[Fingerprint] =
      defined(subclasses, d.baseClasses, d.isModule) ++
      defined(annotations, d.annotations, d.isModule)

    val discovered = Discovery(firsts(subclasses), firsts(annotations))(definitions)
    val tests = for( (df, di) <- discovered; fingerprint <- toFingerprints(di) ) yield new TestDefinition(df.name, fingerprint)
    val mains = discovered collect { case (df, di) if di.hasMain => df.name }
    (tests, mains.toSet)
  }
  /** Log per-result summaries and fail the build if any test failed or errored. */
  def showResults(log: Logger, results: (TestResult.Value, Map[String, TestResult.Value]), noTestsMessage: =>String): Unit =
  {
    if (results._2.isEmpty)
      log.info(noTestsMessage)
    else {
      import TestResult.{Error, Failed, Passed}
      // `Tpe` uppercase: stable-identifier pattern selecting one result kind
      def select(Tpe: TestResult.Value) = results._2 collect { case (name, Tpe) => name }
      val failures = select(Failed)
      val errors = select(Error)
      val passed = select(Passed)
      def show(label: String, level: Level.Value, tests: Iterable[String]): Unit =
        if(!tests.isEmpty)
        {
          log.log(level, label)
          log.log(level, tests.mkString("\\t", "\\n\\t", ""))
        }
      show("Passed tests:", Level.Debug, passed )
      show("Failed tests:", Level.Error, failures)
      show("Error during tests:", Level.Error, errors)
      if(!failures.isEmpty || !errors.isEmpty)
        error("Tests unsuccessful")
    }
  }
  /** Where a group of tests should run. */
  sealed trait TestRunPolicy
  /** Run in the current JVM. */
  case object InProcess extends TestRunPolicy
  /** Run in a forked JVM with the given options. */
  final case class SubProcess(javaOptions: Seq[String]) extends TestRunPolicy
  /** A named group of tests sharing one run policy. */
  final case class Group(name: String, tests: Seq[TestDefinition], runPolicy: TestRunPolicy)
}
| olove/xsbt | main/actions/src/main/scala/sbt/Tests.scala | Scala | bsd-3-clause | 9,156 |
/*
* Copyright 2016 rdbc contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.rdbc.pgsql.core.internal.protocol.messages.backend
/** Zero-cost wrapper for the backend process id carried by BackendKeyData. */
final case class PgPid(value: Int) extends AnyVal
/** Zero-cost wrapper for the per-backend key carried by BackendKeyData. */
final case class PgKey(value: Int) extends AnyVal
/** The BackendKeyData backend message: the server's (pid, key) pair.
 *  Presumably used later to authorise cancel requests — not shown here.
 */
final case class BackendKeyData(pid: PgPid, key: PgKey) extends PgBackendMessage
| rdbc-io/rdbc-pgsql | rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/protocol/messages/backend/BackendKeyData.scala | Scala | apache-2.0 | 847 |
package java.time
import scala.collection.JavaConverters._
import java.time.temporal._
/** Scala.js port of `java.time.Duration`: a time-based amount stored as
 *  whole `seconds` plus a nanosecond adjustment `nanos` in [0, 999999999].
 */
final class Duration private (seconds: Long, nanos: Int)
    extends TemporalAmount with Comparable[Duration]
    with java.io.Serializable {

  import Preconditions.requireDateTime
  import Constants._
  import ChronoUnit._

  requireDateTime(nanos >= 0 && nanos <= 999999999,
      "nanos must be >= 0 and <= 999999999")

  // Alternative representation where both components carry the sign of the
  // duration (for negative durations nanos becomes <= 0); used by the
  // arithmetic and formatting code below.
  private val (normalizedSeconds, normalizedNanos) =
    if (seconds < 0 && nanos > 0) (seconds + 1, nanos - NANOS_IN_SECOND)
    else (seconds, nanos)

  // Only SECONDS and NANOS are supported, per the java.time contract.
  def get(unit: TemporalUnit): Long = unit match {
    case SECONDS => seconds
    case NANOS => nanos
    case _ =>
      throw new UnsupportedTemporalTypeException(s"Unit not supported: $unit")
  }

  def getUnits(): java.util.List[TemporalUnit] =
    Seq[TemporalUnit](SECONDS, NANOS).asJava

  def isZero(): Boolean = seconds == 0 && nanos == 0

  // nanos is non-negative, so the sign is carried entirely by seconds.
  def isNegative(): Boolean = seconds < 0

  def getSeconds(): Long = seconds

  def getNano(): Int = nanos

  def withSeconds(seconds: Long): Duration =
    new Duration(seconds, nanos)

  def withNanos(nanosOfSecond: Int): Duration =
    new Duration(seconds, nanosOfSecond)

  // Sum with explicit nano-carry handling; addExact throws on Long overflow.
  def plus(duration: Duration): Duration = {
    val seconds1 = duration.getSeconds
    val sumNanos = nanos + duration.getNano
    if (seconds1 >= 0) {
      val sumSeconds = MathJDK8Bridge.addExact(seconds, seconds1)
      if (sumNanos >= NANOS_IN_SECOND)
        new Duration(MathJDK8Bridge.incrementExact(sumSeconds),
            sumNanos - NANOS_IN_SECOND)
      else
        new Duration(sumSeconds, sumNanos)
    } else {
      // Add seconds1 + 1 first so the intermediate sum cannot spuriously
      // overflow before the nano carry is accounted for.
      val sumSeconds = MathJDK8Bridge.addExact(seconds, seconds1 + 1)
      if (sumNanos >= NANOS_IN_SECOND)
        new Duration(sumSeconds, sumNanos - NANOS_IN_SECOND)
      else
        new Duration(MathJDK8Bridge.decrementExact(sumSeconds), sumNanos)
    }
  }

  // Only exact (non-estimated) units plus DAYS are accepted.
  def plus(amount: Long, unit: TemporalUnit): Duration = {
    if (!unit.isDurationEstimated || unit == DAYS)
      plus(unit.getDuration.multipliedBy(amount))
    else
      throw new UnsupportedTemporalTypeException(s"Unit not supported: $unit")
  }

  def plusDays(days: Long): Duration = plus(days, DAYS)

  def plusHours(hours: Long): Duration = plus(hours, HOURS)

  def plusMinutes(minutes: Long): Duration = plus(minutes, MINUTES)

  def plusSeconds(seconds: Long): Duration = plus(seconds, SECONDS)

  def plusMillis(millis: Long): Duration = plus(millis, MILLIS)

  def plusNanos(nanos: Long): Duration = plus(nanos, NANOS)

  def minus(duration: Duration): Duration = {
    // Min cannot be negated (Long.MinValue has no positive counterpart),
    // so subtracting it is rewritten as adding Max plus one nano.
    if (duration == Duration.Min)
      plus(Duration.Max).plusNanos(1)
    else
      plus(duration.negated())
  }

  def minus(amount: Long, unit: TemporalUnit): Duration = {
    if (!unit.isDurationEstimated || unit == DAYS)
      minus(unit.getDuration.multipliedBy(amount))
    else
      throw new UnsupportedTemporalTypeException(s"Unit not supported: $unit")
  }

  def minusDays(days: Long): Duration = minus(days, DAYS)

  def minusHours(hours: Long): Duration = minus(hours, HOURS)

  def minusMinutes(minutes: Long): Duration = minus(minutes, MINUTES)

  def minusSeconds(seconds: Long): Duration = minus(seconds, SECONDS)

  def minusMillis(millis: Long): Duration = minus(millis, MILLIS)

  def minusNanos(nanos: Long): Duration = minus(nanos, NANOS)

  def multipliedBy(multiplicand: Long): Duration = {
    // Multiply the nano component, falling back to BigInt if the Long
    // product overflows; result split into (whole seconds, remainder nanos).
    val (prodNanosQuot, prodNanosRem) = {
      try {
        val prodNanos = MathJDK8Bridge.multiplyExact(normalizedNanos, multiplicand)
        (prodNanos / NANOS_IN_SECOND, (prodNanos % NANOS_IN_SECOND).toInt)
      } catch {
        case _: ArithmeticException =>
          val prodNanos = BigInt(normalizedNanos) * multiplicand
          ((prodNanos / NANOS_IN_SECOND).toLong, (prodNanos % NANOS_IN_SECOND).toInt)
      }
    }
    val prodSeconds = MathJDK8Bridge.multiplyExact(normalizedSeconds, multiplicand)
    // Re-normalise so the constructor's nanos-in-[0, 1e9) invariant holds.
    val newSeconds =
      if (prodNanosRem >= 0) MathJDK8Bridge.addExact(prodSeconds, prodNanosQuot)
      else MathJDK8Bridge.addExact(prodSeconds, prodNanosQuot - 1)
    val newNanos =
      if (prodNanosRem >= 0) prodNanosRem
      else prodNanosRem + NANOS_IN_SECOND
    new Duration(newSeconds, newNanos)
  }

  def dividedBy(divisor: Long): Duration = divisor match {
    case 1 => this
    case -1 => negated
    case _ =>
      val secondsQuot = normalizedSeconds / divisor
      val secondsRem = normalizedSeconds % divisor
      // Divide the leftover (remainder seconds as nanos + nano component),
      // using BigInt only when the intermediate total overflows a Long.
      val nanos = {
        try {
          val total = MathJDK8Bridge.addExact(
              MathJDK8Bridge.multiplyExact(secondsRem, NANOS_IN_SECOND),
              normalizedNanos)
          total / divisor
        } catch {
          case _: ArithmeticException =>
            val total = BigInt(secondsRem) * NANOS_IN_SECOND + normalizedNanos
            (total / divisor).toLong
        }
      }
      Duration.ofSeconds(secondsQuot).plusNanos(nanos)
  }

  def negated(): Duration = multipliedBy(-1)

  def abs(): Duration = if (isNegative()) negated() else this

  def addTo(temporal: Temporal): Temporal = {
    val t1 =
      if (seconds == 0) temporal
      else temporal.plus(seconds, SECONDS)

    if (nanos == 0) t1
    else t1.plus(nanos, NANOS)
  }

  def subtractFrom(temporal: Temporal): Temporal = {
    val t1 =
      if (seconds == 0) temporal
      else temporal.minus(seconds, SECONDS)

    if (nanos == 0) t1
    else t1.minus(nanos, NANOS)
  }

  // Truncating conversions: nanos are ignored, seconds divided down.
  def toDays(): Long = seconds / SECONDS_IN_DAY

  def toHours(): Long = seconds / SECONDS_IN_HOUR

  def toMinutes(): Long = seconds / SECONDS_IN_MINUTE

  def toMillis(): Long = {
    val millis1 = MathJDK8Bridge.multiplyExact(seconds, MILLIS_IN_SECOND)
    val millis2 = nanos / NANOS_IN_MILLI
    MathJDK8Bridge.addExact(millis1, millis2)
  }

  def toNanos(): Long =
    MathJDK8Bridge.addExact(
        MathJDK8Bridge.multiplyExact(seconds, NANOS_IN_SECOND), nanos)

  // Compare seconds first, then nanos (both components already normalised).
  def compareTo(that: Duration): Int = {
    val secCmp = seconds.compareTo(that.getSeconds)
    if (secCmp == 0) nanos.compareTo(that.getNano)
    else secCmp
  }

  override def equals(that: Any): Boolean = that match {
    case that: Duration =>
      seconds == that.getSeconds && nanos == that.getNano

    case _ => false
  }

  override def hashCode(): Int = 31 * seconds.hashCode + nanos

  // ISO-8601 representation, e.g. "PT1H30M5.5S" or "PT-0.5S".
  override def toString(): String = {
    val mins = normalizedSeconds / 60
    val secsOfMin = normalizedSeconds % 60
    val hours = mins / 60
    val minsOfHour = mins % 60
    val hourPart = if (hours == 0) "" else hours.toString + "H"
    val minPart = if (minsOfHour == 0) "" else minsOfHour.toString + "M"
    val nanos1 = math.abs(normalizedNanos)
    // Pad to 9 digits, then drop trailing zeros for the fractional part.
    val decimals = f"$nanos1%09d".reverse.dropWhile(_ == '0').reverse
    val decimalPart = if (decimals.isEmpty) "" else "." + decimals
    val secsPart = secsOfMin match {
      // Whole-minute durations with no nanos omit the seconds part entirely;
      // small negative durations need an explicit "-0" integer part.
      case 0 if seconds != 0 && nanos == 0 => ""
      case 0 if seconds < 0 => "-0" + decimalPart + "S"
      case n => n.toString + decimalPart + "S"
    }
    "PT" + hourPart + minPart + secsPart
  }
}
object Duration {
  import Constants._

  final val ZERO = new Duration(0, 0)

  // Extreme representable durations; Min requires special-casing in minus().
  private[time] final val Min = new Duration(Long.MinValue, 0)
  private[time] final val Max = new Duration(Long.MaxValue, 999999999)

  // Unit durations used as multiplication bases by the factories below.
  private[time] final val OneNano = new Duration(0, 1)
  private[time] final val OneMicro = new Duration(0, NANOS_IN_MICRO)
  private[time] final val OneMilli = new Duration(0, NANOS_IN_MILLI)
  private[time] final val OneSecond = new Duration(1, 0)
  private[time] final val OneMinute = new Duration(SECONDS_IN_MINUTE, 0)
  private[time] final val OneHour = new Duration(SECONDS_IN_HOUR, 0)
  private[time] final val OneDay = new Duration(SECONDS_IN_DAY, 0)
  private[time] final val OneWeek = new Duration(SECONDS_IN_WEEK, 0)
  private[time] final val OneMonth = new Duration(SECONDS_IN_MONTH, 0)
  private[time] final val OneYear = OneMonth.multipliedBy(12)

  def ofDays(days: Long): Duration = OneDay.multipliedBy(days)

  def ofHours(hours: Long): Duration = OneHour.multipliedBy(hours)

  def ofMinutes(minutes: Long): Duration = OneMinute.multipliedBy(minutes)

  def ofSeconds(seconds: Long): Duration = new Duration(seconds, 0)

  def ofSeconds(seconds: Long, nanoAdjustment: Long): Duration =
    ofSeconds(seconds).plusNanos(nanoAdjustment)

  def ofMillis(millis: Long): Duration = OneMilli.multipliedBy(millis)

  def ofNanos(nanos: Long): Duration = OneNano.multipliedBy(nanos)

  // Only exact units plus DAYS are accepted, mirroring Duration.plus.
  def of(amount: Long, unit: TemporalUnit): Duration = {
    if (!unit.isDurationEstimated || unit == ChronoUnit.DAYS)
      unit.getDuration.multipliedBy(amount)
    else
      throw new UnsupportedTemporalTypeException(s"Unit not supported: $unit")
  }

  // Sum of the amount's components, unit by unit.
  def from(amount: TemporalAmount): Duration = {
    amount.getUnits.asScala.foldLeft(ZERO) { (d, u) =>
      d.plus(amount.get(u), u)
    }
  }

  // Not implemented
  // def parse(text: CharSequence): Duration

  // Try the precise nano distance first; on overflow or unsupported units,
  // fall back to whole seconds plus a best-effort nano-of-second delta.
  def between(start: Temporal, end: Temporal): Duration = {
    try {
      val nanos = start.until(end, ChronoUnit.NANOS)
      Duration.ofNanos(nanos)
    } catch {
      case _:DateTimeException | _:ArithmeticException =>
        val seconds = start.until(end, ChronoUnit.SECONDS)
        val nanos = {
          try {
            end.get(ChronoField.NANO_OF_SECOND) -
            start.get(ChronoField.NANO_OF_SECOND)
          } catch {
            case _: DateTimeException => 0
          }
        }
        Duration.ofSeconds(seconds, nanos)
    }
  }
}
| jasonchaffee/scala-js | javalib/src/main/scala/java/time/Duration.scala | Scala | bsd-3-clause | 9,518 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.message
import java.nio._
import java.nio.channels._
import org.apache.kafka.common.record.Records
/**
 * Message set helper functions
 */
object MessageSet {

  val MessageSizeLength = 4
  val OffsetLength = 8
  // Framing bytes per entry: 8-byte offset + 4-byte size prefix.
  val LogOverhead = MessageSizeLength + OffsetLength
  val Empty = new ByteBufferMessageSet(ByteBuffer.allocate(0))

  /**
   * The size of a message set containing the given messages
   */
  def messageSetSize(messages: Iterable[Message]): Int =
    messages.foldLeft(0)(_ + entrySize(_))

  /**
   * The size of a size-delimited entry in a message set
   */
  def entrySize(message: Message): Int = LogOverhead + message.size

  /**
   * Validate that all "magic" values in `messages` are the same and return their magic value and max timestamp.
   * NOTE(review): assumes `messages` is non-empty (`head` would throw otherwise).
   */
  def magicAndLargestTimestamp(messages: Seq[Message]): MagicAndTimestamp = {
    val firstMagicValue = messages.head.magic
    var largestTimestamp = Message.NoTimestamp
    for (message <- messages) {
      if (message.magic != firstMagicValue)
        throw new IllegalStateException("Messages in the same message set must have same magic value")
      // Per-message timestamps only exist from magic v1 onwards.
      if (firstMagicValue > Message.MagicValue_V0)
        largestTimestamp = math.max(largestTimestamp, message.timestamp)
    }
    MagicAndTimestamp(firstMagicValue, largestTimestamp)
  }

}
case class MagicAndTimestamp(magic: Byte, timestamp: Long)
/**
 * A set of messages with offsets. A message set has a fixed serialized form, though the container
 * for the bytes could be either in-memory or on disk. The format of each message is
 * as follows:
 * 8 byte message offset number
 * 4 byte size containing an integer N
 * N message bytes as described in the Message class
 */
abstract class MessageSet extends Iterable[MessageAndOffset] {

  /**
   * Check if all the wrapper messages in the message set have the expected magic value
   */
  def isMagicValueInAllWrapperMessages(expectedMagicValue: Byte): Boolean

  /**
   * Provides an iterator over the message/offset pairs in this set
   */
  def iterator: Iterator[MessageAndOffset]

  /**
   * Gives the total size of this message set in bytes
   */
  def sizeInBytes: Int

  /**
   * Get the client representation of the message set
   */
  def asRecords: Records

  /**
   * Print this message set's contents. If the message set has more than 100 messages, just
   * print the first 100.
   */
  override def toString: String = {
    val builder = new StringBuilder()
    builder.append(getClass.getSimpleName + "(")
    val iter = this.iterator
    var i = 0
    // Cap output at 100 entries to keep log lines bounded.
    while(iter.hasNext && i < 100) {
      val message = iter.next
      builder.append(message)
      if(iter.hasNext)
        builder.append(", ")
      i += 1
    }
    // Ellipsis signals the set was truncated.
    if(iter.hasNext)
      builder.append("...")
    builder.append(")")
    builder.toString
  }

}
| geeag/kafka | core/src/main/scala/kafka/message/MessageSet.scala | Scala | apache-2.0 | 3,636 |
package com.airbnb.aerosolve.training
import java.io.BufferedWriter
import java.io.OutputStreamWriter
import java.util
import com.airbnb.aerosolve.core.{ModelRecord, ModelHeader, FeatureVector, Example}
import com.airbnb.aerosolve.core.models.LinearModel
import com.airbnb.aerosolve.core.util.Util
import com.typesafe.config.Config
import org.slf4j.{LoggerFactory, Logger}
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import scala.collection.mutable.HashMap
import scala.collection.mutable.HashSet
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.Buffer
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import scala.util.Random
import scala.math.abs
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.fs.Path
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
object FeatureSelection {
  private final val log: Logger = LoggerFactory.getLogger("FeatureSelection")
  // Sentinel key under which overall (example count, positive count) totals
  // are accumulated alongside the per-feature counts.
  val allKey : (String, String) = ("$ALL", "$POS")

  // Given a RDD compute the pointwise mutual information between
  // the positive label and the discrete features.
  // An example is labelled positive when its first rankKey value exceeds
  // posThreshold; features with fewer than minPosCount positive occurrences
  // are dropped.  When newCrosses is set, candidate feature crosses (pairs
  // from distinct families, keyed with a "<NEW>" separator) are scored too.
  def pointwiseMutualInformation(examples : RDD[Example],
                                 config : Config,
                                 key : String,
                                 rankKey : String,
                                 posThreshold : Double,
                                 minPosCount : Double,
                                 newCrosses : Boolean) : RDD[((String, String), Double)] = {
    val pointwise = LinearRankerUtils.makePointwise(examples, config, key, rankKey)
    val features = pointwise
      .mapPartitions(part => {
        // The tuple2 is var, var | positive
        val output = scala.collection.mutable.HashMap[(String, String), (Double, Double)]()
        part.foreach(example =>{
          val featureVector = example.example.get(0)
          val isPos = if (featureVector.floatFeatures.get(rankKey).asScala.head._2 > posThreshold) 1.0
          else 0.0
          // Every example contributes to the global totals under allKey.
          val all : (Double, Double) = output.getOrElse(allKey, (0.0, 0.0))
          output.put(allKey, (all._1 + 1.0, all._2 + 1.0 * isPos))

          val features : Array[(String, String)] =
            LinearRankerUtils.getFeatures(featureVector)
          if (newCrosses) {
            // Count each unordered cross once by requiring family i < family j.
            for (i <- features) {
              for (j <- features) {
                if (i._1 < j._1) {
                  val key = ("%s<NEW>%s".format(i._1, j._1),
                             "%s<NEW>%s".format(i._2, j._2))
                  val x = output.getOrElse(key, (0.0, 0.0))
                  output.put(key, (x._1 + 1.0, x._2 + 1.0 * isPos))
                }
              }
            }
          }
          for (feature <- features) {
            val x = output.getOrElse(feature, (0.0, 0.0))
            output.put(feature, (x._1 + 1.0, x._2 + 1.0 * isPos))
          }
        })
        output.iterator
      })
      .reduceByKey((a, b) => (a._1 + b._1, a._2 + b._2))
      .filter(x => x._2._2 >= minPosCount)

    // Global totals survive the filter via the allKey sentinel.
    val allCount = features.filter(x => x._1.equals(allKey)).take(1).head

    // PMI in bits: log2( P(feature | positive) / P(feature) ).
    features.map(x => {
      val prob = x._2._1 / allCount._2._1
      val probPos = x._2._2 / allCount._2._2
      (x._1, math.log(probPos / prob) / math.log(2.0))
    })
  }

  // Returns the maximum entropy per family
  // (keeps, for each feature family, the entry with the largest |PMI|).
  def maxEntropy(input : RDD[((String, String), Double)]) : RDD[((String, String), Double)] = {
    input
      .map(x => (x._1._1, x))
      .reduceByKey((a, b) => if (math.abs(a._2) > math.abs(b._2)) a else b)
      .map(x => x._2)
  }
}
| sagivo/aerosolve | training/src/main/scala/com/airbnb/aerosolve/training/FeatureSelection.scala | Scala | apache-2.0 | 3,588 |
import sbt._
import sbt.Keys._
/** sbt build definition for the owl2 project (sbt 0.13 `Build`-trait style). */
object Owl2Build extends Build {
  val logger = ConsoleLogger()
  val baseVersion = "0.1.0"
  // Repositories searched for dependencies, listed in resolution order.
  private val extraResolvers = Seq(
    Resolver.url(
      "bintray-scalajs-releases",
      url("http://dl.bintray.com/scala-js/scala-js-releases/"))(
      Resolver.ivyStylePatterns),
    "bintray/non" at "http://dl.bintray.com/non/maven",
    Resolver.sonatypeRepo("snapshots"),
    Resolver.sonatypeRepo("releases"),
    "spray repo" at "http://repo.spray.io",
    "Scalaz Bintray Repo" at "http://dl.bintray.com/scalaz/releases",
    "drdozer Bintray Repo" at "http://dl.bintray.com/content/drdozer/maven"
  )
  // The single module of this build, cross-compiled for 2.11 and 2.10.
  lazy val core = project.settings(
    organization := "uk.co.turingatemyhamster",
    scalaVersion := "2.11.5",
    crossScalaVersions := Seq("2.11.5", "2.10.4"),
    scalacOptions ++= Seq("-deprecation", "-unchecked"),
    version := baseVersion,
    resolvers ++= extraResolvers,
    publishMavenStyle := true,
    //repository in bintray := "maven",
    //bintrayOrganization in bintray := None,
    licenses +=("Apache-2.0", url("http://www.apache.org/licenses/LICENSE-2.0.html"))
  )
}
| drdozer/owl2 | project/Build.scala | Scala | apache-2.0 | 1,129 |
import sbt._
// format: off
// Dependency versions, centralized so related artifacts (e.g. the Akka
// modules) stay in sync across the build.
object Version {
  final val Akka          = "2.4.12"
  final val AkkaHttp      = "2.4.11"
  final val AkkaHttpJson  = "1.10.1"
  final val Json4s        = "3.4.2"
  final val ReactiveMongo = "0.12.0"
  final val Scala         = "2.11.8"
  final val ScalaTest     = "3.0.0"
  final val Spray         = "1.3.4"
}
// One module definition per artifact; versions are taken from [[Version]].
object Library {
  val akka            = "com.typesafe.akka" %% "akka-actor"              % Version.Akka
  val akkaHttp        = "com.typesafe.akka" %% "akka-http-experimental"  % Version.AkkaHttp
  val akkaHttpJson    = "de.heikoseeberger" %% "akka-http-json4s"        % Version.AkkaHttpJson
  val akkaSlf4j       = "com.typesafe.akka" %% "akka-slf4j"              % Version.Akka
  // NOTE(review): the only version not tracked in [[Version]].
  val logbackClassic  = "ch.qos.logback"    %  "logback-classic"         % "1.1.2"
  val json4sNative    = "org.json4s"        %% "json4s-native"           % Version.Json4s
  val reactiveMongo   = "org.reactivemongo" %% "reactivemongo"           % Version.ReactiveMongo
  val akkaHttpTestKit = "com.typesafe.akka" %% "akka-http-testkit"       % Version.AkkaHttp
  val scalaTest       = "org.scalatest"     %% "scalatest"               % Version.ScalaTest
}
// Per-module dependency lists consumed by the build definition.
object Dependencies {
  import Library._
  // Full dependency set of the iq-notes service; the last two entries are
  // scoped to the "test" configuration only.
  val iqNotes = Seq(
    akka,
    akkaHttp,
    akkaHttpJson,
    akkaSlf4j,
    json4sNative,
    logbackClassic,
    reactiveMongo,
    akkaHttpTestKit % "test",
    scalaTest       % "test"
  )
}
| rockjam/iq-notes | project/Dependencies.scala | Scala | apache-2.0 | 1,421 |
package de.sciss.fscape
package tests
import de.sciss.fscape.Ops._
import de.sciss.fscape.stream.Control
// Manual smoke test: renders the histogram of one LFSaw period with Plot1D.
// A full sawtooth sweep over [-1, 1] should fill all bins roughly evenly.
object HistogramTest extends App {
  val config = Control.Config()
  config.useAsync = false
  config.blockSize = 128 // 1024
  val g = Graph {
    import graph._
    // number of samples analyzed (one control block's worth)
    val sz = config.blockSize // 1024
    // sawtooth period in sample frames
    val period = 1024 // 100
    // number of histogram buckets across [lo, hi]
    val bins = 16 // 256 // 200
    // val gen0 = SinOsc(1.0/period)
    val gen0 = LFSaw(1.0/period)
    val gen = gen0.take(sz) // * Line(0.5, 1.0, sz)
    // mode = 0: histogram emitted after the input signal ends
    val h0 = Histogram(gen, bins = bins, lo = -1.0, hi = +1.0, mode = 0)
    val h = h0 // ResizeWindow(h0, 17 * 10, stop = -17 * 9)
    Plot1D(h, size = bins /*sz*/, label = "Histogram")
  }
  // fscape.showStreamLog = true
  Control(config).run(g)
}
package com.typesafe.sbt
package packager
package universal
import sbt._
/** Helper methods to package up files into compressed archives. */
object Archives {
  /**
   * Makes a zip file in the given target directory using the given name.
   * @param target folder to build package in
   * @param name of output (without extension)
   * @param mappings included in the output
   * @param top level directory
   * @return zip file
   */
  @deprecated(
    "Use [[com.typesafe.sbt.packager.universal.Archives.makeZip(File, String, Seq[(File, String)], Option[String], Seq[String]): File]]",
    since = "1.0.5"
  )
  def makeZip(target: File, name: String, mappings: Seq[(File, String)], top: Option[String]): File =
    makeZip(target, name, mappings, top, options = Seq.empty)
  /**
   * Makes a zip file in the given target directory using the given name.
   *
   * @param target folder to build package in
   * @param name of output (without extension)
   * @param mappings included in the output
   * @param top level directory
   * @param options NOT USED
   * @return zip file
   */
  def makeZip(
    target: File,
    name: String,
    mappings: Seq[(File, String)],
    top: Option[String],
    options: Seq[String]
  ): File = {
    val zip = target / (name + ".zip")
    // add top level directory if defined
    // (re-roots every mapped path under "dir/")
    val m2 = top map { dir =>
      mappings map { case (f, p) => f -> (dir + "/" + p) }
    } getOrElse (mappings)
    ZipHelper.zip(m2, zip)
    zip
  }
  /**
   * Makes a zip file in the given target directory using the given name.
   *
   * @param target folder to build package in
   * @param name of output (without extension)
   * @param mappings included in the output
   * @param top level directory
   * @return zip file
   */
  @deprecated(
    "Use [[com.typesafe.sbt.packager.universal.Archives.makeNativeZip(File, String, Seq[(File, String)], Option[String], Seq[String]): File]]",
    since = "1.0.5"
  )
  def makeNativeZip(target: File, name: String, mappings: Seq[(File, String)], top: Option[String]): File =
    makeNativeZip(target, name, mappings, top, options = Seq.empty)
  /**
   * Makes a zip file in the given target directory using the given name.
   * Unlike [[makeZip]] this shells out to the platform's native zip tool,
   * which preserves file permissions.
   *
   * @param target folder to build package in
   * @param name of output (without extension)
   * @param mappings included in the output
   * @param top level directory
   * @param options NOT USED
   * @return zip file
   */
  def makeNativeZip(
    target: File,
    name: String,
    mappings: Seq[(File, String)],
    top: Option[String],
    options: Seq[String]
  ): File = {
    val zip = target / (name + ".zip")
    // add top level directory if defined
    val m2 = top map { dir =>
      mappings map { case (f, p) => f -> (dir + "/" + p) }
    } getOrElse (mappings)
    ZipHelper.zipNative(m2, zip)
    zip
  }
  /**
   * Makes a dmg file in the given target directory using the given name.
   *
   * Note: Only works on macOS
   *
   * @param target folder to build package in
   * @param name of output (without extension)
   * @param mappings included in the output
   * @param top level directory : NOT USED
   * @return dmg file
   */
  @deprecated(
    "Use [[com.typesafe.sbt.packager.universal.Archives.makeDmg(target: File, name: String, mappings: Seq[(File, String)], top: Option[String], options: Seq[String]): File]]",
    since = "1.0.5"
  )
  def makeDmg(target: File, name: String, mappings: Seq[(File, String)], top: Option[String]): File =
    makeDmg(target, name, mappings, top, options = Seq.empty)
  /**
   * Makes a dmg file in the given target directory using the given name.
   * Creates the image with `hdiutil`, mounts it, copies the mappings in,
   * then detaches it again.
   *
   * Note: Only works on macOS
   *
   * @param target folder to build package in
   * @param name of output (without extension)
   * @param mappings included in the output
   * @param top level directory : NOT USED
   * @param options NOT USED
   * @return dmg file
   */
  def makeDmg(
    target: File,
    name: String,
    mappings: Seq[(File, String)],
    top: Option[String],
    options: Seq[String]
  ): File = {
    val t = target / "dmg"
    val dmg = target / (name + ".dmg")
    if (!t.isDirectory) IO.createDirectory(t)
    val sizeBytes =
      mappings.map(_._1).filterNot(_.isDirectory).map(_.length).sum
    // We should give ourselves a buffer....
    // (5% headroom over the raw content size, rounded up to whole megabytes)
    val neededMegabytes = math.ceil((sizeBytes * 1.05) / (1024 * 1024)).toLong
    // Create the DMG file:
    sys.process
      .Process(
        Seq("hdiutil", "create", "-megabytes", "%d" format neededMegabytes, "-fs", "HFS+", "-volname", name, name),
        Some(target)
      )
      .! match {
      case 0 => ()
      case n => sys.error("Error creating dmg: " + dmg + ". Exit code " + n)
    }
    // Now mount the DMG.
    val mountPoint = (t / name)
    if (!mountPoint.isDirectory) IO.createDirectory(mountPoint)
    val mountedPath = mountPoint.getAbsolutePath
    sys.process
      .Process(Seq("hdiutil", "attach", dmg.getAbsolutePath, "-readwrite", "-mountpoint", mountedPath), Some(target))
      .! match {
      case 0 => ()
      case n => sys.error("Unable to mount dmg: " + dmg + ". Exit code " + n)
    }
    // Now copy the files in
    val m2 = mappings map { case (f, p) => f -> (mountPoint / p) }
    IO.copy(m2)
    // Update for permissions
    // (IO.copy does not carry over the executable bit)
    for {
      (from, to) <- m2
      if from.canExecute()
    } to.setExecutable(true, true)
    // Now unmount
    sys.process.Process(Seq("hdiutil", "detach", mountedPath), Some(target)).! match {
      case 0 => ()
      case n =>
        sys.error("Unable to dismount dmg: " + dmg + ". Exit code " + n)
    }
    // Delete mount point
    IO.delete(mountPoint)
    dmg
  }
  /**
   * GZips a file. Returns the new gzipped file.
   * NOTE: This will 'consume' the input file.
   */
  def gzip(f: File): File = {
    val par = f.getParentFile
    // -9: maximum compression; gzip replaces f with f.gz in place
    sys.process.Process(Seq("gzip", "-9", f.getAbsolutePath), Some(par)).! match {
      case 0 => ()
      case n => sys.error("Error gziping " + f + ". Exit code: " + n)
    }
    file(f.getAbsolutePath + ".gz")
  }
  /**
   * xz compresses a file. Returns the new xz compressed file.
   * NOTE: This will 'consume' the input file.
   */
  def xz(f: File): File = {
    val par = f.getParentFile
    // -9e: maximum compression with the "extreme" preset; xz replaces f in place
    sys.process.Process(Seq("xz", "-9e", "-S", ".xz", f.getAbsolutePath), Some(par)).! match {
      case 0 => ()
      case n => sys.error("Error xz-ing " + f + ". Exit code: " + n)
    }
    file(f.getAbsolutePath + ".xz")
  }
  // Pre-configured tarball builders: xz-compressed (.txz) and gzip-compressed (.tgz).
  val makeTxz = makeTarballWithOptions(xz, ".txz") _
  val makeTgz = makeTarballWithOptions(gzip, ".tgz") _
  /**
   * Helper method used to construct tar-related compression functions with `--force-local` and `-pvcf` option specified
   * as default.
   * (`--force-local` stops GNU tar from treating a path containing ':' as a
   * remote host, which matters for Windows drive-letter paths.)
   * @param target folder to build package in
   * @param name of output (without extension)
   * @param mappings included in the output
   * @param top level directory
   * @return tar file
   */
  def makeTarball(
    compressor: File => File,
    ext: String
  )(target: File, name: String, mappings: Seq[(File, String)], top: Option[String]): File =
    makeTarballWithOptions(compressor, ext)(target, name, mappings, top, options = Seq("--force-local", "-pcvf"))
  /**
   * Helper method used to construct tar-related compression functions.
   * @param compressor function that compresses the intermediate .tar (e.g. [[gzip]], [[xz]])
   * @param ext file extension of the final archive (e.g. ".tgz")
   * @param target folder to build package in
   * @param name of output (without extension)
   * @param mappings included in the output
   * @param topDirectory level directory
   * @param options for tar command
   * @return tar file
   */
  def makeTarballWithOptions(compressor: File => File, ext: String)(
    target: File,
    name: String,
    mappings: Seq[(File, String)],
    topDirectory: Option[String],
    options: Seq[String]
  ): File = {
    val tarball = target / (name + ext)
    IO.withTemporaryDirectory { tempDirectory =>
      val workingDirectory = tempDirectory / name
      val temporaryMappings = topDirectory
        .map { dir =>
          mappings map { case (f, p) => f -> (workingDirectory / dir / p) }
        }
        .getOrElse {
          mappings map { case (f, p) => f -> (workingDirectory / p) }
        }
      // create the working directory
      IO.createDirectory(workingDirectory)
      IO.copy(temporaryMappings)
      // setExecutable does not always work. There are known issues with macOS where
      // the executable flags is missing after compression.
      for ((from, to) <- temporaryMappings if (to.getAbsolutePath contains "/bin/") || from.canExecute) {
        println("Making " + to.getAbsolutePath + " executable")
        to.setExecutable(true, false)
      }
      IO.createDirectory(tarball.getParentFile)
      // all directories that should be zipped
      val distdirs = topDirectory.map(_ :: Nil).getOrElse {
        IO.listFiles(workingDirectory).map(_.getName).toList // no top level dir, use all available
      }
      val temporaryTarFile = tempDirectory / (name + ".tar")
      val cmd = Seq("tar") ++ options ++ Seq(temporaryTarFile.getAbsolutePath) ++ distdirs
      println("Running with " + cmd.mkString(" "))
      sys.process.Process(cmd, workingDirectory).! match {
        case 0 => ()
        case n =>
          sys.error("Error tarballing " + tarball + ". Exit code: " + n)
      }
      // compress the intermediate tar and move the result to the destination
      IO.copyFile(compressor(temporaryTarFile), tarball)
    }
    tarball
  }
}
| kardapoltsev/sbt-native-packager | src/main/scala/com/typesafe/sbt/packager/universal/Archives.scala | Scala | bsd-2-clause | 9,396 |
/*
* Copyright (c) 2015 Hugo Gävert. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.sanoma.cda.geo
/**
* This object provides basic Geohash encoding and decoding.
* Geohash web-site: http://geohash.org/
* Geohash description on Wikipedia: https://en.wikipedia.org/wiki/Geohash
*
* This implementation is very simple and mostly modelled after
* Python version at https://github.com/vinsci/geohash/blob/master/Geohash/geohash.py
*/
object GeoHash {
  type Geohash = String
  // geohash uses its own base32 alphabet (digits plus letters, skipping
  // a, i, l, o) - from Wikipedia
  val base32 = "0123456789bcdefghjkmnpqrstuvwxyz".toList
  // base32 character -> its 5-bit value
  val decodeMap = base32.zipWithIndex.toMap
  // masks for the 5 bits of a base32 digit, most significant bit first
  val bitMask = Array(16, 8, 4, 2, 1)
  // midpoint of a (low, high) interval
  @inline def mid(interval: (Double, Double)) = (interval._1 + interval._2) / 2.0
  /**
   * This function decodes fully the given Geohash string.
   * Bits alternate between longitude (even positions) and latitude (odd
   * positions); each bit halves the corresponding interval.
   * @param geohash - Geohash string
   * @return - Tuple with (latitude, longitude, latitude ±error, longitude ±error)
   */
  def decodeFully(geohash: Geohash): (Double, Double, Double, Double) = {
    // latitude and longitude (interval, error)
    var (latI, latE) = ((-90.0, 90.0), 90.0)
    var (lonI, lonE) = ((-180.0, 180.0), 180.0)
    var isEven = true
    for (c <- geohash.toCharArray) {
      val cd = decodeMap(c)
      for (mask <- bitMask) {
        if (isEven) { // longitude
          lonE = lonE / 2.0
          // bit set -> value lies in the upper half of the interval
          lonI = if ((cd & mask) > 0) (mid(lonI), lonI._2) else (lonI._1, mid(lonI))
        } else { // latitude
          latE = latE / 2.0
          latI = if ((cd & mask) > 0) (mid(latI), latI._2) else (latI._1, mid(latI))
        }
        isEven = !isEven
      }
    }
    (mid(latI), mid(lonI), latE, lonE)
  }
  // Bounding box covered by the given geohash cell.
  def geoHash2Rectangle(geohash: Geohash): Rectangle = {
    val (latMid, lonMid, latE, lonE) = decodeFully(geohash)
    new Rectangle(latMid + latE, lonMid + lonE, latMid - latE, lonMid - lonE)
  }
  /**
   * This rounds the coordinates to desired precision. See wikipedia for rounding.
   * Searches for the fewest decimal places whose rounded value still falls
   * inside [x - xError, x + xError].
   * @param x Value to round
   * @param xError ±error for x
   * @return Rounded value
   */
  def getRounded(x: Double, xError: Double) = {
    import math._
    val xmin = x - xError
    val xmax = x + xError
    //println(s"x=$x => [$xmin, $xmax]")
    var xPrecision: Double = max(1, round(-log10(xError))) - 1
    var decimals: Double = pow(10, xPrecision)
    var rounded = round(x * decimals) / decimals
    //println(s"rounded = $rounded, decimals = $decimals")
    // non-short-circuit `|` is equivalent to `||` here (both sides are pure)
    while ((rounded < xmin) | (rounded > xmax)) {
      xPrecision += 1.0
      decimals = pow(10, xPrecision)
      rounded = round(x * decimals) / decimals
      //println(s"rounded = $rounded, decimals = $decimals")
    }
    rounded
  }
  /**
   * Just decode and get the point. Rounds the precisions also
   * @param gh Geohash to decode
   * @return The point
   */
  def decode(gh: Geohash): Point = {
    val (lat, lon, latErr, lonErr) = decodeFully(gh)
    Point(getRounded(lat, latErr), getRounded(lon, lonErr))
  }
  /**
   * Encoding function. Mirrors [[decodeFully]]: alternately bisects the
   * longitude and latitude intervals, emitting one bit per step and one
   * base32 character per 5 bits.
   * @param point Point
   * @param hashLength Desired length of the hash
   * @return The geohash string
   */
  def encode(point: Point, hashLength: Int = 12): Geohash = {
    var latI = (-90.0, 90.0)
    var lonI = (-180.0, 180.0)
    var isEven = true
    var geohash = new StringBuilder
    var bit = 0
    var ch = 0
    while (geohash.length < hashLength) {
      if (isEven) { // longitude
        val midPoint = mid(lonI)
        if (point.longitude > midPoint) {
          ch = ch | bitMask(bit)
          lonI = (midPoint, lonI._2)
        } else lonI = (lonI._1, midPoint)
      } else { // latitude
        val midPoint = mid(latI)
        if (point.latitude > midPoint) {
          ch = ch | bitMask(bit)
          latI = (midPoint, latI._2)
        } else latI = (latI._1, midPoint)
      }
      isEven = !isEven
      // flush a finished 5-bit group as one base32 character
      if (bit < 4) bit = bit + 1
      else {
        geohash += base32(ch)
        bit = 0
        ch = 0
      }
    }
    geohash.toString
  }
  /**
   * This function just returns the longest common prefix of the sequence of strings.
   * NOTE(review): throws on an empty input sequence (min/reduce of empty);
   * callers must pass at least one string.
   * @param strs Sequence of the strings
   * @return The longest common prefix
   */
  def longestCommonPrefix(strs: Seq[String]): String = {
    val charA = strs.map(_.toCharArray)
    val maxLength = charA.map(_.length).min
    (0 until maxLength).view
      .map{i => charA.map(_(i)).toSet}
      .takeWhile(_.size == 1)
      .map(_.head).mkString
  }
  /**
   * This tries to find the smallest geohash that contains all the points.
   * It does it by encoding all points to highest precision and looks for the
   * longest common prefix of the geohash. However, due the way geohash is calculated
   * this may return very large areas. If this is long or "accurate", the points are all
   * in close proximity. However, this may be very large area even if the points are
   * all from very small region that happens to cross large geohash borders.
   * @param points List of points
   * @return The smalles common Geohash
   */
  def smallestCommonGeohash(points: Seq[Point]): Geohash =
    longestCommonPrefix(points.map(p => encode(p)))
}
| Sanoma-CDA/maxmind-geoip2-scala | src/main/scala/com/sanoma/cda/geo/GeoHash.scala | Scala | apache-2.0 | 5,729 |
// package fif
// import algebra.Semigroup
// import Data.ops._
// import scala.language.higherKinds
// import scala.reflect.ClassTag
// object Sum extends Serializable {
// def apply[N: Numeric: ClassTag, D[_]: Data](data: D[N]): N = {
// val add = implicitly[Numeric[N]].plus _
// data.aggregate(implicitly[Numeric[N]].zero)(add, add)
// }
// }
// object ToMap extends Serializable {
// def addToMap[K, V: Semigroup](m: Map[K, V])(key: K, value: V): Map[K, V] =
// if (m.contains(key))
// (m - key) + (key -> implicitly[Semigroup[V]].combine(m(key), value))
// else
// m + (key -> value)
// def combine[K, V: Semigroup](m1: Map[K, V], m2: Map[K, V]): Map[K, V] = {
// val (larger, smaller) =
// if (m1.size > m2.size)
// (m1, m2)
// else
// (m2, m1)
// smaller.foldLeft(larger) {
// case (m, (key, value)) =>
// addToMap(m)(key, value)
// }
// }
// def apply[T: ClassTag, U: ClassTag: Semigroup, D[_]: Data](data: D[(T, U)]): Map[T, U] = {
// implicit val _ = identity[(T, U)] _
// apply[(T, U), T, U, D](data)
// }
// def apply[A, T: ClassTag, U: ClassTag: Semigroup, D[_]: Data](data: D[A])(implicit ev: A <:< (T, U)): Map[T, U] = {
// val sg = implicitly[Semigroup[U]]
// data.aggregate(Map.empty[T, U])(
// {
// case (m, a) =>
// val (t, u) = ev(a)
// addToMap(m)(t, u)
// },
// combine
// )
// }
// }
// object ImplicitSemigroup {
// implicit val int: Semigroup[Int] =
// new Semigroup[Int] {
// override def combine(x: Int, y: Int): Int =
// x + y
// }
// }
| malcolmgreaves/abstract_data | data-tc-spark/src/test/scala/fif/TestHelpers.scala | Scala | apache-2.0 | 1,671 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.encoders
import scala.reflect.ClassTag
import scala.reflect.runtime.universe.{typeTag, TypeTag}
import org.apache.spark.sql.Encoder
import org.apache.spark.sql.catalyst.{InternalRow, JavaTypeInference, ScalaReflection}
import org.apache.spark.sql.catalyst.analysis.{Analyzer, GetColumnByOrdinal, SimpleAnalyzer, UnresolvedAttribute, UnresolvedExtractValue}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen.{GenerateSafeProjection, GenerateUnsafeProjection}
import org.apache.spark.sql.catalyst.expressions.objects.{AssertNotNull, Invoke, NewInstance}
import org.apache.spark.sql.catalyst.optimizer.SimplifyCasts
import org.apache.spark.sql.catalyst.plans.logical.{CatalystSerde, DeserializeToObject, LocalRelation}
import org.apache.spark.sql.types.{BooleanType, ObjectType, StructField, StructType}
import org.apache.spark.util.Utils
/**
* A factory for constructing encoders that convert objects and primitives to and from the
* internal row format using catalyst expressions and code generation. By default, the
* expressions used to retrieve values from an input row when producing an object will be created as
* follows:
* - Classes will have their sub fields extracted by name using [[UnresolvedAttribute]] expressions
* and [[UnresolvedExtractValue]] expressions.
* - Tuples will have their subfields extracted by position using [[BoundReference]] expressions.
* - Primitives will have their values extracted from the first ordinal with a schema that defaults
* to the name `value`.
*/
object ExpressionEncoder {
  def apply[T : TypeTag](): ExpressionEncoder[T] = {
    // We convert the not-serializable TypeTag into StructType and ClassTag.
    val mirror = ScalaReflection.mirror
    val tpe = typeTag[T].in(mirror).tpe
    if (ScalaReflection.optionOfProductType(tpe)) {
      throw new UnsupportedOperationException(
        "Cannot create encoder for Option of Product type, because Product type is represented " +
          "as a row, and the entire row can not be null in Spark SQL like normal databases. " +
          "You can wrap your type with Tuple1 if you do want top level null Product objects, " +
          "e.g. instead of creating `Dataset[Option[MyClass]]`, you can do something like " +
          "`val ds: Dataset[Tuple1[MyClass]] = Seq(Tuple1(MyClass(...)), Tuple1(null)).toDS`")
    }
    val cls = mirror.runtimeClass(tpe)
    // "flat" encoders serialize to a single column instead of a struct of fields.
    val flat = !ScalaReflection.definedByConstructorParams(tpe)
    val inputObject = BoundReference(0, ScalaReflection.dataTypeFor[T], nullable = true)
    val nullSafeInput = if (flat) {
      inputObject
    } else {
      // For input object of Product type, we can't encode it to row if it's null, as Spark SQL
      // doesn't allow top-level row to be null, only its columns can be null.
      AssertNotNull(inputObject, Seq("top level Product input object"))
    }
    val serializer = ScalaReflection.serializerFor[T](nullSafeInput)
    val deserializer = ScalaReflection.deserializerFor[T]
    val schema = ScalaReflection.schemaFor[T] match {
      case ScalaReflection.Schema(s: StructType, _) => s
      case ScalaReflection.Schema(dt, nullable) => new StructType().add("value", dt, nullable)
    }
    new ExpressionEncoder[T](
      schema,
      flat,
      serializer.flatten,
      deserializer,
      ClassTag[T](cls))
  }
  // TODO: improve error message for java bean encoder.
  def javaBean[T](beanClass: Class[T]): ExpressionEncoder[T] = {
    val schema = JavaTypeInference.inferDataType(beanClass)._1
    assert(schema.isInstanceOf[StructType])
    val serializer = JavaTypeInference.serializerFor(beanClass)
    val deserializer = JavaTypeInference.deserializerFor(beanClass)
    new ExpressionEncoder[T](
      schema.asInstanceOf[StructType],
      flat = false,
      serializer.flatten,
      deserializer,
      ClassTag[T](beanClass))
  }
  /**
   * Given a set of N encoders, constructs a new encoder that produce objects as items in an
   * N-tuple. Note that these encoders should be unresolved so that information about
   * name/positional binding is preserved.
   */
  def tuple(encoders: Seq[ExpressionEncoder[_]]): ExpressionEncoder[_] = {
    encoders.foreach(_.assertUnresolved())
    // One top-level column per child encoder: flat children contribute their
    // single column directly, struct children become a nullable struct column.
    val schema = StructType(encoders.zipWithIndex.map {
      case (e, i) =>
        val (dataType, nullable) = if (e.flat) {
          e.schema.head.dataType -> e.schema.head.nullable
        } else {
          e.schema -> true
        }
        StructField(s"_${i + 1}", dataType, nullable)
    })
    val cls = Utils.getContextOrSparkClassLoader.loadClass(s"scala.Tuple${encoders.size}")
    val serializer = encoders.zipWithIndex.map { case (enc, index) =>
      // Redirect each child's input from the raw object to tuple field _N.
      val originalInputObject = enc.serializer.head.collect { case b: BoundReference => b }.head
      val newInputObject = Invoke(
        BoundReference(0, ObjectType(cls), nullable = true),
        s"_${index + 1}",
        originalInputObject.dataType)
      val newSerializer = enc.serializer.map(_.transformUp {
        case b: BoundReference if b == originalInputObject => newInputObject
      })
      if (enc.flat) {
        newSerializer.head
      } else {
        // For non-flat encoder, the input object is not top level anymore after being combined to
        // a tuple encoder, thus it can be null and we should wrap the `CreateStruct` with `If` and
        // null check to handle null case correctly.
        // e.g. for Encoder[(Int, String)], the serializer expressions will create 2 columns, and is
        // not able to handle the case when the input tuple is null. This is not a problem as there
        // is a check to make sure the input object won't be null. However, if this encoder is used
        // to create a bigger tuple encoder, the original input object becomes a filed of the new
        // input tuple and can be null. So instead of creating a struct directly here, we should add
        // a null/None check and return a null struct if the null/None check fails.
        val struct = CreateStruct(newSerializer)
        val nullCheck = Or(
          IsNull(newInputObject),
          Invoke(Literal.fromObject(None), "equals", BooleanType, newInputObject :: Nil))
        If(nullCheck, Literal.create(null, struct.dataType), struct)
      }
    }
    val childrenDeserializers = encoders.zipWithIndex.map { case (enc, index) =>
      if (enc.flat) {
        // Flat child reads its single column at this tuple position.
        enc.deserializer.transform {
          case g: GetColumnByOrdinal => g.copy(ordinal = index)
        }
      } else {
        // Struct child: rebind its field accesses to the struct column at this
        // position, producing null (instead of failing) when the struct is null.
        val input = GetColumnByOrdinal(index, enc.schema)
        val deserialized = enc.deserializer.transformUp {
          case UnresolvedAttribute(nameParts) =>
            assert(nameParts.length == 1)
            UnresolvedExtractValue(input, Literal(nameParts.head))
          case GetColumnByOrdinal(ordinal, _) => GetStructField(input, ordinal)
        }
        If(IsNull(input), Literal.create(null, deserialized.dataType), deserialized)
      }
    }
    val deserializer =
      NewInstance(cls, childrenDeserializers, ObjectType(cls), propagateNull = false)
    new ExpressionEncoder[Any](
      schema,
      flat = false,
      serializer,
      deserializer,
      ClassTag(cls))
  }
  // Tuple1
  def tuple[T](e: ExpressionEncoder[T]): ExpressionEncoder[Tuple1[T]] =
    tuple(Seq(e)).asInstanceOf[ExpressionEncoder[Tuple1[T]]]
  def tuple[T1, T2](
      e1: ExpressionEncoder[T1],
      e2: ExpressionEncoder[T2]): ExpressionEncoder[(T1, T2)] =
    tuple(Seq(e1, e2)).asInstanceOf[ExpressionEncoder[(T1, T2)]]
  def tuple[T1, T2, T3](
      e1: ExpressionEncoder[T1],
      e2: ExpressionEncoder[T2],
      e3: ExpressionEncoder[T3]): ExpressionEncoder[(T1, T2, T3)] =
    tuple(Seq(e1, e2, e3)).asInstanceOf[ExpressionEncoder[(T1, T2, T3)]]
  def tuple[T1, T2, T3, T4](
      e1: ExpressionEncoder[T1],
      e2: ExpressionEncoder[T2],
      e3: ExpressionEncoder[T3],
      e4: ExpressionEncoder[T4]): ExpressionEncoder[(T1, T2, T3, T4)] =
    tuple(Seq(e1, e2, e3, e4)).asInstanceOf[ExpressionEncoder[(T1, T2, T3, T4)]]
  def tuple[T1, T2, T3, T4, T5](
      e1: ExpressionEncoder[T1],
      e2: ExpressionEncoder[T2],
      e3: ExpressionEncoder[T3],
      e4: ExpressionEncoder[T4],
      e5: ExpressionEncoder[T5]): ExpressionEncoder[(T1, T2, T3, T4, T5)] =
    tuple(Seq(e1, e2, e3, e4, e5)).asInstanceOf[ExpressionEncoder[(T1, T2, T3, T4, T5)]]
}
/**
* A generic encoder for JVM objects.
*
* @param schema The schema after converting `T` to a Spark SQL row.
* @param serializer A set of expressions, one for each top-level field that can be used to
* extract the values from a raw object into an [[InternalRow]].
* @param deserializer An expression that will construct an object given an [[InternalRow]].
* @param clsTag A classtag for `T`.
*/
case class ExpressionEncoder[T](
    schema: StructType,
    flat: Boolean,
    serializer: Seq[Expression],
    deserializer: Expression,
    clsTag: ClassTag[T])
  extends Encoder[T] {
  // A flat encoder maps the object to exactly one column.
  if (flat) require(serializer.size == 1)
  // serializer expressions are used to encode an object to a row, while the object is usually an
  // intermediate value produced inside an operator, not from the output of the child operator. This
  // is quite different from normal expressions, and `AttributeReference` doesn't work here
  // (intermediate value is not an attribute). We assume that all serializer expressions use a same
  // `BoundReference` to refer to the object, and throw exception if they don't.
  assert(serializer.forall(_.references.isEmpty), "serializer cannot reference to any attributes.")
  assert(serializer.flatMap { ser =>
    val boundRefs = ser.collect { case b: BoundReference => b }
    assert(boundRefs.nonEmpty,
      "each serializer expression should contains at least one `BoundReference`")
    boundRefs
  }.distinct.length <= 1, "all serializer expressions must use the same BoundReference.")
  /**
   * Returns a new copy of this encoder, where the `deserializer` is resolved and bound to the
   * given schema.
   *
   * Note that, ideally encoder is used as a container of serde expressions, the resolution and
   * binding stuff should happen inside query framework.  However, in some cases we need to
   * use encoder as a function to do serialization directly(e.g. Dataset.collect), then we can use
   * this method to do resolution and binding outside of query framework.
   */
  def resolveAndBind(
      attrs: Seq[Attribute] = schema.toAttributes,
      analyzer: Analyzer = SimpleAnalyzer): ExpressionEncoder[T] = {
    // Run the deserializer through the analyzer on a dummy plan so that
    // unresolved attributes/ordinals are resolved against `attrs`.
    val dummyPlan = CatalystSerde.deserialize(LocalRelation(attrs))(this)
    val analyzedPlan = analyzer.execute(dummyPlan)
    analyzer.checkAnalysis(analyzedPlan)
    val resolved = SimplifyCasts(analyzedPlan).asInstanceOf[DeserializeToObject].deserializer
    val bound = BindReferences.bindReference(resolved, attrs)
    copy(deserializer = bound)
  }
  // Generated projections are expensive to build, so they are created lazily
  // and cached; they are also not serializable, hence @transient.
  @transient
  private lazy val extractProjection = GenerateUnsafeProjection.generate(serializer)
  // Single-slot row reused across toRow calls to hold the input object.
  @transient
  private lazy val inputRow = new GenericInternalRow(1)
  @transient
  private lazy val constructProjection = GenerateSafeProjection.generate(deserializer :: Nil)
  /**
   * Returns a new set (with unique ids) of [[NamedExpression]] that represent the serialized form
   * of this object.
   */
  def namedExpressions: Seq[NamedExpression] = schema.map(_.name).zip(serializer).map {
    case (_, ne: NamedExpression) => ne.newInstance()
    case (name, e) => Alias(e, name)()
  }
  /**
   * Returns an encoded version of `t` as a Spark SQL row.  Note that multiple calls to
   * toRow are allowed to return the same actual [[InternalRow]] object.  Thus, the caller should
   * copy the result before making another call if required.
   */
  def toRow(t: T): InternalRow = try {
    inputRow(0) = t
    extractProjection(inputRow)
  } catch {
    case e: Exception =>
      throw new RuntimeException(
        s"Error while encoding: $e\n${serializer.map(_.treeString).mkString("\n")}", e)
  }
  /**
   * Returns an object of type `T`, extracting the required values from the provided row.  Note that
   * you must `resolveAndBind` an encoder to a specific schema before you can call this
   * function.
   */
  def fromRow(row: InternalRow): T = try {
    constructProjection(row).get(0, ObjectType(clsTag.runtimeClass)).asInstanceOf[T]
  } catch {
    case e: Exception =>
      throw new RuntimeException(s"Error while decoding: $e\n${deserializer.treeString}", e)
  }
  /**
   * The process of resolution to a given schema throws away information about where a given field
   * is being bound by ordinal instead of by name.  This method checks to make sure this process
   * has not been done already in places where we plan to do later composition of encoders.
   */
  def assertUnresolved(): Unit = {
    (deserializer +: serializer).foreach(_.foreach {
      case a: AttributeReference if a.name != "loopVar" =>
        sys.error(s"Unresolved encoder expected, but $a was found.")
      case _ =>
    })
  }
  // Pretty-printed binding info per serializer expression (attribute id or
  // bound ordinal), used only for the schema string below.
  protected val attrs = serializer.flatMap(_.collect {
    case _: UnresolvedAttribute => ""
    case a: Attribute => s"#${a.exprId}"
    case b: BoundReference => s"[${b.ordinal}]"
  })
  protected val schemaString =
    schema
      .zip(attrs)
      .map { case(f, a) => s"${f.name}$a: ${f.dataType.simpleString}"}.mkString(", ")
  override def toString: String = s"class[$schemaString]"
}
| u2009cf/spark-radar | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala | Scala | apache-2.0 | 14,341 |
package dsmoq.persistence
/**
 * Integer discriminators persisted to distinguish the two kinds of owner.
 */
object OwnerType {
  /** Owner is an individual user. */
  val User: Int = 1
  /** Owner is a group. */
  val Group: Int = 2
}
| nkawa/dsmoq | server/common/src/main/scala/dsmoq/persistence/OwnerType.scala | Scala | apache-2.0 | 79 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.types.IntegerType
class SubexpressionEliminationSuite extends SparkFunSuite {
  test("Semantic equals and hash") {
    val a: AttributeReference = AttributeReference("name", IntegerType)()
    val id = {
      // Make sure we use a "ExprId" different from "a.exprId"
      val _id = ExprId(1)
      if (a.exprId == _id) ExprId(2) else _id
    }
    val b1 = a.withName("name2").withExprId(id)
    val b2 = a.withExprId(id)
    val b3 = a.withQualifier(Some("qualifierName"))
    // Structural equality distinguishes names/exprIds; semantic equality ignores them.
    assert(b1 != b2)
    assert(a != b1)
    assert(b1.semanticEquals(b2))
    assert(!b1.semanticEquals(a))
    assert(a.hashCode != b1.hashCode)
    assert(b1.hashCode != b2.hashCode)
    assert(b1.semanticHash() == b2.semanticHash())
    assert(a != b3)
    assert(a.hashCode != b3.hashCode)
    assert(a.semanticEquals(b3))
  }
  test("Expression Equivalence - basic") {
    val equivalence = new EquivalentExpressions
    assert(equivalence.getAllEquivalentExprs.isEmpty)
    val oneA = Literal(1)
    val oneB = Literal(1)
    val twoA = Literal(2)
    // was `var`: never reassigned
    val twoB = Literal(2)
    assert(equivalence.getEquivalentExprs(oneA).isEmpty)
    assert(equivalence.getEquivalentExprs(twoA).isEmpty)
    // Add oneA and test if it is returned. Since it is a group of one, it does not.
    assert(!equivalence.addExpr(oneA))
    assert(equivalence.getEquivalentExprs(oneA).size == 1)
    assert(equivalence.getEquivalentExprs(twoA).isEmpty)
    assert(equivalence.addExpr(oneA))
    assert(equivalence.getEquivalentExprs(oneA).size == 2)
    // Add B and make sure they can see each other.
    assert(equivalence.addExpr(oneB))
    // Use exists and reference equality because of how equals is defined.
    assert(equivalence.getEquivalentExprs(oneA).exists(_ eq oneB))
    assert(equivalence.getEquivalentExprs(oneA).exists(_ eq oneA))
    assert(equivalence.getEquivalentExprs(oneB).exists(_ eq oneA))
    assert(equivalence.getEquivalentExprs(oneB).exists(_ eq oneB))
    assert(equivalence.getEquivalentExprs(twoA).isEmpty)
    assert(equivalence.getAllEquivalentExprs.size == 1)
    assert(equivalence.getAllEquivalentExprs.head.size == 3)
    assert(equivalence.getAllEquivalentExprs.head.contains(oneA))
    assert(equivalence.getAllEquivalentExprs.head.contains(oneB))
    val add1 = Add(oneA, oneB)
    val add2 = Add(oneA, oneB)
    equivalence.addExpr(add1)
    equivalence.addExpr(add2)
    assert(equivalence.getAllEquivalentExprs.size == 2)
    assert(equivalence.getEquivalentExprs(add2).exists(_ eq add1))
    assert(equivalence.getEquivalentExprs(add2).size == 2)
    assert(equivalence.getEquivalentExprs(add1).exists(_ eq add2))
  }
  test("Expression Equivalence - Trees") {
    val one = Literal(1)
    val two = Literal(2)
    val add = Add(one, two)
    val abs = Abs(add)
    val add2 = Add(add, add)
    // `var` here is intentional: reassigned twice below for fresh scenarios.
    var equivalence = new EquivalentExpressions
    equivalence.addExprTree(add, true)
    equivalence.addExprTree(abs, true)
    equivalence.addExprTree(add2, true)
    // Should only have one equivalence for `one + two`
    assert(equivalence.getAllEquivalentExprs.count(_.size > 1) == 1)
    assert(equivalence.getAllEquivalentExprs.filter(_.size > 1).head.size == 4)
    // Set up the expressions
    //   one * two,
    //   (one * two) * (one * two)
    //   sqrt( (one * two) * (one * two) )
    //   (one * two) + sqrt( (one * two) * (one * two) )
    equivalence = new EquivalentExpressions
    val mul = Multiply(one, two)
    val mul2 = Multiply(mul, mul)
    val sqrt = Sqrt(mul2)
    val sum = Add(mul2, sqrt)
    equivalence.addExprTree(mul, true)
    equivalence.addExprTree(mul2, true)
    equivalence.addExprTree(sqrt, true)
    equivalence.addExprTree(sum, true)
    // (one * two), (one * two) * (one * two) and sqrt( (one * two) * (one * two) ) should be found
    assert(equivalence.getAllEquivalentExprs.count(_.size > 1) == 3)
    assert(equivalence.getEquivalentExprs(mul).size == 3)
    assert(equivalence.getEquivalentExprs(mul2).size == 3)
    assert(equivalence.getEquivalentExprs(sqrt).size == 2)
    assert(equivalence.getEquivalentExprs(sum).size == 1)
    // Some expressions inspired by TPCH-Q1
    // sum(l_quantity) as sum_qty,
    // sum(l_extendedprice) as sum_base_price,
    // sum(l_extendedprice * (1 - l_discount)) as sum_disc_price,
    // sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge,
    // avg(l_extendedprice) as avg_price,
    // avg(l_discount) as avg_disc
    equivalence = new EquivalentExpressions
    val quantity = Literal(1)
    val price = Literal(1.1)
    val discount = Literal(.24)
    val tax = Literal(0.1)
    equivalence.addExprTree(quantity, false)
    equivalence.addExprTree(price, false)
    equivalence.addExprTree(Multiply(price, Subtract(Literal(1), discount)), false)
    equivalence.addExprTree(
      Multiply(
        Multiply(price, Subtract(Literal(1), discount)),
        Add(Literal(1), tax)), false)
    equivalence.addExprTree(price, false)
    equivalence.addExprTree(discount, false)
    // quantity, price, discount and (price * (1 - discount))
    assert(equivalence.getAllEquivalentExprs.count(_.size > 1) == 4)
  }
  test("Expression equivalence - non deterministic") {
    // Non-deterministic expressions must never be deduplicated.
    val sum = Add(Rand(0), Rand(0))
    val equivalence = new EquivalentExpressions
    equivalence.addExpr(sum)
    equivalence.addExpr(sum)
    assert(equivalence.getAllEquivalentExprs.isEmpty)
  }
  test("Children of CodegenFallback") {
    val one = Literal(1)
    val two = Add(one, one)
    val explode = Explode(two)
    val add = Add(two, explode)
    // was `var`: never reassigned
    val equivalence = new EquivalentExpressions
    equivalence.addExprTree(add, true)
    // the `two` inside `explode` should not be added
    assert(equivalence.getAllEquivalentExprs.count(_.size > 1) == 0)
    assert(equivalence.getAllEquivalentExprs.count(_.size == 1) == 3) // add, two, explode
  }
}
| Panos-Bletsos/spark-cost-model-optimizer | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/SubexpressionEliminationSuite.scala | Scala | apache-2.0 | 6,777 |
/*
* Copyright 2009-2017. DigitalGlobe, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and limitations under the License.
*/
package org.mrgeo.mapalgebra
import java.io.{Externalizable, IOException, ObjectInput, ObjectOutput}
import org.apache.spark.{SparkConf, SparkContext}
import org.mrgeo.data.rdd.VectorRDD
import org.mrgeo.data.vector.FeatureIdWritable
import org.mrgeo.geometry.{Geometry, GeometryFactory}
import org.mrgeo.job.JobArguments
import org.mrgeo.mapalgebra.parser.{ParserException, ParserNode}
import org.mrgeo.mapalgebra.vector.VectorMapOp
import scala.collection.mutable.ListBuffer
object PointsMapOp {

  /** Builds a PointsMapOp from a flat array of lon/lat coordinate pairs. */
  def apply(coords: Array[Double]): PointsMapOp = new PointsMapOp(coords)

  /** Safely narrows an arbitrary MapOp to a PointsMapOp, yielding None otherwise. */
  def apply(mapop: MapOp): Option[PointsMapOp] = PartialFunction.condOpt(mapop) {
    case points: PointsMapOp => points
  }
}
class PointsMapOp extends VectorMapOp with Externalizable {
  // Lazily-built RDD of point features; populated on first access by load().
  var vectorrdd:Option[VectorRDD] = None
  // Flat array of coordinates as consecutive pairs: [lon0, lat0, lon1, lat1, ...].
  var srcCoordinates:Option[Array[Double]] = None
  // Nothing to compute eagerly; the RDD is materialized lazily in rdd()/load().
  override def execute(context:SparkContext):Boolean = {
    true
  }
  // Number of raw coordinate values (twice the number of points), or -1 when unset.
  def getCoordCount():Int = {
    srcCoordinates match {
      case Some(coords) =>
        coords.length
      case None => -1
    }
  }
  override def setup(job:JobArguments, conf:SparkConf):Boolean = {
    true
  }
  override def teardown(job:JobArguments, conf:SparkConf):Boolean = true
  // Deserialization mirrors writeExternal: an int count (-1 means "no coordinates")
  // followed by that many doubles.
  override def readExternal(in:ObjectInput):Unit = {
    val coordCount = in.readInt()
    srcCoordinates = if (coordCount < 0) {
      None
    }
    else {
      val coords = Array.ofDim[Double](coordCount)
      var i:Int = 0
      while (i < coordCount) {
        coords(i) = in.readDouble()
        i += 1
      }
      Some(coords)
    }
  }
  // Serialization format: count then raw doubles; -1 encodes the None case.
  override def writeExternal(out:ObjectOutput):Unit = {
    srcCoordinates match {
      case Some(coords) =>
        out.writeInt(coords.length)
        coords.foreach(c => out.writeDouble(c))
      case None =>
        out.writeInt(-1)
    }
  }
  override def rdd():Option[VectorRDD] = {
    load()
    vectorrdd
  }
  private[mapalgebra] def this(coords:Array[Double]) = {
    this()
    this.srcCoordinates = Some(coords)
  }
  // Map-algebra parser constructor: children are a flat "lon, lat, lon, lat, ..." list.
  private[mapalgebra] def this(node:ParserNode, variables:String => Option[ParserNode]) = {
    this()
    if (node.getNumChildren % 2 != 0) {
      throw new ParserException(
        "points takes a list of coordinates \\"lon, lat, lon, lat, ...\\"")
    }
    val numCoords = node.getNumChildren
    val coords = Array.ofDim[Double](numCoords)
    for (i <- 0 until numCoords) {
      coords(i) = MapOp.decodeDouble(node.getChild(i))
          .getOrElse(throw new ParserException("Invalid coordinate " + node.getChild(i).getName))
    }
    srcCoordinates = Some(coords)
  }
  // Builds vectorrdd from srcCoordinates on first call; subsequent calls are no-ops.
  private def load():Unit = {
    if (vectorrdd.isEmpty) {
      val pointsrdd = srcCoordinates match {
        case Some(coords) =>
          // Convert the array of lon/let pairs to a VectorRDD
          var recordData = new ListBuffer[(FeatureIdWritable, Geometry)]()
          for (i <- coords.indices by 2) {
            val geom = GeometryFactory.createPoint(coords(i).toFloat, coords(i + 1).toFloat)
            // Feature id is the point's index (i / 2 because coords come in pairs).
            recordData += ((new FeatureIdWritable(i / 2), geom))
          }
          VectorRDD(context.parallelize(recordData))
        case None => throw new IOException("Invalid points input")
      }
      vectorrdd = Some(pointsrdd)
    }
  }
}
| ngageoint/mrgeo | mrgeo-mapalgebra/mrgeo-mapalgebra-vector/src/main/scala/org/mrgeo/mapalgebra/PointsMapOp.scala | Scala | apache-2.0 | 3,863 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
// CT computations box CP83: expenditure qualifying for the Annual Investment
// Allowance (AIA). Optional integer entered by the user.
case class CP83(value: Option[Int]) extends CtBoxIdentifier(name = "Expenditure qualifying for annual investment allowance(AIA)") with CtOptionalInteger with Input with SelfValidatableBox[ComputationsBoxRetriever, Option[Int]] {
  override def validate(boxRetriever: ComputationsBoxRetriever) = {
    collectErrors(
      // A value may not be present when CPQ8 (trading ceased question) is true.
      cannotExistErrorIf(hasValue && boxRetriever.cpQ8().isTrue),
      validateZeroOrPositiveInteger(),
      // 200000 is the upper bound enforced here — presumably the AIA annual cap
      // for the relevant period; TODO confirm against current HMRC guidance.
      exceedsMax(value,200000)
    )
  }
}
object CP83 {
  // Convenience constructor wrapping a known value in Some.
  def apply(value: Int): CP83 = CP83(Some(value))
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CP83.scala | Scala | apache-2.0 | 1,274 |
import scala.tools.nsc._
import scala.tools.partest.CompilerTest
import scala.collection.{ mutable, immutable, generic }
object Test extends CompilerTest {
  import global._
  import rootMirror._
  import definitions._
  import global.analyzer.{Context, ImportInfo}
  // Minimal compilation unit; `check` below is invoked by CompilerTest with the result.
  override def code = """
package context {
}
  """
  // Regression check: looking up CONSTRUCTOR through the root context's Predef
  // import must not resolve to a symbol (it used to return "constructor Predef").
  def check(source: String, unit: global.CompilationUnit) = {
    val context: Context = global.analyzer.rootContext(unit)
    val importInfo: ImportInfo = context.imports.head // Predef._
    val importedSym = importInfo.importedSymbol(termNames.CONSTRUCTOR)
    assert(importedSym == NoSymbol, importedSym) // was "constructor Predef"
  }
}
| folone/dotty | tests/pending/run/t6745-2.scala | Scala | bsd-3-clause | 664 |
package io.swarm.security.shiro
import io.swarm.management.dao.ManagementDaoComponent
import scala.slick.driver.{HsqldbDriver, JdbcProfile}
import io.swarm.management.impl.ManagementDaoJDBC
import scala.slick.jdbc.JdbcBackend._
import io.swarm.infrastructure.persistence.slick.SlickDbProvider
// Test wiring: backs the management DAO with an in-memory HSQLDB database.
trait HSQLInMemoryManagementDaoComponent extends ManagementDaoComponent with SlickDbProvider {
  // Slick profile matching the HSQLDB JDBC driver below.
  val profile: JdbcProfile = HsqldbDriver
  val managementDao = new ManagementDaoJDBC(profile)
  // lazy so the in-memory database is only created when first touched by a test.
  lazy val db = Database.forURL("jdbc:hsqldb:mem:mymemdb", driver = "org.hsqldb.jdbc.JDBCDriver", user = "sa", password = "sa")
}
| Turkcell/swarm | coreservice/src/test/scala/io/swarm/security/shiro/HSQLInMemoryManagementDaoComponent.scala | Scala | apache-2.0 | 616 |
package com.querydsl.scala
import java.io.StringWriter
import com.querydsl.codegen.utils._
import com.querydsl.codegen._
import org.junit._
class CaseClassSerializerTest {
  // Shared fixtures; `val` rather than `var` since none of them is ever reassigned.
  val typeMappings = ScalaTypeMappings.create
  val entityType = EntityTypes.entityType
  val writer = new StringWriter()
  // Serializes the entity type and relies on serialize() not throwing.
  @Test
  def Print {
    val serializer = new CaseClassSerializer(typeMappings)
    typeMappings.register(entityType, new QueryTypeFactoryImpl("Q", "", "").create(entityType))
    serializer.serialize(entityType, SimpleSerializerConfig.DEFAULT, new ScalaWriter(writer))
    //println(writer.toString)
  }
  // Serializes without a companion object and asserts the output compiles.
  @Test
  def Compile {
    val serializer = new CaseClassSerializer(typeMappings)
    serializer.createCompanionObject = false
    typeMappings.register(entityType, new QueryTypeFactoryImpl("Q", "", "").create(entityType))
    serializer.serialize(entityType, SimpleSerializerConfig.DEFAULT, new ScalaWriter(writer))
    val str = writer.toString
    CompileTestUtils.assertCompileSuccess(str)
  }
}
| lpandzic/querydsl | querydsl-scala/src/test/scala/com/querydsl/scala/CaseClassSerializerTest.scala | Scala | apache-2.0 | 1,012 |
package spbau.scala.ordian.task02
object Three extends App {
  assert(Not(Not(X)) == X)
  assert(Not(Not(Not(X))) == Or(Not(X), And(Not(X), Not(X))))
  assert(Not(And(True, Not(False))) == False)
  assert(False == And(And(Not(Or(X, X)), X), True))

  /**
   * A boolean formula over the single variable [[X]]. Two formulas are `==`
   * when they are logically equivalent, i.e. their simplified forms coincide.
   */
  sealed abstract class Bool {
    def ==(that: Bool): Boolean = simplify(this) equals simplify(that)
  }
  case class And(left: Bool, right: Bool) extends Bool
  case class Or(left: Bool, right: Bool) extends Bool
  case class Not(op: Bool) extends Bool
  case object True extends Bool
  case object False extends Bool
  case object X extends Bool

  /**
   * Reduces `b` to one of the four canonical forms: `True`, `False`, `X` or
   * `Not(X)`. Because X is the only variable, a formula's truth table has
   * exactly two rows, so evaluating under X=true and X=false determines the
   * canonical form completely. This replaces the previous hand-written
   * rewrite rules, which were hard to see as total; the outputs are identical.
   */
  def simplify(b: Bool): Bool = {
    // Evaluate the formula with X bound to `x`.
    def eval(e: Bool, x: Boolean): Boolean = e match {
      case And(l, r) => eval(l, x) && eval(r, x)
      case Or(l, r)  => eval(l, x) || eval(r, x)
      case Not(op)   => !eval(op, x)
      case True      => true
      case False     => false
      case X         => x
    }
    // Map the two-row truth table back to its canonical formula.
    (eval(b, true), eval(b, false)) match {
      case (true, true)   => True
      case (false, false) => False
      case (true, false)  => X
      case (false, true)  => Not(X)
    }
  }
}
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import time.Span
import java.util.concurrent.ConcurrentSkipListSet
import scala.collection.JavaConverters._
import java.io.PrintStream
import org.scalactic.Requirements._
// Tracks currently-running tests and reports the ones that have exceeded `timeout`
// milliseconds as "slowpokes". Thread-safe via a concurrent sorted set on the JVM.
private[scalatest] class SlowpokeDetector(timeout: Long = 60000, out: PrintStream = Console.err) { // Default timeout is 1 minute
  // SKIP-SCALATESTJS,NATIVE-START
  private final val runningTests = new ConcurrentSkipListSet[RunningTest]
  // SKIP-SCALATESTJS,NATIVE-END
  //SCALATESTJS,NATIVE-ONLY private final val runningTests = new scala.collection.mutable.TreeSet[RunningTest]
  // Records that a test began running at `timeStamp` (epoch millis, must be >= 0).
  def testStarting(suiteName: String, suiteId: String, testName: String, timeStamp: Long): Unit = {
    requireNonNull(suiteName, suiteId, testName)
    require(timeStamp >= 0, "timeStamp must be >= 0")
    runningTests.add(
      new RunningTest(
        suiteName = suiteName,
        suiteId = suiteId,
        testName = testName,
        startTimeStamp = timeStamp
      )
    )
  }
  // Removes the finished test. A probe with timestamp 0 suffices because equality
  // is determined only by suite ID and test name; a miss is reported, not thrown.
  def testFinished(suiteName: String, suiteId: String, testName: String): Unit = {
    requireNonNull(suiteName, suiteId, testName)
    val wasRemoved =
      runningTests.remove( // removal uses equality, which is determined only by suite ID and test name
        new RunningTest(
          suiteName = suiteName,
          suiteId = suiteId,
          testName = testName,
          startTimeStamp = 0
        )
      )
    if (!wasRemoved) {
      val stringToPrint = Resources.slowpokeDetectorEventNotFound(suiteName, suiteId, testName)
      out.println(stringToPrint)
    }
  }
  // Returns tests running longer than `timeout`, oldest first, as Slowpoke records.
  def detectSlowpokes(currentTimeStamp: Long): IndexedSeq[Slowpoke] = {
    // SKIP-SCALATESTJS,NATIVE-START
    val rts = runningTests.iterator.asScala.toVector
    // SKIP-SCALATESTJS,NATIVE-END
    //SCALATESTJS,NATIVE-ONLY val rts = runningTests.iterator.toVector
    val slowTests = rts.filter(currentTimeStamp - _.startTimeStamp > timeout)
    slowTests.sortBy(_.startTimeStamp).map(_.toSlowpoke(currentTimeStamp))
  }
}
| dotty-staging/scalatest | scalatest/src/main/scala/org/scalatest/SlowpokeDetector.scala | Scala | apache-2.0 | 2,581 |
package tpl.hostiledrops
import cpw.mods.fml.relauncher.SideOnly
import cpw.mods.fml.relauncher.Side
import net.minecraft.client.renderer.texture.IconRegister
import net.minecraft.creativetab.CreativeTabs
import net.minecraft.item.Item
import java.util.logging.Level
import cpw.mods.fml.common.FMLLog
import net.minecraft.util.Icon
import cpw.mods.fml.common.registry.LanguageRegistry
import java.lang.String;
// Base class for this mod's items: applies common defaults and derives the
// item's unlocalized name and icon path from the concrete subclass name.
abstract class BaseItem(id : Int) extends Item(id) {
  // Constructor side effects: stack size, durability, creative tab, name.
  setMaxStackSize(64)
  setMaxDamage(0)
  setCreativeTab(CreativeTabs.tabMisc)
  setUnlocalizedName(getUnlocalizedName)
  @SideOnly(Side.CLIENT)
  override def registerIcons(icon : IconRegister) {
    // Texture is looked up as "<modName>:<SimpleClassName>".
    itemIcon = icon.registerIcon(HostileDropsMod.modName + ":" + getUnlocalizedName);
  }
  // The unlocalized name is simply the concrete class's simple name.
  override def getUnlocalizedName = {
    this.getClass.getSimpleName
  }
  /** Registers a display name for the default locale. */
  def setLocalizedName(name : String) {
    LanguageRegistry.instance.addStringLocalization("item." + getUnlocalizedName + ".name", name)
  }
  /** Registers a display name for the given locale. */
  def setLocalizedName(locale : String, name : String) {
    LanguageRegistry.instance.addStringLocalization("item." + getUnlocalizedName + ".name", locale, name)
  }
} | piotrb/hostiledrops | src/main/scala/tpl/hostiledrops/BaseItem.scala | Scala | bsd-2-clause | 1,141 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils.json
import scala.collection.{Iterator, JavaConverters}
import JavaConverters._
import com.fasterxml.jackson.databind.node.ArrayNode
// Thin wrapper over a Jackson ArrayNode exposing its elements as JsonValues.
class JsonArray private[json] (protected val node: ArrayNode) extends JsonValue {
  // Lazily wraps each underlying element; a fresh iterator is built per call.
  def iterator: Iterator[JsonValue] = node.elements.asScala.map(JsonValue(_))
}
| KevinLiLu/kafka | core/src/main/scala/kafka/utils/json/JsonArray.scala | Scala | apache-2.0 | 1,121 |
/*
* scala-bcp
* Copyright 2014 深圳岂凡网络有限公司 (Shenzhen QiFun Network Corp., LTD)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.qifun.bcp
import java.nio.channels.AsynchronousSocketChannel
import java.nio.channels.ShutdownChannelGroupException
import java.security.SecureRandom
import java.util.concurrent.CancellationException
import java.util.concurrent.ScheduledExecutorService
import scala.concurrent.stm.InTxn
import scala.concurrent.stm.Ref
import scala.concurrent.stm.TMap.asMap
import scala.concurrent.stm.Txn
import scala.concurrent.stm.atomic
import scala.util.control.Exception.Catcher
import com.dongxiguo.fastring.Fastring.Implicits.FastringContext
import com.dongxiguo.zeroLog.LogRecord.StringLogRecord
import com.dongxiguo.zeroLog.LogRecord.ThrowableLogRecord
import com.qifun.bcp.Bcp.BusyTimeout
import com.qifun.bcp.Bcp.ConnectionBusy
import com.qifun.bcp.Bcp.ConnectionHead
import com.qifun.bcp.Bcp.ConnectionIdle
import com.qifun.bcp.Bcp.ConnectionSlow
import com.qifun.bcp.Bcp.ConnectionState
import com.qifun.bcp.Bcp.IdleTimeout
import com.qifun.bcp.Bcp.MaxActiveConnectionsPerSession
import com.qifun.bcp.Bcp.MaxConnectionsPerSession
import com.qifun.bcp.Bcp.NumBytesSessionId
import com.qifun.bcp.Bcp.ReconnectTimeout
import com.qifun.statelessFuture.Future
import com.qifun.statelessFuture.Future.apply
import com.qifun.statelessFuture.util.CancellablePromise
import com.qifun.statelessFuture.util.Sleep
import java.io.IOException
object BcpClient {
  private implicit val (logger, formatter, appender) = ZeroLoggerFactory.newLogger(this)
  private[BcpClient] final class Stream(socket: AsynchronousSocketChannel) extends BcpSession.Stream(socket) {
    // Client-specific per-stream state, e.g. timers.
    // Pending busy timer for this stream, or null when none is scheduled.
    val busyPromise = Ref.make[CancellablePromise[Unit]]
    // Idle / busy / slow state of this TCP connection.
    val connectionState: Ref[ConnectionState] = Ref(ConnectionIdle)
  }
  private[BcpClient] final class Connection extends BcpSession.Connection[Stream] {
  }
  // Source of random session ids for the no-arg BcpClient constructor.
  private val sessionIdGenerator = new SecureRandom
}
/**
 * A BCP protocol client.
 *
 * Each BCP client manages several TCP connections, never more than
 * [[Bcp.MaxConnectionsPerSession]].
 *
 * The client decides whether to open additional TCP connections or to close
 * some of them according to the observed network conditions.
 *
 * Every TCP connection is in one of three states: idle, busy or slow.
 *
 *  - A connection is idle when every [[Bcp.AcknowledgeRequired]] the client
 *    sent on it has received its matching [[Bcp.Acknowledge]].
 *  - An idle connection becomes busy as soon as it sends another
 *    [[Bcp.AcknowledgeRequired]].
 *  - A connection that has stayed busy for [[Bcp.BusyTimeout]] without
 *    returning to idle becomes slow.
 *
 * Whenever all of a client's TCP connections are slow and the client has not
 * yet reached its connection limit, the client opens a new TCP connection.
 *
 * A client is in surplus state when it manages more than one TCP connection
 * and at least one of them is idle.
 *
 * A client that stays in surplus state for [[Bcp.IdleTimeout]] continuously
 * closes one of its TCP connections.
 */
abstract class BcpClient(sessionId: Array[Byte]) extends BcpSession[BcpClient.Stream, BcpClient.Connection] {

  /** Creates a client with a freshly generated random session id. */
  def this() = this{
    val id = Array.ofDim[Byte](NumBytesSessionId)
    BcpClient.sessionIdGenerator.nextBytes(id)
    id
  }

  // TODO: add a constructor that resets the session after a crash
  // TODO: add a `renew` API that resets the session when it has been Unavailable for too long

  import BcpClient.{ logger, formatter, appender }

  // Pending reconnect timer, or null when none is scheduled.
  private val reconnectPromise = Ref.make[CancellablePromise[Unit]]
  // Pending "close one idle connection" timer, or null when none is scheduled.
  private val idlePromise = Ref.make[CancellablePromise[Unit]]
  private val nextConnectionId = Ref(0)
  private val isConnecting = Ref(false)
  private val isShutedDown = Ref(false)

  override private[bcp] final def newConnection = new BcpClient.Connection

  /** Opens one TCP connection to the server; supplied by the concrete client. */
  protected def connect(): Future[AsynchronousSocketChannel]

  /** Executor for timers and I/O continuations; supplied by the concrete client. */
  protected def executor: ScheduledExecutorService

  override private[bcp] final def internalExecutor: ScheduledExecutorService = executor

  /** Marks the session shut down and cancels any pending reconnect/idle timers. */
  override private[bcp] final def release()(implicit txn: InTxn) {
    // assert(!isShutedDown())
    isShutedDown() = true
    val oldReconnectPromise = reconnectPromise()
    reconnectPromise() = null;
    if (oldReconnectPromise != null) {
      Txn.afterCommit(_ => oldReconnectPromise.cancel())
    }
    val oldIdlePromise = idlePromise()
    idlePromise() = null;
    if (oldIdlePromise != null) {
      Txn.afterCommit(_ => oldIdlePromise.cancel())
    }
  }

  /** Fired when a busy timer elapses: the connection is now considered slow. */
  private def busyComplete(busyConnection: BcpClient.Connection, thisTimer: CancellablePromise[Unit]): Unit = {
    atomic { implicit txn =>
      if (busyConnection.stream() != null && busyConnection.stream().busyPromise() == thisTimer) {
        busyConnection.stream().connectionState() = ConnectionSlow
        // All connections may now be slow, so possibly open another one.
        increaseConnection()
        busyConnection.stream().busyPromise() = null
      }
    }
  }

  override private[bcp] final def busy(busyConnection: BcpClient.Connection)(implicit txn: InTxn): Unit = {
    logger.finest("the connection is busy!")
    busyConnection.stream().connectionState() = ConnectionBusy
    val oldReconnectPromise = reconnectPromise()
    if (oldReconnectPromise != null) {
      reconnectPromise() = null
      Txn.afterCommit(_ => oldReconnectPromise.cancel())
    }
    val oldBusyPromise = busyConnection.stream().busyPromise()
    if (oldBusyPromise == null) {
      implicit def catcher: Catcher[Unit] = {
        case _: CancellationException =>
          logger.finer("The busy timer is cancelled!")
        case e: Exception =>
          logger.warning(e)
      }
      val newBusyPromise = CancellablePromise[Unit]
      busyConnection.stream().busyPromise() = newBusyPromise
      Txn.afterCommit { _ =>
        newBusyPromise.foreach(_ => busyComplete(busyConnection, newBusyPromise))
        Sleep.start(newBusyPromise, executor, BusyTimeout)
      }
      // The client is no longer in surplus state: cancel the idle timer, if any.
      if (!(connections.size > 1 &&
        connections.exists(connection =>
          connection._2.stream() != null && connection._2.stream().connectionState() == ConnectionIdle))) {
        val oldIdlePromise = idlePromise()
        if (oldIdlePromise != null) {
          idlePromise() = null
          Txn.afterCommit(_ => oldIdlePromise.cancel())
        }
      }
    }
  }

  override private[bcp] final def idle(connection: BcpClient.Connection)(implicit txn: InTxn): Unit = {
    logger.finest("the connection is idle!")
    if (connection.stream() != null) {
      val busyPromise = connection.stream().busyPromise()
      if (busyPromise != null) {
        connection.stream().busyPromise() = null
        Txn.afterCommit(_ => busyPromise.cancel())
      }
      connection.stream().connectionState() = ConnectionIdle
      checkFinishConnection()
    }
  }

  override private[bcp] final def close(closeConnection: BcpClient.Connection)(implicit txn: InTxn): Unit = {
    val connectionSize = connections.size
    if (closeConnection.stream() != null) {
      val busyPromise = closeConnection.stream().busyPromise()
      if (busyPromise != null) {
        closeConnection.stream().busyPromise() = null
        Txn.afterCommit(_ => busyPromise.cancel())
      }
    }
    // When this was the last live stream and we may still grow, try reconnecting.
    if (connections.forall(connection =>
      connection._2 == closeConnection || connection._2.stream() == null) &&
      connectionSize < MaxConnectionsPerSession) {
      startReconnectTimer()
    }
    // Connection ids exhausted and no stream left: the session cannot recover.
    if (connectionSize >= MaxConnectionsPerSession &&
      connections.forall(connection =>
        connection._2 == closeConnection || connection._2.stream() == null)) {
      internalInterrupt()
    }
  }

  /** Registers a freshly connected socket and sends the BCP connection head. */
  private def afterConnect(socket: AsynchronousSocketChannel): Future[Unit] = Future {
    logger.finer(fast"bcp client connect server success, socket: ${socket}")
    val stream = new BcpClient.Stream(socket)
    atomic { implicit txn =>
      if (!isShutedDown()) {
        val connectionId = nextConnectionId() + 1
        nextConnectionId() = nextConnectionId() + 1
        Txn.afterCommit { _ =>
          BcpIo.enqueueHead(stream, ConnectionHead(sessionId, false, connectionId))
          logger.fine(
            fast"bcp client send head to server success, sessionId: ${sessionId.toSeq} , connectionId: ${connectionId}")
        }
        addStream(connectionId, stream)
        isConnecting() = false
      } else {
        // The session was shut down while connecting: discard the socket.
        Txn.afterCommit { _ =>
          socket.close()
        }
      }
    }
    stream.flush()
  }

  /** Attempts one connection; on failure schedules a reconnect instead of failing the session. */
  private def tryIncreaseConnection() {
    val connectFuture = Future {
      val socket = connect().await
      afterConnect(socket).await
    }
    implicit def catcher: Catcher[Unit] = {
      case e: ShutdownChannelGroupException => {
        logger.finer(e)
      }
      case e: IOException => {
        logger.finer(e)
      }
      case e: Exception => {
        logger.severe(e)
        atomic { implicit txn =>
          if (!isShutedDown()) {
            startReconnectTimer()
          }
        }
      }
    }
    for (_ <- connectFuture) {
      logger.finest("Increase connection success.")
    }
  }

  /** Opens a new connection when none is in progress, limits allow it, and every live connection is slow. */
  private final def increaseConnection()(implicit txn: InTxn) {
    if (!isConnecting() &&
      connections.size < MaxConnectionsPerSession &&
      connections.count(_._2.stream() != null) < MaxActiveConnectionsPerSession &&
      connections.forall(connection =>
        connection._2.stream() == null || connection._2.stream().connectionState() == ConnectionSlow)) {
      isConnecting() = true
      Txn.afterCommit { _ =>
        tryIncreaseConnection()
      }
    }
  }

  /** Fired when the idle timer elapses: the client stayed in surplus state, so close one idle connection. */
  def idleComplete(thisTimer: CancellablePromise[Unit]): Unit = {
    atomic { implicit txn =>
      if (idlePromise() == thisTimer) {
        connections.find(connection =>
          (connection._2.stream() != null) &&
            (connection._2.stream().connectionState() == ConnectionIdle)) match {
          case Some((connectionId, toFinishConnection)) =>
            finishConnection(connectionId, toFinishConnection)
          case None =>
        }
        // Clear the fired timer so a new idle timer can be scheduled later.
        // BUG FIX: this was `idlePromise() == null` (a no-op comparison), which
        // left the stale promise in place and prevented any further idle timers.
        idlePromise() = null
      }
    }
  }

  /** Starts the idle timer when the client is in surplus state and no timer is pending. */
  private final def checkFinishConnection()(implicit txn: InTxn) {
    if (connections.size > 1 &&
      connections.exists(connection =>
        connection._2.stream() != null && connection._2.stream().connectionState() == ConnectionIdle)) { // surplus state
      if (idlePromise() == null) {
        implicit def catcher: Catcher[Unit] = {
          case _: CancellationException =>
            logger.finer("The finish connection is cancelled!")
          case e: Exception =>
            logger.warning(e)
        }
        val newIdlePromise = CancellablePromise[Unit]
        idlePromise() = newIdlePromise
        Txn.afterCommit { _ =>
          newIdlePromise.foreach { _ => idleComplete(newIdlePromise) }
          Sleep.start(newIdlePromise, executor, IdleTimeout)
        }
      }
    }
  }

  /** Fired when the reconnect timer elapses: try to open a connection again. */
  private def reconnectComplete(thisTimer: CancellablePromise[Unit]): Unit = {
    atomic { implicit txn =>
      if (reconnectPromise() == thisTimer) {
        reconnectPromise() = null
        increaseConnection()
      }
    }
  }

  /** Schedules a reconnect attempt unless one is already pending. */
  private final def startReconnectTimer()(implicit txn: InTxn): Unit = {
    if (reconnectPromise() == null) {
      implicit def catcher: Catcher[Unit] = {
        case _: CancellationException =>
          logger.finer("The reconnect timer is cancelled!")
        case e: Exception =>
          logger.warning(e)
      }
      val newReconnectPromise = CancellablePromise[Unit]
      reconnectPromise() = newReconnectPromise
      Txn.afterCommit { _ =>
        newReconnectPromise.foreach { _ => reconnectComplete(newReconnectPromise) }
        Sleep.start(newReconnectPromise, executor, ReconnectTimeout)
      }
      // (removed a redundant second `reconnectPromise() = newReconnectPromise` here)
    }
  }

  /** Kicks off the first connection attempt. */
  final def start() {
    atomic { implicit txn: InTxn =>
      increaseConnection()
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.physical.batch
import org.apache.flink.table.api.TableException
import org.apache.flink.table.planner.plan.`trait`.FlinkRelDistribution
import org.apache.flink.table.planner.plan.nodes.FlinkConventions
import org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalLegacySink
import org.apache.flink.table.planner.plan.nodes.physical.batch.BatchPhysicalLegacySink
import org.apache.flink.table.planner.plan.utils.FlinkRelOptUtil
import org.apache.flink.table.sinks.PartitionableTableSink
import org.apache.calcite.plan.RelOptRule
import org.apache.calcite.rel.convert.ConverterRule
import org.apache.calcite.rel.{RelCollations, RelNode}
import scala.collection.JavaConversions._
/** Converter rule that translates a [[FlinkLogicalLegacySink]] from the
 *  LOGICAL convention into a [[BatchPhysicalLegacySink]] in the
 *  BATCH_PHYSICAL convention.
 *
 *  For partitioned tables the rule also pushes the static partition values
 *  into the sink and augments the traits required of the sink's input
 *  (hash distribution and/or collation on the dynamic partition columns)
 *  so rows of the same partition arrive grouped at the sink.
 */
class BatchPhysicalLegacySinkRule extends ConverterRule(
  classOf[FlinkLogicalLegacySink],
  FlinkConventions.LOGICAL,
  FlinkConventions.BATCH_PHYSICAL,
  "BatchPhysicalLegacySinkRule") {

  def convert(rel: RelNode): RelNode = {
    val sink = rel.asInstanceOf[FlinkLogicalLegacySink]
    val newTrait = rel.getTraitSet.replace(FlinkConventions.BATCH_PHYSICAL)
    var requiredTraitSet = sink.getInput.getTraitSet.replace(FlinkConventions.BATCH_PHYSICAL)
    if (sink.catalogTable != null && sink.catalogTable.isPartitioned) {
      sink.sink match {
        case partitionSink: PartitionableTableSink =>
          partitionSink.setStaticPartition(sink.staticPartitions)
          // Partition keys without a static value must be derived per row.
          val dynamicPartFields = sink.catalogTable.getPartitionKeys
            .filter(!sink.staticPartitions.contains(_))
          if (dynamicPartFields.nonEmpty) {
            val dynamicPartIndices =
              dynamicPartFields.map(partitionSink.getTableSchema.getFieldNames.indexOf(_))
            // TODO This option is hardcoded to remove the dependency of planner from
            // flink-connector-files. We should move this option out of FileSystemConnectorOptions
            val shuffleEnable = sink
              .catalogTable
              .getOptions
              .getOrDefault("sink.shuffle-by-partition.enable", "false")
            if (shuffleEnable.toBoolean) {
              // Shuffle by the dynamic partition columns so each partition is
              // written by a single task.
              requiredTraitSet = requiredTraitSet.plus(
                FlinkRelDistribution.hash(dynamicPartIndices
                  .map(Integer.valueOf), requireStrict = false))
            }
            if (partitionSink.configurePartitionGrouping(true)) {
              // The sink accepts grouped input: request a sort on the dynamic
              // partition columns. default to asc.
              val fieldCollations = dynamicPartIndices.map(FlinkRelOptUtil.ofRelFieldCollation)
              requiredTraitSet = requiredTraitSet.plus(RelCollations.of(fieldCollations: _*))
            }
          }
        case _ => throw new TableException("We need PartitionableTableSink to write data to" +
          s" partitioned table: ${sink.sinkName}")
      }
    }
    val newInput = RelOptRule.convert(sink.getInput, requiredTraitSet)
    new BatchPhysicalLegacySink(
      rel.getCluster,
      newTrait,
      newInput,
      sink.hints,
      sink.sink,
      sink.sinkName)
  }
}
object BatchPhysicalLegacySinkRule {
  /** Singleton instance used when registering the rule with the planner. */
  val INSTANCE: RelOptRule = new BatchPhysicalLegacySinkRule
}
| lincoln-lil/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/batch/BatchPhysicalLegacySinkRule.scala | Scala | apache-2.0 | 3,921 |
// Copyright © 2010-2016, Esko Luontola <www.orfjackal.net>
// This software is released under the Apache License 2.0.
// The license text is at http://www.apache.org/licenses/LICENSE-2.0
package org.specsy.examples.scala
import org.specsy.scala.ScalaSpecsy
import org.hamcrest.MatcherAssert.assertThat
import org.hamcrest.Matchers._
/** Example of `shareSideEffects()`: the three child specs below observe each
 *  other's updates to `counter` instead of each starting from a fresh value.
 */
class ShareSideEffectsExampleSpec extends ScalaSpecsy {
  var counter = 0
  // Were `shareSideEffects()` not called, each child spec would get its own
  // fresh `counter`, and every assert below would observe the value `1`.
  shareSideEffects()
  "One" >> {
    counter = counter + 1
    assertThat(counter, is(1))
  }
  "Two" >> {
    counter = counter + 1
    assertThat(counter, is(2))
  }
  "Three" >> {
    counter = counter + 1
    assertThat(counter, is(3))
  }
}
| orfjackal/specsy | specsy-examples/src/test/scala/org/specsy/examples/scala/ShareSideEffectsExampleSpec.scala | Scala | apache-2.0 | 773 |
package chandu0101.scalajs.react.components.searchboxes
import chandu0101.scalajs.react.components.all._
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.prefix_<^._
import scala.scalajs.js
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.prefix_<^._
import scala.scalajs.js
import scala.scalajs.js
import scala.scalajs.js.undefined
import scala.scalajs.js.UndefOr
import scalacss.Defaults._
import scalacss.ScalaCssReact._
/** A minimal search-box component: one styled text input that reports its
 *  current value to a callback on every key-up.
 */
object ReactSearchBox {
  /** ScalaCSS styles for the box; subclass to customise the appearance. */
  class Style extends StyleSheet.Inline {
    import dsl._
    val searchBox = style(marginBottom(10 px))
    // Borderless input with only a bottom rule, highlighted blue on focus.
    val input = style(border.none,
      fontSize(13 px),
      fontWeight._300,
      padding(3 px),
      width(100.%%),
      backgroundColor.transparent,
      borderBottom :=! "1px solid #B2ADAD",
      &.focus(outline.none,
        borderBottom :=! "1.5px solid #03a9f4"
      )
    )
  }
  /** Component backend: forwards the input's value to the props callback. */
  class Backend(t: BackendScope[Props, _]) {
    def onTextChange(e: ReactEventI) = {
      e.preventDefault()
      t.props.onTextChange(e.target.value)
    }
  }
  /** Styles used when the caller does not supply their own. */
  object DefaultStyle extends Style
  val component = ReactComponentB[Props]("ReactSearchBox")
    .stateless
    .backend(new Backend(_))
    .render((P, S, B) => {
      <.div(P.style.searchBox)(
        <.input(P.style.input, ^.placeholder := "Search ..", ^.onKeyUp ==> B.onTextChange)
      )
    })
    .build
  /** @param onTextChange invoked with the input text on every key-up
   *  @param style        styles to apply (defaults to [[DefaultStyle]])
   */
  case class Props(onTextChange: String => Unit, style: Style)
  def apply(onTextChange: String => Unit, style: Style = DefaultStyle, ref: js.UndefOr[String] = "", key: js.Any = {}) = component.set(key, ref)(Props(onTextChange,style))
} | coreyauger/scalajs-react-components | core/src/main/scala/chandu0101/scalajs/react/components/searchboxes/ReactSearchBox.scala | Scala | apache-2.0 | 1,611 |
package com.arcusys.valamis.persistence.impl.scorm.storage
import com.arcusys.valamis.lesson.scorm.model.manifest.{AssetResource, Resource, ScoResource}
import com.arcusys.valamis.lesson.scorm.storage.ResourcesStorage
import com.arcusys.valamis.persistence.common.SlickProfile
import com.arcusys.valamis.persistence.impl.scorm.model.ResourceModel
import com.arcusys.valamis.persistence.impl.scorm.schema.ResourceTableComponent
import scala.slick.driver.JdbcProfile
import scala.slick.jdbc.JdbcBackend
/** Slick-backed implementation of [[ResourcesStorage]] for SCORM package
 *  resources. Each public operation opens its own database session.
 *
 * @param db     the database to run sessions against
 * @param driver the JDBC profile used to build queries
 */
class ResourcesStorageImpl(val db: JdbcBackend#DatabaseDef,
                           val driver: JdbcProfile)
  extends ResourcesStorage
    with ResourceTableComponent
    with SlickProfile {

  import driver.simple._

  /** Returns every stored resource, across all packages. */
  override def getAll: Seq[Resource] = db.withSession { implicit s =>
    resourceTQ.run.map(_.convert)
  }

  /** Finds a single resource by package id and manifest resource id. */
  override def getByID(packageId: Long, resourceId: String): Option[Resource] =
    db.withSession { implicit s =>
      resourceTQ
        .filter(r => r.packageId === packageId && r.resourceId === resourceId)
        .firstOption
        .map(_.convert)
    }

  /** Deletes all resources belonging to the given package. */
  override def delete(packageId: Long): Unit = db.withSession { implicit s =>
    resourceTQ.filter(_.packageId === packageId).delete
  }

  /** Returns every resource belonging to the given package. */
  override def getByPackageId(packageId: Long): Seq[Resource] =
    db.withSession { implicit s =>
      resourceTQ.filter(_.packageId === packageId).run.map(_.convert)
    }

  /** Inserts `entity` for the given package and returns the generated row id.
   *
   * @throws UnsupportedOperationException if `entity` is neither a SCO nor
   *                                       an asset resource
   */
  override def createForPackageAndGetId(packageId: Long, entity: Resource): Long =
    db.withSession { implicit s =>
      val scormType = entity match {
        case _: ScoResource   => "sco"
        case _: AssetResource => "asset"
        case _ => throw new UnsupportedOperationException("Unknown resource type")
      }
      val resource = new ResourceModel(None,
        Some(packageId),
        scormType,
        Some(entity.id),
        entity.href,
        entity.base)
      (resourceTQ returning resourceTQ.map(_.id)) += resource
    }

  /** Converts a persisted row back into the domain model. */
  implicit class ResourceToModel(entity: ResourceModel) {
    def convert: Resource = {
      // NOTE(review): rows written by createForPackageAndGetId always carry a
      // resourceId, so the .get calls are assumed safe for data produced by
      // this storage — confirm `href` is always present for SCO rows.
      if (entity.scormType.equalsIgnoreCase("sco"))
        new ScoResource(entity.resourceId.get, entity.href.get, entity.base, Nil, Nil)
      else
        new AssetResource(entity.resourceId.get, entity.href, entity.base, Nil, Nil)
    }
  }
}
| igor-borisov/valamis | valamis-slick-persistence/src/main/scala/com/arcusys/valamis/persistence/impl/scorm/storage/ResourcesStorageImpl.scala | Scala | gpl-3.0 | 2,399 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2002-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
// DO NOT EDIT, CHANGES WILL BE LOST
// This auto-generated code can be modified in "project/GenerateAnyVals.scala".
// Afterwards, running "sbt generateSources" regenerates this source file.
package scala
/** `Double`, a 64-bit IEEE-754 floating point number (equivalent to Java's `double` primitive type) is a
* subtype of [[scala.AnyVal]]. Instances of `Double` are not
* represented by an object in the underlying runtime system.
*
* There is an implicit conversion from [[scala.Double]] => [[scala.runtime.RichDouble]]
* which provides useful non-primitive operations.
*/
final abstract class Double private extends AnyVal {
  /** Returns the value of this `Double` as a `Byte`; this may involve rounding or truncation. */
  def toByte: Byte
  /** Returns the value of this `Double` as a `Short`; this may involve rounding or truncation. */
  def toShort: Short
  /** Returns the value of this `Double` as a `Char`; this may involve rounding or truncation. */
  def toChar: Char
  /** Returns the value of this `Double` as an `Int`; this may involve rounding or truncation. */
  def toInt: Int
  /** Returns the value of this `Double` as a `Long`; this may involve rounding or truncation. */
  def toLong: Long
  /** Returns the value of this `Double` as a `Float`; this may involve rounding or truncation. */
  def toFloat: Float
  /** Returns this value, unmodified. */
  def toDouble: Double
  /** Returns this value, unmodified. */
  def unary_+ : Double
  /** Returns the negation of this value. */
  def unary_- : Double
  /** Returns the concatenation of this value's string representation and `x`. */
  def +(x: String): String
  /** Returns `true` if this value is equal to x, `false` otherwise. */
  def ==(x: Byte): Boolean
  /** Returns `true` if this value is equal to x, `false` otherwise. */
  def ==(x: Short): Boolean
  /** Returns `true` if this value is equal to x, `false` otherwise. */
  def ==(x: Char): Boolean
  /** Returns `true` if this value is equal to x, `false` otherwise. */
  def ==(x: Int): Boolean
  /** Returns `true` if this value is equal to x, `false` otherwise. */
  def ==(x: Long): Boolean
  /** Returns `true` if this value is equal to x, `false` otherwise. */
  def ==(x: Float): Boolean
  /** Returns `true` if this value is equal to x, `false` otherwise. */
  def ==(x: Double): Boolean
  /** Returns `true` if this value is not equal to x, `false` otherwise. */
  def !=(x: Byte): Boolean
  /** Returns `true` if this value is not equal to x, `false` otherwise. */
  def !=(x: Short): Boolean
  /** Returns `true` if this value is not equal to x, `false` otherwise. */
  def !=(x: Char): Boolean
  /** Returns `true` if this value is not equal to x, `false` otherwise. */
  def !=(x: Int): Boolean
  /** Returns `true` if this value is not equal to x, `false` otherwise. */
  def !=(x: Long): Boolean
  /** Returns `true` if this value is not equal to x, `false` otherwise. */
  def !=(x: Float): Boolean
  /** Returns `true` if this value is not equal to x, `false` otherwise. */
  def !=(x: Double): Boolean
  /** Returns `true` if this value is less than x, `false` otherwise. */
  def <(x: Byte): Boolean
  /** Returns `true` if this value is less than x, `false` otherwise. */
  def <(x: Short): Boolean
  /** Returns `true` if this value is less than x, `false` otherwise. */
  def <(x: Char): Boolean
  /** Returns `true` if this value is less than x, `false` otherwise. */
  def <(x: Int): Boolean
  /** Returns `true` if this value is less than x, `false` otherwise. */
  def <(x: Long): Boolean
  /** Returns `true` if this value is less than x, `false` otherwise. */
  def <(x: Float): Boolean
  /** Returns `true` if this value is less than x, `false` otherwise. */
  def <(x: Double): Boolean
  /** Returns `true` if this value is less than or equal to x, `false` otherwise. */
  def <=(x: Byte): Boolean
  /** Returns `true` if this value is less than or equal to x, `false` otherwise. */
  def <=(x: Short): Boolean
  /** Returns `true` if this value is less than or equal to x, `false` otherwise. */
  def <=(x: Char): Boolean
  /** Returns `true` if this value is less than or equal to x, `false` otherwise. */
  def <=(x: Int): Boolean
  /** Returns `true` if this value is less than or equal to x, `false` otherwise. */
  def <=(x: Long): Boolean
  /** Returns `true` if this value is less than or equal to x, `false` otherwise. */
  def <=(x: Float): Boolean
  /** Returns `true` if this value is less than or equal to x, `false` otherwise. */
  def <=(x: Double): Boolean
  /** Returns `true` if this value is greater than x, `false` otherwise. */
  def >(x: Byte): Boolean
  /** Returns `true` if this value is greater than x, `false` otherwise. */
  def >(x: Short): Boolean
  /** Returns `true` if this value is greater than x, `false` otherwise. */
  def >(x: Char): Boolean
  /** Returns `true` if this value is greater than x, `false` otherwise. */
  def >(x: Int): Boolean
  /** Returns `true` if this value is greater than x, `false` otherwise. */
  def >(x: Long): Boolean
  /** Returns `true` if this value is greater than x, `false` otherwise. */
  def >(x: Float): Boolean
  /** Returns `true` if this value is greater than x, `false` otherwise. */
  def >(x: Double): Boolean
  /** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
  def >=(x: Byte): Boolean
  /** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
  def >=(x: Short): Boolean
  /** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
  def >=(x: Char): Boolean
  /** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
  def >=(x: Int): Boolean
  /** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
  def >=(x: Long): Boolean
  /** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
  def >=(x: Float): Boolean
  /** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
  def >=(x: Double): Boolean
  /** Returns the sum of this value and `x`. */
  def +(x: Byte): Double
  /** Returns the sum of this value and `x`. */
  def +(x: Short): Double
  /** Returns the sum of this value and `x`. */
  def +(x: Char): Double
  /** Returns the sum of this value and `x`. */
  def +(x: Int): Double
  /** Returns the sum of this value and `x`. */
  def +(x: Long): Double
  /** Returns the sum of this value and `x`. */
  def +(x: Float): Double
  /** Returns the sum of this value and `x`. */
  def +(x: Double): Double
  /** Returns the difference of this value and `x`. */
  def -(x: Byte): Double
  /** Returns the difference of this value and `x`. */
  def -(x: Short): Double
  /** Returns the difference of this value and `x`. */
  def -(x: Char): Double
  /** Returns the difference of this value and `x`. */
  def -(x: Int): Double
  /** Returns the difference of this value and `x`. */
  def -(x: Long): Double
  /** Returns the difference of this value and `x`. */
  def -(x: Float): Double
  /** Returns the difference of this value and `x`. */
  def -(x: Double): Double
  /** Returns the product of this value and `x`. */
  def *(x: Byte): Double
  /** Returns the product of this value and `x`. */
  def *(x: Short): Double
  /** Returns the product of this value and `x`. */
  def *(x: Char): Double
  /** Returns the product of this value and `x`. */
  def *(x: Int): Double
  /** Returns the product of this value and `x`. */
  def *(x: Long): Double
  /** Returns the product of this value and `x`. */
  def *(x: Float): Double
  /** Returns the product of this value and `x`. */
  def *(x: Double): Double
  /** Returns the quotient of this value and `x`. */
  def /(x: Byte): Double
  /** Returns the quotient of this value and `x`. */
  def /(x: Short): Double
  /** Returns the quotient of this value and `x`. */
  def /(x: Char): Double
  /** Returns the quotient of this value and `x`. */
  def /(x: Int): Double
  /** Returns the quotient of this value and `x`. */
  def /(x: Long): Double
  /** Returns the quotient of this value and `x`. */
  def /(x: Float): Double
  /** Returns the quotient of this value and `x`. */
  def /(x: Double): Double
  /** Returns the remainder of the division of this value by `x`. */
  def %(x: Byte): Double
  /** Returns the remainder of the division of this value by `x`. */
  def %(x: Short): Double
  /** Returns the remainder of the division of this value by `x`. */
  def %(x: Char): Double
  /** Returns the remainder of the division of this value by `x`. */
  def %(x: Int): Double
  /** Returns the remainder of the division of this value by `x`. */
  def %(x: Long): Double
  /** Returns the remainder of the division of this value by `x`. */
  def %(x: Float): Double
  /** Returns the remainder of the division of this value by `x`. */
  def %(x: Double): Double
  // Provide a more specific return type for Scaladoc
  override def getClass(): Class[Double] = ???
}
object Double extends AnyValCompanion {
  /** The smallest positive value greater than 0.0d which is
   * representable as a Double.
   */
  final val MinPositiveValue = java.lang.Double.MIN_VALUE
  /** The not-a-number value of type `Double` (IEEE-754 NaN). Note that
   * `NaN != NaN`; use `java.lang.Double.isNaN` to test for it.
   */
  final val NaN = java.lang.Double.NaN
  /** Positive infinity of type `Double`. */
  final val PositiveInfinity = java.lang.Double.POSITIVE_INFINITY
  /** Negative infinity of type `Double`. */
  final val NegativeInfinity = java.lang.Double.NEGATIVE_INFINITY
  /** The negative number with the greatest (finite) absolute value which is representable
   * by a Double. Note that it differs from [[java.lang.Double.MIN_VALUE]], which
   * is the smallest positive value representable by a Double. In Scala that number
   * is called Double.MinPositiveValue.
   */
  final val MinValue = -java.lang.Double.MAX_VALUE
  /** The largest finite positive number representable as a Double. */
  final val MaxValue = java.lang.Double.MAX_VALUE
  /** Transform a value type into a boxed reference type.
   *
   * Runtime implementation determined by `scala.runtime.BoxesRunTime.boxToDouble`. See [[https://github.com/scala/scala src/library/scala/runtime/BoxesRunTime.java]].
   *
   * @param x the Double to be boxed
   * @return a java.lang.Double offering `x` as its underlying value.
   */
  def box(x: Double): java.lang.Double = ???
  /** Transform a boxed type into a value type. Note that this
   * method is not typesafe: it accepts any Object, but will throw
   * an exception if the argument is not a java.lang.Double.
   *
   * Runtime implementation determined by `scala.runtime.BoxesRunTime.unboxToDouble`. See [[https://github.com/scala/scala src/library/scala/runtime/BoxesRunTime.java]].
   *
   * @param x the java.lang.Double to be unboxed.
   * @throws ClassCastException if the argument is not a java.lang.Double
   * @return the Double resulting from calling doubleValue() on `x`
   */
  def unbox(x: java.lang.Object): Double = ???
  /** The String representation of the scala.Double companion object. */
  override def toString = "object scala.Double"
}
| felixmulder/scala | src/library/scala/Double.scala | Scala | bsd-3-clause | 10,871 |
/*
* Ported from https://github.com/junit-team/junit
*/
package org.junit.internal
object ArrayComparisonFailure

/** Thrown when two arrays differ in an assertion; records the index path
 *  (one index per array dimension, outermost first) of the first element
 *  that did not match.
 */
class ArrayComparisonFailure(message: String, cause: AssertionError, index: Int)
    extends AssertionError(message, cause) {

  // Index path of the mismatch; dimensions are prepended as the failure
  // propagates outward through nested arrays.
  private var fIndices: List[Int] = index :: Nil

  @deprecated("This constructor is not used and will be removed", "0.6.21")
  def this(fMessage: String) =
    this(fMessage, new AssertionError, 0)

  /** Records the index of an enclosing array dimension. */
  def addDimension(index: Int): Unit = {
    fIndices ::= index
  }

  /** Builds the failure message, e.g. `msg arrays first differed at element [1][2]; cause`. */
  override def getMessage(): String = {
    val prefix = if (message == null) "" else message
    val indexPath =
      if (fIndices == null) s"[$index]" // see #3148
      else fIndices.iterator.map(i => s"[$i]").mkString
    s"${prefix}arrays first differed at element $indexPath; ${getCause.getMessage}"
  }

  override def toString(): String = getMessage
}
| scala-js/scala-js | junit-runtime/src/main/scala/org/junit/internal/ArrayComparisonFailure.scala | Scala | apache-2.0 | 907 |
package org.scalatra
import javax.servlet._
import javax.servlet.http._
import org.scalatra.servlet.ServletBase
import org.scalatra.util.RicherString._
import scala.util.control.Exception.catching
object ScalatraServlet {
  import org.scalatra.servlet.ServletApiImplicits._
  /** Request-attribute key under which the computed request path is cached. */
  val RequestPathKey = "org.scalatra.ScalatraServlet.requestPath"
  /** Returns the routing path for `request`: the request URI with the
   *  context path and servlet path stripped. The result is cached as a
   *  request attribute (under [[RequestPathKey]]) so repeated lookups
   *  during one request do not recompute it.
   */
  def requestPath(request: HttpServletRequest): String = {
    require(request != null, "The request can't be null for getting the request path")
    // Number of leading characters contributed by context path + servlet path.
    def startIndex(r: HttpServletRequest) =
      r.getContextPath.blankOption.map(_.length).getOrElse(0) + r.getServletPath.blankOption.map(_.length).getOrElse(0)
    def getRequestPath(r: HttpServletRequest) = {
      // getRequestURI can NPE in some containers; fall back to "/".
      val u = (catching(classOf[NullPointerException]) opt { r.getRequestURI } getOrElse "/")
      requestPath(u, startIndex(r))
    }
    request.get(RequestPathKey) map (_.toString) getOrElse {
      val rp = getRequestPath(request)
      request(RequestPathKey) = rp
      rp
    }
  }
  /** Extracts the routing path from a raw `uri`: decodes it, drops the first
   *  `idx` characters (context + servlet path) and any `;`-delimited path
   *  parameters (e.g. `;jsessionid=...`). A blank result becomes "/".
   */
  def requestPath(uri: String, idx: Int): String = {
    val u1 = UriDecoder.firstStep(uri)
    val u2 = (u1.blankOption map { _.substring(idx) } flatMap (_.blankOption) getOrElse "/")
    val pos = u2.indexOf(';')
    if (pos > -1) u2.substring(0, pos) else u2
  }
}
/**
* An implementation of the Scalatra DSL in a servlet. This is the recommended
* base trait for most Scalatra applications. Use a servlet if:
*
* $ - your Scalatra routes run in a subcontext of your web application.
* $ - you want Scalatra to have complete control of unmatched requests.
* $ - you think you want a filter just for serving static content with the
* default servlet; ScalatraServlet can do this too
* $ - you don't know the difference
*
* @see ScalatraFilter
*/
trait ScalatraServlet
  extends HttpServlet
  with ServletBase
  with Initializable {
  /** Entry point for every HTTP method: delegates the request to `handle`. */
  override def service(request: HttpServletRequest, response: HttpServletResponse): Unit = {
    handle(request, response)
  }
  /**
   * Defines the request path to be matched by routers. The default
   * definition is optimized for `path mapped` servlets (i.e., servlet
   * mapping ends in `/*`). The route should match everything matched by
   * the `/*`. In the event that the request URI equals the servlet path
   * with no trailing slash (e.g., mapping = `/admin/*`, request URI =
   * '/admin'), a '/' is returned.
   *
   * All other servlet mappings likely want to return request.getServletPath.
   * Custom implementations are allowed for unusual cases.
   */
  def requestPath(implicit request: HttpServletRequest): String = ScalatraServlet.requestPath(request)
  /** Base path that routes are mounted under: context path + servlet path.
   *  Requires an initialized servlet and an active request.
   */
  protected def routeBasePath(implicit request: HttpServletRequest): String = {
    require(config != null, "routeBasePath requires the servlet to be initialized")
    require(request != null, "routeBasePath requires an active request to determine the servlet path")
    servletContext.getContextPath + request.getServletPath
  }
  /**
   * Invoked when no route matches. By default, calls `serveStaticResource()`,
   * and if that fails, calls `resourceNotFound()`.
   *
   * This action can be overridden by a notFound block.
   */
  protected var doNotFound: Action = () => {
    serveStaticResource() getOrElse resourceNotFound()
  }
  /**
   * Attempts to find a static resource matching the request path. Override
   * to return None to stop this.
   */
  protected def serveStaticResource()(implicit request: HttpServletRequest, response: HttpServletResponse): Option[Any] = {
    servletContext.resource(request) map { _ =>
      // Let the container's default servlet stream the static resource.
      servletContext.getNamedDispatcher("default").forward(request, response)
    }
  }
  /**
   * Called by default notFound if no routes matched and no static resource
   * could be found.
   */
  protected def resourceNotFound()(implicit request: HttpServletRequest, response: HttpServletResponse): Any = {
    response.setStatus(404)
    if (isDevelopmentMode) {
      // In development, list the registered routes to help diagnose the miss.
      val error = "Requesting \"%s %s\" on servlet \"%s\" but only have: %s"
      response.getWriter println error.format(
        request.getMethod,
        Option(request.getPathInfo) getOrElse "/",
        request.getServletPath,
        routes.entryPoints.mkString("<ul><li>", "</li><li>", "</li></ul>"))
    }
  }
  /** This servlet is configured through a [[ServletConfig]]. */
  type ConfigT = ServletConfig
  override def init(config: ServletConfig): Unit = {
    super.init(config)
    initialize(config) // see Initializable.initialize for why
  }
  override def initialize(config: ServletConfig): Unit = {
    super.initialize(config)
  }
  /** Runs the `shutdown` lifecycle hook before the servlet is destroyed. */
  override def destroy(): Unit = {
    shutdown()
    super.destroy()
  }
}
| dozed/scalatra | core/src/main/scala/org/scalatra/ScalatraServlet.scala | Scala | bsd-2-clause | 4,623 |
import org.apache.spark.sql.functions._
import org.scalatest._
class MotTests extends FlatSpec with Matchers {
import MotUdfs._
  //val parquetData = "/Users/DTAYLOR/Data/mot/parquet/test_results_2011.parquet"
  // NOTE(review): machine-specific absolute paths — point these at your own
  // parquet extract and results directory before running these specs.
  val parquetData = "/Users/DTAYLOR/Data/mot/parquet/test_results.parquet"
  val resultsPath = "/Users/DTAYLOR/Development/mot-data-in-spark/vis/results/"
it should "calculate covariance and correlation between mileage and age" in {
//https://databricks.com/blog/2015/06/02/statistical-and-mathematical-functions-with-dataframes-in-spark.html
val motTests = Spark.sqlContext.read.parquet(parquetData).toDF()
motTests.registerTempTable("mot_tests")
val df = motTests
.filter("testClass like '4%'") // Cars, not buses, bikes etc
.filter("testType = 'N'") // only interested in the first test
.withColumn("pass", passCodeToInt(col("testResult")))
println(s"cov(testMileage, age) = ${df.stat.cov("testMileage", "age")}")
println(s"corr(testMileage, age) = ${df.stat.corr("testMileage", "age")}")
}
it should "calculate covariance and correlation for normal cars" in {
//https://databricks.com/blog/2015/06/02/statistical-and-mathematical-functions-with-dataframes-in-spark.html
val motTests = Spark.sqlContext.read.parquet(parquetData).toDF()
motTests.registerTempTable("mot_tests")
val df = motTests
.filter("testClass like '4%'") // Cars, not buses, bikes etc
.filter("testType = 'N'") // only interested in the first test
.filter("age <= 20")
.filter("testMileage <= 250000")
.withColumn("pass", passCodeToInt(col("testResult")))
println("For cars < 20 years and < 250,000 miles")
println(s"cov(testMileage, pass) = ${df.stat.cov("testMileage", "pass")}")
println(s"corr(testMileage, pass) = ${df.stat.corr("testMileage", "pass")}")
println(s"cov(age, pass) = ${df.stat.cov("age", "pass")}")
println(s"corr(age, pass) = ${df.stat.corr("age", "pass")}")
println(s"cov(age, age) = ${df.stat.cov("age", "age")}")
println(s"corr(age, age) = ${df.stat.corr("age", "age")}")
}
it should "calculate covariance and correlation over all the data" in {
//https://databricks.com/blog/2015/06/02/statistical-and-mathematical-functions-with-dataframes-in-spark.html
val motTests = Spark.sqlContext.read.parquet(parquetData).toDF()
motTests.registerTempTable("mot_tests")
val df = motTests
.filter("testClass like '4%'") // Cars, not buses, bikes etc
.filter("testType = 'N'") // only interested in the first test
.filter("firstUseDate <> 'NULL' and date <> 'NULL'")
.withColumn("pass", passCodeToInt(col("testResult")))
println("For all data")
println(s"cov(testMileage, pass) = ${df.stat.cov("testMileage", "pass")}")
println(s"corr(testMileage, pass) = ${df.stat.corr("testMileage", "pass")}")
println(s"cov(age, pass) = ${df.stat.cov("age", "pass")}")
println(s"corr(age, pass) = ${df.stat.corr("age", "pass")}")
println(s"cov(age, age) = ${df.stat.cov("age", "age")}")
println(s"corr(age, age) = ${df.stat.corr("age", "age")}")
}
it should "describe the dataset" in {
// This is very slow!
Spark
.sqlContext
.read
.parquet(parquetData)
.toDF()
.describe()
.show()
}
it should "calculate pass rate by mileage band and age" in {
val motTests = Spark.sqlContext.read.parquet(parquetData).toDF()
motTests.registerTempTable("mot_tests")
val results = motTests
.filter("testClass like '4%'") // Cars, not buses, bikes etc
.filter("testType = 'N'") // only interested in the first test
.filter("firstUseDate <> 'NULL' and date <> 'NULL'")
.withColumn("passCount", passCodeToInt(col("testResult")))
.withColumn("mileageBand", mileageToBand(col("testMileage")))
.groupBy("age", "mileageBand")
.agg(count("*") as "cnt", sum("passCount") as "passCount")
.selectExpr("age", "mileageBand", "cnt", "passCount * 100 / cnt as rate")
.cache()
results
.sort(asc("age"), asc("mileageBand"))
.show(1000)
val resultMap = results.map({
x => RateByAgeAndMileage(
x.getInt(0),
x.getDouble(1).toLong,
x.getLong(2),
x.getDouble(3))
})
.collect()
JsonWriter.writeToFile(resultMap, resultsPath + "passRateByAgeAndMileageBand.json")
}
it should "calculate pass rate by mileage band" in {
val motTests = Spark.sqlContext.read.parquet(parquetData).toDF()
motTests.registerTempTable("mot_tests")
val results = motTests
.filter("testClass like '4%'") // Cars, not buses, bikes etc
.filter("testType = 'N'") // only interested in the first test
.filter("firstUseDate <> 'NULL' and date <> 'NULL'")
.withColumn("passCount", passCodeToInt(col("testResult")))
.withColumn("mileageBand", mileageToBand(col("testMileage")))
.groupBy("mileageBand")
.agg(count("*") as "cnt", sum("passCount") as "passCount")
.selectExpr("mileageBand", "cnt", "passCount * 100 / cnt as rate")
.cache()
results
.sort(asc("mileageBand"))
.show(1000)
val resultMap = results.map({
x => RateByMileage(
x.getDouble(0).toLong,
x.getLong(1),
x.getDouble(2))
})
.collect()
JsonWriter.writeToFile(resultMap, resultsPath + "passRateByMileageBand.json")
}
it should "calculate pass rate by age band" in {
val motTests = Spark.sqlContext.read.parquet(parquetData).toDF()
motTests.registerTempTable("mot_tests")
val results = motTests
.filter("testClass like '4%'") // Cars, not buses, bikes etc
.filter("testType = 'N'") // only interested in the first test
.filter("firstUseDate <> 'NULL' and date <> 'NULL'")
.withColumn("passCount", passCodeToInt(col("testResult")))
.withColumn("age", testDateAndVehicleFirstRegDateToAge(col("date"), col("firstUseDate")))
.groupBy("age")
.agg(count("*") as "cnt", sum("passCount") as "passCount")
.selectExpr("age", "cnt", "passCount * 100 / cnt as rate")
.cache()
results
.sort(asc("age"))
.show(101)
val resultMap = results.map({
x => RateByAge(
x.getInt(0),
x.getLong(1),
x.getDouble(2))
})
.collect()
JsonWriter.writeToFile(resultMap, resultsPath + "passRateByAgeBand.json")
}
it should "calculate pass rate by age band and make" in {
val motTests = Spark.sqlContext.read.parquet(parquetData).toDF()
motTests.registerTempTable("mot_tests")
val results =
motTests
.filter("testClass like '4%'") // Cars, not buses, bikes etc
.filter("testType = 'N'") // only interested in the first test
.filter("firstUseDate <> 'NULL' and date <> 'NULL'")
.withColumn("passCount", passCodeToInt(col("testResult")))
.withColumn("age", testDateAndVehicleFirstRegDateToAge(col("date"), col("firstUseDate")))
.groupBy("age", "make")
.agg(count("*") as "cnt", sum("passCount") as "passCount")
.selectExpr("make", "age", "cnt", "passCount * 100 / cnt as rate")
.filter("cnt >= 1000")
.rdd
val resultMap =
results
.map({
x => (
x.getString(0),
x.getInt(1),
x.getLong(2),
x.getDouble(3)
)
})
val mappedResults =
resultMap
.groupBy { case (make, age, cnt, rate) => make }
.map { case (make, stuff) =>
AgeAndMakeResults(make,
stuff
.map { case (_, age, cnt, rate) => RateByAge(age, cnt, rate) }
.filter(x => x.age >= 3 && x.age <= 20)
.toSeq
)
}
.filter(_.series.length >= 18)
.collect()
JsonWriter.writeToFile(mappedResults, resultsPath + "passRateByAgeBandAndMake.json")
}
it should "prepare data for a decision tree to classify probability classes" in {
val motTests = Spark.sqlContext.read.parquet(parquetData).toDF()
motTests.registerTempTable("mot_tests")
val keyFields = Seq("make", "mileageBand", "cylinderCapacity", "age", "isPetrol", "isDiesel")
val data =
motTests
.filter("testClass like '4%'") // Cars, not buses, bikes etc
.filter("firstUseDate <> 'NULL' and date <> 'NULL'") // Must be able to calculate age
.filter("testMileage > 0") // ignore tests where no mileage reported
.filter("testType = 'N'") // only interested in the first test
.withColumn("testPassed", passCodeToInt(col("testResult")))
.withColumn("age", testDateAndVehicleFirstRegDateToAge(col("date"), col("firstUseDate")))
.withColumn("isPetrol", valueToOneOrZero(lit("P"), col("fuelType")))
.withColumn("isDiesel", valueToOneOrZero(lit("D"), col("fuelType")))
.withColumn("mileageBand", mileageToBand(col("testMileage")))
.groupBy(keyFields.map(col): _*)
.agg(count("*") as "cnt", sum("testPassed") as "passCount")
.filter("cnt > 10")
.withColumn("passRateCategory", passRateToCategory(col("cnt"), col("passCount")))
.selectExpr(keyFields ++ Seq("cnt", "passCount * 100 / cnt as passRate", "passRateCategory"):_*)
.cache()
data.printSchema()
data
.sort(desc("cnt"))
.show()
data
.sort(asc("passRate"))
.show()
}
it should "count tests by make and model" in {
val motTests = Spark.sqlContext.read.parquet(parquetData).toDF()
motTests.registerTempTable("mot_tests")
val mm = motTests
.filter("testClass like '4%'") // Cars, not buses, bikes etc
.filter("testType = 'N'") // only interested in the first test
.withColumn("shortModel", modelToShortModel(col("model")))
.groupBy("make", "shortModel")
.agg(count("*") as "cnt")
.selectExpr("make", "shortModel", "cnt")
.cache()
val results = mm.map(x => CountsByMakeAndModel(x.getString(0).toLowerCase, x.getString(1).toLowerCase, x.getLong(2))).collect()
val tree = results
.groupBy(_.make)
.map({case (key : String, values : Array[CountsByMakeAndModel]) =>
MakeModelTreeItem(key,
values.map(_.count).sum,
values.map(x => CountsByModelForTree(x.model, x.count)))
})
JsonWriter.writeToFile(tree, resultsPath + "motTestsByMakeAndModel.json")
println(results)
}
it should "count tests by colour" in {
  // NOTE(review): this body is identical to "find all the car colours" further down (same SQL,
  // same output file) — one of the two tests is likely a copy-paste leftover; confirm and remove one.
  val motTests = Spark.sqlContext.read.parquet(parquetData).toDF()
  motTests.registerTempTable("mot_tests")
  // Count first ('N') tests of class-4 vehicles per colour using plain SQL.
  val colours = Spark.sqlContext
    .sql("select colour as colour, count(*) as cnt from mot_tests where testClass like '4%' and testType = 'N' group by colour")
  val results = colours.map(x => CountsByColour(x.getString(0).toLowerCase, x.getLong(1))).collect()
  JsonWriter.writeToFile(results, resultsPath + "motTestsByVehicleColour.json")
  println(results) // prints the Array's default toString, not its contents
}
it should "count tests by make" in {
  // Load the parquet data and expose it to Spark SQL.
  val motTests = Spark.sqlContext.read.parquet(parquetData).toDF()
  motTests.registerTempTable("mot_tests")
  // Count first ('N') tests of class-4 vehicles per make.
  // NOTE(review): the local is named `colours`, probably copied from the colour test — rename to `makes` for clarity.
  val colours = Spark.sqlContext
    .sql("select make, count(*) as cnt from mot_tests where testClass like '4%' and testType = 'N' group by make")
  val results = colours.map(x => CountsByMake(x.getString(0).toLowerCase, x.getLong(1))).collect()
  JsonWriter.writeToFile(results, resultsPath + "motTestsByMake.json")
  println(results) // prints the Array's default toString, not its contents
}
it should "calculate pass rate by make and model" in {
  val motTests = Spark.sqlContext.read.parquet(parquetData).toDF()
  motTests.registerTempTable("mot_tests")
  // passCodeToInt maps a test result code to an int that is summed as "passCount",
  // so passCount * 100 / cnt below is the percentage pass rate per make/model.
  val results = motTests
    .filter("testClass like '4%'") // Cars, not buses, bikes etc
    .filter("testType = 'N'") // only interested in the first test
    .withColumn("passCount", passCodeToInt(col("testResult")))
    .withColumn("shortModel", modelToShortModel(col("model")))
    .groupBy("make", "shortModel")
    .agg(count("*") as "cnt", sum("passCount") as "passCount")
    .filter("cnt > 1000") // drop rare make/model combinations to avoid noisy rates
    .selectExpr("make", "shortModel", "cnt", "passCount * 100 / cnt as rate")
    .cache() // reused three times below: two shows and one collect
  println("Best:")
  results
    .sort(desc("rate"))
    .show(10)
  println("Worst:")
  results
    .sort(asc("rate"))
    .show(10)
  // Sort ascending then reverse so the best rate gets index 0; zipWithIndex supplies the rank
  // that is serialised into the JSON output.
  val resultMap =
    results
      .sort("rate")
      .map(x => (x.getString(0), x.getString(1), x.getDouble(3)))
      .collect
      .reverse
      .toSeq
      .zipWithIndex
      .map({case ((ma, mo, r), i) => RateByMakeAndModel(ma, mo, r, i)} )
  JsonWriter.writeToFile(resultMap, resultsPath + "passRateByMakeAndModel.json")
}
it should "calculate pass rate by make" in {
  val motTests = Spark.sqlContext.read.parquet(parquetData).toDF()
  motTests.registerTempTable("mot_tests")
  // Same pipeline as the make-and-model pass-rate test, but aggregated by make only.
  val results = motTests
    .filter("testClass like '4%'") // Cars, not buses, bikes etc
    .filter("testType = 'N'") // only interested in the first test
    .withColumn("passCount", passCodeToInt(col("testResult")))
    .groupBy("make")
    .agg(count("*") as "cnt", sum("passCount") as "passCount")
    .filter("cnt > 1000") // drop rare makes to avoid noisy rates
    .selectExpr("make", "cnt", "passCount * 100 / cnt as rate")
    .cache() // reused three times below: two shows and one collect
  println("Best:")
  results
    .sort(desc("rate"))
    .show(10)
  println("Worst:")
  results
    .sort(asc("rate"))
    .show(10)
  // Sort ascending then reverse so the best rate gets index 0; zipWithIndex supplies the rank.
  val resultMap =
    results
      .sort("rate")
      .map(x => (x.getString(0), x.getDouble(2)))
      .collect
      .reverse
      .toSeq
      .zipWithIndex
      .map({case ((m, r), i) => RateByMake(m, r, i)} )
  JsonWriter.writeToFile(resultMap, resultsPath + "passRateByMake.json")
}
it should "calculate overall pass rate" in {
  val motTests = Spark.sqlContext.read.parquet(parquetData).toDF()
  motTests.registerTempTable("mot_tests")
  // Single global aggregate: percentage of first tests passed across all class-4 vehicles.
  val results = motTests
    .filter("testClass like '4%'") // Cars, not buses, bikes etc
    .filter("testType = 'N'") // only interested in the first test
    .filter("firstUseDate <> 'NULL' and date <> 'NULL'") // both dates must be present
    .withColumn("passCount", passCodeToInt(col("testResult")))
    .groupBy() // empty groupBy => one global group
    .agg(count("*") as "cnt", sum("passCount") as "passCount")
    .selectExpr("passCount * 100 / cnt as rate")
    .cache()
  // Only one row is produced; print its single "rate" column.
  results.map(x => x.get(0)).collect().foreach(println)
}
it should "find all the car colours" in {
  // NOTE(review): identical to "count tests by colour" above — same SQL and same output file.
  // Running both just overwrites motTestsByVehicleColour.json twice; confirm and delete one.
  val motTests = Spark.sqlContext.read.parquet(parquetData).toDF()
  motTests.registerTempTable("mot_tests")
  val colours = Spark.sqlContext
    .sql("select colour as colour, count(*) as cnt from mot_tests where testClass like '4%' and testType = 'N' group by colour")
  val results = colours.map(x => CountsByColour(x.getString(0).toLowerCase, x.getLong(1))).collect()
  JsonWriter.writeToFile(results, resultsPath + "motTestsByVehicleColour.json")
  println(results) // prints the Array's default toString, not its contents
}
}
| DanteLore/mot-data-in-spark | src/test/scala/MotTests.scala | Scala | mit | 14,974 |
package extracells.block
import java.util.Random
import javax.annotation.Nullable
import appeng.api.config.SecurityPermissions
import appeng.api.implementations.items.IAEWrench
import appeng.api.util.AEPartLocation
import extracells.api.IECTileEntity
import extracells.network.GuiHandler
import extracells.tileentity.{IListenerTile, TileEntityFluidFiller, TileEntityFluidInterface}
import extracells.util.{PermissionUtil, TileUtil, WrenchUtil}
import net.minecraft.block.material.Material
import net.minecraft.block.state.IBlockState
import net.minecraft.entity.EntityLivingBase
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.item.ItemStack
import net.minecraft.nbt.NBTTagCompound
import net.minecraft.tileentity.TileEntity
import net.minecraft.util.math.{BlockPos, RayTraceResult, Vec3d}
import net.minecraft.util.{EnumFacing, EnumHand}
import net.minecraft.world.World
/**
 * The Fluid Filler block: a singleton block (Minecraft blocks are singletons) backed by a
 * [[TileEntityFluidFiller]] tile entity. Handles GUI opening, wrench removal, owner
 * registration on placement and listener cleanup on removal.
 */
object BlockFluidFiller extends BlockEC(Material.IRON, 2.0F, 10.0F) {
  //Only needed because BlockEnum is in java. not in scala
  val instance = this
  // Factory for the block's tile entity; one per placed block.
  def createNewTileEntity(world: World, meta: Int): TileEntity = {
    return new TileEntityFluidFiller
  }
  /**
   * Right-click handler: checks AE2 BUILD permission, supports sneak+wrench removal,
   * otherwise opens the block's GUI. Returns true when the click was handled.
   */
  override def onBlockActivated(world: World, pos: BlockPos, state: IBlockState, player: EntityPlayer, hand: EnumHand, side: EnumFacing, hitX: Float, hitY: Float, hitZ: Float): Boolean = {
    val x: Int = pos.getX
    val y: Int = pos.getY
    val z: Int = pos.getZ
    val current = player.getHeldItem(hand)
    // Server-side only from here; the client returns true so the hand swings.
    if (world.isRemote) return true
    val rand: Random = new Random
    val tile: TileEntity = world.getTileEntity(pos)
    // Deny interaction when the player lacks AE2 BUILD permission on this grid node.
    if (tile.isInstanceOf[IECTileEntity]) if (!PermissionUtil.hasPermission(player, SecurityPermissions.BUILD, (tile.asInstanceOf[IECTileEntity]).getGridNode(AEPartLocation.INTERNAL))) return false
    if (player.isSneaking) {
      // Sneak + wrench: dismantle the block instead of opening the GUI.
      val rayTraceResult = new RayTraceResult(new Vec3d(hitX, hitY, hitZ), side, pos)
      val wrenchHandler = WrenchUtil.getHandler(current, player, rayTraceResult, hand)
      if (wrenchHandler != null) {
        val block: ItemStack = new ItemStack(this, 1, 0)
        // NOTE(review): `block` (including the filter NBT written below) is never actually
        // dropped — dropBlockAsItem drops the default item, so the saved filter is lost.
        // Also, this is the FluidFiller block but the tile is checked against
        // TileEntityFluidInterface, which looks like a copy-paste from the interface block.
        // Confirm against the interface block's implementation.
        if (tile != null && tile.isInstanceOf[TileEntityFluidInterface]) block.setTagCompound(tile.asInstanceOf[TileEntityFluidInterface].writeFilter(new NBTTagCompound))
        dropBlockAsItem(world, pos, state, 1)
        world.setBlockToAir(pos)
        wrenchHandler.wrenchUsed(current, player, rayTraceResult, hand)
        return true
      }
    }
    // Default action: open GUI id 0 for this block.
    GuiHandler.launchGui(0, player, world, x, y, z)
    true
  }
  /**
   * Placement hook: records the placing entity as the AE2 owner and registers the tile
   * as a listener (server side only).
   */
  override def onBlockPlacedBy(world: World, pos: BlockPos, state: IBlockState, entity: EntityLivingBase, stack: ItemStack) {
    super.onBlockPlacedBy(world, pos, state, entity, stack)
    if (world.isRemote) return
    TileUtil.setOwner(world, pos, entity)
    val tile: TileEntity = world.getTileEntity(pos)
    if (tile != null) {
      if (tile.isInstanceOf[IListenerTile]) (tile.asInstanceOf[IListenerTile]).registerListener()
    }
  }
  /**
   * Removal hook: on the server, tears down the tile (TileUtil.destroy) and unregisters
   * any listener before delegating to the superclass.
   */
  override def breakBlock(world: World, pos: BlockPos, state: IBlockState) {
    if (world.isRemote) {
      super.breakBlock(world, pos, state)
      return
    }
    TileUtil.destroy(world, pos)
    val tile: TileEntity = world.getTileEntity(pos)
    if (tile != null) {
      if (tile.isInstanceOf[IListenerTile]) (tile.asInstanceOf[IListenerTile]).removeListener()
    }
    super.breakBlock(world, pos, state)
  }
} | ExtraCells/ExtraCells2 | src/main/scala/extracells/block/BlockFluidFiller.scala | Scala | mit | 3,369 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.container
import akka.event.Logging.ErrorLevel
import whisk.common.{ Logging, LoggingMarkers, SimpleExec, TransactionId }
/**
 * Thin synchronous wrapper around the `docker-runc` binary. Each call shells out,
 * records timing/outcome markers on the implicit transaction id and returns the
 * process exit code together with the trimmed output.
 */
object RuncUtils {

  /** Lists the containers known to runc. */
  def list()(implicit transid: TransactionId, logging: Logging): (Int, String) =
    runRuncCmd(false, Seq("list"))

  /** Pauses the container with the given identifier. */
  def pause(id: ContainerIdentifier)(implicit transid: TransactionId, logging: Logging): (Int, String) =
    runRuncCmd(false, Seq("pause", id.toString))

  /** Resumes the container with the given identifier. */
  def resume(id: ContainerIdentifier)(implicit transid: TransactionId, logging: Logging): (Int, String) =
    runRuncCmd(false, Seq("resume", id.toString))

  /**
   * Synchronously runs the given runc subcommand.
   *
   * @param skipLogError when true, a non-zero exit is recorded without the stdout/stderr detail
   * @param args the runc subcommand and its arguments; args(0) is used as the logging marker
   * @return (exit code, trimmed output); (-1, message) when the process could not be run at all
   */
  def runRuncCmd(skipLogError: Boolean, args: Seq[String])(implicit transid: TransactionId, logging: Logging): (Int, String) = {
    val start = transid.started(this, LoggingMarkers.INVOKER_RUNC_CMD(args(0)))
    try {
      val (stdout, stderr, exitCode) = SimpleExec.syncRunCmd(getRuncCmd() ++ args)
      if (exitCode == 0) {
        transid.finished(this, start)
        (exitCode, stdout.trim)
      } else {
        // Failure: optionally attach the captured output to the transaction log.
        if (skipLogError) {
          transid.failed(this, start)
        } else {
          transid.failed(this, start, s"stdout:\\n$stdout\\nstderr:\\n$stderr", ErrorLevel)
        }
        (exitCode, (stdout + stderr).trim)
      }
    } catch {
      // Deliberately broad: any failure to spawn/run the process maps to exit code -1.
      case t: Throwable =>
        val errorMsg = "error: " + t.getMessage
        transid.failed(this, start, errorMsg, ErrorLevel)
        (-1, errorMsg)
    }
  }

  /** A runc invocation succeeded iff its exit code is zero. */
  def isSuccessful(result : (Int, String)) : Boolean = result._1 == 0

  /*
   * Base command; any global flags would be appended here.
   */
  private def getRuncCmd(): Seq[String] = Seq("/usr/bin/docker-runc")
}
| prccaraujo/openwhisk | core/invoker/src/main/scala/whisk/core/container/RuncUtils.scala | Scala | apache-2.0 | 2,869 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.security
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.security.Credentials
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
/**
* Manages all the registered HadoopDelegationTokenProviders and offer APIs for other modules to
* obtain delegation tokens and their renewal time. By default [[HadoopFSDelegationTokenProvider]],
* [[HiveDelegationTokenProvider]] and [[HBaseDelegationTokenProvider]] will be loaded in if not
* explicitly disabled.
*
* Also, each HadoopDelegationTokenProvider is controlled by
* spark.security.credentials.{service}.enabled, and will not be loaded if this config is set to
* false. For example, Hive's delegation token provider [[HiveDelegationTokenProvider]] can be
* enabled/disabled by the configuration spark.security.credentials.hive.enabled.
*
* @param sparkConf Spark configuration
* @param hadoopConf Hadoop configuration
* @param fileSystems Delegation tokens will be fetched for these Hadoop filesystems.
*/
private[spark] class HadoopDelegationTokenManager(
    sparkConf: SparkConf,
    hadoopConf: Configuration,
    fileSystems: Configuration => Set[FileSystem])
  extends Logging {

  // Old YARN-era config key patterns, still honoured (with a warning) for backwards compatibility.
  private val deprecatedProviderEnabledConfigs = List(
    "spark.yarn.security.tokens.%s.enabled",
    "spark.yarn.security.credentials.%s.enabled")
  // Current config key pattern controlling whether a provider is loaded.
  private val providerEnabledConfig = "spark.security.credentials.%s.enabled"

  // Maintain all the registered delegation token providers
  private val delegationTokenProviders = getDelegationTokenProviders
  logDebug(s"Using the following delegation token providers: " +
    s"${delegationTokenProviders.keys.mkString(", ")}.")

  /** Construct a [[HadoopDelegationTokenManager]] for the default Hadoop filesystem */
  def this(sparkConf: SparkConf, hadoopConf: Configuration) = {
    this(
      sparkConf,
      hadoopConf,
      hadoopConf => Set(FileSystem.get(hadoopConf).getHomeDirectory.getFileSystem(hadoopConf)))
  }

  // Instantiates the built-in providers (HadoopFS, Hive, HBase) and keeps only those
  // not explicitly disabled via spark.security.credentials.{service}.enabled.
  private def getDelegationTokenProviders: Map[String, HadoopDelegationTokenProvider] = {
    val providers = List(new HadoopFSDelegationTokenProvider(fileSystems),
      new HiveDelegationTokenProvider,
      new HBaseDelegationTokenProvider)

    // Filter out providers for which spark.security.credentials.{service}.enabled is false.
    providers
      .filter { p => isServiceEnabled(p.serviceName) }
      .map { p => (p.serviceName, p) }
      .toMap
  }

  /**
   * Whether the provider for the given service should be loaded. The current key
   * (spark.security.credentials.{service}.enabled) wins when set; otherwise the
   * deprecated YARN-era keys are consulted, defaulting to enabled when none is set.
   */
  def isServiceEnabled(serviceName: String): Boolean = {
    val key = providerEnabledConfig.format(serviceName)

    // Warn (but still honour) any deprecated keys present in the configuration.
    deprecatedProviderEnabledConfigs.foreach { pattern =>
      val deprecatedKey = pattern.format(serviceName)
      if (sparkConf.contains(deprecatedKey)) {
        logWarning(s"${deprecatedKey} is deprecated.  Please use ${key} instead.")
      }
    }

    // Enabled under the deprecated scheme only if no deprecated key disables it
    // (an unset deprecated key counts as enabled).
    val isEnabledDeprecated = deprecatedProviderEnabledConfigs.forall { pattern =>
      sparkConf
        .getOption(pattern.format(serviceName))
        .map(_.toBoolean)
        .getOrElse(true)
    }

    sparkConf
      .getOption(key)
      .map(_.toBoolean)
      .getOrElse(isEnabledDeprecated)
  }

  /**
   * Get the delegation token provider for the specified service.
   */
  def getServiceDelegationTokenProvider(service: String): Option[HadoopDelegationTokenProvider] = {
    delegationTokenProviders.get(service)
  }

  /**
   * Writes delegation tokens to creds.  Delegation tokens are fetched from all registered
   * providers.
   *
   * @return Time after which the fetched delegation tokens should be renewed.
   */
  def obtainDelegationTokens(
      hadoopConf: Configuration,
      creds: Credentials): Long = {
    delegationTokenProviders.values.flatMap { provider =>
      if (provider.delegationTokensRequired(hadoopConf)) {
        provider.obtainDelegationTokens(hadoopConf, sparkConf, creds)
      } else {
        logDebug(s"Service ${provider.serviceName} does not require a token." +
          s" Check your configuration to see if security is disabled or not.")
        None
      }
    // The earliest renewal time across all providers bounds the overall renewal time.
    }.foldLeft(Long.MaxValue)(math.min)
  }
}
| shubhamchopra/spark | core/src/main/scala/org/apache/spark/deploy/security/HadoopDelegationTokenManager.scala | Scala | apache-2.0 | 4,939 |
/*
* Copyright 2015 Webtrends (http://www.webtrends.com)
*
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webtrends.service.sockorest
import akka.util.Timeout
import com.webtrends.harness.command.{Command, CommandBean, CommandException, CommandResponse}
import com.webtrends.harness.component.ComponentHelper
import com.webtrends.harness.component.socko.route.SockoPost
import com.webtrends.service.{Person, PersonService}
import org.mashupbots.socko.events.HttpResponseStatus
import scala.concurrent.duration._
import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success}
/**
 * Socko REST "create person" command. Demonstrates custom (un)marshalling:
 * the request body is a plain "name:age" string rather than JSON, and the
 * response is rendered as a custom text representation.
 */
class SCreate extends Command
  with SockoPost
  with ComponentHelper {
  implicit val executionContext = context.dispatcher
  implicit val timeout = Timeout(2 seconds)

  // Below is a custom unmarshaller if you want to use it differently
  // The Create command will use a custom unmarshaller and mimetype
  // and the Update command will use the default unmarshaller using JSON
  // Expects the raw body to be "name:age".
  // NOTE(review): throws MatchError if the body has no/extra ':' separators and
  // NumberFormatException if age is not an int; the contentType parameter is ignored.
  override def unmarshall[T <: AnyRef : Manifest](obj: Array[Byte], contentType: String = "application/json"): Option[T] = {
    val Array(name, age) = (new String(obj)).split(":")
    Some(Person(name, Integer.parseInt(age)).asInstanceOf[T])
  }

  // Below is a custom marshaller if you want to use it differently
  // The Create command will use a custom marshaller and mimetype
  // and the Update command will use the default marshaller
  // NOTE(review): respType is ignored — the output is always the plain-text form below.
  override def marshallObject(obj: Any, respType:String="json"): Array[Byte] = {
    val person = obj.asInstanceOf[Person]
    s"Socko Person: ${person.name}, ${person.age}".getBytes
  }

  // Creation endpoints respond 201 instead of the default 200.
  override def responseStatusCode = HttpResponseStatus.CREATED

  override def path: String = "/person"

  /**
   * Name of the command that will be used for the actor name
   *
   * @return
   */
  override def commandName: String = SCreate.CommandName

  /**
   * The primary entry point for the command, the actor for this command
   * will ignore all other messaging and only execute through this
   *
   * Looks up the memcache component and writes the posted Person into the cache,
   * completing the promise with the person (and custom mime type) on success.
   *
   * @return
   */
  def execute[T](bean: Option[CommandBean]): Future[CommandResponse[T]] = {
    val p = Promise[CommandResponse[T]]
    bean match {
      case Some(b) =>
        getComponent("wookiee-cache-memcache") onComplete {
          case Success(actor) =>
            // If we were doing a real API we might want to check the cache to see if it
            // exists first and if it does then throw some sort of exception, but this is just an example
            val person = b(CommandBean.KeyEntity).asInstanceOf[Person]
            person.writeInCache(actor) onComplete {
              case Success(s) => p success CommandResponse[T](Some(person.asInstanceOf[T]), PersonService.PersonMimeType)
              case Failure(f) => p failure f
            }
          case Failure(f) => p failure f
        }
      case None => p failure new CommandException("SCreate", "Bean not set")
    }
    p.future
  }
}
object SCreate {
  /** Actor/command name under which this command is registered. */
  def CommandName: String = "SCreate"
}
package org.jetbrains.plugins.scala
package lang
package surroundWith
package surrounders
package expression
import com.intellij.psi.PsiElement
import com.intellij.lang.ASTNode
import com.intellij.openapi.util.TextRange
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElementImpl
import lang.psi.api.expr._
import lang.psi.api.base.patterns._
import org.jetbrains.plugins.scala.lang.parser._
/**
* @author Alexander Podkhalyuzin
* Date: 28.04.2008
*/
/**
 * Surrounds the selected expression with `try { ... } finally a`.
 * The template is generated with a dummy catch section which is stripped again in
 * [[getSurroundSelectionRange]], leaving the caret where the finally body goes.
 */
class ScalaWithTryFinallySurrounder extends ScalaExpressionSurrounder {
  override def getTemplateAsString(elements: Array[PsiElement]): String =
    "try {\n" + super.getTemplateAsString(elements) + "\n}\nfinally a"

  override def getTemplateDescription = "try / finally"

  override def getSurroundSelectionRange(withTryCatchNode: ASTNode): TextRange =
    withTryCatchNode.getPsi match {
      case parenthesised: ScParenthesisedExpr =>
        parenthesised.expr match {
          case Some(inner) => removeTrailingClauseAndCaret(inner)
          case None        => parenthesised.getTextRange
        }
      case other => removeTrailingClauseAndCaret(other)
    }

  /** Strips the generated trailing clause from the try statement and returns the caret position. */
  private def removeTrailingClauseAndCaret(element: PsiElement): TextRange = {
    val tryStmt = element.asInstanceOf[ScTryStmt]
    val trailingClause = tryStmt.getNode.getLastChildNode.getLastChildNode.getPsi
    val caretOffset = trailingClause.getTextRange.getStartOffset
    tryStmt.getNode.removeChild(trailingClause.getNode)
    new TextRange(caretOffset, caretOffset)
  }
}
package ucesoft.cbm.expansion
import ucesoft.cbm.ChipID
import ucesoft.cbm.cpu.Memory
import ucesoft.cbm.Clock
import ucesoft.cbm.ClockEvent
import java.io.{BufferedInputStream, File, FileInputStream, ObjectInputStream, ObjectOutputStream}
import ucesoft.cbm.Log
/**
 * Emulation of the Commodore RAM Expansion Unit (REU) cartridge family.
 * The sizes below are in KB; [[getREU]] builds the actual expansion-port device,
 * optionally pre-loading the expansion RAM from a file.
 */
object REU {
  // Supported expansion sizes in KB (constant names reflect the REU model or total size).
  val REU_1700 = 128
  val REU_1750 = 512
  val REU_1764 = 256
  val REU_1M = 1024
  val REU_2M = 2048
  val REU_4M = 4096
  val REU_8M = 8192
  val REU_16M = 16384

  // File name attached when a 16M REU image is in use; set/read by callers elsewhere.
  var attached16MFileName : String = null

  /**
   * Factory for an REU of the given size.
   *
   * @param size   RAM size in KB (one of the constants above)
   * @param mem    main C64 memory, used as DMA source/target
   * @param setDMA callback raising/releasing the CPU DMA line
   * @param setIRQ callback raising/releasing the IRQ line
   * @param file   optional image to pre-load into expansion RAM
   */
  def getREU(size:Int,mem:Memory,setDMA: (Boolean) => Unit,setIRQ: (Boolean) => Unit,file:Option[File]) : ExpansionPort = {
    val reu = new REUImpl(size,mem,setDMA,setIRQ)
    file match {
      case Some(f) =>
        reu.loadFrom(f)
      case None =>
    }
    reu
  }

  /**
   * The REU proper: implements the REC register set mapped at $DF00-$DF1F and the
   * four DMA operations (C64->REU, REU->C64, swap, verify), scheduled one byte per
   * clock cycle on the system clock.
   */
  private class REUImpl(size:Int,
                        mem:Memory,
                        setDMA: (Boolean) => Unit,
                        setIRQ: (Boolean) => Unit) extends ExpansionPort {
    val TYPE : ExpansionPortType.Value = ExpansionPortType.REU
    override val name = "REU_" + size
    override val componentID = "REU"
    val EXROM = true
    val GAME = true
    val ROML = null
    val ROMH = null
    // The 1764 (256KB) model: only 4 of its 8 addressable banks are populated (see writeToREU/readFromREU).
    final private[this] val IS_256 = size == REU.REU_1764
    // Jumper bit reported in the status register: set for every model except the 128KB 1700.
    private[this] val J1 = if (size != REU.REU_1700) 1 << 4 else 0
    private[this] val clk = Clock.systemClock
    // Expansion RAM, one Int per byte; size is KB so shift by 10.
    private[this] val reuMem = Array.ofDim[Int](size << 10)
    // Address mask: REU addresses wrap at the RAM size.
    final private[this] val REU_WRAP_ADDRESS = (size << 10) - 1
    // status & command registers
    private[this] var statusRegister = 0
    private[this] var commandRegister = 0x10
    // addressing registers
    private[this] var c64Address,reuAddress,transferRegister = 0
    // Shadow copies reloaded into the live registers when autoload is enabled.
    private[this] var shadowC64Address = 0
    private[this] var shadowReuAddress = 0xF80000
    private[this] var shadowTransferRegister = 0xFF
    // control registers
    private[this] var interruptMaskRegister = 0x1F
    private[this] var addressControlRegister = 0x3F
    private[this] var currentOperation = IDLE_OP
    // Swap operation runs in two phases: read both sides, then write both sides.
    private[this] var exchangeFirstPhase = true
    private[this] var exchangeTmp1,exchangeTmp2 = 0
    // True when an execute is armed to trigger on the next write to $FF00.
    private[this] var ff00 = false
    final private[this] val IDLE_OP = -1
    final private[this] val C64_TO_REU_OP = 0
    final private[this] val REU_TO_C64_OP = 1
    final private[this] val EXCHANGE_OP = 2
    final private[this] val VERIFY_OP = 3
    // Status register bits.
    final private[this] val STATUS_END_OF_BLOCK = 0x40
    final private[this] val STATUS_VERIFY_ERROR = 0x20
    // Command register bits.
    final private[this] val CMD_EXECUTE = 0x80
    final private[this] val CMD_AUTOLOAD = 0x20
    final private[this] val CMD_FF00_TRIGGER = 0x10
    final private[this] val CMD_TRANSFER_TYPE = 0x3
    // Interrupt mask register bits.
    final private[this] val CTRL_IRQ_MASK = 0x80
    final private[this] val CTRL_IRQ_END_OF_BLOCK_MASK = 0x40
    final private[this] val CTRL_IRQ_VERIFY_ERROR_MASK = 0x20
    // Floating-bus emulation for unpopulated banks of the 1764; the decay logic is
    // currently commented out in readFromREU/writeToREU.
    final private[this] val FLOATING_BUS_VALUE_DECAY_CYCLES = 260
    private[this] var floatingBusValue = 0xFF
    private[this] var floatingBusValueDecayCycle = Long.MaxValue
    private[this] var reuBank = 0

    // Stop intercepting $FF00 writes when the cartridge is removed.
    override def eject : Unit = {
      mem.setForwardWriteTo(None)
    }

    /** Pre-loads expansion RAM from the given file; the file must not exceed the RAM size. */
    def loadFrom(file:File) : Unit = {
      if (file.length > reuMem.length) throw new IllegalArgumentException("REU file size is greater than the REU size")

      Log.info("Loading REU from " + file)
      val in = new BufferedInputStream(new FileInputStream(file))
      val buffer = Array.ofDim[Byte](1024)
      var index = 0
      var read = 0
      while (read != -1) {
        read = in.read(buffer)
        if (read != -1) {
          for(i <- 0 until read) reuMem(index + i) = buffer(i).toInt & 0xFF
          index += read
        }
      }
      in.close
    }

    // 16M images are expected to be fully loaded from file, so skip the RAM init pattern.
    override def init : Unit = if (size != REU_16M) ramInitPattern

    /** Resets all registers to their power-on defaults; expansion RAM is preserved. */
    final override def reset : Unit = {
      statusRegister = 0
      commandRegister = 0x10
      c64Address = 0
      reuAddress = 0
      transferRegister = 0
      interruptMaskRegister = 0x1F
      addressControlRegister = 0x3F
      currentOperation = IDLE_OP
      shadowC64Address = 0
      shadowReuAddress = 0xF80000
      shadowTransferRegister = 0xFF
      floatingBusValue = 0xFF
      floatingBusValueDecayCycle = Long.MaxValue
      reuBank = 0
    }

    // Fills RAM with the alternating s/m/m/s pattern (inverting every 256 bytes),
    // mimicking the content of uninitialized DRAM at power-on.
    private def ramInitPattern : Unit = {
      var s = 0xFF
      var m = 0
      var i = 0
      while (i < reuMem.length) {
        for(_ <- 0 until 64) {
          reuMem(i) = s ; i += 1
          reuMem(i) = m ; i += 1
          reuMem(i) = m ; i += 1
          reuMem(i) = s ; i += 1
        }
        s ^= 0xFF
        m ^= 0xFF
      }
    }

    // Hard reset also re-applies the power-on RAM pattern.
    override def hardReset : Unit = {
      reset
      if (size != REU_16M) ramInitPattern
    }

    // REU registers are visible in the whole $DF00-$DFFF I/O2 page (mirrored every 32 bytes),
    // but only while no DMA operation is in progress.
    final override def read(address: Int, chipID: ChipID.ID = ChipID.CPU) = {
      if (currentOperation == IDLE_OP && address >= 0xDF00 && address < 0xE000) readREU(address & 0x1F)
      else super.read(address,chipID)
    }

    // Handles both register writes and the deferred-execute trigger on $FF00.
    final override def write(address: Int, value: Int, chipID: ChipID.ID = ChipID.CPU) : Unit = {
      if (address == 0xFF00 && ff00 && (commandRegister & CMD_EXECUTE) > 0) {
        ff00 = false
        //println("Starting deferred operation")
        startOperation
      }
      else
      if (currentOperation == IDLE_OP && address >= 0xDF00 && address < 0xE000) {
        //println(s"Writing REU at ${Integer.toHexString(address)}")
        writeREU(address & 0x1F,value)
      }
    }

    // Register reads; offsets follow the REC register map.
    private def readREU(offset:Int) = {
      offset match {
        case 0 => // status register, bit 7-5 cleared after reading
          val oldReg = statusRegister
          statusRegister &= 0x1F
          setIRQ(false)
          (oldReg | J1) & 0xF8 // 0-3 bits are 0
        case 1 => commandRegister
        case 2 => c64Address & 0xFF
        case 3 => c64Address >> 8
        case 4 => reuAddress & 0xFF
        case 5 => reuAddress >> 8
        case 6 => ((reuAddress >> 16) & 0xFF) | 0xF8 // unused bank bits read back as 1
        case 7 => transferRegister & 0xFF
        case 8 => transferRegister >> 8
        case 9 => interruptMaskRegister | 0x1F  // lower bits are not connected
        case 10 => addressControlRegister | 0x3F // lower bits are not connected
        case _ => 0xFF
      }
    }

    // Register writes; address registers update both the live and the shadow copies.
    private def writeREU(offset:Int,value:Int) : Unit = {
      offset match {
        case 1 =>
          commandRegister = value
          checkOperation
        case 2 =>
          shadowC64Address &= 0xff00
          shadowC64Address |= (value & 0xff)
          c64Address = shadowC64Address
        case 3 =>
          shadowC64Address &= 0x00ff
          shadowC64Address |= (value & 0xff) << 8
          c64Address = shadowC64Address
        case 4 =>
          shadowReuAddress &= 0xffff00
          shadowReuAddress |= (value & 0xff)
          /* copy bits, keep Bank */
          reuAddress &= REU_WRAP_ADDRESS & 0xff0000
          reuAddress |= shadowReuAddress & 0xffff
          reuAddress &= REU_WRAP_ADDRESS
        case 5 =>
          shadowReuAddress &= 0xff00ff
          shadowReuAddress |= (value & 0xff) << 8
          /* copy bits, keep Bank */
          reuAddress &= REU_WRAP_ADDRESS & 0xff0000
          reuAddress |= shadowReuAddress & 0xffff
          reuAddress &= REU_WRAP_ADDRESS
        case 6 =>
          /*
           * Modify bank and shadow copy of bank, kept on the high bits of
           * ramAddr, which is a deviation from hardware's behavior.
           */
          reuAddress &= 0xffff
          reuAddress |= (value & 0xff) << 16
          reuAddress &= REU_WRAP_ADDRESS
          shadowReuAddress &= 0xffff
          shadowReuAddress |= (value & 0xff) << 16
          reuBank = value & 0x07
        case 7 =>
          shadowTransferRegister &= 0xff00
          shadowTransferRegister |= (value & 0xff)
          transferRegister = shadowTransferRegister
        case 8 =>
          shadowTransferRegister &= 0x00ff
          shadowTransferRegister |= (value & 0xff) << 8
          transferRegister = shadowTransferRegister
        case 9 =>
          interruptMaskRegister = value
          checkInterrupt
        case 10 => addressControlRegister = value
        case _ =>
      }
    }

    // Decides, from the command register, whether to start an operation immediately
    // (execute bit set, FF00-trigger disabled) or arm the $FF00 trigger.
    private def checkOperation : Unit = {
      ff00 = (commandRegister & 0x90) == 0x80
      if ((commandRegister & 0x90) == 0x90) clk.schedule(new ClockEvent("REUStartOperation",clk.nextCycles,cycles => startOperation))
      if (ff00) {
        //println(s"Start of operation ${currentOperation} deferred clk=${clk.currentCycles}")
        // Intercept main-memory writes so the next $FF00 store reaches this.write.
        mem.setForwardWriteTo(Some(this))
      }
      else mem.setForwardWriteTo(None)
    }

    // Raises DMA and kicks off the selected transfer type.
    private def startOperation : Unit = {
      currentOperation = commandRegister & CMD_TRANSFER_TYPE
      //println(s"Start of operation ${currentOperation} clk=${clk.currentCycles} c64Addr=${Integer.toHexString(c64Address)} reuAddr=${Integer.toHexString(reuAddress)} length=${Integer.toHexString(transferRegister)}")
      setDMA(true) // DMA request
      currentOperation match {
        case C64_TO_REU_OP => transferOperation(true)
        case REU_TO_C64_OP => transferOperation(false)
        case VERIFY_OP => verifyOperation
        case EXCHANGE_OP =>
          exchangeFirstPhase = true
          exchangeOperation
      }
    }

    // Reads a byte of expansion RAM; unpopulated 1764 banks return the floating-bus value.
    @inline private def readFromREU : Int = {
      val r = if (IS_256 && reuBank > 3) {
        //if (clk.currentCycles > floatingBusValueDecayCycle) floatingBusValue = 0xFF
        //floatingBusValueDecayCycle = clk.currentCycles + FLOATING_BUS_VALUE_DECAY_CYCLES
        floatingBusValue
      }
      else reuMem(reuAddress)
      //floatingBusValueDecayCycle = clk.currentCycles + FLOATING_BUS_VALUE_DECAY_CYCLES
      r
    }

    // Writes a byte of expansion RAM; writes to unpopulated 1764 banks only charge the bus.
    @inline private def writeToREU(value:Int) : Unit = {
      if (IS_256) {
        if (reuBank < 4) reuMem(reuAddress) = value
      }
      else reuMem(reuAddress) = value

      floatingBusValue = value
      //floatingBusValueDecayCycle = clk.currentCycles + FLOATING_BUS_VALUE_DECAY_CYCLES
    }

    // Swap: phase 1 latches both bytes, phase 2 writes them crosswise; one phase per cycle.
    // Stalls (reschedules) while the VIC holds BA low.
    private def exchangeOperation : Unit = {
      if (!baLow) { // exchange
        if (exchangeFirstPhase) {
          exchangeTmp1 = mem.read(c64Address)
          exchangeTmp2 = readFromREU
          clk.schedule(new ClockEvent("REUExchange",clk.nextCycles,cycles => exchangeOperation))
        }
        else {
          mem.write(c64Address,exchangeTmp2)
          writeToREU(exchangeTmp1)
          incrementAddresses
          if (transferRegister == 0x01) { // last byte of the block
            floatingBusValue = readFromREU
            statusRegister |= STATUS_END_OF_BLOCK
            clk.schedule(new ClockEvent("REUEndOperation",clk.currentCycles + 1,cycles => endOperation))
          }
          else {
            transferRegister = (transferRegister - 1) & 0xFFFF
            clk.schedule(new ClockEvent("REUExchange",clk.nextCycles,cycles => exchangeOperation))
          }
        }
        exchangeFirstPhase = !exchangeFirstPhase
      }
      else clk.schedule(new ClockEvent("REUExchange",clk.nextCycles,cycles => exchangeOperation))
    }

    // Verify: compares C64 memory with REU memory one byte per cycle; a mismatch sets
    // the verify-error flag and ends the operation early.
    private def verifyOperation : Unit = {
      if (!baLow) { // verify
        if (mem.read(c64Address) != readFromREU) statusRegister |= STATUS_VERIFY_ERROR
        incrementAddresses
        if (transferRegister == 0x01) { // last byte of the block
          statusRegister |= STATUS_END_OF_BLOCK
          clk.schedule(new ClockEvent("REUEndOperation",clk.currentCycles + 1,cycles => endOperation))
        }
        else {
          transferRegister = (transferRegister - 1) & 0xFFFF
          if ((statusRegister & STATUS_VERIFY_ERROR) > 0) {
            // Early termination on mismatch; end-of-block is still set when the very next
            // (and last) byte would have matched.
            if (transferRegister == 0x01 && mem.read(c64Address) == readFromREU) statusRegister |= STATUS_END_OF_BLOCK
            clk.schedule(new ClockEvent("REUEndOperation",clk.currentCycles + 2,_ => endOperation))
          }
          else
            clk.schedule(new ClockEvent("REUVerify",clk.nextCycles,cycles => verifyOperation))
        }
      }
      else
        clk.schedule(new ClockEvent("REUVerify",clk.nextCycles,cycles => verifyOperation))
    }

    // Plain transfer in either direction, one byte per cycle; stalls while BA is low.
    private def transferOperation(isC64Source:Boolean) : Unit = {
      //println(s"Transfer balow=$baLow op=${currentOperation} clk=${clk.currentCycles} c64Addr=${Integer.toHexString(c64Address)} reuAddr=${Integer.toHexString(reuAddress)} length=${Integer.toHexString(transferRegister)}")
      if (!baLow) { // transfer
        if (isC64Source) writeToREU(mem.read(c64Address))
        else mem.write(c64Address,readFromREU)
        incrementAddresses
        if (transferRegister == 0x01) { // last byte of the block
          statusRegister |= STATUS_END_OF_BLOCK
          clk.schedule(new ClockEvent("REUEndOperation",clk.currentCycles + 1,cycles => endOperation))
          if (!isC64Source) {
            floatingBusValue = readFromREU
          }
        }
        else {
          transferRegister = (transferRegister - 1) & 0xFFFF
          clk.schedule(new ClockEvent("REUTransfer",clk.nextCycles,cycles => transferOperation(isC64Source)))
        }
      }
      else
        clk.schedule(new ClockEvent("REUTransfer",clk.nextCycles,cycles => transferOperation(isC64Source)))
    }

    // Raises IRQ if the master-interrupt bit and the relevant condition mask are set.
    private def checkInterrupt : Unit = {
      if ((interruptMaskRegister & CTRL_IRQ_MASK) > 0) {
        val irq = ((statusRegister & STATUS_END_OF_BLOCK) > 0 && (interruptMaskRegister & CTRL_IRQ_END_OF_BLOCK_MASK) > 0) ||
                  ((statusRegister & STATUS_VERIFY_ERROR) > 0 && (interruptMaskRegister & CTRL_IRQ_VERIFY_ERROR_MASK) > 0)
        if (irq) {
          setIRQ(true)
          statusRegister |= 0x80 // interrupt-pending bit
        }
      }
    }

    // Common epilogue for all operations: releases DMA, updates the command register,
    // fires interrupts and applies autoload if requested.
    private def endOperation : Unit = {
      // clear execute bit
      commandRegister &= ~CMD_EXECUTE
      // set FF00 bit
      commandRegister |= CMD_FF00_TRIGGER
      mem.setForwardWriteTo(None)
      // released DMA
      setDMA(false)
      // check IRQ
      checkInterrupt
      // check autoload
      if ((commandRegister & CMD_AUTOLOAD) > 0) {
        //println(s"Autoload ${currentOperation} clk=${clk.currentCycles} c64Addr=${Integer.toHexString(c64Address)} reuAddr=${Integer.toHexString(reuAddress)} length=${Integer.toHexString(transferRegister)}")
        c64Address = shadowC64Address
        reuAddress = shadowReuAddress & REU_WRAP_ADDRESS
        transferRegister = shadowTransferRegister
      }
      //println(s"End of operation ${currentOperation} clk=${clk.currentCycles} c64Addr=${Integer.toHexString(c64Address)} reuAddr=${Integer.toHexString(reuAddress)} length=${Integer.toHexString(transferRegister)}")
      currentOperation = IDLE_OP
    }

    // Post-transfer address stepping; either address can be frozen via the control register.
    @inline private def incrementAddresses : Unit = {
      if ((addressControlRegister & 0x80) == 0) c64Address = (c64Address + 1) & 0xFFFF
      if ((addressControlRegister & 0x40) == 0) reuAddress = (reuAddress + 1) & REU_WRAP_ADDRESS
    }

    // Serializes full device state (size first, so the loader can rebuild the right model).
    override def saveState(out: ObjectOutputStream): Unit = {
      out.writeInt(size)
      super.saveState(out)
      out.writeInt(statusRegister)
      out.writeInt(commandRegister)
      out.writeInt(c64Address)
      out.writeInt(reuAddress)
      out.writeInt(transferRegister)
      out.writeInt(interruptMaskRegister)
      out.writeInt(addressControlRegister)
      out.writeInt(currentOperation)
      out.writeInt(shadowC64Address)
      out.writeInt(shadowReuAddress)
      out.writeInt(shadowTransferRegister)
      out.writeObject(reuMem)
      out.writeInt(reuBank)
    }

    // Restores state saved by saveState; re-arms the $FF00 trigger via checkOperation.
    override def loadState(in: ObjectInputStream): Unit = {
      // size is handled by export port handler
      super.loadState(in)
      statusRegister = in.readInt
      commandRegister = in.readInt
      c64Address = in.readInt
      reuAddress = in.readInt
      transferRegister = in.readInt
      interruptMaskRegister = in.readInt
      addressControlRegister = in.readInt
      currentOperation = in.readInt
      shadowC64Address = in.readInt
      shadowReuAddress = in.readInt
      shadowTransferRegister = in.readInt
      loadMemory[Int](reuMem,in)
      reuBank = in.readInt
      checkOperation
    }
  }
}
| abbruzze/kernal64 | Kernal64/src/ucesoft/cbm/expansion/REU.scala | Scala | mit | 15,872 |
package com.twitter.finatra.http
import com.twitter.finagle.Filter
import com.twitter.finagle.http.Method._
import com.twitter.finagle.http.{Request, Response}
import com.twitter.inject.Injector
import scala.collection.mutable.ArrayBuffer
/** Internal DSL for declaring HTTP routes on a controller.
 *
 *  Routes accumulate into `routeBuilders`, which is shared (by reference) with
 *  the nested DSLs produced by the `filter` combinators so that routes declared
 *  after a `filter` call still land in the same buffer, wrapped by the
 *  accumulated filter chain.
 */
private[http] trait RouteDSL { self =>

  private type HttpFilter = Filter[Request, Response, Request, Response]

  private[http] val routeBuilders = ArrayBuffer[RouteBuilder[_, _]]()
  private[http] val annotations = getClass.getDeclaredAnnotations

  /** Filter applied in front of every route declared through this DSL;
   *  identity by default, extended by the `filter` combinators below. */
  private[http] def buildFilter(injector: Injector): HttpFilter = Filter.identity

  /** Returns a DSL whose routes are wrapped by an injector-provided filter of the given type. */
  protected def filter[FilterType <: HttpFilter : Manifest] = new RouteDSL {
    override val routeBuilders = self.routeBuilders
    override val annotations = self.annotations
    override def buildFilter(injector: Injector) = self.buildFilter(injector).andThen(injector.instance[FilterType])
  }

  /** Returns a DSL whose routes are wrapped by the given filter instance. */
  def filter(next: HttpFilter) = new RouteDSL {
    override val routeBuilders = self.routeBuilders
    // Propagate the declaring controller's annotations, consistent with the
    // type-parameterized `filter` overload above (previously omitted here).
    override val annotations = self.annotations
    override def buildFilter(injector: Injector) = self.buildFilter(injector).andThen(next)
  }

  def get[RequestType: Manifest, ResponseType: Manifest](route: String, name: String = "")(callback: RequestType => ResponseType): Unit = addRoute(Get, route, name, callback)
  def post[RequestType: Manifest, ResponseType: Manifest](route: String, name: String = "")(callback: RequestType => ResponseType): Unit = addRoute(Post, route, name, callback)
  def put[RequestType: Manifest, ResponseType: Manifest](route: String, name: String = "")(callback: RequestType => ResponseType): Unit = addRoute(Put, route, name, callback)
  def delete[RequestType: Manifest, ResponseType: Manifest](route: String, name: String = "")(callback: RequestType => ResponseType): Unit = addRoute(Delete, route, name, callback)
  def options[RequestType: Manifest, ResponseType: Manifest](route: String, name: String = "")(callback: RequestType => ResponseType): Unit = addRoute(Options, route, name, callback)
  def patch[RequestType: Manifest, ResponseType: Manifest](route: String, name: String = "")(callback: RequestType => ResponseType): Unit = addRoute(Patch, route, name, callback)
  def head[RequestType: Manifest, ResponseType: Manifest](route: String, name: String = "")(callback: RequestType => ResponseType): Unit = addRoute(Head, route, name, callback)
  def trace[RequestType: Manifest, ResponseType: Manifest](route: String, name: String = "")(callback: RequestType => ResponseType): Unit = addRoute(Trace, route, name, callback)

  /** Registers a route for `method`, anchored to this DSL (`self`) so the
   *  route inherits this DSL's filter chain. Shared by all verb methods. */
  private def addRoute[RequestType: Manifest, ResponseType: Manifest](
    method: com.twitter.finagle.http.Method,
    route: String,
    name: String,
    callback: RequestType => ResponseType): Unit =
    routeBuilders += new RouteBuilder(method, route, name, callback, self)
}
| joecwu/finatra | http/src/main/scala/com/twitter/finatra/http/RouteDSL.scala | Scala | apache-2.0 | 2,747 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.aggregate
import java.lang.Iterable
import org.apache.flink.api.java.tuple.Tuple
import org.apache.flink.types.Row
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.apache.flink.table.runtime.types.CRow
import org.apache.flink.util.Collector
/**
  * Computes the final aggregate value from incrementally computed aggregates.
  *
  * @param numGroupingKey the number of grouping keys
  * @param numAggregates the number of aggregates
  * @param windowStartOffset the offset of the window start property
  * @param windowEndOffset the offset of the window end property
  * @param windowRowtimeOffset the offset of the window rowtime property
  * @param finalRowArity The arity of the final output row.
  */
class IncrementalAggregateTimeWindowFunction(
    private val numGroupingKey: Int,
    private val numAggregates: Int,
    private val windowStartOffset: Option[Int],
    private val windowEndOffset: Option[Int],
    private val windowRowtimeOffset: Option[Int],
    private val finalRowArity: Int)
  extends IncrementalAggregateWindowFunction[TimeWindow](
    numGroupingKey,
    numAggregates,
    finalRowArity) {

  // Created once per task in open(); wraps the downstream collector and
  // appends the window start/end/rowtime properties at the given offsets.
  private var collector: DataStreamTimeWindowPropertyCollector = _

  override def open(parameters: Configuration): Unit = {
    collector = new DataStreamTimeWindowPropertyCollector(
      windowStartOffset,
      windowEndOffset,
      windowRowtimeOffset)
    super.open(parameters)
  }

  override def apply(
    key: Row,
    window: TimeWindow,
    records: Iterable[Row],
    out: Collector[CRow]): Unit = {

    // set collector and window bounds before delegating; the wrapped
    // collector stamps them onto every emitted row
    collector.wrappedCollector = out
    collector.windowStart = window.getStart
    collector.windowEnd = window.getEnd

    super.apply(key, window, records, collector)
  }
}
| zimmermatt/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/IncrementalAggregateTimeWindowFunction.scala | Scala | apache-2.0 | 2,687 |
/* Copyright 2009-2016 EPFL, Lausanne */
package leon
package termination
import purescala.Definitions._
import scala.collection.mutable.{Map => MutableMap}
/** Modular termination checker: assembles several module stacks that differ
 *  only in their RelationComparator, then runs a fixed pipeline of processors.
 *  The processor list order matters — cheaper/stronger checks run first.
 */
class ComplexTerminationChecker(context: LeonContext, initProgram: Program) extends ProcessingPipeline(context, initProgram) {

  val name = "Complex Termination Checker"
  val description = "A modular termination checker with a few basic modules™"

  // Default stack: argument-size-sum comparison plus chain analysis.
  val modules =
    new StructuralSize
    with ArgsSizeSumRelationComparator
    with ChainComparator
    with Strengthener
    with RelationBuilder
    with ChainBuilder {
      val checker = ComplexTerminationChecker.this
    }

  // Variant stacks: same machinery with a different comparison strategy.
  val modulesLexicographic =
    new StructuralSize
    with LexicographicRelationComparator
    with Strengthener
    with RelationBuilder {
      val checker = ComplexTerminationChecker.this
    }

  val modulesOuter =
    new StructuralSize
    with ArgsOuterSizeRelationComparator
    with Strengthener
    with RelationBuilder {
      val checker = ComplexTerminationChecker.this
    }

  val modulesBV =
    new StructuralSize
    with BVRelationComparator
    with Strengthener
    with RelationBuilder {
      val checker = ComplexTerminationChecker.this
    }

  def processors = List(
    //new DecreasesByProcessor(this),
    new RecursionProcessor(this, modules),
    // RelationProcessor is the only Processor which benefits from trying a different RelationComparator
    new RelationProcessor(this, modulesBV),
    new RelationProcessor(this, modulesOuter),
    new RelationProcessor(this, modulesLexicographic),
    new RelationProcessor(this, modules),
    new ChainProcessor(this, modules),
    new SelfCallsProcessor(this),
    new LoopProcessor(this, modules)
  )
}
| regb/leon | src/main/scala/leon/termination/ComplexTerminationChecker.scala | Scala | gpl-3.0 | 1,812 |
package im.actor.server.persist
import java.time.{ LocalDateTime, ZoneOffset }
import im.actor.server.db.ActorPostgresDriver.api._
import im.actor.server.model.{ Sex, User, UserState }
import im.actor.util.misc.PhoneNumberUtils
import slick.dbio.Effect.Read
import slick.lifted.ColumnOrdered
import slick.profile.FixedSqlStreamingAction
import scala.concurrent.ExecutionContext
/** Slick row mapping for the `users` table; one row per [[User]]. */
final class UserTable(tag: Tag) extends Table[User](tag, "users") {

  import SexColumnType._
  import UserStateColumnType._

  def id = column[Int]("id", O.PrimaryKey)
  def accessSalt = column[String]("access_salt")
  def name = column[String]("name")
  def countryCode = column[String]("country_code")
  def sex = column[Sex]("sex")
  def state = column[UserState]("state")
  def createdAt = column[LocalDateTime]("created_at")
  def nickname = column[Option[String]]("nickname")
  def about = column[Option[String]]("about")
  // Soft-delete marker: non-empty means the user is deleted.
  def deletedAt = column[Option[LocalDateTime]]("deleted_at")
  def isBot = column[Boolean]("is_bot")
  def external = column[Option[String]]("external")

  // Default projection: column order must match User's constructor.
  def * = (id, accessSalt, name, countryCode, sex, state, createdAt, nickname, about, deletedAt, isBot, external) <> (User.tupled, User.unapply)
}
/** Query repository for the `users` table. Hot single-parameter queries are
 *  pre-compiled (`Compiled(...)`, the `...C` vals) so Slick can reuse the
 *  prepared statement. "Active human users" = not soft-deleted and not a bot.
 */
object UserRepo {
  val users = TableQuery[UserTable]

  def byId(id: Rep[Int]) = users filter (_.id === id)
  def nameById(id: Rep[Int]) = byId(id) map (_.name)

  val byIdC = Compiled(byId _)
  val nameByIdC = Compiled(nameById _)

  // Nickname comparisons are case-insensitive on both sides.
  private def byNickname(nickname: Rep[String]) =
    users filter (_.nickname.toLowerCase === nickname.toLowerCase)
  private def byNicknamePrefix(nickPrefix: Rep[String]) =
    users filter (_.nickname.toLowerCase.like(nickPrefix.toLowerCase))

  private val byNicknameC = Compiled(byNickname _)
  private val byNicknamePrefixC = Compiled(byNicknamePrefix _)

  // Join against the phone table; take(1) because a number is expected to map
  // to at most one user.
  def byPhone(phone: Rep[Long]) = (for {
    phones ← UserPhoneRepo.phones.filter(_.number === phone)
    users ← users if users.id === phones.userId
  } yield users).take(1)
  def idByPhone(phone: Rep[Long]) = byPhone(phone) map (_.id)

  val idByPhoneC = Compiled(idByPhone _)

  def idsByEmail(email: Rep[String]) =
    for {
      emails ← UserEmailRepo.emails filter (_.email.toLowerCase === email.toLowerCase)
      users ← users filter (_.id === emails.userId) map (_.id)
    } yield users
  val idsByEmailC = Compiled(idsByEmail _)

  private val activeHumanUsers =
    users.filter(u ⇒ u.deletedAt.isEmpty && !u.isBot)
  private val activeHumanUsersC = Compiled(activeHumanUsers)
  private val activeHumanUsersIdsC = Compiled(activeHumanUsers map (_.id))

  private def activeHumanUsersIds(createdAfter: Rep[LocalDateTime]) =
    Compiled {
      users.filter(u ⇒ u.deletedAt.isEmpty && !u.isBot && u.createdAt > createdAfter).sortBy(_.createdAt.asc).map(u ⇒ u.id → u.createdAt)
    }

  def activeUserIdsCreatedAfter(createdAfter: LocalDateTime): DBIO[Seq[(Int, LocalDateTime)]] = activeHumanUsersIds(createdAfter).result

  def fetchPeople = activeHumanUsersC.result

  def create(user: User) =
    users += user

  def setCountryCode(userId: Int, countryCode: String) =
    users.filter(_.id === userId).map(_.countryCode).update(countryCode)

  // Soft delete: only stamps deleted_at, the row is kept.
  def setDeletedAt(userId: Int) =
    users.filter(_.id === userId).
      map(_.deletedAt).
      update(Some(LocalDateTime.now(ZoneOffset.UTC)))

  def setName(userId: Int, name: String) =
    users.filter(_.id === userId).map(_.name).update(name)

  def allIds = users.map(_.id).result

  def all = users.result

  def find(id: Int) =
    byIdC(id).result.headOption

  @deprecated("Duplicates ", "2016-07-07")
  def findName(id: Int) =
    nameById(id).result.headOption

  // TODO: #perf will it create prepared statement for each ids length?
  def findSalts(ids: Set[Int]) =
    users.filter(_.id inSet ids).map(u ⇒ (u.id, u.accessSalt)).result

  @deprecated("user GlobalNamesStorageKeyValueStorage instead", "2016-07-17")
  def findByNickname(query: String): DBIO[Option[User]] = {
    // accept both "@nick" and "nick"
    val nickname =
      if (query.startsWith("@")) query.drop(1) else query
    byNicknameC(nickname).result.headOption
  }

  @deprecated("user GlobalNamesStorageKeyValueStorage instead", "2016-07-17")
  def findByNicknamePrefix(query: String): DBIO[Seq[User]] = {
    val nickname: String =
      if (query.startsWith("@")) query.drop(1) else query
    byNicknamePrefixC(nickname).result
  }

  def findIdsByEmail(email: String) =
    idsByEmailC(email).result.headOption

  // Looks the query up both as an email and (when normalizable) as a phone
  // number, concatenating the matches.
  def findIds(query: String)(implicit ec: ExecutionContext) =
    for {
      e ← idsByEmailC(query).result
      p ← PhoneNumberUtils.normalizeStr(query)
        .headOption
        .map(idByPhoneC(_).result)
        .getOrElse(DBIO.successful(Nil))
    } yield e ++ p

  @deprecated("user GlobalNamesStorageKeyValueStorage instead", "2016-07-17")
  def setNickname(userId: Int, nickname: Option[String]) =
    byId(userId).map(_.nickname).update(nickname)

  def setAbout(userId: Int, about: Option[String]) =
    byId(userId).map(_.about).update(about)

  @deprecated("user GlobalNamesStorageKeyValueStorage instead", "2016-07-17")
  def nicknameExists(nickname: String) =
    users.filter(_.nickname.toLowerCase === nickname.toLowerCase).exists.result

  def findByIds(ids: Set[Int]) =
    users.filter(_.id inSet ids).result

  // Page numbers are 1-based; NOTE(review): number <= 0 yields a negative
  // offset — callers are presumably expected to pass number >= 1.
  def findByIdsPaged(ids: Set[Int], number: Int, size: Int) = {
    val offset = (number - 1) * size
    users.
      filter(_.id inSet ids).
      sortBy(_.name).
      drop(offset).
      take(size).
      result
  }

  def activeUsersIds = activeHumanUsersIdsC.result

  def fetchPagedNewest(pageNumber: Int, pageSize: Int): DBIO[Seq[User]] =
    paged(pageNumber, pageSize, { ut ⇒ ut.createdAt.desc }).result

  def paged[A](pageNumber: Int, pageSize: Int, sorting: UserTable ⇒ ColumnOrdered[A]): Query[UserTable, User, Seq] = {
    val offset = (pageNumber - 1) * pageSize
    activeHumanUsers
      .sortBy(sorting)
      .drop(offset)
      .take(pageSize)
  }

  def isDeleted(userId: Int): DBIO[Boolean] =
    byIdC.applied(userId).filter(_.deletedAt.nonEmpty).exists.result
}
| EaglesoftZJ/actor-platform | actor-server/actor-persist/src/main/scala/im/actor/server/persist/UserRepo.scala | Scala | agpl-3.0 | 6,076 |
package dyno
package partest
import scala.tools.partest.DirectTest
import scala.tools.nsc._
//import nest.FileUtil._
import scala.reflect.io._
import java.io.ByteArrayOutputStream
import java.io.PrintStream
import java.io.{File => JFile}
import java.io.PrintWriter
/* Taken from: [[https://github.com/nicolasstucki/specialized/commit/f7ee90610d0052cb3607cef138051575db3c2eb9]]
* Using the Partest tool in Scala:
* [[https://github.com/scala/scala/blob/master/src/partest/scala/tools/partest/DirectTest.scala]]
*/
/** Compiles `code` with the given scalac `flags` (via partest's DirectTest)
 *  and captures everything the compiler prints to stdout/stderr.
 *
 *  @param code  the source to compile (consumed by DirectTest)
 *  @param flags extra compiler settings, passed through `extraSettings`
 */
class CompileTest(val code: String, flags: String) extends DirectTest {
  // Compile into the system temp directory so no project output is touched.
  lazy val tmpDir = System.getProperty("java.io.tmpdir")
  override lazy val testPath = File(new JFile(tmpDir))
  override lazy val testOutput = Directory(new JFile(tmpDir))
  override def extraSettings = flags

  def show() = compilationOutput()

  /** Runs the compilation and returns whatever was written to the console. */
  def compilationOutput(): String = {
    val buffer = new ByteArrayOutputStream()
    val printer = new PrintStream(buffer)
    // Redirect both streams: compiler diagnostics may go to either.
    Console.withOut(printer) {
      Console.withErr(printer) {
        compile() // success flag intentionally ignored; only the output matters
      }
    }
    printer.flush()
    buffer.toString
  }
}
| scaladyno/scaladyno-plugin | tests/correctness/test/dyno/partest/CompileTest.scala | Scala | bsd-3-clause | 1,088 |
package com.goticks
import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory
/** Entry point for the backend node: loads the `backend` config, starts the
 *  "backend" actor system and creates the BoxOffice actor.
 *  Extends App, so this body runs at JVM startup via DelayedInit.
 */
object BackendMain extends App with RequestTimeout {
  val config = ConfigFactory.load("backend")
  val system = ActorSystem("backend", config)
  // Ask-timeout derived from config; declared before actorOf because
  // BoxOffice.props presumably takes it implicitly — TODO confirm (the
  // RequestTimeout trait and props signature are not visible here).
  implicit val requestTimeout = configuredRequestTimeout(config)
  system.actorOf(BoxOffice.props, BoxOffice.name)
}
| RayRoestenburg/akka-in-action | chapter-remoting/src/main/scala/com/goticks/BackendMain.scala | Scala | mit | 354 |
/** Demonstrates a Scala extractor object: `apply` injects (builds a value),
 *  `unapply` extracts (takes it apart). Output strings are unchanged. */
object ExtractorTest {
  def main(args: Array[String]): Unit = {
    println ("Apply 方法 : " + apply("Zara", "gmail.com"));
    println ("Unapply 方法 : " + unapply("Zara@gmail.com"));
    println ("Unapply 方法 : " + unapply("Zara Ali"));
    println(ExtractorTest("wpl","imobpay.com")) // calling the object directly invokes apply
    println(ExtractorTest.unapply("wpl@imobpay.com"))
  }

  /** Injection method (optional): builds "user@domain". */
  def apply(user: String, domain: String): String =
    user + "@" + domain

  /** Extraction method (required): splits "user@domain" into its two parts,
   *  or None when the input does not contain exactly one '@'. */
  def unapply(str: String): Option[(String, String)] =
    str.split("@") match {
      case Array(user, domain) => Some((user, domain)) // proper tuple, no auto-tupling
      case _ => None
    }
}
| PengLiangWang/Scala | Extractor/ExtractorTest.scala | Scala | gpl-3.0 | 796 |
/*
* Copyright 2015 Heiko Seeberger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.heikoseeberger.akkasse
import akka.stream.stage.{ Context, PushStage }
/** Companion holding the pure line-parsing logic for SSE event blocks. */
private object ServerSentEventParser {

  private final val LF = "\\n"

  // Field names recognized by the SSE event-stream format.
  private final val Data = "data"
  private final val Event = "event"
  private final val Id = "id"
  private final val Retry = "retry"

  // "field: value" with one optional space after the colon; the field part
  // must be non-empty and colon-free.
  private val linePattern = """([^:]+): ?(.*)""".r

  /** Folds one event's worth of lines into a ServerSentEvent.
   *
   *  The first collect case keeps known "field: value" lines; the second maps
   *  any other non-empty line (e.g. a bare field name without a colon) to an
   *  empty value. Unknown field names end up grouped but are never queried,
   *  so they are effectively ignored. Multiple data lines are joined with LF;
   *  for event/id/retry the last occurrence wins; a non-numeric retry value
   *  is dropped.
   */
  private def parseServerSentEvent(lines: Seq[String]) = {
    val valuesByField = lines
      .collect {
        case linePattern(field @ (Data | Event | Id | Retry), value) => field -> value
        case field if field.nonEmpty => field -> ""
      }
      .groupBy(_._1)
    def values(field: String) = valuesByField
      .getOrElse(field, Vector.empty)
      .map(_._2)
    val data = values(Data).mkString(LF)
    val event = values(Event).lastOption
    val idField = values(Id).lastOption
    val retry = values(Retry)
      .lastOption
      .flatMap { s =>
        try
          Some(s.trim.toInt)
        catch {
          case _: NumberFormatException => None
        }
      }
    ServerSentEvent(data, event, idField, retry)
  }
}
/** Push stage that buffers incoming lines until an empty line (the SSE event
 *  terminator), then emits the parsed event. Fails the stream if the buffered
 *  lines of a single event exceed `maxEventSize` characters.
 */
private final class ServerSentEventParser(maxEventSize: Int) extends PushStage[String, ServerSentEvent] {

  import ServerSentEventParser._

  // Lines of the event currently being assembled.
  private var lines = Vector.empty[String]

  override def onPush(line: String, ctx: Context[ServerSentEvent]) =
    if (line.nonEmpty) {
      lines :+= line
      if (lines.map(_.length).sum > maxEventSize)
        ctx.fail(new IllegalStateException(s"maxEventSize of $maxEventSize exceeded!"))
      else
        ctx.pull()
    } else {
      // Empty line terminates the event: parse, reset the buffer, emit.
      val event = parseServerSentEvent(lines)
      lines = Vector.empty
      ctx.push(event)
    }
}
| jasonchaffee/akka-sse | akka-sse/src/main/scala/de/heikoseeberger/akkasse/ServerSentEventParser.scala | Scala | apache-2.0 | 2,308 |
/** Holds a 32 MiB byte array so that any classloader retaining this module's
 *  class keeps a large amount of heap alive — the accompanying test would
 *  OOM if these classes were not unloadable. */
object Module {
  val data: Array[Byte] = Array.ofDim[Byte](32 * 1024 * 1024)
}
/** Regression test: deserializing a module through ModuleSerializationProxy
 *  must not pin the module's class, so classloaders stay collectable. */
object Test {
  // Reflectively expose the private readResolve, which performs the actual
  // module lookup during deserialization.
  private val readResolve = classOf[scala.runtime.ModuleSerializationProxy].getDeclaredMethod("readResolve")
  readResolve.setAccessible(true)
  // Directory holding the compiled test classes, set by the partest harness.
  val testClassesDir = System.getProperty("partest.output")
  def main(args: Array[String]): Unit = {
    for (i <- 1 to 256) {
      // This would "java.lang.OutOfMemoryError: Java heap space" if ModuleSerializationProxy
      // prevented class unloading.
      deserializeDynamicLoadedClass()
    }
  }
  /** Loads Module$ in a throwaway classloader (so it is NOT the Module class
   *  of this loader) and checks readResolve resolves to that foreign class. */
  def deserializeDynamicLoadedClass(): Unit = {
    val loader = new java.net.URLClassLoader(Array(new java.io.File(testClassesDir).toURI.toURL), ClassLoader.getSystemClassLoader)
    val moduleClass = loader.loadClass("Module$")
    assert(moduleClass ne Module.getClass)
    val result = readResolve.invoke(new scala.runtime.ModuleSerializationProxy(moduleClass))
    assert(result.getClass == moduleClass)
  }
}
| martijnhoekstra/scala | test/files/run/module-serialization-proxy-class-unload.scala | Scala | apache-2.0 | 946 |
/** Intentionally empty class — presumably a compilation/packaging
 *  placeholder for the test suite (TODO confirm intent). */
class TestApp {
}
| bdas/macros-scala | macro/src/test/scala/TestApp.scala | Scala | apache-2.0 | 19 |
package com.hexagrammatic.cruciform
import java.io.{InputStreamReader, InputStream, OutputStreamWriter, OutputStream}
import java.security.KeyPair
import java.security.PublicKey
import java.security.PrivateKey
import javax.security.cert.Certificate
import org.bouncycastle.asn1.pkcs.PrivateKeyInfo
import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo
import org.bouncycastle.openssl.{PEMEncryptedKeyPair, PEMKeyPair, PEMParser, PEMWriter}
import org.bouncycastle.openssl.jcajce.{JcePEMDecryptorProviderBuilder, JcePEMEncryptorBuilder, JcaPEMKeyConverter}
/** PEM encoding/decoding helpers (BouncyCastle-backed) for keys and
 *  certificates, exposed through the [[PEM]] facade object. */
trait Codecs extends StreamConversions {

  /** Writes the given objects in PEM format to an output stream. The stream
   *  is flushed but NOT closed — the caller owns it. */
  class PEMEncoder(objs: AnyRef*) extends Writeable {
    def write(writer: PEMWriter, obj: AnyRef) {
      writer.writeObject(obj)
    }
    def to[T <: OutputStream](out: T): T = {
      val writer = new PEMWriter(new OutputStreamWriter(out))
      objs.foreach((r:AnyRef) => write(writer, r))
      writer.flush
      out
    }
  }

  class PEMCertificateEncoder(cert: Certificate) extends PEMEncoder(cert)
  class PEMPublicKeyEncoder(key: PublicKey) extends PEMEncoder(key)

  /** Private-key encoder; when a password is provided the key is encrypted,
   *  defaulting to AES-256-CBC unless an algorithm is given. The `with*`
   *  methods return new immutable encoders. */
  class PEMPrivateKeyEncoder(
    key: PrivateKey,
    password: Option[String] = None,
    encryptionAlgortihm: Option[String] = None) extends PEMEncoder(key) {

    val encryptorBuilder = encryptionAlgortihm match {
      case Some(algorithm) => new JcePEMEncryptorBuilder(algorithm)
      case None => new JcePEMEncryptorBuilder("AES-256-CBC")
    }

    def withPassword(password: String): PEMPrivateKeyEncoder =
      new PEMPrivateKeyEncoder(key, Option(password), encryptionAlgortihm)

    def withEncryptionAlgorithm(algorithm: String): PEMPrivateKeyEncoder =
      new PEMPrivateKeyEncoder(key, password, Option(algorithm))

    // Encrypt only when a password is present; otherwise write in the clear.
    override def write(writer: PEMWriter, obj: AnyRef) {
      password match {
        case Some(password) => writer.writeObject(obj, encryptorBuilder.build(password.toCharArray))
        case None => writer.writeObject(obj)
      }
    }
  }

  /** Reads the first PEM object from `in` as a key pair. Unreadable input —
   *  including an encrypted pair when no password was supplied — silently
   *  yields an "empty" KeyPair whose private/public halves are null, which
   *  the `as*Key` accessors translate to None. */
  class PEMDecoder(
    in: InputStream,
    password: Option[String] = None) {

    private[this] val EmptyKeyPair = new KeyPair(null, null)

    def keypair =
      Option(new PEMParser(new InputStreamReader(in)).readObject) map {
        case priv: PrivateKeyInfo => new KeyPair(null, new JcaPEMKeyConverter().getPrivateKey(priv))
        case pub: SubjectPublicKeyInfo => new KeyPair(new JcaPEMKeyConverter().getPublicKey(pub), null)
        case pair: PEMKeyPair => new JcaPEMKeyConverter().getKeyPair(pair)
        case encrypted: PEMEncryptedKeyPair => {
          password match {
            case Some(password) => {
              val decryptor = new JcePEMDecryptorProviderBuilder().build(password.toCharArray)
              new JcaPEMKeyConverter().getKeyPair(encrypted.decryptKeyPair(decryptor))
            }
            case None => EmptyKeyPair
          }
        }
      } getOrElse (EmptyKeyPair)

    def withPassword(password: String): PEMDecoder = new PEMDecoder(in, Option(password))

    def asPrivateKey: Option[PrivateKey] = Option(keypair.getPrivate)
    def asPublicKey: Option[PublicKey] = Option(keypair.getPublic)
  }

  /** Entry points: `PEM.encode(...).to(out)` / `PEM.decode(in).asPrivateKey`. */
  object PEM {
    def encode(cert: Certificate): PEMCertificateEncoder = new PEMCertificateEncoder(cert)
    def encode(key: PublicKey): PEMPublicKeyEncoder = new PEMPublicKeyEncoder(key)
    def encode(key: PrivateKey): PEMPrivateKeyEncoder = new PEMPrivateKeyEncoder(key)
    def decode(in: InputStream): PEMDecoder = new PEMDecoder(in)
  }
}
| bdimmick/cruciform | src/main/scala/com/hexagrammatic/cruciform/Codecs.scala | Scala | apache-2.0 | 3,469 |
package com.twitter.util
import scala.util.control.NonFatal
import scala.util.{Success, Failure}
/**
 * The Try type represents a computation that may either result in an exception
 * or return a success value. It is analogous to the Either type but encodes
 * common idioms for handling exceptional cases (such as rescue/ensure which
 * is analogous to try/finally).
 */
object Try {
  /** Exception produced by `filter`/`withFilter` when the predicate fails. */
  case class PredicateDoesNotObtain() extends Exception()

  /**
   * A constant `Try` that returns `Unit`.
   */
  val Unit: Try[Unit] = Try(())

  /**
   * A constant `Try` that returns `Void`.
   */
  val Void: Try[Void] = Try(null: Void)

  /**
   * Evaluates `r`, capturing any non-fatal exception as a `Throw`.
   * Fatal errors (see `scala.util.control.NonFatal`) propagate.
   */
  def apply[R](r: => R): Try[R] = {
    try { Return(r) }
    catch {
      case NonFatal(e) => Throw(e)
    }
  }

  /**
   * Build a Try from a scala.util.Try. This does nothing
   * more than pattern match and translate Success and Failure
   * to Return and Throw respectively.
   */
  def fromScala[R](tr: scala.util.Try[R]): Try[R] =
    tr match {
      case Success(r) => Return(r)
      case Failure(e) => Throw(e)
    }

  /**
   * Like [[Try.apply]] but allows the caller to specify a handler for fatal
   * errors.
   */
  def withFatals[R](r: => R)(f: PartialFunction[Throwable, Try[R]]): Try[R] =
    try Try(r)
    catch {
      case e: Throwable if f.isDefinedAt(e) => f(e)
    }

  /**
   * Collect the results from the given Trys into a new Try. The result will be a Throw if any of
   * the argument Trys are Throws. The first Throw in the Seq is the one which is surfaced.
   * (Implemented by forcing each Try with `t()`: the first Throw re-raises its
   * exception, which the surrounding Try re-wraps.)
   */
  def collect[A](ts: Seq[Try[A]]): Try[Seq[A]] = {
    if (ts.isEmpty) Return(Seq.empty[A])
    else
      Try {
        ts map { t => t() }
      }
  }

  /**
   * Convert an [[scala.Option]] to a [[Try]].
   *
   * For users from scala, there's also the implicit class [[OrThrow]] which
   * allows
   *
   * {{{
   * import Try._
   * Option(null).orThrow { new Exception("boom!") }
   * }}}
   *
   * @param o the Option to convert to a Try
   * @param failure a function that returns the Throwable that should be
   * returned if the option is None
   */
  def orThrow[A](o: Option[A])(failure: () => Throwable): Try[A] =
    try {
      o match {
        case Some(item) => Return(item)
        case None => Throw(failure())
      }
    } catch {
      case NonFatal(e) => Throw(e)
    }

  /** Syntax wrapper backing the `opt.orThrow(...)` extension shown above. */
  implicit class OrThrow[A](val option: Option[A]) extends AnyVal {
    def orThrow(failure: => Throwable): Try[A] = Try.orThrow(option)(() => failure)
  }
}
/**
 * This class represents a computation that can succeed or fail. It has two
 * concrete implementations, Return (for success) and Throw (for failure)
 */
sealed abstract class Try[+R] {

  /**
   * Convert to a scala.util.Try
   */
  def asScala: scala.util.Try[R]

  /**
   * Returns true if the Try is a Throw, false otherwise.
   */
  def isThrow: Boolean

  /**
   * Returns true if the Try is a Return, false otherwise.
   */
  def isReturn: Boolean

  /**
   * Returns the throwable if this is a Throw, else raises IllegalStateException.
   *
   * Callers should consult isThrow() prior to calling this method to determine whether
   * or not this is a Throw.
   *
   * This method is intended for Java compatibility. Scala consumers are encouraged to
   * pattern match for Throw(t).
   */
  def throwable: Throwable

  /**
   * Returns the value from this Return or the given argument if this is a Throw.
   */
  def getOrElse[R2 >: R](default: => R2): R2 = if (isReturn) apply() else default

  /**
   * Returns the value from this Return or throws the exception if this is a Throw
   */
  def apply(): R

  /**
   * Returns the value from this Return or throws the exception if this is a Throw.
   * Alias for apply()
   */
  def get(): R = apply()

  /**
   * Applies the given function f if this is a Return.
   */
  def foreach(f: R => Unit): Unit = { onSuccess(f) }

  /**
   * Returns the given function applied to the value from this Return or returns this if this is a Throw.
   *
   * ''Note'' The gnarly type parameterization is there for Java compatibility, since Java
   * does not support higher-kinded types.
   */
  def flatMap[R2](f: R => Try[R2]): Try[R2]

  /**
   * Maps the given function to the value from this Return or returns this if this is a Throw
   */
  def map[X](f: R => X): Try[X]

  /**
   * Returns true if this Try is a Return and the predicate p returns true when
   * applied to its value.
   */
  def exists(p: R => Boolean): Boolean

  /**
   * Converts this to a Throw if the predicate does not obtain.
   */
  def filter(p: R => Boolean): Try[R]

  /**
   * Converts this to a Throw if the predicate does not obtain.
   */
  def withFilter(p: R => Boolean): Try[R]

  /**
   * Calls the exceptionHandler with the exception if this is a Throw. This is like flatMap for the exception.
   *
   * ''Note'' The gnarly type parameterization is there for Java compatibility, since Java
   * does not support higher-kinded types.
   */
  def rescue[R2 >: R](rescueException: PartialFunction[Throwable, Try[R2]]): Try[R2]

  /**
   * Calls the exceptionHandler with the exception if this is a Throw. This is like map for the exception.
   */
  def handle[R2 >: R](rescueException: PartialFunction[Throwable, R2]): Try[R2]

  /**
   * Invoked only if the computation was successful. Returns a
   * chained `this` as in `respond`.
   */
  def onSuccess(f: R => Unit): Try[R]

  /**
   * Invoke the function on the error, if the computation was
   * unsuccessful. Returns a chained `this` as in `respond`.
   */
  def onFailure(rescueException: Throwable => Unit): Try[R]

  /**
   * Invoked regardless of whether the computation completed
   * successfully or unsuccessfully. Implemented in terms of
   * `respond` so that subclasses control evaluation order. Returns a
   * chained `this` as in `respond`.
   */
  def ensure(f: => Unit): Try[R] =
    respond { _ => f }

  /**
   * Returns None if this is a Throw or a Some containing the value if this is a Return
   */
  def toOption: Option[R] = if (isReturn) Some(apply()) else None

  /**
   * Invokes the given closure when the value is available. Returns
   * another `Try[R]` that is guaranteed to be available only *after*
   * `k` has run. This enables the enforcement of invocation ordering.
   */
  def respond(k: Try[R] => Unit): Try[R] = { k(this); this }

  /**
   * Invokes the given transformation when the value is available,
   * returning the transformed value. This method is like a combination
   * of flatMap and rescue. This method is typically used for more
   * imperative control-flow than flatMap/rescue which often exploits
   * the Null Object Pattern.
   */
  def transform[R2](f: Try[R] => Try[R2]): Try[R2] = f(this)

  /**
   * Returns the given function applied to the value from this Return or returns this if this is a Throw.
   * Alias for flatMap
   */
  def andThen[R2](f: R => Try[R2]): Try[R2] = flatMap(f)

  /**
   * Flattens a nested Try one level; requires evidence that R is itself a Try.
   */
  def flatten[T](implicit ev: R <:< Try[T]): Try[T]
}
object Throw {
  // Sentinel used by Throw.rescue: `AlwaysNotApplied` is the fallback passed
  // to `applyOrElse`, so definedness and application are tested in one call.
  private val NotApplied: Throw[Nothing] = Throw[Nothing](null)
  private val AlwaysNotApplied: Any => Throw[Nothing] = scala.Function.const(NotApplied) _
}
/** The failed branch of [[Try]]: wraps the thrown exception `e`. */
final case class Throw[+R](e: Throwable) extends Try[R] {
  def asScala: scala.util.Try[R] = Failure(e)

  def isThrow: Boolean = true
  def isReturn: Boolean = false

  def throwable: Throwable = e

  /** Applies `rescueException` if it is defined at `e`; a non-fatal exception
   *  thrown by the handler itself becomes a `Throw`. Uses the shared
   *  `Throw.NotApplied` sentinel to detect "not defined" in a single
   *  `applyOrElse` call. */
  def rescue[R2 >: R](rescueException: PartialFunction[Throwable, Try[R2]]): Try[R2] = {
    try {
      val result = rescueException.applyOrElse(e, Throw.AlwaysNotApplied)
      if (result eq Throw.NotApplied) this else result
    } catch {
      case NonFatal(e2) => Throw(e2)
    }
  }

  def apply(): R = throw e

  // The casts below are safe: a Throw carries no value of type R, so
  // reinterpreting the (covariant) type parameter cannot fail at runtime.
  def flatMap[R2](f: R => Try[R2]): Throw[R2] = this.asInstanceOf[Throw[R2]]

  def flatten[T](implicit ev: R <:< Try[T]): Try[T] = this.asInstanceOf[Throw[T]]

  def map[X](f: R => X): Try[X] = this.asInstanceOf[Throw[X]]

  def cast[X]: Try[X] = this.asInstanceOf[Throw[X]]

  def exists(p: R => Boolean): Boolean = false

  def filter(p: R => Boolean): Throw[R] = this
  def withFilter(p: R => Boolean): Throw[R] = this

  def onFailure(rescueException: Throwable => Unit): Throw[R] = { rescueException(e); this }

  def onSuccess(f: R => Unit): Throw[R] = this

  def handle[R2 >: R](rescueException: PartialFunction[Throwable, R2]): Try[R2] =
    if (rescueException.isDefinedAt(e)) {
      Try(rescueException(e))
    } else {
      this
    }
}
/** Cached `Return` instances for the most common values, saving an
 *  allocation at frequent call sites. */
object Return {
  val Void: Return[Void] = Return[Void](null)
  val Unit: Return[Unit] = Return(())
  // scala.None / scala.Nil are written fully qualified: the vals defined in
  // this object would otherwise shadow them.
  val None: Return[Option[Nothing]] = Return(scala.None)
  val Nil: Return[Seq[Nothing]] = Return(scala.Nil)
  val False: Return[Boolean] = Return(false)
  val True: Return[Boolean] = Return(true)
}
/** The successful branch of [[Try]]: wraps the computed value `r`. */
final case class Return[+R](r: R) extends Try[R] {
  def asScala: scala.util.Try[R] = Success(r)

  def isThrow: Boolean = false
  def isReturn: Boolean = true

  def throwable: Throwable =
    throw new IllegalStateException("this Try is not a Throw; did you fail to check isThrow?")

  def rescue[R2 >: R](rescueException: PartialFunction[Throwable, Try[R2]]): Try[R2] =
    this

  def apply(): R = r

  // flatMap/map capture non-fatal exceptions from the user function as Throw.
  def flatMap[R2](f: R => Try[R2]): Try[R2] =
    try f(r)
    catch { case NonFatal(e) => Throw(e) }

  def flatten[T](implicit ev: R <:< Try[T]): Try[T] = r

  def map[X](f: R => X): Try[X] =
    try Return(f(r))
    catch { case NonFatal(e) => Throw(e) }

  def exists(p: R => Boolean): Boolean = p(r)

  // NOTE(review): unlike map/flatMap, a predicate that throws here propagates
  // the exception instead of producing a Throw — confirm this is intended.
  def filter(p: R => Boolean): Try[R] =
    if (p(apply())) this else Throw(new Try.PredicateDoesNotObtain)

  def withFilter(p: R => Boolean): Try[R] = filter(p)

  def onFailure(rescueException: Throwable => Unit): Try[R] = this
  def onSuccess(f: R => Unit): Try[R] = { f(r); this }

  def handle[R2 >: R](rescueException: PartialFunction[Throwable, R2]): Try[R2] = this
}
| twitter/util | util-core/src/main/scala/com/twitter/util/Try.scala | Scala | apache-2.0 | 9,810 |
package com.wordnik.swagger.jaxrs.listing
import com.wordnik.swagger.config._
import com.wordnik.swagger.reader._
import com.wordnik.swagger.core.util._
import com.wordnik.swagger.model._
import com.wordnik.swagger.core.filter._
import com.wordnik.swagger.annotations._
import com.wordnik.swagger.jaxrs._
import com.wordnik.swagger.jaxrs.config._
import org.slf4j.LoggerFactory
import java.lang.annotation.Annotation
import java.lang.reflect.Method
import javax.ws.rs.core.{ UriInfo, HttpHeaders, Context, Response, MediaType, Application, MultivaluedMap }
import javax.ws.rs.core.Response._
import javax.ws.rs._
import javax.ws.rs.ext.Provider
import javax.servlet.ServletConfig
import java.util.HashMap
import scala.collection.mutable.LinkedHashMap
import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer
/**
 * Caches the parsed Swagger api listings. Listings are kept both in a
 * per-scanner map (`caches`) and in `_cache`, which always holds the
 * listing for the most recently used scanner.
 *
 * NOTE(review): fields are mutable and unsynchronized, as in the original;
 * concurrent first requests may scan more than once — confirm acceptable.
 */
object ApiListingCache {
  private val LOGGER = LoggerFactory.getLogger(ApiListingCache.getClass)

  // Listing for the most recently used scanner; None until first successful scan.
  var _cache: Option[Map[String, ApiListing]] = None

  // Per-scanner listings so different servlet contexts do not share a cache.
  var caches: java.util.Map[Object, Option[Map[String, ApiListing]]] = new HashMap[Object, Option[Map[String, ApiListing]]]

  /**
   * Returns the (possibly cached) map of resource path -> ApiListing for the
   * scanner attached to the servlet context, scanning and parsing on a miss.
   */
  def listing(docRoot: String, app: Application, sc: ServletConfig): Option[Map[String, ApiListing]] = {
    val scanner = Option(sc).map(_.getServletContext().getAttribute("SCANNER")).orNull
    if (scanner != null) {
      _cache = caches.get(scanner)
    }
    if (_cache == null) {
      // java.util.Map.get returns null for an unknown scanner; normalize to None.
      _cache = None
    }
    if (_cache.isEmpty) {
      LOGGER.debug("loading cache")
      ClassReaders.reader.foreach { reader =>
        val classes = scanner match {
          case s: JaxrsScanner => s.classesFromContext(app, null)
          case _ => List()
        }
        // For each top level resource, parse it and look for swagger annotations.
        val listings = (for (cls <- classes) yield reader.read(docRoot, cls, ConfigFactory.config)).flatten.toList
        _cache = Some(listings.map(m => {
          // always start with "/"
          val resourcePath =
            if (m.resourcePath.startsWith("/")) m.resourcePath
            else "/" + m.resourcePath
          LOGGER.debug("adding resource path " + resourcePath)
          (resourcePath, m)
        }).toMap)
      }
    }
    // Remember the (possibly still empty) result for this scanner, as before —
    // including the scanner == null case, matching the original behavior.
    caches.put(scanner, _cache)
    if (_cache.isDefined)
      LOGGER.debug("cache has " + _cache.get.keys + " keys")
    else
      LOGGER.debug("cache is empty")
    _cache
  }

  /** Drops the most-recently-used listing; per-scanner entries are kept, as before. */
  def invalidateCache() = {
    _cache = None
  }
}
/**
 * JAX-RS resource exposing the Swagger resource listing and the per-resource
 * api declarations. Scanning and caching are delegated to [[ApiListingCache]];
 * this class only filters the cached specs per request and shapes responses.
 */
class ApiListingResource {
  private val LOGGER = LoggerFactory.getLogger(classOf[ApiListingResource])

  /**
   * Top-level resource listing: one reference per cached spec that still has
   * at least one api after filtering, sorted by declared position.
   */
  @GET
  def resourceListing (
    @Context app: Application,
    @Context sc: ServletConfig,
    @Context headers: HttpHeaders,
    @Context uriInfo: UriInfo
  ): Response = {
    // docRoot comes from the @Path annotation on the deployed (sub)class.
    val docRoot = this.getClass.getAnnotation(classOf[Path]).value
    val f = new SpecFilter
    // Filter every cached spec against the request's params/cookies/headers
    // and drop specs whose api list becomes empty.
    val listings = ApiListingCache.listing(docRoot, app, sc).map(specs => {
      (for(spec <- specs.values)
        yield f.filter(spec, FilterFactory.filter, paramsToMap(uriInfo.getQueryParameters), cookiesToMap(headers), headersToMap(headers))
      ).filter(m => m.apis.size > 0)
    })
    val references = (for(listing <- listings.getOrElse(List())) yield {
      ApiListingReference(listing.resourcePath, listing.description, listing.position)
    }).toList.sortWith(_.position < _.position)
    val config = ConfigFactory.config
    val resourceListing = ResourceListing(config.apiVersion,
      config.swaggerVersion,
      references,
      config.authorizations,
      config.info
    )
    Response.ok(resourceListing).build
  }

  /**
   * Individual api listing for a single resource path.
   * Returns 404 unless exactly one cached spec matches the requested route.
   */
  @GET
  @Path("/{route: .+}")
  def apiDeclaration (
    @PathParam("route") route: String,
    @Context app: Application,
    @Context sc: ServletConfig,
    @Context headers: HttpHeaders,
    @Context uriInfo: UriInfo
  ): Response = {
    LOGGER.debug("requested apiDeclaration for " + route)
    val docRoot = this.getClass.getAnnotation(classOf[Path]).value
    val f = new SpecFilter
    val pathPart = cleanRoute(route)
    LOGGER.debug("requested route " + pathPart)
    // Filter each cached spec, then keep only the one whose (normalized)
    // resource path equals the requested route.
    val listings = ApiListingCache.listing(docRoot, app, sc).map(specs => {
      (for(spec <- specs.values) yield {
        LOGGER.debug("inspecting path " + spec.resourcePath)
        f.filter(spec, FilterFactory.filter, paramsToMap(uriInfo.getQueryParameters), cookiesToMap(headers), headersToMap(headers))
      }).filter(m => {
        val resourcePath = m.resourcePath match {
          case e: String if(e.startsWith("/")) => e
          case e: String => "/" + e
        }
        resourcePath == pathPart
      })
    }).toList.flatten
    listings.size match {
      case 1 => Response.ok(listings(0)).build
      case _ => Response.status(404).build
    }
  }

  // ensure leading slash, remove trailing
  def cleanRoute(route: String) = {
    val cleanStart = {
      if(route.startsWith("/")) route
      else "/" + route
    }
    if(cleanStart.endsWith("/")) cleanStart.substring(0, cleanStart.length - 1)
    else cleanStart
  }

  /** Delegates to [[ApiListingCache.invalidateCache]]. */
  def invalidateCache() = {
    ApiListingCache.invalidateCache()
  }

  /** Converts a JAX-RS multivalued map to an immutable Scala Map of lists. */
  def paramsToMap(params: MultivaluedMap[String, String]): Map[String, List[String]] = {
    (for((key, list) <- params.asScala) yield (key, list.asScala.toList)).toMap
  }

  /** Extracts cookie name -> value pairs; empty map when headers is null. */
  def cookiesToMap(headers: HttpHeaders): Map[String, String] = {
    Option(headers).map(h => {
      (for((name, cookie) <- h.getCookies.asScala) yield (name, cookie.getValue)).toMap
    }).getOrElse(Map())
  }

  /** Converts request headers to a Map of header name -> list of values. */
  def headersToMap(headers: HttpHeaders): Map[String, List[String]] = {
    (for((key, values) <- headers.getRequestHeaders.asScala) yield (key, values.asScala.toList)).toMap
  }
}
| apache/servicemix-bundles | swagger-jaxrs_2.11-1.3.12/src/main/scala/com/wordnik/swagger/jaxrs/listing/ApiListing.scala | Scala | apache-2.0 | 5,809 |
package beamly.core.lang.future.extensions
import scala.concurrent.{ExecutionContext, Future}
/** Value-class enrichment over a `Future[Either[Throwable, A]]`. */
final class FutureEitherW[+A](val underlying: Future[Either[Throwable, A]]) extends AnyVal {

  /**
   * Converts to successful or failed future
   * @param executor The execution context
   * @return Future, mapping left to failed Future and right to successful Future
   */
  @inline
  def join(implicit executor: ExecutionContext): Future[A] =
    underlying.flatMap {
      case Left(error)  => Future.failed(error)
      case Right(value) => Future.successful(value)
    }
}
| beamly/beamly.core.lang | src/main/scala/beamly/core/lang/future/extensions/FutureEitherW.scala | Scala | apache-2.0 | 506 |
package hostclub
import java.io.{ File, FileWriter }
import scala.io.Source
/**
 * Persistent alias -> ip mapping stored one pair per line in ~/.hc/aliases.
 *
 * Fixes over the original: the backing Source is now closed after reading
 * (resource leak), malformed lines are skipped instead of throwing a
 * MatchError, and the in-memory cache is refreshed on every write (the
 * original `lazy val` went permanently stale after the first read, so
 * `ls`/`apply` never saw entries added via `alias`).
 */
object Aliases {
  // Cached alias map; None until first load, replaced on every successful write.
  private var cached: Option[Map[String, String]] = None

  // Loads (and memoizes) the alias file; each line is "<alias> <ip>".
  private def aliases: Map[String, String] = cached getOrElse {
    ensureExists
    val source = Source.fromFile(file)
    val loaded =
      try {
        source.getLines
          .toList
          .flatMap { line =>
            // Skip malformed lines instead of throwing a MatchError.
            line.split(" ").take(2) match {
              case Array(a, b) => Some(a -> b)
              case _ => None
            }
          }
          .toMap
      } finally {
        source.close()
      }
    cached = Some(loaded)
    loaded
  }

  // Writes the full map back to disk and refreshes the cache.
  private def serialize(current: Map[String, String]) = {
    ensureExists
    val w = new FileWriter(file)
    try {
      current.foreach {
        case (ali, ip) =>
          w.write("%s %s\\n".format(ali, ip))
      }
      w.flush
    } finally {
      w.close
    }
    cached = Some(current)
  }

  /** Adds (or replaces) a single alias and persists the whole map. */
  def alias(alias: String, ip: String) =
    serialize(aliases + (alias -> ip))

  /** Looks up the ip for an alias, if any. */
  def apply(name: String) = aliases.get(name)

  /** All aliases whose name starts with the given prefix. */
  def grep(name: String) =
    aliases.keys.filter(_.startsWith(name)).toSeq

  /** The full alias map. */
  def ls = aliases

  // Creates ~/.hc/aliases (and parent dirs) if missing.
  private def ensureExists =
    if (!file.exists) {
      file.getParentFile.mkdirs()
      file.createNewFile
    }

  private def file = new File(configdir, "aliases")

  private def configdir = new File(System.getProperty("user.home"), ".hc")
}
| softprops/hostclub | src/main/scala/aliases.scala | Scala | mit | 1,072 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.json
import com.fasterxml.jackson.core.{JsonParser, JsonToken}
/** Small helper shared by the Jackson-based JSON parsing code. */
private object JacksonUtils {
  /**
   * Advance the parser until a null or a specific token is found
   */
  def nextUntil(parser: JsonParser, stopOn: JsonToken): Boolean = {
    val token = parser.nextToken()
    // false once the stream is exhausted (null) or the stop token is reached
    token != null && token != stopOn
  }
}
| andrewor14/iolap | sql/core/src/main/scala/org/apache/spark/sql/json/JacksonUtils.scala | Scala | apache-2.0 | 1,168 |
package io.udash.web.guide.views.frontend
import io.udash._
import io.udash.css.CssView
import io.udash.web.commons.components.ForceBootstrap
import io.udash.web.guide._
import io.udash.web.guide.styles.partials.GuideStyles
import io.udash.web.guide.views.frontend.demos._
import scalatags.JsDom
// Static (parameterless) factory producing the forms-guide view for FrontendFormsState.
case object FrontendFormsViewFactory extends StaticViewFactory[FrontendFormsState.type](() => new FrontendFormsView)
/**
 * Guide page documenting Udash's two-way form bindings. Each form control is
 * presented as a (rendered demo, source snippet) pair produced by the demo
 * objects' `demoWithSnippet()`; `getTemplate` lays them out with Scalatags.
 */
class FrontendFormsView extends View with CssView {
  import JsDom.all._
  import io.udash.web.guide.Context._

  // Demo/snippet pairs, one per documented form element.
  private val (textInputDemo, textInputSnippet) = TextInputDemo.demoWithSnippet()
  private val (textAreaDemo, textAreaSnippet) = TextAreaDemo.demoWithSnippet()
  private val (checkboxDemo, checkboxSnippet) = CheckboxDemo.demoWithSnippet()
  private val (checkButtonsDemo, checkButtonsSnippet) = CheckButtonsDemo.demoWithSnippet()
  private val (radioButtonsDemo, radioButtonsSnippet) = RadioButtonsDemo.demoWithSnippet()
  private val (selectDemo, selectSnippet) = SelectDemo.demoWithSnippet()
  private val (multiSelectDemo, multiSelectSnippet) = MultiSelectDemo.demoWithSnippet()
  private val (dateTimeLocalDemo, dateTimeLocalSnippet) = DateTimeLocalDemo.demoWithSnippet()
  private val (dateDemo, dateSnippet) = DateDemo.demoWithSnippet()
  private val (timeDemo, timeSnippet) = TimeDemo.demoWithSnippet()

  // Static page content: intro, then one section per form control.
  override def getTemplate: Modifier = div(
    h2("Two-way Form Bindings"),
    p(
      "In the ", a(href := FrontendBindingsState.url)("previous"), " chapter you could read about one way properties to Scalatags templates bindings. ",
      "In this part of the guide you will learn means of binding properties to form elements."
    ),
    p("Let's briefly introduce all bindable form elements:"),
    ul(GuideStyles.defaultList)(
      li(i("Checkbox"), " - a single checkbox bound to ", i("Property[Boolean]"), "."),
      li(i("CheckButtons"), " - a group of checkboxes bound to ", i("SeqProperty[T]"), "."),
      li(i("NumberInput"), " - input accepting only numbers, bound to ", i("Property[String]"), "."),
      li(i("PasswordInput"), " - password input bound to ", i("Property[String]"), "."),
      li(i("RadioButtons"), " - a group of radio buttons bound to ", i("Property[T]"), "."),
      li(i("Select"), " - a select element bound to ", i("Property[T]"), "."),
      li(i("TextArea"), " - multiline input bound to ", i("Property[String]"), "."),
      li(i("TextInput"), " - standard input bound to ", i("Property[String]"), "."),
      li(
        i("DateTimeLocalInput"), " - date and time input bound to ", i("Property[String]"), ". ",
        "This input type is not supported by Firefox and Internet Explorer as stated in ",
        a(`href` := "https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/datetime-local", "documentation.")
      ),
      li(i("DateInput"), " - date input bound to ", i("Property[String]"), "."),
      li(i("TimeInput"), " - time input bound to ", i("Property[String]"), ".")
    ),
    h3("TextInput & NumberInput & PasswordInput"),
    p(
      "Let's start with simple input fields. ",
      "The below example presents how easily you can bind your properties to HTML input elements. ", i("TextInput"), " takes ",
      "a property which should be bound to an input and takes care of updating a field and property after every change."
    ),
    textInputSnippet,
    ForceBootstrap(textInputDemo),
    h3("TextArea"),
    p("Below you can find a similar example, this time with text areas."),
    textAreaSnippet,
    ForceBootstrap(textAreaDemo),
    h3("Checkbox"),
    p(
      "Below you can find the example of creating a single checkbox. Notice that the third property contains String, so it uses ",
      "property transformation for checkbox binding. "
    ),
    checkboxSnippet,
    ForceBootstrap(checkboxDemo),
    h3("CheckButtons"),
    p(
      "The below example shows how to create a sequence of checkboxes for a provided sequence of possible values and bind them ",
      "with a SeqProperty. The CheckButtons constructor gets ", i("SeqProperty[String]"), ", ", i("Seq[String]"), " with possible values and ",
      "a decorator method. The decorator gets ", i("Seq[(Input, String)]"), ", where the Input generates a checkbox and the String ",
      "is the bound value. This generates a Scalatags template containing the checkboxes."
    ),
    checkButtonsSnippet,
    ForceBootstrap(checkButtonsDemo),
    h3("RadioButtons"),
    p(
      "RadioButtons work very similarly to CheckButtons. The only difference is that they work with a ", i("Property"), ", ",
      "not with a ", i("SeqProperty"), ", so only one value can be selected. "
    ),
    radioButtonsSnippet,
    ForceBootstrap(radioButtonsDemo),
    h3("Select"),
    p("The HTML select element might be used in two ways: with or without multi selection. Below you can find examples of both usages."),
    selectSnippet,
    ForceBootstrap(selectDemo),
    h4("Select with multiple selected values"),
    p("Notice that the only difference is the type of the used property."),
    multiSelectSnippet,
    ForceBootstrap(multiSelectDemo),
    h3("Date and time"),
    p("Below examples show how to utilise ", i("datetime-local"), ", ", i("date"), " and ", i("time"), " input bindings. All of them are bound to String property, " +
      "so additional parsing might be required."),
    dateTimeLocalSnippet,
    ForceBootstrap(dateTimeLocalDemo),
    dateSnippet,
    ForceBootstrap(dateDemo),
    timeSnippet,
    ForceBootstrap(timeDemo),
    h2("What's next?"),
    p(
      "Now you know everything you need to start frontend development using Udash. ",
      "If you want to learn more about client-server communication, check the ",
      a(href := RpcIntroState.url)("RPC"), " chapter. ",
      "You might find ", a(href := BootstrapExtState.url)("Bootstrap Components"), " and ",
      a(href := FrontendFilesState.url)("File upload"), " interesting later on."
    )
  )
} | UdashFramework/udash-core | guide/guide/.js/src/main/scala/io/udash/web/guide/views/frontend/FrontendFormsView.scala | Scala | apache-2.0 | 5,969 |
package de.frosner.broccoli.nomad.models
import play.api.libs.json._
import play.api.libs.functional.syntax._
/** A Nomad task group: only the "Tasks" field of the API payload is modeled. */
final case class TaskGroup(tasks: Seq[Task])

object TaskGroup {
  // JSON (de)serialization: read/write the tasks from the "Tasks" path.
  implicit val taskGroupFormat: Format[TaskGroup] = {
    val tasksFormat = (__ \\ "Tasks").format[Seq[Task]]
    tasksFormat.inmap(
      (tasks: Seq[Task]) => TaskGroup(tasks),
      (taskGroup: TaskGroup) => taskGroup.tasks
    )
  }
}
| FRosner/cluster-broccoli | server/src/main/scala/de/frosner/broccoli/nomad/models/TaskGroup.scala | Scala | apache-2.0 | 360 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.avro
import java.io.{FileNotFoundException, IOException}
import java.util.Locale
import scala.collection.JavaConverters._
import org.apache.avro.Schema
import org.apache.avro.file.{DataFileReader, FileReader}
import org.apache.avro.file.DataFileConstants.{BZIP2_CODEC, DEFLATE_CODEC, SNAPPY_CODEC, XZ_CODEC, ZSTANDARD_CODEC}
import org.apache.avro.generic.{GenericDatumReader, GenericRecord}
import org.apache.avro.mapred.{AvroOutputFormat, FsInput}
import org.apache.avro.mapreduce.AvroJob
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileStatus
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.SparkException
import org.apache.spark.internal.Logging
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.avro.AvroOptions.ignoreExtensionKey
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.datasources.OutputWriterFactory
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
/**
 * Shared helpers for Spark's Avro data source: schema inference, write
 * preparation, row reading, and Avro-to-Catalyst field matching.
 */
private[sql] object AvroUtils extends Logging {
  /**
   * Infers a Catalyst schema for a set of Avro files, using the user-provided
   * Avro JSON schema when present, otherwise sampling the first readable file.
   * Throws if the resulting Avro schema does not map to a StructType.
   */
  def inferSchema(
      spark: SparkSession,
      options: Map[String, String],
      files: Seq[FileStatus]): Option[StructType] = {
    val conf = spark.sessionState.newHadoopConfWithOptions(options)
    val parsedOptions = new AvroOptions(options, conf)

    if (parsedOptions.parameters.contains(ignoreExtensionKey)) {
      logWarning(s"Option $ignoreExtensionKey is deprecated. Please use the " +
        "general data source option pathGlobFilter for filtering file names.")
    }
    // User can specify an optional avro json schema.
    val avroSchema = parsedOptions.schema
      .getOrElse {
        inferAvroSchemaFromFiles(files, conf, parsedOptions.ignoreExtension,
          spark.sessionState.conf.ignoreCorruptFiles)
      }
    SchemaConverters.toSqlType(avroSchema).dataType match {
      case t: StructType => Some(t)
      case _ => throw new RuntimeException(
        s"""Avro schema cannot be converted to a Spark SQL StructType:
           |
           |${avroSchema.toString(true)}
           |""".stripMargin)
    }
  }

  /** True when the Catalyst type (recursively) has an Avro representation. */
  def supportsDataType(dataType: DataType): Boolean = dataType match {
    case _: AnsiIntervalType => false

    case _: AtomicType => true

    case st: StructType => st.forall { f => supportsDataType(f.dataType) }

    case ArrayType(elementType, _) => supportsDataType(elementType)

    case MapType(keyType, valueType, _) =>
      supportsDataType(keyType) && supportsDataType(valueType)

    case udt: UserDefinedType[_] => supportsDataType(udt.sqlType)

    case _: NullType => true

    case _ => false
  }

  /**
   * Configures the Hadoop job for writing Avro (output schema and compression
   * codec) and returns the writer factory.
   */
  def prepareWrite(
      sqlConf: SQLConf,
      job: Job,
      options: Map[String, String],
      dataSchema: StructType): OutputWriterFactory = {
    val parsedOptions = new AvroOptions(options, job.getConfiguration)
    val outputAvroSchema: Schema = parsedOptions.schema
      .getOrElse(SchemaConverters.toAvroType(dataSchema, nullable = false,
        parsedOptions.recordName, parsedOptions.recordNamespace))

    AvroJob.setOutputKeySchema(job, outputAvroSchema)

    if (parsedOptions.compression == "uncompressed") {
      job.getConfiguration.setBoolean("mapred.output.compress", false)
    } else {
      job.getConfiguration.setBoolean("mapred.output.compress", true)
      logInfo(s"Compressing Avro output using the ${parsedOptions.compression} codec")
      val codec = parsedOptions.compression match {
        case DEFLATE_CODEC =>
          val deflateLevel = sqlConf.avroDeflateLevel
          logInfo(s"Avro compression level $deflateLevel will be used for $DEFLATE_CODEC codec.")
          job.getConfiguration.setInt(AvroOutputFormat.DEFLATE_LEVEL_KEY, deflateLevel)
          DEFLATE_CODEC
        case codec @ (SNAPPY_CODEC | BZIP2_CODEC | XZ_CODEC | ZSTANDARD_CODEC) => codec
        case unknown => throw new IllegalArgumentException(s"Invalid compression codec: $unknown")
      }
      job.getConfiguration.set(AvroJob.CONF_OUTPUT_CODEC, codec)
    }

    new AvroOutputWriterFactory(dataSchema,
      outputAvroSchema.toString,
      parsedOptions.positionalFieldMatching)
  }

  private def inferAvroSchemaFromFiles(
      files: Seq[FileStatus],
      conf: Configuration,
      ignoreExtension: Boolean,
      ignoreCorruptFiles: Boolean): Schema = {
    // Schema evolution is not supported yet. Here we only pick first random readable sample file to
    // figure out the schema of the whole dataset.
    val avroReader = files.iterator.map { f =>
      val path = f.getPath
      if (!ignoreExtension && !path.getName.endsWith(".avro")) {
        None
      } else {
        Utils.tryWithResource {
          new FsInput(path, conf)
        } { in =>
          try {
            Some(DataFileReader.openReader(in, new GenericDatumReader[GenericRecord]()))
          } catch {
            case e: IOException =>
              if (ignoreCorruptFiles) {
                logWarning(s"Skipped the footer in the corrupted file: $path", e)
                None
              } else {
                throw new SparkException(s"Could not read file: $path", e)
              }
          }
        }
      }
    }.collectFirst {
      case Some(reader) => reader
    }

    avroReader match {
      case Some(reader) =>
        try {
          reader.getSchema
        } finally {
          reader.close()
        }
      case None =>
        throw new FileNotFoundException(
          "No Avro files found. If files don't have .avro extension, set ignoreExtension to true")
    }
  }

  // The trait provides iterator-like interface for reading records from an Avro file,
  // deserializing and returning them as internal rows.
  trait RowReader {
    protected val fileReader: FileReader[GenericRecord]
    protected val deserializer: AvroDeserializer
    protected val stopPosition: Long

    private[this] var completed = false
    // Holds the next deserialized row; None either before the first read or
    // after the current row has been consumed by nextRow.
    private[this] var currentRow: Option[InternalRow] = None

    def hasNextRow: Boolean = {
      while (!completed && currentRow.isEmpty) {
        val r = fileReader.hasNext && !fileReader.pastSync(stopPosition)
        if (!r) {
          fileReader.close()
          completed = true
          currentRow = None
        } else {
          val record = fileReader.next()
          // the row must be deserialized in hasNextRow, because AvroDeserializer#deserialize
          // potentially filters rows
          currentRow = deserializer.deserialize(record).asInstanceOf[Option[InternalRow]]
        }
      }
      currentRow.isDefined
    }

    def nextRow: InternalRow = {
      if (currentRow.isEmpty) {
        hasNextRow
      }
      val returnRow = currentRow
      currentRow = None // free up hasNextRow to consume more Avro records, if not exhausted
      returnRow.getOrElse {
        throw new NoSuchElementException("next on empty iterator")
      }
    }
  }

  /** Wrapper for a pair of matched fields, one Catalyst and one corresponding Avro field. */
  case class AvroMatchedField(
      catalystField: StructField,
      catalystPosition: Int,
      avroField: Schema.Field)

  /**
   * Helper class to perform field lookup/matching on Avro schemas.
   *
   * This will match `avroSchema` against `catalystSchema`, attempting to find a matching field in
   * the Avro schema for each field in the Catalyst schema and vice-versa, respecting settings for
   * case sensitivity. The match results can be accessed using the getter methods.
   *
   * @param avroSchema The schema in which to search for fields. Must be of type RECORD.
   * @param catalystSchema The Catalyst schema to use for matching.
   * @param avroPath The seq of parent field names leading to `avroSchema`.
   * @param catalystPath The seq of parent field names leading to `catalystSchema`.
   * @param positionalFieldMatch If true, perform field matching in a positional fashion
   *                             (structural comparison between schemas, ignoring names);
   *                             otherwise, perform field matching using field names.
   */
  class AvroSchemaHelper(
      avroSchema: Schema,
      catalystSchema: StructType,
      avroPath: Seq[String],
      catalystPath: Seq[String],
      positionalFieldMatch: Boolean) {
    if (avroSchema.getType != Schema.Type.RECORD) {
      throw new IncompatibleSchemaException(
        s"Attempting to treat ${avroSchema.getName} as a RECORD, but it was: ${avroSchema.getType}")
    }

    private[this] val avroFieldArray = avroSchema.getFields.asScala.toArray
    // Avro fields grouped by lowercased name, so lookup can honor either
    // case-sensitive or case-insensitive resolution at query time.
    private[this] val fieldMap = avroSchema.getFields.asScala
      .groupBy(_.name.toLowerCase(Locale.ROOT))
      .mapValues(_.toSeq) // toSeq needed for scala 2.13

    /** The fields which have matching equivalents in both Avro and Catalyst schemas. */
    val matchedFields: Seq[AvroMatchedField] = catalystSchema.zipWithIndex.flatMap {
      case (sqlField, sqlPos) =>
        getAvroField(sqlField.name, sqlPos).map(AvroMatchedField(sqlField, sqlPos, _))
    }

    /**
     * Validate that there are no Catalyst fields which don't have a matching Avro field, throwing
     * [[IncompatibleSchemaException]] if such extra fields are found. If `ignoreNullable` is false,
     * consider nullable Catalyst fields to be eligible to be an extra field; otherwise,
     * ignore nullable Catalyst fields when checking for extras.
     */
    def validateNoExtraCatalystFields(ignoreNullable: Boolean): Unit =
      catalystSchema.zipWithIndex.foreach { case (sqlField, sqlPos) =>
        if (getAvroField(sqlField.name, sqlPos).isEmpty &&
          (!ignoreNullable || !sqlField.nullable)) {
          if (positionalFieldMatch) {
            throw new IncompatibleSchemaException("Cannot find field at position " +
              s"$sqlPos of ${toFieldStr(avroPath)} from Avro schema (using positional matching)")
          } else {
            throw new IncompatibleSchemaException(
              s"Cannot find ${toFieldStr(catalystPath :+ sqlField.name)} in Avro schema")
          }
        }
      }

    /**
     * Validate that there are no Avro fields which don't have a matching Catalyst field, throwing
     * [[IncompatibleSchemaException]] if such extra fields are found.
     */
    def validateNoExtraAvroFields(): Unit = {
      (avroFieldArray.toSet -- matchedFields.map(_.avroField)).foreach { extraField =>
        if (positionalFieldMatch) {
          throw new IncompatibleSchemaException(s"Found field '${extraField.name()}' at position " +
            s"${extraField.pos()} of ${toFieldStr(avroPath)} from Avro schema but there is no " +
            s"match in the SQL schema at ${toFieldStr(catalystPath)} (using positional matching)")
        } else {
          throw new IncompatibleSchemaException(
            s"Found ${toFieldStr(avroPath :+ extraField.name())} in Avro schema but there is no " +
              "match in the SQL schema")
        }
      }
    }

    /**
     * Extract a single field from the contained avro schema which has the desired field name,
     * performing the matching with proper case sensitivity according to SQLConf.resolver.
     *
     * @param name The name of the field to search for.
     * @return `Some(match)` if a matching Avro field is found, otherwise `None`.
     */
    private[avro] def getFieldByName(name: String): Option[Schema.Field] = {

      // get candidates, ignoring case of field name
      val candidates = fieldMap.getOrElse(name.toLowerCase(Locale.ROOT), Seq.empty)

      // search candidates, taking into account case sensitivity settings
      candidates.filter(f => SQLConf.get.resolver(f.name(), name)) match {
        case Seq(avroField) => Some(avroField)
        case Seq() => None
        case matches => throw new IncompatibleSchemaException(s"Searching for '$name' in Avro " +
          s"schema at ${toFieldStr(avroPath)} gave ${matches.size} matches. Candidates: " +
          matches.map(_.name()).mkString("[", ", ", "]")
        )
      }
    }

    /** Get the Avro field corresponding to the provided Catalyst field name/position, if any. */
    def getAvroField(fieldName: String, catalystPos: Int): Option[Schema.Field] = {
      if (positionalFieldMatch) {
        avroFieldArray.lift(catalystPos)
      } else {
        getFieldByName(fieldName)
      }
    }
  }

  /**
   * Convert a sequence of hierarchical field names (like `Seq(foo, bar)`) into a human-readable
   * string representing the field, like "field 'foo.bar'". If `names` is empty, the string
   * "top-level record" is returned.
   */
  private[avro] def toFieldStr(names: Seq[String]): String = names match {
    case Seq() => "top-level record"
    case n => s"field '${n.mkString(".")}'"
  }
}
| chuckchen/spark | external/avro/src/main/scala/org/apache/spark/sql/avro/AvroUtils.scala | Scala | apache-2.0 | 13,539 |
/**
* Copyright (C) 2017 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.cache
import net.sf.ehcache
import net.sf.ehcache.CacheManager
import org.orbeon.oxf.common.OXFException
import org.orbeon.oxf.resources.URLFactory
import org.slf4j.LoggerFactory
import scala.util.control.NonFatal
// Lazily initializes the Ehcache CacheManager from oxf:/config/ehcache.xml
// (initialization happens on first access of Private.cacheManager) and
// provides fail-fast lookup of named caches.
object Caches {

  import Private._

  val Logger = LoggerFactory.getLogger("org.orbeon.caches")

  // Returns the configured cache or throws OXFException when the name is not
  // declared in ehcache.xml (getCache returns null, which skips the type case).
  def getOrElseThrow(cacheName: String): ehcache.Cache =
    cacheManager.getCache(cacheName) match {
      case cache: ehcache.Cache ⇒
        withMessage(cache, s"found cache configuration for `$cacheName`")
      case _ ⇒
        throw new OXFException(s"Cache configuration not found for `$cacheName`. Make sure `$EhcachePath` exists.")
    }

  private object Private {

    val EhcachePath = "oxf:/config/ehcache.xml"

    // Built once; configuration read failures are wrapped in OXFException.
    val cacheManager =
      withMessage(
        try new CacheManager(URLFactory.createURL(EhcachePath))
        catch {
          case NonFatal(t) ⇒
            throw new OXFException(s"unable to read cache manager configuration from `$EhcachePath`", t)
        },
        s"initialized cache manager from `$EhcachePath`"
      )

    // Logs the message and returns the value unchanged.
    def withMessage[T](t: T, message: String) = { Logger.debug(message); t }
  }
} | brunobuzzi/orbeon-forms | src/main/scala/org/orbeon/oxf/cache/Caches.scala | Scala | lgpl-2.1 | 1,845 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.`trait`
import org.apache.flink.table.plan.util.{FlinkRelOptUtil, RelFieldCollationUtil}
import com.google.common.collect.{ImmutableList, Ordering}
import org.apache.calcite.plan.{RelMultipleTrait, RelOptPlanner, RelTrait}
import org.apache.calcite.rel.RelDistribution.Type
import org.apache.calcite.rel.{RelDistribution, RelFieldCollation}
import org.apache.calcite.util.mapping.Mappings
import org.apache.calcite.util.{ImmutableIntList, Util}
import java.util
import scala.collection.JavaConversions._
/**
* Description of the physical distribution of a relational expression.
* See [[RelDistribution]] for more details.
*
* NOTE: it's intended to have a private constructor for this class.
*/
class FlinkRelDistribution private(
private val distributionType: RelDistribution.Type,
private val keys: ImmutableIntList,
private val fieldCollations: Option[ImmutableList[RelFieldCollation]] = None,
val requireStrict: Boolean = true)
extends RelDistribution {
require((distributionType == Type.HASH_DISTRIBUTED)
|| (distributionType == Type.RANGE_DISTRIBUTED)
|| keys.isEmpty)
require((distributionType != Type.RANGE_DISTRIBUTED) || fieldCollations.nonEmpty)
private val ORDERING = Ordering.natural[Integer].lexicographical[Integer]
def getFieldCollations: Option[ImmutableList[RelFieldCollation]] = fieldCollations
override def getKeys: ImmutableIntList = keys
override def getType: RelDistribution.Type = distributionType
override def getTraitDef: FlinkRelDistributionTraitDef = FlinkRelDistributionTraitDef.INSTANCE
override def satisfies(relTrait: RelTrait): Boolean = relTrait match {
case other: FlinkRelDistribution =>
if (this == other || other.getType == Type.ANY) {
true
} else if (distributionType == other.distributionType) {
if (distributionType == Type.HASH_DISTRIBUTED) {
if (other.requireStrict) {
// Join and union require strict satisfy.
// First: Hash[x] does not satisfy Hash[x, y],
// See https://issues.apache.org/jira/browse/DRILL-1102 for more details
// Second: Hash[x, y] does not satisfy Hash[y, x].
this == other
} else {
// Agg does not need require strict satisfy.
// First: Hash[x] satisfy Hash[x, y]
// Second: Hash[x, y] satisfy Hash[y, x]
other.keys.containsAll(keys)
}
} else if (distributionType == Type.RANGE_DISTRIBUTED) {
Util.startsWith(other.fieldCollations.get, fieldCollations.get)
} else {
true
}
} else if (other.distributionType == Type.RANDOM_DISTRIBUTED) {
// RANDOM is satisfied by HASH, ROUND-ROBIN, RANDOM, RANGE;
distributionType == Type.HASH_DISTRIBUTED ||
distributionType == Type.ROUND_ROBIN_DISTRIBUTED ||
distributionType == Type.RANGE_DISTRIBUTED
} else {
false
}
case _ => false
}
override def apply(mapping: Mappings.TargetMapping): FlinkRelDistribution = {
if (distributionType == Type.HASH_DISTRIBUTED) {
val newKeys = new util.ArrayList[Integer]
keys.foreach { key =>
try {
val i = mapping.getTargetOpt(key)
if (i >= 0) {
newKeys.add(i)
} else {
return FlinkRelDistribution.ANY
}
} catch {
case _: IndexOutOfBoundsException => return FlinkRelDistribution.ANY
}
}
FlinkRelDistribution.hash(newKeys, requireStrict)
} else if (distributionType == Type.RANGE_DISTRIBUTED) {
val newFieldCollations = new util.ArrayList[RelFieldCollation]
fieldCollations.get.foreach { fieldCollation =>
try {
val i = mapping.getTargetOpt(fieldCollation.getFieldIndex)
if (i >= 0) {
newFieldCollations.add(fieldCollation.copy(i))
} else {
return FlinkRelDistribution.ANY
}
} catch {
case _: IndexOutOfBoundsException => return FlinkRelDistribution.ANY
}
}
FlinkRelDistribution.range(newFieldCollations)
} else {
this
}
}
override def register(planner: RelOptPlanner): Unit = {}
def canEqual(other: Any): Boolean = other.isInstanceOf[FlinkRelDistribution]
override def equals(other: Any): Boolean = other match {
case that: FlinkRelDistribution => (that canEqual this) &&
distributionType == that.distributionType &&
keys == that.keys && fieldCollations == that.fieldCollations &&
requireStrict == that.requireStrict
case _ => false
}
override def hashCode(): Int = {
val state = Seq(distributionType, keys, fieldCollations, requireStrict)
state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
}
override def toString: String = {
if (keys.isEmpty) {
distributionType.shortName
} else if (fieldCollations.nonEmpty) {
distributionType.shortName + fieldCollations.get.asList + requireStrict
} else {
distributionType.shortName + keys + requireStrict
}
}
override def isTop: Boolean = distributionType == Type.ANY
  // Defines a deterministic total order between RelMultipleTraits; only determinism matters
  // here, not any semantic ranking of distributions.
  override def compareTo(o: RelMultipleTrait): Int = o match {
    case other: FlinkRelDistribution =>
      if (this.equals(other)) {
        0
      } else if (distributionType == other.distributionType) {
        if (distributionType == Type.HASH_DISTRIBUTED) {
          // Hash: compare the sorted key lists so that key order does not affect the result.
          ORDERING.compare(
            Ordering.natural().sortedCopy(keys),
            Ordering.natural().sortedCopy(other.keys))
        } else if (distributionType == Type.RANGE_DISTRIBUTED) {
          // Range: lexicographic comparison of the collations' string forms; on a tie over
          // the common prefix, the shorter collation list sorts first.
          val collations1 = fieldCollations.get.asList()
          val collations2 = other.fieldCollations.get.asList()
          for (i <- 0 until math.min(collations1.size(), collations2.size())) {
            val c = collations1.get(i).toString.compareTo(collations2.get(i).toString)
            if (c != 0) {
              return c
            }
          }
          if (collations1.size() == collations2.size()) {
            0
          } else {
            if (collations1.size() > collations2.size()) 1 else -1
          }
        } else {
          // Same key-less distribution type but unequal (e.g. differing requireStrict):
          // treated as order-equivalent.
          0
        }
      } else {
        // Different types: order by the enum's natural order.
        distributionType.compareTo(other.getType)
      }
    // Non-FlinkRelDistribution traits always sort after this one.
    case _ => -1
  }
}
object FlinkRelDistribution {
  // Shared empty key list for the key-less singleton distributions below.
  private val EMPTY: ImmutableIntList = ImmutableIntList.of
  /** The singleton instance of the ANY distribution (no constraint). */
  val ANY = new FlinkRelDistribution(RelDistribution.Type.ANY, EMPTY)
  /** Default trait value; same instance as [[ANY]]. */
  val DEFAULT: FlinkRelDistribution = ANY
  /** The singleton instance of the SINGLETON distribution. */
  val SINGLETON = new FlinkRelDistribution(RelDistribution.Type.SINGLETON, EMPTY)
  /** The singleton instance of the broadcast distribution. */
  val BROADCAST_DISTRIBUTED = new FlinkRelDistribution(
    RelDistribution.Type.BROADCAST_DISTRIBUTED, EMPTY)
  /** The singleton instance of the random distribution. */
  val RANDOM_DISTRIBUTED: RelDistribution = new FlinkRelDistribution(
    RelDistribution.Type.RANDOM_DISTRIBUTED, EMPTY)
  /** The singleton instance of the round-robin distribution. */
  val ROUND_ROBIN_DISTRIBUTED: RelDistribution = new FlinkRelDistribution(
    RelDistribution.Type.ROUND_ROBIN_DISTRIBUTED, EMPTY)
  /**
   * Creates a hash distribution on the given field indices.
   *
   * @param columns       field indices to hash on
   * @param requireStrict when true, `satisfies` demands exactly these keys (see the
   *                      satisfies logic in the class); defaults to true
   */
  def hash(
      columns: util.Collection[_ <: Number],
      requireStrict: Boolean = true): FlinkRelDistribution = {
    val list = ImmutableIntList.copyOf(columns)
    canonize(new FlinkRelDistribution(Type.HASH_DISTRIBUTED, list, requireStrict = requireStrict))
  }
  /** Creates a range distribution. */
  def range(collations: util.List[RelFieldCollation]): FlinkRelDistribution = {
    // The key list is derived from the collations' field indices.
    val keys = collations.map(i => Integer.valueOf(i.getFieldIndex)).toList
    val fieldCollations = ImmutableList.copyOf[RelFieldCollation](collations)
    canonize(new FlinkRelDistribution(RelDistribution.Type.RANGE_DISTRIBUTED,
      ImmutableIntList.copyOf(keys: _*), Some(fieldCollations)))
  }
  // Varargs convenience overload; the Seq argument resolves to the util.List overload,
  // presumably via the Java/Scala collection conversions imported by this file — confirm.
  def range(collations: RelFieldCollation*): FlinkRelDistribution = range(collations)
  /** Creates a range distribution from bare field indices, using a default collation each. */
  def range(columns: util.Collection[_ <: Number]): FlinkRelDistribution = {
    val keys = ImmutableIntList.copyOf(columns)
    val collations = new util.ArrayList[RelFieldCollation]()
    columns.foreach(f => collations.add(RelFieldCollationUtil.of(f.intValue())))
    val fieldCollations = ImmutableList.copyOf[RelFieldCollation](collations)
    canonize(new FlinkRelDistribution(
      RelDistribution.Type.RANGE_DISTRIBUTED, keys, Some(fieldCollations)))
  }
  /**
   * NOTE: All creation of FlinkRelDistribution should be canonized through the trait def,
   * so the planner can rely on interned trait instances (Calcite canonizes traits for
   * identity-based comparison).
   */
  private def canonize(in: FlinkRelDistribution): FlinkRelDistribution = {
    FlinkRelDistributionTraitDef.INSTANCE.canonize(in)
  }
}
| ueshin/apache-flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/trait/FlinkRelDistribution.scala | Scala | apache-2.0 | 9,521 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.joins
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.BindReferences.bindReferences
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.expressions.codegen.Block._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics}
import org.apache.spark.util.collection.BitSet
/**
* Performs a sort merge join of two child relations.
*/
case class SortMergeJoinExec(
    leftKeys: Seq[Expression],
    rightKeys: Seq[Expression],
    joinType: JoinType,
    condition: Option[Expression],
    left: SparkPlan,
    right: SparkPlan) extends BinaryExecNode with CodegenSupport {
  override lazy val metrics = Map(
    "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"))
  // One-line node description (node name, join type, operator id) for EXPLAIN output.
  override def simpleStringWithNodeId(): String = {
    val opId = ExplainUtils.getOpId(this)
    s"$nodeName $joinType ($opId)".trim
  }
  // Verbose multi-line description listing both key sets and the extra join condition.
  override def verboseStringWithOperatorId(): String = {
    val joinCondStr = if (condition.isDefined) {
      s"${condition.get}"
    } else "None"
    s"""
       |(${ExplainUtils.getOpId(this)}) $nodeName ${ExplainUtils.getCodegenId(this)}
       |Left keys : ${leftKeys}
       |Right keys: ${rightKeys}
       |Join condition : ${joinCondStr}
     """.stripMargin
  }
  // Output schema per join type; the side that may emit null rows is marked nullable.
  override def output: Seq[Attribute] = {
    joinType match {
      case _: InnerLike =>
        left.output ++ right.output
      case LeftOuter =>
        left.output ++ right.output.map(_.withNullability(true))
      case RightOuter =>
        left.output.map(_.withNullability(true)) ++ right.output
      case FullOuter =>
        (left.output ++ right.output).map(_.withNullability(true))
      case j: ExistenceJoin =>
        left.output :+ j.exists
      case LeftExistence(_) =>
        left.output
      case x =>
        throw new IllegalArgumentException(
          s"${getClass.getSimpleName} should not take $x as the JoinType")
    }
  }
  override def outputPartitioning: Partitioning = joinType match {
    case _: InnerLike =>
      PartitioningCollection(Seq(left.outputPartitioning, right.outputPartitioning))
    // For left and right outer joins, the output is partitioned by the streamed input's join keys.
    case LeftOuter => left.outputPartitioning
    case RightOuter => right.outputPartitioning
    case FullOuter => UnknownPartitioning(left.outputPartitioning.numPartitions)
    case LeftExistence(_) => left.outputPartitioning
    case x =>
      throw new IllegalArgumentException(
        s"${getClass.getSimpleName} should not take $x as the JoinType")
  }
  // Both children must be hash-clustered on their respective join keys so matching keys
  // land in the same partition pair.
  override def requiredChildDistribution: Seq[Distribution] =
    HashClusteredDistribution(leftKeys) :: HashClusteredDistribution(rightKeys) :: Nil
  override def outputOrdering: Seq[SortOrder] = joinType match {
    // For inner join, orders of both sides keys should be kept.
    case _: InnerLike =>
      val leftKeyOrdering = getKeyOrdering(leftKeys, left.outputOrdering)
      val rightKeyOrdering = getKeyOrdering(rightKeys, right.outputOrdering)
      leftKeyOrdering.zip(rightKeyOrdering).map { case (lKey, rKey) =>
        // Also add the right key and its `sameOrderExpressions`
        SortOrder(lKey.child, Ascending, lKey.sameOrderExpressions + rKey.child ++ rKey
          .sameOrderExpressions)
      }
    // For left and right outer joins, the output is ordered by the streamed input's join keys.
    case LeftOuter => getKeyOrdering(leftKeys, left.outputOrdering)
    case RightOuter => getKeyOrdering(rightKeys, right.outputOrdering)
    // There are null rows in both streams, so there is no order.
    case FullOuter => Nil
    case LeftExistence(_) => getKeyOrdering(leftKeys, left.outputOrdering)
    case x =>
      throw new IllegalArgumentException(
        s"${getClass.getSimpleName} should not take $x as the JoinType")
  }
  /**
   * The utility method to get output ordering for left or right side of the join.
   *
   * Returns the required ordering for left or right child if childOutputOrdering does not
   * satisfy the required ordering; otherwise, which means the child does not need to be sorted
   * again, returns the required ordering for this child with extra "sameOrderExpressions" from
   * the child's outputOrdering.
   */
  private def getKeyOrdering(keys: Seq[Expression], childOutputOrdering: Seq[SortOrder])
    : Seq[SortOrder] = {
    val requiredOrdering = requiredOrders(keys)
    if (SortOrder.orderingSatisfies(childOutputOrdering, requiredOrdering)) {
      keys.zip(childOutputOrdering).map { case (key, childOrder) =>
        SortOrder(key, Ascending, childOrder.sameOrderExpressions + childOrder.child - key)
      }
    } else {
      requiredOrdering
    }
  }
  // Each child must arrive sorted ascending on its join keys.
  override def requiredChildOrdering: Seq[Seq[SortOrder]] =
    requiredOrders(leftKeys) :: requiredOrders(rightKeys) :: Nil
  private def requiredOrders(keys: Seq[Expression]): Seq[SortOrder] = {
    // This must be ascending in order to agree with the `keyOrdering` defined in `doExecute()`.
    keys.map(SortOrder(_, Ascending))
  }
  // Projections extracting the join keys from a left-/right-side row.
  private def createLeftKeyGenerator(): Projection =
    UnsafeProjection.create(leftKeys, left.output)
  private def createRightKeyGenerator(): Projection =
    UnsafeProjection.create(rightKeys, right.output)
  // Row-count threshold beyond which the match buffer spills to disk.
  private def getSpillThreshold: Int = {
    sqlContext.conf.sortMergeJoinExecBufferSpillThreshold
  }
  // Row count the match buffer is guaranteed to keep in memory.
  private def getInMemoryThreshold: Int = {
    sqlContext.conf.sortMergeJoinExecBufferInMemoryThreshold
  }
  // Interpreted (non-codegen) execution: zips the sorted partitions of both children and
  // drives a SortMergeJoinScanner per join type.
  protected override def doExecute(): RDD[InternalRow] = {
    val numOutputRows = longMetric("numOutputRows")
    val spillThreshold = getSpillThreshold
    val inMemoryThreshold = getInMemoryThreshold
    left.execute().zipPartitions(right.execute()) { (leftIter, rightIter) =>
      val boundCondition: (InternalRow) => Boolean = {
        condition.map { cond =>
          newPredicate(cond, left.output ++ right.output).eval _
        }.getOrElse {
          (r: InternalRow) => true
        }
      }
      // An ordering that can be used to compare keys from both sides.
      val keyOrdering = newNaturalAscendingOrdering(leftKeys.map(_.dataType))
      val resultProj: InternalRow => InternalRow = UnsafeProjection.create(output, output)
      joinType match {
        // Inner: emit one row per (streamed row, buffered match) pair passing the condition.
        case _: InnerLike =>
          new RowIterator {
            private[this] var currentLeftRow: InternalRow = _
            private[this] var currentRightMatches: ExternalAppendOnlyUnsafeRowArray = _
            private[this] var rightMatchesIterator: Iterator[UnsafeRow] = null
            private[this] val smjScanner = new SortMergeJoinScanner(
              createLeftKeyGenerator(),
              createRightKeyGenerator(),
              keyOrdering,
              RowIterator.fromScala(leftIter),
              RowIterator.fromScala(rightIter),
              inMemoryThreshold,
              spillThreshold,
              cleanupResources
            )
            private[this] val joinRow = new JoinedRow
            if (smjScanner.findNextInnerJoinRows()) {
              currentRightMatches = smjScanner.getBufferedMatches
              currentLeftRow = smjScanner.getStreamedRow
              rightMatchesIterator = currentRightMatches.generateIterator()
            }
            override def advanceNext(): Boolean = {
              while (rightMatchesIterator != null) {
                if (!rightMatchesIterator.hasNext) {
                  if (smjScanner.findNextInnerJoinRows()) {
                    currentRightMatches = smjScanner.getBufferedMatches
                    currentLeftRow = smjScanner.getStreamedRow
                    rightMatchesIterator = currentRightMatches.generateIterator()
                  } else {
                    currentRightMatches = null
                    currentLeftRow = null
                    rightMatchesIterator = null
                    return false
                  }
                }
                joinRow(currentLeftRow, rightMatchesIterator.next())
                if (boundCondition(joinRow)) {
                  numOutputRows += 1
                  return true
                }
              }
              false
            }
            override def getRow: InternalRow = resultProj(joinRow)
          }.toScala
        // Left outer: stream the left side, pad with a right null row when no match survives.
        case LeftOuter =>
          val smjScanner = new SortMergeJoinScanner(
            streamedKeyGenerator = createLeftKeyGenerator(),
            bufferedKeyGenerator = createRightKeyGenerator(),
            keyOrdering,
            streamedIter = RowIterator.fromScala(leftIter),
            bufferedIter = RowIterator.fromScala(rightIter),
            inMemoryThreshold,
            spillThreshold,
            cleanupResources
          )
          val rightNullRow = new GenericInternalRow(right.output.length)
          new LeftOuterIterator(
            smjScanner, rightNullRow, boundCondition, resultProj, numOutputRows).toScala
        // Right outer: same as left outer with the sides swapped (right side streamed).
        case RightOuter =>
          val smjScanner = new SortMergeJoinScanner(
            streamedKeyGenerator = createRightKeyGenerator(),
            bufferedKeyGenerator = createLeftKeyGenerator(),
            keyOrdering,
            streamedIter = RowIterator.fromScala(rightIter),
            bufferedIter = RowIterator.fromScala(leftIter),
            inMemoryThreshold,
            spillThreshold,
            cleanupResources
          )
          val leftNullRow = new GenericInternalRow(left.output.length)
          new RightOuterIterator(
            smjScanner, leftNullRow, boundCondition, resultProj, numOutputRows).toScala
        // Full outer: dedicated scanner that pads unmatched rows from either side.
        case FullOuter =>
          val leftNullRow = new GenericInternalRow(left.output.length)
          val rightNullRow = new GenericInternalRow(right.output.length)
          val smjScanner = new SortMergeFullOuterJoinScanner(
            leftKeyGenerator = createLeftKeyGenerator(),
            rightKeyGenerator = createRightKeyGenerator(),
            keyOrdering,
            leftIter = RowIterator.fromScala(leftIter),
            rightIter = RowIterator.fromScala(rightIter),
            boundCondition,
            leftNullRow,
            rightNullRow)
          new FullOuterIterator(
            smjScanner,
            resultProj,
            numOutputRows).toScala
        // Left semi: emit the left row once as soon as any buffered match passes the condition.
        case LeftSemi =>
          new RowIterator {
            private[this] var currentLeftRow: InternalRow = _
            private[this] val smjScanner = new SortMergeJoinScanner(
              createLeftKeyGenerator(),
              createRightKeyGenerator(),
              keyOrdering,
              RowIterator.fromScala(leftIter),
              RowIterator.fromScala(rightIter),
              inMemoryThreshold,
              spillThreshold,
              cleanupResources
            )
            private[this] val joinRow = new JoinedRow
            override def advanceNext(): Boolean = {
              while (smjScanner.findNextInnerJoinRows()) {
                val currentRightMatches = smjScanner.getBufferedMatches
                currentLeftRow = smjScanner.getStreamedRow
                if (currentRightMatches != null && currentRightMatches.length > 0) {
                  val rightMatchesIterator = currentRightMatches.generateIterator()
                  while (rightMatchesIterator.hasNext) {
                    joinRow(currentLeftRow, rightMatchesIterator.next())
                    if (boundCondition(joinRow)) {
                      numOutputRows += 1
                      return true
                    }
                  }
                }
              }
              false
            }
            override def getRow: InternalRow = currentLeftRow
          }.toScala
        // Left anti: emit the left row only when no buffered match passes the condition.
        case LeftAnti =>
          new RowIterator {
            private[this] var currentLeftRow: InternalRow = _
            private[this] val smjScanner = new SortMergeJoinScanner(
              createLeftKeyGenerator(),
              createRightKeyGenerator(),
              keyOrdering,
              RowIterator.fromScala(leftIter),
              RowIterator.fromScala(rightIter),
              inMemoryThreshold,
              spillThreshold,
              cleanupResources
            )
            private[this] val joinRow = new JoinedRow
            override def advanceNext(): Boolean = {
              while (smjScanner.findNextOuterJoinRows()) {
                currentLeftRow = smjScanner.getStreamedRow
                val currentRightMatches = smjScanner.getBufferedMatches
                if (currentRightMatches == null || currentRightMatches.length == 0) {
                  numOutputRows += 1
                  return true
                }
                var found = false
                val rightMatchesIterator = currentRightMatches.generateIterator()
                while (!found && rightMatchesIterator.hasNext) {
                  joinRow(currentLeftRow, rightMatchesIterator.next())
                  if (boundCondition(joinRow)) {
                    found = true
                  }
                }
                if (!found) {
                  numOutputRows += 1
                  return true
                }
              }
              false
            }
            override def getRow: InternalRow = currentLeftRow
          }.toScala
        // Existence join: emit every left row plus a boolean "exists" column.
        case j: ExistenceJoin =>
          new RowIterator {
            private[this] var currentLeftRow: InternalRow = _
            private[this] val result: InternalRow = new GenericInternalRow(Array[Any](null))
            private[this] val smjScanner = new SortMergeJoinScanner(
              createLeftKeyGenerator(),
              createRightKeyGenerator(),
              keyOrdering,
              RowIterator.fromScala(leftIter),
              RowIterator.fromScala(rightIter),
              inMemoryThreshold,
              spillThreshold,
              cleanupResources
            )
            private[this] val joinRow = new JoinedRow
            override def advanceNext(): Boolean = {
              while (smjScanner.findNextOuterJoinRows()) {
                currentLeftRow = smjScanner.getStreamedRow
                val currentRightMatches = smjScanner.getBufferedMatches
                var found = false
                if (currentRightMatches != null && currentRightMatches.length > 0) {
                  val rightMatchesIterator = currentRightMatches.generateIterator()
                  while (!found && rightMatchesIterator.hasNext) {
                    joinRow(currentLeftRow, rightMatchesIterator.next())
                    if (boundCondition(joinRow)) {
                      found = true
                    }
                  }
                }
                result.setBoolean(0, found)
                numOutputRows += 1
                return true
              }
              false
            }
            override def getRow: InternalRow = resultProj(joinRow(currentLeftRow, result))
          }.toScala
        case x =>
          throw new IllegalArgumentException(
            s"SortMergeJoin should not take $x as the JoinType")
      }
    }
  }
  // Whole-stage codegen is only implemented here for inner-like joins.
  override def supportCodegen: Boolean = {
    joinType.isInstanceOf[InnerLike]
  }
  override def inputRDDs(): Seq[RDD[InternalRow]] = {
    left.execute() :: right.execute() :: Nil
  }
  // Generates codegen expressions that evaluate the join keys against `row`.
  private def createJoinKey(
      ctx: CodegenContext,
      row: String,
      keys: Seq[Expression],
      input: Seq[Attribute]): Seq[ExprCode] = {
    ctx.INPUT_ROW = row
    ctx.currentVars = null
    bindReferences(keys, input).map(_.genCode(ctx))
  }
  // Copies key values into buffered (class-member) state so they survive across calls.
  private def copyKeys(ctx: CodegenContext, vars: Seq[ExprCode]): Seq[ExprCode] = {
    vars.zipWithIndex.map { case (ev, i) =>
      ctx.addBufferedState(leftKeys(i).dataType, "value", ev.value)
    }
  }
  // Emits Java code comparing key tuples `a` and `b` field by field into `comp`.
  private def genComparison(ctx: CodegenContext, a: Seq[ExprCode], b: Seq[ExprCode]): String = {
    val comparisons = a.zip(b).zipWithIndex.map { case ((l, r), i) =>
      s"""
         |if (comp == 0) {
         |  comp = ${ctx.genComp(leftKeys(i).dataType, l.value, r.value)};
         |}
       """.stripMargin.trim
    }
    s"""
       |comp = 0;
       |${comparisons.mkString("\\n")}
     """.stripMargin
  }
  /**
   * Generate a function to scan both left and right to find a match, returns the term for
   * matched one row from left side and buffered rows from right side.
   */
  private def genScanner(ctx: CodegenContext): (String, String) = {
    // Create class member for next row from both sides.
    // Inline mutable state since not many join operations in a task
    val leftRow = ctx.addMutableState("InternalRow", "leftRow", forceInline = true)
    val rightRow = ctx.addMutableState("InternalRow", "rightRow", forceInline = true)
    // Create variables for join keys from both sides.
    val leftKeyVars = createJoinKey(ctx, leftRow, leftKeys, left.output)
    val leftAnyNull = leftKeyVars.map(_.isNull).mkString(" || ")
    val rightKeyTmpVars = createJoinKey(ctx, rightRow, rightKeys, right.output)
    val rightAnyNull = rightKeyTmpVars.map(_.isNull).mkString(" || ")
    // Copy the right key as class members so they could be used in next function call.
    val rightKeyVars = copyKeys(ctx, rightKeyTmpVars)
    // A list to hold all matched rows from right side.
    val clsName = classOf[ExternalAppendOnlyUnsafeRowArray].getName
    val spillThreshold = getSpillThreshold
    val inMemoryThreshold = getInMemoryThreshold
    // Inline mutable state since not many join operations in a task
    val matches = ctx.addMutableState(clsName, "matches",
      v => s"$v = new $clsName($inMemoryThreshold, $spillThreshold);", forceInline = true)
    // Copy the left keys as class members so they could be used in next function call.
    val matchedKeyVars = copyKeys(ctx, leftKeyVars)
    ctx.addNewFunction("findNextInnerJoinRows",
      s"""
         |private boolean findNextInnerJoinRows(
         |    scala.collection.Iterator leftIter,
         |    scala.collection.Iterator rightIter) {
         |  $leftRow = null;
         |  int comp = 0;
         |  while ($leftRow == null) {
         |    if (!leftIter.hasNext()) return false;
         |    $leftRow = (InternalRow) leftIter.next();
         |    ${leftKeyVars.map(_.code).mkString("\\n")}
         |    if ($leftAnyNull) {
         |      $leftRow = null;
         |      continue;
         |    }
         |    if (!$matches.isEmpty()) {
         |      ${genComparison(ctx, leftKeyVars, matchedKeyVars)}
         |      if (comp == 0) {
         |        return true;
         |      }
         |      $matches.clear();
         |    }
         |
         |    do {
         |      if ($rightRow == null) {
         |        if (!rightIter.hasNext()) {
         |          ${matchedKeyVars.map(_.code).mkString("\\n")}
         |          return !$matches.isEmpty();
         |        }
         |        $rightRow = (InternalRow) rightIter.next();
         |        ${rightKeyTmpVars.map(_.code).mkString("\\n")}
         |        if ($rightAnyNull) {
         |          $rightRow = null;
         |          continue;
         |        }
         |        ${rightKeyVars.map(_.code).mkString("\\n")}
         |      }
         |      ${genComparison(ctx, leftKeyVars, rightKeyVars)}
         |      if (comp > 0) {
         |        $rightRow = null;
         |      } else if (comp < 0) {
         |        if (!$matches.isEmpty()) {
         |          ${matchedKeyVars.map(_.code).mkString("\\n")}
         |          return true;
         |        }
         |        $leftRow = null;
         |      } else {
         |        $matches.add((UnsafeRow) $rightRow);
         |        $rightRow = null;
         |      }
         |    } while ($leftRow != null);
         |  }
         |  return false; // unreachable
         |}
       """.stripMargin, inlineToOuterClass = true)
    (leftRow, matches)
  }
  /**
   * Creates variables and declarations for left part of result row.
   *
   * In order to defer the access after condition and also only access once in the loop,
   * the variables should be declared separately from accessing the columns, we can't use the
   * codegen of BoundReference here.
   */
  private def createLeftVars(ctx: CodegenContext, leftRow: String): (Seq[ExprCode], Seq[String]) = {
    ctx.INPUT_ROW = leftRow
    left.output.zipWithIndex.map { case (a, i) =>
      val value = ctx.freshName("value")
      val valueCode = CodeGenerator.getValue(leftRow, a.dataType, i.toString)
      val javaType = CodeGenerator.javaType(a.dataType)
      val defaultValue = CodeGenerator.defaultValue(a.dataType)
      if (a.nullable) {
        val isNull = ctx.freshName("isNull")
        val code =
          code"""
             |$isNull = $leftRow.isNullAt($i);
             |$value = $isNull ? $defaultValue : ($valueCode);
           """.stripMargin
        val leftVarsDecl =
          s"""
             |boolean $isNull = false;
             |$javaType $value = $defaultValue;
           """.stripMargin
        (ExprCode(code, JavaCode.isNullVariable(isNull), JavaCode.variable(value, a.dataType)),
          leftVarsDecl)
      } else {
        val code = code"$value = $valueCode;"
        val leftVarsDecl = s"""$javaType $value = $defaultValue;"""
        (ExprCode(code, FalseLiteral, JavaCode.variable(value, a.dataType)), leftVarsDecl)
      }
    }.unzip
  }
  /**
   * Creates the variables for right part of result row, using BoundReference, since the right
   * part are accessed inside the loop.
   */
  private def createRightVar(ctx: CodegenContext, rightRow: String): Seq[ExprCode] = {
    ctx.INPUT_ROW = rightRow
    right.output.zipWithIndex.map { case (a, i) =>
      BoundReference(i, a.dataType, a.nullable).genCode(ctx)
    }
  }
  /**
   * Splits variables based on whether it's used by condition or not, returns the code to create
   * these variables before the condition and after the condition.
   *
   * Only a few columns are used by condition, then we can skip the accessing of those columns
   * that are not used by condition also filtered out by condition.
   */
  private def splitVarsByCondition(
      attributes: Seq[Attribute],
      variables: Seq[ExprCode]): (String, String) = {
    if (condition.isDefined) {
      val condRefs = condition.get.references
      val (used, notUsed) = attributes.zip(variables).partition{ case (a, ev) =>
        condRefs.contains(a)
      }
      val beforeCond = evaluateVariables(used.map(_._2))
      val afterCond = evaluateVariables(notUsed.map(_._2))
      (beforeCond, afterCond)
    } else {
      (evaluateVariables(variables), "")
    }
  }
  // Rows in the codegen inner loop reuse the same left-side variables across emissions,
  // so downstream operators must copy each result row.
  override def needCopyResult: Boolean = true
  override def doProduce(ctx: CodegenContext): String = {
    // Inline mutable state since not many join operations in a task
    val leftInput = ctx.addMutableState("scala.collection.Iterator", "leftInput",
      v => s"$v = inputs[0];", forceInline = true)
    val rightInput = ctx.addMutableState("scala.collection.Iterator", "rightInput",
      v => s"$v = inputs[1];", forceInline = true)
    val (leftRow, matches) = genScanner(ctx)
    // Create variables for row from both sides.
    val (leftVars, leftVarDecl) = createLeftVars(ctx, leftRow)
    val rightRow = ctx.freshName("rightRow")
    val rightVars = createRightVar(ctx, rightRow)
    val iterator = ctx.freshName("iterator")
    val numOutput = metricTerm(ctx, "numOutputRows")
    val (beforeLoop, condCheck) = if (condition.isDefined) {
      // Split the code of creating variables based on whether it's used by condition or not.
      val loaded = ctx.freshName("loaded")
      val (leftBefore, leftAfter) = splitVarsByCondition(left.output, leftVars)
      val (rightBefore, rightAfter) = splitVarsByCondition(right.output, rightVars)
      // Generate code for condition
      ctx.currentVars = leftVars ++ rightVars
      val cond = BindReferences.bindReference(condition.get, output).genCode(ctx)
      // evaluate the columns those used by condition before loop
      val before = s"""
           |boolean $loaded = false;
           |$leftBefore
         """.stripMargin
      val checking = s"""
         |$rightBefore
         |${cond.code}
         |if (${cond.isNull} || !${cond.value}) continue;
         |if (!$loaded) {
         |  $loaded = true;
         |  $leftAfter
         |}
         |$rightAfter
       """.stripMargin
      (before, checking)
    } else {
      (evaluateVariables(leftVars), "")
    }
    val thisPlan = ctx.addReferenceObj("plan", this)
    val eagerCleanup = s"$thisPlan.cleanupResources();"
    s"""
       |while (findNextInnerJoinRows($leftInput, $rightInput)) {
       |  ${leftVarDecl.mkString("\\n")}
       |  ${beforeLoop.trim}
       |  scala.collection.Iterator<UnsafeRow> $iterator = $matches.generateIterator();
       |  while ($iterator.hasNext()) {
       |    InternalRow $rightRow = (InternalRow) $iterator.next();
       |    ${condCheck.trim}
       |    $numOutput.add(1);
       |    ${consume(ctx, leftVars ++ rightVars)}
       |  }
       |  if (shouldStop()) return;
       |}
       |$eagerCleanup
     """.stripMargin
  }
}
/**
* Helper class that is used to implement [[SortMergeJoinExec]].
*
* To perform an inner (outer) join, users of this class call [[findNextInnerJoinRows()]]
* ([[findNextOuterJoinRows()]]), which returns `true` if a result has been produced and `false`
* otherwise. If a result has been produced, then the caller may call [[getStreamedRow]] to return
* the matching row from the streamed input and may call [[getBufferedMatches]] to return the
* sequence of matching rows from the buffered input (in the case of an outer join, this will return
* an empty sequence if there are no matches from the buffered input). For efficiency, both of these
* methods return mutable objects which are re-used across calls to the `findNext*JoinRows()`
* methods.
*
* @param streamedKeyGenerator a projection that produces join keys from the streamed input.
* @param bufferedKeyGenerator a projection that produces join keys from the buffered input.
* @param keyOrdering an ordering which can be used to compare join keys.
* @param streamedIter an input whose rows will be streamed.
* @param bufferedIter an input whose rows will be buffered to construct sequences of rows that
* have the same join key.
* @param inMemoryThreshold Threshold for number of rows guaranteed to be held in memory by
* internal buffer
* @param spillThreshold Threshold for number of rows to be spilled by internal buffer
* @param eagerCleanupResources the eager cleanup function to be invoked when no join row found
*/
private[joins] class SortMergeJoinScanner(
streamedKeyGenerator: Projection,
bufferedKeyGenerator: Projection,
keyOrdering: Ordering[InternalRow],
streamedIter: RowIterator,
bufferedIter: RowIterator,
inMemoryThreshold: Int,
spillThreshold: Int,
eagerCleanupResources: () => Unit) {
private[this] var streamedRow: InternalRow = _
private[this] var streamedRowKey: InternalRow = _
private[this] var bufferedRow: InternalRow = _
// Note: this is guaranteed to never have any null columns:
private[this] var bufferedRowKey: InternalRow = _
/**
* The join key for the rows buffered in `bufferedMatches`, or null if `bufferedMatches` is empty
*/
private[this] var matchJoinKey: InternalRow = _
/** Buffered rows from the buffered side of the join. This is empty if there are no matches. */
private[this] val bufferedMatches =
new ExternalAppendOnlyUnsafeRowArray(inMemoryThreshold, spillThreshold)
// Initialization (note: do _not_ want to advance streamed here).
advancedBufferedToRowWithNullFreeJoinKey()
// --- Public methods ---------------------------------------------------------------------------
def getStreamedRow: InternalRow = streamedRow
def getBufferedMatches: ExternalAppendOnlyUnsafeRowArray = bufferedMatches
/**
* Advances both input iterators, stopping when we have found rows with matching join keys. If no
* join rows found, try to do the eager resources cleanup.
* @return true if matching rows have been found and false otherwise. If this returns true, then
* [[getStreamedRow]] and [[getBufferedMatches]] can be called to construct the join
* results.
*/
final def findNextInnerJoinRows(): Boolean = {
while (advancedStreamed() && streamedRowKey.anyNull) {
// Advance the streamed side of the join until we find the next row whose join key contains
// no nulls or we hit the end of the streamed iterator.
}
val found = if (streamedRow == null) {
// We have consumed the entire streamed iterator, so there can be no more matches.
matchJoinKey = null
bufferedMatches.clear()
false
} else if (matchJoinKey != null && keyOrdering.compare(streamedRowKey, matchJoinKey) == 0) {
// The new streamed row has the same join key as the previous row, so return the same matches.
true
} else if (bufferedRow == null) {
// The streamed row's join key does not match the current batch of buffered rows and there are
// no more rows to read from the buffered iterator, so there can be no more matches.
matchJoinKey = null
bufferedMatches.clear()
false
} else {
// Advance both the streamed and buffered iterators to find the next pair of matching rows.
var comp = keyOrdering.compare(streamedRowKey, bufferedRowKey)
do {
if (streamedRowKey.anyNull) {
advancedStreamed()
} else {
assert(!bufferedRowKey.anyNull)
comp = keyOrdering.compare(streamedRowKey, bufferedRowKey)
if (comp > 0) advancedBufferedToRowWithNullFreeJoinKey()
else if (comp < 0) advancedStreamed()
}
} while (streamedRow != null && bufferedRow != null && comp != 0)
if (streamedRow == null || bufferedRow == null) {
// We have either hit the end of one of the iterators, so there can be no more matches.
matchJoinKey = null
bufferedMatches.clear()
false
} else {
// The streamed row's join key matches the current buffered row's join, so walk through the
// buffered iterator to buffer the rest of the matching rows.
assert(comp == 0)
bufferMatchingRows()
true
}
}
if (!found) eagerCleanupResources()
found
}
/**
* Advances the streamed input iterator and buffers all rows from the buffered input that
* have matching keys. If no join rows found, try to do the eager resources cleanup.
* @return true if the streamed iterator returned a row, false otherwise. If this returns true,
* then [[getStreamedRow]] and [[getBufferedMatches]] can be called to produce the outer
* join results.
*/
final def findNextOuterJoinRows(): Boolean = {
val found = if (!advancedStreamed()) {
// We have consumed the entire streamed iterator, so there can be no more matches.
matchJoinKey = null
bufferedMatches.clear()
false
} else {
if (matchJoinKey != null && keyOrdering.compare(streamedRowKey, matchJoinKey) == 0) {
// Matches the current group, so do nothing.
} else {
// The streamed row does not match the current group.
matchJoinKey = null
bufferedMatches.clear()
if (bufferedRow != null && !streamedRowKey.anyNull) {
// The buffered iterator could still contain matching rows, so we'll need to walk through
// it until we either find matches or pass where they would be found.
var comp = 1
do {
comp = keyOrdering.compare(streamedRowKey, bufferedRowKey)
} while (comp > 0 && advancedBufferedToRowWithNullFreeJoinKey())
if (comp == 0) {
// We have found matches, so buffer them (this updates matchJoinKey)
bufferMatchingRows()
} else {
// We have overshot the position where the row would be found, hence no matches.
}
}
}
// If there is a streamed input then we always return true
true
}
if (!found) eagerCleanupResources()
found
}
// --- Private methods --------------------------------------------------------------------------
/**
* Advance the streamed iterator and compute the new row's join key.
* @return true if the streamed iterator returned a row and false otherwise.
*/
private def advancedStreamed(): Boolean = {
if (streamedIter.advanceNext()) {
streamedRow = streamedIter.getRow
streamedRowKey = streamedKeyGenerator(streamedRow)
true
} else {
streamedRow = null
streamedRowKey = null
false
}
}
/**
* Advance the buffered iterator until we find a row with join key that does not contain nulls.
* @return true if the buffered iterator returned a row and false otherwise.
*/
private def advancedBufferedToRowWithNullFreeJoinKey(): Boolean = {
var foundRow: Boolean = false
while (!foundRow && bufferedIter.advanceNext()) {
bufferedRow = bufferedIter.getRow
bufferedRowKey = bufferedKeyGenerator(bufferedRow)
foundRow = !bufferedRowKey.anyNull
}
if (!foundRow) {
bufferedRow = null
bufferedRowKey = null
false
} else {
true
}
}
/**
* Called when the streamed and buffered join keys match in order to buffer the matching rows.
*/
  private def bufferMatchingRows(): Unit = {
    assert(streamedRowKey != null)
    assert(!streamedRowKey.anyNull)
    assert(bufferedRowKey != null)
    assert(!bufferedRowKey.anyNull)
    assert(keyOrdering.compare(streamedRowKey, bufferedRowKey) == 0)
    // This join key may have been produced by a mutable projection, so we need to make a copy:
    matchJoinKey = streamedRowKey.copy()
    bufferedMatches.clear()
    // Consume every buffered row belonging to this key group. The loop leaves bufferedRow
    // positioned on the first row of the NEXT group (or null when the buffered side is
    // exhausted), which is the state findNextOuterJoinRows expects.
    do {
      bufferedMatches.add(bufferedRow.asInstanceOf[UnsafeRow])
      advancedBufferedToRowWithNullFreeJoinKey()
    } while (bufferedRow != null && keyOrdering.compare(streamedRowKey, bufferedRowKey) == 0)
  }
}
/**
* An iterator for outputting rows in left outer join.
*/
private class LeftOuterIterator(
    smjScanner: SortMergeJoinScanner,
    rightNullRow: InternalRow,
    boundCondition: InternalRow => Boolean,
    resultProj: InternalRow => InternalRow,
    numOutputRows: SQLMetric)
  extends OneSideOuterIterator(
    smjScanner, rightNullRow, boundCondition, resultProj, numOutputRows) {

  // In a left outer join the left side is streamed: streamed rows fill the left slot of
  // the joined row and buffered (right-side) rows fill the right slot.
  protected override def setStreamSideOutput(row: InternalRow): Unit = joinedRow.withLeft(row)
  protected override def setBufferedSideOutput(row: InternalRow): Unit = joinedRow.withRight(row)
}
/**
* An iterator for outputting rows in right outer join.
*/
private class RightOuterIterator(
    smjScanner: SortMergeJoinScanner,
    leftNullRow: InternalRow,
    boundCondition: InternalRow => Boolean,
    resultProj: InternalRow => InternalRow,
    numOutputRows: SQLMetric)
  extends OneSideOuterIterator(smjScanner, leftNullRow, boundCondition, resultProj, numOutputRows) {

  // In a right outer join the right side is streamed: streamed rows fill the right slot of
  // the joined row and buffered (left-side) rows fill the left slot.
  protected override def setStreamSideOutput(row: InternalRow): Unit = joinedRow.withRight(row)
  protected override def setBufferedSideOutput(row: InternalRow): Unit = joinedRow.withLeft(row)
}
/**
* An abstract iterator for sharing code between [[LeftOuterIterator]] and [[RightOuterIterator]].
*
* Each [[OneSideOuterIterator]] has a streamed side and a buffered side. Each row on the
* streamed side will output 0 or many rows, one for each matching row on the buffered side.
* If there are no matches, then the buffered side of the joined output will be a null row.
*
* In left outer join, the left is the streamed side and the right is the buffered side.
* In right outer join, the right is the streamed side and the left is the buffered side.
*
* @param smjScanner a scanner that streams rows and buffers any matching rows
* @param bufferedSideNullRow the default row to return when a streamed row has no matches
* @param boundCondition an additional filter condition for buffered rows
* @param resultProj how the output should be projected
* @param numOutputRows an accumulator metric for the number of rows output
*/
private abstract class OneSideOuterIterator(
    smjScanner: SortMergeJoinScanner,
    bufferedSideNullRow: InternalRow,
    boundCondition: InternalRow => Boolean,
    resultProj: InternalRow => InternalRow,
    numOutputRows: SQLMetric) extends RowIterator {

  // A row to store the joined result, reused many times
  protected[this] val joinedRow: JoinedRow = new JoinedRow()

  // Index of the buffered rows, reset to 0 whenever we advance to a new streamed row
  private[this] var rightMatchesIterator: Iterator[UnsafeRow] = null

  // This iterator is initialized lazily so there should be no matches initially
  assert(smjScanner.getBufferedMatches.length == 0)

  // Set output methods to be overridden by subclasses
  protected def setStreamSideOutput(row: InternalRow): Unit
  protected def setBufferedSideOutput(row: InternalRow): Unit

  /**
   * Advance to the next row on the stream side and populate the buffer with matches.
   * @return whether there are more rows in the stream to consume.
   */
  private def advanceStream(): Boolean = {
    // Reset so the next buffered-side scan (if any) starts from the first match.
    rightMatchesIterator = null
    if (smjScanner.findNextOuterJoinRows()) {
      setStreamSideOutput(smjScanner.getStreamedRow)
      if (smjScanner.getBufferedMatches.isEmpty) {
        // There are no matching rows in the buffer, so return the null row
        setBufferedSideOutput(bufferedSideNullRow)
      } else {
        // Find the next row in the buffer that satisfied the bound condition
        if (!advanceBufferUntilBoundConditionSatisfied()) {
          setBufferedSideOutput(bufferedSideNullRow)
        }
      }
      true
    } else {
      // Stream has been exhausted
      false
    }
  }

  /**
   * Advance to the next row in the buffer that satisfies the bound condition.
   * @return whether there is such a row in the current buffer.
   */
  private def advanceBufferUntilBoundConditionSatisfied(): Boolean = {
    var foundMatch: Boolean = false
    if (rightMatchesIterator == null) {
      rightMatchesIterator = smjScanner.getBufferedMatches.generateIterator()
    }

    while (!foundMatch && rightMatchesIterator.hasNext) {
      // Each candidate is written into joinedRow before the condition is evaluated, so a
      // successful match leaves joinedRow holding the output row.
      setBufferedSideOutput(rightMatchesIterator.next())
      foundMatch = boundCondition(joinedRow)
    }
    foundMatch
  }

  override def advanceNext(): Boolean = {
    // Order matters: first drain the remaining buffered matches for the CURRENT streamed
    // row; only when those are exhausted do we advance the stream side.
    val r = advanceBufferUntilBoundConditionSatisfied() || advanceStream()
    if (r) numOutputRows += 1
    r
  }

  override def getRow: InternalRow = resultProj(joinedRow)
}
/**
 * Helper for full outer sort-merge join. Both inputs must be sorted by the join key.
 * Rows for each matching key group are buffered on both sides and cross-checked against
 * the bound condition; unmatched rows on either side are joined with the corresponding
 * null row. Rows whose join key contains a null never match and are emitted immediately
 * joined with nulls.
 */
private class SortMergeFullOuterJoinScanner(
    leftKeyGenerator: Projection,
    rightKeyGenerator: Projection,
    keyOrdering: Ordering[InternalRow],
    leftIter: RowIterator,
    rightIter: RowIterator,
    boundCondition: InternalRow => Boolean,
    leftNullRow: InternalRow,
    rightNullRow: InternalRow) {
  private[this] val joinedRow: JoinedRow = new JoinedRow()
  private[this] var leftRow: InternalRow = _
  private[this] var leftRowKey: InternalRow = _
  private[this] var rightRow: InternalRow = _
  private[this] var rightRowKey: InternalRow = _

  // Cursors into the buffered key-group matches below.
  private[this] var leftIndex: Int = 0
  private[this] var rightIndex: Int = 0
  private[this] val leftMatches: ArrayBuffer[InternalRow] = new ArrayBuffer[InternalRow]
  private[this] val rightMatches: ArrayBuffer[InternalRow] = new ArrayBuffer[InternalRow]
  // Track which buffered rows have matched at least once, so unmatched rows can later be
  // joined with the null row exactly once.
  private[this] var leftMatched: BitSet = new BitSet(1)
  private[this] var rightMatched: BitSet = new BitSet(1)

  // Eagerly position both sides on their first row before the first advanceNext() call.
  advancedLeft()
  advancedRight()

  // --- Private methods --------------------------------------------------------------------------

  /**
   * Advance the left iterator and compute the new row's join key.
   * @return true if the left iterator returned a row and false otherwise.
   */
  private def advancedLeft(): Boolean = {
    if (leftIter.advanceNext()) {
      leftRow = leftIter.getRow
      leftRowKey = leftKeyGenerator(leftRow)
      true
    } else {
      leftRow = null
      leftRowKey = null
      false
    }
  }

  /**
   * Advance the right iterator and compute the new row's join key.
   * @return true if the right iterator returned a row and false otherwise.
   */
  private def advancedRight(): Boolean = {
    if (rightIter.advanceNext()) {
      rightRow = rightIter.getRow
      rightRowKey = rightKeyGenerator(rightRow)
      true
    } else {
      rightRow = null
      rightRowKey = null
      false
    }
  }

  /**
   * Populate the left and right buffers with rows matching the provided key.
   * This consumes rows from both iterators until their keys are different from the matching key.
   */
  private def findMatchingRows(matchingKey: InternalRow): Unit = {
    leftMatches.clear()
    rightMatches.clear()
    leftIndex = 0
    rightIndex = 0

    // Rows must be copied: the underlying iterators may reuse their row objects.
    while (leftRowKey != null && keyOrdering.compare(leftRowKey, matchingKey) == 0) {
      leftMatches += leftRow.copy()
      advancedLeft()
    }
    while (rightRowKey != null && keyOrdering.compare(rightRowKey, matchingKey) == 0) {
      rightMatches += rightRow.copy()
      advancedRight()
    }

    // Reuse the existing bitsets when large enough, otherwise grow them.
    if (leftMatches.size <= leftMatched.capacity) {
      leftMatched.clearUntil(leftMatches.size)
    } else {
      leftMatched = new BitSet(leftMatches.size)
    }
    if (rightMatches.size <= rightMatched.capacity) {
      rightMatched.clearUntil(rightMatches.size)
    } else {
      rightMatched = new BitSet(rightMatches.size)
    }
  }

  /**
   * Scan the left and right buffers for the next valid match.
   *
   * Note: this method mutates `joinedRow` to point to the latest matching rows in the buffers.
   * If a left row has no valid matches on the right, or a right row has no valid matches on the
   * left, then the row is joined with the null row and the result is considered a valid match.
   *
   * @return true if a valid match is found, false otherwise.
   */
  private def scanNextInBuffered(): Boolean = {
    while (leftIndex < leftMatches.size) {
      while (rightIndex < rightMatches.size) {
        joinedRow(leftMatches(leftIndex), rightMatches(rightIndex))
        if (boundCondition(joinedRow)) {
          leftMatched.set(leftIndex)
          rightMatched.set(rightIndex)
          rightIndex += 1
          return true
        }
        rightIndex += 1
      }
      rightIndex = 0
      if (!leftMatched.get(leftIndex)) {
        // the left row has never matched any right row, join it with null row
        joinedRow(leftMatches(leftIndex), rightNullRow)
        leftIndex += 1
        return true
      }
      leftIndex += 1
    }

    while (rightIndex < rightMatches.size) {
      if (!rightMatched.get(rightIndex)) {
        // the right row has never matched any left row, join it with null row
        joinedRow(leftNullRow, rightMatches(rightIndex))
        rightIndex += 1
        return true
      }
      rightIndex += 1
    }

    // There are no more valid matches in the left and right buffers
    false
  }

  // --- Public methods --------------------------------------------------------------------------

  /** Returns the reusable output row; callers must consume it before the next advance. */
  def getJoinedRow(): JoinedRow = joinedRow

  /**
   * Advance to the next full-outer output row, mutating `joinedRow` in place.
   * @return false only when both inputs are fully exhausted.
   */
  def advanceNext(): Boolean = {
    // If we already buffered some matching rows, use them directly
    // NOTE(review): the `<=` (rather than `<`) makes this guard effectively always true
    // once the cursors have reached the buffer sizes; scanNextInBuffered handles exhausted
    // buffers safely, so this appears harmless — confirm intent.
    if (leftIndex <= leftMatches.size || rightIndex <= rightMatches.size) {
      if (scanNextInBuffered()) {
        return true
      }
    }

    if (leftRow != null && (leftRowKey.anyNull || rightRow == null)) {
      joinedRow(leftRow.copy(), rightNullRow)
      advancedLeft()
      true
    } else if (rightRow != null && (rightRowKey.anyNull || leftRow == null)) {
      joinedRow(leftNullRow, rightRow.copy())
      advancedRight()
      true
    } else if (leftRow != null && rightRow != null) {
      // Both rows are present and neither have null values,
      // so we populate the buffers with rows matching the next key
      val comp = keyOrdering.compare(leftRowKey, rightRowKey)
      if (comp <= 0) {
        findMatchingRows(leftRowKey.copy())
      } else {
        findMatchingRows(rightRowKey.copy())
      }
      scanNextInBuffered()
      true
    } else {
      // Both iterators have been consumed
      false
    }
  }
}
/**
 * An iterator for outputting rows in full outer join. All of the matching logic lives in
 * [[SortMergeFullOuterJoinScanner]]; this class only projects the joined row and counts
 * output rows.
 */
private class FullOuterIterator(
    smjScanner: SortMergeFullOuterJoinScanner,
    resultProj: InternalRow => InternalRow,
    numRows: SQLMetric) extends RowIterator {
  // The scanner mutates this reusable row in place on every advance.
  private[this] val joinedRow: JoinedRow = smjScanner.getJoinedRow()

  override def advanceNext(): Boolean = {
    val r = smjScanner.advanceNext()
    if (r) numRows += 1
    r
  }

  override def getRow: InternalRow = resultProj(joinedRow)
}
| caneGuy/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/joins/SortMergeJoinExec.scala | Scala | apache-2.0 | 45,822 |
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.iterators
import java.util
import com.typesafe.scalalogging.LazyLogging
import org.apache.accumulo.core.data._
import org.apache.accumulo.core.iterators.{IteratorEnvironment, WrappingIterator}
/**
* This Iterator should wrap the AttributeIndexIterator. It will skip duplicate attributes to optimize
* unique queries.
*/
class UniqueAttributeIterator extends WrappingIterator with LazyLogging {

  // State captured from the most recent seek so that next() can re-seek within it.
  var seekColFamilies: util.Collection[ByteSequence] = null
  var seekInclusive: Boolean = false
  var seekRange: Range = null
  // Set when we have skipped past the end of the seek range; hasTop() then reports false.
  var outOfRange = false

  override def next() = {
    // skip to the next row key - this will skip over any duplicate attribute values
    // NOTE(review): Range.followingPrefix can return null for a row of all 0xff bytes,
    // which would make `following` start at -infinity — confirm that case cannot occur here.
    val following = new Range(Range.followingPrefix(getTopKey.getRow), true, null, false)
    // Use returnNullIfDisjoint = true: the one-argument clip throws an
    // IllegalArgumentException when the ranges do not overlap, whereas a disjoint
    // range here simply means we have run off the end of the seek range.
    val range = Option(seekRange.clip(following, true))
    range match {
      case Some(r) => super.seek(r, seekColFamilies, seekInclusive)
      case None    => outOfRange = true
    }
  }

  override def hasTop(): Boolean = !outOfRange && super.hasTop()

  override def seek(range: Range, columnFamilies: util.Collection[ByteSequence], inclusive: Boolean) {
    // keep track of the current range we're handling
    outOfRange = false
    seekRange = range
    seekColFamilies = columnFamilies
    seekInclusive = inclusive
    super.seek(range, columnFamilies, inclusive)
  }

  override def deepCopy(env: IteratorEnvironment) =
    throw new UnsupportedOperationException("UniqueAttributeIterator does not support deepCopy.")
}
| vpipkt/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/iterators/UniqueAttributeIterator.scala | Scala | apache-2.0 | 1,988 |
package pt.cnbc.wikimodels.client.record.visitor
import scala.collection.JavaConversions._
import pt.cnbc.wikimodels.client.record._
import pt.cnbc.wikimodels.dataModel._
import net.liftweb.common.{Empty, Box, Full}
import net.liftweb.common.Box._
import alexmsmartins.log.LoggerWrapper
import pt.cnbc.wikimodels.exceptions.NotImplementedException
import visitor.SBMLFromRecord.createKineticLawFrom
import pt.cnbc.wikimodels.mathparser.{AsciiMathPrettyPrinter, MathMLMatchParser, MathMLPrettyPrinter, AsciiMathParser}
import pt.cnbc.wikimodels.mathml.elements.MathMLElem
import scala.xml.Utility
/*
* Copyright (c) 2011. Alexandre Martins. All rights reserved.
*/
/**
 * Converts Lift `SBaseRecord` web-form records into the SBML data-model elements.
 * Each `createXxxFrom` converter is implicit so records can be used where elements
 * are expected. Optional record fields (Lift `Box`es) are bridged to the Java-style
 * nullable fields of the data model via `getOrElse(null)`.
 */
object SBMLFromRecord extends LoggerWrapper {
  import scala.language.implicitConversions

  /** Dispatches to the matching converter based on the record's runtime type. */
  def createSBMLElementFrom[T <: SBaseRecord[T]](er:T):Element = {
    er match {
      case mr:SBMLModelRecord => createModelFrom(mr)
      case cr:CompartmentRecord => createCompartmentFrom(cr)
      case sr:SpeciesRecord => createSpeciesFrom(sr)
      case pr:ParameterRecord => createParameterFrom(pr)
      case ct:ConstraintRecord => createConstraintFrom(ct)
      case r:ReactionRecord => createReactionFrom(r)
      case fd:FunctionDefinitionRecord => createFunctionDefinitionFrom(fd)
      case rt:ReactantRecord => createSpeciesReferenceFrom(rt)
      case pd:ProductRecord => createSpeciesReferenceFrom(pd)
      case mf:ModifierRecord => createModifierSpeciesReferenceFrom(mf)
      // NOTE(review): the trailing `null` after the throw is unreachable dead code.
      case t => throw new NotImplementedException("ERROR: Method create" + er.sbmlType + "From(_) not implemented yet"); null
    }
  }

  /** Builds an [[SBMLModel]] from its record, recursively converting the child lists. */
  implicit def createModelFrom(mr:SBMLModelRecord):SBMLModel = {
    val m = new SBMLModel()
    m.metaid = mr.metaIdO.get
    m.id = mr.idO.get
    m.name = mr.nameO.get.getOrElse(null)
    m.notes = mr.notesO.get.getOrElse(null)
    m.listOfCompartments = Set.empty ++ mr.listOfCompartmentsRec.map(createCompartmentFrom(_))
    m.listOfSpecies = Set.empty ++ mr.listOfSpeciesRec.map(createSpeciesFrom(_))
    m.listOfParameters = Set.empty ++ mr.listOfParametersRec.map(createParameterFrom(_))
    m.listOfConstraints = Set.empty ++ mr.listOfConstraintsRec.map(createConstraintFrom(_))
    m.listOfReactions = Set.empty ++ mr.listOfReactionsRec.map(createReactionFrom(_))
    //TODO - write code for the remaining lists
    m
  }

  /** Builds a [[Compartment]] from its record. */
  implicit def createCompartmentFrom(cr: CompartmentRecord):Compartment = {
    val c = new Compartment()
    c.metaid = cr.metaIdO.get
    c.id = cr.idO.get
    c.name = cr.nameO.get.getOrElse(null)
    c.notes = cr.notesO.get.getOrElse(null)
    //c.compartmentType = cr.compartmentType
    c.spatialDimensions = cr.spatialDimensions0.get.id
    c.size = cr.sizeO.get.getOrElse(null).asInstanceOf[java.lang.Double]
    //c.units = cr.units
    c.outside = cr.outsideO.get.getOrElse(null)
    c.constant = cr.constantO.get
    c
  }

  /** Builds a [[Species]] from its record. */
  implicit def createSpeciesFrom(sr:SpeciesRecord):Species = {
    val s = new Species()
    s.metaid = sr.metaIdO.get
    s.id = sr.idO.get
    s.name = sr.nameO.get.getOrElse(null)
    s.notes = sr.notesO.get.getOrElse(null)
    s.compartment= sr.compartmentO.get
    s.initialAmount = sr.initialAmountO.get.getOrElse(null).asInstanceOf[java.lang.Double]
    s.initialConcentration = sr.initialConcentrationO.get.getOrElse(null).asInstanceOf[java.lang.Double]
    //s.substanceUnits = sr.substanceUnits
    //s.hasOnlySubstanceUnits = sr.hasOnlySubstanceUnits
    s.boundaryCondition = sr.boundaryConditionO.get
    s.constant = sr.constantO.get
    s
  }

  /** Builds a [[Parameter]] from its record. */
  implicit def createParameterFrom(pr:ParameterRecord):Parameter = {
    val p = new Parameter
    p.metaid = pr.metaIdO.get
    p.id = pr.idO.get
    p.name = pr.nameO.get.getOrElse(null)
    p.notes = pr.notesO.get.getOrElse(null)
    p.value = pr.valueO.get.getOrElse(null).asInstanceOf[java.lang.Double]
    //p.units = pr.units
    p.constant = pr.constantO.get
    p
  }

  /**
   * Builds a [[FunctionDefinition]] from its record. The record stores the function body
   * as AsciiMath; it is parsed and re-rendered as MathML for the data model.
   */
  implicit def createFunctionDefinitionFrom(fdr:FunctionDefinitionRecord):FunctionDefinition = {
    val fd = new FunctionDefinition
    fd.metaid = fdr.metaIdO.get
    fd.id = fdr.idO.get
    fd.name = fdr.nameO.get.getOrElse(null)
    fd.notes = fdr.notesO.get.getOrElse(null)
    val amp = AsciiMathParser()
    // NOTE(review): ast.get throws on a parse failure — confirm the record's math field is
    // validated before this converter runs.
    val ast:amp.ParseResult[amp.MME] = amp.parseAll( amp.LambdaExpr, fdr.mathO.get )
    fd.math = MathMLPrettyPrinter.toXML(ast.get).toString()
    debug("fdr.math0.get = " + fdr.mathO.get)
    debug("fd.math = " + fd.math)
    fd
  }

  /** Builds a [[Constraint]] from its record. */
  implicit def createConstraintFrom(ctr: ConstraintRecord):Constraint = {
    val ct = new Constraint
    ct.metaid = ctr.metaIdO.get
    ct.id = ctr.idO.get
    ct.name = ctr.nameO.get.getOrElse(null)
    ct.notes = ctr.notesO.get.getOrElse(null)
    // NOTE(review): the record's math field is copied into the constraint's MESSAGE field
    // here — confirm this is intentional and not meant to be ct.math.
    ct.message = ctr.mathO.get
    ct
  }

  /** Builds a [[Reaction]] from its record, converting reactants, products and modifiers. */
  implicit def createReactionFrom(rr: ReactionRecord): Reaction = {
    val r = new Reaction()
    r.metaid = rr.metaIdO.get
    r.id = rr.idO.get
    r.name = rr.nameO.get.getOrElse(null)
    r.notes = rr.notesO.get.getOrElse(null)
    //TODO r.reversible = rr.reversible
    //todo r.fast = rr.fast
    if (rr.listOfReactantsRec != null) {
      debug("Loaded listOfReactants has size " + r.listOfReactants.size)
      r.listOfReactants = rr.listOfReactantsRec.map(createSpeciesReferenceFrom(_)).toList
      debug("Finished copying list")
      debug("listOfReactantsRec has size " + rr.listOfReactantsRec.size)
    }
    // NOTE(review): this local is never read; the result is assigned to r.listOfProducts below.
    var listOfProducts: java.util.Collection[SpeciesReference] = null
    if (rr.listOfProductsRec != null) {
      debug("Loaded listOfProducts has size " + r.listOfProducts.size)
      r.listOfProducts = rr.listOfProductsRec.map(createSpeciesReferenceFrom(_)).toList
      debug("Finished copying list")
      debug("listOfProductsRec has size " + rr.listOfProductsRec.size)
    }
    // NOTE(review): unused local, same as above.
    var listOfModifiers: java.util.Collection[ModifierSpeciesReference] = null
    if (rr.listOfModifiersRec != null) {
      debug("Loaded listOfModifiers has size " + r.listOfModifiers.size)
      r.listOfModifiers = rr.listOfModifiersRec.map(createModifierSpeciesReferenceFrom(_)).toList
      debug("Finished copying list")
      debug("listOfModifiersRec has size " + rr.listOfModifiersRec.size)
    }
    if (rr.kineticLawRec != null)
      r.kineticLaw = createKineticLawFrom(rr.kineticLawRec)
    r
  }

  /**
   * Builds a [[SpeciesReference]] from a reactant record.
   * NOTE(review): unfinished — the stoichiometry/species assignments throw until the
   * corresponding fields are added to SpeciesReference.
   */
  implicit def createSpeciesReferenceFrom(rtr: ReactantRecord):SpeciesReference = {
    val rt = new SpeciesReference
    rt.metaid = rtr.metaIdO.get
    rt.id = rtr.idO.get
    rt.name = rtr.nameO.get.getOrElse(null)
    rt.notes = rtr.notesO.get.getOrElse(null)
    rt.stoichiometry = {
      throw new RuntimeException("Add stoichiometry field into SpeciesReference")
    }
    rt.stoichiometryMath = {
      throw new RuntimeException("Add stoichiometryMath field into SpeciesReference")
    }
    rt.species = {
      throw new RuntimeException("Add species field into SpeciesReference")
    }
    rt
  }

  /**
   * Builds a [[SpeciesReference]] from a product record.
   * NOTE(review): unfinished — see the reactant overload above.
   */
  implicit def createSpeciesReferenceFrom(pdr: ProductRecord):SpeciesReference = {
    val pd = new SpeciesReference
    pd.metaid = pdr.metaIdO.get
    pd.id = pdr.idO.get
    pd.name = pdr.nameO.get.getOrElse(null)
    pd.notes = pdr.notesO.get.getOrElse(null)
    pd.stoichiometry = {
      throw new RuntimeException("Add stoichiometry field into SpeciesReference")
    }
    pd.stoichiometryMath = {
      throw new RuntimeException("Add stoichiometryMath field into SpeciesReference")
    }
    pd.species = {
      throw new RuntimeException("Add species field into SpeciesReference")
    }
    pd
  }

  /**
   * Builds a [[ModifierSpeciesReference]] from a modifier record.
   * NOTE(review): unfinished — the species assignment throws until the field exists.
   */
  implicit def createModifierSpeciesReferenceFrom(mdr: ModifierRecord):ModifierSpeciesReference = {
    val md = new ModifierSpeciesReference
    md.metaid = mdr.metaIdO.get
    md.id = mdr.idO.get
    md.name = mdr.nameO.get.getOrElse(null)
    md.notes = mdr.notesO.get.getOrElse(null)
    md.species = {
      throw new RuntimeException("Add species field into ModifierSpeciesReference")
    }
    md
  }

  /** Builds a [[KineticLaw]] from its record, converting its local parameters. */
  implicit def createKineticLawFrom(klr:KineticLawRecord):KineticLaw = {
    val kl = new KineticLaw
    kl.metaid = klr.metaIdO.get
    kl.notes = klr.notesO.get.getOrElse(null)
    kl.math = klr.mathO.get
    kl.listOfParameters = Set.empty ++ klr.listOfParametersRec.map(createParameterFrom(_))
    kl
  }
}
/**
 * Converts SBML data-model elements into Lift `SBaseRecord` web-form records — the
 * inverse of [[SBMLFromRecord]]. Nullable Java-style fields are bridged to Lift `Box`es
 * via `Box.legacyNullTest` / `jDoubleToBoxDouble`. Converted children get their `parent`
 * set so complete URLs can be built for them.
 */
object RecordFromSBML extends LoggerWrapper {

  /** Dispatches to the matching converter based on the element's runtime type. */
  def createRecordFrom(er:Element):SBaseRecord[_] = {
    er match {
      case m:SBMLModel => createModelRecordFrom(m)
      case c:Compartment => createCompartmentRecordFrom(c)
      case s:Species => createSpeciesRecordFrom(s)
      case p:Parameter => createParameterRecordFrom(p)
      case fd:FunctionDefinition => createFunctionDefinitionRecordFrom(fd)
      case ct:Constraint => createConstraintRecordFrom(ct)
      case r:Reaction => createReactionRecordFrom(r)
      //TODO - write code for the remaining sbml types
      case _ => throw new NotImplementedException("ERROR: Method create" + er.sbmlType + "From(_) not implemented yet")
    }
  }

  /** Builds an [[SBMLModelRecord]], recursively converting and re-parenting child lists. */
  implicit def createModelRecordFrom(m:SBMLModel):SBMLModelRecord = {
    val mr = new SBMLModelRecord()
    mr.metaIdO.set(m.metaid)
    mr.idO.set(m.id)
    mr.nameO.setBox(Box.legacyNullTest(m.name))
    mr.notesO.setBox(Box.legacyNullTest(m.notes))
    //TODO to simplify this code try to replace the initialization if listOf in wm_libjsbml from null to Set.empty or Nil
    if(m.listOfCompartments != null){
      debug("Loaded listOfCompartments has size " + m.listOfCompartments.size)
      mr.listOfCompartmentsRec = m.listOfCompartments.map(createCompartmentRecordFrom(_)).toList
        .map(i => {
        i.parent = Full(mr) //to build complete URLs
        i
      }
      )
      debug("Finished copying list")
      debug("ListOfCompartmentsRec has size " + mr.listOfCompartmentsRec.size)
    }
    if(m.listOfSpecies != null){
      debug("Loaded listOfSpecies has size " + m.listOfSpecies.size)
      mr.listOfSpeciesRec = m.listOfSpecies.map(createSpeciesRecordFrom(_)).toList
        .map(i => {
        i.parent = Full(mr) //to build complete URLs
        i
      }
      )
      debug("Finished copying list")
      debug("ListOfSpeciesRec has size " + mr.listOfSpeciesRec.size)
    }
    if(m.listOfParameters != null){
      debug("Loaded listOfParameters has size " + m.listOfParameters.size)
      mr.listOfParametersRec = m.listOfParameters.map(createParameterRecordFrom(_)).toList
        .map(i => {
        i.parent = Full(mr) //to build complete URLs
        i
      }
      )
      debug("Finished copying list")
      debug("listOfParametersRec has size " + mr.listOfParametersRec.size)
    }
    if(m.listOfFunctionDefinitions != null){
      debug("Loaded listOfFunctionDefinitions has size " + m.listOfFunctionDefinitions.size)
      mr.listOfFunctionDefinitionsRec = m.listOfFunctionDefinitions.map(createFunctionDefinitionRecordFrom(_)).toList
        .map(i => {
        i.parent = Full(mr) //to build complete URLs
        i
      }
      )
      debug("Finished copying list")
      debug("listOfFunctionDefinitionsRec has size " + mr.listOfFunctionDefinitionsRec.size)
    }
    if(m.listOfConstraints != null){
      debug("Loaded listOfConstraints has size " + m.listOfConstraints.size)
      mr.listOfConstraintsRec = m.listOfConstraints.map(createConstraintRecordFrom(_)).toList
        .map(i => {
        i.parent = Full(mr) //to build complete URLs
        i
      }
      )
      debug("Finished copying list")
      debug("listOfConstraintsRec has size " + mr.listOfConstraintsRec.size)
    }
    if(m.listOfReactions != null){
      debug("Loaded listOfReactions has size " + m.listOfReactions.size)
      mr.listOfReactionsRec = m.listOfReactions.map(createReactionRecordFrom(_)).toList
        .map(i => {
        i.parent = Full(mr) //to build complete URLs
        i
      }
      )
      debug("Finished copying list")
      debug("listOfReactionsRec has size " + mr.listOfReactionsRec.size)
    }
    //TODO - write code for the remaining lists
    mr
  }

  /** Builds a [[CompartmentRecord]] from a compartment. */
  implicit def createCompartmentRecordFrom(c: Compartment):CompartmentRecord = {
    val cr = new CompartmentRecord()
    cr.metaIdO.set(c.metaid)
    cr.idO.set(c.id)
    cr.nameO.setBox(Box.legacyNullTest(c.name))
    cr.notesO.setBox(Box.legacyNullTest(c.notes))
    //cr.compartmentType = c.compartmentType
    cr.spatialDimensions0.set(
      ValidSpatialDimensions(c.spatialDimensions))
    cr.sizeO.setBox(jDoubleToBoxDouble(c.size))
    //cr.units = c.units
    cr.outsideO.setBox(Box.legacyNullTest( c.outside))
    cr.constantO.set(c.constant)
    cr
  }

  /** Builds a [[SpeciesRecord]] from a species. */
  implicit def createSpeciesRecordFrom(s: Species):SpeciesRecord = {
    val sr = new SpeciesRecord()
    sr.metaIdO.set(s.metaid)
    sr.idO.set(s.id)
    sr.nameO.setBox(Box.legacyNullTest(s.name))
    sr.notesO.setBox(Box.legacyNullTest(s.notes))
    sr.compartmentO.set(s.compartment)
    sr.initialAmountO.setBox(jDoubleToBoxDouble(s.initialAmount))
    sr.initialConcentrationO.setBox(jDoubleToBoxDouble(s.initialConcentration))
    //sr.substanceUnits = s.substanceUnits
    //sr.hasOnlySubstanceUnits = s.hasOnlySubstanceUnits
    sr.boundaryConditionO.setBox(Box.legacyNullTest(s.boundaryCondition))
    sr.constantO.set(s.constant)
    sr
  }

  /** Builds a [[ParameterRecord]] from a parameter. */
  implicit def createParameterRecordFrom(p: Parameter):ParameterRecord = {
    val pr = new ParameterRecord()
    pr.metaIdO.set(p.metaid)
    pr.idO.set(p.id)
    pr.nameO.setBox(Box.legacyNullTest(p.name))
    pr.notesO.setBox(Box.legacyNullTest(p.notes))
    pr.valueO.setBox(jDoubleToBoxDouble(p.value))
    //pr.units = p.units
    pr.constantO.set(p.constant)
    pr
  }

  /**
   * Builds a [[FunctionDefinitionRecord]] from a function definition. The element stores
   * the body as MathML; the lambda is extracted and re-rendered as AsciiMath for editing.
   */
  implicit def createFunctionDefinitionRecordFrom(fd: FunctionDefinition):FunctionDefinitionRecord = {
    val fdr = new FunctionDefinitionRecord()
    fdr.metaIdO.set(fd.metaid)
    fdr.idO.set(fd.id)
    fdr.nameO.setBox(Box.legacyNullTest(fd.name))
    fdr.notesO.setBox(Box.legacyNullTest(fd.notes))
    debug("fd.math is " + fd.math)
    // XML pattern match: strips the outer <math> wrapper and binds its single child.
    // NOTE(review): throws MatchError if fd.math is not exactly <math>...</math> with one
    // child — confirm upstream validation guarantees this shape.
    val <math>{xmlLambda}</math> = Utility.trim(scala.xml.XML.loadString(fd.math))
    debug("Lambda is " + xmlLambda)
    val p = MathMLMatchParser()
    val mathMLaST = p.parse(xmlLambda.asInstanceOf[scala.xml.Elem])
    val asciiLambda = AsciiMathPrettyPrinter.toAsciiMathML(mathMLaST)
    fdr.mathO.set(asciiLambda)
    fdr
  }

  /** Builds a [[ConstraintRecord]] from a constraint. */
  implicit def createConstraintRecordFrom(ct: Constraint):ConstraintRecord = {
    val ctr = new ConstraintRecord()
    ctr.metaIdO.set(ct.metaid)
    ctr.idO.set(ct.id)
    ctr.nameO.setBox(Box.legacyNullTest(ct.name))
    ctr.notesO.setBox(Box.legacyNullTest(ct.notes))
    ctr.mathO.set(ct.math)
    //TODO - ctr.message = ct.message
    ctr
  }

  /**
   * Builds a [[ReactionRecord]] from a reaction. Reactant/product/modifier conversion is
   * still commented out pending species-reference support (see block below).
   */
  implicit def createReactionRecordFrom(r: Reaction):ReactionRecord = {
    val rr = new ReactionRecord()
    rr.metaIdO.set(r.metaid)
    rr.idO.set(r.id)
    rr.nameO.setBox(Box.legacyNullTest(r.name))
    rr.notesO.setBox(Box.legacyNullTest(r.notes))
    //rr.reversible = r.reversible
    //rr.fast = r.fast
    /* TODO complete the conversion from species to speciesReference
    if(r.listOfReactants != null){
      debug("Loaded listOfReactants has size " + r.listOfReactants.size)
      rr.listOfReactantsRec = r.listOfReactants.map(createReactionRecordFrom(_)).toList
        .map(i => {
        i.parent = Full(rr) //to build complete URLs
        i
      }
      )
      debug("Finished copying list")
      debug("listOfReactantsRec has size " + rr.listOfReactantsRec.size)
    }
    var listOfProducts:java.util.Collection[SpeciesReference] = null
    if(r.listOfProducts != null){
      debug("Loaded listOfProducts has size " + r.listOfProducts.size)
      rr.listOfProductsRec = r.listOfProducts.map(createReactionRecordFrom(_)).toList
        .map(i => {
        i.parent = Full(rr) //to build complete URLs
        i
      }
      )
      debug("Finished copying list")
      debug("listOfProductsRec has size " + rr.listOfProductsRec.size)
    }
    var listOfModifiers:java.util.Collection[ModifierSpeciesReference] = null
    if(r.listOfModifiers != null){
      debug("Loaded listOfModifiers has size " + r.listOfModifiers.size)
      rr.listOfModifiersRec = r.listOfModifiers.map(createReactionRecordFrom(_)).toList
        .map(i => {
        i.parent = Full(rr) //to build complete URLs
        i
      }
      )
      debug("Finished copying list")
      debug("listOfModifiersRec has size " + rr.listOfModifiersRec.size)
    }*/
    if(r.kineticLaw != null)
      rr.kineticLawRec = createKineticLawRecordFrom(r.kineticLaw)
    rr
  }

  /** Builds a [[KineticLawRecord]] from a kinetic law, converting its local parameters. */
  implicit def createKineticLawRecordFrom(kl:KineticLaw) = {
    val klr = new KineticLawRecord()
    klr.metaIdO.set(kl.metaid)
    klr.notesO.setBox(Box.legacyNullTest(kl.notes))
    // NOTE(review): sets the field directly rather than via a mathO.set(...) call as the
    // other converters do — confirm the two are equivalent for this record.
    klr.math = kl.math
    if(kl.listOfParameters != null){
      debug("Loaded listOfParameters has size " + kl.listOfParameters.size)
      klr.listOfParametersRec = kl.listOfParameters.map(createParameterRecordFrom(_)).toList
        .map(i => {
        i.parent = Full(klr) //to build complete URLs
        i
      }
      )
      debug("Finished copying list")
      debug("listOfParametersRec has size " + klr.listOfParametersRec.size)
    }
    klr
  }

  //TODO WRITE VISITING FUNCTIONS for the remaining SBML entities

  /** Bridges a possibly-null java.lang.Double to a Box[Double] (null becomes Empty). */
  protected def jDoubleToBoxDouble(i:java.lang.Double):Box[Double] = {
    trace("RecordFromSBML.jDoubleToBoxDouble( " + i + " )")
    i match {
      case null => Empty
      case x => Full(x)
    }
  }
}
} | alexmsmartins/WikiModels | wm_web_client/src/main/scala/pt/cnbc/wikimodels/client/record/visitor/SBMLRecordVisitor.scala | Scala | mit | 17,345 |
package com.wixpress.petri.petri
import java.util.concurrent.{ScheduledExecutorService, TimeUnit}
import javax.mail.internet.InternetAddress
import com.wixpress.petri.experiments.domain.{Experiment, Trigger}
import scala.collection.JavaConversions._
/**
 * Periodically pauses experiments whose reported conduction total has reached their
 * configured conduction limit (0 meaning "no limit"), and notifies the experiment's
 * updater by mail. The check is scheduled on construction and runs every
 * `scheduledInterval` milliseconds.
 *
 * NOTE(review): scheduling from the constructor leaks `this` before construction
 * completes; the first run is delayed by `scheduledInterval`, but fields declared below
 * the schedule call (e.g. `pauseTrigger`) would be uninitialized if the task could fire
 * earlier — confirm this ordering is safe.
 */
class ConductionKeeper(clock: Clock, metricsReportsDao: MetricsReportsDao,
                       experimentsDao: ExperimentsDao,
                       scheduler: ScheduledExecutorService,
                       scheduledInterval: Long,
                       notifier: PetriNotifier){

  val triggerMessage = "Experiment paused due to conduction limit reached"
  val triggerOwner = "Conduction Keeper"
  // presumably intended as the From address of notification mails — appears unused here;
  // TODO confirm whether the notifier picks it up.
  val mailFromField = new InternetAddress("petri@wix.com")

  scheduler.scheduleAtFixedRate(runPauseExperimentIfConductionLimitReached(), scheduledInterval, scheduledInterval, TimeUnit.MILLISECONDS)

  val pauseTrigger:Trigger = new Trigger(triggerMessage, triggerOwner)

  // Wraps the check in a Runnable for the scheduler.
  private def runPauseExperimentIfConductionLimitReached(): Runnable = new Runnable {
    override def run(): Unit = pauseExperimentIfConductionLimitReached()
  }

  /** Pauses and notifies every experiment that has reached its conduction limit. */
  def pauseExperimentIfConductionLimitReached(): Unit = {
    val lastReportedExperimentConduction = metricsReportsDao.getReportedExperimentsSince(scheduledInterval)
    val allExperiments = experimentsDao.fetch()
    val experimentsToPause = getPauseCandidates(lastReportedExperimentConduction, allExperiments)
    experimentsToPause.foreach{ pauseCandidate =>
      val notifyMessage = createNotifyMessageAndTitle(pauseCandidate)
      val experiment = pauseCandidate.experiment
      val pausedExperiment = experiment.pause(pauseTrigger)
      updateExperimentInRepo(pausedExperiment)
      notifier.notify(notifyMessage.title, notifyMessage.message, true, Seq(notifyMessage.updaterEmail))
    }
  }

  // Persists the paused experiment, stamped with the current time.
  private def updateExperimentInRepo(experiment: Experiment) = {
    experimentsDao.update(experiment, clock.getCurrentDateTime)
  }

  // Selects reported experiments that are limited (limit != 0), not already paused, and
  // whose total conduction has reached the limit.
  // NOTE(review): `.get` assumes every reported experiment id exists in the DAO; a missing
  // id would throw NoSuchElementException and abort the scheduled run — confirm.
  private def getPauseCandidates(lastReportedExperimentsConduction: List[TotalExperimentConduction],
                                 allExperiments: Seq[Experiment]) :List[PauseCandidate] = {
    lastReportedExperimentsConduction
      .map(x=> PauseCandidate(x, allExperiments.find(exp => exp.getId == x.experimentId).get))
      .filterNot(candidate => candidate.experiment.getExperimentSnapshot.conductLimit == 0)
      .filterNot(candidate => candidate.experiment.isPaused)
      .filter(candidate => candidate.conductionTotal.totalConduction >= candidate.experiment.getExperimentSnapshot.conductLimit)
  }

  // Formats the notification title/body for a paused experiment.
  private def createNotifyMessageAndTitle(pausedCandidate: PauseCandidate) = {
    val name = pausedCandidate.experiment.getName
    val id = pausedCandidate.experiment.getId
    val total = pausedCandidate.conductionTotal.totalConduction
    val limit = pausedCandidate.experiment.getExperimentSnapshot.conductLimit
    val message = s"Experiment:$name id:$id conduction:$total limit:$limit" //TODO move outside
    val title = s"Experiment $name id:$id paused due to conduction limit reach"
    NotifyMessage(message, title, pausedCandidate.experiment.getUpdater)
  }

  // Immutable carrier for a notification's body, title and recipient address.
  case class NotifyMessage(message: String, title: String, updaterEmail: String)
}
| wix/petri | petri-server-core/src/main/java/com/wixpress/petri/petri/ConductionKeeper.scala | Scala | bsd-3-clause | 3,224 |
package fr.thomasdufour.autodiff
package derived
import cats.syntax.option._
import shapeless.labelled.FieldType
import shapeless.:+:
import shapeless.CNil
import shapeless.Coproduct
import shapeless.Inl
import shapeless.Inr
import shapeless.OrElse
import shapeless.Witness
/**
 * Type class computing a [[Difference]] between two values of the same shapeless
 * [[Coproduct]], with rendering support for diff messages.
 */
trait CoproductDiff[C <: Coproduct] {
  /** Diffs two coproduct values; None when they are considered equal. */
  def apply( left: C, right: C ): Option[Difference]
  /** Renders a coproduct value for display. */
  def show( value: C ): String
  /** Returns the name of the constructor (branch) the value inhabits. */
  def tag( value: C ): String
}
object CoproductDiff {
  // Base case: CNil is uninhabited, so none of these methods can ever be invoked;
  // `value.impossible` makes that explicit.
  implicit val cnilTag: CoproductDiff[CNil] =
    new CoproductDiff[CNil] {
      // $COVERAGE-OFF$
      override def apply( left: CNil, right: CNil ): Option[Difference] = none
      override def show( value: CNil ): String = value.impossible
      override def tag( value: CNil ): String = value.impossible
      // $COVERAGE-ON$
    }

  // Inductive case: handles the head branch K -> V (preferring an explicit Diff[V]
  // instance over a derived MkDiff[V] via OrElse) and recurses on the tail. Values in
  // different branches always differ, reported as a Value difference of the two tags.
  implicit def cconsTag[K <: Symbol, V, C <: Coproduct](
      implicit K: Witness.Aux[K],
      V: Diff[V] OrElse MkDiff[V],
      T: CoproductDiff[C]
  ): CoproductDiff[FieldType[K, V] :+: C] =
    new CoproductDiff[FieldType[K, V] :+: C] {
      // Renders just the constructor name with elided contents, for branch mismatches.
      private def showTag( value: FieldType[K, V] :+: C ): String = s"${tag( value )}(...)"

      override def apply( left: FieldType[K, V] :+: C, right: FieldType[K, V] :+: C ): Option[Difference] =
        ( left, right ) match {
          case ( Inl( l ), Inl( r ) ) => V.unify.apply( l, r )
          case ( Inr( l ), Inr( r ) ) => T( l, r )
          case _                      => Difference.Value( showTag( left ), showTag( right ) ).some
        }

      override def show( value: FieldType[K, V] :+: C ): String =
        value.eliminate( V.unify.show, T.show )

      override def tag( value: FieldType[K, V] :+: C ): String = value match {
        case Inl( _ ) => K.value.name
        case Inr( x ) => T.tag( x )
      }
    }
}
| chwthewke/auto-diff | auto-diff-generic/src/main/scala/fr/thomasdufour/autodiff/derived/CoproductDiff.scala | Scala | apache-2.0 | 1,784 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle
import java.io._
import com.google.common.io.ByteStreams
import org.apache.spark.{SparkConf, SparkEnv}
import org.apache.spark.internal.Logging
import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer}
import org.apache.spark.network.netty.SparkTransportConf
import org.apache.spark.shuffle.IndexShuffleBlockResolver.NOOP_REDUCE_ID
import org.apache.spark.storage._
import org.apache.spark.util.Utils
/**
* Create and maintain the shuffle blocks' mapping between logic block and physical file location.
* Data of shuffle blocks from the same map task are stored in a single consolidated data file.
* The offsets of the data blocks in the data file are stored in a separate index file.
*
* We use the name of the shuffle data's shuffleBlockId with reduce ID set to 0 and add ".data"
* as the filename postfix for data file, and ".index" as the filename postfix for index file.
*
*/
// Note: Changes to the format in this file should be kept in sync with
// org.apache.spark.network.shuffle.ExternalShuffleBlockResolver#getSortBasedShuffleBlockData().
private[spark] class IndexShuffleBlockResolver(
    conf: SparkConf,
    _blockManager: BlockManager = null)
  extends ShuffleBlockResolver
  with Logging {

  // Resolved lazily so tests can inject a BlockManager; otherwise use SparkEnv's.
  private lazy val blockManager = Option(_blockManager).getOrElse(SparkEnv.get.blockManager)

  private val transportConf = SparkTransportConf.fromSparkConf(conf, "shuffle")

  /** Returns the consolidated data file holding all of one map task's shuffle output. */
  def getDataFile(shuffleId: Int, mapId: Int): File = {
    blockManager.diskBlockManager.getFile(ShuffleDataBlockId(shuffleId, mapId, NOOP_REDUCE_ID))
  }

  /** Returns the index file holding the per-partition offsets into the data file. */
  private def getIndexFile(shuffleId: Int, mapId: Int): File = {
    blockManager.diskBlockManager.getFile(ShuffleIndexBlockId(shuffleId, mapId, NOOP_REDUCE_ID))
  }

  /**
   * Remove data file and index file that contain the output data from one map.
   * Failure to delete is only logged, not raised.
   * */
  def removeDataByMap(shuffleId: Int, mapId: Int): Unit = {
    var file = getDataFile(shuffleId, mapId)
    if (file.exists()) {
      if (!file.delete()) {
        logWarning(s"Error deleting data ${file.getPath()}")
      }
    }

    file = getIndexFile(shuffleId, mapId)
    if (file.exists()) {
      if (!file.delete()) {
        logWarning(s"Error deleting index ${file.getPath()}")
      }
    }
  }

  /**
   * Check whether the given index and data files match each other.
   * If so, return the partition lengths in the data file. Otherwise return null.
   *
   * @param index  the index file to validate (must contain `blocks + 1` longs)
   * @param data   the data file whose total length must equal the sum of block lengths
   * @param blocks number of reduce partitions expected
   */
  private def checkIndexAndDataFile(index: File, data: File, blocks: Int): Array[Long] = {
    // the index file should have `block + 1` longs as offset.
    if (index.length() != (blocks + 1) * 8) {
      return null
    }
    val lengths = new Array[Long](blocks)
    // Read the lengths of blocks
    val in = try {
      new DataInputStream(new BufferedInputStream(new FileInputStream(index)))
    } catch {
      case e: IOException =>
        return null
    }
    try {
      // Convert the offsets into lengths of each block
      var offset = in.readLong()
      // A well-formed index always starts at offset 0.
      if (offset != 0L) {
        return null
      }
      var i = 0
      while (i < blocks) {
        val off = in.readLong()
        lengths(i) = off - offset
        offset = off
        i += 1
      }
    } catch {
      case e: IOException =>
        return null
    } finally {
      in.close()
    }

    // the size of data file should match with index file
    if (data.length() == lengths.sum) {
      lengths
    } else {
      null
    }
  }

  /**
   * Write an index file with the offsets of each block, plus a final offset at the end for the
   * end of the output file. This will be used by getBlockData to figure out where each block
   * begins and ends.
   *
   * It will commit the data and index file as an atomic operation, use the existing ones, or
   * replace them with new ones.
   *
   * Note: the `lengths` will be updated to match the existing index file if use the existing ones.
   * */
  def writeIndexFileAndCommit(
      shuffleId: Int,
      mapId: Int,
      lengths: Array[Long],
      dataTmp: File): Unit = {
    val indexFile = getIndexFile(shuffleId, mapId)
    // Write into a temp file first so the final rename is the commit point.
    val indexTmp = Utils.tempFileWith(indexFile)
    try {
      val out = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(indexTmp)))
      Utils.tryWithSafeFinally {
        // We take in lengths of each block, need to convert it to offsets.
        var offset = 0L
        out.writeLong(offset)
        for (length <- lengths) {
          offset += length
          out.writeLong(offset)
        }
      } {
        out.close()
      }

      val dataFile = getDataFile(shuffleId, mapId)
      // There is only one IndexShuffleBlockResolver per executor, this synchronization make sure
      // the following check and rename are atomic.
      synchronized {
        val existingLengths = checkIndexAndDataFile(indexFile, dataFile, lengths.length)
        if (existingLengths != null) {
          // Another attempt for the same task has already written our map outputs successfully,
          // so just use the existing partition lengths and delete our temporary map outputs.
          System.arraycopy(existingLengths, 0, lengths, 0, lengths.length)
          if (dataTmp != null && dataTmp.exists()) {
            dataTmp.delete()
          }
          indexTmp.delete()
        } else {
          // This is the first successful attempt in writing the map outputs for this task,
          // so override any existing index and data files with the ones we wrote.
          if (indexFile.exists()) {
            indexFile.delete()
          }
          if (dataFile.exists()) {
            dataFile.delete()
          }
          if (!indexTmp.renameTo(indexFile)) {
            throw new IOException("fail to rename file " + indexTmp + " to " + indexFile)
          }
          if (dataTmp != null && dataTmp.exists() && !dataTmp.renameTo(dataFile)) {
            throw new IOException("fail to rename file " + dataTmp + " to " + dataFile)
          }
        }
      }
    } finally {
      // Best-effort cleanup; the temp file no longer exists after a successful rename.
      if (indexTmp.exists() && !indexTmp.delete()) {
        logError(s"Failed to delete temporary index file at ${indexTmp.getAbsolutePath}")
      }
    }
  }

  override def getBlockData(blockId: ShuffleBlockId): ManagedBuffer = {
    // The block is actually going to be a range of a single map output file for this map, so
    // find out the consolidated file, then the offset within that from our index
    val indexFile = getIndexFile(blockId.shuffleId, blockId.mapId)

    val in = new DataInputStream(new FileInputStream(indexFile))
    try {
      // Each offset is one long (8 bytes); skip to this reduce partition's entry.
      ByteStreams.skipFully(in, blockId.reduceId * 8)
      val offset = in.readLong()
      val nextOffset = in.readLong()
      new FileSegmentManagedBuffer(
        transportConf,
        getDataFile(blockId.shuffleId, blockId.mapId),
        offset,
        nextOffset - offset)
    } finally {
      in.close()
    }
  }

  override def stop(): Unit = {}
}
/** Constants shared with the resolver above. */
private[spark] object IndexShuffleBlockResolver {
  // No-op reduce ID used in interactions with disk store.
  // The disk store currently expects puts to relate to a (map, reduce) pair, but in the sort
  // shuffle outputs for several reduces are glommed into a single file.
  val NOOP_REDUCE_ID = 0
}
| gioenn/xSpark | core/src/main/scala/org/apache/spark/shuffle/IndexShuffleBlockResolver.scala | Scala | apache-2.0 | 8,041 |
package co.ledger.wallet.web.ripple.wallet
import java.net.URI
import co.ledger.wallet.core.concurrent.{AbstractAsyncCursor, AsyncCursor}
import co.ledger.wallet.core.device.utils.{EventEmitter, EventReceiver}
import co.ledger.wallet.core.net.WebSocketFactory
import co.ledger.wallet.core.utils.DerivationPath
import co.ledger.wallet.core.wallet.ripple.Wallet.{StartSynchronizationEvent, StopSynchronizationEvent}
import co.ledger.wallet.core.wallet.ripple._
import co.ledger.wallet.core.wallet.ripple.api.WebSocketRipple
import co.ledger.wallet.core.wallet.ripple.database.AccountRow
import co.ledger.wallet.core.wallet.ripple.events.NewTransaction
import co.ledger.wallet.web.ripple.core.event.JsEventEmitter
import co.ledger.wallet.web.ripple.core.net.JsWebSocketFactory
import co.ledger.wallet.web.ripple.core.utils.ChromeGlobalPreferences
import co.ledger.wallet.web.ripple.services.SessionService
import co.ledger.wallet.web.ripple.wallet.database.RippleDatabase
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{Failure, Success}
/**
* Created by alix on 4/13/17.
*/
class RippleWalletClient(override val name: String,
                         override val password: Option[String],
                         provider: RippleAccountProvider,
                         chain: SessionService.RippleChainIdentifier
                        ) extends Wallet with RippleDatabase {

  // Memoizes the account list future in `_accounts`. On first call, loads
  // accounts from the database and creates account 0 if none exist.
  // Fails once the client has been stopped.
  // NOTE(review): the read-then-write of `_accounts` is not synchronized —
  // concurrent first calls could race; confirm all calls come from one thread.
  private def init(): Future[Array[RippleAccountClient]] = {
    if (_stopped)
      Future.failed(new Exception("Client is stopped"))
    else {
      _accounts.getOrElse({
        _accounts = Some(queryAccounts() flatMap { (accounts) =>
          if (accounts.isEmpty) {
            createNewAccount(0).map(Array(_))
          } else {
            Future.successful(accounts)
          }
        } map { (accounts) =>
          accounts map { (account) =>
            new RippleAccountClient(this, account)
          }
        })
        _accounts.get
      })
    }
  }

  // True while we are waiting for the websocket's very first connection,
  // so synchronize() does not emit StartSynchronizationEvent twice.
  private var _firstConnection: Boolean = false

  // Derives the account address at BIP-44 path 44'/<coinType>'/<index>'/0/0,
  // persists it with a zero balance, and returns the stored row.
  private def createNewAccount(index: Int): Future[AccountRow] = {
    provider.getRippleAccount(DerivationPath(s"44'/${chain.coinType}'/$index'/0/0"))
      .flatMap {(account) =>
        val row = new AccountRow(index, account.toString, XRP.Zero)
        putAccount(row).map(_ => row)
      }
  }

  override def bip44CoinType: String = chain.coinType
  override def coinPathPrefix: String = chain.pathPrefix

  /** Returns the account at `index`; fails if the index is out of bounds. */
  override def account(index: Int): Future[Account] = {
    init() map {(accounts) =>
      accounts(index)}
  }

  /** Returns all accounts managed by this wallet. */
  override def accounts(): Future[Array[Account]] = {
    init() map {(accounts) =>
      accounts.asInstanceOf[Array[Account]]}
  }

  /** Sums the balances of all accounts. */
  override def balance(): Future[XRP] = {
    accounts() flatMap { (accounts) =>
      val futureBalances = accounts map { (account) =>
        account.balance()
      }
      Future.sequence(futureBalances.toSeq)
    } map {(balances) =>
      balances.foldLeft(XRP.Zero)(_ + _)
    }
  }

  // Waits for the websocket to be connected (emitting StartSynchronizationEvent
  // while waiting), then synchronizes every account. Only one synchronization
  // runs at a time: a new one starts only if none is pending or the previous
  // one failed; otherwise the in-flight future is returned.
  override def synchronize(): Future[Unit] = {
    println("Synchronizing wallet started")
    (if (_webSocketRipple.get.connected) {
      println("Websocket already connected")
      Future.successful(Unit)
    } else {
      println("Waiting on ws to connect")
      eventEmitter.emit(StartSynchronizationEvent())
      _firstConnection = true
      _webSocketRipple.get.connecting.future
    }) flatMap { (_) =>
      println("Checking future for synchronization")
      if ((_synchronizationFuture.isEmpty || (_synchronizationFuture.isDefined && _synchronizationFuture.get.isCompleted && _synchronizationFuture.get.value.get.isFailure))) {
        if (_firstConnection) {
          // Event was already emitted while waiting for the first connection.
          _firstConnection = false
        } else {
          eventEmitter.emit(StartSynchronizationEvent())
        }
        _synchronizationFuture = Some(
          accounts() flatMap { (accounts) =>
            Future.sequence(accounts.map(_.synchronize()).toSeq)
          } map { _ =>
            _synchronizationFuture = None
            eventEmitter.emit(StopSynchronizationEvent())
          }
        )
        _synchronizationFuture.get
      } else {
        _synchronizationFuture.get
      }
    }
  }

  override def isSynchronizing(): Future[Boolean] = Future.successful(
    _synchronizationFuture.nonEmpty
  )

  // Not implemented yet.
  override def pushTransaction(transaction: Array[Byte]): Future[Unit] = ???

  override val eventEmitter: EventEmitter = new JsEventEmitter()

  // Not implemented yet.
  override def operations(from: Int, batchSize: Int): Future[AsyncCursor[Operation]] = ???

  /** Marks the client stopped and closes the websocket. */
  override def stop(): Unit = {
    init() foreach {(_) =>
      _stopped = true
      _webSocketRipple.get.stop()
    }
  }

  // Node URL comes from user preferences; `.get` assumes the "node" setting exists.
  private def websocketFactory: WebSocketFactory = new JsWebSocketFactory(new URI(new ChromeGlobalPreferences("Settings").string("node").get))

  private var _webSocketRipple: Option[WebSocketRipple] = None
  private var _accounts: Option[Future[Array[RippleAccountClient]]] = None
  private var _stopped = false
  private var _synchronizationFuture: Option[Future[Unit]] = None

  // Constructor side effect: once accounts are loaded, open the websocket
  // subscribed to all account addresses.
  accounts().map({(accounts) =>
    _webSocketRipple = Some(new WebSocketRipple(websocketFactory, accounts.map(_.toString), this))
    _webSocketRipple.get.start()
  })

  override def webSocket = _webSocketRipple

  /** True when the websocket exists and is currently connected. */
  override def isConnected(): Boolean = {
    if (_webSocketRipple.isDefined){
      _webSocketRipple.get.connected
    } else {
      false
    }
  }
}
| LedgerHQ/ledger-wallet-ripple | src/main/scala/co/ledger/wallet/web/ripple/wallet/RippleWalletClient.scala | Scala | mit | 5,432 |
package blended.jms.bridge.internal
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.Flow
import akka.stream.KillSwitch
import blended.jms.utils.IdAwareConnectionFactory
import blended.streams.BlendedStreamsConfig
import blended.streams.message.FlowEnvelope
import blended.testsupport.RequiresForkedJVM
import scala.concurrent.duration._
@RequiresForkedJVM
class TransactionSendFailedRetryBridgeSpec extends BridgeSpecSupport {

  // Sends `msgCount` envelopes addressed to "sampleOut" through the outbound
  // bridge destination, optionally with transaction tracking enabled.
  private def sendOutbound(cf : IdAwareConnectionFactory, timeout : FiniteDuration, msgCount : Int, track : Boolean) : KillSwitch = {

    val msgs : Seq[FlowEnvelope] = generateMessages(msgCount){ env =>
      env
        .withHeader(destHeader(headerCfg.prefix), s"sampleOut").get
        .withHeader(headerCfg.headerTrack, track).get
    }.get

    sendMessages("bridge.data.out.activemq.external", cf, timeout)(msgs:_*)
  }

  // Activator whose stream builder replaces the transaction-send flow with one
  // that marks every envelope failed, simulating a transaction send error.
  override protected def bridgeActivator: BridgeActivator = new BridgeActivator() {
    override protected def streamBuilderFactory(system: ActorSystem)(
      cfg: BridgeStreamConfig, streamsCfg: BlendedStreamsConfig
    ): BridgeStreamBuilder =
      new BridgeStreamBuilder(cfg, streamsCfg)(system) {
        override protected def sendTransaction : Flow[FlowEnvelope, FlowEnvelope, NotUsed] =
          Flow.fromFunction[FlowEnvelope, FlowEnvelope]{ env =>
            env.withException(new Exception("Boom !"))
          }
      }
  }

  "The outbound bridge should " - {

    "pass messages to the retry destination if the send of the transaction envelope fails" in logException {
      val msgCount = 2

      val actorSys = system(registry)
      val (internal, _) = getConnectionFactories(registry)

      val switch = sendOutbound(internal, timeout, msgCount, track = true)

      // All messages must end up in the retry destination ...
      val retried : List[FlowEnvelope] = consumeMessages(
        cf = internal,
        destName = "retries",
        expected = 2,
        timeout = timeout
      )(actorSys).get

      retried should have size msgCount
      // ... with no transaction events emitted ...
      consumeEvents(internal, timeout)(actorSys).get should be (empty)

      retried.foreach{ env =>
        env.header[Unit]("UnitProperty") should be (Some(()))
      }

      // ... and nothing left on the original outbound destination.
      consumeMessages(
        cf = internal,
        destName = "bridge.data.out.activemq.external",
        timeout = timeout
      )(actorSys).get should be (empty)

      switch.shutdown()
    }
  }
}
| woq-blended/blended | blended.jms.bridge/src/test/scala/blended/jms/bridge/internal/TransactionSendFailedRetryBridgeSpec.scala | Scala | apache-2.0 | 2,376 |
package org.orbroker
import java.sql.ResultSet
import org.orbroker.adapt.BrokerAdapter
/**
 * Iterates a JDBC result set grouped by the given key columns, applying
 * `extractor` once per join group.
 *
 * @param key       column names that delimit a join group
 * @param rs        the result set to consume (advanced as groups are read)
 * @param adapter   broker adapter used by the underlying JoinGroup
 * @param extractor produces one value per group from the group's first row
 */
private[orbroker] class JoinIterable[T](key: Set[String], rs: ResultSet, adapter: BrokerAdapter, extractor: (Row, Join) => T) {

  /**
   * Returns an iterator of extracted values, one per join group.
   * Consumes the first row eagerly; an empty result set yields an empty iterator.
   */
  def iterator(): Iterator[T] =
    if (rs.next) {
      new JoinIterator(rs, adapter)
    } else {
      Iterator.empty
    }

  private class JoinIterator(rs: ResultSet, adapter: BrokerAdapter) extends Iterator[T] {
    // True until the initial group (whose first row was consumed by iterator()) is produced.
    private[this] var first = true
    private[this] val join = new JoinGroup(key, rs, Map.empty, adapter)
    def hasNext = first || join.rsReadable
    def next = {
      if (hasNext) {
        first = false
        join.newGroup()
        val value = extractor(join.row, join)
        // The extractor may already have advanced the result set while reading
        // the group; only advance here if it did not.
        if (!join.isRsAdvanced) join.rsReadable = rs.next
        value
      } else {
        // Fixed: plain string literal; the former s"..." had nothing to interpolate.
        throw new NoSuchElementException("Result set has no more rows")
      }
    }
  }
}
| nilskp/orbroker | src/main/scala/org/orbroker/JoinIterable.scala | Scala | mit | 896 |
/*
* MatchLen.scala
* (FScape)
*
* Copyright (c) 2001-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.fscape
package stream
import akka.stream.stage.{InHandler, OutHandler}
import akka.stream.{Attributes, FanInShape2, Inlet, Outlet}
import de.sciss.fscape.stream.impl.{NodeImpl, StageImpl}
object MatchLen {
  /** Connects a MatchLen stage, which adjusts the length of `in` to the length
    * of `ref` (truncating or zero-padding; see `Logic` below). The contents of
    * `ref` are only counted, never forwarded.
    *
    * @param in  the signal whose length is adjusted
    * @param ref the reference signal whose length is measured
    */
  def apply[A, E <: BufElem[A]](in: Outlet[E], ref: OutA)
                               (implicit b: Builder, tpe: StreamType[A, E]): Outlet[E] = {
    val stage0 = new Stage[A, E](b.layer)
    val stage = b.add(stage0)
    b.connect(in , stage.in0)
    b.connect(ref, stage.in1)
    stage.out
  }
private type Shp[E] = FanInShape2[E, BufLike, E]
private final class Stage[A, E <: BufElem[A]](layer: Layer)(implicit a: Allocator, tpe: StreamType[A, E])
extends StageImpl[Shp[E]](name) { stage =>
val shape: Shape = new FanInShape2(
in0 = Inlet [E] (s"${stage.name}.in" ),
in1 = InA (s"${stage.name}.ref" ),
out = Outlet[E] (s"${stage.name}.out" )
)
def createLogic(attr: Attributes): NodeImpl[Shape] = new Logic[A, E](shape, layer)
}
private final class Logic[A, E <: BufElem[A]](shape: Shp[E], layer: Layer)
(implicit a: Allocator, tpe: StreamType[A, E])
extends NodeImpl(name, layer, shape) with OutHandler with InHandler { logic =>
private[this] var refDone = false
private[this] var isZero = false
private[this] var refLen = 0L
private[this] var inLen = 0L
private object RefH extends InHandler {
def onPush(): Unit = {
val refBuf = grab(shape.in1)
refLen += refBuf.size
refBuf.release()
tryPull(shape.in1)
}
override def onUpstreamFinish(): Unit = {
if (isAvailable(shape.in1)) {
onPush()
}
refDone = true
process()
}
}
private[this] var inBuf: E = _
private def writeInBuf(): Unit = {
if (inBuf.size > 0) {
push(shape.out, inBuf)
inLen += inBuf.size
} else {
inBuf.release()
}
inBuf = null.asInstanceOf[E]
}
private def process(): Unit = {
if (inBuf == null && isAvailable(shape.in0)) {
inBuf = grab(shape.in0)
tryPull(shape.in0)
}
if (inBuf != null) {
if (isAvailable(shape.out)) {
if (inLen + inBuf.size <= refLen) {
writeInBuf()
} else if (refDone) {
inBuf.size = (refLen - inLen).toInt
writeInBuf()
}
}
} else if (isZero) {
if (isAvailable(shape.out) && (inLen < refLen)) {
val zeroBuf = tpe.allocBuf()
tpe.clear(zeroBuf.buf, 0, zeroBuf.size)
zeroBuf.size = math.min(zeroBuf.size, (refLen - inLen).toInt)
push(shape.out, zeroBuf)
inLen += zeroBuf.size
}
}
if (refDone && inLen == refLen) completeStage()
}
def onPush(): Unit =
process()
def onPull(): Unit =
process()
override def onUpstreamFinish(): Unit = {
process()
isZero = true
process()
}
setHandler(shape.in0, this)
setHandler(shape.in1, RefH)
setHandler(shape.out, this)
}
} | Sciss/FScape-next | core/shared/src/main/scala/de/sciss/fscape/stream/MatchLen.scala | Scala | agpl-3.0 | 3,467 |
package actors
import akka.actor.ActorLogging
import markets.Fill
import markets.actors.settlement.SettlementMechanismActor
import play.api.libs.json.{JsNumber, JsObject, JsString, JsValue, Json}
/** Settlement mechanism actor that logs every incoming [[Fill]] as a JSON line
  * before delegating to the base settlement behaviour.
  */
class LoggingSettlementMechanismActor extends SettlementMechanismActor with ActorLogging {

  override def receive: Receive = {
    case fill: Fill =>
      val jsonFill = convertToJson(fill)
      log.info(jsonFill.toString())
      super.receive(fill)
  }

  // Note: a hand-rolled `fillToString` duplicate of this serialization was
  // removed; this method is the single source of the log format.

  /** Converts a Fill to JSON.
    *
    * @param fill the fill to serialize
    * @return JSON data representing the Fill.
    */
  private[this] def convertToJson(fill: Fill): JsValue = {
    val fillAsList = Seq(
      "class" -> JsString(fill.getClass.getName),
      "tradable" -> JsString(fill.askOrder.tradable.symbol),
      "askPrice" -> JsNumber(fill.askOrder.price),
      "bidPrice" -> JsNumber(fill.bidOrder.price),
      "price" -> JsNumber(fill.price),
      "quantity" -> JsNumber(fill.quantity),
      "timestamp" -> JsNumber(fill.timestamp)
    )
    JsObject(fillAsList)
  }
}
| ScalABM/models-library | gode-sunder/src/main/scala-2.11/actors/LoggingSettlementMechanismActor.scala | Scala | apache-2.0 | 1,370 |
package lottery.domain.model
import java.time.OffsetDateTime
import lottery.domain.model.LotteryBehaviour.LotteryId
import lottery.domain.model.LotteryProtocol._
import pl.newicom.dddd.actor.Config
import pl.newicom.dddd.aggregate._
import pl.newicom.dddd.aggregate.error.DomainException
import scala.util.Random
/** State machine for the Lottery aggregate. States progress
  * Uninitialized -> Empty -> NonEmpty -> Finished; each state defines which
  * commands it accepts and which events it emits.
  */
object LotteryBehaviour {

  sealed trait Lottery extends Behavior[LotteryEvent, Lottery, Config]

  /** Initial state: only creation is allowed. */
  implicit case object UninitializedLottery extends Lottery with Uninitialized[Lottery] {
    def actions =
      handleCommand {
        case CreateLottery(id) => LotteryCreated(id)
      }
      .handleEvent {
        case LotteryCreated(_) => EmptyLottery
      }
  }

  /** Created but has no participants yet. */
  case object EmptyLottery extends Lottery {

    /**
     * Action: reject Run command if has no participants
     * Only applicable when list of participants is empty
     */
    def canNotRunWithoutParticipants =
      handleCommand {
        // can't run if there is no participants
        case Run(_) =>
          reject("Lottery has no participants")
      }

    /**
     * Action: add a participant
     * Applicable as long as we don't have a winner
     */
    def acceptParticipants =
      handleCommand {
        case AddParticipant(id, name) => ParticipantAdded(name, id)
      }
      .handleEvent {
        case ParticipantAdded(name, _) =>
          NonEmptyLottery(List(name))
      }

    def actions = canNotRunWithoutParticipants ++ acceptParticipants
  }

  /** Has at least one participant; may be run. */
  case class NonEmptyLottery(participants: List[String]) extends Lottery {

    /**
     * Action: reject double booking. Can't add the same participant twice
     * Only applicable after adding at least one participant
     */
    def rejectDoubleBooking = {

      def hasParticipant(name: String) = participants.contains(name)

      handleCommand {
        // can't add participant twice
        case cmd: AddParticipant if hasParticipant(cmd.name) =>
          reject(s"Participant ${cmd.name} already added!")
      }
    }

    /**
     * Action: add a participant
     * Applicable as long as we don't have a winner
     */
    def acceptParticipants =
      handleCommand {
        case AddParticipant(id, name) => ParticipantAdded(name, id)
      }
      .handleEvent {
        case ParticipantAdded(name, _) => copy(participants = name :: participants)
      }

    /**
     * Action: remove participants (single or all)
     * Only applicable if Lottery has participants
     */
    def removeParticipants =
      // removing participants (single or all) produce ParticipantRemoved events
      handleCommand {
        case RemoveParticipant(id, name) => ParticipantRemoved(name, id)

        case RemoveAllParticipants(id) =>
          this.participants
            .map { name => ParticipantRemoved(name, id) }
      }
      .handleEvent {
        case ParticipantRemoved(name, id) =>
          val newParticipants = participants.filter(_ != name)
          // NOTE: if last participant is removed, transition back to EmptyLottery
          if (newParticipants.isEmpty)
            EmptyLottery
          else
            copy(participants = newParticipants)
      }

    /**
     * Action: run the lottery
     * Only applicable if it has at least one participant
     */
    def runTheLottery =
      handleCommand {
        case Run(id) =>
          // winner is picked uniformly at random
          val index = Random.nextInt(participants.size)
          val winner = participants(index)
          WinnerSelected(winner, OffsetDateTime.now, id)
      }
      .handleEvent {
        // transition to end state on winner selection
        case WinnerSelected(winner, _, id) => FinishedLottery(winner, id)
      }

    def actions = rejectDoubleBooking ++ acceptParticipants ++ removeParticipants ++ runTheLottery
  }

  /** Terminal state: a winner has been selected; no further commands allowed. */
  case class FinishedLottery(winner: String, id: LotteryId) extends Lottery {

    /**
     * Action: reject all
     * Applicable when a winner is selected. No new commands should be accepts.
     */
    def rejectAllCommands =
      handleCommand {
        // no command can be accepted after having selected a winner
        case anyCommand =>
          reject (new LotteryHasAlreadyAWinner(s"Lottery has already a winner and the winner is $winner"))
      }

    def actions = rejectAllCommands
  }

  type LotteryId = AggregateId
}
/** Defines the Lottery Protocol, all Commands it may receive and Events it may emit */
object LotteryProtocol {

  // Commands ============================================================

  /** Base of all lottery commands; routes by the lottery's aggregate id. */
  sealed trait LotteryCommand extends Command {
    def id: LotteryId
    override def aggregateId: AggregateId = id
  }

  // Creation Command
  case class CreateLottery(id: LotteryId) extends LotteryCommand

  // Update Commands
  case class AddParticipant(id: LotteryId, name: String) extends LotteryCommand

  case class RemoveParticipant(id: LotteryId, name: String) extends LotteryCommand

  case class RemoveAllParticipants(id: LotteryId) extends LotteryCommand

  /** Asks the lottery to select a winner. */
  case class Run(id: LotteryId) extends LotteryCommand

  // Events ============================================================

  /** Base of all lottery events; carries the emitting lottery's id. */
  sealed trait LotteryEvent {
    def lotteryId: LotteryId
  }

  // Creation Event
  case class LotteryCreated(lotteryId: LotteryId) extends LotteryEvent

  // Update Events
  sealed trait LotteryUpdateEvent extends LotteryEvent
  case class ParticipantAdded(name: String, lotteryId: LotteryId) extends LotteryUpdateEvent
  case class ParticipantRemoved(name: String, lotteryId: LotteryId) extends LotteryUpdateEvent
  case class WinnerSelected(winner: String, date: OffsetDateTime, lotteryId: LotteryId) extends LotteryUpdateEvent
}
class LotteryHasAlreadyAWinner(msg: String) extends DomainException(msg) | pawelkaczor/akka-ddd | akka-ddd-test/src/test/scala/lottery/domain/model/Lottery.scala | Scala | mit | 5,710 |
package idv.brianhsu.maidroid.plurk.view
import idv.brianhsu.maidroid.plurk._
import idv.brianhsu.maidroid.plurk.activity._
import idv.brianhsu.maidroid.plurk.adapter._
import idv.brianhsu.maidroid.plurk.cache._
import idv.brianhsu.maidroid.plurk.dialog._
import idv.brianhsu.maidroid.plurk.fragment._
import idv.brianhsu.maidroid.plurk.TypedResource._
import idv.brianhsu.maidroid.plurk.util._
import idv.brianhsu.maidroid.ui.util.AsyncUI._
import idv.brianhsu.maidroid.ui.util.CallbackConversions._
import scala.concurrent._
import android.app.Activity
import android.content.ClipboardManager
import android.content.ClipData
import android.content.Context
import android.graphics.Bitmap
import android.os.Bundle
import android.support.v4.app.FragmentActivity
import android.support.v7.internal.view.menu.MenuBuilder
import android.support.v7.widget.PopupMenu
import android.text.Html
import android.text.method.LinkMovementMethod
import android.view.LayoutInflater
import android.view.MenuItem
import android.view.View
import android.widget.LinearLayout
import android.widget.Toast
import org.bone.soplurk.api.PlurkAPI._
import org.bone.soplurk.constant.ReadStatus._
import org.bone.soplurk.model._
import java.net.URL
import java.text.SimpleDateFormat
import java.util.Date
/** Android view rendering a single Plurk response row: avatar, display name,
  * qualifier badge, posted date, content, birthday cake icon and a dropdown
  * menu (copy / reply / delete / block).
  */
class ResponseView(adapter: ResponseAdapter)
                  (implicit val activity: FragmentActivity with ConfirmDialog.Listener with ResponseListFragment.Listener)
                  extends LinearLayout(activity) {

  private val inflater = LayoutInflater.from(activity)

  initView()

  lazy val dateTimeFormatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
  lazy val dateFormatter = new SimpleDateFormat("MM-dd")
  lazy val avatar = this.findView(TR.itemResponseAvatar)
  lazy val content = this.findView(TR.itemResponseText)
  lazy val displayName = this.findView(TR.itemResponseDisplayName)
  lazy val qualifier = this.findView(TR.itemResponseQualifier)
  lazy val postedDate = this.findView(TR.itemResponsePostedDate)
  lazy val dropdownMenu = this.findView(TR.itemResponseDropdownMenu)
  lazy val cakeIcon = this.findView(TR.itemResponseCake)
  lazy val plurkAPI = PlurkAPIHelper.getPlurkAPI(activity)

  // Author of the response currently bound to this (possibly recycled) row.
  private var owner: User = _

  // Inflates the row layout into this LinearLayout and makes links clickable.
  private def initView() {
    inflater.inflate(R.layout.item_response, this, true)
    content.setMovementMethod(LinkMovementMethod.getInstance())
  }

  // Shows a confirmation dialog before deleting the response.
  private def showDeleteConfirmDialog(response: Response) {
    val data = new Bundle
    data.putLong("plurkID", response.plurkID)
    data.putLong("responseID", response.id)

    val alertDialog = ConfirmDialog.createDialog(
      activity, 'DeleteResponseConfirm,
      activity.getString(R.string.viewResponseViewDeleteConfirmTitle),
      activity.getString(R.string.viewResponseViewDeleteConfirm),
      activity.getString(R.string.delete),
      activity.getString(R.string.cancel),
      Some(data)
    )

    val fm = activity.getSupportFragmentManager
    alertDialog.show(fm, "DeleteResponseConfirm")
  }

  // Shows a confirmation dialog before blocking the response's author.
  // NOTE(review): positive button reuses R.string.delete — confirm this is intended.
  private def showBlockConfirmDialog(response: Response) {
    val data = new Bundle
    data.putLong("plurkID", response.plurkID)
    data.putLong("responseID", response.id)
    data.putLong("ownerID", response.userID)

    val alertDialog = ConfirmDialog.createDialog(
      activity, 'BlockUserResponseConfirm,
      activity.getString(R.string.viewResponseViewBlockConfirmTitle),
      activity.getString(R.string.viewResponseViewBlockConfirm),
      activity.getString(R.string.delete),
      activity.getString(R.string.cancel),
      Some(data)
    )

    val fm = activity.getSupportFragmentManager
    alertDialog.show(fm, "BlockUserResponseConfirm")
  }

  // Copies the raw response text to the system clipboard and shows a toast.
  private def copyContent(response: Response) {
    val clipboard = activity.getSystemService(Context.CLIPBOARD_SERVICE).asInstanceOf[ClipboardManager]
    val clipData = ClipData.newPlainText(s"PlurkResponse(${response.id}", response.contentRaw)
    clipboard.setPrimaryClip(clipData)
    Toast.makeText(activity, R.string.contentCopied, Toast.LENGTH_SHORT).show()
  }

  // Wires the popup menu; hides "delete" when not deletable and "block" for
  // one's own responses or the special user id 99999.
  // NOTE(review): 99999 looks like a reserved/anonymous user id — confirm.
  private def setDropdownMenu(response: Response, isDeletable: Boolean) {
    dropdownMenu.setOnClickListener { button: View =>

      val popupMenu = new MyPopupMenu(activity, button) {
        override def onMenuItemSelected(menu: MenuBuilder, item: MenuItem): Boolean = {
          item.getItemId match {
            case R.id.popup_comment_copy_content => copyContent(response); true
            case R.id.popup_comment_block => showBlockConfirmDialog(response); true
            case R.id.popup_comment_delete => showDeleteConfirmDialog(response); true
            case R.id.popup_comment_reply => activity.onReplyTo(owner.nickname, response.contentRaw); true
            case _ => true
          }
        }
      }

      popupMenu.getMenuInflater.inflate(R.menu.popup_comment, popupMenu.getMenu)

      val isMineResponse = response.myAnonymous getOrElse (PlurkAPIHelper.plurkUserID == response.userID)

      if (!isDeletable) {
        val deleteMenuItem = popupMenu.getMenu.findItem(R.id.popup_comment_delete)
        deleteMenuItem.setVisible(false)
      }

      if (!isDeletable || isMineResponse || response.userID == 99999) {
        val blockMenuItem = popupMenu.getMenu.findItem(R.id.popup_comment_block)
        blockMenuItem.setVisible(false)
      }

      popupMenu.show()
    }
  }

  // Shows the cake icon on the author's birthday (month-day match with today).
  private def setCakeIcon(user: User) {
    val shouldDisplay = user.birthday match {
      case Some(birthday) => dateFormatter.format(birthday.getTime) == dateFormatter.format(new Date)
      case None => false
    }

    val visibility = if (shouldDisplay) View.VISIBLE else View.GONE
    cakeIcon.setVisibility(visibility)
  }

  /** Binds a response and its author to this row and returns the view. */
  def update(response: Response, owner: User, isDeletable: Boolean,
             imageGetter: PlurkImageGetter): View = {

    this.owner = owner

    content.setText(Html.fromHtml(response.content, imageGetter, StrikeTagHandler))
    postedDate.setText(dateTimeFormatter.format(response.posted))
    displayName.setText((response.handle orElse owner.displayName) getOrElse owner.nickname)
    displayName.setOnClickListener { view: View => UserTimelineActivity.startActivity(activity, owner) }
    avatar.setOnClickListener { view: View => UserTimelineActivity.startActivity(activity, owner) }
    setDropdownMenu(response, isDeletable)
    setCakeIcon(owner)

    QualifierDisplay(response.qualifier, activity) match {
      case None => qualifier.setVisibility(View.GONE)
      case Some((backgroundColor, translatedName)) =>
        qualifier.setBackgroundColor(backgroundColor)
        qualifier.setText(translatedName)
        qualifier.setVisibility(View.VISIBLE)
    }

    // Placeholder first; real avatar comes from cache or network below.
    avatar.setImageResource(R.drawable.default_avatar)

    AvatarCache.getAvatarBitmapFromCache(activity, owner) match {
      case Some(avatarBitmap) => setAvatarFromCache(avatarBitmap)
      case None => setAvatarFromNetwork(activity, owner)
    }

    this
  }

  def setAvatarFromCache(avatarBitmap: Bitmap) {
    avatar.setImageBitmap(avatarBitmap)
  }

  def setAvatarFromNetwork(context: Context, user: User) {
    val avatarFuture = AvatarCache.getAvatarBitmapFromNetwork(activity, user)
    avatarFuture.onSuccessInUI { case(userID, bitmap) =>
      // Prevent race condition that cause display incorrect avatar for
      // recylced row view.
      if (userID == owner.id) {
        avatar.setImageBitmap(bitmap)
      }
    }
  }
}
| brianhsu/MaidroidPlurk | src/main/scala/view/ResponseView.scala | Scala | gpl-3.0 | 7,415 |
package scala.tools.nsc.transform.patmat
import org.junit.Assert._
import org.junit.Test
import scala.collection.mutable
import scala.reflect.internal.util.Position
import scala.tools.nsc.{Global, Settings}
/**
 * Minimal concrete instantiation of the pattern-matcher's `Logic with Solving`
 * cake, exposing the DPLL solver for unit tests. Most abstract members are
 * stubbed (`???` / no-ops) because the tests only exercise CNF conversion and
 * model enumeration, not the full variable/domain machinery.
 */
object TestSolver extends Logic with Solving {
val global: Global = new Global(new Settings())
// disable max recursion depth in order to get all solutions
global.settings.YpatmatExhaustdepth.tryToSet("off" :: Nil)
object TestSolver extends Solver {
// Opaque constant placeholder; identity-based equality is sufficient here.
class Const {
override def toString: String = "Const"
}
val NullConst = new Const
type Type = Int
case class TypeConst(i: Int) extends Const
object TypeConst extends TypeConstExtractor
case class ValueConst(i: Int) extends Const
object ValueConst extends ValueConstExtractor {
def apply(t: Tree): Const = ???
}
// Stand-in for a compiler AST node: a variable is identified by its name.
case class Tree(name: String)
// Test variable keyed by its `Tree`; domain/nullability queries are stubbed.
class Var(val x: Tree) extends AbsVar {
override def equals(other: scala.Any): Boolean = other match {
case that: Var => this.x == that.x
case _ => false
}
override def hashCode(): Int = x.hashCode()
override def toString: String = {
s"Var($x)"
}
def domainSyms = None
def groupedDomains: List[Set[TestSolver.Sym]] = Nil
def implications = Nil
def mayBeNull = false
def propForEqualsTo(c: Const): Prop = ???
def registerEquality(c: Const) = ()
def registerNull() = ()
def symForStaticTp = None
}
object Var extends VarExtractor {
def apply(x: Tree): Var = new Var(x)
def unapply(v: Var): Some[Tree] = Some(v.x)
}
def prepareNewAnalysis() = {}
// Tests treat any warning as a failure.
def uncheckedWarning(pos: Position, msg: String, site: global.Symbol): Unit = sys.error(msg)
def reportWarning(msg: String) = sys.error(msg)
/**
 * The DPLL procedure only returns a minimal mapping from literal to value
 * such that the CNF formula is satisfied.
 * E.g. for:
 * `(a \\/ b)`
 * The DPLL procedure will find either {a = true} or {b = true}
 * as solution.
 *
 * The expansion step will amend both solutions with the unassigned variable
 * i.e., {a = true} will be expanded to {a = true, b = true} and
 * {a = true, b = false}.
 */
def expandUnassigned(solution: Solution): List[Model] = {
import solution._
// the number of solutions is doubled for every unassigned variable
val expandedModels = 1 << unassigned.size
var current = new mutable.ArrayBuffer[Model](expandedModels)
var next = new mutable.ArrayBuffer[Model](expandedModels)
current += model
// we use double buffering:
// read from `current` and create two models in `next` for each model read
for {
s <- unassigned
} {
for {
model <- current
} {
def force(s: Sym, pol: Boolean) = model + (s -> pol)
next += force(s, pol = true)
next += force(s, pol = false)
}
// swap the buffers and reuse the old one for the next variable
val tmp = current
current = next
next = tmp
next.clear()
}
current.toList
}
/**
 * Old CNF conversion code, used for reference:
 * - convert formula into NNF
 * (i.e., no negated terms, only negated variables)
 * - use distributive laws to convert into CNF
 */
def eqFreePropToSolvableViaDistribution(p: Prop) = {
val symbolMapping = new SymbolMapping(gatherSymbols(p))
type Formula = Array[TestSolver.Clause]
def formula(c: Clause*): Formula = c.toArray
def merge(a: Clause, b: Clause) = a ++ b
// NNF of `Not(p)`, pushing the negation inward (De Morgan).
def negationNormalFormNot(p: Prop): Prop = p match {
case And(ps) => Or(ps map negationNormalFormNot)
case Or(ps) => And(ps map negationNormalFormNot)
case Not(p) => negationNormalForm(p)
case True => False
case False => True
case s: Sym => Not(s)
}
def negationNormalForm(p: Prop): Prop = p match {
case Or(ps) => Or(ps map negationNormalForm)
case And(ps) => And(ps map negationNormalForm)
case Not(negated) => negationNormalFormNot(negated)
case True
| False
| (_: Sym) => p
}
// Empty formula is trivially true; a formula with one empty clause is false.
val TrueF: Formula = Array()
val FalseF = Array(clause())
def lit(sym: Sym) = Array(clause(symbolMapping.lit(sym)))
def negLit(sym: Sym) = Array(clause(-symbolMapping.lit(sym)))
def conjunctiveNormalForm(p: Prop): Formula = {
def distribute(a: Formula, b: Formula): Formula =
(a, b) match {
// true \\/ _ = true
// _ \\/ true = true
case (trueA, trueB) if trueA.size == 0 || trueB.size == 0 => TrueF
// lit \\/ lit
case (a, b) if a.size == 1 && b.size == 1 => formula(merge(a(0), b(0)))
// (c1 /\\ ... /\\ cn) \\/ d = ((c1 \\/ d) /\\ ... /\\ (cn \\/ d))
// d \\/ (c1 /\\ ... /\\ cn) = ((d \\/ c1) /\\ ... /\\ (d \\/ cn))
case (cs, ds) =>
val (big, small) = if (cs.size > ds.size) (cs, ds) else (ds, cs)
big flatMap (c => distribute(formula(c), small))
}
p match {
case True => TrueF
case False => FalseF
case s: Sym => lit(s)
case Not(s: Sym) => negLit(s)
case And(ps) =>
ps.toArray.flatMap(conjunctiveNormalForm _)
case Or(ps) =>
ps map conjunctiveNormalForm reduceLeft { (a, b) =>
distribute(a, b)
}
}
}
val cnf = conjunctiveNormalForm(negationNormalForm(p))
Solvable(cnf, symbolMapping)
}
}
}
/**
 * Testing CNF conversion via Tseitin vs NNF & expansion.
 *
 * Both conversions are run on the same formulas and must produce the same
 * set of models once unassigned variables have been expanded.
 */
class SolvingTest {
import scala.tools.nsc.transform.patmat.TestSolver.TestSolver._
// Extracts the plain variable name from a solver symbol, for ordering/printing.
object SymName {
def unapply(s: Sym): Some[String] = {
val Var(Tree(name)) = s.variable
Some(name)
}
}
// Deterministic ordering of models so solution lists can be compared.
implicit val ModelOrd: Ordering[TestSolver.TestSolver.Model] = {
import Ordering.Implicits._
val tupleOrd = Ordering.by[(Sym, Boolean), String]({ case (SymName(name), _) => name }).orElseBy(_._2)
Ordering.by { _.toSeq.sorted(tupleOrd) }
}
implicit val SolutionOrd: Ordering[TestSolver.TestSolver.Solution] =
Ordering.by(_.model)
def formatSolution(solution: Solution): String = {
formatModel(solution.model)
}
// Human-readable rendering of a model, e.g. "p -> T, q -> F".
def formatModel(model: Model): String = {
(for {
(SymName(name), value) <- model
} yield {
val v = if (value) "T" else "F"
s"$name -> $v"
}).mkString(", ")
}
// Convenience constructor for a propositional symbol with the given name.
def sym(name: String) = Sym(Var(Tree(name)), NullConst)
@Test
def testSymCreation(): Unit = {
val s1 = sym("hello")
val s2 = sym("hello")
assertEquals(s1, s2)
}
/**
 * Simplest possible test: solve a formula and check the solution(s)
 */
@Test
def testUnassigned(): Unit = {
val pSym = sym("p")
val solvable = propToSolvable(Or(pSym, Not(pSym)))
val solutions = TestSolver.TestSolver.findAllModelsFor(solvable)
val expected = List(Solution(Map(), List(pSym)))
assertEquals(expected, solutions)
}
/**
 * Unassigned variables must be expanded
 * for stable results
 */
@Test
def testNoUnassigned(): Unit = {
val pSym = sym("p")
val qSym = sym("q")
val solvable = propToSolvable(Or(pSym, Not(qSym)))
val solutions = findAllModelsFor(solvable)
val expanded = solutions.flatMap(expandUnassigned).sorted
val expected = Seq(
Map(pSym -> false, qSym -> false),
Map(pSym -> true, qSym -> false),
Map(pSym -> true, qSym -> true)
).sorted
assertEquals(expected, expanded)
}
@Test
def testTseitinVsExpansionFrom_t7020(): Unit = {
// Formulas extracted from the scala/bug#7020 exhaustivity analysis.
val formulas = Seq(
And(And(And(Not(sym("V1=null")),
sym("V1=scala.collection.immutable.::[?]")), And(Not(sym("V1=null")),
And(Or(sym("V2=4"), Or(sym("V2=5"), sym("V2=6"))), sym("V3=Nil")))),
And(And(Or(Not(sym("V1=scala.collection.immutable.::[?]")),
Not(sym("V1=null"))), And(Or(sym("V3=scala.collection.immutable.::[?]"),
Or(sym("V3=Nil"), sym("V3=null"))), And(Or(Not(sym("V3=Nil")),
Not(sym("V3=null"))),
And(Or(Not(sym("V3=scala.collection.immutable.::[?]")),
Not(sym("V3=null"))), And(Or(Not(sym("V1=Nil")), Not(sym("V1=null"))),
Or(sym("V1=scala.collection.immutable.::[?]"), Or(sym("V1=Nil"),
sym("V1=null")))))))), And(Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=1")), Not(sym("V3=Nil"))))), Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=2")), Not(sym("V3=Nil")))))))),
And(And(And(Not(sym("V1=null")),
sym("V1=scala.collection.immutable.::[?]")), And(Not(sym("V1=null")),
And(sym("V2=7"), sym("V3=Nil")))),
And(And(Or(Not(sym("V1=scala.collection.immutable.::[?]")),
Not(sym("V1=null"))), And(Or(sym("V3=scala.collection.immutable.::[?]"),
Or(sym("V3=Nil"), sym("V3=null"))), And(Or(Not(sym("V3=Nil")),
Not(sym("V3=null"))),
And(Or(Not(sym("V3=scala.collection.immutable.::[?]")),
Not(sym("V3=null"))), And(Or(Not(sym("V1=Nil")), Not(sym("V1=null"))),
Or(sym("V1=scala.collection.immutable.::[?]"), Or(sym("V1=Nil"),
sym("V1=null")))))))), And(And(Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=1")), Not(sym("V3=Nil"))))), Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=2")), Not(sym("V3=Nil")))))), Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(And(Not(sym("V2=4")), And(Not(sym("V2=5")), Not(sym("V2=6")))),
Not(sym("V3=Nil")))))))),
And(And(Not(sym("V1=null")),
sym("V1=scala.collection.immutable.::[?]")), And(Not(sym("V1=null")),
And(Or(sym("V2=4"), Or(sym("V2=5"), sym("V2=6"))), sym("V3=Nil")))),
And(And(Not(sym("V1=null")), sym("V1=scala.collection.immutable.::[?]")),
And(Not(sym("V1=null")), And(sym("V2=7"), sym("V3=Nil")))),
And(And(Or(Not(sym("V1=scala.collection.immutable.::[?]")),
Not(sym("V1=null"))), And(Or(sym("V3=scala.collection.immutable.::[?]"),
Or(sym("V3=Nil"), sym("V3=null"))), And(Or(Not(sym("V3=Nil")),
Not(sym("V3=null"))),
And(Or(Not(sym("V3=scala.collection.immutable.::[?]")),
Not(sym("V3=null"))), And(Or(Not(sym("V1=Nil")), Not(sym("V1=null"))),
Or(sym("V1=scala.collection.immutable.::[?]"), Or(sym("V1=Nil"),
sym("V1=null")))))))), And(And(Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=1")), Not(sym("V3=Nil"))))), Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=2")), Not(sym("V3=Nil")))))), Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(And(Not(sym("V2=4")), And(Not(sym("V2=5")), Not(sym("V2=6")))),
Not(sym("V3=Nil"))))))),
And(And(Or(Not(sym("V1=scala.collection.immutable.::[?]")),
Not(sym("V1=null"))), And(Or(sym("V3=scala.collection.immutable.::[?]"),
Or(sym("V3=Nil"), sym("V3=null"))), And(Or(Not(sym("V3=Nil")),
Not(sym("V3=null"))),
And(Or(Not(sym("V3=scala.collection.immutable.::[?]")),
Not(sym("V3=null"))), And(Or(Not(sym("V1=Nil")), Not(sym("V1=null"))),
Or(sym("V1=scala.collection.immutable.::[?]"), Or(sym("V1=Nil"),
sym("V1=null")))))))), And(Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=1")), Not(sym("V3=Nil"))))), Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=2")), Not(sym("V3=Nil"))))))),
And(And(Or(Not(sym("V1=scala.collection.immutable.::[?]")),
Not(sym("V1=null"))), And(Or(sym("V3=scala.collection.immutable.::[?]"),
Or(sym("V3=Nil"), sym("V3=null"))), And(Or(Not(sym("V3=Nil")),
Not(sym("V3=null"))),
And(Or(Not(sym("V3=scala.collection.immutable.::[?]")),
Not(sym("V3=null"))), And(Or(Not(sym("V1=Nil")), Not(sym("V1=null"))),
Or(sym("V1=scala.collection.immutable.::[?]"), Or(sym("V1=Nil"),
sym("V1=null")))))))), And(sym("V1=Nil"), And(Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(And(Not(sym("V2=4")), And(Not(sym("V2=5")), Not(sym("V2=6")))),
Not(sym("V3=Nil"))))), And(Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=1")), Not(sym("V3=Nil"))))), Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=2")), Not(sym("V3=Nil"))))))))),
And(And(Or(Or(False,
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(False,
Or(Not(sym("V2=1")), Not(sym("V3=Nil"))))), Or(Or(False,
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(False,
Or(Not(sym("V2=2")), Not(sym("V3=Nil")))))), And(Or(Or(False,
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(False,
Or(And(Not(sym("V2=4")), And(Not(sym("V2=5")), Not(sym("V2=6")))),
Not(sym("V3=Nil"))))), And(Or(Or(False,
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(False,
Or(Not(sym("V2=7")), Not(sym("V3=Nil"))))), Not(sym("V1=Nil"))))),
And(And(Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=1")), Not(sym("V3=Nil"))))), Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=2")), Not(sym("V3=Nil")))))), Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(And(Not(sym("V2=4")), And(Not(sym("V2=5")), Not(sym("V2=6")))),
Not(sym("V3=Nil")))))),
And(And(Or(sym("V3=scala.collection.immutable.::[?]"), sym("V3=Nil")),
Or(sym("V1=scala.collection.immutable.::[?]"), sym("V1=Nil"))),
And(And(Or(Or(False, Not(sym("V1=scala.collection.immutable.::[?]"))),
Or(False, Or(Not(sym("V2=1")), Not(sym("V3=Nil"))))), Or(Or(False,
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(False,
Or(Not(sym("V2=2")), Not(sym("V3=Nil")))))), And(Or(Or(False,
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(False,
Or(And(Not(sym("V2=4")), And(Not(sym("V2=5")), Not(sym("V2=6")))),
Not(sym("V3=Nil"))))), And(Or(Or(False,
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(False,
Or(Not(sym("V2=7")), Not(sym("V3=Nil"))))), Not(sym("V1=Nil")))))),
And(Not(sym("V1=null")), And(Or(sym("V2=4"), Or(sym("V2=5"), sym("V2=6"))),
sym("V3=Nil"))),
And(Not(sym("V1=null")), And(sym("V2=7"), sym("V3=Nil"))),
And(Not(sym("V1=null")), sym("V1=scala.collection.immutable.::[?]")),
And(Not(sym("V2=4")), And(Not(sym("V2=5")), Not(sym("V2=6")))),
And(Not(sym("V2=5")), Not(sym("V2=6"))),
And(Or(Not(sym("V1=Nil")), Not(sym("V1=null"))),
Or(sym("V1=scala.collection.immutable.::[?]"), Or(sym("V1=Nil"),
sym("V1=null")))),
And(Or(Not(sym("V1=scala.collection.immutable.::[?]")),
Not(sym("V1=null"))), And(Or(sym("V3=scala.collection.immutable.::[?]"),
Or(sym("V3=Nil"), sym("V3=null"))), And(Or(Not(sym("V3=Nil")),
Not(sym("V3=null"))),
And(Or(Not(sym("V3=scala.collection.immutable.::[?]")),
Not(sym("V3=null"))), And(Or(Not(sym("V1=Nil")), Not(sym("V1=null"))),
Or(sym("V1=scala.collection.immutable.::[?]"), Or(sym("V1=Nil"),
sym("V1=null")))))))),
And(Or(Not(sym("V3=Nil")), Not(sym("V3=null"))),
And(Or(Not(sym("V3=scala.collection.immutable.::[?]")),
Not(sym("V3=null"))), And(Or(Not(sym("V1=Nil")), Not(sym("V1=null"))),
Or(sym("V1=scala.collection.immutable.::[?]"), Or(sym("V1=Nil"),
sym("V1=null")))))),
And(Or(Not(sym("V3=scala.collection.immutable.::[?]")),
Not(sym("V3=null"))), And(Or(Not(sym("V1=Nil")), Not(sym("V1=null"))),
Or(sym("V1=scala.collection.immutable.::[?]"), Or(sym("V1=Nil"),
sym("V1=null"))))),
And(Or(Or(False,
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(False,
Or(And(Not(sym("V2=4")), And(Not(sym("V2=5")), Not(sym("V2=6")))),
Not(sym("V3=Nil"))))), And(Or(Or(False,
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(False,
Or(Not(sym("V2=7")), Not(sym("V3=Nil"))))), Not(sym("V1=Nil")))),
And(Or(Or(False, Not(sym("V1=scala.collection.immutable.::[?]"))), Or(False,
Or(Not(sym("V2=1")), Not(sym("V3=Nil"))))), Or(Or(False,
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(False,
Or(Not(sym("V2=2")), Not(sym("V3=Nil")))))),
And(Or(Or(False,
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(False,
Or(Not(sym("V2=7")), Not(sym("V3=Nil"))))), Not(sym("V1=Nil"))),
And(Or(Or(sym("V1=null"), Not(sym("V1=scala.collection.immutable.::[?]"))),
Or(sym("V1=null"), Or(And(Not(sym("V2=4")), And(Not(sym("V2=5")),
Not(sym("V2=6")))), Not(sym("V3=Nil"))))), And(Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=1")), Not(sym("V3=Nil"))))), Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=2")), Not(sym("V3=Nil"))))))),
And(Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=1")), Not(sym("V3=Nil"))))), Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=2")), Not(sym("V3=Nil")))))),
And(Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=7")), Not(sym("V3=Nil"))))),
And(And(Or(Not(sym("V1=scala.collection.immutable.::[?]")),
Not(sym("V1=null"))), And(Or(sym("V3=scala.collection.immutable.::[?]"),
Or(sym("V3=Nil"), sym("V3=null"))), And(Or(Not(sym("V3=Nil")),
Not(sym("V3=null"))),
And(Or(Not(sym("V3=scala.collection.immutable.::[?]")),
Not(sym("V3=null"))), And(Or(Not(sym("V1=Nil")), Not(sym("V1=null"))),
Or(sym("V1=scala.collection.immutable.::[?]"), Or(sym("V1=Nil"),
sym("V1=null")))))))), And(sym("V1=Nil"), And(Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(And(Not(sym("V2=4")), And(Not(sym("V2=5")), Not(sym("V2=6")))),
Not(sym("V3=Nil"))))), And(Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=1")), Not(sym("V3=Nil"))))), Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=2")), Not(sym("V3=Nil")))))))))),
And(Or(sym("V2=4"), Or(sym("V2=5"), sym("V2=6"))), sym("V3=Nil")),
And(Or(sym("V3=scala.collection.immutable.::[?]"), Or(sym("V3=Nil"),
sym("V3=null"))), And(Or(Not(sym("V3=Nil")), Not(sym("V3=null"))),
And(Or(Not(sym("V3=scala.collection.immutable.::[?]")),
Not(sym("V3=null"))), And(Or(Not(sym("V1=Nil")), Not(sym("V1=null"))),
Or(sym("V1=scala.collection.immutable.::[?]"), Or(sym("V1=Nil"),
sym("V1=null"))))))),
And(Or(sym("V3=scala.collection.immutable.::[?]"),
sym("V3=Nil")), Or(sym("V1=scala.collection.immutable.::[?]"),
sym("V1=Nil"))),
And(sym("V1=Nil"), And(Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(And(Not(sym("V2=4")), And(Not(sym("V2=5")), Not(sym("V2=6")))),
Not(sym("V3=Nil"))))), And(Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=1")), Not(sym("V3=Nil"))))), Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=2")), Not(sym("V3=Nil")))))))),
And(sym("V2=7"), sym("V3=Nil")),
False,
Not(sym("V1=Nil")),
Or(And(Not(sym("V2=4")),
And(Not(sym("V2=5")), Not(sym("V2=6")))), Not(sym("V3=Nil"))),
Or(False, Not(sym("V1=scala.collection.immutable.::[?]"))),
Or(False,
Or(And(Not(sym("V2=4")), And(Not(sym("V2=5")), Not(sym("V2=6")))),
Not(sym("V3=Nil")))),
Or(False, Or(Not(sym("V2=1")), Not(sym("V3=Nil")))),
Or(Not(sym("V1=Nil")), Not(sym("V1=null"))),
Or(Not(sym("V3=scala.collection.immutable.::[?]")), Not(sym("V3=null"))),
Or(Or(False, Not(sym("V1=scala.collection.immutable.::[?]"))), Or(False,
Or(And(Not(sym("V2=4")), And(Not(sym("V2=5")), Not(sym("V2=6")))),
Not(sym("V3=Nil"))))),
Or(Or(False,
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(False,
Or(Not(sym("V2=1")), Not(sym("V3=Nil"))))),
Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(And(Not(sym("V2=4")), And(Not(sym("V2=5")), Not(sym("V2=6")))),
Not(sym("V3=Nil"))))),
Or(Or(sym("V1=null"),
Not(sym("V1=scala.collection.immutable.::[?]"))), Or(sym("V1=null"),
Or(Not(sym("V2=1")), Not(sym("V3=Nil"))))),
Or(sym("V1=null"), Not(sym("V1=scala.collection.immutable.::[?]"))),
Or(sym("V1=null"),
Or(And(Not(sym("V2=4")), And(Not(sym("V2=5")), Not(sym("V2=6")))),
Not(sym("V3=Nil")))),
Or(sym("V1=null"), Or(Not(sym("V2=1")), Not(sym("V3=Nil")))),
Or(sym("V1=scala.collection.immutable.::[?]"),
Or(sym("V1=Nil"), sym("V1=null"))),
Or(sym("V1=scala.collection.immutable.::[?]"), sym("V1=Nil")),
Or(sym("V2=4"), Or(sym("V2=5"), sym("V2=6"))),
sym("V3=scala.collection.immutable.::[?]")
)
formulas foreach {
f =>
// build CNF
val tseitinCnf = propToSolvable(f)
val expansionCnf = eqFreePropToSolvableViaDistribution(f)
// ALL-SAT
val tseitinSolutions = findAllModelsFor(tseitinCnf)
val expansionSolutions = findAllModelsFor(expansionCnf)
// expand unassigned variables
// (otherwise solutions can not be compared)
val tseitinNoUnassigned = tseitinSolutions.flatMap(expandUnassigned).sorted
val expansionNoUnassigned = expansionSolutions.flatMap(expandUnassigned).sorted
assertEquals(tseitinNoUnassigned, expansionNoUnassigned)
}
}
// Naive pairwise at-most-one encoding: (!a \/ !b) for every pair, used as
// the reference against the solver's built-in AtMostOne encoding.
def pairWiseEncoding(ops: List[Sym]) = {
And(ops.combinations(2).collect {
case a :: b :: Nil => Or(Not(a), Not(b))
}.toSet[TestSolver.TestSolver.Prop])
}
@Test
def testAtMostOne(): Unit = {
val dummySym = sym("dummy")
val syms = "pqrstu".map(c => sym(c.toString)).toList
// expand unassigned variables
// (otherwise solutions can not be compared)
val expected = TestSolver.TestSolver.findAllModelsFor(propToSolvable(And(dummySym, pairWiseEncoding(syms)))).flatMap(expandUnassigned)
val actual = TestSolver.TestSolver.findAllModelsFor(propToSolvable(And(dummySym, AtMostOne(syms)))).flatMap(expandUnassigned)
assertEquals(expected.toSet, actual.toSet)
}
}
| scala/scala | test/junit/scala/tools/nsc/transform/patmat/SolvingTest.scala | Scala | apache-2.0 | 23,998 |
package com.querydsl.scala
import java.io.StringWriter
import com.querydsl.codegen.utils._
import com.querydsl.codegen._
import org.junit.Assert._
import org.junit._
/**
 * Tests for `ScalaBeanSerializer`: `Print` checks the generated source text
 * line by line, `Compile` checks that the generated source actually compiles.
 */
class ScalaBeanSerializerTest {
  val typeMappings = ScalaTypeMappings.create
  // JUnit instantiates the class once per test, so each test gets a fresh writer.
  val writer = new StringWriter()
  val entityType = EntityTypes.entityType
  @Test
  def Print(): Unit = {
    val serializer = new ScalaBeanSerializer(typeMappings)
    serializer.javaBeanSupport = true
    typeMappings.register(entityType, new QueryTypeFactoryImpl("Q", "", "").create(entityType))
    serializer.serialize(entityType, SimpleSerializerConfig.DEFAULT, new ScalaWriter(writer))
    //println(writer.toString)
    // Expected generated source; each (trimmed) line must appear in the output.
    val toMatch = """package com.querydsl
import scala.beans.BeanProperty
import java.util.List
import java.util.Map
/**
* DomainClass is a Querydsl bean type
*/
class DomainClass {
@BeanProperty var arrayField: Array[String] = _
@BeanProperty var boolean$: java.lang.Boolean = _
@BeanProperty var collection: java.util.Collection[DomainClass] = _
@BeanProperty var date: java.util.Date = _
@BeanProperty var entityField: DomainClass = _
@BeanProperty var integer: Integer = _
@BeanProperty var listField: List[DomainClass] = _
@BeanProperty var mapField: Map[DomainClass, DomainClass] = _
@BeanProperty var setField: java.util.Set[DomainClass] = _
@BeanProperty var time: java.sql.Time = _"""
    // Collapse whitespace so formatting differences do not affect the match.
    val str = writer.toString.replaceAll("\\s+", " ")
    //println(str)
    toMatch.split("\\n").map(_.trim).foreach { line =>
      assertTrue(line, str.contains(line))
    }
  }
  @Test
  def Compile(): Unit = {
    val serializer = new ScalaBeanSerializer(typeMappings)
    serializer.createCompanionObject = false
    typeMappings.register(entityType, new QueryTypeFactoryImpl("Q", "", "").create(entityType))
    serializer.serialize(entityType, SimpleSerializerConfig.DEFAULT, new ScalaWriter(writer))
    val str = writer.toString
    CompileTestUtils.assertCompileSuccess(str)
  }
} | lpandzic/querydsl | querydsl-scala/src/test/scala/com/querydsl/scala/ScalaBeanSerializerTest.scala | Scala | apache-2.0 | 2,016 |
package so.blacklight.swarm.stats
// Base type for statistics-related messages exchanged with the stats actor.
// NOTE(review): consider making this `sealed` (and a trait) if all subtypes
// live in this file — left as-is because that would change the external API.
class StatEvent
// Request: increment the named counter by one.
case class IncrementCounter(counter: String) extends StatEvent
// Request: decrement the named counter by one.
case class DecrementCounter(counter: String) extends StatEvent
// Query: ask for the current value of the named counter.
case class GetCounterValue(counter: String) extends StatEvent
// Reply to GetCounterValue; `None` when the counter does not exist.
case class CounterValue(counter: String, value: Option[Long]) extends StatEvent
// Reply carrying a snapshot of several counters at once.
case class BatchCounterValue(toMap: Map[String, Long]) extends StatEvent
| xea/swarm-msg | src/main/scala/so/blacklight/swarm/stats/StatEvent.scala | Scala | apache-2.0 | 393 |
package im.actor.server.sequence
import akka.actor._
import akka.pattern.pipe
import im.actor.server.db.DbExtension
import im.actor.server.model.push.{ ActorPushCredentials, ApplePushCredentials, GooglePushCredentials, PushCredentials }
import im.actor.server.model.{ DeviceType, Peer, PeerType }
import im.actor.server.persist.AuthSessionRepo
import im.actor.server.persist.configs.ParameterRepo
import im.actor.server.persist.push.{ ActorPushCredentialsRepo, ApplePushCredentialsRepo, GooglePushCredentialsRepo }
import im.actor.server.push.actor.ActorPush
import im.actor.server.sequence.UserSequenceCommands.ReloadSettings
import im.actor.server.userconfig.SettingsKeys
import slick.dbio.DBIO
import scala.concurrent.Future
import scala.util.control.NoStackTrace
// Marker trait for commands handled by the vendor push machinery.
private[sequence] trait VendorPushCommand
// App id and auth session id associated with a set of push credentials.
private final case class PushCredentialsInfo(appId: Int, authSid: Int)
// Notification settings: one generic fallback plus per-device-type overrides
// (keyed by DeviceType string).
private final case class AllNotificationSettings(
generic: NotificationSettings = NotificationSettings(),
specific: Map[String, NotificationSettings] = Map.empty
)
// Per-device-type notification preferences; everything is enabled by default.
// `peers` holds explicit per-peer on/off overrides, `customSounds` per-peer ringtones.
private final case class NotificationSettings(
enabled: Boolean = true,
sound: Boolean = true,
vibration: Boolean = true,
text: Boolean = true,
customSounds: Map[Peer, String] = Map.empty,
peers: Map[Peer, Boolean] = Map.empty
)
// Signals that deleting push credentials from the database failed.
private case object FailedToUnregister extends RuntimeException("Failed to unregister push credentials")
private[sequence] object VendorPush {
// Internal message: initial credentials load from the database completed.
private final case class Initialized(creds: Seq[(PushCredentials, PushCredentialsInfo)])
def props(userId: Int) =
Props(new VendorPush(userId))
}
private object SettingsControl {
def props(userId: Int) = Props(new SettingsControl(userId))
}
/**
 * Loads a user's notification settings from the database and pushes them to
 * the parent (`VendorPush`) actor. While a load is in flight, all other
 * messages are stashed; on failure the load is retried.
 */
private final class SettingsControl(userId: Int) extends Actor with ActorLogging with Stash {
import context.dispatcher
private val db = DbExtension(context.system).db
private var notificationSettings = AllNotificationSettings()
// Kick off the initial load immediately on actor start.
self ! ReloadSettings()
def receive: Receive = {
case ReloadSettings() ⇒
// Switch to the loading state (stacked, so unbecome() returns here).
context.become(waitForSettings, discardOld = false)
load() pipeTo self
}
def waitForSettings: Receive = {
case s: AllNotificationSettings ⇒
this.notificationSettings = s
log.debug("Loaded settings: {}", s)
// Hand the freshly loaded settings to the parent VendorPush actor.
context.parent ! s
unstashAll()
context.unbecome()
case Status.Failure(e) ⇒
// NOTE(review): retries immediately with no backoff — confirm intended.
log.error(e, "Failed to load settings")
load() pipeTo self
case msg ⇒ stash()
}
// Loads the generic settings plus one override block per device type.
private def load(): Future[AllNotificationSettings] =
db.run(for {
generic ← loadAction(DeviceType.Generic)
mobile ← loadAction(DeviceType.Mobile)
tablet ← loadAction(DeviceType.Tablet)
desktop ← loadAction(DeviceType.Desktop)
} yield AllNotificationSettings(
generic = generic,
specific = Map(
DeviceType.Mobile → mobile,
DeviceType.Tablet → tablet,
DeviceType.Desktop → desktop
)
))
// Reads the individual boolean flags and per-peer overrides for one device type;
// every flag defaults to true when the parameter is absent.
private def loadAction(deviceType: String): DBIO[NotificationSettings] = {
for {
enabled ← ParameterRepo.findBooleanValue(userId, SettingsKeys.enabled(deviceType), true)
sound ← ParameterRepo.findBooleanValue(userId, SettingsKeys.soundEnabled(deviceType), true)
vibration ← ParameterRepo.findBooleanValue(userId, SettingsKeys.vibrationEnabled(deviceType), true)
text ← ParameterRepo.findBooleanValue(userId, SettingsKeys.textEnabled(deviceType), true)
peers ← ParameterRepo.findPeerNotifications(userId, deviceType)
customSounds ← ParameterRepo.findPeerRingtone(userId)
} yield NotificationSettings(enabled, sound, vibration, text, customSounds.toMap, peers.toMap)
}
}
private[sequence] final class VendorPush(userId: Int) extends Actor with ActorLogging with Stash {
import VendorPush._
import context.dispatcher
import im.actor.server.sequence.UserSequenceCommands._
protected val db = DbExtension(context.system).db
private val settingsControl = context.actorOf(SettingsControl.props(userId), "settings")
private val googlePushProvider = new GooglePushProvider(userId, context.system)
private val applePushProvider = new ApplePushProvider(userId)(context.system)
private val actorPushProvider = ActorPush(context.system)
private var mapping: Map[PushCredentials, PushCredentialsInfo] = Map.empty
private var notificationSettings = AllNotificationSettings()
init()
def receive = initializing
def initializing: Receive = {
case Initialized(creds) ⇒
unstashAll()
context become initialized
mapping = creds.toMap
case Status.Failure(e) ⇒
log.error(e, "Failed to init")
throw e
case msg ⇒ stash()
}
def initialized = commands orElse internal
def commands: Receive = {
case r: RegisterPushCredentials if r.creds.isActor ⇒
register(r.getActor)
case r: RegisterPushCredentials if r.creds.isApple ⇒
register(r.getApple)
case r: RegisterPushCredentials if r.creds.isGoogle ⇒
register(r.getGoogle)
case u: UnregisterPushCredentials if u.creds.isActor ⇒
unregister(u.getActor)
case u: UnregisterPushCredentials if u.creds.isApple ⇒
unregister(u.getApple)
case u: UnregisterPushCredentials if u.creds.isGoogle ⇒
unregister(u.getGoogle)
case DeliverPush(seq, rules) ⇒
deliver(seq, rules.getOrElse(PushRules()))
case r: ReloadSettings ⇒
settingsControl forward r
}
def internal: Receive = {
case n: AllNotificationSettings ⇒
this.notificationSettings = n
case (c: PushCredentials, info: PushCredentialsInfo) ⇒
mapping += (c → info)
}
private def init(): Unit = {
log.debug("Initializing")
db.run(for {
googleCreds ← GooglePushCredentialsRepo.findByUser(userId)
appleCreds ← ApplePushCredentialsRepo.findByUser(userId)
actorCreds ← ActorPushCredentialsRepo.findByUser(userId)
google ← DBIO.sequence(googleCreds map withInfo) map (_.flatten)
apple ← DBIO.sequence(appleCreds.filterNot(_.isVoip) map withInfo) map (_.flatten)
actor ← DBIO.sequence(actorCreds map withInfo) map (_.flatten)
} yield Initialized(apple ++ google ++ actor)) pipeTo self
}
/**
* Delivers a push to all credentials according to push rules
*
* @param seq
* @param rules
*/
private def deliver(seq: Int, rules: PushRules): Unit = {
mapping foreach {
case (creds, info) ⇒ deliver(seq, rules, creds, info)
}
}
/**
* Delivers to a specific creds according to push rules
*
* @param seq
* @param rules
* @param creds
* @param info
*/
private def deliver(seq: Int, rules: PushRules, creds: PushCredentials, info: PushCredentialsInfo): Unit = {
val deviceType = DeviceType(info.appId)
if (rules.excludeAuthSids.contains(info.authSid)) {
log.debug("AuthSid is excluded, not pushing")
} else {
rules.data match {
case Some(data) ⇒
val settings = notificationSettings.specific.getOrElse(deviceType, notificationSettings.generic)
val isVisible =
(settings.enabled, data.peer) match {
case (true, Some(peer)) ⇒
settings.peers.get(peer) match {
case Some(true) ⇒
log.debug("Notifications for peer {} are enabled, push will be visible", peer)
true
case Some(false) ⇒
log.debug("Notifications for peer {} are disabled, push will be invisible", peer)
false
case None ⇒
log.debug("Notifications for peer {} are not set, push will be visible", peer)
true
}
case (true, None) ⇒
log.debug("Notifications are enabled, delivering visible push")
true
case (false, _) ⇒
log.debug("Notifications are disabled, delivering invisible push")
false
}
if (isVisible)
deliverVisible(
seq = seq,
creds = creds,
data = data,
isTextEnabled = settings.text,
isSoundEnabled = settings.sound,
customSound = data.peer flatMap (p ⇒ settings.customSounds.get(p)),
isVibrationEnabled = settings.vibration
)
else
deliverInvisible(seq, creds)
case _ ⇒
log.debug("No text, delivering simple seq")
deliverInvisible(seq, creds)
}
}
}
/**
* Delivers an invisible push with seq and contentAvailable
*
* @param seq
* @param creds
*/
private def deliverInvisible(seq: Int, creds: PushCredentials): Unit = {
creds match {
case c: GooglePushCredentials ⇒
googlePushProvider.deliverInvisible(seq, c)
case c: ApplePushCredentials ⇒
applePushProvider.deliverInvisible(seq, c)
case c: ActorPushCredentials ⇒
actorPushProvider.deliver(seq, c)
}
}
/**
* Delivers a visible push with seq and (optionally) text, sound, vibration
*
* @param seq
* @param creds
* @param data
* @param isTextEnabled
* @param isSoundEnabled
* @param isVibrationEnabled
* @return
*/
private def deliverVisible(
seq: Int,
creds: PushCredentials,
data: PushData,
isTextEnabled: Boolean,
isSoundEnabled: Boolean,
customSound: Option[String],
isVibrationEnabled: Boolean
) = {
creds match {
case c: GooglePushCredentials ⇒
googlePushProvider.deliverVisible(
seq = seq,
creds = c,
data = data,
isTextEnabled = isTextEnabled,
isSoundEnabled = isSoundEnabled,
isVibrationEnabled = isVibrationEnabled
)
case c: ApplePushCredentials ⇒
applePushProvider.deliverVisible(
seq = seq,
creds = c,
data = data,
isTextEnabled = isTextEnabled,
isSoundEnabled = isSoundEnabled,
customSound = customSound,
isVibrationEnabled = isVibrationEnabled
)
case c: ActorPushCredentials ⇒
actorPushProvider.deliver(seq, c)
}
}
  /**
   * Resolves the auth-session info for the given credentials and pipes the
   * resulting `(creds, info)` pair back to this actor as a message; the lookup
   * failure (missing appId) is piped to self as a `Status.Failure` as well.
   */
  private def register(creds: PushCredentials): Unit =
    db.run {
      withInfo(creds) map (_.getOrElse(throw new RuntimeException(s"Cannot find appId for $creds")))
    } pipeTo self
  /**
   * Looks up the auth session for the credentials' authId and, when found,
   * pairs the credentials with a [[PushCredentialsInfo]] built from the
   * session's appId and id. Yields `None` when no session exists.
   */
  private def withInfo(c: PushCredentials): DBIO[Option[(PushCredentials, PushCredentialsInfo)]] =
    for {
      authSessionOpt ← AuthSessionRepo.findByAuthId(c.authId)
    } yield authSessionOpt map (s ⇒ c → PushCredentialsInfo(s.appId, s.id))
  // Drops the credentials from the in-memory mapping only; the database rows
  // are untouched (see unregister for the persistent delete).
  private def remove(creds: PushCredentials): Unit =
    mapping -= creds
private def unregister(creds: PushCredentials): Unit = {
val replyTo = sender()
if (mapping.contains(creds)) {
remove(creds)
val removeFu = db.run(creds match {
case c: GooglePushCredentials ⇒ GooglePushCredentialsRepo.deleteByToken(c.regId)
case c: ApplePushCredentials ⇒ ApplePushCredentialsRepo.deleteByToken(c.token.toByteArray)
case c: ActorPushCredentials ⇒ ActorPushCredentialsRepo.deleteByTopic(c.endpoint)
}) map (_ ⇒ UnregisterPushCredentialsAck()) pipeTo replyTo
removeFu onFailure {
case e ⇒
log.error("Failed to unregister creds: {}", creds)
replyTo ! Status.Failure(FailedToUnregister)
}
} else {
replyTo ! UnregisterPushCredentialsAck()
}
}
} | ljshj/actor-platform | actor-server/actor-core/src/main/scala/im/actor/server/sequence/VendorPush.scala | Scala | mit | 11,715 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.tools.export
import com.beust.jcommander.Parameters
import org.locationtech.geomesa.features.avro.AvroSimpleFeatureUtils
import org.locationtech.geomesa.tools._
import org.locationtech.geomesa.tools.utils.CLArgResolver
import org.locationtech.geomesa.utils.geotools.SftArgResolver
/** CLI command that derives an Avro schema from a SimpleFeatureType and prints it. */
class GenerateAvroSchemaCommand extends Command {

  override val name = "gen-avro-schema"

  val params = new GenerateAvroSchemaParams

  override def execute(): Unit = {
    // Resolve the feature type from the command-line spec/type-name, build the
    // Avro schema for it (user data included), then pretty-print the result.
    val featureType = CLArgResolver.getSft(params.spec, params.featureName)
    val avroSchema = AvroSimpleFeatureUtils.generateSchema(featureType, withUserData = true)
    Command.output.info(avroSchema.toString(true))
  }
}
// Parameter holder: a feature spec plus an optional type name, both inherited.
@Parameters(commandDescription = "Generate an Avro schema from a SimpleFeatureType")
class GenerateAvroSchemaParams extends RequiredFeatureSpecParam with OptionalTypeNameParam
| ronq/geomesa | geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/export/GenerateAvroSchemaCommand.scala | Scala | apache-2.0 | 1,331 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.stream.table.validation
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.scala._
import org.apache.flink.table.expressions.utils._
import org.apache.flink.table.runtime.utils._
import org.apache.flink.table.utils.{ObjectTableFunction, TableFunc1, TableFunc2, TableTestBase}
import org.junit.Assert.{assertTrue, fail}
import org.junit.Test
/**
 * Validation tests for table-function (UDTF) correlate calls on the streaming
 * Table API: registering Scala singleton objects must be rejected, table
 * functions may only appear in join/leftOuterJoin positions, and bad or
 * unregistered function calls must fail with the expected messages.
 */
class CorrelateValidationTest extends TableTestBase {
  @Test
  def testRegisterFunctionException(): Unit ={
    val util = streamTestUtil()
    val t = util.addTable[(Int, Long, String)]('a, 'b, 'c)
    // check scala object is forbidden
    expectExceptionThrown(
      util.tableEnv.registerFunction("func3", ObjectTableFunction), "Scala object")
    expectExceptionThrown(
      util.javaTableEnv.registerFunction("func3", ObjectTableFunction), "Scala object")
    expectExceptionThrown(t.join(ObjectTableFunction('a, 1)), "Scala object")
  }
  // Every Table operation other than join/leftOuterJoin must reject a bare
  // table-function call (both the String and the Expression API variants).
  @Test
  def testInvalidTableFunctions(): Unit = {
    val util = streamTestUtil()
    val func1 = new TableFunc1
    util.javaTableEnv.registerFunction("func1", func1)
    // table function call select
    expectExceptionThrown(
      func1('c).select("f0"),
      "TableFunction can only be used in join and leftOuterJoin."
    )
    // table function call select
    expectExceptionThrown(
      func1('c).select('f0),
      "TableFunction can only be used in join and leftOuterJoin."
    )
    // table function call writeToSink
    expectExceptionThrown(
      func1('c).writeToSink(null),
      "Cannot translate a query with an unbounded table function call."
    )
    // table function call distinct
    expectExceptionThrown(
      func1('c).distinct(),
      "TableFunction can only be used in join and leftOuterJoin."
    )
    // table function call filter
    expectExceptionThrown(
      func1('c).filter('f0 === "?"),
      "TableFunction can only be used in join and leftOuterJoin."
    )
    // table function call filter
    expectExceptionThrown(
      func1('c).filter("f0 = '?'"),
      "TableFunction can only be used in join and leftOuterJoin."
    )
    // table function call limit
    expectExceptionThrown(
      func1('c).orderBy('f0).limit(3),
      "TableFunction can only be used in join and leftOuterJoin."
    )
    // table function call limit
    expectExceptionThrown(
      func1('c).orderBy('f0).limit(0, 3),
      "TableFunction can only be used in join and leftOuterJoin."
    )
    // table function call orderBy
    expectExceptionThrown(
      func1('c).orderBy("f0"),
      "TableFunction can only be used in join and leftOuterJoin."
    )
    // table function call orderBy
    expectExceptionThrown(
      func1('c).orderBy('f0),
      "TableFunction can only be used in join and leftOuterJoin."
    )
    // table function call where
    expectExceptionThrown(
      func1('c).where("f0 = '?'"),
      "TableFunction can only be used in join and leftOuterJoin."
    )
    // table function call where
    expectExceptionThrown(
      func1('c).where('f0 === "?"),
      "TableFunction can only be used in join and leftOuterJoin."
    )
  }
  @Test
  def testInvalidTableFunction(): Unit = {
    val util = streamTestUtil()
    val t = util.addTable[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
    //=================== check scala object is forbidden =====================
    // Scala table environment register
    expectExceptionThrown(util.addFunction("udtf", ObjectTableFunction), "Scala object")
    // Java table environment register
    expectExceptionThrown(
      util.tableEnv.registerFunction("udtf", ObjectTableFunction), "Scala object")
    // Scala Table API directly call
    expectExceptionThrown(t.join(ObjectTableFunction('a, 1)), "Scala object")
    //============ throw exception when table function is not registered =========
    // Java Table API call
    expectExceptionThrown(
      t.join(new Table(util.tableEnv, "nonexist(a)")
      ), "Undefined function: NONEXIST")
    // SQL API call
    expectExceptionThrown(
      util.tableEnv.sql("SELECT * FROM MyTable, LATERAL TABLE(nonexist(a))"),
      "No match found for function signature nonexist(<NUMERIC>)")
    //========= throw exception when the called function is a scalar function ====
    util.tableEnv.registerFunction("func0", Func0)
    // Java Table API call
    expectExceptionThrown(
      t.join(new Table(util.tableEnv, "func0(a)")),
      "only accept String that define table function",
      classOf[TableException])
    // SQL API call
    // NOTE: it doesn't throw an exception but an AssertionError, maybe a Calcite bug
    expectExceptionThrown(
      util.tableEnv.sql("SELECT * FROM MyTable, LATERAL TABLE(func0(a))"),
      null,
      classOf[AssertionError])
    //========== throw exception when the parameters is not correct ===============
    // Java Table API call
    util.addFunction("func2", new TableFunc2)
    expectExceptionThrown(
      t.join(new Table(util.tableEnv, "func2(c, c)")),
      "Given parameters of function 'FUNC2' do not match any signature")
    // SQL API call
    expectExceptionThrown(
      util.tableEnv.sql("SELECT * FROM MyTable, LATERAL TABLE(func2(c, c))"),
      "No match found for function signature func2(<CHARACTER>, <CHARACTER>)")
  }
  // ----------------------------------------------------------------------------------------------
  /**
   * Runs `function` and asserts that it throws exactly `clazz` (default
   * [[ValidationException]]) with a message containing `keywords`; the message
   * check is skipped when `keywords` is null. Fails on no exception or on an
   * exception of a different class.
   */
  private def expectExceptionThrown(
      function: => Unit,
      keywords: String,
      clazz: Class[_ <: Throwable] = classOf[ValidationException])
    : Unit = {
    try {
      function
      fail(s"Expected a $clazz, but no exception is thrown.")
    } catch {
      case e if e.getClass == clazz =>
        if (keywords != null) {
          assertTrue(
            s"The exception message '${e.getMessage}' doesn't contain keyword '$keywords'",
            e.getMessage.contains(keywords))
        }
      case e: Throwable => fail(s"Expected throw ${clazz.getSimpleName}, but is $e.")
    }
  }
}
| zohar-mizrahi/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/table/validation/CorrelateValidationTest.scala | Scala | apache-2.0 | 6,857 |
package tutorial.webapp
import org.scalajs.dom
import dom.html
import scalajs.js.annotation.JSExport
import scalatags.JsDom.all._
@JSExport
object HelloWorld1 extends{
  /** Renders a "Hello World!" greeting with a styled sentence into `target`. */
  @JSExport
  def main(target: html.Div) = {
    val jumper = "fox"
    val sleeper = "dog"
    // Build the Scalatags tree first, then convert it to a real DOM node and attach it.
    val greeting =
      div(
        h1("Hello World!"),
        p(
          "The quick brown ", b(jumper),
          " jumps over the lazy ",
          i(sleeper), "."
        )
      )
    target.appendChild(greeting.render)
  }
}
| CraigGiles/mynab | scalajs/src/main/scala/tutorial/webapp/ScalatagsExample.scala | Scala | mit | 473 |
/*
* Open Korean Text - Scala library to process Korean text
*
* Copyright 2014 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openkoreantext.processor.tokenizer
import java.util.HashMap
import org.openkoreantext.processor.TestBase
import org.openkoreantext.processor.tokenizer.KoreanTokenizer._
import org.openkoreantext.processor.util.KoreanDictionaryProvider
import org.openkoreantext.processor.util.KoreanPos._
/**
 * Unit tests for the Korean tokenizer: ParsedChunk scoring counters,
 * tokenization of known/unknown words, edge cases, user-dictionary additions,
 * noun/josa disambiguation and the removeUnusedSolutions cache pruning helper.
 */
class KoreanTokenizerTest extends TestBase {
  // Fixture chunks exercising the ParsedChunk scoring counters below.
  // Offsets/lengths are all zero on purpose — only counts/flags matter here.
  val parsedChunk = ParsedChunk(
    List(KoreanToken("하", Noun, 0, 0), KoreanToken("하", Noun, 0, 0), KoreanToken("하", Noun, 0, 0)), 1
  )
  val parsedChunkWithTwoTokens = ParsedChunk(
    List(KoreanToken("하", Noun, 0, 0), KoreanToken("하", Noun, 0, 0)), 1
  )
  val parsedChunkWithUnknowns = ParsedChunk(
    List(KoreanToken("하하", Noun, 0, 0, unknown = true),
      KoreanToken("하", Noun, 0, 0, unknown = true), KoreanToken("하", Noun, 0, 0)), 1
  )
  val parsedChunkWithCommonNouns = ParsedChunk(
    List(KoreanToken("사람", Noun, 0, 0), KoreanToken("강아지", Noun, 0, 0)), 1
  )
  val parsedChunkWithVerbs = ParsedChunk(
    List(KoreanToken("사람", Noun, 0, 0), KoreanToken("하다", Verb, 0, 0)), 1
  )
  val parsedChunkWithExactMatch = ParsedChunk(
    List(KoreanToken("강아지", Noun, 0, 0)), 1
  )
  test("ParsedChunk should correctly count unknowns") {
    assert(
      parsedChunkWithUnknowns.countUnknowns === 2
    )
    assert(
      parsedChunk.countUnknowns === 0
    )
  }
  test("ParsedChunk should correctly count tokens") {
    assert(
      parsedChunk.countTokens === 3
    )
    assert(
      parsedChunkWithTwoTokens.countTokens === 2
    )
  }
  test("ParsedChunk should correctly return unknown coverage") {
    assert(
      parsedChunkWithUnknowns.getUnknownCoverage === 3
    )
    assert(
      parsedChunkWithTwoTokens.getUnknownCoverage === 0
    )
  }
  test("ParsedChunk should get correct frequency score") {
    assert(
      parsedChunkWithTwoTokens.getFreqScore === 1.0f
    )
    assert(
      parsedChunkWithCommonNouns.getFreqScore === 0.4544f
    )
  }
  test("ParsedChunk should correctly count POSes") {
    assert(
      parsedChunk.countPos(Noun) === 3
    )
    assert(
      parsedChunkWithVerbs.countPos(Noun) === 1
    )
    assert(
      parsedChunkWithVerbs.countPos(Verb) === 1
    )
  }
  test("ParsedChunk should correctly determine if the chunk is an exact match") {
    assert(
      parsedChunk.isExactMatch === 1
    )
    assert(
      parsedChunkWithExactMatch.isExactMatch === 0
    )
  }
  test("ParsedChunk should correctly determine if the chunk is all noun") {
    assert(
      parsedChunk.isAllNouns === 0
    )
    assert(
      parsedChunkWithVerbs.isAllNouns === 1
    )
  }
  // Expected tokens carry (text, POS, offset, length) plus an optional stem.
  test("tokenize should return expected tokens") {
    assert(
      tokenize("개루루야") ===
        List(KoreanToken("개", Noun, 0, 1), KoreanToken("루루", Noun, 1, 2), KoreanToken("야", Josa, 3, 1))
    )
    assert(
      tokenize("쵸귀여운") ===
        List(KoreanToken("쵸", VerbPrefix, 0, 1), KoreanToken("귀여운", Adjective, 1, 3, stem = Some("귀엽다")))
    )
    assert(
      tokenize("이사람의") ===
        List(KoreanToken("이", Determiner, 0, 1), KoreanToken("사람", Noun, 1, 2), KoreanToken("의", Josa, 3, 1))
    )
    assert(
      tokenize("엄청작아서귀엽다") ===
        List(
          KoreanToken("엄청", Adverb, 0, 2),
          KoreanToken("작아서", Adjective, 2, 3, stem = Some("작다")),
          KoreanToken("귀엽다", Adjective, 5, 3, stem = Some("귀엽다")))
    )
    assert(
      tokenize("안녕하셨어요") ===
        List(
          KoreanToken("안녕하셨어요", Adjective, 0, 6, stem = Some("안녕하다"))
        )
    )
    assert(
      tokenize("쵸귀여운개루루") ===
        List(
          KoreanToken("쵸", VerbPrefix, 0, 1),
          KoreanToken("귀여운", Adjective, 1, 3, stem = Some("귀엽다")),
          KoreanToken("개", Noun, 4, 1), KoreanToken("루루", Noun, 5, 2)
        )
    )
    assert(
      tokenize("그리고") ===
        List(KoreanToken("그리고", Conjunction, 0, 3))
    )
    assert(
      tokenize("안녕ㅋㅋ") ===
        List(KoreanToken("안녕", Noun, 0, 2), KoreanToken("ㅋㅋ", KoreanParticle, 2, 2))
    )
    assert(
      tokenize("라고만") ===
        List(KoreanToken("라고만", Eomi, 0, 3))
    )
    assert(
      tokenize("\\"라면서 외쳤다") ===
        List(
          KoreanToken("\\"", Punctuation, 0, 1),
          KoreanToken("라면서", Eomi, 1, 3),
          KoreanToken(" ", Space, 4, 1),
          KoreanToken("외쳤다", Verb, 5, 3, stem = Some("외치다"))
        )
    )
    assert(
      tokenize("사랑해") ===
        List(
          KoreanToken("사랑", Noun, 0, 2),
          KoreanToken("해", Verb, 2, 1, stem = Some("하다"))
        )
    )
  }
  // Out-of-vocabulary nouns should be returned with unknown = true.
  test("tokenize should handle unknown nouns") {
    assert(
      tokenize("개컁컁아") ===
        List(KoreanToken("개컁컁", Noun, 0, 3, unknown = true), KoreanToken("아", Josa, 3, 1))
    )
    assert(
      tokenize("안녕하세요쿛툐캬님") ===
        List(KoreanToken("안녕하세요", Adjective, 0, 5, stem = Some("안녕하다")),
          KoreanToken("쿛툐캬", Noun, 5, 3, unknown = true), KoreanToken("님", Suffix, 8, 1))
    )
  }
  test("tokenize should handle edge cases") {
    assert(
      tokenize("이승기가") ===
        List(KoreanToken("이승기", Noun, 0, 3), KoreanToken("가", Josa, 3, 1))
    )
    assert(
      tokenize("야이건뭐").mkString(", ") ===
        "야(Exclamation: 0, 1), 이건(Noun: 1, 2), 뭐(Noun: 3, 1)"
    )
    assert(
      tokenize("아이럴수가").mkString(", ") ===
        "아(Exclamation: 0, 1), 이럴수가(Adjective(이렇다): 1, 4)"
    )
    assert(
      tokenize("보다가").mkString(", ") === "보다가(Verb(보다): 0, 3)"
    )
    assert(
      tokenize("하...").mkString(", ") === "하(Exclamation: 0, 1), ...(Punctuation: 1, 3)"
    )
    assert(
      tokenize("시전하는").mkString(", ") === "시전(Noun: 0, 2), 하는(Verb(하다): 2, 2)"
    )
  }
  test("tokenize should be able to tokenize long non-space-correctable ones") {
    assert(
      tokenize("훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌쩍훌")
        .map(_.text).mkString(" ") ===
        "훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 " +
          "훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌쩍 훌"
    )
  }
  // The trailing * in the expected strings marks unknown tokens.
  test("tokenize should properly tokenize edge cases") {
    assert(
      tokenize("해쵸쵸쵸쵸쵸쵸쵸쵸춏").mkString(" ") === "해(Noun: 0, 1) 쵸쵸쵸쵸쵸쵸쵸쵸*(Noun: 1, 8) 춏*(Noun: 9, 1)"
    )
  }
  // NOTE(review): this test mutates the shared dictionary, so it affects any
  // later test in the same JVM that touches the same words.
  test("tokenize should add user-added nouns to dictionary") {
    assert(!KoreanDictionaryProvider.koreanDictionary.get(Noun).contains("뇬뇨"))
    assert(!KoreanDictionaryProvider.koreanDictionary.get(Noun).contains("츄쵸"))
    assert(tokenize("뇬뇨뇬뇨뇬뇨뇬뇨츄쵸").mkString(" ") ===
      "뇬뇨뇬뇨뇬뇨뇬뇨*(Noun: 0, 8) 츄쵸*(Noun: 8, 2)")
    KoreanDictionaryProvider.addWordsToDictionary(Noun, List("뇬뇨", "츄쵸"))
    assert(KoreanDictionaryProvider.koreanDictionary.get(Noun).contains("뇬뇨"))
    assert(KoreanDictionaryProvider.koreanDictionary.get(Noun).contains("츄쵸"))
    assert(tokenize("뇬뇨뇬뇨뇬뇨뇬뇨츄쵸").mkString(" ") ===
      "뇬뇨(Noun: 0, 2) 뇬뇨(Noun: 2, 2) 뇬뇨(Noun: 4, 2) 뇬뇨(Noun: 6, 2) 츄쵸(Noun: 8, 2)")
  }
  test("test noun-josa unmatched") {
    assert(tokenize("울다").mkString(" ") === "울다(Verb(울다): 0, 2)")
    assert(tokenize("울이다").mkString(" ") === "울(Noun: 0, 1) 이다(Josa: 1, 2)")
    assert(tokenize("사랑으로").mkString(" ") === "사랑(Noun: 0, 2) 으로(Josa: 2, 2)")
    assert(tokenize("사랑로").mkString(" ") === "사랑(Noun: 0, 2) 로(Noun: 2, 1)")
    assert(tokenize("고화질로").mkString(" ") === "고화질(Noun: 0, 3) 로(Josa: 3, 1)")
  }
  // Solutions cached at index < start should be pruned; entries still in the
  // [start, end) window survive. (The maps here are mutable despite the name.)
  test("test remove unused solutions") {
    val unmodifiable = new HashMap[Int, String] { put(0, null); }
    assert(removeUnusedSolutions(0, 1, unmodifiable).size() === 1)
    assert(removeUnusedSolutions(0, 2, unmodifiable).size() === 1)
    assert(removeUnusedSolutions(1, 3, unmodifiable).size() === 1)
    assert(removeUnusedSolutions(1, 8, unmodifiable).size() === 1)
    assert(removeUnusedSolutions(7, 8, unmodifiable).size() === 1)
    assert(removeUnusedSolutions(8, 9, new HashMap[Int, String] { put(0, null); }).size() == 0)
    assert(removeUnusedSolutions(9, 10, new HashMap[Int, String] { put(1, null); }).size() == 0)
    assert(removeUnusedSolutions(10, 11, new HashMap[Int, String] { put(2, null); }).size() == 0)
    assert(removeUnusedSolutions(299, 300, new HashMap[Int, String] { put(291, null); }).size() == 0)
    assert(removeUnusedSolutions(298, 200, unmodifiable).size() === 1)
    assert(removeUnusedSolutions(7, 10, unmodifiable).size() === 1)
    assert(removeUnusedSolutions(9, 9, unmodifiable).size() === 1)
  }
} | open-korean-text/open-korean-text | src/test/scala/org/openkoreantext/processor/tokenizer/KoreanTokenizerTest.scala | Scala | apache-2.0 | 9,779 |
package pl.touk.nussknacker.engine.api.typed
import java.util.Collections
import java.{util => ju}
/**
* The idea of this class is to be something like java bean with properties represented by Map entries.
* If you use this class as a global variables, it will be typed using `TypedObjectTypingResult`.
* Just like in java bean, case when property was typed correctly and is missing during runtime is something that
* should not happen and is treated Exceptionally. Check `MapPropertyAccessor.canRead` for more details.
*/
class TypedMap(map: ju.Map[String, Any]) extends ju.HashMap[String, Any](map) {
  // No-argument auxiliary constructor: builds an empty TypedMap.
  def this() =
    this(Collections.emptyMap())
}
object TypedMap {

  import scala.collection.JavaConverters._

  /** Builds a [[TypedMap]] from a Scala map of field name -> value. */
  def apply(scalaFields: Map[String, Any]): TypedMap = {
    val javaFields = scalaFields.asJava
    new TypedMap(javaFields)
  }
}
| TouK/nussknacker | components-api/src/main/scala/pl/touk/nussknacker/engine/api/typed/TypedMap.scala | Scala | apache-2.0 | 827 |
package com.benoj.janus
import cats.data.Xor
object Events {
  // Marker trait for all Janus domain events.
  trait JanusEvent
  // Signals that handling an event failed; `msg` carries the failure detail.
  case class Failed(msg: Any)
  // Event-handler type: partially defined over events, yielding Failed or Unit.
  type ReceiveEvent = PartialFunction[JanusEvent, Failed Xor Unit]
}
| benoj/janus | core/src/main/scala/com/benoj/janus/Events.scala | Scala | mit | 185 |
package contadamination.results
import com.twitter.algebird.{ BloomFilterMonoid }
import contadamination.test.utils.ContadaminationSuite
/**
 * Unit tests for ContaminationFilterUtils: verifies that the aggregation
 * operators (seqOp/combOp) and the pairwise add correctly accumulate hit and
 * query counters across ContaminationFilters.
 *
 * Created by dahljo on 7/10/15.
 */
class ContaminationFilterUtilsTest extends ContadaminationSuite {
  // k-mer window size used when querying reads against the bloom filters.
  val windowSize = 3
  val bloomFilterCreater =
    BloomFilterMonoid(6, 10000, 1)
  // Two filters seeded with distinct k-mers so only matching reads score hits.
  val bloomFilterX = bloomFilterCreater.create("AAA")
  val bloomFilterY = bloomFilterCreater.create("TTT")
  val contaminationFilterX = ContaminationFilter(bloomFilterX, "test_organism", totalNbrOfQueries = 2, hits = 1)
  val contaminationFilterY = ContaminationFilter(bloomFilterY, "test_organism", totalNbrOfQueries = 5, hits = 3)
  test("testSeqOp") {
    val filters = Array(contaminationFilterX)
    // "AAA" is in bloomFilterX, so exactly one extra hit is expected.
    val read = "AAA"
    val result = ContaminationFilterUtils.seqOp(windowSize)(filters, read)
    assert(result(0).hits === contaminationFilterX.hits + 1)
  }
  test("testCombOp") {
    // combOp must merge counters element-wise across the two partial results.
    val firstFilter = Array(contaminationFilterX)
    val secondFilter = Array(contaminationFilterY)
    val result = ContaminationFilterUtils.combOp(firstFilter, secondFilter)
    assert(result(0).hits === contaminationFilterX.hits + contaminationFilterY.hits)
    assert(result(0).totalNbrOfQueries ===
      contaminationFilterX.totalNbrOfQueries + contaminationFilterY.totalNbrOfQueries)
  }
  test("testAdd") {
    // 1 + 3 hits and 2 + 5 queries from the two fixtures above.
    val result = ContaminationFilterUtils.add(contaminationFilterX, contaminationFilterY)
    assert(result.hits == 4)
    assert(result.totalNbrOfQueries == 7)
  }
}
| heuermh/contAdamination | src/test/scala/contadamination/results/ContaminationFilterUtilsTest.scala | Scala | apache-2.0 | 1,505 |
package demo
package components
package elementalui
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.html_<^._
import scalacss.Defaults._
/** Static info page for the elemental-ui wrapper demo. */
object EuiInfo {
  // ScalaCSS styles local to this page.
  object Style extends StyleSheet.Inline {
    import dsl._
    val content = style(textAlign.center, fontSize(30.px), paddingTop(40.px))
  }
  // Stateless component: renders the InfoTemplate with links to the library,
  // its supported version and setup instructions.
  val component = ScalaComponent
    .builder[Unit]("EuiInfo")
    .render(P => {
      InfoTemplate(componentFilePath = "elementalui/")(
        <.div(
          <.h3("elemental-ui "),
          <.p("scalajs-react wrapper for ", RedLink("elemental-ui", "http://elemental-ui.com")),
          <.div(<.h4("Supported Version :"), <.span("0.5.4")),
          <.div(
            <.h4("How To Use :"),
            <.p(
              "Follow the installation guide from :",
              RedLink("here", "https://github.com/elementalui/elemental"),
              <.br(),
              <.br(),
              "Configure elemental-ui context in your top level component :",
              RedLink(
                "example",
                "https://github.com/chandu0101/scalajs-react-components/blob/master/demo/src/main/scala/demo/pages/EuiPage.scala")
            )
          )
        ))
    })
    .build
  def apply() = component()
}
| rleibman/scalajs-react-components | demo/src/main/scala/demo/components/elementalui/EuiInfo.scala | Scala | apache-2.0 | 1,253 |
package com.productfoundry.akka.cqrs.process
import com.productfoundry.akka.cqrs.EntityIdResolution
import scala.reflect.ClassTag
/**
 * Simplifies registration of process managers
 */
abstract class ProcessManagerCompanion[P <: ProcessManager: ClassTag] {
  /**
   * Name of the process manager, based on class name
   */
  val name = implicitly[ClassTag[P]].runtimeClass.getSimpleName
  /**
   * Defines how to resolve ids for this process manager.
   *
   * Events resolving to the same id are sent to the same process manager instance.
   * @return id resolution for the process.
   */
  def idResolution: EntityIdResolution[P]
  // Exposes this companion implicitly so it can be summoned wherever a
  // ProcessManagerCompanion[P] is required.
  implicit val ProcessManagerCompanionObject: ProcessManagerCompanion[P] = this
}
| Product-Foundry/akka-cqrs | core/src/main/scala/com/productfoundry/akka/cqrs/process/ProcessManagerCompanion.scala | Scala | apache-2.0 | 708 |
package xkcd1083
import com.ning.http.client.oauth.{RequestToken, ConsumerKey}
import scala.io.Source
import grizzled.config.Configuration
/** Loads configuration from a file named `config` in the working directory.
  * NOTE: this fails with an ugly error if the `config` file is not present.
  */
trait HasConfig {
  val config = Configuration(Source.fromFile("config"))
}
/** The twitter APPLICATION's API key, read from the `[oauth]` section of the config.
  * Missing entries fail fast with a descriptive message instead of an opaque
  * NoSuchElementException from Option.get.
  */
trait HasConsumerKey extends HasConfig {
  val consumerKey: ConsumerKey = new ConsumerKey(
    config.get("oauth", "consumer_key")
      .getOrElse(sys.error("Missing config entry: [oauth] consumer_key")),
    config.get("oauth", "consumer_secret")
      .getOrElse(sys.error("Missing config entry: [oauth] consumer_secret"))
  )
}
/** The USER's access token information, read from the `[oauth]` section of the config.
  * Missing entries fail fast with a descriptive message instead of an opaque
  * NoSuchElementException from Option.get.
  */
trait HasAccessToken extends HasConfig {
  val accessToken: RequestToken = new RequestToken(
    config.get("oauth", "access_token")
      .getOrElse(sys.error("Missing config entry: [oauth] access_token")),
    config.get("oauth", "access_token_secret")
      .getOrElse(sys.error("Missing config entry: [oauth] access_token_secret"))
  )
}
| ohbadiah/xkcd1083 | src/main/scala/oauth_tokens.scala | Scala | lgpl-3.0 | 768 |
package com.datastax.spark.connector.rdd
import com.datastax.spark.connector.{CassandraRow, SparkCassandraITFlatSpecBase}
import com.datastax.spark.connector.cql.CassandraConnector
import com.datastax.spark.connector.embedded.YamlTransformations
/**
 * Verifies that CassandraRDDMock wraps a plain Spark RDD and still supports
 * CassandraRDD operations (here: cassandraCount) without a live cluster.
 */
class CassandraRDDMockSpec extends SparkCassandraITFlatSpecBase {
  useCassandraConfig(Seq(YamlTransformations.Default))
  useSparkConf(defaultConf)
  override val conn = CassandraConnector(defaultConf)
  "A CassandraRDDMock" should "behave like a CassandraRDD without needing Cassandra" in {
    val columns = Seq("key", "value")
    //Create a fake CassandraRDD[CassandraRow] backed by 10 locally-built rows
    val rdd = sc
      .parallelize(1 to 10)
      .map(num => CassandraRow.fromMap(columns.zip(Seq(num, num)).toMap))
    val fakeCassandraRDD: CassandraRDD[CassandraRow] = new CassandraRDDMock(rdd)
    // cassandraCount must reflect the underlying RDD's size, no cluster needed
    fakeCassandraRDD.cassandraCount() should be (10)
  }
}
| shashwat7/spark-cassandra-connector | spark-cassandra-connector/src/it/scala/com/datastax/spark/connector/rdd/CassandraRDDMockSpec.scala | Scala | apache-2.0 | 889 |
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.util.namejuggler
import cc.factorie.util.namejuggler.StringUtils._
object PersonNameWithDerivations {
  /** Wraps a raw full-name string as a name whose only populated field is originalFullNames. */
  def apply(s: NonemptyString): PersonNameWithDerivations = {
    new PersonNameWithDerivations {
      override val originalFullNames = Set(s)
    }
  }
  /**
   * Make a new PersonNameWithDerivations. Generally we should prefer the "primary" values, and back off to the secondary values.
   * However sometimes we may judge that the backup value is superior (e.g., full name vs initial).
   * @return merged name combining canonical fields (via PersonName.merge) with
   *         field-by-field merges of the derived values.
   */
  def merge(primary: PersonNameWithDerivations, secondary: PersonNameWithDerivations): PersonNameWithDerivations = {
    val mergedCanonical = PersonName.merge(primary, secondary)
    new PersonNameWithDerivations {
      // first set the canonical fields
      override lazy val preferredFullName = mergedCanonical.preferredFullName
      override lazy val prefixes = mergedCanonical.prefixes
      override lazy val givenNames = mergedCanonical.givenNames
      override lazy val nickNames = mergedCanonical.nickNames
      override lazy val surNames = mergedCanonical.surNames
      override lazy val hereditySuffix = mergedCanonical.hereditySuffix
      override lazy val degrees = mergedCanonical.degrees
      // then merge the derived fields. The result may be different from just rederiving them!
      override lazy val firstInitial = OptionUtils.mergeWarn(primary.firstInitial, secondary.firstInitial)
      override lazy val middleInitials = OptionUtils.mergeWarn(primary.middleInitials, secondary.middleInitials)
      override lazy val lastInitial = OptionUtils.mergeWarn(primary.lastInitial, secondary.lastInitial)
      override lazy val givenInitials = OptionUtils.mergeWarn(primary.givenInitials, secondary.givenInitials)
      override lazy val allInitials = OptionUtils.mergeWarn(primary.allInitials, secondary.allInitials)
      override lazy val firstName = OptionUtils.mergeWarn(primary.firstName, secondary.firstName)
      override lazy val middleNames : scala.Seq[NonemptyString]= SeqUtils.mergeWarn[NonemptyString,Seq[NonemptyString]](primary.middleNames, secondary.middleNames)
      // various representations of full name may be present in a single mention (e.g., a metadata xml file)
      // full-name sets are unioned, not merged pairwise
      override lazy val originalFullNames = primary.originalFullNames ++ secondary.originalFullNames
      override lazy val derivedFullNames = primary.derivedFullNames ++ secondary.derivedFullNames
    }
  }
}
/**
 * A person name carrying both canonical components and derived/raw variants
 * (initials, first/middle split, observed full-name strings). All fields are
 * optional; `inferFully` propagates information between them.
 */
trait PersonNameWithDerivations extends PersonName {
  // a person mention may distinguish name components, or not.
  // here, just store whatever we get (i.e. if we get first and last, leave fullname blank).
  // then we can do whatever normalizations are needed later.
  // firstInitial is not just a char; could be Ja. for Jacques, etc.
  // DS, DAWS, D.S., D.A.W.S, D. S., D S, etc.
  def firstInitial: Option[NonemptyString] = None
  def middleInitials: Option[NonemptyString] = None
  // van Dyke = v.D. ?
  // de Arajuna Barbosa = D.? de A. B.? A.? A. B.?
  def lastInitial: Option[NonemptyString] = None
  // initials may be for full name, or just given names (e.g., from PubMed)
  def givenInitials: Option[NonemptyString] = None
  def allInitials: Option[NonemptyString] = None
  // typically first name + middle initial, etc.
  //def givenNames: Option[NonemptyString] = None
  // can be mashed initials, e.g. JA, J.A.
  def firstName: Option[NonemptyString] = None
  def middleNames: Seq[NonemptyString] = Nil
  // various representations of full name may be present in a single mention (e.g., a metadata xml file)
  def originalFullNames: Set[NonemptyString] = Set.empty
  def derivedFullNames: Set[NonemptyString] = Set.empty
  // assume that the longest is the most informative
  final def longestSurName: Option[NonemptyString] = toCanonical.surNames.toSeq.sortBy(-_.s.size).headOption
  // assume that the longest is the most informative
  final def longestOriginalFullName: Option[NonemptyString] = inferFully.originalFullNames.toSeq.sortBy(-_.s.size).headOption
  final def longestDerivedFullName: Option[NonemptyString] = inferFully.derivedFullNames.toSeq.sortBy(-_.s.size).headOption
  final def longestFullName: Option[NonemptyString] = (inferFully.originalFullNames.toSeq ++ inferFully.derivedFullNames.toSeq).sortBy(-_.s.size).headOption
  // prefer the explicitly preferred full name; fall back to the longest known one
  final def bestFullName: Option[NonemptyString] = inferFully.preferredFullName.orElse(longestFullName)
  /**
   * Propagate info around all the fields.
   * @return a name where canonical fields and derived fields have each been
   *         inferred from the other, with this instance's explicit values winning.
   */
  lazy val inferFully: PersonNameWithDerivations = {
    // first infer canonical fields only from derived fields
    //val proposedCanonical: CanonicalPersonName = new InferredCanonicalPersonName(this)
    // override those with explicit canonical fields
    val canonical = PersonName.merge(this, toCanonical)
    // rederive all fields
    val rederived = canonical.withDerivations
    // override those with explicit derived fields
    val result = PersonNameWithDerivations.merge(this, rederived)
    result
  }
  /*
  def compatibleWith(other: PersonNameWithDerivations): Boolean = {
    inferFully.toCanonical compatibleWith( other.inferFully.toCanonical)
    }
  */
  override def toString = bestFullName.map(_.s).getOrElse("")
  lazy val toCanonical: CanonicalPersonName = new InferredCanonicalPersonName(this)
  /*
  //.orElse(n.lastInitial)
  override val firstName =
    {
    var gf: Option[NonemptyString] = n.givenNames.map(r => NonemptyString(r.split(" ").head))
    var asd: Option[NonemptyString] = nParsedFullNames.map(_.firstName).flatten.headOption.map(NonemptyString(_))
    n.firstName.orElse(gf).orElse(asd).orElse(n.firstInitial)
    }
  override val middleNames: Seq[NonemptyString] =
    {
    import SeqUtils.emptyCollectionToNone
    val mid: Option[Seq[NonemptyString]] = n.givenNames.map(_.split(" ").tail.filter(_.nonEmpty).map(new NonemptyString(_)))
    val fromInitials: Option[Seq[NonemptyString]] = n.middleInitials.map(_.split(" ").filter(_.nonEmpty).map(new NonemptyString(_)))
    val result: Seq[NonemptyString] =
      emptyCollectionToNone[Seq[NonemptyString]](n.middleNames).orElse(mid).orElse(fromInitials).orElse(Nil).getOrElse(Nil)
    result
    }*/
}
/**
 * Derive all derivable fields solely from provided canonical fields.
 *
 * Several assignments here rely on an implicit String -> Option[NonemptyString]
 * conversion brought in by `StringUtils._` (empty strings presumably become
 * None — TODO confirm against StringUtils).
 *
 * @param n the canonical name whose fields are copied and from which
 *          initials, first/middle names and full names are derived
 */
class CanonicalPersonNameWithDerivations(n: CanonicalPersonName) extends CanonicalPersonName with PersonNameWithDerivations {
  override lazy val toCanonical = n
  // first copy the canonical fields
  override lazy val prefixes = n.prefixes
  override lazy val givenNames = n.givenNames
  override lazy val nickNames = n.nickNames
  override lazy val surNames = n.surNames
  override lazy val hereditySuffix = n.hereditySuffix
  override lazy val degrees = n.degrees
  // ** lots not implemented and generally broken
  // then derive the remaining fields
  override lazy val firstName: Option[NonemptyString] = n.givenNames.headOption
  override lazy val middleNames: Seq[NonemptyString] = firstName.map(x=>n.givenNames.tail).getOrElse(Nil)
  override lazy val firstInitial: Option[NonemptyString] = firstName.map(x => NonemptyString(x.s(0) + "."))
  override lazy val middleInitials: Option[NonemptyString] = middleNames.map(_.s(0) + ".").mkString(" ").trim
  // ** We just take the first uppercase letter from the longest surname,
  // desJardins? drop all particles? etc. etc.
  // is Amanda Jones-Albrecht => A. J. or A. J.-A. or what?
  // yuck
  //** for now, just take the first capital letter from the longest surname
  // todo: make a set of reasonable lastInitials
  // NOTE(review): the (0) index throws if the surname has no uppercase letter at all
  override lazy val lastInitial: Option[NonemptyString] = longestSurName.map(x => NonemptyString((("[A-Z]".r findAllIn x.s).toSeq)(0) + "."))
  override lazy val givenInitials: Option[NonemptyString] = List(firstInitial, middleInitials).flatten.mkString(" ").trim
  override lazy val allInitials: Option[NonemptyString] = List(firstInitial, middleInitials, lastInitial).flatten.mkString(" ").trim
  // van Dyke = v.D. ?
  // various representations of full name may be present in a single mention (e.g., a metadata xml file)
  override lazy val preferredFullName: Option[NonemptyString] = {
    val prefixString: Option[NonemptyString] = prefixes.mkString(" ").trim
    val givenString: Option[NonemptyString] = givenNames.mkString(" ").trim
    val degreesString: Option[NonemptyString] = degrees.map(", " + _).mkString("").trim
    // rebuild "prefixes given 'nicks' surname suffix, degrees" from the parts
    val rebuiltFullName: Option[NonemptyString] = Seq(prefixString, givenString, nickNamesInQuotes, longestSurName,
      hereditySuffix).flatten.mkString(" ").trim + degreesString.getOrElse("")
    rebuiltFullName
    //if(rebuiltFullName.nonEmpty) Set(rebuiltFullName) else Set.empty
  }
  override lazy val derivedFullNames: Set[NonemptyString] = {
    preferredFullName.toSet
  }
}
| Craigacp/factorie | src/main/scala/cc/factorie/util/namejuggler/PersonNameWithDerivations.scala | Scala | apache-2.0 | 9,550 |
package com.lvxingpai.model.guide
import javax.validation.constraints.Min
import java.util.Date
import org.mongodb.morphia.annotations.Entity
import scala.beans.BeanProperty
/**
 * A travel guide.
 * Created by pengyt on 2015/10/21.
 */
@Entity
class Guide extends AbstractGuide {
  /**
   * Id of the user who owns this guide.
   */
  @Min(value = 1)
  @BeanProperty
  var userId: Long = 0
  /**
   * Number of days in the itinerary.
   */
  @Min(value = 1)
  @BeanProperty
  var itineraryDays: Int = 0
  /**
   * Time of the last update.
   */
  @BeanProperty
  var updateTime: Date = null
  /**
   * Summary of the guide.
   */
  @BeanProperty
  var summary: String = null
  /**
   * URL of the guide's detail content.
   */
  @BeanProperty
  var detailUrl: String = null
  /**
   * Visibility: "public" - visible to everyone, "private" - visible only to the owner.
   */
  @BeanProperty
  var visibility: String = null
  /**
   * Status: "traveled" - already traveled, "planned" - planned for the future.
   */
  @BeanProperty
  var status: String = null
}
| Lvxingpai/core-model | src/main/scala/com/lvxingpai/model/guide/Guide.scala | Scala | apache-2.0 | 924 |
/*
* (C) Copyright 2015 Atomic BITS (http://atomicbits.io).
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the GNU Affero General Public License
* (AGPL) version 3.0 which accompanies this distribution, and is available in
* the LICENSE file or at http://www.gnu.org/licenses/agpl-3.0.en.html
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Affero General Public License for more details.
*
* Contributors:
* Peter Rigole
*
*/
package io.atomicbits.scraml.jsonschemaparser.model
import io.atomicbits.scraml.jsonschemaparser.{Id, IdExtractor}
import play.api.libs.json.JsObject
/**
* Created by peter on 7/06/15.
*/
/**
 * JSON-schema element for the "null" primitive type.
 *
 * @param id       the schema id of this element
 * @param required whether the enclosing object marks this field as required
 */
case class NullEl(id: Id, required: Boolean = false) extends PrimitiveSchema with AllowedAsObjectField {

  // Return a copy of this schema element carrying the new id.
  override def updated(updatedId: Id): Schema = copy(id = updatedId)
}
object NullEl {

  /**
   * Builds a NullEl schema element from its JSON representation.
   *
   * @param schema the JSON object describing the "null" type element
   */
  def apply(schema: JsObject): Schema = {
    // Pattern definition: extract the id directly; fails with a MatchError when
    // no id can be derived, exactly like a match expression without a default case.
    val IdExtractor(extractedId) = schema
    val isRequired = (schema \\ "required").asOpt[Boolean].getOrElse(false)
    NullEl(extractedId, isRequired)
  }
}
| rcavalcanti/scraml | modules/scraml-jsonschema-parser/src/main/scala/io/atomicbits/scraml/jsonschemaparser/model/NullEl.scala | Scala | agpl-3.0 | 1,283 |
/*
*
* * Licensed to the Apache Software Foundation (ASF) under one or more
* * contributor license agreements. See the NOTICE file distributed with
* * this work for additional information regarding copyright ownership.
* * The ASF licenses this file to You under the Apache License, Version 2.0
* * (the "License"); you may not use this file except in compliance with
* * the License. You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package org.apache.eagle.datastream.core
import com.typesafe.config.Config
import org.jgrapht.experimental.dag.DirectedAcyclicGraph
/**
 * Base class for passes that rewrite the stream-processing DAG
 * (e.g. to insert, merge or rewire producer nodes) before execution.
 */
abstract class StreamDAGExpansion(config: Config) {
  // Expand/rewrite the given DAG of stream producers in place.
  def expand(dag: DirectedAcyclicGraph[StreamProducer[Any], StreamConnector[Any,Any]])
} | sunlibin/incubator-eagle | eagle-core/eagle-data-process/eagle-stream-process-api/src/main/scala/org/apache/eagle/datastream/core/StreamDAGExpansion.scala | Scala | apache-2.0 | 1124 |
import sbt._
import Keys._
import org.apache.ivy.ant._
import org.apache.ivy.core.settings.IvySettings
import org.apache.ivy.Ivy
/**
 * SBT plugin that builds the final, proguard-processed jar through a JRuby
 * helper, caching per-dependency results under `proguard-cache-storage`.
 */
object ProguardCache extends Plugin {

  object Settings {
    lazy val settings = proguardCacheSettings
  }

  // Task and setting keys; descriptions double as their user-facing docs.
  val proguardCacheBuild = TaskKey[Unit]("proguard-cache-build", "build the jar")
  val proguardCacheBase = SettingKey[File]("proguard-cache-base", "path to the directory containing the proguard cache plugin source")
  val proguardCacheRubyLib = SettingKey[File]("proguard-cache-ruby-lib", "path to the directory containing the jruby library")
  val proguardCacheStorage = SettingKey[File]("proguard-cache-storage", "path to the directory to store dependency files")
  val proguardCacheConfigFile = SettingKey[File]("proguard-cache-config-file", "path to the proguard configuration file")
  val proguardCacheFinalJar = SettingKey[File]("proguard-cache-final-jar", "path to the final jarfile")
  val proguardCacheIvyLocation = SettingKey[File]("proguard-cache-ivy-location", "path to where ivy stores jars")

  // Default wiring: users must override proguardCacheBase (and the config/jar keys).
  lazy val proguardCacheSettings = Seq(
    proguardCacheBase := file("/must/specify/this"),
    proguardCacheRubyLib <<= proguardCacheBase(_ / "src" / "main" / "jruby"),
    proguardCacheBuild <<= buildProguardCachedJar,
    proguardCacheStorage <<= cacheDirectory(_ / "proguard_cache"),
    proguardCacheIvyLocation := file(System.getProperty("user.home") + "/.ivy2")
  )

  lazy val buildProguardCachedJar = (
    proguardCacheBase,
    proguardCacheRubyLib,
    classDirectory in Compile,
    proguardCacheStorage,
    proguardCacheConfigFile,
    proguardCacheFinalJar,
    managedClasspath in Compile,
    proguardCacheIvyLocation) map {
    (pcb, proguardCacheRubyLibValue,
      classDirectoryValue,
      proguardCacheStorageValue,
      proguardCacheConfigFileValue,
      proguardCacheFinalJarValue,
      managedClasspathValue,
      proguardCacheIvyLocationValue) =>
      val oldContextClassLoader = Thread.currentThread().getContextClassLoader
      // SBT doesn't set the thread context class loader. JRuby depends on it, so set it
      // here and restore the old value afterwards. The restore is in a finally block so
      // that a failed build cannot leak this plugin's class loader into later tasks
      // running on the same thread (the original code skipped the restore on failure).
      Thread.currentThread().setContextClassLoader(this.getClass().getClassLoader())
      try {
        com.restphone.JrubyEnvironmentSetup.addIvyDirectoryToLoadPath(proguardCacheIvyLocationValue.toString)
        // Put every managed-dependency directory plus the bundled JRuby library on the load path.
        ((managedClasspathValue map { _.data.getParent }) ++ List(proguardCacheRubyLibValue)) map { _.toString } map com.restphone.JrubyEnvironmentSetup.addToLoadPath
        val pc = new com.restphone.ProguardCacheRuby
        pc.build_dependency_files_and_final_jar(
          List(classDirectoryValue.toString).toArray,
          proguardCacheConfigFileValue.toString,
          proguardCacheFinalJarValue.toString,
          proguardCacheStorageValue.toString,
          proguardCacheStorageValue.toString + "/scala-library.CKSUM.jar")
      } finally {
        Thread.currentThread().setContextClassLoader(oldContextClassLoader)
      }
      ()
  }
}
| banshee/ProguardCache | src/main/scala/com/restphone/ProguardCachePlugin.scala | Scala | gpl-2.0 | 3,001 |
package scala.slick.driver
import scala.slick.SlickException
import scala.slick.lifted._
import scala.slick.ast._
import scala.slick.util.MacroSupport.macroSupportInterpolation
import java.sql.{Timestamp, Time, Date}
/**
* Slick driver for SQLite.
*
* This driver implements the [[scala.slick.driver.ExtendedProfile]]
* ''without'' the following capabilities:
*
* <ul>
* <li>[[scala.slick.driver.BasicProfile.capabilities.functionDatabase]],
* [[scala.slick.driver.BasicProfile.capabilities.functionUser]]:
* <code>Functions.user</code> and <code>Functions.database</code> are
* not available in SQLite. Slick will return empty strings for both.</li>
* <li>[[scala.slick.driver.BasicProfile.capabilities.joinFull]],
* [[scala.slick.driver.BasicProfile.capabilities.joinRight]]:
* Right and full outer joins are not supported by SQLite.</li>
* <li>[[scala.slick.driver.BasicProfile.capabilities.mutable]]:
* SQLite does not allow mutation of result sets. All cursors are
* read-only.</li>
* <li>[[scala.slick.driver.BasicProfile.capabilities.sequence]]:
* Sequences are not supported by SQLite.</li>
* <li>[[scala.slick.driver.BasicProfile.capabilities.returnInsertOther]]:
* When returning columns from an INSERT operation, only a single column
* may be specified which must be the table's AutoInc column.</li>
* <li>[[scala.slick.driver.BasicProfile.capabilities.typeBigDecimal]]:
* SQLite does not support a decimal type.</li>
* <li>[[scala.slick.driver.BasicProfile.capabilities.typeBlob]]: Blobs are
* not supported by the SQLite JDBC driver (but binary data in the form of
* <code>Array[Byte]</code> is).</li>
* <li>[[scala.slick.driver.BasicProfile.capabilities.zip]]:
* Row numbers (required by <code>zip</code> and
* <code>zipWithIndex</code>) are not supported. Trying to generate SQL
* code which uses this feature throws a SlickException.</li>
* </ul>
*
* @author Paul Snively
* @author Stefan Zeiger
*/
trait SQLiteDriver extends ExtendedDriver { driver =>

  // Advertise all standard capabilities minus the ones SQLite (or its JDBC
  // driver) cannot provide; see the class-level scaladoc for the rationale.
  override val capabilities: Set[Capability] = (BasicProfile.capabilities.all
    - BasicProfile.capabilities.functionDatabase
    - BasicProfile.capabilities.functionUser
    - BasicProfile.capabilities.joinFull
    - BasicProfile.capabilities.joinRight
    - BasicProfile.capabilities.mutable
    - BasicProfile.capabilities.sequence
    - BasicProfile.capabilities.returnInsertOther
    - BasicProfile.capabilities.typeBigDecimal
    - BasicProfile.capabilities.typeBlob
    - BasicProfile.capabilities.zip
  )

  override val typeMapperDelegates = new TypeMapperDelegates
  override def createQueryBuilder(input: QueryBuilderInput): QueryBuilder = new QueryBuilder(input)
  override def createTableDDLBuilder(table: Table[_]): TableDDLBuilder = new TableDDLBuilder(table)
  override def createColumnDDLBuilder(column: FieldSymbol, table: Table[_]): ColumnDDLBuilder = new ColumnDDLBuilder(column)

  /** Builds SELECT statements in SQLite's SQL dialect. */
  class QueryBuilder(input: QueryBuilderInput) extends super.QueryBuilder(input) {
    override protected val supportsTuples = false
    override protected val concatOperator = Some("||")

    // SQLite has no NULLS FIRST/LAST clause. For the non-default null orderings,
    // emit an extra "(expr) is null" sort key first so null rows are pushed to the
    // requested end, then order by the expression itself.
    override protected def buildOrdering(n: Node, o: Ordering) {
      if(o.nulls.last && !o.direction.desc)
        b"($n) is null,"
      else if(o.nulls.first && o.direction.desc)
        b"($n) is null desc,"
      expr(n)
      if(o.direction.desc) b" desc"
    }

    // OFFSET is expressed through the two-argument "LIMIT offset,count" form.
    // NOTE(review): "-1" as the count is assumed to mean "unlimited" -- confirm
    // against the SQLite SELECT documentation.
    override protected def buildFetchOffsetClause(fetch: Option[Long], offset: Option[Long]) = (fetch, offset) match {
      case (Some(t), Some(d)) => b" LIMIT $d,$t"
      case (Some(t), None   ) => b" LIMIT $t"
      case (None,    Some(d)) => b" LIMIT $d,-1"
      case _ =>
    }

    // Dialect-specific expression rendering; anything not handled here falls
    // through to the generic builder.
    override def expr(c: Node, skipParens: Boolean = false): Unit = c match {
      case Library.UCase(ch) => b"upper(!$ch)"
      case Library.LCase(ch) => b"lower(!$ch)"
      case Library.%(l, r) => b"\\($l%$r\\)"
      // Emulate ceiling/floor via round().
      // NOTE(review): round(x+0.5)/round(x-0.5) is wrong for integral inputs
      // (e.g. ceiling(2) would render as round(2.5)) -- flagging, not changing.
      case Library.Ceiling(ch) => b"round($ch+0.5)"
      case Library.Floor(ch) => b"round($ch-0.5)"
      // user() and database() do not exist in SQLite; render empty strings
      // (see the functionUser/functionDatabase capability exclusions above).
      case Library.User() => b"''"
      case Library.Database() => b"''"
      case Apply(j: Library.JdbcFunction, ch) if j != Library.Concat =>
        /* The SQLite JDBC driver does not support ODBC {fn ...} escapes, so we try
         * unescaped function calls by default */
        b"${j.name}("
        b.sep(ch, ",")(expr(_, true))
        b")"
      case s: SimpleFunction if s.scalar =>
        /* The SQLite JDBC driver does not support ODBC {fn ...} escapes, so we try
         * unescaped function calls by default */
        b"${s.name}("
        b.sep(s.nodeChildren, ",")(expr(_, true))
        b")"
      case RowNumber(_) => throw new SlickException("SQLite does not support row numbers")
      case _ => super.expr(c, skipParens)
    }
  }

  /** Builds CREATE TABLE statements. SQLite declares primary and foreign keys
    * inline in the table definition, so the separate-statement mechanisms of the
    * base builder are disabled and the constraints are appended here instead. */
  class TableDDLBuilder(table: Table[_]) extends super.TableDDLBuilder(table) {
    override protected val foreignKeys = Nil // handled directly in addTableOptions
    override protected val primaryKeys = Nil // handled directly in addTableOptions

    override protected def addTableOptions(b: StringBuilder) {
      for(pk <- table.primaryKeys) {
        b append ","
        addPrimaryKey(pk, b)
      }
      for(fk <- table.foreignKeys) {
        b append ","
        addForeignKey(fk, b)
      }
    }
  }

  /** Renders per-column DDL options. */
  class ColumnDDLBuilder(column: FieldSymbol) extends super.ColumnDDLBuilder(column) {
    override protected def appendOptions(sb: StringBuilder) {
      if(defaultLiteral ne null) sb append " DEFAULT " append defaultLiteral
      // AUTOINCREMENT replaces (rather than accompanies) the NOT NULL / PRIMARY KEY
      // options: in SQLite it must appear as "PRIMARY KEY AUTOINCREMENT".
      if(autoIncrement) sb append " PRIMARY KEY AUTOINCREMENT"
      else if(notNull) sb append " NOT NULL"
      else if(primaryKey) sb append " PRIMARY KEY"
    }
  }

  /** SQLite-specific JDBC type mappings. */
  class TypeMapperDelegates extends super.TypeMapperDelegates {
    override val booleanTypeMapperDelegate = new BooleanTypeMapperDelegate
    override val dateTypeMapperDelegate = new DateTypeMapperDelegate
    override val timeTypeMapperDelegate = new TimeTypeMapperDelegate
    override val timestampTypeMapperDelegate = new TimestampTypeMapperDelegate
    override val uuidTypeMapperDelegate = new UUIDTypeMapperDelegate

    /* SQLite does not have a proper BOOLEAN type. The suggested workaround is
     * INTEGER with constants 1 and 0 for TRUE and FALSE. */
    class BooleanTypeMapperDelegate extends super.BooleanTypeMapperDelegate {
      override def sqlTypeName = "INTEGER"
      override def valueToSQLLiteral(value: Boolean) = if(value) "1" else "0"
    }

    /* The SQLite JDBC driver does not support the JDBC escape syntax for
     * date/time/timestamp literals. SQLite expects these values as milliseconds
     * since epoch. */
    class DateTypeMapperDelegate extends super.DateTypeMapperDelegate {
      override def valueToSQLLiteral(value: Date) = value.getTime.toString
    }
    class TimeTypeMapperDelegate extends super.TimeTypeMapperDelegate {
      override def valueToSQLLiteral(value: Time) = value.getTime.toString
    }
    class TimestampTypeMapperDelegate extends super.TimestampTypeMapperDelegate {
      override def valueToSQLLiteral(value: Timestamp) = value.getTime.toString
    }

    // UUIDs are stored as BLOBs (SQLite has no native UUID type).
    class UUIDTypeMapperDelegate extends super.UUIDTypeMapperDelegate {
      override def sqlType = java.sql.Types.BLOB
    }
  }
}

object SQLiteDriver extends SQLiteDriver
| zefonseca/slick-1.0.0-scala.2.11.1 | src/main/scala/scala/slick/driver/SQLiteDriver.scala | Scala | bsd-2-clause | 7,318 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets matching specific criteria, giving a basic overview of the dataset's contents without revealing deeper insights.