| code (string, 5-1M chars) | repo_name (string, 5-109 chars) | path (string, 6-208 chars) | language (1 class) | license (15 classes) | size (int64, 5-1M) |
|---|---|---|---|---|---|
package sbt
import java.lang.reflect.{Array => _, _}
import java.lang.annotation.Annotation
import xsbti.api
import xsbti.SafeLazy
import SafeLazy.strict
import collection.mutable
object ClassToAPI
{
def apply(c: Seq[Class[_]]): api.SourceAPI =
{
val pkgs = packages(c).map(p => new api.Package(p))
val defs = c.filter(isTopLevel).flatMap(toDefinitions(new mutable.HashMap))
new api.SourceAPI(pkgs.toArray, defs.toArray)
}
// Avoiding implicit allocation.
private def arrayMap[T <: AnyRef, U <: AnyRef : ClassManifest](xs: Array[T])(f: T => U): Array[U] = {
val len = xs.length
var i = 0
val res = new Array[U](len)
while (i < len) {
res(i) = f(xs(i))
i += 1
}
res
}
def packages(c: Seq[Class[_]]): Set[String] =
c.flatMap(packageName).toSet
def isTopLevel(c: Class[_]): Boolean =
c.getEnclosingClass eq null
type ClassMap = mutable.Map[String, Seq[api.ClassLike]]
def toDefinitions(cmap: ClassMap)(c: Class[_]): Seq[api.ClassLike] =
cmap.getOrElseUpdate(c.getName, toDefinitions0(c, cmap))
def toDefinitions0(c: Class[_], cmap: ClassMap): Seq[api.ClassLike] =
{
import api.DefinitionType.{ClassDef, Module, Trait}
val enclPkg = packageName(c)
val mods = modifiers(c.getModifiers)
val acc = access(c.getModifiers, enclPkg)
val annots = annotations(c.getAnnotations)
val name = c.getName
val tpe = if(Modifier.isInterface(c.getModifiers)) Trait else ClassDef
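// A Java class yields two API entries: the instance side (a trait for interfaces,
// a class otherwise) and a synthetic module carrying the static members.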
lazy val (static, instance) = structure(c, enclPkg, cmap)
val cls = new api.ClassLike(tpe, strict(Empty), lzy(instance), emptyStringArray, typeParameters(c.getTypeParameters), name, acc, mods, annots)
val stat = new api.ClassLike(Module, strict(Empty), lzy(static), emptyStringArray, emptyTypeParameterArray, name, acc, mods, annots)
val defs = cls :: stat :: Nil
cmap(c.getName) = defs
defs
}
def structure(c: Class[_], enclPkg: Option[String], cmap: ClassMap): (api.Structure, api.Structure) =
{
val methods = mergeMap(c, c.getMethods, c.getDeclaredMethods, methodToDef(enclPkg))
val fields = mergeMap(c, c.getFields, c.getDeclaredFields, fieldToDef(enclPkg))
val constructors = mergeMap(c, c.getConstructors, c.getDeclaredConstructors, constructorToDef(enclPkg))
val classes = merge[Class[_]](c, c.getClasses, c.getDeclaredClasses, toDefinitions(cmap), (_: Seq[Class[_]]).partition(isStatic), _.getEnclosingClass != c)
val all = (methods ++ fields ++ constructors ++ classes)
val parentTypes = parents(c)
val instanceStructure = new api.Structure(lzy(parentTypes.toArray), lzy(all.declared.toArray), lzy(all.inherited.toArray))
val staticStructure = new api.Structure(lzyEmptyTpeArray, lzy(all.staticDeclared.toArray), lzy(all.staticInherited.toArray))
(staticStructure, instanceStructure)
}
def lzy[T <: AnyRef](t: => T): xsbti.api.Lazy[T] = xsbti.SafeLazy(t)
private val emptyStringArray = new Array[String](0)
private val emptyTypeArray = new Array[xsbti.api.Type](0)
private val emptyAnnotationArray = new Array[xsbti.api.Annotation](0)
private val emptyTypeParameterArray = new Array[xsbti.api.TypeParameter](0)
private val emptySimpleTypeArray = new Array[xsbti.api.SimpleType](0)
private val lzyEmptyTpeArray = lzy(emptyTypeArray)
private val lzyEmptyDefArray = lzy(new Array[xsbti.api.Definition](0))
def parents(c: Class[_]): Seq[api.Type] =
types(c.getGenericSuperclass +: c.getGenericInterfaces)
def types(ts: Seq[Type]): Array[api.Type] = ts filter (_ ne null) map reference toArray;
def upperBounds(ts: Array[Type]): api.Type =
new api.Structure(lzy(types(ts)), lzyEmptyDefArray, lzyEmptyDefArray)
def fieldToDef(enclPkg: Option[String])(f: Field): api.FieldLike =
{
val name = f.getName
val accs = access(f.getModifiers, enclPkg)
val mods = modifiers(f.getModifiers)
val annots = annotations(f.getDeclaredAnnotations)
val tpe = reference(f.getGenericType)
if(mods.isFinal) new api.Val(tpe, name, accs, mods, annots) else new api.Var(tpe, name, accs, mods, annots)
}
def methodToDef(enclPkg: Option[String])(m: Method): api.Def =
defLike(m.getName, m.getModifiers, m.getDeclaredAnnotations, m.getTypeParameters, m.getParameterAnnotations, m.getGenericParameterTypes, Some(m.getGenericReturnType), m.getGenericExceptionTypes, m.isVarArgs, enclPkg)
def constructorToDef(enclPkg: Option[String])(c: Constructor[_]): api.Def =
defLike("<init>", c.getModifiers, c.getDeclaredAnnotations, c.getTypeParameters, c.getParameterAnnotations, c.getGenericParameterTypes, None, c.getGenericExceptionTypes, c.isVarArgs, enclPkg)
def defLike[T <: GenericDeclaration](name: String, mods: Int, annots: Array[Annotation], tps: Array[TypeVariable[T]], paramAnnots: Array[Array[Annotation]], paramTypes: Array[Type], retType: Option[Type], exceptions: Array[Type], varArgs: Boolean, enclPkg: Option[String]): api.Def =
{
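// For a varargs method, only the last parameter is marked as repeated.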
val varArgPosition = if(varArgs) paramTypes.length - 1 else -1
val isVarArg = List.tabulate(paramTypes.length)(_ == varArgPosition)
val pa = (paramAnnots, paramTypes, isVarArg).zipped map { case (a,p,v) => parameter(a,p,v) }
val params = new api.ParameterList(pa, false)
val ret = retType match { case Some(rt) => reference(rt); case None => Empty }
new api.Def(Array(params), ret, typeParameters(tps), name, access(mods, enclPkg), modifiers(mods), annotations(annots) ++ exceptionAnnotations(exceptions))
}
def exceptionAnnotations(exceptions: Array[Type]): Array[api.Annotation] =
if (exceptions.length == 0) emptyAnnotationArray
else arrayMap(exceptions)(t => new api.Annotation(Throws, Array(new api.AnnotationArgument("value", t.toString))))
def parameter(annots: Array[Annotation], parameter: Type, varArgs: Boolean): api.MethodParameter =
new api.MethodParameter("", annotated(reference(parameter),annots), false, if(varArgs) api.ParameterModifier.Repeated else api.ParameterModifier.Plain)
def annotated(t: api.SimpleType, annots: Array[Annotation]): api.Type = (
if (annots.length == 0) t
else new api.Annotated(t, annotations(annots))
)
case class Defs(declared: Seq[api.Definition], inherited: Seq[api.Definition], staticDeclared: Seq[api.Definition], staticInherited: Seq[api.Definition])
{
def ++(o: Defs) = Defs(declared ++ o.declared, inherited ++ o.inherited, staticDeclared ++ o.staticDeclared, staticInherited ++ o.staticInherited)
}
def mergeMap[T <: Member](of: Class[_], self: Seq[T], public: Seq[T], f: T => api.Definition): Defs =
merge[T](of, self, public, x => f(x) :: Nil, splitStatic _, _.getDeclaringClass != of)
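// Partition members along two axes: declared on `of` vs. inherited, and static vs. instance.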
def merge[T](of: Class[_], self: Seq[T], public: Seq[T], f: T => Seq[api.Definition], splitStatic: Seq[T] => (Seq[T],Seq[T]), isInherited: T => Boolean): Defs =
{
val (selfStatic, selfInstance) = splitStatic(self)
val (inheritedStatic, inheritedInstance) = splitStatic(public filter isInherited)
Defs(selfInstance flatMap f, inheritedInstance flatMap f, selfStatic flatMap f, inheritedStatic flatMap f)
}
def splitStatic[T <: Member](defs: Seq[T]): (Seq[T], Seq[T]) =
defs partition isStatic
def isStatic(c: Class[_]): Boolean = Modifier.isStatic(c.getModifiers)
def isStatic(a: Member): Boolean = Modifier.isStatic(a.getModifiers)
def typeParameters[T <: GenericDeclaration](tps: Array[TypeVariable[T]]): Array[api.TypeParameter] =
if (tps.length == 0) emptyTypeParameterArray
else arrayMap(tps)(typeParameter)
def typeParameter[T <: GenericDeclaration](tp: TypeVariable[T]): api.TypeParameter =
new api.TypeParameter(typeVariable(tp), emptyAnnotationArray, emptyTypeParameterArray, api.Variance.Invariant, NothingRef, upperBounds(tp.getBounds))
// needs to be stable across compilations
// preferably, it would be a proper unique id based on de Bruijn index
def typeVariable[T <: GenericDeclaration](tv: TypeVariable[T]): Int =
reduceHash((name(tv.getGenericDeclaration) + " " + tv.getName).getBytes)
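// Deterministic multiply/XOR fold over the bytes, so the id is stable across runs.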
def reduceHash(in: Array[Byte]): Int =
(0 /: in)( (acc, b) => (acc * 43) ^ b)
def name(gd: GenericDeclaration): String =
gd match
{
case c: Class[_] => c.getName
case m: Method => m.getName
case c: Constructor[_] => c.getName
}
def modifiers(i: Int): api.Modifiers =
{
import Modifier.{isAbstract, isFinal}
new api.Modifiers( isAbstract(i), false, isFinal(i), false, false, false)
}
def access(i: Int, pkg: Option[String]): api.Access =
{
import Modifier.{isPublic, isPrivate, isProtected}
if(isPublic(i)) Public else if(isPrivate(i)) Private else if(isProtected(i)) Protected else packagePrivate(pkg)
}
def annotations(a: Array[Annotation]): Array[api.Annotation] = if (a.length == 0) emptyAnnotationArray else arrayMap(a)(annotation)
def annotation(a: Annotation): api.Annotation =
new api.Annotation( reference(a.annotationType), Array(javaAnnotation(a.toString)))
// full information not available from reflection
def javaAnnotation(s: String): api.AnnotationArgument =
new api.AnnotationArgument("toString", s)
def array(tpe: api.Type): api.SimpleType = new api.Parameterized(ArrayRef, Array(tpe))
def reference(c: Class[_]): api.SimpleType =
if(c.isArray) array(reference(c.getComponentType)) else if(c.isPrimitive) primitive(c.getName) else reference(c.getName)
// does not handle primitives
def reference(s: String): api.SimpleType =
{
val (pkg, cls) = packageAndName(s)
pkg match
{
// translate all primitives?
case None => new api.Projection(Empty, cls)
case Some(p) =>
new api.Projection(new api.Singleton(pathFromString(p)), cls)
}
}
def referenceP(t: ParameterizedType): api.Parameterized =
{
val targs = t.getActualTypeArguments
val args = if (targs.length == 0) emptyTypeArray else arrayMap(targs)(t => reference(t): api.Type)
val base = reference(t.getRawType)
new api.Parameterized(base, args.toArray[api.Type])
}
def reference(t: Type): api.SimpleType =
t match
{
case w: WildcardType => reference("_")
case tv: TypeVariable[_] => new api.ParameterRef(typeVariable(tv))
case pt: ParameterizedType => referenceP(pt)
case gat: GenericArrayType => array(reference(gat.getGenericComponentType))
case c: Class[_] => reference(c)
}
def pathFromString(s: String): api.Path =
new api.Path(s.split("\\.").map(new api.Id(_)) :+ ThisRef)
def packageName(c: Class[_]) = packageAndName(c)._1
def packageAndName(c: Class[_]): (Option[String], String) =
packageAndName(c.getName)
def packageAndName(name: String): (Option[String], String) =
{
val lastDot = name.lastIndexOf('.')
if(lastDot >= 0)
(Some(name.substring(0, lastDot)), name.substring(lastDot+1))
else
(None, name)
}
val Empty = new api.EmptyType
val ThisRef = new api.This
val Public = new api.Public
val Private = new api.Private(Unqualified)
val Protected = new api.Protected(Unqualified)
val Unqualified = new api.Unqualified
def packagePrivate(pkg: Option[String]): api.Access = new api.Private(new api.IdQualifier(pkg getOrElse ""))
val ArrayRef = reference("scala.Array")
val Throws = reference("scala.throws")
val NothingRef = reference("scala.Nothing")
private[this] def PrimitiveNames = Seq("boolean", "byte", "char", "short", "int", "long", "float", "double")
private[this] def PrimitiveMap = PrimitiveNames.map( j => (j, j.capitalize)) :+ ("void" -> "Unit")
private[this] val PrimitiveRefs = PrimitiveMap.map { case (n, sn) => (n, reference("scala." + sn)) }.toMap
def primitive(name: String): api.SimpleType = PrimitiveRefs(name)
}
| kuochaoyi/xsbt | compile/api/ClassToAPI.scala | Scala | bsd-3-clause | 11,418 |
import sbt._
object Dependencies {
// Generic
lazy val gson = "com.google.code.gson" % "gson" % "2.3.1"
lazy val jodaTime = "joda-time" % "joda-time" % "2.5"
lazy val postgresql = "org.postgresql" % "postgresql" % "9.3-1102-jdbc41"
// Codacy
lazy val playTomcatCP = "com.codacy" %% "play-tomcatcp" % "1.0.1"
// Test-Only
lazy val scalaTest = "org.scalatest" %% "scalatest" % "2.2.2" % "test"
lazy val mockitoAll = "org.mockito" % "mockito-all" % "1.10.8" % "test"
// Typesafe
lazy val slick = "com.typesafe.slick" %% "slick" % "2.1.0"
lazy val akkaActor = "com.typesafe.akka" %% "akka-actor" % "2.3.6"
//WebJars
lazy val jquery = "org.webjars" % "jquery" % "2.1.1"
lazy val bootstrap = "org.webjars" % "bootstrap" % "3.3.1"
}
| rtfpessoa/distributed-twitter-crawler | project/Dependencies.scala | Scala | mit | 1,127 |
package com.sksamuel.elastic4s.search
import com.sksamuel.elastic4s.ElasticsearchClientUri
import com.sksamuel.elastic4s.http.{ElasticDsl, HttpClient}
import com.sksamuel.elastic4s.testkit.ElasticSugar
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy
import org.scalatest.{FlatSpec, Matchers}
class IdQueryTest extends FlatSpec with ElasticSugar with Matchers with ElasticDsl {
val http = HttpClient(ElasticsearchClientUri("elasticsearch://" + node.ipAndPort))
http.execute {
createIndex("sodas")
}.await
http.execute {
bulk(
indexInto("sodas/zero").fields("name" -> "sprite zero", "style" -> "lemonade") id 5,
indexInto("sodas/zero").fields("name" -> "coke zero", "style" -> "cola") id 9
).refresh(RefreshPolicy.IMMEDIATE)
}.await
"id query" should "find by id" in {
val resp = http.execute {
search("sodas/zero").query {
idsQuery(5)
}
}.await
resp.totalHits shouldBe 1
resp.hits.hits.head.sourceField("name") shouldBe "sprite zero"
}
it should "find multiple ids" in {
val resp = http.execute {
search("sodas/zero").query {
idsQuery(5, 9)
}
}.await
resp.totalHits shouldBe 2
resp.hits.hits.map(_.sourceField("name")).toSet shouldBe Set("sprite zero", "coke zero")
}
}
| aroundus-inc/elastic4s | elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/search/IdQueryTest.scala | Scala | apache-2.0 | 1,311 |
/*
Copyright (c) 2014 by Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ml.dmlc.xgboost4j.scala.spark
import scala.collection.mutable
import scala.io.Source
import ml.dmlc.xgboost4j.{LabeledPoint => XGBLabeledPoint}
trait TrainTestData {
protected def getResourceLines(resource: String): Iterator[String] = {
require(resource.startsWith("/"), "resource must start with /")
val is = getClass.getResourceAsStream(resource)
if (is == null) {
sys.error(s"failed to resolve resource $resource")
}
Source.fromInputStream(is).getLines()
}
protected def getLabeledPoints(resource: String, featureSize: Int, zeroBased: Boolean):
Seq[XGBLabeledPoint] = {
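// Parses libsvm-style lines: "<label> <index>:<value> ...", where feature
// indices may be 1-based (zeroBased = false) or 0-based.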
getResourceLines(resource).map { line =>
val labelAndFeatures = line.split(" ")
val label = labelAndFeatures.head.toFloat
val values = new Array[Float](featureSize)
for (feature <- labelAndFeatures.tail) {
val idAndValue = feature.split(":")
if (!zeroBased) {
values(idAndValue(0).toInt - 1) = idAndValue(1).toFloat
} else {
values(idAndValue(0).toInt) = idAndValue(1).toFloat
}
}
XGBLabeledPoint(label, featureSize, null, values)
}.toList
}
protected def getLabeledPointsWithGroup(resource: String): Seq[XGBLabeledPoint] = {
getResourceLines(resource).map { line =>
val original = line.split(",")
val length = original.length
val label = original.head.toFloat
val group = original.last.toInt
val values = original.slice(1, length - 1).map(_.toFloat)
XGBLabeledPoint(label, values.size, null, values, 1f, group, Float.NaN)
}.toList
}
}
object Classification extends TrainTestData {
val train: Seq[XGBLabeledPoint] = getLabeledPoints("/agaricus.txt.train", 126, zeroBased = false)
val test: Seq[XGBLabeledPoint] = getLabeledPoints("/agaricus.txt.test", 126, zeroBased = false)
}
object MultiClassification extends TrainTestData {
val train: Seq[XGBLabeledPoint] = getLabeledPoints("/dermatology.data")
private def getLabeledPoints(resource: String): Seq[XGBLabeledPoint] = {
getResourceLines(resource).map { line =>
val featuresAndLabel = line.split(",")
val label = featuresAndLabel.last.toFloat - 1
val values = new Array[Float](featuresAndLabel.length - 1)
values(values.length - 1) =
if (featuresAndLabel(featuresAndLabel.length - 2) == "?") 1 else 0
for (i <- 0 until values.length - 2) {
values(i) = featuresAndLabel(i).toFloat
}
XGBLabeledPoint(label, values.length - 1, null, values.take(values.length - 1))
}.toList
}
}
object Regression extends TrainTestData {
val MACHINE_COL_NUM = 36
val train: Seq[XGBLabeledPoint] = getLabeledPoints(
"/machine.txt.train", MACHINE_COL_NUM, zeroBased = true)
val test: Seq[XGBLabeledPoint] = getLabeledPoints(
"/machine.txt.test", MACHINE_COL_NUM, zeroBased = true)
}
object Ranking extends TrainTestData {
val RANK_COL_NUM = 3
val train: Seq[XGBLabeledPoint] = getLabeledPointsWithGroup("/rank.train.csv")
val test: Seq[XGBLabeledPoint] = getLabeledPoints(
"/rank.test.txt", RANK_COL_NUM, zeroBased = false)
private def getGroups(resource: String): Seq[Int] = {
getResourceLines(resource).map(_.toInt).toList
}
}
object Synthetic {
val TRAIN_COL_NUM = 3
val TRAIN_WRONG_COL_NUM = 2
val train: Seq[XGBLabeledPoint] = Seq(
XGBLabeledPoint(1.0f, TRAIN_COL_NUM, Array(0, 1), Array(1.0f, 2.0f)),
XGBLabeledPoint(0.0f, TRAIN_COL_NUM, Array(0, 1, 2), Array(1.0f, 2.0f, 3.0f)),
XGBLabeledPoint(0.0f, TRAIN_COL_NUM, Array(0, 1, 2), Array(1.0f, 2.0f, 3.0f)),
XGBLabeledPoint(1.0f, TRAIN_COL_NUM, Array(0, 1), Array(1.0f, 2.0f))
)
val trainWithDiffFeatureSize: Seq[XGBLabeledPoint] = Seq(
XGBLabeledPoint(1.0f, TRAIN_WRONG_COL_NUM, Array(0, 1), Array(1.0f, 2.0f)),
XGBLabeledPoint(0.0f, TRAIN_COL_NUM, Array(0, 1, 2), Array(1.0f, 2.0f, 3.0f))
)
}
| dmlc/xgboost | jvm-packages/xgboost4j-spark/src/test/scala/ml/dmlc/xgboost4j/scala/spark/TrainTestData.scala | Scala | apache-2.0 | 4,490 |
/*
* Copyright (c) 2016, Groupon, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* Neither the name of GROUPON nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.apache.spark.groupon.metrics.util
import org.apache.spark.groupon.metrics._
import org.apache.spark.rpc.{ThreadSafeRpcEndpoint, RpcEnv}
class TestMetricsRpcEndpoint(override val rpcEnv: RpcEnv) extends ThreadSafeRpcEndpoint {
val metricStore = new scala.collection.mutable.ArrayBuffer[MetricMessage]()
override def receive: PartialFunction[Any, Unit] = {
case msg: MetricMessage => {
metricStore += msg
}
}
def clear(): Unit = {
metricStore.clear()
}
def getMetricNames: Seq[String] = {
metricStore.map(metricMessage => metricMessage.metricName).toSeq
}
def getMetricValues: Seq[AnyVal] = {
metricStore.map(metricMessage => metricMessage.value).toSeq
}
}
| groupon/spark-metrics | src/test/scala/org/apache/spark/groupon/metrics/util/TestMetricsRpcEndpoint.scala | Scala | bsd-3-clause | 2,269 |
package so.blacklight.swarm.smtp.policy
import akka.actor.Actor
class IgnoreLoopback extends Actor {
override def receive: Receive = {
case ProcessEmail(email) =>
if (email.isLoopback())
sender() ! PolicyReject("Loopback message")
else
sender() ! PolicyPass(email)
}
}
| xea/swarm-msg | src/main/scala/so/blacklight/swarm/smtp/policy/IgnoreLoopback.scala | Scala | apache-2.0 | 326 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.parquet
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName
import org.apache.parquet.schema.{MessageType, PrimitiveType, Types}
import wvlet.airframe.codec.PrimitiveCodec.AnyCodec
import wvlet.airframe.control.Control.withResource
import wvlet.airframe.surface.Surface
import wvlet.airspec.AirSpec
import wvlet.log.io.IOUtil
/**
*/
object ParquetRecordWriterTest extends AirSpec {
case class MyRecord(id: Int, name: String)
private val schema = Parquet.toParquetSchema(Surface.of[MyRecord])
info(schema)
test("write generic records with a schema") {
IOUtil.withTempFile("target/tmp-record", ".parquet") { file =>
withResource(Parquet.newRecordWriter(file.getPath, schema)) { writer =>
writer.write(Map("id" -> 1, "name" -> "leo"))
writer.write(Array(2, "yui"))
writer.write("""{"id":3, "name":"aina"}""")
writer.write("""[4, "ruri"]""")
writer.write(AnyCodec.toMsgPack(Map("id" -> 5, "name" -> "xxx")))
}
withResource(Parquet.newReader[Map[String, Any]](file.getPath)) { reader =>
reader.read() shouldBe Map("id" -> 1, "name" -> "leo")
reader.read() shouldBe Map("id" -> 2, "name" -> "yui")
reader.read() shouldBe Map("id" -> 3, "name" -> "aina")
reader.read() shouldBe Map("id" -> 4, "name" -> "ruri")
reader.read() shouldBe Map("id" -> 5, "name" -> "xxx")
reader.read() shouldBe null
}
}
}
test("throw an exception for an invalid input") {
IOUtil.withTempFile("target/tmp-record-invalid", ".parquet") { file =>
withResource(Parquet.newRecordWriter(file.getPath, schema)) { writer =>
intercept[IllegalArgumentException] {
writer.write("{broken json data}")
}
intercept[IllegalArgumentException] {
writer.write("not a json data")
}
intercept[IllegalArgumentException] {
// Broken MessagePack data
writer.write(Array[Byte](0x1))
}
intercept[IllegalArgumentException] {
// Insufficient array size
writer.write(Array(1))
}
intercept[IllegalArgumentException] {
// Too large array size
writer.write(Array(1, 2, 3))
}
intercept[IllegalArgumentException] {
writer.write(null)
}
}
}
}
case class RecordOpt(id: Int, flag: Option[Int] = None)
private val schema2 = new MessageType(
"my record",
Types.required(PrimitiveTypeName.INT32).named("id"),
Types.optional(PrimitiveTypeName.INT32).named("flag")
)
test("write records with Option") {
IOUtil.withTempFile("target/tmp-record-opt", ".parquet") { file =>
withResource(Parquet.newRecordWriter(file.getPath, schema2)) { writer =>
writer.write(RecordOpt(1, Some(1)))
writer.write(RecordOpt(2, None))
writer.write("""{"id":"3"}""")
}
withResource(Parquet.newReader[Map[String, Any]](file.getPath)) { reader =>
reader.read() shouldBe Map("id" -> 1, "flag" -> 1)
reader.read() shouldBe Map("id" -> 2)
reader.read() shouldBe Map("id" -> 3)
reader.read() shouldBe null
}
}
}
}
| wvlet/airframe | airframe-parquet/src/test/scala/wvlet/airframe/parquet/ParquetRecordWriterTest.scala | Scala | apache-2.0 | 3,771 |
package com.ctask.data
import org.scalatest.{FlatSpec, Matchers}
import play.api.libs.json.Json
/**
* Spec file for TaskList.
*/
class TaskListSpec extends FlatSpec with Matchers {
behavior of "TaskList Json converters"
it should "convert a TaskList with no tasks" in {
val expectedJson = "{\"nameStr\":\"taskList\"," +
"\"tasks\":[]," +
"\"email\":\"my@email.it\"}"
val taskList = new TaskList("taskList", Array.empty, "my@email.it")
Json.toJson(taskList)(TaskListJsonUtils.taskListWrites).toString shouldBe expectedJson
}
it should "convert a TaskList with no email" in {
val expectedJson = "{\"nameStr\":\"taskList\"," +
"\"tasks\":[]," +
"\"email\":null}"
val taskList = new TaskList("taskList", Array.empty, None.orNull)
Json.toJson(taskList)(TaskListJsonUtils.taskListWrites).toString shouldBe expectedJson
}
it should "convert a TaskList with tasks" in {
val expectedJson = "{\"nameStr\":\"taskList\"," +
"\"tasks\":[" +
"{\"name\":\"taskName\"," +
"\"description\":\"desk\"," +
"\"id\":2," +
"\"dueDate\":\"none\"," +
"\"done\":false," +
"\"recurrence\":\"never\"," +
"\"reminderSent\":false}" +
"]," +
"\"email\":\"my@email.it\"}"
val taskList = new TaskList("taskList",
Array(new Task("taskName", "desk", 2, None.orNull, false, Task.Recurrence.NEVER, false)),
"my@email.it")
Json.toJson(taskList)(TaskListJsonUtils.taskListWrites).toString shouldBe expectedJson
}
}
| modsrm/ctask | common/src/test/scala/com/ctask/data/TaskListSpec.scala | Scala | gpl-3.0 | 1,790 |
package demo
package components
package elementalui
import chandu0101.macros.tojs.GhPagesMacros
import chandu0101.scalajs.react.components.elementalui._
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.html_<^._
import scala.scalajs.js
object EuiButtonsDemo {
val code = GhPagesMacros.exampleSource
// EXAMPLE:START
case class Backend($ : BackendScope[Unit, Unit]) {
val renderButtonSizes =
<.div(
EuiButton(size = ButtonSize.lg)("Large"),
EuiButton()("Default"),
EuiButton(size = ButtonSize.sm)("Small"),
EuiButton(size = ButtonSize.xs)("Extra Small")
)
def renderButtonVariants(list: (ButtonType, String)*) =
<.div(
list.map(t => EuiButton(`type` = t._1)(t._2)).toTagMod
)
def render =
CodeExample(code, "EuiButtons")(
<.div(
<.h1("Buttons"),
<.h2("Sizes"),
renderButtonSizes,
<.h2("Fill Buttons"),
renderButtonVariants(
(ButtonType.primary, "Primary"),
(ButtonType.success, "Success"),
(ButtonType.warning, "Warning"),
(ButtonType.danger, "Danger")
),
<.h2("Default Button Alternatives"),
renderButtonVariants(
(ButtonType.default_primary, "Primary"),
(ButtonType.default_success, "Success"),
(ButtonType.default_warning, "Warning"),
(ButtonType.default_danger, "Danger")
),
<.h2("Hollow Button Alternatives"),
renderButtonVariants(
(ButtonType.hollow_primary, "Primary"),
(ButtonType.hollow_success, "Success"),
(ButtonType.hollow_warning, "Warning"),
(ButtonType.hollow_danger, "Danger")
),
<.h2("Link Style Buttons"),
renderButtonVariants(
(ButtonType.link, "Link"),
(ButtonType.link_cancel, "Cancel"),
(ButtonType.link_delete, "Delete"),
(ButtonType.link_text, "Text")
),
<.h2("Button Groups"),
EuiButtonGroup()(
EuiButton()("Left"),
EuiButton()("Middle"),
EuiButton()("Right")
),
<.h2("Dropdown"),
EuiDropdown(
buttonLabel = "Default Trigger",
items = js.Array(
EuiDropdownMenuItem(label = "Action"),
EuiDropdownMenuItem(label = "Another Action"),
EuiDropdownMenuItem(`type` = EuiDropdownMenuItemType.DIVIDER),
EuiDropdownMenuItem(label = "Header", `type` = EuiDropdownMenuItemType.HEADER),
EuiDropdownMenuItem(label = "Action")
)
)(),
<.h3("Custom Trigger"),
EuiDropdown(
items = js.Array(
EuiDropdownMenuItem(label = "Action"),
EuiDropdownMenuItem(label = "Another Action"),
EuiDropdownMenuItem(`type` = EuiDropdownMenuItemType.DIVIDER),
EuiDropdownMenuItem(label = "Header", `type` = EuiDropdownMenuItemType.HEADER),
EuiDropdownMenuItem(label = "Action")
)
)(<.h3("I am an H3!!!"))
)
)
}
val component = ScalaComponent
.builder[Unit]("EuiButtonsDemo")
.renderBackend[Backend]
.build
// EXAMPLE:END
def apply() = component()
}
| chandu0101/scalajs-react-components | demo/src/main/scala/demo/components/elementalui/EuiButtonsDemo.scala | Scala | apache-2.0 | 3,363 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.benchmark
import org.apache.spark.util.Benchmark
/**
* Benchmark to measure whole stage codegen performance.
* To run this:
* build/sbt "sql/test-only *benchmark.MiscBenchmark"
*
* Benchmarks in this file are skipped in normal builds.
*/
class MiscBenchmark extends BenchmarkBase {
ignore("filter & aggregate without group") {
val N = 500L << 22
runBenchmark("range/filter/sum", N) {
sparkSession.range(N).filter("(id & 1) = 1").groupBy().sum().collect()
}
/*
Java HotSpot(TM) 64-Bit Server VM 1.8.0_60-b27 on Mac OS X 10.11
Intel(R) Core(TM) i7-4960HQ CPU @ 2.60GHz
range/filter/sum: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
------------------------------------------------------------------------------------------------
range/filter/sum codegen=false 30663 / 31216 68.4 14.6 1.0X
range/filter/sum codegen=true 2399 / 2409 874.1 1.1 12.8X
*/
}
ignore("range/limit/sum") {
val N = 500L << 20
runBenchmark("range/limit/sum", N) {
sparkSession.range(N).limit(1000000).groupBy().sum().collect()
}
/*
Westmere E56xx/L56xx/X56xx (Nehalem-C)
range/limit/sum: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-------------------------------------------------------------------------------------------
range/limit/sum codegen=false 609 / 672 861.6 1.2 1.0X
range/limit/sum codegen=true 561 / 621 935.3 1.1 1.1X
*/
}
ignore("sample") {
val N = 500 << 18
runBenchmark("sample with replacement", N) {
sparkSession.range(N).sample(withReplacement = true, 0.01).groupBy().sum().collect()
}
/*
Java HotSpot(TM) 64-Bit Server VM 1.8.0_60-b27 on Mac OS X 10.11
Intel(R) Core(TM) i7-4960HQ CPU @ 2.60GHz
sample with replacement: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
------------------------------------------------------------------------------------------------
sample with replacement codegen=false 7073 / 7227 18.5 54.0 1.0X
sample with replacement codegen=true 5199 / 5203 25.2 39.7 1.4X
*/
runBenchmark("sample without replacement", N) {
sparkSession.range(N).sample(withReplacement = false, 0.01).groupBy().sum().collect()
}
/*
Java HotSpot(TM) 64-Bit Server VM 1.8.0_60-b27 on Mac OS X 10.11
Intel(R) Core(TM) i7-4960HQ CPU @ 2.60GHz
sample without replacement: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
------------------------------------------------------------------------------------------------
sample without replacement codegen=false 1508 / 1529 86.9 11.5 1.0X
sample without replacement codegen=true 644 / 662 203.5 4.9 2.3X
*/
}
ignore("collect") {
val N = 1 << 20
val benchmark = new Benchmark("collect", N)
benchmark.addCase("collect 1 million") { iter =>
sparkSession.range(N).collect()
}
benchmark.addCase("collect 2 millions") { iter =>
sparkSession.range(N * 2).collect()
}
benchmark.addCase("collect 4 millions") { iter =>
sparkSession.range(N * 4).collect()
}
benchmark.run()
/*
Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
collect: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-------------------------------------------------------------------------------------------
collect 1 million 439 / 654 2.4 418.7 1.0X
collect 2 millions 961 / 1907 1.1 916.4 0.5X
collect 4 millions 3193 / 3895 0.3 3044.7 0.1X
*/
}
ignore("collect limit") {
val N = 1 << 20
val benchmark = new Benchmark("collect limit", N)
benchmark.addCase("collect limit 1 million") { iter =>
sparkSession.range(N * 4).limit(N).collect()
}
benchmark.addCase("collect limit 2 millions") { iter =>
sparkSession.range(N * 4).limit(N * 2).collect()
}
benchmark.run()
/*
model name : Westmere E56xx/L56xx/X56xx (Nehalem-C)
collect limit: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-------------------------------------------------------------------------------------------
collect limit 1 million 833 / 1284 1.3 794.4 1.0X
collect limit 2 millions 3348 / 4005 0.3 3193.3 0.2X
*/
}
ignore("generate explode") {
val N = 1 << 24
runBenchmark("generate explode array", N) {
val df = sparkSession.range(N).selectExpr(
"id as key",
"array(rand(), rand(), rand(), rand(), rand()) as values")
df.selectExpr("key", "explode(values) value").count()
}
/*
Java HotSpot(TM) 64-Bit Server VM 1.8.0_92-b14 on Mac OS X 10.11.6
Intel(R) Core(TM) i7-4980HQ CPU @ 2.80GHz
generate explode array: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
------------------------------------------------------------------------------------------------
generate explode array wholestage off 6920 / 7129 2.4 412.5 1.0X
generate explode array wholestage on 623 / 646 26.9 37.1 11.1X
*/
runBenchmark("generate explode map", N) {
val df = sparkSession.range(N).selectExpr(
"id as key",
"map('a', rand(), 'b', rand(), 'c', rand(), 'd', rand(), 'e', rand()) pairs")
df.selectExpr("key", "explode(pairs) as (k, v)").count()
}
/*
Java HotSpot(TM) 64-Bit Server VM 1.8.0_92-b14 on Mac OS X 10.11.6
Intel(R) Core(TM) i7-4980HQ CPU @ 2.80GHz
generate explode map: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
------------------------------------------------------------------------------------------------
generate explode map wholestage off 11978 / 11993 1.4 714.0 1.0X
generate explode map wholestage on 866 / 919 19.4 51.6 13.8X
*/
runBenchmark("generate posexplode array", N) {
val df = sparkSession.range(N).selectExpr(
"id as key",
"array(rand(), rand(), rand(), rand(), rand()) as values")
df.selectExpr("key", "posexplode(values) as (idx, value)").count()
}
/*
Java HotSpot(TM) 64-Bit Server VM 1.8.0_92-b14 on Mac OS X 10.11.6
Intel(R) Core(TM) i7-4980HQ CPU @ 2.80GHz
generate posexplode array: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
------------------------------------------------------------------------------------------------
generate posexplode array wholestage off 7502 / 7513 2.2 447.1 1.0X
generate posexplode array wholestage on 617 / 623 27.2 36.8 12.2X
*/
runBenchmark("generate inline array", N) {
val df = sparkSession.range(N).selectExpr(
"id as key",
"array((rand(), rand()), (rand(), rand()), (rand(), 0.0d)) as values")
df.selectExpr("key", "inline(values) as (r1, r2)").count()
}
/*
Java HotSpot(TM) 64-Bit Server VM 1.8.0_92-b14 on Mac OS X 10.11.6
Intel(R) Core(TM) i7-4980HQ CPU @ 2.80GHz
generate inline array: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
------------------------------------------------------------------------------------------------
generate inline array wholestage off 6901 / 6928 2.4 411.3 1.0X
generate inline array wholestage on 1001 / 1010 16.8 59.7 6.9X
*/
val M = 60000
runBenchmark("generate big struct array", M) {
import sparkSession.implicits._
val df = sparkSession.sparkContext.parallelize(Seq(("1",
Array.fill(M)({
val i = math.random
(i.toString, (i + 1).toString, (i + 2).toString, (i + 3).toString)
})))).toDF("col", "arr")
df.selectExpr("*", "explode(arr) as arr_col")
.select("col", "arr_col.*").count
}
/*
Java HotSpot(TM) 64-Bit Server VM 1.8.0_151-b12 on Mac OS X 10.12.6
Intel(R) Core(TM) i7-4980HQ CPU @ 2.80GHz
Testing the impact of the Generate.unrequiredChildIndex optimization:
we see an enormous ~250x improvement in this case, and the cost grows as O(n^2) without it.
with Optimization ON:
generate big struct array: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
------------------------------------------------------------------------------------------------
generate big struct array wholestage off 331 / 378 0.2 5524.9 1.0X
generate big struct array wholestage on 205 / 232 0.3 3413.1 1.6X
with Optimization OFF:
generate big struct array: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
------------------------------------------------------------------------------------------------
generate big struct array wholestage off 49697 / 51496 0.0 828277.7 1.0X
generate big struct array wholestage on 50558 / 51434 0.0 842641.6 1.0X
*/
}
ignore("generate regular generator") {
val N = 1 << 24
runBenchmark("generate stack", N) {
val df = sparkSession.range(N).selectExpr(
"id as key",
"id % 2 as t1",
"id % 3 as t2",
"id % 5 as t3",
"id % 7 as t4",
"id % 13 as t5")
df.selectExpr("key", "stack(4, t1, t2, t3, t4, t5)").count()
}
/*
Java HotSpot(TM) 64-Bit Server VM 1.8.0_92-b14 on Mac OS X 10.11.6
Intel(R) Core(TM) i7-4980HQ CPU @ 2.80GHz
generate stack: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
------------------------------------------------------------------------------------------------
generate stack wholestage off 12953 / 13070 1.3 772.1 1.0X
generate stack wholestage on 836 / 847 20.1 49.8 15.5X
*/
}
}
| bravo-zhang/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/MiscBenchmark.scala | Scala | apache-2.0 | 11,425 |
package org.jetbrains.plugins.scala.worksheet.integration.plain
import org.jetbrains.plugins.scala.WorksheetEvaluationTests
import org.junit.experimental.categories.Category
@Category(Array(classOf[WorksheetEvaluationTests]))
class WorksheetPlainCompileOnServerRunLocallyIntegrationTest extends WorksheetPlainIntegrationBaseTest {
override def useCompileServer = true
override def runInCompileServerProcess = false
}
| JetBrains/intellij-scala | scala/worksheet/test/org/jetbrains/plugins/scala/worksheet/integration/plain/WorksheetPlainCompileOnServerRunLocallyIntegrationTest.scala | Scala | apache-2.0 | 425 |
package net.snowflake.spark.snowflake
import java.net.{InetSocketAddress, Proxy}
import java.net.Proxy.Type
import java.util.Properties
import net.snowflake.client.core.SFSessionProperty
import net.snowflake.client.jdbc.internal.amazonaws.ClientConfiguration
import net.snowflake.client.jdbc.internal.microsoft.azure.storage.OperationContext
private[snowflake] class ProxyInfo(proxyHost: Option[String],
proxyPort: Option[String],
proxyUser: Option[String],
proxyPassword: Option[String],
nonProxyHosts: Option[String])
extends Serializable {
private def validate(): Unit = {
if (proxyHost.isEmpty || proxyPort.isEmpty) {
throw new IllegalArgumentException(
"proxy host and port are mandatory when using proxy."
)
}
try {
proxyPort.get.toInt
} catch {
case _: Any =>
throw new IllegalArgumentException("proxy port must be a valid number.")
}
if (proxyPassword.isDefined && proxyUser.isEmpty) {
throw new IllegalArgumentException(
"proxy user must be set if proxy password is set."
)
}
}
def setProxyForJDBC(jdbcProperties: Properties): Unit = {
validate()
// Setup 3 mandatory properties
jdbcProperties.put(SFSessionProperty.USE_PROXY.getPropertyKey, "true")
jdbcProperties.put(
SFSessionProperty.PROXY_HOST.getPropertyKey,
proxyHost.get
)
jdbcProperties.put(
SFSessionProperty.PROXY_PORT.getPropertyKey,
proxyPort.get
)
// Hard code to manually test proxy is used by negative test
// if (jdbcProperties.size() <= 3) {
// jdbcProperties.put(SFSessionProperty.PROXY_HOST.getPropertyKey(), "wronghost")
// jdbcProperties.put(SFSessionProperty.PROXY_PORT.getPropertyKey(), "12345")
// }
// Setup 3 optional properties if they are provided.
proxyUser match {
case Some(optionValue) =>
jdbcProperties.put(
SFSessionProperty.PROXY_USER.getPropertyKey,
optionValue
)
case None =>
}
proxyPassword match {
case Some(optionValue) =>
jdbcProperties.put(
SFSessionProperty.PROXY_PASSWORD.getPropertyKey,
optionValue
)
case None =>
}
nonProxyHosts match {
case Some(optionValue) =>
jdbcProperties.put(
SFSessionProperty.NON_PROXY_HOSTS.getPropertyKey,
optionValue
)
case None =>
}
}
def setProxyForS3(s3client: ClientConfiguration): Unit = {
validate()
// Setup 2 mandatory properties
s3client.setProxyHost(proxyHost.get)
s3client.setProxyPort(proxyPort.get.toInt)
// Hard code to manually test proxy is used by negative test
// s3client.setProxyHost("wronghost")
// s3client.setProxyPort(12345)
// Setup 3 optional properties if they are provided.
proxyUser match {
case Some(optionValue) =>
s3client.setProxyUsername(optionValue)
case None =>
}
proxyPassword match {
case Some(optionValue) =>
s3client.setProxyPassword(optionValue)
case None =>
}
nonProxyHosts match {
case Some(optionValue) =>
s3client.setNonProxyHosts(optionValue)
case None =>
}
}
def setProxyForAzure(): Unit = {
validate()
OperationContext.setDefaultProxy(
new Proxy(
Type.HTTP,
new InetSocketAddress(proxyHost.get, proxyPort.get.toInt)
)
)
// Hard code to manually test proxy is used by negative test
// OperationContext.setDefaultProxy(new Proxy(Type.HTTP,
// new InetSocketAddress("wronghost", 12345)))
}
}
| snowflakedb/spark-snowflake | src/main/scala/net/snowflake/spark/snowflake/ProxyInfo.scala | Scala | apache-2.0 | 3,752 |
package net.al3x.blog
import java.io.File
import scala.collection.{immutable, mutable}
import scala.collection.jcl
import scala.xml.XML
object Blog extends FileHelpers {
def main(args: Array[String]) {
val posts = findPosts(new File(Config.postDir))
val lastTenPosts = posts.reverse.slice(0, 10)
if (args.isDefinedAt(0)) {
args(0) match {
case "-f" => posts.foreach(post => { post.write; print(".") }); println("Done.")
case "-n" => Post.newPost
case _ => println("Unknown argument."); System.exit(-1)
}
} else {
lastTenPosts.foreach(post => { post.write; print(".") }); println("Done.")
}
// copy static files
copyAllFiles(Config.staticDir, Config.wwwDir)
// generate dynamic files
new About(Config.aboutPost).write
new Archive(posts).write
new Sitemap(posts).write
new AtomFeed(lastTenPosts).write
new Index(lastTenPosts).write
}
}
| al3x/simple-scala-blog | src/Blog.scala | Scala | mit | 938 |
/* NSC -- new Scala compiler
* Copyright 2005-2013 LAMP/EPFL
* @author Stephane Micheloud
* Adapted from Lex Spoon's sbaz manual
*/
package scala.tools.docutil
// For help on man pages see:
// - http://www.linuxfocus.org/English/November2003/article309.shtml
// - http://www.schweikhardt.net/man_page_howto.html
object EmitManPage {
import ManPage._
val out = Console
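// troff requires dashes in man page source to be escaped as "\-".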
def escape(text: String) =
text.replaceAll("-", "\\\\-")
def emitSection(section: Section, depth: Int): Unit = {
def emitPara(text: AbstractText): Unit = {
emitText(text)
out println "\n.IP"
}
def emitText(text: AbstractText): Unit = {
text match {
case seq:SeqText =>
seq.components foreach emitText
case seq:SeqPara =>
seq.components foreach emitPara
case Text(text) =>
out print escape(text)
case BSlash =>
out print "\\e"
case NDash | MDash =>
out print "\\-"
case Bold(text) =>
out print "\\fB"
emitText(text)
out print "\\fR"
case Italic(text) =>
out print "\\fI"
emitText(text)
out print "\\fR"
case Emph(text) =>
out.print("\\fI")
emitText(text)
out.print("\\fI")
case Mono(text) =>
out.print("")
emitText(text)
out.print("")
case Quote(text) =>
out.print("\"")
emitText(text)
out.print("\"")
case DefinitionList(definitions @ _*) =>
var n = definitions.length
for (d <- definitions) {
out println ".TP"
emitText(d.term)
out.println
emitText(d.description)
if (n > 1) { out.println; n -= 1 }
}
case Link(label, url) =>
emitText(label)
case _ =>
sys.error("unknown text node: " + text)
}
}
def emitParagraph(para: Paragraph): Unit = {
para match {
case TextParagraph(text) =>
out println ".PP"
emitText(text)
out.println
case BlockQuote(text) =>
out println ".TP"
emitText(text)
out.println
case CodeSample(text) =>
out println "\n.nf"
out.print(text)
out println "\n.fi"
case lst:BulletList =>
for (item <- lst.items) {
out println ".IP"
emitText(item)
out.println
}
case lst:NumberedList =>
for {
idx <- List.range(0, lst.items.length)
} {
val item = lst.items(idx)
out.println(".IP \"" + (idx+1) + ".\"")
emitText(item)
out.println
}
case TitledPara(title, text) =>
out println ".PP"
out print "\\fB"
emitText(title)
out print "\\fR"
emitText(text)
case EmbeddedSection(sect) =>
emitSection(sect, depth + 1)
case _ =>
sys.error("unknown paragraph node: " + para)
}
}
out println ".\\\""
out.println(".\\\" ############################## " + section.title + " ###############################")
out println ".\\\""
val tag = if (depth > 1) ".SS" else ".SH"
val title =
if (section.title.indexOf(" ") > 0) "\"" + section.title + "\""
else section.title
out.println(tag + " " + title)
section.paragraphs foreach emitParagraph
}
def emitDocument(doc: Document): Unit = {
out println ".\\\" ##########################################################################"
out println ".\\\" #                      __                                                #"
out println ".\\\" #      ________ ___   / /  ___     Scala 2 On-line Manual Pages         #"
out println ".\\\" #     / __/ __// _ | / /  / _ |    (c) 2002-2013, LAMP/EPFL             #"
out println ".\\\" #   __\\ \\/ /__/ __ |/ /__/ __ |                                         #"
out println ".\\\" #  /____/\\___/_/ |_/____/_/ | |    http://scala-lang.org/               #"
out println ".\\\" #                            |/                                          #"
out println ".\\\" ##########################################################################"
out println ".\\\""
out println ".\\\" Process this file with nroff -man scala.1"
out println ".\\\""
out.println(".TH " + doc.title + " " + doc.category.id +
" \"" + doc.date + "\" \"version " + doc.version +
"\" \"" + doc.category + "\"")
doc.sections foreach (s => emitSection(s, 1))
}
def main(args: Array[String]) = args match {
case Array(classname) => emitManPage(classname)
case Array(classname, file, _*) => emitManPage(classname, new java.io.FileOutputStream(file))
case _ => sys.exit(1)
}
def emitManPage(classname: String, outStream: java.io.OutputStream = out.out): Unit =
Console.withOut(outStream) {
try {
val cl = this.getClass.getClassLoader()
val clasz = cl loadClass classname
val meth = clasz getDeclaredMethod "manpage"
val doc = meth.invoke(null).asInstanceOf[Document]
emitDocument(doc)
} catch {
case ex: Exception =>
ex.printStackTrace()
System.err println "Error in EmitManPage"
sys.exit(1)
}
}
}
| martijnhoekstra/scala | src/manual/scala/tools/docutil/EmitManPage.scala | Scala | apache-2.0 | 5,510 |
package dpla.ingestion3.utils
import scala.io.Source
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.fasterxml.jackson.module.scala.experimental.ScalaObjectMapper
// Get data from files
trait FileLoader {
/**
*
* @return
*/
def files: Seq[String] // NPE if val, Order of operations? Why?
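// (A val here could be read during trait initialization, before the concrete
// class's own fields are assigned, hence the NPE; a def defers evaluation
// until the instance is fully constructed.)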
/**
*
* @param files
* @return
*/
def getVocabFromCsvFiles(files: Seq[String]): Set[Array[String]] =
getVocabFromFiles(files).map(_.split(",", 2))
def getVocabFromJsonFiles(files: Seq[String]): Seq[(String, String)] = {
val mapper = new ObjectMapper() with ScalaObjectMapper
mapper.registerModule(DefaultScalaModule)
files.flatMap(file => {
val fileContentString = Source.fromInputStream(getClass.getResourceAsStream(file)).getLines().mkString
mapper.readValue[Map[String, String]](fileContentString)
})
}
/**
*
* @param files
* @return
*/
def getVocabFromFiles(files: Seq[String]): Set[String] =
files.flatMap(readFile).toSet
/**
* Read text files ignoring lines starting with `#`
*/
def readFile(file: String): Seq[String] =
Source.fromInputStream(getClass.getResourceAsStream(file))
.getLines()
.filterNot(_.startsWith("#"))
.toSeq
}
| dpla/ingestion3 | src/main/scala/dpla/ingestion3/utils/FileLoader.scala | Scala | mit | 1,341 |
package nars.language
import nars.storage.Memory
//remove if not needed
import scala.collection.JavaConversions._
object InstanceProperty {
/**
* Try to make a new compound from two components. Called by the inference rules.
* <p>
* A {-] B becomes {A} --> [B]
* @param subject The first component
* @param predicate The second component
* @param memory Reference to the memory
* @return A compound generated or null
*/
def make(subject: Term, predicate: Term, memory: Memory): Statement = {
Inheritance.make(SetExt.make(subject, memory), SetInt.make(predicate, memory), memory)
}
}
| automenta/opennars | nars_scala/src/main/scala/nars/language/InstanceProperty.scala | Scala | gpl-2.0 | 621 |
/* Title: Pure/library.scala
Module: PIDE
Author: Makarius
Basic library.
*/
package isabelle
import scala.collection.mutable
object Library
{
/* user errors */
object ERROR
{
def apply(message: String): Throwable = new RuntimeException(message)
def unapply(exn: Throwable): Option[String] = Exn.user_message(exn)
}
def error(message: String): Nothing = throw ERROR(message)
def cat_message(msg1: String, msg2: String): String =
if (msg1 == "") msg2
else if (msg2 == "") msg1
else msg1 + "\n" + msg2
def cat_error(msg1: String, msg2: String): Nothing =
error(cat_message(msg1, msg2))
/* integers */
private val small_int = 10000
private lazy val small_int_table =
{
val array = new Array[String](small_int)
for (i <- 0 until small_int) array(i) = i.toString
array
}
def is_small_int(s: String): Boolean =
{
val len = s.length
1 <= len && len <= 4 &&
s.forall(c => '0' <= c && c <= '9') &&
(len == 1 || s(0) != '0')
}
def signed_string_of_long(i: Long): String =
if (0 <= i && i < small_int) small_int_table(i.toInt)
else i.toString
def signed_string_of_int(i: Int): String =
if (0 <= i && i < small_int) small_int_table(i)
else i.toString
/* separated chunks */
def separate[A](s: A, list: List[A]): List[A] =
{
val result = new mutable.ListBuffer[A]
var first = true
for (x <- list) {
if (first) {
first = false
result += x
}
else {
result += s
result += x
}
}
result.toList
}
def separated_chunks(sep: Char => Boolean, source: CharSequence): Iterator[CharSequence] =
new Iterator[CharSequence] {
private val end = source.length
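// Scans to the next separator; the emitted chunk excludes the separator itself
// (the initial call uses index -1 so that the first chunk starts at offset 0).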
private def next_chunk(i: Int): Option[(CharSequence, Int)] =
{
if (i < end) {
var j = i; do j += 1 while (j < end && !sep(source.charAt(j)))
Some((source.subSequence(i + 1, j), j))
}
else None
}
private var state: Option[(CharSequence, Int)] = if (end == 0) None else next_chunk(-1)
def hasNext(): Boolean = state.isDefined
def next(): CharSequence =
state match {
case Some((s, i)) => { state = next_chunk(i); s }
case None => Iterator.empty.next()
}
}
def space_explode(sep: Char, str: String): List[String] =
separated_chunks(_ == sep, str).map(_.toString).toList
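// e.g. space_explode(',', "a,,b") == List("a", "", "b")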
/* lines */
def terminate_lines(lines: Iterable[CharSequence]): Iterable[CharSequence] =
new Iterable[CharSequence] {
def iterator: Iterator[CharSequence] =
lines.iterator.map(line => new Line_Termination(line))
}
def cat_lines(lines: TraversableOnce[String]): String = lines.mkString("\n")
def split_lines(str: String): List[String] = space_explode('\n', str)
def first_line(source: CharSequence): String =
{
val lines = separated_chunks(_ == '\n', source)
if (lines.hasNext) lines.next.toString
else ""
}
/* strings */
def try_unprefix(prfx: String, s: String): Option[String] =
if (s.startsWith(prfx)) Some(s.substring(prfx.length)) else None
def try_unsuffix(sffx: String, s: String): Option[String] =
if (s.endsWith(sffx)) Some(s.substring(0, s.length - sffx.length)) else None
def trim_line(s: String): String =
if (s.endsWith("\r\n")) s.substring(0, s.length - 2)
else if (s.endsWith("\r") || s.endsWith("\n")) s.substring(0, s.length - 1)
else s
/* quote */
def quote(s: String): String = "\"" + s + "\""
def try_unquote(s: String): Option[String] =
if (s.startsWith("\"") && s.endsWith("\"")) Some(s.substring(1, s.length - 1))
else None
def perhaps_unquote(s: String): String = try_unquote(s) getOrElse s
def commas(ss: Iterable[String]): String = ss.iterator.mkString(", ")
def commas_quote(ss: Iterable[String]): String = ss.iterator.map(quote).mkString(", ")
/* CharSequence */
class Reverse(text: CharSequence, start: Int, end: Int) extends CharSequence
{
require(0 <= start && start <= end && end <= text.length)
def this(text: CharSequence) = this(text, 0, text.length)
def length: Int = end - start
def charAt(i: Int): Char = text.charAt(end - i - 1)
def subSequence(i: Int, j: Int): CharSequence =
if (0 <= i && i <= j && j <= length) new Reverse(text, end - j, end - i)
else throw new IndexOutOfBoundsException
override def toString: String =
{
val buf = new StringBuilder(length)
for (i <- 0 until length)
buf.append(charAt(i))
buf.toString
}
}
class Line_Termination(text: CharSequence) extends CharSequence
{
def length: Int = text.length + 1
def charAt(i: Int): Char = if (i == text.length) '\n' else text.charAt(i)
def subSequence(i: Int, j: Int): CharSequence =
if (j == text.length + 1) new Line_Termination(text.subSequence(i, j - 1))
else text.subSequence(i, j)
override def toString: String = text.toString + "\n"
}
/* canonical list operations */
def member[A, B](xs: List[A])(x: B): Boolean = xs.exists(_ == x)
def insert[A](x: A)(xs: List[A]): List[A] = if (xs.contains(x)) xs else x :: xs
def remove[A, B](x: B)(xs: List[A]): List[A] = if (member(xs)(x)) xs.filterNot(_ == x) else xs
def update[A](x: A)(xs: List[A]): List[A] = x :: remove(x)(xs)
}
class Basic_Library
{
val ERROR = Library.ERROR
val error = Library.error _
val cat_error = Library.cat_error _
val space_explode = Library.space_explode _
val split_lines = Library.split_lines _
val cat_lines = Library.cat_lines _
val quote = Library.quote _
val commas = Library.commas _
val commas_quote = Library.commas_quote _
}
| MerelyAPseudonym/isabelle | src/Pure/library.scala | Scala | bsd-3-clause | 5,749 |
def functionMonoid[A,B](B: Monoid[B]): Monoid[A => B] =
new Monoid[A => B] {
def op(f: A => B, g: A => B) = a => B.op(f(a), g(a))
val zero: A => B = a => B.zero
}
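// A minimal usage sketch, assuming the book's Monoid trait and an `intAddition`
// Monoid[Int] instance (hypothetical names here):
//   val m = functionMonoid[String, Int](intAddition)
//   m.op(_.length, _.count(_ == 'a'))("banana") // 6 + 3 == 9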
| lucaviolanti/scala-redbook | answerkey/monoids/17.answer.scala | Scala | mit | 178 |
package honor
import models.db.MasterShipBase
import ranking.EvolutionBase
import scalikejdbc._
import scala.collection.mutable
/**
*
* @author ponkotuy
* Date: 15/03/17.
*/
object ShipMaster extends HonorCategory {
import com.ponkotuy.value.ShipIds._
override def category = 1
override def approved(memberId: Long, db: HonorCache): List[String] = {
val ships = db.shipWithName
val lvs = mutable.Map[Int, Int]().withDefaultValue(0)
ships.foreach { ship =>
lvs(EvolutionBase(ship.shipId)) += ship.lv
}
val result = lvs.toVector.sortBy(-_._2).takeWhile(_._2 >= 200).map(_._1)
val withAliases = result ++ result.flatMap(EvolutionBase.Aliases.get)
val mss = MasterShipBase.findAllBy(sqls.in(MasterShipBase.column.id, withAliases))
mss.flatMap { ms => toHonor(ms) :: OriginalHonor.get(ms.id).toList }
}
private def toHonor(ms: MasterShipBase): String = s"${ms.name}提督"
val OriginalHonor = Map(
Tone -> "我輩は利根提督である",
Yudachi -> "夕立提督っぽい",
Hibiki -> "ハラショー",
Naka -> "アイドル提督",
Tenryu -> "俺の名は天龍提督",
Sendai -> "夜戦提督",
Kumano -> "お嬢様提督",
Akebono -> "クソ提督",
Sazanami -> "ご主人様"
)
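// (The honor titles above are Japanese; e.g. Naka -> "Idol Admiral",
// Sendai -> "Night Battle Admiral", and the default title is "<ship name> Admiral".
// The category comment below reads: "the total level of one ship girl, including
// remodels, is at least 200; includes titles specific to certain ship girls".)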
override val comment: String = "ある艦娘(改造後含む)の合計Lvが200以上。特定艦娘固有称号含む"
}
| nekoworkshop/MyFleetGirls | server/app/honor/ShipMaster.scala | Scala | mit | 1,405 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.tuning
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.{Estimator, Model, Pipeline}
import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel, OneVsRest}
import org.apache.spark.ml.classification.LogisticRegressionSuite.generateLogisticInput
import org.apache.spark.ml.evaluation.{BinaryClassificationEvaluator, Evaluator, MulticlassClassificationEvaluator, RegressionEvaluator}
import org.apache.spark.ml.feature.HashingTF
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.param.shared.HasInputCol
import org.apache.spark.ml.regression.LinearRegression
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTestingUtils}
import org.apache.spark.mllib.util.{LinearDataGenerator, MLlibTestSparkContext}
import org.apache.spark.sql.Dataset
import org.apache.spark.sql.types.StructType
class CrossValidatorSuite
extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {
import testImplicits._
@transient var dataset: Dataset[_] = _
override def beforeAll(): Unit = {
super.beforeAll()
dataset = sc.parallelize(generateLogisticInput(1.0, 1.0, 100, 42), 2).toDF()
}
test("cross validation with logistic regression") {
val lr = new LogisticRegression
val lrParamMaps = new ParamGridBuilder()
.addGrid(lr.regParam, Array(0.001, 1000.0))
.addGrid(lr.maxIter, Array(0, 10))
.build()
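    // 2 regParam values x 2 maxIter values => 4 candidate ParamMaps,
    // hence 4 entries expected in cvModel.avgMetrics below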
val eval = new BinaryClassificationEvaluator
val cv = new CrossValidator()
.setEstimator(lr)
.setEstimatorParamMaps(lrParamMaps)
.setEvaluator(eval)
.setNumFolds(3)
val cvModel = cv.fit(dataset)
MLTestingUtils.checkCopyAndUids(cv, cvModel)
val parent = cvModel.bestModel.parent.asInstanceOf[LogisticRegression]
assert(parent.getRegParam === 0.001)
assert(parent.getMaxIter === 10)
assert(cvModel.avgMetrics.length === lrParamMaps.length)
}
test("cross validation with linear regression") {
val dataset = sc.parallelize(
LinearDataGenerator.generateLinearInput(
6.3, Array(4.7, 7.2), Array(0.9, -1.3), Array(0.7, 1.2), 100, 42, 0.1), 2)
.map(_.asML).toDF()
val trainer = new LinearRegression().setSolver("l-bfgs")
val lrParamMaps = new ParamGridBuilder()
.addGrid(trainer.regParam, Array(1000.0, 0.001))
.addGrid(trainer.maxIter, Array(0, 10))
.build()
val eval = new RegressionEvaluator()
val cv = new CrossValidator()
.setEstimator(trainer)
.setEstimatorParamMaps(lrParamMaps)
.setEvaluator(eval)
.setNumFolds(3)
val cvModel = cv.fit(dataset)
val parent = cvModel.bestModel.parent.asInstanceOf[LinearRegression]
assert(parent.getRegParam === 0.001)
assert(parent.getMaxIter === 10)
assert(cvModel.avgMetrics.length === lrParamMaps.length)
eval.setMetricName("r2")
val cvModel2 = cv.fit(dataset)
val parent2 = cvModel2.bestModel.parent.asInstanceOf[LinearRegression]
assert(parent2.getRegParam === 0.001)
assert(parent2.getMaxIter === 10)
assert(cvModel2.avgMetrics.length === lrParamMaps.length)
}
test("transformSchema should check estimatorParamMaps") {
import CrossValidatorSuite.{MyEstimator, MyEvaluator}
val est = new MyEstimator("est")
val eval = new MyEvaluator
val paramMaps = new ParamGridBuilder()
.addGrid(est.inputCol, Array("input1", "input2"))
.build()
val cv = new CrossValidator()
.setEstimator(est)
.setEstimatorParamMaps(paramMaps)
.setEvaluator(eval)
cv.transformSchema(new StructType()) // This should pass.
val invalidParamMaps = paramMaps :+ ParamMap(est.inputCol -> "")
cv.setEstimatorParamMaps(invalidParamMaps)
intercept[IllegalArgumentException] {
cv.transformSchema(new StructType())
}
}
test("read/write: CrossValidator with simple estimator") {
val lr = new LogisticRegression().setMaxIter(3)
val evaluator = new BinaryClassificationEvaluator()
.setMetricName("areaUnderPR") // not default metric
val paramMaps = new ParamGridBuilder()
.addGrid(lr.regParam, Array(0.1, 0.2))
.build()
val cv = new CrossValidator()
.setEstimator(lr)
.setEvaluator(evaluator)
.setNumFolds(20)
.setEstimatorParamMaps(paramMaps)
val cv2 = testDefaultReadWrite(cv, testParams = false)
assert(cv.uid === cv2.uid)
assert(cv.getNumFolds === cv2.getNumFolds)
assert(cv.getSeed === cv2.getSeed)
assert(cv2.getEvaluator.isInstanceOf[BinaryClassificationEvaluator])
val evaluator2 = cv2.getEvaluator.asInstanceOf[BinaryClassificationEvaluator]
assert(evaluator.uid === evaluator2.uid)
assert(evaluator.getMetricName === evaluator2.getMetricName)
cv2.getEstimator match {
case lr2: LogisticRegression =>
assert(lr.uid === lr2.uid)
assert(lr.getMaxIter === lr2.getMaxIter)
case other =>
throw new AssertionError(s"Loaded CrossValidator expected estimator of type" +
s" LogisticRegression but found ${other.getClass.getName}")
}
ValidatorParamsSuiteHelpers
.compareParamMaps(cv.getEstimatorParamMaps, cv2.getEstimatorParamMaps)
}
test("read/write: CrossValidator with nested estimator") {
val ova = new OneVsRest().setClassifier(new LogisticRegression)
val evaluator = new MulticlassClassificationEvaluator()
.setMetricName("accuracy")
val classifier1 = new LogisticRegression().setRegParam(2.0)
val classifier2 = new LogisticRegression().setRegParam(3.0)
// params that are not JSON serializable must inherit from Params
val paramMaps = new ParamGridBuilder()
.addGrid(ova.classifier, Array(classifier1, classifier2))
.build()
val cv = new CrossValidator()
.setEstimator(ova)
.setEvaluator(evaluator)
.setNumFolds(20)
.setEstimatorParamMaps(paramMaps)
val cv2 = testDefaultReadWrite(cv, testParams = false)
assert(cv.uid === cv2.uid)
assert(cv.getNumFolds === cv2.getNumFolds)
assert(cv.getSeed === cv2.getSeed)
assert(cv2.getEvaluator.isInstanceOf[MulticlassClassificationEvaluator])
val evaluator2 = cv2.getEvaluator.asInstanceOf[MulticlassClassificationEvaluator]
assert(evaluator.uid === evaluator2.uid)
assert(evaluator.getMetricName === evaluator2.getMetricName)
cv2.getEstimator match {
case ova2: OneVsRest =>
assert(ova.uid === ova2.uid)
ova2.getClassifier match {
case lr: LogisticRegression =>
assert(ova.getClassifier.asInstanceOf[LogisticRegression].getMaxIter
=== lr.getMaxIter)
case other =>
throw new AssertionError(s"Loaded CrossValidator expected estimator of type" +
s" LogisticRegression but found ${other.getClass.getName}")
}
case other =>
throw new AssertionError(s"Loaded CrossValidator expected estimator of type" +
s" OneVsRest but found ${other.getClass.getName}")
}
ValidatorParamsSuiteHelpers
.compareParamMaps(cv.getEstimatorParamMaps, cv2.getEstimatorParamMaps)
}
test("read/write: Persistence of nested estimator works if parent directory changes") {
val ova = new OneVsRest().setClassifier(new LogisticRegression)
val evaluator = new MulticlassClassificationEvaluator()
.setMetricName("accuracy")
val classifier1 = new LogisticRegression().setRegParam(2.0)
val classifier2 = new LogisticRegression().setRegParam(3.0)
// params that are not JSON serializable must inherit from Params
val paramMaps = new ParamGridBuilder()
.addGrid(ova.classifier, Array(classifier1, classifier2))
.build()
val cv = new CrossValidator()
.setEstimator(ova)
.setEvaluator(evaluator)
.setNumFolds(20)
.setEstimatorParamMaps(paramMaps)
ValidatorParamsSuiteHelpers.testFileMove(cv, tempDir)
}
test("read/write: CrossValidator with complex estimator") {
// workflow: CrossValidator[Pipeline[HashingTF, CrossValidator[LogisticRegression]]]
val lrEvaluator = new BinaryClassificationEvaluator()
.setMetricName("areaUnderPR") // not default metric
val lr = new LogisticRegression().setMaxIter(3)
val lrParamMaps = new ParamGridBuilder()
.addGrid(lr.regParam, Array(0.1, 0.2))
.build()
val lrcv = new CrossValidator()
.setEstimator(lr)
.setEvaluator(lrEvaluator)
.setEstimatorParamMaps(lrParamMaps)
val hashingTF = new HashingTF()
val pipeline = new Pipeline().setStages(Array(hashingTF, lrcv))
val paramMaps = new ParamGridBuilder()
.addGrid(hashingTF.numFeatures, Array(10, 20))
.addGrid(lr.elasticNetParam, Array(0.0, 1.0))
.build()
val evaluator = new BinaryClassificationEvaluator()
val cv = new CrossValidator()
.setEstimator(pipeline)
.setEvaluator(evaluator)
.setNumFolds(20)
.setEstimatorParamMaps(paramMaps)
val cv2 = testDefaultReadWrite(cv, testParams = false)
assert(cv.uid === cv2.uid)
assert(cv.getNumFolds === cv2.getNumFolds)
assert(cv.getSeed === cv2.getSeed)
assert(cv2.getEvaluator.isInstanceOf[BinaryClassificationEvaluator])
assert(cv.getEvaluator.uid === cv2.getEvaluator.uid)
ValidatorParamsSuiteHelpers
.compareParamMaps(cv.getEstimatorParamMaps, cv2.getEstimatorParamMaps)
cv2.getEstimator match {
case pipeline2: Pipeline =>
assert(pipeline.uid === pipeline2.uid)
pipeline2.getStages match {
case Array(hashingTF2: HashingTF, lrcv2: CrossValidator) =>
assert(hashingTF.uid === hashingTF2.uid)
lrcv2.getEstimator match {
case lr2: LogisticRegression =>
assert(lr.uid === lr2.uid)
assert(lr.getMaxIter === lr2.getMaxIter)
case other =>
throw new AssertionError(s"Loaded internal CrossValidator expected to be" +
s" LogisticRegression but found type ${other.getClass.getName}")
}
assert(lrcv.uid === lrcv2.uid)
assert(lrcv2.getEvaluator.isInstanceOf[BinaryClassificationEvaluator])
assert(lrEvaluator.uid === lrcv2.getEvaluator.uid)
ValidatorParamsSuiteHelpers
.compareParamMaps(lrParamMaps, lrcv2.getEstimatorParamMaps)
case other =>
throw new AssertionError("Loaded Pipeline expected stages (HashingTF, CrossValidator)" +
" but found: " + other.map(_.getClass.getName).mkString(", "))
}
case other =>
throw new AssertionError(s"Loaded CrossValidator expected estimator of type" +
s" CrossValidator but found ${other.getClass.getName}")
}
}
test("read/write: CrossValidator fails for extraneous Param") {
val lr = new LogisticRegression()
val lr2 = new LogisticRegression()
val evaluator = new BinaryClassificationEvaluator()
val paramMaps = new ParamGridBuilder()
.addGrid(lr.regParam, Array(0.1, 0.2))
.addGrid(lr2.regParam, Array(0.1, 0.2))
.build()
val cv = new CrossValidator()
.setEstimator(lr)
.setEvaluator(evaluator)
.setEstimatorParamMaps(paramMaps)
withClue("CrossValidator.write failed to catch extraneous Param error") {
intercept[IllegalArgumentException] {
cv.write
}
}
}
test("read/write: CrossValidatorModel") {
val lr = new LogisticRegression()
.setThreshold(0.6)
val lrModel = new LogisticRegressionModel(lr.uid, Vectors.dense(1.0, 2.0), 1.2)
.setThreshold(0.6)
val evaluator = new BinaryClassificationEvaluator()
.setMetricName("areaUnderPR") // not default metric
val paramMaps = new ParamGridBuilder()
.addGrid(lr.regParam, Array(0.1, 0.2))
.build()
val cv = new CrossValidatorModel("cvUid", lrModel, Array(0.3, 0.6))
cv.set(cv.estimator, lr)
.set(cv.evaluator, evaluator)
.set(cv.numFolds, 20)
.set(cv.estimatorParamMaps, paramMaps)
val cv2 = testDefaultReadWrite(cv, testParams = false)
assert(cv.uid === cv2.uid)
assert(cv.getNumFolds === cv2.getNumFolds)
assert(cv.getSeed === cv2.getSeed)
assert(cv2.getEvaluator.isInstanceOf[BinaryClassificationEvaluator])
val evaluator2 = cv2.getEvaluator.asInstanceOf[BinaryClassificationEvaluator]
assert(evaluator.uid === evaluator2.uid)
assert(evaluator.getMetricName === evaluator2.getMetricName)
cv2.getEstimator match {
case lr2: LogisticRegression =>
assert(lr.uid === lr2.uid)
assert(lr.getThreshold === lr2.getThreshold)
case other =>
throw new AssertionError(s"Loaded CrossValidator expected estimator of type" +
s" LogisticRegression but found ${other.getClass.getName}")
}
ValidatorParamsSuiteHelpers
.compareParamMaps(cv.getEstimatorParamMaps, cv2.getEstimatorParamMaps)
cv2.bestModel match {
case lrModel2: LogisticRegressionModel =>
assert(lrModel.uid === lrModel2.uid)
assert(lrModel.getThreshold === lrModel2.getThreshold)
assert(lrModel.coefficients === lrModel2.coefficients)
assert(lrModel.intercept === lrModel2.intercept)
case other =>
throw new AssertionError(s"Loaded CrossValidator expected bestModel of type" +
s" LogisticRegressionModel but found ${other.getClass.getName}")
}
assert(cv.avgMetrics === cv2.avgMetrics)
}
}
object CrossValidatorSuite extends SparkFunSuite {
abstract class MyModel extends Model[MyModel]
class MyEstimator(override val uid: String) extends Estimator[MyModel] with HasInputCol {
override def fit(dataset: Dataset[_]): MyModel = {
throw new UnsupportedOperationException
}
override def transformSchema(schema: StructType): StructType = {
require($(inputCol).nonEmpty)
schema
}
override def copy(extra: ParamMap): MyEstimator = defaultCopy(extra)
}
class MyEvaluator extends Evaluator {
override def evaluate(dataset: Dataset[_]): Double = {
throw new UnsupportedOperationException
}
override def isLargerBetter: Boolean = true
override val uid: String = "eval"
override def copy(extra: ParamMap): MyEvaluator = defaultCopy(extra)
}
}
|
stanzhai/spark
|
mllib/src/test/scala/org/apache/spark/ml/tuning/CrossValidatorSuite.scala
|
Scala
|
apache-2.0
| 15,149
|
/*
* Copyright (C) 2014 Dennis J. McWherter, Jr.
*
* This software may be modified and distributed under the terms of the MIT License.
*/
package com.deathbytape
import javax.ws.rs.{PathParam, Produces, GET, Path}
import javax.ws.rs.core.{MediaType, Response}
/**
 * Servlet to serve a little interactive HelloWorld for a RESTful API
*/
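// e.g. GET /hello/Alice  ->  200 "Hello Sir/Madam, Alice"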
@Path("/hello")
class HelloServlet {
@GET
@Path("{name}")
@Produces(Array(MediaType.TEXT_HTML))
def helloName(@PathParam("name") name: String): Response = {
name match {
case x if x != null => Response.status(200).entity("Hello Sir/Madam, " + x).build()
case _ => Response.status(200).entity("Please input a name (as PathParam)!").build()
}
}
}
|
DeathByTape/scalaREST
|
src/main/scala/com/deathbytape/HelloServlet.scala
|
Scala
|
mit
| 717
|
/*
* Copyright 2017 WeightWatchers
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.weightwatchers.reactive.kinesis.consumer
import java.util.concurrent.atomic.AtomicBoolean
import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.{
IRecordProcessor,
IShutdownNotificationAware
}
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.{ShutdownReason, Worker}
import com.amazonaws.services.kinesis.clientlibrary.types._
import com.amazonaws.services.kinesis.model.Record
import com.typesafe.scalalogging.LazyLogging
import com.weightwatchers.reactive.kinesis.consumer.ConsumerWorker.{
GracefulShutdown,
ProcessEvents,
ProcessingComplete
}
import com.weightwatchers.reactive.kinesis.models.{CompoundSequenceNumber, ConsumerEvent}
import org.joda.time.{DateTime, DateTimeZone}
import scala.collection.JavaConverters._
import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.util.Try
/**
* Manages the processing of messages for a SPECIFIC SHARD.
* This means we have one instance of this class per shard that we're consuming from.
*
* @param consumerWorker The [[ConsumerWorker]] which handles the processing completion logic and checkpointing (not shared between managers).
* @param batchTimeout The timeout for processing an individual batch.
 * @param shutdownTimeout The timeout applied on shutdown.
*/
private[consumer] class ConsumerProcessingManager(
consumerWorker: ActorRef,
kclWorker: Worker,
implicit val batchTimeout: Timeout,
shutdownTimeout: FiniteDuration = 25.seconds
)(implicit ec: ExecutionContext)
extends IRecordProcessor
with IShutdownNotificationAware
with LazyLogging {
logger.info(
s"Created shard specific ConsumerProcessingManager for worker: ${kclWorker.getApplicationName}:${kclWorker.toString}"
)
private[consumer] var kinesisShardId: String = ""
private[consumer] val shuttingDown = new AtomicBoolean(false)
override def initialize(initializationInput: InitializationInput): Unit = {
logger.info(
"Initializing ConsumerProcessingManager for shard: " + initializationInput.getShardId
)
this.kinesisShardId = initializationInput.getShardId
}
/**
 * Each shard will be processed by its own AWS-level worker, and each will have its own instance of this class.
* Therefore we can assume this method is called by only one concurrent thread (we block to ensure this is the case).
*/
override def processRecords(processRecordsInput: ProcessRecordsInput): Unit = {
def toConsumerEvent(record: Record): ConsumerEvent = {
val userRecord = record.asInstanceOf[UserRecord]
ConsumerEvent(
CompoundSequenceNumber(userRecord.getSequenceNumber, userRecord.getSubSequenceNumber),
userRecord.getData,
new DateTime(userRecord.getApproximateArrivalTimestamp, DateTimeZone.UTC)
)
}
if (shuttingDown.get) {
logger.info(s"Skipping batch due to pending shutdown for shard: $kinesisShardId")
} else {
logger.info(
s"Processing ${processRecordsInput.getRecords.size()} records from shard: $kinesisShardId"
)
// Process records and perform all exception handling.
val allRecordsProcessedFut: Future[ProcessingComplete] =
(consumerWorker ? ProcessEvents(
processRecordsInput.getRecords.asScala.map(toConsumerEvent),
processRecordsInput.getCheckpointer,
kinesisShardId
)).mapTo[ProcessingComplete]
// Not great, but we don't have another option, we need to block!
// The AWS library has already wrapped this in a task and if we fire-and-forget the processing of records,
// we'll eventually run out of resources as this will be called continually according to the `idleTimeBetweenReadsInMillis` setting.
Try {
logger.debug(s"Consumer Awaiting batch result from worker, timeout is $batchTimeout")
val allRecordsProcessed: ProcessingComplete =
Await.result(allRecordsProcessedFut, batchTimeout.duration)
if (!allRecordsProcessed.successful) {
closeManager()
}
} recover {
case exception =>
logger.error(s"Unexpected Error processing records, shutting down worker", exception)
closeManager()
}
}
}
override def shutdownRequested(checkpointer: IRecordProcessorCheckpointer): Unit = {
logger.info(s"Graceful shutdown requested for record processor of shard: $kinesisShardId.")
shutdown(checkpointer)
}
override def shutdown(shutdownInput: ShutdownInput): Unit = {
if (shutdownInput.getShutdownReason == ShutdownReason.TERMINATE) {
logger.info(
s"Shutdown record processor for shard: $kinesisShardId. Reason: ${shutdownInput.getShutdownReason}. Forcing checkpoint."
)
shutdownInput.getCheckpointer.checkpoint()
} else
logger.info(
s"Shutdown record processor for shard: $kinesisShardId. Reason: ${shutdownInput.getShutdownReason}"
)
shutdown(shutdownInput.getCheckpointer)
}
private[consumer] def shutdown(checkpointer: IRecordProcessorCheckpointer): Unit = {
if (shuttingDown.compareAndSet(false, true)) {
logger.info(s"*** Shutting down record processor for shard: $kinesisShardId ***")
Try {
Await.result(consumerWorker ? GracefulShutdown(checkpointer), shutdownTimeout)
} recover {
case exception =>
logger.error(
s"Unexpected exception on shutdown, final checkpoint attempt may have failed",
exception
)
}
} else {
logger.warn(s"Shutdown already initiated for record processor of shard: $kinesisShardId.")
}
}
private[consumer] def closeManager(): Unit = {
val canCloseManager = shuttingDown.compareAndSet(false, true)
if (canCloseManager) {
Future(kclWorker.startGracefulShutdown()) //Needs to be async otherwise we hog the processRecords thread
}
}
}
|
WW-Digital/reactive-kinesis
|
src/main/scala/com/weightwatchers/reactive/kinesis/consumer/ConsumerProcessingManager.scala
|
Scala
|
apache-2.0
| 6,733
|
package com.factor10.plugins
trait FormattingPlugin {
def name: String
def convert(data: Map[String, Any]): String
}
// class XmlFormatter extends FormattingPlugin {
// def name = "XmlFormatter Plugin"
// def convert(data: Map[String, Any]): String = {
// "<some xml=true />"
// }
// }
|
marhel/splug
|
plugins/xml/src/main/scala/formattingPlugin.scala
|
Scala
|
mit
| 296
|
package com.github.tminglei.slickpg
import org.scalatest.FunSuite
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global // needed by DBIO.map below
import scala.concurrent.duration._
class PgInheritsSuite extends FunSuite {
import ExPostgresDriver.api._
val db = Database.forURL(url = dbUrl, driver = "org.postgresql.Driver")
abstract class BaseT[T](tag: Tag, tname: String = "test_tab1") extends Table[T](tag, tname) {
def col1 = column[String]("COL1")
def col2 = column[String]("COL2")
def col3 = column[String]("COL3")
def col4 = column[Int]("COL4", O.PrimaryKey)
}
case class Tab1(col1: String, col2: String, col3: String, col4: Int)
class Tabs1(tag: Tag) extends BaseT[Tab1](tag, "test_tab1") {
def * = (col1, col2, col3, col4) <> (Tab1.tupled, Tab1.unapply)
}
val tabs1 = TableQuery(new Tabs1(_))
///
case class Tab2(col1: String, col2: String, col3: String, col4: Int, col5: Long)
class Tabs2(tag: Tag) extends BaseT[Tab2](tag, "test_tab2") with InheritingTable {
val inherited = tabs1.baseTableRow
def col5 = column[Long]("col5")
def * = (col1, col2, col3, col4, col5) <> (Tab2.tupled, Tab2.unapply)
}
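  // With InheritingTable the emitted DDL should be roughly (Postgres table inheritance):
  //   CREATE TABLE "test_tab2" (..., "col5" BIGINT) INHERITS ("test_tab1")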
val tabs2 = TableQuery(new Tabs2(_))
test("Inherits support") {
Await.result(db.run(
DBIO.seq(
(tabs1.schema ++ tabs2.schema) create,
///
tabs1 ++= Seq(
Tab1("foo", "bar", "bat", 1),
Tab1("foo", "bar", "bat", 2),
Tab1("foo", "quux", "bat", 3),
Tab1("baz", "quux", "bat", 4)
),
tabs2 ++= Seq(
Tab2("plus", "bar", "bat", 5, 101),
Tab2("plus", "quux", "bat", 6, 102)
)
).andThen(
DBIO.seq(
tabs1.sortBy(_.col4).to[List].result.map(
r => assert(Seq(
Tab1("foo", "bar", "bat", 1),
Tab1("foo", "bar", "bat", 2),
Tab1("foo", "quux", "bat", 3),
Tab1("baz", "quux", "bat", 4),
Tab1("plus", "bar", "bat", 5),
Tab1("plus", "quux", "bat", 6)
) === r)
),
tabs2.sortBy(_.col4).to[List].result.map(
r => assert(Seq(
Tab2("plus", "bar", "bat", 5, 101),
Tab2("plus", "quux", "bat", 6, 102)
) === r)
)
)
).andFinally(
(tabs1.schema ++ tabs2.schema) drop
).transactionally
), Duration.Inf)
}
}
|
bearrito/slick-pg
|
src/test/scala/com/github/tminglei/slickpg/PgInheritsSuite.scala
|
Scala
|
bsd-2-clause
| 2,376
|
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v2
import uk.gov.hmrc.ct.box.{Calculated, CtBigDecimal, CtBoxIdentifier}
import uk.gov.hmrc.ct.ct600.v2.calculations.CorporationTaxCalculator
import uk.gov.hmrc.ct.ct600.v2.retriever.CT600BoxRetriever
case class B65(value: BigDecimal) extends CtBoxIdentifier("Corporation Tax net of MRR") with CtBigDecimal
object B65 extends CorporationTaxCalculator with Calculated[B65, CT600BoxRetriever]{
override def calculate(fieldValueRetriever: CT600BoxRetriever): B65 =
corporationTaxNetOfMrr(fieldValueRetriever.b46(),
fieldValueRetriever.b56(),
fieldValueRetriever.b64()
)
}
|
hmrc/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/ct600/v2/B65.scala
|
Scala
|
apache-2.0
| 1,271
|
package controllers
import play.api._
import play.api.mvc._
import models.Paste
import java.util.UUID
import models.PasteMapDAO
import models.PasteElastic4sDAO
object Application extends Controller {
// val dao = PasteMapDAO()
val dao = PasteElastic4sDAO()
def paste = Action {
Ok(views.html.paste())
}
def index(content: Option[String], id: Option[String]) = Action {
val paste = dao.getOrCreate(id, content)
Ok(views.html.index("Your new application is ready.", paste))
}
}
|
scalaftb-dubai/paste
|
app/controllers/Application.scala
|
Scala
|
apache-2.0
| 507
|
package composition.webserviceclients.vrmretentioneligibility
import com.tzavellas.sse.guice.ScalaModule
import org.mockito.Matchers.any
import org.mockito.Mockito.when
import org.scalatest.mock.MockitoSugar
import scala.concurrent.Future
import uk.gov.dvla.vehicles.presentation.common.clientsidesession.TrackingId
import webserviceclients.vrmretentioneligibility.VRMRetentionEligibilityRequest
import webserviceclients.vrmretentioneligibility.VRMRetentionEligibilityWebService
final class EligibilityWebServiceCallFails extends ScalaModule with MockitoSugar {
val stub = {
val webService = mock[VRMRetentionEligibilityWebService]
when(webService.invoke(any[VRMRetentionEligibilityRequest], any[TrackingId]))
.thenReturn(
Future.failed(
new RuntimeException("This error is generated deliberately by a stub for VehicleAndKeeperLookupWebService")
)
)
webService
}
def configure() = bind[VRMRetentionEligibilityWebService].toInstance(stub)
}
|
dvla/vrm-retention-online
|
test/composition/webserviceclients/vrmretentioneligibility/EligibilityWebServiceCallFails.scala
|
Scala
|
mit
| 999
|
package org.http4s
import cats._
import cats.implicits._
import scala.util.control.{NoStackTrace, NonFatal}
/** Indicates a failure to handle an HTTP [[Message]]. */
trait MessageFailure extends RuntimeException {
/** Provides a message appropriate for logging. */
def message: String
/* Overridden for sensible logging of the failure */
final override def getMessage: String = message
def cause: Option[Throwable]
final override def getCause = cause.orNull
/** Provides a default rendering of this failure as a [[Response]]. */
def toHttpResponse[F[_]](httpVersion: HttpVersion)(implicit F: Monad[F]): F[Response[F]]
}
/**
* Indicates an error parsing an HTTP [[Message]].
*
* @param sanitized May safely be displayed to a client to describe an error
* condition. Should not echo any part of a Request.
* @param details Contains any relevant details omitted from the sanitized
* version of the error. This may freely echo a Request.
*/
final case class ParseFailure(sanitized: String, details: String)
extends MessageFailure
with NoStackTrace {
def message: String =
if (sanitized.isEmpty) details
else if (details.isEmpty) sanitized
else s"$sanitized: $details"
def cause: Option[Throwable] = None
def toHttpResponse[F[_]](httpVersion: HttpVersion)(implicit F: Monad[F]): F[Response[F]] =
Response[F](Status.BadRequest, httpVersion)
.withBody(sanitized)(F, EntityEncoder.stringEncoder[F])
}
object ParseFailure {
implicit val eq = Eq.fromUniversalEquals[ParseFailure]
}
object ParseResult {
def fail(sanitized: String, details: String): ParseResult[Nothing] =
Either.left(ParseFailure(sanitized, details))
def success[A](a: A): ParseResult[A] =
Either.right(a)
def fromTryCatchNonFatal[A](sanitized: String)(f: => A): ParseResult[A] =
try ParseResult.success(f)
catch {
case NonFatal(e) => Either.left(ParseFailure(sanitized, e.getMessage))
}
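  // Illustrative: fromTryCatchNonFatal("Invalid port")("x".toInt)
  //   == Left(ParseFailure("Invalid port", "For input string: \"x\""))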
implicit val parseResultMonad: MonadError[ParseResult, ParseFailure] =
catsStdInstancesForEither[ParseFailure]
}
/** Indicates a problem decoding a [[Message]]. This may either be a problem with
* the entity headers or with the entity itself. */
trait DecodeFailure extends MessageFailure
/** Indicates a problem decoding a [[Message]] body. */
trait MessageBodyFailure extends DecodeFailure
/** Indicates a syntactic error decoding the body of an HTTP [[Message]]. */
final case class MalformedMessageBodyFailure(details: String, cause: Option[Throwable] = None)
extends MessageBodyFailure {
def message: String =
s"Malformed message body: $details"
def toHttpResponse[F[_]](httpVersion: HttpVersion)(implicit F: Monad[F]): F[Response[F]] =
Response[F](Status.BadRequest, httpVersion)
.withBody(s"The request body was malformed.")(F, EntityEncoder.stringEncoder[F])
}
/** Indicates a semantic error decoding the body of an HTTP [[Message]]. */
final case class InvalidMessageBodyFailure(details: String, cause: Option[Throwable] = None)
extends MessageBodyFailure {
def message: String =
s"Invalid message body: $details"
override def toHttpResponse[F[_]](httpVersion: HttpVersion)(
implicit F: Monad[F]): F[Response[F]] =
Response[F](Status.UnprocessableEntity, httpVersion)
.withBody(s"The request body was invalid.")(F, EntityEncoder.stringEncoder[F])
}
/** Indicates that a [[Message]] came with no supported [[MediaType]]. */
sealed abstract class UnsupportedMediaTypeFailure extends DecodeFailure with NoStackTrace {
def expected: Set[MediaRange]
def cause: Option[Throwable] = None
protected def sanitizedResponsePrefix: String
protected def expectedMsg: String =
s"Expected one of the following media ranges: ${expected.map(_.renderString).mkString(", ")}"
protected def responseMsg: String = s"$sanitizedResponsePrefix. $expectedMsg"
def toHttpResponse[F[_]](httpVersion: HttpVersion)(implicit F: Monad[F]): F[Response[F]] =
Response[F](Status.UnsupportedMediaType, httpVersion)
.withBody(responseMsg)(F, EntityEncoder.stringEncoder[F])
}
/** Indicates that a [[Message]] attempting to be decoded has no [[MediaType]] and no
* [[EntityDecoder]] was lenient enough to accept it. */
final case class MediaTypeMissing(expected: Set[MediaRange]) extends UnsupportedMediaTypeFailure {
def sanitizedResponsePrefix: String = "No media type specified in Content-Type header"
def message: String = responseMsg
}
/** Indicates that no [[EntityDecoder]] matches the [[MediaType]] of the [[Message]] being decoded */
final case class MediaTypeMismatch(messageType: MediaType, expected: Set[MediaRange])
extends UnsupportedMediaTypeFailure {
def sanitizedResponsePrefix: String =
"Media type supplied in Content-Type header is not supported"
def message: String = s"${messageType.renderString} is not a supported media type. $expectedMsg"
}
|
reactormonk/http4s
|
core/src/main/scala/org/http4s/MessageFailure.scala
|
Scala
|
apache-2.0
| 4,950
|
package org.jetbrains.plugins.scala
package lang.refactoring.rename.inplace
import com.intellij.internal.statistic.UsageTrigger
import com.intellij.openapi.actionSystem.DataContext
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.project.Project
import com.intellij.psi._
import com.intellij.psi.search.LocalSearchScope
import com.intellij.refactoring.rename.inplace.{InplaceRefactoring, MemberInplaceRenameHandler, MemberInplaceRenamer}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTypeDefinition
import org.jetbrains.plugins.scala.lang.psi.light.PsiClassWrapper
/**
* Nikolay.Tropin
* 6/20/13
*/
class ScalaMemberInplaceRenameHandler extends MemberInplaceRenameHandler with ScalaInplaceRenameHandler {
override def isAvailable(element: PsiElement, editor: Editor, file: PsiFile): Boolean = {
val processor = renameProcessor(element)
editor.getSettings.isVariableInplaceRenameEnabled && processor != null && processor.canProcessElement(element) &&
!element.getUseScope.isInstanceOf[LocalSearchScope]
}
override def invoke(project: Project, editor: Editor, file: PsiFile, dataContext: DataContext): Unit = {
UsageTrigger.trigger(ScalaBundle.message("rename.member.id"))
super.invoke(project, editor, file, dataContext)
}
protected override def createMemberRenamer(substituted: PsiElement,
elementToRename: PsiNameIdentifierOwner,
editor: Editor): MemberInplaceRenamer = {
val (maybeFirstElement, maybeSecondElement) = substituted match {
      case _: PsiClass if elementToRename.isInstanceOf[PsiClassWrapper] =>
(None, None)
case definition: ScTypeDefinition =>
(Some(definition), definition.baseCompanionModule)
case clazz: PsiClass =>
(Some(clazz), None)
      case _: PsiNamedElement =>
(None, None)
case _ => throw new IllegalArgumentException("Substituted element for renaming has no name")
}
new ScalaMemberInplaceRenamer(maybeFirstElement.getOrElse(elementToRename),
maybeSecondElement.getOrElse(substituted),
editor)
}
override def doRename(elementToRename: PsiElement, editor: Editor, dataContext: DataContext): InplaceRefactoring = {
afterElementSubstitution(elementToRename, editor, dataContext) {
super.doRename(_, editor, dataContext)
}
}
}
|
loskutov/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/refactoring/rename/inplace/ScalaMemberInplaceRenameHandler.scala
|
Scala
|
apache-2.0
| 2,441
|
package org.algorithms
import org.scalatest.{BeforeAndAfter, FunSuite, Matchers}
import scala.io.Source._
class DistanceCalcTest extends FunSuite with Matchers with BeforeAndAfter {
var sdc = new DistanceCalc
before {
sdc = new DistanceCalc
}
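  // These exercise an Advent-of-Code-style puzzle: the shortest/longest route that
  // visits every city exactly once. E.g. below, b -> a -> d -> c = 10 + 5 + 10 = 25.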
test("calculate shortest path a -> d") {
sdc.add("a to b = 10")
sdc.add("b to c = 20")
sdc.add("a to d = 5")
sdc.add("d to c = 10")
sdc.shortestDistance() shouldBe 25
}
test("shortest distance between cities") {
sdc.add("London to Dublin = 464")
sdc.add("London to Belfast = 518")
sdc.add("Dublin to Belfast = 141")
sdc.shortestDistance() shouldBe 605
}
test("longest distance between cities") {
sdc.add("London to Dublin = 464")
sdc.add("London to Belfast = 518")
sdc.add("Dublin to Belfast = 141")
sdc.longestDistance() shouldBe 982
}
test("for given data") {
fromFile("advent-code/src/main/resources/distances").getLines().foreach(l => sdc.add(l))
sdc.longestDistance() shouldBe 804
}
}
|
Alex-Diez/Scala-Algorithms
|
advent-code/src/test/scala/org/algorithms/DistanceCalcTest.scala
|
Scala
|
mit
| 1,118
|
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.scenario
import scala.concurrent.duration.{ Duration, FiniteDuration }
import io.gatling.commons.stats.assertion.Assertion
import io.gatling.core.CoreComponents
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.controller.throttle.{ ThrottleStep, Throttling, Throttlings }
import io.gatling.core.pause._
import io.gatling.core.protocol.{ Protocol, ProtocolComponentsRegistries, Protocols }
import io.gatling.core.session.Expression
import io.gatling.core.structure.PopulationBuilder
import akka.actor.ActorSystem
abstract class Simulation {
private var _populationBuilders: List[PopulationBuilder] = Nil
private var _globalProtocols: Protocols = Protocols()
private var _assertions = Seq.empty[Assertion]
private var _maxDuration: Option[FiniteDuration] = None
private var _globalPauseType: PauseType = Constant
private var _globalThrottleSteps: Iterable[ThrottleStep] = Nil
private var _beforeSteps: List[() => Unit] = Nil
private var _afterSteps: List[() => Unit] = Nil
def before(step: => Unit): Unit =
_beforeSteps = _beforeSteps ::: List(() => step)
def setUp(populationBuilders: PopulationBuilder*): SetUp = setUp(populationBuilders.toList)
def setUp(populationBuilders: List[PopulationBuilder]): SetUp = {
if (_populationBuilders.nonEmpty)
throw new UnsupportedOperationException("setUp can only be called once")
_populationBuilders = populationBuilders
new SetUp
}
def after(step: => Unit): Unit =
_afterSteps = _afterSteps ::: List(() => step)
class SetUp {
def protocols(ps: Protocol*): SetUp = protocols(ps.toIterable)
def protocols(ps: Iterable[Protocol]): SetUp = {
_globalProtocols = _globalProtocols ++ ps
this
}
def assertions(asserts: Assertion*): SetUp = assertions(asserts.toIterable)
def assertions(asserts: Iterable[Assertion]): SetUp = {
_assertions = _assertions ++ asserts
this
}
def maxDuration(duration: FiniteDuration): SetUp = {
_maxDuration = Some(duration)
this
}
def throttle(throttleSteps: ThrottleStep*): SetUp = throttle(throttleSteps.toIterable)
def throttle(throttleSteps: Iterable[ThrottleStep]): SetUp = {
_globalThrottleSteps = throttleSteps
this
}
def disablePauses = pauses(Disabled)
def constantPauses = pauses(Constant)
def exponentialPauses = pauses(Exponential)
def customPauses(custom: Expression[Long]) = pauses(Custom(custom))
def uniformPauses(plusOrMinus: Double) = pauses(UniformPercentage(plusOrMinus))
def uniformPauses(plusOrMinus: Duration) = pauses(UniformDuration(plusOrMinus))
def pauses(pauseType: PauseType): SetUp = {
_globalPauseType = pauseType
this
}
}
private def resolvePopulationBuilders(populationBuilders: List[PopulationBuilder], configuration: GatlingConfiguration): List[PopulationBuilder] =
configuration.resolve(
// [fl]
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
// [fl]
_populationBuilders
)
private def resolveThrottleSteps(steps: Iterable[ThrottleStep], configuration: GatlingConfiguration): Iterable[ThrottleStep] =
configuration.resolve(
// [fl]
//
//
//
//
//
//
//
//
//
//
//
//
//
//
// [fl]
steps
)
private[gatling] def params(configuration: GatlingConfiguration): SimulationParams = {
require(_populationBuilders.nonEmpty, "No scenario set up")
val duplicates = _populationBuilders.groupBy(_.scenarioBuilder.name).collect { case (name, scns) if scns.size > 1 => name }
require(duplicates.isEmpty, s"Scenario names must be unique but found duplicates: $duplicates")
_populationBuilders.foreach(scn => require(scn.scenarioBuilder.actionBuilders.nonEmpty, s"Scenario ${scn.scenarioBuilder.name} is empty"))
val populationBuilders = resolvePopulationBuilders(_populationBuilders, configuration)
val scenarioThrottlings: Map[String, Throttling] = populationBuilders.flatMap { scn =>
val steps = resolveThrottleSteps(scn.scenarioThrottleSteps, configuration)
if (steps.isEmpty) {
None
} else {
Some(scn.scenarioBuilder.name -> Throttling(steps))
}
}.toMap
val globalThrottling =
if (_globalThrottleSteps.isEmpty) {
None
} else {
Some(Throttling(resolveThrottleSteps(_globalThrottleSteps, configuration)))
}
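    // The effective run cap is the smallest of: the explicit maxDuration, the global
    // throttle duration, and every per-scenario throttle duration; None if none are set.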
val maxDuration = {
val globalThrottlingMaxDuration = globalThrottling.map(_.duration)
val scenarioThrottlingMaxDurations = scenarioThrottlings.values.map(_.duration).toList
_maxDuration.map(List(_)).getOrElse(Nil) ::: globalThrottlingMaxDuration.map(List(_)).getOrElse(Nil) ::: scenarioThrottlingMaxDurations match {
case Nil => None
case nel => Some(nel.min)
}
}
SimulationParams(
getClass.getName,
populationBuilders,
_globalProtocols,
_globalPauseType,
Throttlings(globalThrottling, scenarioThrottlings),
maxDuration,
_assertions
)
}
private[gatling] def executeBefore(): Unit = _beforeSteps.foreach(_.apply())
private[gatling] def executeAfter(): Unit = _afterSteps.foreach(_.apply())
}
case class SimulationParams(
name: String,
populationBuilders: List[PopulationBuilder],
globalProtocols: Protocols,
globalPauseType: PauseType,
throttlings: Throttlings,
maxDuration: Option[FiniteDuration],
assertions: Seq[Assertion]
) {
def scenarios(system: ActorSystem, coreComponents: CoreComponents): List[Scenario] = {
val protocolComponentsRegistries = new ProtocolComponentsRegistries(system, coreComponents, globalProtocols)
populationBuilders.map(_.build(system, coreComponents, protocolComponentsRegistries, globalPauseType, throttlings.global))
}
}
|
wiacekm/gatling
|
gatling-core/src/main/scala/io/gatling/core/scenario/Simulation.scala
|
Scala
|
apache-2.0
| 6,730
|
package net.liftmodules.staticsitemap.path
import net.liftweb.common.{Full, Empty}
import net.liftweb.util.Helpers._
trait PathUtils {
/**
* Split the path and query.
* @param url
* @return A tuple of optional path and query strings
*/
def splitPathAndQuery(url: String): (String, String) =
    url.split("\\?", 2).toList match {
case Nil => ("", "")
case path :: Nil => (path, "")
case path :: queryString :: Nil => (path, queryString)
case _ => throw new IllegalStateException("Url was split into more than two parts for the path and query string.")
}
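  // Illustrative: splitPathAndQuery("/a/b?x=1") == ("/a/b", "x=1")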
def takePath(url: String): String = splitPathAndQuery(url)._1
def takeQuery(url: String): String = splitPathAndQuery(url)._2
def splitPath(url: String): List[String] = {
takePath(url) match {
case "" => Nil
case path => path.split('/').filterNot(_.isEmpty).toList
}
}
/**
* Extract all parameters from the URL.
*
* Cribbed from Lift's Req.scala
*
* @param url The full url to parse
* @return a map of parameter name to list of values for that parameter
*/
def splitParams(url: String): Map[String, List[String]] = {
val queryString = takeQuery(url)
val params: List[(String, String)] = for {
nameVal <- queryString.split("&").toList.map(_.trim).filter(_.length > 0)
(name, value) <- nameVal.split("=").toList match {
case Nil => Empty
case n :: v :: _ => Full((urlDecode(n), urlDecode(v)))
case n :: _ => Full((urlDecode(n), ""))
}} yield (name, value)
val nvp: Map[String, List[String]] = params.foldLeft(Map[String, List[String]]()) {
case (map, (name, value)) => map + (name -> (map.getOrElse(name, Nil) ::: List(value)))
}
nvp
}
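  // Illustrative: splitParams("/p?x=1&x=2&y=3") == Map("x" -> List("1", "2"), "y" -> List("3"))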
def mkFullPath(urlParts: Seq[String]): String =
urlParts.mkString("/", "/", "")
}
object PathUtils extends PathUtils
|
jeffmay/lift-staticsitemap
|
src/main/scala/net/liftmodules/staticsitemap/path/PathUtils.scala
|
Scala
|
apache-2.0
| 1,885
|
/**
* Bolt
* ExpressionValue
*
* Copyright (c) 2017 Osamu Takahashi
*
* This software is released under the MIT License.
* http://opensource.org/licenses/mit-license.php
*
* @author Osamu Takahashi
*/
package com.sopranoworks.bolt.values
/**
*
* @param op operator
* @param left left expression value
* @param right right expression value
*/
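// Illustrative (assuming IntValue evaluates to itself, mirroring the
// IntValue(text, value, evaluated) constructor calls below):
//   ExpressionValue("+", IntValue("1", 1, true), IntValue("2", 2, true)).eval
//   caches IntValue("3", 3, true) in _ref and returns this.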
case class ExpressionValue(op:String,left:Value,right:Value) extends WrappedValue {
override def text = (_ref,left,right) match {
case (Some(r),_,_) => r.text
    case (_, _, null) => s"($op ${left.text})"
case _ => s"(${left.text} $op ${right.text})"
}
override def eval: Value = {
if (_ref.isEmpty) {
val l = left.eval.stayUnresolved
val r = if (right != null) right.eval.stayUnresolved else false
_stayUnresolved = l || r
if (_stayUnresolved) {
return this
}
(op, left.eval.asValue, if (right != null) right.eval.asValue else right) match {
case ("-", l:IntValue, null) =>
val v = -l.value
_ref = Some(IntValue(v.toString,v,true))
case ("-", l:DoubleValue, null) =>
val v = -l.value
_ref = Some(DoubleValue(v.toString,v,true))
case ("~", l:IntValue, null) =>
val v = ~l.value
_ref = Some(IntValue(v.toString,v,true))
case ("+", l:IntValue, r:IntValue) =>
val res = l.value + r.value
_ref = Some(IntValue(res.toString,res,true))
case ("+", l:DoubleValue, r:DoubleValue) =>
val res = l.value + r.value
_ref = Some(DoubleValue(res.toString,res,true))
case ("+", l:IntValue, r:DoubleValue) =>
val res = l.value + r.value
_ref = Some(DoubleValue(res.toString,res,true))
case ("+", l:DoubleValue, r:IntValue) =>
val res = l.value + r.value
_ref = Some(DoubleValue(res.toString,res,true))
case ("+", NullValue, _) | ("+", _, NullValue) =>
_ref = Some(NullValue)
case ("-", l:IntValue, r:IntValue) =>
val res = l.value - r.value
_ref = Some(IntValue(res.toString,res,true))
case ("-", l:DoubleValue, r:DoubleValue) =>
val res = l.value - r.value
_ref = Some(DoubleValue(res.toString,res,true))
case ("-", l:IntValue, r:DoubleValue) =>
val res = l.value - r.value
_ref = Some(DoubleValue(res.toString,res,true))
case ("-", l:DoubleValue, r:IntValue) =>
val res = l.value - r.value
_ref = Some(DoubleValue(res.toString,res,true))
case ("-", NullValue, _) | ("-", _, NullValue) =>
_ref = Some(NullValue)
case ("*", l:IntValue, r:IntValue) =>
val res = l.value * r.value
_ref = Some(IntValue(res.toString,res,true))
case ("*", l:DoubleValue, r:DoubleValue) =>
val res = l.value * r.value
_ref = Some(DoubleValue(res.toString,res,true))
case ("*", l:IntValue, r:DoubleValue) =>
val res = l.value * r.value
_ref = Some(DoubleValue(res.toString,res,true))
case ("*", l:DoubleValue, r:IntValue) =>
val res = l.value * r.value
_ref = Some(DoubleValue(res.toString,res,true))
case ("*", NullValue, _) | ("*", _, NullValue) =>
_ref = Some(NullValue)
case ("/", l:IntValue, r:IntValue) =>
if (r.value == 0) throw new RuntimeException("Zero divide")
val res = l.value / r.value
_ref = Some(IntValue(res.toString,res,true))
case ("/", l:DoubleValue, r:DoubleValue) =>
if (r.value == 0) throw new RuntimeException("Zero divide")
val res = l.value / r.value
_ref = Some(DoubleValue(res.toString,res,true))
case ("/", l:IntValue, r:DoubleValue) =>
if (r.value == 0) throw new RuntimeException("Zero divide")
val res = l.value / r.value
_ref = Some(DoubleValue(res.toString,res,true))
case ("/", l:DoubleValue, r:IntValue) =>
if (r.value == 0) throw new RuntimeException("Zero divide")
val res = l.value / r.value
_ref = Some(DoubleValue(res.toString,res,true))
case ("/", NullValue, _) | ("/", _, NullValue) =>
_ref = Some(NullValue)
case ("<<", l:IntValue, r:IntValue) =>
val res = l.value << r.value
_ref = Some(IntValue(res.toString,res,true))
case (">>", l:IntValue, r:IntValue) =>
val res = l.value >>> r.value
_ref = Some(IntValue(res.toString,res,true))
case ("|", l:IntValue, r:IntValue) =>
val res = l.value | r.value
_ref = Some(IntValue(res.toString,res,true))
case ("&", l:IntValue, r:IntValue) =>
val res = l.value & r.value
_ref = Some(IntValue(res.toString,res,true))
case ("^", l:IntValue, r:IntValue) =>
val res = l.value ^ r.value
_ref = Some(IntValue(res.toString,res,true))
case (_,l,r) =>
throw new RuntimeException(s"Invalid operator between the types:$l $op $r")
}
}
this
}
override def invalidateEvaluatedValueIfContains(values:List[Value]):Boolean = {
if ((if (left != null) left.invalidateEvaluatedValueIfContains(values) else false) ||
(if (right != null) right.invalidateEvaluatedValueIfContains(values) else false)) {
_ref = None
true
} else false
}
override def resolveReference(columns:Map[String,TableColumnValue] = Map.empty[String,TableColumnValue]): Map[String,TableColumnValue] = {
val lc = if (left != null) left.resolveReference(columns) else columns
if (right != null) right.resolveReference(lc) else lc
}
}
|
OsamuTakahashi/bolt
|
src/main/scala/com/sopranoworks/bolt/values/ExpressionValue.scala
|
Scala
|
mit
| 5,729
|
import org.joda.time.DateTime
package object models {
case class Tag(_id: String, total: Long, accepted: Long, rate: Double, top: Long, updated: DateTime)
object JsonFormats {
import play.api.libs.json.Json
import play.api.data._
import play.api.data.Forms._
implicit val tagFormat = Json.format[Tag]
}
}
|
EugenyLoy/StackOverflowBadge
|
app/models/package.scala
|
Scala
|
mit
| 335
|
/*
* Copyright 2016 Coursera Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.coursera.naptime.path
/**
* An HList for parsers, usually built up automatically via the helper traits (TODO: FILL IN LINKS)
*
* @tparam ParseAsType The type this HList of parsers will parse a URL into.
*/
sealed trait PathKeyParser[+ParseAsType <: ParsedPathKey] {
def parse(path: String): UrlParseResult[ParseAsType]
/**
* Concatenates a new parser on the front of the chain (nesting that parser underneath the others)
* @param resourcePath The resource path (and parse information) for the new resource.
* @tparam H The new resource has path parameters of this type.
* @tparam TT This type parameter exists to work around the covariance/contravariance issues with
* the return type of this function.
* @return The new chain.
*/
def ::[H, TT >: ParseAsType <: ParsedPathKey](
resourcePath: ResourcePathParser[H]): NestedPathKeyParser[H, TT] =
new NestedPathKeyParser[H, TT](resourcePath, this)
}
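// Illustrative (hypothetical parsers, not defined here): in a chain built as
//   inner :: outer :: RootPathParser
// `parse` consumes the URL outermost-first: `outer` matches the start of the
// path, then `inner` matches whatever remains.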
/**
* HCons for parsers - includes an extra type parameter because we care about both the type we will
* parse as, as well as the type of the chain itself.
*
* @param head All information needed to parse from a URL something of the type H
* @param tail The rest of the parser chain.
* @tparam H The type the head of the chain will parse as.
* @tparam TailParseAsType The tail of the chain will parse as this type
*/
final case class NestedPathKeyParser[+H, TailParseAsType <: ParsedPathKey](
head: ResourcePathParser[H],
tail: PathKeyParser[TailParseAsType])
extends PathKeyParser[H ::: TailParseAsType] {
override def toString = s"$head :: $tail"
override def parse(path: String): UrlParseResult[H ::: TailParseAsType] = {
parseFinalLevel(path) match {
case ParseSuccess(restOfUrl, Some(h) ::: rest) =>
ParseSuccess(restOfUrl, h ::: rest)
case _ => ParseFailure
}
}
def parseFinalLevel(path: String): UrlParseResult[Option[H] ::: TailParseAsType] = {
tail.parse(path) match {
case ParseFailure => ParseFailure
case ParseSuccess(None, _) => ParseFailure // Nothing left to parse; fail.
case ParseSuccess(Some(urlThroughParent), tailParse) =>
head.parseOptUrl(urlThroughParent) match {
case ParseFailure => ParseFailure
case ParseSuccess(restOfUrl, elem) =>
ParseSuccess(restOfUrl, new :::(elem, tailParse))
}
}
}
}
/**
* The base / end of the PathKey HList-like datastructure.
*/
sealed trait RootPathParser extends PathKeyParser[RootParsedPathKey] {
override def parse(path: String): UrlParseResult[RootParsedPathKey] =
ParseSuccess(Some(path), RootParsedPathKey)
}
case object RootPathParser extends RootPathParser
|
coursera/naptime
|
naptime/src/main/scala/org/coursera/naptime/path/PathKeyParser.scala
|
Scala
|
apache-2.0
| 3,335
|
package providers
import models.JsonHelper._
import models.Location
import play.api.libs.concurrent.Execution.Implicits._
import play.api.libs.ws.{Response, WS}
import scala.concurrent.Future
trait LocationProvider {
def getLocations(address: String): Future[Seq[Location]] = {
WS.url(config("maps.api"))
.withQueryString("address" -> address, "sensor" -> "false")
.get()
.map(response => responseToLocations(response))
}
  private def responseToLocations(response: Response): Seq[Location] = (response.json \ "results").as[Seq[Location]]
}
|
callistaenterprise/async-weather
|
app/providers/LocationProvider.scala
|
Scala
|
mit
| 571
|
// imports assumed from Play WS, which this standalone file relies on
import play.api.libs.oauth.{ ConsumerKey, RequestToken }
import play.api.libs.ws.WSSignatureCalculator
class OAuthPlainTextCalculator(consumerKey: ConsumerKey, requestToken: RequestToken) extends WSSignatureCalculator with com.ning.http.client.SignatureCalculator {
import com.ning.http.client.{ Request, RequestBuilderBase }
import com.ning.http.util.UTF8UrlEncoder
import com.ning.http.util.Base64
val HEADER_AUTHORIZATION = "Authorization"
val KEY_OAUTH_CONSUMER_KEY = "oauth_consumer_key"
val KEY_OAUTH_NONCE = "oauth_nonce"
val KEY_OAUTH_SIGNATURE = "oauth_signature"
val KEY_OAUTH_SIGNATURE_METHOD = "oauth_signature_method"
val KEY_OAUTH_TIMESTAMP = "oauth_timestamp"
val KEY_OAUTH_TOKEN = "oauth_token"
val KEY_OAUTH_VERSION = "oauth_version"
val OAUTH_VERSION_1_0 = "1.0"
val OAUTH_SIGNATURE_METHOD = "PLAINTEXT"
protected final val nonceBuffer: Array[Byte] = new Array[Byte](16)
override def calculateAndAddSignature(request: Request, requestBuilder: RequestBuilderBase[_]): Unit = {
val nonce: String = generateNonce
val timestamp: Long = System.currentTimeMillis() / 1000L
val signature = calculateSignature(request.getMethod, request.getUrl, timestamp, nonce, request.getFormParams, request.getQueryParams)
val headerValue = constructAuthHeader(signature, nonce, timestamp)
requestBuilder.setHeader(HEADER_AUTHORIZATION, headerValue);
}
/**
* from http://oauth.net/core/1.0/#signing_process
* oauth_signature is set to the concatenated encoded values of the
* Consumer Secret and Token Secret,
* separated by a ‘&’ character (ASCII code 38),
* even if either secret is empty.
* The result MUST be encoded again.
*/
def calculateSignature(method: String, baseURL: String, oauthTimestamp: Long, nonce: String, formParams: java.util.List[com.ning.http.client.Param], queryParams: java.util.List[com.ning.http.client.Param]) = {
val signedText = new StringBuilder(100)
signedText.append(consumerKey.secret)
signedText.append('&');
signedText.append(requestToken.secret)
UTF8UrlEncoder.encode(signedText.toString)
}
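  // Illustrative, using the example secrets from the OAuth 1.0 spec: consumer secret
  // "kd94hf93k423kf44" and token secret "pfkkdhi9sl3r4s00" give the PLAINTEXT
  // signature "kd94hf93k423kf44%26pfkkdhi9sl3r4s00" ("&" percent-encoded as "%26").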
  def constructAuthHeader(signature: String, nonce: String, oauthTimestamp: Long, sb: StringBuilder = new StringBuilder) = {
    // forward the caller-supplied builder instead of silently discarding it
    constructAuthHeader_sb(signature, nonce, oauthTimestamp, sb).toString
  }
  def constructAuthHeader_sb(signature: String, nonce: String, oauthTimestamp: Long, sb: StringBuilder = new StringBuilder(250)) = {
    sb.synchronized {
      sb.append("OAuth ")
      sb.append(KEY_OAUTH_CONSUMER_KEY)
      sb.append("=\"")
      sb.append(consumerKey.key)
      sb.append("\", ")
      sb.append(KEY_OAUTH_TOKEN)
      sb.append("=\"")
      sb.append(requestToken.token)
      sb.append("\", ")
      sb.append(KEY_OAUTH_SIGNATURE_METHOD)
      sb.append("=\"")
      sb.append(OAUTH_SIGNATURE_METHOD)
      sb.append("\", ")
      // careful: base64 has chars that need URL encoding:
      sb.append(KEY_OAUTH_SIGNATURE)
      sb.append("=\"")
      sb.append(signature)
      sb.append("\", ")
      sb.append(KEY_OAUTH_TIMESTAMP)
      sb.append("=\"")
      sb.append(oauthTimestamp)
      sb.append("\", ")
      // also: nonce may contain things that need URL encoding (esp. when using base64):
      sb.append(KEY_OAUTH_NONCE)
      sb.append("=\"")
      sb.append(UTF8UrlEncoder.encode(nonce))
      sb.append("\", ")
      sb.append(KEY_OAUTH_VERSION)
      sb.append("=\"")
      sb.append(OAUTH_VERSION_1_0)
      sb.append("\"")
      sb
    }
  }
def generateNonce = synchronized {
scala.util.Random.nextBytes(nonceBuffer)
// let's use base64 encoding over hex, slightly more compact than hex or decimals
Base64.encode(nonceBuffer)
}
}
object OAuthPlainTextCalculator {
def apply(consumerKey: ConsumerKey, token: RequestToken): WSSignatureCalculator = {
new OAuthPlainTextCalculator(consumerKey, token)
}
}
|
techmag/OAuthPlainTextCalculator
|
OAuthPlainTextCalculator.scala
|
Scala
|
apache-2.0
| 3,809
|
package doodle
package core
import doodle.syntax._
import org.scalatest._
import org.scalatest.prop.GeneratorDrivenPropertyChecks
class ColorSpec extends FlatSpec with Matchers with GeneratorDrivenPropertyChecks {
import doodle.arbitrary._
import doodle.syntax.approximatelyEqual._
"Angle" should "have bijection to Double as radians" in {
forAll { (a: Angle) =>
assert(a ~= Angle.radians(a.toRadians))
}
}
"Angle" should "have bijection to Double as degrees" in {
forAll { (a: Angle) =>
assert(a ~= Angle.degrees(a.toDegrees))
}
}
"Angle" should "have bijection to Double as turns" in {
forAll { (a: Angle) =>
assert(a ~= Angle.turns(a.toTurns))
}
}
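  // Sanity check for the expected values below, via the standard HSL -> RGB formula:
  // for h = 240deg, s = 0.5, l = 0.5: C = (1 - |2l - 1|) * s = 0.5, X = 0, m = l - C/2 = 0.25,
  // giving (r, g, b) = (0.25, 0.25, 0.75), i.e. roughly (64, 64, 191) as unsigned bytes.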
"toRGBA" should "convert to expected RGBA color" in {
val blueHSLA = Color.hsl(240.degrees, 0.5.normalized, 0.5.normalized).toRGBA
val blueRGBA = Color.rgb(64.uByte, 64.uByte, 191.uByte)
assert(blueHSLA ~= blueRGBA)
val greenHSLA = Color.hsl(120.degrees, 0.5.normalized, 0.5.normalized).toRGBA
val greenRGBA = Color.rgb(64.uByte, 191.uByte, 64.uByte)
assert(greenHSLA ~= greenRGBA)
val redHSLA = Color.hsl(0.degrees, 0.75.normalized, 0.5.normalized).toRGBA
val redRGBA = Color.rgb(223.uByte, 32.uByte, 32.uByte)
assert(redHSLA ~= redRGBA)
}
"toHSLA" should "converts to expected HSLA color" in {
val blueHSLA = Color.hsl(240.degrees, 0.5.normalized, 0.5.normalized)
val blueRGBA = Color.rgb(64.uByte, 64.uByte, 191.uByte).toHSLA
assert(blueHSLA ~= blueRGBA)
val greenHSLA = Color.hsl(120.degrees, 0.5.normalized, 0.5.normalized)
val greenRGBA = Color.rgb(64.uByte, 191.uByte, 64.uByte).toHSLA
assert(greenHSLA ~= greenRGBA)
val redHSLA = Color.hsl(0.degrees, 0.75.normalized, 0.5.normalized)
val redRGBA = Color.rgb(223.uByte, 32.uByte, 32.uByte).toHSLA
assert(redHSLA ~= redRGBA)
}
"HSLA with 0 saturation" should "convert to gray RGBA" in {
val grey1HSLA = Color.hsl(0.degrees, 0.normalized, 0.5.normalized).toRGBA
val grey1RGBA = Color.rgb(128.uByte, 128.uByte, 128.uByte)
assert(grey1HSLA ~= grey1RGBA)
val grey2HSLA = Color.hsl(0.degrees, 0.normalized, 1.0.normalized).toRGBA
val grey2RGBA = Color.rgb(255.uByte, 255.uByte, 255.uByte)
assert(grey2HSLA ~= grey2RGBA)
}
"HSLA spin" should "transform correctly" in {
val original = Color.hsl(120.degrees, 0.5.normalized, 0.5.normalized)
val spun = original.spin(60.degrees)
val unspun = original.spin(-60.degrees)
assert(spun ~= Color.hsl(180.degrees, 0.5.normalized, 0.5.normalized))
assert(unspun ~= Color.hsl(60.degrees, 0.5.normalized, 0.5.normalized))
}
"Fade in/out" should "transform correctly" in {
val original = Color.hsla(120.degrees, 0.5.normalized, 0.5.normalized, 0.5.normalized)
val fadeOut = original.fadeOut(0.5.normalized)
val fadeIn = original.fadeIn(0.5.normalized)
fadeOut.alpha should ===(0.0.normalized)
fadeIn.alpha should ===(1.0.normalized)
}
".toRGBA andThen .toHSLA" should "be the identity" in {
forAll { (hsla: HSLA) =>
assert(hsla ~= (hsla.toRGBA.toHSLA))
}
}
".toHSLA andThen .toRGBA" should "be the identity" in {
forAll { (rgba: RGBA) =>
assert(rgba ~= (rgba.toHSLA.toRGBA))
}
}
}
|
Angeldude/doodle
|
shared/src/test/scala/doodle/core/ColorSpec.scala
|
Scala
|
apache-2.0
| 3,288
|
package scavlink.sbt.mavgen
import java.io.File
import org.fusesource.scalate.{Template, TemplateEngine}
import org.scalatest.{Matchers, WordSpec}
import sbt.Logger
import scala.xml.Elem
class GeneratorFunctionSpec extends WordSpec with Matchers {
val fixture = new Generator {
def generate(xmls: Map[String, Elem], templates: Map[String, Template], outputPath: File)(implicit engine: TemplateEngine, log: Logger): Map[File, String] = ???
}
"camelClassName" should {
"split on underscore and capitalize all parts" in {
fixture.className("some_kind_of_name") shouldBe "SomeKindOfName"
}
"capitalize a single part" in {
fixture.className("some") shouldBe "Some"
}
}
"safeFieldName" should {
"split on underscore and capitalize all but the first part" in {
fixture.fieldName("some_kind_of_name") shouldBe "someKindOfName"
}
"not capitalize a single part" in {
fixture.fieldName("some") shouldBe "some"
}
"put back-ticks around reserved word" in {
fixture.reservedWords.foreach { s =>
fixture.fieldName(s) shouldBe s"`$s`"
}
}
}
"parsePath" should {
"parse a plain path" in {
val base = new File("parent", "dir")
val bundle = "common"
val template = "MAVLink.message.enums.Enum"
val (dir, pkg, name) = fixture.parsePath(base, bundle, template)
dir shouldBe new File(new File(new File(base, "MAVLink"), "message"), "enums")
pkg shouldBe "MAVLink.message.enums"
name shouldBe "Enum"
}
"substitute the bundle for a single underscore" in {
val base = new File("parent", "dir")
val bundle = "common"
val template = "MAVLink.message._.Messages"
val (dir, pkg, name) = fixture.parsePath(base, bundle, template)
dir shouldBe new File(new File(new File(base, "MAVLink"), "message"), bundle)
pkg shouldBe "MAVLink.message.common"
name shouldBe "Messages"
}
"substitute the bundle capitalized for a double underscore" in {
val base = new File("parent", "dir")
val bundle = "common"
val template = "MAVLink.connection.marshal.__Marshaller"
val (dir, pkg, name) = fixture.parsePath(base, bundle, template)
dir shouldBe new File(new File(new File(base, "MAVLink"), "connection"), "marshal")
pkg shouldBe "MAVLink.connection.marshal"
name shouldBe "CommonMarshaller"
}
"substitute the bundle more than once" in {
val base = new File("parent", "dir")
val bundle = "common"
val template = "MAVLink.connection._.__Marshaller"
val (dir, pkg, name) = fixture.parsePath(base, bundle, template)
dir shouldBe new File(new File(new File(base, "MAVLink"), "connection"), bundle)
pkg shouldBe "MAVLink.connection.common"
name shouldBe "CommonMarshaller"
}
}
"scalaSource" should {
"append the scala extension to the name" in {
fixture.scalaSource("file") shouldBe "file.scala"
}
}
}
|
nickolasrossi/sbt-mavgen
|
src/test/scala/scavlink/sbt/mavgen/GeneratorFunctionSpec.scala
|
Scala
|
mit
| 2,993
|
package net.snowflake.spark.snowflake.streaming
import java.nio.charset.Charset
import java.sql.Connection
import net.snowflake.client.jdbc.internal.apache.commons.logging.{Log, LogFactory}
import net.snowflake.client.jdbc.internal.fasterxml.jackson.databind.ObjectMapper
import net.snowflake.client.jdbc.internal.fasterxml.jackson.databind.node.ArrayNode
import net.snowflake.ingest.SimpleIngestManager
import net.snowflake.ingest.connection.IngestStatus
import net.snowflake.spark.snowflake.DefaultJDBCWrapper.DataBaseOperations
import net.snowflake.spark.snowflake.Parameters.MergedParameters
import net.snowflake.spark.snowflake.io.CloudStorage
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.language.postfixOps
class SnowflakeIngestService(param: MergedParameters,
pipeName: String,
storage: CloudStorage,
conn: Connection) {
val SLEEP_TIME: Long = 60 * 1000 // 1m
val HISTORY_CHECK_TIME: Long = 60 * 60 * 1000 // 1h
val WAITING_TIME_ON_TERMINATION: Int = 10 // 10m
lazy implicit val ingestManager: SimpleIngestManager =
SnowflakeIngestConnector.createIngestManager(param, pipeName)
private var notClosed: Boolean = true
private val ingestedFileList: IngestedFileList = init()
private lazy val checker =
SnowflakeIngestConnector.createHistoryChecker(ingestManager)
private var pipeDropped = false
// run clean function periodically
private val process = Future {
while (notClosed) {
Thread.sleep(SLEEP_TIME)
val time = System.currentTimeMillis()
ingestedFileList.checkResponseList(checker())
if (ingestedFileList.getFirstTimeStamp.isDefined &&
time - ingestedFileList.getFirstTimeStamp.get > HISTORY_CHECK_TIME) {
ingestedFileList
.checkResponseList(
SnowflakeIngestConnector
.checkHistoryByRange(
ingestManager,
ingestedFileList.getFirstTimeStamp.get,
time
)
)
}
}
cleanAll()
}
def ingestFiles(list: List[String]): Unit = {
SnowflakeIngestConnector.ingestFiles(list)
ingestedFileList.addFiles(list)
}
def cleanAll(): Unit = {
while (ingestedFileList.nonEmpty) {
Thread.sleep(SLEEP_TIME)
val time = System.currentTimeMillis()
if (time - ingestedFileList.getFirstTimeStamp.get > 10 * 60 * 1000) {
ingestedFileList
.checkResponseList(
SnowflakeIngestConnector
.checkHistoryByRange(
ingestManager,
ingestedFileList.getFirstTimeStamp.get,
time
)
)
} else ingestedFileList.checkResponseList(checker())
}
conn.dropPipe(pipeName)
ingestedFileList.remove()
pipeDropped = true
}
def close(): Unit = {
val ct = System.currentTimeMillis()
IngestContextManager.logger.debug("closing ingest service")
notClosed = false
Await.result(process, WAITING_TIME_ON_TERMINATION minutes)
if (!pipeDropped) {
IngestContextManager.logger.error(
s"closing ingest service time out, please drop pipe: $pipeName manually"
)
}
IngestContextManager.logger.debug(
s"ingest service closed: ${(System.currentTimeMillis() - ct) / 1000.0}"
)
}
/**
* recover state from context files, or create new data if none exists
*/
private def init(): IngestedFileList =
IngestContextManager.readIngestList(storage, conn)
}
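// Hedged usage sketch (not part of the original file): typical lifecycle of the
// service; `param`, `storage`, and `conn` are hypothetical caller-supplied values.
// val service = new SnowflakeIngestService(param, "my_pipe", storage, conn)
// service.ingestFiles(List("staged/part-0001.json.gz"))
// service.close() // blocks until loaded files are cleaned up and the pipe is dropped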
object IngestContextManager {
val CONTEXT_DIR = "context"
val INGEST_FILE_LIST_NAME = "ingested_file_list.json"
val FAILED_FILE_INDEX = "failed_file_index"
val LIST = "list"
val NAME = "name"
val TIME = "time"
val mapper = new ObjectMapper()
val logger: Log = LogFactory.getLog(getClass)
def readIngestList(storage: CloudStorage,
conn: Connection): IngestedFileList = {
val fileName = s"$CONTEXT_DIR/$INGEST_FILE_LIST_NAME"
if (storage.fileExists(fileName)) {
val inputStream = storage.download(fileName, compress = false)
val buffer = ArrayBuffer.empty[Byte]
var c: Int = inputStream.read()
while (c != -1) {
buffer.append(c.toByte)
c = inputStream.read()
}
try {
val node =
mapper.readTree(new String(buffer.toArray, Charset.forName("UTF-8")))
val failedIndex: Int = node.get(FAILED_FILE_INDEX).asInt()
val failedList: FailedFileList =
readFailedFileList(failedIndex, storage, conn)
val arrNode = node.get(LIST).asInstanceOf[ArrayNode]
var list: List[(String, Long)] = Nil
(0 until arrNode.size()).foreach(i => {
list = arrNode.get(i).get(NAME).asText() -> arrNode
.get(i)
.get(TIME)
.asLong() :: list
})
IngestedFileList(storage, conn, Some(failedList), Some(list))
} catch {
case e: Exception =>
throw new IllegalArgumentException(
s"context file: $fileName is broken: $e"
)
}
} else IngestedFileList(storage, conn)
}
def readFailedFileList(index: Int,
storage: CloudStorage,
conn: Connection): FailedFileList = {
val fileName = s"$CONTEXT_DIR/failed_file_list_$index.json"
if (storage.fileExists(fileName)) {
val inputStream = storage.download(fileName, compress = false)
val buffer = ArrayBuffer.empty[Byte]
var c: Int = inputStream.read()
while (c != -1) {
buffer.append(c.toByte)
c = inputStream.read()
}
try {
val list = mapper
.readTree(new String(buffer.toArray, Charset.forName("UTF-8")))
.asInstanceOf[ArrayNode]
var set = mutable.HashSet.empty[String]
(0 until list.size()).foreach(i => {
set += list.get(i).asText()
})
FailedFileList(storage, conn, index, Some(set))
} catch {
case e: Exception =>
throw new IllegalArgumentException(
s"context file: $fileName is broken: $e"
)
}
} else FailedFileList(storage, conn, index)
}
}
sealed trait IngestContext {
val storage: CloudStorage
val fileName: String
val conn: Connection
def save(): Unit = {
IngestContextManager.logger.debug(s"$fileName:$toString")
val output =
storage.upload(fileName, Some(IngestContextManager.CONTEXT_DIR), compress = false)
output.write(toString.getBytes("UTF-8"))
output.close()
}
}
case class FailedFileList(override val storage: CloudStorage,
override val conn: Connection,
fileIndex: Int = 0,
files: Option[mutable.HashSet[String]] = None)
extends IngestContext {
val MAX_FILE_SIZE: Int = 1000 // maximum number of file names per list
private var fileSet: mutable.HashSet[String] =
files.getOrElse(mutable.HashSet.empty[String])
override lazy val fileName: String = s"failed_file_list_$fileIndex.json"
def addFiles(names: List[String]): FailedFileList = {
val part1 = names.slice(0, MAX_FILE_SIZE - fileSet.size)
val part2 = names.slice(MAX_FILE_SIZE - fileSet.size, Int.MaxValue)
fileSet ++= part1.toSet
save()
if (part2.isEmpty) this
else FailedFileList(storage, conn, fileIndex + 1).addFiles(part2)
}
override def toString: String = {
val node = IngestContextManager.mapper.createArrayNode()
fileSet.foreach(node.add)
node.toString
}
}
case class IngestedFileList(override val storage: CloudStorage,
override val conn: Connection,
failedFileList: Option[FailedFileList] = None,
ingestList: Option[List[(String, Long)]] = None)
extends IngestContext {
override val fileName: String = IngestContextManager.INGEST_FILE_LIST_NAME
private var failedFiles: FailedFileList =
failedFileList.getOrElse(FailedFileList(storage, conn))
private var fileList: mutable.PriorityQueue[(String, Long)] =
mutable.PriorityQueue
.empty[(String, Long)](Ordering.by[(String, Long), Long](_._2).reverse)
if (ingestList.isDefined) {
ingestList.get.foreach(fileList += _)
}
def addFiles(names: List[String]): Unit = {
val time = System.currentTimeMillis()
names.foreach(fileList += _ -> time)
save()
}
override def toString: String = {
val node = IngestContextManager.mapper.createObjectNode()
node.put(IngestContextManager.FAILED_FILE_INDEX, failedFiles.fileIndex)
val arr = node.putArray(IngestContextManager.LIST)
fileList.foreach {
case (name, time) =>
val n = IngestContextManager.mapper.createObjectNode()
n.put(IngestContextManager.NAME, name)
n.put(IngestContextManager.TIME, time)
arr.add(n)
}
node.toString
}
def checkResponseList(list: List[(String, IngestStatus)]): Unit = {
var toClean: List[String] = Nil
var failed: List[String] = Nil
list.foreach {
case (name, status) =>
if (fileList.exists(_._1 == name)) {
status match {
case IngestStatus.LOADED =>
toClean = name :: toClean
fileList = fileList.filterNot(_._1 == name)
case IngestStatus.LOAD_FAILED | IngestStatus.PARTIALLY_LOADED =>
failed = name :: failed
fileList = fileList.filterNot(_._1 == name)
case _ => // do nothing
}
}
}
if (toClean.nonEmpty) storage.deleteFiles(toClean)
if (failed.nonEmpty) failedFiles = failedFiles.addFiles(failed)
save()
}
def getFirstTimeStamp: Option[Long] =
if (fileList.isEmpty) None else Some(fileList.head._2)
def isEmpty: Boolean = fileList.isEmpty
def nonEmpty: Boolean = fileList.nonEmpty
def remove(): Unit =
storage.deleteFile(IngestContextManager.CONTEXT_DIR + "/" + fileName)
}
|
snowflakedb/spark-snowflake
|
src/main/scala/net/snowflake/spark/snowflake/streaming/SnowflakeIngestService.scala
|
Scala
|
apache-2.0
| 10,146
|
//
// Scaled - a scalable editor extensible via JVM languages
// http://github.com/scaled/scaled/blob/master/LICENSE
package scaled.impl
import org.junit._
import org.junit.Assert._
import scaled.major.TextMode
class FnBindingsTest {
@Test def testCollectBindings () {
val view = new BufferViewImpl(TestData.editor, TestData.buffer("test", ""), 80, 24)
val mode = new TextMode(TestData.env(view))
val binds = new FnBindings(mode, System.err.println)
// binds.bindings foreach println
assertTrue(binds.binding("forward-char").isDefined)
assertTrue(binds.binding("backward-char").isDefined)
assertFalse(binds.binding("peanut").isDefined)
}
}
|
swhgoon/scaled
|
editor/src/test/scala/scaled/impl/FnBindingsTest.scala
|
Scala
|
bsd-3-clause
| 677
|
package edu.msstate.dasi.csb.workload.spark
import edu.msstate.dasi.csb.workload.Workload
import org.apache.spark.graphx.Graph
import scala.reflect.ClassTag
/**
* Strongly Connected Components algorithm implementation.
*/
class StronglyConnectedComponents(engine: SparkEngine, iterations: Int) extends Workload {
val name = "Strongly Connected Components"
/**
* Runs Strongly Connected Components.
*/
def run[VD: ClassTag, ED: ClassTag](graph: Graph[VD, ED]): Unit = {
graph.stronglyConnectedComponents(iterations).vertices.foreach(engine.doNothing)
}
}
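// Hedged usage sketch (not part of the original file): running the workload on a
// GraphX graph; `engine` and `graph` are hypothetical caller-supplied values.
// val scc = new StronglyConnectedComponents(engine, iterations = 5)
// scc.run(graph) // materializes the SCC vertex labels through engine.doNothing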
|
msstate-dasi/csb
|
csb/src/main/scala/edu/msstate/dasi/csb/workload/spark/StronglyConnectedComponents.scala
|
Scala
|
gpl-3.0
| 578
|
package com.owlike.genson.ext.scala
import com.owlike.genson.{Context, Converter, Factory}
import java.lang.reflect.Type
import com.owlike.genson.reflect.TypeUtil._
import com.owlike.genson.convert.DefaultConverters.{MapConverterFactory => JavaMapFactory, KeyAdapter}
import com.owlike.genson.stream.{ObjectReader, ObjectWriter}
import scala.collection.generic.CanBuildFrom
import scala.collection.immutable.{:: => colon, _}
import scala.collection.mutable.{
Map => MMap,
ListMap => MListMap,
HashMap => MHashMap,
Set => MSet,
HashSet => MHashSet,
ListBuffer,
Queue => MQueue,
Buffer
}
import scala.collection.Map
import scala.collection.immutable.{Map => IMap}
import scala.collection.immutable.HashSet.HashTrieSet
import scala.collection.immutable.Set.{Set1, Set2, Set3, Set4}
import scala.Vector
import scala.Seq
import scala.Traversable
import scala.List
import scala.collection.immutable.Map.{Map4, Map3, Map2, Map1}
import com.owlike.genson.annotation.{HandleBeanView, HandleClassMetadata}
class MapConverterFactory extends Factory[Converter[_ <: Any]]() {
val cbfByType = List[(Class[_], CanBuildFrom[_ <: Traversable[_], _, _ <: Traversable[_]])](
classOf[Map1[_, _]] -> Map.canBuildFrom,
classOf[Map2[_, _]] -> Map.canBuildFrom,
classOf[Map3[_, _]] -> Map.canBuildFrom,
classOf[Map4[_, _]] -> Map.canBuildFrom,
classOf[MListMap[_, _]] -> MListMap.canBuildFrom,
classOf[MHashMap[_, _]] -> MHashMap.canBuildFrom,
classOf[HashMap[_, _]] -> HashMap.canBuildFrom,
classOf[ListMap[_, _]] -> ListMap.canBuildFrom,
classOf[MMap[_, _]] -> MMap.canBuildFrom,
classOf[Map[_, _]] -> Map.canBuildFrom
)
def create(genType: Type, genson: Genson): Converter[_ <: Any] = {
val rawClass = getRawClass(genType)
cbfByType.filter { case (clazz, cbf) =>
clazz.isAssignableFrom(rawClass)
}.headOption.map { case (clazz, cbf) =>
val castCBF = cbf.asInstanceOf[CanBuildFrom[Map[Any, Any], Any, Map[Any, Any]]]
val expandedType = expandType(lookupGenericType(classOf[Map[_, _]], getRawClass(genType)), genType)
val keyType: Type = typeOf(0, expandedType)
val valueType: Type = typeOf(1, expandedType)
val elemConverter: Converter[Any] = genson.provideConverter(valueType)
val keyAdapter = Option(JavaMapFactory.keyAdapter(getRawClass(keyType)))
.getOrElse(KeyAdapter.runtimeAdapter)
.asInstanceOf[KeyAdapter[Any]]
new MapConverter[Any, Any, Map[Any, Any]](keyAdapter, elemConverter)(castCBF)
}.getOrElse(null)
}
}
class TraversableConverterFactory extends Factory[Converter[_ <: Traversable[Any]]]() {
val cbfByType = List[(Class[_], CanBuildFrom[_ <: Traversable[_], _, _ <: Traversable[_]])](
classOf[colon[_]] -> List.canBuildFrom,
classOf[HashTrieSet[_]] -> HashSet.canBuildFrom,
classOf[HashSet[_]] -> HashSet.canBuildFrom,
classOf[ListSet[_]] -> ListSet.canBuildFrom,
classOf[Set1[_]] -> Set.canBuildFrom,
classOf[Set2[_]] -> Set.canBuildFrom,
classOf[Set3[_]] -> Set.canBuildFrom,
classOf[Set4[_]] -> Set.canBuildFrom,
classOf[Set[_]] -> Set.canBuildFrom,
classOf[Queue[_]] -> Queue.canBuildFrom,
classOf[MQueue[_]] -> MQueue.canBuildFrom,
classOf[ListBuffer[_]] -> ListBuffer.canBuildFrom,
classOf[Buffer[_]] -> Buffer.canBuildFrom,
classOf[Vector[_]] -> Vector.canBuildFrom,
classOf[List[_]] -> List.canBuildFrom,
classOf[MHashSet[_]] -> MHashSet.canBuildFrom,
classOf[MSet[_]] -> MSet.canBuildFrom,
classOf[Seq[_]] -> Seq.canBuildFrom
)
def create(genType: Type, genson: Genson): Converter[_ <: Traversable[Any]] = {
val rawClass = getRawClass(genType)
cbfByType.filter { case (clazz, cbf) =>
clazz.isAssignableFrom(rawClass)
}.headOption.map { case (_, cbf) =>
val castCBF = cbf.asInstanceOf[CanBuildFrom[Traversable[Any], Any, Traversable[Any]]]
val elemConverter: Converter[Any] = genson.provideConverter(ScalaBundle.getTraversableType(genType))
new TraversableConverter[Any, Traversable[Any]](elemConverter)(castCBF)
}.getOrElse(null)
}
}
@HandleClassMetadata
class MapConverter[K, V, C <: Map[K, V]]
(keyAdapter: KeyAdapter[K], elemConverter: Converter[V])(implicit cbf: CanBuildFrom[C, (K, V), C])
extends Converter[C] {
def serialize(value: C, writer: ObjectWriter, ctx: Context): Unit = {
writer.beginObject()
value.foreach { t =>
writer.writeName(keyAdapter.adapt(t._1))
elemConverter.serialize(t._2, writer, ctx)
}
writer.endObject()
}
def deserialize(reader: ObjectReader, ctx: Context): C = {
val builder = cbf()
reader.beginObject()
while (reader.hasNext) {
reader.next()
builder += (keyAdapter.adapt(reader.name()) -> elemConverter.deserialize(reader, ctx))
}
reader.endObject()
builder.result()
}
}
@HandleClassMetadata
class TraversableConverter[T, C <: Traversable[T]](elemConverter: Converter[T])(implicit cbf: CanBuildFrom[C, T, C])
extends Converter[C] {
def serialize(value: C, writer: ObjectWriter, ctx: Context): Unit = {
writer.beginArray()
value.foreach { t =>
elemConverter.serialize(t, writer, ctx)
}
writer.endArray()
}
def deserialize(reader: ObjectReader, ctx: Context): C = {
val builder = cbf()
reader.beginArray()
while (reader.hasNext) {
reader.next()
builder += elemConverter.deserialize(reader, ctx)
}
reader.endArray()
builder.result()
}
}
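// Hedged usage sketch (not part of the original file): the factories above are
// meant to be registered on a Genson builder; the exact builder API is an assumption.
// val genson = new com.owlike.genson.GensonBuilder()
//   .withConverterFactory(new MapConverterFactory)
//   .withConverterFactory(new TraversableConverterFactory)
//   .create()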
|
hieuit7/genson
|
src/main/scala/com/owlike/genson/ext/scala/TraversableConverters.scala
|
Scala
|
apache-2.0
| 5,506
|
package org.scalatra
package atmosphere
import java.nio.CharBuffer
import javax.servlet.http.{ HttpServletRequest, HttpSession }
import grizzled.slf4j.Logger
import org.atmosphere.cpr.AtmosphereResource.TRANSPORT._
import org.atmosphere.cpr._
import org.atmosphere.handler.AbstractReflectorAtmosphereHandler
import org.scalatra.servlet.ServletApiImplicits._
import org.scalatra.util.RicherString._
object ScalatraAtmosphereHandler {
@deprecated("Use `org.scalatra.atmosphere.AtmosphereClientKey` instead", "2.2.1")
val AtmosphereClientKey = org.scalatra.atmosphere.AtmosphereClientKey
@deprecated("Use `org.scalatra.atmosphere.AtmosphereRouteKey` instead", "2.2.1")
val AtmosphereRouteKey = org.scalatra.atmosphere.AtmosphereRouteKey
private class ScalatraResourceEventListener extends AtmosphereResourceEventListener {
def client(resource: AtmosphereResource) =
Option(resource.session()).flatMap(_.get(org.scalatra.atmosphere.AtmosphereClientKey)).map(_.asInstanceOf[AtmosphereClient])
def onPreSuspend(event: AtmosphereResourceEvent) {}
def onHeartbeat(event: AtmosphereResourceEvent) {
client(event.getResource) foreach (_.receive.lift(Heartbeat))
}
def onBroadcast(event: AtmosphereResourceEvent) {
val resource = event.getResource
resource.transport match {
case JSONP | AJAX | LONG_POLLING =>
case _ => resource.getResponse.flushBuffer()
}
}
def onDisconnect(event: AtmosphereResourceEvent) {
val disconnector = if (event.isCancelled) ClientDisconnected else ServerDisconnected
client(event.getResource) foreach (_.receive.lift(Disconnected(disconnector, Option(event.throwable))))
// if (!event.getResource.isResumed) {
// event.getResource.session.invalidate()
// } else {
event.getResource.session.removeAttribute(org.scalatra.atmosphere.AtmosphereClientKey)
// }
}
def onResume(event: AtmosphereResourceEvent) {}
def onSuspend(event: AtmosphereResourceEvent) {}
def onThrowable(event: AtmosphereResourceEvent) {
client(event.getResource) foreach (_.receive.lift(Error(Option(event.throwable()))))
}
def onClose(event: AtmosphereResourceEvent) {}
}
}
class ScalatraAtmosphereException(message: String) extends ScalatraException(message)
class ScalatraAtmosphereHandler(scalatraApp: ScalatraBase)(implicit wireFormat: WireFormat) extends AbstractReflectorAtmosphereHandler {
import org.scalatra.atmosphere.ScalatraAtmosphereHandler._
private[this] val internalLogger = Logger(getClass)
def onRequest(resource: AtmosphereResource) {
implicit val req = resource.getRequest
implicit val res = resource.getResponse
val route = Option(req.getAttribute(org.scalatra.atmosphere.AtmosphereRouteKey)).map(_.asInstanceOf[MatchedRoute])
var session = resource.session()
val isNew = !session.contains(org.scalatra.atmosphere.AtmosphereClientKey)
scalatraApp.withRequestResponse(resource.getRequest, resource.getResponse) {
scalatraApp.withRouteMultiParams(route) {
(req.requestMethod, route.isDefined) match {
case (Post, _) =>
var client: AtmosphereClient = null
if (isNew) {
session = AtmosphereResourceFactory.getDefault.find(resource.uuid).session
}
client = session(org.scalatra.atmosphere.AtmosphereClientKey).asInstanceOf[AtmosphereClient]
handleIncomingMessage(req, client)
case (_, true) =>
val cl = if (isNew) {
createClient(route.get, session, resource)
} else null
addEventListener(resource)
resumeIfNeeded(resource)
configureBroadcaster(resource)
if (isNew && cl != null) handleIncomingMessage(Connected, cl)
resource.suspend
case _ =>
val ex = new ScalatraAtmosphereException("There is no atmosphere route defined for " + req.getRequestURI)
internalLogger.warn(ex.getMessage)
throw ex
}
}
}
}
private[this] def createClient(route: MatchedRoute, session: HttpSession, resource: AtmosphereResource) = {
val client = clientForRoute(route)
session(org.scalatra.atmosphere.AtmosphereClientKey) = client
client.resource = resource
client
}
private[this] def createClient(route: MatchedRoute, resource: AtmosphereResource) = {
val client = clientForRoute(route)
client.resource = resource
client
}
private[this] def clientForRoute(route: MatchedRoute): AtmosphereClient = {
liftAction(route.action) getOrElse {
throw new ScalatraException("An atmosphere route should return an atmosphere client")
}
}
private[this] def requestUri(resource: AtmosphereResource) = {
val u = resource.getRequest.getRequestURI.blankOption getOrElse "/"
if (u.endsWith("/")) u + "*" else u + "/*"
}
private[this] def configureBroadcaster(resource: AtmosphereResource) {
val bc = BroadcasterFactory.getDefault.get(requestUri(resource))
resource.setBroadcaster(bc)
}
private[this] def handleIncomingMessage(req: AtmosphereRequest, client: AtmosphereClient) {
val parsed: InboundMessage = wireFormat.parseInMessage(readBody(req))
handleIncomingMessage(parsed, client)
}
private[this] def handleIncomingMessage(msg: InboundMessage, client: AtmosphereClient) {
// the ScalatraContext provides the correct request/response values to the AtmosphereClient.receive method
// this can be later refactored to a (Request, Response) => Any
client.receiveWithScalatraContext(scalatraApp).lift(msg)
}
private[this] def readBody(req: AtmosphereRequest) = {
val buff = CharBuffer.allocate(8192)
val body = new StringBuilder
val rdr = req.getReader
while (rdr.read(buff) >= 0) {
body.append(buff.flip.toString)
buff.clear()
}
body.toString()
}
private[this] def addEventListener(resource: AtmosphereResource) {
resource.addEventListener(new ScalatraResourceEventListener)
}
private[this] def liftAction(action: org.scalatra.Action) = try {
action() match {
case cl: AtmosphereClient => Some(cl)
case _ => None
}
} catch {
case t: Throwable =>
t.printStackTrace()
None
}
private[this] def resumeIfNeeded(resource: AtmosphereResource) {
import org.atmosphere.cpr.AtmosphereResource.TRANSPORT._
resource.transport match {
case JSONP | AJAX | LONG_POLLING => resource.resumeOnBroadcast(true)
case _ =>
}
}
}
|
lightvector/scalatra
|
atmosphere/src/main/scala/org/scalatra/atmosphere/ScalatraAtmosphereHandler.scala
|
Scala
|
bsd-2-clause
| 6,604
|
package com.tutorial.sparkcore
import com.tutorial.utils.SparkCommon
/**
* An example of creating RDDs from different data sources.
* Created by ved on 7/1/16.
*/
object CreateRDD {
/**
* Create a Scala Spark Context.
*/
val sc = SparkCommon.sparkContext
def main(args: Array[String]) {
/**
* Create RDDs using the parallelize() method of SparkContext.
*/
val lines = sc.parallelize(List("pandas", "i like pandas"))
lines.collect().map(println)
/**
* Create RDDs by loading data from external storage.
*/
val rddDataset = sc.textFile("src/main/resources/test_file.txt")
rddDataset.collect().map(println)
//sc.stop()
}
}
|
rklick-solutions/spark-tutorial
|
src/main/scala/com/tutorial/sparkcore/CreateRDD.scala
|
Scala
|
apache-2.0
| 696
|
/**
* Copyright (c) 2015, Cloudera, Inc. All Rights Reserved.
*
* Cloudera, Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"). You may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* This software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for
* the specific language governing permissions and limitations under the
* License.
*/
package com.cloudera.sparkts
import breeze.linalg.{DenseVector, DenseMatrix}
import com.cloudera.sparkts.TimeSeries._
import com.github.nscala_time.time.Imports._
import org.scalatest.{FunSuite, ShouldMatchers}
import scala.collection.immutable.IndexedSeq
class TimeSeriesSuite extends FunSuite with ShouldMatchers {
test("timeSeriesFromIrregularSamples") {
val dt = new DateTime("2015-4-8")
val samples = Array(
((dt, Array(1.0, 2.0, 3.0))),
((dt + 1.days, Array(4.0, 5.0, 6.0))),
((dt + 2.days, Array(7.0, 8.0, 9.0))),
((dt + 4.days, Array(10.0, 11.0, 12.0)))
)
val labels = Array("a", "b", "c", "d")
val ts = timeSeriesFromIrregularSamples(samples, labels)
ts.data.valuesIterator.toArray should be ((1 to 12).map(_.toDouble).toArray)
}
test("lagsIncludingOriginals") {
val originalIndex = new UniformDateTimeIndex(0, 5, new DayFrequency(1))
val data = DenseMatrix((1.0, 6.0), (2.0, 7.0), (3.0, 8.0), (4.0, 9.0), (5.0, 10.0))
val originalTimeSeries = new TimeSeries(originalIndex, data, Array("a", "b"))
val laggedTimeSeries = originalTimeSeries.lags(2, true)
laggedTimeSeries.keys should be (Array("a", "lag1(a)", "lag2(a)", "b", "lag1(b)", "lag2(b)"))
laggedTimeSeries.index.size should be (3)
laggedTimeSeries.data should be (DenseMatrix((3.0, 2.0, 1.0, 8.0, 7.0, 6.0),
(4.0, 3.0, 2.0, 9.0, 8.0, 7.0), (5.0, 4.0, 3.0, 10.0, 9.0, 8.0)))
}
test("lagsExcludingOriginals") {
val originalIndex = new UniformDateTimeIndex(0, 5, new DayFrequency(1))
val data = DenseMatrix((1.0, 6.0), (2.0, 7.0), (3.0, 8.0), (4.0, 9.0), (5.0, 10.0))
val originalTimeSeries = new TimeSeries(originalIndex, data, Array("a", "b"))
val laggedTimeSeries = originalTimeSeries.lags(2, false)
laggedTimeSeries.keys should be (Array("lag1(a)", "lag2(a)", "lag1(b)", "lag2(b)"))
laggedTimeSeries.index.size should be (3)
laggedTimeSeries.data should be (DenseMatrix((2.0, 1.0, 7.0, 6.0), (3.0, 2.0, 8.0, 7.0),
(4.0, 3.0, 9.0, 8.0)))
}
}
|
sunheehnus/spark-timeseries
|
src/test/scala/com/cloudera/sparkts/TimeSeriesSuite.scala
|
Scala
|
apache-2.0
| 2,624
|
package vultura.factor
import org.specs2.matcher.{MatchResult, Expectable, Matcher}
import vultura.factor.inference.{VariableElimination, MarginalI, ParFunI, JunctionTree}
/**
* Matchers for use with Factor objects.
* @author Thomas Geier <thomas.geier@uni-ulm.de>
*/
trait FactorMatchers {
def haveSameStructureAs(ref: Factor): Matcher[Factor] = new Matcher[Factor]{
def apply[S <: Factor](t: Expectable[S]): MatchResult[S] = result(
ref.variables.deep == t.value.variables.deep && ref.values.size == t.value.values.size,
s"${t.description} has same structure as " + ref,
s"${t.description} differs in structure from " + ref,
t
)
}
def haveValuesCloseTo(ref: Factor, tol: Double = 1e-7): Matcher[Factor] = new Matcher[Factor]{
def apply[S <: Factor](t: Expectable[S]): MatchResult[S] = result(
Factor.maxDiff(t.value,ref,NormalD) < tol,
s"${t.description} has close marginals to " + ref,
s"${t.description} differs in some value from $ref by " + Factor.maxDiff(t.value,ref,NormalD),
t
)
}
def beSimilarTo(ref: Factor, tol: Double): Matcher[Factor] =
haveSameStructureAs(ref) and haveValuesCloseTo(ref,tol)
def haveSameLogZ(reference: ParFunI, tol: Double): Matcher[ParFunI] = new Matcher[ParFunI]{
def apply[S <: ParFunI](t: Expectable[S]): MatchResult[S] = {
val obtainedZ: Double = t.value.logZ
val otherZ: Double = reference.logZ
result(
math.abs(obtainedZ - otherZ) < tol,
s"${t.description} has same Z as exact inference",
f"${t.description} has different Z as expected: ${t.description}: $obtainedZ, expected: $otherZ, diff: ${math.abs(obtainedZ - otherZ)}})",
t
)
}
}
def haveExactMarginals(p: Problem, tol: Double = 1e-9) = haveSameMarginals(new JunctionTree(p),tol)
def haveExactZ(p: Problem, tol: Double = 1e-9) = haveSameLogZ(new JunctionTree(p),tol)
def haveSameMarginals(reference: MarginalI, tol: Double, logDomain: Boolean = true): Matcher[MarginalI] = new Matcher[MarginalI]{
def apply[S <: MarginalI](t: Expectable[S]): MatchResult[S] = {
val p = t.value.problem
def marg(mi: MarginalI,v: Int): Factor = if(logDomain) mi.logVariableBelief(v) else mi.decodedVariableBelief(v)
val error: Option[(Int, Double)] = p.variables.map(v =>
v -> (marg(reference,v).values zip marg(t.value,v).values)
.map{case (x,y) => math.abs(x-y)}
.max
).find(_._2 > tol)
val domainString: String = if(logDomain) "log" else "normal"
result(
error.isEmpty,
s"$t has exact marginals",
s"${t.description} differs in marginals ($domainString encoded) by ${error.get._2} for variable ${error.get._1}",
t
)
}
}
def haveValidMarginals: Matcher[MarginalI] = {
import org.specs2.matcher.Matchers._
val sumsToOne: Matcher[Iterable[Double]] = beCloseTo(1d,delta=1e-9).^^((_:Iterable[Double]).sum).updateMessage("does not sum to one: " + _)
val nonNegative: Matcher[Iterable[Double]] = foreach(beGreaterThanOrEqualTo(0d).updateMessage("is negative: " + _))
val allAreDistributions: Matcher[Iterable[Iterable[Double]]] = foreach(sumsToOne and nonNegative).updateMessage("is not a valid distribution: " + _)
allAreDistributions ^^ ((margs: MarginalI) => margs.problem.variables.map(margs.varBelief(_).values.toIterable))
}
def beCloseToSeq(ref: Seq[Double], tol: Double = 1e-12): Matcher[Seq[Double]] = new Matcher[Seq[Double]]{
override def apply[S <: Seq[Double]](t: Expectable[S]): MatchResult[S] = result(
t.value.zip(ref).map{case (x,y) => math.abs(x - y)}.max < tol,
"has close values to " + ref,
"differs in some values by up to " + t.value.zip(ref).map{case (x,y) => math.abs(x - y)}.max,
t
)
}
}
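// Hedged usage sketch (not part of the original file): mixing the matchers into a
// specs2 specification; `computed` and `reference` are hypothetical Factor values.
// class InferenceSpec extends org.specs2.mutable.Specification with FactorMatchers {
//   "approximate inference" should {
//     "stay close to the exact result" in {
//       computed must beSimilarTo(reference, tol = 1e-6)
//     }
//   }
// }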
|
ziggystar/vultura-factor
|
src/test/scala/vultura/factor/FactorMatchers.scala
|
Scala
|
mit
| 3,827
|
package java.util
trait NavigableSet[E] extends SortedSet[E] {
def lower(e: E): E
def floor(e: E): E
def ceiling(e: E): E
def higher(e: E): E
def pollFirst(): E
def pollLast(): E
def iterator(): Iterator[E]
def descendingSet(): NavigableSet[E]
def descendingIterator(): Iterator[E]
def subSet(fromElement: E, fromInclusive: Boolean, toElement: E, toInclusive: Boolean): NavigableSet[E]
def headSet(toElement: E, inclusive: Boolean): NavigableSet[E]
def tailSet(fromElement: E, inclusive: Boolean): NavigableSet[E]
def subSet(fromElement: E, toElement: E): SortedSet[E]
def headSet(toElement: E): SortedSet[E]
def tailSet(fromElement: E): SortedSet[E]
}
|
mdedetrich/scala-js
|
javalib/src/main/scala/java/util/NavigableSet.scala
|
Scala
|
bsd-3-clause
| 684
|
package sclib.z
import sclib.ops.either._
/**
* minimalistic `Either` monad transformer
*
* @example
* {{{
* scala> import sclib.z._
* scala> import sclib.ops.either._
*
* scala> val et = EitherT[Function1[Int, ?], String, Int]{i => if(i < 10) i.right else "BOOM".left}
* et: sclib.z.EitherT[[A]Int => A,String,Int] = EitherT(<function1>)
*
* scala> et.runEitherT(5)
* res0: Either[String,Int] = Right(5)
*
* scala> et.runEitherT(50)
* res1: Either[String,Int] = Left(BOOM)
* }}}
*
*/
case class EitherT[F[_], A, B](runEitherT: F[Either[A, B]]) {
def map[C](f: B => C)(implicit F: Functor[F]): EitherT[F, A, C] = EitherT {
F.map(runEitherT)(_.right.map(f))
}
def flatMap[AA >: A, D](f: B => EitherT[F, AA, D])(implicit F: Monad[F]): EitherT[F, AA, D] = EitherT {
F.flatMap(runEitherT)(_.fold(l => F.pure(l.left[D]), r => f(r).runEitherT))
}
def flatMapF[C](f: B => F[Either[A, C]])(implicit F: Monad[F]): EitherT[F, A, C] = EitherT {
F.flatMap(runEitherT)(_.fold(a => F.pure(a.left), f))
}
}
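// Hedged sketch (not part of the original file): chaining computations with
// flatMap, reusing the reader-style `et` from the scaladoc example above; it
// assumes a Monad instance for Function1[Int, ?] is in implicit scope.
// val chained = et.flatMap(x => EitherT[Function1[Int, ?], String, Int](i => (x + i).right))
// chained.runEitherT(5)  // Right(10): et yields Right(5), then 5 + 5
// chained.runEitherT(50) // Left(BOOM): the first step already failed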
|
j-keck/sclib
|
src/main/scala/sclib/z/EitherT.scala
|
Scala
|
mit
| 1,057
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import org.json4s.JsonDSL._
import org.apache.spark.deploy.DeployMessages.{MasterStateResponse, WorkerStateResponse}
import org.apache.spark.deploy.master.{ApplicationInfo, DriverInfo, WorkerInfo}
import org.apache.spark.deploy.worker.ExecutorRunner
private[spark] object JsonProtocol {
def writeWorkerInfo(obj: WorkerInfo) = {
("id" -> obj.id) ~
("host" -> obj.host) ~
("port" -> obj.port) ~
("webuiaddress" -> obj.webUiAddress) ~
("cores" -> obj.cores) ~
("coresused" -> obj.coresUsed) ~
("coresfree" -> obj.coresFree) ~
("memory" -> obj.memory) ~
("memoryused" -> obj.memoryUsed) ~
("memoryfree" -> obj.memoryFree) ~
("state" -> obj.state.toString) ~
("lastheartbeat" -> obj.lastHeartbeat)
}
def writeApplicationInfo(obj: ApplicationInfo) = {
("starttime" -> obj.startTime) ~
("id" -> obj.id) ~
("name" -> obj.desc.name) ~
("cores" -> obj.desc.maxCores) ~
("user" -> obj.desc.user) ~
("memoryperslave" -> obj.desc.memoryPerSlave) ~
("submitdate" -> obj.submitDate.toString) ~
("state" -> obj.state.toString) ~
("duration" -> obj.duration)
}
def writeApplicationDescription(obj: ApplicationDescription) = {
("name" -> obj.name) ~
("cores" -> obj.maxCores) ~
("memoryperslave" -> obj.memoryPerSlave) ~
("user" -> obj.user) ~
("command" -> obj.command.toString)
}
def writeExecutorRunner(obj: ExecutorRunner) = {
("id" -> obj.execId) ~
("memory" -> obj.memory) ~
("appid" -> obj.appId) ~
("appdesc" -> writeApplicationDescription(obj.appDesc))
}
def writeDriverInfo(obj: DriverInfo) = {
("id" -> obj.id) ~
("starttime" -> obj.startTime.toString) ~
("state" -> obj.state.toString) ~
("cores" -> obj.desc.cores) ~
("memory" -> obj.desc.mem)
}
def writeMasterState(obj: MasterStateResponse) = {
("url" -> obj.uri) ~
("workers" -> obj.workers.toList.map(writeWorkerInfo)) ~
("cores" -> obj.workers.map(_.cores).sum) ~
("coresused" -> obj.workers.map(_.coresUsed).sum) ~
("memory" -> obj.workers.map(_.memory).sum) ~
("memoryused" -> obj.workers.map(_.memoryUsed).sum) ~
("activeapps" -> obj.activeApps.toList.map(writeApplicationInfo)) ~
("completedapps" -> obj.completedApps.toList.map(writeApplicationInfo)) ~
("activedrivers" -> obj.activeDrivers.toList.map(writeDriverInfo)) ~
("status" -> obj.status.toString)
}
def writeWorkerState(obj: WorkerStateResponse) = {
("id" -> obj.workerId) ~
("masterurl" -> obj.masterUrl) ~
("masterwebuiurl" -> obj.masterWebUiUrl) ~
("cores" -> obj.cores) ~
("coresused" -> obj.coresUsed) ~
("memory" -> obj.memory) ~
("memoryused" -> obj.memoryUsed) ~
("executors" -> obj.executors.toList.map(writeExecutorRunner)) ~
("finishedexecutors" -> obj.finishedExecutors.toList.map(writeExecutorRunner))
}
}
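// Hedged usage sketch (not part of the original file): the writers above return
// json4s JValues built with the ~ DSL; rendering to a string goes through a
// json4s backend such as the jackson one. `worker` is a hypothetical WorkerInfo.
// import org.json4s.jackson.JsonMethods.{compact, render}
// val json: String = compact(render(writeWorkerInfo(worker)))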
|
Dax1n/spark-core
|
core/src/main/scala/org/apache/spark/deploy/JsonProtocol.scala
|
Scala
|
apache-2.0
| 3,707
|
package lila.swiss
import scala.concurrent.duration._
import lila.common.LightUser
import lila.game.Game
private case class SwissBoard(
gameId: Game.ID,
white: SwissBoard.Player,
black: SwissBoard.Player
)
private object SwissBoard {
case class Player(user: LightUser, rank: Int, rating: Int)
case class WithGame(board: SwissBoard, game: Game)
}
final private class SwissBoardApi(
rankingApi: SwissRankingApi,
cacheApi: lila.memo.CacheApi,
lightUserApi: lila.user.LightUserApi,
gameProxyRepo: lila.round.GameProxyRepo
)(implicit ec: scala.concurrent.ExecutionContext) {
private val displayBoards = 6
private val boardsCache = cacheApi.scaffeine
.expireAfterWrite(60 minutes)
.build[Swiss.Id, List[SwissBoard]]()
def apply(id: Swiss.Id): Fu[List[SwissBoard.WithGame]] =
boardsCache.getIfPresent(id) ?? {
_.map { board =>
gameProxyRepo.game(board.gameId) map2 {
SwissBoard.WithGame(board, _)
}
}.sequenceFu
.dmap(_.flatten)
}
def update(data: SwissScoring.Result): Funit =
data match {
case SwissScoring.Result(swiss, leaderboard, playerMap, pairings) =>
rankingApi(swiss) map { ranks =>
boardsCache
.put(
swiss.id,
leaderboard
.collect {
case (player, _) if player.present => player
}
.flatMap { player =>
pairings get player.userId flatMap {
_ get swiss.round
}
}
.filter(_.isOngoing)
.distinct
.take(displayBoards)
.flatMap { pairing =>
for {
p1 <- playerMap get pairing.white
p2 <- playerMap get pairing.black
u1 <- lightUserApi sync p1.userId
u2 <- lightUserApi sync p2.userId
r1 <- ranks get p1.userId
r2 <- ranks get p2.userId
} yield SwissBoard(
pairing.gameId,
white = SwissBoard.Player(u1, r1, p1.rating),
black = SwissBoard.Player(u2, r2, p2.rating)
)
}
)
}
}
}
|
luanlv/lila
|
modules/swiss/src/main/SwissBoard.scala
|
Scala
|
mit
| 2,326
|
package test
import io.keen.client.scala.EventStore
import java.io.IOException
import scala.collection.concurrent.TrieMap
import scala.collection.mutable.ListBuffer
import org.specs2.mutable.{ BeforeAfter, Specification }
import org.specs2.specification.{ Step, Fragments }
trait BeforeAllAfterAll extends Specification {
override def map(fragments: => Fragments) =
Step(beforeAll()) ^ fragments ^ Step(afterAll())
protected def beforeAll(): Unit
protected def afterAll(): Unit
}
abstract class EventStoreSpecBase extends Specification with BeforeAllAfterAll {
@throws(classOf[IOException])
def buildStore(): EventStore
var store: EventStore = _
val testEvents: ListBuffer[String] = new ListBuffer[String]
def beforeAll() = {
store = buildStore() // initialize our store
// generate 5 test events and keep track of them so we can use them
// later in the tests (individual tests store them as needed)
for (i <- 0 to 4) {
testEvents += s"""{"param$i":"value$i"}"""
}
}
def afterAll() = {
store = null // cleanup
}
trait EventStoreSetupTeardown extends BeforeAfter {
def before: Any = {
store = buildStore() // initialize our store
}
def after: Any = {}
}
sequential
"EventStoreSpecBase" should {
"store and get event" in new EventStoreSetupTeardown {
val handle: Long = store.store("project1", "collection1", testEvents.head)
val retrieved: String = store.get(handle)
retrieved must beEqualTo(testEvents.head)
}
"remove event" in new EventStoreSetupTeardown {
val handle: Long = store.store("project1", "collection1", testEvents.head)
store.remove(handle)
val retrieved: String = store.get(handle)
retrieved must beNull
}
"remove handle twice" in new EventStoreSetupTeardown {
val handle: Long = store.store("project1", "collection1", testEvents.head)
store.remove(handle)
store.remove(handle)
true must beEqualTo(true) // we're testing for an exception here, and if none is thrown, pass
}
"get handles" in new EventStoreSetupTeardown {
// add a couple events to the store
store.store("project1", "collection1", testEvents.head)
store.store("project1", "collection2", testEvents(1))
// get the handle map
val handleMap: TrieMap[String, ListBuffer[Long]] = store.getHandles("project1")
(handleMap must not beNull)
handleMap.size must beEqualTo(2)
// get the lists of handles
var handles1: ListBuffer[Long] = handleMap.getOrElse("collection1", null)
(handles1 must not beNull)
handles1.size must beEqualTo(1)
var handles2: ListBuffer[Long] = handleMap.getOrElse("collection2", null)
(handles2 must not beNull)
handles2.size must beEqualTo(1)
// validate the actual events
store.get(handles1.head) must beEqualTo(testEvents.head)
store.get(handles2.head) must beEqualTo(testEvents(1))
}
"get handles with no events" in new EventStoreSetupTeardown {
val handleMap: TrieMap[String, ListBuffer[Long]] = store.getHandles("project1")
(handleMap must not beNull)
handleMap.size must beEqualTo(0)
}
"get handles for multiple projects" in new EventStoreSetupTeardown {
// add a couple events to the store in different projects
store.store("project1", "collection1", testEvents.head)
store.store("project1", "collection2", testEvents(1))
store.store("project2", "collection3", testEvents(2))
store.store("project2", "collection3", testEvents(3))
// get and validate the handle map for project 1
var handleMap: TrieMap[String, ListBuffer[Long]] = store.getHandles("project1")
(handleMap must not beNull)
handleMap.size must beEqualTo(2)
handleMap.getOrElse("collection1", null).size must beEqualTo(1)
handleMap.getOrElse("collection2", null).size must beEqualTo(1)
// get and validate the handle map for project 2
handleMap = store.getHandles("project2")
(handleMap must not beNull)
handleMap.size must beEqualTo(1)
handleMap.getOrElse("collection3", null).size must beEqualTo(2)
}
}
}
|
ches/KeenClient-Scala
|
src/test/scala/EventStoreSpecBase.scala
|
Scala
|
mit
| 4,197
|
package xyz.ariwaranosai.leancloud
/**
* Created by ariwaranosai on 16/9/8.
*
*/
abstract class RequestMethod(val cmd: String)
object RequestMethod {
case object GET extends RequestMethod("GET")
case object PUT extends RequestMethod("PUT")
case object DELETE extends RequestMethod("DELETE")
case object POST extends RequestMethod("POST")
}
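// Hedged usage sketch (not part of the original file): dispatching on a
// RequestMethod and reading its wire command string.
object RequestMethodUsageSketch {
  def describe(m: RequestMethod): String = m match {
    case RequestMethod.GET    => s"read (${m.cmd})"
    case RequestMethod.POST   => s"create (${m.cmd})"
    case RequestMethod.PUT    => s"update (${m.cmd})"
    case RequestMethod.DELETE => s"delete (${m.cmd})"
    case other                => other.cmd // fall back to the raw command
  }
}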
|
ariwaranosai/Hikikumari
|
leancloud/src/main/scala/xyz/ariwaranosai/leancloud/RequestMethod.scala
|
Scala
|
mit
| 357
|
package net.categoricaldata.category
import net.tqft.toolkit.collections.NonStrictNaturalNumbers
import net.categoricaldata.sets._
/**
* A LocallyFinitelyGeneratedCategory may have infinitely many objects, but each object sits at some integer level,
* and there are only finitely many objects at each level. Otherwise, the levels are completely ignored; in particular,
* they do not provide a grading.
*
* Each pair of objects has a finite set of 'generators'. This means that every morphism between two objects
* can be written as some composition of 'generators' between some chain of objects (with no restrictions on the levels).
*/
trait LocallyFinitelyGeneratedCategory extends SmallCategory { lfgCategory =>
override type M = PathEquivalenceClass
type G
type Path = net.categoricaldata.category.Path[O, G]
protected implicit def path2RichPath(path: Path) = new RichPath(path)
protected class RichPath(path: Path) {
def subpath(i: Int, j: Int) = {
val morphisms = path.morphisms.slice(i, j)
val (source, target) = if (morphisms.isEmpty) {
if (i == 0) {
val s = path.source
(s, s)
} else {
val t = generatorTarget(path.morphisms(i - 1))
(t, t)
}
} else {
(generatorSource(morphisms.head), generatorTarget(morphisms.last))
}
Path(source, target, morphisms)
}
}
val minimumLevel: Int
def generatorSource(g: G): O
def generatorTarget(g: G): O
def objectsAtLevel(k: Int): List[O]
def objectSet: FSet = {
new FSet {
def toIterable = NonStrictNaturalNumbers.flatMap(x => Set(x, -x)).flatMap(x => objectsAtLevel(x))
def sizeIfFinite = None
}
}
def generators(source: O, target: O): List[G]
def generatorsFrom(source: O): List[G]
def generatorsTo(target: O): List[G]
implicit def generatorAsPath(g: G) = Path(generatorSource(g), generatorTarget(g), g :: Nil)
implicit def pathAsMorphism(p: Path) = PathEquivalenceClass(p)
implicit def generatorAsMorphism(g: G): M = pathAsMorphism(generatorAsPath(g))
override def compose(m1: M, m2: M) = PathEquivalenceClass(m1.representative andThen m2.representative)
override def source(m: M): O = m.representative.source
override def target(m: M): O = m.representative.target
override def identity(o: O) = PathEquivalenceClass(Path(o, o, Nil))
case class PathEquivalenceClass(representative: Path) {
// sanity check
// representative.morphisms.headOption.map(g => require(generatorSource(g) == representative.source))
// representative.morphisms.lastOption.map(g => require(generatorTarget(g) == representative.target))
// if (representative.morphisms.nonEmpty) {
// for ((a, b) <- representative.morphisms zip representative.morphisms.tail) {
// require(generatorTarget(a) == generatorSource(b))
// }
// }
override def equals(other: Any) = {
other match {
case other: LocallyFinitelyGeneratedCategory#PathEquivalenceClass => pathEquality(representative, other.representative.asInstanceOf[Path])
case _ => false
}
}
override def hashCode = pathHashCode(representative)
override def toString = representative.toString
}
def pathEquality(path1: Path, path2: Path): Boolean
/**
* The only constraint on pathHashCode is that equal paths must give the same result.
* To the extent possible, unequal paths should give different results, as long as the calculation stays cheap.
* Categories with normal forms override this automatically.
*/
def pathHashCode(path: Path): Int = 0
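// Hedged sketch (not part of the original file): a category with a normal form
// could satisfy the contract above by hashing the normalized word, e.g.
//   override def pathHashCode(path: Path): Int = normalForm(path).morphisms.hashCode
// where `normalForm` is a hypothetical function mapping equal paths to one
// identical representative.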
def wordsOfLengthFrom(k: Int)(source: O): List[Path] = {
k match {
case 0 => {
List(Path(source, source, Nil))
}
case 1 => generatorsFrom(source).map(generatorAsPath _)
case _ => for (Path(_, target, morphisms) <- wordsOfLengthFrom(k - 1)(source); g <- generatorsFrom(target)) yield Path(source, generatorTarget(g), morphisms ::: List(g))
}
}
def wordsOfLength(k: Int)(source: O, target: O): List[Path] = wordsOfLengthFrom(k)(source).filter(_.target == target)
def wordsFrom(source: O) = (for (k <- NonStrictNaturalNumbers) yield wordsOfLengthFrom(k)(source)).takeWhile(_.nonEmpty).flatten
def words(source: O, target: O) = wordsFrom(source).filter(_.target == target)
def wordsUpToLength(k: Int)(source: O, target: O): List[Path] = for (n <- (0 to k).toList; w <- wordsOfLength(n)(source, target)) yield w
def wordsUpToLengthFrom(k: Int)(source: O): List[Path] = for (n <- (0 to k).toList; w <- wordsOfLengthFrom(n)(source)) yield w
def morphismsUpToLength(k: Int)(source: O, target: O): Set[M] = {
wordsUpToLength(k)(source, target).map(pathAsMorphism(_)).toSet
}
// TODO this is very inefficient, we probably should memoize some results.
def morphismsOfLength(k: Int)(source: O, target: O): Set[M] = {
morphismsUpToLength(k)(source, target) -- morphismsUpToLength(k - 1)(source, target)
}
// if there are infinitely many morphisms from source to anywhere, this won't terminate.
def morphisms(source: O, target: O): Iterable[M] = {
var morphisms = scala.collection.mutable.Set[M]()
def p(m: M) = {
if (morphisms.contains(m)) {
false
} else {
morphisms += m
true
}
}
(for (k <- NonStrictNaturalNumbers) yield {
wordsOfLengthFrom(k)(source).map(pathAsMorphism(_)).filter(p _)
}).takeWhile(_.nonEmpty).flatten.filter(lfgCategory.target(_) == target)
}
trait SameObjects { self: LocallyFinitelyGeneratedCategory =>
override type O = lfgCategory.O
}
trait ReversedGenerators { self: LocallyFinitelyGeneratedCategory =>
case class Reverse(g: lfgCategory.G)
override type G = Reverse
}
trait OppositeLocallyFinitelyGeneratedCategory extends LocallyFinitelyGeneratedCategory with SameObjects {
def reverseGenerator(g: lfgCategory.G): G
def unreverseGenerator(g: G): lfgCategory.G
def reverse(m: lfgCategory.M): M = m match {
case lfgCategory.PathEquivalenceClass(Path(source, target, generators)) => PathEquivalenceClass(Path(target, source, generators.reverse.map(reverseGenerator(_))))
}
def unreverse(m: M): lfgCategory.M = m match {
case PathEquivalenceClass(Path(source, target, generators)) => lfgCategory.PathEquivalenceClass(Path(target, source, generators.reverse.map(unreverseGenerator(_))))
}
// reverse all the levels!
override def objectsAtLevel(k: Int) = lfgCategory.objectsAtLevel(-k)
override def generators(source: O, target: O) = lfgCategory.generators(target, source).map(reverseGenerator(_))
override def generatorsTo(target: O) = lfgCategory.generatorsFrom(target).map(reverseGenerator(_))
override def generatorsFrom(source: O) = lfgCategory.generatorsTo(source).map(reverseGenerator(_))
override def generatorSource(g: G) = lfgCategory.generatorTarget(unreverseGenerator(g))
override def generatorTarget(g: G) = lfgCategory.generatorSource(unreverseGenerator(g))
override def pathEquality(p1: Path, p2: Path) = lfgCategory.pathEquality(unreverse(p1).representative, unreverse(p2).representative)
}
// TODO ideally, the return value would just be 'LocallyFinitelyGeneratedCategory with SameObjects with ReversedGenerators'
override val opposite: OppositeLocallyFinitelyGeneratedCategory
protected trait Wrapper extends LocallyFinitelyGeneratedCategory {
override type O = lfgCategory.O
override type G = lfgCategory.G
override val minimumLevel = lfgCategory.minimumLevel
override def objectsAtLevel(k: Int) = lfgCategory.objectsAtLevel(k)
override def generators(s: O, t: O) = lfgCategory.generators(s, t)
override def generatorSource(g: G) = lfgCategory.generatorSource(g)
override def generatorTarget(g: G) = lfgCategory.generatorTarget(g)
override def pathEquality(p1: Path, p2: Path) = lfgCategory.pathEquality(p1, p2)
}
protected abstract class FullSubcategory(val spannedBy: List[O]) extends Wrapper with FinitelyGeneratedCategory {
private val objectsAtLevelMap: Map[Int, List[O]] = {
case class Accumulator(k: Int, map: Map[Int, List[O]], remaining: List[O]) {
def next = remaining.partition(lfgCategory.objectsAtLevel(k).contains(_)) match {
case (found, notfound) => Accumulator(k + 1, map + (k -> found), notfound)
}
def finish: Map[Int, List[O]] = if (remaining.isEmpty) {
map
} else {
next.finish
}
}
Accumulator(minimumLevel, Map(), spannedBy).finish
}
override def objectsAtLevel(k: Int) = objectsAtLevelMap.get(k).getOrElse(Nil)
override val maximumLevel = (objectsAtLevelMap.keySet + minimumLevel).max
}
private class ConcreteFullCategory(spannedBy: List[O]) extends FullSubcategory(spannedBy) with FinitelyGeneratedCategory.StandardFunctorsToSet
protected class FullSubcategoryInclusion(spannedBy: List[O]) extends functor.withFinitelyGeneratedSource.withLocallyFinitelyGeneratedTarget {
override val source: FullSubcategory = new ConcreteFullCategory(spannedBy)
override val target: lfgCategory.type = lfgCategory
override def onObjects(o: source.O) = o
override def onGenerators(g: source.G) = g
}
def fullSubcategoryInclusion(spannedBy: List[O]): functor.withFinitelyGeneratedSource.withLocallyFinitelyGeneratedTarget = new FullSubcategoryInclusion(spannedBy)
def fullSubcategory(spannedBy: List[O]): FinitelyGeneratedCategory = fullSubcategoryInclusion(spannedBy).source
private trait Truncation extends Wrapper with FinitelyGeneratedCategory {
override def objectsAtLevel(k: Int) = {
if (k <= maximumLevel) {
lfgCategory.objectsAtLevel(k)
} else {
Nil
}
}
}
private class ConcreteTruncation(override val maximumLevel: Int) extends Truncation with FinitelyGeneratedCategory.StandardFunctorsToSet
private class TruncationFunctor(maximumLevel: Int) extends functor.withFinitelyGeneratedSource.withLocallyFinitelyGeneratedTarget {
override val source: Truncation = new ConcreteTruncation(maximumLevel)
override val target: lfgCategory.type = lfgCategory
override def onObjects(o: source.O) = o
override def onGenerators(g: source.G) = g
}
def truncationFunctorAtLevel(maximumLevel: Int): functor.withFinitelyGeneratedSource.withLocallyFinitelyGeneratedTarget = new TruncationFunctor(maximumLevel)
def truncateAtLevel(maximumLevel: Int): FinitelyGeneratedCategory = truncationFunctorAtLevel(maximumLevel).source
trait FunctorToSet extends super.FunctorToSet with Functor.withLocallyFinitelyGeneratedSource { functorToSet =>
// If the following line is commented out, things still compile, but we get AbstractMethodError everywhere:
override val source: lfgCategory.type = lfgCategory
override def equals(other: Any): Boolean = {
if (this eq other.asInstanceOf[AnyRef]) {
true
} else {
other match {
case other: LocallyFinitelyGeneratedCategory#FunctorToSet => {
if (functorToSet.source != other.source) return false
throw new UnsupportedOperationException
}
case _ => false
}
}
}
trait CoCone {
val terminalSet: FSet
abstract class coConeFunction(o: O) extends FFunction {
override val source = functorToSet.onObjects(o)
override val target = terminalSet
}
def functionToTerminalSet(o: O): coConeFunction
}
trait CoConeMap extends { coConeMap =>
val source: CoCone
val target: CoCone
trait TerminalFunction extends FFunction {
override val source = coConeMap.source.terminalSet
override val target = coConeMap.target.terminalSet
}
val terminalFunction: TerminalFunction
}
trait Cone extends {
val initialSet: FSet
abstract class coneFunction(o: O) extends FFunction {
override val source = initialSet
override val target = functorToSet.onObjects(o)
}
def functionFromInitialSet(o: O): coneFunction
}
trait ConeMap extends { coneMap =>
val source: Cone
val target: Cone
trait InitialFunction extends FFunction {
override val source = coneMap.source.initialSet
override val target = coneMap.target.initialSet
}
val initialFunction: InitialFunction
}
trait Cones extends Category {
override type O = Cone
override type M = ConeMap
override def identity(c: Cone) = ???
override def source(c: ConeMap) = c.source
override def target(c: ConeMap) = c.target
override def compose(c1: ConeMap, c2: ConeMap) = new ConeMap {
override val source = c1.source
override val target = c2.target
override val initialFunction = new InitialFunction {
override def toFunction = c1.initialFunction.toFunction andThen c2.initialFunction.toFunction
}
}
}
trait CoCones extends Category {
override type O = CoCone
override type M = CoConeMap
override def identity(c: CoCone) = ???
override def source(c: CoConeMap) = c.source
override def target(c: CoConeMap) = c.target
override def compose(c1: CoConeMap, c2: CoConeMap) = new CoConeMap {
override val source = c1.source
override val target = c2.target
override val terminalFunction = new TerminalFunction {
override def toFunction = c1.terminalFunction.toFunction andThen c2.terminalFunction.toFunction
}
}
}
// FIXME these seem to drastically increase compile time; investigate
// def limitApproximation(n: Int) = truncationFunctorAtLevel(n).pullback(functorToSet).limit
// def colimitApproximation(n: Int) = truncationFunctorAtLevel(n).pullback(functorToSet).colimit
}
trait FunctorsToSet extends super.FunctorsToSet {
override type O <: lfgCategory.FunctorToSet
override type M <: lfgCategory.NaturalTransformationToSet
}
override type D <: FunctorsToSet
class Yoneda extends Functor.withLocallyFinitelyGeneratedSource {
class YonedaFunctor(s: lfgCategory.O) extends FunctorToSet {
override def onObjects(t: lfgCategory.O): FSet = morphisms(s, t)
override def onGenerators(g: lfgCategory.G) = FFunction(onObjects(generatorSource(g)), onObjects(generatorTarget(g)), { m: lfgCategory.M => compose(m, generatorAsMorphism(g)) })
}
class YonedaNaturalTransformation(g: lfgCategory.opposite.G) extends NaturalTransformationToSet {
override val source = functorsToSet.internalize(new YonedaFunctor(opposite.generatorSource(g)))
override val target = functorsToSet.internalize(new YonedaFunctor(opposite.generatorTarget(g)))
override def apply(t: lfgCategory.O) = FFunction(source.onObjects(t), target.onObjects(t), { m: lfgCategory.M => compose(generatorAsMorphism(opposite.unreverseGenerator(g)), m) })
}
override val source: lfgCategory.opposite.type = lfgCategory.opposite
override val target = functorsToSet
override def onObjects(o: source.O) = target.internalize(new YonedaFunctor(o))
override def onGenerators(g: source.G) = target.internalize(new YonedaNaturalTransformation(g))
}
// FIXME changing this to 'object yoneda extends' results in IllegalAccessErrors at runtime.
lazy val yoneda = new Yoneda {}
}
object LocallyFinitelyGeneratedCategory {
trait CachingGenerators extends LocallyFinitelyGeneratedCategory { lfgCategory =>
import net.tqft.toolkit.functions.Memo
private val generatorsCache = Memo({ (s: lfgCategory.O, t: lfgCategory.O) => super.generators(s, t) })
abstract override def generators(s: lfgCategory.O, t: lfgCategory.O) = generatorsCache(s, t)
}
}
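// Hedged usage sketch (not part of the original file): memoizing generator
// lookups by stacking the trait onto a concrete category; `MyConcreteCategory`
// is a hypothetical LocallyFinitelyGeneratedCategory implementation.
// val cached = new MyConcreteCategory with LocallyFinitelyGeneratedCategory.CachingGenerators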
|
JasonGross/categoricaldata
|
src/main/scala/net/categoricaldata/category/LocallyFinitelyGeneratedCategory.scala
|
Scala
|
mit
| 15,794
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.{File, PrintWriter}
import java.net.URI
import java.util.TimeZone
import java.util.concurrent.TimeUnit
import scala.collection.mutable
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.CatalogColumnStat
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.util.{DateTimeTestUtils, DateTimeUtils}
import org.apache.spark.sql.catalyst.util.DateTimeTestUtils.{withDefaultTimeZone, PST, UTC}
import org.apache.spark.sql.catalyst.util.DateTimeUtils.{getZoneId, TimeZoneUTC}
import org.apache.spark.sql.functions.timestamp_seconds
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.test.SQLTestData.ArrayData
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
/**
* End-to-end suite testing statistics collection and use on both entire table and columns.
*/
class StatisticsCollectionSuite extends StatisticsCollectionTestBase with SharedSparkSession {
import testImplicits._
test("estimates the size of a limit 0 on outer join") {
withTempView("test") {
Seq(("one", 1), ("two", 2), ("three", 3), ("four", 4)).toDF("k", "v")
.createOrReplaceTempView("test")
val df1 = spark.table("test")
val df2 = spark.table("test").limit(0)
val df = df1.join(df2, Seq("k"), "left")
val sizes = df.queryExecution.analyzed.collect { case g: Join =>
g.stats.sizeInBytes
}
assert(sizes.size === 1, s"number of Join nodes is wrong:\n ${df.queryExecution}")
assert(sizes.head === BigInt(128),
s"expected exact size 96 for table 'test', got: ${sizes.head}")
}
}
test("analyzing views is not supported") {
def assertAnalyzeUnsupported(analyzeCommand: String): Unit = {
val err = intercept[AnalysisException] {
sql(analyzeCommand)
}
assert(err.message.contains("ANALYZE TABLE is not supported"))
}
val tableName = "tbl"
withTable(tableName) {
spark.range(10).write.saveAsTable(tableName)
val viewName = "view"
withView(viewName) {
sql(s"CREATE VIEW $viewName AS SELECT * FROM $tableName")
assertAnalyzeUnsupported(s"ANALYZE TABLE $viewName COMPUTE STATISTICS")
assertAnalyzeUnsupported(s"ANALYZE TABLE $viewName COMPUTE STATISTICS FOR COLUMNS id")
}
}
}
test("statistics collection of a table with zero column") {
val table_no_cols = "table_no_cols"
withTable(table_no_cols) {
val rddNoCols = sparkContext.parallelize(1 to 10).map(_ => Row.empty)
val dfNoCols = spark.createDataFrame(rddNoCols, StructType(Seq.empty))
dfNoCols.write.format("json").saveAsTable(table_no_cols)
sql(s"ANALYZE TABLE $table_no_cols COMPUTE STATISTICS")
checkTableStats(table_no_cols, hasSizeInBytes = true, expectedRowCounts = Some(10))
}
}
test("analyze empty table") {
val table = "emptyTable"
withTable(table) {
val df = Seq.empty[Int].toDF("key")
df.write.format("json").saveAsTable(table)
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS noscan")
val fetchedStats1 = checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = None)
assert(fetchedStats1.get.sizeInBytes == 0)
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS")
val fetchedStats2 = checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = Some(0))
assert(fetchedStats2.get.sizeInBytes == 0)
val expectedColStat =
"key" -> CatalogColumnStat(Some(0), None, None, Some(0),
Some(IntegerType.defaultSize), Some(IntegerType.defaultSize))
// There won't be a histogram for an empty column.
Seq("true", "false").foreach { histogramEnabled =>
withSQLConf(SQLConf.HISTOGRAM_ENABLED.key -> histogramEnabled) {
checkColStats(df, mutable.LinkedHashMap(expectedColStat))
}
}
}
}
test("analyze column command - unsupported types and invalid columns") {
val tableName = "column_stats_test1"
withTable(tableName) {
Seq(ArrayData(Seq(1, 2, 3), Seq(Seq(1, 2, 3)))).toDF().write.saveAsTable(tableName)
// Test unsupported data types
val err1 = intercept[AnalysisException] {
sql(s"ANALYZE TABLE $tableName COMPUTE STATISTICS FOR COLUMNS data")
}
assert(err1.message.contains("does not support statistics collection"))
// Test invalid columns
val err2 = intercept[AnalysisException] {
sql(s"ANALYZE TABLE $tableName COMPUTE STATISTICS FOR COLUMNS some_random_column")
}
assert(err2.message.contains("does not exist"))
}
}
test("test table-level statistics for data source table") {
val tableName = "tbl"
withTable(tableName) {
sql(s"CREATE TABLE $tableName(i INT, j STRING) USING parquet")
Seq(1 -> "a", 2 -> "b").toDF("i", "j").write.mode("overwrite").insertInto(tableName)
// noscan won't count the number of rows
sql(s"ANALYZE TABLE $tableName COMPUTE STATISTICS noscan")
checkTableStats(tableName, hasSizeInBytes = true, expectedRowCounts = None)
// without noscan, we count the number of rows
sql(s"ANALYZE TABLE $tableName COMPUTE STATISTICS")
checkTableStats(tableName, hasSizeInBytes = true, expectedRowCounts = Some(2))
}
}
test("SPARK-15392: DataFrame created from RDD should not be broadcasted") {
val rdd = sparkContext.range(1, 100).map(i => Row(i, i))
val df = spark.createDataFrame(rdd, new StructType().add("a", LongType).add("b", LongType))
assert(df.queryExecution.analyzed.stats.sizeInBytes >
spark.sessionState.conf.autoBroadcastJoinThreshold)
assert(df.selectExpr("a").queryExecution.analyzed.stats.sizeInBytes >
spark.sessionState.conf.autoBroadcastJoinThreshold)
}
test("column stats round trip serialization") {
// Make sure serializing and then deserializing the column stats returns the original data
val df = data.toDF(stats.keys.toSeq :+ "carray" : _*)
Seq(stats, statsWithHgms).foreach { s =>
s.zip(df.schema).foreach { case ((k, v), field) =>
withClue(s"column $k with type ${field.dataType}") {
val roundtrip = CatalogColumnStat.fromMap("table_is_foo", field.name, v.toMap(k))
assert(roundtrip == Some(v))
}
}
}
}
test("SPARK-33812: column stats round trip serialization with splitting histogram property") {
withSQLConf(SQLConf.HIVE_TABLE_PROPERTY_LENGTH_THRESHOLD.key -> "10") {
statsWithHgms.foreach { case (k, v) =>
val roundtrip = CatalogColumnStat.fromMap("t", k, v.toMap(k))
assert(roundtrip == Some(v))
}
}
}
test("analyze column command - result verification") {
// (data.head.productArity - 1) because the last column does not support stats collection.
assert(stats.size == data.head.productArity - 1)
val df = data.toDF(stats.keys.toSeq :+ "carray" : _*)
checkColStats(df, stats)
// test column stats with histograms
withSQLConf(SQLConf.HISTOGRAM_ENABLED.key -> "true", SQLConf.HISTOGRAM_NUM_BINS.key -> "2") {
checkColStats(df, statsWithHgms)
}
}
test("column stats collection for null columns") {
val dataTypes: Seq[(DataType, Int)] = Seq(
BooleanType, ByteType, ShortType, IntegerType, LongType,
DoubleType, FloatType, DecimalType.SYSTEM_DEFAULT,
StringType, BinaryType, DateType, TimestampType
).zipWithIndex
val df = sql("select " + dataTypes.map { case (tpe, idx) =>
s"cast(null as ${tpe.sql}) as col$idx"
}.mkString(", "))
val expectedColStats = dataTypes.map { case (tpe, idx) =>
(s"col$idx", CatalogColumnStat(Some(0), None, None, Some(1),
Some(tpe.defaultSize.toLong), Some(tpe.defaultSize.toLong)))
}
// There won't be histograms for null columns.
Seq("true", "false").foreach { histogramEnabled =>
withSQLConf(SQLConf.HISTOGRAM_ENABLED.key -> histogramEnabled) {
checkColStats(df, mutable.LinkedHashMap(expectedColStats: _*))
}
}
}
test("SPARK-25028: column stats collection for null partitioning columns") {
val table = "analyze_partition_with_null"
withTempDir { dir =>
withTable(table) {
sql(s"""
|CREATE TABLE $table (value string, name string)
|USING PARQUET
|PARTITIONED BY (name)
|LOCATION '${dir.toURI}'""".stripMargin)
val df = Seq(("a", null), ("b", null)).toDF("value", "name")
df.write.mode("overwrite").insertInto(table)
sql(s"ANALYZE TABLE $table PARTITION (name) COMPUTE STATISTICS")
val partitions = spark.sessionState.catalog.listPartitions(TableIdentifier(table))
assert(partitions.head.stats.get.rowCount.get == 2)
}
}
}
test("number format in statistics") {
val numbers = Seq(
BigInt(0) -> (("0.0 B", "0")),
BigInt(100) -> (("100.0 B", "100")),
BigInt(2047) -> (("2047.0 B", "2.05E+3")),
BigInt(2048) -> (("2.0 KiB", "2.05E+3")),
BigInt(3333333) -> (("3.2 MiB", "3.33E+6")),
BigInt(4444444444L) -> (("4.1 GiB", "4.44E+9")),
BigInt(5555555555555L) -> (("5.1 TiB", "5.56E+12")),
BigInt(6666666666666666L) -> (("5.9 PiB", "6.67E+15")),
BigInt(1L << 10) * (1L << 60) -> (("1024.0 EiB", "1.18E+21")),
BigInt(1L << 11) * (1L << 60) -> (("2.36E+21 B", "2.36E+21"))
)
numbers.foreach { case (input, (expectedSize, expectedRows)) =>
val stats = Statistics(sizeInBytes = input, rowCount = Some(input))
val expectedString = s"sizeInBytes=$expectedSize, rowCount=$expectedRows"
assert(stats.simpleString == expectedString)
}
}
test("change stats after set location command") {
val table = "change_stats_set_location_table"
val tableLoc = new File(spark.sessionState.catalog.defaultTablePath(TableIdentifier(table)))
Seq(false, true).foreach { autoUpdate =>
withSQLConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED.key -> autoUpdate.toString) {
withTable(table) {
spark.range(100).select($"id", $"id" % 5 as "value").write.saveAsTable(table)
// analyze to get initial stats
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS FOR COLUMNS id, value")
val fetched1 = checkTableStats(
table, hasSizeInBytes = true, expectedRowCounts = Some(100))
assert(fetched1.get.sizeInBytes > 0)
assert(fetched1.get.colStats.size == 2)
// set location command
val initLocation = spark.sessionState.catalog.getTableMetadata(TableIdentifier(table))
.storage.locationUri.get.toString
withTempDir { newLocation =>
sql(s"ALTER TABLE $table SET LOCATION '${newLocation.toURI.toString}'")
if (autoUpdate) {
val fetched2 = checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = None)
assert(fetched2.get.sizeInBytes == 0)
assert(fetched2.get.colStats.isEmpty)
// set back to the initial location
sql(s"ALTER TABLE $table SET LOCATION '$initLocation'")
val fetched3 = checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = None)
assert(fetched3.get.sizeInBytes == fetched1.get.sizeInBytes)
} else {
checkTableStats(table, hasSizeInBytes = false, expectedRowCounts = None)
// SPARK-19724: clean up the previous table location.
waitForTasksToFinish()
Utils.deleteRecursively(tableLoc)
}
}
}
}
}
}
test("change stats after insert command for datasource table") {
val table = "change_stats_insert_datasource_table"
Seq(false, true).foreach { autoUpdate =>
withSQLConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED.key -> autoUpdate.toString) {
withTable(table) {
sql(s"CREATE TABLE $table (i int, j string) USING PARQUET")
// analyze to get initial stats
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS FOR COLUMNS i, j")
val fetched1 = checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = Some(0))
assert(fetched1.get.sizeInBytes == 0)
assert(fetched1.get.colStats.size == 2)
// table lookup will make the table cached
spark.table(table)
assert(isTableInCatalogCache(table))
// insert into command
sql(s"INSERT INTO TABLE $table SELECT 1, 'abc'")
if (autoUpdate) {
val fetched2 = checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = None)
assert(fetched2.get.sizeInBytes > 0)
assert(fetched2.get.colStats.isEmpty)
} else {
checkTableStats(table, hasSizeInBytes = false, expectedRowCounts = None)
}
// check that tableRelationCache inside the catalog was invalidated after insert
assert(!isTableInCatalogCache(table))
}
}
}
}
test("auto gather stats after insert command") {
val table = "change_stats_insert_datasource_table"
Seq(false, true).foreach { autoUpdate =>
withSQLConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED.key -> autoUpdate.toString) {
withTable(table) {
sql(s"CREATE TABLE $table (i int, j string) USING PARQUET")
// insert into command
sql(s"INSERT INTO TABLE $table SELECT 1, 'abc'")
val stats = getCatalogTable(table).stats
if (autoUpdate) {
assert(stats.isDefined)
assert(stats.get.sizeInBytes >= 0)
} else {
assert(stats.isEmpty)
}
}
}
}
}
test("invalidation of tableRelationCache after inserts") {
val table = "invalidate_catalog_cache_table"
Seq(false, true).foreach { autoUpdate =>
withSQLConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED.key -> autoUpdate.toString) {
withTable(table) {
spark.range(100).write.saveAsTable(table)
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS")
spark.table(table)
val initialSizeInBytes = getTableFromCatalogCache(table).stats.sizeInBytes
spark.range(100).write.mode(SaveMode.Append).saveAsTable(table)
spark.table(table)
assert(getTableFromCatalogCache(table).stats.sizeInBytes == 2 * initialSizeInBytes)
}
}
}
}
test("invalidation of tableRelationCache after alter table add partition") {
val table = "invalidate_catalog_cache_table"
Seq(false, true).foreach { autoUpdate =>
withSQLConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED.key -> autoUpdate.toString) {
withTempDir { dir =>
withTable(table) {
val path = dir.getCanonicalPath
sql(s"""
|CREATE TABLE $table (col1 int, col2 int)
|USING PARQUET
|PARTITIONED BY (col2)
|LOCATION '${dir.toURI}'""".stripMargin)
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS")
spark.table(table)
assert(getTableFromCatalogCache(table).stats.sizeInBytes == 0)
spark.catalog.recoverPartitions(table)
val df = Seq((1, 2), (1, 2)).toDF("col2", "col1")
df.write.parquet(s"$path/col2=1")
sql(s"ALTER TABLE $table ADD PARTITION (col2=1) LOCATION '${dir.toURI}'")
spark.table(table)
val cachedTable = getTableFromCatalogCache(table)
val cachedTableSizeInBytes = cachedTable.stats.sizeInBytes
val defaultSizeInBytes = conf.defaultSizeInBytes
if (autoUpdate) {
assert(cachedTableSizeInBytes != defaultSizeInBytes && cachedTableSizeInBytes > 0)
} else {
assert(cachedTableSizeInBytes == defaultSizeInBytes)
}
}
}
}
}
}
test("Simple queries must be working, if CBO is turned on") {
withSQLConf(SQLConf.CBO_ENABLED.key -> "true") {
withTable("TBL1", "TBL") {
import org.apache.spark.sql.functions._
val df = spark.range(1000L).select('id,
'id * 2 as "FLD1",
'id * 12 as "FLD2",
lit(null).cast(DoubleType) + 'id as "fld3")
df.write
.mode(SaveMode.Overwrite)
.bucketBy(10, "id", "FLD1", "FLD2")
.sortBy("id", "FLD1", "FLD2")
.saveAsTable("TBL")
sql("ANALYZE TABLE TBL COMPUTE STATISTICS ")
sql("ANALYZE TABLE TBL COMPUTE STATISTICS FOR COLUMNS ID, FLD1, FLD2, FLD3")
val df2 = spark.sql(
"""
|SELECT t1.id, t1.fld1, t1.fld2, t1.fld3
|FROM tbl t1
|JOIN tbl t2 on t1.id=t2.id
|WHERE t1.fld3 IN (-123.23,321.23)
""".stripMargin)
df2.createTempView("TBL2")
sql("SELECT * FROM tbl2 WHERE fld3 IN (0,1) ").queryExecution.executedPlan
}
}
}
test("store and retrieve column stats in different time zones") {
val (start, end) = (0, TimeUnit.DAYS.toSeconds(2))
def checkTimestampStats(
t: DataType,
srcTimeZone: TimeZone,
dstTimeZone: TimeZone)(checker: ColumnStat => Unit): Unit = {
val table = "time_table"
val column = "T"
val original = TimeZone.getDefault
try {
withTable(table) {
TimeZone.setDefault(srcTimeZone)
spark.range(start, end)
.select(timestamp_seconds($"id").cast(t).as(column))
.write.saveAsTable(table)
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS FOR COLUMNS $column")
TimeZone.setDefault(dstTimeZone)
val stats = getCatalogTable(table)
.stats.get.colStats(column).toPlanStat(column, t)
checker(stats)
}
} finally {
TimeZone.setDefault(original)
}
}
DateTimeTestUtils.outstandingZoneIds.foreach { zid =>
val timeZone = TimeZone.getTimeZone(zid)
checkTimestampStats(DateType, TimeZoneUTC, timeZone) { stats =>
assert(stats.min.get.asInstanceOf[Int] == TimeUnit.SECONDS.toDays(start))
assert(stats.max.get.asInstanceOf[Int] == TimeUnit.SECONDS.toDays(end - 1))
}
checkTimestampStats(TimestampType, TimeZoneUTC, timeZone) { stats =>
assert(stats.min.get.asInstanceOf[Long] == TimeUnit.SECONDS.toMicros(start))
assert(stats.max.get.asInstanceOf[Long] == TimeUnit.SECONDS.toMicros(end - 1))
}
}
}
private def checkDescTimestampColStats(
tableName: String,
timestampColumn: String,
expectedMinTimestamp: String,
expectedMaxTimestamp: String): Unit = {
def extractColumnStatsFromDesc(statsName: String, rows: Array[Row]): String = {
rows.collect {
case r: Row if r.getString(0) == statsName =>
r.getString(1)
}.head
}
val descTsCol = sql(s"DESC FORMATTED $tableName $timestampColumn").collect()
assert(extractColumnStatsFromDesc("min", descTsCol) == expectedMinTimestamp)
assert(extractColumnStatsFromDesc("max", descTsCol) == expectedMaxTimestamp)
}
test("SPARK-38140: describe column stats (min, max) for timestamp column: desc results should " +
"be consistent with the written value if writing and desc happen in the same time zone") {
val zoneIdAndOffsets =
Seq((UTC, "+0000"), (PST, "-0800"), (getZoneId("Asia/Hong_Kong"), "+0800"))
zoneIdAndOffsets.foreach { case (zoneId, offset) =>
withDefaultTimeZone(zoneId) {
val table = "insert_desc_same_time_zone"
val tsCol = "timestamp_typed_col"
withTable(table) {
val minTimestamp = "make_timestamp(2022, 1, 1, 0, 0, 1.123456)"
val maxTimestamp = "make_timestamp(2022, 1, 3, 0, 0, 2.987654)"
sql(s"CREATE TABLE $table ($tsCol Timestamp) USING parquet")
sql(s"INSERT INTO $table VALUES $minTimestamp, $maxTimestamp")
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS FOR ALL COLUMNS")
checkDescTimestampColStats(
tableName = table,
timestampColumn = tsCol,
expectedMinTimestamp = "2022-01-01 00:00:01.123456 " + offset,
expectedMaxTimestamp = "2022-01-03 00:00:02.987654 " + offset)
}
}
}
}
test("SPARK-38140: describe column stats (min, max) for timestamp column: desc should show " +
"different results if writing in UTC and desc in other time zones") {
val table = "insert_desc_diff_time_zones"
val tsCol = "timestamp_typed_col"
withDefaultTimeZone(UTC) {
withTable(table) {
val minTimestamp = "make_timestamp(2022, 1, 1, 0, 0, 1.123456)"
val maxTimestamp = "make_timestamp(2022, 1, 3, 0, 0, 2.987654)"
sql(s"CREATE TABLE $table ($tsCol Timestamp) USING parquet")
sql(s"INSERT INTO $table VALUES $minTimestamp, $maxTimestamp")
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS FOR ALL COLUMNS")
checkDescTimestampColStats(
tableName = table,
timestampColumn = tsCol,
expectedMinTimestamp = "2022-01-01 00:00:01.123456 +0000",
expectedMaxTimestamp = "2022-01-03 00:00:02.987654 +0000")
TimeZone.setDefault(DateTimeUtils.getTimeZone("PST"))
checkDescTimestampColStats(
tableName = table,
timestampColumn = tsCol,
expectedMinTimestamp = "2021-12-31 16:00:01.123456 -0800",
expectedMaxTimestamp = "2022-01-02 16:00:02.987654 -0800")
TimeZone.setDefault(DateTimeUtils.getTimeZone("Asia/Hong_Kong"))
checkDescTimestampColStats(
tableName = table,
timestampColumn = tsCol,
expectedMinTimestamp = "2022-01-01 08:00:01.123456 +0800",
expectedMaxTimestamp = "2022-01-03 08:00:02.987654 +0800")
}
}
}
private def getStatAttrNames(tableName: String): Set[String] = {
val queryStats = spark.table(tableName).queryExecution.optimizedPlan.stats.attributeStats
queryStats.map(_._1.name).toSet
}
test("analyzes column statistics in cached query") {
withTempView("cachedQuery") {
sql(
"""CACHE TABLE cachedQuery AS
| SELECT c0, avg(c1) AS v1, avg(c2) AS v2
| FROM (SELECT id % 3 AS c0, id % 5 AS c1, 2 AS c2 FROM range(1, 30))
| GROUP BY c0
""".stripMargin)
// Analyzes one column in the cached logical plan
sql("ANALYZE TABLE cachedQuery COMPUTE STATISTICS FOR COLUMNS v1")
assert(getStatAttrNames("cachedQuery") === Set("v1"))
// Analyzes two more columns
sql("ANALYZE TABLE cachedQuery COMPUTE STATISTICS FOR COLUMNS c0, v2")
assert(getStatAttrNames("cachedQuery") === Set("c0", "v1", "v2"))
}
}
test("analyzes column statistics in cached local temporary view") {
withTempView("tempView") {
// Analyzes in a temporary view
sql("CREATE TEMPORARY VIEW tempView AS SELECT 1 id")
val errMsg = intercept[AnalysisException] {
sql("ANALYZE TABLE tempView COMPUTE STATISTICS FOR COLUMNS id")
}.getMessage
assert(errMsg.contains("Temporary view `tempView` is not cached for analyzing columns"))
// Cache the view then analyze it
sql("CACHE TABLE tempView")
assert(getStatAttrNames("tempView") !== Set("id"))
sql("ANALYZE TABLE tempView COMPUTE STATISTICS FOR COLUMNS id")
assert(getStatAttrNames("tempView") === Set("id"))
}
}
test("analyzes column statistics in cached global temporary view") {
withGlobalTempView("gTempView") {
val globalTempDB = spark.sharedState.globalTempViewManager.database
val errMsg1 = intercept[AnalysisException] {
sql(s"ANALYZE TABLE $globalTempDB.gTempView COMPUTE STATISTICS FOR COLUMNS id")
}.getMessage
assert(errMsg1.contains("Table or view not found: " +
s"$globalTempDB.gTempView"))
// Analyzes in a global temporary view
sql("CREATE GLOBAL TEMP VIEW gTempView AS SELECT 1 id")
val errMsg2 = intercept[AnalysisException] {
sql(s"ANALYZE TABLE $globalTempDB.gTempView COMPUTE STATISTICS FOR COLUMNS id")
}.getMessage
assert(errMsg2.contains(
s"Temporary view `$globalTempDB`.`gTempView` is not cached for analyzing columns"))
// Cache the view then analyze it
sql(s"CACHE TABLE $globalTempDB.gTempView")
assert(getStatAttrNames(s"$globalTempDB.gTempView") !== Set("id"))
sql(s"ANALYZE TABLE $globalTempDB.gTempView COMPUTE STATISTICS FOR COLUMNS id")
assert(getStatAttrNames(s"$globalTempDB.gTempView") === Set("id"))
}
}
test("analyzes column statistics in cached catalog view") {
withTempDatabase { database =>
sql(s"CREATE VIEW $database.v AS SELECT 1 c")
sql(s"CACHE TABLE $database.v")
assert(getStatAttrNames(s"$database.v") !== Set("c"))
sql(s"ANALYZE TABLE $database.v COMPUTE STATISTICS FOR COLUMNS c")
assert(getStatAttrNames(s"$database.v") === Set("c"))
}
}
test("analyzes table statistics in cached catalog view") {
def getTableStats(tableName: String): Statistics = {
spark.table(tableName).queryExecution.optimizedPlan.stats
}
withTempDatabase { database =>
sql(s"CREATE VIEW $database.v AS SELECT 1 c")
// Cache data eagerly by default, so this operation collects table stats
sql(s"CACHE TABLE $database.v")
val stats1 = getTableStats(s"$database.v")
assert(stats1.sizeInBytes > 0)
assert(stats1.rowCount === Some(1))
sql(s"UNCACHE TABLE $database.v")
// Cache data lazily, then analyze table stats
sql(s"CACHE LAZY TABLE $database.v")
val stats2 = getTableStats(s"$database.v")
assert(stats2.sizeInBytes === OneRowRelation().computeStats().sizeInBytes)
assert(stats2.rowCount === None)
sql(s"ANALYZE TABLE $database.v COMPUTE STATISTICS NOSCAN")
val stats3 = getTableStats(s"$database.v")
assert(stats3.sizeInBytes === OneRowRelation().computeStats().sizeInBytes)
assert(stats3.rowCount === None)
sql(s"ANALYZE TABLE $database.v COMPUTE STATISTICS")
val stats4 = getTableStats(s"$database.v")
assert(stats4.sizeInBytes === stats1.sizeInBytes)
assert(stats4.rowCount === Some(1))
}
}
test(s"CTAS should update statistics if ${SQLConf.AUTO_SIZE_UPDATE_ENABLED.key} is enabled") {
val tableName = "spark_27694"
Seq(false, true).foreach { updateEnabled =>
withSQLConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED.key -> updateEnabled.toString) {
withTable(tableName) {
// Create a data source table using the result of a query.
sql(s"CREATE TABLE $tableName USING parquet AS SELECT 'a', 'b'")
val catalogTable = getCatalogTable(tableName)
if (updateEnabled) {
assert(catalogTable.stats.nonEmpty)
} else {
assert(catalogTable.stats.isEmpty)
}
}
}
}
}
test("Metadata files and temporary files should not be counted as data files") {
withTempDir { tempDir =>
val tableName = "t1"
val stagingDirName = ".test-staging-dir"
val tableLocation = s"${tempDir.toURI}/$tableName"
withSQLConf(
SQLConf.AUTO_SIZE_UPDATE_ENABLED.key -> "true",
"hive.exec.stagingdir" -> stagingDirName) {
withTable("t1") {
sql(s"CREATE TABLE $tableName(c1 BIGINT) USING PARQUET LOCATION '$tableLocation'")
sql(s"INSERT INTO TABLE $tableName VALUES(1)")
val staging = new File(new URI(s"$tableLocation/$stagingDirName"))
Utils.tryWithResource(new PrintWriter(staging)) { stagingWriter =>
stagingWriter.write("12")
}
val metadata = new File(new URI(s"$tableLocation/_metadata"))
Utils.tryWithResource(new PrintWriter(metadata)) { metadataWriter =>
metadataWriter.write("1234")
}
sql(s"INSERT INTO TABLE $tableName VALUES(1)")
val stagingFileSize = staging.length()
val metadataFileSize = metadata.length()
val tableLocationSize = getDataSize(new File(new URI(tableLocation)))
val stats = checkTableStats(tableName, hasSizeInBytes = true, expectedRowCounts = None)
assert(stats.get.sizeInBytes === tableLocationSize - stagingFileSize - metadataFileSize)
}
}
}
}
Seq(true, false).foreach { caseSensitive =>
test(s"SPARK-30903: Fail fast on duplicate columns when analyze columns " +
s"- caseSensitive=$caseSensitive") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
val table = "test_table"
withTable(table) {
sql(s"CREATE TABLE $table (value string, name string) USING PARQUET")
val dupCol = if (caseSensitive) "value" else "VaLuE"
val errorMsg = intercept[AnalysisException] {
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS FOR COLUMNS value, name, $dupCol")
}.getMessage
assert(errorMsg.contains("Found duplicate column(s)"))
}
}
}
}
test("SPARK-34119: Keep necessary stats after PruneFileSourcePartitions") {
withTable("SPARK_34119") {
withSQLConf(SQLConf.CBO_ENABLED.key -> "true") {
sql(s"CREATE TABLE SPARK_34119 using parquet PARTITIONED BY (p) AS " +
"(SELECT id, CAST(id % 5 AS STRING) AS p FROM range(10))")
sql(s"ANALYZE TABLE SPARK_34119 COMPUTE STATISTICS FOR ALL COLUMNS")
checkOptimizedPlanStats(sql(s"SELECT id FROM SPARK_34119"),
160L,
Some(10),
Seq(ColumnStat(
distinctCount = Some(10),
min = Some(0),
max = Some(9),
nullCount = Some(0),
avgLen = Some(LongType.defaultSize),
maxLen = Some(LongType.defaultSize))))
checkOptimizedPlanStats(sql("SELECT id FROM SPARK_34119 WHERE p = '2'"),
32L,
Some(2),
Seq(ColumnStat(
distinctCount = Some(2),
min = Some(0),
max = Some(9),
nullCount = Some(0),
avgLen = Some(LongType.defaultSize),
maxLen = Some(LongType.defaultSize))))
}
}
}
test("SPARK-33687: analyze all tables in a specific database") {
withTempDatabase { database =>
spark.catalog.setCurrentDatabase(database)
withTempDir { dir =>
withTable("t1", "t2") {
spark.range(10).write.saveAsTable("t1")
sql(s"CREATE EXTERNAL TABLE t2 USING parquet LOCATION '${dir.toURI}' " +
"AS SELECT * FROM range(20)")
withView("v1", "v2") {
sql("CREATE VIEW v1 AS SELECT 1 c1")
sql("CREATE VIEW v2 AS SELECT 2 c2")
sql("CACHE TABLE v1")
sql("CACHE LAZY TABLE v2")
sql(s"ANALYZE TABLES IN $database COMPUTE STATISTICS NOSCAN")
checkTableStats("t1", hasSizeInBytes = true, expectedRowCounts = None)
checkTableStats("t2", hasSizeInBytes = true, expectedRowCounts = None)
assert(getCatalogTable("v1").stats.isEmpty)
checkOptimizedPlanStats(spark.table("v1"), 4, Some(1), Seq.empty)
checkOptimizedPlanStats(spark.table("v2"), 1, None, Seq.empty)
sql("ANALYZE TABLES COMPUTE STATISTICS")
checkTableStats("t1", hasSizeInBytes = true, expectedRowCounts = Some(10))
checkTableStats("t2", hasSizeInBytes = true, expectedRowCounts = Some(20))
checkOptimizedPlanStats(spark.table("v1"), 4, Some(1), Seq.empty)
checkOptimizedPlanStats(spark.table("v2"), 4, Some(1), Seq.empty)
}
}
}
}
val errMsg = intercept[AnalysisException] {
sql(s"ANALYZE TABLES IN db_not_exists COMPUTE STATISTICS")
}.getMessage
assert(errMsg.contains("Database 'db_not_exists' not found"))
}
}
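// A minimal sketch (e.g. pasted into spark-shell, with `spark` an active SparkSession) of the
// ANALYZE-then-inspect flow the suite above exercises; the table name `stats_demo` is illustrative.
spark.range(100).write.saveAsTable("stats_demo")
spark.sql("ANALYZE TABLE stats_demo COMPUTE STATISTICS FOR COLUMNS id")
val demoStats = spark.table("stats_demo").queryExecution.optimizedPlan.stats
println(s"sizeInBytes=${demoStats.sizeInBytes}, rowCount=${demoStats.rowCount}")
spark.sql("DROP TABLE stats_demo")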
|
vinodkc/spark
|
sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala
|
Scala
|
apache-2.0
| 32,697
|
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.tools.data
import com.beust.jcommander.{Parameter, ParameterException}
import org.geotools.data.DataStore
import org.joda.time.Period
import org.locationtech.geomesa.tools._
import org.locationtech.geomesa.tools.data.AgeOffCommand.AgeOffParams
import org.locationtech.geomesa.tools.utils.ParameterConverters.PeriodConverter
import org.locationtech.geomesa.tools.utils.Prompt
trait AgeOffCommand[DS <: DataStore] extends DataStoreCommand[DS] {
override val name = "configure-age-off"
override def params: AgeOffParams
override def execute(): Unit = {
if (Seq(params.set, params.remove, params.list).count(_ == true) != 1) {
throw new ParameterException("Must specify exactly one of 'list', 'set' or 'remove'")
} else if (params.set && params.expiry == null) {
throw new ParameterException("Must specify 'expiry' when setting age-off")
}
if (params.list) {
withDataStore(list(_, params.featureName))
} else if (params.set) {
if (params.dtgField == null) {
val confirm = Prompt.confirm(s"Configuring ingest-time-based age-off for schema '${params.featureName}' " +
s"with expiry ${params.expiry}. Continue? (y/n): ")
if (confirm) {
withDataStore(set(_, params.featureName, params.expiry))
}
} else {
val confirm = Prompt.confirm(s"Configuring attribute-based age-off for schema '${params.featureName}' " +
s"on field '${params.dtgField}' with expiry ${params.expiry}. Continue? (y/n): ")
if (confirm) {
withDataStore(set(_, params.featureName, params.dtgField, params.expiry))
}
}
} else {
val confirm = Prompt.confirm(s"Removing age-off for schema '${params.featureName}'. Continue? (y/n): ")
if (confirm) {
withDataStore(remove(_, params.featureName))
}
}
}
protected def list(ds: DS, featureName: String): Unit
protected def set(ds: DS, featureName: String, expiry: Period): Unit
protected def set(ds: DS, featureName: String, dtg: String, expiry: Period): Unit
protected def remove(ds: DS, featureName: String): Unit
}
object AgeOffCommand {
trait AgeOffParams extends RequiredTypeNameParam with OptionalDtgParam {
@Parameter(names = Array("-e", "--expiry"), description = "Duration before entries are aged-off, e.g. '1 day', '2 weeks and 1 hour', etc", converter = classOf[PeriodConverter])
var expiry: Period = _
@Parameter(names = Array("-l", "--list"), description = "List existing age-off for a simple feature type")
var list: Boolean = _
@Parameter(names = Array("-s", "--set"), description = "Set age-off for a simple feature type")
var set: Boolean = _
@Parameter(names = Array("-r", "--remove"), description = "Remove existing age-off for a simple feature type")
var remove: Boolean = _
}
}
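// A self-contained sketch of the "exactly one of list/set/remove" validation performed in
// execute() above; the helper name requireExactlyOne is hypothetical.
object ExactlyOneFlagSketch {
  def requireExactlyOne(flags: (String, Boolean)*): Unit = {
    val enabled = flags.collect { case (name, true) => name }
    require(enabled.size == 1,
      s"Must specify exactly one of ${flags.map(_._1).mkString("'", "', '", "'")}")
  }
  def main(args: Array[String]): Unit = {
    requireExactlyOne("list" -> true, "set" -> false, "remove" -> false) // passes
    // requireExactlyOne("list" -> true, "set" -> true, "remove" -> false) // would throw
  }
}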
|
ronq/geomesa
|
geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/data/AgeOffCommand.scala
|
Scala
|
apache-2.0
| 3,345
|
package slick.compiler
import slick.ast.TypeUtil.:@
import slick.ast._
import slick.util.SlickLogger
/** A standard skeleton for a code generator phase. */
abstract class CodeGen extends Phase {
val name = "codeGen"
override protected[this] lazy val logger = SlickLogger[CodeGen]
def apply(state: CompilerState): CompilerState = state.map(n => apply(n, state))
def apply(node: Node, state: CompilerState): Node =
ClientSideOp.mapResultSetMapping(node, keepType = true) { rsm =>
var nmap: Option[Node] = None
var compileMap: Option[Node] = Some(rsm.map)
val nfrom = ClientSideOp.mapServerSide(rsm.from, keepType = true) { ss =>
logger.debug("Compiling server-side and mapping with server-side:", ss)
val (nss, nmapOpt) = compileServerSideAndMapping(ss, compileMap, state)
nmapOpt match {
case Some(_) =>
nmap = nmapOpt
compileMap = None
case None =>
}
logger.debug("Compiled server-side to:", nss)
nss
}
rsm.copy(from = nfrom, map = nmap.get) :@ rsm.nodeType
}
def compileServerSideAndMapping(serverSide: Node, mapping: Option[Node], state: CompilerState): (Node, Option[Node])
/** Extract the source tree and type, after possible CollectionCast operations, from a tree */
def treeAndType(n: Node): (Node, Type) = n match {
case CollectionCast(ch, _) :@ tpe => (treeAndType(ch)._1, tpe)
case n => (n, n.nodeType)
}
}
object CodeGen {
def findResult(n: Node): (String, Any) = n match {
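// Recurse through ResultSetMapping / First wrappers until the CompiledStatement is reached;
// any other node shape indicates a compiler bug and fails with a MatchError.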
case r @ ResultSetMapping(_, from, _) => findResult(from)
case f @ First(from) => findResult(from)
case CompiledStatement(st, ex, _) => (st, ex)
}
}
|
dotta/slick
|
slick/src/main/scala/slick/compiler/CodeGen.scala
|
Scala
|
bsd-2-clause
| 1,712
|
/*
* Copyright (c) 2014, Oracle America, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of Oracle nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
package ${groupId}
import org.openjdk.jmh.annotations.Benchmark
class MyBenchmark {
@Benchmark
def testMethod(): Any = {
// This is a demo/sample template for building your JMH benchmarks. Edit as needed.
// Put your benchmark code here.
}
}
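// A hedged example of how the template above is typically filled in: a state-bearing benchmark
// that sinks its result into a Blackhole so the JIT cannot eliminate the work. The class and
// field names are illustrative.
import org.openjdk.jmh.annotations.{Benchmark, Scope, State}
import org.openjdk.jmh.infra.Blackhole

@State(Scope.Thread)
class SumBenchmark {
  val xs: Array[Int] = Array.tabulate(1024)(identity)

  @Benchmark
  def sumArray(bh: Blackhole): Unit = bh.consume(xs.sum)
}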
|
tauprojects/mpp
|
jmh/jmh-archetypes/jmh-scala-benchmark-archetype/src/main/resources/archetype-resources/src/main/scala/MyBenchmark.scala
|
Scala
|
gpl-2.0
| 1,839
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.dataload
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.sql.test.util.QueryTest
import org.apache.carbondata.processing.exception.DataLoadingException
class TestLoadDataUseAllDictionary extends QueryTest with BeforeAndAfterAll{
override def beforeAll {
sql("DROP TABLE IF EXISTS t3")
sql("""
CREATE TABLE IF NOT EXISTS t3
(ID Int, date Timestamp, country String,
name String, phonetype String, serialname String, salary Int)
STORED AS carbondata
""")
}
override def afterAll {
sql("DROP TABLE IF EXISTS t3")
}
}
|
jackylk/incubator-carbondata
|
integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataUseAllDictionary.scala
|
Scala
|
apache-2.0
| 1,455
|
/*
* The MIT License
*
* Copyright (c) 2017 Fulcrum Genomics LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
package com.fulcrumgenomics.fastq
import com.fulcrumgenomics.FgBioDef._
import com.fulcrumgenomics.bam.api.{SamOrder, SamRecord, SamWriter}
import com.fulcrumgenomics.cmdline.{ClpGroups, FgBioTool}
import com.fulcrumgenomics.commons.CommonsDef.{DirPath, FilePath, PathPrefix, PathToFastq}
import com.fulcrumgenomics.commons.io.PathUtil
import com.fulcrumgenomics.commons.util.{LazyLogging, Logger}
import com.fulcrumgenomics.fastq.FastqDemultiplexer.{DemuxRecord, DemuxResult}
import com.fulcrumgenomics.illumina.{Sample, SampleSheet}
import com.fulcrumgenomics.sopt.{arg, clp}
import com.fulcrumgenomics.umi.ConsensusTags
import com.fulcrumgenomics.util.NumericTypes.PhredScore
import com.fulcrumgenomics.util.ReadStructure.SubRead
import com.fulcrumgenomics.util._
import enumeratum.EnumEntry
import htsjdk.samtools.SAMFileHeader.SortOrder
import htsjdk.samtools._
import htsjdk.samtools.util.{Iso8601Date, SequenceUtil}
import java.io.Closeable
import java.util.concurrent.ForkJoinPool
import scala.collection.mutable.ListBuffer
object DemuxFastqs {
/** The name of the metrics file if none is given by the user. */
val DefaultDemuxMetricsFileName = "demux_barcode_metrics.txt"
/** The name of the sample for unmatched reads. */
val UnmatchedSampleId: String = "unmatched"
/** The maximum # of records in RAM per SAM/BAM writer. */
private[fastq] val MaxRecordsInRam: Int = 5e6.toInt
/** Decides whether or not to use asynchronous IO for the writers. This has a big performance benefit at the cost of
* some RAM. RAM may balloon if there is a need to sort the output. */
private[fastq] val UseAsyncIo: Boolean = true
/** The number of records to batch when demultiplexing in parallel. */
private val DemuxBatchRecordsSize = 1e5.toInt
/** Creates the sample output BAM path for the given sample. */
private[fastq] def outputPrefixFrom(output: DirPath, sample: Sample): PathPrefix = {
val sampleBarcode = sample.sampleBarcodeString
require(sampleBarcode.nonEmpty, s"Sample barcode missing for sample: ${sample.sampleName}")
output.resolve(PathUtil.sanitizeFileName(s"${sample.sampleId}-${sample.sampleName}-$sampleBarcode"))
}
/** Gets the quality format of the FASTQs. If a format is given, checks that the given format is compatible. */
private def determineQualityFormat(fastqs: Seq[PathToFastq],
format: Option[QualityEncoding] = None,
logger: Option[Logger] = None): QualityEncoding = {
val detector = new QualityEncodingDetector
detector.sample(fastqs.iterator.flatMap(FastqSource(_)).map(_.quals))
format match {
case Some(f) =>
require(detector.isCompatible(f), s"Fastq is not compatible with provided quality encoding: $f")
f
case None =>
val encs = detector.rankedCompatibleEncodings(q=30)
require(encs.nonEmpty, "Could not determine quality score encoding in fastq. No known encodings are valid for all observed qualities.")
if (encs.size > 1) logger.foreach(_.warning(s"Making ambiguous determination about fastq's quality encoding; possible encodings: ${encs.mkString(", ")}."))
logger.foreach(_.info(s"Auto-detected quality format as: ${encs.head}"))
encs.head
}
}
/** Create a sample with sample barcodes extracted from a custom column. */
private[fastq] def withCustomSampleBarcode(sample: Sample, columnForSampleBarcode: String): Sample = {
new Sample(
sampleOrdinal = sample.sampleOrdinal,
sampleId = sample.sampleId,
sampleName = sample.sampleName,
libraryId = sample.libraryId,
project = sample.project,
description = sample.description,
lane = sample.lane,
i7IndexBases = sample.i7IndexBases,
i5IndexBases = sample.i5IndexBases,
extendedAttributes = sample.extendedAttributes
) {
override val sampleBarcodes: Seq[Option[String]] = {
val barcode = extendedAttribute(columnForSampleBarcode)
require(barcode.nonEmpty, s"Sample barcode not found in column '$columnForSampleBarcode' for sample id '${sample.sampleId}'.")
Seq(barcode)
}
}
}
/** Create the unmatched sample with the given sample ordinal. */
private[fastq] def unmatchedSample(sampleOrdinal: Int, readStructures: Seq[ReadStructure]): Sample = {
val barcodeSegments = readStructures.flatMap(_.sampleBarcodeSegments)
require(barcodeSegments.nonEmpty, "No sample barcodes found in read structures: " + readStructures.mkString(", "))
require(barcodeSegments.forall(_.hasFixedLength), "Barcode segments must have fixed lengths in: " + readStructures.mkString(", "))
val noMatchBarcode: String = barcodeSegments.map("N" * _.fixedLength).mkString
Sample(sampleOrdinal=sampleOrdinal, sampleId=UnmatchedSampleId, sampleName=UnmatchedSampleId, libraryId=UnmatchedSampleId, i7IndexBases=Some(noMatchBarcode))
}
private[fastq] def toSampleOutputPrefix(sample: Sample, isUnmatched: Boolean, illuminaFileNames: Boolean, output: DirPath, unmatched: String): PathPrefix = {
(isUnmatched, illuminaFileNames) match {
case (true, true) => output.resolve(f"${UnmatchedSampleId}_S${sample.sampleOrdinal}_L${sample.lane.getOrElse(1)}%03d")
case (false, true) => output.resolve(f"${sample.sampleName}_S${sample.sampleOrdinal}_L${sample.lane.getOrElse(1)}%03d")
case (true, _) => PathUtil.removeExtension(output.resolve(unmatched))
case (false, _) => outputPrefixFrom(output, sample)
}
}
/** Creates a demultiplexing iterator that performs demultiplexing in parallel.
*
* @param sources the FASTQ sources, one per read.
* @param demultiplexer the demultiplexer to use. The demultiplexer's [[com.fulcrumgenomics.fastq.FastqDemultiplexer.demultiplex()]]
* method expects the same number of reads as sources.
*/
def demultiplexingIterator(sources: Seq[FastqSource],
demultiplexer: FastqDemultiplexer,
threads: Int,
batchSize: Int = DemuxBatchRecordsSize,
omitFailingReads: Boolean,
omitControlReads: Boolean,
minBaseQualityForMasking: Int,
qualityEncoding: QualityEncoding
): Iterator[DemuxResult] = {
require(demultiplexer.expectedNumberOfReads == sources.length,
s"The demultiplexer expects ${demultiplexer.expectedNumberOfReads} reads but ${sources.length} FASTQ sources given.")
val zippedIterator = FastqSource.zipped(sources)
val maskingThresholdToByte = convertQualToByte(qualityEncoding, minBaseQualityForMasking)
val resultIterator = if (threads > 1) {
// Developer Note: Iterator does not support parallel operations, so we need to group together records into a
// [[List]] or [[Seq]]. A fixed number of records are grouped to reduce memory overhead.
val pool = new ForkJoinPool(threads)
zippedIterator
.grouped(batchSize)
.flatMap { batch =>
batch
.parWith(pool=pool)
.map { readRecords =>
demultiplexer.demultiplex(readRecords: _*)
.maskLowQualityBases(minBaseQualityForMasking=maskingThresholdToByte, qualityEncoding=qualityEncoding, omitFailingReads=omitFailingReads)
}
.seq
}
}
else {
zippedIterator
.map { readRecords => demultiplexer.demultiplex(readRecords: _*)
.maskLowQualityBases(minBaseQualityForMasking=maskingThresholdToByte, qualityEncoding=qualityEncoding, omitFailingReads=omitFailingReads) }
}
resultIterator.map { res =>
if (!omitControlReads || !res.isControl){
res.sampleInfo.metric.increment(
numMismatches = res.numMismatches,
isPf = res.passQc,
q20Bases = res.q20Bases,
q30Bases = res.q30Bases,
basesToAdd = res.numBases,
omitFailing = omitFailingReads)
}
res
}.filter(r => r.keep(omitFailingReads, omitControlReads))
}
def convertQualToByte(qualityEncoding: QualityEncoding, qualityScore: Int): Byte = {
qualityEncoding.toStandardAscii(PhredScore.cap(qualityScore + qualityEncoding.asciiOffset).toChar).toByte
}
}
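// A hedged, self-contained sketch of the read-structure grammar documented below:
// "<number><operator>" pairs where the operator is one of T, B, M or S, and a trailing "+"
// in place of the number means "all remaining bases". fgbio's real ReadStructure parser is
// richer (validation, segment offsets); this only illustrates the shape of the grammar.
object ReadStructureGrammarSketch {
  sealed trait Length
  final case class Fixed(n: Int) extends Length
  case object Remaining extends Length // the trailing "+"
  final case class Segment(length: Length, op: Char)
  private val Pair = raw"(\d+|\+)([TBMS])".r
  def parse(s: String): Seq[Segment] =
    Pair.findAllMatchIn(s).map { m =>
      val len = if (m.group(1) == "+") Remaining else Fixed(m.group(1).toInt)
      Segment(len, m.group(2).head)
    }.toSeq
  def main(args: Array[String]): Unit = {
    println(parse("8B92T")) // List(Segment(Fixed(8),B), Segment(Fixed(92),T))
    println(parse("10M+T")) // List(Segment(Fixed(10),M), Segment(Remaining,T))
  }
}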
@clp(
description =
"""
|Performs sample demultiplexing on FASTQs.
|
|The sample barcode for each sample in the sample sheet will be compared against the sample barcode bases extracted from
|the FASTQs, to assign each read to a sample. Reads that do not match any sample within the given error tolerance
|will be placed in the 'unmatched' file.
|
|The type of output is specified with the `--output-type` option, and can be BAM (`--output-type Bam`),
|gzipped FASTQ (`--output-type Fastq`), or both (`--output-type BamAndFastq`).
|
|For BAM output, the output directory will contain one BAM file per sample in the sample sheet or metadata CSV file,
|plus a BAM for reads that could not be assigned to a sample given the criteria. The output file names will be the
|concatenation of sample id, sample name, and sample barcode bases (expected not observed), delimited by `-`. A
|metrics file will also be output, providing information analogous to the metric described in
|[SampleBarcodeMetric](http://fulcrumgenomics.github.io/fgbio/metrics/latest/#samplebarcodemetric).
|
|For gzipped FASTQ output, one or more gzipped FASTQs per sample in the sample sheet or metadata CSV file will be
|written to the output directory. For paired end data, the output will have the suffix `_R1.fastq.gz` and
|`_R2.fastq.gz` for read one and read two respectively. The sample barcode and molecular barcodes (concatenated)
|will be appended to the read name and delimited by a colon. If the `--illumina-standards` option is given, then
|the output read names and file names will follow the
|[Illumina standards described here](https://help.basespace.illumina.com/articles/tutorials/upload-data-using-web-uploader/).
|
|The output base qualities will be standardized to Sanger/SAM format.
|
|FASTQs and associated read structures for each sub-read should be given:
|
|- a single fragment read should have one FASTQ and one read structure
|- paired end reads should have two FASTQs and two read structures
|- a dual-index sample with paired end reads should have four FASTQs and four read structures given: two for the
| two index reads, and two for the template reads.
|
|If multiple FASTQs are present for each sub-read, then the FASTQs for each sub-read should be concatenated together
|prior to running this tool (ex. `cat s_R1_L001.fq.gz s_R1_L002.fq.gz > s_R1.fq.gz`).
|
|[Read structures](https://github.com/fulcrumgenomics/fgbio/wiki/Read-Structures) are made up of `<number><operator>`
|pairs much like the `CIGAR` string in BAM files. Four kinds of operators are recognized:
|
|1. `T` identifies a template read
|2. `B` identifies a sample barcode read
|3. `M` identifies a unique molecular index read
|4. `S` identifies a set of bases that should be skipped or ignored
|
|The last `<number><operator>` pair may be specified using a `+` sign instead of number to denote "all remaining
|bases". This is useful if, e.g., fastqs have been trimmed and contain reads of varying length. Both reads must
|have template bases. Any molecular identifiers will be concatenated using
|the `-` delimiter and placed in the given SAM record tag (`RX` by default). Similarly, the sample barcode bases
|from the given read will be placed in the `BC` tag.
|
|Metadata about the samples should be given in either an Illumina Experiment Manager sample sheet or a metadata CSV
|file. Formats are described in detail below.
|
|The read structures will be used to extract the observed sample barcode, template bases, and molecular identifiers
|from each read. The observed sample barcode will be matched to the sample barcodes extracted from the bases in
|the sample metadata and associated read structures.
|
|## Sample Sheet
|The read group's sample id, sample name, and library id all correspond to the similarly named values in the
|sample sheet. Library id will be the sample id if not found, and the platform unit will be the sample name
|concatenated with the sample barcode bases delimited by a `.`.
|
|The sample section of the sample sheet should contain information related to each sample with the following columns:
|
| * Sample_ID: The sample identifier unique to the sample in the sample sheet.
| * Sample_Name: The sample name.
| * Library_ID: The library Identifier. The combination sample name and library identifier should be unique
| across the samples in the sample sheet.
| * Description: The description of the sample, which will be placed in the description field in the output BAM's
| read group. This column may be omitted.
| * Sample_Barcode: The sample barcode bases unique to each sample. The name of the column containing the sample barcode
| can be changed using the `--column-for-sample-barcode` option. If the sample barcode is present
| across multiple reads (ex. dual-index, or inline in both reads of a pair), then the expected
| barcode bases from each read should be concatenated in the same order as the order of the reads'
| FASTQs and read structures given to this tool.
|
|## Metadata CSV
|
|In lieu of a sample sheet, a simple CSV file may be provided with the necessary metadata. This file should
|contain the same columns as described above for the sample sheet (`Sample_ID`, `Sample_Name`, `Library_ID`, and
|`Description`).
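|
|For example, a metadata CSV might look like this (illustrative values only):
|
|```
|Sample_ID,Sample_Name,Library_ID,Description,Sample_Barcode
|S1,sampleA,libA,first sample,GATTACA
|S2,sampleB,libB,second sample,CTGAGTC
|```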
|
|## Example Command Line
|
|As an example, if the sequencing run was 2x100bp (paired end) with two 8bp index reads both reading a sample
|barcode, as well as an in-line 8bp sample barcode in read one, the command line would be
|
|```
|--inputs r1.fq i1.fq i2.fq r2.fq --read-structures 8B92T 8B 8B 100T \\
| --metadata SampleSheet.csv --metrics metrics.txt --output output_folder
|```
|
|## Output Standards
|
|The following options affect the output format:
|
|1. If `--omit-fastq-read-numbers` is specified, then trailing /1 and /2 for R1 and R2 respectively, will not be
|appended to the FASTQ read name. By default they will be appended.
|2. If `--include-sample-barcodes-in-fastq` is specified, then the sample barcode will replace the last field in the
|first comment in the FASTQ header, e.g. replace 'NNNNNN' in the header `@Instrument:RunID:FlowCellID:Lane:Tile:X:Y 1:N:0:NNNNNN`
|3. If `--illumina-file-names` is specified, the output files will be named according to the Illumina FASTQ file
|naming conventions:
|
| a. The file extension will be `_R1_001.fastq.gz` for read one, and `_R2_001.fastq.gz` for read two (if paired end).
| b. The per-sample output prefix will be `<SampleName>_S<SampleOrdinal>_L<LaneNumber>` (without angle brackets).
|
|Options (1) and (2) require the input FASTQ read names to contain the following elements:
|
|`@<instrument>:<run number>:<flowcell ID>:<lane>:<tile>:<x-pos>:<y-pos> <read>:<is filtered>:<control number>:<index>`
|
|[See the Illumina FASTQ conventions for more details.](https://support.illumina.com/help/BaseSpace_OLH_009008/Content/Source/Informatics/BS/FASTQFiles_Intro_swBS.htm)
|
|The `--illumina-standards` option may not be specified with the three options above. Use this option if you
|intend to upload to Illumina BaseSpace. This option implies:
|
|`--omit-fastq-read-numbers=true --include-sample-barcodes-in-fastq=false --illumina-file-names=true`
|
|[See the Illumina Basespace standards described here](https://help.basespace.illumina.com/articles/tutorials/upload-data-using-web-uploader/).
|
|To output with recent Illumina conventions (circa 2021) that match `bcl2fastq` and `BCLconvert`, use:
|
|`--omit-fastq-read-numbers=true --include-sample-barcodes-in-fastq=true --illumina-file-names=true`
|
|By default all input reads are output. If your input FASTQs contain reads that do not pass filter (as defined by the Y/N filter flag in the FASTQ comment), these can be filtered out during demultiplexing using the `--omit-failing-reads` option.
|
|To output only reads that are not control reads, as encoded in the `<control number>` field in the header comment, use the `--omit-control-reads` flag.
""",
group=ClpGroups.Fastq
)
class DemuxFastqs
(@arg(flag='i', doc="One or more input fastq files each corresponding to a sub-read (ex. index-read, read-one, read-two, fragment).") val inputs: Seq[PathToFastq],
@arg(flag='o', doc="The output directory in which to place sample BAMs.") val output: DirPath,
@arg(flag='x', doc="A file containing the metadata about the samples.") val metadata: FilePath,
@arg(flag='r', doc="The read structure for each of the FASTQs.") val readStructures: Seq[ReadStructure],
@arg(flag='m', doc="The file to which per-barcode metrics are written. If none given, a file named `demux_barcode_metrics.txt` will be written to the output directory.") val metrics: Option[FilePath] = None,
@arg(flag='c', doc="The column name in the sample sheet or metadata CSV for the sample barcode.") val columnForSampleBarcode: String = "Sample_Barcode",
@arg(flag='u', doc="Output BAM file name for the unmatched records.") val unmatched: String = DemuxFastqs.UnmatchedSampleId + ".bam",
@arg(flag='q',
doc="""A value describing how the quality values are encoded in the FASTQ.
|Either Solexa for pre-pipeline 1.3 style scores (solexa scaling + 66),
|Illumina for pipeline 1.3 and above (phred scaling + 64) or Standard
|for phred scaled scores with a character shift of 33. If this value
|is not specified, the quality format will be detected automatically.
""")
val qualityFormat: Option[QualityEncoding] = None,
@arg(flag='t', doc="The number of threads to use while de-multiplexing. The performance does not increase linearly with the # of threads and seems not to improve beyond 2-4 threads.") val threads: Int = 1,
@arg(doc="Maximum mismatches for a barcode to be considered a match.") val maxMismatches: Int = 1,
@arg(doc="Minimum difference between number of mismatches in the best and second best barcodes for a barcode to be considered a match.") val minMismatchDelta: Int = 2,
@arg(doc="Maximum allowable number of no-calls in a barcode read before it is considered unmatchable.") val maxNoCalls: Int = 2,
@arg(doc="The sort order for the output sam/bam file (typically unsorted or queryname).") val sortOrder: SortOrder = SortOrder.queryname,
@arg(doc="The SAM tag for any molecular barcode. If multiple molecular barcodes are specified, they will be concatenated and stored here.") val umiTag: String = ConsensusTags.UmiBases,
@arg(doc="The platform unit (typically `<flowcell-barcode>-<sample-barcode>.<lane>`)") val platformUnit: Option[String] = None,
@arg(doc="The sequencing center from which the data originated") val sequencingCenter: Option[String] = None,
@arg(doc="Predicted median insert size, to insert into the read group header") val predictedInsertSize: Option[Integer] = None,
@arg(doc="Platform model to insert into the group header (ex. miseq, hiseq2500, hiseqX)") val platformModel: Option[String] = None,
@arg(doc="Platform to insert into the read group header of BAMs (e.g Illumina)") val platform: String = "Illumina",
@arg(doc="Comment(s) to include in the merged output file's header.", minElements = 0) val comments: List[String] = Nil,
@arg(doc="Date the run was produced, to insert into the read group header") val runDate: Option[Iso8601Date] = None,
@arg(doc="The type of outputs to produce.") val outputType: Option[OutputType] = None,
@arg(
doc=
"""Output all bases (i.e. all sample barcode, molecular barcode, skipped,
and template bases) for every read with template bases (ex. read one
and read two) as defined by the corresponding read structure(s).
""")
val includeAllBasesInFastqs: Boolean = false,
@deprecated(message="Use outputStandards instead", since="1.3.0")
@arg(doc="Output FASTQs according to Illumina BaseSpace Sequence Hub naming standards. This is differfent than Illumina naming standards.",
mutex=Array("omitFastqReadNumbers", "includeSampleBarcodesInFastq", "illuminaFileNames"))
val illuminaStandards: Boolean = false,
@arg(doc="Do not include trailing /1 or /2 for R1 and R2 in the FASTQ read name.", mutex=Array("illuminaStandards"))
val omitFastqReadNumbers: Boolean = false,
@arg(doc="Insert the sample barcode into the FASTQ header.", mutex=Array("illuminaStandards"))
val includeSampleBarcodesInFastq: Boolean = false,
@arg(doc="Name the output files according to the Illumina file name standards.", mutex=Array("illuminaStandards"))
val illuminaFileNames: Boolean = false,
@arg(doc="Keep only passing filter reads if true, otherwise keep all reads. Passing filter reads are determined from the comment in the FASTQ header.")
val omitFailingReads: Boolean = false,
@arg(doc="Do not keep reads identified as control if true, otherwise keep all reads. Control reads are determined from the comment in the FASTQ header.")
val omitControlReads: Boolean = false,
@arg(doc="Mask bases with a quality score below the specified threshold as Ns") val maskBasesBelowQuality: Int = 0,
) extends FgBioTool with LazyLogging {
// Support the deprecated --illumina-standards option
private val fastqStandards: FastqStandards = {
if (illuminaStandards) {
logger.warning("The `--illumina-standards` option will be removed in a future version, please use `--output-standards=Illumina`")
// NB: include read numbers
FastqStandards(
illuminaFileNames = true
)
} else {
FastqStandards(
includeReadNumbers = !omitFastqReadNumbers,
includeSampleBarcodes = includeSampleBarcodesInFastq,
illuminaFileNames = illuminaFileNames
)
}
}
import DemuxFastqs._
private[fastq] val metricsPath = metrics.getOrElse(output.resolve(DefaultDemuxMetricsFileName))
// NB: remove me when outputFastqs gets removed and use outputType directly
private val _outputType = this.outputType match {
case Some(tpe) => tpe
case None => OutputType.Bam
}
validate(inputs.length == readStructures.length, "The number of read structures must match the number of input FASTQs.")
validate(readStructures.flatMap(_.sampleBarcodeSegments).nonEmpty, s"No sample barcodes found in read structures: ${readStructures.map(_.toString).mkString(", ")}")
private val pairedEnd = readStructures.count(_.templateSegments.nonEmpty) match {
case 1 => false
case 2 => true
case n => invalid(s"Found $n read structures with template bases but expected 1 or 2.")
}
Io.assertReadable(inputs)
Io.assertReadable(metadata)
/** Create a sample sheet from either the input sample sheet path or the metadata CSV. */
private val sampleSheet: SampleSheet = {
val lines = Io.readLines(metadata).toSeq
if (lines.exists(_.contains("[Data]"))) {
logger.info("Assuming input metadata file is an Illumina Experiment Manager Sample Sheet.")
SampleSheet(lines.iterator, lane=None)
}
else {
logger.info("Assuming input metadata file is simple CSV file.")
SampleSheet(Iterator("[Data]") ++ lines, lane=None)
}
}
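// Illustrative sketch (column names hypothetical, not taken from the tool's docs): a minimal
// simple-CSV metadata file that the else-branch above wraps with a synthetic "[Data]" header:
//   Sample_ID,Sample_Name,Library_ID,Sample_Barcode
//   S1,sample-1,lib-1,ACGTACGT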
override def execute(): Unit = {
Io.mkdirs(this.output)
Io.assertCanWriteFile(this.metricsPath)
// Get the FASTQ quality encoding format
val qualityEncoding: QualityEncoding = determineQualityFormat(fastqs=inputs, format=this.qualityFormat, logger=Some(this.logger))
// Read in the sample sheet and create the sample information
val samplesFromSampleSheet = sampleSheet.map(s => withCustomSampleBarcode(s, columnForSampleBarcode))
val samples = samplesFromSampleSheet.toSeq :+ unmatchedSample(samplesFromSampleSheet.size, this.readStructures)
val sampleInfos = samples.map(sample => SampleInfo(sample, sample.sampleName == UnmatchedSampleId))
val sampleToWriter = sampleInfos.map { info => info.sample -> toWriter(info, sampleInfos.length) }.toMap
// Validate that the # of sample barcode bases in the read structure matches the # of sample barcode bases in the sample sheet.
{
val rsNumSampleBarcodeBases = readStructures.map(_.sampleBarcodeSegments.map(_.fixedLength).sum).sum
samples.foreach { sample =>
val numSampleBarcodeBases = sample.sampleBarcodeBytes.length
require(numSampleBarcodeBases == rsNumSampleBarcodeBases,
s"The number of sample barcodes bases did not match; read structures: $rsNumSampleBarcodeBases sample (${sample.sampleId}): $numSampleBarcodeBases"
)
}
}
// Create the files for reading and writing
val demultiplexer = new FastqDemultiplexer(
sampleInfos = sampleInfos,
readStructures = this.readStructures,
umiTag = umiTag,
maxMismatches = maxMismatches,
minMismatchDelta = minMismatchDelta,
maxNoCalls = maxNoCalls,
includeOriginal = this.includeAllBasesInFastqs,
fastqStandards = this.fastqStandards,
omitFailingReads = this.omitFailingReads,
omitControlReads = this.omitControlReads
)
val progress = ProgressLogger(this.logger, unit=1e6.toInt)
// An iterator that uses the given FASTQ demultiplexer to convert FASTQ records from the same fragment/template to
// DemuxRecords in parallel
val sources = inputs.map(FastqSource(_))
val iterator = demultiplexingIterator(
sources = sources,
demultiplexer = demultiplexer,
threads = threads,
omitFailingReads = this.omitFailingReads,
omitControlReads = this.omitControlReads,
minBaseQualityForMasking = maskBasesBelowQuality,
qualityEncoding = qualityEncoding
)
// Write the records out in its own thread
iterator.foreach { demuxResult =>
val writer = sampleToWriter(demuxResult.sampleInfo.sample)
demuxResult.records.foreach { rec =>
writer.add(rec.copy(quals=qualityEncoding.toStandardAscii(rec.quals)))
progress.record()
}
}
// Close the writers; NB: the inputs close automatically
sampleToWriter.values.foreach(_.close())
// Write the metrics
val metricsMap = sampleInfos.map { sampleInfo => (sampleInfo.metric.barcode, sampleInfo.metric) }.toMap
val unmatchedBarcode = sampleInfos.find { sampleInfo => sampleInfo.isUnmatched }.getOrElse(unreachable("No unmatched sample."))
SampleBarcodeMetric.finalizeMetrics(metricsMap, unmatchedBarcode.metric.barcode)
Metric.write(metricsPath, sampleInfos.map(_.metric))
}
private def toWriter(sampleInfo: SampleInfo, numSamples: Int): DemuxWriter[Any] = {
val sample = sampleInfo.sample
val isUnmatched = sample.sampleName == UnmatchedSampleId
val prefix = toSampleOutputPrefix(sample, isUnmatched, fastqStandards.illuminaFileNames, output, this.unmatched)
val writers = new ListBuffer[DemuxWriter[Any]]()
if (this._outputType.producesFastq) {
writers += new FastqRecordWriter(prefix, this.pairedEnd, fastqStandards)
}
if (this._outputType.producesBam) {
val readGroup = new SAMReadGroupRecord(sample.sampleId)
readGroup.setSample(sample.sampleName)
readGroup.setLibrary(sample.libraryId)
readGroup.setPlatform(platform)
sample.description.foreach(readGroup.setDescription)
platformUnit.foreach(readGroup.setPlatformUnit)
sequencingCenter.foreach(readGroup.setSequencingCenter)
predictedInsertSize.foreach(readGroup.setPredictedMedianInsertSize)
runDate.foreach(readGroup.setRunDate)
platformModel.foreach(readGroup.setPlatformModel)
val header: SAMFileHeader = new SAMFileHeader
header.addReadGroup(readGroup)
header.setSortOrder(sortOrder)
comments.foreach(header.addComment)
writers += new SamRecordWriter(prefix, header, this.umiTag, numSamples)
}
new DemuxWriter[Any] {
def add(rec: DemuxRecord): Unit = writers.foreach { writer => writer.add(rec) }
override def close(): Unit = writers.foreach(_.close())
}
}
}
/** A writer that writes [[DemuxRecord]]s. */
private trait DemuxWriter[+T] extends Closeable {
def add(rec: DemuxRecord): T
}
/** A writer that writes [[DemuxRecord]]s as [[SamRecord]]s. */
private class SamRecordWriter(prefix: PathPrefix,
val header: SAMFileHeader,
val umiTag: String,
val numSamples: Int) extends DemuxWriter[SamRecord] {
val order: Option[SamOrder] = if (header.getSortOrder == SortOrder.unsorted) None else SamOrder(header)
private val writer = SamWriter(PathUtil.pathTo(s"$prefix.bam"), header, sort=order,
async = DemuxFastqs.UseAsyncIo,
maxRecordsInRam = Math.max(10000, DemuxFastqs.MaxRecordsInRam / numSamples))
private val rgId: String = this.header.getReadGroups.get(0).getId
def add(rec: DemuxRecord): SamRecord = {
val record = SamRecord(header)
record.name = rec.name
record.bases = rec.bases
record.quals = rec.quals
record.unmapped = true
if (rec.pairedEnd) {
record.paired = true
record.mateUnmapped = true
if (rec.readNumber == 1) record.firstOfPair = true else record.secondOfPair = true
}
if (rec.sampleBarcode.nonEmpty) record("BC") = rec.sampleBarcode.mkString("-")
record(ReservedTagConstants.READ_GROUP_ID) = rgId
if (rec.molecularBarcode.nonEmpty) record(umiTag) = rec.molecularBarcode.mkString("-")
writer += record
record
}
override def close(): Unit = writer.close()
}
private[fastq] object FastqRecordWriter {
private[fastq] def extensions(pairedEnd: Boolean, illuminaFileNames: Boolean): Seq[String] = {
(pairedEnd, illuminaFileNames) match {
case (true, true) => Seq("_R1_001.fastq.gz", "_R2_001.fastq.gz")
case (true, false) => Seq("_R1.fastq.gz", "_R2.fastq.gz")
case (false, true) => Seq("_R1_001.fastq.gz")
case (false, false) => Seq(".fastq.gz")
}
}
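// For illustration (prefix hypothetical): with prefix "out/Sample1", paired-end data with
// Illumina file names is written to out/Sample1_R1_001.fastq.gz and out/Sample1_R2_001.fastq.gz,
// while single-end data without Illumina names goes to out/Sample1.fastq.gz.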
}
/** A writer that writes [[DemuxRecord]]s as [[FastqRecord]]s. */
private[fastq] class FastqRecordWriter(prefix: PathPrefix, val pairedEnd: Boolean, val fastqStandards: FastqStandards) extends DemuxWriter[FastqRecord] {
private val writers: IndexedSeq[FastqWriter] = {
FastqRecordWriter.extensions(pairedEnd = pairedEnd, illuminaFileNames = fastqStandards.illuminaFileNames).map { ext =>
FastqWriter(Io.toWriter(PathUtil.pathTo(s"$prefix$ext")))
}.toIndexedSeq
}
def add(rec: DemuxRecord): FastqRecord = {
val name = {
if (fastqStandards.includeSampleBarcodes) rec.name // the comment is set below, so don't include it here
else Seq(Some(rec.name), rec.sampleBarcode, rec.molecularBarcode).flatten.mkString(":")
}
// update the comment
val comment = rec.readInfo match {
case None => rec.comment // when not updating sample barcodes, fetch the comment from the record
case Some(info) =>
if (fastqStandards.includeSampleBarcodes && rec.sampleBarcode.nonEmpty) {
Some(info.copy(sampleInfo = rec.sampleBarcode.mkString("+")).toString) // update the barcode in the record's header. In case of dual-indexing, barcodes are combined with a '+'.
}
else Some(info.toString)
}
val record = FastqRecord(
name = name,
bases = rec.originalBases.getOrElse(rec.bases),
quals = rec.originalQuals.getOrElse(rec.quals),
comment = comment,
readNumber = if (fastqStandards.includeReadNumbers) Some(rec.readNumber) else None
)
val writer = this.writers.lift(rec.readNumber - 1).getOrElse {
throw new IllegalStateException(s"Read number was invalid: ${rec.readNumber}")
}
writer.write(record)
record
}
override def close(): Unit = this.writers.foreach(_.close())
}
/** A class to store information about a sample. */
private[fastq] case class SampleInfo(sample: Sample, isUnmatched: Boolean = false) {
val metric: SampleBarcodeMetric = {
val barcode: String = this.sample.sampleBarcodeString
require(barcode.nonEmpty, s"Sample with id '${sample.sampleId}' did not have a sample barcode")
SampleBarcodeMetric(barcodeName=sample.sampleName, libraryName=sample.libraryId, barcode=barcode)
}
}
private[fastq] object FastqDemultiplexer {
/** Stores the minimal information for a single template read. */
case class DemuxRecord(name: String,
bases: String,
quals: String,
molecularBarcode: Seq[String],
sampleBarcode: Seq[String],
readNumber: Int,
pairedEnd: Boolean,
comment: Option[String],
originalBases: Option[String] = None,
originalQuals: Option[String] = None,
readInfo: Option[ReadInfo] = None,
q30Bases: Int = 0,
q20Bases: Int = 0) {
/** Masks bases that have a quality score below a specified threshold.
* @param minBaseQualityForMasking the threshold for masking bases, exclusive; bases with a quality score < minBaseQualityForMasking will be masked to 'N'.
* @param qualityEncoding the encoding used for quality scores in the FASTQ file.
* @return a new DemuxRecord with updated bases
*/
def maskLowQualityBases(minBaseQualityForMasking: Byte, qualityEncoding: QualityEncoding): DemuxRecord = {
val quals = this.quals
val q30Bases = quals.count(_ >= DemuxFastqs.convertQualToByte(qualityEncoding, 30))
val q20Bases = quals.count(_ >= DemuxFastqs.convertQualToByte(qualityEncoding, 20))
if (minBaseQualityForMasking <= 0 || this.quals.forall(_ >= minBaseQualityForMasking)) {
this.copy(q20Bases = q20Bases, q30Bases = q30Bases)
} else {
val bases = this.bases.toCharArray
for (i <- Range(0, this.quals.length)) if (quals.charAt(i).toByte < minBaseQualityForMasking) bases(i) = 'N'
// carry the q20/q30 counts computed above so that masked records keep their quality metrics
this.copy(bases = bases.mkString, q20Bases = q20Bases, q30Bases = q30Bases)
}
}
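// Worked sketch (numbers illustrative): with a threshold of 20 and bases "ACGT" whose
// qualities decode to 30, 10, 30, 10, the bases become "ANGN"; the q20/q30 counts are
// computed from the original qualities either way.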
}
/** A class to store the [[SampleInfo]] and associated demultiplexed [[DemuxRecord]]s.
* @param sampleInfo the [[SampleInfo]] for the matched sample.
* @param numMismatches the # of mismatches if it matched a sample with a sample barcode. This will be [[Int.MaxValue]]
* for the unmatched sample.
* @param records the records, one for each read that has template bases.
* @param passQc flag noting if the read passes QC. Default true to keep all reads unless otherwise specified.
* @param isControl flag noting if the read is an internal control. Default false unless filtering to remove internal control reads.
*/
case class DemuxResult(sampleInfo: SampleInfo,
numMismatches: Int,
records: Seq[DemuxRecord],
passQc: Boolean = true,
isControl: Boolean = false,
numBases: Int = 0,
q30Bases: Int = 0,
q20Bases: Int = 0) {
/** Returns true if this [[DemuxResult]] should be kept for output, false otherwise.
* A result is kept if it passes QC (or `omitFailingReads` is false) and is not an
* internal control (or `omitControlReads` is false).
*
* @param omitFailingReads true to keep only passing reads, false to keep all reads
* @param omitControlReads true to drop internal control reads, false to keep all reads
*/
def keep(omitFailingReads: Boolean, omitControlReads: Boolean): Boolean = {
val keepByQc = if (!omitFailingReads) true else passQc
val keepByControl = if (!omitControlReads) true else !isControl
keepByQc && keepByControl
}
/** Masks bases that have a quality score below a specified threshold.
*
* @return a new DemuxResult with updated bases
*/
def maskLowQualityBases(minBaseQualityForMasking: Byte,
qualityEncoding: QualityEncoding,
omitFailingReads: Boolean): DemuxResult = { // using this.type here causes a mismatch error
val records = if (minBaseQualityForMasking <= 0) this.records else {
this.records.map(_.maskLowQualityBases(minBaseQualityForMasking = minBaseQualityForMasking,
qualityEncoding = qualityEncoding)
)
}
val q20Bases = records.map(_.q20Bases).sum
val q30Bases = records.map(_.q30Bases).sum
val numBases = records.map(_.bases.length).sum
this.copy(records=records, numBases=numBases, q20Bases=q20Bases, q30Bases=q30Bases)
}
}
/** Counts the nucleotide mismatches between two strings of the same length. Ignores no calls in expectedBases. */
private[fastq] def countMismatches(observedBases: Array[Byte], expectedBases: Array[Byte]): Int = {
require(observedBases.length == expectedBases.length, s"observedBases: ${observedBases.length} expectedBases: ${expectedBases.length}")
var idx = 0
var count = 0
while (idx < observedBases.length) {
val expectedBase = expectedBases(idx)
val observedBase = observedBases(idx)
if (!SequenceUtil.isNoCall(expectedBase) && !SequenceUtil.basesEqual(observedBase, expectedBase)) {
count += 1
}
idx += 1
}
count
}
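// Illustrative behaviour: countMismatches("GATTACA".getBytes, "GATNACA".getBytes) == 0,
// since the 'N' in the expected barcode is a no-call and is ignored, whereas
// countMismatches("GATTACA".getBytes, "GATCACA".getBytes) == 1.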
}
/** Assigns reads from the same fragment/template to a sample.
*
* A [[SampleInfo]] should be given per sample and a [[ReadStructure]] per read from the same template/fragment.
* Use the [[demultiplex()]] method to create a [[DemuxRecord]] for each read with template bases. Any molecular barcodes
* will be extracted and stored in the tag specified by [[umiTag]].
*
* @param sampleInfos the sample information, one per sample.
* @param readStructures the read structures, one for each read that will be given to [[demultiplex()]].
* @param fastqStandards standards for outputting FASTQs
* @param umiTag the tag to store any molecular barcodes. The barcodes from reads will be delimited by "-".
* @param maxMismatches the maximum mismatches to match a sample barcode.
* @param minMismatchDelta the minimum difference between number of mismatches in the best and second best barcodes for
* a barcode to be considered a match.
* @param maxNoCalls the maximum number of no calls in the sample barcode bases allowed for matching.
* @param includeOriginal true to set the values for `originalBases` and `originalQuals` in [[DemuxRecord]],
*                        namely the bases and qualities for ALL bases, including molecular barcode, sample barcode,
*                        and skipped bases.
* @param omitFailingReads true to remove reads that do not pass QC, marked as 'N' in the header comment.
* @param omitControlReads true to remove reads that are marked as internal control reads in the header comment.
*/
private class FastqDemultiplexer(val sampleInfos: Seq[SampleInfo],
readStructures: Seq[ReadStructure],
val fastqStandards: FastqStandards,
val umiTag: String = ConsensusTags.UmiBases,
val maxMismatches: Int = 2,
val minMismatchDelta: Int = 1,
val maxNoCalls: Int = 2,
val includeOriginal: Boolean = false,
val omitFailingReads: Boolean = false,
val omitControlReads: Boolean = false) {
import FastqDemultiplexer._
require(readStructures.nonEmpty, "No read structures were given")
private val variableReadStructures = readStructures.map(_.withVariableLastSegment)
{
val samples = sampleInfos.map(_.sample)
require(samples.map(_.sampleBarcodeString).sorted.distinct.length == samples.length, "Unique sample barcodes required for all samples")
}
private val sampleInfosNoUnmatched = sampleInfos.filterNot(_.isUnmatched)
private val unmatchedSample = sampleInfos.find(_.isUnmatched).getOrElse(throw new IllegalArgumentException("No unmatched sample provided."))
/** The number of reads that are expected to be given to the [[demultiplex()]] method. */
def expectedNumberOfReads: Int = this.variableReadStructures.length
/** True if the read structure implies paired end reads will be produced, false otherwise. */
val pairedEnd: Boolean = this.variableReadStructures.count(_.templateSegments.nonEmpty) match {
case 0 => throw new IllegalArgumentException("No template reads in any read structure.")
case 1 => false
case 2 => true
case n => throw new IllegalArgumentException(s"$n template reads defined. Can't process > 2 template reads.")
}
/** Gets the [[SampleInfo]] and the number of mismatches between the bases and matched sample barcode. If no match is
* found, the unmatched sample and [[Int.MaxValue]] are returned. */
private def matchSampleBarcode(subReads: Seq[SubRead]): (SampleInfo, Int) = {
val observedBarcode = subReads.filter(_.kind == SegmentType.SampleBarcode).map(_.bases).mkString.getBytes
val numNoCalls = observedBarcode.count(base => SequenceUtil.isNoCall(base))
// Get the best and second best sample barcode matches.
val (bestSampleInfo, bestMismatches, secondBestMismatches) = if (numNoCalls <= maxNoCalls) {
this.sampleInfosNoUnmatched.map { sampleInfo =>
val sample = sampleInfo.sample
val expectedBarcode = sample.sampleBarcodeBytes
require(expectedBarcode.nonEmpty, s"Sample with id '${sample.sampleId}' did not have a sample barcode")
val numMismatches = countMismatches(observedBarcode, expectedBarcode)
(sampleInfo, numMismatches)
}
.toList.sortBy(_._2).take(2) match {
case Nil => (this.unmatchedSample, Int.MaxValue, Int.MaxValue)
case List(bestTuple) => (bestTuple._1, bestTuple._2, Int.MaxValue)
case List(bestTuple, secondBestTuple) => (bestTuple._1, bestTuple._2, secondBestTuple._2)
}
}
else {
(this.unmatchedSample, Int.MaxValue, Int.MaxValue)
}
// Make sure we are within the parameter limits and update barcode metrics if necessary
if (maxMismatches < bestMismatches || maxNoCalls < numNoCalls || (secondBestMismatches - bestMismatches) < minMismatchDelta) {
(this.unmatchedSample, Int.MaxValue)
}
else {
(bestSampleInfo, bestMismatches)
}
}
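// Worked sketch of the thresholds above (numbers hypothetical): with maxMismatches = 1 and
// minMismatchDelta = 2, a read whose best match has 1 mismatch and whose second-best has 2
// is still assigned to the unmatched sample, because the delta (2 - 1 = 1) is below 2.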
/** Demultiplexes a given set of reads from the same template. The number of reads given must match the number of
* read structures.
*
* The sample barcode bases from each read are extracted and concatenated in the same order as the given reads. They
* are matched against the sample barcode bases for each sample.
* */
def demultiplex(reads: FastqRecord*): DemuxResult = {
require(reads.nonEmpty, "No reads given for demultiplexing.")
require(reads.length == expectedNumberOfReads, s"Expected '$expectedNumberOfReads' number of reads but found '${reads.length}'.")
// Generate the sub-reads by type
val subReads = reads.zip(this.variableReadStructures).flatMap { case (read, rs) => rs.extract(read.bases, read.quals) }
// Get the sample
val (sampleInfo, numMismatches) = matchSampleBarcode(subReads)
// Method to get all the bases of a given type
def bases(segmentType: SegmentType): Seq[String] = {
subReads.filter(_.kind == segmentType).map(_.bases)
}
// Get the molecular and sample barcodes
val molecularBarcode = bases(SegmentType.MolecularBarcode)
val sampleBarcode = bases(SegmentType.SampleBarcode)
val demuxRecords = reads.zip(this.variableReadStructures)
.filter { case (_, rs) => rs.exists(_.kind == SegmentType.Template) }
.zipWithIndex
.map { case ((read, rs), readIndex) =>
val segments = rs.extract(read.bases, read.quals)
val readNumber = readIndex + 1
val templates = segments.filter(_.kind == SegmentType.Template)
require(templates.nonEmpty, s"Bug: require at least one template in read $readIndex; read structure was ${segments.mkString}")
DemuxRecord(
name = read.name,
bases = templates.map(_.bases).mkString,
quals = templates.map(_.quals).mkString,
molecularBarcode = molecularBarcode,
sampleBarcode = sampleBarcode,
readNumber = readNumber,
pairedEnd = this.pairedEnd,
comment = read.comment,
originalBases = if (this.includeOriginal) Some(read.bases) else None,
originalQuals = if (this.includeOriginal) Some(read.quals) else None,
readInfo = fastqStandards.readInfo(read)
)
}
val passQc = demuxRecords.forall(d => d.readInfo.forall(_.passQc))
val isControl = demuxRecords.forall(d => d.readInfo.forall(_.internalControl))
DemuxResult(sampleInfo=sampleInfo, numMismatches=numMismatches, records=demuxRecords, passQc=passQc, isControl=isControl)
}
}
sealed trait OutputType extends EnumEntry {
def producesBam: Boolean
def producesFastq: Boolean
}
object OutputType extends FgBioEnum[OutputType] {
override def values: scala.collection.immutable.IndexedSeq[OutputType] = findValues
case object Fastq extends OutputType { val producesBam: Boolean = false; val producesFastq: Boolean = true; }
case object Bam extends OutputType { val producesBam: Boolean = true; val producesFastq: Boolean = false; }
case object BamAndFastq extends OutputType { val producesBam: Boolean = true; val producesFastq: Boolean = true; }
}
/** A little class to store read-level information from the comment in a FASTQ read name that is in Illumina
* standard format:
*
* `<read>:<is filtered>:<control number>:<barcode-sequence>`
*
* This is typically inferred from the first comment in the input FASTQ header.
*
* @param readNumber the read number
* @param passQc true if the read passes quality filters, false if it fails
* @param internalControl true if the read is an internal control, false otherwise
* @param sampleInfo additional sample information, such as sample barcode (bcl2fastq/BCL convert) or sample number (BaseSpace)
* @param rest any additional information beyond the first comment token.
*/
case class ReadInfo(readNumber: Int, passQc: Boolean, internalControl: Boolean, sampleInfo: String, rest: Seq[String]) {
override def toString: String = {
val leading = f"$readNumber:${if (passQc) "Y" else "N"}:${if (internalControl) 1 else 0}:$sampleInfo"
if (rest.isEmpty) leading else leading + " " + rest.mkString(" ")
}
}
object ReadInfo {
private def reject(name: String) = throw new IllegalArgumentException(s"Cannot extract ReadInfo due to missing comment: $name")
/** Builds the [[ReadInfo]] by parsing a [[FastqRecord]]. */
def apply(rec: FastqRecord): ReadInfo = this(rec.name, rec.comment.getOrElse(reject(rec.name)))
/** Builds the [[ReadInfo]] by parsing a [[DemuxRecord]]. */
def apply(rec: DemuxRecord): ReadInfo = this(rec.name, rec.comment.getOrElse(reject(rec.name)))
/** Builds the [[ReadInfo]] by parsing a standard input FASTQ. */
def apply(name: String, comment: String): ReadInfo = try { // NB: comment is an Option[String] in FastqRecord and DemuxRecord, hence the overloads above
val comments = comment.split(' ')
val readInfo = comments.head
val commentInfo = readInfo.split(':').toSeq
require(commentInfo.length == 4,
s"Expected the comment format 'ReadNum:FilterFlag:0:SampleNumber', found '$readInfo'")
val Seq(readNumber, keep, internalControl, sampleInfo) = commentInfo
val keepBoolean = keep match {
case "Y" => true
case "N" => false
case x => throw new IllegalStateException(s"Cannot parse filter/keep flag: $x")
}
ReadInfo(
readNumber = readNumber.toInt,
passQc = keepBoolean,
internalControl = internalControl.toInt != 0,
sampleInfo = sampleInfo,
rest = comments.toIndexedSeq.drop(1)
)
} catch {
case ex: Exception => throw new IllegalStateException(f"Could parse read info from read: $name $comment", ex)
}
}
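// Illustrative parse (values hypothetical), following the apply above:
//   ReadInfo("read1", "1:N:0:ACGT extra") ==
//     ReadInfo(readNumber = 1, passQc = false, internalControl = false,
//              sampleInfo = "ACGT", rest = Seq("extra"))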
/** Stores information on how to name FASTQ output files and format the FASTQ read header.
*
* @param includeReadNumbers true to including a trailing "/1" or "/2" for R1 and R2 respectively
* @param includeSampleBarcodes update the sample barcode in the comment of the FASTQ header
* @param illuminaFileNames the output FASTQ file names should follow Illumina standards
*/
private case class FastqStandards
( includeReadNumbers: Boolean = false,
includeSampleBarcodes: Boolean = false,
illuminaFileNames: Boolean = false
) {
/** Build a [[ReadInfo]] according to the standards. */
def readInfo(read: FastqRecord): Option[ReadInfo] = if (includeSampleBarcodes) Some(ReadInfo(read)) else None
}
|
fulcrumgenomics/fgbio
|
src/main/scala/com/fulcrumgenomics/fastq/DemuxFastqs.scala
|
Scala
|
mit
| 51,412
|
package scodec
package codecs
import java.security.MessageDigest
import java.util.Arrays
import java.util.zip.{ CRC32, Adler32, Checksum }
import scodec.bits.ByteVector
/**
* Creates checksum implementations of [[SignerFactory]].
* @group checksum
*/
object ChecksumFactory {
/** Creates a `java.security.Digest` factory for the specified algorithm. */
def digest(algorithm: String): SignerFactory = new ChecksumFactory {
def newSigner: Signer = new DigestSigner(MessageDigest.getInstance(algorithm))
}
/** Signer factory that does not have a distinct verifier. */
private trait ChecksumFactory extends SignerFactory {
def newVerifier: Signer = newSigner
}
/** Fletcher-16 checksum. */
val fletcher16: SignerFactory = new ChecksumFactory {
def newSigner = new Fletcher16Checksum
}
/** CRC-32 checksum. */
val crc32: SignerFactory = new ChecksumFactory {
def newSigner = new ZipChecksumSigner(new CRC32())
}
/** Adler-32 checksum. */
val adler32: SignerFactory = new ChecksumFactory {
def newSigner = new ZipChecksumSigner(new Adler32())
}
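// Usage sketch (payload hypothetical): signing bytes with the CRC-32 factory above.
//   val signer = ChecksumFactory.crc32.newSigner
//   signer.update(Array[Byte](1, 2, 3))
//   val sig = signer.sign // the 4-byte big-endian CRC-32 value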
/** `java.security.MessageDigest` implementation of Signer. */
private class DigestSigner(impl: MessageDigest) extends Signer {
def update(data: Array[Byte]): Unit = impl.update(data)
def sign: Array[Byte] = impl.digest
def verify(signature: Array[Byte]): Boolean = MessageDigest.isEqual(impl.digest(), signature)
}
/** http://en.wikipedia.org/wiki/Fletcher's_checksum */
private class Fletcher16Checksum extends Signer {
var checksum = (0, 0)
def update(data: Array[Byte]): Unit = {
checksum = data.foldLeft(checksum) { (p, b) =>
val lsb = (p._2 + (0xff & b)) % 255
((p._1 + lsb) % 255, lsb)
}
}
def sign: Array[Byte] = Array(checksum._1.toByte, checksum._2.toByte)
def verify(signature: Array[Byte]): Boolean = Arrays.equals(sign, signature)
}
/** `java.util.zip.Checksum` implementation of Signer. */
private class ZipChecksumSigner(impl: Checksum) extends Signer {
def update(data: Array[Byte]): Unit = impl.update(data, 0, data.length)
def sign: Array[Byte] = ByteVector.fromLong(impl.getValue()).drop(4).toArray
def verify(signature: Array[Byte]): Boolean = MessageDigest.isEqual(sign, signature)
}
}
|
danielwegener/scodec
|
src/main/scala/scodec/codecs/ChecksumFactory.scala
|
Scala
|
bsd-3-clause
| 2,316
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.expressions
import org.apache.flink.table.api._
import org.apache.flink.table.planner.expressions.utils.ArrayTypeTestBase
import org.apache.flink.table.planner.utils.DateTimeTestUtil.{localDate, localDateTime, localTime => gLocalTime}
import org.junit.Test
import java.time.{LocalDateTime => JLocalDateTime}
class ArrayTypeTest extends ArrayTypeTestBase {
@Test
def testInputTypeGeneralization(): Unit = {
testAllApis(
array(1, 2.0, 3.0),
"array(1, 2.0, 3.0)",
"ARRAY[1, cast(2.0 AS DOUBLE), cast(3.0 AS DOUBLE)]",
"[1.0, 2.0, 3.0]")
}
@Test
def testArrayLiterals(): Unit = {
// primitive literals
testAllApis(array(1, 2, 3), "array(1, 2, 3)", "ARRAY[1, 2, 3]", "[1, 2, 3]")
testAllApis(
array(true, true, true),
"array(true, true, true)",
"ARRAY[TRUE, TRUE, TRUE]",
"[true, true, true]")
// object literals
testTableApi(array(BigDecimal(1), BigDecimal(1)), "array(1p, 1p)", "[1, 1]")
testAllApis(
array(array(array(1), array(1))),
"array(array(array(1), array(1)))",
"ARRAY[ARRAY[ARRAY[1], ARRAY[1]]]",
"[[[1], [1]]]")
testAllApis(
array(1 + 1, 3 * 3),
"array(1 + 1, 3 * 3)",
"ARRAY[1 + 1, 3 * 3]",
"[2, 9]")
testAllApis(
array(nullOf(DataTypes.INT), 1),
"array(Null(INT), 1)",
"ARRAY[NULLIF(1,1), 1]",
"[null, 1]")
testAllApis(
array(array(nullOf(DataTypes.INT), 1)),
"array(array(Null(INT), 1))",
"ARRAY[ARRAY[NULLIF(1,1), 1]]",
"[[null, 1]]")
// implicit conversion
testTableApi(
Array(1, 2, 3),
"array(1, 2, 3)",
"[1, 2, 3]")
testTableApi(
Array[Integer](1, 2, 3),
"array(1, 2, 3)",
"[1, 2, 3]")
testAllApis(
Array(localDate("1985-04-11"), localDate("2018-07-26")),
"array('1985-04-11'.toDate, '2018-07-26'.toDate)",
"ARRAY[DATE '1985-04-11', DATE '2018-07-26']",
"[1985-04-11, 2018-07-26]")
testAllApis(
Array(gLocalTime("14:15:16"), gLocalTime("17:18:19")),
"array('14:15:16'.toTime, '17:18:19'.toTime)",
"ARRAY[TIME '14:15:16', TIME '17:18:19']",
"[14:15:16, 17:18:19]")
// There is no timestamp literal function in the Java String Table API;
// toTimestamp casts a string to TIMESTAMP(3), which is not the same as a timestamp literal.
testTableApi(
Array(localDateTime("1985-04-11 14:15:16"), localDateTime("2018-07-26 17:18:19")),
"[1985-04-11 14:15:16, 2018-07-26 17:18:19]")
testSqlApi(
"ARRAY[TIMESTAMP '1985-04-11 14:15:16', TIMESTAMP '2018-07-26 17:18:19']",
"[1985-04-11 14:15:16, 2018-07-26 17:18:19]")
// localDateTime uses DateTimeUtils.timestampStringToUnixDate to parse a time string,
// which only supports millisecond precision.
testTableApi(
Array(
JLocalDateTime.of(1985, 4, 11, 14, 15, 16, 123456789),
JLocalDateTime.of(2018, 7, 26, 17, 18, 19, 123456789)),
"[1985-04-11 14:15:16.123456789, 2018-07-26 17:18:19.123456789]")
testTableApi(
Array(
JLocalDateTime.of(1985, 4, 11, 14, 15, 16, 123456700),
JLocalDateTime.of(2018, 7, 26, 17, 18, 19, 123456700)),
"[1985-04-11 14:15:16.1234567, 2018-07-26 17:18:19.1234567]")
testTableApi(
Array(
JLocalDateTime.of(1985, 4, 11, 14, 15, 16, 123456000),
JLocalDateTime.of(2018, 7, 26, 17, 18, 19, 123456000)),
"[1985-04-11 14:15:16.123456, 2018-07-26 17:18:19.123456]")
testTableApi(
Array(
JLocalDateTime.of(1985, 4, 11, 14, 15, 16, 123400000),
JLocalDateTime.of(2018, 7, 26, 17, 18, 19, 123400000)),
"[1985-04-11 14:15:16.1234, 2018-07-26 17:18:19.1234]")
testSqlApi(
"ARRAY[TIMESTAMP '1985-04-11 14:15:16.123456789', TIMESTAMP '2018-07-26 17:18:19.123456789']",
"[1985-04-11 14:15:16.123456789, 2018-07-26 17:18:19.123456789]")
testSqlApi(
"ARRAY[TIMESTAMP '1985-04-11 14:15:16.1234567', TIMESTAMP '2018-07-26 17:18:19.1234567']",
"[1985-04-11 14:15:16.1234567, 2018-07-26 17:18:19.1234567]")
testSqlApi(
"ARRAY[TIMESTAMP '1985-04-11 14:15:16.123456', TIMESTAMP '2018-07-26 17:18:19.123456']",
"[1985-04-11 14:15:16.123456, 2018-07-26 17:18:19.123456]")
testSqlApi(
"ARRAY[TIMESTAMP '1985-04-11 14:15:16.1234', TIMESTAMP '2018-07-26 17:18:19.1234']",
"[1985-04-11 14:15:16.1234, 2018-07-26 17:18:19.1234]")
testAllApis(
Array(BigDecimal(2.0002), BigDecimal(2.0003)),
"Array(2.0002p, 2.0003p)",
"ARRAY[CAST(2.0002 AS DECIMAL(10,4)), CAST(2.0003 AS DECIMAL(10,4))]",
"[2.0002, 2.0003]")
testAllApis(
Array(Array(x = true)),
"Array(Array(true))",
"ARRAY[ARRAY[TRUE]]",
"[[true]]")
testAllApis(
Array(Array(1, 2, 3), Array(3, 2, 1)),
"Array(Array(1, 2, 3), Array(3, 2, 1))",
"ARRAY[ARRAY[1, 2, 3], ARRAY[3, 2, 1]]",
"[[1, 2, 3], [3, 2, 1]]")
// implicit type cast only works on SQL APIs.
testSqlApi("ARRAY[CAST(1 AS DOUBLE), CAST(2 AS FLOAT)]", "[1.0, 2.0]")
}
@Test
def testArrayField(): Unit = {
testAllApis(
array('f0, 'f1),
"array(f0, f1)",
"ARRAY[f0, f1]",
"[null, 42]")
testAllApis(
array('f0, 'f1),
"array(f0, f1)",
"ARRAY[f0, f1]",
"[null, 42]")
testAllApis(
'f2,
"f2",
"f2",
"[1, 2, 3]")
testAllApis(
'f3,
"f3",
"f3",
"[1984-03-12, 1984-02-10]")
testAllApis(
'f5,
"f5",
"f5",
"[[1, 2, 3], null]")
testAllApis(
'f6,
"f6",
"f6",
"[1, null, null, 4]")
testAllApis(
'f2,
"f2",
"f2",
"[1, 2, 3]")
testAllApis(
'f2.at(1),
"f2.at(1)",
"f2[1]",
"1")
testAllApis(
'f3.at(1),
"f3.at(1)",
"f3[1]",
"1984-03-12")
testAllApis(
'f3.at(2),
"f3.at(2)",
"f3[2]",
"1984-02-10")
testAllApis(
'f5.at(1).at(2),
"f5.at(1).at(2)",
"f5[1][2]",
"2")
testAllApis(
'f5.at(2).at(2),
"f5.at(2).at(2)",
"f5[2][2]",
"null")
testAllApis(
'f4.at(2).at(2),
"f4.at(2).at(2)",
"f4[2][2]",
"null")
testAllApis(
'f11.at(1),
"f11.at(1)",
"f11[1]",
"1")
}
@Test
def testArrayOperations(): Unit = {
// cardinality
testAllApis(
'f2.cardinality(),
"f2.cardinality()",
"CARDINALITY(f2)",
"3")
testAllApis(
'f4.cardinality(),
"f4.cardinality()",
"CARDINALITY(f4)",
"null")
testAllApis(
'f11.cardinality(),
"f11.cardinality()",
"CARDINALITY(f11)",
"1")
// element
testAllApis(
'f9.element(),
"f9.element()",
"ELEMENT(f9)",
"1")
testAllApis(
'f8.element(),
"f8.element()",
"ELEMENT(f8)",
"4.0")
testAllApis(
'f10.element(),
"f10.element()",
"ELEMENT(f10)",
"null")
testAllApis(
'f4.element(),
"f4.element()",
"ELEMENT(f4)",
"null")
testAllApis(
'f11.element(),
"f11.element()",
"ELEMENT(f11)",
"1")
// comparison
testAllApis(
'f2 === 'f5.at(1),
"f2 === f5.at(1)",
"f2 = f5[1]",
"true")
testAllApis(
'f6 === array(1, 2, 3),
"f6 === array(1, 2, 3)",
"f6 = ARRAY[1, 2, 3]",
"false")
testAllApis(
'f2 !== 'f5.at(1),
"f2 !== f5.at(1)",
"f2 <> f5[1]",
"false")
testAllApis(
'f2 === 'f7,
"f2 === f7",
"f2 = f7",
"false")
testAllApis(
'f2 !== 'f7,
"f2 !== f7",
"f2 <> f7",
"true")
testAllApis(
'f11 === 'f11,
"f11 === f11",
"f11 = f11",
"true")
testAllApis(
'f11 === 'f9,
"f11 === f9",
"f11 = f9",
"true")
testAllApis(
'f11 !== 'f11,
"f11 !== f11",
"f11 <> f11",
"false")
testAllApis(
'f11 !== 'f9,
"f11 !== f9",
"f11 <> f9",
"false")
}
@Test
def testArrayTypeCasting(): Unit = {
testTableApi(
'f3.cast(DataTypes.ARRAY(DataTypes.DATE)),
"f3.cast(OBJECT_ARRAY(SQL_DATE))",
"[1984-03-12, 1984-02-10]"
)
}
}
|
GJL/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/expressions/ArrayTypeTest.scala
|
Scala
|
apache-2.0
| 9,186
|
package ops.android.app
import android.app.Activity
import android.content.Context
import android.view.View
trait ActivityOps extends ContextOps {
self: Activity =>
override implicit lazy val context: Context = this
override def _getString(resId: Int, args: AnyRef*): String = getString(resId, args: _*)
implicit class ResourceView(self: Int) {
def findView[T <: View](): T = findViewById(self).asInstanceOf[T]
}
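// Usage sketch inside an Activity (id and view type hypothetical):
//   val title = R.id.title.findView[android.widget.TextView]()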
}
|
raizu/AndroidOps
|
src/main/scala/ops/android/app/ActivityOps.scala
|
Scala
|
apache-2.0
| 453
|
package org.jetbrains.plugins.scala
package testingSupport
import com.intellij.execution.process.{ProcessHandler, ProcessEvent, ProcessListener}
import com.intellij.openapi.util.Key
import com.intellij.debugger.engine.SuspendContextImpl
import com.intellij.execution.testframework.AbstractTestProxy
/**
* @author Roman.Shein
* Date: 03.03.14
*/
class TestResultListener(private val testConfigurationName: String) extends ProcessListener {
def waitForTestEnd(duration: Int): String = {
var i = 0
while (i < duration && (!terminated)) {
Thread.sleep(10)
i += 1
}
assert(terminated, "test " + testConfigurationName + " did not terminate correctly")
builder.toString()
}
private val builder = new StringBuilder
private var terminated = false
override def onTextAvailable(event: ProcessEvent, outputType: Key[_]): Unit = {
val text = event.getText
import TestResultListener._
if (text.contains(testResultPrefix) && text.contains(testResultSuffix)) {
val from = text.indexOf(testResultPrefix)
val to = text.indexOf(testResultSuffix)
if (from != -1 && to != -1) {
builder.append(text.substring(from + testResultPrefix.length, to))
}
}
}
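// Protocol sketch: the test process is expected to print results framed as
// ">>TEST: <payload><<"; the handler above extracts each <payload> into `builder`.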
override def processWillTerminate(event: ProcessEvent, willBeDestroyed: Boolean): Unit = {
//TODO: implement me
}
override def processTerminated(event: ProcessEvent): Unit = {
terminated = true
}
override def startNotified(event: ProcessEvent): Unit = {
//TODO: implement me
}
}
object TestResultListener {
val testResultPrefix = ">>TEST: "
val testResultSuffix = "<<"
}
|
LPTK/intellij-scala
|
test/org/jetbrains/plugins/scala/testingSupport/TestResultListener.scala
|
Scala
|
apache-2.0
| 1,641
|
package V2
object FWAEDynamicInterp extends App {
sealed abstract class FWAE
case class Num(n: Int) extends FWAE
case class Sub(lhs: FWAE, rhs: FWAE) extends FWAE
case class Add(lhs: FWAE, rhs: FWAE) extends FWAE
case class Let(boundId: Symbol, namedExpr: FWAE, boundBody: FWAE) extends FWAE
case class Id(name: Symbol) extends FWAE
case class Fun(param: Symbol, body: FWAE) extends FWAE
// No external funName anymore.
case class App(funExpr: FWAE, arg: FWAE) extends FWAE
abstract class Value
case class VNum(n: Int) extends Value
case class VFun(param: Symbol, body: FWAE) extends Value
// We use environments again, but this time they can map to VNum or VFun
type Env = Map[Symbol, Value]
def interp(expr: FWAE, env: Map[Symbol, Value] = Map()): Value = expr match {
case Num(n) => VNum(n)
case Sub(lhs, rhs) => interp(lhs, env) match {
case VNum(n1) => interp(rhs, env) match {
case VNum(n2) => VNum(n1 - n2)
case _ => sys.error("Could only sub numbers for rhs.")
}
case _ => sys.error("Could only sub numbers for lhs.")
}
case Add(lhs, rhs) => interp(lhs, env) match {
case VNum(n1) => interp(rhs, env) match {
case VNum(n2) => VNum(n1 + n2)
case _ => sys.error("Could only add numbers for rhs.")
}
case _ => sys.error("Could only add numbers for lhs.")
}
case Let(boundId, namedExpr, boundBody) =>
// We extend the env with the new identifier binding
val extendedEnv = env + (boundId -> interp(namedExpr, env))
interp(boundBody, extendedEnv)
case Id(name) => env(name)
case Fun(param, body) => VFun(param, body)
case App(funExpr, argExpr) => interp(funExpr, env) match {
case VFun(param, body) =>
// Dynamic scoping here
val extendedEnv = env + (param -> interp(argExpr, env))
interp(body, extendedEnv)
case e => sys.error("Can only call functions but got" + e)
}
}
// some assertions on the interpreter
import scala.language.implicitConversions
implicit def symbolToFWAE(symbol: Symbol) = Id(symbol)
implicit def intToFWAE(n: Int) = Num(n)
assert(interp(
Let('x, 3,
Let('f, Fun('y, Add('x, 'y)),
Let('x, 5, App('f, 4))))) == VNum(9)) // cf. PLAI page 62
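// Under dynamic scoping the 'x inside 'f resolves to the binding live at the call
// site (x = 5), hence 5 + 4 == VNum(9); a statically scoped interpreter would have
// captured x = 3 when 'f was created and returned VNum(7) instead.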
assert(interp(
Let('x, 3,
Fun('y, Add('x, 'y)))) == VFun('y, Add('x, 'y))) // cf. PLAI page 62
assert(interp(
Let('inc, Fun('x, Add('x, 1)),
Add(App('inc, 4), App('inc, 5)))) == VNum(11))
assert(interp(
Let('x, 2, App(Let('x, 5, Fun('x, Add('x, 'x))), 'x))) == VNum(4))
}
|
Tooa/interpreters
|
src/V2/FWAEDynamicInterp.scala
|
Scala
|
apache-2.0
| 2,601
|
package com.yukihirai0505.sInstagram.utils
import com.typesafe.config.{Config, ConfigFactory}
import scala.util.Properties
/**
* author Yuki Hirai on 2017/04/17.
*/
object Configurations extends ConfigurationDetector {
val config: Config = ConfigFactory.load
lazy val clientId: String = envVarOrConfig("INSTAGRAM_CLIENT_ID", "instagram.client.id")
lazy val clientSecret: String = envVarOrConfig("INSTAGRAM_SECRET", "instagram.client.secret")
lazy val callbackUrl: String = envVarOrConfig("INSTAGRAM_CALLBACK_URL", "instagram.callbackUrl")
}
trait ConfigurationDetector {
def config: Config
protected def envVarOrConfig(envVar: String, configName: String): String =
try {
environmentVariable(envVar) getOrElse configuration(configName)
} catch {
case _: Throwable =>
val msg = s"[sInstagram] configuration missing: Environment variable $envVar or configuration $configName not found."
throw new RuntimeException(msg)
}
protected def environmentVariable(name: String): Option[String] = Properties.envOrNone(name)
protected def configuration(path: String): String = config.getString(path)
}
|
yukihirai0505/sInstagram
|
src/main/scala/com/yukihirai0505/sInstagram/utils/Configurations.scala
|
Scala
|
mit
| 1,159
|
/*
* Copyright (c) <2015-2016>, see CONTRIBUTORS
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package ch.usi.inf.l3.sana.arrooj.eval
import ch.usi.inf.l3.sana
import sana.arrooj
import sana.arrayj
import sana.ooj
import sana.brokenj
import sana.primj
import sana.tiny
import sana.calcj
import tiny.core.TransformationComponent
import tiny.dsl._
import ooj.eval.ConstantFoldingComponent
import arrayj.ast._
import tiny.ast.{UseTree, Expr}
import arrooj.ast.Implicits._
import arrooj.types.TypeUtils
import arrooj.symbols.SymbolUtils
/*
ArrayTypeUse: DONE
ArrayCreation: DONE
ArrayInitializer: DONE
ArrayAccess: DONE
*/
@component(tree, env)
trait ArrayTypeUseConstantFoldingComponent
extends ConstantFoldingComponent {
(tuse: ArrayTypeUseApi) => {
val (tpt, env1) = constantFold((tuse.tpt, env))
val res =
TreeCopiers.copyArrayTypeUse(tuse)(tpt = tpt.asInstanceOf[UseTree])
tpt.tpe.foreach { tpe =>
res.tpe = TypeUtils.mkArrayType(tpe)
}
tpt.symbol.foreach { sym =>
res.symbol = SymbolUtils.mkArraySymbol(sym)
}
(res, env1)
}
}
@component(tree, env)
trait ArrayCreationConstantFoldingComponent
extends ConstantFoldingComponent {
(creation: ArrayCreationApi) => {
val (array, env1) = constantFold((creation.array, env))
creation.size map { size =>
val (size2, env2) = constantFold((size, env1))
val res =
TreeCopiers.copyArrayCreation(creation)(
array = array.asInstanceOf[Expr],
size = Some(size2.asInstanceOf[Expr]))
(res, env2)
} getOrElse {
val res = TreeCopiers.copyArrayCreation(creation)(
array = array.asInstanceOf[Expr])
(res, env1)
}
}
}
@component(tree, env)
trait ArrayInitializerConstantFoldingComponent
extends ConstantFoldingComponent {
(init: ArrayInitializerApi) => {
val zero: List[Expr] = Nil
val (stnemele, env1) = init.elements.foldLeft((zero, env))((z, y) => {
val zelements = z._1
val zenv = z._2
val (y1, y2) = constantFold((y, zenv))
(y1.asInstanceOf[Expr] :: zelements, y2)
})
val res =
TreeCopiers.copyArrayInitializer(init)(elements = stnemele.reverse)
(res, env1)
}
}
@component(tree, env)
trait ArrayAccessConstantFoldingComponent
extends ConstantFoldingComponent {
(access: ArrayAccessApi) => {
val (array, env1) = constantFold((access.array, env))
val (index, env2) = constantFold((access.index, env1))
val res =
TreeCopiers.copyArrayAccess(access)(array = array.asInstanceOf[Expr],
index = index.asInstanceOf[Expr])
(res, env2) // propagate the environment from folding the index as well
}
}
|
amanjpro/languages-a-la-carte
|
arrooj/src/main/scala/eval/constantfolding.scala
|
Scala
|
bsd-3-clause
| 4,094
|
package unfiltered.jetty
import java.util.EnumSet
import java.util.concurrent.atomic.AtomicInteger
import javax.servlet.{ Filter, DispatcherType }
import org.eclipse.jetty.servlet.{ FilterHolder, ServletContextHandler }
object BasicFilterHolder {
def apply(filter: Filter) = {
val holder = new FilterHolder(filter)
holder.setName(CountedName.Filter.name)
holder
}
}
case class FilterAdder(
filterHolder: FilterHolder,
pathSpec: String = "/*",
dispatches: EnumSet[DispatcherType] = EnumSet.of(DispatcherType.REQUEST)
) {
def addToContext(ctx: ServletContextHandler): Unit = {
ctx.addFilter(filterHolder, pathSpec, dispatches)
}
}
case class CountedName(prefix: String) {
private val counter = new AtomicInteger
def name = prefix + " " + counter.incrementAndGet
}
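// Illustrative: each instance numbers its names independently, e.g.
//   val names = CountedName("Servlet")
//   names.name // "Servlet 1"
//   names.name // "Servlet 2"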
object CountedName {
val Servlet = CountedName("Servlet")
val Filter = CountedName("Filter")
}
|
hamnis/unfiltered
|
jetty/src/main/scala/filters.scala
|
Scala
|
mit
| 901
|
/**************************************************************************
Copyright 2014 Allen Institute for Artificial Intelligence Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
****************************************************************************/
package org.allenai.ari.solvers.graphmatch.models.features
import com.tinkerpop.blueprints.impls.tg.TinkerGraph
import com.tinkerpop.blueprints.Vertex
import org.allenai.ari.solvers.graphmatch.graph.path._
import org.allenai.ari.solvers.graphmatch.tools._
/** Checks that the two paths spell out exactly the same phrase
* (after stop-word filtering).
*/
object EqualPhraseFeature extends LightweightLexicalFeatureTrait {
weight = 3.0
override val featureTag: String = "EqualPhrase"
override val requireStemming = false
override def feature(qp: PathTrait, ep: PathTrait, graph: TinkerGraph, focus: Option[String] = None): Double = {
qp match {
case p: EdgePath => return 0.0
case _ => {}
}
ep match {
case p: EdgePath => return 0.0
case _ => {}
}
val qw: Seq[String] = PathToPhraseSeq(qp)
val ew: Seq[String] = PathToPhraseSeq(ep)
wordsFeature(qw, ew)
}
def generatePhrase(nodes: Seq[Vertex]): String = {
val pairs = nodes.map(n => (n.getProperty("text"): String, n.getProperty("position"): Int))
val pairsSorted = pairs.sortBy[Int](_._2)
val s = pairsSorted.map(p => p._1).reduce(_ + " " + _)
s
}
override def wordsFeature(words1: Seq[String], words2: Seq[String], focus: Option[String]): Double = {
val w1 = words1.filter(w => !StopWord(w))
val w2 = words2.filter(w => !StopWord(w))
if (w1.nonEmpty && w1 == w2) {
1.0
} else {
0.0
}
}
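// Illustrative behaviour (assuming "the" is in the stop-word list): comparing
// Seq("the", "red", "fox") with Seq("red", "fox") yields 1.0, since stop words are
// dropped before the equality check; any remaining difference yields 0.0.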
}
|
tomkwiat/dependency-graph-similarity-measure
|
src/main/scala/org/allenai/ari/solvers/graphmatch/models/features/EqualPhraseFeature.scala
|
Scala
|
apache-2.0
| 2,188
|
package org.jetbrains.plugins.scala.lang.psi.stubs
import com.intellij.psi.stubs.NamedStub
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import org.jetbrains.plugins.scala.lang.psi.stubs.impl.{ScExpressionOwnerStub, ScTypeElementOwnerStub}
/**
* User: Alexander Podkhalyuzin
* Date: 14.10.2008
*/
trait ScFunctionStub extends NamedStub[ScFunction]
with ScMemberOrLocal
with ScTypeElementOwnerStub[ScFunction]
with ScExpressionOwnerStub[ScFunction] {
def isImplicit: Boolean
def isDeclaration: Boolean
def annotations: Array[String]
def hasAssign: Boolean
}
|
gtache/intellij-lsp
|
intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/psi/stubs/ScFunctionStub.scala
|
Scala
|
apache-2.0
| 606
|
package com.arcusys.learn.models.response.certificates
/**
* Created by Iliya Tryapitsin on 02.06.2014.
*/
case class CertificateWithUserStatusResponse(id: Int,
title: String,
shortDescription: String,
description: String,
logo: String,
isPublished: Boolean,
courseCount: Int,
statementCount: Int,
activityCount: Int,
packageCount: Int,
userCount: Int,
status: String) extends CertificateResponseContract
|
ViLPy/Valamis
|
learn-portlet/src/main/scala/com/arcusys/learn/models/response/certificates/CertificateWithUserStatusResponse.scala
|
Scala
|
lgpl-3.0
| 430
|
package com.outr.arango
import com.outr.arango.api.model.{PostAPICursor, PostAPICursorOpts}
import com.outr.arango.api.{APICursor, APICursorCursorIdentifier}
import io.circe.Decoder.Result
import io.circe.{Decoder, HCursor, Json}
import io.youi.client.HttpClient
import profig.JsonUtil
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.language.experimental.macros
case class QueryBuilder[R](client: HttpClient,
query: Query,
conversion: Json => R,
batchSize: Int = 100,
cache: Boolean = true,
count: Boolean = false,
memoryLimit: Option[Long] = None,
options: Option[PostAPICursorOpts] = None,
ttl: Option[Long] = None,
logQuery: Option[Json => Unit] = None,
logResponse: Option[Json => Unit] = None) {
def as[D](conversion: Json => D): QueryBuilder[D] = copy[D](conversion = conversion)
def as[D](serialization: Serialization[D]): QueryBuilder[D] = as[D](json => serialization.fromJson(json))
def as[D]: QueryBuilder[D] = macro GraphMacros.queryBuilderAs[D]
def batchSize(batchSize: Int): QueryBuilder[R] = copy(batchSize = batchSize)
def withCache: QueryBuilder[R] = copy(cache = true)
def withoutCache: QueryBuilder[R] = copy(cache = false)
def includeCount: QueryBuilder[R] = copy(count = true)
def excludeCount: QueryBuilder[R] = copy(count = false)
def withMemoryLimit(limit: Long): QueryBuilder[R] = copy(memoryLimit = Some(limit))
def withoutMemoryLimit: QueryBuilder[R] = copy(memoryLimit = None)
def cursorTimeout(timeInSeconds: Int = 30): QueryBuilder[R] = copy(ttl = Some(timeInSeconds))
def failOnWarning(b: Boolean): QueryBuilder[R] = opt(_.copy(failOnWarning = Some(b)))
def includeFullCount: QueryBuilder[R] = opt(_.copy(fullCount = Some(true)))
def excludeFullCount: QueryBuilder[R] = opt(_.copy(fullCount = Some(false)))
def maxWarningCount(n: Int): QueryBuilder[R] = opt(_.copy(maxWarningCount = Some(n)))
def maxRuntime(max: FiniteDuration): QueryBuilder[R] = opt(_.copy(maxRuntime = Some(max.toMillis.toDouble / 1000.0)))
def satelliteSyncWait(b: Boolean): QueryBuilder[R] = opt(_.copy(satelliteSyncWait = Some(b)))
def stream(b: Boolean): QueryBuilder[R] = opt(_.copy(stream = Some(b)))
def logQuery(f: Json => Unit): QueryBuilder[R] = copy(logQuery = Some(f))
def logResponse(f: Json => Unit): QueryBuilder[R] = copy(logResponse = Some(f))
private def opt(f: PostAPICursorOpts => PostAPICursorOpts): QueryBuilder[R] = {
val opts = options.getOrElse(PostAPICursorOpts())
copy(options = Some(f(opts)))
}
private implicit lazy val dDecoder: Decoder[R] = new Decoder[R] {
override def apply(c: HCursor): Result[R] = Right(conversion(c.value))
}
private lazy val qrDecoder: Decoder[QueryResponse[R]] = JsonUtil.decoder[QueryResponse[R]]
def cursor(implicit ec: ExecutionContext): Future[QueryResponse[R]] = {
val bindVars = query.bindVars
val apiCursor = PostAPICursor(
query = query.value,
bindVars = bindVars,
batchSize = Some(batchSize.toLong),
cache = Some(cache),
count = Some(count),
memoryLimit = memoryLimit,
options = options,
ttl = ttl
)
logQuery.foreach(f => f(JsonUtil.toJson(apiCursor)))
APICursor
.post(
client = client,
body = apiCursor
)
.map { response =>
logResponse.foreach(f => f(response))
response.as[QueryResponse[R]](qrDecoder)
}
.map {
case Left(df) => throw df
case Right(r) => {
if (options.flatMap(_.fullCount).getOrElse(false)) {
r.copy(count = r.extra.stats.fullCount)
} else {
r
}
}
}
}
def update(implicit ec: ExecutionContext): Future[Unit] = cursor(ec).map(_ => ())
def results(implicit ec: ExecutionContext): Future[List[R]] = cursor(ec).map(_.result)
def get(id: String)(implicit ec: ExecutionContext): Future[QueryResponse[R]] = APICursorCursorIdentifier
.put(client, id)
.map(_.as[QueryResponse[R]](qrDecoder))
.map {
case Left(df) => throw df
case Right(r) => r
}
/**
* Convenience method that calls `cursor`, grabbing the first result and returning None if there are no results.
*/
def first(implicit ec: ExecutionContext): Future[Option[R]] = {
batchSize(1).cursor.map(_.result.headOption)
}
def one(implicit ec: ExecutionContext): Future[R] = {
batchSize(1).includeCount.cursor.map { response =>
if (response.count != 1) {
throw new RuntimeException(s"Expected exactly one result for $query, but received ${response.count}")
} else {
response.result.head
}
}
}
def paged(implicit ec: ExecutionContext): Future[Pagination[R]] = {
includeCount.cursor.map(Pagination(this, _))
}
/**
* Utilizes pagination to process through all pages of data
*
* @param f the function to handle processing of each page of data
* @param ec the ExecutionContext
* @return Future[List[Return]]
*/
def process[Return](f: QueryResponse[R] => Future[Return])(implicit ec: ExecutionContext): Future[List[Return]] = {
paged(ec).flatMap(_.process(f))
}
/**
* Simplification of process to iteratively handle one result at a time through all pages of data
*
* @param f the function to handle processing of each data element
* @param ec the ExecutionContext
* @return Future[Unit]
*/
def iterate(f: R => Future[Unit])(implicit ec: ExecutionContext): Future[Unit] = process { response =>
def recurse(list: List[R]): Future[Unit] = if (list.isEmpty) {
Future.successful(())
} else {
f(list.head).flatMap(_ => recurse(list.tail))
}
recurse(response.result)
}.map(_ => ())
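// Usage sketch (builder and handler hypothetical): stream through all matching
// documents one at a time without materializing every page in memory.
//   queryBuilder.batchSize(500).iterate(doc => Future.successful(println(doc)))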
}
|
outr/arangodb-scala
|
driver/src/main/scala/com/outr/arango/QueryBuilder.scala
|
Scala
|
mit
| 6,011
|
object Test {
trait Leaf[T] {
def collect[U](f: PartialFunction[Leaf[_], U]): List[U]
def leaves: List[Leaf[T]] = collect { case l: Leaf[T] => l }
}
}
|
densh/dotty
|
tests/pos/virtpatmat_exist_uncurry.scala
|
Scala
|
bsd-3-clause
| 163
|
package ingraph.ire.messages
import java.io.{IOException, ObjectInputStream, ObjectOutputStream}
import akka.actor.ActorRef
import ingraph.ire.datatypes.Tuple
import ingraph.ire.util.AtomicUniqueCounter
import scala.collection.mutable
import scala.concurrent.{Future, Promise}
class Terminator private(terminatorID: Int, val inputs: Iterable[ReteMessage => Unit], production: ActorRef) extends ReteMessage with Serializable {
var lastMessageID = -1
def send(): Future[Iterable[Tuple]] = {
val messageID = Terminator.idCounter.getNext
lastMessageID = messageID
val promise = Promise[Iterable[Tuple]]
production ! ExpectTerminator(terminatorID, messageID, promise)
val future = promise.future
inputs.foreach(input => {
input(Pause(messageID))
input(TerminatorMessage(terminatorID, messageID))
})
future
}
def resend(): Future[Iterable[Tuple]] = {
val promise = Promise[Iterable[Tuple]]
production ! ExpectTerminator(terminatorID, lastMessageID, promise)
val future = promise.future
inputs.foreach(input => {
input(TerminatorMessage(terminatorID, lastMessageID))
})
future
}
@throws(classOf[IOException])
private def writeObject(out: ObjectOutputStream): Unit = {}
@throws(classOf[IOException])
private def readObject(in: ObjectInputStream): Unit = {}
}
object Terminator {
val idCounter = new AtomicUniqueCounter
def apply(inputs: Iterable[ReteMessage => Unit], productionNode: ActorRef): Terminator = {
val id = idCounter.getNext
productionNode ! ExpectMoreTerminators(id, inputs)
new Terminator(id, inputs, productionNode)
}
}
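// Usage sketch (actor ref hypothetical): build a terminator over the Rete inputs,
// then await a consistent snapshot of the production node's results.
//   val terminator = Terminator(inputs, productionActorRef)
//   val results: Future[Iterable[Tuple]] = terminator.send()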
trait TerminatorHandler {
val expectedTerminatorCount: Int
val terminatorCount = new mutable.HashMap[Int, Int]
def forward(terminator: TerminatorMessage): Unit
def handleTerminator(terminator: TerminatorMessage): Unit = {
val count = terminatorCount.getOrElse(terminator.messageID, 0) + 1
if (count >= expectedTerminatorCount) {
forward(terminator)
terminatorCount -= terminator.messageID
} else {
// only retain the running count while more terminators are still expected
terminatorCount(terminator.messageID) = count
}
}
}
|
FTSRG/ingraph
|
ire/src/main/scala/ingraph/ire/messages/Terminator.scala
|
Scala
|
epl-1.0
| 2,124
|
package slinky.web
import slinky.core.SyntheticEvent
import scala.scalajs.js
import org.scalajs.dom.{TouchEvent, TouchList}
// https://reactjs.org/docs/events.html?#touch-events
@js.native
trait SyntheticTouchEvent[+TargetType] extends SyntheticEvent[TargetType, TouchEvent] {
val altKey: Boolean = js.native
val changedTouches: TouchList = js.native
val ctrlKey: Boolean = js.native
def getModifierState(key: String): Boolean = js.native
val metaKey: Boolean = js.native
val shiftKey: Boolean = js.native
val targetTouches: TouchList = js.native
val touches: TouchList = js.native
}
|
shadaj/slinky
|
web/src/main/scala/slinky/web/SyntheticTouchEvent.scala
|
Scala
|
mit
| 738
|
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc
import java.nio.file.attribute.BasicFileAttributes
import java.nio.file.{FileVisitResult, Files, Path, SimpleFileVisitor, _}
import scala.jdk.CollectionConverters._
import scala.reflect.internal.pickling.ByteCodecs
import scala.reflect.io.RootPath
import scala.tools.asm.tree.ClassNode
import scala.tools.asm.{ClassReader, ClassWriter, Opcodes}
object PickleExtractor {
def main(args: Array[String]): Unit = {
args.toList match {
case input :: output :: Nil =>
process(Paths.get(input), Paths.get(output))
case _ =>
}
}
def process(input: Path, output: Path): Unit = {
val inputPath = RootPath(input, writable = false)
val outputPath = RootPath(output, writable = true)
try {
val root = inputPath.root
Files.createDirectories(outputPath.root)
val visitor = new SimpleFileVisitor[Path] {
override def preVisitDirectory(dir: Path, attrs: BasicFileAttributes): FileVisitResult = {
if (dir != root) {
val outputDir = outputPath.root.resolve(root.relativize(dir).toString)
Files.createDirectories(outputDir)
}
FileVisitResult.CONTINUE
}
override def visitFile(file: Path, attrs: BasicFileAttributes): FileVisitResult = {
if (file.getFileName.toString.endsWith(".class")) {
try {
stripClassFile(Files.readAllBytes(file)) match {
case Class(out) =>
Files.write(outputPath.root.resolve(root.relativize(file).toString), out)
case Pickle(out) =>
Files.write(outputPath.root.resolve(root.relativize(file).toString.replaceAll(".class$", ".sig")), out)
case Skip =>
}
} catch {
case ex: RuntimeException =>
              throw new RuntimeException("While parsing: " + file + " in " + inputPath, ex)
}
}
FileVisitResult.CONTINUE
}
}
Files.walkFileTree(root, visitor)
} finally {
inputPath.close()
outputPath.close()
}
}
def stripClassFile(classfile: Array[Byte]): OutputFile = {
val input = new ClassNode()
new ClassReader(classfile).accept(input, ClassReader.SKIP_DEBUG | ClassReader.SKIP_FRAMES | ClassReader.SKIP_CODE)
var output = new ClassNode()
output.name = input.name
output.access = input.access
output.version = input.version
var foundScalaSig = false
def isScalaAnnotation(desc: String) = (desc == "Lscala/reflect/ScalaSignature;" || desc == "Lscala/reflect/ScalaLongSignature;") && {
foundScalaSig = true
true
}
var pickleData: Array[Byte] = null
if (input.visibleAnnotations != null) {
input.visibleAnnotations.asScala.foreach { node =>
if (node.desc == "Lscala/reflect/ScalaSignature;") {
val Array("bytes", data: String) = node.values.toArray()
val bytes = data.getBytes(java.nio.charset.StandardCharsets.UTF_8)
val len = ByteCodecs.decode(bytes)
pickleData = bytes.take(len)
} else if (node.desc == "Lscala/reflect/ScalaLongSignature;") {
val Array("bytes", data: java.util.Collection[String @unchecked]) = node.values.toArray()
val encoded = data.asScala.toArray flatMap (_.getBytes(java.nio.charset.StandardCharsets.UTF_8))
val len = ByteCodecs.decode(encoded)
pickleData = encoded.take(len)
}
}
      output.visibleAnnotations = input.visibleAnnotations.asScala.filter(node => isScalaAnnotation(node.desc)).asJava
    }
    var foundScalaAttr = false
    if (input.attrs != null) {
      // the filter predicate deliberately records, as a side effect, whether a Scala attribute was seen
      output.attrs = input.attrs.asScala.filter(attr => (attr.`type` == "Scala" || attr.`type` == "ScalaSig") && {
        foundScalaAttr = true
        true
      }).asJava
}
val writer = new ClassWriter(Opcodes.ASM7)
val isScalaRaw = foundScalaAttr && !foundScalaSig
if (isScalaRaw) Skip
else {
if (pickleData == null) {
output = input
output.accept(writer)
Class(writer.toByteArray)
} else {
output.accept(writer)
Pickle(pickleData)
}
}
}
sealed abstract class OutputFile
case object Skip extends OutputFile
case class Class(content: Array[Byte]) extends OutputFile
case class Pickle(content: Array[Byte]) extends OutputFile
}
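// Hypothetical sketch (not part of the original file): ASM exposes annotation
// values as a flat list of alternating names and values, which is why
// stripClassFile destructures node.values.toArray() with Array("bytes", data).
object AnnotationValuesSketch {
  def main(args: Array[String]): Unit = {
    val values: Array[AnyRef] = Array("bytes", "SGVsbG8=") // name/value pairs, as ASM lays them out
    val Array("bytes", data: String) = values
    println(data) // SGVsbG8=
  }
}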
|
martijnhoekstra/scala
|
src/compiler/scala/tools/nsc/PickleExtractor.scala
|
Scala
|
apache-2.0
| 4,737
|
package com.github.mitallast.nsq
import java.io.Closeable
import com.typesafe.config.{Config, ConfigFactory}
import io.netty.util.CharsetUtil
import scala.concurrent.Future
import scala.concurrent.duration._
trait OK
trait NSQMessage {
def timestamp: Long
def attempts: Int
def messageId: String
def data: Array[Byte]
def fin(): Unit
def req(timeout: Duration = 0.milliseconds): Unit
def touch(): Unit
def touch(duration: Duration): Unit
}
trait NSQClient extends Closeable {
def producer(): NSQProducer
def consumer(topic: String, channel: String = "default")(consumer: NSQMessage ⇒ Unit): NSQConsumer
}
object NSQClient {
def apply(): NSQClient = apply(ConfigFactory.load())
def apply(config: Config): NSQClient = apply(NSQLookup(config), config)
def apply(lookup: NSQLookup): NSQClient = apply(lookup, ConfigFactory.load())
def apply(lookup: NSQLookup, config: Config): NSQClient = new NSQNettyClient(lookup, config.withFallback(ConfigFactory.defaultReference()))
}
trait NSQProducer extends Closeable {
def pub(topic: String, data: Array[Byte]): Future[OK]
def pubStr(topic: String, data: String): Future[OK] = {
pub(topic, data.getBytes(CharsetUtil.UTF_8))
}
def mpub(topic: String, data: Seq[Array[Byte]]): Future[OK]
def mpubStr(topic: String, data: Seq[String]): Future[OK] = {
mpub(topic, data.map(_.getBytes(CharsetUtil.UTF_8)))
}
}
trait NSQConsumer extends Closeable {
}
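// Hypothetical sketch (not part of the original file): pubStr/mpubStr above are
// template methods over the abstract pub/mpub, so a stub producer only needs to
// supply the byte-level operations.
object StubProducerSketch {
  def main(args: Array[String]): Unit = {
    val stub = new NSQProducer {
      def pub(topic: String, data: Array[Byte]): Future[OK] = {
        println(s"$topic <- ${new String(data, CharsetUtil.UTF_8)}")
        Future.successful(new OK {})
      }
      def mpub(topic: String, data: Seq[Array[Byte]]): Future[OK] =
        Future.successful(new OK {})
      def close(): Unit = ()
    }
    stub.pubStr("events", "hello") // prints: events <- hello
    stub.close()
  }
}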
|
mitallast/scala-nsq
|
src/main/scala/com/github/mitallast/nsq/NSQClient.scala
|
Scala
|
mit
| 1,459
|
package com.rocketfuel.sdbc.scalaz
import com.rocketfuel.sdbc.base.jdbc._
import scalaz.concurrent.Task
import scalaz.stream._
import me.jeffshaw.scalaz.stream.IteratorConstructors._
object JdbcProcess {
object jdbc {
private def getConnection(pool: Pool) = Task.delay(pool.getConnection())
private def closeConnection(connection: Connection): Task[Unit] = Task.delay(connection.close())
private def withConnection[Key, T](task: Key => Connection => Task[T])(implicit pool: Pool): Channel[Task, Key, T] = {
channel.lift[Task, Key, T] { params =>
for {
connection <- getConnection(pool)
result <- task(params)(connection).onFinish(_ => closeConnection(connection))
} yield result
}
}
/**
* Create a Process with a single Unit value.
*
* The connection is not closed when the Process completes.
* @param execute
* @param connection
* @return
*/
def execute(
execute: Execute
)(implicit connection: Connection
): Process[Task, Unit] = {
Process.eval(Task.delay(execute.execute()))
}
/**
* Create a Process with a single Seq[Long] value, indicating the number of
* updated rows per query in the batch.
*
* The connection is not closed when the Process completes.
* @param batch
* @param connection
* @return
*/
def batch(
batch: Batch
)(implicit connection: Connection
): Process[Task, Seq[Long]] = {
Process.eval(Task.delay(batch.seq()))
}
/**
* Create a Process of the query results.
*
* The connection is not closed when the Process completes.
* @param select
* @param connection
* @tparam T
* @return
*/
def select[T](
select: Select[T]
)(implicit connection: Connection
): Process[Task, T] = {
Process.iterator(Task.delay(select.iterator()))
}
/**
* Create a Process with one element indicating the number of updated rows.
*
* The connection is not closed when the Process completes.
* @param update
* @param connection
* @return
*/
def update(
update: Update
)(implicit connection: Connection
): Process[Task, Long] = {
Process.eval(Task.delay(update.update()))
}
object params {
/**
* From a stream of collections of parameter lists, create a Process of single Seq[Long] values,
* indicating the number of updated rows per query in the batch.
*
* A connection is taken from the pool for each execution.
* @param batch
* @return
*/
def batch(batch: Batch)(implicit pool: Pool): Channel[Task, Traversable[ParameterList], Seq[Long]] = {
withConnection[Traversable[ParameterList], Seq[Long]] { batches => implicit connection =>
Task.delay(batches.foldLeft(batch){case (b, params) => b.addBatch(params: _*)}.seq())
}
}
/**
* From a stream of parameter lists, independently add each list to the
* query, execute it, and ignore the results.
*
* A connection is taken from the pool for each execution.
* @param execute
* @return
*/
def execute(execute: Execute)(implicit pool: Pool): Sink[Task, ParameterList] = {
withConnection[ParameterList, Unit] { params => implicit connection =>
Task.delay(execute.on(params: _*).execute())
}
}
/**
* From a stream of parameter lists, independently add each list to the
* query, execute it, and create a stream of the results.
*
* Use merge.mergeN to run the queries in parallel, or
* .flatMap(identity) to concatenate them.
*
* A connection is taken from the pool for each execution.
* @param select
* @param pool
* @tparam T
* @return
*/
def select[T](
select: Select[T]
)(implicit pool: Pool
): Channel[Task, ParameterList, Process[Task, T]] = {
channel.lift[Task, ParameterList, Process[Task, T]] { params =>
Task.delay {
Process.await(getConnection(pool)) {implicit connection =>
Process.iterator(Task.delay(select.on(params: _*).iterator())).onComplete(Process.eval_(closeConnection(connection)))
}
}
}
}
/**
* From a stream of parameter lists, independently add each list to the
* query, execute it, and obtain a count of the number of rows that were
* updated.
*
* A connection is taken from the pool for each execution.
* @param update
* @return
*/
def update(update: Update)(implicit pool: Pool): Channel[Task, ParameterList, Long] = {
withConnection[ParameterList, Long] { params => implicit connection =>
Task.delay(update.on(params: _*).update())
}
}
}
object keys {
/**
* Use an instance of Batchable to create a stream of Seq[Long], which each
* indicates the number of rows updated by each query in the batch.
*
* A connection is taken from the pool for each execution.
* @param pool
* @param batchable
* @tparam Key
* @return
*/
def batch[Key](
pool: Pool
)(implicit batchable: Batchable[Key]
): Channel[Task, Key, Seq[Long]] = {
withConnection[Key, Seq[Long]] { key => implicit connection =>
Task.delay(batchable.batch(key).seq())
}(pool)
}
/**
* Use an instance of Executable to create a stream of (),
* which each indicates that a query was executed.
*
* A connection is taken from the pool for each execution.
* @param pool
* @param executable
* @tparam Key
* @return
*/
def execute[Key](
pool: Pool
)(implicit executable: Executable[Key]
): Sink[Task, Key] = {
withConnection[Key, Unit] { key => implicit connection =>
        Task.delay(executable.execute(key).execute())
}(pool)
}
/**
* Use an instance of Selectable to create a Process of a Process of
* results.
*
* Use merge.mergeN on the result to run the queries in parallel, or .flatMap(identity)
* to concatenate them.
*
* A connection is taken from the pool for each execution.
* @param pool
* @param selectable
* @tparam Key
* @tparam Value
* @return
*/
def select[Key, Value](
pool: Pool
)(implicit selectable: Selectable[Key, Value]
): Channel[Task, Key, Process[Task, Value]] = {
channel.lift[Task, Key, Process[Task, Value]] { key =>
Task.delay {
Process.await(getConnection(pool)) {implicit connection =>
Process.iterator(Task.delay(selectable.select(key).iterator())).onComplete(Process.eval_(closeConnection(connection)))
}
}
}
}
/**
* Use an instance of Updatable to create a Process indicating
* how many rows were updated for each execution.
*
* A connection is taken from the pool for each execution.
* @param pool
* @param updatable
* @tparam Key
* @return
*/
def update[Key](
pool: Pool
)(implicit updatable: Updatable[Key]
): Channel[Task, Key, Long] = {
withConnection[Key, Long] { key => implicit connection =>
Task.delay(updatable.update(key).update())
}(pool)
}
}
}
}
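// Hypothetical, scalaz-free sketch (not part of the original file) of the
// bracket pattern withConnection encodes above: acquire a resource, run the
// action, and release the resource even if the action fails.
object BracketSketch {
  def main(args: Array[String]): Unit = {
    def withResource[A](acquire: => String)(use: String => A): A = {
      val r = acquire // stands in for pool.getConnection()
      try use(r)
      finally println(s"closed $r") // stands in for connection.close()
    }
    val n = withResource("conn-1")(r => { println(s"using $r"); 42 })
    println(n) // 42
  }
}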
|
wdacom/sdbc
|
jdbc.scalaz/src/main/scala/com/rocketfuel/sdbc/scalaz/JdbcProcess.scala
|
Scala
|
bsd-3-clause
| 7,654
|
package org.nedervold.grammareditor.models.adapters
import org.nedervold.grammareditor.models.VariableModel
import org.nedervold.grammareditor.models.onEDTWait
import javax.swing.event.DocumentEvent
import javax.swing.event.DocumentListener
import javax.swing.text.DefaultStyledDocument
import javax.swing.text.Document
/**
* Wraps a Document to make a [[VariableModel]][String]
*
* @constructor
* @param document the Document to wrap
*/
class DocumentAdapter(val document: Document) extends VariableModel[String] {
require(document != null)
def this() = this(new DefaultStyledDocument())
override def value = document.getText(0, document.getLength())
override def value_=(newValue: String) = {
require(newValue != null)
val oldValue = value
/*
* TODO 2014-05-29 Is this test too expensive? It extracts the entire string
* from the Document.
*/
if (!oldValue.equals(newValue)) {
onEDTWait {
listening = false
document.remove(0, document.getLength())
document.insertString(0, newValue, null)
listening = true
}
publish(event)
}
} ensuring {
newValue.equals(value)
}
private var listening = true
private object documentListener extends DocumentListener {
def changedUpdate(e: DocumentEvent) = if (listening) publish(event)
def insertUpdate(e: DocumentEvent) = if (listening) publish(event)
def removeUpdate(e: DocumentEvent) = if (listening) publish(event)
}
  document.addDocumentListener(documentListener)
}
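// Hypothetical sketch (not part of the original file) of the wrapped Document
// operations DocumentAdapter relies on: value reads getText(0, length) and
// value_= replaces the entire contents. DefaultStyledDocument works headless.
object DocumentSketch {
  def main(args: Array[String]): Unit = {
    val doc = new DefaultStyledDocument()
    doc.insertString(0, "grammar", null)
    println(doc.getText(0, doc.getLength)) // grammar
    doc.remove(0, doc.getLength)
    doc.insertString(0, "updated", null)
    println(doc.getText(0, doc.getLength)) // updated
  }
}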
|
nedervold/GrammarEditor
|
src/main/scala/org/nedervold/grammareditor/models/adapters/DocumentAdapter.scala
|
Scala
|
apache-2.0
| 1,660
|
import language.higherKinds
object Wrap {
implicit class X[X](val a: X)
X[Int](0)
}
class Wrap {
implicit class Y[Y](val a: Y)
Y[Int](0)
implicit class Z[Z[_]](val a: Z[Wrap.this.Z[Z]])
Z[List](List(new Z[List](null)))
}
case class X[X](val a: X)
|
AlexSikia/dotty
|
tests/untried/pos/t7033.scala
|
Scala
|
bsd-3-clause
| 262
|
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc
/* Super hacky overriding of the MainGenericRunner used by partest */
import org.scalajs.ir
import org.scalajs.logging._
import org.scalajs.linker._
import org.scalajs.linker.interface._
import org.scalajs.jsenv._
import org.scalajs.jsenv.nodejs.NodeJSEnv
import com.google.common.jimfs.Jimfs
import scala.tools.partest.scalajs.ScalaJSPartestOptions._
import java.net.URL
import java.nio.file._
import scala.io.Source
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import Properties.{ versionString, copyrightString }
import GenericRunnerCommand._
class MainGenericRunner {
def errorFn(ex: Throwable): Boolean = {
ex.printStackTrace()
false
}
def errorFn(str: String): Boolean = {
scala.Console.err println str
false
}
val optMode = OptMode.fromId(System.getProperty("scalajs.partest.optMode"))
def readSemantics() = {
import org.scalajs.linker.interface.CheckedBehavior.Compliant
val opt = Option(System.getProperty("scalajs.partest.compliantSems"))
val compliantSems =
opt.fold[List[String]](Nil)(_.split(',').toList.filter(_.nonEmpty))
compliantSems.foldLeft(Semantics.Defaults) { (prev, compliantSem) =>
compliantSem match {
case "asInstanceOfs" => prev.withAsInstanceOfs(Compliant)
case "arrayIndexOutOfBounds" => prev.withArrayIndexOutOfBounds(Compliant)
case "moduleInit" => prev.withModuleInit(Compliant)
}
}
}
def process(args: Array[String]): Boolean = {
val command = new GenericRunnerCommand(args.toList, (x: String) => errorFn(x))
    if (!command.ok) return errorFn("\n" + command.shortUsageMsg)
else if (command.settings.version) return errorFn("Scala code runner %s -- %s".format(versionString, copyrightString))
else if (command.shouldStopWithInfo) return errorFn("shouldStopWithInfo")
if (command.howToRun != AsObject)
return errorFn("Scala.js runner can only run an object")
val logger = new ScalaConsoleLogger(Level.Warn)
val semantics0 = readSemantics()
val semantics = if (optMode == FullOpt) semantics0.optimized else semantics0
val moduleInitializers = Seq(ModuleInitializer.mainMethodWithArgs(
command.thingToRun, "main", command.arguments))
val linkerConfig = StandardConfig()
.withCheckIR(true)
.withSemantics(semantics)
.withSourceMap(false)
.withOptimizer(optMode != NoOpt)
.withClosureCompiler(optMode == FullOpt)
.withBatchMode(true)
val linker = StandardImpl.linker(linkerConfig)
val sjsCode = {
val dir = Jimfs.newFileSystem().getPath("tmp")
Files.createDirectory(dir)
val cache = StandardImpl.irFileCache().newCache
val result = PathIRContainer
.fromClasspath(command.settings.classpathURLs.map(urlToPath _))
.map(_._1)
.flatMap(cache.cached _)
.flatMap(linker.link(_, moduleInitializers, PathOutputDirectory(dir), logger))
val report = Await.result(result, Duration.Inf)
if (report.publicModules.size != 1)
throw new AssertionError(s"got other than 1 module: $report")
dir.resolve(report.publicModules.head.jsFileName)
}
val input = Input.Script(sjsCode) :: Nil
val config = RunConfig().withLogger(logger)
val run = new NodeJSEnv().start(input, config)
try {
Await.result(run.future, Duration.Inf)
} finally {
run.close()
}
true
}
private def urlToPath(url: java.net.URL) = {
try {
Paths.get(url.toURI())
} catch {
case e: java.net.URISyntaxException => Paths.get(url.getPath())
}
}
}
object MainGenericRunner extends MainGenericRunner {
def main(args: Array[String]): Unit = {
if (!process(args))
System.exit(1)
}
}
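// Hypothetical sketch (not part of the original file) of the foldLeft-over-flags
// pattern used by readSemantics above: a comma-separated property is split and
// each recognized token refines a defaults object.
object FlagFoldSketch {
  def main(args: Array[String]): Unit = {
    case class Sems(compliantAsInstanceOfs: Boolean = false, compliantModuleInit: Boolean = false)
    val prop = "asInstanceOfs,moduleInit"
    val sems = prop.split(',').toList.filter(_.nonEmpty).foldLeft(Sems()) { (prev, flag) =>
      flag match {
        case "asInstanceOfs" => prev.copy(compliantAsInstanceOfs = true)
        case "moduleInit"    => prev.copy(compliantModuleInit = true)
      }
    }
    println(sems) // Sems(true,true)
  }
}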
|
scala-js/scala-js
|
partest/src/main/scala/scala/tools/nsc/MainGenericRunner.scala
|
Scala
|
apache-2.0
| 4,142
|
package io.escalante.quickstarts.scaladin.editor
import scala.reflect.BeanProperty
import scala.util.Random
import vaadin.scala._
/**
* Example app with a list of beans, a table to show the beans and a form to edit them.
* Since Vaadin is datasource-agnostic, anything that returns a collection of beans can be used.
*/
class Editor extends Application("Escalante Scaladin Quickstart: registration editor") {
//replace with any kind of datasource that provides a collection of beans
var registrations: List[Registration] = List.empty
def saveRegistration(registration: Registration): Unit = registrations = registration :: registrations
override val main: ComponentContainer = new VerticalLayout {
sizeFull()
styleName = Reindeer.LayoutWhite
//button and a table for showing the bean list
val tableLayout = new VerticalLayout {
size(50 pct, 50 pct)
spacing = true
val table = new Table {
sizeFull()
styleNames += Reindeer.TableStrong
container = new BeanItemContainer(registrations)
visibleColumns = Seq("username", "realName")
}
val addButton = Button("Register", showForm)
components += (table, addButton)
}
//form for showing the fields of a bean
lazy val form = new Form {
size(50 pct, 50 pct)
caption = "Registration"
writeThrough = false
formFieldFactory = registrationFormFieldFactory
footer = new HorizontalLayout {
components += Button("Save", showList)
}
}
components += tableLayout
alignment(tableLayout -> Alignment.MiddleCenter)
def showForm(): Unit = {
form.item = new BeanItem(Registration())
form.visibleItemProperties = Seq("realName", "username", "password")
form.p.setValidationVisible(false)
//here we add a field to the form for something that doesn't exist in the bean itself
form.addField(Option("confirmation"), form.formFieldFactory.flatMap(_.createField(FormFieldIngredients(form.item.get, "confirmation", form))))
replaceComponent(tableLayout, form)
alignment(form -> Alignment.MiddleCenter)
}
def showList(): Unit = {
if (form.commit.isValid) { //form handles error
val bean = form.item.get.asInstanceOf[BeanItem[Registration]].bean
saveRegistration(bean)
tableLayout.table.container = new BeanItemContainer(registrations)
tableLayout.table.visibleColumns = Seq("username", "realName")
replaceComponent(form, tableLayout)
alignment(tableLayout -> Alignment.MiddleCenter)
mainWindow.showNotification("User %s registered".format(bean.username))
}
}
}
//customize the password and password confirmation fields
val registrationFormFieldFactory = FormFieldFactory(ing => {
var field: Option[Field] = ing match {
case FormFieldIngredients(_, "password", _) =>
Some(new PasswordField {
caption = DefaultFieldFactory.createCaptionByPropertyId("password")
})
case FormFieldIngredients(_, "confirmation", form: Form) =>
Some(new PasswordField {
caption = "Confirm password"
validators += Validator(value =>
if (value == form.field("password").get.value) Valid
else Invalid(List("Passwords must match")))
})
case otherIngredient => DefaultFieldFactory.createField(otherIngredient)
}
field.foreach(_.required = true)
field.foreach(f => f.requiredError = "%s is required".format(f.caption.get))
field
})
}
//simple case class bean
//@BeanProperty is required for BeanItemContainer since it uses Java reflection to scan the fields
case class Registration(
@BeanProperty var username: String = "username" + Random.nextInt,
@BeanProperty var password: String = "",
@BeanProperty var realName: String = "Joe Tester")
|
escalante/escalante-quickstart
|
editor-scaladin/src/main/scala/io/escalante/quickstarts/scaladin/editor/EditorApp.scala
|
Scala
|
apache-2.0
| 3,873
|
package org.jetbrains.plugins.scala
package lang
import com.intellij.psi.codeStyle.arrangement.std.{ArrangementSettingsToken, StdArrangementSettingsToken, StdArrangementTokenType, StdArrangementTokens}
import com.intellij.psi.codeStyle.arrangement.std.StdArrangementTokens.EntryType._
import com.intellij.psi.codeStyle.arrangement.std.StdArrangementTokens.Modifier._
import scala.collection.immutable
/**
* @author Roman Shein
* Date: 09.07.13
*/
package object rearranger {
val SCALA_GETTERS_AND_SETTERS_ID = "SCALA_KEEP_SCALA_GETTERS_SETTERS_TOGETHER"
val SCALA_GETTERS_AND_SETTERS_UI = "Keep_scala-style_getters_and_setters_together"
val SCALA_GETTERS_AND_SETTERS: ArrangementSettingsToken =
StdArrangementSettingsToken.token(SCALA_GETTERS_AND_SETTERS_ID, SCALA_GETTERS_AND_SETTERS_UI, StdArrangementTokenType.GROUPING)
val JAVA_GETTERS_AND_SETTERS_ID = "SCALA_KEEP_JAVA_GETTERS_SETTERS_TOGETHER"
val JAVA_GETTERS_AND_SETTERS_UI = "Keep_java-style_getters_and_setters_together"
val JAVA_GETTERS_AND_SETTERS: ArrangementSettingsToken =
StdArrangementSettingsToken.token(JAVA_GETTERS_AND_SETTERS_ID, JAVA_GETTERS_AND_SETTERS_UI, StdArrangementTokenType.GROUPING)
val SPLIT_INTO_UNARRANGEABLE_BLOCKS_BY_EXPRESSIONS_ID = "SCALA_SPLIT_BY_EXPRESSIONS"
val SPLIT_INTO_UNARRANGEABLE_BLOCKS_BY_EXPRESSIONS_UI = "Split into unarrangeable blocks by expressions"
val SPLIT_INTO_UNARRANGEABLE_BLOCKS_BY_EXPRESSIONS: ArrangementSettingsToken =
StdArrangementSettingsToken.token(SPLIT_INTO_UNARRANGEABLE_BLOCKS_BY_EXPRESSIONS_ID, SPLIT_INTO_UNARRANGEABLE_BLOCKS_BY_EXPRESSIONS_UI, StdArrangementTokenType.GROUPING) //TODO: use name from bundle
val scalaGroupingRules = immutable.HashMap(SCALA_GETTERS_AND_SETTERS.getId -> SCALA_GETTERS_AND_SETTERS,
JAVA_GETTERS_AND_SETTERS.getId -> JAVA_GETTERS_AND_SETTERS,
SPLIT_INTO_UNARRANGEABLE_BLOCKS_BY_EXPRESSIONS.getId -> SPLIT_INTO_UNARRANGEABLE_BLOCKS_BY_EXPRESSIONS)
//access modifiers
val scalaAccessModifiersByName = immutable.ListMap("public" -> PUBLIC, "protected" -> PROTECTED, "private" -> PRIVATE)
val scalaAccessModifiersById = immutable.HashMap(PUBLIC.getId -> PUBLIC, PROTECTED.getId -> PROTECTED, PRIVATE.getId -> PRIVATE)
//other modifiers
val SEALED_ID = "SCALA_SEALED"
val SEALED_UI = "sealed"
val SEALED: ArrangementSettingsToken =
StdArrangementSettingsToken.token(SEALED_ID, SEALED_UI, StdArrangementTokenType.MODIFIER)
val IMPLICIT_ID = "SCALA_IMPLICIT"
val IMPLICIT_UI = "implicit"
val IMPLICIT: ArrangementSettingsToken =
StdArrangementSettingsToken.token(IMPLICIT_ID, IMPLICIT_UI, StdArrangementTokenType.MODIFIER)
val CASE_ID = "SCALA_CASE"
val CASE_UI = "case"
val CASE: ArrangementSettingsToken =
StdArrangementSettingsToken.token(CASE_ID, CASE_UI, StdArrangementTokenType.MODIFIER)
val OVERRIDE_ID = "SCALA_OVERRIDE"
val OVERRIDE_UI = "override"
val OVERRIDE: ArrangementSettingsToken =
StdArrangementSettingsToken.token(OVERRIDE_ID, OVERRIDE_UI, StdArrangementTokenType.MODIFIER)
val LAZY_ID = "SCALA_LAZY"
val LAZY_UI = "lazy"
val LAZY: ArrangementSettingsToken =
StdArrangementSettingsToken.token(LAZY_ID, LAZY_UI, StdArrangementTokenType.MODIFIER)
val scalaOtherModifiersByName = immutable.ListMap(SEALED_UI -> SEALED, IMPLICIT_UI -> IMPLICIT,
"abstract" -> ABSTRACT, CASE_UI -> CASE, "final" -> FINAL, OVERRIDE_UI -> OVERRIDE, LAZY_UI -> LAZY)
val scalaOtherModifiersById = immutable.HashMap(SEALED.getId -> SEALED, IMPLICIT.getId -> IMPLICIT,
ABSTRACT.getId -> ABSTRACT, FINAL.getId -> FINAL, OVERRIDE.getId -> OVERRIDE, LAZY.getId -> LAZY)
//types
val TYPE_ID = "SCALA_TYPE"
val TYPE_UI = "type"
val TYPE: ArrangementSettingsToken =
StdArrangementSettingsToken.token(TYPE_ID, TYPE_UI, StdArrangementTokenType.ENTRY_TYPE)
val FUNCTION_ID = "SCALA_FUNCTION"
val FUNCTION_UI = "function"
val FUNCTION: ArrangementSettingsToken =
StdArrangementSettingsToken.token(FUNCTION_ID, FUNCTION_UI, StdArrangementTokenType.ENTRY_TYPE)
val VAL_ID = "SCALA_VAL"
val VAL_UI = "val"
val VAL: ArrangementSettingsToken = StdArrangementSettingsToken.token(VAL_ID, VAL_UI, StdArrangementTokenType.ENTRY_TYPE)
val MACRO_ID = "SCALA_MACRO"
val MACRO_UI = "macro"
val MACRO: ArrangementSettingsToken =
StdArrangementSettingsToken.token(MACRO_ID, MACRO_UI, StdArrangementTokenType.ENTRY_TYPE)
val OBJECT_ID = "SCALA_OBJECT"
val OBJECT_UI = "object"
val OBJECT: ArrangementSettingsToken =
StdArrangementSettingsToken.token(OBJECT_ID, OBJECT_UI, StdArrangementTokenType.ENTRY_TYPE)
//this is a special token that is not used in arrangement GUI and always has canBeMatched = false
val UNSEPARABLE_RANGE_ID = "SCALA_UNSEPARABLE_RANGE"
val UNSEPARABLE_RANGE: ArrangementSettingsToken = StdArrangementSettingsToken.tokenById(UNSEPARABLE_RANGE_ID, StdArrangementTokenType.ENTRY_TYPE)
//maps and sets of tokens
val scalaTypesValues = immutable.HashSet(TYPE, FUNCTION, CLASS, VAL, VAR, TRAIT, MACRO, CONSTRUCTOR, OBJECT)
val scalaTypesById = immutable.HashMap(TYPE.getId -> TYPE, FUNCTION.getId -> FUNCTION, CLASS.getId -> CLASS,
VAL.getId -> VAL, VAR.getId -> VAR, TRAIT.getId -> TRAIT, MACRO.getId -> MACRO, CONSTRUCTOR.getId -> CONSTRUCTOR,
OBJECT.getId -> OBJECT)
val scalaAccessModifiersValues = scalaAccessModifiersByName.toSet.map((x: tokensType) => x._2)
val scalaModifiers = scalaAccessModifiersValues ++ scalaOtherModifiersByName.toSet.map((x: tokensType) => x._2)
private type tokensType = (String, ArrangementSettingsToken)
val supportedOrders = immutable.HashSet(StdArrangementTokens.Order.BY_NAME, StdArrangementTokens.Order.KEEP)
val commonModifiers = scalaAccessModifiersValues + FINAL //TODO: determine if final is common
val scalaModifiersByName = scalaAccessModifiersByName ++ scalaOtherModifiersByName
val scalaTokensById = scalaAccessModifiersById ++ scalaOtherModifiersById ++ scalaTypesById
val tokensForType = immutable.HashMap(TYPE -> (commonModifiers + OVERRIDE), FUNCTION -> (commonModifiers +
OVERRIDE + IMPLICIT), CLASS -> (commonModifiers + ABSTRACT + SEALED), TRAIT -> (commonModifiers +
ABSTRACT + SEALED), VAL -> (commonModifiers + OVERRIDE + LAZY + ABSTRACT),
VAR -> (commonModifiers + OVERRIDE), MACRO -> (commonModifiers + OVERRIDE),
CONSTRUCTOR -> scalaAccessModifiersValues, OBJECT -> commonModifiers)
def getModifierByName(modifierName: String) = {
scalaModifiersByName.get(modifierName)
}
def getTokenById(modifierId: String) = {
scalaTokensById.get(modifierId)
}
}
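// Hypothetical sketch (not part of the original file): the *ByName maps above use
// ListMap rather than HashMap because ListMap preserves insertion order, which
// keeps the modifier names in a stable order; the *ById maps are lookup-only, so
// a HashMap suffices.
object ListMapOrderSketch {
  def main(args: Array[String]): Unit = {
    val byName = immutable.ListMap("public" -> 1, "protected" -> 2, "private" -> 3)
    println(byName.keys.mkString(", ")) // public, protected, private (insertion order)
  }
}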
|
triggerNZ/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/rearranger/package.scala
|
Scala
|
apache-2.0
| 6,611
|
/*
* Copyright 2014 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s.client.middleware
import cats.effect._
import org.http4s._
import org.http4s.client.Client
import org.typelevel.vault._
/** Client middleware that sets the destination attribute of every request to the specified value.
*/
object DestinationAttribute {
def apply[F[_]: MonadCancelThrow](client: Client[F], destination: String): Client[F] =
Client { req =>
client.run(req.withAttribute(Destination, destination))
}
  /** The returned function can be used as a classifier function when creating the [[Metrics]] middleware, to use the destination
    * attribute from the request as the classifier.
*
* @return the classifier function
*/
def getDestination[F[_]](): Request[F] => Option[String] = _.attributes.lookup(Destination)
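  /** Identity-based vault [[Key]] under which the destination string is stored on each request. */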
val Destination = Key.newKey[SyncIO, String].unsafeRunSync()
val EmptyDestination = ""
}
|
http4s/http4s
|
client/shared/src/main/scala/org/http4s/client/middleware/DestinationAttribute.scala
|
Scala
|
apache-2.0
| 1,464
|
package metamorphic.generator.spray
import metamorphic.dsl.application._
import metamorphic.dsl.generator._
import metamorphic.dsl.util.StringImplicits._
import scala.reflect.macros.blackbox.Context
object SprayRouteGenerator {
def generate(service: Service)(implicit c: Context): c.Tree = {
import c.universe._
val routes = UtilGenerator.recursiveMethod(service.operations, new OperationRecursiveApplication)
q"""
trait ${service.name.toType} extends HttpService with ApplicationJson {
..${service.dependencies.trees}
import scala.language.postfixOps
import scala.util._
import metamorphic.Settings
val ${service.name.route.obj.toTerm} = $routes
}
"""
}
class OperationRecursiveApplication extends RecursiveApplication[Operation] {
def method: String = "$tilde"
def nilError: String = "No operations defined for service."
def apply(operation: Operation)(implicit c: Context): c.Tree = {
import c.universe._
def path: Tree = {
operation.path.parameter match {
case Some(parameter) =>
q"path(${operation.path.base} / IntNumber /) { $pathContent }"
case None =>
q"path(${operation.path.base} /) { $pathContent }"
}
}
def pathContent: Tree = {
operation.path.parameter match {
case Some(parameter) =>
q"""(${parameter.toTerm}: ${tq""}) => $method"""
case None => method
}
}
def method: Tree = {
operation.method match {
case Get() => q"get { dynamic { $methodContent } }"
case Post() => q"post { dynamic { $methodContent } }"
case Put() => q"put { dynamic { $methodContent } }"
case Delete() => q"delete { dynamic { $methodContent } }"
}
}
def methodContent: Tree = {
operation.content match {
case Some(content) => q"entity(as[${content.tpe.toType}]) { $code }"
case None => code
}
}
def code: Tree = {
operation.content match {
case Some(content) => q"(${content.name.toTerm}: ${tq""}) => { ..$transformBody }"
case None => q"{ ..$transformBody }"
}
}
def transformBody: List[c.Tree] = {
val transformed = operation.body.trees.map(tree =>
UtilGenerator.transformByPattern(c)(tree, {
case q"Response($content, $status)" => {
status.toString match {
case "Ok" => q"complete($content)"
case "Created" => q"complete(StatusCodes.Created, $content)"
case "BadRequest" => q"complete(StatusCodes.BadRequest, $content)"
case "NoContent" => q"complete(StatusCodes.NoContent, $content)"
}
}
})
)
if (!operation.isAsync) {
transformed
} else {
q"""
val future = {
..$transformed
}
onComplete(future) {
case Success(route) => route
case Failure(ex) => {
if (Settings.debug) println(ex)
complete(StatusCodes.InternalServerError, ex)
}
}
""".children
}
}
path
}
}
}
|
frroliveira/metamorphic
|
metamorphic-spray/src/main/scala/metamorphic/generator/spray/SprayRouteGenerator.scala
|
Scala
|
mit
| 3,389
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command.table
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.execution.command.MetadataCommand
import org.apache.spark.sql.types.{BooleanType, StringType}
private[sql] case class CarbonShowTablesCommand(databaseName: Option[String],
    tableIdentifierPattern: Option[String]) extends MetadataCommand {
// The result of SHOW TABLES has three columns: database, tableName and isTemporary.
override val output: Seq[Attribute] = {
AttributeReference("database", StringType, nullable = false)() ::
AttributeReference("tableName", StringType, nullable = false)() ::
AttributeReference("isTemporary", BooleanType, nullable = false)() :: Nil
}
override def processMetadata(sparkSession: SparkSession): Seq[Row] = {
// Since we need to return a Seq of rows, we will call getTables directly
// instead of calling tables in sparkSession.
val catalog = sparkSession.sessionState.catalog
val db = databaseName.getOrElse(catalog.getCurrentDatabase)
    val tables =
tableIdentifierPattern.map(catalog.listTables(db, _)).getOrElse(catalog.listTables(db))
val externalCatalog = sparkSession.sharedState.externalCatalog
    // this method checks whether the table is a main table or a datamap, based on the "isVisible" property
def isMainTable(tableIdent: TableIdentifier) = {
var isMainTable = true
try {
isMainTable = externalCatalog.getTable(db, tableIdent.table).storage.properties
.getOrElse("isVisible", true).toString.toBoolean
} catch {
case ex: Throwable =>
// ignore the exception for show tables
}
isMainTable
}
// tables will be filtered for all the dataMaps to show only main tables
tables.collect {
case tableIdent if isMainTable(tableIdent) =>
val isTemp = catalog.isTemporaryTable(tableIdent)
Row(tableIdent.database.getOrElse("default"), tableIdent.table, isTemp)
}
}
}
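// Hypothetical sketch (not part of the original file) of the isMainTable decision
// above: a missing "isVisible" property defaults to true, so only tables that are
// explicitly marked invisible (datamaps) are filtered out of SHOW TABLES.
object VisibilityFilterSketch {
  def main(args: Array[String]): Unit = {
    def isMain(props: Map[String, String]): Boolean =
      props.getOrElse("isVisible", "true").toBoolean
    println(isMain(Map.empty))                   // true
    println(isMain(Map("isVisible" -> "false"))) // false
  }
}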
|
sgururajshetty/carbondata
|
integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonShowTablesCommand.scala
|
Scala
|
apache-2.0
| 2,928
|
/***
* Copyright 2014 Rackspace US, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rackspace.com.papi.components.checker.step
import java.net.URL
import javax.xml.namespace.{NamespaceContext, QName}
import javax.xml.transform.{Source, Templates, TransformerFactory}
import javax.xml.transform.dom.{DOMResult, DOMSource}
import javax.xml.transform.sax.{SAXSource, SAXTransformerFactory, TransformerHandler}
import javax.xml.transform.stream.StreamSource
import javax.xml.validation.{Schema, SchemaFactory}
import javax.xml.xpath.XPathExpression
import com.fasterxml.jackson.databind.{JsonNode, ObjectMapper}
import com.github.fge.jsonschema.main.JsonSchemaFactory
import com.github.fge.jsonschema.report.{ListReportProvider, LogLevel}
import com.rackspace.com.papi.components.checker.Config
import com.rackspace.com.papi.components.checker.step.base.{ConnectedStep, Step}
import com.rackspace.com.papi.components.checker.step.startend._
import com.rackspace.com.papi.components.checker.util.{ImmutableNamespaceContext, ObjectMapperPool, XPathExpressionPool}
import com.saxonica.config.EnterpriseTransformerFactory
import net.sf.saxon.TransformerFactoryImpl
import org.w3c.dom.Document
import org.xml.sax.{Attributes, ContentHandler, InputSource, Locator, SAXParseException}
import scala.collection.mutable.{ArrayBuffer, HashMap, Map}
import scala.util.matching.Regex
/**
* The StepHandler assumes it is receiving content that is valid
* according to the checker schema. Please ensure that a validation
* stage occurs before the handler is called.
* <p>
* The StepHandler is also <b>not</b> thread safe.
*/
class StepHandler(var contentHandler : ContentHandler, val config : Config) extends ContentHandler {
//
// ID -> Step
//
private[this] val steps : Map[String, Step] = new HashMap[String, Step]
//
// ID -> Next Step IDs
//
private[this] val next : Map[String, Array[String]] = new HashMap[String, Array[String]]
//
// The start step
//
private[this] var start : Step = null
//
// The prefix mappings
//
private[this] val prefixes : Map[String, String] = new HashMap[String, String]
//
// In the first phase we process grammars, when this is false we're
// processing steps.
//
private[this] var processGrammar : Boolean = true
//
// A list of XML source
//
private[this] val grammarSources : ArrayBuffer[Source] = new ArrayBuffer[Source]
//
// JSON Schema factory
//
private[this] val jsonSchemaFactory = JsonSchemaFactory.newBuilder.setReportProvider(new ListReportProvider(LogLevel.WARNING, LogLevel.ERROR)).freeze
//
// JSON Schema grammar
//
private[this] var jsonGrammar : JsonNode = null
//
// JSON Schema buffer
//
private[this] var jsonBuffer : StringBuilder = new StringBuilder()
//
// Should we be processing the JSON buffer
//
private[this] var processJSONBuffer : Boolean = false
//
// Our schema factory...
//
private[this] val schemaFactory = {
var sf : SchemaFactory = null
config.xsdEngine match {
//
// Enable CTA full XPath2.0 checking in XSD 1.1
//
case "Xerces" => {
sf = SchemaFactory.newInstance("http://www.w3.org/XML/XMLSchema/v1.1", "org.apache.xerces.jaxp.validation.XMLSchema11Factory", this.getClass.getClassLoader)
sf.setFeature ("http://apache.org/xml/features/validation/cta-full-xpath-checking", true)
}
//
// Enable Schema 1.1 support
//
case "SaxonEE" => {
sf = new com.saxonica.ee.jaxp.SchemaFactoryImpl()
sf.setProperty("http://saxon.sf.net/feature/xsd-version","1.1")
}
}
sf
}
//
  // XSL 2.0 transformer factory
//
private[this] val transformFactoryXSL2 : TransformerFactory = {
/**
* Packages up a saxon factory, but also specifies the classloader for the DynamicLoader within saxon
* @return
*/
def saxonFactory() = {
val factory = TransformerFactory.newInstance("net.sf.saxon.TransformerFactoryImpl", this.getClass.getClassLoader)
val cast = factory.asInstanceOf[TransformerFactoryImpl]
cast.getConfiguration.getDynamicLoader.setClassLoader(this.getClass.getClassLoader)
factory
}
config.xslEngine match {
case "SaxonEE" => {
val factory = TransformerFactory.newInstance("com.saxonica.config.EnterpriseTransformerFactory", this.getClass.getClassLoader)
/*
* I found this through here: http://sourceforge.net/p/saxon/mailman/message/29737564/
       * A bit of deduction led me to assume that all dynamic loading is done with the DynamicLoader
       * object. The only way to get hold of that is to typecast the TransformerFactory to the actual class, and
       * then get the DynamicLoader out of it, and set its classloader to the one where the saxonica classes
* are located.
*/
//Now that we have a Saxon EE transformer factory, we need to configure it...
//We have to do casting to get the configuration object, to configure the DynamicLoader for our classloader
//This is only needed for saxon EE, because it generates bytecode.
val cast = factory.asInstanceOf[EnterpriseTransformerFactory]
cast.getConfiguration.getDynamicLoader.setClassLoader(this.getClass.getClassLoader)
factory
}
case "SaxonHE" => saxonFactory()
      // TODO: if the wadl ever explicitly calls out for XSLT2, we need to give them a SAXON transformer;
      // Xalan doesn't support 2
case _ => saxonFactory()
}
}
//
  // XSL 1.0 transformer factory
//
private[this] val transformFactoryXSL1 : TransformerFactory = {
config.xslEngine match {
case "Xalan" => TransformerFactory.newInstance("org.apache.xalan.processor.TransformerFactoryImpl", this.getClass.getClassLoader)
case "XalanC" => TransformerFactory.newInstance("org.apache.xalan.xsltc.trax.TransformerFactoryImpl", this.getClass.getClassLoader)
case _ => transformFactoryXSL2
}
}
//
// Our schema...
//
private[this] var _schema : Schema = null
private[this] def schema(qn : QName) : Schema = {
if ((_schema == null) && (qn.getNamespaceURI != "http://www.w3.org/2001/XMLSchema")) {
throw new SAXParseException("No schema available.", locator)
} else if (_schema == null) {
schemaFactory.newSchema(new StreamSource(getClass.getResourceAsStream("/xsd/blank.xsd")))
} else {
_schema
}
}
private[this] def schema (qn : Array[QName]) : Schema = {
if ((_schema == null) && (qn.exists(q => q.getNamespaceURI != "http://www.w3.org/2001/XMLSchema"))) {
throw new SAXParseException("No schema available.", locator)
} else if (_schema == null) {
schemaFactory.newSchema(new StreamSource(getClass.getResourceAsStream("/xsd/blank.xsd")))
} else {
_schema
}
}
private[this] def schema : Schema = {
_schema
}
private[this] def schema_= (sch : Schema) : Unit = {
_schema = sch
}
//
// The document locator...
//
private[this] var locator : Locator = null
//
// Saxon transformer factory, schemahandler and result...this is
// used to capture inline schema and inline XSL.
//
private[this] val saxTransformerFactory : SAXTransformerFactory =
TransformerFactory.newInstance("net.sf.saxon.TransformerFactoryImpl", this.getClass.getClassLoader).asInstanceOf[SAXTransformerFactory]
private[this] var currentSchemaHandler : TransformerHandler = null
private[this] var currentSchemaResult : DOMResult = null
private[this] var currentXSLHandler : TransformerHandler = null
private[this] var currentXSLResult : DOMResult = null
//
// The last XSL step processed, we may need to fill in the
// stylesheet.
//
private[this] var lastXSL : XSL = null
private[this] var lastXSLVersion : String = null
def this() = this( null, new Config() )
override def startElement (uri : String, localName : String, qname : String, atts : Attributes) = {
uri match {
case "http://www.rackspace.com/repose/wadl/checker" => startCheckerElement(uri, localName, qname, atts)
case "http://www.w3.org/2001/XMLSchema" => startSchemaElement(uri, localName, qname, atts)
case "http://www.w3.org/1999/XSL/Transform" => startTransformElement(uri, localName, qname, atts)
case _ => // ignore
}
if (contentHandler != null) {
contentHandler.startElement(uri, localName, qname, atts)
}
if (currentSchemaHandler != null) {
currentSchemaHandler.startElement(uri, localName, qname, atts)
}
if (currentXSLHandler != null) {
currentXSLHandler.startElement(uri, localName, qname, atts)
}
}
override def endElement(uri : String, localName : String, qname : String) = {
uri match {
case "http://www.rackspace.com/repose/wadl/checker" => endCheckerElement(uri, localName, qname)
case "http://www.w3.org/2001/XMLSchema" => endSchemaElement(uri, localName, qname)
case "http://www.w3.org/1999/XSL/Transform" => endTransformElement(uri, localName, qname)
case _ => // ignore
}
if (contentHandler != null) {
contentHandler.endElement(uri, localName, qname)
}
if (currentSchemaHandler != null) {
currentSchemaHandler.endElement(uri, localName, qname)
}
if (currentXSLHandler != null) {
currentXSLHandler.endElement(uri, localName, qname)
}
}
override def endDocument = {
next.foreach { case (id, nexts) =>
val step = steps(id).asInstanceOf[ConnectedStep]
for ( i <- nexts.indices) {
step.next(i) = steps(nexts(i))
}
}
next.clear
if (contentHandler != null) {
contentHandler.endDocument()
}
}
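  //
  // A hypothetical, self-contained sketch of the two-phase wiring performed
  // above: while parsing, steps are registered by id with empty next arrays
  // and the target ids are only recorded; at endDocument, once every step
  // exists, the ids are resolved to real references, so forward references
  // in the checker document are fine. With a hypothetical
  // class Node(val id: String, val next: Array[Node]):
  //
  //   val steps = mutable.HashMap("S" -> new Node("S", new Array[Node](1)),
  //                               "A" -> new Node("A", new Array[Node](0)))
  //   val next  = mutable.HashMap("S" -> Array("A"))
  //   next.foreach { case (id, nexts) =>
  //     for (i <- nexts.indices) steps(id).next(i) = steps(nexts(i))
  //   }
  //   steps("S").next(0).id  // "A"
  //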
//
// Returns the start step after the document has been parsed.
//
def step : Step = start
//
// Element handlers
//
private[this] def startCheckerElement (uri : String, localName : String, qname : String, atts : Attributes) = {
localName match {
case "step" =>
if (processGrammar) {
setupGrammar
}
atts.getValue("type") match {
case "START" => addStart(atts)
case "ACCEPT" => addAccept(atts)
case "URL_FAIL" => addURLFail(atts)
case "METHOD_FAIL" => addMethodFail(atts)
case "REQ_TYPE_FAIL" => addReqTypeFail(atts)
case "CONTENT_FAIL" => addContentFail(atts)
case "URL" => addURL(atts)
case "METHOD" => addMethod(atts)
case "URLXSD" => addURLXSD(atts)
case "REQ_TYPE" => addReqType(atts)
case "WELL_XML" => addWellXML(atts)
case "WELL_JSON" => addWellJSON(atts)
case "XSD" => addXSD(atts)
case "XPATH" => addXPath(atts)
case "XSL" => addXSLT(atts)
case "HEADER" => addHeader(atts)
case "HEADERXSD" => addHeaderXSD(atts)
case "HEADER_SINGLE" => addHeaderSingle(atts)
case "HEADERXSD_SINGLE" => addHeaderXSDSingle(atts)
case "HEADER_ANY" => addHeaderAny(atts)
case "HEADERXSD_ANY" => addHeaderXSDAny(atts)
case "HEADER_ALL" => addHeaderAll(atts)
case "SET_HEADER" => addSetHeader(atts)
case "SET_HEADER_ALWAYS" => addSetHeaderAlways(atts)
case "JSON_SCHEMA" => addJSONSchema(atts)
case "JSON_XPATH" => addJSONXPath(atts)
case "ASSERT" => addAssert(atts)
case "CAPTURE_HEADER" => addCaptureHeader(atts)
case "POP_REP" => addPopRep(atts)
case "PUSH_XML_REP" => addPushXML(atts)
case "PUSH_JSON_REP" => addPushJSON(atts)
}
case "grammar" =>
addGrammar(atts)
case _ => // ignore
}
}
private[this] def endCheckerElement (uri : String, localName : String, qname : String) = {
localName match {
case "step" => if (lastXSL != null) closeXSLTStep
case "grammar" => endGrammar
case _ => //ignore
}
}
private[this] def startSchemaElement (uri : String, localName : String, qname : String, atts : Attributes) = {
localName match {
case "schema" => startInlineSchema
case _ => //ignore
}
}
private[this] def endSchemaElement (uri : String, localName : String, qname : String) = {
localName match {
case "schema" => endInlineSchema
case _ => //ignore
}
}
private[this] def startTransformElement (uri : String, localName : String, qname : String, atts : Attributes) = {
localName match {
case "transform" => startInlineXSL
case "stylesheet" => startInlineXSL
case _ => //ignore
}
}
private[this] def endTransformElement (uri : String, localName : String, qname : String) = {
localName match {
case "transform" => endInlineXSL
case "stylesheet" => endInlineXSL
case _ => //ignore
}
}
//
// We add new grammar source, we use it for processing later.
//
private[this] def addGrammar(atts : Attributes) : Unit = {
val href = atts.getValue("href")
atts.getValue("type") match {
//
// match null here for compatibility with older versions
// that didn't specify a type...
//
case "W3C_XML" | null => {
if (href != null) {
grammarSources += new SAXSource(new InputSource(href))
}
}
case "SCHEMA_JSON" => {
if (href != null) {
var om : ObjectMapper = null
try {
om = ObjectMapperPool.borrowParser
jsonGrammar = om.readValue(new URL(href), classOf[JsonNode])
} finally {
if (om != null) ObjectMapperPool.returnParser(om)
}
} else {
processJSONBuffer = true
}
}
}
}
//
// Grammar section ends
//
private[this] def endGrammar : Unit = {
if (processJSONBuffer) {
var om : ObjectMapper = null
try {
om = ObjectMapperPool.borrowParser
jsonGrammar = om.readValue(jsonBuffer.toString, classOf[JsonNode])
jsonBuffer.setLength(0)
processJSONBuffer = false
} finally {
if (om != null) ObjectMapperPool.returnParser(om)
}
}
}
//
// Handle internal schema....
//
private[this] def startInlineSchema : Unit = {
if (currentSchemaHandler == null) {
currentSchemaHandler = saxTransformerFactory.newTransformerHandler()
currentSchemaResult = new DOMResult()
currentSchemaHandler.setResult (currentSchemaResult)
currentSchemaHandler.startDocument()
currentSchemaHandler.setDocumentLocator(locator)
prefixes.foreach { case (prefix, uri) => {
currentSchemaHandler.startPrefixMapping (prefix, uri)
}}
}
}
private[this] def endInlineSchema : Unit = {
if (currentSchemaHandler != null) {
currentSchemaHandler.endDocument()
currentSchemaHandler = null
grammarSources += new DOMSource(currentSchemaResult.getNode)
currentSchemaResult = null
}
}
//
// Handle internal XSL...
//
private[this] def startInlineXSL : Unit = {
if (currentXSLHandler == null) {
currentXSLHandler = saxTransformerFactory.newTransformerHandler()
currentXSLResult = new DOMResult()
currentXSLHandler.setResult (currentXSLResult)
currentXSLHandler.startDocument()
currentXSLHandler.setDocumentLocator(locator)
prefixes.foreach { case (prefix, uri) => {
currentXSLHandler.startPrefixMapping (prefix, uri)
}}
}
}
private[this] def endInlineXSL : Unit = {
if (currentXSLHandler != null) {
currentXSLHandler.endDocument()
currentXSLHandler = null
//
// The currentXSLResult is cleared by the step that consumes
// it.
//
}
}
//
// Process the grammar to generate a schema.
//
private[this] def setupGrammar : Unit = {
if (grammarSources.length != 0) {
schema = schemaFactory.newSchema(grammarSources.toArray)
}
processGrammar = false
}
private[this] def getPriority(atts : Attributes) : Long = atts.getValue("priority") match {
case null => 1
case s : String => s.toLong
}
//
// The following add steps...
//
private[this] def addStart(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
start = new Start(id, label, new Array[Step](nexts.length))
next += (id -> nexts)
steps += (id -> start)
}
private[this] def addAccept(atts : Attributes) : Unit = {
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val priority = getPriority (atts)
steps += (id -> new Accept(id, label, priority))
}
private[this] def addURLFail(atts : Attributes) : Unit = {
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val notMatch : String = atts.getValue("notMatch")
val notTypes : Array[String] = {
val nt = atts.getValue("notTypes")
if (nt != null) {
nt.split(" ")
} else {
null
}
}
val priority = getPriority (atts)
val notQNames : Array[QName] = {
if (notTypes != null) {
notTypes.map(x => qname(x))
} else {
null
}
}
if (notMatch == null && notTypes == null) {
steps += (id -> new URLFail(id, label, priority))
} else if (notMatch != null && notTypes == null) {
steps += (id -> new URLFailMatch(id, label, notMatch.r, priority))
} else if (notMatch == null && notTypes != null) {
steps += (id -> new URLFailXSD(id, label, notQNames, schema(notQNames), priority))
} else {
steps += (id -> new URLFailXSDMatch(id, label, notMatch.r, notQNames, schema(notQNames), priority))
}
}
private[this] def addReqTypeFail(atts : Attributes) : Unit = {
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val notMatch : String = atts.getValue("notMatch")
val priority = getPriority (atts)
steps += (id -> new ReqTypeFail(id, label, notMatch.r, priority))
}
private[this] def addContentFail(atts : Attributes) : Unit = {
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val priority = getPriority (atts)
steps += (id -> new ContentFail(id, label, priority))
}
private[this] def addMethodFail(atts : Attributes) : Unit = {
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val notMatch : String = atts.getValue("notMatch")
val priority = getPriority (atts)
if (notMatch == null) {
steps += (id -> new MethodFail(id, label, priority))
} else {
steps += (id -> new MethodFailMatch (id, label, notMatch.r, priority))
}
}
private[this] def addReqType(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val _match : String = atts.getValue("match")
next += (id -> nexts)
steps += (id -> new ReqType(id, label, _match.r, new Array[Step](nexts.length)))
}
private[this] def addWellXML(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val priority = getPriority (atts)
next += (id -> nexts)
steps += (id -> new WellFormedXML (id, label, priority, new Array[Step](nexts.length)))
}
private[this] def addWellJSON(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val priority = getPriority (atts)
next += (id -> nexts)
steps += (id -> new WellFormedJSON (id, label, priority, new Array[Step](nexts.length)))
}
private[this] def addXSD(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val transform : Boolean = {
val stransform = atts.getValue("transform")
if (stransform == null) {
config.doXSDGrammarTransform
} else {
stransform.toBoolean
}
}
val priority = getPriority (atts)
next += (id -> nexts)
steps += (id -> new XSD(id, label, schema, transform, priority, new Array[Step](nexts.length)))
}
private[this] def addJSONSchema(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val priority = getPriority (atts)
next += (id -> nexts)
steps += (id -> new JSONSchema(id, label, jsonSchemaFactory.getJsonSchema(jsonGrammar), priority, new Array[Step](nexts.length)))
}
private[this] def addXSLT(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val href : String = atts.getValue("href")
val version : String = atts.getValue("version")
val priority = getPriority (atts)
try {
val templates : Templates = {
if (href != null) {
version match {
case "1" => transformFactoryXSL1.newTemplates(new StreamSource(href))
case vr : String if (vr == "2" || vr == "3") => transformFactoryXSL2.newTemplates(new StreamSource(href))
}
} else {
null
}
}
val xsl = new XSL(id, label, templates, priority, new Array[Step](nexts.length))
next += (id -> nexts)
steps += (id -> xsl)
if (templates == null) {
lastXSL = xsl
lastXSLVersion = version
}
} catch {
case e : Exception => throw new SAXParseException("Error while parsing XSLT", locator, e)
}
}
private[this] def closeXSLTStep : Unit = {
try {
val templates : Templates = {
val xslDoc = currentXSLResult.getNode.asInstanceOf[Document]
lastXSLVersion match {
case "1" => transformFactoryXSL1.newTemplates(new DOMSource(xslDoc))
case vr : String if (vr == "2" || vr == "3") => transformFactoryXSL2.newTemplates(new DOMSource(xslDoc))
}
}
steps += (lastXSL.id -> new XSL(lastXSL.id, lastXSL.label, templates, lastXSL.priority, lastXSL.next))
} catch {
case e : Exception => throw new SAXParseException("Error while parsing XSLT", locator, e)
} finally {
lastXSL = null
lastXSLVersion = null
currentXSLResult = null
}
}
private[this] def getOptQNameList(name : String, atts : Attributes) : Option[List[QName]] = {
val qlistString = atts.getValue(name)
if (qlistString == null) {
None
} else {
Some(qlistString.split(" ").toList.map(q => qname(q)))
}
}
private[this] def getOptRegex(name : String, atts : Attributes) : Option[Regex] = {
val rString = atts.getValue(name)
if (rString == null) {
None
} else {
Some(rString.r)
}
}
private[this] def getMessageCode(atts : Attributes) = {
val message : Option[String] = {
if (atts.getValue("message") == null) {
None
} else {
Some(atts.getValue("message"))
}
}
val code : Option[Int] = {
if (atts.getValue("code") == null) {
None
} else {
Some(atts.getValue("code").toInt)
}
}
(message, code)
}
private[this] def getCaptureHeader(atts : Attributes) = atts.getValue("captureHeader") match {
case s : String => Some(s)
case null => None
}
private[this] def getIsTenant(atts : Attributes) : Boolean = atts.getValue("isTenant") match {
case s : String if (s == "true" || s == "1") => true
case _ => false
}
private[this] def getMatchingRoles(atts : Attributes) : Option[Set[String]] = atts.getValue("matchingRoles") match {
case s : String => Some(Set[String]() ++ s.split(" ").map(_.replaceAll("\\u00A0"," ")))
case _ => None
}
private[this] def addXPath(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val name : Option[String] = Option(atts.getValue("name"))
val _match : String = atts.getValue("match")
val captureHeader = getCaptureHeader(atts)
val mc = getMessageCode(atts)
val message = mc._1
val code = mc._2
val context : NamespaceContext = ImmutableNamespaceContext(prefixes)
val version : Int = {
val sversion = atts.getValue("version")
if (sversion == null) {
config.xpathVersion
} else {
sversion.toInt
}
}
val priority = getPriority (atts)
val isTenant = getIsTenant (atts)
//
// Make an attempt to compile the XPath expression. Throw a
// SAXParseException if something goes wrong.
//
var expression : XPathExpression = null
try {
expression = XPathExpressionPool.borrowExpression(_match, context, version)
} catch {
case spe : SAXParseException => throw spe
case e : Exception => throw new SAXParseException ("Error while compiling XPath expression: "+e.getMessage(), locator, e)
} finally {
if (expression != null) XPathExpressionPool.returnExpression(_match, context, version, expression)
}
next += (id -> nexts)
steps += (id -> new XPath(id, label, name, _match, message, code, context, version, captureHeader, isTenant, priority, new Array[Step](nexts.length)))
}
private[this] def addJSONXPath(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val name : Option[String] = Option(atts.getValue("name"))
val _match : String = atts.getValue("match")
val captureHeader = getCaptureHeader(atts)
val mc = getMessageCode(atts)
val message = mc._1
val code = mc._2
val context : NamespaceContext = ImmutableNamespaceContext(prefixes)
val version : Int = {
val sversion = atts.getValue("version")
if (sversion == null) {
config.xpathVersion
} else {
sversion.toInt
}
}
val priority = getPriority (atts)
val isTenant = getIsTenant (atts)
//
// Make an attempt to compile the XPath expression. Throw a
// SAXParseException if something goes wrong.
//
var expression : XPathExpression = null
try {
expression = XPathExpressionPool.borrowExpression(_match, context, version)
} catch {
case spe : SAXParseException => throw spe
case e : Exception => throw new SAXParseException ("Error while compiling XPath expression: "+e.getMessage(), locator, e)
} finally {
if (expression != null) XPathExpressionPool.returnExpression(_match, context, version, expression)
}
next += (id -> nexts)
steps += (id -> new JSONXPath(id, label, name, _match, message, code, context, version, captureHeader, isTenant, priority, new Array[Step](nexts.length)))
}
private[this] def addAssert(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val _match : String = atts.getValue("match")
val mc = getMessageCode(atts)
val message = mc._1
val code = mc._2
val context : ImmutableNamespaceContext = ImmutableNamespaceContext(prefixes)
val version : Int = {
val sversion = atts.getValue("version")
if (sversion == null) {
Config.RAX_ASSERT_XPATH_VERSION
} else {
sversion.toInt
}
}
val priority = getPriority (atts)
//
// Make an attempt to parse the XPath expression. Throw a
// SAXParseException if something goes wrong.
//
try {
XPathStepUtil.parseXPath(_match, context, version)
} catch {
case spe : SAXParseException => throw spe
case e : Exception => throw new SAXParseException ("Error while compiling assert XPath expression: "+e.getMessage(), locator, e)
}
next += (id -> nexts)
steps += (id -> new Assert(id, label, _match, message, code, context, version, priority, new Array[Step](nexts.length)))
}
private[this] def addCaptureHeader(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val name : String = atts.getValue("name")
val path : String = atts.getValue("path")
val context : ImmutableNamespaceContext = ImmutableNamespaceContext(prefixes)
val version : Int = {
val sversion = atts.getValue("version")
if (sversion == null) {
Config.RAX_ASSERT_XPATH_VERSION
} else {
sversion.toInt
}
}
val isTenant = getIsTenant (atts)
val matchingRoles = getMatchingRoles(atts)
//
// Make an attempt to parse the XPath expression. Throw a
// SAXParseException if something goes wrong.
//
try {
XPathStepUtil.parseXPath(path, context, version)
} catch {
case spe : SAXParseException => throw spe
case e : Exception => throw new SAXParseException ("Error while compiling capture header XPath expression: "+e.getMessage(), locator, e)
}
next += (id -> nexts)
steps += (id -> new CaptureHeader(id, label, name, path, context, version, matchingRoles, isTenant, new Array[Step](nexts.length)))
}
private[this] def addPushXML(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val name : String = atts.getValue("name")
val path : String = atts.getValue("path")
val context : ImmutableNamespaceContext = ImmutableNamespaceContext(prefixes)
val version : Int = {
val sversion = atts.getValue("version")
if (sversion == null) {
Config.RAX_ASSERT_XPATH_VERSION
} else {
sversion.toInt
}
}
val priority = getPriority (atts)
//
// Make an attempt to parse the XPath expression. Throw a
// SAXParseException if something goes wrong.
//
try {
XPathStepUtil.parseXPath(path, context, version)
} catch {
case spe : SAXParseException => throw spe
case e : Exception => throw new SAXParseException ("Error while compiling rax:representation XPath expression: "+e.getMessage(), locator, e)
}
next += (id -> nexts)
steps += (id -> new PushXML(id, label, name, path, context, version, priority, new Array[Step](nexts.length)))
}
private[this] def addPushJSON(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val name : String = atts.getValue("name")
val path : String = atts.getValue("path")
val context : ImmutableNamespaceContext = ImmutableNamespaceContext(prefixes)
val version : Int = {
val sversion = atts.getValue("version")
if (sversion == null) {
Config.RAX_ASSERT_XPATH_VERSION
} else {
sversion.toInt
}
}
val priority = getPriority (atts)
//
// Make an attempt to parse the XPath expression. Throw a
// SAXParseException if something goes wrong.
//
try {
XPathStepUtil.parseXPath(path, context, version)
} catch {
case spe : SAXParseException => throw spe
case e : Exception => throw new SAXParseException ("Error while compiling rax:representation XPath expression: "+e.getMessage(), locator, e)
}
next += (id -> nexts)
steps += (id -> new PushJSON(id, label, name, path, context, version, priority, new Array[Step](nexts.length)))
}
private[this] def addPopRep(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
next += (id -> nexts)
steps += (id -> new PopRep(id, label, new Array[Step](nexts.length)))
}
private[this] def addURL(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val name : Option[String] = Option(atts.getValue("name"))
val _match : String = atts.getValue("match")
val captureHeader = getCaptureHeader(atts)
val isTenant = getIsTenant (atts)
next += (id -> nexts)
steps += (id -> new URI(id, label, name, _match.r, captureHeader, isTenant, new Array[Step](nexts.length)))
}
private[this] def addHeader(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val _match : String = atts.getValue("match")
val name : String = atts.getValue("name")
val mc = getMessageCode(atts)
val message = mc._1
val code = mc._2
val priority = getPriority (atts)
val captureHeader = getCaptureHeader(atts)
val isTenant = getIsTenant (atts)
val matchingRoles = getMatchingRoles(atts)
next += (id -> nexts)
steps += (id -> new Header(id, label, name, _match.r,
message, code, captureHeader, matchingRoles, isTenant, priority,
new Array[Step](nexts.length)))
}
private[this] def addHeaderSingle(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val _match : String = atts.getValue("match")
val name : String = atts.getValue("name")
val mc = getMessageCode(atts)
val message = mc._1
val code = mc._2
val priority = getPriority (atts)
val captureHeader = getCaptureHeader(atts)
val isTenant = getIsTenant (atts)
next += (id -> nexts)
steps += (id -> new HeaderSingle(id, label, name, _match.r,
message, code, captureHeader, isTenant, priority,
new Array[Step](nexts.length)))
}
private[this] def addHeaderAny(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val _match : String = atts.getValue("match")
val name : String = atts.getValue("name")
val mc = getMessageCode(atts)
val message = mc._1
val code = mc._2
val priority = getPriority (atts)
val captureHeader = getCaptureHeader(atts)
val isTenant = getIsTenant (atts)
val matchingRoles = getMatchingRoles(atts)
next += (id -> nexts)
steps += (id -> new HeaderAny(id, label, name, _match.r,
message, code, captureHeader, matchingRoles, isTenant,
priority, new Array[Step](nexts.length)))
}
private[this] def addHeaderXSD(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val _match : String = atts.getValue("match")
val name : String = atts.getValue("name")
val qn : QName = qname(_match)
val mc = getMessageCode(atts)
val message = mc._1
val code = mc._2
val priority = getPriority (atts)
val captureHeader = getCaptureHeader(atts)
val isTenant = getIsTenant (atts)
val matchingRoles = getMatchingRoles(atts)
next += (id -> nexts)
steps += (id -> new HeaderXSD(id, label, name, qn, schema(qn),
message, code, captureHeader, matchingRoles, isTenant,
priority, new Array[Step](nexts.length)))
}
private[this] def addHeaderXSDSingle(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val _match : String = atts.getValue("match")
val name : String = atts.getValue("name")
val qn : QName = qname(_match)
val mc = getMessageCode(atts)
val message = mc._1
val code = mc._2
val priority = getPriority (atts)
val captureHeader = getCaptureHeader(atts)
val isTenant = getIsTenant (atts)
next += (id -> nexts)
steps += (id -> new HeaderXSDSingle(id, label, name, qn, schema(qn),
message, code, captureHeader, isTenant,
priority, new Array[Step](nexts.length)))
}
private[this] def addHeaderXSDAny(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val _match : String = atts.getValue("match")
val name : String = atts.getValue("name")
val qn : QName = qname(_match)
val mc = getMessageCode(atts)
val message = mc._1
val code = mc._2
val priority = getPriority (atts)
val captureHeader = getCaptureHeader(atts)
val isTenant = getIsTenant (atts)
val matchingRoles = getMatchingRoles(atts)
next += (id -> nexts)
steps += (id -> new HeaderXSDAny(id, label, name, qn, schema(qn),
message, code, captureHeader, matchingRoles, isTenant,
priority, new Array[Step](nexts.length)))
}
private[this] def addHeaderAll(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val _match : Option[List[QName]] = getOptQNameList("match", atts)
val _schema : Option[Schema] = if (_match.isEmpty) None else Some(schema(_match.get.head))
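    // Note: when a match list is present, the XSD schema is resolved from the
    // first QName in the list only.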
val value = getOptRegex("matchRegEx", atts)
val name : String = atts.getValue("name")
val mc = getMessageCode(atts)
val message = mc._1
val code = mc._2
val priority = getPriority (atts)
val captureHeader = getCaptureHeader(atts)
val isTenant = getIsTenant (atts)
val matchingRoles = getMatchingRoles(atts)
next += (id -> nexts)
steps += (id -> new HeaderAll(id, label, name, _match, _schema, value,
message, code, captureHeader, matchingRoles, isTenant,
priority, new Array[Step](nexts.length)))
}
private[this] def addSetHeader(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val name : String = atts.getValue("name")
val value : String = atts.getValue("value")
next += (id -> nexts)
steps += (id -> new SetHeader(id, label, name, value, new Array[Step](nexts.length)))
}
private[this] def addSetHeaderAlways(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val name : String = atts.getValue("name")
val value : String = atts.getValue("value")
next += (id -> nexts)
steps += (id -> new SetHeaderAlways(id, label, name, value, new Array[Step](nexts.length)))
}
private[this] def addMethod(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val _match : String = atts.getValue("match")
next += (id -> nexts)
steps += (id -> new Method(id, label, _match.r, new Array[Step](nexts.length)))
}
private[this] def addURLXSD(atts : Attributes) : Unit = {
val nexts : Array[String] = atts.getValue("next").split(" ")
val id : String = atts.getValue("id")
val label : String = atts.getValue("label")
val name : Option[String] = Option(atts.getValue("name"))
val _match : String = atts.getValue("match")
val qn : QName = qname(_match)
val captureHeader = getCaptureHeader(atts)
val isTenant = getIsTenant (atts)
next += (id -> nexts)
steps += (id -> new URIXSD(id, label, name, qn, schema(qn), captureHeader, isTenant, new Array[Step](nexts.length)))
}
private[this] def qname(_match : String) : QName = {
if (_match.contains(":")) {
val qname = _match.split(":")
new QName(prefixes(qname(0)), qname(1), qname(0))
} else {
new QName(prefixes(""), _match)
}
}
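  // Example (assuming the "xs" prefix is currently bound in `prefixes`):
  // qname("xs:string") yields new QName(prefixes("xs"), "string", "xs"),
  // while an unprefixed value such as qname("string") resolves against the
  // default ("") prefix.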
//
// Other content handler methods
//
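  // These SAX callbacks are fanned out to whichever sub-document handlers are
  // currently active (an inline XSD schema handler, an inline XSLT handler,
  // and the JSON buffer), so embedded documents are captured in the same
  // parse pass over the checker document.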
override def characters(ch : Array[Char], start : Int, length : Int) = {
if (contentHandler != null) {
contentHandler.characters(ch, start, length)
}
if (currentSchemaHandler != null) {
currentSchemaHandler.characters(ch, start, length)
}
if (currentXSLHandler != null) {
currentXSLHandler.characters(ch, start, length)
}
if (processJSONBuffer) {
jsonBuffer.appendAll (ch, start, length)
}
}
override def startPrefixMapping (prefix : String, uri : String) = {
prefixes += (prefix -> uri)
if (contentHandler != null) {
contentHandler.startPrefixMapping(prefix, uri)
}
if (currentSchemaHandler != null) {
currentSchemaHandler.startPrefixMapping(prefix, uri)
}
if (currentXSLHandler != null) {
currentXSLHandler.startPrefixMapping(prefix, uri)
}
}
override def endPrefixMapping (prefix : String) = {
prefixes -= prefix
if (contentHandler != null) {
contentHandler.endPrefixMapping(prefix)
}
if (currentSchemaHandler != null) {
currentSchemaHandler.endPrefixMapping(prefix)
}
if (currentXSLHandler != null) {
currentXSLHandler.endPrefixMapping(prefix)
}
}
override def ignorableWhitespace(ch : Array[Char], start : Int, length : Int) = {
if (contentHandler != null) {
contentHandler.ignorableWhitespace(ch, start, length)
}
if (currentSchemaHandler != null) {
currentSchemaHandler.ignorableWhitespace(ch, start, length)
}
if (currentXSLHandler != null) {
currentXSLHandler.ignorableWhitespace(ch, start, length)
}
}
override def processingInstruction(target : String, data : String) = {
if (contentHandler != null) {
contentHandler.processingInstruction(target, data)
}
if (currentSchemaHandler != null) {
currentSchemaHandler.processingInstruction(target, data)
}
if (currentXSLHandler != null) {
currentXSLHandler.processingInstruction(target, data)
}
}
override def setDocumentLocator(locator : Locator) = {
this.locator = locator
if (contentHandler != null) {
contentHandler.setDocumentLocator(locator)
}
if (currentSchemaHandler != null) {
currentSchemaHandler.setDocumentLocator(locator)
}
if (currentXSLHandler != null) {
currentXSLHandler.setDocumentLocator(locator)
}
}
override def skippedEntity (name : String) = {
if (contentHandler != null) {
contentHandler.skippedEntity(name)
}
if (currentSchemaHandler != null) {
currentSchemaHandler.skippedEntity(name)
}
if (currentXSLHandler != null) {
currentXSLHandler.skippedEntity(name)
}
}
override def startDocument = {
if (contentHandler != null) {
contentHandler.startDocument()
}
}
}
|
wdschei/api-checker
|
core/src/main/scala/com/rackspace/com/papi/components/checker/step/StepHandler.scala
|
Scala
|
apache-2.0
| 44,584
|
package TAPLcomp.fullrecon
import scala.text.Document
// outer means that the term is the top-level term
object FullReconPrinter {
import TAPLcomp.Print._
def ptyType(outer: Boolean, ty: Ty): Document = ty match {
case ty => ptyArrowType(outer, ty)
}
def ptyArrowType(outer: Boolean, tyT: Ty): Document = tyT match {
case TyArr(tyT1, tyT2) =>
g2(ptyAType(false, tyT1) :: " ->" :/: ptyArrowType(outer, tyT2))
case tyT =>
ptyAType(outer, tyT)
}
def ptyAType(outer: Boolean, tyT: Ty): Document = tyT match {
case TyVar(b) =>
b
case TyBool =>
"Bool"
case TyNat =>
"Nat"
case tyT =>
"(" :: ptyType(outer, tyT) :: ")"
}
def ptyTy(ty: Ty) = ptyType(true, ty)
def ptmTerm(outer: Boolean, t: Term): Document = t match {
case TmIf(t1, t2, t3) =>
val ifB = g2("if" :/: ptmTerm(outer, t1))
val thenB = g2("then" :/: ptmTerm(outer, t2))
val elseB = g2("else" :/: ptmTerm(outer, t3))
g0(ifB :/: thenB :/: elseB)
case TmAbs(x, Some(tyT1), t2) =>
val abs = g0("lambda" :/: x :: ":" :/: ptyType(false, tyT1) :: ".")
val body = ptmTerm(outer, t2)
g2(abs :/: body)
case TmAbs(x, None, t2) =>
val abs = g0("lambda" :/: x :: ".")
val body = ptmTerm(outer, t2)
g2(abs :/: body)
case TmLet(x, t1, t2) =>
g0("let " :: x :: " = " :: ptmTerm(false, t1) :/: "in" :/: ptmTerm(false, t2))
case t => ptmAppTerm(outer, t)
}
def ptmAppTerm(outer: Boolean, t: Term): Document = t match {
case TmApp(t1, t2) =>
g2(ptmAppTerm(false, t1) :/: ptmATerm(false, t2))
case TmPred(t1) =>
"pred " :: ptmATerm(false, t1)
case TmIsZero(t1) =>
"iszero " :: ptmATerm(false, t1)
case t =>
ptmATerm(outer, t)
}
def ptmATerm(outer: Boolean, t: Term): Document = t match {
case TmTrue =>
"true"
case TmFalse =>
"false"
case TmVar(x) =>
x
case TmZero =>
"0"
case TmSucc(t1) =>
def pf(i: Int, t: Term): Document = t match {
case TmZero =>
i.toString()
case TmSucc(s) =>
pf(i + 1, s)
case _ =>
"(succ " :: ptmATerm(false, t1) :: ")"
}
pf(1, t1)
case t =>
"(" :: ptmTerm(outer, t) :: ")"
}
def ptm(t: Term) = ptmTerm(true, t)
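  // Rough rendering examples (a sketch; actual line breaks depend on the
  // Document layout engine):
  //   ptm(TmAbs("x", Some(TyBool), TmVar("x"))) ~> lambda x:Bool. x
  //   ptm(TmSucc(TmSucc(TmZero))) ~> 2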
}
|
hy-zhang/parser
|
Scala/Parser/src/TAPLcomp/fullrecon/syntax.scala
|
Scala
|
bsd-3-clause
| 2,331
|
package com.github.gigurra.glasciia
import com.badlogic.gdx.Gdx
import com.badlogic.gdx.Input.Keys
import com.github.gigurra.math.Vec2
/**
* Created by johan on 2016-10-03.
*/
case class Pov4W(left: Int = Keys.LEFT,
right: Int = Keys.RIGHT,
up: Int = Keys.UP,
down: Int = Keys.DOWN,
checkFn: Int => Boolean = Gdx.input.isKeyPressed) {
private def value(dir: Int): Int = if (checkFn(dir)) 1 else 0
def dir: Vec2 = Vec2(
x = value(right) - value(left),
y = value(up) - value(down)
)
}
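// Usage sketch: poll each frame to turn the arrow-key state into a direction
// vector, e.g. Vec2(1, 0) when only RIGHT is held, Vec2(-1, 1) for LEFT+UP:
//   val pov = Pov4W()
//   val direction: Vec2 = pov.dir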
|
GiGurra/glasciia
|
glasciia-core/src/main/scala/com/github/gigurra/glasciia/Pov4W.scala
|
Scala
|
mit
| 574
|
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.check.xpath
import java.nio.charset.StandardCharsets.UTF_8
import scala.util.Using
import io.gatling.{ BaseSpec, ValidationValues }
import net.sf.saxon.s9api.XdmNode
class XPathExtractorSpec extends BaseSpec with ValidationValues {
private val namespaces = Map("foo" -> "http://foo/foo")
private val xmlParsers = new XmlParsers(Long.MaxValue)
private def dom(file: String): XdmNode =
Using.resource(getClass.getResourceAsStream(file)) { is =>
XmlParsers.parse(is, UTF_8)
}
private def testCount(expression: String, file: String, expected: Int): Unit = {
val extractor = XPathExtractors.count(expression, namespaces, xmlParsers)
extractor(dom(file)).succeeded shouldBe Some(expected)
}
private def testSingle(expression: String, namespaces: Map[String, String], occurrence: Int, file: String, expected: Option[String]): Unit = {
val extractor = XPathExtractors.find(expression, namespaces, occurrence, xmlParsers)
extractor(dom(file)).succeeded shouldBe expected
}
private def testMultiple(expression: String, namespaces: Map[String, String], file: String, expected: Option[List[String]]): Unit = {
val extractor = XPathExtractors.findAll(expression, namespaces, xmlParsers)
extractor(dom(file)).succeeded shouldBe expected
}
"count" should "return expected result with anywhere expression" in {
testCount("//author", "/test.xml", 4)
}
it should "return expected result with array expression" in {
testCount("/test/store/book[3]/author", "/test.xml", 1)
}
it should "return Some(0) when no results" in {
testCount("/foo", "/test.xml", 0)
}
"extractSingle" should "return expected result with anywhere expression and rank 0" in {
testSingle("//author", namespaces, 0, "/test.xml", Some("Nigel Rees"))
}
it should "support name()" in {
testSingle("//*[name()='author']", namespaces, 0, "/test.xml", Some("Nigel Rees"))
}
it should "return expected result with anywhere expression and rank 1" in {
testSingle("//author", namespaces, 1, "/test.xml", Some("Evelyn Waugh"))
}
it should "return expected result with array expression" in {
testSingle("/test/store/book[3]/author", namespaces, 0, "/test.xml", Some("Herman Melville"))
}
it should "return expected None with array expression" in {
testSingle("/test/store/book[3]/author", namespaces, 1, "/test.xml", None)
}
it should "return expected result with attribute expression" in {
testSingle("/test/store/book[@att = 'foo']/title", namespaces, 0, "/test.xml", Some("Sayings of the Century"))
}
it should "return expected result with last function expression" in {
testSingle("//book[last()]/title", namespaces, 0, "/test.xml", Some("The Lord of the Rings"))
}
it should "support default namespace" in {
testSingle("//pre:name", Map("pre" -> "http://schemas.test.com/entityserver/runtime/1.0"), 0, "/test2.xml", Some("HR"))
}
"extractMultiple" should "return expected result with anywhere expression" in {
testMultiple("//author", namespaces, "/test.xml", Some(List("Nigel Rees", "Evelyn Waugh", "Herman Melville", "J. R. R. Tolkien")))
}
it should "return expected result with array expression" in {
testMultiple("/test/store/book[3]/author", namespaces, "/test.xml", Some(List("Herman Melville")))
}
it should "return expected result with anywhere namespaced element" in {
testMultiple("//foo:bar", namespaces, "/test.xml", Some(List("fooBar")))
}
}
|
gatling/gatling
|
gatling-core/src/test/scala/io/gatling/core/check/xpath/XPathExtractorSpec.scala
|
Scala
|
apache-2.0
| 4,136
|
package skuber.api.watch
import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.model._
import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, Merge, Source}
import akka.stream.SourceShape
import play.api.libs.json.Format
import skuber.api.client._
import skuber.api.client.impl.KubernetesClientImpl
import skuber.{ObjectResource, ResourceDefinition, ListOptions}
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.util.{Failure, Success}
private[api] object WatchSource {
sealed trait StreamElement[O <: ObjectResource] {}
case class End[O <: ObjectResource]() extends StreamElement[O]
case class Start[O <: ObjectResource](resourceVersion: Option[String]) extends StreamElement[O]
case class Result[O <: ObjectResource](resourceVersion: String, value: WatchEvent[O]) extends StreamElement[O]
sealed trait StreamState {}
case object Waiting extends StreamState
case object Processing extends StreamState
case object Finished extends StreamState
case class StreamContext(currentResourceVersion: Option[String], state: StreamState)
def apply[O <: ObjectResource](client: KubernetesClientImpl,
pool: Pool[Start[O]],
name: Option[String],
options: ListOptions,
bufSize: Int)(implicit sys: ActorSystem,
format: Format[O],
rd: ResourceDefinition[O],
lc: LoggingContext): Source[WatchEvent[O], NotUsed] = {
Source.fromGraph(GraphDSL.create() { implicit b =>
import GraphDSL.Implicits._
implicit val dispatcher: ExecutionContext = sys.dispatcher
def createWatchRequest(since: Option[String]) =
{
val nameFieldSelector=name.map(objName => s"metadata.name=$objName")
val watchOptions=options.copy(
resourceVersion = since,
watch = Some(true),
fieldSelector = nameFieldSelector.orElse(options.fieldSelector)
)
client.buildRequest(
HttpMethods.GET, rd, None, query = Some(Uri.Query(watchOptions.asMap))
)
}
val singleEnd = Source.single(End[O]())
def singleStart(s:StreamElement[O]) = Source.single(s)
val initSource = Source.single(
(createWatchRequest(options.resourceVersion), Start[O](options.resourceVersion))
)
val httpFlow: Flow[(HttpRequest, Start[O]), StreamElement[O], NotUsed] =
Flow[(HttpRequest, Start[O])].map { request => // log request
client.logInfo(client.logConfig.logRequestBasic, s"about to send HTTP request: ${request._1.method.value} ${request._1.uri.toString}")
request
}.via(pool).flatMapConcat {
case (Success(HttpResponse(StatusCodes.OK, _, entity, _)), se) =>
client.logInfo(client.logConfig.logResponseBasic, s"received response with HTTP status 200")
singleStart(se).concat(
BytesToWatchEventSource[O](entity.dataBytes, bufSize).map { event =>
Result[O](event._object.resourceVersion, event)
}
).concat(singleEnd)
case (Success(HttpResponse(sc, _, entity, _)), _) =>
client.logWarn(s"Error watching resource. Received a status of ${sc.intValue()}")
entity.discardBytes()
throw new K8SException(Status(message = Some("Non-OK status code received while watching resource"), code = Some(sc.intValue())))
case (Failure(f), _) =>
client.logError("Error watching resource.", f)
throw new K8SException(Status(message = Some("Error watching resource"), details = Some(f.getMessage)))
}
val outboundFlow: Flow[StreamElement[O], WatchEvent[O], NotUsed] =
Flow[StreamElement[O]]
.collect {
case Result(_, event) => event
}
val feedbackFlow: Flow[StreamElement[O], (HttpRequest, Start[O]), NotUsed] =
Flow[StreamElement[O]].scan(StreamContext(None, Waiting)){(cxt, next) =>
next match {
case Start(rv) => StreamContext(rv, Processing)
case Result(rv, _) => StreamContext(Some(rv), Processing)
case End() => cxt.copy(state = Finished)
}
}.filter(_.state == Finished).map { acc =>
(createWatchRequest(acc.currentResourceVersion), Start[O](acc.currentResourceVersion))
}
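      // Graph wiring sketch: watch requests from `init` and `feedback` are
      // merged into the pooled HTTP flow; its stream elements are broadcast
      // both to `outbound` (which emits WatchEvents downstream) and to
      // `feedback` (which, when a response stream Ends, issues a fresh watch
      // request resuming from the last seen resourceVersion).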
val init = b.add(initSource)
val http = b.add(httpFlow)
val merge = b.add(Merge[(HttpRequest, Start[O])](2))
val broadcast = b.add(Broadcast[StreamElement[O]](2, eagerCancel = true))
val outbound = b.add(outboundFlow)
val feedback = b.add(feedbackFlow)
init ~> merge ~> http ~> broadcast ~> outbound
merge <~ feedback <~ broadcast
SourceShape(outbound.out)
})
}
}
|
doriordan/skuber
|
client/src/main/scala/skuber/api/watch/WatchSource.scala
|
Scala
|
apache-2.0
| 4,991
|
package be.objectify.deadbolt.scala.test.dao
import be.objectify.deadbolt.scala.models.Subject
import be.objectify.deadbolt.scala.test.models.{SecurityPermission, SecurityRole, SecuritySubject}
class TestSubjectDao extends SubjectDao {
val subjects: Map[String, Subject] = Map("greet" -> new SecuritySubject("greet",
List(SecurityRole("foo"),
SecurityRole("bar")),
List(SecurityPermission("killer.undead.zombie"))),
"lotte" -> new SecuritySubject("lotte",
List(SecurityRole("hurdy")),
List(SecurityPermission("killer.undead.vampire"))),
"steve" -> new SecuritySubject("steve",
List(SecurityRole("bar")),
List(SecurityPermission("curator.museum.insects"))),
"mani" -> new SecuritySubject("mani",
List(SecurityRole("bar"),
SecurityRole("hurdy")),
List(SecurityPermission("zombie.movie.enthusiast"))),
"trippel" -> new SecuritySubject("trippel",
List(SecurityRole("foo"),
SecurityRole("hurdy")),
List[SecurityPermission]()))
override def user(userName: String): Option[Subject] = subjects.get(userName)
}
|
schaloner/deadbolt-2-scala
|
test-app/test/be/objectify/deadbolt/scala/test/dao/TestSubjectDao.scala
|
Scala
|
apache-2.0
| 2,183
|
package org.scalajs.openui5.sap.ui.core
import scala.scalajs.js
import scala.scalajs.js.annotation.JSName
package object mvc {
type ViewType = String
}
|
lastsys/scalajs-openui5
|
src/main/scala/org/scalajs/openui5/sap/ui/core/mvc/package.scala
|
Scala
|
mit
| 156
|
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.pipes.matching
import org.neo4j.cypher.internal.compiler.v2_3._
import org.neo4j.cypher.internal.compiler.v2_3.commands.predicates.Predicate
import org.neo4j.cypher.internal.compiler.v2_3.pipes.QueryState
import org.neo4j.cypher.internal.frontend.v2_3.SemanticDirection
import org.neo4j.cypher.internal.frontend.v2_3.SemanticDirection.{INCOMING, OUTGOING, BOTH}
import org.neo4j.graphdb.{Node, PropertyContainer, Relationship}
import scala.collection.Map
class PatternMatchingBuilder(patternGraph: PatternGraph,
predicates: Seq[Predicate],
identifiersInClause: Set[String]) extends MatcherBuilder {
def getMatches(sourceRow: ExecutionContext, state:QueryState): Traversable[ExecutionContext] = {
val bindings: Map[String, Any] = sourceRow.filter(_._2.isInstanceOf[PropertyContainer])
val boundPairs: Map[String, Set[MatchingPair]] = extractBoundMatchingPairs(bindings)
val undirectedBoundRelationships: Iterable[PatternRelationship] = bindings.keys.
filter(z => patternGraph.contains(z)).
filter(patternGraph(_).exists(_.isInstanceOf[PatternRelationship])).
flatMap(patternGraph(_).asInstanceOf[Seq[PatternRelationship]]).
filter(_.dir == SemanticDirection.BOTH)
val mandatoryPattern: Traversable[ExecutionContext] = if (undirectedBoundRelationships.isEmpty) {
createPatternMatcher(boundPairs, includeOptionals = false, sourceRow, state)
} else {
val boundRels = createListOfBoundRelationshipsWithHangingNodes(undirectedBoundRelationships, bindings)
boundRels.
flatMap(relMap => createPatternMatcher(relMap ++ boundPairs, includeOptionals = false, sourceRow, state))
}
mandatoryPattern
}
private def createListOfBoundRelationshipsWithHangingNodes(undirectedBoundRelationships:
Iterable[PatternRelationship], bindings: Map[String,
Any]): Seq[Map[String, Set[MatchingPair]]] = {
val toList = undirectedBoundRelationships.map(patternRel => {
val rel = bindings(patternRel.key).asInstanceOf[Relationship]
val x = patternRel.key -> Set(MatchingPair(patternRel, rel))
// Outputs the first direction of the pattern relationship
val a1 = patternRel.startNode.key -> Set(MatchingPair(patternRel.startNode, rel.getStartNode))
val a2 = patternRel.endNode.key -> Set(MatchingPair(patternRel.endNode, rel.getEndNode))
// Outputs the second direction of the pattern relationship
val b1 = patternRel.startNode.key -> Set(MatchingPair(patternRel.startNode, rel.getEndNode))
val b2 = patternRel.endNode.key -> Set(MatchingPair(patternRel.endNode, rel.getStartNode))
Seq(Map(x, a1, a2), Map(x, b1, b2))
}).toList
cartesian(toList).map(_.reduceLeft(_ ++ _))
}
// This method takes a Seq of Seq and produces the cartesian product of all inner Seqs
// I'm committing this code, but it's all Tobias' doing.
private def cartesian[T](lst: Seq[Seq[T]]): Seq[Seq[T]] =
lst.foldRight(List(List[T]()))(// <- the type T needs to be specified here
(element: Seq[T], result: List[List[T]]) => // types for better readability
result.flatMap(r => element.map(e => e :: r))
).toSeq
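  // For example:
  //   cartesian(Seq(Seq(1, 2), Seq(3, 4))) ==
  //     Seq(Seq(1, 3), Seq(2, 3), Seq(1, 4), Seq(2, 4))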
private def createPatternMatcher(boundPairs: Map[String, Set[MatchingPair]], includeOptionals: Boolean, source: ExecutionContext, state:QueryState): Traversable[ExecutionContext] =
new PatternMatcher(boundPairs, predicates, source, state, identifiersInClause)
private def extractBoundMatchingPairs(bindings: Map[String, Any]): Map[String, Set[MatchingPair]] = bindings.flatMap {
case (key, node: Node) if patternGraph.contains(key) =>
Seq(key -> patternGraph(key).map(pNode => MatchingPair(pNode, node)).toSet)
case (key, rel: Relationship) if patternGraph.contains(key) =>
val patternRels = patternGraph(key).asInstanceOf[Seq[PatternRelationship]]
patternRels.flatMap(pRel => {
def extractMatchingPairs(startNode: PatternNode, endNode: PatternNode): Seq[(String, Set[MatchingPair])] = {
val t1 = startNode.key -> Set(MatchingPair(startNode, rel.getStartNode))
val t2 = endNode.key -> Set(MatchingPair(endNode, rel.getEndNode))
val t3 = pRel.key -> Set(MatchingPair(pRel, rel))
// Check that found end nodes correspond to what is already in scope
if (bindings.get(t1._1).forall(_ == t1._2.head.entity) &&
bindings.get(t2._1).forall(_ == t2._2.head.entity))
Seq(t1, t2, t3)
else
Seq.empty[(String, Set[MatchingPair])]
}
pRel.dir match {
case SemanticDirection.OUTGOING => extractMatchingPairs(pRel.startNode, pRel.endNode)
case SemanticDirection.INCOMING => extractMatchingPairs(pRel.endNode, pRel.startNode)
case SemanticDirection.BOTH if bindings.contains(pRel.key) => Seq(pRel.key -> Set(MatchingPair(pRel, rel)))
case SemanticDirection.BOTH => Seq.empty[(String, Set[MatchingPair])]
}
})
case (key, _) => Seq(key -> Set.empty[MatchingPair])
}
def name = "PatternMatcher"
override def startPoint: String = ""
}
|
HuangLS/neo4j
|
community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/pipes/matching/PatternMatchingBuilder.scala
|
Scala
|
apache-2.0
| 6,050
|
///////////////////////////////////////////////////////////////
// © Primetalk LLC (ООО «Праймтолк»), 2011-2013               //
// All rights belong to Primetalk LLC.                        //
///////////////////////////////////////////////////////////////
/**
* SynapseGrid
* © Primetalk Ltd., 2013.
* All rights reserved.
* Authors: A.Zhizhelev, A.Nehaev, P. Popov
* <p/>
* Created: 17.07.13, zhizhelev
*/
package ru.primetalk.synapse.examples
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import ru.primetalk.synapse.core._
import ru.primetalk.synapse.examples.Examples2.StringSplitterBuilder
@RunWith(classOf[JUnitRunner])
class Examples2Test extends FunSuite {
test("2"){
(StringSplitterBuilder:StaticSystem).toDot().trySaveTo("StringSplitter.dot")
}
test("collection"){
// val list = List(1)
// val arr = Array(1)
// arr.flatMap(_*2)
}
}
|
GitOutATown/SynapseGrid
|
synapse-grid-examples/src/test/scala/ru/primetalk/synapse/examples/Examples2Test.scala
|
Scala
|
bsd-2-clause
| 987
|
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail
import cats.effect.IO
import cats.laws._
import cats.laws.discipline._
import monix.eval.Task
import monix.execution.Scheduler
import monix.execution.atomic.Atomic
import monix.execution.exceptions.DummyException
import org.reactivestreams.{Publisher, Subscriber, Subscription}
import org.scalacheck.{Arbitrary, Gen}
import scala.concurrent.Promise
import scala.util.{Failure, Success}
import scala.concurrent.duration._
object IterantFromReactivePublisherSuite extends BaseTestSuite {
implicit val arbRange: Arbitrary[Range] = Arbitrary {
for {
i <- Gen.choose(-100, 100)
j <- Gen.choose(-100, 100)
Array(min, max) = Array(i, j).sorted
step <- Gen.oneOf(1, 2, 3)
} yield min until max by step
}
test("fromReactivePublisher(bufferSize = 1) emits values in correct order") { implicit s =>
check1 { range: Range =>
val publisher = new RangePublisher(range, None)
Iterant[IO].fromReactivePublisher(publisher, 1) <-> Iterant[IO].fromSeq(range)
}
}
test("fromReactivePublisher(bufferSize = 1) can end in error") { implicit s =>
check1 { range: Range =>
val dummy = DummyException("dummy")
val publisher = new RangePublisher(range, Some(dummy))
Iterant[IO].fromReactivePublisher(publisher, 1).attempt <->
(Iterant[IO].fromSeq(range).map(Right(_)) ++ Iterant[IO].pure[Either[Throwable, Int]](Left(dummy)))
}
}
test("fromReactivePublisher(bufferSize = default) emits values in correct order") { implicit s =>
check1 { range: Range =>
val publisher = new RangePublisher(range, None)
Iterant[IO].fromReactivePublisher(publisher) <-> Iterant[IO].fromSeq(range)
}
}
test("fromReactivePublisher(bufferSize = default) can end in error") { implicit s =>
check1 { range: Range =>
val dummy = DummyException("dummy")
val publisher = new RangePublisher(range, Some(dummy))
Iterant[IO].fromReactivePublisher(publisher).attempt <->
(Iterant[IO].fromSeq(range).map(Right(_)) ++ Iterant[IO].pure[Either[Throwable, Int]](Left(dummy)))
}
}
test("fromReactivePublisher(bufferSize = default) with slow consumer") { implicit s =>
check1 { range: Range =>
val publisher = new RangePublisher(range, None)
val lh = Iterant[Task].fromReactivePublisher(publisher).mapEval(x => Task(x).delayExecution(10.millis))
lh <-> Iterant[Task].fromSeq(range)
}
}
test("fromReactivePublisher cancels subscription on earlyStop") { implicit s =>
val cancelled = Promise[Unit]()
val publisher = new RangePublisher(1 to 64, None, cancelled)
Iterant[Task]
.fromReactivePublisher(publisher, 8)
.take(5)
.completedL
.runToFuture
s.tick()
assertEquals(cancelled.future.value, Some(Success(())))
}
test("fromReactivePublisher propagates errors") { implicit s =>
val dummy = DummyException("dummy")
val publisher = new RangePublisher(1 to 64, Some(dummy))
val f = Iterant[Task].fromReactivePublisher(publisher).completedL.runToFuture
s.tick()
assertEquals(f.value, Some(Failure(dummy)))
}
test("fromReactivePublisher(it.toReactivePublisher) is identity") { implicit s =>
check1 { it: Iterant[IO, Int] =>
Iterant[IO].fromReactivePublisher(it.toReactivePublisher) <-> it
}
}
test("fromReactivePublisher handles immediate completion") { implicit s =>
val publisher = new Publisher[Unit] {
def subscribe(subscriber: Subscriber[_ >: Unit]): Unit = {
subscriber.onComplete()
}
}
val f = Iterant[Task].fromReactivePublisher(publisher).completedL.runToFuture
s.tick()
assertEquals(f.value, Some(Success(())))
}
class RangePublisher(from: Int, until: Int, step: Int, finish: Option[Throwable], onCancel: Promise[Unit])(
implicit sc: Scheduler)
extends Publisher[Int] {
def this(range: Range, finish: Option[Throwable])(implicit sc: Scheduler) =
this(range.start, range.end, range.step, finish, null)
def this(range: Range, finish: Option[Throwable], onCancel: Promise[Unit])(implicit sc: Scheduler) =
this(range.start, range.end, range.step, finish, onCancel)
def subscribe(s: Subscriber[_ >: Int]): Unit = {
s.onSubscribe(new Subscription { self =>
private[this] val cancelled = Atomic(false)
private[this] val requested = Atomic(0L)
private[this] var index = from
def isInRange(x: Long, until: Long, step: Long): Boolean = {
(step > 0 && x < until) || (step < 0 && x > until)
}
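      // Backpressure drain loop: only the request() call that raises the
      // pending count from 0 schedules this Runnable; it keeps emitting while
      // demand remains, re-reading `requested` after each batch so demand
      // signalled during emission is also honoured.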
def request(n: Long): Unit = {
if (requested.getAndAdd(n) == 0)
sc.execute(new Runnable {
def run(): Unit = {
var requested = self.requested.get()
var toSend = requested
while (toSend > 0 && isInRange(index.toLong, until.toLong, step.toLong) && !cancelled.get()) {
s.onNext(index)
index += step
toSend -= 1
if (toSend == 0) {
requested = self.requested.subtractAndGet(requested)
toSend = requested
}
}
if (!isInRange(index.toLong, until.toLong, step.toLong))
finish match {
case None =>
s.onComplete()
case Some(e) =>
s.onError(e)
}
}
})
}
def cancel(): Unit = {
cancelled.set(true)
if (onCancel != null) {
onCancel.success(())
()
}
}
})
}
}
}
|
monifu/monix
|
monix-tail/shared/src/test/scala/monix/tail/IterantFromReactivePublisherSuite.scala
|
Scala
|
apache-2.0
| 6,385
|
/*
* Copyright (c) 2011-16 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
import scala.language.dynamics
import scala.language.experimental.macros
import scala.annotation.tailrec
import scala.reflect.macros.whitebox
/**
* `HList` ADT base trait.
*
* @author Miles Sabin
*/
sealed trait HList extends Product with Serializable
/**
* Non-empty `HList` element type.
*
* @author Miles Sabin
*/
final case class ::[+H, +T <: HList](head : H, tail : T) extends HList {
override def toString = head match {
case _: ::[_, _] => "("+head+") :: "+tail.toString
case _ => head+" :: "+tail.toString
}
}
/**
* Empty `HList` element type.
*
* @author Miles Sabin
*/
sealed trait HNil extends HList {
def ::[H](h : H) = shapeless.::(h, this)
override def toString = "HNil"
}
/**
* Empty `HList` value.
*
* @author Miles Sabin
*/
case object HNil extends HNil
object HList extends Dynamic {
import ops.hlist._
import syntax.HListOps
def apply() = HNil
def apply[T](t: T) = t :: HNil
def apply[P <: Product, L <: HList](p : P)(implicit gen: Generic.Aux[P, L]) : L = gen.to(p)
/**
* Produces a HList of length `N` filled with `elem`.
*/
def fill[A](n: Nat)(elem: A)(implicit fill: Fill[n.N, A]) : fill.Out = fill(elem)
/**
* Produces a `N1`-length HList made of `N2`-length HLists filled with `elem`.
*/
def fill[A](n1: Nat, n2: Nat)(elem: A)(implicit fill: Fill[(n1.N, n2.N), A]) : fill.Out = fill(elem)
implicit def hlistOps[L <: HList](l : L) : HListOps[L] = new HListOps(l)
/**
* Convenience aliases for HList :: and List :: allowing them to be used together within match expressions.
*/
object ListCompat {
val :: = scala.collection.immutable.::
val #: = shapeless.::
}
/**
* Allows to specify an `HList` type with a syntax similar to `Record` and `Union`, as follows,
*
* {{{
* type ISB = HList.`Int, String, Boolean`.T
* }}}
*
* Literal types are allowed, so that the following is valid,
*
* {{{
* type ABC = HList.`'a, 'b, 'c`.T
* type TwoTrueStr = HList.`2, true, "str"`.T
* }}}
*/
def selectDynamic(tpeSelector: String): Any = macro LabelledMacros.hlistTypeImpl
@tailrec
def unsafeGet(l: HList, i: Int): Any = {
val c = l.asInstanceOf[::[Any, HList]]
if(i == 0) c.head
else unsafeGet(c.tail, i-1)
}
def unsafeUpdate(l: HList, i: Int, e: Any): HList = {
@tailrec
def loop(l: HList, i: Int, prefix: List[Any]): (List[Any], HList) =
l match {
case HNil => (prefix, e :: HNil)
case hd :: (tl : HList) if i == 0 => (prefix, e :: tl)
case hd :: (tl : HList) => loop(tl, i-1, hd :: prefix)
}
val (prefix, suffix) = loop(l, i, Nil)
prefix.foldLeft(suffix) { (tl, hd) => hd :: tl }
}
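  // For example: unsafeUpdate(1 :: 2 :: 3 :: HNil, 1, "x") == 1 :: "x" :: 3 :: HNil
  // (the prefix before index i is rebuilt on top of the updated suffix).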
}
/**
* Trait supporting mapping dynamic argument lists of Ints to HList of Nat arguments.
*
* Mixing in this trait enables method applications of the form,
*
* {{{
* lhs.method(1, 2, 3)
* }}}
*
* to be rewritten as,
*
* {{{
* lhs.methodProduct(_1 :: _2 :: _3)
* }}}
*
* ie. the arguments are rewritten as HList elements of Nat and the application is
* rewritten to an application of an implementing method (identified by the
* "Product" suffix) which accepts a single HList of Int argument.
*
* @author Andreas Koestler
*/
trait NatProductArgs extends Dynamic {
def applyDynamic(method: String)(args: Int*): Any = macro ProductMacros.forwardNatImpl
}
/**
* Trait supporting mapping dynamic argument lists to HList arguments.
*
* Mixing in this trait enables method applications of the form,
*
* {{{
* lhs.method(23, "foo", true)
* }}}
*
* to be rewritten as,
*
* {{{
* lhs.methodProduct(23 :: "foo" :: true)
* }}}
*
* ie. the arguments are rewritten as HList elements and the application is
* rewritten to an application of an implementing method (identified by the
* "Product" suffix) which accepts a single HList argument.
*
*/
trait ProductArgs extends Dynamic {
def applyDynamic(method: String)(args: Any*): Any = macro ProductMacros.forwardImpl
}
/**
* Trait supporting mapping dynamic argument lists to singleton-typed HList arguments.
*
* Mixing in this trait enables method applications of the form,
*
* {{{
* lhs.method(23, "foo", true)
* }}}
*
* to be rewritten as,
*
* {{{
* lhs.methodProduct(23.narrow :: "foo".narrow :: true.narrow)
* }}}
*
* ie. the arguments are rewritten as singleton-typed HList elements and the
* application is rewritten to an application of an implementing method (identified by the
* "Product" suffix) which accepts a single HList argument.
*/
trait SingletonProductArgs extends Dynamic {
def applyDynamic(method: String)(args: Any*): Any = macro ProductMacros.forwardSingletonImpl
}
@macrocompat.bundle
class ProductMacros(val c: whitebox.Context) extends SingletonTypeUtils with NatMacroDefns {
import c.universe._
import internal.constantType
def forwardImpl(method: Tree)(args: Tree*): Tree = forward(method, args, false)
def forwardNatImpl(method: Tree)(args: Tree*): Tree = forwardNat(method, args)
def forwardSingletonImpl(method: Tree)(args: Tree*): Tree = forward(method, args, true)
def forwardNat(method: Tree, args: Seq[Tree]): Tree = {
val lhs = c.prefix.tree
val lhsTpe = lhs.tpe
val q"${methodString: String}" = method
val methodName = TermName(methodString+"NatProduct")
if(lhsTpe.member(methodName) == NoSymbol)
c.abort(c.enclosingPosition, s"missing method '$methodName'")
val meth = lhsTpe.member(methodName).asMethod
if (!meth.paramLists.isEmpty && (meth.paramLists(0) forall (_.isImplicit))) {
val typeParamsTree = mkProductNatTypeParamsImpl(args)
q""" $lhs.$methodName[${typeParamsTree}] """
} else {
val argsTree = mkProductNatImpl(args)
q""" $lhs.$methodName($argsTree) """
}
}
def forward(method: Tree, args: Seq[Tree], narrow: Boolean): Tree = {
val lhs = c.prefix.tree
val lhsTpe = lhs.tpe
val q"${methodString: String}" = method
val methodName = TermName(methodString+"Product")
if(lhsTpe.member(methodName) == NoSymbol)
c.abort(c.enclosingPosition, s"missing method '$methodName'")
val argsTree = mkProductImpl(args, narrow)
q""" $lhs.$methodName($argsTree) """
}
def mkProductImpl(args: Seq[Tree], narrow: Boolean): Tree = {
args.foldRight((hnilTpe, q"_root_.shapeless.HNil: $hnilTpe": Tree)) {
case(elem, (accTpe, accTree)) =>
val (neTpe, neTree) = if(narrow) narrowValue(elem) else (elem.tpe, elem)
(appliedType(hconsTpe, List(neTpe, accTpe)), q"""_root_.shapeless.::[$neTpe, $accTpe]($neTree, $accTree)""")
}._2
}
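  // Roughly, mkProductImpl(Seq(q"23", q""" "foo" """), narrow = false) builds
  // a tree along the lines of
  //   _root_.shapeless.::[Int, ::[String, HNil]](23, ::("foo", HNil))
  // (a sketch: the real trees are fully qualified and typed from the arguments).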
def mkProductNatImpl(args: Seq[Tree]): Tree = {
args.foldRight((tq"_root_.shapeless.HNil", q"_root_.shapeless.HNil: $hnilTpe"): (Tree, Tree)) {
case(NatLiteral(n), (accTpt, accTree)) =>
val neTpt = mkNatTpt(n)
val neTree = mkNatValue(n)
(tq"""_root_.shapeless.::[$neTpt, $accTpt]""", q"""_root_.shapeless.::[$neTpt, $accTpt]($neTree, $accTree)""")
case (elem, _) =>
c.abort(c.enclosingPosition, s"Expression $elem does not evaluate to a non-negative Int literal")
}._2
}
def mkProductNatTypeParamsImpl(args: Seq[Tree]): Tree = {
args.foldRight((tq"_root_.shapeless.HNil", tq"_root_.shapeless.HNil"): (Tree, Tree)) {
case (NatLiteral(n), (accTpt, _)) =>
val neTpt = mkNatTpt(n)
(tq"""_root_.shapeless.::[$neTpt, $accTpt]""", tq"""_root_.shapeless.::[$neTpt, $accTpt]""")
case (elem, _) =>
c.abort(c.enclosingPosition, s"Expression $elem does not evaluate to a non-negative Int literal")
}._2
}
}
|
liff/shapeless
|
core/src/main/scala/shapeless/hlists.scala
|
Scala
|
apache-2.0
| 8,263
|
package org.rebeam.boxes.swing.views
import org.rebeam.boxes.core.util._
import org.rebeam.boxes.core._
import org.rebeam.boxes.swing._
import java.awt.Dimension
import javax.swing.JTextField
import java.awt.event.ActionListener
import java.awt.event.ActionEvent
import java.awt.event.FocusListener
import java.awt.event.FocusEvent
import javax.swing.Icon
import javax.swing.JToggleButton.ToggleButtonModel
import BoxUtils._
import BoxTypes._
import BoxScriptImports._
import scalaz._
import Scalaz._
sealed trait BooleanControlType
case object Checkbox extends BooleanControlType
case object ToggleButton extends BooleanControlType
case object ToolbarButton extends BooleanControlType
case object SlideCheck extends BooleanControlType
case object Radio extends BooleanControlType
case object Tab extends BooleanControlType
object BooleanView {
def extended(v: BoxM[Boolean], n: BoxR[String] = just(""), controlType: BooleanControlType = SlideCheck, icon: BoxR[Option[Icon]], toggle: Boolean = true) = new BooleanOptionView(v, n, new TConverter[Boolean], controlType, icon, toggle).asInstanceOf[SwingView]
def apply(v: BoxM[Boolean], controlType: BooleanControlType = SlideCheck, toggle: Boolean = true) = new BooleanOptionView(v, just(""), new TConverter[Boolean], controlType, just(None), toggle).asInstanceOf[SwingView]
def toolbar(v: BoxM[Boolean], icon: BoxR[Option[Icon]], toggle: Boolean = true) = new BooleanOptionView(v, just(""), new TConverter[Boolean], ToolbarButton, icon, toggle).asInstanceOf[SwingView]
}
object BooleanOptionView {
def extended(v: BoxM[Option[Boolean]], n: BoxR[String], controlType: BooleanControlType = SlideCheck, icon: BoxR[Option[Icon]], toggle: Boolean = true) = new BooleanOptionView(v, n, new OptionTConverter[Boolean], controlType, icon, toggle).asInstanceOf[SwingView]
def apply(v: BoxM[Option[Boolean]], controlType: BooleanControlType = SlideCheck, toggle: Boolean = true) = new BooleanOptionView(v, just(""), new OptionTConverter[Boolean], controlType, just(None), toggle).asInstanceOf[SwingView]
}
private class BooleanOptionView[G](v: BoxM[G], n: BoxR[String], c: GConverter[G, Boolean], controlType: BooleanControlType, icon: BoxR[Option[Icon]], toggle: Boolean = true) extends SwingView {
val component = controlType match {
case Checkbox => new LinkingJCheckBox(this)
case Radio => new LinkingJRadioButton(this)
case ToggleButton => new LinkingJToggleButton(this)
case ToolbarButton => new LinkingToolbarToggleButton(this)
case SlideCheck => new LinkingSlideCheckButton(this)
case Tab => new LinkingTabButton(this)
}
private val model = new AutoButtonModel()
//Update delegate from Box
val observer = {
val g: BoxScript[G] = for {
n <- v()
} yield n
//TODO use applicative or similar to make this neater
val script = for {
newV <- v()
newN <- n
newIcon <- icon
} yield (newV, newN, newIcon)
SwingView.observer(this, script){v => display(v._1, v._2, v._3)}
}
{
component.setModel(model)
component.addActionListener(new ActionListener(){
//On action, toggle value if it is not None
override def actionPerformed(e:ActionEvent) = atomic {
for {
value <- v()
_ <- c.toOption(value) match {
case None => nothing
case Some(b) => v() = if (toggle) c.toG(!b) else c.toG(true)
}
} yield ()
}
})
atomic { observe(observer) }
}
//Update display if necessary
private def display(newV: G, newN: String, newIcon: Option[Icon]): Unit = {
c.toOption(newV) match {
case None => {
if (model.enabled || model.selected) {
model.enabled = false
model.selected = false
model.fire()
}
}
case Some(b) => {
if (!model.enabled || model.selected != b) {
model.enabled = true
model.selected = b
model.fire()
}
}
}
if (newN != component.getText) {
component.setText(newN)
}
val iconOrNull = newIcon.getOrElse(null)
if (iconOrNull != component.getIcon) {
component.setIcon(iconOrNull)
}
}
private class AutoButtonModel extends ToggleButtonModel {
var enabled = true
var selected = true
def fire() = fireStateChanged()
override def isSelected = selected
override def isEnabled = enabled
}
}
|
trepidacious/boxes-swing
|
src/main/scala/org/rebeam/boxes/swing/views/BooleanView.scala
|
Scala
|
gpl-2.0
| 4,425
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.mesos.ui
import javax.servlet.http.HttpServletRequest
import scala.xml.Node
import org.apache.spark.deploy.Command
import org.apache.spark.deploy.mesos.MesosDriverDescription
import org.apache.spark.scheduler.cluster.mesos.{MesosClusterRetryState, MesosClusterSubmissionState}
import org.apache.spark.ui.{UIUtils, WebUIPage}
private[ui] class DriverPage(parent: MesosClusterUI) extends WebUIPage("driver") {
override def render(request: HttpServletRequest): Seq[Node] = {
// stripXSS is called first to remove suspicious characters used in XSS attacks
val driverId = UIUtils.stripXSS(request.getParameter("id"))
require(driverId != null && driverId.nonEmpty, "Missing id parameter")
val state = parent.scheduler.getDriverState(driverId)
if (state.isEmpty) {
val content =
<div>
<p>Cannot find driver {driverId}</p>
</div>
return UIUtils.basicSparkPage(content, s"Details for Job $driverId")
}
val driverState = state.get
val driverHeaders = Seq("Driver property", "Value")
val schedulerHeaders = Seq("Scheduler property", "Value")
val commandEnvHeaders = Seq("Command environment variable", "Value")
val launchedHeaders = Seq("Launched property", "Value")
val commandHeaders = Seq("Command property", "Value")
val retryHeaders = Seq("Last failed status", "Next retry time", "Retry count")
val driverDescription = Iterable.apply(driverState.description)
val submissionState = Iterable.apply(driverState.submissionState)
val command = Iterable.apply(driverState.description.command)
val schedulerProperties = Iterable.apply(driverState.description.conf.getAll.toMap)
val commandEnv = Iterable.apply(driverState.description.command.environment)
val driverTable =
UIUtils.listingTable(driverHeaders, driverRow, driverDescription)
val commandTable =
UIUtils.listingTable(commandHeaders, commandRow, command)
val commandEnvTable =
UIUtils.listingTable(commandEnvHeaders, propertiesRow, commandEnv)
val schedulerTable =
UIUtils.listingTable(schedulerHeaders, propertiesRow, schedulerProperties)
val launchedTable =
UIUtils.listingTable(launchedHeaders, launchedRow, submissionState)
val retryTable =
UIUtils.listingTable(
retryHeaders, retryRow, Iterable.apply(driverState.description.retryState))
val content =
<p>Driver state information for driver id {driverId}</p>
<a href={UIUtils.prependBaseUri("/")}>Back to Drivers</a>
<div class="row-fluid">
<div class="span12">
<h4>Driver state: {driverState.state}</h4>
<h4>Driver properties</h4>
{driverTable}
<h4>Driver command</h4>
{commandTable}
<h4>Driver command environment</h4>
{commandEnvTable}
<h4>Scheduler properties</h4>
{schedulerTable}
<h4>Launched state</h4>
{launchedTable}
<h4>Retry state</h4>
{retryTable}
</div>
</div>;
UIUtils.basicSparkPage(content, s"Details for Job $driverId")
}
private def launchedRow(submissionState: Option[MesosClusterSubmissionState]): Seq[Node] = {
submissionState.map { state =>
<tr>
<td>Mesos Slave ID</td>
<td>{state.slaveId.getValue}</td>
</tr>
<tr>
<td>Mesos Task ID</td>
<td>{state.taskId.getValue}</td>
</tr>
<tr>
<td>Launch Time</td>
<td>{UIUtils.formatDate(state.startDate)}</td>
</tr>
<tr>
<td>Finish Time</td>
<td>{state.finishDate.map(_.toString).getOrElse("")}</td>
</tr>
<tr>
<td>Last Task Status</td>
<td>{state.mesosTaskStatus.map(_.toString).getOrElse("")}</td>
</tr>
}.getOrElse(Seq[Node]())
}
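// Renders one row per key/value pair, masking values whose keys look
// sensitive (contain "password" or "secret").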
private def propertiesRow(properties: collection.Map[String, String]): Seq[Node] = {
properties.map { case (k, v) =>
var valueToShow = v
if (k.toLowerCase.contains("password") || k.toLowerCase.contains("secret")) {
valueToShow = "******"
}
<tr>
<td>{k}</td><td>{valueToShow}</td>
</tr>
}.toSeq
}
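// Renders the driver command: main class, arguments, classpath entries,
// Java options (with sensitive-looking options masked), and library path entries.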
private def commandRow(command: Command): Seq[Node] = {
<tr>
<td>Main class</td><td>{command.mainClass}</td>
</tr>
<tr>
<td>Arguments</td><td>{command.arguments.mkString(" ")}</td>
</tr>
<tr>
<td>Class path entries</td><td>{command.classPathEntries.mkString(" ")}</td>
</tr>
<tr>
<td>Java options</td><td>{command.javaOpts.map(javaProp => {
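// Split each option as "key value" or "key=value"; mask the value when the
// key looks sensitive, otherwise show the pair as-is.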
val Array(k, v) = if (javaProp.split(" ").size == 2) javaProp.split(" ")
else if (javaProp.split("=").size == 2) javaProp.split("=")
else Array(javaProp, "")
if (k.toLowerCase.contains("password") || k.toLowerCase.contains("secret")) s"$k******"
else s"$k $v"
}).mkString(" ")}</td>
</tr>
<tr>
<td>Library path entries</td><td>{command.libraryPathEntries.mkString(" ")}</td>
</tr>
}
private def driverRow(driver: MesosDriverDescription): Seq[Node] = {
<tr>
<td>Name</td><td>{driver.name}</td>
</tr>
<tr>
<td>Id</td><td>{driver.submissionId}</td>
</tr>
<tr>
<td>Cores</td><td>{driver.cores}</td>
</tr>
<tr>
<td>Memory</td><td>{driver.mem}</td>
</tr>
<tr>
<td>Submitted</td><td>{UIUtils.formatDate(driver.submissionDate)}</td>
</tr>
<tr>
<td>Supervise</td><td>{driver.supervise}</td>
</tr>
}
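// Renders the retry-state row (last failure status, next retry time, retry
// count); yields no row when the driver has no retry state.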
private def retryRow(retryState: Option[MesosClusterRetryState]): Seq[Node] = {
retryState.map { state =>
<tr>
<td>
{state.lastFailureStatus}
</td>
<td>
{state.nextRetry}
</td>
<td>
{state.retries}
</td>
</tr>
}.getOrElse(Seq[Node]())
}
}
| jlopezmalla/spark | resource-managers/mesos/src/main/scala/org/apache/spark/deploy/mesos/ui/DriverPage.scala | Scala | apache-2.0 | 6,743 |
package chrome.runtime
import scala.scalajs.js
import scala.scalajs.js.annotation.JSGlobal
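// Scala.js facade for the chrome.runtime JavaScript API: @js.native marks the
// members as implemented in JavaScript, and @JSGlobal binds this object to the
// global `chrome.runtime` value at runtime.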
@js.native
@JSGlobal("chrome.runtime")
private[runtime] object Impl extends js.Any {
def openOptionsPage(callback: js.Function0[_]): Unit = js.native
def getManifest(): js.Dictionary[js.Any] = js.native
def getURL(path: String): String = js.native
def sendMessage(extensionId: String, message: js.Any, options: MessageOptions, responseCallback: js.Function1[js.Any, _]): Unit = js.native
def sendMessage(extensionId: String, message: js.Any, options: MessageOptions): Unit = js.native
def sendMessage(extensionId: String, message: js.Any, responseCallback: js.Function1[js.Any, _]): Unit = js.native
def sendMessage(extensionId: String, message: js.Any): Unit = js.native
def sendMessage(message: js.Any, options: MessageOptions, responseCallback: js.Function1[js.Any, _]): Unit = js.native
def sendMessage(message: js.Any, options: MessageOptions): Unit = js.native
def sendMessage(message: js.Any, responseCallback: js.Function1[js.Any, _]): Unit = js.native
def sendMessage(message: js.Any): Unit = js.native
}
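// Illustrative usage sketch (assumed caller code, not part of this facade):
// the sendMessage overloads above mirror the optional parameters of the JS
// chrome.runtime.sendMessage, so a package-private caller could write:
//   Impl.sendMessage(js.Dynamic.literal(kind = "ping"),
//     (response: js.Any) => println(response))
// The message shape (the "kind" field) is hypothetical.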
| erdavila/auto-steamgifts | src/main/scala/chrome/runtime/Impl.scala | Scala | mit | 1,130 |