code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.twitter.finagle.client
import com.twitter.finagle.dispatch.SerialClientDispatcher
import com.twitter.finagle.netty3.Netty3Transporter
import com.twitter.finagle.param.ProtocolLibrary
import com.twitter.finagle.transport.Transport
import com.twitter.finagle.{Name, Service, ServiceFactory, Stack}
import com.twitter.util.Future
import java.nio.charset.StandardCharsets.UTF_8
import org.jboss.netty.channel.{ChannelHandlerContext, ChannelPipelineFactory, Channels, MessageEvent, SimpleChannelHandler}
import org.jboss.netty.handler.codec.string.{StringDecoder, StringEncoder}
// Netty 3 outbound handler that appends the configured delimiter character to
// every outgoing String message; non-String messages pass through unchanged.
private class DelimEncoder(delim: Char) extends SimpleChannelHandler {
  override def writeRequested(ctx: ChannelHandlerContext, evt: MessageEvent) = {
    val outgoing = evt.getMessage match {
      case s: String => s + delim
      case other     => other
    }
    // Forward downstream, preserving the original future and remote address.
    Channels.write(ctx, evt.getFuture, outgoing, evt.getRemoteAddress)
  }
}
// Client pipeline: UTF-8 string codecs plus a handler that appends a newline
// delimiter to each outgoing message.
private[finagle] object StringClientPipeline extends ChannelPipelineFactory {
  def getPipeline = {
    val pipeline = Channels.pipeline()
    pipeline.addLast("stringEncode", new StringEncoder(UTF_8))
    pipeline.addLast("stringDecode", new StringDecoder(UTF_8))
    // Fixed: the source contained '\\n' (backslash + 'n' — not a valid
    // single-character literal); the delimiter is the newline char '\n'.
    pipeline.addLast("line", new DelimEncoder('\n'))
    pipeline
  }
}
// Client pipeline with UTF-8 string codecs only; no delimiter is appended to
// outgoing messages.
private[finagle] object NoDelimStringPipeline extends ChannelPipelineFactory {
  def getPipeline = {
    val p = Channels.pipeline()
    p.addLast("stringEncode", new StringEncoder(UTF_8))
    p.addLast("stringDecode", new StringDecoder(UTF_8))
    p
  }
}
// Constants shared by the StringClient trait below.
private[finagle] object StringClient {
// Protocol-library name advertised via the ProtocolLibrary stack param.
val protocolLibrary = "string"
}
// Test helper mixin providing a newline-delimited String/String Finagle client.
private[finagle] trait StringClient {
import StringClient._
// Thin wrapper adding a convenience ping() call on top of the raw service.
case class RichClient(underlying: Service[String, String]) {
def ping(): Future[String] = underlying("ping")
}
// Mixin for clients that can build a RichClient from a destination + label.
trait StringRichClient { self: com.twitter.finagle.Client[String, String] =>
def newRichClient(dest: Name, label: String): RichClient =
RichClient(newService(dest, label))
}
// Stack-based String client.
// NOTE(review): "appendDelimeter" is a misspelling of "appendDelimiter", but it
// is part of the public case-class interface (named args / copy), so it cannot
// be renamed here without breaking callers.
case class Client(
stack: Stack[ServiceFactory[String, String]] = StackClient.newStack,
params: Stack.Params = Stack.Params.empty + ProtocolLibrary(protocolLibrary),
appendDelimeter: Boolean = true)
extends StdStackClient[String, String, Client]
with StringRichClient {
// Required by StdStackClient: rebuild this client with a new stack/params.
protected def copy1(
stack: Stack[ServiceFactory[String, String]] = this.stack,
params: Stack.Params = this.params
): Client = copy(stack, params)
protected type In = String
protected type Out = String
// Choose the pipeline based on whether a trailing '\n' should be appended.
protected def newTransporter(): Transporter[String, String] =
if (appendDelimeter) Netty3Transporter(StringClientPipeline, params)
else Netty3Transporter(NoDelimStringPipeline, params)
// One request at a time per connection.
protected def newDispatcher(
transport: Transport[In, Out]
): Service[String, String] =
new SerialClientDispatcher(transport)
}
// Default client instance used by tests.
val stringClient = Client()
}
| adriancole/finagle | finagle-core/src/test/scala/com/twitter/finagle/client/StringClient.scala | Scala | apache-2.0 | 2,909 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.types
import java.util.Objects
import org.json4s.JsonAST.JValue
import org.json4s.JsonDSL._
import org.apache.spark.annotation.DeveloperApi
/**
* The data type for User Defined Types (UDTs).
*
* This interface allows a user to make their own classes more interoperable with SparkSQL;
* e.g., by creating a [[UserDefinedType]] for a class X, it becomes possible to create
* a `DataFrame` which has class X in the schema.
*
* For SparkSQL to recognize UDTs, the UDT must be annotated with
* [[SQLUserDefinedType]].
*
* The conversion via `serialize` occurs when instantiating a `DataFrame` from another RDD.
* The conversion via `deserialize` occurs when reading from a `DataFrame`.
*/
@DeveloperApi
abstract class UserDefinedType[UserType >: Null] extends DataType with Serializable {
/** Underlying storage type for this UDT */
def sqlType: DataType
/** Paired Python UDT class, if exists. */
def pyUDT: String = null
/** Serialized Python UDT class, if exists. */
def serializedPyClass: String = null
/**
* Convert the user type to a SQL datum
*/
def serialize(obj: UserType): Any
/** Convert a SQL datum to the user type */
def deserialize(datum: Any): UserType
// JSON representation used when serializing schemas; includes the concrete
// UDT class name so the type can be reconstructed on read.
override private[sql] def jsonValue: JValue = {
("type" -> "udt") ~
("class" -> this.getClass.getName) ~
("pyClass" -> pyUDT) ~
("sqlType" -> sqlType.jsonValue)
}
/**
* Class object for the UserType
*/
def userClass: java.lang.Class[UserType]
// Size estimate delegates to the underlying storage type.
override def defaultSize: Int = sqlType.defaultSize
/**
* For UDT, asNullable will not change the nullability of its internal sqlType and just returns
* itself.
*/
override private[spark] def asNullable: UserDefinedType[UserType] = this
// A UDT accepts another UDT of the same class, or one whose user class is a
// subclass of this one's (both user classes must be non-null to compare).
override private[sql] def acceptsType(dataType: DataType): Boolean = dataType match {
case other: UserDefinedType[_] if this.userClass != null && other.userClass != null =>
this.getClass == other.getClass ||
this.userClass.isAssignableFrom(other.userClass)
case _ => false
}
override def sql: String = sqlType.sql
// Equality is by concrete UDT class; hashCode is kept consistent with that.
override def hashCode(): Int = getClass.hashCode()
override def equals(other: Any): Boolean = other match {
case that: UserDefinedType[_] => this.getClass == that.getClass
case _ => false
}
override def catalogString: String = sqlType.simpleString
}
/**
 * Helpers for unwrapping (potential) [[UserDefinedType]]s.
 */
private[spark] object UserDefinedType {

  /**
   * Returns the underlying storage type when `dt` is a [[UserDefinedType]];
   * otherwise returns `dt` itself.
   */
  def sqlType(dt: DataType): DataType = dt match {
    case userType: UserDefinedType[_] => userType.sqlType
    case other => other
  }
}
/**
 * The user defined type in Python.
 *
 * Note: This can only be accessed via Python UDF, or accessed as serialized object.
 */
private[sql] class PythonUserDefinedType(
    val sqlType: DataType,
    override val pyUDT: String,
    override val serializedPyClass: String) extends UserDefinedType[Any] {

  /* The serialization is handled by UDT class in Python */
  override def serialize(obj: Any): Any = obj

  // Fixed parameter-name typo `datam` -> `datum` to match the overridden
  // declaration in UserDefinedType.deserialize.
  override def deserialize(datum: Any): Any = datum

  /* There is no Java class for Python UDT */
  override def userClass: java.lang.Class[Any] = null

  // Unlike the Scala UDT, the JSON form carries the serialized Python class
  // (there is no JVM class to name).
  override private[sql] def jsonValue: JValue = {
    ("type" -> "udt") ~
      ("pyClass" -> pyUDT) ~
      ("serializedClass" -> serializedPyClass) ~
      ("sqlType" -> sqlType.jsonValue)
  }

  // Two Python UDTs are interchangeable iff they wrap the same Python class.
  override private[sql] def acceptsType(dataType: DataType): Boolean = dataType match {
    case other: PythonUserDefinedType => pyUDT == other.pyUDT
    case _ => false
  }

  // Equality/hashCode are both keyed on the Python class name.
  override def equals(other: Any): Boolean = other match {
    case that: PythonUserDefinedType => pyUDT == that.pyUDT
    case _ => false
  }

  override def hashCode(): Int = Objects.hashCode(pyUDT)
}
| wangmiao1981/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/types/UserDefinedType.scala | Scala | apache-2.0 | 4,568 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.catalyst.expressions.objects.Invoke
import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, GenericArrayData}
import org.apache.spark.sql.types.{IntegerType, ObjectType}
// Tests for object-related catalyst expressions (Invoke, MapObjects).
// NOTE(review): `checkEvalutionWithUnsafeProjection` (sic) is spelled this way
// in the ExpressionEvalHelper trait it comes from — do not "fix" it locally.
class ObjectExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper {
test("SPARK-16622: The returned value of the called method in Invoke can be null") {
val inputRow = InternalRow.fromSeq(Seq((false, null)))
val cls = classOf[Tuple2[Boolean, java.lang.Integer]]
val inputObject = BoundReference(0, ObjectType(cls), nullable = true)
// _2 is null here; Invoke must propagate null rather than NPE.
val invoke = Invoke(inputObject, "_2", IntegerType)
checkEvaluationWithGeneratedMutableProjection(invoke, null, inputRow)
}
test("MapObjects should make copies of unsafe-backed data") {
// test UnsafeRow-backed data
val structEncoder = ExpressionEncoder[Array[Tuple2[java.lang.Integer, java.lang.Integer]]]
val structInputRow = InternalRow.fromSeq(Seq(Array((1, 2), (3, 4))))
val structExpected = new GenericArrayData(
Array(InternalRow.fromSeq(Seq(1, 2)), InternalRow.fromSeq(Seq(3, 4))))
checkEvalutionWithUnsafeProjection(
structEncoder.serializer.head, structExpected, structInputRow)
// test UnsafeArray-backed data
val arrayEncoder = ExpressionEncoder[Array[Array[Int]]]
val arrayInputRow = InternalRow.fromSeq(Seq(Array(Array(1, 2), Array(3, 4))))
val arrayExpected = new GenericArrayData(
Array(new GenericArrayData(Array(1, 2)), new GenericArrayData(Array(3, 4))))
checkEvalutionWithUnsafeProjection(
arrayEncoder.serializer.head, arrayExpected, arrayInputRow)
// test UnsafeMap-backed data
val mapEncoder = ExpressionEncoder[Array[Map[Int, Int]]]
val mapInputRow = InternalRow.fromSeq(Seq(Array(
Map(1 -> 100, 2 -> 200), Map(3 -> 300, 4 -> 400))))
val mapExpected = new GenericArrayData(Seq(
new ArrayBasedMapData(
new GenericArrayData(Array(1, 2)),
new GenericArrayData(Array(100, 200))),
new ArrayBasedMapData(
new GenericArrayData(Array(3, 4)),
new GenericArrayData(Array(300, 400)))))
checkEvalutionWithUnsafeProjection(
mapEncoder.serializer.head, mapExpected, mapInputRow)
}
}
| aokolnychyi/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ObjectExpressionsSuite.scala | Scala | apache-2.0 | 3,218 |
package magic.gol
import akka.actor.{Actor, ActorSystem, Props, ActorLogging}
import akka.actor.ActorDSL._
import akka.io.IO
import akka.io.Tcp._
import spray.can.Http
import spray.routing._
// Application entry point: starts the actor system and binds the HTTP server.
// NOTE(review): the App trait has known initialization-order pitfalls; a plain
// main method would be safer, but changing it would alter the entry point.
object Boot extends App with config.ConfigApp {
implicit val system = ActorSystem("magic-gol-akka-system")
// Actor handling incoming HTTP requests (routes live in ServiceActor).
val serviceActor = system.actorOf(Props[ServiceActor], "service-actor")
// Receiver for the Http.Bound/CommandFailed reply from the bind below.
val bootActor = actor("bootActor")(new Act{})
// interface/port are provided by config.ConfigApp.
IO(Http).tell(Http.Bind(serviceActor, interface, port), bootActor)
}
| dvallejo/spray-example | src/main/scala/magic/gol/Boot.scala | Scala | apache-2.0 | 502 |
package com.arcusys.valamis.hook.utils
/**
* Created by Igor Borisov on 20.07.16.
*/
// Common shape for key/name/description metadata records in this hook.
trait Info {
val key: String
val name: String
val description: String
}
/** [[Info]] record describing a structure. */
case class StructureInfo(key: String, name: String, description: String) extends Info

/** Convenience factory: the description defaults to `"<name>-description"`. */
object StructureInfo {
  def apply(key: String, name: String): StructureInfo =
    StructureInfo(key, name, s"$name-description")
}
/** [[Info]] record describing a template. */
case class TemplateInfo(key: String, name: String, description: String) extends Info

/** Convenience factory: the description defaults to `"<name>-description"`. */
object TemplateInfo {
  def apply(key: String, name: String): TemplateInfo =
    TemplateInfo(key, name, s"$name-description")
}
package records
import Compat210._
import scala.annotation.StaticAnnotation
object Macros {
// Import macros only here, otherwise we collide with Compat210.whitebox
import scala.reflect.macros._
import whitebox.Context
class RecordMacros[C <: Context](val c: C) extends CommonMacros.Common[C] {
import c.universe._
protected val rImplMods = Modifiers(Flag.OVERRIDE | Flag.SYNTHETIC)
protected val synthMod = Modifiers(Flag.SYNTHETIC)
/** Set of primitive types for which the `__data*` accessor members are specialized. */
val specializedTypes: Set[Type] = {
import definitions._
Set(BooleanTpe, ByteTpe, CharTpe, ShortTpe, IntTpe, LongTpe, FloatTpe, DoubleTpe)
}
/**
 * Create a Record backed by a single generic `__data` accessor.
 *
 * The generated record implements only `__data`; reading a primitive field
 * therefore goes through boxing.
 *
 * @param schema List of (field name, field type) tuples
 * @param ancestors Traits that are mixed into the resulting Rec
 *        (e.g. Serializable). Make sure the idents are fully qualified.
 * @param fields Additional members/fields of the resulting Rec
 *        (recommended for private data fields)
 * @param dataImpl Implementation of the `__data` method. Should use the
 *        parameter `fieldName` of type String and the type parameter `T`
 *        and return a value of type `T`.
 */
def record(schema: Schema)(ancestors: Ident*)(
fields: Tree*)(dataImpl: Tree): Tree = {
// Generated __data override (rImplMods = OVERRIDE | SYNTHETIC).
val dataDef = q"""
$rImplMods def __data[T : _root_.scala.reflect.ClassTag](
fieldName: String): T = $dataImpl
"""
genRecord(schema, ancestors, fields :+ dataDef)
}
/**
 * Create a specialized record.
 *
 * By providing implementations for all or some primitive types, boxing can
 * be avoided.
 *
 * @param schema List of (field name, field type) tuples
 * @param ancestors Traits that are mixed into the resulting Rec
 *        (e.g. Serializable). Make sure the idents are fully qualified.
 * @param fields Additional members/fields of the resulting Rec
 *        (recommended for private data fields)
 * @param objectDataImpl Implementation of the `__dataObj` method. Should use
 *        the parameter `fieldName` of type String and the type parameter `T`
 *        and return a value of type `T`.
 * @param dataImpl Partial function giving the implementations of the
 *        `__data*` methods. Where it is not defined, `???` is used instead.
 *        Should use the parameter `fieldName` of type String and return a
 *        value of a corresponding type. It is called exactly once with each
 *        value in `specializedTypes`.
 */
def specializedRecord(schema: Schema)(ancestors: Ident*)(fields: Tree*)(
objectDataImpl: Tree)(dataImpl: PartialFunction[Type, Tree]): Tree = {
import definitions._
// Fallback to ??? for primitive types the caller chose not to implement.
def impl(tpe: Type) = dataImpl.applyOrElse(tpe, (_: Type) => q"???")
// One __data<Prim> method per specialized primitive type.
val specializedDefs = specializedTypes.map { t =>
val name = t.typeSymbol.name
val methodName = newTermName("__data" + name)
q"$rImplMods def $methodName(fieldName: String): $t = ${impl(t)}"
}
val objectDef = q"""
$rImplMods def __dataObj[T : _root_.scala.reflect.ClassTag](
fieldName: String): T = $objectDataImpl
"""
genRecord(schema, ancestors, fields ++ specializedDefs :+ objectDef)
}
/**
 * Generalized record constructor: the `__data` implementation is entirely
 * left to the caller.
 *
 * @param schema List of (field name, field type) tuples
 * @param ancestors Traits that are mixed into the resulting Rec
 *        (e.g. Serializable). Make sure the idents are fully qualified.
 * @param impl However you want to implement the `__data` interface.
 */
def genRecord(schema: Schema, ancestors: Seq[Ident],
impl: Seq[Tree]): Tree = {
val dataCountTree = q"$synthMod def __dataCount = ${schema.size}"
// Caller-supplied members plus the synthetic toString/hashCode/equals/etc.
val body = impl ++ Seq(
genToString(schema),
genHashCode(schema),
genDataExists(schema),
genDataAny(schema),
genEquals(schema),
dataCountTree)
// Structural refinement type listing one def per record field, so field
// access type-checks on Rec[{ def field: T }].
val structType = {
val fields = for {
(name, tpe) <- schema
} yield {
val encName = newTermName(name).encodedName.toTermName
q"def $encName: $tpe"
}
tq"{ ..$fields }"
}
q"""
new _root_.records.Rec[$structType] with ..$ancestors {
..$body
}: _root_.records.Rec[$structType] with ..$ancestors
"""
}
/**
 * Generate the toString method of a record. The resulting toString method
 * produces strings of the form:
 * {{{
 * Rec { fieldName1 = fieldValue1, fieldName2 = fieldValue2, ... }
 * }}}
 */
def genToString(schema: Schema): Tree = {
// One "name = value" fragment per field, in schema order.
val elems = for ((fname, tpe) <- schema) yield {
val fldVal = accessData(q"this", fname, tpe)
q"""$fname + " = " + $fldVal.toString"""
}
// Join fragments with ", "; None when the schema is empty.
val cont = elems.reduceLeftOption[Tree] {
case (acc, e) => q"""$acc + ", " + $e"""
}
val str = cont.fold[Tree](q""""Rec {}"""") { cont =>
q""""Rec { " + $cont + " }""""
}
q"override def toString(): String = $str"
}
/**
 * Generate the hashCode method of a record. The hashCode mixes (MurmurHash3)
 * the hashCodes of the field names (computed at compile time) with the
 * hashCodes of the field values (computed at runtime), over fields sorted by
 * name so the result is independent of declaration order.
 */
def genHashCode(schema: Schema): Tree = {
import scala.util.hashing.MurmurHash3._
val sortedSchema = schema.sortBy(_._1)
// Fixed seed for record hashing.
val recSeed = -972824572
// Hash of all field names
val nameHash = sortedSchema.foldLeft(recSeed) {
case (hash, (name, _)) => mix(hash, name.hashCode)
}
// Hashes of fields
val fieldHashes = sortedSchema.map {
case (name, tpe) =>
val data = accessData(q"this", name, tpe)
q"$data.##"
}
val mm3 = q"_root_.scala.util.hashing.MurmurHash3"
// Runtime expression mixing each field-value hash into the name hash.
val hashBody = fieldHashes.foldLeft[Tree](q"$nameHash") {
case (acc, hash) => q"$mm3.mix($acc, $hash)"
}
// Names + values were mixed in, hence size * 2 elements.
val elemCount = sortedSchema.size * 2
q"""
override def hashCode(): Int = $mm3.finalizeHash($hashBody, $elemCount)
"""
}
/** Generate the synthetic `__dataExists` member (true iff the field is in the schema). */
def genDataExists(schema: Schema): Tree = {
val lookupData = schema.map { case (name, _) => (name, q"true") }.toMap
val lookupTree =
genLookup(q"fieldName", lookupData, default = Some(q"false"))
q"$synthMod def __dataExists(fieldName: String): Boolean = $lookupTree"
}
/** Generate the synthetic `__dataAny` member (field value as Any, by name). */
def genDataAny(schema: Schema): Tree = {
val lookupData = schema.map {
case (name, tpe) =>
(name, accessData(q"this", name, tpe))
}.toMap
// mayCache = false: values must be re-read on every lookup.
val lookupTree = genLookup(q"fieldName", lookupData, mayCache = false)
q"$synthMod def __dataAny(fieldName: String): Any = $lookupTree"
}
/**
 * Generate the equals method for Records. Two records are equal iff:
 * - They have the same number of fields
 * - Their fields have the same names
 * - Values of corresponding fields compare equal
 */
def genEquals(schema: Schema): Tree = {
val thisCount = schema.size
// Same field count plus existence of each of our names implies equal name sets.
val existence = schema.map { case (n, _) => q"that.__dataExists($n)" }
val equality = schema.map {
case (name, tpe) =>
q"${accessData(q"this", name, tpe)} == that.__dataAny($name)"
}
val tests = existence ++ equality
q"""
override def equals(that: Any) = that match {
case that: _root_.records.Rec[Any] if that.__dataCount == $thisCount =>
${tests.fold[Tree](q"true") { case (x, y) => q"$x && $y" }}
case _ => false
}
"""
}
/**
 * Generate a lookup amongst the keys in `data`, mapping to the tree values.
 * Behaves like an exhaustive pattern match on the strings, but may be
 * implemented more efficiently (a @switch on a discriminating character).
 * If `default` is None, `nameTree` is assumed to evaluate to one of the keys
 * of `data`; otherwise the default tree is used for unknown keys.
 * If `mayCache` is true, the implementation might decide to store the
 * evaluated trees somewhere (at runtime); otherwise the trees are evaluated
 * each time the resulting tree is evaluated.
 */
def genLookup(nameTree: Tree, data: Map[String, Tree],
default: Option[Tree] = None, mayCache: Boolean = true): Tree = {
val count = data.size + default.size
if (count == 0) {
q"???"
} else if (count == 1) {
// Shortcut for only one case
data.values.headOption.orElse(default).get
} else if (data.size == 1) {
// No need doing switch. We have a normal and a default case
val (keyStr, tree) = data.head
q"""if ($nameTree == $keyStr) $tree else ${default.get}"""
} else if (data.contains("")) {
// Special case this, since the per-char distinction won't work
val lookupRest = genLookup(nameTree, data - "", default, mayCache)
q"""if ($nameTree == "") ${data("")} else $lookupRest"""
} else {
val keys = data.keys.toList
// Only indices valid for every key can be used as discriminators.
val minSize = keys.map(_.length).min
// Pick the character index that distinguishes the most keys.
val optSplitIdx = {
val optimality = for (i <- 0 until minSize)
yield (i, keys.map(_.charAt(i)).distinct.size)
optimality.maxBy(_._2)._1
}
val grouped = data.groupBy(_._1.charAt(optSplitIdx))
val cases0 = grouped.map {
case (c, innerData) =>
val body = {
if (innerData.size == 1 && default.isEmpty)
// Only one key matches and no default. Done
innerData.values.head
else
genTrivialLookup(nameTree, innerData, default)
}
cq"$c => $body"
}
val cases1 = cases0 ++ default.map(default => cq"_ => $default")
val switchAnnot = tq"_root_.scala.annotation.switch"
q"""
($nameTree.charAt($optSplitIdx): @$switchAnnot) match {
case ..$cases1
}
"""
}
}
/** Plain string match over the keys of `data`, with an optional default case. */
private def genTrivialLookup(nameTree: Tree, data: Map[String, Tree],
default: Option[Tree]) = {
val cases0 = data.map {
case (name, res) => cq"$name => $res"
}
val cases1 = cases0 ++ default.map(default => cq"_ => $default")
q"$nameTree match { case ..$cases1 }"
}
/**
 * Macro that implements Rec.applyDynamic and Rec.applyDynamicNamed.
 * You probably won't need this.
 */
def recordApply(v: Seq[c.Expr[(String, Any)]]): c.Expr[Rec[Any]] = {
val constantLiteralsMsg =
"Records can only be constructed with constant keys (string literals)."
val noEmptyStrMsg =
"Records may not have a field with an empty name"
// Accept both Tuple2 literals and a -> b arrows; keys must be non-empty
// string literals known at compile time.
val tuples = v.map(_.tree).map {
case Tuple2(Literal(Constant(s: String)), v) =>
if (s == "") c.abort(NoPosition, noEmptyStrMsg)
else (s, v)
case Literal(Constant(s: String)) -> v =>
if (s == "") c.abort(NoPosition, noEmptyStrMsg)
else (s, v)
case Tuple2(_, _) =>
c.abort(NoPosition, constantLiteralsMsg)
case _ -> _ =>
c.abort(NoPosition, constantLiteralsMsg)
case x =>
c.abort(NoPosition, "Records can only be constructed with tuples (a, b) and arrows a -> b.")
}
// Widen so e.g. a literal 1 gets type Int rather than the singleton 1.type.
val schema = tuples.map { case (s, v) => (s, v.tpe.widen) }
checkDuplicate(schema)
val args = tuples.map { case (s, v) => q"($s,$v)" }
// Back the record with a Map[String, Any]; __data casts on retrieval.
val data = q"Map[String,Any](..$args)"
val resultTree =
record(schema)()(
q"private val _data = $data")(
q"_data(fieldName).asInstanceOf[T]")
c.Expr[Rec[Any]](resultTree)
}
/** Aborts macro expansion when `schema` contains a repeated field name. */
private def checkDuplicate(schema: Seq[(String, c.Type)]): Unit = {
  val repeated = schema
    .groupBy { case (name, _) => name }
    .collect { case (name, occurrences) if occurrences.size > 1 => name }
    .toList
    .sorted
  repeated match {
    case Nil =>
      () // all field names unique
    case only :: Nil =>
      c.abort(NoPosition, s"Field $only is defined more than once.")
    case several =>
      c.abort(NoPosition, s"Fields ${several.mkString(", ")} are defined more than once.")
  }
}
}
// Entry point invoked by the Rec.applyDynamic macro: dispatches on the
// (compile-time constant) method name and only accepts "apply".
def apply_impl(c: Context)(method: c.Expr[String])(v: c.Expr[(String, Any)]*): c.Expr[Rec[Any]] = {
import c.universe._
method.tree match {
case Literal(Constant(str: String)) if str == "apply" =>
new RecordMacros[c.type](c).recordApply(v)
case Literal(Constant(str: String)) =>
// Any other literal name: mimic the compiler's "not a member" error.
val targetName = c.prefix.actualType.typeSymbol.fullName
c.abort(
NoPosition,
s"value $str is not a member of $targetName")
case _ =>
// Non-literal method name: cannot be resolved at compile time.
val methodName = c.macroApplication.symbol.name
c.abort(
NoPosition,
s"You may not invoke Rec.$methodName with a non-literal method name.")
}
}
}
| scala-records/scala-records | core/src/main/scala/records/Macros.scala | Scala | bsd-3-clause | 13,422 |
package controllers
import javax.inject.Inject
import com.mohiva.play.silhouette.api.Authenticator.Implicits._
import com.mohiva.play.silhouette.api._
import com.mohiva.play.silhouette.api.exceptions.ProviderException
import com.mohiva.play.silhouette.api.repositories.AuthInfoRepository
import com.mohiva.play.silhouette.api.util.{ Clock, Credentials }
import com.mohiva.play.silhouette.impl.exceptions.IdentityNotFoundException
import com.mohiva.play.silhouette.impl.providers._
import models.services.{ UserService }
import net.ceedubs.ficus.Ficus._
import play.api.Configuration
import play.api.i18n.{ I18nSupport, Messages, MessagesApi }
import play.api.libs.concurrent.Execution.Implicits._
import play.api.mvc.Controller
import utils.auth.DefaultEnv
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps
// Handles the credentials sign-in flow (form display + submission) using
// Silhouette's cookie-based authenticator.
class SignInController @Inject() (
val messagesApi: MessagesApi,
silhouette: Silhouette[DefaultEnv],
userService: UserService,
authInfoRepository: AuthInfoRepository,
credentialsProvider: CredentialsProvider,
socialProviderRegistry: SocialProviderRegistry,
configuration: Configuration,
clock: Clock,
envDAO: models.daos.EnvDAO
)
extends Controller with I18nSupport {
// GET: render the sign-in form. Only reachable when not authenticated.
def view = silhouette.UnsecuredAction.async { implicit request =>
Future.successful(Ok(views.html.signIn(
new myform.MySignInForm(envDAO.getDosendmail), socialProviderRegistry, envDAO.getDosendmail
)))
}
// POST: validate the form, authenticate the credentials, then create and
// embed an authenticator cookie before redirecting to the index page.
def submit = silhouette.UnsecuredAction.async { implicit request =>
// bindFromRequest returns either the (invalid) form itself or the bound data.
new myform.MySignInForm(envDAO.getDosendmail).bindFromRequest match {
case form: myform.MySignInForm =>
Future.successful(BadRequest(views.html.signIn(form, socialProviderRegistry, envDAO.getDosendmail)))
case data: myform.MySignInFormData => {
val credentials = Credentials(data.email, data.password)
credentialsProvider.authenticate(credentials).flatMap { loginInfo =>
val result = Redirect(routes.ApplicationController.index())
userService.retrieve(loginInfo).flatMap {
// Known user who has not activated the account yet: prompt activation.
case Some(user) if !user.activated =>
Future.successful(Ok(views.html.activateAccount(data.email)))
case Some(user) =>
val c = configuration.underlying
silhouette.env.authenticatorService.create(loginInfo).map {
// "Remember me": extend the authenticator/cookie lifetimes from config.
case authenticator if data.rememberMe =>
authenticator.copy(
expirationDateTime = clock.now + c.as[FiniteDuration]("silhouette.authenticator.rememberMe.authenticatorExpiry"),
idleTimeout = c.getAs[FiniteDuration]("silhouette.authenticator.rememberMe.authenticatorIdleTimeout"),
cookieMaxAge = c.getAs[FiniteDuration]("silhouette.authenticator.rememberMe.cookieMaxAge")
)
case authenticator => authenticator
}.flatMap { authenticator =>
silhouette.env.eventBus.publish(LoginEvent(user, request))
// init serializes the authenticator; embed attaches it to the redirect.
silhouette.env.authenticatorService.init(authenticator).flatMap { v =>
silhouette.env.authenticatorService.embed(v, result)
}
}
// NOTE(review): debug println left in production code — should use a logger.
case None => { println("user not found"); Future.failed(new IdentityNotFoundException("Couldn't find user")) }
}
}.recover {
// Bad password / unknown provider: back to the form with a flash message.
case e: ProviderException =>
Redirect(routes.SignInController.view()).flashing("error" -> Messages("invalid.credentials"))
}
}
}
}
}
| serversideapps/silhmojs | server/app/controllers/SignInController.scala | Scala | apache-2.0 | 3,528 |
package scribe.slf4j
import java.util.concurrent.ConcurrentHashMap
import org.slf4j.{ILoggerFactory, Logger, ScribeLoggerAdapter}
/**
 * SLF4J `ILoggerFactory` backed by scribe.
 *
 * Adapters are cached per logger name, so repeated `getLogger` calls for the
 * same name return the same instance.
 */
object ScribeLoggerFactory extends ILoggerFactory {
  // Cache of logger name -> adapter; created lazily on first use.
  private lazy val map = new ConcurrentHashMap[String, Logger]()

  /**
   * Returns the cached adapter for `name`, creating it atomically on first
   * use. `computeIfAbsent` replaces the previous get/putIfAbsent sequence,
   * which could construct a throwaway adapter under contention.
   */
  override def getLogger(name: String): Logger =
    map.computeIfAbsent(name, new java.util.function.Function[String, Logger] {
      override def apply(loggerName: String): Logger = new ScribeLoggerAdapter(loggerName)
    })
}
| outr/scribe | slf4j2/src/main/scala/scribe/slf4j/ScribeLoggerFactory.scala | Scala | mit | 531 |
package pl.touk.nussknacker.ui.api
import java.time.LocalDateTime
import akka.http.scaladsl.model.{ContentTypeRange, StatusCodes}
import akka.http.scaladsl.server
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.http.scaladsl.unmarshalling.{FromEntityUnmarshaller, Unmarshaller}
import cats.instances.all._
import cats.syntax.semigroup._
import de.heikoseeberger.akkahttpcirce.FailFastCirceSupport
import io.circe.Json
import io.circe.syntax._
import org.scalatest._
import org.scalatest.matchers.BeMatcher
import pl.touk.nussknacker.engine.api.deployment.simple.SimpleStateStatus
import pl.touk.nussknacker.engine.api.deployment.ProcessActionType
import pl.touk.nussknacker.engine.api.process.{ProcessName, VersionId}
import pl.touk.nussknacker.engine.build.ScenarioBuilder
import pl.touk.nussknacker.restmodel.{CustomActionRequest, CustomActionResponse}
import pl.touk.nussknacker.restmodel.process.ProcessIdWithName
import pl.touk.nussknacker.restmodel.processdetails._
import pl.touk.nussknacker.test.PatientScalaFutures
import pl.touk.nussknacker.ui.api.helpers.TestFactory._
import pl.touk.nussknacker.ui.api.helpers.{EspItTest, SampleProcess, TestFactory, TestProcessingTypes}
import pl.touk.nussknacker.ui.process.exception.ProcessIllegalAction
import pl.touk.nussknacker.ui.process.marshall.ProcessConverter
import pl.touk.nussknacker.ui.process.repository.ProcessActivityRepository.ProcessActivity
import pl.touk.nussknacker.ui.util.MultipartUtils
class ManagementResourcesSpec extends FunSuite with ScalatestRouteTest with FailFastCirceSupport
with Matchers with PatientScalaFutures with OptionValues with BeforeAndAfterEach with BeforeAndAfterAll with EspItTest {
private implicit final val string: FromEntityUnmarshaller[String] = Unmarshaller.stringUnmarshaller.forContentTypes(ContentTypeRange.*)
private val processName: ProcessName = ProcessName(SampleProcess.process.id)
private val fixedTime = LocalDateTime.now()
// Matcher asserting that the last action is a Deploy of the given version by
// the test user. performedAt is normalized to fixedTime on the actual value
// before comparison, since the real timestamp is nondeterministic.
private def deployedWithVersions(versionId: Long): BeMatcher[Option[ProcessAction]] =
BeMatcher(equal(
Option(ProcessAction(VersionId(versionId), fixedTime, user().username, ProcessActionType.Deploy, Option.empty, Option.empty, buildInfo))
).matcher[Option[ProcessAction]]
).compose[Option[ProcessAction]](_.map(_.copy(performedAt = fixedTime)))
test("process deployment should be visible in process history") {
saveProcessAndAssertSuccess(SampleProcess.process.id, SampleProcess.process)
deployProcess(SampleProcess.process.id) ~> check {
status shouldBe StatusCodes.OK
getSampleProcess ~> check {
decodeDetails.lastAction shouldBe deployedWithVersions(2)
updateProcessAndAssertSuccess(SampleProcess.process.id, SampleProcess.process)
deployProcess(SampleProcess.process.id) ~> check {
getSampleProcess ~> check {
decodeDetails.lastAction shouldBe deployedWithVersions(2)
}
}
}
}
}
test("process during deploy can't be deploy again") {
createDeployedProcess(processName, testCategoryName, isSubprocess = false)
deploymentManager.withProcessStateStatus(SimpleStateStatus.DuringDeploy) {
deployProcess(processName.value) ~> check {
status shouldBe StatusCodes.Conflict
}
}
}
test("canceled process can't be canceled again") {
createDeployedCanceledProcess(processName, testCategoryName, isSubprocess = false)
deploymentManager.withProcessStateStatus(SimpleStateStatus.Canceled) {
cancelProcess(processName.value) ~> check {
status shouldBe StatusCodes.Conflict
}
}
}
test("can't deploy archived process") {
val id = createArchivedProcess(processName)
val processIdWithName = ProcessIdWithName(id, processName)
deploymentManager.withProcessStateStatus(SimpleStateStatus.Canceled) {
deployProcess(processName.value) ~> check {
status shouldBe StatusCodes.Conflict
responseAs[String] shouldBe ProcessIllegalAction.archived(ProcessActionType.Deploy, processIdWithName).message
}
}
}
test("can't deploy fragment") {
val id = createProcess(processName, testCategoryName, isSubprocess = true)
val processIdWithName = ProcessIdWithName(id, processName)
deployProcess(processName.value) ~> check {
status shouldBe StatusCodes.Conflict
responseAs[String] shouldBe ProcessIllegalAction.subprocess(ProcessActionType.Deploy, processIdWithName).message
}
}
test("can't cancel fragment") {
val id = createProcess(processName, testCategoryName, isSubprocess = true)
val processIdWithName = ProcessIdWithName(id, processName)
deployProcess(processName.value) ~> check {
status shouldBe StatusCodes.Conflict
responseAs[String] shouldBe ProcessIllegalAction.subprocess(ProcessActionType.Deploy, processIdWithName).message
}
}
test("deploys and cancels with comment") {
saveProcessAndAssertSuccess(SampleProcess.process.id, SampleProcess.process)
deployProcess(SampleProcess.process.id, true, Some("deployComment")) ~> check {
cancelProcess(SampleProcess.process.id, true, Some("cancelComment")) ~> check {
status shouldBe StatusCodes.OK
//TODO: remove Deployment:, Stop: after adding custom icons
val expectedDeployComment = "Deployment: deployComment"
val expectedStopComment = "Stop: cancelComment"
getActivity(ProcessName(SampleProcess.process.id)) ~> check {
val comments = responseAs[ProcessActivity].comments.sortBy(_.id)
comments.map(_.content) shouldBe List(expectedDeployComment, expectedStopComment)
val firstCommentId::secondCommentId::Nil = comments.map(_.id)
Get(s"/processes/${SampleProcess.process.id}/deployments") ~> withAllPermissions(processesRoute) ~> check {
val deploymentHistory = responseAs[List[ProcessAction]]
val curTime = LocalDateTime.now()
deploymentHistory.map(_.copy(performedAt = curTime)) shouldBe List(
ProcessAction(VersionId(2), curTime, user().username, ProcessActionType.Cancel, Some(secondCommentId), Some(expectedStopComment), Map()),
ProcessAction(VersionId(2), curTime, user().username, ProcessActionType.Deploy, Some(firstCommentId), Some(expectedDeployComment), TestFactory.buildInfo)
)
}
}
}
}
}
test("rejects deploy without comment if comment needed") {
saveProcessAndAssertSuccess(SampleProcess.process.id, SampleProcess.process)
deployProcess(SampleProcess.process.id, true) ~> check {
rejection shouldBe server.ValidationRejection("Comment is required", None)
}
}
test("deploy technical process and mark it as deployed") {
createProcess(processName, testCategoryName, false)
deployProcess(processName.value) ~> check { status shouldBe StatusCodes.OK }
getProcess(processName) ~> check {
val processDetails = responseAs[ProcessDetails]
processDetails.lastAction shouldBe deployedWithVersions(1)
processDetails.isDeployed shouldBe true
}
}
test("recognize process cancel in deployment list") {
saveProcessAndAssertSuccess(SampleProcess.process.id, SampleProcess.process)
deployProcess(SampleProcess.process.id) ~> check {
status shouldBe StatusCodes.OK
getSampleProcess ~> check {
decodeDetails.lastAction shouldBe deployedWithVersions(2)
cancelProcess(SampleProcess.process.id) ~> check {
getSampleProcess ~> check {
decodeDetails.lastAction should not be None
decodeDetails.isCanceled shouldBe true
}
}
}
}
}
test("recognize process deploy and cancel in global process list") {
saveProcessAndAssertSuccess(SampleProcess.process.id, SampleProcess.process)
deployProcess(SampleProcess.process.id) ~> check {
status shouldBe StatusCodes.OK
getProcesses ~> check {
val process = findJsonProcess(responseAs[String])
process.value.lastActionVersionId shouldBe Some(2L)
process.value.isDeployed shouldBe true
cancelProcess(SampleProcess.process.id) ~> check {
getProcesses ~> check {
val reprocess = findJsonProcess(responseAs[String])
reprocess.value.lastActionVersionId shouldBe Some(2L)
reprocess.value.isCanceled shouldBe true
}
}
}
}
}
test("not authorize user with write permission to deploy") {
saveProcessAndAssertSuccess(SampleProcess.process.id, SampleProcess.process)
Post(s"/processManagement/deploy/${SampleProcess.process.id}") ~> withPermissions(deployRoute(), testPermissionWrite) ~> check {
rejection shouldBe server.AuthorizationFailedRejection
}
}
test("return error on deployment failure") {
saveProcessAndAssertSuccess(SampleProcess.process.id, SampleProcess.process)
deploymentManager.withFailingDeployment {
deployProcess(SampleProcess.process.id) ~> check {
status shouldBe StatusCodes.InternalServerError
}
}
}
test("snaphots process") {
saveProcessAndAssertSuccess(SampleProcess.process.id, SampleProcess.process)
snapshot(SampleProcess.process.id) ~> check {
status shouldBe StatusCodes.OK
responseAs[String] shouldBe MockDeploymentManager.savepointPath
}
}
test("stops process") {
saveProcessAndAssertSuccess(SampleProcess.process.id, SampleProcess.process)
stop(SampleProcess.process.id) ~> check {
status shouldBe StatusCodes.OK
responseAs[String] shouldBe MockDeploymentManager.stopSavepointPath
}
}
test("return test results") {
saveProcessAndAssertSuccess(SampleProcess.process.id, SampleProcess.process)
val displayableProcess = ProcessConverter.toDisplayable(SampleProcess.process.toCanonicalProcess, TestProcessingTypes.Streaming)
val multiPart = MultipartUtils.prepareMultiParts("testData" -> "ala\nbela", "processJson" -> displayableProcess.asJson.noSpaces)()
Post(s"/processManagement/test/${SampleProcess.process.id}", multiPart) ~> withPermissions(deployRoute(), testPermissionDeploy |+| testPermissionRead) ~> check {
status shouldEqual StatusCodes.OK
val ctx = responseAs[Json] .hcursor
.downField("results")
.downField("nodeResults")
.downField("endsuffix")
.downArray
.downField("context")
.downField("variables")
ctx
.downField("output")
.downField("pretty")
.downField("message")
.focus shouldBe Some(Json.fromString("message"))
ctx
.downField("input")
.downField("pretty")
.downField("firstField")
.focus shouldBe Some(Json.fromString("ala"))
}
}
test("return test results of errors, including null") {
import pl.touk.nussknacker.engine.spel.Implicits._
val process = {
ScenarioBuilder
.streaming("sampleProcess")
.parallelism(1)
.source("startProcess", "csv-source")
.filter("input", "new java.math.BigDecimal(null) == 0")
.emptySink("end", "kafka-string", "topic" -> "'end.topic'", "value" -> "''")
}
saveProcessAndAssertSuccess(process.id, process)
val displayableProcess = ProcessConverter.toDisplayable(process.toCanonicalProcess, TestProcessingTypes.Streaming)
val multiPart = MultipartUtils.prepareMultiParts("testData" -> "ala\nbela", "processJson" -> displayableProcess.asJson.noSpaces)()
Post(s"/processManagement/test/${process.id}", multiPart) ~> withPermissions(deployRoute(), testPermissionDeploy |+| testPermissionRead) ~> check {
status shouldEqual StatusCodes.OK
}
}
test("refuses to test if too much data") {
import pl.touk.nussknacker.engine.spel.Implicits._
val process = {
ScenarioBuilder
.streaming("sampleProcess")
.parallelism(1)
.source("startProcess", "csv-source")
.emptySink("end", "kafka-string", "topic" -> "'end.topic'")
}
saveProcessAndAssertSuccess(process.id, process)
val displayableProcess = ProcessConverter.toDisplayable(process.toCanonicalProcess, TestProcessingTypes.Streaming)
List((1 to 50).mkString("\n"), (1 to 50000).mkString("-")).foreach { tooLargeData =>
val multiPart = MultipartUtils.prepareMultiParts("testData" -> tooLargeData, "processJson" -> displayableProcess.asJson.noSpaces)()
Post(s"/processManagement/test/${process.id}", multiPart) ~> withPermissions(deployRoute(), testPermissionDeploy |+| testPermissionRead) ~> check {
status shouldEqual StatusCodes.BadRequest
}
}
}
test("execute valid custom action") {
saveProcessAndAssertSuccess(SampleProcess.process.id, SampleProcess.process)
customAction(SampleProcess.process.id, CustomActionRequest("hello")) ~> check {
status shouldBe StatusCodes.OK
responseAs[CustomActionResponse] shouldBe CustomActionResponse(isSuccess = true, msg = "Hi")
}
}
test("execute non existing custom action") {
saveProcessAndAssertSuccess(SampleProcess.process.id, SampleProcess.process)
customAction(SampleProcess.process.id, CustomActionRequest("non-existing")) ~> check {
status shouldBe StatusCodes.NotFound
responseAs[CustomActionResponse] shouldBe CustomActionResponse(isSuccess = false, msg = "non-existing is not existing")
}
}
test("execute not implemented custom action") {
saveProcessAndAssertSuccess(SampleProcess.process.id, SampleProcess.process)
customAction(SampleProcess.process.id, CustomActionRequest("not-implemented")) ~> check {
status shouldBe StatusCodes.NotImplemented
responseAs[CustomActionResponse] shouldBe CustomActionResponse(isSuccess = false, msg = "not-implemented is not implemented")
}
}
test("execute custom action with not allowed process status") {
saveProcessAndAssertSuccess(SampleProcess.process.id, SampleProcess.process)
customAction(SampleProcess.process.id, CustomActionRequest("invalid-status")) ~> check {
status shouldBe StatusCodes.Forbidden
responseAs[CustomActionResponse] shouldBe CustomActionResponse(isSuccess = false, msg = s"Scenario status: WARNING is not allowed for action invalid-status")
}
}
def decodeDetails: ProcessDetails = responseAs[ProcessDetails]
}
| TouK/nussknacker | ui/server/src/test/scala/pl/touk/nussknacker/ui/api/ManagementResourcesSpec.scala | Scala | apache-2.0 | 14,420 |
package org.denigma.genes
import org.apache.avro.Schema
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.models.{Transcript, CDS, Exon, UTR}
import org.bdgenomics.adam.predicates.HighQualityReadPredicate
import org.bdgenomics.adam.projections.{AlignmentRecordField, Projection}
import org.bdgenomics.adam.rdd.ADAMContext
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.adam.rdd.read.comparisons.ComparisonTraversalEngine
import org.bdgenomics.adam.rich.ReferenceMappingContext.FeatureReferenceMapping
import org.bdgenomics.formats.avro.{AlignmentRecord, Feature, Strand}
import ADAMContext._
// Assembles gene models (gene -> transcripts -> exons/UTRs/CDS) from GTF files using
// Spark RDD joins over ADAM's feature and gene data model.
// NOTE(review): sc is @transient, presumably so closures don't capture the driver-side
// context — confirm against how instances of this class are used on executors.
case class GeneCounter(@transient sc: SparkContext) extends ExpressionsBase{
  // ADAM context derived lazily from the (transient) SparkContext.
  @transient
  lazy val ac = new ADAMContext(sc)
  // Parses a GTF file into an RDD of Features, one per parsed GTF record.
  def loadGTFFeatures(filePath:String) = {
    sc.textFile(filePath).flatMap(new FixedGTFParser().parse)
  }
  // Loads the named GTF file (path = input + name) keyed by feature type ("exon", "UTR", ...).
  // NOTE(review): featuresByKey already calls .cache(); the extra .cache() here is redundant.
  def loadFeatures(input:String,name:String):RDD[(String,Feature)]= {
    val filePath = input + name
    val features: RDD[Feature] = this.loadGTFFeatures(filePath)
    featuresByKey(features).cache()
  }
  // Builds the full gene hierarchy for one GTF file: transcripts grouped by gene id,
  // each carrying its exons, UTRs and coding regions.
  // NOTE(review): the take(100)/println below is debug output left in a hot path.
  def loadGenes(input:String,name:String)= {
    val byKey = this.loadFeatures(input,name)
    val exons = this.exonsByTranscript(byKey)
    val utrs = this.utrsByTranscript(byKey)
    exons.take(100).foreach{
      e=>println(s"EXON = "+e._2+s" of TRANSCRIPT ${e._1} \\n")
    }
    val cds: RDD[(String, Iterable[CDS])] = this.cdsByTranscript(byKey)
    transcriptsByGenes(byKey,exons,utrs,cds)
  }
  // Loads several alignment files into a filename -> reads map, filtering to high-quality
  // reads and projecting to the configured fields.
  // NOTE(review): `pred` is constructed but never used — the predicate class is passed instead.
  def loadGeneMap(names:String*)(implicit path:String): Map[String, RDD[AlignmentRecord]] = {
    val files = names.map(n=>path+n)
    val pred = new HighQualityReadPredicate
    files.map(f=>f->ac.loadAlignments(f,predicate = Some(classOf[HighQualityReadPredicate]),projection =Some(projection))).toMap
  }
  // Keys features by their GTF feature type and caches for repeated per-type filtering.
  def featuresByKey(features:RDD[Feature]): RDD[(String, Feature)] = { features.keyBy(_.getFeatureType).cache() }
  // Groups coding regions by transcript id; a CDS feature may list several parent transcripts.
  def cdsByTranscript(typePartitioned:RDD[(String, Feature)] ): RDD[(String, Iterable[CDS])] = {
    typePartitioned.filter(_._1 == "CDS").flatMap {
      case ("CDS", ftr: Feature) =>
        val ids: Seq[String] = ftr.getParentIds.map(_.toString)
        ids.map(transcriptId => (transcriptId,
          CDS(transcriptId, strand(ftr.getStrand), FeatureReferenceMapping.getReferenceRegion(ftr))))
    }.groupByKey()
  }
  // Groups untranslated regions by transcript id, mirroring cdsByTranscript.
  def utrsByTranscript(typePartitioned:RDD[(String, Feature)]): RDD[(String, Iterable[UTR])] = {
    typePartitioned.filter(_._1 == "UTR").flatMap {
      case ("UTR", ftr: Feature) =>
        val ids: Seq[String] = ftr.getParentIds.map(_.toString)
        ids.map(transcriptId => (transcriptId,
          UTR(transcriptId, strand(ftr.getStrand), FeatureReferenceMapping.getReferenceRegion(ftr))))
    }.groupByKey()
  }
  // Groups exons by transcript id.
  // NOTE(review): unlike the CDS/UTR variants, parent ids are not mapped via _.toString here —
  // confirm whether the implicit conversion is intended or an inconsistency.
  def exonsByTranscript(typePartitioned:RDD[(String, Feature)] ): RDD[(String, Iterable[Exon])] = {
    typePartitioned.filter(_._1 == "exon").flatMap {
      case ("exon", ftr: Feature) =>
        val ids: Seq[String] = ftr.getParentIds
        ids.map(transcriptId => (transcriptId,
          Exon(ftr.getFeatureId, transcriptId, strand(ftr.getStrand), FeatureReferenceMapping.getReferenceRegion(ftr))))
    }.groupByKey()
  }
  // Joins transcript features with their exons and (optional) UTRs/CDS, builds Transcript
  // objects, and groups the result by gene id.
  def transcriptsByGenes(typePartitioned:RDD[(String,Feature)],
                         exonsByTranscript:RDD[(String, Iterable[Exon])],
                         utrsByTranscript:RDD[(String, Iterable[UTR])],
                         cdsByTranscript: RDD[(String, Iterable[CDS])] ) = {
    typePartitioned.filter(_._1 == "transcript").map {
      case ("transcript", ftr: Feature) => (ftr.getFeatureId.toString, ftr)
    }.join(exonsByTranscript)
      .leftOuterJoin(utrsByTranscript)
      .leftOuterJoin(cdsByTranscript)
      .flatMap {
        // There really only should be _one_ parent listed in this flatMap, but since
        // getParentIds is modeled as returning a List[], we'll write it this way.
        case (transcriptId: String, (((tgtf: Feature, exons: Iterable[Exon]),
          utrs: Option[Iterable[UTR]]),
          cds: Option[Iterable[CDS]])) =>
          val geneIds: Seq[String] = tgtf.getParentIds.map(_.toString) // should be length 1
          geneIds.map(f = geneId => (geneId,
            Transcript(transcriptId, Seq(transcriptId), geneId,
              strand(tgtf.getStrand),
              exons, cds.getOrElse(Seq()), utrs.getOrElse(Seq()))))
      }.groupByKey()
  }
  /*
  def compareTranscripts(from:String*) = {
    val samples: Seq[(String, RDD[Feature])] = from.map(f=>f->this.parseGTF(f))
    val typedSamples: Seq[(String, RDD[(String, Feature)])] = samples.map{case(s,features)=>s->features.keyBy(_.getFeatureType).cache() }
  }
  def compareExons(from:String*)(to:String) = {
    val samples: Seq[(String, RDD[Feature])] = from.map(f=>f->this.parseGTF(f))
    val typedSamples: Seq[(String, RDD[(String, Feature)])] = samples.map{case(s,features)=>s->features.keyBy(_.getFeatureType).cache() }
    //val exonsSamples = typedSamples.map{ case(s,fs)=>s->exonsByTranscript(fs) }
    val exonsSamples = typedSamples.map{ case(s,fs)=>exonsByTranscript(fs) }
    sc.union(exonsSamples).saveAsTextFile(to)
  }*/
  // Renders a UTR's genomic interval for debug/report output.
  protected def regionInfo(utr:UTR)= s"from ${utr.region.start} to ${utr.region.end} of ${utr.region.length()}"
  /*
  /**
   * Compares UTRs
   * @param from
   * @param to
   * @return
   */
  def compareUTRS(from:String*)(to:String) = {
    val samples: Seq[(String, RDD[Feature])] = from.map(f=>f->this.parseGTF(f))
    val typedSamples: Seq[(String, RDD[(String, Feature)])] = samples.map{case(s,features)=>s->features.keyBy(_.getFeatureType).cache() }
    val utrSamples: Seq[(String, RDD[(String, Iterable[UTR])])] = typedSamples.map{ case(s,fs)=>s->utrsByTranscript(fs) }
    val info: RDD[(String, String)] = sc.union( utrSamples.map{
      case (sample,uters)=>
        uters.map{
          case (tr,uts)=>
            val info = sample+": "+uts.foldLeft("")((acc,el)=>acc+regionInfo(el))+"\\n"
            tr-> info
        }
    } )
    val comparison: RDD[String] = info.groupByKey().map{
      case (key,strs)=>s"----------------------\\nTRANSCRIPT $key"+strs.reduce(_+_)
    }
    comparison.coalesce(1,true).saveAsTextFile(to)
    println("GTF FILES: \\n"+from.reduce((a,b)=>a+"\\n"+b)+"\\nwere processed")
  }
  */
}
| antonkulaga/gene-expressions | src/main/scala/org/denigma/genes/GeneCounter.scala | Scala | apache-2.0 | 6,379 |
package edu.neu.coe.csye._7200.minidatabase2
import scala.io.Source
import scala.util._
/**
* @author scalaprof
*/
/**
 * Loads a small CSV "database" of people and their heights.
 * @author scalaprof
 */
object MiniDatabase2 extends App {
  /** Lifts a ternary function into Option: defined only when all three operands are. */
  def map3[A,B,C,D](a: Option[A], b: Option[B], c: Option[C])(f: (A,B,C) => D): Option[D] =
    for (x <- a; y <- b; z <- c) yield f(x, y, z)
  /** Lifts a binary function into Try: the first Failure, if any, is propagated unchanged. */
  def map2[A,B,C](a: Try[A], b: Try[B])(f: (A,B) => C): Try[C] =
    for (x <- a; y <- b) yield f(x, y)
  /**
   * Reads the given CSV file and parses each line into a Try[Entry].
   * The source is closed even if reading or parsing throws (was leaked on failure before).
   */
  def load(filename: String): List[Try[Entry]] = {
    val src = Source.fromFile(filename)
    try src.getLines.toList.map(e => Entry.parse(e.split(",")))
    finally src.close()
  }
  /** Buckets a height into a coarse human-readable category by its feet component. */
  def measure(height: Height) = height match {
    case Height(8,_) => "giant"
    case Height(7,_) => "very tall"
    case Height(6,_) => "tall"
    case Height(5,_) => "normal"
    case Height(_,_) => "short"
  }
  // Entry point (App): load and print the database named by the first argument, if given.
  if (args.length>0) {
    val db = load(args(0))
    print(db)
  }
}
case class Entry(name: Name, height: Height)
// An imperial height: `in` is the inches remainder on top of `feet`.
case class Height(feet: Int, in: Int) {
  // Total height expressed in inches.
  def inches = feet*12+in
}
object Entry {
  /** Combines two already-parsed components into an Entry; the first Failure wins. */
  def parse(name: Try[Name],height: Try[Height]): Try[Entry] =
    MiniDatabase2.map2(name, height)(Entry.apply)
  def parse(name: String, height: String): Try[Entry] = parse(Name.parse(name),Height.parse(height))
  /**
   * Parses a CSV row: column 0 is the name, column 3 the height.
   * The indexing is wrapped in Try so a short row yields Failure instead of throwing.
   */
  def parse(entry: Seq[String]): Try[Entry] =
    for {
      name <- Try(entry(0))
      height <- Try(entry(3))
      parsed <- parse(name, height)
    } yield parsed
}
object Height {
  /** Parses separate feet/inches strings; non-numeric input yields Failure via Try(toInt). */
  def parse(ft: String, in: String): Try[Height] =
    MiniDatabase2.map2(Try(ft.toInt), Try(in.toInt))(Height.apply)
  val rHeightFtIn = """^\\s*(\\d+)\\s*(?:ft|\\')(\\s*(\\d+)\\s*(?:in|\\"))?\\s*$""".r
  /**
   * Parses a combined height string such as a feet-and-optional-inches form.
   * Group 1 is the feet, group 3 the optional inches (null when absent, so default "0").
   * Any string that does not match the pattern yields Failure.
   */
  def parse(height: String): Try[Height] = height match {
    case rHeightFtIn(feet, _, inches) => parse(feet, Option(inches).getOrElse("0"))
    case _ => Failure(new IllegalArgumentException(s"unable to parse height: $height"))
  }
}
case class Name(first: String, middle: Option[String], last: String)
object Name {
  val rName="""^(\\w+)\\s+((.*)\\s+)?(\\w+)$""".r
  /**
   * Parses "First [Middle ]Last" into a [[Name]].
   * The middle group is optional: an unmatched group binds null, which Option(...)
   * converts to None. Non-matching input yields Failure.
   */
  def parse(name: String): Try[Name] = name match {
    case rName(first, _, middle, last) => Success(Name(first, Option(middle), last))
    case _ => Failure(new IllegalArgumentException(s"unable to parse name: $name"))
  }
}
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.commands
import org.neo4j.cypher.internal.compiler.v2_3._
import org.neo4j.cypher.internal.compiler.v2_3.commands.expressions._
import org.neo4j.cypher.internal.compiler.v2_3.commands.predicates.{Equals, HasLabel}
import org.neo4j.cypher.internal.compiler.v2_3.commands.values.TokenType._
import org.neo4j.cypher.internal.compiler.v2_3.commands.values.{KeyToken, TokenType}
import org.neo4j.cypher.internal.compiler.v2_3.mutation._
import org.neo4j.cypher.internal.frontend.v2_3.test_helpers.CypherFunSuite
// Verifies that parsed MERGE patterns compile to the expected MergeNodeAction,
// including label/property expectations and ON CREATE / ON MATCH side effects.
class MergeAstTest extends CypherFunSuite {
  // MERGE of a bare identifier: no labels, properties, expectations or set actions.
  test("simple_node_without_labels_or_properties") {
    // given
    val from = mergeAst(patterns = Seq(ParsedEntity(A, Identifier(A), Map.empty, Seq.empty)))
    // then
    from.nextStep() should equal(Seq(MergeNodeAction(A, Map.empty, Seq.empty, Seq.empty, Seq.empty, Seq.empty, None)))
  }
  // MERGE with a label: the label becomes both an expectation (HasLabel) and an
  // ON CREATE label-set action.
  test("node_with_labels") {
    // given
    val from = mergeAst(patterns = Seq(ParsedEntity(A, Identifier(A), Map.empty, Seq(KeyToken.Unresolved(labelName, Label)))))
    // then
    val a = from.nextStep().head
    val b = Seq(MergeNodeAction(A,
      props = Map.empty,
      labels = Seq(Label(labelName)),
      expectations = Seq(nodeHasLabelPredicate(A)),
      onCreate = Seq(setNodeLabels(A)),
      onMatch = Seq.empty,
      maybeNodeProducer = NO_PRODUCER)).head
    a should equal(b)
  }
  // MERGE with a property: the property becomes an equality expectation and an
  // ON CREATE property-set action.
  test("node_with_properties") {
    // given
    val from = mergeAst(patterns = Seq(ParsedEntity(A, Identifier(A), Map(propertyKey.name -> expression), Seq.empty)))
    from.nextStep() should equal(Seq(MergeNodeAction(A,
      props = Map(propertyKey -> expression),
      labels = Seq.empty,
      expectations = Seq(Equals(Property(Identifier(A), propertyKey), expression)),
      onCreate = Seq(PropertySetAction(Property(Identifier(A), propertyKey), expression)),
      onMatch = Seq.empty,
      maybeNodeProducer = NO_PRODUCER)))
  }
  // Explicit ON CREATE actions are carried through to the MergeNodeAction's onCreate list.
  test("node_with_on_create") {
    // given MERGE A ON CREATE SET A.prop = exp
    val from = mergeAst(
      patterns = Seq(ParsedEntity(A, Identifier(A), Map.empty, Seq.empty)),
      onActions = Seq(OnAction(On.Create, Seq(PropertySetAction(Property(Identifier(A), propertyKey), expression)))))
    // then
    from.nextStep() should equal(Seq(MergeNodeAction(A,
      props = Map.empty,
      labels = Seq.empty,
      expectations = Seq.empty,
      onCreate = Seq(PropertySetAction(Property(Identifier(A), propertyKey), expression)),
      onMatch = Seq.empty,
      maybeNodeProducer = NO_PRODUCER)))
  }
  // Explicit ON MATCH actions are carried through to the onMatch list.
  test("node_with_on_match") {
    // given MERGE A ON MATCH SET A.prop = exp
    val from = mergeAst(
      patterns = Seq(ParsedEntity(A, Identifier(A), Map.empty, Seq.empty)),
      onActions = Seq(OnAction(On.Match, Seq(PropertySetAction(Property(Identifier(A), propertyKey), expression)))))
    // then
    from.nextStep() should equal(Seq(MergeNodeAction(A,
      props = Map.empty,
      labels = Seq.empty,
      expectations = Seq.empty,
      onCreate = Seq.empty,
      onMatch = Seq(PropertySetAction(Property(Identifier(A), propertyKey), expression)),
      maybeNodeProducer = NO_PRODUCER)))
  }
  // Shared fixture values and small builders used by the tests above.
  val A = "a"
  val B = "b"
  val NO_PATHS = Seq.empty
  val NO_PRODUCER = None
  val labelName = "Label"
  val propertyKey = PropertyKey("property")
  val expression = TimestampFunction()
  def nodeHasLabelPredicate(id: String) = HasLabel(Identifier(id), KeyToken.Unresolved(labelName, TokenType.Label))
  def setNodeLabels(id: String) = LabelAction(Identifier(id), LabelSetOp, Seq(KeyToken.Unresolved(labelName, TokenType.Label)))
  def setProperty(id: String) = PropertySetAction(Property(Identifier(id), propertyKey), expression)
  def mergeAst(patterns: Seq[AbstractPattern] = Seq.empty,
               onActions: Seq[OnAction] = Seq.empty,
               matches: Seq[Pattern] = Seq.empty,
               create: Seq[UpdateAction] = Seq.empty) = MergeAst(patterns, onActions, matches, create)
}
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/test/scala/org/neo4j/cypher/internal/compiler/v2_3/commands/MergeAstTest.scala | Scala | apache-2.0 | 4,775 |
package io.plasmap.geohash.test
import io.plasmap.generator.OsmObjectGenerator
import io.plasmap.geohash._
import org.specs2.ScalaCheck
import org.specs2.execute.Result
import org.specs2.mutable.Specification
/**
* Created by janschulte on 12/02/15.
*/
// Round-trip specification for GeoHash: encoding a random point and decoding the hash
// must reproduce the coordinates within the tolerance implied by the precision.
// Refactored: the nine structurally identical tests now share one helper parameterized
// by the hash under test and the tolerance exponent; names and tolerances are unchanged.
class GeoHashSpec extends Specification with ScalaCheck {
  sequential
  val generator = OsmObjectGenerator()
  private val hashUltraLow = new GeoHash(PrecisionUltraLow_630KM)
  private val hashVeryLow = new GeoHash(PrecisionVeryLow_80KM)
  private val hashLow = new GeoHash(PrecisionLow_20KM)
  private val hashMedium = new GeoHash(PrecisionMedium_5KM)
  private val hashHigh = new GeoHash(PrecisionHigh_100M)
  private val hashVeryHigh = new GeoHash(PrecisionVeryHigh_1M)
  private val hashUltra = new GeoHash(PrecisionUltra_1CM)
  private val hashUltraHigh = new GeoHash(PrecisionUltraHigh_1MM)
  private val hashMediumLow = new GeoHash(PrecisionLow_24BIT)
  val testCases = 100000
  /**
   * Shared round-trip property: encode/decode `testCases` random points and require the
   * decoded longitude within 180 / 2^fractionBits and latitude within 90 / 2^fractionBits.
   */
  private def roundTripsWithin(hash: GeoHash, fractionBits: Int): Result =
    Result.unit {
      (1 to testCases) foreach { _ =>
        val expectedPoint = generator.generatePoint
        val encoded = hash.encodeParallel(expectedPoint.lon, expectedPoint.lat)
        val (lon, lat) = hash.decodeParallel(encoded)
        lon must beCloseTo(expectedPoint.lon, 180 / Math.pow(2, fractionBits))
        lat must beCloseTo(expectedPoint.lat, 90 / Math.pow(2, fractionBits))
      }
    }
  "The GeoHash" should {
    s"encode/decode $testCases points at ultra low precision" in {
      roundTripsWithin(hashUltraLow, 5)
    }
    s"encode/decode $testCases points at medium low precision" in {
      roundTripsWithin(hashMediumLow, 5)
    }
    s"encode/decode $testCases points at very low precision" in {
      roundTripsWithin(hashVeryLow, 8)
    }
    s"encode/decode $testCases points at low precision" in {
      roundTripsWithin(hashLow, 10)
    }
    s"encode/decode $testCases points at medium precision" in {
      roundTripsWithin(hashMedium, 13)
    }
    s"encode/decode $testCases points at high precision" in {
      roundTripsWithin(hashHigh, 18)
    }
    s"encode/decode $testCases points at very high precision" in {
      roundTripsWithin(hashVeryHigh, 24)
    }
    s"encode/decode $testCases points at ultra precision" in {
      roundTripsWithin(hashUltra, 30)
    }
    s"encode/decode $testCases points at ultra high precision" in {
      roundTripsWithin(hashUltraHigh, 32)
    }
  }
}
package com.trueaccord
import utest._
import com.trueaccord.foods._
import com.trueaccord.advanced._
// Scala.js/utest suite exercising ScalaPB-generated message classes:
// lens-style updates, binary round-trips and custom proto options.
object JsTest extends TestSuite {
  val tests = TestSuite {
    // Fixture: a menu with three foods built via the generated lens update DSL.
    val myMenu = Menu()
      .update(
        _.menuName := "Our menu",
        _.foods := Seq(
          Food().withName("Apple").withMeasure("1 unit (3\\" dia)").withCalories(Calories(95)),
          Food().withName("Chicken McNuggets").withMeasure("4 pieces (64 g)").withCalories(Calories(193)),
          Food().withName("California Roll").withMeasure("8 sushies").withCalories(Calories(400))))
    // A lens update must be equivalent to the generated copy(...).
    'updateWorks {
      assert(myMenu.update(_.menuName := "Another menu") ==
        myMenu.copy(menuName = "Another menu"))
    }
    // Serializing and re-parsing must reproduce the original message.
    'parseFromIsInverseOfByteArray {
      assert(Menu.parseFrom(myMenu.toByteArray) == myMenu)
    }
    // Custom field-level proto option must be readable from the scala descriptor.
    'customOptionWorks {
      assert(
        MyMessage.scalaDescriptor.findFieldByName("my_field").get.getOptions
          .extension(AdvancedProto.foo) == "abcdef")
    }
  }
}
| thesamet/scalapbjs-test | src/test/scala/com/trueaccord/JsTest.scala | Scala | apache-2.0 | 985 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.check.status
import io.gatling.commons.validation._
import io.gatling.core.check._
import io.gatling.core.check.extractor._
import io.gatling.core.session._
import io.gatling.http.check.HttpCheck
import io.gatling.http.check.HttpCheckBuilders._
import io.gatling.http.response.Response
trait HttpStatusCheckType
object HttpStatusCheckBuilder {
  // Check builder over the response status code.
  val Status = {
    // Extractor pulling the status code from the response; a missing code means the
    // response was never received and is reported as a failure.
    val statusExtractor = new Extractor[Response, Int] with SingleArity {
      val name = "status"
      def apply(prepared: Response) = prepared.statusCode match {
        case None => "Response wasn't received".failure
        case code => code.success
      }
    }.expressionSuccess
    new DefaultFindCheckBuilder[HttpStatusCheckType, Response, Int](statusExtractor)
  }
}
// Wires the status check into the HTTP check machinery. The raw Response serves as both
// the actual and the prepared type, so the pass-through preparer is sufficient.
object HttpStatusProvider extends CheckProtocolProvider[HttpStatusCheckType, HttpCheck, Response, Response] {
  override val specializer: Specializer[HttpCheck, Response] = StatusSpecializer
  override val preparer: Preparer[Response, Response] = PassThroughResponsePreparer
}
| pwielgolaski/gatling | gatling-http/src/main/scala/io/gatling/http/check/status/HttpStatusCheckBuilder.scala | Scala | apache-2.0 | 1,667 |
/*
* ____ ____ _____ ____ ___ ____
* | _ \\ | _ \\ | ____| / ___| / _/ / ___| Precog (R)
* | |_) | | |_) | | _| | | | | /| | | _ Advanced Analytics Engine for NoSQL Data
* | __/ | _ < | |___ | |___ |/ _| | | |_| | Copyright (C) 2010 - 2013 SlamData, Inc.
* |_| |_| \\_\\ |_____| \\____| /__/ \\____| All Rights Reserved.
*
* This program is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License along with this
* program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.precog.common
package ingest
import kafka._
import java.nio.ByteBuffer
import org.specs2.ScalaCheck
import org.specs2.mutable._
import org.scalacheck._
import org.scalacheck.Gen._
import blueeyes.json._
import blueeyes.json.serialization._
import scalaz._
// Property-based round-trip check for event message serialization.
object EventMessageSerializationSpec extends Specification with ScalaCheck with ArbitraryEventMessage {
  // Drives the property with randomly generated event messages.
  implicit val arbMsg = Arbitrary(genRandomEventMessage)
  "Event message serialization " should {
    // Encoding then decoding must reproduce the original message; a decoding exception
    // is rethrown so the failing generator input is reported.
    "maintain event content" in { check { (in: EventMessage) =>
      val buf = EventMessageEncoding.toMessageBytes(in)
      EventMessageEncoding.read(buf) must beLike {
        case Success(\\/-(out)) => out must_== in
        case Failure(Extractor.Thrown(ex)) => throw ex
      }
    }}
  }
}
| precog/platform | common/src/test/scala/com/precog/common/ingest/EventMessageSerializationSpec.scala | Scala | agpl-3.0 | 1,851 |
package scala.meta
package internal
package prettyprinters
import org.scalameta.show._
import Show.{ sequence => s, repeat => r, indent => i, newline => n }
import scala.meta.internal.{ast => impl}
import scala.meta.prettyprinters.{Syntax, Structure}
// Structure (raw AST) pretty-printer for scala.meta trees.
object TreeStructure {
  def apply[T <: Tree]: Structure[T] = {
    // Renders a tree as ProductPrefix(arg1, arg2, ...).
    Structure(x => s(x.productPrefix, "(", {
      // Default rendering: show every constructor argument recursively.
      def default = {
        def showRaw(x: Any): String = x match {
          case el: String => enquote(el, DoubleQuotes)
          case el: Tree => el.show[Structure]
          case el: Nil.type => "Nil"
          case el @ Seq(Seq()) => "Seq(Seq())"
          case el: Seq[_] => "Seq(" + el.map(showRaw).mkString(", ") + ")"
          case el: None.type => "None"
          case el: Some[_] => "Some(" + showRaw(el.get) + ")"
          case el => el.toString
        }
        r(x.productIterator.map(showRaw).toList, ", ")
      }
      // Literals (except quasis and string literals) are rendered via their syntax instead.
      x match {
        case x: impl.Quasi => default
        case x: impl.Lit.String => s(enquote(x.value, DoubleQuotes))
        case x: Lit => import scala.meta.dialects.Scala211; s(x.show[Syntax])
        case x => default
      }
    }, ")"))
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.aggregate
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.{AggregateExpression, AggregateFunction}
import org.apache.spark.sql.execution.metric.SQLMetric
/**
 * An iterator used to evaluate [[AggregateFunction]]. It assumes the input rows have been
 * sorted by values of [[groupingExpressions]], so each group's rows arrive contiguously
 * and only a single aggregation buffer needs to be kept alive at a time.
 */
class SortBasedAggregationIterator(
    partIndex: Int,
    groupingExpressions: Seq[NamedExpression],
    valueAttributes: Seq[Attribute],
    inputIterator: Iterator[InternalRow],
    aggregateExpressions: Seq[AggregateExpression],
    aggregateAttributes: Seq[Attribute],
    initialInputBufferOffset: Int,
    resultExpressions: Seq[NamedExpression],
    newMutableProjection: (Seq[Expression], Seq[Attribute]) => MutableProjection,
    numOutputRows: SQLMetric)
  extends AggregationIterator(
    partIndex,
    groupingExpressions,
    valueAttributes,
    aggregateExpressions,
    aggregateAttributes,
    initialInputBufferOffset,
    resultExpressions,
    newMutableProjection) {

  /**
   * Creates a new aggregation buffer and initializes buffer values
   * for all aggregate functions.
   */
  private def newBuffer: InternalRow = {
    val bufferSchema = aggregateFunctions.flatMap(_.aggBufferAttributes)
    val bufferRowSize: Int = bufferSchema.length

    val genericMutableBuffer = new GenericInternalRow(bufferRowSize)
    // An UnsafeRow-backed buffer can only be used when every buffer field's
    // data type is mutable in place.
    val useUnsafeBuffer = bufferSchema.map(_.dataType).forall(UnsafeRow.isMutable)

    val buffer = if (useUnsafeBuffer) {
      val unsafeProjection =
        UnsafeProjection.create(bufferSchema.map(_.dataType))
      unsafeProjection.apply(genericMutableBuffer)
    } else {
      genericMutableBuffer
    }
    initializeBuffer(buffer)
    buffer
  }

  ///////////////////////////////////////////////////////////////////////////
  // Mutable states for sort based aggregation.
  ///////////////////////////////////////////////////////////////////////////

  // The partition key of the current partition.
  private[this] var currentGroupingKey: UnsafeRow = _

  // The partition key of next partition.
  private[this] var nextGroupingKey: UnsafeRow = _

  // The first row of next partition.
  private[this] var firstRowInNextGroup: InternalRow = _

  // Indicates if we have a new group of rows from the sorted input iterator
  private[this] var sortedInputHasNewGroup: Boolean = false

  // The aggregation buffer used by the sort-based aggregation. Reused across
  // groups; re-initialized after each group's output row is produced.
  private[this] val sortBasedAggregationBuffer: InternalRow = newBuffer

  /** Reads the first input row (if any) to seed the first group's key and row. */
  protected def initialize(): Unit = {
    if (inputIterator.hasNext) {
      initializeBuffer(sortBasedAggregationBuffer)
      val inputRow = inputIterator.next()
      // Copy defensively: the row object returned by the input iterator may be reused.
      nextGroupingKey = groupingProjection(inputRow).copy()
      firstRowInNextGroup = inputRow.copy()
      sortedInputHasNewGroup = true
    } else {
      // This inputIter is empty.
      sortedInputHasNewGroup = false
    }
  }

  initialize()

  /** Processes rows in the current group. It will stop when it finds a new group. */
  protected def processCurrentSortedGroup(): Unit = {
    currentGroupingKey = nextGroupingKey
    // Now, we will start to find all rows belonging to this group.
    // We create a variable to track if we see the next group.
    var findNextPartition = false
    // firstRowInNextGroup is the first row of this group. We first process it.
    processRow(sortBasedAggregationBuffer, firstRowInNextGroup)

    // The search will stop when we see the next group or there is no
    // input row left in the iter.
    while (!findNextPartition && inputIterator.hasNext) {
      // Get the grouping key.
      val currentRow = inputIterator.next()
      val groupingKey = groupingProjection(currentRow)

      // Check if the current row belongs to the current group.
      if (currentGroupingKey == groupingKey) {
        processRow(sortBasedAggregationBuffer, currentRow)
      } else {
        // We find a new group.
        findNextPartition = true
        // Copy defensively: the row objects may be reused by the input iterator.
        nextGroupingKey = groupingKey.copy()
        firstRowInNextGroup = currentRow.copy()
      }
    }
    // We have not seen a new group. It means that there is no new row in the input
    // iter. The current group is the last group of the iter.
    if (!findNextPartition) {
      sortedInputHasNewGroup = false
    }
  }

  ///////////////////////////////////////////////////////////////////////////
  // Iterator's public methods
  ///////////////////////////////////////////////////////////////////////////

  /** True while there is at least one more group left to aggregate and emit. */
  override final def hasNext: Boolean = sortedInputHasNewGroup

  /** Aggregates the next group and returns its single output row. */
  override final def next(): UnsafeRow = {
    if (hasNext) {
      // Process the current group.
      processCurrentSortedGroup()
      // Generate output row for the current group.
      val outputRow = generateOutput(currentGroupingKey, sortBasedAggregationBuffer)
      // Initialize buffer values for the next group.
      initializeBuffer(sortBasedAggregationBuffer)
      numOutputRows += 1
      outputRow
    } else {
      // no more result
      throw new NoSuchElementException
    }
  }

  /**
   * Produces the single output row over a freshly initialized (empty) buffer.
   * NOTE(review): presumably used when there are no grouping expressions and the
   * input is empty (e.g. a global aggregate over zero rows) — confirm with callers.
   */
  def outputForEmptyGroupingKeyWithoutInput(): UnsafeRow = {
    initializeBuffer(sortBasedAggregationBuffer)
    generateOutput(UnsafeRow.createFromByteArray(0, 0), sortBasedAggregationBuffer)
  }
}
| bravo-zhang/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/SortBasedAggregationIterator.scala | Scala | apache-2.0 | 6,158 |
package com.codahale.jerkson.deser
import com.fasterxml.jackson.databind._
import scala.collection.{Traversable, MapLike, immutable, mutable}
import com.codahale.jerkson.AST.{JNull, JValue}
import scala.collection.generic.{MapFactory, GenericCompanion}
import com.fasterxml.jackson.databind.deser.Deserializers
import com.fasterxml.jackson.databind.Module.SetupContext
/**
 * Jackson [[Deserializers]] provider that maps Scala collection, option, numeric
 * and case-class types to the corresponding jerkson deserializer.
 *
 * The dispatch below is a raw-class equality chain, so ordering matters: more
 * specific classes are tested before their more general aliases. Returning
 * `null` tells Jackson that this provider cannot handle the type and it should
 * fall back to its own lookup.
 */
class ScalaDeserializers(classLoader: ClassLoader, context: SetupContext) extends Deserializers.Base {
  override def findBeanDeserializer(javaType: JavaType, config: DeserializationConfig,
                                    beanDesc: BeanDescription): JsonDeserializer[_] = {
    val klass = javaType.getRawClass
    if (klass == classOf[Range] || klass == classOf[immutable.Range]) {
      new RangeDeserializer
    } else if (klass == classOf[StringBuilder]) {
      new StringBuilderDeserializer
    // --- Seq-like collections, each built via its companion object ---
    } else if (klass == classOf[List[_]] || klass == classOf[immutable.List[_]]) {
      createSeqDeserializer(config, javaType, List)
    } else if (klass == classOf[Seq[_]] || klass == classOf[immutable.Seq[_]] ||
      klass == classOf[Iterable[_]] || klass == classOf[Traversable[_]] ||
      klass == classOf[immutable.Traversable[_]]) {
      createSeqDeserializer(config, javaType, Seq)
    } else if (klass == classOf[Stream[_]] || klass == classOf[immutable.Stream[_]]) {
      createSeqDeserializer(config, javaType, Stream)
    } else if (klass == classOf[immutable.Queue[_]]) {
      createSeqDeserializer(config, javaType, immutable.Queue)
    } else if (klass == classOf[Vector[_]]) {
      createSeqDeserializer(config, javaType, Vector)
    } else if (klass == classOf[IndexedSeq[_]] || klass == classOf[immutable.IndexedSeq[_]]) {
      createSeqDeserializer(config, javaType, IndexedSeq)
    } else if (klass == classOf[mutable.ResizableArray[_]]) {
      createSeqDeserializer(config, javaType, mutable.ResizableArray)
    } else if (klass == classOf[mutable.ArraySeq[_]]) {
      createSeqDeserializer(config, javaType, mutable.ArraySeq)
    } else if (klass == classOf[mutable.MutableList[_]]) {
      createSeqDeserializer(config, javaType, mutable.MutableList)
    } else if (klass == classOf[mutable.Queue[_]]) {
      createSeqDeserializer(config, javaType, mutable.Queue)
    } else if (klass == classOf[mutable.ListBuffer[_]]) {
      createSeqDeserializer(config, javaType, mutable.ListBuffer)
    } else if (klass == classOf[mutable.ArrayBuffer[_]] || klass == classOf[mutable.Traversable[_]]) {
      createSeqDeserializer(config, javaType, mutable.ArrayBuffer)
    // --- Sets (BitSets get a dedicated deserializer) ---
    } else if (klass == classOf[collection.BitSet] || klass == classOf[immutable.BitSet]) {
      new BitSetDeserializer(immutable.BitSet)
    } else if (klass == classOf[mutable.BitSet]) {
      new BitSetDeserializer(mutable.BitSet)
    } else if (klass == classOf[immutable.HashSet[_]]) {
      createSeqDeserializer(config, javaType, immutable.HashSet)
    } else if (klass == classOf[Set[_]] || klass == classOf[immutable.Set[_]] || klass == classOf[collection.Set[_]]) {
      createSeqDeserializer(config, javaType, Set)
    } else if (klass == classOf[mutable.HashSet[_]]) {
      createSeqDeserializer(config, javaType, mutable.HashSet)
    } else if (klass == classOf[mutable.LinkedHashSet[_]]) {
      createSeqDeserializer(config, javaType, mutable.LinkedHashSet)
    } else if (klass == classOf[Iterator[_]] || klass == classOf[BufferedIterator[_]]) {
      val elementType = javaType.containedType(0)
      new IteratorDeserializer(elementType)
    // --- Maps; key type determines which specialized deserializer applies ---
    } else if (klass == classOf[immutable.HashMap[_, _]] || klass == classOf[Map[_, _]] || klass == classOf[collection.Map[_, _]]) {
      createImmutableMapDeserializer(config, javaType, immutable.HashMap)
    } else if (klass == classOf[immutable.IntMap[_]]) {
      val valueType = javaType.containedType(0)
      new IntMapDeserializer(valueType)
    } else if (klass == classOf[immutable.LongMap[_]]) {
      val valueType = javaType.containedType(0)
      new LongMapDeserializer(valueType)
    } else if (klass == classOf[mutable.HashMap[_, _]] || klass == classOf[mutable.Map[_, _]]) {
      // Mutable maps are only supported with String keys; otherwise defer to Jackson.
      if (javaType.containedType(0).getRawClass == classOf[String]) {
        val valueType = javaType.containedType(1)
        new MutableMapDeserializer(valueType)
      } else {
        null
      }
    } else if (klass == classOf[mutable.LinkedHashMap[_, _]]) {
      if (javaType.containedType(0).getRawClass == classOf[String]) {
        val valueType = javaType.containedType(1)
        new MutableLinkedHashMapDeserializer(valueType)
      } else {
        null
      }
    // --- Scala-specific scalar-ish types ---
    } else if (klass == classOf[Option[_]]) {
      createOptionDeserializer(config, javaType)
    } else if (classOf[JValue].isAssignableFrom(klass) || klass == JNull.getClass) {
      new JValueDeserializer(config.getTypeFactory, klass)
    } else if (klass == classOf[BigInt]) {
      new BigIntDeserializer
    } else if (klass == classOf[BigDecimal]) {
      new BigDecimalDeserializer
    } else if (klass == classOf[Either[_,_]]) {
      new EitherDeserializer(config, javaType)
    // Case classes (and other Products) are handled reflectively, last so the
    // specific Product subtypes above take precedence.
    } else if (classOf[Product].isAssignableFrom(klass)) {
      new CaseClassDeserializer(config, javaType, classLoader)
    } else null
  }

  // Builds a SeqDeserializer for any Traversable-like collection, using the
  // collection's companion object to construct the result.
  private def createSeqDeserializer[CC[X] <: Traversable[X]](config: DeserializationConfig,
                                                             javaType: JavaType,
                                                             companion: GenericCompanion[CC]) = {
    val elementType = javaType.containedType(0)
    new SeqDeserializer[CC](companion, elementType)
  }

  // Builds an OptionDeserializer for the option's element type.
  private def createOptionDeserializer(config: DeserializationConfig,
                                       javaType: JavaType) = {
    val elementType = javaType.containedType(0)
    new OptionDeserializer(elementType)
  }

  // Builds a deserializer for immutable maps. Only String, Int and Long keys
  // are supported; any other key type returns null so Jackson can take over.
  private def createImmutableMapDeserializer[CC[A, B] <: Map[A, B] with MapLike[A, B, CC[A, B]]](config: DeserializationConfig,
                                                                                                 javaType: JavaType,
                                                                                                 companion: MapFactory[CC]) = {
    val keyType = javaType.containedType(0)
    val valueType = javaType.containedType(1)
    if (keyType.getRawClass == classOf[String]) {
      new ImmutableMapDeserializer[CC](companion, valueType)
    } else if (keyType.getRawClass == classOf[Int] || keyType.getRawClass == classOf[java.lang.Integer]) {
      new IntMapDeserializer(valueType)
    } else if (keyType.getRawClass == classOf[Long] || keyType.getRawClass == classOf[java.lang.Long]) {
      new LongMapDeserializer(valueType)
    } else {
      null
    }
  }
}
| gilt/jerkson | src/main/scala/com/codahale/jerkson/deser/ScalaDeserializers.scala | Scala | mit | 6,721 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.sources
import org.apache.spark.sql.types.{CalendarIntervalType, DataType, NullType}
/**
 * Datasource declaration for tests that exercise the Parquet format.
 * Parquet cannot persist NullType or CalendarIntervalType columns, so those
 * are reported as unsupported.
 */
trait ParquetRelationTrait extends MustDeclareDatasource {

  /** Returns true for every data type except NullType and CalendarIntervalType. */
  override def supportsDataType(dataType: DataType): Boolean =
    dataType match {
      case _: NullType | _: CalendarIntervalType => false
      case _                                     => true
    }

  /** The short name of the underlying data source. */
  override def dataSourceName(): String = "parquet"
}
| hortonworks-spark/cloud-integration | cloud-examples/src/test/scala/org/apache/spark/sql/sources/ParquetRelationTrait.scala | Scala | apache-2.0 | 1,272 |
object Test {
  // After the first attempt to make separately compiled value
  // classes respect the privacy of constructors, we got:
  //
  // exception when typing v.a().==(v.a())/class scala.reflect.internal.Trees$Apply
  // constructor V in class V cannot be accessed in object Test in file test/files/pos/t6601/UsePrivateValueClass_2.scala
  // scala.reflect.internal.Types$TypeError: constructor V in class V cannot be accessed in object Test
  //
  // This is a positive compiler test: both methods below must typecheck even
  // though V's constructor is private and V is compiled in a separate file.

  // Accessing a member of the value class must not require its constructor.
  def foo(v: V) = v.a == v.a
  // Comparing value-class instances directly must also typecheck.
  def bar(v: V) = v == v
}
| AlexSikia/dotty | tests/untried/pos/t6601/UsePrivateValueClass_2.scala | Scala | bsd-3-clause | 507 |
/*
* Copyright 2017 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.mongodb
package options
import com.mongodb.client.model.DeleteOptions
/**
 * Options that apply to MongoDB delete operations.
 *
 * @param collation optional collation to use for the delete.
 */
final case class DeleteOpts(collation: Option[Collation]) {

  /** Returns a copy of these options with the specified collation set. */
  def collation(c: Collation): DeleteOpts = this.copy(collation = Some(c))

  // Translation to the underlying Java driver's representation; built lazily
  // once and cached thereafter.
  private[mongodb] lazy val legacy: DeleteOptions = {
    val opts = new DeleteOptions()
    collation match {
      case Some(c) => opts.collation(c.legacy)
      case None    => ()
    }
    opts
  }
}
object DeleteOpts {
  /** Default delete options: no collation configured. */
  val default: DeleteOpts = DeleteOpts(collation = None)
}
| nrinaudo/kantan.mongodb | core/src/main/scala/kantan/mongodb/options/DeleteOpts.scala | Scala | apache-2.0 | 1,046 |
package services.documents.pdf
import java.io.File
import com.google.common.collect.ComparisonChain
import com.itextpdf.text.pdf.{ AcroFields, PdfName, PdfReader, PdfStamper }
import org.apache.commons.io.FileUtils
import org.apache.commons.io.output.NullOutputStream
import scala.collection.JavaConverters._
/**
 * Orders AcroForm fields by their physical position in the document:
 * ascending page number, then top-to-bottom, then left-to-right.
 *
 * Note that the second comparison is deliberately reversed (right before left)
 * because PDF's origin is at the bottom-left, so a larger y coordinate means
 * higher on the page.
 */
object FieldPageAppearanceOrdering extends Ordering[AcroFields.Item] {
  override def compare(left: AcroFields.Item, right: AcroFields.Item): Int = {
    // RECT index 0 is treated as the x coordinate and index 1 as the y
    // coordinate of the field's rectangle (PDF /Rect is [llx, lly, urx, ury]).
    // NOTE(review): assumes widget 0 is representative for multi-widget fields.
    ComparisonChain.start()
      .compare(left.getPage(0).intValue(), right.getPage(0).intValue())
      .compare(right.getValue(0).getAsArray(PdfName.RECT).asDoubleArray()(1), left.getValue(0).getAsArray(PdfName.RECT).asDoubleArray()(1))
      .compare(left.getValue(0).getAsArray(PdfName.RECT).asDoubleArray()(0), right.getValue(0).getAsArray(PdfName.RECT).asDoubleArray()(0))
      .result()
  }
}
/**
 * Util for listing fields in a given PDF
 *
 * Will display fields in the order they appear physically in the document
 * (page, then top-to-bottom, then left-to-right).
 *
 * Usage: first program argument is the path of the PDF to inspect.
 */
object PDFListFieldsUtil extends App {
  val pdfTemplate = FileUtils.openInputStream(new File(args(0)))
  val reader = new PdfReader(pdfTemplate)
  // A stamper is needed to access the form fields; its output is discarded.
  val stamper = new PdfStamper(reader, new NullOutputStream())
  val form = stamper.getAcroFields
  val fields: Map[String, AcroFields.Item] = form.getFields.asScala.toMap
  // Sort by physical appearance using the ordering defined above.
  val items = fields.toSeq.sortBy(_._2)(FieldPageAppearanceOrdering)
  // NOTE(review): the stream/reader/stamper are never closed; tolerable for a
  // one-shot command-line tool, but worth confirming.
  items.foreach {
    case (k, v) =>
      println(s"$k page: ${v.getPage(0).intValue} " +
        s"x: ${v.getValue(0).getAsArray(PdfName.RECT).asDoubleArray()(0)} " +
        s"y: ${v.getValue(0).getAsArray(PdfName.RECT).asDoubleArray()(1)}")
  }
}
| vetafi/vetafi-web | app/services/documents/pdf/PDFListFieldsUtil.scala | Scala | apache-2.0 | 1,636 |
package com.twitter.finagle.partitioning
import com.twitter.conversions.DurationOps._
import com.twitter.finagle.Stack.Params
import com.twitter.finagle._
import com.twitter.finagle.partitioning.KetamaPartitioningService.NoPartitioningKeys
import com.twitter.finagle.stats.{InMemoryStatsReceiver, NullStatsReceiver}
import com.twitter.hashing.KeyHasher
import com.twitter.util._
import java.nio.charset.StandardCharsets.UTF_8
import scala.util.Random
/**
 * End-to-end tests for Ketama consistent-hash partitioning: verifies that
 * requests stick to the node they hash to, and that the ring is rebuilt
 * correctly on node addition/removal, ejection and revival.
 */
class KetamaPartitioningServiceTest extends PartitioningServiceTestBase {
  import PartitioningServiceTestBase._

  override def getPartitioningServiceModule: Stackable[ServiceFactory[String, String]] = {
    TestKetamaPartitioningService.module
  }

  // Random alphanumeric string used as a hash key.
  private[this] def randomString(length: Int): String = {
    Random.alphanumeric.take(length).mkString
  }

  // sends random strings to the servers, asserts random distribution and returns the request
  // distribution map
  private[this] def sprayRequests(numKeys: Int): Map[String, String] = {
    (1 to numKeys).map { _ =>
      val request = randomString(50)
      // the response from the server is going to be of the form: $request:$servername
      val response = awaitResult(client(request)).split(EchoDelimiter)
      // assert the response was expected
      assert(response.head == request)
      request -> response(1)
    }.toMap
  }

  test("requests stick to the node that it hashes to for non-batched requests") {
    val sr = new InMemoryStatsReceiver
    val numServers = 5
    servers = createServers(numServers)
    client = createClient(sr)
    assert(servers.length == numServers)
    assert(sr.counters(Seq("client", "loadbalancer", "adds")) == numServers)
    val numKeys = 50
    // first send numKeys requests to the cluster and find the nodes they stick with.
    // ensure they stick to the same node subsequently to verify hashing behavior
    val requestToServer: Map[String, String] = sprayRequests(numKeys)
    // using multiple iterations to test repeatability
    0 until 5 foreach { i =>
      // send the client requests concurrently. One request per client.
      // NOTE(review): resFutures is keyed by server name, so when several
      // requests hash to the same server only the last one is retained for the
      // assertion below — confirm this is intended.
      val resFutures: Map[String, Future[String]] = requestToServer map {
        case (request, serverName) =>
          (serverName, client(request))
      }
      // wait for all requests to finish
      awaitResult(Future.join(resFutures.values.toSeq))
      resFutures foreach {
        case (serverName, resFuture) =>
          assert(serverName == awaitResult(resFuture).split(EchoDelimiter)(1), s"i=$i")
      }
      assert(sr.counters(Seq("client", "loadbalancer", "adds")) == numServers)
    }
  }

  test("requests stick to the node that it hashes to for batched requests") {
    val sr = new InMemoryStatsReceiver
    val numServers = 5
    servers = createServers(numServers)
    client = createClient(sr)
    assert(servers.length == numServers)
    assert(sr.counters(Seq("client", "loadbalancer", "adds")) == numServers)
    val numKeys = 25
    // first send numKeys requests to the cluster and find the nodes they stick with.
    // ensure they stick to the same node subsequently to verify hashing behavior
    val keys = 1 to numKeys map { _ =>
      randomString(5)
    }
    val batchedRequest: String = keys.mkString(RequestDelimiter)
    // capture the request distribution (request -> server) for asserting stickiness
    val requestToServer: Map[String, String] = {
      val batchedResponse = awaitResult(client(batchedRequest))
      val responses = batchedResponse.split(ResponseDelimiter)
      responses.map { response =>
        val requestAndServer = response.split(EchoDelimiter)
        requestAndServer(0) -> requestAndServer(1)
      }.toMap
    }
    // using multiple iterations to test repeatability
    0 until 5 foreach { i =>
      val batchedResponse = awaitResult(client(batchedRequest))
      val responses = batchedResponse.split(ResponseDelimiter)
      responses.map { response =>
        val requestAndServer = response.split(EchoDelimiter)
        val request = requestAndServer(0)
        val serverName = requestAndServer(1)
        // assert that the requests landed on the same host as before
        assert(serverName == requestToServer.getOrElse(request, fail()), s"i=$i")
      }
      assert(sr.counters(Seq("client", "loadbalancer", "adds")) == numServers)
    }
  }

  test("node addition and removal") {
    val sr = new InMemoryStatsReceiver
    // start with 3 servers
    servers = createServers(3)
    // Mutable destination so cluster membership can be changed mid-test.
    val mutableAddrs: ReadWriteVar[Addr] = new ReadWriteVar(
      Addr.Bound(servers.map(s => Address(s._2)): _*)
    )
    val dest: Name = Name.Bound.singleton(mutableAddrs)
    client = createClient(sr, dest)
    assert(sr.counters(Seq("client", "partitioner", "redistributes")) == 1)
    assert(sr.counters(Seq("client", "loadbalancer", "rebuilds")) == 3)
    assert(sr.counters(Seq("client", "loadbalancer", "updates")) == 3)
    assert(sr.counters(Seq("client", "loadbalancer", "adds")) == 3)
    assert(sr.counters(Seq("client", "loadbalancer", "removes")) == 0)
    // add two more servers
    val additions = createServers(2, 3)
    servers = servers ++ additions
    mutableAddrs.update(Addr.Bound(servers.map(s => Address(s._2)).toSet))
    assert(sr.counters(Seq("client", "partitioner", "redistributes")) == 2)
    assert(sr.counters(Seq("client", "loadbalancer", "rebuilds")) == 5)
    assert(sr.counters(Seq("client", "loadbalancer", "updates")) == 5)
    assert(sr.counters(Seq("client", "loadbalancer", "adds")) == 5)
    assert(sr.counters(Seq("client", "loadbalancer", "removes")) == 0)
    // remove one server
    val toDrop = servers.head
    toDrop._1.close()
    servers = servers.toSet.drop(1).toSeq
    mutableAddrs.update(Addr.Bound(servers.map(s => Address(s._2)).toSet))
    assert(sr.counters(Seq("client", "partitioner", "redistributes")) == 3)
    assert(sr.counters(Seq("client", "loadbalancer", "rebuilds")) == 5)
    assert(sr.counters(Seq("client", "loadbalancer", "updates")) == 5)
    assert(sr.counters(Seq("client", "loadbalancer", "adds")) == 5)
    assert(sr.counters(Seq("client", "partitioner", "leaves")) == 1)
    assert(sr.counters(Seq("client", "loadbalancer", "removes")) == 1)
  }

  test("re-hash when bad hosts are ejected") {
    val sr = new InMemoryStatsReceiver
    // start with 5 servers
    servers = createServers(5)
    client = createClient(sr, ejectFailedHosts = true)
    // send some random requests and store the request distribution
    val requestToServer = sprayRequests(50)
    assert(sr.counters(Seq("client", "partitioner", "redistributes")) == 1)
    // kill one of the host
    servers.head._1.close()
    // trigger ejection by sending requests
    Await.ready(Future.join(requestToServer.keySet.map(client(_)).toSeq), Timeout)
    // at least one host should get ejected, because we don't know which hosts were hit by above
    // requests
    eventually {
      assert(sr.counters(Seq("client", "partitioner", "ejections")) == 1)
      assert(sr.counters(Seq("client", "partitioner", "redistributes")) == 2)
    }
    // requests that went to the killed server earlier, should not end up on it now
    val resFutures: Map[String, Future[String]] = requestToServer map {
      case (request, serverName) =>
        (serverName, client(request))
    }
    resFutures foreach {
      case (serverName, resFuture) =>
        if (serverName == "server#0") {
          assert("server#0" != awaitResult(resFuture).split(EchoDelimiter)(1))
        } else {
          assert(serverName == awaitResult(resFuture).split(EchoDelimiter)(1))
        }
    }
  }

  test("host comes back into ring after being ejected (multiple host cluster)") {
    val sr = new InMemoryStatsReceiver
    // start 5 servers
    servers = createServers(5)
    client = createClient(sr, ejectFailedHosts = true)
    // pick a shard by sending a random request
    val request = randomString(50)
    val serverName = awaitResult(client(request)).split(EchoDelimiter)(1)
    Time.withCurrentTimeFrozen { timeControl =>
      // Make the host throw an exception
      failingHosts.add(serverName)
      val response = awaitResult(client(request))
      assert(response == "com.twitter.finagle.ChannelClosedException")
      failingHosts.clear()
      // Node should have been ejected
      assert(sr.counters.get(List("client", "partitioner", "ejections")).contains(1))
      // request should end up somewhere else
      assert(awaitResult(client(request)) != request + EchoDelimiter + serverName)
      timeControl.advance(10.minutes)
      timer.tick()
      // 10 minutes (markDeadFor duration) have passed, so the request should go back the same host
      assert(sr.counters.get(List("client", "partitioner", "revivals")).contains(1))
      assert(awaitResult(client(request)) == request + EchoDelimiter + serverName)
    }
  }

  test("host comes back into ring after being ejected (single host cluster)") {
    val sr = new InMemoryStatsReceiver
    // start 1 server
    servers = createServers(1)
    client = createClient(sr, ejectFailedHosts = true)
    val request = randomString(50)
    val serverName = "server#0"
    Time.withCurrentTimeFrozen { timeControl =>
      // Make the host throw an exception
      failingHosts.add(serverName)
      val response = awaitResult(client(request))
      assert(response == "com.twitter.finagle.ChannelClosedException")
      failingHosts.clear()
      // Node should have been ejected
      assert(sr.counters.get(List("client", "partitioner", "ejections")).contains(1))
      // Node should have been marked dead, and still be dead after 5 minutes
      timeControl.advance(5.minutes)
      // Shard should be unavailable
      assert(awaitResult(client(request)) == "com.twitter.finagle.ShardNotAvailableException")
      timeControl.advance(5.minutes)
      timer.tick()
      // 10 minutes (markDeadFor duration) have passed, so the request should go back the same host
      assert(sr.counters.get(List("client", "partitioner", "revivals")).contains(1))
      assert(awaitResult(client(request)) == request + EchoDelimiter + serverName)
    }
  }

  test("no partitioning keys") {
    client = createClient(NullStatsReceiver, ejectFailedHosts = true)
    // An empty request yields no partition keys; the service must fail fast.
    intercept[NoPartitioningKeys] {
      awaitResult(client(""))
    }
  }
}
object TestKetamaPartitioningService {
  // Stack role/description used when installing the partitioning module.
  val role = Stack.Role("KetamaPartitioning")
  val description = "Partitioning Service based on Ketama consistent hashing"

  /** Stack module that inserts a [[TestKetamaPartitioningService]] into the client stack. */
  private[finagle] def module: Stackable[ServiceFactory[String, String]] =
    new KetamaPartitioningService.Module[String, String, String] {
      override val role: Stack.Role = TestKetamaPartitioningService.role
      override val description: String = TestKetamaPartitioningService.description
      def newKetamaPartitioningService(
        underlying: Stack[ServiceFactory[String, String]],
        params: Params
      ): KetamaPartitioningService[String, String, String] = {
        new TestKetamaPartitioningService(
          underlying = underlying,
          params = params
        )
      }
    }
}
/**
 * Test implementation of [[KetamaPartitioningService]] over a toy string
 * protocol: a batched request is partition keys joined by RequestDelimiter,
 * and a batched response is per-key echoes joined by ResponseDelimiter.
 */
private[this] class TestKetamaPartitioningService(
  underlying: Stack[ServiceFactory[String, String]],
  params: Stack.Params,
  keyHasher: KeyHasher = KeyHasher.KETAMA,
  numReps: Int = KetamaPartitioningService.DefaultNumReps,
  oldLibMemcachedVersionComplianceMode: Boolean = false)
    extends KetamaPartitioningService[String, String, String](
      underlying,
      params,
      keyHasher,
      numReps
    ) {
  import PartitioningServiceTestBase._

  // Bytes fed into the Ketama hash for a given key.
  protected override def getKeyBytes(key: String): Array[Byte] = {
    key.getBytes(UTF_8)
  }

  // Splits a (possibly batched) request into its partition keys; an empty
  // request produces no keys, which the service reports as NoPartitioningKeys.
  protected override def getPartitionKeys(request: String): Seq[String] = {
    if (request.isEmpty)
      Seq.empty
    else
      request.split(RequestDelimiter).map(_.trim).toSeq
  }

  // Re-assembles the subset of keys destined for one partition into a request.
  protected override def createPartitionRequestForKeys(
    request: String,
    pKeys: Seq[String]
  ): String = {
    pKeys.mkString(RequestDelimiter)
  }

  protected override def mergeResponses(
    successes: Seq[String],
    failures: Map[String, Throwable]
  ): String = {
    // responses contain the request keys. So just concatenate. In a real implementation this will
    // typically be a key-value map.
    if (failures.isEmpty) {
      successes.mkString(ResponseDelimiter)
    } else if (successes.nonEmpty) {
      // appending the server exceptions here to easily test partial success for batch operations
      successes.mkString(ResponseDelimiter) + ResponseDelimiter +
        failures.values.map(_.getClass.getTypeName).mkString(ResponseDelimiter)
    } else {
      failures.values.map(_.getClass.getTypeName).mkString(ResponseDelimiter)
    }
  }

  // Every request may span multiple partitions in this test protocol.
  protected def isSinglePartition(request: String): Boolean = false
}
| luciferous/finagle | finagle-partitioning/src/test/scala/com/twitter/finagle/partitioning/KetamaPartitioningServiceTest.scala | Scala | apache-2.0 | 12,809 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.bib.parser
import scala.language.implicitConversions
import scala.util.parsing.combinator._
/**
 * Parser-combinator primitives shared by the BibTeX parsers: whitespace,
 * brace-delimited strings (with and without the outer braces) and implicit
 * lifts from Char/String literals to parsers.
 */
private[parser] trait SharedParsers extends RegexParsers {
  // Whitespace is significant in BibTeX values, so it is consumed explicitly
  // via WS rather than skipped automatically.
  override val skipWhitespace = false
  // FIXME: it's more readable if this is '+', not '*' - go find places that rely on it being '+' and add a '?'
  lazy val WS = r("\\\\s*")

  // Contents of a brace-delimited string with the outer braces stripped.
  lazy val BRACE_DELIMITED_STRING_NO_OUTER: Parser[String] =
    BRACE_DELIMITED_STRING ^^ (s => s.substring(1, s.length - 1))

  // A balanced brace-delimited string, including the outer braces. Nested
  // braces recurse; backslash escapes and any non-brace character are allowed.
  lazy val BRACE_DELIMITED_STRING: Parser[String] =
    '{' ~> (BRACE_DELIMITED_STRING | """\\\\.""" | """[^}{]""").* <~ '}' ^^
    ("{" + _.mkString + "}")

  // Implicit lifts so literals can be used directly in parser expressions.
  implicit def c(x: Char): Parser[Char] = accept(x)
  implicit def r(reg: String): Parser[String] = regex(reg.r)
} | patverga/factorie | src/main/scala/cc/factorie/app/bib/parser/SharedParsers.scala | Scala | apache-2.0 | 1,527 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest
import org.scalatest.FlatSpec
import scala.util.Random
/**
* Created by yao on 9/21/16.
*/
@com.intel.analytics.bigdl.tags.Parallel
class MulConstantSpec extends FlatSpec {
  "MulConstant" should "generate correct output and grad" in {
    val input = Tensor[Double](2, 2, 2).randn()
    val scalar = 25.0
    // Forward multiplies every element by the constant.
    val expectedOutput = input.clone().apply1(_ * scalar)
    val gradOutput = Tensor[Double](2, 2, 2).rand()
    // Backward scales the incoming gradient by the same constant.
    val expectedGrad = gradOutput.clone().apply1(_ * scalar)
    val module = new MulConstant[Double](scalar)
    val output = module.forward(input)
    assert(expectedOutput equals output)
    val gradInput = module.backward(input, gradOutput)
    assert(gradInput equals expectedGrad )
  }

  "MulConstant with inPlace = true" should "generate correct output and grad" in {
    var input = Tensor[Double](2, 2, 2).randn()
    val scalar = 25.0
    val expectedOutput = input.clone().apply1(_ * scalar)
    val gradOutput = Tensor[Double](2, 2, 2).rand()
    val expectedGrad = gradOutput.clone().apply1(_ * scalar)
    // Test forward
    val module = new MulConstant[Double](scalar, true)
    val output = module.forward(input)
    assert(expectedOutput equals output)
    // Test backward
    input = Tensor[Double](2, 2, 2).randn()
    // In in-place mode backward also restores the input by dividing it by the
    // constant, so both the gradient and the mutated input are checked.
    val expectedInput = input.clone().apply1(_ / scalar)
    val gradInput = module.backward(input, gradOutput)
    assert(gradInput equals expectedGrad)
    assert(input equals expectedInput)
  }
}
// Round-trips a MulConstant module through the serialization framework and
// checks the restored module behaves identically on a random input.
class MulConstantSerialTest extends ModuleSerializationTest {
  override def test(): Unit = {
    val mulConst = MulConstant[Float](1.0).setName("mulConst")
    val input = Tensor[Float](10, 10).apply1(_ => Random.nextFloat())
    runSerializationTest(mulConst, input)
  }
}
| yiheng/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/MulConstantSpec.scala | Scala | apache-2.0 | 2,502 |
/*
* Copyright 2015 University of Basel, Graphics and Vision Research Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalismo.statisticalmodel.asm
import ncsa.hdf.`object`.Group
import scalismo.io.HDF5File
import scala.collection.immutable.TreeMap
import scala.util.{Failure, Success, Try}
// TODO: naming to be discussed (also within the entire project). Right now, I'm using a mix of both styles
// ("Hdf5", but "IO"), according to the rule "Camel-case acronyms, but only if they're longer than 2 characters."
// See for instance http://stackoverflow.com/questions/1176950/acronyms-in-camel-back
/**
* A trait signifying that the implementing object provides
* a unique identifier used for serialization purposes.
*/
trait HasIOIdentifier {
  /**
   * An identifier uniquely identifying the kind of object.
   *
   * Note: the prefix "builtin::" is reserved for identifiers of objects that are shipped with the scalismo framework.
   * For the purpose of compatibility with other implementations (e.g., the statismo C++ implementation), it is
   * recommended to restrict the characters used to the ASCII range.
   *
   * @return the unique serialization identifier for this kind of object.
   */
  def identifier: String
}
/**
* Metadata about an object, used for serialization purposes.
* In addition to the unique identifier, attributes concerning the implementation version are provided.
* @param identifier a unique IO identifier.
* @param majorVersion major implementation version.
* @param minorVersion minor implementation version.
* @see HasIOIdentifier
*/
case class IOMetadata(override val identifier: String, majorVersion: Int, minorVersion: Int) extends HasIOIdentifier
/**
* A trait signifying that the implementing object provides
* identity and version information for (de)serialization purposes.
*/
trait HasIOMetadata {
  /** IO metadata (unique identifier plus implementation version) describing this object for (de)serialization. */
  def ioMetadata: IOMetadata
}
/**
* Trait providing methods for serializing/deserializing objects of type T
* to/from HDF5 files.
* @tparam T the type of objects which can be constructed from the information present in HDF5 files, and saved into such files.
*/
trait Hdf5IOHandler[T <: HasIOMetadata] {
  /**
   * Load (instantiate) an object of type T from the information in an HDF5 file.
   * The IO metadata present in the file, as well as the file and group within the file, are provided as arguments,
   * so that implementations can read additional data that might be required for correct object instantiation.
   * @param meta IO Metadata about the concrete implementation, as present in the HDF5 file.
   * @param h5File the HDF5 file containing the information about the object to be constructed.
   * @param h5Group the HDF5 group containing the information about the object to be constructed.
   * @return an object of type T corresponding to the provided IO metadata and initialized according to the
   *         information present in the file (wrapped in a [[Success]]), or a [[Failure]] indicating the cause
   *         of the failure
   */
  def load(meta: IOMetadata, h5File: HDF5File, h5Group: Group): Try[T]

  /**
   * Save all required information about an object to an HDF5 file, so that the object can later be reconstructed
   * using the [[Hdf5IOHandler.load]] method.
   * Note that implementations do not need to care about saving the object's IO metadata, as this is taken care of
   * by the framework.
   * Thus, if the object to be stored does not require any further parameterization, this method does not need to be
   * overridden (there is a default no-op implementation).
   * Otherwise, it is strongly advised to follow these rules in the implementation:
   *
   * - Do not write data anywhere except in the provided group (or subgroups thereof). In other words, do not store
   *   data in parent or sibling groups of <code>h5Group</code>.
   * - Do not store attributes directly attached to <code>h5Group</code>, as they might clash with the attributes
   *   used internally by the framework.
   * - There are no further limitations, i.e., you are free to create values and subgroups, or anything else, in
   *   <code>h5Group</code>, and attach attributes to anything except <code>h5Group</code> itself.
   *
   * @param t the object about which information is to be stored.
   * @param h5File the HDF5 file to save the information to.
   * @param h5Group the group under which to save the information in the HDF5 file.
   * @return [[Success]] or [[Failure]]
   */
  def save(t: T, h5File: HDF5File, h5Group: Group): Try[Unit] = Success(())
}
/**
* Companion object of the [[Hdf5IOHandler]] trait, providing implementations for
* reading/writing IO metadata from/to HDF5 files.
*/
/**
 * Companion object of the [[Hdf5IOHandler]] trait: reads and writes IO metadata
 * as attributes attached to an HDF5 group.
 */
object Hdf5IOHandler {
  final val IdentifierAttributeName = "identifier"
  final val MajorVersionAttributeName = "majorVersion"
  final val MinorVersionAttributeName = "minorVersion"

  /**
   * Stores the given IO metadata as attributes attached to the given HDF5 group.
   * @param meta the metadata to be saved.
   * @param h5File the HDF5 file to save the metadata to.
   * @param h5Group the HDF5 Group within the file to save the metadata to.
   * @return [[Success]] or [[Failure]]
   */
  def saveMetadata(meta: IOMetadata, h5File: HDF5File, h5Group: Group): Try[Unit] = {
    val groupName = h5Group.getFullName
    // Writes are sequenced: a failure in any step short-circuits the remaining ones.
    h5File.writeStringAttribute(groupName, IdentifierAttributeName, meta.identifier).flatMap { _ =>
      h5File.writeIntAttribute(groupName, MajorVersionAttributeName, meta.majorVersion)
    }.flatMap { _ =>
      h5File.writeIntAttribute(groupName, MinorVersionAttributeName, meta.minorVersion)
    }.map(_ => ())
  }

  /**
   * Reads IO metadata (identifier and version information) back from attributes
   * attached to the given HDF5 group.
   * @param h5File the HDF5 file to read the metadata from.
   * @param h5Group the HDF5 Group within the file to read metadata from.
   * @return an IO metadata object wrapped in a [[Success]], or a [[Failure]] indicating what went wrong.
   */
  def loadMetadata(h5File: HDF5File, h5Group: Group): Try[IOMetadata] = {
    val groupName = h5Group.getFullName
    h5File.readStringAttribute(groupName, IdentifierAttributeName).flatMap { identifier =>
      h5File.readIntAttribute(groupName, MajorVersionAttributeName).flatMap { majorVersion =>
        h5File.readIntAttribute(groupName, MinorVersionAttributeName).map { minorVersion =>
          IOMetadata(identifier, majorVersion, minorVersion)
        }
      }
    }
  }
}
/**
* Trait specifying that the implementing object can load and save objects of a given type from/to files.
* In addition, the object specifies which IO Identifier it can handle.
*
* Currently, we require that the HDF5 file format be supported.
*
* @tparam T the type of objects which can be constructed from the information present in files, and saved into files.
* @see IOCollection
*/
trait IOHandler[T <: HasIOMetadata] extends Hdf5IOHandler[T] with HasIOIdentifier
/**
* An object representing a collection of IO handlers providing load/save capabilities for a given object type.
* If there are multiple implementations for the given type (e.g., multiple image preprocessor implementations,
* multiple feature extractor implementations), they are distinguished (and uniquely identified) by their IO Identifiers
* (see [[HasIOIdentifier]]).
*
* For every supported IO Identifier, a corresponding [[IOHandler]] must be registered using the <code>register()</code> method. The identifiers/handlers that come
* built into scalismo are automatically registered and available, but manual registration is required for user-defined handlers.
*
* @tparam T the type of objects that can be loaded/saved
* @tparam IO the type of the corresponding IO handlers
*/
class IOHandlers[T <: HasIOMetadata, IO <: IOHandler[T]] {
  // Registry of handlers, keyed (and sorted) by their IO identifier.
  private var instances = TreeMap.empty[String, IO]

  /**
   * Register an IO handler to make it available for loading and saving object instances.
   * Registering a second handler with the same identifier replaces the first one.
   * @param handler the IO Handler to register.
   */
  def register(handler: IO): Unit = {
    instances = instances.updated(handler.identifier, handler)
  }

  /**
   * Find the handler corresponding to the given IO identifier.
   * @param identifier an IO identifier.
   * @return the corresponding IO handler, or a [[Failure]] if no handler was registered for the identifier.
   */
  def find(identifier: String): Try[IO] = {
    instances.get(identifier).fold[Try[IO]](
      Failure(
        new IllegalArgumentException(
          s"No instance found for identifier=$identifier." +
            " You may need to call " + this.getClass.getName + ".register() once to make the implementation available."
        )
      )
    )(Success(_))
  }

  /**
   * Convenience method to load an object from an HDF5 group.
   * This method loads the IO metadata from the file, then uses the list of registered handlers to automatically
   * find the corresponding IO handler, and finally uses that handler to load the object.
   * @param h5File the HDF5 file containing the group.
   * @param h5Group the HDF5 group within the file to load the object from.
   * @return the object corresponding to the information in the HDF5 group.
   */
  def load(h5File: HDF5File, h5Group: Group): Try[T] = {
    for {
      meta <- Hdf5IOHandler.loadMetadata(h5File, h5Group)
      io <- find(meta.identifier)
      instance <- io.load(meta, h5File, h5Group)
    } yield instance
  }

  /**
   * Convenience method to store an object to an HDF5 group.
   * This method uses the list of registered handlers to automatically find the correct IO handler, then saves the
   * object's metadata and uses the IO handler to save the object.
   * @param t the object to save.
   * @param h5File the HDF5 file containing the group.
   * @param h5Group the HDF5 group to save the object to.
   * @return [[Success]] or [[Failure]]
   */
  def save(t: T, h5File: HDF5File, h5Group: Group): Try[Unit] = {
    val meta = t.ioMetadata
    for {
      io <- find(meta.identifier)
      // The handler is resolved first so that nothing is written when the identifier is unknown.
      _ <- Hdf5IOHandler.saveMetadata(meta, h5File, h5Group)
      _ <- io.save(t, h5File, h5Group)
    } yield ()
  }
}
| unibas-gravis/scalismo | src/main/scala/scalismo/statisticalmodel/asm/IOHandler.scala | Scala | apache-2.0 | 10,724 |
package eventstore
package akka
package tcp

import _root_.akka.NotUsed
import _root_.akka.stream.BidiShape
import _root_.akka.stream.scaladsl.{ BidiFlow, Broadcast, Flow, GraphDSL, Merge }

/**
 * Builds a BidiFlow that answers some inputs locally:
 *  - inputs matched by `pf` are transformed into replies and merged into the
 *    outgoing reply stream without ever reaching the inner stage;
 *  - inputs NOT matched by `pf` are passed through unchanged;
 *  - replies arriving on the second inlet are forwarded into the same merged output.
 */
private[eventstore] object BidiReply {

  def apply[I, O](pf: PartialFunction[I, O]): BidiFlow[I, I, O, O, NotUsed] = BidiFlow.fromGraph {
    GraphDSL.create() { implicit builder =>
      import GraphDSL.Implicits._
      // Fan the input out so both branches (pass-through and local reply) see every element.
      val broadcast = builder add Broadcast[I](2)
      val merge = builder add Merge[O](2)
      val filter = builder add Flow[I].filter(pf.isDefinedAt) // elements answered locally
      val filterNot = builder add Flow[I].filterNot(pf.isDefinedAt) // elements passed through
      val reqToRes = builder add Flow[I].map(pf) // request -> reply translation
      broadcast.out(0) ~> filterNot
      broadcast.out(1) ~> filter ~> reqToRes ~> merge.in(0)
      // Shape ports: (in1 = raw input, out1 = unmatched pass-through,
      //               in2 = externally produced replies, out2 = merged reply stream).
      BidiShape(broadcast.in, filterNot.outlet, merge.in(1), merge.out)
    }
  }
} | EventStore/EventStore.JVM | client/src/main/scala/eventstore/akka/tcp/BidiReply.scala | Scala | bsd-3-clause | 860 |
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.api.ops.variables
import org.platanios.tensorflow.api.core.Shape
import org.platanios.tensorflow.api.core.exception.ShapeMismatchException
import org.platanios.tensorflow.api.implicits.Implicits._
import org.platanios.tensorflow.api.ops.{Basic, Op, Output, Random}
import org.platanios.tensorflow.api.ops.variables.Variable.PartitionInformation
import org.platanios.tensorflow.api.ops.variables.VarianceScalingInitializer.FanInScalingMode
import org.platanios.tensorflow.api.tensors.Tensor
import org.platanios.tensorflow.api.types.DataType
/** Base trait for all variable initializers.
*
* @author Emmanouil Antonios Platanios
*/
trait Initializer {
  /** Data type of the values produced by this initializer. If `null`, then the initializer may produce values of any
    * data type. */
  val dataType: DataType = null

  /** Shape of the values produced by this initializer. If `null`, then the initializer may produce values of any
    * shape. */
  val shape: Shape = null

  /** Creates the initial value op for the provided data type and shape.
    * NOTE(review): `Op.initialization` appears to scope the value creation for graph
    * initialization purposes — confirm against the `Op` API. */
  def apply(dataType: DataType, shape: Shape, partitionInfo: PartitionInformation): Output = {
    Op.initialization {
      initialValue(dataType, shape, partitionInfo)
    }
  }

  /** Generates an initial value op.
    *
    * @param dataType Data type for the output tensor.
    * @param shape Shape for the output tensor.
    * @param partitionInfo [[PartitionInformation]] object holding additional information about how the variable is
    *                      partitioned. May be `null` if the variable is not partitioned.
    * @return Created op output.
    * @throws ShapeMismatchException If the initializer cannot produce a value with the requested shape.
    */
  @throws[ShapeMismatchException]
  def initialValue(dataType: DataType, shape: Shape, partitionInfo: PartitionInformation): Output
}
// Wraps an initializer together with fixed partition information, used as a fallback
// when the caller does not supply any.
private[variables] case class InitializerWithPartitionInformation(
    initializer: Initializer, partitionInfo: PartitionInformation) extends Initializer {
  override val dataType: DataType = initializer.dataType
  override val shape: Shape = initializer.shape

  /** Delegates to the wrapped initializer, substituting the stored partition information when none is provided. */
  override def initialValue(dataType: DataType, shape: Shape, partitionInfo: PartitionInformation): Output = {
    val effectivePartitionInfo = if (partitionInfo == null) this.partitionInfo else partitionInfo
    initializer.initialValue(dataType, shape, effectivePartitionInfo)
  }
}
/** Initializer that sets all elements of the variable tensor to zeros. */
object ZerosInitializer extends Initializer {
  /** Returns a tensor of the requested data type and shape, filled with zeros. */
  override def initialValue(dataType: DataType, shape: Shape, partitionInfo: PartitionInformation): Output = {
    Basic.zeros(dataType, shape, name = "ZerosInitializer")
  }
}
/** Initializer that sets all elements of the variable tensor to ones. */
object OnesInitializer extends Initializer {
  /** Returns a tensor of the requested data type and shape, filled with ones. */
  override def initialValue(dataType: DataType, shape: Shape, partitionInfo: PartitionInformation): Output = {
    Basic.ones(dataType, shape, name = "OnesInitializer")
  }
}
/** Initializer that sets the value of the variable to the provided `value`. */
case class ConstantInitializer(value: Tensor) extends Initializer {
  // The produced values always carry the constant's own data type and shape.
  override val dataType: DataType = value.dataType
  override val shape: Shape = value.shape

  /** Returns a constant op built from the stored tensor value. */
  override def initialValue(dataType: DataType, shape: Shape, partitionInfo: PartitionInformation): Output = {
    Basic.constant(value, dataType, shape, name = "ConstantInitializer")
  }
}
/** Initializer that sets the value of the variable to the provided `value`. */
case class DynamicConstantInitializer(value: Output) extends Initializer {
  override val dataType: DataType = value.dataType
  override val shape: Shape = value.shape

  /** Returns the stored symbolic value, broadcasting (filling) it to the requested
    * shape when the value is scalar-like, or failing when the shapes are incompatible. */
  @throws[ShapeMismatchException]
  override def initialValue(dataType: DataType, shape: Shape, partitionInfo: PartitionInformation): Output = {
    if (this.shape == null) {
      Basic.fill(dataType, shape)(value, name = "ConstantInitializer")
    } else if (shape.isCompatibleWith(this.shape)) {
      Basic.identity(value, name = "ConstantInitializer")
    // NOTE(review): && binds tighter than ||, so this condition reads as
    // (shape.rank > 0 && this.shape.rank == 0) || (this.shape.rank == 1 && this.shape(0) == 1);
    // if `shape.rank > 0` was meant to apply to both scalar-like cases, parentheses are missing — confirm.
    } else if (shape.rank > 0 && this.shape.rank == 0 || (this.shape.rank == 1 && this.shape(0) == 1)) {
      Basic.fill(dataType, shape)(value, name = "ConstantInitializer")
    } else {
      throw ShapeMismatchException(
        s"The constant value shape '${this.shape}' is not compatible with the requested shape '$shape'.")
    }
  }
}
/** Initializer that sets the value of the variable to a `value` drawn from a uniform distribution. */
case class RandomUniformInitializer(
    minValue: Tensor = 0.0, maxValue: Tensor = 1.0, seed: Option[Int] = None) extends Initializer {
  // Samples values between minValue and maxValue (bound inclusivity is determined by
  // Random.randomUniform — confirm against its documentation).
  override def initialValue(dataType: DataType, shape: Shape, partitionInfo: PartitionInformation): Output = {
    Random.randomUniform(
      dataType, shape, minValue = minValue, maxValue = maxValue, seed = seed, name = "RandomUniformInitializer")
  }
}
/** Initializer that sets the value of the variable to a `value` drawn from a Normal distribution. */
case class RandomNormalInitializer(
    mean: Tensor = 0.0, standardDeviation: Tensor = 1.0, seed: Option[Int] = None) extends Initializer {
  /** Returns a tensor sampled from a Normal distribution with the configured mean and standard deviation. */
  override def initialValue(dataType: DataType, shape: Shape, partitionInfo: PartitionInformation): Output = {
    Random.randomNormal(
      dataType, shape, mean = mean, standardDeviation = standardDeviation, seed = seed,
      name = "RandomNormalInitializer")
  }
}
/** Initializer that sets the value of the variable to a `value` drawn from a truncated Normal distribution. */
case class RandomTruncatedNormalInitializer(
    mean: Tensor = 0.0, standardDeviation: Tensor = 1.0, seed: Option[Int] = None) extends Initializer {
  /** Returns a tensor sampled from a truncated Normal distribution with the configured mean and standard deviation. */
  override def initialValue(dataType: DataType, shape: Shape, partitionInfo: PartitionInformation): Output = {
    Random.randomTruncatedNormal(
      dataType, shape, mean = mean, standardDeviation = standardDeviation, seed = seed,
      name = "RandomTruncatedNormalInitializer")
  }
}
/** Initializer capable of adapting its scale to the shape of weights tensors.
*
* With the Normal distribution option, samples are drawn from a truncated Normal distribution centered on zero, and
* with standard deviation equal to `sqrt(initialScale / n)`, where `n` is:
*
* - the number of input units in the weight tensor, if `mode == FanInScalingMode`,
* - the number of output units, if `mode == FanOutScalingMode`, or
* - the average of the numbers of input and output units, if `mode == FanAverageScalingMode`
*
* With uniform distribution option, samples are drawn from a uniform distribution within `[-limit, limit]`, where
* `limit = sqrt(3 * initialScale / n)`.
*
* @param initialScale Initial variance scale.
* @param scalingMode Variance scaling mode.
* @param distribution Distribution to use when sampling.
* @param seed Optional random seed, used to generate a random seed pair for the random number generator,
* when combined with the graph-level seed.
*/
class VarianceScalingInitializer(
    val initialScale: Float = 1.0f,
    val scalingMode: VarianceScalingInitializer.ScalingMode = FanInScalingMode,
    val distribution: VarianceScalingInitializer.Distribution = VarianceScalingInitializer.NormalDistribution,
    val seed: Option[Int] = None
) extends Initializer {
  @throws[ShapeMismatchException]
  override def initialValue(dataType: DataType, shape: Shape, partitionInfo: PartitionInformation): Output = {
    // For partitioned variables the scale is derived from the full (unpartitioned) shape,
    // so that every partition is initialized with the same variance.
    val scale = scalingMode.scale(initialScale, if (partitionInfo != null) partitionInfo.fullShape else shape)
    distribution.initialValue(scale, dataType, shape, seed)
  }
}
object VarianceScalingInitializer {
  /** Convenience factory mirroring the class constructor and its defaults. */
  def apply(
      initialScale: Float = 1.0f,
      scalingMode: ScalingMode = FanInScalingMode,
      distribution: Distribution = VarianceScalingInitializer.NormalDistribution,
      seed: Option[Int] = None
  ): VarianceScalingInitializer = {
    new VarianceScalingInitializer(initialScale, scalingMode, distribution, seed)
  }

  /** Strategy deciding how the initial variance scale is divided by the fan-in/fan-out of the weights shape. */
  sealed trait ScalingMode {
    def scale(initialScale: Float, shape: Shape): Float

    /** Computes the number of input and output units for the provided weights shape. */
    protected def computeFans(shape: Shape): (Long, Long) = {
      if (shape.rank == 0) {
        (0L, 0L)
      } else if (shape.rank == 1) {
        // Rank-1 weights: the single dimension acts as both fan-in and fan-out.
        (shape(0), shape(0))
      } else if (shape.rank == 2) {
        // Dense layer weights: [inputUnits, outputUnits].
        (shape(0), shape(1))
      } else {
        // Assuming convolution kernels (2D, 3D, or more) with shape: [..., inputDepth, depth]
        val receptiveFieldSize = shape(0 :: -2).asArray.product
        (shape(-2) * receptiveFieldSize, shape(-1) * receptiveFieldSize)
      }
    }
  }

  /** Divides the initial scale by the number of input units (clamped to at least 1). */
  case object FanInScalingMode extends ScalingMode {
    override def scale(initialScale: Float, shape: Shape): Float = {
      val (fanIn, _) = computeFans(shape)
      initialScale / Math.max(1L, fanIn).toFloat
    }
  }

  /** Divides the initial scale by the number of output units (clamped to at least 1). */
  case object FanOutScalingMode extends ScalingMode {
    override def scale(initialScale: Float, shape: Shape): Float = {
      val (_, fanOut) = computeFans(shape)
      initialScale / Math.max(1L, fanOut).toFloat
    }
  }

  /** Divides the initial scale by the average of fan-in and fan-out (clamped to at least 1). */
  case object FanAverageScalingMode extends ScalingMode {
    override def scale(initialScale: Float, shape: Shape): Float = {
      val (fanIn, fanOut) = computeFans(shape)
      initialScale / Math.max(1.0f, (fanIn + fanOut).toFloat / 2.0f)
    }
  }

  /** Distribution to sample initial values from, given a variance scale. */
  sealed trait Distribution {
    def initialValue(scale: Float, dataType: DataType, shape: Shape, seed: Option[Int] = None): Output
  }

  /** Truncated Normal centered on zero with standard deviation sqrt(scale). */
  case object NormalDistribution extends Distribution {
    override def initialValue(scale: Float, dataType: DataType, shape: Shape, seed: Option[Int] = None): Output = {
      Random.randomTruncatedNormal(
        dataType, shape, Basic.constant(0, dataType), Basic.constant(Math.sqrt(scale), dataType), seed)
    }
  }

  /** Uniform distribution on [-limit, limit] with limit = sqrt(3 * scale). */
  case object UniformDistribution extends Distribution {
    override def initialValue(scale: Float, dataType: DataType, shape: Shape, seed: Option[Int] = None): Output = {
      val limit = Math.sqrt(3.0f * scale)
      Random.randomUniform(dataType, shape, Basic.constant(-limit, dataType), Basic.constant(limit, dataType), seed)
    }
  }
}
/** Glorot uniform initializer, also called the Xavier uniform initializer.
*
* This initializer draws samples from a uniform distribution within `[-limit, limit]`, where `limit` is equal to
* `sqrt(6 / (fanIn + fanOut))`, where `fanIn` is the number of input units in the weight tensor and `fanOut` is the
* number of output units in the weight tensor.
*
* Reference: [Understanding the difficulty of training deep feed-forward neural networks](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)
*
* @param seed Optional random seed, used to generate a random seed pair for the random number generator, when
* combined with the graph-level seed.
*/
// Variance scaling with scale 1.0, fan-average mode, and a uniform distribution (Xavier/Glorot uniform).
case class GlorotUniformInitializer(override val seed: Option[Int] = None)
    extends VarianceScalingInitializer(
      1.0f, VarianceScalingInitializer.FanAverageScalingMode, VarianceScalingInitializer.UniformDistribution, seed)
/** Glorot Normal initializer, also called the Xavier Normal initializer.
*
* This initializer draws samples from a Normal distribution centered on zero and with standard deviation equal to
* `sqrt(2 / (fanIn + fanOut))`, where `fanIn` is the number of input units in the weight tensor and `fanOut` is the
* number of output units in the weight tensor.
*
* Reference: [Understanding the difficulty of training deep feed-forward neural networks](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)
*
* @param seed Optional random seed, used to generate a random seed pair for the random number generator, when
* combined with the graph-level seed.
*/
// Variance scaling with scale 1.0, fan-average mode, and a (truncated) Normal distribution (Xavier/Glorot normal).
case class GlorotNormalInitializer(override val seed: Option[Int] = None)
    extends VarianceScalingInitializer(
      1.0f, VarianceScalingInitializer.FanAverageScalingMode, VarianceScalingInitializer.NormalDistribution, seed)
| eaplatanios/tensorflow | tensorflow/scala/api/src/main/scala/org/platanios/tensorflow/api/ops/variables/Initializer.scala | Scala | apache-2.0 | 12,678 |
package com.codelab27.cards9.repos.matches
import com.codelab27.cards9.models.matches.Match
import com.codelab27.cards9.models.matches.Match.MatchState
import com.codelab27.cards9.models.players.Player
/** Persistence abstraction for [[Match]] entities, parameterized over the effect type `F`. */
trait MatchRepository[F[_]] {
  /** Looks up a match by its identifier; `None` when no such match exists. */
  def findMatch(id: Match.Id): F[Option[Match]]

  /** Returns all matches currently in the given state. */
  def findMatches(state: MatchState): F[Seq[Match]]

  /** Returns the matches associated with the given player (presumably those the player takes part in — confirm). */
  def findMatchesForPlayer(playerId: Player.Id): F[Seq[Match]]

  /** Persists the match; `None` presumably signals that storing failed — confirm with implementations. */
  def storeMatch(theMatch: Match): F[Option[Match.Id]]

  /** Changes the state of the identified match, returning the resulting state when the change was applied. */
  def changeMatchState(id: Match.Id, state: MatchState): F[Option[MatchState]]
}
| bilki/cards9-server | app/com/codelab27/cards9/repos/matches/MatchRepository.scala | Scala | gpl-2.0 | 539 |
package concrete
package constraint
package linear
;
import cspom.CSPOM._
import cspom.variable.{BoolVariable, IntVariable}
import cspom.{CSPOM, CSPOMConstraint}
import org.scalatest.{FlatSpec, Inspectors, Matchers}
// Unit tests for the linear (sum) constraints: LinearLe, LinearEq and LinearNe.
// Each test builds a small Problem, fires full propagation events on the constraint
// and checks the filtered domains (or the detected inconsistency).
final class SumTest extends FlatSpec with Matchers with Inspectors {
  val pm = new ParameterManager()
  // pm("linear.stateless") = true

  "Sum" should "filter <= with negative coefficients" in {
    val x = new Variable("x", IntDomain.ofSeq(0, 1))
    val y = new Variable("y", IntDomain.ofSeq(0, 1))
    // -20x - 18y <= -19, i.e. 20x + 18y >= 19: x must be 1, y stays free.
    val c = LinearLe(-19, Array(-20, -18), Array(x, y), false, pm)
    val pb = Problem(x, y)
    pb.addConstraint(c)
    c.register(new AdviseCount)
    val mod = pb.initState.andThen { ps =>
      if (ps.entailed.hasInactiveVar(c)) {
        ps
      } else {
        c.eventAll(ps)
        c.revise(ps)
      }
    }.toState
    mod.dom(x).view should contain theSameElementsAs Seq(1)
    mod.dom(y).view should contain theSameElementsAs Seq(0, 1)
  }

  // Only checks that revision after an assignment runs without error (no domain assertions).
  it should "recompute f correctly" in {
    val x = new Variable("x", IntDomain.ofSeq(-4, -3, -2, -1, 2))
    val y = new Variable("y", IntDomain.ofSeq(3, 5, 6, 8, 9))
    val z = new Variable("z", IntDomain.ofSeq(7))
    val c = LinearEq(0, Array(1, -1, 1), Array(x, y, z))
    // c.register(new AdviseCount)
    val pb = new Problem(c.scope)
    pb.addConstraint(c)
    c.register(new AdviseCount)
    val ps = pb.initState.assign(y, 9).toState
    c.eventAll(ps)
    c.revise(ps).toState
    // withClue(c.toString(mod)) {
    // mod.dom(x52) should contain theSameElementsAs Seq(4)
    // }
  }

  it should "filter = with yet other particular parameters" in {
    val x52 = new Variable("x52", IntDomain.ofSeq(0, 2, 4, 5))
    val x5 = new Variable("x5", Singleton(7))
    val x6 = new Variable("x6", IntDomain.ofSeq(3, 6, 8))
    // -x52 + x5 - x6 = 0 with x5 = 7: x52 = 7 - x6, only x6 = 3 fits, so x52 = 4.
    val c = LinearEq(0, Array(-1, 1, -1), Array(x52, x5, x6))
    // c.register(new AdviseCount)
    val pb = Problem(c.scope: _*)
    pb.addConstraint(c)
    c.register(new AdviseCount)
    val ps = pb.initState.toState
    c.eventAll(ps)
    val mod = c.revise(ps).toState
    withClue(c.toString(mod)) {
      mod.dom(x52).view should contain theSameElementsAs Seq(4)
    }
    // -1.Q[43] [0, 3] + -2.Q[44] [1] + -3.Q[45] [1, 2] + -4.Q[46] [0] + -5.Q[47] [0] + -6.Q[48] [0] eq -6 ()
  }

  // Shared fixtures for the following tests.
  val v0 = new Variable("v0", IntDomain(1 to 4))
  val v1 = new Variable("v1", IntDomain(0 to 4))
  val b = new Variable("b", IntDomain.ofSeq(1))

  it should "filter = with particular parameters" in {
    val q43 = new Variable("q43", IntDomain(0 to 3))
    val q44 = new Variable("q44", IntDomain(1 to 1))
    val q45 = new Variable("q45", IntDomain(1 to 2))
    val q46 = new Variable("q46", IntDomain(0 to 0))
    val q47 = new Variable("q47", IntDomain(0 to 0))
    val q48 = new Variable("q48", IntDomain(0 to 0))
    val c = LinearEq(-6, Array(-1, -2, -3, -4, -5, -6), Array(q43, q44, q45, q46, q47, q48))
    //c.register(new AdviseCount)
    val pb = Problem(c.scope: _*)
    pb.addConstraint(c)
    c.register(new AdviseCount)
    val ps = pb.initState.toState
    c.eventAll(ps)
    val mod = c.revise(ps).toState
    mod.dom(q43).view should contain theSameElementsAs Seq(1)
    // -1.Q[43] [0, 3] + -2.Q[44] [1] + -3.Q[45] [1, 2] + -4.Q[46] [0] + -5.Q[47] [0] + -6.Q[48] [0] eq -6 ()
  }

  it should "filter =" in {
    // 4b - v0 - v1 = 0 with b = 1: v0 + v1 = 4.
    val c = LinearEq(0, Array(4, -1, -1), Array(b, v0, v1))
    //c.register(new AdviseCount)
    val pb = Problem(b, v0, v1)
    pb.addConstraint(c)
    c.register(new AdviseCount)
    val ps = pb.initState.toState
    c.eventAll(ps)
    val mod = c.revise(ps).toState
    mod.dom(v0).view should contain theSameElementsAs (1 to 4)
    mod.dom(v1).view should contain theSameElementsAs (0 to 3)
  }

  it should "filter <= with positive and negative coefficients" in {
    val c = LinearLe(0, Array(4, -1, -1), Array(b, v0, v1), false, pm)
    val pb = Problem(b, v0, v1)
    pb.addConstraint(c)
    c.register(new AdviseCount)
    val ps = pb.initState.toState
    c.eventAll(ps)
    val mod = c.revise(ps).toState
    mod.dom(v0).view should contain theSameElementsAs Seq(1, 2, 3, 4)
    mod.dom(v1).view should contain theSameElementsAs (0 to 4)
  }

  it should "filter <= with positive coefficients" in {
    val c = LinearLe(3, Array(1, 1), Array(v0, v1), false, pm)
    val pb = Problem(b, v0, v1)
    pb.addConstraint(c)
    c.register(new AdviseCount)
    val ps = pb.initState.toState
    c.eventAll(ps)
    val mod = c.revise(ps).toState
    mod.dom(v0).view should contain theSameElementsAs (1 to 3)
    mod.dom(v1).view should contain theSameElementsAs (0 to 2)
  }

  it should "filter <= with other negative coefficients and domains" in {
    val v2 = new Variable("v2", IntDomain(0 to 1))
    val c = LinearLe(-3, Array(-1, -1, -6), Array(
      new Variable("v0", IntDomain(0 to 1)),
      new Variable("v1", IntDomain(1 to 5)),
      v2), strict = false, pm)
    val pb = Problem(c.scope: _*)
    pb.addConstraint(c)
    c.register(new AdviseCount)
    val ps = pb.initState.toState
    c.eventAll(ps)
    val mod = c.revise(ps).toState
    // The constraint is satisfiable for both values of v2, so its domain is unchanged.
    mod.dom(v2).view should contain theSameElementsAs (0 to 1)
  }

  it should "detect singleton inconsistency" in {
    val v0 = new Variable("v0", Singleton(1))
    val pb = Problem(v0)
    // Each of these constraints is violated by v0 = 1, so revision must fail.
    forAll(Seq(
      LinearLe(0, Array(1), Array(v0), strict = false, pm),
      new LinearNe(1, Array(1), Array(v0)),
      LinearEq(0, Array(1), Array(v0)))
    ) { c =>
      c.register(new AdviseCount)
      pb.addConstraint(c)
      val r = pb.initState.andThen { ps =>
        c.eventAll(ps)
        c.revise(ps)
      }
      assert(!r.isState)
    }
  }

  it should "filter /=" in {
    val v0 = new Variable("v0", IntDomain(-10 to 10))
    val pb = Problem(v0)
    // 2 * v0 != 2, so only v0 = 1 is removed.
    val c = new LinearNe(2, Array(2), Array(v0))
    pb.addConstraint(c)
    c.register(new AdviseCount)
    val mod = pb.initState.andThen { ps =>
      c.eventAll(ps)
      c.revise(ps)
    }
    mod.dom(v0).view should contain theSameElementsAs (-10 to 10 filter (_ != 1))
  }

  it should "not filter /=" in {
    val v0 = new Variable("v0", IntDomain(-10 to 10))
    val v1 = new Variable("v1", IntDomain(-1 to 1))
    val pb = Problem(v0, v1)
    // With two unbound variables, v0 + v1 != 0 cannot remove any value yet.
    val c = new LinearNe(0, Array(1, 1), Array(v0, v1))
    pb.addConstraint(c)
    c.register(new AdviseCount)
    val ps = pb.initState
    val mod = ps.andThen { ps =>
      c.eventAll(ps)
      c.revise(ps)
    }
    mod shouldBe ps
  }

  it should "filter = with other particular parameters" in {
    val x98 = new Variable("x98", IntDomain.ofSeq(0, 2))
    val x1605 = new Variable("x1605", IntDomain(0 to 0))
    val x985 = new Variable("x985", IntDomain(-1 to 1))
    // x98 - x1605 - x985 = 0 with x1605 = 0: x985 = x98, so x985 in {0, 2} ∩ [-1, 1] = {0}.
    val c = LinearEq(0, Array(1, -1, -1), Array(x98, x1605, x985))
    //c.register(new AdviseCount)
    val pb = new Problem(c.scope)
    pb.addConstraint(c)
    c.register(new AdviseCount)
    val mod = pb.initState.andThen { ps =>
      c.eventAll(ps)
      c.revise(ps)
    }
    mod.dom(x985).view should contain theSameElementsAs Seq(0)
    // -1.Q[43] [0, 3] + -2.Q[44] [1] + -3.Q[45] [1, 2] + -4.Q[46] [0] + -5.Q[47] [0] + -6.Q[48] [0] eq -6 ()
  }

  it should "filter = incrementally" in {
    val x238 = new Variable("x238", IntDomain.ofInterval(-14, 15))
    val x14 = new Variable("x14", IntDomain.ofInterval(1, 16))
    val x0 = new Variable("x0", Singleton(15))
    // x238 - x14 + x0 = 0 with x0 = 15: x238 = x14 - 15.
    val c = LinearEq(0, Array(1, -1, 1), Array(x238, x14, x0))
    //c.register(new AdviseCount)
    val pb = new Problem(c.scope)
    pb.addConstraint(c)
    c.register(new AdviseCount)
    val ps = pb.initState.toState
    c.eventAll(ps)
    val mod = c.revise(ps).toState
    mod.dom(x238).view should contain theSameElementsAs (-14 to 1)
    // Tighten x14's upper bound and re-propagate via an incremental bound-removal event.
    val ps2 = mod.removeAfter(x14, 14).toState
    c.event(ps2, BoundRemoval, 1)
    val mod2 = c.revise(ps2).toState
    mod2.dom(x238).view should contain theSameElementsAs (-14 to -1)
    //1.X_INTRODUCED_238||X_INTRODUCED_408[1] {-14, -13, -12, -11, [24...], 15} + -1.X_INTRODUCED_14||costas[15] [1, 14] + 1.X_INTRODUCED_0||costas[1] [15] =BC= 0
  }

  it should "nop" in {
    val x = new Variable("x", Singleton(1))
    val y = new Variable("y", Singleton(2))
    val z = new Variable("z", Singleton(1))
    // Already-satisfied constraint over singleton domains: revision must change nothing.
    val c = new LinearLe(0, Array(1, -1, 1), Array(x, y, z), Array(20, 19, 18))
    //c.register(new AdviseCount)
    val pb = new Problem(c.scope)
    pb.addConstraint(c)
    c.register(new AdviseCount)
    val mod = pb.initState.andThen { ps =>
      if (ps.entailed.hasInactiveVar(c)) {
        ps
      } else {
        c.eventAll(ps)
        c.revise(ps)
      }
    }.toState
    mod.currentDomains.toSeq shouldBe Seq(x, y, z).map(_.initDomain)
  }

  "Reified sum" should "detect inconsistency" in {
    // Builds, via CSPOM, the reified constraint r <=> (v0 <= -1), then forces r = 1
    // and v0 = 0, which must be detected as inconsistent after propagation.
    val cspom = CSPOM { implicit cspom =>
      val r = new BoolVariable() as "r"
      val v0 = IntVariable(0 to 3) as "v0"
      ctr(CSPOMConstraint(r)("sum")(Seq(1), Seq(v0), -1) withParam ("mode" -> "le"))
    }
    val s = Solver(cspom).get
    val problem = s.concreteProblem
    val r = problem.variable("r")
    val v0 = problem.variable("v0")
    val state = problem.initState
      .assign(r, 1)
      .assign(v0, 0)
      .toState
      .fold(problem.constraints) { (ps, c) =>
        c.eventAll(ps)
        c.revise(ps)
      }
    assert(!state.isState)
  }
}
| concrete-cp/concrete | src/test/scala/concrete/constraint/linear/SumTest.scala | Scala | lgpl-2.1 | 9,328 |
package com.mthaler.xmlconfect
import scala.reflect._
import scala.xml.{ Node, Null, Text }
/**
* Provides the XmlElemFormats for the most important Scala types.
*/
/**
 * Provides the XmlElemFormats for the most important Scala types.
 *
 * Each format parses its value back from the text content of an XML element
 * via `readElem`; writing is inherited from `SimpleXmlElemFormat` except for
 * `Symbol`, which overrides `writeElem` so it serializes as its bare name.
 */
object BasicElemFormats {

  implicit object BooleanXmlElemFormat extends SimpleXmlElemFormat[Boolean] {
    protected def readElem(node: Node, name: String = ""): Boolean = node.text.toBoolean
  }

  implicit object ByteXmlElemFormat extends SimpleXmlElemFormat[Byte] {
    protected def readElem(node: Node, name: String = ""): Byte = node.text.toByte
  }

  implicit object ShortXmlElemFormat extends SimpleXmlElemFormat[Short] {
    protected def readElem(node: Node, name: String = ""): Short = node.text.toShort
  }

  implicit object IntXmlElemFormat extends SimpleXmlElemFormat[Int] {
    protected def readElem(node: Node, name: String = ""): Int = node.text.toInt
  }

  implicit object LongXmlElemFormat extends SimpleXmlElemFormat[Long] {
    protected def readElem(node: Node, name: String = ""): Long = node.text.toLong
  }

  implicit object FloatXmlElemFormat extends SimpleXmlElemFormat[Float] {
    protected def readElem(node: Node, name: String = ""): Float = node.text.toFloat
  }

  implicit object DoubleXmlElemFormat extends SimpleXmlElemFormat[Double] {
    protected def readElem(node: Node, name: String = ""): Double = node.text.toDouble
  }

  implicit object StringXmlElemFormat extends SimpleXmlElemFormat[String] {
    protected def readElem(node: Node, name: String = ""): String = node.text
  }

  implicit object CharXmlElemFormat extends SimpleXmlElemFormat[Char] {
    protected def readElem(node: Node, name: String = ""): Char = {
      // A Char is encoded as a single-character string; anything else is a
      // deserialization error rather than a silent truncation.
      val txt = node.text
      if (txt.length == 1) txt.charAt(0) else deserializationError("Expected Char as single-character string, but got " + txt)
    }
  }

  implicit object SymbolXmlElemFormat extends SimpleXmlElemFormat[Symbol] {
    protected def readElem(node: Node, name: String = ""): Symbol = Symbol(node.text)
    // Write the symbol's name, not its toString (which carries a leading quote).
    protected override def writeElem(obj: Symbol, name: String = ""): Node = elem(name, Null, Seq(Text(obj.name)))
  }

  implicit object BigIntXmlElemFormat extends SimpleXmlElemFormat[BigInt] {
    protected def readElem(node: Node, name: String = ""): BigInt = BigInt(node.text)
  }

  implicit object BigDecimalXmlElemFormat extends SimpleXmlElemFormat[BigDecimal] {
    protected def readElem(node: Node, name: String = ""): BigDecimal = BigDecimal(node.text)
  }

  /**
   * Format for any Java enum; the concrete enum class is recovered from the
   * ClassTag. The result type is declared explicitly — implicit definitions
   * must not rely on type inference, which can break implicit resolution and
   * is rejected by newer compilers.
   */
  implicit def enumFormat[T <: Enum[T]: ClassTag]: SimpleXmlElemFormat[T] = new SimpleXmlElemFormat[T] {
    protected def readElem(node: Node, name: String = ""): T = {
      val c = classTag[T].runtimeClass.asInstanceOf[Class[T]]
      Enum.valueOf(c, node.text)
    }
  }
}
| mthaler/xmlconfect | src/main/scala/com/mthaler/xmlconfect/BasicElemFormats.scala | Scala | apache-2.0 | 2,688 |
package com.sksamuel.elastic4s.requests.get
import com.sksamuel.elastic4s.{Index, IndexAndType}
trait GetApi {

  /** Builds a request to fetch a single document by id from an index and (legacy) type. */
  @deprecated("types are deprecated now", "7.0")
  def get(index: Index, `type`: String, id: String) = GetRequest(IndexAndType(index.name, `type`), id)

  /** Builds a request to fetch a single document by id from the given index. */
  def get(index: Index, id: String): GetRequest = GetRequest(index, id)

  /** Legacy builder that takes the id first and the index via `from`. */
  @deprecated("use get(index, id)", "7.7")
  def get(id: String): GetExpectsFrom = new GetExpectsFrom(id)

  class GetExpectsFrom(id: String) {
    def from(index: Index): GetRequest = GetRequest(index, id)
  }

  /** Combines any number of get requests into a single multi-get. */
  def multiget(gets: Iterable[GetRequest]): MultiGetRequest = MultiGetRequest(gets.toSeq)

  /** Varargs convenience overload; requires at least one request. */
  def multiget(first: GetRequest, rest: GetRequest*): MultiGetRequest = multiget(Seq(first) ++ rest)
}
| stringbean/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/get/GetApi.scala | Scala | apache-2.0 | 759 |
package scuff.web
import java.net.{ URI, URL }
import java.time.{ Instant, LocalDateTime, OffsetDateTime, ZoneId, ZonedDateTime }
import java.time.format.DateTimeFormatter
import scala.concurrent.duration.FiniteDuration
import scuff.MediaType
object HttpHeaders {

  /** Formatter for HTTP date headers (RFC 1123 / RFC 7231 format). */
  private def RFC_1123 = DateTimeFormatter.RFC_1123_DATE_TIME
  // HTTP dates are always emitted in GMT, regardless of the input zone.
  private[this] val GMT = ZoneId of "GMT"

  /** Parses an RFC 1123 date header value into a ZonedDateTime. */
  def RFC_1123(str: String): ZonedDateTime = ZonedDateTime.parse(str, RFC_1123)
  /** Formats a date as an RFC 1123 string, converting to GMT first when needed. */
  def RFC_1123(date: ZonedDateTime): String = {
    val gmt = if (date.getZone == GMT) date else date.withZoneSameInstant(GMT)
    RFC_1123.format(gmt)
  }
  /** Formats epoch milliseconds as an RFC 1123 GMT string. */
  def RFC_1123(epochMillis: Long): String = {
    val zdt = ZonedDateTime.ofInstant(Instant.ofEpochMilli(epochMillis), GMT)
    RFC_1123(zdt)
  }
  /** Formats a legacy java.util.Date as an RFC 1123 GMT string. */
  def RFC_1123(date: java.util.Date): String = {
    val zdt = ZonedDateTime.ofInstant(Instant.ofEpochMilli(date.getTime), GMT)
    RFC_1123(zdt)
  }
  // NOTE: a LocalDateTime carries no zone; it is interpreted as already being GMT.
  def RFC_1123(date: LocalDateTime): String = RFC_1123(ZonedDateTime.of(date, GMT))
  def RFC_1123(date: OffsetDateTime): String = RFC_1123(date.atZoneSameInstant(GMT))

  // Standard HTTP header names. Each constant is paired below with same-named
  // builder methods that return ready-to-use (name, value) tuples.
  final val LastModified = "Last-Modified"
  final val ContentLength = "Content-Length"
  final val ContentType = "Content-Type"
  final val ContentRange = "Content-Range"
  final val ETag = "ETag"
  final val Expect = "Expect"
  final val Age = "Age"
  final val IfNoneMatch = "If-None-Match"
  final val IfMatch = "If-Match"
  final val IfModifiedSince = "If-Modified-Since"
  final val CacheControl = "Cache-Control"
  final val Referer = "Referer"
  final val RetryAfter = "Retry-After"
  final val UserAgent = "User-Agent"
  final val Location = "Location"

  final def Location(location: URL): (String, String) = Location(location.toString)
  final def Location(location: URI): (String, String) = Location(location.toString)
  final def Location(location: String): (String, String) = Location -> location
  def LastModified(epochMillis: Long): (String, String) = LastModified -> RFC_1123(epochMillis)
  def LastModified(date: java.util.Date): (String, String) = LastModified -> RFC_1123(date)
  def LastModified(date: ZonedDateTime): (String, String) = LastModified -> RFC_1123(date)
  def LastModified(date: LocalDateTime): (String, String) = LastModified -> RFC_1123(date)
  def LastModified(date: OffsetDateTime): (String, String) = LastModified -> RFC_1123(date)
  def ContentLength(length: Long): (String, String) = ContentLength -> length.toString
  def ContentType(ct: MediaType): (String, String) = ContentType(ct.toString)
  def ContentType(ct: String): (String, String) = ContentType -> ct
  // Content-Range forms per RFC 7233: "unit first-last/size", "unit */size"
  // (range unknown), and "unit first-last/*" (size unknown).
  def ContentRange(unit: String, range: Range, size: Long): (String, String) =
    ContentRange -> s"""$unit ${range.head}-${range.last}/$size"""
  def ContentRange(unit: String, size: Long): (String, String) =
    ContentRange -> s"""$unit */$size"""
  def ContentRange(unit: String, range: Range): (String, String) =
    ContentRange -> s"""$unit ${range.head}-${range.last}/*"""
  def ETag(etag: scuff.web.ETag): (String, String) = ETag -> etag.headerString
  def Age(age: Int): (String, String) = Age -> age.toString
  // Retry-After accepts either a delay in whole seconds or an absolute HTTP date.
  def RetryAfter(dur: FiniteDuration): (String, String) = RetryAfter(dur.toSeconds)
  def RetryAfter(dur: java.time.Duration): (String, String) = RetryAfter(dur.toMillis / 1000)
  def RetryAfter(seconds: Long): (String, String) = RetryAfter -> seconds.toString
  def RetryAfter(date: ZonedDateTime): (String, String) = RetryAfter -> RFC_1123(date)
  def RetryAfter(date: java.util.Date): (String, String) = RetryAfter -> RFC_1123(date)
  def RetryAfter(date: LocalDateTime): (String, String) = RetryAfter -> RFC_1123(date)
  def RetryAfter(date: OffsetDateTime): (String, String) = RetryAfter -> RFC_1123(date)
}
| nilskp/scuff | src/main/scala/scuff/web/HttpHeaders.scala | Scala | mit | 3,724 |
package org.jetbrains.plugins.scala
package debugger.evaluation.evaluator
import com.intellij.debugger.engine.evaluation.expression.{Modifier, Evaluator}
import com.intellij.debugger.engine.evaluation.EvaluationContextImpl
/**
* Nikolay.Tropin
* 7/24/13
*/
/**
 * Evaluates a block of statements in sequence and yields the value of the
 * last one, or the VM's void mirror when the block is empty.
 */
class ScalaBlockExpressionEvaluator(statements: Seq[Evaluator]) extends Evaluator {

  def evaluate(context: EvaluationContextImpl): AnyRef = {
    // mirrorOf() with no argument produces the debuggee VM's "void" value,
    // which is the result of an empty block.
    val voidValue: AnyRef = context.getSuspendContext.getDebugProcess.getVirtualMachineProxy.mirrorOf()
    // Every statement is evaluated in order; only the final value survives.
    statements.map(_.evaluate(context)).lastOption.getOrElse(voidValue)
  }

  def getModifier: Modifier = null
}
| consulo/consulo-scala | src/org/jetbrains/plugins/scala/debugger/evaluation/evaluator/ScalaBlockExpressionEvaluator.scala | Scala | apache-2.0 | 612 |
package com.twitter.finagle
import com.twitter.util.{Witness, Var}
import java.net.{InetSocketAddress, SocketAddress}
import org.scalatest.FunSuite
class NameTest extends FunSuite {
  test("Name.fromGroup") {
    val g = Group.mutable[SocketAddress]()
    val n = Name.fromGroup(g)

    // Observe every address change through a Witness; starts out Pending
    // because the group is empty.
    var addr: Addr = Addr.Pending
    n.addr.changes.register(Witness({ addr = _: Addr }))
    assert(addr == Addr.Pending)

    // Inet addresses bind; the bound set mirrors the group contents.
    val set = Set[SocketAddress](new InetSocketAddress(0), new InetSocketAddress(1))
    g() = set

    val Addr.Bound(s2, r) = addr
    assert(s2.collect { case Address.Inet(ia, _) => ia } == set)
    assert(r.isEmpty)

    // A non-Inet SocketAddress cannot be converted and must fail the Addr.
    g() = Set(new SocketAddress {})
    val Addr.Failed(e) = addr
    assert(e.isInstanceOf[IllegalArgumentException])
  }

  test("Name.Bound maintains equality as per 'id'") {
    // Equality is keyed purely on the id token, never on the Var identity.
    val id1, id2 = new {}
    val a1, a2 = Var(Addr.Pending)

    assert(Name.Bound(a1, id1) == Name.Bound(a2, id1))
    assert(Name.Bound(a1, id1) != Name.Bound(a1, id2))

    // It sucks that this is not symmetric, oh well.
    assert(Name.Bound(a1, id1) == id1)
    assert(Name.Bound(a1, id1) != id2)
  }

  test("Name.all maintains equality") {
    // Name.all over the same set of names compares equal; a different set doesn't.
    val names = Seq.fill(10) { Name.Bound.singleton(Var(Addr.Pending)) }.toSet
    assert(Name.all(names) == Name.all(names))
    assert(Name.all(names) != Name.all(names drop 1))
  }
}
| luciferous/finagle | finagle-core/src/test/scala/com/twitter/finagle/NameTest.scala | Scala | apache-2.0 | 1,344 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import java.io.File
import java.net.URI
import java.nio.charset.StandardCharsets
import java.sql.{Date, Timestamp}
import java.util.{Locale, Set}
import com.google.common.io.Files
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.{SparkException, TestUtils}
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.{EliminateSubqueryAliases, FunctionRegistry}
import org.apache.spark.sql.catalyst.catalog.{CatalogTableType, CatalogUtils, HiveTableRelation}
import org.apache.spark.sql.catalyst.parser.ParseException
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, SubqueryAlias}
import org.apache.spark.sql.execution.command.LoadDataCommand
import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.hive.{HiveExternalCatalog, HiveUtils}
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SQLTestUtils
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.CalendarInterval
// Fixture row types for the tests below. Declared `final`: case classes should
// never be extended (inheriting from a case class breaks equals/hashCode).
final case class Nested1(f1: Nested2)
final case class Nested2(f2: Nested3)
final case class Nested3(f3: Int)

final case class NestedArray2(b: Seq[Int])
final case class NestedArray1(a: NestedArray2)

// Row type for the partitioned-parquet self-join test.
// `month` appears to encode year and sequence as yyyyM (e.g. 20151) — inferred
// from the test data, confirm against the fixture rows.
final case class Order(
    id: Int,
    make: String,
    `type`: String,
    price: Int,
    pdate: String,
    customer: String,
    city: String,
    state: String,
    month: Int)
/**
* A collection of hive query tests where we generate the answers ourselves instead of depending on
* Hive to generate them (in contrast to HiveQuerySuite). Often this is because the query is
* valid, but Hive currently cannot execute it.
*/
class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
import hiveContext._
import spark.implicits._
test("query global temp view") {
val df = Seq(1).toDF("i1")
df.createGlobalTempView("tbl1")
val global_temp_db = spark.conf.get("spark.sql.globalTempDatabase")
checkAnswer(spark.sql(s"select * from ${global_temp_db}.tbl1"), Row(1))
spark.sql(s"drop view ${global_temp_db}.tbl1")
}
test("non-existent global temp view") {
val global_temp_db = spark.conf.get("spark.sql.globalTempDatabase")
val message = intercept[AnalysisException] {
spark.sql(s"select * from ${global_temp_db}.nonexistentview")
}.getMessage
assert(message.contains("Table or view not found"))
}
test("script") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
assume(TestUtils.testCommandAvailable("echo | sed"))
val scriptFilePath = getTestResourcePath("test_script.sh")
val df = Seq(("x1", "y1", "z1"), ("x2", "y2", "z2")).toDF("c1", "c2", "c3")
df.createOrReplaceTempView("script_table")
val query1 = sql(
s"""
|SELECT col1 FROM (from(SELECT c1, c2, c3 FROM script_table) tempt_table
|REDUCE c1, c2, c3 USING 'bash $scriptFilePath' AS
|(col1 STRING, col2 STRING)) script_test_table""".stripMargin)
checkAnswer(query1, Row("x1_y1") :: Row("x2_y2") :: Nil)
}
test("SPARK-6835: udtf in lateral view") {
val df = Seq((1, 1)).toDF("c1", "c2")
df.createOrReplaceTempView("table1")
val query = sql("SELECT c1, v FROM table1 LATERAL VIEW stack(3, 1, c1 + 1, c1 + 2) d AS v")
checkAnswer(query, Row(1, 1) :: Row(1, 2) :: Row(1, 3) :: Nil)
}
test("SPARK-13651: generator outputs shouldn't be resolved from its child's output") {
withTempView("src") {
Seq(("id1", "value1")).toDF("key", "value").createOrReplaceTempView("src")
val query =
sql("SELECT genoutput.* FROM src " +
"LATERAL VIEW explode(map('key1', 100, 'key2', 200)) genoutput AS key, value")
checkAnswer(query, Row("key1", 100) :: Row("key2", 200) :: Nil)
}
}
test("SPARK-6851: Self-joined converted parquet tables") {
val orders = Seq(
Order(1, "Atlas", "MTB", 234, "2015-01-07", "John D", "Pacifica", "CA", 20151),
Order(3, "Swift", "MTB", 285, "2015-01-17", "John S", "Redwood City", "CA", 20151),
Order(4, "Atlas", "Hybrid", 303, "2015-01-23", "Jones S", "San Mateo", "CA", 20151),
Order(7, "Next", "MTB", 356, "2015-01-04", "Jane D", "Daly City", "CA", 20151),
Order(10, "Next", "YFlikr", 187, "2015-01-09", "John D", "Fremont", "CA", 20151),
Order(11, "Swift", "YFlikr", 187, "2015-01-23", "John D", "Hayward", "CA", 20151),
Order(2, "Next", "Hybrid", 324, "2015-02-03", "Jane D", "Daly City", "CA", 20152),
Order(5, "Next", "Street", 187, "2015-02-08", "John D", "Fremont", "CA", 20152),
Order(6, "Atlas", "Street", 154, "2015-02-09", "John D", "Pacifica", "CA", 20152),
Order(8, "Swift", "Hybrid", 485, "2015-02-19", "John S", "Redwood City", "CA", 20152),
Order(9, "Atlas", "Split", 303, "2015-02-28", "Jones S", "San Mateo", "CA", 20152))
val orderUpdates = Seq(
Order(1, "Atlas", "MTB", 434, "2015-01-07", "John D", "Pacifica", "CA", 20151),
Order(11, "Swift", "YFlikr", 137, "2015-01-23", "John D", "Hayward", "CA", 20151))
orders.toDF.createOrReplaceTempView("orders1")
orderUpdates.toDF.createOrReplaceTempView("orderupdates1")
withTable("orders", "orderupdates") {
sql(
"""CREATE TABLE orders(
| id INT,
| make String,
| type String,
| price INT,
| pdate String,
| customer String,
| city String)
|PARTITIONED BY (state STRING, month INT)
|STORED AS PARQUET
""".stripMargin)
sql(
"""CREATE TABLE orderupdates(
| id INT,
| make String,
| type String,
| price INT,
| pdate String,
| customer String,
| city String)
|PARTITIONED BY (state STRING, month INT)
|STORED AS PARQUET
""".stripMargin)
sql("set hive.exec.dynamic.partition.mode=nonstrict")
sql("INSERT INTO TABLE orders PARTITION(state, month) SELECT * FROM orders1")
sql("INSERT INTO TABLE orderupdates PARTITION(state, month) SELECT * FROM orderupdates1")
checkAnswer(
sql(
"""
|select orders.state, orders.month
|from orders
|join (
| select distinct orders.state,orders.month
| from orders
| join orderupdates
| on orderupdates.id = orders.id) ao
| on ao.state = orders.state and ao.month = orders.month
""".stripMargin),
(1 to 6).map(_ => Row("CA", 20151)))
}
}
test("show functions") {
val allBuiltinFunctions = FunctionRegistry.builtin.listFunction().map(_.unquotedString)
val allFunctions = sql("SHOW functions").collect().map(r => r(0))
allBuiltinFunctions.foreach { f =>
assert(allFunctions.contains(f))
}
withTempDatabase { db =>
def createFunction(names: Seq[String]): Unit = {
names.foreach { name =>
sql(
s"""
|CREATE TEMPORARY FUNCTION $name
|AS '${classOf[PairUDF].getName}'
""".stripMargin)
}
}
def dropFunction(names: Seq[String]): Unit = {
names.foreach { name =>
sql(s"DROP TEMPORARY FUNCTION $name")
}
}
createFunction(Seq("temp_abs", "temp_weekofyear", "temp_sha", "temp_sha1", "temp_sha2"))
checkAnswer(sql("SHOW functions temp_abs"), Row("temp_abs"))
checkAnswer(sql("SHOW functions 'temp_abs'"), Row("temp_abs"))
checkAnswer(sql(s"SHOW functions $db.temp_abs"), Row("temp_abs"))
checkAnswer(sql(s"SHOW functions `$db`.`temp_abs`"), Row("temp_abs"))
checkAnswer(sql(s"SHOW functions `$db`.`temp_abs`"), Row("temp_abs"))
checkAnswer(sql("SHOW functions `a function doens't exist`"), Nil)
checkAnswer(sql("SHOW functions `temp_weekofyea*`"), Row("temp_weekofyear"))
// this probably will failed if we add more function with `sha` prefixing.
checkAnswer(
sql("SHOW functions `temp_sha*`"),
List(Row("temp_sha"), Row("temp_sha1"), Row("temp_sha2")))
// Test '|' for alternation.
checkAnswer(
sql("SHOW functions 'temp_sha*|temp_weekofyea*'"),
List(Row("temp_sha"), Row("temp_sha1"), Row("temp_sha2"), Row("temp_weekofyear")))
dropFunction(Seq("temp_abs", "temp_weekofyear", "temp_sha", "temp_sha1", "temp_sha2"))
}
}
test("describe functions - built-in functions") {
checkKeywordsExist(sql("describe function extended upper"),
"Function: upper",
"Class: org.apache.spark.sql.catalyst.expressions.Upper",
"Usage: upper(str) - Returns `str` with all characters changed to uppercase",
"Extended Usage:",
"Examples:",
"> SELECT upper('SparkSql');",
"SPARKSQL")
checkKeywordsExist(sql("describe functioN Upper"),
"Function: upper",
"Class: org.apache.spark.sql.catalyst.expressions.Upper",
"Usage: upper(str) - Returns `str` with all characters changed to uppercase")
checkKeywordsNotExist(sql("describe functioN Upper"),
"Extended Usage")
checkKeywordsExist(sql("describe functioN abcadf"),
"Function: abcadf not found.")
checkKeywordsExist(sql("describe functioN `~`"),
"Function: ~",
"Class: org.apache.spark.sql.catalyst.expressions.BitwiseNot",
"Usage: ~ expr - Returns the result of bitwise NOT of `expr`.")
// Hard coded describe functions
checkKeywordsExist(sql("describe function `<>`"),
"Function: <>",
"Usage: expr1 <> expr2 - Returns true if `expr1` is not equal to `expr2`")
checkKeywordsExist(sql("describe function `!=`"),
"Function: !=",
"Usage: expr1 != expr2 - Returns true if `expr1` is not equal to `expr2`")
checkKeywordsExist(sql("describe function `between`"),
"Function: between",
"Usage: expr1 [NOT] BETWEEN expr2 AND expr3 - " +
"evaluate if `expr1` is [not] in between `expr2` and `expr3`")
checkKeywordsExist(sql("describe function `case`"),
"Function: case",
"Usage: CASE expr1 WHEN expr2 THEN expr3 " +
"[WHEN expr4 THEN expr5]* [ELSE expr6] END - " +
"When `expr1` = `expr2`, returns `expr3`; " +
"when `expr1` = `expr4`, return `expr5`; else return `expr6`")
}
test("describe functions - user defined functions") {
withUserDefinedFunction("udtf_count" -> false) {
sql(
s"""
|CREATE FUNCTION udtf_count
|AS 'org.apache.spark.sql.hive.execution.GenericUDTFCount2'
|USING JAR '${hiveContext.getHiveFile("TestUDTF.jar").toURI}'
""".stripMargin)
checkKeywordsExist(sql("describe function udtf_count"),
"Function: default.udtf_count",
"Class: org.apache.spark.sql.hive.execution.GenericUDTFCount2",
"Usage: N/A")
checkAnswer(
sql("SELECT udtf_count(a) FROM (SELECT 1 AS a FROM src LIMIT 3) t"),
Row(3) :: Row(3) :: Nil)
checkKeywordsExist(sql("describe function udtf_count"),
"Function: default.udtf_count",
"Class: org.apache.spark.sql.hive.execution.GenericUDTFCount2",
"Usage: N/A")
}
}
test("describe functions - temporary user defined functions") {
withUserDefinedFunction("udtf_count_temp" -> true) {
sql(
s"""
|CREATE TEMPORARY FUNCTION udtf_count_temp
|AS 'org.apache.spark.sql.hive.execution.GenericUDTFCount2'
|USING JAR '${hiveContext.getHiveFile("TestUDTF.jar").toURI}'
""".stripMargin)
checkKeywordsExist(sql("describe function udtf_count_temp"),
"Function: udtf_count_temp",
"Class: org.apache.spark.sql.hive.execution.GenericUDTFCount2",
"Usage: N/A")
checkAnswer(
sql("SELECT udtf_count_temp(a) FROM (SELECT 1 AS a FROM src LIMIT 3) t"),
Row(3) :: Row(3) :: Nil)
checkKeywordsExist(sql("describe function udtf_count_temp"),
"Function: udtf_count_temp",
"Class: org.apache.spark.sql.hive.execution.GenericUDTFCount2",
"Usage: N/A")
}
}
test("SPARK-5371: union with null and sum") {
val df = Seq((1, 1)).toDF("c1", "c2")
df.createOrReplaceTempView("table1")
val query = sql(
"""
|SELECT
| MIN(c1),
| MIN(c2)
|FROM (
| SELECT
| SUM(c1) c1,
| NULL c2
| FROM table1
| UNION ALL
| SELECT
| NULL c1,
| SUM(c2) c2
| FROM table1
|) a
""".stripMargin)
checkAnswer(query, Row(1, 1) :: Nil)
}
test("CTAS with WITH clause") {
val df = Seq((1, 1)).toDF("c1", "c2")
df.createOrReplaceTempView("table1")
withTable("with_table1") {
sql(
"""
|CREATE TABLE with_table1 AS
|WITH T AS (
| SELECT *
| FROM table1
|)
|SELECT *
|FROM T
""".stripMargin)
val query = sql("SELECT * FROM with_table1")
checkAnswer(query, Row(1, 1) :: Nil)
}
}
test("explode nested Field") {
Seq(NestedArray1(NestedArray2(Seq(1, 2, 3)))).toDF.createOrReplaceTempView("nestedArray")
checkAnswer(
sql("SELECT ints FROM nestedArray LATERAL VIEW explode(a.b) a AS ints"),
Row(1) :: Row(2) :: Row(3) :: Nil)
checkAnswer(
sql("SELECT `ints` FROM nestedArray LATERAL VIEW explode(a.b) `a` AS `ints`"),
Row(1) :: Row(2) :: Row(3) :: Nil)
checkAnswer(
sql("SELECT `a`.`ints` FROM nestedArray LATERAL VIEW explode(a.b) `a` AS `ints`"),
Row(1) :: Row(2) :: Row(3) :: Nil)
checkAnswer(
sql(
"""
|SELECT `weird``tab`.`weird``col`
|FROM nestedArray
|LATERAL VIEW explode(a.b) `weird``tab` AS `weird``col`
""".stripMargin),
Row(1) :: Row(2) :: Row(3) :: Nil)
}
test("SPARK-4512 Fix attribute reference resolution error when using SORT BY") {
checkAnswer(
sql("SELECT * FROM (SELECT key + key AS a FROM src SORT BY value) t ORDER BY t.a"),
sql("SELECT key + key as a FROM src ORDER BY a").collect().toSeq
)
}
  /**
   * Asserts that `tableName` resolves to the expected kind of relation.
   *
   * @param tableName             table to inspect
   * @param isDataSourceTable     true => expect a data-source table (HadoopFsRelation);
   *                              false => expect a native Hive table (HiveTableRelation)
   * @param format                expected provider (data source) or file format keyword
   *                              contained in the Hive input/output format and serde
   * @param userSpecifiedLocation when set, the table must live at this path and be EXTERNAL;
   *                              when None it must be MANAGED
   */
  def checkRelation(
      tableName: String,
      isDataSourceTable: Boolean,
      format: String,
      userSpecifiedLocation: Option[String] = None): Unit = {
    var relation: LogicalPlan = null
    // Disable metastore conversion so a genuine Hive table is not silently
    // rewritten into a HadoopFsRelation before we inspect it.
    withSQLConf(
      HiveUtils.CONVERT_METASTORE_PARQUET.key -> "false",
      HiveUtils.CONVERT_METASTORE_ORC.key -> "false") {
      relation = EliminateSubqueryAliases(spark.table(tableName).queryExecution.analyzed)
    }
    val catalogTable =
      sessionState.catalog.getTableMetadata(TableIdentifier(tableName))
    relation match {
      case LogicalRelation(r: HadoopFsRelation, _, _, _) =>
        if (!isDataSourceTable) {
          fail(
            s"${classOf[HiveTableRelation].getCanonicalName} is expected, but found " +
              s"${HadoopFsRelation.getClass.getCanonicalName}.")
        }
        userSpecifiedLocation match {
          case Some(location) =>
            assert(r.options("path") === location)
          case None => // OK.
        }
        assert(catalogTable.provider.get === format)

      case r: HiveTableRelation =>
        if (isDataSourceTable) {
          fail(
            s"${HadoopFsRelation.getClass.getCanonicalName} is expected, but found " +
              s"${classOf[HiveTableRelation].getCanonicalName}.")
        }
        userSpecifiedLocation match {
          case Some(location) =>
            assert(r.tableMeta.location === CatalogUtils.stringToURI(location))
          case None => // OK.
        }
        // Also make sure that the format and serde are as desired.
        assert(catalogTable.storage.inputFormat.get.toLowerCase(Locale.ROOT).contains(format))
        assert(catalogTable.storage.outputFormat.get.toLowerCase(Locale.ROOT).contains(format))
        val serde = catalogTable.storage.serde.get
        // Serde class names don't literally contain "sequence"/"text"/"rcfile",
        // so those formats map to their known serde implementations instead.
        format match {
          case "sequence" | "text" => assert(serde.contains("LazySimpleSerDe"))
          case "rcfile" => assert(serde.contains("LazyBinaryColumnarSerDe"))
          case _ => assert(serde.toLowerCase(Locale.ROOT).contains(format))
        }
    }

    // When a user-specified location is defined, the table type needs to be EXTERNAL.
    val actualTableType = catalogTable.tableType
    userSpecifiedLocation match {
      case Some(location) =>
        assert(actualTableType === CatalogTableType.EXTERNAL)
      case None =>
        assert(actualTableType === CatalogTableType.MANAGED)
    }
  }
test("CTAS without serde without location") {
withSQLConf(SQLConf.CONVERT_CTAS.key -> "true") {
val defaultDataSource = sessionState.conf.defaultDataSourceName
withTable("ctas1") {
sql("CREATE TABLE ctas1 AS SELECT key k, value FROM src ORDER BY k, value")
sql("CREATE TABLE IF NOT EXISTS ctas1 AS SELECT key k, value FROM src ORDER BY k, value")
val message = intercept[AnalysisException] {
sql("CREATE TABLE ctas1 AS SELECT key k, value FROM src ORDER BY k, value")
}.getMessage
assert(message.contains("already exists"))
checkRelation("ctas1", isDataSourceTable = true, defaultDataSource)
}
// Specifying database name for query can be converted to data source write path
// is not allowed right now.
withTable("ctas1") {
sql("CREATE TABLE default.ctas1 AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation("ctas1", isDataSourceTable = true, defaultDataSource)
}
withTable("ctas1") {
sql("CREATE TABLE ctas1 stored as textfile" +
" AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation("ctas1", isDataSourceTable = false, "text")
}
withTable("ctas1") {
sql("CREATE TABLE ctas1 stored as sequencefile" +
" AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation("ctas1", isDataSourceTable = false, "sequence")
}
withTable("ctas1") {
sql("CREATE TABLE ctas1 stored as rcfile AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation("ctas1", isDataSourceTable = false, "rcfile")
}
withTable("ctas1") {
sql("CREATE TABLE ctas1 stored as orc AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation("ctas1", isDataSourceTable = false, "orc")
}
withTable("ctas1") {
sql(
"""
|CREATE TABLE ctas1 stored as parquet
|AS SELECT key k, value FROM src ORDER BY k, value
""".stripMargin)
checkRelation("ctas1", isDataSourceTable = false, "parquet")
}
}
}
test("CTAS with default fileformat") {
val table = "ctas1"
val ctas = s"CREATE TABLE IF NOT EXISTS $table SELECT key k, value FROM src"
Seq("orc", "parquet").foreach { dataSourceFormat =>
withSQLConf(
SQLConf.CONVERT_CTAS.key -> "true",
SQLConf.DEFAULT_DATA_SOURCE_NAME.key -> dataSourceFormat,
"hive.default.fileformat" -> "textfile") {
withTable(table) {
sql(ctas)
// The default datasource file format is controlled by `spark.sql.sources.default`.
// This testcase verifies that setting `hive.default.fileformat` has no impact on
// the target table's fileformat in case of CTAS.
checkRelation(tableName = table, isDataSourceTable = true, format = dataSourceFormat)
}
}
}
}
test("CTAS without serde with location") {
withSQLConf(SQLConf.CONVERT_CTAS.key -> "true") {
withTempDir { dir =>
val defaultDataSource = sessionState.conf.defaultDataSourceName
val tempLocation = dir.toURI.getPath.stripSuffix("/")
withTable("ctas1") {
sql(s"CREATE TABLE ctas1 LOCATION 'file:$tempLocation/c1'" +
" AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation(
"ctas1", isDataSourceTable = true, defaultDataSource, Some(s"file:$tempLocation/c1"))
}
withTable("ctas1") {
sql(s"CREATE TABLE ctas1 LOCATION 'file:$tempLocation/c2'" +
" AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation(
"ctas1", isDataSourceTable = true, defaultDataSource, Some(s"file:$tempLocation/c2"))
}
withTable("ctas1") {
sql(s"CREATE TABLE ctas1 stored as textfile LOCATION 'file:$tempLocation/c3'" +
" AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation(
"ctas1", isDataSourceTable = false, "text", Some(s"file:$tempLocation/c3"))
}
withTable("ctas1") {
sql(s"CREATE TABLE ctas1 stored as sequenceFile LOCATION 'file:$tempLocation/c4'" +
" AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation(
"ctas1", isDataSourceTable = false, "sequence", Some(s"file:$tempLocation/c4"))
}
withTable("ctas1") {
sql(s"CREATE TABLE ctas1 stored as rcfile LOCATION 'file:$tempLocation/c5'" +
" AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation(
"ctas1", isDataSourceTable = false, "rcfile", Some(s"file:$tempLocation/c5"))
}
}
}
}
test("CTAS with serde") {
withTable("ctas1", "ctas2", "ctas3", "ctas4", "ctas5") {
sql("CREATE TABLE ctas1 AS SELECT key k, value FROM src ORDER BY k, value")
sql(
"""CREATE TABLE ctas2
| ROW FORMAT SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"
| WITH SERDEPROPERTIES("serde_p1"="p1","serde_p2"="p2")
| STORED AS RCFile
| TBLPROPERTIES("tbl_p1"="p11", "tbl_p2"="p22")
| AS
| SELECT key, value
| FROM src
| ORDER BY key, value""".stripMargin)
val storageCtas2 = spark.sessionState.catalog.
getTableMetadata(TableIdentifier("ctas2")).storage
assert(storageCtas2.inputFormat == Some("org.apache.hadoop.hive.ql.io.RCFileInputFormat"))
assert(storageCtas2.outputFormat == Some("org.apache.hadoop.hive.ql.io.RCFileOutputFormat"))
assert(storageCtas2.serde == Some("org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"))
sql(
"""CREATE TABLE ctas3
| ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\012'
| STORED AS textfile AS
| SELECT key, value
| FROM src
| ORDER BY key, value""".stripMargin)
// the table schema may like (key: integer, value: string)
sql(
"""CREATE TABLE IF NOT EXISTS ctas4 AS
| SELECT 1 AS key, value FROM src LIMIT 1""".stripMargin)
// do nothing cause the table ctas4 already existed.
sql(
"""CREATE TABLE IF NOT EXISTS ctas4 AS
| SELECT key, value FROM src ORDER BY key, value""".stripMargin)
checkAnswer(
sql("SELECT k, value FROM ctas1 ORDER BY k, value"),
sql("SELECT key, value FROM src ORDER BY key, value"))
checkAnswer(
sql("SELECT key, value FROM ctas2 ORDER BY key, value"),
sql(
"""
SELECT key, value
FROM src
ORDER BY key, value"""))
checkAnswer(
sql("SELECT key, value FROM ctas3 ORDER BY key, value"),
sql(
"""
SELECT key, value
FROM src
ORDER BY key, value"""))
intercept[AnalysisException] {
sql(
"""CREATE TABLE ctas4 AS
| SELECT key, value FROM src ORDER BY key, value""".stripMargin)
}
checkAnswer(
sql("SELECT key, value FROM ctas4 ORDER BY key, value"),
sql("SELECT key, value FROM ctas4 LIMIT 1").collect().toSeq)
sql(
"""CREATE TABLE ctas5
| STORED AS parquet AS
| SELECT key, value
| FROM src
| ORDER BY key, value""".stripMargin)
val storageCtas5 = spark.sessionState.catalog.
getTableMetadata(TableIdentifier("ctas5")).storage
assert(storageCtas5.inputFormat ==
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"))
assert(storageCtas5.outputFormat ==
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"))
assert(storageCtas5.serde ==
Some("org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"))
// use the Hive SerDe for parquet tables
withSQLConf(HiveUtils.CONVERT_METASTORE_PARQUET.key -> "false") {
checkAnswer(
sql("SELECT key, value FROM ctas5 ORDER BY key, value"),
sql("SELECT key, value FROM src ORDER BY key, value"))
}
}
}
// Verifies CTAS restrictions: a user-specified schema is rejected, and partition
// columns of a CTAS target may not carry explicit data types.
test("specifying the column list for CTAS") {
withTempView("mytable1") {
Seq((1, "111111"), (2, "222222")).toDF("key", "value").createOrReplaceTempView("mytable1")
withTable("gen__tmp") {
// Plain CTAS with column aliases works and preserves the data.
sql("create table gen__tmp as select key as a, value as b from mytable1")
checkAnswer(
sql("SELECT a, b from gen__tmp"),
sql("select key, value from mytable1").collect())
}
withTable("gen__tmp") {
// Specifying a schema together with CTAS must fail analysis.
val e = intercept[AnalysisException] {
sql("create table gen__tmp(a int, b string) as select key, value from mytable1")
}.getMessage
assert(e.contains("Schema may not be specified in a Create Table As Select (CTAS)"))
}
withTable("gen__tmp") {
// Partition columns with an explicit data type are likewise rejected for CTAS.
val e = intercept[AnalysisException] {
sql(
"""
|CREATE TABLE gen__tmp
|PARTITIONED BY (key string)
|AS SELECT key, value FROM mytable1
""".stripMargin)
}.getMessage
assert(e.contains("Create Partitioned Table As Select cannot specify data type for " +
"the partition columns of the target table"))
}
}
}
// Checks ${hiveconf:...} variable substitution, including toggling it off and on
// via spark.sql.variable.substitute.
test("command substitution") {
sql("set tbl=src")
checkAnswer(
sql("SELECT key FROM ${hiveconf:tbl} ORDER BY key, value limit 1"),
sql("SELECT key FROM src ORDER BY key, value limit 1").collect().toSeq)
sql("set spark.sql.variable.substitute=false") // disable the substitution
sql("set tbl2=src")
// With substitution disabled, the literal "${hiveconf:tbl2}" is not a valid table name.
intercept[Exception] {
sql("SELECT key FROM ${hiveconf:tbl2} ORDER BY key, value limit 1").collect()
}
sql("set spark.sql.variable.substitute=true") // enable the substitution
checkAnswer(
sql("SELECT key FROM ${hiveconf:tbl2} ORDER BY key, value limit 1"),
sql("SELECT key FROM src ORDER BY key, value limit 1").collect().toSeq)
}
// ORDER BY may reference a column that is not in the SELECT list.
test("ordering not in select") {
checkAnswer(
sql("SELECT key FROM src ORDER BY value"),
sql("SELECT key FROM (SELECT key, value FROM src ORDER BY value) a").collect().toSeq)
}
// ORDER BY may reference a grouping column that is not in the aggregate's output.
test("ordering not in agg") {
checkAnswer(
sql("SELECT key FROM src GROUP BY key, value ORDER BY value"),
sql("""
SELECT key
FROM (
SELECT key, value
FROM src
GROUP BY key, value
ORDER BY value) a""").collect().toSeq)
}
// Reads a doubly-nested struct field and round-trips nested data through CTAS;
// also checks that CTAS from a nonexistent source fails analysis.
test("double nested data") {
withTable("test_ctas_1234") {
sparkContext.parallelize(Nested1(Nested2(Nested3(1))) :: Nil)
.toDF().createOrReplaceTempView("nested")
checkAnswer(
sql("SELECT f1.f2.f3 FROM nested"),
Row(1))
sql("CREATE TABLE test_ctas_1234 AS SELECT * from nested")
checkAnswer(
sql("SELECT * FROM test_ctas_1234"),
sql("SELECT * FROM nested").collect().toSeq)
// CTAS whose source relation does not exist must fail analysis.
intercept[AnalysisException] {
sql("CREATE TABLE test_ctas_1234 AS SELECT * from notexists").collect()
}
}
}
// Basic CTAS sanity check: the created table holds exactly the selected rows.
test("test CTAS") {
  // BUG FIX: the block previously registered "test_ctas_1234" with withTable while
  // actually creating "test_ctas_123", so the created table was never dropped and
  // leaked into subsequent tests. Register the table that is really created.
  withTable("test_ctas_123") {
    sql("CREATE TABLE test_ctas_123 AS SELECT key, value FROM src")
    checkAnswer(
      sql("SELECT key, value FROM test_ctas_123 ORDER BY key"),
      sql("SELECT key, value FROM src ORDER BY key").collect().toSeq)
  }
}
// SPARK-4825: CTAS over a join aggregate must produce the same result as running
// the join aggregate directly.
test("SPARK-4825 save join to table") {
withTable("test1", "test2", "test") {
val testData = sparkContext.parallelize(1 to 10).map(i => TestData(i, i.toString)).toDF()
sql("CREATE TABLE test1 (key INT, value STRING)")
testData.write.mode(SaveMode.Append).insertInto("test1")
sql("CREATE TABLE test2 (key INT, value STRING)")
// test2 receives the data twice so the join below is not 1:1.
testData.write.mode(SaveMode.Append).insertInto("test2")
testData.write.mode(SaveMode.Append).insertInto("test2")
sql("CREATE TABLE test AS SELECT COUNT(a.value) FROM test1 a JOIN test2 b ON a.key = b.key")
checkAnswer(
table("test"),
sql("SELECT COUNT(a.value) FROM test1 a JOIN test2 b ON a.key = b.key").collect().toSeq)
}
}
// SPARK-3708: backticks in column aliases must resolve correctly.
test("SPARK-3708 Backticks aren't handled correctly is aliases") {
checkAnswer(
sql("SELECT k FROM (SELECT `key` AS `k` FROM src) a"),
sql("SELECT `key` FROM src").collect().toSeq)
}
// SPARK-3834: backticks in subquery aliases must resolve correctly.
test("SPARK-3834 Backticks not correctly handled in subquery aliases") {
checkAnswer(
sql("SELECT a.key FROM (SELECT key FROM src) `a`"),
sql("SELECT `key` FROM src").collect().toSeq)
}
// SPARK-3814: bitwise AND, OR, XOR and NOT operators in SQL expressions.
test("SPARK-3814 Support Bitwise & operator") {
checkAnswer(
sql("SELECT case when 1&1=1 then 1 else 0 end FROM src"),
sql("SELECT 1 FROM src").collect().toSeq)
}
test("SPARK-3814 Support Bitwise | operator") {
checkAnswer(
sql("SELECT case when 1|0=1 then 1 else 0 end FROM src"),
sql("SELECT 1 FROM src").collect().toSeq)
}
test("SPARK-3814 Support Bitwise ^ operator") {
checkAnswer(
sql("SELECT case when 1^0=1 then 1 else 0 end FROM src"),
sql("SELECT 1 FROM src").collect().toSeq)
}
test("SPARK-3814 Support Bitwise ~ operator") {
checkAnswer(
// ~1 == -2 in two's complement.
sql("SELECT case when ~1=-2 then 1 else 0 end FROM src"),
sql("SELECT 1 FROM src").collect().toSeq)
}
// SPARK-4154: NOT BETWEEN must work in both Spark SQL and HQL.
// The src fixture's keys lie in [0, 500), hence the equivalent range below.
test("SPARK-4154 Query does not work if it has 'not between' in Spark SQL and HQL") {
checkAnswer(sql("SELECT key FROM src WHERE key not between 0 and 10 order by key"),
sql("SELECT key FROM src WHERE key between 11 and 500 order by key").collect().toSeq)
}
// SPARK-2554: sum(distinct key) grouped by key degenerates to the distinct keys.
test("SPARK-2554 SumDistinct partial aggregation") {
checkAnswer(sql("SELECT sum( distinct key) FROM src group by key order by key"),
sql("SELECT distinct key FROM src order by key").collect().toSeq)
}
// SPARK-4963: sampling over mutable rows must not leak rows that violate the
// source predicate (key % 2 = 0 means no odd keys can appear).
test("SPARK-4963 DataFrame sample on mutable row return wrong result") {
sql("SELECT * FROM src WHERE key % 2 = 0")
.sample(withReplacement = false, fraction = 0.3)
.createOrReplaceTempView("sampled")
// Repeat because sampling is nondeterministic per evaluation.
(1 to 10).foreach { i =>
checkAnswer(
sql("SELECT * FROM sampled WHERE key % 2 = 1"),
Seq.empty[Row])
}
}
// SPARK-4699: identifier resolution is case-insensitive by default.
test("SPARK-4699 SparkSession with Hive Support should be case insensitive by default") {
checkAnswer(
sql("SELECT KEY FROM Src ORDER BY value"),
sql("SELECT key FROM src ORDER BY value").collect().toSeq)
}
// SPARK-5284: inserting a struct whose inner struct/array/map fields are all null
// into a Hive table must not throw NPE, and the nulls must round-trip.
test("SPARK-5284 Insert into Hive throws NPE when a inner complex type field has a null value") {
val schema = StructType(
StructField("s",
StructType(
StructField("innerStruct", StructType(StructField("s1", StringType, true) :: Nil)) ::
StructField("innerArray", ArrayType(IntegerType), true) ::
StructField("innerMap", MapType(StringType, IntegerType)) :: Nil), true) :: Nil)
// A single row whose three inner complex fields are all null.
val row = Row(Row(null, null, null))
val rowRdd = sparkContext.parallelize(row :: Nil)
spark.createDataFrame(rowRdd, schema).createOrReplaceTempView("testTable")
sql(
"""CREATE TABLE nullValuesInInnerComplexTypes
| (s struct<innerStruct: struct<s1:string>,
| innerArray:array<int>,
| innerMap: map<string, int>>)
""".stripMargin).collect()
sql(
"""
|INSERT OVERWRITE TABLE nullValuesInInnerComplexTypes
|SELECT * FROM testTable
""".stripMargin)
checkAnswer(
sql("SELECT * FROM nullValuesInInnerComplexTypes"),
Row(Row(null, null, null))
)
sql("DROP TABLE nullValuesInInnerComplexTypes")
dropTempTable("testTable")
}
// SPARK-4296: grouping expressions that embed Hive UDFs (concat, year) must match
// the identical expressions in the SELECT list.
test("SPARK-4296 Grouping field with Hive UDF as sub expression") {
val ds = Seq("""{"a": "str", "b":"1", "c":"1970-01-01 00:00:00"}""").toDS()
read.json(ds).createOrReplaceTempView("data")
checkAnswer(
sql("SELECT concat(a, '-', b), year(c) FROM data GROUP BY concat(a, '-', b), year(c)"),
Row("str-1", 1970))
dropTempTable("data")
read.json(ds).createOrReplaceTempView("data")
// A grouping expression with arithmetic on top of the UDF result.
checkAnswer(sql("SELECT year(c) + 1 FROM data GROUP BY year(c) + 1"), Row(1971))
dropTempTable("data")
}
// A UDTF (explode) with a single alias in the projection must be resolvable,
// and its output column must be addressable on the resulting DataFrame.
test("resolve udtf in projection #1") {
val ds = (1 to 5).map(i => s"""{"a":[$i, ${i + 1}]}""").toDS()
read.json(ds).createOrReplaceTempView("data")
val df = sql("SELECT explode(a) AS val FROM data")
// Resolution check only: df("val") throws if the column was not resolved.
val col = df("val")
}
// explode over a map yields two columns; the alias count must match exactly.
test("resolve udtf in projection #2") {
val ds = (1 to 2).map(i => s"""{"a":[$i, ${i + 1}]}""").toDS()
read.json(ds).createOrReplaceTempView("data")
checkAnswer(sql("SELECT explode(map(1, 1)) FROM data LIMIT 1"), Row(1, 1) :: Nil)
checkAnswer(sql("SELECT explode(map(1, 1)) as (k1, k2) FROM data LIMIT 1"), Row(1, 1) :: Nil)
// Too few aliases for a two-column UDTF.
intercept[AnalysisException] {
sql("SELECT explode(map(1, 1)) as k1 FROM data LIMIT 1")
}
// Too many aliases for a two-column UDTF.
intercept[AnalysisException] {
sql("SELECT explode(map(1, 1)) as (k1, k2, k3) FROM data LIMIT 1")
}
}
// TGF with non-TGF in project is allowed in Spark SQL, but not in Hive
test("TGF with non-TGF in projection") {
val ds = Seq("""{"a": "1", "b":"1"}""").toDS()
read.json(ds).createOrReplaceTempView("data")
// The exploded map columns (k1, k2) are followed by the plain columns a, b.
checkAnswer(
sql("SELECT explode(map(a, b)) as (k1, k2), a, b FROM data"),
Row("1", "1", "1", "1") :: Nil)
}
// Regression test for SPARK-5875: a Project containing AggregateExpressions or
// Generators must not be considered resolved, otherwise PreInsertionCasts runs
// before ImplicitGenerate and produces an invalid plan.
test("logical.Project should not be resolved if it contains aggregates or generators") {
  val ds = (1 to 5).map(i => s"""{"a":[$i, ${i + 1}]}""").toDS()
  read.json(ds).createOrReplaceTempView("data")
  withSQLConf(SQLConf.CONVERT_CTAS.key -> "false") {
    sql("CREATE TABLE explodeTest (key bigInt)")
    // The target must be a genuine Hive metastore relation for the scenario to apply.
    table("explodeTest").queryExecution.analyzed match {
      case SubqueryAlias(_, r: HiveTableRelation) => // OK
      case _ =>
        fail("To correctly test the fix of SPARK-5875, explodeTest should be a MetastoreRelation")
    }
    // Idiom fix: the original used an `s"..."` interpolator with no interpolation;
    // a plain string literal is equivalent and clearer.
    sql("INSERT OVERWRITE TABLE explodeTest SELECT explode(a) AS val FROM data")
    // Each source row [i, i+1] explodes into two rows.
    checkAnswer(
      sql("SELECT key from explodeTest"),
      (1 to 5).flatMap(i => Row(i) :: Row(i + 1) :: Nil)
    )
    sql("DROP TABLE explodeTest")
    dropTempTable("data")
  }
}
// SPARK-6618: concurrent create/lookup/drop of distinct tables from multiple
// threads must not corrupt the catalog. Each thread uses its own table name.
test("sanity test for SPARK-6618") {
val threads: Seq[Thread] = (1 to 10).map { i =>
new Thread("test-thread-" + i) {
override def run(): Unit = {
val tableName = s"SPARK_6618_table_$i"
sql(s"CREATE TABLE $tableName (col1 string)")
sessionState.catalog.lookupRelation(TableIdentifier(tableName))
table(tableName)
tables()
sql(s"DROP TABLE $tableName")
}
}
}
threads.foreach(_.start())
// 10s timeout per thread keeps the suite from hanging if a thread deadlocks.
threads.foreach(_.join(10000))
}
// SPARK-5203: UNION ALL across operands with different decimal precision must analyze.
test("SPARK-5203 union with different decimal precision") {
Seq.empty[(java.math.BigDecimal, java.math.BigDecimal)]
.toDF("d1", "d2")
.select($"d1".cast(DecimalType(10, 5)).as("d"))
.createOrReplaceTempView("dn")
// Only analysis is exercised; the query is never executed.
sql("select d from dn union all select d * 2 from dn")
.queryExecution.analyzed
}
// TRANSFORM (*) must expand the star to all columns before piping through the script.
test("Star Expansion - script transform") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val data = (1 to 100000).map { i => (i, i, i) }
data.toDF("d1", "d2", "d3").createOrReplaceTempView("script_trans")
assert(100000 === sql("SELECT TRANSFORM (*) USING 'cat' FROM script_trans").count())
}
// Script transform via 'cat': all rows pass through on stdout.
test("test script transform for stdout") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val data = (1 to 100000).map { i => (i, i, i) }
data.toDF("d1", "d2", "d3").createOrReplaceTempView("script_trans")
assert(100000 ===
sql("SELECT TRANSFORM (d1, d2, d3) USING 'cat' AS (a,b,c) FROM script_trans").count())
}
// Redirecting the script's output to stderr yields zero rows on stdout.
test("test script transform for stderr") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val data = (1 to 100000).map { i => (i, i, i) }
data.toDF("d1", "d2", "d3").createOrReplaceTempView("script_trans")
assert(0 ===
sql("SELECT TRANSFORM (d1, d2, d3) USING 'cat 1>&2' AS (a,b,c) FROM script_trans").count())
}
// Output column types declared on TRANSFORM (int here) must be applied, so
// arithmetic on the transformed column works.
test("test script transform data type") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val data = (1 to 5).map { i => (i, i) }
data.toDF("key", "value").createOrReplaceTempView("test")
checkAnswer(
sql("""FROM
|(FROM test SELECT TRANSFORM(key, value) USING 'cat' AS (`thing1` int, thing2 string)) t
|SELECT thing1 + 1
""".stripMargin), (2 to 6).map(i => Row(i)))
}
// ORDER BY columns that are dropped by a Generate (explode) must still be
// resolvable in all join/outer combinations of the generator.
test("Sorting columns are not in Generate") {
withTempView("data") {
spark.range(1, 5)
.select(array($"id", $"id" + 1).as("a"), $"id".as("b"), (lit(10) - $"id").as("c"))
.createOrReplaceTempView("data")
// case 1: missing sort columns are resolvable if join is true
checkAnswer(
sql("SELECT explode(a) AS val, b FROM data WHERE b < 2 order by val, c"),
Row(1, 1) :: Row(2, 1) :: Nil)
// case 2: missing sort columns are resolvable if join is false
checkAnswer(
sql("SELECT explode(a) AS val FROM data order by val, c"),
Seq(1, 2, 2, 3, 3, 4, 4, 5).map(i => Row(i)))
// case 3: missing sort columns are resolvable if join is true and outer is true
checkAnswer(
sql(
"""
|SELECT C.val, b FROM data LATERAL VIEW OUTER explode(a) C as val
|where b < 2 order by c, val, b
""".stripMargin),
Row(1, 1) :: Row(2, 1) :: Nil)
}
}
// CASE <key> WHEN form (simple case expression) over a keyed column.
test("test case key when") {
(1 to 5).map(i => (i, i.toString)).toDF("k", "v").createOrReplaceTempView("t")
checkAnswer(
sql("SELECT CASE k WHEN 2 THEN 22 WHEN 4 THEN 44 ELSE 0 END, v FROM t"),
Row(0, "1") :: Row(22, "2") :: Row(0, "3") :: Row(44, "4") :: Row(0, "5") :: Nil)
}
// SPARK-7269: grouping analysis must match select/group-by expressions
// case-insensitively, and still reject genuinely mismatched expressions.
test("SPARK-7269 Check analysis failed in case in-sensitive") {
Seq(1, 2, 3).map { i =>
(i.toString, i.toString)
}.toDF("key", "value").createOrReplaceTempView("df_analysis")
sql("SELECT kEy from df_analysis group by key").collect()
sql("SELECT kEy+3 from df_analysis group by key+3").collect()
sql("SELECT kEy+3, a.kEy, A.kEy from df_analysis A group by key").collect()
sql("SELECT cast(kEy+1 as Int) from df_analysis A group by cast(key+1 as int)").collect()
sql("SELECT cast(kEy+1 as Int) from df_analysis A group by key+1").collect()
sql("SELECT 2 from df_analysis A group by key+1").collect()
// key+1 is not part of the group-by expression key+3, so this must fail.
intercept[AnalysisException] {
sql("SELECT kEy+1 from df_analysis group by key+3")
}
intercept[AnalysisException] {
sql("SELECT cast(key+2 as Int) from df_analysis A group by cast(key+1 as int)")
}
}
// Casting a long numeric string to BIGINT must not lose precision.
test("Cast STRING to BIGINT") {
checkAnswer(sql("SELECT CAST('775983671874188101' as BIGINT)"), Row(775983671874188101L))
}
// Dynamic partition insert with DATE and DECIMAL partition values; cleans up
// tables and restores strict mode in the finally block.
test("dynamic partition value test") {
try {
sql("set hive.exec.dynamic.partition.mode=nonstrict")
// date
sql("drop table if exists dynparttest1")
sql("create table dynparttest1 (value int) partitioned by (pdate date)")
sql(
"""
|insert into table dynparttest1 partition(pdate)
| select count(*), cast('2015-05-21' as date) as pdate from src
""".stripMargin)
checkAnswer(
sql("select * from dynparttest1"),
Seq(Row(500, java.sql.Date.valueOf("2015-05-21"))))
// decimal
sql("drop table if exists dynparttest2")
sql("create table dynparttest2 (value int) partitioned by (pdec decimal(5, 1))")
sql(
"""
|insert into table dynparttest2 partition(pdec)
| select count(*), cast('100.12' as decimal(5, 1)) as pdec from src
""".stripMargin)
// '100.12' is rounded to scale 1 (100.1) by the decimal(5, 1) cast.
checkAnswer(
sql("select * from dynparttest2"),
Seq(Row(500, new java.math.BigDecimal("100.1"))))
} finally {
sql("drop table if exists dynparttest1")
sql("drop table if exists dynparttest2")
sql("set hive.exec.dynamic.partition.mode=strict")
}
}
// SPARK-8306: a jar added from one thread must be visible when registering a
// function from that same (non-test) thread. Any failure is captured in a
// volatile slot and reported back on the test thread.
test("Call add jar in a different thread (SPARK-8306)") {
  @volatile var error: Option[Throwable] = None
  val thread = new Thread {
    // Style/consistency fix: explicit `: Unit =` instead of the deprecated
    // procedure syntax `def run() { ... }`, matching the other thread bodies
    // in this suite.
    override def run(): Unit = {
      // To make sure this test works, this jar should not be loaded in another place.
      sql(
        s"ADD JAR ${hiveContext.getHiveFile("hive-contrib-0.13.1.jar").getCanonicalPath()}")
      try {
        sql(
          """
            |CREATE TEMPORARY FUNCTION example_max
            |AS 'org.apache.hadoop.hive.contrib.udaf.example.UDAFExampleMax'
          """.stripMargin)
      } catch {
        // Deliberately catches Throwable: any failure on this worker thread must be
        // surfaced on the test thread rather than silently killing the thread.
        case throwable: Throwable =>
          error = Some(throwable)
      }
    }
  }
  thread.start()
  thread.join()
  error match {
    case Some(throwable) =>
      fail("CREATE TEMPORARY FUNCTION should not fail.", throwable)
    case None => // OK
  }
}
// SPARK-6785: comparing a DATE (from truncating a timestamp) against the original
// timestamp must not report the date as greater.
test("SPARK-6785: HiveQuerySuite - Date comparison test 2") {
checkAnswer(
sql("SELECT CAST(CAST(0 AS timestamp) AS date) > CAST(0 AS timestamp) FROM src LIMIT 1"),
Row(false))
}
// SPARK-6785: casts between timestamp, date, and string; expected values assume
// the test session's local time zone is PST (see the comment below).
test("SPARK-6785: HiveQuerySuite - Date cast") {
// new Date(0) == 1970-01-01 00:00:00.0 GMT == 1969-12-31 16:00:00.0 PST
checkAnswer(
sql(
"""
| SELECT
| CAST(CAST(0 AS timestamp) AS date),
| CAST(CAST(CAST(0 AS timestamp) AS date) AS string),
| CAST(0 AS timestamp),
| CAST(CAST(0 AS timestamp) AS string),
| CAST(CAST(CAST('1970-01-01 23:00:00' AS timestamp) AS date) AS timestamp)
| FROM src LIMIT 1
""".stripMargin),
Row(
Date.valueOf("1969-12-31"),
String.valueOf("1969-12-31"),
Timestamp.valueOf("1969-12-31 16:00:00"),
String.valueOf("1969-12-31 16:00:00"),
Timestamp.valueOf("1970-01-01 00:00:00")))
}
// SPARK-8588: IN-list type coercion must not fire before the concat arguments are
// coerced, otherwise the predicate comparison is done on the wrong type.
test("SPARK-8588 HiveTypeCoercion.inConversion fires too early") {
val df =
createDataFrame(Seq((1, "2014-01-01"), (2, "2015-01-01"), (3, "2016-01-01")))
df.toDF("id", "datef").createOrReplaceTempView("test_SPARK8588")
checkAnswer(
sql(
"""
|select id, concat(year(datef))
|from test_SPARK8588 where concat(year(datef), ' year') in ('2015 year', '2014 year')
""".stripMargin),
Row(1, "2014") :: Row(2, "2015") :: Nil
)
dropTempTable("test_SPARK8588")
}
// SPARK-9371: column names containing dots and special characters must be
// addressable when quoted with backticks.
test("SPARK-9371: fix the support for special chars in column names for hive context") {
val ds = Seq("""{"a": {"c.b": 1}, "b.$q": [{"a@!.q": 1}], "q.w": {"w.i&": [1]}}""").toDS()
read.json(ds).createOrReplaceTempView("t")
checkAnswer(sql("SELECT a.`c.b`, `b.$q`[0].`a@!.q`, `q.w`.`w.i&`[0] FROM t"), Row(1, 1, 1))
}
// Hive-style interval literals (year-to-month, day-to-second, and single-unit
// forms) must parse into CalendarIntervalType literals with normalized units.
test("Convert hive interval term into Literal of CalendarIntervalType") {
checkAnswer(sql("select interval '10-9' year to month"),
Row(CalendarInterval.fromString("interval 10 years 9 months")))
checkAnswer(sql("select interval '20 15:40:32.99899999' day to second"),
Row(CalendarInterval.fromString("interval 2 weeks 6 days 15 hours 40 minutes " +
"32 seconds 99 milliseconds 899 microseconds")))
checkAnswer(sql("select interval '30' year"),
Row(CalendarInterval.fromString("interval 30 years")))
checkAnswer(sql("select interval '25' month"),
Row(CalendarInterval.fromString("interval 25 months")))
// Negative values and unit overflow are normalized (e.g. -100 days, 40 hours).
checkAnswer(sql("select interval '-100' day"),
Row(CalendarInterval.fromString("interval -14 weeks -2 days")))
checkAnswer(sql("select interval '40' hour"),
Row(CalendarInterval.fromString("interval 1 days 16 hours")))
checkAnswer(sql("select interval '80' minute"),
Row(CalendarInterval.fromString("interval 1 hour 20 minutes")))
checkAnswer(sql("select interval '299.889987299' second"),
Row(CalendarInterval.fromString(
"interval 4 minutes 59 seconds 889 milliseconds 987 microseconds")))
}
// A temporary view name may not be qualified with a database ("db.t"), but the
// same text is fine as a single backquoted identifier (`db.t`).
test("specifying database name for a temporary view is not allowed") {
withTempPath { dir =>
withTempView("db.t") {
val path = dir.toURI.toString
val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("num", "str")
df
.write
.format("parquet")
.save(path)
// We don't support creating a temporary table while specifying a database
intercept[AnalysisException] {
spark.sql(
s"""
|CREATE TEMPORARY VIEW db.t
|USING parquet
|OPTIONS (
| path '$path'
|)
""".stripMargin)
}
// If you use backticks to quote the name then it's OK.
spark.sql(
s"""
|CREATE TEMPORARY VIEW `db.t`
|USING parquet
|OPTIONS (
| path '$path'
|)
""".stripMargin)
checkAnswer(spark.table("`db.t`"), df)
}
}
}
// SPARK-10593: two lateral views that both produce a column named "json" must not
// clash; the qualified reference insideLayer2.json must resolve to the inner one.
test("SPARK-10593 same column names in lateral view") {
val df = spark.sql(
"""
|select
|insideLayer2.json as a2
|from (select '{"layer1": {"layer2": "text inside layer 2"}}' json) test
|lateral view json_tuple(json, 'layer1') insideLayer1 as json
|lateral view json_tuple(insideLayer1.json, 'layer2') insideLayer2 as json
""".stripMargin
)
checkAnswer(df, Row("text inside layer 2") :: Nil)
}
// Disabled script-transformation tests (require a local python + test script).
ignore("SPARK-10310: " +
"script transformation using default input/output SerDe and record reader/writer") {
spark
.range(5)
.selectExpr("id AS a", "id AS b")
.createOrReplaceTempView("test")
val scriptFilePath = getTestResourcePath("data")
// The script appends '#' to each field; only column c is projected out.
checkAnswer(
sql(
s"""FROM(
| FROM test SELECT TRANSFORM(a, b)
| USING 'python $scriptFilePath/scripts/test_transform.py "\\t"'
| AS (c STRING, d STRING)
|) t
|SELECT c
""".stripMargin),
(0 until 5).map(i => Row(i + "#")))
}
ignore("SPARK-10310: script transformation using LazySimpleSerDe") {
spark
.range(5)
.selectExpr("id AS a", "id AS b")
.createOrReplaceTempView("test")
val scriptFilePath = getTestResourcePath("data")
// Both input and output rows go through LazySimpleSerDe with '|' as field delimiter.
val df = sql(
s"""FROM test
|SELECT TRANSFORM(a, b)
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
|WITH SERDEPROPERTIES('field.delim' = '|')
|USING 'python $scriptFilePath/scripts/test_transform.py "|"'
|AS (c STRING, d STRING)
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
|WITH SERDEPROPERTIES('field.delim' = '|')
""".stripMargin)
checkAnswer(df, (0 until 5).map(i => Row(i + "#", i + "#")))
}
// SPARK-10741: HAVING and ORDER BY over aggregates must work on a Hive parquet table.
test("SPARK-10741: Sort on Aggregate using parquet") {
withTable("test10741") {
withTempView("src") {
Seq("a" -> 5, "a" -> 9, "b" -> 6).toDF("c1", "c2").createOrReplaceTempView("src")
sql("CREATE TABLE test10741 STORED AS PARQUET AS SELECT * FROM src")
}
// HAVING on the aggregate plus ORDER BY on the grouping column.
checkAnswer(sql(
"""
|SELECT c1, AVG(c2) AS c_avg
|FROM test10741
|GROUP BY c1
|HAVING (AVG(c2) > 5) ORDER BY c1
""".stripMargin), Row("a", 7.0) :: Row("b", 6.0) :: Nil)
// ORDER BY on the aggregate expression itself.
checkAnswer(sql(
"""
|SELECT c1, AVG(c2) AS c_avg
|FROM test10741
|GROUP BY c1
|ORDER BY AVG(c2)
""".stripMargin), Row("b", 6.0) :: Row("a", 7.0) :: Nil)
}
}
// Querying files directly with `format`.`path` syntax; the format name is
// case-insensitive and fully-qualified provider class names also work.
test("run sql directly on files - parquet") {
val df = spark.range(100).toDF()
withTempPath(f => {
df.write.parquet(f.getCanonicalPath)
// data source type is case insensitive
checkAnswer(sql(s"select id from Parquet.`${f.getCanonicalPath}`"),
df)
checkAnswer(sql(s"select id from `org.apache.spark.sql.parquet`.`${f.getCanonicalPath}`"),
df)
checkAnswer(sql(s"select a.id from parquet.`${f.getCanonicalPath}` as a"),
df)
})
}
test("run sql directly on files - orc") {
val df = spark.range(100).toDF()
withTempPath(f => {
df.write.orc(f.getCanonicalPath)
// data source type is case insensitive
checkAnswer(sql(s"select id from ORC.`${f.getCanonicalPath}`"),
df)
checkAnswer(sql(s"select id from `org.apache.spark.sql.hive.orc`.`${f.getCanonicalPath}`"),
df)
checkAnswer(sql(s"select a.id from orc.`${f.getCanonicalPath}` as a"),
df)
})
}
test("run sql directly on files - csv") {
val df = spark.range(100).toDF()
withTempPath(f => {
df.write.csv(f.getCanonicalPath)
// data source type is case insensitive
// CSV without a header infers the default column name _c0 (read back as string).
checkAnswer(sql(s"select cast(_c0 as int) id from CSV.`${f.getCanonicalPath}`"),
df)
checkAnswer(
sql(s"select cast(_c0 as int) id from `com.databricks.spark.csv`.`${f.getCanonicalPath}`"),
df)
checkAnswer(sql(s"select cast(a._c0 as int) id from csv.`${f.getCanonicalPath}` as a"),
df)
})
}
test("run sql directly on files - json") {
val df = spark.range(100).toDF()
withTempPath(f => {
df.write.json(f.getCanonicalPath)
// data source type is case insensitive
checkAnswer(sql(s"select id from jsoN.`${f.getCanonicalPath}`"),
df)
checkAnswer(sql(s"select id from `org.apache.spark.sql.json`.`${f.getCanonicalPath}`"),
df)
checkAnswer(sql(s"select a.id from json.`${f.getCanonicalPath}` as a"),
df)
})
}
// "hive" is not a valid data source for direct-on-file queries; the error keeps
// the user's original (case-preserved) format name.
test("run sql directly on files - hive") {
withTempPath(f => {
spark.range(100).toDF.write.parquet(f.getCanonicalPath)
var e = intercept[AnalysisException] {
sql(s"select id from hive.`${f.getCanonicalPath}`")
}
assert(e.message.contains("Unsupported data source type for direct query on files: hive"))
// data source type is case insensitive
e = intercept[AnalysisException] {
sql(s"select id from HIVE.`${f.getCanonicalPath}`")
}
assert(e.message.contains("Unsupported data source type for direct query on files: HIVE"))
})
}
// SPARK-8976: ROLLUP results must be correct with both grouping_id() and the
// legacy grouping__id virtual column; a set bit marks a rolled-up (null) column.
test("SPARK-8976 Wrong Result for Rollup #1") {
Seq("grouping_id()", "grouping__id").foreach { gid =>
checkAnswer(sql(
s"SELECT count(*) AS cnt, key % 5, $gid FROM src GROUP BY key%5 WITH ROLLUP"),
Seq(
(113, 3, 0),
(91, 0, 0),
(500, null, 1),
(84, 1, 0),
(105, 2, 0),
(107, 4, 0)
).map(i => Row(i._1, i._2, i._3)))
}
}
// Same as #1 but with two grouping expressions and an ordered, limited result.
test("SPARK-8976 Wrong Result for Rollup #2") {
Seq("grouping_id()", "grouping__id").foreach { gid =>
checkAnswer(sql(
s"""
|SELECT count(*) AS cnt, key % 5 AS k1, key-5 AS k2, $gid AS k3
|FROM src GROUP BY key%5, key-5
|WITH ROLLUP ORDER BY cnt, k1, k2, k3 LIMIT 10
""".stripMargin),
Seq(
(1, 0, 5, 0),
(1, 0, 15, 0),
(1, 0, 25, 0),
(1, 0, 60, 0),
(1, 0, 75, 0),
(1, 0, 80, 0),
(1, 0, 100, 0),
(1, 0, 140, 0),
(1, 0, 145, 0),
(1, 0, 150, 0)
).map(i => Row(i._1, i._2, i._3, i._4)))
}
}
// Same as #2 but grouping over a derived subquery rather than the base table.
test("SPARK-8976 Wrong Result for Rollup #3") {
Seq("grouping_id()", "grouping__id").foreach { gid =>
checkAnswer(sql(
s"""
|SELECT count(*) AS cnt, key % 5 AS k1, key-5 AS k2, $gid AS k3
|FROM (SELECT key, key%2, key - 5 FROM src) t GROUP BY key%5, key-5
|WITH ROLLUP ORDER BY cnt, k1, k2, k3 LIMIT 10
""".stripMargin),
Seq(
(1, 0, 5, 0),
(1, 0, 15, 0),
(1, 0, 25, 0),
(1, 0, 60, 0),
(1, 0, 75, 0),
(1, 0, 80, 0),
(1, 0, 100, 0),
(1, 0, 140, 0),
(1, 0, 145, 0),
(1, 0, 150, 0)
).map(i => Row(i._1, i._2, i._3, i._4)))
}
}
// SPARK-8976: CUBE results with both grouping_id() and grouping__id; with a
// single grouping column the output matches the ROLLUP case.
test("SPARK-8976 Wrong Result for CUBE #1") {
Seq("grouping_id()", "grouping__id").foreach { gid =>
checkAnswer(sql(
s"SELECT count(*) AS cnt, key % 5, $gid FROM src GROUP BY key%5 WITH CUBE"),
Seq(
(113, 3, 0),
(91, 0, 0),
(500, null, 1),
(84, 1, 0),
(105, 2, 0),
(107, 4, 0)
).map(i => Row(i._1, i._2, i._3)))
}
}
// CUBE over two grouping expressions: gid 2 (binary 10) means k1 is rolled up.
test("SPARK-8976 Wrong Result for CUBE #2") {
Seq("grouping_id()", "grouping__id").foreach { gid =>
checkAnswer(sql(
s"""
|SELECT count(*) AS cnt, key % 5 AS k1, key-5 AS k2, $gid AS k3
|FROM (SELECT key, key%2, key - 5 FROM src) t GROUP BY key%5, key-5
|WITH CUBE ORDER BY cnt, k1, k2, k3 LIMIT 10
""".stripMargin),
Seq(
(1, null, -3, 2),
(1, null, -1, 2),
(1, null, 3, 2),
(1, null, 4, 2),
(1, null, 5, 2),
(1, null, 6, 2),
(1, null, 12, 2),
(1, null, 14, 2),
(1, null, 15, 2),
(1, null, 22, 2)
).map(i => Row(i._1, i._2, i._3, i._4)))
}
}
// Explicit GROUPING SETS over the same expressions yields the same rows as CUBE #2.
test("SPARK-8976 Wrong Result for GroupingSet") {
Seq("grouping_id()", "grouping__id").foreach { gid =>
checkAnswer(sql(
s"""
|SELECT count(*) AS cnt, key % 5 AS k1, key-5 AS k2, $gid AS k3
|FROM (SELECT key, key%2, key - 5 FROM src) t GROUP BY key%5, key-5
|GROUPING SETS (key%5, key-5) ORDER BY cnt, k1, k2, k3 LIMIT 10
""".stripMargin),
Seq(
(1, null, -3, 2),
(1, null, -1, 2),
(1, null, 3, 2),
(1, null, 4, 2),
(1, null, 5, 2),
(1, null, 6, 2),
(1, null, 12, 2),
(1, null, 14, 2),
(1, null, 15, 2),
(1, null, 22, 2)
).map(i => Row(i._1, i._2, i._3, i._4)))
}
}
// Disabled: mixed-case partition column names; see the SPARK-18167 flakiness note below.
ignore("SPARK-10562: partition by column with mixed case name") {
withTable("tbl10562") {
val df = Seq(2012 -> "a").toDF("Year", "val")
df.write.partitionBy("Year").saveAsTable("tbl10562")
// The partition column must be readable regardless of identifier case.
checkAnswer(sql("SELECT year FROM tbl10562"), Row(2012))
checkAnswer(sql("SELECT Year FROM tbl10562"), Row(2012))
checkAnswer(sql("SELECT yEAr FROM tbl10562"), Row(2012))
// TODO(ekl) this is causing test flakes [SPARK-18167], but we think the issue is derby specific
// checkAnswer(sql("SELECT val FROM tbl10562 WHERE Year > 2015"), Nil)
checkAnswer(sql("SELECT val FROM tbl10562 WHERE Year == 2012"), Row("a"))
}
}
// SPARK-11453: appending to a partitioned table preserves prior data, including
// when the partition column is given in a different identifier case ("I").
test("SPARK-11453: append data to partitioned table") {
withTable("tbl11453") {
Seq("1" -> "10", "2" -> "20").toDF("i", "j")
.write.partitionBy("i").saveAsTable("tbl11453")
Seq("3" -> "30").toDF("i", "j")
.write.mode(SaveMode.Append).partitionBy("i").saveAsTable("tbl11453")
checkAnswer(
spark.read.table("tbl11453").select("i", "j").orderBy("i"),
Row("1", "10") :: Row("2", "20") :: Row("3", "30") :: Nil)
// make sure case sensitivity is correct.
Seq("4" -> "40").toDF("i", "j")
.write.mode(SaveMode.Append).partitionBy("I").saveAsTable("tbl11453")
checkAnswer(
spark.read.table("tbl11453").select("i", "j").orderBy("i"),
Row("1", "10") :: Row("2", "20") :: Row("3", "30") :: Row("4", "40") :: Nil)
}
}
// SPARK-11590: json_tuple works in a lateral view (with and without aliases),
// in the project list, and mixed with other project expressions.
test("SPARK-11590: use native json_tuple in lateral view") {
checkAnswer(sql(
"""
|SELECT a, b
|FROM (SELECT '{"f1": "value1", "f2": 12}' json) test
|LATERAL VIEW json_tuple(json, 'f1', 'f2') jt AS a, b
""".stripMargin), Row("value1", "12"))
// we should use `c0`, `c1`... as the name of fields if no alias is provided, to follow hive.
checkAnswer(sql(
"""
|SELECT c0, c1
|FROM (SELECT '{"f1": "value1", "f2": 12}' json) test
|LATERAL VIEW json_tuple(json, 'f1', 'f2') jt
""".stripMargin), Row("value1", "12"))
// we can also use `json_tuple` in project list.
checkAnswer(sql(
"""
|SELECT json_tuple(json, 'f1', 'f2')
|FROM (SELECT '{"f1": "value1", "f2": 12}' json) test
""".stripMargin), Row("value1", "12"))
// we can also mix `json_tuple` with other project expressions.
checkAnswer(sql(
"""
|SELECT json_tuple(json, 'f1', 'f2'), 3.14, str
|FROM (SELECT '{"f1": "value1", "f2": 12}' json, 'hello' as str) test
""".stripMargin), Row("value1", "12", BigDecimal("3.14"), "hello"))
}
// Hive-style multi-insert (one FROM, two INSERT branches) where the second
// branch adds its own LATERAL VIEW; both targets must match the standalone queries.
test("multi-insert with lateral view") {
withTempView("source") {
spark.range(10)
.select(array($"id", $"id" + 1).as("arr"), $"id")
.createOrReplaceTempView("source")
withTable("dest1", "dest2") {
sql("CREATE TABLE dest1 (i INT)")
sql("CREATE TABLE dest2 (i INT)")
sql(
"""
|FROM source
|INSERT OVERWRITE TABLE dest1
|SELECT id
|WHERE id > 3
|INSERT OVERWRITE TABLE dest2
|select col LATERAL VIEW EXPLODE(arr) exp AS col
|WHERE col > 3
""".stripMargin)
checkAnswer(
spark.table("dest1"),
sql("SELECT id FROM source WHERE id > 3"))
checkAnswer(
spark.table("dest2"),
sql("SELECT col FROM source LATERAL VIEW EXPLODE(arr) exp AS col WHERE col > 3"))
}
}
}
test("derived from Hive query file: drop_database_removes_partition_dirs.q") {
// This test verifies that if a partition exists outside a table's current location when the
// database is dropped the partition's location is dropped as well.
sql("DROP database if exists test_database CASCADE")
sql("CREATE DATABASE test_database")
// Remember the current database so it can be restored at the end.
val previousCurrentDB = sessionState.catalog.getCurrentDatabase
sql("USE test_database")
sql("drop table if exists test_table")
val tempDir = System.getProperty("test.tmp.dir")
assert(tempDir != null, "TestHive should set test.tmp.dir.")
sql(
"""
|CREATE TABLE test_table (key int, value STRING)
|PARTITIONED BY (part STRING)
|STORED AS RCFILE
|LOCATION 'file:${system:test.tmp.dir}/drop_database_removes_partition_dirs_table'
""".stripMargin)
// The partition deliberately lives OUTSIDE the table's root directory (..._table2).
sql(
"""
|ALTER TABLE test_table ADD PARTITION (part = '1')
|LOCATION 'file:${system:test.tmp.dir}/drop_database_removes_partition_dirs_table2/part=1'
""".stripMargin)
sql(
"""
|INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
|SELECT * FROM default.src
""".stripMargin)
checkAnswer(
sql("select part, key, value from test_table"),
sql("select '1' as part, key, value from default.src")
)
val path = new Path(
new Path(s"file:$tempDir"),
"drop_database_removes_partition_dirs_table2")
val fs = path.getFileSystem(sparkContext.hadoopConfiguration)
// The partition dir is not empty.
assert(fs.listStatus(new Path(path, "part=1")).nonEmpty)
sql(s"USE $previousCurrentDB")
sql("DROP DATABASE test_database CASCADE")
// This table dir should not exist after we drop the entire database with the mode
// of CASCADE. This probably indicates a Hive bug, which returns the wrong table
// root location. So, the table's directory still there. We should change the condition
// to fs.exists(path) after we handle fs operations.
assert(
fs.exists(path),
"Thank you for making the changes of letting Spark SQL handle filesystem operations " +
"for DDL commands. Originally, Hive metastore does not delete the table root directory " +
"for this case. Now, please change this condition to !fs.exists(path).")
}
test("derived from Hive query file: drop_table_removes_partition_dirs.q") {
// This test verifies that if a partition exists outside the table's current location when the
// table is dropped the partition's location is dropped as well.
sql("drop table if exists test_table")
val tempDir = System.getProperty("test.tmp.dir")
assert(tempDir != null, "TestHive should set test.tmp.dir.")
sql(
"""
|CREATE TABLE test_table (key int, value STRING)
|PARTITIONED BY (part STRING)
|STORED AS RCFILE
|LOCATION 'file:${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2'
""".stripMargin)
sql(
"""
|ALTER TABLE test_table ADD PARTITION (part = '1')
|LOCATION 'file:${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2/part=1'
""".stripMargin)
sql(
"""
|INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
|SELECT * FROM default.src
""".stripMargin)
checkAnswer(
sql("select part, key, value from test_table"),
sql("select '1' as part, key, value from src")
)
val path = new Path(new Path(s"file:$tempDir"), "drop_table_removes_partition_dirs_table2")
val fs = path.getFileSystem(sparkContext.hadoopConfiguration)
// The partition dir is not empty.
assert(fs.listStatus(new Path(path, "part=1")).nonEmpty)
sql("drop table test_table")
// External-location data must survive the DROP TABLE.
assert(fs.exists(path), "This is an external table, so the data should not have been dropped")
}
// Projections over a five-way partitioned table: same order as the definition,
// reordered, and partial partition-column selections.
test("select partitioned table") {
val table = "table_with_partition"
withTable(table) {
sql(
s"""
|CREATE TABLE $table(c1 string)
|PARTITIONED BY (p1 string,p2 string,p3 string,p4 string,p5 string)
""".stripMargin)
sql(
s"""
|INSERT OVERWRITE TABLE $table
|PARTITION (p1='a',p2='b',p3='c',p4='d',p5='e')
|SELECT 'blarr'
""".stripMargin)
// project list is the same order of paritioning columns in table definition
checkAnswer(
sql(s"SELECT p1, p2, p3, p4, p5, c1 FROM $table"),
Row("a", "b", "c", "d", "e", "blarr") :: Nil)
// project list does not have the same order of paritioning columns in table definition
checkAnswer(
sql(s"SELECT p2, p3, p4, p1, p5, c1 FROM $table"),
Row("b", "c", "d", "a", "e", "blarr") :: Nil)
// project list contains partial partition columns in table definition
checkAnswer(
sql(s"SELECT p2, p1, p5, c1 FROM $table"),
Row("b", "a", "e", "blarr") :: Nil)
}
}
// SPARK-14981: bucket sort columns may only be ascending; a DESC direction in
// CLUSTERED BY ... SORTED BY must be rejected at parse time.
test("SPARK-14981: DESC not supported for sorting columns") {
  withTable("t") {
    val ddl =
      """CREATE TABLE t USING PARQUET
        |OPTIONS (PATH '/path/to/file')
        |CLUSTERED BY (a) SORTED BY (b DESC) INTO 2 BUCKETS
        |AS SELECT 1 AS a, 2 AS b
      """.stripMargin
    val parseError = intercept[ParseException](sql(ddl))
    assert(parseError.getMessage.contains("Column ordering must be ASC, was 'DESC'"))
  }
}
// Round-trips one row through DataFrameWriter.insertInto on a data-source
// (parquet) table that was created via SQL DDL.
test("insert into datasource table") {
  withTable("tbl") {
    sql("CREATE TABLE tbl(i INT, j STRING) USING parquet")
    val row = Seq(1 -> "a").toDF("i", "j")
    row.write.mode("overwrite").insertInto("tbl")
    checkAnswer(sql("SELECT * FROM tbl"), Row(1, "a"))
  }
}
// SPARK-15557: string operands in decimal arithmetic are promoted to decimal
// (both for literals and for string-typed columns).
test("spark-15557 promote string test") {
withTable("tbl") {
sql("CREATE TABLE tbl(c1 string, c2 string)")
sql("insert into tbl values ('3', '2.3')")
checkAnswer(
sql("select (cast (99 as decimal(19,6)) + cast('3' as decimal)) * cast('2.3' as decimal)"),
Row(204.0)
)
checkAnswer(
sql("select (cast(99 as decimal(19,6)) + '3') *'2.3' from tbl"),
Row(234.6)
)
checkAnswer(
sql("select (cast(99 as decimal(19,6)) + c1) * c2 from tbl"),
Row(234.6)
)
}
}
// SPARK-15752: with OPTIMIZER_METADATA_ONLY enabled, queries that touch only
// partition columns (distinct/group-by/max over partcol) should be answerable
// from catalog metadata alone. Exercised for both parquet and textfile tables.
test("SPARK-15752 optimize metadata only query for hive table") {
withSQLConf(SQLConf.OPTIMIZER_METADATA_ONLY.key -> "true") {
withTable("data_15752", "srcpart_15752", "srctext_15752") {
val df = Seq((1, "2"), (3, "4")).toDF("key", "value")
df.createOrReplaceTempView("data_15752")
sql(
"""
|CREATE TABLE srcpart_15752 (col1 INT, col2 STRING)
|PARTITIONED BY (partcol1 INT, partcol2 STRING) STORED AS parquet
""".stripMargin)
// Populate all four (partcol1, partcol2) partition combinations.
for (partcol1 <- Seq(0, 1); partcol2 <- Seq("a", "b")) {
sql(
s"""
|INSERT OVERWRITE TABLE srcpart_15752
|PARTITION (partcol1='$partcol1', partcol2='$partcol2')
|select key, value from data_15752
""".stripMargin)
}
checkAnswer(
sql("select partcol1 from srcpart_15752 group by partcol1"),
Row(0) :: Row(1) :: Nil)
checkAnswer(
sql("select partcol1 from srcpart_15752 where partcol1 = 1 group by partcol1"),
Row(1))
checkAnswer(
sql("select partcol1, count(distinct partcol2) from srcpart_15752 group by partcol1"),
Row(0, 2) :: Row(1, 2) :: Nil)
checkAnswer(
sql("select partcol1, count(distinct partcol2) from srcpart_15752 where partcol1 = 1 " +
"group by partcol1"),
Row(1, 2) :: Nil)
checkAnswer(sql("select distinct partcol1 from srcpart_15752"), Row(0) :: Row(1) :: Nil)
checkAnswer(sql("select distinct partcol1 from srcpart_15752 where partcol1 = 1"), Row(1))
checkAnswer(
sql("select distinct col from (select partcol1 + 1 as col from srcpart_15752 " +
"where partcol1 = 1) t"),
Row(2))
checkAnswer(sql("select distinct partcol1 from srcpart_15752 where partcol1 = 1"), Row(1))
checkAnswer(sql("select max(partcol1) from srcpart_15752"), Row(1))
checkAnswer(sql("select max(partcol1) from srcpart_15752 where partcol1 = 1"), Row(1))
checkAnswer(sql("select max(partcol1) from (select partcol1 from srcpart_15752) t"), Row(1))
checkAnswer(
sql("select max(col) from (select partcol1 + 1 as col from srcpart_15752 " +
"where partcol1 = 1) t"),
Row(2))
// Same assertions against a non-columnar (textfile) Hive table.
sql(
"""
|CREATE TABLE srctext_15752 (col1 INT, col2 STRING)
|PARTITIONED BY (partcol1 INT, partcol2 STRING) STORED AS textfile
""".stripMargin)
for (partcol1 <- Seq(0, 1); partcol2 <- Seq("a", "b")) {
sql(
s"""
|INSERT OVERWRITE TABLE srctext_15752
|PARTITION (partcol1='$partcol1', partcol2='$partcol2')
|select key, value from data_15752
""".stripMargin)
}
checkAnswer(
sql("select partcol1 from srctext_15752 group by partcol1"),
Row(0) :: Row(1) :: Nil)
checkAnswer(
sql("select partcol1 from srctext_15752 where partcol1 = 1 group by partcol1"),
Row(1))
checkAnswer(
sql("select partcol1, count(distinct partcol2) from srctext_15752 group by partcol1"),
Row(0, 2) :: Row(1, 2) :: Nil)
checkAnswer(
sql("select partcol1, count(distinct partcol2) from srctext_15752 where partcol1 = 1 " +
"group by partcol1"),
Row(1, 2) :: Nil)
checkAnswer(sql("select distinct partcol1 from srctext_15752"), Row(0) :: Row(1) :: Nil)
checkAnswer(sql("select distinct partcol1 from srctext_15752 where partcol1 = 1"), Row(1))
checkAnswer(
sql("select distinct col from (select partcol1 + 1 as col from srctext_15752 " +
"where partcol1 = 1) t"),
Row(2))
checkAnswer(sql("select max(partcol1) from srctext_15752"), Row(1))
checkAnswer(sql("select max(partcol1) from srctext_15752 where partcol1 = 1"), Row(1))
checkAnswer(sql("select max(partcol1) from (select partcol1 from srctext_15752) t"), Row(1))
checkAnswer(
sql("select max(col) from (select partcol1 + 1 as col from srctext_15752 " +
"where partcol1 = 1) t"),
Row(2))
}
}
}
// SPARK-17354: DATE/TIMESTAMP partition columns must round-trip through the
// Parquet vectorized reader.
test("SPARK-17354: Partitioning by dates/timestamps works with Parquet vectorized reader") {
  withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "true") {
    sql(
      """CREATE TABLE order(id INT)
        |PARTITIONED BY (pd DATE, pt TIMESTAMP)
        |STORED AS PARQUET
      """.stripMargin)
    try {
      sql("set hive.exec.dynamic.partition.mode=nonstrict")
      sql(
        """INSERT INTO TABLE order PARTITION(pd, pt)
          |SELECT 1 AS id, CAST('1990-02-24' AS DATE) AS pd, CAST('1990-02-24' AS TIMESTAMP) AS pt
        """.stripMargin)
      val actual = sql("SELECT * FROM order")
      val expected = sql(
        "SELECT 1 AS id, CAST('1990-02-24' AS DATE) AS pd, CAST('1990-02-24' AS TIMESTAMP) AS pt")
      checkAnswer(actual, expected)
    } finally {
      // Drop in a finally block so a failed assertion does not leak the table
      // into subsequent tests (the original only dropped on success).
      sql("DROP TABLE order")
    }
  }
}
// SPARK-17108: indexing a map keyed by BIGINT with an INT literal (and the
// other combinations below) must analyze without a type-comparison failure.
test("SPARK-17108: Fix BIGINT and INT comparison failure in spark sql") {
  withTable("t1", "t2", "t3") {
    val cases = Seq(
      "create table t1(a map<bigint, array<string>>)" ->
        "select * from t1 where a[1] is not null",
      "create table t2(a map<int, array<string>>)" ->
        "select * from t2 where a[1] is not null",
      "create table t3(a map<bigint, array<string>>)" ->
        "select * from t3 where a[1L] is not null")
    cases.foreach { case (ddl, query) =>
      sql(ddl)
      sql(query)
    }
  }
}
// SPARK-17796: LOAD DATA LOCAL INPATH accepts '*' wildcards in the file name,
// and reports a clear error when the wildcard matches nothing.
test("SPARK-17796 Support wildcard character in filename for LOAD DATA LOCAL INPATH") {
withTempDir { dir =>
val path = dir.toURI.toString.stripSuffix("/")
val dirPath = dir.getAbsoluteFile
// Three files matching part-r*, three non-matching part-s* files.
for (i <- 1 to 3) {
Files.write(s"$i", new File(dirPath, s"part-r-0000$i"), StandardCharsets.UTF_8)
}
for (i <- 5 to 7) {
Files.write(s"$i", new File(dirPath, s"part-s-0000$i"), StandardCharsets.UTF_8)
}
withTable("load_t") {
sql("CREATE TABLE load_t (a STRING)")
sql(s"LOAD DATA LOCAL INPATH '$path/*part-r*' INTO TABLE load_t")
checkAnswer(sql("SELECT * FROM load_t"), Seq(Row("1"), Row("2"), Row("3")))
val m = intercept[AnalysisException] {
sql("LOAD DATA LOCAL INPATH '/non-exist-folder/*part*' INTO TABLE load_t")
}.getMessage
assert(m.contains("LOAD DATA input path does not exist"))
}
}
}
// SPARK-23425: a space inside the file name must not be treated as a path
// separator or glob boundary by LOAD DATA LOCAL INPATH.
test("SPARK-23425 Test LOAD DATA LOCAL INPATH with space in file name") {
withTempDir { dir =>
val path = dir.toURI.toString.stripSuffix("/")
val dirPath = dir.getAbsoluteFile
for (i <- 1 to 3) {
Files.write(s"$i", new File(dirPath, s"part-r-0000 $i"), StandardCharsets.UTF_8)
}
withTable("load_t") {
sql("CREATE TABLE load_t (a STRING)")
sql(s"LOAD DATA LOCAL INPATH '$path/part-r-0000 1' INTO TABLE load_t")
checkAnswer(sql("SELECT * FROM load_t"), Seq(Row("1")))
}
}
}
// LOAD DATA LOCAL INPATH also accepts a '*' wildcard at directory level, and
// fails with a clear error when the directory glob matches nothing.
test("Support wildcard character in folderlevel for LOAD DATA LOCAL INPATH") {
  withTempDir { dir =>
    val path = dir.toURI.toString.stripSuffix("/")
    val dirPath = dir.getAbsoluteFile
    for (i <- 1 to 3) {
      Files.write(s"$i", new File(dirPath, s"part-r-0000$i"), StandardCharsets.UTF_8)
    }
    // The table created below is `load_t`; the original registered
    // `load_t_folder_wildcard` with withTable, so the table that was actually
    // created was never dropped. Register the real name so it is cleaned up.
    withTable("load_t") {
      sql("CREATE TABLE load_t (a STRING)")
      sql(s"LOAD DATA LOCAL INPATH '${
        path.substring(0, path.length - 1)
          .concat("*")
      }/' INTO TABLE load_t")
      checkAnswer(sql("SELECT * FROM load_t"), Seq(Row("1"), Row("2"), Row("3")))
      val m = intercept[AnalysisException] {
        sql(s"LOAD DATA LOCAL INPATH '${
          path.substring(0, path.length - 1).concat("_invalid_dir").concat("*")
        }/' INTO TABLE load_t")
      }.getMessage
      assert(m.contains("LOAD DATA input path does not exist"))
    }
  }
}
// SPARK-17796: the single-character wildcard '?' is honoured when it appears
// in the middle/end of a LOAD DATA LOCAL INPATH file name.
test("SPARK-17796 Support wildcard '?'char in middle as part of local file path") {
  withTempDir { dir =>
    val base = dir.toURI.toString.stripSuffix("/")
    val localDir = dir.getAbsoluteFile
    (1 to 3).foreach { i =>
      Files.write(s"$i", new File(localDir, s"part-r-0000$i"), StandardCharsets.UTF_8)
    }
    withTable("load_t1") {
      sql("CREATE TABLE load_t1 (a STRING)")
      sql(s"LOAD DATA LOCAL INPATH '$base/part-r-0000?' INTO TABLE load_t1")
      checkAnswer(sql("SELECT * FROM load_t1"), Seq(Row("1"), Row("2"), Row("3")))
    }
  }
}
// SPARK-17796: the '?' wildcard is also honoured at the start of the file name.
test("SPARK-17796 Support wildcard '?'char in start as part of local file path") {
withTempDir { dir =>
val path = dir.toURI.toString.stripSuffix("/")
val dirPath = dir.getAbsoluteFile
for (i <- 1 to 3) {
Files.write(s"$i", new File(dirPath, s"part-r-0000$i"), StandardCharsets.UTF_8)
}
withTable("load_t2") {
sql("CREATE TABLE load_t2 (a STRING)")
// '?art-r-00001' matches only part-r-00001.
sql(s"LOAD DATA LOCAL INPATH '$path/?art-r-00001' INTO TABLE load_t2")
checkAnswer(sql("SELECT * FROM load_t2"), Seq(Row("1")))
}
}
}
// SPARK-25738: LoadDataCommand.makeQualified must preserve the port of the
// default filesystem URI when qualifying a path.
test("SPARK-25738: defaultFs can have a port") {
val defaultURI = new URI("hdfs://fizz.buzz.com:8020")
val r = LoadDataCommand.makeQualified(defaultURI, new Path("/foo/bar"), new Path("/flim/flam"))
assert(r === new Path("hdfs://fizz.buzz.com:8020/flim/flam"))
}
// INSERT OVERWRITE into the same static partition replaces that partition's
// previous contents rather than appending to them.
test("Insert overwrite with partition") {
withTable("tableWithPartition") {
sql(
"""
|CREATE TABLE tableWithPartition (key int, value STRING)
|PARTITIONED BY (part STRING)
""".stripMargin)
sql(
"""
|INSERT OVERWRITE TABLE tableWithPartition PARTITION (part = '1')
|SELECT * FROM default.src
""".stripMargin)
checkAnswer(
sql("SELECT part, key, value FROM tableWithPartition"),
sql("SELECT '1' AS part, key, value FROM default.src")
)
// Overwrite the same partition; only the three new rows should remain.
sql(
"""
|INSERT OVERWRITE TABLE tableWithPartition PARTITION (part = '1')
|SELECT * FROM VALUES (1, "one"), (2, "two"), (3, null) AS data(key, value)
""".stripMargin)
checkAnswer(
sql("SELECT part, key, value FROM tableWithPartition"),
sql(
"""
|SELECT '1' AS part, key, value FROM VALUES
|(1, "one"), (2, "two"), (3, null) AS data(key, value)
""".stripMargin)
)
}
}
// SPARK-19292: with spark.sql.caseSensitive=false, partition-column filters
// must resolve case-insensitively on Hive tables (both SQL and DataFrame API).
test("SPARK-19292: filter with partition columns should be case-insensitive on Hive tables") {
  withTable("tbl") {
    withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
      sql("CREATE TABLE tbl(i int, j int) USING hive PARTITIONED BY (j)")
      sql("INSERT INTO tbl PARTITION(j=10) SELECT 1")
      val expected = Row(1, 10)
      checkAnswer(spark.table("tbl"), expected)
      checkAnswer(sql("SELECT i, j FROM tbl WHERE J=10"), expected)
      checkAnswer(spark.table("tbl").filter($"J" === 10), expected)
    }
  }
}
// Fully-qualified column references (db.table.column and db.table.*) must
// resolve against a Hive table, with and without qualifying the FROM clause.
test("column resolution scenarios with hive table") {
val currentDb = spark.catalog.currentDatabase
withTempDatabase { db1 =>
try {
spark.catalog.setCurrentDatabase(db1)
spark.sql("CREATE TABLE t1(i1 int) STORED AS parquet")
spark.sql("INSERT INTO t1 VALUES(1)")
checkAnswer(spark.sql(s"SELECT $db1.t1.i1 FROM t1"), Row(1))
checkAnswer(spark.sql(s"SELECT $db1.t1.i1 FROM $db1.t1"), Row(1))
checkAnswer(spark.sql(s"SELECT $db1.t1.* FROM $db1.t1"), Row(1))
} finally {
// Always restore the session's database for subsequent tests.
spark.catalog.setCurrentDatabase(currentDb)
}
}
}
// SPARK-17409: the CTAS query plan must be optimized exactly once; a second
// optimization pass would corrupt the ordinal-based GROUP BY below.
test("SPARK-17409: Do Not Optimize Query in CTAS (Hive Serde Table) More Than Once") {
withTable("bar") {
withTempView("foo") {
sql("select 0 as id").createOrReplaceTempView("foo")
// If we optimize the query in CTAS more than once, the following saveAsTable will fail
// with the error: `GROUP BY position 0 is not in select list (valid range is [1, 1])`
sql("SELECT * FROM foo group by id").toDF().write.format("hive").saveAsTable("bar")
checkAnswer(spark.table("bar"), Row(0) :: Nil)
val tableMetadata = spark.sessionState.catalog.getTableMetadata(TableIdentifier("bar"))
assert(tableMetadata.provider == Some("hive"), "the expected table is a Hive serde table")
}
}
}
// get_json_object's auto-generated alias contains a comma, which the Hive
// metastore cannot store as a column name; both write paths must fail clearly.
test("Auto alias construction of get_json_object") {
val df = Seq(("1", """{"f1": "value1", "f5": 5.23}""")).toDF("key", "jstring")
val expectedMsg = "Cannot create a table having a column whose name contains commas " +
"in Hive metastore. Table: `default`.`t`; Column: get_json_object(jstring, $.f1)"
withTable("t") {
val e = intercept[AnalysisException] {
df.select($"key", functions.get_json_object($"jstring", "$.f1"))
.write.format("hive").saveAsTable("t")
}.getMessage
assert(e.contains(expectedMsg))
}
// Same failure via the SQL CTAS path.
withTempView("tempView") {
withTable("t") {
df.createTempView("tempView")
val e = intercept[AnalysisException] {
sql("CREATE TABLE t AS SELECT key, get_json_object(jstring, '$.f1') FROM tempView")
}.getMessage
assert(e.contains(expectedMsg))
}
}
}
// SPARK-19912: partition values containing quotes must be escaped when pushed
// down to Hive metastore partition pruning.
test("SPARK-19912 String literals should be escaped for Hive metastore partition pruning") {
withTable("spark_19912") {
Seq(
(1, "p1", "q1"),
(2, "'", "q2"),
(3, "\\"", "q3"),
(4, "p1\\" and q=\\"q1", "q4")
).toDF("a", "p", "q").write.partitionBy("p", "q").saveAsTable("spark_19912")
val table = spark.table("spark_19912")
checkAnswer(table.filter($"p" === "'").select($"a"), Row(2))
checkAnswer(table.filter($"p" === "\\"").select($"a"), Row(3))
checkAnswer(table.filter($"p" === "p1\\" and q=\\"q1").select($"a"), Row(4))
}
}
// SPARK-21101: a Hive UDTF must override initialize(ObjectInspector[]); one
// that only implements the deprecated StructObjectInspector variant fails
// with an instructive message.
test("SPARK-21101 UDTF should override initialize(ObjectInspector[] args)") {
withUserDefinedFunction("udtf_stack1" -> true, "udtf_stack2" -> true) {
sql(
s"""
|CREATE TEMPORARY FUNCTION udtf_stack1
|AS 'org.apache.spark.sql.hive.execution.UDTFStack'
|USING JAR '${hiveContext.getHiveFile("SPARK-21101-1.0.jar").toURI}'
""".stripMargin)
val cnt =
sql("SELECT udtf_stack1(2, 'A', 10, date '2015-01-01', 'B', 20, date '2016-01-01')").count()
assert(cnt === 2)
sql(
s"""
|CREATE TEMPORARY FUNCTION udtf_stack2
|AS 'org.apache.spark.sql.hive.execution.UDTFStack2'
|USING JAR '${hiveContext.getHiveFile("SPARK-21101-1.0.jar").toURI}'
""".stripMargin)
val e = intercept[org.apache.spark.sql.AnalysisException] {
sql("SELECT udtf_stack2(2, 'A', 10, date '2015-01-01', 'B', 20, date '2016-01-01')")
}
assert(
e.getMessage.contains("public StructObjectInspector initialize(ObjectInspector[] args)"))
}
}
// SPARK-21721: repeated inserts must not grow FileSystem's deleteOnExit cache;
// entries for successfully removed staging paths have to be cleared.
test("SPARK-21721: Clear FileSystem deleterOnExit cache if path is successfully removed") {
val table = "test21721"
withTable(table) {
// Peek at FileSystem's private deleteOnExit set via reflection.
val deleteOnExitField = classOf[FileSystem].getDeclaredField("deleteOnExit")
deleteOnExitField.setAccessible(true)
val fs = FileSystem.get(spark.sessionState.newHadoopConf())
val setOfPath = deleteOnExitField.get(fs).asInstanceOf[Set[Path]]
val testData = sparkContext.parallelize(1 to 10).map(i => TestData(i, i.toString)).toDF()
sql(s"CREATE TABLE $table (key INT, value STRING)")
val pathSizeToDeleteOnExit = setOfPath.size()
// Eleven appends; the cache size must be unchanged afterwards.
(0 to 10).foreach(_ => testData.write.mode(SaveMode.Append).insertInto(table))
assert(setOfPath.size() == pathSizeToDeleteOnExit)
}
}
// SPARK-21912: column names containing characters ORC/Parquet cannot store
// must be rejected at CREATE/CTAS/ALTER time, for both native and
// hive-format write paths.
test("SPARK-21912 ORC/Parquet table should not create invalid column names") {
Seq(" ", ",", ";", "{", "}", "(", ")", "\\n", "\\t", "=").foreach { name =>
Seq("ORC", "PARQUET").foreach { source =>
withTable("t21912") {
val m = intercept[AnalysisException] {
sql(s"CREATE TABLE t21912(`col$name` INT) USING $source")
}.getMessage
assert(m.contains(s"contains invalid character(s)"))
val m2 = intercept[AnalysisException] {
sql(s"CREATE TABLE t21912 USING $source AS SELECT 1 `col$name`")
}.getMessage
assert(m2.contains(s"contains invalid character(s)"))
withSQLConf(HiveUtils.CONVERT_METASTORE_PARQUET.key -> "false") {
val m3 = intercept[AnalysisException] {
sql(s"CREATE TABLE t21912(`col$name` INT) USING hive OPTIONS (fileFormat '$source')")
}.getMessage
assert(m3.contains(s"contains invalid character(s)"))
}
// A valid table also rejects adding an invalid column later.
sql(s"CREATE TABLE t21912(`col` INT) USING $source")
val m4 = intercept[AnalysisException] {
sql(s"ALTER TABLE t21912 ADD COLUMNS(`col$name` INT)")
}.getMessage
assert(m4.contains(s"contains invalid character(s)"))
}
}
}
}
// SPARK-18355: after ALTER TABLE ADD COLUMNS on a Hive table (created and
// populated through the metastore client), reads must succeed for any
// projection order and return null for the new column, with and without
// metastore conversion.
Seq("orc", "parquet").foreach { format =>
test(s"SPARK-18355 Read data from a hive table with a new column - $format") {
val client =
spark.sharedState.externalCatalog.unwrapped.asInstanceOf[HiveExternalCatalog].client
Seq("true", "false").foreach { value =>
withSQLConf(
HiveUtils.CONVERT_METASTORE_ORC.key -> value,
HiveUtils.CONVERT_METASTORE_PARQUET.key -> value) {
withTempDatabase { db =>
client.runSqlHive(
s"""
|CREATE TABLE $db.t(
| click_id string,
| search_id string,
| uid bigint)
|PARTITIONED BY (
| ts string,
| hour string)
|STORED AS $format
""".stripMargin)
client.runSqlHive(
s"""
|INSERT INTO TABLE $db.t
|PARTITION (ts = '98765', hour = '01')
|VALUES (12, 2, 12345)
""".stripMargin
)
checkAnswer(
sql(s"SELECT click_id, search_id, uid, ts, hour FROM $db.t"),
Row("12", "2", 12345, "98765", "01"))
client.runSqlHive(s"ALTER TABLE $db.t ADD COLUMNS (dummy string)")
checkAnswer(
sql(s"SELECT click_id, search_id FROM $db.t"),
Row("12", "2"))
checkAnswer(
sql(s"SELECT search_id, click_id FROM $db.t"),
Row("2", "12"))
checkAnswer(
sql(s"SELECT search_id FROM $db.t"),
Row("2"))
checkAnswer(
sql(s"SELECT dummy, click_id FROM $db.t"),
Row(null, "12"))
checkAnswer(
sql(s"SELECT click_id, search_id, uid, dummy, ts, hour FROM $db.t"),
Row("12", "2", 12345, null, "98765", "01"))
}
}
}
}
}
// SPARK-24085: a scalar subquery may appear in a partition-pruning predicate,
// for both formats and regardless of metastore conversion.
test("SPARK-24085 scalar subquery in partitioning expression") {
Seq("orc", "parquet").foreach { format =>
Seq(true, false).foreach { isConverted =>
withSQLConf(
HiveUtils.CONVERT_METASTORE_ORC.key -> s"$isConverted",
HiveUtils.CONVERT_METASTORE_PARQUET.key -> s"$isConverted",
"hive.exec.dynamic.partition.mode" -> "nonstrict") {
withTable(format) {
withTempPath { tempDir =>
sql(
s"""
|CREATE TABLE ${format} (id_value string)
|PARTITIONED BY (id_type string)
|LOCATION '${tempDir.toURI}'
|STORED AS ${format}
""".stripMargin)
sql(s"insert into $format values ('1','a')")
sql(s"insert into $format values ('2','a')")
sql(s"insert into $format values ('3','b')")
sql(s"insert into $format values ('4','b')")
checkAnswer(
sql(s"SELECT * FROM $format WHERE id_type = (SELECT 'b')"),
Row("3", "b") :: Row("4", "b") :: Nil)
}
}
}
}
}
}
// SPARK-25271: Hive CTAS is planned as the optimized data-source command
// exactly when both the format conversion and CTAS conversion are enabled.
test("SPARK-25271: Hive ctas commands should use data source if it is convertible") {
withTempView("p") {
Seq(1, 2, 3).toDF("id").createOrReplaceTempView("p")
Seq("orc", "parquet").foreach { format =>
Seq(true, false).foreach { isConverted =>
withSQLConf(
HiveUtils.CONVERT_METASTORE_ORC.key -> s"$isConverted",
HiveUtils.CONVERT_METASTORE_PARQUET.key -> s"$isConverted") {
Seq(true, false).foreach { isConvertedCtas =>
withSQLConf(HiveUtils.CONVERT_METASTORE_CTAS.key -> s"$isConvertedCtas") {
val targetTable = "targetTable"
withTable(targetTable) {
val df = sql(s"CREATE TABLE $targetTable STORED AS $format AS SELECT id FROM p")
checkAnswer(sql(s"SELECT id FROM $targetTable"),
Row(1) :: Row(2) :: Row(3) :: Nil)
// Inspect the analyzed plan to see which CTAS command was used.
val ctasDSCommand = df.queryExecution.analyzed.collect {
case _: OptimizedCreateHiveTableAsSelectCommand => true
}.headOption
val ctasCommand = df.queryExecution.analyzed.collect {
case _: CreateHiveTableAsSelectCommand => true
}.headOption
if (isConverted && isConvertedCtas) {
assert(ctasDSCommand.nonEmpty)
assert(ctasCommand.isEmpty)
} else {
assert(ctasDSCommand.isEmpty)
assert(ctasCommand.nonEmpty)
}
}
}
}
}
}
}
}
}
// SPARK-26181: stats estimation over columns whose min/max are null (an
// all-null table) must not throw; we only exercise plan-with-stats rendering.
test("SPARK-26181 hasMinMaxStats method of ColumnStatsMap is not correct") {
  withSQLConf(SQLConf.CBO_ENABLED.key -> "true") {
    withTable("all_null") {
      sql("create table all_null (attr1 int, attr2 int)")
      sql("insert into all_null values (null, null)")
      sql("analyze table all_null compute statistics for columns attr1, attr2")
      // check if the stats can be calculated without Cast exception.
      Seq(
        "select * from all_null where attr1 < 1",
        "select * from all_null where attr1 < attr2"
      ).foreach(q => sql(q).queryExecution.stringWithStats)
    }
  }
}
// SPARK-26709: documents a known wrong answer — with OPTIMIZER_METADATA_ONLY
// enabled, MAX over a partition column of an empty partition reports the
// partition value instead of null.
test("SPARK-26709: OptimizeMetadataOnlyQuery does not handle empty records correctly") {
Seq(true, false).foreach { enableOptimizeMetadataOnlyQuery =>
withSQLConf(SQLConf.OPTIMIZER_METADATA_ONLY.key -> enableOptimizeMetadataOnlyQuery.toString) {
withTable("t") {
sql("CREATE TABLE t (col1 INT, p1 INT) USING PARQUET PARTITIONED BY (p1)")
// range(1, 1) is empty, so the partition p1=5 contains no rows.
sql("INSERT INTO TABLE t PARTITION (p1 = 5) SELECT ID FROM range(1, 1)")
if (enableOptimizeMetadataOnlyQuery) {
// The result is wrong if we enable the configuration.
checkAnswer(sql("SELECT MAX(p1) FROM t"), Row(5))
} else {
checkAnswer(sql("SELECT MAX(p1) FROM t"), Row(null))
}
checkAnswer(sql("SELECT MAX(col1) FROM t"), Row(null))
}
}
}
}
// SPARK-25158: a failure inside the script-transformation writer thread must
// surface as a SparkException in the query, not as an uncaught exception
// that kills the executor JVM.
test("SPARK-25158: " +
"Executor accidentally exit because ScriptTransformationWriterThread throw Exception") {
withTempView("test") {
val defaultUncaughtExceptionHandler = Thread.getDefaultUncaughtExceptionHandler
try {
// Install a recording handler so we can assert nothing escaped uncaught.
val uncaughtExceptionHandler = new TestUncaughtExceptionHandler
Thread.setDefaultUncaughtExceptionHandler(uncaughtExceptionHandler)
// Use a bad udf to generate failed inputs.
val badUDF = org.apache.spark.sql.functions.udf((x: Int) => {
if (x < 1) x
else throw new RuntimeException("Failed to produce data.")
})
spark
.range(5)
.select(badUDF('id).as("a"))
.createOrReplaceTempView("test")
val scriptFilePath = getTestResourcePath("data")
val e = intercept[SparkException] {
sql(
s"""FROM test SELECT TRANSFORM(a)
|USING 'python $scriptFilePath/scripts/test_transform.py "\\t"'
""".stripMargin).collect()
}
assert(e.getMessage.contains("Failed to produce data."))
assert(uncaughtExceptionHandler.exception.isEmpty)
} finally {
Thread.setDefaultUncaughtExceptionHandler(defaultUncaughtExceptionHandler)
}
}
}
}
| WindCanDie/spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala | Scala | apache-2.0 | 89,745 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.retriever
import uk.gov.hmrc.ct.box.retriever.BoxRetriever
import uk.gov.hmrc.ct.{CATO10, CATO11, CATO12}
// Retriever for the declaration-section calculated boxes. Implementations
// supply the current CATO10-CATO12 values; presumably these feed the CT600
// declaration pages — confirm against the calling form renderers.
trait DeclarationBoxRetriever extends BoxRetriever {
def cato10(): CATO10
def cato11(): CATO11
def cato12(): CATO12
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/retriever/DeclarationBoxRetriever.scala | Scala | apache-2.0 | 869 |
/*
* Copyright 2011 TomTom International BV
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tomtom.dps.mavenizer.dependency
import org.scalatest.junit.JUnitRunner
import org.junit.runner.RunWith
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.Spec
import java.io.{FileOutputStream, File}
import org.scalatest.prop.PropertyChecks
@RunWith(classOf[JUnitRunner])
class ClassExtractorSpec extends Spec with ShouldMatchers with PropertyChecks {
val tmpFile: File = createTmpFile
val extractor = new ClassExtractor(tmpFile)
describe("ClassExtractor") {
describe("a well-known jar file should contain a well known class") {
extractor.listProperClasses.contains("com.sun.mail.handlers.multipart_mixed") should equal (true)
}
describe("a random sample should be as expected") {
forAll {
(count: Int) =>
whenever(count >= 0 && count <= extractor.listProperClasses.size) {
extractor.random(count).size should equal (count)
}
}
}
}
def createTmpFile: File = {
val tmpFile = File.createTempFile("ClassExtratorSpec", "tmp")
val output = new FileOutputStream(tmpFile)
val input = getClass.getResourceAsStream("/mail.jar")
var done = false
while (!done) {
val byte = input.read
if (byte == -1) {
done = true
} else {
output.write(byte)
}
}
output.close()
input.close()
tmpFile
}
type ? = this.type
} | ebowman/mavenizer | src/test/scala/com/tomtom/dps/mavenizer/dependency/ClassExtractorSpec.scala | Scala | apache-2.0 | 1,998 |
/*
* +1>> This source code is licensed as GPLv3 if not stated otherwise.
* >> NO responsibility taken for ANY harm, damage done
* >> to you, your data, animals, etc.
* >>
* +2>>
* >> Last modified: 4/27/14 10:09 AM
* >> Origin: phantasmatron :: Util.scala
* >>
* +3>>
* >> Copyright (c) 2014:
* >>
* >> | | |
* >> | ,---.,---|,---.|---.
* >> | | || |`---.| |
* >> `---'`---'`---'`---'`---'
* >> // Niklas Klügel
* >>
* +4>>
* >> Made in Bavaria by fat little elves - since 1983.
*/
package org.lodsb.phantasmatron.ui
import java.nio.ByteBuffer
import scalafx.scene.paint.Color
/**
* Created by lodsb on 4/12/14.
*/
/**
 * UI helpers: a deterministic type-name -> colour mapping plus small CSS
 * colour/gradient string builders.
 */
object Util {

  /** Derives a stable RGB colour from a type string via its hash. */
  def typeString2Color(s: String): Color = {
    val hash = hashString(s)
    // Spread the 32-bit hash across three channel bytes; each byte is folded
    // into 0..240 in steps of 16 (same arithmetic as the original).
    val channelBytes = ByteBuffer.allocate(4).putInt(hash).array().map(x => (x.abs % 16) * 16)
    Color.rgb(channelBytes(0), channelBytes(1), channelBytes(2))
  }

  /**
   * Polynomial string hash with seed 17 and multiplier 31. The original also
   * printed the hash to stdout on every call — a debug leftover, removed.
   */
  private def hashString(s: String): Int =
    s.foldLeft(17)((hash, c) => hash * 31 + c)

  /** Renders a CSS rgba(...) literal from a colour and an explicit alpha. */
  def toRgba(c: Color, a: Double): String = {
    "rgba(" + (to255Int(c.red)) + "," + (to255Int(c.green)) + "," + (to255Int(c.blue)) + "," + a.toString + ")"
  }

  /** Scales a 0.0-1.0 channel value to the 0-255 integer range. */
  def to255Int(d: Double): Int = {
    (d * 255).toInt
  }

  /** CSS radial-gradient style from the given colour down to a fixed grey. */
  def gradient(c: Color): String = {
    " -fx-background-color : radial-gradient(center 50% 25%,\\n" +
    " radius 75%,\\n" +
    " " + toRgba(c, 0.8) + " 0%,\\n" +
    " rgba(82,82,82,0.9) 100%);"
  }
}
| lodsb/phantasmatron | src/main/scala/org/lodsb/phantasmatron/ui/Util.scala | Scala | gpl-2.0 | 1,701 |
package scoverage.report
import java.io.File
import java.util.Date
import scoverage._
import scala.xml.Node
/** @author Stephen Samuel */
class ScoverageHtmlWriter(sourceDirectories: Seq[File], outputDir: File) extends BaseReportWriter(sourceDirectories, outputDir) {
// Convenience constructor for a single source root.
def this (sourceDirectory: File, outputDir: File) {
this(Seq(sourceDirectory), outputDir);
}
/**
 * Emits the full HTML report: the static frameset shell (index.html), the
 * generated package list and overview frames, then one page per package.
 */
def write(coverage: Coverage): Unit = {
  val indexHtml = IOUtils.readStreamAsString(getClass.getResourceAsStream("/scoverage/index.html"))
  IOUtils.writeToFile(new File(outputDir.getAbsolutePath + "/index.html"), indexHtml)
  IOUtils.writeToFile(new File(outputDir.getAbsolutePath + "/packages.html"), packageList(coverage).toString())
  IOUtils.writeToFile(new File(outputDir.getAbsolutePath + "/overview.html"), overview(coverage).toString())
  coverage.packages.foreach(writePackage)
}
// Writes one overview page per package plus a highlighted page per file.
private def writePackage(pkg: MeasuredPackage): Unit = {
// package overview files are written out using a filename that respects the package name
// that means package com.example declared in a class at src/main/scala/mystuff/MyClass.scala will be written
// to com.example.html
val file = new File(outputDir, packageOverviewRelativePath(pkg))
file.getParentFile.mkdirs()
IOUtils.writeToFile(file, packageOverview(pkg).toString())
pkg.files.foreach(writeFile)
}
// Writes the highlighted-source page for one measured file, mirroring the
// original source tree layout under outputDir (with ".html" appended).
private def writeFile(mfile: MeasuredFile): Unit = {
// each highlighted file is written out using the same structure as the original file.
val file = new File(outputDir, relativeSource(mfile.source) + ".html")
file.getParentFile.mkdirs()
IOUtils.writeToFile(file, filePage(mfile).toString())
}
// "<empty>" (the default package) is not a friendly file name; render it as "(empty)".
private def packageOverviewRelativePath(pkg: MeasuredPackage) = pkg.name.replace("<empty>", "(empty)") + ".html"
/**
 * Builds the per-file page: a tabbed view with the highlighted code grid and
 * the statement list. The page title is the file's report-relative name
 * (`filename` was computed but unused in the original, whose title contained
 * a corrupted literal).
 */
private def filePage(mfile: MeasuredFile): Node = {
  val filename = relativeSource(mfile.source) + ".html"
  val css =
    "table.codegrid { font-family: monospace; font-size: 12px; width: auto!important; }" +
    "table.statementlist { width: auto!important; font-size: 13px; } " +
    "table.codegrid td { padding: 0!important; border: 0!important } " +
    "table td.linenumber { width: 40px!important; } "
  <html>
    <head>
      <meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
      <title id='title'>
        {filename}
      </title>
      {plugins}
      <style>
        {css}
      </style>
    </head>
    <body style="font-family: monospace;">
      <ul class="nav nav-tabs">
        <li>
          <a href="#codegrid" data-toggle="tab">Codegrid</a>
        </li>
        <li>
          <a href="#statementlist" data-toggle="tab">Statement List</a>
        </li>
      </ul>
      <div class="tab-content">
        <div class="tab-pane active" id="codegrid">
          {xml.Unparsed(new CodeGrid(mfile).highlighted)}
        </div>
        <div class="tab-pane" id="statementlist">
          {new StatementWriter(mfile).output}
        </div>
      </div>
    </body>
  </html>
}
// Shared <head> element for the generated pages: title, plugin includes and
// the inline CSS for the coverage "meter" bars.
def header = {
val css = """.meter {
| height: 14px;
| position: relative;
| background: #BB2020;
|}
|
|.meter span {
| display: block;
| height: 100%;
| background-color: rgb(43,194,83);
| background-image: -webkit-gradient(
| linear,
| left bottom,
| left top,
| color-stop(0, rgb(43,194,83)),
| color-stop(1, rgb(84,240,84))
| );
| background-image: -webkit-linear-gradient(
| center bottom,
| rgb(43,194,83) 37%,
| rgb(84,240,84) 69%
| );
| background-image: -moz-linear-gradient(
| center bottom,
| rgb(43,194,83) 37%,
| rgb(84,240,84) 69%
| );
| background-image: -ms-linear-gradient(
| center bottom,
| rgb(43,194,83) 37%,
| rgb(84,240,84) 69%
| );
| background-image: -o-linear-gradient(
| center bottom,
| rgb(43,194,83) 37%,
| rgb(84,240,84) 69%
| );
| -webkit-box-shadow:
| inset 0 2px 9px rgba(255,255,255,0.3),
| inset 0 -2px 6px rgba(0,0,0,0.4);
| -moz-box-shadow:
| inset 0 2px 9px rgba(255,255,255,0.3),
| inset 0 -2px 6px rgba(0,0,0,0.4);
| position: relative;
| overflow: hidden;
|}""".stripMargin
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
<title id='title'>Scoverage Code Coverage</title>
{plugins}
<style>
{css}
</style>
</head>
}
// One package's overview page: the class table without path columns
// (links are relative to the package page itself).
def packageOverview(pack: MeasuredPackage): Node = {
<html>
{header}<body style="font-family: monospace;">
{classesTable(pack.classes, addPath = false)}
</body>
</html>
}
// Renders the sortable per-class coverage table, ordered by simple class name.
// NOTE(review): the addPath parameter is not referenced anywhere in this body
// (callers pass true/false but rendering is identical) — confirm intent.
def classesTable(classes: Iterable[MeasuredClass], addPath: Boolean): Node = {
<table class="tablesorter table table-striped" style="font-size:13px">
<thead>
<tr>
<th>
Class
</th>
<th>
Source file
</th>
<th>
Lines
</th>
<th>
Methods
</th>
<th>
Statements
</th>
<th>
Invoked
</th>
<th>
Coverage
</th>
<th>
</th>
<th>
Branches
</th>
<th>
Invoked
</th>
<th>
Coverage
</th>
<th>
</th>
</tr>
</thead>
<tbody>
{classes.toSeq.sortBy(_.simpleName) map classRow}
</tbody>
</table>
}
/**
 * One row of the class table: link to the highlighted source page plus
 * statement/branch counts and meter bars.
 *
 * Fixes two defects in the original: the anchor's href contained a corrupted
 * literal instead of `{filename}` (the val was computed but never used), and
 * the directory test used `path.ne("")` — reference inequality, which is
 * effectively always true (File.getParent returns null, not "", when there is
 * no parent), leaving the else-branch dead. Test for null explicitly.
 */
def classRow(klass: MeasuredClass): Node = {
  val filename: String = {
    val fileRelativeToSource = new File(relativeSource(klass.source) + ".html")
    if (fileRelativeToSource.getParent != null) {
      // Normalise the path separator to "/" in case we are running on Windows.
      fileRelativeToSource.toString.replace(File.separator, "/")
    } else {
      fileRelativeToSource.getName
    }
  }
  val statement0f = Math.round(klass.statementCoveragePercent).toInt.toString
  val branch0f = Math.round(klass.branchCoveragePercent).toInt.toString
  val simpleClassName = klass.name.split('.').last
  <tr>
    <td>
      <a href={filename}>
        {simpleClassName}
      </a>
    </td>
    <td>
      {klass.statements.headOption.map(_.source.split(File.separatorChar).last).getOrElse("")}
    </td>
    <td>
      {klass.loc.toString}
    </td>
    <td>
      {klass.methodCount.toString}
    </td>
    <td>
      {klass.statementCount.toString}
    </td>
    <td>
      {klass.invokedStatementCount.toString}
    </td>
    <td>
      <div class="meter">
        <span style={s"width: $statement0f%"}></span>
      </div>
    </td>
    <td>
      {klass.statementCoverageFormatted}
      %
    </td>
    <td>
      {klass.branchCount.toString}
    </td>
    <td>
      {klass.invokedBranchesCount.toString}
    </td>
    <td>
      <div class="meter">
        <span style={s"width: $branch0f%"}></span>
      </div>
    </td>
    <td>
      {klass.branchCoverageFormatted}
      %
    </td>
  </tr>
}
  /**
   * Renders the left-hand navigation frame: every package with its statement
   * coverage, each entry loading its overview page into the "mainFrame" frame.
   */
  def packageList(coverage: Coverage): Node = {
    <html>
      <head>
        <meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
        <title id='title'>
          Scoverage Code Coverage
        </title>
        {plugins}
      </head>
      <body style="font-family: monospace;">
        <table class="tablesorter table table-striped" style="font-size: 13px">
          <thead>
            <tr>
              <td>
                <a href="overview.html" target="mainFrame">All packages</a>
              </td>
              <td>{coverage.statementCoverageFormatted}%</td>
            </tr>
          </thead>
          <tbody>
            {coverage.packages.map(arg =>
            <tr>
              <td>
                <a href={packageOverviewRelativePath(arg)} target="mainFrame">{arg.name}</a>
              </td>
              <td>{arg.statementCoverageFormatted}%</td>
            </tr>
          )}
          </tbody>
        </table>
      </body>
    </html>
  }
  /** Renders a table of the `limit` classes ranked highest-risk by `coverage.risks`. */
  def risks(coverage: Coverage, limit: Int) = {
    <table class="tablesorter table table-striped" style="font-size: 12px">
      <thead>
        <tr>
          <th>
            Class
          </th>
          <th>
            Lines
          </th>
          <th>
            Methods
          </th>
          <th>
            Statements
          </th>
          <th>
            Statement Rate
          </th>
          <th>
            Branches
          </th>
          <th>
            Branch Rate
          </th>
        </tr>
      </thead>
      <tbody>
        {coverage.risks(limit).map(klass =>
        <tr>
          <td>
            {klass.simpleName}
          </td>
          <td>
            {klass.loc.toString}
          </td>
          <td>
            {klass.methodCount.toString}
          </td>
          <td>
            {klass.statementCount.toString}
          </td>
          <td>
            {klass.statementCoverageFormatted}
            %
          </td>
          <td>
            {klass.branchCount.toString}
          </td>
          <td>
            {klass.branchCoverageFormatted}
            %
          </td>
        </tr>)}
      </tbody>
    </table>
  }
  /**
   * Renders a plain per-package table showing invoked/total class and
   * statement counts with their percentages.
   */
  def packages2(coverage: Coverage) = {
    // Build one <tr> per package first, then wrap them all in a bare <table>.
    val rows = coverage.packages.map(arg => {
      <tr>
        <td>
          {arg.name}
        </td>
        <td>
          {arg.invokedClasses.toString}
          /
          {arg.classCount}
          (
          {arg.classCoverage.toString}
          %)
        </td>
        <td>
          {arg.invokedStatements.toString()}
          /
          {arg.statementCount}
          (
          {arg.statementCoverageFormatted}
          %)
        </td>
      </tr>
    })
    <table>
      {rows}
    </table>
  }
  /**
   * Renders the main overview page: generation timestamp banner, the summary
   * statistics block (see `stats`) and the full class table.
   */
  def overview(coverage: Coverage): Node = {
    <html>
      {header}<body style="font-family: monospace;">
      <div class="alert alert-info">
        <b>
          SCoverage
        </b>
        generated at
        {new Date().toString}
      </div>
      <div class="overview">
        <div class="stats">
          {stats(coverage)}
        </div>
        <div>
          {classesTable(coverage.classes, addPath = true)}
        </div>
      </div>
    </body>
    </html>
  }
  /**
   * Renders the summary-statistics table shown at the top of the overview
   * page: size metrics, per-unit averages, statement/branch totals and the
   * two coverage meter bars.
   */
  def stats(coverage: Coverage): Node = {
    // Rounded whole-number percentages drive the width of the meter bars below.
    val statement0f = Math.round(coverage.statementCoveragePercent).toInt.toString
    val branch0f = Math.round(coverage.branchCoveragePercent).toInt.toString
    <table class="table">
      <tr>
        <td>
          Lines of code:
        </td>
        <td>
          {coverage.loc.toString}
        </td>
        <td>
          Files:
        </td>
        <td>
          {coverage.fileCount.toString}
        </td>
        <td>
          Classes:
        </td>
        <td>
          {coverage.classCount.toString}
        </td>
        <td>
          Methods:
        </td>
        <td>
          {coverage.methodCount.toString}
        </td>
      </tr>
      <tr>
        <td>
          Lines per file:
        </td>
        <td>
          {coverage.linesPerFileFormatted}
        </td>
        <td>
          Packages:
        </td>
        <td>
          {coverage.packageCount.toString}
        </td>
        <td>
          Classes per package:
        </td>
        <td>
          {coverage.avgClassesPerPackageFormatted}
        </td>
        <td>
          Methods per class:
        </td>
        <td>
          {coverage.avgMethodsPerClassFormatted}
        </td>
      </tr>
      <tr>
        <td>
          Total statements:
        </td>
        <td>
          {coverage.statementCount.toString}
        </td>
        <td>
          Invoked statements:
        </td>
        <td>
          {coverage.invokedStatementCount.toString}
        </td>
        <td>
          Total branches:
        </td>
        <td>
          {coverage.branchCount.toString}
        </td>
        <td>
          Invoked branches:
        </td>
        <td>
          {coverage.invokedBranchesCount.toString}
        </td>
      </tr>
      <tr>
        <td>
          Ignored statements:
        </td>
        <td>
          {coverage.ignoredStatementCount.toString}
        </td>
        <td></td>
        <td></td>
        <td></td>
        <td></td>
        <td></td>
        <td></td>
      </tr>
      <tr>
        <td>
          Statement coverage:
        </td>
        <td>
          {coverage.statementCoverageFormatted}
          %
        </td>
        <td colspan="2">
          <div class="meter">
            <span style={s"width: $statement0f%"}></span>
          </div>
        </td>
        <td>
          Branch coverage:
        </td>
        <td>
          {coverage.branchCoverageFormatted}
          %
        </td>
        <td colspan="2">
          <div class="meter">
            <span style={s"width: $branch0f%"}></span>
          </div>
        </td>
      </tr>
    </table>
  }
  /**
   * Shared <head> assets: jQuery, tablesorter and Bootstrap from CDNs, plus
   * an inline script that activates tablesorter on every ".tablesorter" table.
   * The consecutive XML literals form a single node sequence.
   */
  def plugins = {
    <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
      <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.20.1/css/theme.default.min.css" type="text/css"/>
      <script src="http://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.20.1/js/jquery.tablesorter.min.js"></script>
      <link rel="stylesheet" href="http://netdna.bootstrapcdn.com/bootstrap/3.0.3/css/bootstrap.min.css" type="text/css"/>
      <script src="http://netdna.bootstrapcdn.com/bootstrap/3.0.3/js/bootstrap.min.js"></script>
      <script type="text/javascript">
        {xml.Unparsed("""$(document).ready(function() {$(".tablesorter").tablesorter();});""")}
      </script>
  }
}
| xudongzheng1225/scalac-scoverage-plugin | scalac-scoverage-plugin/src/main/scala/scoverage/report/ScoverageHtmlWriter.scala | Scala | apache-2.0 | 14,662 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail.internal
import cats.effect.Sync
import cats.syntax.all._
import monix.tail.Iterant
import monix.tail.Iterant.{Concat, Halt, Last, Next, NextBatch, NextCursor, Scope, Suspend}
private[tail] object IterantDropWhileWithIndex {
/**
* Implementation for `Iterant#dropWhileWithIndex`
*/
  /**
   * Implementation for `Iterant#dropWhileWithIndex`: drops elements while
   * `p(element, index)` holds, then emits the rest unchanged.
   *
   * The `Loop` is allocated *inside* `F.delay` so that each evaluation of the
   * suspended stream gets a fresh instance of the loop's mutable state
   * (`index`, `dropFinished`); do not hoist the allocation out of the delay.
   */
  def apply[F[_], A](source: Iterant[F, A], p: (A, Int) => Boolean)
    (implicit F: Sync[F]): Iterant[F, A] = {
    Suspend(F.delay(new Loop(p).apply(source)))
  }
  /**
   * Stateful visitor that implements the drop-while-with-index loop.
   *
   * Mutable state is confined to one instance and the caller creates a fresh
   * instance per evaluation, so the visitor itself is single-use.
   */
  private class Loop[F[_], A](p: (A, Int) => Boolean)
    (implicit F: Sync[F])
    extends Iterant.Visitor[F, A, Iterant[F, A]] { loop =>
    // Zero-based index of the next element to test against `p`.
    private[this] var index = 0
    // Set once `p` returns false; from then on every node passes through untouched.
    private[this] var dropFinished = false
    // Returns the current index and advances it (post-increment semantics).
    private def getAndIncrement(): Int = {
      val old = index
      index += 1
      old
    }
    def visit(ref: Next[F, A]): Iterant[F, A] =
      if (dropFinished) ref else {
        val item = ref.item
        if (p(item, getAndIncrement()))
          Suspend(ref.rest.map(this))
        else {
          // First element that fails the predicate is kept, along with the rest.
          dropFinished = true
          ref
        }
      }
    def visit(ref: NextBatch[F, A]): Iterant[F, A] =
      if (dropFinished) ref
      else visit(ref.toNextCursor())
    def visit(ref: NextCursor[F, A]): Iterant[F, A] =
      if (dropFinished) ref else {
        val cursor = ref.cursor
        var keepDropping = true
        var item: A = null.asInstanceOf[A]
        // Consume the cursor until the predicate fails or the cursor is exhausted.
        while (keepDropping && cursor.hasNext()) {
          item = cursor.next()
          keepDropping = p(item, getAndIncrement())
        }
        if (keepDropping)
          // Cursor exhausted while still dropping: continue with the rest of the stream.
          Suspend(ref.rest.map(this))
        else {
          dropFinished = true
          // `item` failed the predicate so it must be re-emitted; the cursor has
          // already advanced past it, hence the explicit Next node.
          if (cursor.hasNext())
            Next(item, F.pure(ref))
          else
            Next(item, ref.rest)
        }
      }
    def visit(ref: Suspend[F, A]): Iterant[F, A] =
      if (dropFinished) ref else Suspend(ref.rest.map(this))
    def visit(ref: Concat[F, A]): Iterant[F, A] =
      if (dropFinished) ref else ref.runMap(this)
    def visit[S](ref: Scope[F, S, A]): Iterant[F, A] =
      if (dropFinished) ref else ref.runMap(this)
    def visit(ref: Last[F, A]): Iterant[F, A] =
      if (!dropFinished && p(ref.item, getAndIncrement()))
        // Final element was dropped too: the resulting stream is empty.
        Halt(None)
      else {
        dropFinished = true
        ref
      }
    def visit(ref: Halt[F, A]): Iterant[F, A] =
      ref
    def fail(e: Throwable): Iterant[F, A] =
      Iterant.raiseError(e)
  }
} | Wogan/monix | monix-tail/shared/src/main/scala/monix/tail/internal/IterantDropWhileWithIndex.scala | Scala | apache-2.0 | 3,085 |
package definiti.core.end2end.controls.naming
import definiti.common.ast.Root
import definiti.common.program.Ko
import definiti.common.tests.{ConfigurationMock, LocationPath}
import definiti.core.ProgramResultMatchers.{beResult, ok}
import definiti.core.end2end.EndToEndSpec
import definiti.core.validation.controls.naming.TypeLowerCamelCaseControl
/**
 * End-to-end spec for [[TypeLowerCamelCaseControl]]: processes fixture files
 * and checks that only types violating lowerCamelCase naming are reported.
 */
class TypeLowerCamelCaseControlSpec extends EndToEndSpec {
  import TypeLowerCamelCaseControlSpec._
  "Project.generatePublicAST" should "validate a type with a valid lowerCamelCame format" in {
    val output = processFile("controls.naming.typeLowerCamelCase.valid", configuration)
    output shouldBe ok[Root]
  }
  it should "invalidate a type with an invalid lowerCamelCame format" in {
    val output = processFile("controls.naming.typeLowerCamelCase.invalid", configuration)
    // One expected error per badly-named type in the "invalid" fixture file.
    output should beResult(Ko[Root](
      TypeLowerCamelCaseControl.invalidLowerCamelCaseFormat("MyAlias", invalidLocation(1, 1, 22)),
      TypeLowerCamelCaseControl.invalidLowerCamelCaseFormat("MyDefined", invalidLocation(3, 1, 5, 2)),
      TypeLowerCamelCaseControl.invalidLowerCamelCaseFormat("MyString", invalidLocation(4, 3, 27)),
      TypeLowerCamelCaseControl.invalidLowerCamelCaseFormat("MyEnum", invalidLocation(7, 1, 10, 2))
    ))
  }
}
/** Shared fixtures: a configuration running only the control under test, and a location helper for the "invalid" fixture. */
object TypeLowerCamelCaseControlSpec {
  val configuration = ConfigurationMock().withOnlyControls(TypeLowerCamelCaseControl)
  val invalidLocation = LocationPath.controlNaming(TypeLowerCamelCaseControl, "invalid")
}
package se.apogo.kdom
/**
 * A terrain type in the game model.
 *
 * Sealed so the compiler can check pattern matches over [[Terrain]] for
 * exhaustiveness: all variants are the case objects defined in this file.
 */
sealed trait Terrain

case object Castle extends Terrain
case object Water extends Terrain
case object Forest extends Terrain
case object Field extends Terrain
case object Mine extends Terrain
case object Pasture extends Terrain
case object Clay extends Terrain
| mratin/kdom | src/main/scala/se/apogo/kdom/Terrain.scala | Scala | mit | 283 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs.storage.common.s3
import java.io.IOException
import com.amazonaws.services.s3.AmazonS3
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.locationtech.geomesa.fs.storage.common.observer.{FileSystemObserver, FileSystemObserverFactory}
import org.opengis.feature.simple.SimpleFeatureType
/**
* Factory for S3VisibilityObserver
*/
/**
 * Observer factory that tags files written to S3 with a visibility tag.
 *
 * Obtains the AmazonS3 client by reflecting on the private `s3` field of the
 * Hadoop filesystem implementation (no public accessor is used here); the
 * field name is tied to that implementation and may break on Hadoop upgrades.
 */
class S3VisibilityObserverFactory extends FileSystemObserverFactory {
  // Mutable state is populated by init() and released by close().
  private var fs: FileSystem = _
  private var s3: AmazonS3 = _
  private var tag: String = _
  override def init(conf: Configuration, root: Path, sft: SimpleFeatureType): Unit = {
    try {
      // use reflection to access to private client factory used by the s3a hadoop impl
      fs = root.getFileSystem(conf)
      val field = fs.getClass.getDeclaredField("s3")
      field.setAccessible(true)
      s3 = field.get(fs).asInstanceOf[AmazonS3]
      // Tag name is configurable; falls back to the default defined in the companion.
      tag = conf.get(S3VisibilityObserverFactory.TagNameConfig, S3VisibilityObserverFactory.DefaultTag)
    } catch {
      case e: Exception => throw new RuntimeException("Unable to get s3 client", e)
    }
  }
  override def apply(path: Path): FileSystemObserver = new S3VisibilityObserver(s3, path, tag)
  override def close(): Unit = {
    // Drop the client reference first, then close the filesystem; fields are
    // nulled out so a failed close is not retried with stale state.
    s3 = null
    if (fs != null) {
      try {
        fs.close()
      } catch {
        case e: Exception => throw new IOException("Error closing S3 filesystem", e)
      } finally {
        fs = null
      }
    }
  }
}
object S3VisibilityObserverFactory {
  // Hadoop configuration key used to override the S3 tag name.
  val TagNameConfig = "geomesa.fs.vis.tag"
  // Tag name applied when no override is configured.
  val DefaultTag = "geomesa.file.visibility"
}
| locationtech/geomesa | geomesa-fs/geomesa-fs-storage/geomesa-fs-storage-common/src/main/scala/org/locationtech/geomesa/fs/storage/common/s3/S3VisibilityObserverFactory.scala | Scala | apache-2.0 | 2,099 |
package smala
import org.scalatest.FunSuite
import org.scalatest.Matchers
import spire.math._
import spire.implicits.{eqOps => _, _}
import java.math.MathContext
/** Tests for parsing, printing and simplification of RealTree expressions. */
class RealTreeTest extends FunSuite with Matchers {
  test("parse sin(3.03) + 1/8 and verify value") {
    // Evaluate the parsed tree and compare against the same expression built with spire directly.
    val v = RealTreeEvaluator.value(RealTree.parse("sin(3.03) + 1/8").get)
    v === sin(Real("3.03")) + 1/Real(8) shouldBe true
  }
  test("pretty print 3+(3*4) removes parentheses") {
    RealTree.print(RealTree.parse("3+(3*4)").get) shouldBe "3 + 3 * 4"
  }
  test("3+(3*4) simplifies to 15") {
    RealTree.print(RealTree.simplified(RealTree.parse("3+(3*4)").get)) shouldBe "15"
  }
  test("3+(3.*4) does not simplify") {
    // The decimal literal "3." blocks exact simplification, so the tree is kept symbolic.
    RealTree.print(RealTree.simplified(RealTree.parse("3+(3.*4)").get)) shouldBe "3 + 3. * 4"
  }
}
| denisrosset/smala | src/test/scala/smala/RealTreeTest.scala | Scala | mit | 800 |
package circumflex
package orm
import java.sql._
/*!# Vendor-specific SQL dialects
Following vendors are currently supported by Circumflex ORM:
* PostgreSQL 8.3+;
* MySQL 5.7+;
* H2 database.
We also provide limited support for `Oracle`, `MS SQL Server` and `DB2`.
We would appreciate any commits for better vendors support.
*/
/** SQL dialect for the H2 database. */
class H2Dialect extends Dialect {

  override def driverClass = "org.h2.Driver"

  override def textType = "VARCHAR"

  /** Builds `CREATE [UNIQUE] INDEX ... ON ... (...)`, with an optional partial-index WHERE clause. */
  override def createIndex(idx: Index): String = {
    val unique = if (idx.isUnique) "UNIQUE " else ""
    val base = "CREATE " + unique + "INDEX " + idx.name + " ON " + idx.relation.qualifiedName +
        " (" + idx.expression + ")"
    if (idx.whereClause != EmptyPredicate)
      base + " WHERE " + idx.whereClause.toInlineSql
    else base
  }

  override def dropSchema(schema: Schema) = "DROP SCHEMA " + schema.name

  override def RANDOM = "RAND()"
}
/** SQL dialect for PostgreSQL. */
class PostgreSQLDialect extends Dialect {

  override def driverClass = "org.postgresql.Driver"

  override def timestampType = "TIMESTAMPTZ"

  /** Renders `ex IN (p1, p2, ...)`; an empty parameter list degenerates to the constant FALSE. */
  override def parameterizedIn(ex: String, params: Iterable[String]) =
    if (params.isEmpty) "FALSE"
    else params.mkString(ex + " IN (", ", ", ")")
}
/** SQL dialect for MySQL. Schemas and sequences are not supported; identity columns use AUTO_INCREMENT. */
class MySQLDialect extends Dialect {
  override def supportsSchema = false
  override def driverClass = "com.mysql.jdbc.Driver"
  override def initializeField[R <: Record[_, R]](field: Field[_, R]) {
    // do nothing -- for MySQL you don't need to create manually a sequence for auto-incrementable fields
  }
  override def defaultExpression[R <: Record[_, R]](field: Field[_, R]): String =
    field match {
      case a: AutoIncrementable[_, _] if (a.isAutoIncrement) =>" AUTO_INCREMENT"
      case _ => field.defaultExpression.map(" DEFAULT " + _).getOrElse("")
    }
  // Last generated key is retrieved via MySQL's LAST_INSERT_ID().
  override def identityLastIdPredicate[PK, R <: Record[PK, R]]
      (node: RelationNode[PK, R]): Predicate =
    new SimpleExpression(node.alias + "." + node.relation.PRIMARY_KEY.name + " = LAST_INSERT_ID()", Nil)
  override def identityLastIdQuery[PK, R <: Record[PK, R]]
      (node: RelationNode[PK, R]): SQLQuery[PK] =
    new Select(expr[PK]("LAST_INSERT_ID()"))
  override def sequenceNextValQuery[PK, R <: Record[PK, R]]
      (node: RelationNode[PK, R]): SQLQuery[PK] =
    throw new UnsupportedOperationException("This operation is unsupported in MySQL dialect.")
  override def createIndex(idx: Index): String = {
    var result = "CREATE "
    if (idx.isUnique) result += "UNIQUE "
    result += "INDEX " + idx.name + " USING " + idx.usingClause +
        " ON " + idx.relation.qualifiedName + " (" + idx.expression + ")"
    // MySQL has no partial indexes, so a WHERE clause is logged and dropped.
    if (idx.whereClause != EmptyPredicate)
      ORM_LOG.warn("Ignoring WHERE clause of INDEX " + idx.name +
          ": predicates are not supported.")
    result
  }
  override def delete[PK, R <: Record[PK, R]]
      (dml: Delete[PK, R]): String = {
    // MySQL requires the alias between DELETE and FROM when the target is aliased.
    var result = "DELETE " + dml.node.alias + " FROM " + dml.node.toSql
    if (dml.whereClause != EmptyPredicate) result += " WHERE " + dml.whereClause.toSql
    result
  }
  override def RANDOM = "RAND()"
}
/**
 * SQL dialect for Oracle.
 *
 * Oracle only supports CASCADE and SET NULL as referential actions, so
 * NO ACTION / RESTRICT / SET DEFAULT all fall back to SET NULL.
 */
class OracleDialect extends Dialect {
  override def driverClass = "oracle.jdbc.driver.OracleDriver"
  // Fixed: was "SET_NULL" (with an underscore), which is not valid SQL and
  // was inconsistent with fkRestrict/fkSetDefault below.
  override def fkNoAction = "SET NULL"
  override def fkRestrict = "SET NULL"
  override def fkSetDefault = "SET NULL"
  override def numericType(precision: Int, scale: Int): String =
    "NUMBER" + (if (precision == -1) "" else "(" + precision + "," + scale + ")")
  override def textType = "VARCHAR2(4000)"
  override def varcharType(length: Int): String =
    "VARCHAR2" + (if (length == -1) "" else "(" + length + ")")
  // Oracle has no BOOLEAN column type; a 0/1 NUMBER(1) is used instead.
  override def booleanType = "NUMBER(1)"
  override def supportsSchema = false
  override def createIndex(idx: Index): String = {
    var result = "CREATE "
    if (idx.isUnique) result += "UNIQUE "
    result += "INDEX " + idx.name +
        " ON " + idx.relation.qualifiedName + " (" + idx.expression + ")"
    // Partial indexes are not supported: a WHERE clause is logged and dropped.
    if (idx.whereClause != EmptyPredicate)
      ORM_LOG.warn("Ignoring WHERE clause of INDEX " + idx.name +
          ": predicates are not supported.")
    result
  }
  // Oracle uses a space, not AS, between an expression and its alias.
  override def alias(expression: String, alias: String) =
    expression + " " + alias
  override def defaultExpression[R <: Record[_, R]]
      (field: Field[_, R]): String =
    field.defaultExpression.map(" DEFAULT " + _).getOrElse("")
  // Generated keys come from sequences (SEQ.nextval), not identity columns.
  override def sequenceNextValQuery[PK, R <: Record[PK, R]]
      (node: RelationNode[PK, R]): SQLQuery[PK] =
    new Select(expr[PK](sequenceName(node.relation.PRIMARY_KEY) + ".nextval FROM dual"))
  override def identityLastIdPredicate[PK, R <: Record[PK, R]]
      (node: RelationNode[PK, R]): Predicate =
    throw new UnsupportedOperationException(
      "This operation is unsupported in Oracle dialect.")
  override def identityLastIdQuery[PK, R <: Record[PK, R]]
      (node: RelationNode[PK, R]): SQLQuery[PK] =
    throw new UnsupportedOperationException(
      "This operation is unsupported in Oracle dialect.")
  override def prepareStatement(conn: Connection, sql: String): PreparedStatement =
    conn.prepareStatement(sql, ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE)
  override def NOW = "CURRENT_TIMESTAMP"
  override def RANDOM = "DBMS_RANDOM.VALUE"
}
/** SQL dialect for IBM DB2. Identity columns are GENERATED BY DEFAULT; dummy selects go through SYSIBM.SYSDUMMY1. */
class DB2Dialect extends Dialect {
  override def driverClass = "com.ibm.db2.jcc.DB2Driver"
  override def prepareStatement(conn: Connection, sql: String): PreparedStatement =
    conn.prepareStatement(sql, ResultSet.TYPE_SCROLL_INSENSITIVE)
  override def textType = "VARCHAR(4000)"
  // DB2 (pre-BOOLEAN versions) stores booleans as SMALLINT 0/1.
  override def booleanType = "SMALLINT"
  override def defaultExpression[R <: Record[_, R]](field: Field[_, R]): String =
    field match {
      case a: AutoIncrementable[_, _] if (a.isAutoIncrement) =>
        " GENERATED BY DEFAULT AS IDENTITY"
      case _ => field.defaultExpression.map(" DEFAULT " + _).getOrElse("")
    }
  // Sequence values are read via "NEXTVAL FOR <seq>" against the dummy table.
  override def sequenceNextValQuery[PK, R <: Record[PK, R]]
      (node: RelationNode[PK, R]): SQLQuery[PK] =
    new NativeSQLQuery[PK](
      expr[PK]("NEXTVAL FOR " + sequenceName(node.relation.PRIMARY_KEY)),
      prepareExpr("SELECT {*} FROM SYSIBM.SYSDUMMY1"))
  override def identityLastIdPredicate[PK, R <: Record[PK, R]]
      (node: RelationNode[PK, R]): Predicate =
    new SimpleExpression(node.alias + "." + node.relation.PRIMARY_KEY.name +
        " = IDENTITY_VAL_LOCAL()", Nil)
  override def identityLastIdQuery[PK, R <: Record[PK, R]]
      (node: RelationNode[PK, R]): SQLQuery[PK] =
    new NativeSQLQuery[PK](expr[PK]("IDENTITY_VAL_LOCAL()"),
      prepareExpr("SELECT {*} FROM SYSIBM.SYSDUMMY1"))
  override def createIndex(idx: Index): String = {
    var result = "CREATE "
    if (idx.isUnique) result += "UNIQUE "
    result += "INDEX " + idx.name +
        " ON " + idx.relation.qualifiedName + " (" + idx.expression + ")"
    // Partial indexes are not supported: a WHERE clause is logged and dropped.
    if (idx.whereClause != EmptyPredicate)
      ORM_LOG.warn("Ignoring WHERE clause of INDEX " + idx.name +
          ": predicates are not supported.")
    result
  }
  // DB2 requires RESTRICT on DROP SCHEMA.
  override def dropSchema(schema: Schema) =
    "DROP SCHEMA " + schema.name + " RESTRICT"
  override def insert[PK, R <: Record[PK, R]](dml: Insert[PK, R]): String = {
    var result = "INSERT INTO " + dml.relation.qualifiedName
    if (dml.fields.size > 0)
      result += " (" + dml.fields.map(_.name).mkString(", ") +
          ") VALUES (" + dml.fields.map(_.placeholder).mkString(", ") + ")"
    // All-default rows use DB2's "VALUES DEFAULT" form.
    else result += " VALUES DEFAULT"
    result
  }
  /*!## Common functions */
  override def NOW = "CURRENT TIMESTAMP"
  override def RANDOM = "RAND()"
}
/** SQL dialect for Microsoft SQL Server. Booleans map to BIT; identity columns use IDENTITY and @@IDENTITY. */
class MSSQLDialect extends Dialect {
  override def driverClass = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
  override def booleanType = "BIT"
  override def supportsSchema = false
  override def defaultExpression[R <: Record[_, R]](field: Field[_, R]): String =
    field match {
      case a: AutoIncrementable[_, _] if (a.isAutoIncrement) =>" IDENTITY"
      case _ => field.defaultExpression.map(" DEFAULT " + _).getOrElse("")
    }
  // NOT NULL must come after the IDENTITY/DEFAULT clause, hence the custom ordering here.
  override def columnDefinition[R <: Record[_, R]](field: Field[_, R]): String = {
    var result = field.name + " " + field.sqlType
    result += defaultExpression(field)
    if (field.isNotNull) result += " NOT NULL"
    result
  }
  override def createIndex(idx: Index): String = {
    var result = "CREATE "
    if (idx.isUnique) result += "UNIQUE "
    result += "INDEX " + idx.name + " ON " + idx.relation.qualifiedName +
        " (" + idx.expression + ")"
    // SQL Server supports filtered indexes, so the WHERE clause is emitted inline.
    if (idx.whereClause != EmptyPredicate)
      result += " WHERE " + idx.whereClause.toInlineSql
    result
  }
  override def sequenceNextValQuery[PK, R <: Record[PK, R]]
      (node: RelationNode[PK, R]): SQLQuery[PK] =
    throw new UnsupportedOperationException(
      "This operation is unsupported in MS SQL Server dialect.")
  override def identityLastIdPredicate[PK, R <: Record[PK, R]]
      (node: RelationNode[PK, R]): Predicate =
    new SimpleExpression(node.alias + "." + node.relation.PRIMARY_KEY.name +
        " = @@IDENTITY", Nil)
  override def identityLastIdQuery[PK, R <: Record[PK, R]]
      (node: RelationNode[PK, R]): SQLQuery[PK] =
    new Select(expr[PK]("@@IDENTITY"))
  /*!## Common functions */
  override def NOW = "CURRENT_TIMESTAMP"
  override def RANDOM = "RAND()"
}
| inca/circumflex | orm/src/main/scala/vendor.scala | Scala | bsd-2-clause | 9,208 |
package debug
trait GenericTrait[C] // empty marker trait extended by GenericClass below; trailing comments only, so debugger line numbers stay stable
class GenericClass[A, C] extends GenericTrait[C] { // two class type params, C forwarded to the trait
  def foo[B](a: A, b: B) = { // adds a method-level type parameter on top of the class ones
    // breakpoint here, keep in sync with TestValues
    val bp = 1 // do not move this line: debugger tests reference its line number
  }
}
object Generics extends App { // entry point that drives execution into the breakpoint above
  new GenericClass[Int, String]().foo[String](1, "ala") // A=Int, C=String, B=String
}
package com.softwaremill.codebrag.dao.repo
import com.softwaremill.codebrag.test.FlatSpecWithSQL
import org.scalatest.matchers.ShouldMatchers
import com.softwaremill.codebrag.domain.UserRepoDetails
import com.softwaremill.codebrag.domain.builder.UserAssembler
import com.softwaremill.codebrag.common.ClockSpec
/**
 * Integration tests for [[SQLUserRepoDetailsDAO]] run against the in-memory
 * SQL database provided by [[FlatSpecWithSQL]]: saving, updating, default
 * switching and per-user lookup of repo details.
 */
class SQLUserRepoDetailsDAOSpec extends FlatSpecWithSQL with ShouldMatchers with ClockSpec {
  val contextDao = new SQLUserRepoDetailsDAO(sqlDatabase)
  // Two distinct users to verify that details are scoped per user.
  val Bob = UserAssembler.randomUser.get
  val Alice = UserAssembler.randomUser.get
  it should "save context when one doesn't exist for user and repo" in {
    // given
    val codebragContext = UserRepoDetails(Bob.id, "codebrag", "master", clock.nowUtc)
    val bootzookaDefaultContext = UserRepoDetails(Bob.id, "bootzooka", "feature", clock.nowUtc, default = true)
    // when
    contextDao.save(codebragContext)
    contextDao.save(bootzookaDefaultContext)
    // then
    val Some(codebrag) = contextDao.find(Bob.id, "codebrag")
    val Some(bootzooka) = contextDao.find(Bob.id, "bootzooka")
    codebrag should be(codebragContext)
    bootzooka should be(bootzookaDefaultContext)
  }
  it should "make new context default when saved with default = true" in {
    // given
    val defaultContext = UserRepoDetails(Bob.id, "bootzooka", "feature", clock.nowUtc, default = true)
    contextDao.save(defaultContext)
    // when: saving a second default must clear the flag on the previous one
    val newDefaultContext = UserRepoDetails(Bob.id, "codebrag", "master", clock.nowUtc, default = true)
    contextDao.save(newDefaultContext)
    // then
    val Some(oldDefault) = contextDao.find(Bob.id, "bootzooka")
    val Some(newDefault) = contextDao.find(Bob.id, "codebrag")
    oldDefault.default should be(false)
    newDefault.default should be(true)
  }
  it should "find default context for user" in {
    // given
    val nonDefaultContext = UserRepoDetails(Bob.id, "codebrag", "bugfix", clock.nowUtc)
    contextDao.save(nonDefaultContext)
    val defaultContext = UserRepoDetails(Bob.id, "bootzooka", "feature", clock.nowUtc, default = true)
    contextDao.save(defaultContext)
    // when
    val Some(result) = contextDao.findDefault(Bob.id)
    // then
    result should be(defaultContext)
  }
  it should "save separate contexts for different user" in {
    // given: same repo name, different users
    val bobContext = UserRepoDetails(Bob.id, "bootzooka", "feature", clock.nowUtc, default = true)
    val aliceContext = UserRepoDetails(Alice.id, "bootzooka", "master", clock.nowUtc, default = true)
    contextDao.save(bobContext)
    contextDao.save(aliceContext)
    // when
    val Some(bobResult) = contextDao.findDefault(Bob.id)
    val Some(aliceResult) = contextDao.findDefault(Alice.id)
    // then
    bobResult should be(bobContext)
    aliceResult should be(aliceContext)
  }
  it should "update context for user when context exists" in {
    // given
    val context = UserRepoDetails(Bob.id, "bootzooka", "feature", clock.nowUtc, default = true)
    contextDao.save(context)
    // when: saving again for the same (user, repo) pair acts as an update
    val updatedContext = context.copy(branchName = "bugfix")
    contextDao.save(updatedContext)
    // then
    val Some(result) = contextDao.find(Bob.id, "bootzooka")
    result should be(updatedContext)
  }
}
}
| softwaremill/codebrag | codebrag-dao/src/test/scala/com/softwaremill/codebrag/dao/repo/SQLUserRepoDetailsDAOSpec.scala | Scala | agpl-3.0 | 3,211 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command
import scala.collection.mutable
import org.apache.spark.sql.{AnalysisException, Row, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.{GlobalTempView, LocalTempView, PersistedView, UnresolvedFunction, UnresolvedRelation, ViewType}
import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTable, CatalogTableType, SessionCatalog}
import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeReference, SubqueryExpression}
import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project, View}
import org.apache.spark.sql.connector.catalog.CatalogV2Implicits.NamespaceHelper
import org.apache.spark.sql.internal.StaticSQLConf
import org.apache.spark.sql.types.{BooleanType, MetadataBuilder, StringType}
import org.apache.spark.sql.util.SchemaUtils
/**
* Create or replace a view with given query plan. This command will generate some view-specific
* properties(e.g. view default database, view query output column names) and store them as
* properties in metastore, if we need to create a permanent view.
*
* @param name the name of this view.
* @param userSpecifiedColumns the output column names and optional comments specified by users,
* can be Nil if not specified.
* @param comment the comment of this view.
* @param properties the properties of this view.
* @param originalText the original SQL text of this view, can be None if this view is created via
* Dataset API.
* @param child the logical plan that represents the view; this is used to generate the logical
* plan for temporary view and the view schema.
* @param allowExisting if true, and if the view already exists, noop; if false, and if the view
* already exists, throws analysis exception.
* @param replace if true, and if the view already exists, updates it; if false, and if the view
* already exists, throws analysis exception.
* @param viewType the expected view type to be created with this command.
*/
case class CreateViewCommand(
name: TableIdentifier,
userSpecifiedColumns: Seq[(String, Option[String])],
comment: Option[String],
properties: Map[String, String],
originalText: Option[String],
child: LogicalPlan,
allowExisting: Boolean,
replace: Boolean,
viewType: ViewType)
extends RunnableCommand {
import ViewHelper._
override def innerChildren: Seq[QueryPlan[_]] = Seq(child)
if (viewType == PersistedView) {
require(originalText.isDefined, "'originalText' must be provided to create permanent view")
}
if (allowExisting && replace) {
throw new AnalysisException("CREATE VIEW with both IF NOT EXISTS and REPLACE is not allowed.")
}
private def isTemporary = viewType == LocalTempView || viewType == GlobalTempView
// Disallows 'CREATE TEMPORARY VIEW IF NOT EXISTS' to be consistent with 'CREATE TEMPORARY TABLE'
if (allowExisting && isTemporary) {
throw new AnalysisException(
"It is not allowed to define a TEMPORARY view with IF NOT EXISTS.")
}
// Temporary view names should NOT contain database prefix like "database.table"
if (isTemporary && name.database.isDefined) {
val database = name.database.get
throw new AnalysisException(
s"It is not allowed to add database prefix `$database` for the TEMPORARY view name.")
}
override def run(sparkSession: SparkSession): Seq[Row] = {
// If the plan cannot be analyzed, throw an exception and don't proceed.
val qe = sparkSession.sessionState.executePlan(child)
qe.assertAnalyzed()
val analyzedPlan = qe.analyzed
if (userSpecifiedColumns.nonEmpty &&
userSpecifiedColumns.length != analyzedPlan.output.length) {
throw new AnalysisException(s"The number of columns produced by the SELECT clause " +
s"(num: `${analyzedPlan.output.length}`) does not match the number of column names " +
s"specified by CREATE VIEW (num: `${userSpecifiedColumns.length}`).")
}
val catalog = sparkSession.sessionState.catalog
// When creating a permanent view, not allowed to reference temporary objects.
// This should be called after `qe.assertAnalyzed()` (i.e., `child` can be resolved)
verifyTemporaryObjectsNotExists(catalog)
if (viewType == LocalTempView) {
if (replace && catalog.getTempView(name.table).isDefined &&
!catalog.getTempView(name.table).get.sameResult(child)) {
logInfo(s"Try to uncache ${name.quotedString} before replacing.")
CommandUtils.uncacheTableOrView(sparkSession, name.quotedString)
}
val aliasedPlan = aliasPlan(sparkSession, analyzedPlan)
catalog.createTempView(name.table, aliasedPlan, overrideIfExists = replace)
} else if (viewType == GlobalTempView) {
if (replace && catalog.getGlobalTempView(name.table).isDefined &&
!catalog.getGlobalTempView(name.table).get.sameResult(child)) {
val db = sparkSession.sessionState.conf.getConf(StaticSQLConf.GLOBAL_TEMP_DATABASE)
val globalTempView = TableIdentifier(name.table, Option(db))
logInfo(s"Try to uncache ${globalTempView.quotedString} before replacing.")
CommandUtils.uncacheTableOrView(sparkSession, globalTempView.quotedString)
}
val aliasedPlan = aliasPlan(sparkSession, analyzedPlan)
catalog.createGlobalTempView(name.table, aliasedPlan, overrideIfExists = replace)
} else if (catalog.tableExists(name)) {
val tableMetadata = catalog.getTableMetadata(name)
if (allowExisting) {
// Handles `CREATE VIEW IF NOT EXISTS v0 AS SELECT ...`. Does nothing when the target view
// already exists.
} else if (tableMetadata.tableType != CatalogTableType.VIEW) {
throw new AnalysisException(s"$name is not a view")
} else if (replace) {
// Detect cyclic view reference on CREATE OR REPLACE VIEW.
val viewIdent = tableMetadata.identifier
checkCyclicViewReference(analyzedPlan, Seq(viewIdent), viewIdent)
// uncache the cached data before replacing an exists view
logDebug(s"Try to uncache ${viewIdent.quotedString} before replacing.")
CommandUtils.uncacheTableOrView(sparkSession, viewIdent.quotedString)
// Handles `CREATE OR REPLACE VIEW v0 AS SELECT ...`
// Nothing we need to retain from the old view, so just drop and create a new one
catalog.dropTable(viewIdent, ignoreIfNotExists = false, purge = false)
catalog.createTable(prepareTable(sparkSession, analyzedPlan), ignoreIfExists = false)
} else {
// Handles `CREATE VIEW v0 AS SELECT ...`. Throws exception when the target view already
// exists.
throw new AnalysisException(
s"View $name already exists. If you want to update the view definition, " +
"please use ALTER VIEW AS or CREATE OR REPLACE VIEW AS")
}
} else {
// Create the view if it doesn't exist.
catalog.createTable(prepareTable(sparkSession, analyzedPlan), ignoreIfExists = false)
}
Seq.empty[Row]
}
/**
 * Permanent views are not allowed to reference temp objects, including temp functions and views.
 * No-op when this command creates a temporary view (`isTemporary`).
 *
 * @param catalog the session catalog used to look up temp views / temp functions.
 */
private def verifyTemporaryObjectsNotExists(catalog: SessionCatalog): Unit = {
  import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
  if (!isTemporary) {
    // This func traverses the unresolved plan `child`. Below are the reasons:
    // 1) Analyzer replaces unresolved temporary views by a SubqueryAlias with the corresponding
    // logical plan. After replacement, it is impossible to detect whether the SubqueryAlias is
    // added/generated from a temporary view.
    // 2) The temp functions are represented by multiple classes. Most are inaccessible from this
    // package (e.g., HiveGenericUDF).
    // `isTemporary` and `child` are members of the enclosing command (declared above this view).
    def verify(child: LogicalPlan): Unit = {
      child.collect {
        // Disallow creating permanent views based on temporary views.
        case UnresolvedRelation(nameParts, _, _) if catalog.isTempView(nameParts) =>
          throw new AnalysisException(s"Not allowed to create a permanent view $name by " +
            s"referencing a temporary view ${nameParts.quoted}. " +
            "Please create a temp view instead by CREATE TEMP VIEW")
        // Only unresolved operators can still carry references to temp objects, so only
        // their expressions are scanned; `verify` recurses into subquery plans.
        case other if !other.resolved => other.expressions.flatMap(_.collect {
          // Traverse subquery plan for any unresolved relations.
          case e: SubqueryExpression => verify(e.plan)
          // Disallow creating permanent views based on temporary UDFs.
          case e: UnresolvedFunction if catalog.isTemporaryFunction(e.name) =>
            throw new AnalysisException(s"Not allowed to create a permanent view $name by " +
              s"referencing a temporary function `${e.name}`")
        })
      }
    }
    verify(child)
  }
}
/**
 * Renames the output of `analyzedPlan` to the user-specified column list by wrapping it
 * in a re-analyzed `Project`, carrying any user-supplied column comments as alias
 * metadata. Returns the plan unchanged when no explicit columns were given.
 */
private def aliasPlan(session: SparkSession, analyzedPlan: LogicalPlan): LogicalPlan = {
  if (userSpecifiedColumns.isEmpty) {
    analyzedPlan
  } else {
    val aliases = analyzedPlan.output.zip(userSpecifiedColumns).map {
      case (attr, (colName, commentOpt)) =>
        // An absent comment leaves `explicitMetadata` as None, exactly as the
        // default-argument form `Alias(attr, colName)()` would.
        val metadata = commentOpt.map { comment =>
          new MetadataBuilder().putString("comment", comment).build()
        }
        Alias(attr, colName)(explicitMetadata = metadata)
    }
    // Run the new Project through the analyzer so downstream code sees a resolved plan.
    session.sessionState.executePlan(Project(aliases, analyzedPlan)).analyzed
  }
}
/**
 * Builds the [[CatalogTable]] under which this view is persisted: the aliased schema,
 * the original SQL text, and generated view-specific properties (view default
 * catalog/namespace plus the query output column names).
 *
 * @throws AnalysisException when there is no original SQL text, i.e. the view was
 *                           created through the Dataset API.
 */
private def prepareTable(session: SparkSession, analyzedPlan: LogicalPlan): CatalogTable = {
  if (originalText.isEmpty) {
    throw new AnalysisException(
      "It is not allowed to create a persisted view from the Dataset API")
  }

  val schema = aliasPlan(session, analyzedPlan).schema
  val viewProperties =
    generateViewProperties(properties, session, analyzedPlan, schema.fieldNames)

  CatalogTable(
    identifier = name,
    tableType = CatalogTableType.VIEW,
    storage = CatalogStorageFormat.empty,
    schema = schema,
    properties = viewProperties,
    viewOriginalText = originalText,
    viewText = originalText,
    comment = comment)
}
}
/**
 * Alter a view with the given query plan. A database-qualified name always targets a
 * permanent view (throwing when it does not exist). An unqualified name first tries a
 * local/global temporary view, then falls back to a permanent view, and throws only when
 * neither exists.
 *
 * @param name the name of this view.
 * @param originalText the original SQL text of this view. Note that we can only alter a view by
 *                     SQL API, which means we always have originalText.
 * @param query the logical plan that represents the view; this is used to generate the new view
 *              schema.
 */
case class AlterViewAsCommand(
    name: TableIdentifier,
    originalText: String,
    query: LogicalPlan) extends RunnableCommand {

  import ViewHelper._

  override def innerChildren: Seq[QueryPlan[_]] = Seq(query)

  override def run(session: SparkSession): Seq[Row] = {
    // If the plan cannot be analyzed, throw an exception and don't proceed.
    val qe = session.sessionState.executePlan(query)
    qe.assertAnalyzed()
    val analyzedPlan = qe.analyzed

    // Temp views take precedence; only touch the permanent view when no temp view matched.
    val alteredTempView = session.sessionState.catalog.alterTempViewDefinition(name, analyzedPlan)
    if (!alteredTempView) {
      alterPermanentView(session, analyzedPlan)
    }
    Seq.empty[Row]
  }

  private def alterPermanentView(session: SparkSession, analyzedPlan: LogicalPlan): Unit = {
    val catalog = session.sessionState.catalog
    val viewMeta = catalog.getTableMetadata(name)
    if (viewMeta.tableType != CatalogTableType.VIEW) {
      throw new AnalysisException(s"${viewMeta.identifier} is not a view.")
    }

    // Detect cyclic view reference on ALTER VIEW.
    val viewIdent = viewMeta.identifier
    checkCyclicViewReference(analyzedPlan, Seq(viewIdent), viewIdent)

    val newProperties = generateViewProperties(
      viewMeta.properties, session, analyzedPlan, analyzedPlan.schema.fieldNames)

    catalog.alterTable(viewMeta.copy(
      schema = analyzedPlan.schema,
      properties = newProperties,
      viewOriginalText = Some(originalText),
      viewText = Some(originalText)))
  }
}
/**
 * A command for users to get views in the given database.
 * If a databaseName is not given, the current database will be used.
 * The syntax of using this command in SQL is:
 * {{{
 *   SHOW VIEWS [(IN|FROM) database_name] [[LIKE] 'identifier_with_wildcards'];
 * }}}
 */
case class ShowViewsCommand(
    databaseName: String,
    tableIdentifierPattern: Option[String]) extends RunnableCommand {

  // SHOW VIEWS reports three columns: namespace, viewName and isTemporary.
  override val output: Seq[Attribute] = Seq(
    AttributeReference("namespace", StringType, nullable = false)(),
    AttributeReference("viewName", StringType, nullable = false)(),
    AttributeReference("isTemporary", BooleanType, nullable = false)())

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    // A missing pattern is equivalent to matching every view in the database.
    val pattern = tableIdentifierPattern.getOrElse("*")
    catalog.listViews(databaseName, pattern).map { viewIdent =>
      Row(
        viewIdent.database.toArray.quoted,
        viewIdent.table,
        catalog.isTemporaryTable(viewIdent))
    }
  }
}
/**
 * Helpers shared by the view commands: generating/stripping the view-specific
 * `CatalogTable` properties and detecting cyclic view references.
 */
object ViewHelper {

  import CatalogTable._

  /**
   * Generate the view query output column names in `properties`.
   *
   * Emits `VIEW_QUERY_OUTPUT_NUM_COLUMNS` plus one indexed
   * `VIEW_QUERY_OUTPUT_COLUMN_NAME_PREFIX<i>` entry per column; an empty column list
   * yields an empty map.
   */
  private def generateQueryColumnNames(columns: Seq[String]): Map[String, String] = {
    val props = new mutable.HashMap[String, String]
    if (columns.nonEmpty) {
      props.put(VIEW_QUERY_OUTPUT_NUM_COLUMNS, columns.length.toString)
      columns.zipWithIndex.foreach { case (colName, index) =>
        props.put(s"$VIEW_QUERY_OUTPUT_COLUMN_NAME_PREFIX$index", colName)
      }
    }
    props.toMap
  }

  /**
   * Remove the view query output column names in `properties`.
   */
  private def removeQueryColumnNames(properties: Map[String, String]): Map[String, String] = {
    // We can't use `filterKeys` here, as the map returned by `filterKeys` is not serializable,
    // while `CatalogTable` should be serializable.
    properties.filterNot { case (key, _) =>
      key.startsWith(VIEW_QUERY_OUTPUT_PREFIX)
    }
  }

  /**
   * Generate the view properties in CatalogTable, including:
   * 1. view default database that is used to provide the default database name on view resolution.
   * 2. the output column names of the query that creates a view, this is used to map the output of
   *    the view child to the view output during view resolution.
   *
   * @param properties the `properties` in CatalogTable.
   * @param session the spark session.
   * @param analyzedPlan the analyzed logical plan that represents the child of a view.
   * @param fieldNames the column names of the view schema; for CREATE VIEW these may differ
   *                   from the child plan's own output (user-specified column list).
   * @return new view properties including view default database and query column names properties.
   */
  def generateViewProperties(
      properties: Map[String, String],
      session: SparkSession,
      analyzedPlan: LogicalPlan,
      fieldNames: Array[String]): Map[String, String] = {
    // for createViewCommand queryOutput may be different from fieldNames
    val queryOutput = analyzedPlan.schema.fieldNames

    // Generate the query column names, throw an AnalysisException if there exists duplicate column
    // names.
    SchemaUtils.checkColumnNameDuplication(
      fieldNames, "in the view definition", session.sessionState.conf.resolver)

    // Generate the view default catalog and namespace.
    val manager = session.sessionState.catalogManager
    // `++` lets later maps win on key collision: stale output-column props are stripped first,
    // then the current catalog/namespace props and fresh column-name props are appended.
    removeQueryColumnNames(properties) ++
      catalogAndNamespaceToProps(manager.currentCatalog.name, manager.currentNamespace) ++
      generateQueryColumnNames(queryOutput)
  }

  /**
   * Recursively search the logical plan to detect cyclic view references, throw an
   * AnalysisException if cycle detected.
   *
   * A cyclic view reference is a cycle of reference dependencies, for example, if the following
   * statements are executed:
   * CREATE VIEW testView AS SELECT id FROM tbl
   * CREATE VIEW testView2 AS SELECT id FROM testView
   * ALTER VIEW testView AS SELECT * FROM testView2
   * The view `testView` references `testView2`, and `testView2` also references `testView`,
   * therefore a reference cycle (testView -> testView2 -> testView) exists.
   *
   * @param plan the logical plan we detect cyclic view references from.
   * @param path the path between the altered view and current node.
   * @param viewIdent the table identifier of the altered view, we compare two views by the
   *                  `desc.identifier`.
   */
  def checkCyclicViewReference(
      plan: LogicalPlan,
      path: Seq[TableIdentifier],
      viewIdent: TableIdentifier): Unit = {
    plan match {
      case v: View =>
        val ident = v.desc.identifier
        val newPath = path :+ ident
        // If the table identifier equals to the `viewIdent`, current view node is the same with
        // the altered view. We detect a view reference cycle, should throw an AnalysisException.
        if (ident == viewIdent) {
          throw new AnalysisException(s"Recursive view $viewIdent detected " +
            s"(cycle: ${newPath.mkString(" -> ")})")
        } else {
          v.children.foreach { child =>
            checkCyclicViewReference(child, newPath, viewIdent)
          }
        }
      case _ =>
        plan.children.foreach(child => checkCyclicViewReference(child, path, viewIdent))
    }

    // Detect cyclic references from subqueries.
    plan.expressions.foreach { expr =>
      expr match {
        case s: SubqueryExpression =>
          checkCyclicViewReference(s.plan, path, viewIdent)
        case _ => // Do nothing.
      }
    }
  }
}
| shuangshuangwang/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala | Scala | apache-2.0 | 19,575 |
package io.iohk.ethereum.vm
object Fixtures {

  // Fork-activation heights exercised by the EVM tests.
  val ConstantinopleBlockNumber = 200
  val PetersburgBlockNumber = 400
  val PhoenixBlockNumber = 600
  val IstanbulBlockNumber = 600

  // Shared EVM configuration; all arguments are passed by name so ordering is cosmetic.
  val blockchainConfig = BlockchainConfigForEvm(
    // Forks enabled from genesis — the exact block numbers are irrelevant for these tests.
    frontierBlockNumber = 0,
    homesteadBlockNumber = 0,
    eip150BlockNumber = 0,
    eip160BlockNumber = 0,
    eip161BlockNumber = 0,
    byzantiumBlockNumber = 0,
    atlantisBlockNumber = 0,
    aghartaBlockNumber = 0,
    // Forks whose activation height matters to individual tests.
    constantinopleBlockNumber = ConstantinopleBlockNumber,
    petersburgBlockNumber = PetersburgBlockNumber,
    istanbulBlockNumber = IstanbulBlockNumber,
    phoenixBlockNumber = PhoenixBlockNumber,
    maxCodeSize = None,
    accountStartNonce = 0,
    chainId = 0x3d.toByte
  )
}
| input-output-hk/etc-client | src/test/scala/io/iohk/ethereum/vm/Fixtures.scala | Scala | mit | 782 |
package org.eigengo.akkapatterns.api
import org.eigengo.akkapatterns.domain.User
import akka.util.Timeout
import spray.routing.HttpService
import akka.actor.ActorRef
import org.eigengo.akkapatterns.core.{NotRegisteredUser, RegisteredUser}
/**
 * Spray route exposing user registration.
 *
 * Must be mixed into a class that also provides [[EndpointMarshalling]] (source of
 * `errorSelectingEitherMarshaller`) and [[AuthenticationDirectives]].
 */
trait UserService extends HttpService {
  this: EndpointMarshalling with AuthenticationDirectives =>

  import akka.pattern.ask

  // Ask-timeout applied to every message sent to `userActor`; supplied by the concrete endpoint.
  implicit val timeout: Timeout

  // Actor that handles the registration messages.
  def userActor: ActorRef

  // will return code 666 if NotRegisteredUser is received
  implicit val UserRegistrationErrorMarshaller = errorSelectingEitherMarshaller[NotRegisteredUser, RegisteredUser](666)

  // POST /user/register: unmarshals the request body into a User and asks the actor.
  // NOTE(review): the ask message is `RegisteredUser(user)` — confirm the core protocol
  // really uses this type as the registration *command* and not only as the success reply.
  val userRoute =
    path("user" / "register") {
      post {
        handleWith { user: User =>
          (userActor ? RegisteredUser(user)).mapTo[Either[NotRegisteredUser, RegisteredUser]]
        }
      }
    }
}
| eigengo/akka-patterns | server/api/src/main/scala/org/eigengo/akkapatterns/api/user.scala | Scala | apache-2.0 | 833 |
package com.twitter.finagle.thrift
import com.twitter.finagle.Service
import com.twitter.finagle.builder.{ClientBuilder, ServerBuilder}
import com.twitter.finagle.stats.InMemoryStatsReceiver
import com.twitter.test._
import com.twitter.util.TimeConversions._
import com.twitter.util.{Await, Future, Promise, Return, Time}
import java.net.{InetAddress, InetSocketAddress}
import org.apache.thrift.TApplicationException
import org.apache.thrift.protocol.TBinaryProtocol
import org.apache.thrift.transport.{TFramedTransport, TSocket}
import org.junit.runner.RunWith
import org.scalatest.{BeforeAndAfter, OneInstancePerTest, FunSuite}
import org.scalatest.junit.JUnitRunner
/**
 * Runs a raw Apache Thrift client against a Finagle Thrift server and checks the wire-level
 * behavior (exceptions, one-way calls, wrong interfaces). `OneInstancePerTest` rebuilds the
 * server/client pair for every test; `after` tears the server down.
 */
@RunWith(classOf[JUnitRunner])
class ThriftClientFinagleServerTest extends FunSuite with BeforeAndAfter with OneInstancePerTest {
  // Fulfilled by the server-side `someway` handler; lets tests observe the one-way call.
  val somewayPromise = new Promise[Unit]

  // Server-side implementation; several methods deliberately fail to exercise error paths.
  val processor = new B.ServiceIface {
    def add(a: Int, b: Int) = Future.exception(new AnException)
    def add_one(a: Int, b: Int) = Future.Void
    def multiply(a: Int, b: Int) = Future { a / b }
    def complex_return(someString: String) =
      someString match {
        case "throwAnException" =>
          // Thrown synchronously (outside a Future) on purpose — see the dedicated test below.
          throw new Exception("msg")
        case _ =>
          Future { new SomeStruct(123, someString) }
      }
    def someway() = {
      somewayPromise() = Return(())
      Future.Void
    }
    def show_me_your_dtab() = Future.value("")
    def show_me_your_dtab_size() = Future.value(0)
  }

  // Finagle server bound to an ephemeral loopback port.
  val server = ServerBuilder()
    .codec(ThriftServerFramedCodec())
    .bindTo(new InetSocketAddress(InetAddress.getLoopbackAddress, 0))
    .name("ThriftServer")
    .build(new B.Service(processor, new TBinaryProtocol.Factory()))
  val serverAddr = server.boundAddress.asInstanceOf[InetSocketAddress]

  // Plain (non-Finagle) Thrift client talking framed binary protocol to the server above.
  val (client, transport) = {
    val socket = new TSocket(serverAddr.getHostName, serverAddr.getPort, 1000/*ms*/)
    val transport = new TFramedTransport(socket)
    val protocol = new TBinaryProtocol(transport)
    (new B.Client(protocol), transport)
  }
  transport.open()

  after {
    server.close(20.milliseconds)
  }

  test("thrift client with finagle server should make successful (void) RPCs") {
    client.add_one(1, 2)
  }

  test("thrift client with finagle server should propagate exceptions") {
    val exc = intercept[AnException] { client.add(1, 2) }
    assert(exc != null)
  }

  test("thrift client with finagle server should handle complex return values") {
    assert(client.complex_return("a string").arg_two == "a string")
  }

  test("treat undeclared exceptions as internal failures") {
    val exc = intercept[TApplicationException] { client.multiply(1, 0/*div by zero*/) }
    assert(exc.getMessage() == "Internal error processing multiply: 'java.lang.ArithmeticException: / by zero'")
  }

  test("treat synchronous exceptions as transport exceptions") {
    val exc = intercept[TApplicationException] { client.complex_return("throwAnException") }
    assert(exc.getMessage() == "Internal error processing complex_return: 'java.lang.Exception: msg'")
  }

  test("handle one-way calls") {
    assert(somewayPromise.isDefined == false)
    client.someway() // just returns(!)
    assert(Await.result(somewayPromise) == ())
  }

  test("handle wrong interface") {
    // A client generated from an unrelated IDL (`F`) pointed at the same server.
    val (client, transport) = {
      val socket = new TSocket(serverAddr.getHostName, serverAddr.getPort, 1000/*ms*/)
      val transport = new TFramedTransport(socket)
      val protocol = new TBinaryProtocol(transport)
      (new F.Client(protocol), transport)
    }
    transport.open()

    val exc = intercept[TApplicationException] { client.another_method(123) }
    assert(exc.getMessage() == "Invalid method name: 'another_method'")
  }

  test("make sure we measure protocol negotiation latency") {
    Time.withCurrentTimeFrozen { timeControl =>
      val statsReceiver = new InMemoryStatsReceiver
      val name = "thrift_client"
      val service: Service[ThriftClientRequest, Array[Byte]] = ClientBuilder()
        .hosts(serverAddr)
        .name(name)
        .hostConnectionLimit(1)
        .codec(ThriftClientFramedCodec())
        .reportTo(statsReceiver)
        .build()

      val client = new B.ServiceToClient(service, new TBinaryProtocol.Factory())
      assert(Await.result(client.multiply(4,2)) == 2)
      val key = Seq(name, "codec_connection_preparation_latency_ms")
      assert(statsReceiver.repr.stats.contains(key) == true)
    }
  }
}
| a-manumohan/finagle | finagle-thrift/src/test/scala/com/twitter/finagle/thrift/ThriftClientFinagleServerTest.scala | Scala | apache-2.0 | 4,439 |
package jp.sf.amateras.scala.nio.jdbc
import java.sql._
import scala.reflect.ClassTag
import jp.sf.amateras.scala.nio._
object JDBCUtils {

  /** Executes an INSERT/UPDATE/DELETE statement and returns the affected row count. */
  def update(conn: Connection, template: SqlTemplate): Int =
    execute(conn, template)(_.executeUpdate())

  /** Runs a query and maps every row of the result set with `f`. */
  def select[T](conn: Connection, template: SqlTemplate)(f: ResultSet => T): Seq[T] =
    execute(conn, template) { ps =>
      using(ps.executeQuery()) { rs =>
        val rows = new scala.collection.mutable.ListBuffer[T]
        while (rs.next) {
          rows += f(rs)
        }
        rows.toSeq
      }
    }

  /** Single-column INT convenience; 0 when the query returns no row. */
  def selectInt(conn: Connection, template: SqlTemplate): Int =
    selectFirst[Int](conn, template)(_.getInt(1)).getOrElse(0)

  /** Single-column String convenience; "" when the query returns no row. */
  def selectString(conn: Connection, template: SqlTemplate): String =
    selectFirst[String](conn, template)(_.getString(1)).getOrElse("")

  /** Runs a query and maps only the first row, if any. */
  def selectFirst[T](conn: Connection, template: SqlTemplate)(f: ResultSet => T): Option[T] =
    execute(conn, template) { ps =>
      using(ps.executeQuery()) { rs =>
        if (rs.next) Some(f(rs)) else None
      }
    }

  // Prepares the statement, binds every template parameter (JDBC indexes are 1-based),
  // runs `f`, and lets `using` close the statement afterwards.
  private def execute[T](conn: Connection, template: SqlTemplate)(f: (PreparedStatement) => T): T =
    using(conn.prepareStatement(template.sql)) { ps =>
      for ((param, index) <- template.params.zipWithIndex) {
        TypeMapper.set(ps, index + 1, param)
      }
      f(ps)
    }
}
case class SqlTemplate(sql: String, params: Any*) | takezoe/scala-nio | src/main/scala/jp/sf/amateras/scala/nio/jdbc/JDBCUtils.scala | Scala | apache-2.0 | 1,492 |
package dotty.tools.repl
import java.util.regex.Pattern
import org.junit.Assert.{assertTrue => assert, _}
import org.junit.{Ignore, Test}
/**
 * End-to-end tests of the Dotty REPL compiler: each test threads REPL `state` through
 * `fromInitialState`/`andThen` and asserts on the captured REPL output. Expected strings
 * must match the REPL's rendering byte-for-byte.
 */
class ReplCompilerTests extends ReplTest {
  import ReplCompilerTests._

  // Captured REPL output split into trimmed lines (consumes the stored output).
  private def lines() =
    storedOutput().trim.linesIterator.toList

  @Test def compileSingle = fromInitialState { implicit state =>
    run("def foo: 1 = 1")
    assertEquals("def foo: 1", storedOutput().trim)
  }

  @Test def compileTwo =
    fromInitialState { implicit state =>
      run("def foo: 1 = 1")
    }
    .andThen { implicit state =>
      val s2 = run("def foo(i: Int): i.type = i")
      assertEquals(2, s2.objectIndex)
    }

  @Test def inspectWrapper =
    fromInitialState { implicit state =>
      run("def foo = 1")
    }.andThen { implicit state =>
      storedOutput() // discard output
      // Each REPL line is compiled into a synthetic `rs$line$N` wrapper object.
      run("val x = rs$line$1.foo")
      assertEquals("val x: Int = 1", storedOutput().trim)
    }

  @Test def testVar = fromInitialState { implicit state =>
    run("var x = 5")
    assertEquals("var x: Int = 5", storedOutput().trim)
  }

  @Test def testRes = fromInitialState { implicit state =>
    run {
      """|def foo = 1 + 1
         |val x = 5 + 5
         |1 + 1
         |var y = 5
         |10 + 10""".stripMargin
    }

    // Output is reported sorted by name, not in input order.
    val expected = List(
      "def foo: Int",
      "val res0: Int = 2",
      "val res1: Int = 20",
      "val x: Int = 10",
      "var y: Int = 5"
    )

    assertEquals(expected, lines())
  }

  @Test def testImportMutable =
    fromInitialState { implicit state =>
      run("import scala.collection.mutable")
    }
    .andThen { implicit state =>
      assertEquals(1, state.imports.size)
      run("""mutable.Map("one" -> 1)""")
      assertEquals(
        "val res0: scala.collection.mutable.Map[String, Int] = HashMap(one -> 1)",
        storedOutput().trim
      )
    }

  @Test def rebindVariable =
    fromInitialState { implicit s =>
      val state = run("var x = 5")
      assertEquals("var x: Int = 5", storedOutput().trim)
      state
    }
    .andThen { implicit s =>
      run("x = 10")
      assertEquals("x: Int = 10", storedOutput().trim)
    }

  // FIXME: Tests are not run in isolation, the classloader is corrupted after the first exception
  @Ignore @Test def i3305: Unit = {
    fromInitialState { implicit s =>
      run("null.toString")
      assert(storedOutput().startsWith("java.lang.NullPointerException"))
    }

    fromInitialState { implicit s =>
      run("def foo: Int = 1 + foo; foo")
      assert(storedOutput().startsWith("def foo: Int\\njava.lang.StackOverflowError"))
    }

    fromInitialState { implicit s =>
      run("""throw new IllegalArgumentException("Hello")""")
      assert(storedOutput().startsWith("java.lang.IllegalArgumentException: Hello"))
    }

    fromInitialState { implicit s =>
      run("val (x, y) = null")
      assert(storedOutput().startsWith("scala.MatchError: null"))
    }
  }

  @Test def i2789: Unit = fromInitialState { implicit state =>
    run("(x: Int) => println(x)")
    assert(storedOutput().startsWith("val res0: Int => Unit ="))
  }

  @Test def byNameParam: Unit = fromInitialState { implicit state =>
    run("def f(g: => Int): Int = g")
    assert(storedOutput().startsWith("def f(g: => Int): Int"))
  }

  @Test def i4051 = fromInitialState { implicit state =>
    val source =
      """val x: PartialFunction[Int, Int] = { case x => x }
        |val y = Map(("A", 1), ("B", 2), ("X", 3)).collect { case (k, v) => v }.toList""".stripMargin

    val expected = List(
      "val x: PartialFunction[Int, Int] = <function1>",
      "val y: List[Int] = List(1, 2, 3)"
    )

    run(source)
    assertEquals(expected, lines())
  }

  @Test def i5897 =
    fromInitialState { implicit state => run("given Int = 10") }
    .andThen { implicit state =>
      assertEquals(
        "def given_Int: Int",
        storedOutput().trim
      )
      run("implicitly[Int]")
      assertEquals(
        "val res0: Int = 10",
        storedOutput().trim
      )
    }

  @Test def i6200 =
    fromInitialState { implicit state =>
      run("""
        |trait Ord[T] {
        |  def compare(x: T, y: T): Int
        |  def (x: T) < (y: T) = compare(x, y) < 0
        |  def (x: T) > (y: T) = compare(x, y) > 0
        |}
        |
        |given IntOrd as Ord[Int] {
        |  def compare(x: Int, y: Int) =
        |    if (x < y) -1 else if (x > y) +1 else 0
        |}
      """.stripMargin) }
    .andThen { implicit state =>
      assertMultiLineEquals(
        """// defined trait Ord
          |// defined object IntOrd""".stripMargin,
        storedOutput().trim
      )
      run("IntOrd")
      assert(storedOutput().startsWith("val res0: IntOrd.type ="))
    }

  @Test def i7934: Unit = fromInitialState { state =>
    implicit val ctx = state.context
    assertFalse(ParseResult.isIncomplete("_ + 1")) // was: assertThrows[NullPointerException]
  }

  @Test def testSingletonPrint = fromInitialState { implicit state =>
    run("""val a = "hello"; val x: a.type = a""")
    assertMultiLineEquals("val a: String = hello\\nval x: a.type = hello", storedOutput().trim)
  }

  @Test def i6574 = fromInitialState { implicit state =>
    run("val a: 1 | 0 = 1")
    assertEquals("val a: 1 | 0 = 1", storedOutput().trim)
  }
}
object ReplCompilerTests {

  private val pattern = Pattern.compile("\\\\r[\\\\n]?|\\\\n")

  /**
   * Compare two possibly multi-line strings after normalizing every line break
   * to the platform line separator, so tests pass on all operating systems.
   */
  def assertMultiLineEquals(expected: String, actual: String): Unit = {
    def normalize(s: String): String = pattern.matcher(s).replaceAll(System.lineSeparator)
    assertEquals(normalize(expected), normalize(actual))
  }
}
| som-snytt/dotty | compiler/test/dotty/tools/repl/ReplCompilerTests.scala | Scala | apache-2.0 | 5,721 |
package com.github.meln1k.reactive.telegrambot.models
/**
 * An API response.
 *
 * `ok` is true for successful responses and false for failed ones; the trait is sealed
 * so all response shapes live in this file.
 */
sealed trait Response {
  def ok: Boolean
}
/**
 * This object represents a successful API response.
 *
 * @param result Requested object
 */
case class SuccessfulResponse(result: ResponseEntity) extends Response {
  // `override` plus an explicit result type makes the contract with `Response.ok` visible.
  override def ok: Boolean = true
}
/**
 * This object represents a successful API response carrying a batch of updates.
 *
 * @param result Requested updates
 */
case class SuccessfulResponseWithUpdates(result: Seq[Update]) extends Response {
  // `override` plus an explicit result type makes the contract with `Response.ok` visible.
  override def ok: Boolean = true
}
/**
* This object represents a failed API response.
* @param description Error explanation
*/
case class FailedResponse(description: String) extends Response {
def ok = false
} | meln1k/reactive-telegrambot | src/main/scala/com/github/meln1k/reactive/telegrambot/models/Response.scala | Scala | mit | 597 |
package controllers
import java.awt.Color
import scala.concurrent.Future
import scala.concurrent.duration._
import scalax.collection.io.dot._
import play.api.Play._
import play.api.mvc._
import play.api.libs.concurrent.Akka
import play.api.libs.concurrent.Execution.Implicits._
import play.api.libs.json._
import akka.pattern.ask
import akka.util._
import ch.inventsoft.graph.layout.{Layout, LayoutOps}
import ch.inventsoft.graph.vector.Box3
import models.Yarn
import models.guide._
import models.plan._
import utils.JsonSerialization._
object Preview extends Controller {
// GET endpoint rendering the preview page (Play template `views.html.preview`).
def show = Action {
  Ok(views.html.preview())
}
} | knittery/knittery-ui | app/controllers/Preview.scala | Scala | gpl-2.0 | 633 |
package org.denigma.drugage.domain
/**
 * Filter criteria for querying drug records; every field defaults to "unconstrained".
 *
 * NOTE(review): matching semantics (exact vs. substring on `compoundName`) are decided by
 * whatever interprets this specification — confirm against the repository implementation.
 */
case class DrugSpecification(compoundName: Option[String] = None, organism: OrganismSpecification = OrganismSpecification(),
                             pubmedId: Option[String] = None, id: Option[DrugId] = None)
| rmihael/drugage | frontend/jvm/src/main/scala/org.denigma.drugage/domain/DrugSpecification.scala | Scala | mpl-2.0 | 223 |
package com.danielasfregola.twitter4s.http.clients.rest.directmessages.parameters
import com.danielasfregola.twitter4s.http.marshalling.Parameters
// Top-level payload for creating a Direct Message event; `event` nests the actual message.
private[twitter4s] final case class CreateEventParameters(event: CreateDirectMessageEventParameters) extends Parameters
private[twitter4s] object CreateEventParameters {

  /** Builds the fully nested event payload for a plain-text DM to `recipient_id`. */
  def apply(recipient_id: String, text: String): CreateEventParameters =
    CreateEventParameters(
      CreateDirectMessageEventParameters(
        MessageCreateParameter(
          target = Target(recipient_id),
          message_data = MessageData(text))))
}
// Nested shells of the Direct Message event payload. Field names are snake_case,
// presumably to match the serialized wire format — verify against the marshaller.
private[twitter4s] final case class CreateDirectMessageEventParameters(message_create: MessageCreateParameter,
                                                                       `type`: String = "message_create")
private[twitter4s] final case class MessageCreateParameter(target: Target, message_data: MessageData)
private[twitter4s] final case class Target(recipient_id: String)
private[twitter4s] final case class MessageData(text: String)
| DanielaSfregola/twitter4s | src/main/scala/com/danielasfregola/twitter4s/http/clients/rest/directmessages/parameters/CreateEventParameters.scala | Scala | apache-2.0 | 1,081 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io.{Externalizable, ObjectInput, ObjectOutput}
import scala.collection.mutable
import org.roaringbitmap.RoaringBitmap
import org.apache.spark.SparkEnv
import org.apache.spark.internal.config
import org.apache.spark.storage.BlockManagerId
import org.apache.spark.util.Utils
/**
 * Result returned by a ShuffleMapTask to a scheduler. Includes the block manager address that the
 * task ran on as well as the sizes of outputs for each reducer, for passing on to the reduce tasks.
 *
 * Both concrete implementations in this file are `Externalizable` so statuses can be
 * shipped compactly over the wire.
 */
private[spark] sealed trait MapStatus {
  /** Location where this task was run. */
  def location: BlockManagerId

  /**
   * Estimated size for the reduce block, in bytes.
   *
   * If a block is non-empty, then this method MUST return a non-zero size. This invariant is
   * necessary for correctness, since block fetchers are allowed to skip zero-size blocks.
   */
  def getSizeForBlock(reduceId: Int): Long

  /**
   * The unique ID of this shuffle map task, if spark.shuffle.useOldFetchProtocol enabled we use
   * partitionId of the task or taskContext.taskAttemptId is used.
   */
  def mapId: Long
}
private[spark] object MapStatus {

  /**
   * Min partition number to use [[HighlyCompressedMapStatus]]. A bit ugly here because in test
   * code we can't assume SparkEnv.get exists.
   */
  private lazy val minPartitionsToUseHighlyCompressMapStatus = Option(SparkEnv.get)
    .map(_.conf.get(config.SHUFFLE_MIN_NUM_PARTS_TO_HIGHLY_COMPRESS))
    .getOrElse(config.SHUFFLE_MIN_NUM_PARTS_TO_HIGHLY_COMPRESS.defaultValue.get)

  // Picks the compact bitmap-based encoding once the partition count crosses the threshold;
  // otherwise keeps one compressed byte per partition.
  def apply(
      loc: BlockManagerId,
      uncompressedSizes: Array[Long],
      mapTaskId: Long): MapStatus = {
    if (uncompressedSizes.length > minPartitionsToUseHighlyCompressMapStatus) {
      HighlyCompressedMapStatus(loc, uncompressedSizes, mapTaskId)
    } else {
      new CompressedMapStatus(loc, uncompressedSizes, mapTaskId)
    }
  }

  private[this] val LOG_BASE = 1.1

  /**
   * Compress a size in bytes to 8 bits for efficient reporting of map output sizes.
   * We do this by encoding the log base 1.1 of the size as an integer, which can support
   * sizes up to 35 GB with at most 10% error.
   */
  def compressSize(size: Long): Byte = {
    if (size == 0) {
      // Code 0 is reserved for truly empty blocks (decompresses back to 0).
      0
    } else if (size <= 1L) {
      // A 1-byte block maps to code 1 so a non-empty block never reports size 0.
      1
    } else {
      math.min(255, math.ceil(math.log(size) / math.log(LOG_BASE)).toInt).toByte
    }
  }

  /**
   * Decompress an 8-bit encoded block size, using the reverse operation of compressSize.
   */
  def decompressSize(compressedSize: Byte): Long = {
    if (compressedSize == 0) {
      0
    } else {
      // `& 0xFF` reinterprets the byte as unsigned (codes 128..255 are negative as Byte).
      math.pow(LOG_BASE, compressedSize & 0xFF).toLong
    }
  }
}
/**
 * A [[MapStatus]] implementation that tracks the size of each block. Size for each block is
 * represented using a single byte.
 *
 * @param loc location where the task is being executed.
 * @param compressedSizes size of the blocks, indexed by reduce partition id.
 * @param _mapTaskId unique task id for the task
 */
private[spark] class CompressedMapStatus(
    private[this] var loc: BlockManagerId,
    private[this] var compressedSizes: Array[Byte],
    private[this] var _mapTaskId: Long)
  extends MapStatus with Externalizable {

  // For deserialization only (Externalizable requires a no-arg constructor).
  protected def this() = this(null, null.asInstanceOf[Array[Byte]], -1)

  def this(loc: BlockManagerId, uncompressedSizes: Array[Long], mapTaskId: Long) {
    this(loc, uncompressedSizes.map(MapStatus.compressSize), mapTaskId)
  }

  override def location: BlockManagerId = loc

  override def getSizeForBlock(reduceId: Int): Long = {
    MapStatus.decompressSize(compressedSizes(reduceId))
  }

  override def mapId: Long = _mapTaskId

  // Field order here must mirror readExternal exactly.
  override def writeExternal(out: ObjectOutput): Unit = Utils.tryOrIOException {
    loc.writeExternal(out)
    out.writeInt(compressedSizes.length)
    out.write(compressedSizes)
    out.writeLong(_mapTaskId)
  }

  override def readExternal(in: ObjectInput): Unit = Utils.tryOrIOException {
    loc = BlockManagerId(in)
    val len = in.readInt()
    compressedSizes = new Array[Byte](len)
    in.readFully(compressedSizes)
    _mapTaskId = in.readLong()
  }
}
/**
 * A [[MapStatus]] implementation that stores the accurate size of huge blocks, which are larger
 * than spark.shuffle.accurateBlockThreshold. It stores the average size of other non-empty blocks,
 * plus a bitmap for tracking which blocks are empty.
 *
 * @param loc location where the task is being executed
 * @param numNonEmptyBlocks the number of non-empty blocks
 * @param emptyBlocks a bitmap tracking which blocks are empty
 * @param avgSize average size of the non-empty and non-huge blocks
 * @param hugeBlockSizes sizes of huge blocks by their reduceId.
 * @param _mapTaskId unique task id for the task
 */
private[spark] class HighlyCompressedMapStatus private (
    private[this] var loc: BlockManagerId,
    private[this] var numNonEmptyBlocks: Int,
    private[this] var emptyBlocks: RoaringBitmap,
    private[this] var avgSize: Long,
    private[this] var hugeBlockSizes: scala.collection.Map[Int, Byte],
    private[this] var _mapTaskId: Long)
  extends MapStatus with Externalizable {

  // loc could be null when the default constructor is called during deserialization
  require(loc == null || avgSize > 0 || hugeBlockSizes.size > 0
    || numNonEmptyBlocks == 0 || _mapTaskId > 0,
    "Average size can only be zero for map stages that produced no output")

  protected def this() = this(null, -1, null, -1, null, -1) // For deserialization only

  override def location: BlockManagerId = loc

  // Exact 0 for empty blocks, near-exact size for huge blocks, the average otherwise.
  override def getSizeForBlock(reduceId: Int): Long = {
    assert(hugeBlockSizes != null)
    if (emptyBlocks.contains(reduceId)) {
      0
    } else {
      hugeBlockSizes.get(reduceId) match {
        case Some(size) => MapStatus.decompressSize(size)
        case None => avgSize
      }
    }
  }

  override def mapId: Long = _mapTaskId

  // Field order here must mirror readExternal exactly.
  override def writeExternal(out: ObjectOutput): Unit = Utils.tryOrIOException {
    loc.writeExternal(out)
    emptyBlocks.writeExternal(out)
    out.writeLong(avgSize)
    out.writeInt(hugeBlockSizes.size)
    hugeBlockSizes.foreach { kv =>
      out.writeInt(kv._1)
      out.writeByte(kv._2)
    }
    out.writeLong(_mapTaskId)
  }

  override def readExternal(in: ObjectInput): Unit = Utils.tryOrIOException {
    loc = BlockManagerId(in)
    emptyBlocks = new RoaringBitmap()
    emptyBlocks.readExternal(in)
    avgSize = in.readLong()
    val count = in.readInt()
    val hugeBlockSizesImpl = mutable.Map.empty[Int, Byte]
    (0 until count).foreach { _ =>
      val block = in.readInt()
      val size = in.readByte()
      hugeBlockSizesImpl(block) = size
    }
    hugeBlockSizes = hugeBlockSizesImpl
    _mapTaskId = in.readLong()
  }
}
private[spark] object HighlyCompressedMapStatus {
  def apply(
      loc: BlockManagerId,
      uncompressedSizes: Array[Long],
      mapTaskId: Long): HighlyCompressedMapStatus = {
    // We must keep track of which blocks are empty so that we don't report a zero-sized
    // block as being non-empty (or vice-versa) when using the average block size.
    var i = 0
    var numNonEmptyBlocks: Int = 0
    var numSmallBlocks: Int = 0
    var totalSmallBlockSize: Long = 0
    // From a compression standpoint, it shouldn't matter whether we track empty or non-empty
    // blocks. From a performance standpoint, we benefit from tracking empty blocks because
    // we expect that there will be far fewer of them, so we will perform fewer bitmap insertions.
    val emptyBlocks = new RoaringBitmap()
    val totalNumBlocks = uncompressedSizes.length
    // Blocks at or above this size keep a (compressed) per-block size instead of the average.
    val threshold = Option(SparkEnv.get)
      .map(_.conf.get(config.SHUFFLE_ACCURATE_BLOCK_THRESHOLD))
      .getOrElse(config.SHUFFLE_ACCURATE_BLOCK_THRESHOLD.defaultValue.get)
    val hugeBlockSizes = mutable.Map.empty[Int, Byte]
    while (i < totalNumBlocks) {
      val size = uncompressedSizes(i)
      if (size > 0) {
        numNonEmptyBlocks += 1
        // Huge blocks are not included in the calculation for average size, thus size for smaller
        // blocks is more accurate.
        if (size < threshold) {
          totalSmallBlockSize += size
          numSmallBlocks += 1
        } else {
          hugeBlockSizes(i) = MapStatus.compressSize(uncompressedSizes(i))
        }
      } else {
        emptyBlocks.add(i)
      }
      i += 1
    }
    // avgSize is 0 only when every block is empty or huge; getSizeForBlock then never uses it
    // for a non-empty block, which satisfies the require() in the class constructor.
    val avgSize = if (numSmallBlocks > 0) {
      totalSmallBlockSize / numSmallBlocks
    } else {
      0
    }
    // Compact the bitmap before it is captured in the (serializable) status object.
    emptyBlocks.trim()
    emptyBlocks.runOptimize()
    new HighlyCompressedMapStatus(loc, numNonEmptyBlocks, emptyBlocks, avgSize,
      hugeBlockSizes, mapTaskId)
  }
}
| goldmedal/spark | core/src/main/scala/org/apache/spark/scheduler/MapStatus.scala | Scala | apache-2.0 | 9,474 |
/*
* Copyright (c) 2020. Peerapat Asoktummarungsri <https://www.linkedin.com/in/peerapat>
*/
package yoda.orm
import java.sql.Connection
import com.typesafe.scalalogging.LazyLogging
import javax.inject.Singleton
import yoda.commons.Closer
@Singleton
class Database extends LazyLogging with Closer {

  // Connection provider; p.get hands out the connection each helper works with.
  private val p = Persistant()

  /**
   * Runs `block` against a connection obtained from [[Persistant]] and always
   * closes it afterwards, logging the elapsed wall-clock time.
   *
   * @param name  label identifying the caller in the timing log line
   * @param block work to execute against the open connection
   * @return whatever `block` returns
   */
  def withConnection[R](name: String)
                       (block: Connection => R): R = closer(p.get) { c =>
    val start: Long = System.nanoTime
    try {
      block(c)
    } finally {
      // Bug fix: nanoTime is in nanoseconds, so milliseconds require dividing by
      // 1,000,000 — the previous /1000 logged microseconds labelled as "ms".
      val executionTime = (System.nanoTime() - start) / 1000000
      logger.debug(s"$name executed in $executionTime ms.")
      c.close()
    }
  }

  /**
   * Runs `block` inside a SERIALIZABLE transaction: commits on success, rolls the
   * transaction back on any throwable, and always closes the connection.
   *
   * @param name  label identifying the caller in the timing log line
   * @param block transactional work to execute
   * @return whatever `block` returns
   */
  def withTransaction[R](name: String)
                        (block: Connection => R): R = closer(p.get) { c =>
    val start: Long = System.nanoTime
    try {
      c.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE)
      c.setAutoCommit(false)
      val result = block(c)
      // Bug fix: the transaction was never committed, so with auto-commit disabled
      // the work could be silently discarded when the connection closed.
      c.commit()
      result
    } catch {
      // Intentionally broad: any failure (including fatal errors) must roll back
      // before being rethrown.
      case t: Throwable => c.rollback()
        throw t
    } finally {
      c.close()
      logger.debug(s"$name executed in ${(System.nanoTime() - start) / 1000000} ms.")
    }
  }
}
| nuboat/yoda-orm | src/main/scala/yoda/orm/Database.scala | Scala | mit | 1,136 |
import java.io.File
import testgen.TestSuiteBuilder.{toString, _}
import testgen._
object BinarySearchTestGenerator {

  def main(args: Array[String]): Unit = {
    val file = new File("src/main/resources/binarysearch.json")

    // Render an expected result: any error, or the "not found" sentinel -1,
    // becomes None; every other index n becomes Some(n).
    def toString(expected: CanonicalDataParser.Expected): String =
      expected match {
        case Right(n) if n != -1 => s"Some($n)"
        case _ => "None"
      }

    // Builds the per-test-case data: the call on the subject-under-test plus the
    // stringified expected value.
    def fromLabeledTestFromInput(argNames: String*): ToTestCaseData =
      withLabeledTest { sut =>
        labeledTest =>
          val callArgs = sutArgsFromInput(labeledTest.result, argNames: _*)
          val sutCall = s"""$sut.${labeledTest.property}($callArgs)"""
          TestCaseData(labeledTest.description, sutCall, toString(labeledTest.expected))
      }

    val code = TestSuiteBuilder.build(file, fromLabeledTestFromInput("array", "value"))

    println(s"-------------")
    println(code)
    println(s"-------------")
  }
}
| ricemery/xscala | testgen/src/main/scala/BinarySearchTestGenerator.scala | Scala | mit | 1,060 |
package org.scalajs.jasmine
import scala.scalajs.js
// Scala.js facade for the Jasmine test environment object (declarations only; the
// implementation lives in the Jasmine JavaScript library).
trait JasmineEnv extends js.Object {
  // Accessor for Jasmine's mock clock.
  def Clock: JasmineEnv.Clock
}

object JasmineEnv {
  // Facade for Jasmine's mock clock API.
  trait Clock extends js.Object {
    // Advances the mocked clock by `time` — presumably milliseconds, per Jasmine's
    // API; confirm against the Jasmine version in use.
    def tick(time: Double): Unit
    // Replaces the real timer functions with the mock clock.
    def useMock(): Unit
  }
}
| swhgoon/scala-js | jasmine-test-framework/src/main/scala/org/scalajs/jasmine/JasmineEnv.scala | Scala | bsd-3-clause | 241 |
package capitulo04
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.FunSuite
@RunWith(classOf[JUnitRunner])
// Exercises the ways of iterating over a Map: entries, values, keys, and inversion.
// (Test-name strings are kept in Portuguese; they are runtime values.)
class IterandoPelosMaps extends FunSuite{
  val scores = Map("Bob" -> 10, "Cindy" -> 8, "Fred" -> 7, "Alice" -> 10)

  // "for can access both keys and values"
  test("for é capaz de acessar as chaves e os valores"){
    var s = ""
    for ( (k, v) <- scores) s += k + "->" + v + " "
    assert("Bob->10 Cindy->8 Fred->7 Alice->10 " == s)
  }

  // "to iterate over the values, use 'values'"
  test("para iterar pelos valores usa-se 'values'"){
    var s = ""
    for (v <- scores.values) s += v + " "
    assert("10 8 7 10 " == s)
  }

  // "to iterate over the keys, use 'keySet'"
  test("para iterar pelas chaves usa-se 'keySet'"){
    var s = ""
    for (v <- scores.keySet) s += v + " "
    assert("Bob Cindy Fred Alice " == s)
  }

  // "to invert a map, swap the keys and the values"
  test("para inverter um map, troque as chaves e os valores"){
    var s = ""
    val m = for ( (k,v) <- scores) yield (v, k)
    for ( (k, v) <- m) s += k + "->" + v + " "
    // note that Alice overwrote Bob because their keys are now both '10'
    assert("10->Alice 8->Cindy 7->Fred " == s)
  }
}
} | celioeduardo/scala-impatient | src/test/scala/capitulo04/IterandoPelosMaps.scala | Scala | mit | 1,097 |
package Tutorial
import Chisel._
import Node._
import Literal._
import scala.collection.mutable.HashMap
import scala.collection.mutable.ArrayBuffer
object testUtil {
  // Chisel UFix literal: value 1 with a width of 8 bits.
  val t = UFix(1, 8)
}
| seyedmaysamlavasani/GorillaPP | apps/multiProtocolNpu/srcM/include/TestUtil.scala | Scala | bsd-3-clause | 192 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.util.Calendar
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.errors.TreeNodeException
import org.apache.spark.sql.catalyst.plans.PlanTestBase
import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, DateTimeTestUtils, DateTimeUtils, GenericArrayData, PermissiveMode}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
/**
 * Unit tests for the Catalyst JSON expressions: GetJsonObject (JSON-path extraction),
 * JsonTuple, JsonToStructs (from_json) and StructsToJson (to_json).
 */
class JsonExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper with PlanTestBase {
  // Sample document shared by the GetJsonObject / JsonTuple path tests below.
  val json =
    """
      |{"store":{"fruit":[{"weight":8,"type":"apple"},{"weight":9,"type":"pear"}],
      |"basket":[[1,2,{"b":"y","a":"x"}],[3,4],[5,6]],"book":[{"author":"Nigel Rees",
      |"title":"Sayings of the Century","category":"reference","price":8.95},
      |{"author":"Herman Melville","title":"Moby Dick","category":"fiction","price":8.99,
      |"isbn":"0-553-21311-3"},{"author":"J. R. R. Tolkien","title":"The Lord of the Rings",
      |"category":"fiction","reader":[{"age":25,"name":"bob"},{"age":26,"name":"jack"}],
      |"price":22.99,"isbn":"0-395-19395-8"}],"bicycle":{"price":19.95,"color":"red"}},
      |"email":"amy@only_for_json_udf_test.net","owner":"amy","zip code":"94025",
      |"fb:testid":"1234"}
      |""".stripMargin

  /* invalid json with leading nulls would trigger java.io.CharConversionException
   in Jackson's JsonFactory.createParser(byte[]) due to RFC-4627 encoding detection */
  val badJson = "\\u0000\\u0000\\u0000A\\u0001AAA"

  test("$.store.bicycle") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.store.bicycle")),
      """{"price":19.95,"color":"red"}""")
  }

  test("$['store'].bicycle") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$['store'].bicycle")),
      """{"price":19.95,"color":"red"}""")
  }

  test("$.store['bicycle']") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.store['bicycle']")),
      """{"price":19.95,"color":"red"}""")
  }

  test("$['store']['bicycle']") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$['store']['bicycle']")),
      """{"price":19.95,"color":"red"}""")
  }

  test("$['key with spaces']") {
    checkEvaluation(GetJsonObject(
      Literal("""{ "key with spaces": "it works" }"""), Literal("$['key with spaces']")),
      "it works")
  }

  test("$.store.book") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.store.book")),
      """[{"author":"Nigel Rees","title":"Sayings of the Century","category":"reference",
        |"price":8.95},{"author":"Herman Melville","title":"Moby Dick","category":"fiction",
        |"price":8.99,"isbn":"0-553-21311-3"},{"author":"J. R. R. Tolkien","title":
        |"The Lord of the Rings","category":"fiction","reader":[{"age":25,"name":"bob"},
        |{"age":26,"name":"jack"}],"price":22.99,"isbn":"0-395-19395-8"}]
        |""".stripMargin.replace("\\n", ""))
  }

  test("$.store.book[0]") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.store.book[0]")),
      """{"author":"Nigel Rees","title":"Sayings of the Century",
        |"category":"reference","price":8.95}""".stripMargin.replace("\\n", ""))
  }

  test("$.store.book[*]") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.store.book[*]")),
      """[{"author":"Nigel Rees","title":"Sayings of the Century","category":"reference",
        |"price":8.95},{"author":"Herman Melville","title":"Moby Dick","category":"fiction",
        |"price":8.99,"isbn":"0-553-21311-3"},{"author":"J. R. R. Tolkien","title":
        |"The Lord of the Rings","category":"fiction","reader":[{"age":25,"name":"bob"},
        |{"age":26,"name":"jack"}],"price":22.99,"isbn":"0-395-19395-8"}]
        |""".stripMargin.replace("\\n", ""))
  }

  test("$") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$")),
      json.replace("\\n", ""))
  }

  test("$.store.book[0].category") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.store.book[0].category")),
      "reference")
  }

  test("$.store.book[*].category") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.store.book[*].category")),
      """["reference","fiction","fiction"]""")
  }

  test("$.store.book[*].isbn") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.store.book[*].isbn")),
      """["0-553-21311-3","0-395-19395-8"]""")
  }

  test("$.store.book[*].reader") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.store.book[*].reader")),
      """[{"age":25,"name":"bob"},{"age":26,"name":"jack"}]""")
  }

  test("$.store.basket[0][1]") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.store.basket[0][1]")),
      "2")
  }

  test("$.store.basket[*]") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.store.basket[*]")),
      """[[1,2,{"b":"y","a":"x"}],[3,4],[5,6]]""")
  }

  test("$.store.basket[*][0]") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.store.basket[*][0]")),
      "[1,3,5]")
  }

  test("$.store.basket[0][*]") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.store.basket[0][*]")),
      """[1,2,{"b":"y","a":"x"}]""")
  }

  test("$.store.basket[*][*]") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.store.basket[*][*]")),
      """[1,2,{"b":"y","a":"x"},3,4,5,6]""")
  }

  test("$.store.basket[0][2].b") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.store.basket[0][2].b")),
      "y")
  }

  test("$.store.basket[0][*].b") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.store.basket[0][*].b")),
      """["y"]""")
  }

  test("$.zip code") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.zip code")),
      "94025")
  }

  test("$.fb:testid") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.fb:testid")),
      "1234")
  }

  test("preserve newlines") {
    checkEvaluation(
      GetJsonObject(Literal("""{"a":"b\\nc"}"""), Literal("$.a")),
      "b\\nc")
  }

  test("escape") {
    checkEvaluation(
      GetJsonObject(Literal("""{"a":"b\\"c"}"""), Literal("$.a")),
      "b\\"c")
  }

  test("$.non_exist_key") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.non_exist_key")),
      null)
  }

  test("$..no_recursive") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$..no_recursive")),
      null)
  }

  test("$.store.book[10]") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.store.book[10]")),
      null)
  }

  test("$.store.book[0].non_exist_key") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.store.book[0].non_exist_key")),
      null)
  }

  test("$.store.basket[*].non_exist_key") {
    checkEvaluation(
      GetJsonObject(Literal(json), Literal("$.store.basket[*].non_exist_key")),
      null)
  }

  test("SPARK-16548: character conversion") {
    checkEvaluation(
      GetJsonObject(Literal(badJson), Literal("$.a")),
      null
    )
  }

  test("non foldable literal") {
    checkEvaluation(
      GetJsonObject(NonFoldableLiteral(json), NonFoldableLiteral("$.fb:testid")),
      "1234")
  }

  // Field list f1..f5 shared by the json_tuple tests below.
  val jsonTupleQuery = Literal("f1") ::
    Literal("f2") ::
    Literal("f3") ::
    Literal("f4") ::
    Literal("f5") ::
    Nil

  // Evaluates a JsonTuple expression and compares its single output row to `expected`.
  private def checkJsonTuple(jt: JsonTuple, expected: InternalRow): Unit = {
    assert(jt.eval(null).toSeq.head === expected)
  }

  test("json_tuple - hive key 1") {
    checkJsonTuple(
      JsonTuple(
        Literal("""{"f1": "value1", "f2": "value2", "f3": 3, "f5": 5.23}""") ::
          jsonTupleQuery),
      InternalRow.fromSeq(Seq("value1", "value2", "3", null, "5.23").map(UTF8String.fromString)))
  }

  test("json_tuple - hive key 2") {
    checkJsonTuple(
      JsonTuple(
        Literal("""{"f1": "value12", "f3": "value3", "f2": 2, "f4": 4.01}""") ::
          jsonTupleQuery),
      InternalRow.fromSeq(Seq("value12", "2", "value3", "4.01", null).map(UTF8String.fromString)))
  }

  test("json_tuple - hive key 2 (mix of foldable fields)") {
    checkJsonTuple(
      JsonTuple(Literal("""{"f1": "value12", "f3": "value3", "f2": 2, "f4": 4.01}""") ::
        Literal("f1") ::
        NonFoldableLiteral("f2") ::
        NonFoldableLiteral("f3") ::
        Literal("f4") ::
        Literal("f5") ::
        Nil),
      InternalRow.fromSeq(Seq("value12", "2", "value3", "4.01", null).map(UTF8String.fromString)))
  }

  test("json_tuple - hive key 3") {
    checkJsonTuple(
      JsonTuple(
        Literal("""{"f1": "value13", "f4": "value44", "f3": "value33", "f2": 2, "f5": 5.01}""") ::
          jsonTupleQuery),
      InternalRow.fromSeq(
        Seq("value13", "2", "value33", "value44", "5.01").map(UTF8String.fromString)))
  }

  test("json_tuple - hive key 3 (nonfoldable json)") {
    checkJsonTuple(
      JsonTuple(
        NonFoldableLiteral(
          """{"f1": "value13", "f4": "value44",
            | "f3": "value33", "f2": 2, "f5": 5.01}""".stripMargin)
          :: jsonTupleQuery),
      InternalRow.fromSeq(
        Seq("value13", "2", "value33", "value44", "5.01").map(UTF8String.fromString)))
  }

  test("json_tuple - hive key 3 (nonfoldable fields)") {
    checkJsonTuple(
      JsonTuple(Literal(
        """{"f1": "value13", "f4": "value44",
          | "f3": "value33", "f2": 2, "f5": 5.01}""".stripMargin) ::
        NonFoldableLiteral("f1") ::
        NonFoldableLiteral("f2") ::
        NonFoldableLiteral("f3") ::
        NonFoldableLiteral("f4") ::
        NonFoldableLiteral("f5") ::
        Nil),
      InternalRow.fromSeq(
        Seq("value13", "2", "value33", "value44", "5.01").map(UTF8String.fromString)))
  }

  test("json_tuple - hive key 4 - null json") {
    checkJsonTuple(
      JsonTuple(Literal(null) :: jsonTupleQuery),
      InternalRow(null, null, null, null, null))
  }

  test("json_tuple - hive key 5 - null and empty fields") {
    checkJsonTuple(
      JsonTuple(Literal("""{"f1": "", "f5": null}""") :: jsonTupleQuery),
      InternalRow(UTF8String.fromString(""), null, null, null, null))
  }

  test("json_tuple - hive key 6 - invalid json (array)") {
    checkJsonTuple(
      JsonTuple(Literal("[invalid JSON string]") :: jsonTupleQuery),
      InternalRow(null, null, null, null, null))
  }

  test("json_tuple - invalid json (object start only)") {
    checkJsonTuple(
      JsonTuple(Literal("{") :: jsonTupleQuery),
      InternalRow(null, null, null, null, null))
  }

  test("json_tuple - invalid json (no object end)") {
    checkJsonTuple(
      JsonTuple(Literal("""{"foo": "bar"""") :: jsonTupleQuery),
      InternalRow(null, null, null, null, null))
  }

  test("json_tuple - invalid json (invalid json)") {
    checkJsonTuple(
      JsonTuple(Literal("\\\\") :: jsonTupleQuery),
      InternalRow(null, null, null, null, null))
  }

  test("SPARK-16548: json_tuple - invalid json with leading nulls") {
    checkJsonTuple(
      JsonTuple(Literal(badJson) :: jsonTupleQuery),
      InternalRow(null, null, null, null, null))
  }

  test("json_tuple - preserve newlines") {
    checkJsonTuple(
      JsonTuple(Literal("{\\"a\\":\\"b\\nc\\"}") :: Literal("a") :: Nil),
      InternalRow(UTF8String.fromString("b\\nc")))
  }

  test("SPARK-21677: json_tuple throws NullPointException when column is null as string type") {
    checkJsonTuple(
      JsonTuple(Literal("""{"f1": 1, "f2": 2}""") ::
        NonFoldableLiteral("f1") ::
        NonFoldableLiteral("cast(NULL AS STRING)") ::
        NonFoldableLiteral("f2") ::
        Nil),
      InternalRow(UTF8String.fromString("1"), null, UTF8String.fromString("2")))
  }

  test("SPARK-21804: json_tuple returns null values within repeated columns except the first one") {
    checkJsonTuple(
      JsonTuple(Literal("""{"f1": 1, "f2": 2}""") ::
        NonFoldableLiteral("f1") ::
        NonFoldableLiteral("cast(NULL AS STRING)") ::
        NonFoldableLiteral("f1") ::
        Nil),
      InternalRow(UTF8String.fromString("1"), null, UTF8String.fromString("1")))
  }

  // GMT time-zone id passed to from_json/to_json in the tests below.
  val gmtId = Option(DateTimeUtils.TimeZoneGMT.getID)

  test("from_json") {
    val jsonData = """{"a": 1}"""
    val schema = StructType(StructField("a", IntegerType) :: Nil)
    checkEvaluation(
      JsonToStructs(schema, Map.empty, Literal(jsonData), gmtId, true),
      InternalRow(1)
    )
  }

  test("from_json - invalid data") {
    val jsonData = """{"a" 1}"""
    val schema = StructType(StructField("a", IntegerType) :: Nil)
    checkEvaluation(
      JsonToStructs(schema, Map.empty, Literal(jsonData), gmtId, true),
      null
    )

    // Other modes should still return `null`.
    checkEvaluation(
      JsonToStructs(schema, Map("mode" -> PermissiveMode.name), Literal(jsonData), gmtId, true),
      null
    )
  }

  test("from_json - input=array, schema=array, output=array") {
    val input = """[{"a": 1}, {"a": 2}]"""
    val schema = ArrayType(StructType(StructField("a", IntegerType) :: Nil))
    val output = InternalRow(1) :: InternalRow(2) :: Nil
    checkEvaluation(JsonToStructs(schema, Map.empty, Literal(input), gmtId, true), output)
  }

  test("from_json - input=object, schema=array, output=array of single row") {
    val input = """{"a": 1}"""
    val schema = ArrayType(StructType(StructField("a", IntegerType) :: Nil))
    val output = InternalRow(1) :: Nil
    checkEvaluation(JsonToStructs(schema, Map.empty, Literal(input), gmtId, true), output)
  }

  test("from_json - input=empty array, schema=array, output=empty array") {
    val input = "[ ]"
    val schema = ArrayType(StructType(StructField("a", IntegerType) :: Nil))
    val output = Nil
    checkEvaluation(JsonToStructs(schema, Map.empty, Literal(input), gmtId, true), output)
  }

  test("from_json - input=empty object, schema=array, output=array of single row with null") {
    val input = "{ }"
    val schema = ArrayType(StructType(StructField("a", IntegerType) :: Nil))
    val output = InternalRow(null) :: Nil
    checkEvaluation(JsonToStructs(schema, Map.empty, Literal(input), gmtId, true), output)
  }

  test("from_json - input=array of single object, schema=struct, output=single row") {
    val input = """[{"a": 1}]"""
    val schema = StructType(StructField("a", IntegerType) :: Nil)
    val output = InternalRow(1)
    checkEvaluation(JsonToStructs(schema, Map.empty, Literal(input), gmtId, true), output)
  }

  test("from_json - input=array, schema=struct, output=null") {
    val input = """[{"a": 1}, {"a": 2}]"""
    val schema = StructType(StructField("a", IntegerType) :: Nil)
    val output = null
    checkEvaluation(JsonToStructs(schema, Map.empty, Literal(input), gmtId, true), output)
  }

  test("from_json - input=empty array, schema=struct, output=null") {
    val input = """[]"""
    val schema = StructType(StructField("a", IntegerType) :: Nil)
    val output = null
    checkEvaluation(JsonToStructs(schema, Map.empty, Literal(input), gmtId, true), output)
  }

  test("from_json - input=empty object, schema=struct, output=single row with null") {
    val input = """{ }"""
    val schema = StructType(StructField("a", IntegerType) :: Nil)
    val output = InternalRow(null)
    checkEvaluation(JsonToStructs(schema, Map.empty, Literal(input), gmtId, true), output)
  }

  test("from_json null input column") {
    val schema = StructType(StructField("a", IntegerType) :: Nil)
    checkEvaluation(
      JsonToStructs(schema, Map.empty, Literal.create(null, StringType), gmtId, true),
      null
    )
  }

  test("SPARK-20549: from_json bad UTF-8") {
    val schema = StructType(StructField("a", IntegerType) :: Nil)
    checkEvaluation(
      JsonToStructs(schema, Map.empty, Literal(badJson), gmtId, true),
      null)
  }

  test("from_json with timestamp") {
    val schema = StructType(StructField("t", TimestampType) :: Nil)

    val jsonData1 = """{"t": "2016-01-01T00:00:00.123Z"}"""
    var c = Calendar.getInstance(DateTimeUtils.TimeZoneGMT)
    c.set(2016, 0, 1, 0, 0, 0)
    c.set(Calendar.MILLISECOND, 123)
    checkEvaluation(
      JsonToStructs(schema, Map.empty, Literal(jsonData1), gmtId, true),
      InternalRow(c.getTimeInMillis * 1000L)
    )
    // The result doesn't change because the json string includes timezone string ("Z" here),
    // which means the string represents the timestamp string in the timezone regardless of
    // the timeZoneId parameter.
    checkEvaluation(
      JsonToStructs(schema, Map.empty, Literal(jsonData1), Option("PST"), true),
      InternalRow(c.getTimeInMillis * 1000L)
    )

    val jsonData2 = """{"t": "2016-01-01T00:00:00"}"""
    for (tz <- DateTimeTestUtils.ALL_TIMEZONES) {
      c = Calendar.getInstance(tz)
      c.set(2016, 0, 1, 0, 0, 0)
      c.set(Calendar.MILLISECOND, 0)
      checkEvaluation(
        JsonToStructs(
          schema,
          Map("timestampFormat" -> "yyyy-MM-dd'T'HH:mm:ss"),
          Literal(jsonData2),
          Option(tz.getID),
          true),
        InternalRow(c.getTimeInMillis * 1000L)
      )
      checkEvaluation(
        JsonToStructs(
          schema,
          Map("timestampFormat" -> "yyyy-MM-dd'T'HH:mm:ss",
            DateTimeUtils.TIMEZONE_OPTION -> tz.getID),
          Literal(jsonData2),
          gmtId,
          true),
        InternalRow(c.getTimeInMillis * 1000L)
      )
    }
  }

  test("SPARK-19543: from_json empty input column") {
    val schema = StructType(StructField("a", IntegerType) :: Nil)
    checkEvaluation(
      JsonToStructs(schema, Map.empty, Literal.create(" ", StringType), gmtId, true),
      null
    )
  }

  test("to_json - struct") {
    val schema = StructType(StructField("a", IntegerType) :: Nil)
    val struct = Literal.create(create_row(1), schema)
    checkEvaluation(
      StructsToJson(Map.empty, struct, gmtId),
      """{"a":1}"""
    )
  }

  test("to_json - array") {
    val inputSchema = ArrayType(StructType(StructField("a", IntegerType) :: Nil))
    val input = new GenericArrayData(InternalRow(1) :: InternalRow(2) :: Nil)
    val output = """[{"a":1},{"a":2}]"""
    checkEvaluation(
      StructsToJson(Map.empty, Literal.create(input, inputSchema), gmtId),
      output)
  }

  test("to_json - array with single empty row") {
    val inputSchema = ArrayType(StructType(StructField("a", IntegerType) :: Nil))
    val input = new GenericArrayData(InternalRow(null) :: Nil)
    val output = """[{}]"""
    checkEvaluation(
      StructsToJson(Map.empty, Literal.create(input, inputSchema), gmtId),
      output)
  }

  test("to_json - empty array") {
    val inputSchema = ArrayType(StructType(StructField("a", IntegerType) :: Nil))
    val input = new GenericArrayData(Nil)
    val output = """[]"""
    checkEvaluation(
      StructsToJson(Map.empty, Literal.create(input, inputSchema), gmtId),
      output)
  }

  test("to_json null input column") {
    val schema = StructType(StructField("a", IntegerType) :: Nil)
    val struct = Literal.create(null, schema)
    checkEvaluation(
      StructsToJson(Map.empty, struct, gmtId),
      null
    )
  }

  test("to_json with timestamp") {
    val schema = StructType(StructField("t", TimestampType) :: Nil)
    val c = Calendar.getInstance(DateTimeUtils.TimeZoneGMT)
    c.set(2016, 0, 1, 0, 0, 0)
    c.set(Calendar.MILLISECOND, 0)
    val struct = Literal.create(create_row(c.getTimeInMillis * 1000L), schema)

    checkEvaluation(
      StructsToJson(Map.empty, struct, gmtId),
      """{"t":"2016-01-01T00:00:00.000Z"}"""
    )
    checkEvaluation(
      StructsToJson(Map.empty, struct, Option("PST")),
      """{"t":"2015-12-31T16:00:00.000-08:00"}"""
    )

    checkEvaluation(
      StructsToJson(
        Map("timestampFormat" -> "yyyy-MM-dd'T'HH:mm:ss",
          DateTimeUtils.TIMEZONE_OPTION -> gmtId.get),
        struct,
        gmtId),
      """{"t":"2016-01-01T00:00:00"}"""
    )
    checkEvaluation(
      StructsToJson(
        Map("timestampFormat" -> "yyyy-MM-dd'T'HH:mm:ss",
          DateTimeUtils.TIMEZONE_OPTION -> "PST"),
        struct,
        gmtId),
      """{"t":"2015-12-31T16:00:00"}"""
    )
  }

  test("SPARK-21513: to_json support map[string, struct] to json") {
    val schema = MapType(StringType, StructType(StructField("a", IntegerType) :: Nil))
    val input = Literal.create(ArrayBasedMapData(Map("test" -> InternalRow(1))), schema)
    checkEvaluation(
      StructsToJson(Map.empty, input),
      """{"test":{"a":1}}"""
    )
  }

  test("SPARK-21513: to_json support map[struct, struct] to json") {
    val schema = MapType(StructType(StructField("a", IntegerType) :: Nil),
      StructType(StructField("b", IntegerType) :: Nil))
    val input = Literal.create(ArrayBasedMapData(Map(InternalRow(1) -> InternalRow(2))), schema)
    checkEvaluation(
      StructsToJson(Map.empty, input),
      """{"[1]":{"b":2}}"""
    )
  }

  test("SPARK-21513: to_json support map[string, integer] to json") {
    val schema = MapType(StringType, IntegerType)
    val input = Literal.create(ArrayBasedMapData(Map("a" -> 1)), schema)
    checkEvaluation(
      StructsToJson(Map.empty, input),
      """{"a":1}"""
    )
  }

  test("to_json - array with maps") {
    val inputSchema = ArrayType(MapType(StringType, IntegerType))
    val input = new GenericArrayData(ArrayBasedMapData(
      Map("a" -> 1)) :: ArrayBasedMapData(Map("b" -> 2)) :: Nil)
    val output = """[{"a":1},{"b":2}]"""
    checkEvaluation(
      StructsToJson(Map.empty, Literal.create(input, inputSchema), gmtId),
      output)
  }

  test("to_json - array with single map") {
    val inputSchema = ArrayType(MapType(StringType, IntegerType))
    val input = new GenericArrayData(ArrayBasedMapData(Map("a" -> 1)) :: Nil)
    val output = """[{"a":1}]"""
    checkEvaluation(
      StructsToJson(Map.empty, Literal.create(input, inputSchema), gmtId),
      output)
  }

  test("to_json: verify MapType's value type instead of key type") {
    // Keys in map are treated as strings when converting to JSON. The type doesn't matter at all.
    val mapType1 = MapType(CalendarIntervalType, IntegerType)
    val schema1 = StructType(StructField("a", mapType1) :: Nil)
    val struct1 = Literal.create(null, schema1)
    checkEvaluation(
      StructsToJson(Map.empty, struct1, gmtId),
      null
    )

    // The value type must be valid for converting to JSON.
    val mapType2 = MapType(IntegerType, CalendarIntervalType)
    val schema2 = StructType(StructField("a", mapType2) :: Nil)
    val struct2 = Literal.create(null, schema2)
    intercept[TreeNodeException[_]] {
      checkEvaluation(
        StructsToJson(Map.empty, struct2, gmtId),
        null
      )
    }
  }

  test("from_json missing fields") {
    for (forceJsonNullableSchema <- Seq(false, true)) {
      val input =
        """{
          |  "a": 1,
          |  "c": "foo"
          |}
          |""".stripMargin
      val jsonSchema = new StructType()
        .add("a", LongType, nullable = false)
        .add("b", StringType, nullable = false)
        .add("c", StringType, nullable = false)
      val output = InternalRow(1L, null, UTF8String.fromString("foo"))
      val expr = JsonToStructs(
        jsonSchema, Map.empty, Literal.create(input, StringType), gmtId, forceJsonNullableSchema)
      checkEvaluation(expr, output)
      val schema = expr.dataType
      val schemaToCompare = if (forceJsonNullableSchema) jsonSchema.asNullable else jsonSchema
      assert(schemaToCompare == schema)
    }
  }
}
| bravo-zhang/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/JsonExpressionsSuite.scala | Scala | apache-2.0 | 24,342 |
/*
* Copyright © 2015-2019 the contributors (see Contributors.md).
*
* This file is part of Knora.
*
* Knora is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Knora is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public
* License along with Knora. If not, see <http://www.gnu.org/licenses/>.
*/
package org.knora.webapi.e2e.admin
import java.net.URLEncoder
import akka.actor.ActorSystem
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers._
import akka.http.scaladsl.testkit.RouteTestTimeout
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.util.Timeout
import com.typesafe.config.{Config, ConfigFactory}
import org.eclipse.rdf4j.model.Model
import org.knora.webapi.messages.admin.responder.projectsmessages._
import org.knora.webapi.messages.admin.responder.usersmessages.UserADM
import org.knora.webapi.messages.admin.responder.usersmessages.UsersADMJsonProtocol._
import org.knora.webapi.messages.store.triplestoremessages.{StringLiteralV2, TriplestoreJsonProtocol}
import org.knora.webapi.messages.v1.responder.sessionmessages.SessionJsonProtocol
import org.knora.webapi.testing.tags.E2ETest
import org.knora.webapi.util.{AkkaHttpUtils, MutableTestIri}
import org.knora.webapi.{E2ESpec, IRI, SharedTestDataADM}
import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
object ProjectsADME2ESpec {
  // Akka configuration overriding the log levels for this E2E spec.
  val config: Config = ConfigFactory.parseString(
    """
      akka.loglevel = "DEBUG"
      akka.stdout-loglevel = "DEBUG"
    """.stripMargin)
}
/**
* End-to-End (E2E) test specification for testing groups endpoint.
*/
@E2ETest
class ProjectsADME2ESpec extends E2ESpec(ProjectsADME2ESpec.config) with SessionJsonProtocol with ProjectsADMJsonProtocol with TriplestoreJsonProtocol {
private implicit def default(implicit system: ActorSystem) = RouteTestTimeout(30.seconds)
private val rootEmail = SharedTestDataADM.rootUser.email
private val testPass = SharedTestDataADM.testPass
private val projectIri = SharedTestDataADM.imagesProject.id
private val projectIriEnc = URLEncoder.encode(projectIri, "utf-8")
private val projectShortname = SharedTestDataADM.imagesProject.shortname
private val projectShortcode = SharedTestDataADM.imagesProject.shortcode
"The Projects Route ('admin/projects')" when {
"used to query for project information" should {

  "return all projects" in {
    val request = Get(baseApiUrl + s"/admin/projects") ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    // log.debug(s"response: {}", response)
    assert(response.status === StatusCodes.OK)

    // log.debug("projects as objects: {}", AkkaHttpUtils.httpResponseToJson(response).fields("projects").convertTo[Seq[ProjectInfoV1]])

    // Count presumably matches the number of projects in the shared test
    // data set loaded before this spec runs — TODO confirm against the fixtures.
    val projects: Seq[ProjectADM] = AkkaHttpUtils.httpResponseToJson(response).fields("projects").convertTo[Seq[ProjectADM]]
    projects.size should be (8)
  }

  // The next three tests exercise the three supported project identifiers:
  // IRI, shortname and shortcode.
  "return the information for a single project identified by iri" in {
    val request = Get(baseApiUrl + s"/admin/projects/iri/$projectIriEnc") ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    // log.debug(s"response: {}", response)
    assert(response.status === StatusCodes.OK)
  }

  "return the information for a single project identified by shortname" in {
    val request = Get(baseApiUrl + s"/admin/projects/shortname/$projectShortname") ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    // log.debug(s"response: {}", response)
    assert(response.status === StatusCodes.OK)
  }

  "return the information for a single project identified by shortcode" in {
    val request = Get(baseApiUrl + s"/admin/projects/shortcode/$projectShortcode") ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    // log.debug(s"response: {}", response)
    assert(response.status === StatusCodes.OK)
  }

  "return the project's restricted view settings" in {
    val request = Get(baseApiUrl + s"/admin/projects/iri/$projectIriEnc/RestrictedViewSettings") ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    logger.debug(s"response: {}", response)
    assert(response.status === StatusCodes.OK)

    // Expected values come from the restricted-view settings stored for the
    // images project in the shared test data.
    val settings: ProjectRestrictedViewSettingsADM = AkkaHttpUtils.httpResponseToJson(response).fields("settings").convertTo[ProjectRestrictedViewSettingsADM]
    settings.size should be (Some("!512,512"))
    settings.watermark should be (Some("path_to_image"))
  }
}
"used to modify project information" should {
// Holds the IRI of the project created below so that the subsequent
// UPDATE and DELETE tests can refer to the same project. Tests in this
// block are therefore order-dependent.
val newProjectIri = new MutableTestIri

"CREATE a new project and return the project info if the supplied shortname is unique" in {
  val request = Post(baseApiUrl + s"/admin/projects", HttpEntity(ContentTypes.`application/json`, SharedTestDataADM.createProjectRequest)) ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
  val response: HttpResponse = singleAwaitingRequest(request)
  logger.debug(s"response: {}", response)
  response.status should be (StatusCodes.OK)

  // Each asserted value mirrors a field of SharedTestDataADM.createProjectRequest.
  val result = AkkaHttpUtils.httpResponseToJson(response).fields("project").convertTo[ProjectADM]
  result.shortname should be ("newproject")
  result.shortcode should be ("1111")
  result.longname should be (Some("project longname"))
  result.description should be (Seq(StringLiteralV2(value = "project description", language = Some("en"))))
  result.keywords should be (Seq("keywords"))
  result.logo should be (Some("/fu/bar/baz.jpg"))
  result.status should be (true)
  result.selfjoin should be (false)

  // Remember the server-assigned IRI for the UPDATE/DELETE tests below.
  newProjectIri.set(result.id)
  // log.debug("newProjectIri: {}", newProjectIri.get)
}
"return a 'BadRequest' if the supplied project shortname during creation is not unique" in {
  // FIX: the payload previously used ';' instead of ':' after "shortcode",
  // which made the JSON itself invalid. The server then returned 400 for a
  // parse error rather than for the duplicate shortname this test is meant
  // to exercise. The payload is now valid JSON whose shortname duplicates
  // the project created in the CREATE test above.
  val params =
    s"""
       |{
       |    "shortname": "newproject",
       |    "shortcode": "1112",
       |    "longname": "project longname",
       |    "description": [{"value": "project description", "language": "en"}],
       |    "keywords": ["keywords"],
       |    "logo": "/fu/bar/baz.jpg",
       |    "status": true,
       |    "selfjoin": false
       |}
     """.stripMargin

  val request = Post(baseApiUrl + s"/admin/projects", HttpEntity(ContentTypes.`application/json`, params)) ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
  val response: HttpResponse = singleAwaitingRequest(request)
  // log.debug(s"response: {}", response)
  response.status should be (StatusCodes.BadRequest)
}
"return 'BadRequest' if 'shortname' during creation is missing" in {
  // FIX: replaced the ';' typo after "shortcode" with ':' so the payload is
  // valid JSON that is merely missing the required "shortname" field. With
  // the typo, the 400 came from the JSON parser instead of the
  // missing-shortname validation this test is meant to exercise.
  val params =
    s"""
       |{
       |    "shortcode": "1112",
       |    "longname": "project longname",
       |    "description": [{"value": "project description", "language": "en"}],
       |    "keywords": ["keywords"],
       |    "logo": "/fu/bar/baz.jpg",
       |    "status": true,
       |    "selfjoin": false
       |}
     """.stripMargin

  val request = Post(baseApiUrl + s"/admin/projects", HttpEntity(ContentTypes.`application/json`, params)) ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
  val response: HttpResponse = singleAwaitingRequest(request)
  // log.debug(s"response: {}", response)
  response.status should be (StatusCodes.BadRequest)
}
"return 'BadRequest' if 'shortcode' during creation is missing" in {
  // FIX: replaced the ';' typo after "shortname" with ':' so the payload is
  // valid JSON that is merely missing the required "shortcode" field,
  // ensuring the 400 comes from validation rather than a JSON parse error.
  val params =
    s"""
       |{
       |    "shortname": "newproject2",
       |    "longname": "project longname",
       |    "description": [{"value": "project description", "language": "en"}],
       |    "keywords": ["keywords"],
       |    "logo": "/fu/bar/baz.jpg",
       |    "status": true,
       |    "selfjoin": false
       |}
     """.stripMargin

  val request = Post(baseApiUrl + s"/admin/projects", HttpEntity(ContentTypes.`application/json`, params)) ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
  val response: HttpResponse = singleAwaitingRequest(request)
  // log.debug(s"response: {}", response)
  response.status should be (StatusCodes.BadRequest)
}
"return 'BadRequest' if 'project description' during creation is missing" in {
  // FIX: replaced the ';' typos after "shortcode" and "shortname" with ':'
  // so the payload is valid JSON whose only problem is the empty
  // "description" array, ensuring the 400 comes from the description
  // validation rather than a JSON parse error.
  val params =
    s"""
       |{
       |    "shortcode": "1114",
       |    "shortname": "newproject5",
       |    "longname": "project longname",
       |    "description": [],
       |    "keywords": ["keywords"],
       |    "logo": "/fu/bar/baz.jpg",
       |    "status": true,
       |    "selfjoin": false
       |}
     """.stripMargin

  val request = Post(baseApiUrl + s"/admin/projects", HttpEntity(ContentTypes.`application/json`, params)) ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
  val response: HttpResponse = singleAwaitingRequest(request)
  // log.debug(s"response: {}", response)
  response.status should be (StatusCodes.BadRequest)
}
"UPDATE a project" in {
  // Depends on the CREATE test above having stored the new project's IRI.
  val projectIriEncoded = URLEncoder.encode(newProjectIri.get, "utf-8")

  val request = Put(baseApiUrl + s"/admin/projects/iri/" + projectIriEncoded, HttpEntity(ContentTypes.`application/json`, SharedTestDataADM.updateProjectRequest)) ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
  val response: HttpResponse = singleAwaitingRequest(request)
  // log.debug(s"response: {}", response)
  response.status should be (StatusCodes.OK)

  // Asserted values mirror SharedTestDataADM.updateProjectRequest; shortname
  // and shortcode are immutable and keep their original values.
  val result: ProjectADM = AkkaHttpUtils.httpResponseToJson(response).fields("project").convertTo[ProjectADM]
  result.shortname should be ("newproject")
  result.shortcode should be ("1111")
  result.longname should be (Some("updated project longname"))
  result.description should be (Seq(StringLiteralV2(value = "updated project description", language = Some("en"))))
  result.keywords.sorted should be (Seq("updated", "keywords").sorted)
  result.logo should be (Some("/fu/bar/baz-updated.jpg"))
  result.status should be (true)
  result.selfjoin should be (true)
}

"DELETE a project" in {
  val projectIriEncoded = URLEncoder.encode(newProjectIri.get, "utf-8")

  val request = Delete(baseApiUrl + s"/admin/projects/iri/" + projectIriEncoded) ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
  val response: HttpResponse = singleAwaitingRequest(request)
  // log.debug(s"response: {}", response)
  response.status should be (StatusCodes.OK)

  // Deletion is a soft delete: the project is returned with status == false
  // rather than being removed.
  val result: ProjectADM = AkkaHttpUtils.httpResponseToJson(response).fields("project").convertTo[ProjectADM]
  result.status should be (false)
}
}
"used to query members [FUNCTIONALITY]" should {

  // The member counts below presumably reflect the users assigned to the
  // images project in the shared test data — TODO confirm against fixtures.

  "return all members of a project identified by iri" in {
    val request = Get(baseApiUrl + s"/admin/projects/iri/$projectIriEnc/members") ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    // log.debug(s"response: {}", response)
    assert(response.status === StatusCodes.OK)

    val members: Seq[UserADM] = AkkaHttpUtils.httpResponseToJson(response).fields("members").convertTo[Seq[UserADM]]
    members.size should be (4)
  }

  "return all members of a project identified by shortname" in {
    val request = Get(baseApiUrl + s"/admin/projects/shortname/$projectShortname/members") ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    // log.debug(s"response: {}", response)
    assert(response.status === StatusCodes.OK)

    val members: Seq[UserADM] = AkkaHttpUtils.httpResponseToJson(response).fields("members").convertTo[Seq[UserADM]]
    members.size should be (4)
  }

  "return all members of a project identified by shortcode" in {
    val request = Get(baseApiUrl + s"/admin/projects/shortcode/$projectShortcode/members") ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    // log.debug(s"response: {}", response)
    assert(response.status === StatusCodes.OK)

    val members: Seq[UserADM] = AkkaHttpUtils.httpResponseToJson(response).fields("members").convertTo[Seq[UserADM]]
    members.size should be (4)
  }

  "return all admin members of a project identified by iri" in {
    val request = Get(baseApiUrl + s"/admin/projects/iri/$projectIriEnc/admin-members") ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    // log.debug(s"response: {}", response)
    assert(response.status === StatusCodes.OK)

    val members: Seq[UserADM] = AkkaHttpUtils.httpResponseToJson(response).fields("members").convertTo[Seq[UserADM]]
    members.size should be (2)
  }

  "return all admin members of a project identified by shortname" in {
    val request = Get(baseApiUrl + s"/admin/projects/shortname/$projectShortname/admin-members") ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    // log.debug(s"response: {}", response)
    assert(response.status === StatusCodes.OK)

    val members: Seq[UserADM] = AkkaHttpUtils.httpResponseToJson(response).fields("members").convertTo[Seq[UserADM]]
    members.size should be (2)
  }

  "return all admin members of a project identified by shortcode" in {
    val request = Get(baseApiUrl + s"/admin/projects/shortcode/$projectShortcode/admin-members") ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    // log.debug(s"response: {}", response)
    assert(response.status === StatusCodes.OK)

    val members: Seq[UserADM] = AkkaHttpUtils.httpResponseToJson(response).fields("members").convertTo[Seq[UserADM]]
    members.size should be (2)
  }
}
"used to query members [PERMISSIONS]" should {

  "return members of a project to a SystemAdmin" in {
    val request = Get(baseApiUrl + s"/admin/projects/iri/$projectIriEnc/members") ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    assert(response.status === StatusCodes.OK)
  }

  "return members of a project to a ProjectAdmin" in {
    val request = Get(baseApiUrl + s"/admin/projects/iri/$projectIriEnc/members") ~> addCredentials(BasicHttpCredentials(SharedTestDataADM.imagesUser01.email, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    assert(response.status === StatusCodes.OK)
  }

  "return `Forbidden` for members of a project to a normal user" in {
    val request = Get(baseApiUrl + s"/admin/projects/iri/$projectIriEnc/members") ~> addCredentials(BasicHttpCredentials(SharedTestDataADM.imagesUser02.email, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    assert(response.status === StatusCodes.Forbidden)
  }

  // FIX: the three admin-members tests below previously queried the
  // '/members' endpoint (copy-paste from the tests above), so the permission
  // checks of the '/admin-members' endpoint were never actually exercised.

  "return admin-members of a project to a SystemAdmin" in {
    val request = Get(baseApiUrl + s"/admin/projects/iri/$projectIriEnc/admin-members") ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    assert(response.status === StatusCodes.OK)
  }

  "return admin-members of a project to a ProjectAdmin" in {
    val request = Get(baseApiUrl + s"/admin/projects/iri/$projectIriEnc/admin-members") ~> addCredentials(BasicHttpCredentials(SharedTestDataADM.imagesUser01.email, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    assert(response.status === StatusCodes.OK)
  }

  "return `Forbidden` for admin-members of a project to a normal user" in {
    val request = Get(baseApiUrl + s"/admin/projects/iri/$projectIriEnc/admin-members") ~> addCredentials(BasicHttpCredentials(SharedTestDataADM.imagesUser02.email, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    assert(response.status === StatusCodes.Forbidden)
  }
}
"used to query keywords" should {

  "return all unique keywords for all projects" in {
    // Note: the route segment is capitalised ("Keywords"), matching the
    // server-side route definition.
    val request = Get(baseApiUrl + s"/admin/projects/Keywords") ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    // log.debug(s"response: {}", response)
    assert(response.status === StatusCodes.OK)

    // Count presumably reflects the distinct keywords across all projects in
    // the shared test data — TODO confirm against fixtures.
    val keywords: Seq[String] = AkkaHttpUtils.httpResponseToJson(response).fields("keywords").convertTo[Seq[String]]
    keywords.size should be (18)
  }

  "return all keywords for a single project" in {
    val incunabulaIriEnc = URLEncoder.encode(SharedTestDataADM.incunabulaProject.id, "utf-8")
    val request = Get(baseApiUrl + s"/admin/projects/iri/$incunabulaIriEnc/Keywords") ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    // log.debug(s"response: {}", response)
    assert(response.status === StatusCodes.OK)

    val keywords: Seq[String] = AkkaHttpUtils.httpResponseToJson(response).fields("keywords").convertTo[Seq[String]]
    keywords should be (SharedTestDataADM.incunabulaProject.keywords)
  }

  "return empty list for a project without keywords" in {
    val anythingIriEnc = URLEncoder.encode(SharedTestDataADM.anythingProject.id, "utf-8")
    val request = Get(baseApiUrl + s"/admin/projects/iri/$anythingIriEnc/Keywords") ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    // log.debug(s"response: {}", response)
    assert(response.status === StatusCodes.OK)

    val keywords: Seq[String] = AkkaHttpUtils.httpResponseToJson(response).fields("keywords").convertTo[Seq[String]]
    keywords should be (Seq.empty[String])
  }

  "return 'NotFound' when the project IRI is unknown" in {
    val notexistingIriEnc = URLEncoder.encode("http://rdfh.ch/projects/notexisting", "utf-8")
    val request = Get(baseApiUrl + s"/admin/projects/iri/$notexistingIriEnc/Keywords") ~> addCredentials(BasicHttpCredentials(rootEmail, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    // log.debug(s"response: {}", response)
    assert(response.status === StatusCodes.NotFound)
  }
}
"used to dump project data" should {

  "return a TriG file containing all data from a project" in {
    val anythingProjectIriEnc = URLEncoder.encode(SharedTestDataADM.anythingProject.id, "utf-8")
    val request = Get(baseApiUrl + s"/admin/projects/iri/$anythingProjectIriEnc/AllData") ~> addCredentials(BasicHttpCredentials(SharedTestDataADM.anythingAdminUser.email, testPass))
    val response: HttpResponse = singleAwaitingRequest(request)
    assert(response.status === StatusCodes.OK)

    // The response entity is the raw TriG document; unmarshal it to a string
    // before parsing.
    val trigStrFuture: Future[String] = Unmarshal(response.entity).to[String]
    val trigStr: String = Await.result(trigStrFuture, Timeout(5.seconds).duration)
    val parsedTrig: Model = parseTrig(trigStr)

    // Verify the dump contains exactly the expected named graphs: the
    // project's two ontologies plus its permissions and admin data.
    val contextIris: Set[IRI] = parsedTrig.contexts.asScala.map(_.stringValue).toSet

    assert(contextIris == Set(
      "http://www.knora.org/ontology/0001/something",
      "http://www.knora.org/ontology/0001/anything",
      "http://www.knora.org/data/permissions",
      "http://www.knora.org/data/admin"
    ))
  }
}
}
}
| musicEnfanthen/Knora | webapi/src/test/scala/org/knora/webapi/e2e/admin/ProjectsADME2ESpec.scala | Scala | agpl-3.0 | 23,292 |
package org.bitcoins.gui.dialog
import grizzled.slf4j.Logging
import org.bitcoins.cli.CliCommand.{LockUnspent, Rescan}
import org.bitcoins.cli.ConsoleCli
import org.bitcoins.gui.{GlobalData, TaskRunner}
import scalafx.Includes._
import scalafx.geometry.Pos
import scalafx.scene.control.{Button, ButtonType, Dialog, ProgressIndicator}
import scalafx.scene.layout.VBox
import scalafx.stage.{Modality, Window}
import java.awt.Desktop
import java.io.File
import java.nio.file.{Files, Paths}
import scala.util.{Failure, Properties, Success}
/**
 * Modeless dialog offering debug operations: opening the bitcoin-s log file,
 * unreserving all wallet UTXOs and triggering a wallet rescan.
 */
object DebugDialog extends Logging {

  private val LOGFILE_NAME = "bitcoin-s.log"

  /** Builds and shows the dialog. Returns immediately (modeless). */
  def show(parentWindow: Window): Unit = {
    val dialog = new Dialog[Unit]() {
      initOwner(parentWindow)
      initModality(Modality.None)
      title = "Debug Operations"
    }

    dialog.dialogPane().buttonTypes = Seq(ButtonType.Close)
    dialog.dialogPane().stylesheets = GlobalData.currentStyleSheets

    val openLogButton = buildLogButton

    val unreserveAllUTXOsButton = new Button("Unreserve All UTXOs")
    val rescanButton = new Button("Rescan Wallet")

    val content = new VBox {
      minWidth = 300
      minHeight = 300
      spacing = 10
      children = Seq(openLogButton, unreserveAllUTXOsButton, rescanButton)
    }

    // Overlay shown while a background task runs.
    val glassPane = new VBox {
      children = new ProgressIndicator {
        progress = ProgressIndicator.IndeterminateProgress
        visible = true
      }
      alignment = Pos.Center
      visible = false
    }

    lazy val taskRunner = new TaskRunner(content, glassPane)

    unreserveAllUTXOsButton.onAction = _ => {
      taskRunner.run(
        "Unreserve All UTXOs", {
          ConsoleCli.exec(LockUnspent(true, Vector.empty),
                          GlobalData.consoleCliConfig) match {
            case Success(_) => ()
            case Failure(err) =>
              throw err
          }
        }
      )
    }

    setRescanAction(taskRunner, rescanButton)

    dialog.dialogPane().content = content
    val _ = dialog.show()
  }

  /** Builds the button that opens the active network's log file. */
  private def buildLogButton: Button = new Button("Open Bitcoin-S Log") {
    onAction = _ => {
      // Get root active network directory
      val location = System.getProperty("bitcoins.log.location")
      val path = Paths.get(location, LOGFILE_NAME)
      if (Files.exists(path)) {
        // Ubuntu seems to support Desktop and Desktop.open(), but hangs on opening file
        // This is an issue in JavaFX and the common workaround is to call on another thread
        // I was not having any luck with Platform.runLater wrapping call to Desktop.open getting around the bug
        if (Properties.isLinux) {
          // Work around native file-open alternative that works on Ubuntu.
          // FIX: use the Array overload of Runtime.exec — the single-String
          // overload tokenizes on whitespace and breaks for paths containing
          // spaces.
          val _ = Runtime
            .getRuntime()
            .exec(Array("/usr/bin/xdg-open", path.toString))
        } else if (Desktop.isDesktopSupported) {
          val d = Desktop.getDesktop
          if (d.isSupported(Desktop.Action.OPEN)) {
            // Open file in default log reader per OS
            d.open(new File(path.toString))
          } else {
            logger.error("Desktop.Action.OPEN on log file not supported")
          }
        } else {
          logger.error("This platform is non-Linux or does not support Desktop")
        }
      } else {
        logger.error(
          s"Expected log file location does not exist ${path.toString}")
      }
    }
  }

  /** Wires the rescan button to run a full forced wallet rescan. */
  private def setRescanAction(taskRunner: TaskRunner, button: Button): Unit = {
    val rescanCmd = {
      Rescan(addressBatchSize = Some(200),
             startBlock = None,
             endBlock = None,
             force = true,
             ignoreCreationTime = false)
    }

    button.onAction = _ => {
      taskRunner.run(
        "Rescan Wallet", {
          ConsoleCli.exec(rescanCmd, GlobalData.consoleCliConfig) match {
            case Success(_) => ()
            case Failure(err) =>
              throw err
          }
        }
      )
    }
  }
}
| bitcoin-s/bitcoin-s | app/gui/src/main/scala/org/bitcoins/gui/dialog/DebugDialog.scala | Scala | mit | 3,969 |
package com.gu.streams
import scala.concurrent.{ExecutionContext, Future}
import scalaz.concurrent.Task
import scalaz.Scalaz._
import scala.util.{Failure, Success}
object Tasks {

  /**
   * Adapts a Scala `Future` into a scalaz `Task`.
   *
   * The future is evaluated when the task registers its callback; success is
   * reported on the right of the disjunction and failure on the left.
   */
  def fromScalaFuture[A](fa: => Future[A])(implicit context: ExecutionContext): Task[A] =
    Task.async { callback =>
      fa.onComplete {
        case Success(value) => callback(value.right)
        case Failure(cause) => callback(cause.left)
      }
    }
}
| guardian/content-streams | src/main/scala/com/gu/streams/Tasks.scala | Scala | apache-2.0 | 421 |
/*
* Copyright 2020 Lenses.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.landoop.streamreactor.connect.hive.source.offset
import com.landoop.streamreactor.connect.hive.source.{SourceOffset, SourcePartition}
import com.landoop.streamreactor.connect.hive.{DatabaseName, TableName, Topic}
import org.apache.hadoop.fs.Path
import org.mockito.MockitoSugar
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
class HiveSourceInitOffsetStorageReaderTest extends AnyFlatSpec with MockitoSugar with Matchers {

  // A partition with a stored offset, and one without, to cover both branches.
  private val knownPartition =
    SourcePartition(DatabaseName("databaseName1"), TableName("tableName1"), Topic("topic1"), new Path("path1"))
  private val unknownPartition =
    SourcePartition(DatabaseName("databaseName2"), TableName("tableName2"), Topic("topic2"), new Path("path2"))
  private val knownOffset = SourceOffset(100)

  // Reader under test, backed by a mock store holding a single mapping.
  private val reader = new HiveSourceInitOffsetStorageReader(
    new MockOffsetStorageReader(Map(knownPartition -> knownOffset)))

  "offset" should "find offset recorded in OffsetStorageReader" in {
    reader.offset(knownPartition) shouldBe Some(knownOffset)
  }

  "offset" should "return None when none found" in {
    reader.offset(unknownPartition) shouldBe None
  }
}
| datamountaineer/stream-reactor | kafka-connect-hive/src/test/scala/com/landoop/streamreactor/connect/hive/source/offset/HiveSourceInitOffsetStorageReaderTest.scala | Scala | apache-2.0 | 1,860 |
/*
* #%L
* Active OCR Web Application
* %%
* Copyright (C) 2011 - 2013 Maryland Institute for Technology in the Humanities
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
// From "Example project: Lift OpenID integration with openid-selector"
// Written by Tim Williams
// https://www.assembla.com/spaces/liftweb/wiki/OpenID
package edu.umd.mith.activeocr.web {
package model {
import net.liftweb.mapper._
import net.liftweb.util._
import net.liftweb.common._
import net.liftmodules.openid._
import org.openid4java.discovery.DiscoveryInformation
import org.openid4java.message.AuthRequest
object MyVendor extends SimpleOpenIDVendor {

  /**
   * Attaches an attribute-exchange extension (email and name attributes) to
   * the outgoing OpenID authentication request, when a well-known endpoint
   * can be resolved from the discovery information.
   */
  def ext(di: DiscoveryInformation, authReq: AuthRequest): Unit = {
    import WellKnownAttributes._
    WellKnownEndpoints.findEndpoint(di) foreach { endpoint =>
      endpoint
        .makeAttributeExtension(List(Email, FullName, FirstName, LastName))
        .foreach(extension => authReq.addExtension(extension))
    }
  }

  // Install ext as a pre-authentication hook on the consumer.
  override def createAConsumer = new OpenIDConsumer[UserType] {
    beforeAuth = Full(ext _)
  }
}
}
}
| umd-mith/activeocr | web/src/main/scala/edu/umd/mith/activeocr/web/model/MyVendor.scala | Scala | apache-2.0 | 1,572 |
package chess
import Pos.posAt
import format.UciMove
/** Per-side tally of checks delivered (used for three-check style variants). */
case class CheckCount(white: Int = 0, black: Int = 0) {

  /** Returns a copy with the given side's tally incremented by one. */
  def add(color: Color) = {
    val (dw, db) = (color.fold(1, 0), color.fold(0, 1))
    copy(white = white + dw, black = black + db)
  }

  /** True when at least one check has been recorded for either side. */
  def nonEmpty = white > 0 || black > 0

  /** The tally for the given side. */
  def apply(color: Color) = color.fold(white, black)
}
/**
 * Immutable game history: last move played, rolling position hashes used for
 * repetition detection, remaining castling rights and the per-side check count.
 */
case class History(
    lastMove: Option[UciMove] = None,
    positionHashes: PositionHash = Array(),
    castles: Castles = Castles.all,
    checkCount: CheckCount = CheckCount(0, 0)) {

  /**
   * Halfmove clock: This is the number of halfmoves
   * since the last pawn advance or capture.
   * This is used to determine if a draw
   * can be claimed under the fifty-move rule.
   */
  def halfMoveClock = positionHashes.size / Hash.size

  // Threefold repetition: the current position (the first Hash.size bytes of
  // positionHashes) has occurred at least three times among the stored hashes.
  // The guard on halfMoveClock > 6 skips the check when too few moves have
  // been played for a repetition to be possible. Only 3-byte groups are
  // compared; the Array(x, y, z) patterns assume Hash.size == 3 — any group of
  // a different length compares as non-matching.
  def threefoldRepetition: Boolean = halfMoveClock > 6 && {
    val positions = (positionHashes grouped Hash.size).toList
    positions.headOption match {
      case Some(Array(x, y, z)) => (positions count {
        case Array(x2, y2, z2) => x == x2 && y == y2 && z == z2
        case _ => false
      }) >= 3
      case _ => false
    }
  }

  // Fifty-move rule: 100 halfmoves without a pawn move or capture.
  def fiftyMoves: Boolean = halfMoveClock >= 100

  // NOTE(review): returns a structural type; callers invoke .on/.any via
  // reflection. Kept as-is to preserve the existing call sites.
  def canCastle(color: Color) = new {
    def on(side: Side): Boolean = castles can color on side
    def any = (castles can color).any
  }

  def withoutCastles(color: Color) = copy(castles = castles without color)

  def withoutAnyCastles = copy(castles = Castles.none)

  def withoutCastle(color: Color, side: Side) = copy(castles = castles.without(color, side))

  def withCastles(c: Castles) = copy(castles = c)

  // Prepends the new position hash (most recent position comes first).
  def positionHashesWith(hash: PositionHash): PositionHash =
    hash ++ positionHashes

  def withLastMove(m: UciMove) = copy(lastMove = Some(m))

  // Records a check for the given color only when v is true.
  def withCheck(color: Color, v: Boolean) =
    if (v) copy(checkCount = checkCount add color) else this
}
object History {

  /** Builds a history from already-parsed components (empty check count). */
  def make(
    lastMove: Option[UciMove],
    positionHashes: PositionHash,
    castles: Castles): History =
    History(
      lastMove = lastMove,
      castles = castles,
      positionHashes = positionHashes)

  /** Builds a history from string forms, parsing the UCI move and castling rights. */
  def make(
    lastMove: Option[String], // a2a4
    castles: String): History = {
    val parsedMove = lastMove flatMap UciMove.apply
    make(
      lastMove = parsedMove,
      positionHashes = Array(),
      castles = Castles(castles))
  }

  /** A fresh history granting the given side exactly the specified castling rights. */
  def castle(color: Color, kingSide: Boolean, queenSide: Boolean) = {
    val rights = color match {
      case White =>
        Castles.init.copy(
          whiteKingSide = kingSide,
          whiteQueenSide = queenSide)
      case Black =>
        Castles.init.copy(
          blackKingSide = kingSide,
          blackQueenSide = queenSide)
    }
    History().copy(castles = rights)
  }

  /** A fresh history with castling rights stripped from both sides. */
  def noCastle = History().withoutCastles(White).withoutCastles(Black)
}
| Happy0/scalachess | src/main/scala/History.scala | Scala | mit | 2,697 |
import scala.scalajs.js
import scala.scalajs.js.annotation.*
// Negative compilation test: Scala.js forbids abstract local (method-scoped)
// JS classes, so the declaration below must be rejected by the compiler. The
// inline "// error" marker tells the test framework where the error is expected.
object Enclosing {
  def method(): Unit = {
    abstract class AbstractLocalJSClass extends js.Object // error
  }
}
| dotty-staging/dotty | tests/neg-scalajs/abstract-local-js-class.scala | Scala | apache-2.0 | 179 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.clustering
import breeze.linalg.{DenseVector => BDV}
import org.apache.spark.annotation.{DeveloperApi, Since}
import org.apache.spark.api.java.JavaPairRDD
import org.apache.spark.graphx._
import org.apache.spark.internal.Logging
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.rdd.RDD
import org.apache.spark.util.Utils
/**
* Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
*
* Terminology:
* - "word" = "term": an element of the vocabulary
* - "token": instance of a term appearing in a document
* - "topic": multinomial distribution over words representing some concept
*
* References:
* - Original LDA paper (journal version):
* Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
*
* @see <a href="http://en.wikipedia.org/wiki/Latent_Dirichlet_allocation">
* Latent Dirichlet allocation (Wikipedia)</a>
*/
@Since("1.3.0")
class LDA private (
private var k: Int,
private var maxIterations: Int,
private var docConcentration: Vector,
private var topicConcentration: Double,
private var seed: Long,
private var checkpointInterval: Int,
private var ldaOptimizer: LDAOptimizer) extends Logging {
/**
 * Constructs a LDA instance with default parameters.
 * Defaults: k = 10 topics, 20 iterations, automatic doc/topic concentrations
 * (signalled by -1), a random seed, checkpointing every 10 iterations, and
 * the EM optimizer.
 */
@Since("1.3.0")
def this() = this(k = 10, maxIterations = 20, docConcentration = Vectors.dense(-1),
  topicConcentration = -1, seed = Utils.random.nextLong(), checkpointInterval = 10,
  ldaOptimizer = new EMLDAOptimizer)
/**
 * Number of topics to infer, i.e., the number of soft cluster centers.
 */
@Since("1.3.0")
def getK: Int = k

/**
 * Set the number of topics to infer, i.e., the number of soft cluster centers.
 * (default = 10)
 *
 * @throws IllegalArgumentException if `k` is not positive
 */
@Since("1.3.0")
def setK(k: Int): this.type = {
  require(k > 0, s"LDA k (number of clusters) must be > 0, but was set to $k")
  this.k = k
  // Returns this for builder-style chaining.
  this
}
/**
 * Concentration parameter (commonly named "alpha") for the prior placed on documents'
 * distributions over topics ("theta").
 *
 * This is the parameter to a Dirichlet distribution.
 * Returns the full (possibly asymmetric) vector as stored; see
 * [[getDocConcentration]] for the symmetric scalar view.
 */
@Since("1.5.0")
def getAsymmetricDocConcentration: Vector = this.docConcentration
/**
 * Concentration parameter (commonly named "alpha") for the prior placed on documents'
 * distributions over topics ("theta").
 *
 * This method assumes the Dirichlet distribution is symmetric and can be described by a single
 * [[Double]] parameter. It should fail if docConcentration is asymmetric.
 *
 * @throws IllegalArgumentException if the stored docConcentration vector is asymmetric
 */
@Since("1.3.0")
def getDocConcentration: Double = {
  val parameter = docConcentration(0)
  if (docConcentration.size == 1) {
    parameter
  } else {
    // FIX: the require previously carried no message, so an asymmetric
    // docConcentration surfaced as a bare "requirement failed" with no hint
    // of the cause or the correct accessor to use.
    require(docConcentration.toArray.forall(_ == parameter),
      "docConcentration is asymmetric; use getAsymmetricDocConcentration to retrieve it")
    parameter
  }
}
/**
 * Concentration parameter (commonly named "alpha") for the prior placed on documents'
 * distributions over topics ("theta").
 *
 * This is the parameter to a Dirichlet distribution, where larger values mean more smoothing
 * (more regularization).
 *
 * If set to a singleton vector Vector(-1), then docConcentration is set automatically. If set to
 * singleton vector Vector(t) where t != -1, then t is replicated to a vector of length k during
 * `LDAOptimizer.initialize()`. Otherwise, the [[docConcentration]] vector must be length k.
 * (default = Vector(-1) = automatic)
 *
 * Optimizer-specific parameter settings:
 *  - EM
 *     - Currently only supports symmetric distributions, so all values in the vector should be
 *       the same.
 *     - Values should be greater than 1.0
 *     - default = uniformly (50 / k) + 1, where 50/k is common in LDA libraries and +1 follows
 *       from Asuncion et al. (2009), who recommend a +1 adjustment for EM.
 *  - Online
 *     - Values should be greater than or equal to 0
 *     - default = uniformly (1.0 / k), following the implementation from
 *       <a href="https://github.com/Blei-Lab/onlineldavb">here</a>.
 *
 * @throws IllegalArgumentException if the vector is neither length 1 nor length k
 */
@Since("1.5.0")
def setDocConcentration(docConcentration: Vector): this.type = {
  require(docConcentration.size == 1 || docConcentration.size == k,
    s"Size of docConcentration must be 1 or ${k} but got ${docConcentration.size}")
  this.docConcentration = docConcentration
  this
}

/**
 * Replicates a [[Double]] docConcentration to create a symmetric prior.
 */
@Since("1.3.0")
def setDocConcentration(docConcentration: Double): this.type = {
  // Stored as a one-element vector; expansion to length k happens in the
  // optimizer's initialize().
  this.docConcentration = Vectors.dense(docConcentration)
  this
}

/**
 * Alias for [[getAsymmetricDocConcentration]]
 */
@Since("1.5.0")
def getAsymmetricAlpha: Vector = getAsymmetricDocConcentration

/**
 * Alias for [[getDocConcentration]]
 */
@Since("1.3.0")
def getAlpha: Double = getDocConcentration

/**
 * Alias for `setDocConcentration()`
 */
@Since("1.5.0")
def setAlpha(alpha: Vector): this.type = setDocConcentration(alpha)

/**
 * Alias for `setDocConcentration()`
 */
@Since("1.3.0")
def setAlpha(alpha: Double): this.type = setDocConcentration(alpha)
/**
 * Concentration parameter (commonly named "beta" or "eta") for the prior placed on topics'
 * distributions over terms.
 *
 * This is the parameter to a symmetric Dirichlet distribution.
 *
 * @note The topics' distributions over terms are called "beta" in the original LDA paper
 * by Blei et al., but are called "phi" in many later papers such as Asuncion et al., 2009.
 */
@Since("1.3.0")
def getTopicConcentration: Double = this.topicConcentration

/**
 * Concentration parameter (commonly named "beta" or "eta") for the prior placed on topics'
 * distributions over terms.
 *
 * This is the parameter to a symmetric Dirichlet distribution.
 *
 * @note The topics' distributions over terms are called "beta" in the original LDA paper
 * by Blei et al., but are called "phi" in many later papers such as Asuncion et al., 2009.
 *
 * If set to -1, then topicConcentration is set automatically.
 * (default = -1 = automatic)
 *
 * Optimizer-specific parameter settings:
 *  - EM
 *     - Value should be greater than 1.0
 *     - default = 0.1 + 1, where 0.1 gives a small amount of smoothing and +1 follows
 *       Asuncion et al. (2009), who recommend a +1 adjustment for EM.
 *  - Online
 *     - Value should be greater than or equal to 0
 *     - default = (1.0 / k), following the implementation from
 *       <a href="https://github.com/Blei-Lab/onlineldavb">here</a>.
 */
@Since("1.3.0")
def setTopicConcentration(topicConcentration: Double): this.type = {
  // No validation here: -1 is a sentinel for "choose automatically"; the
  // optimizer resolves the actual value in initialize().
  this.topicConcentration = topicConcentration
  this
}

/**
 * Alias for [[getTopicConcentration]]
 */
@Since("1.3.0")
def getBeta: Double = getTopicConcentration

/**
 * Alias for `setTopicConcentration()`
 */
@Since("1.3.0")
def setBeta(beta: Double): this.type = setTopicConcentration(beta)
/**
 * Maximum number of iterations allowed.
 */
@Since("1.3.0")
def getMaxIterations: Int = maxIterations

/**
 * Set the maximum number of iterations allowed.
 * (default = 20)
 *
 * @throws IllegalArgumentException if `maxIterations` is negative (0 is allowed: it means
 *                                  the optimizer performs no iterations).
 */
@Since("1.3.0")
def setMaxIterations(maxIterations: Int): this.type = {
  // Fixed the error-message grammar: "Maximum of iterations" -> "Maximum number of iterations".
  require(maxIterations >= 0,
    s"Maximum number of iterations must be nonnegative but got ${maxIterations}")
  this.maxIterations = maxIterations
  this
}
/**
 * Random seed for cluster initialization.
 */
@Since("1.3.0")
def getSeed: Long = seed

/**
 * Set the random seed for cluster initialization.
 */
@Since("1.3.0")
def setSeed(seed: Long): this.type = {
  this.seed = seed
  this
}

/**
 * Period (in iterations) between checkpoints.
 */
@Since("1.3.0")
def getCheckpointInterval: Int = checkpointInterval

/**
 * Parameter for set checkpoint interval (greater than or equal to 1) or disable checkpoint (-1).
 * E.g. 10 means that the cache will get checkpointed every 10 iterations. Checkpointing helps
 * with recovery (when nodes fail). It also helps with eliminating temporary shuffle files on
 * disk, which can be important when LDA is run for many iterations. If the checkpoint directory
 * is not set in [[org.apache.spark.SparkContext]], this setting is ignored. (default = 10)
 *
 * @see [[org.apache.spark.SparkContext#setCheckpointDir]]
 */
@Since("1.3.0")
def setCheckpointInterval(checkpointInterval: Int): this.type = {
  // -1 disables checkpointing entirely; any positive value is a period in iterations.
  // 0 is rejected because "checkpoint every 0 iterations" is meaningless.
  require(checkpointInterval == -1 || checkpointInterval > 0,
    s"Period between checkpoints must be -1 or positive but got ${checkpointInterval}")
  this.checkpointInterval = checkpointInterval
  this
}
/**
 * :: DeveloperApi ::
 *
 * LDAOptimizer used to perform the actual calculation
 */
@Since("1.4.0")
@DeveloperApi
def getOptimizer: LDAOptimizer = ldaOptimizer

/**
 * :: DeveloperApi ::
 *
 * LDAOptimizer used to perform the actual calculation (default = EMLDAOptimizer)
 */
@Since("1.4.0")
@DeveloperApi
def setOptimizer(optimizer: LDAOptimizer): this.type = {
  // The optimizer instance is stored as-is (not copied), so the caller and this LDA
  // instance share any mutable state the optimizer carries.
  this.ldaOptimizer = optimizer
  this
}
/**
 * Set the LDAOptimizer used to perform the actual calculation by algorithm name.
 * Currently "em", "online" are supported.
 *
 * @throws IllegalArgumentException if `optimizerName` is neither "em" nor "online"
 *                                  (comparison is case-insensitive).
 */
@Since("1.4.0")
def setOptimizer(optimizerName: String): this.type = {
  this.ldaOptimizer =
    // Use a locale-insensitive lowercase conversion: with the default-locale overload,
    // e.g. the Turkish locale maps "EM" to a dotless-i form that would fail to match.
    optimizerName.toLowerCase(java.util.Locale.ROOT) match {
      case "em" => new EMLDAOptimizer
      case "online" => new OnlineLDAOptimizer
      case other =>
        throw new IllegalArgumentException(s"Only em, online are supported but got $other.")
    }
  this
}
/**
 * Learn an LDA model using the given dataset.
 *
 * @param documents RDD of documents, which are term (word) count vectors paired with IDs.
 *                  The term count vectors are "bags of words" with a fixed-size vocabulary
 *                  (where the vocabulary size is the length of the vector).
 *                  Document IDs must be unique and greater than or equal to 0.
 * @return Inferred LDA model
 */
@Since("1.3.0")
def run(documents: RDD[(Long, Vector)]): LDAModel = {
  // Hand the data to the configured optimizer, drive it for exactly `maxIterations` steps
  // while recording per-iteration wall-clock time, then extract the fitted model.
  val optimizerState = ldaOptimizer.initialize(documents, this)
  val secondsPerIteration = new Array[Double](maxIterations)
  var iteration = 0
  while (iteration < maxIterations) {
    val startNs = System.nanoTime()
    optimizerState.next()
    secondsPerIteration(iteration) = (System.nanoTime() - startNs) / 1e9
    iteration += 1
  }
  optimizerState.getLDAModel(secondsPerIteration)
}
/**
 * Java-friendly version of `run()`
 */
@Since("1.3.0")
def run(documents: JavaPairRDD[java.lang.Long, Vector]): LDAModel = {
  // Reinterprets the boxed java.lang.Long keys as Scala Long without copying the RDD.
  // NOTE(review): this relies on both sharing the same boxed representation after erasure.
  run(documents.rdd.asInstanceOf[RDD[(Long, Vector)]])
}
}
private[clustering] object LDA {
  /*
    DEVELOPERS NOTE:
    This implementation uses GraphX, where the graph is bipartite with 2 types of vertices:
    - Document vertices
    - indexed with unique indices >= 0
    - Store vectors of length k (# topics).
    - Term vertices
    - indexed {-1, -2, ..., -vocabSize}
    - Store vectors of length k (# topics).
    - Edges correspond to terms appearing in documents.
    - Edges are directed Document -> Term.
    - Edges are partitioned by documents.
    Info on EM implementation.
    - We follow Section 2.2 from Asuncion et al., 2009. We use some of their notation.
    - In this implementation, there is one edge for every unique term appearing in a document,
    i.e., for every unique (document, term) pair.
    - Notation:
    - N_{wkj} = count of tokens of term w currently assigned to topic k in document j
    - N_{*} where * is missing a subscript w/k/j is the count summed over missing subscript(s)
    - gamma_{wjk} = P(z_i = k | x_i = w, d_i = j),
    the probability of term x_i in document d_i having topic z_i.
    - Data graph
    - Document vertices store N_{kj}
    - Term vertices store N_{wk}
    - Edges store N_{wj}.
    - Global data N_k
    - Algorithm
    - Initial state:
    - Document and term vertices store random counts N_{wk}, N_{kj}.
    - E-step: For each (document,term) pair i, compute P(z_i | x_i, d_i).
    - Aggregate N_k from term vertices.
    - Compute gamma_{wjk} for each possible topic k, from each triplet.
    using inputs N_{wk}, N_{kj}, N_k.
    - M-step: Compute sufficient statistics for hidden parameters phi and theta
    (counts N_{wk}, N_{kj}, N_k).
    - Document update:
    - N_{kj} <- sum_w N_{wj} gamma_{wjk}
    - N_j <- sum_k N_{kj} (only needed to output predictions)
    - Term update:
    - N_{wk} <- sum_j N_{wj} gamma_{wjk}
    - N_k <- sum_w N_{wk}
    TODO: Add simplex constraints to allow alpha in (0,1).
    See: Vorontsov and Potapenko. "Tutorial on Probabilistic Topic Modeling : Additive
    Regularization for Stochastic Matrix Factorization." 2014.
   */

  /**
   * Vector over topics (length k) of token counts.
   * The meaning of these counts can vary, and it may or may not be normalized to be a distribution.
   */
  private[clustering] type TopicCounts = BDV[Double]

  // A single token count; stored as Double so it can be aggregated without conversion.
  private[clustering] type TokenCount = Double

  /** Term vertex IDs are {-1, -2, ..., -vocabSize} */
  private[clustering] def term2index(term: Int): Long = -(1 + term.toLong)

  // Inverse of term2index: recovers the original term index from a (negative) vertex ID.
  private[clustering] def index2term(termIndex: Long): Int = -(1 + termIndex).toInt

  // Document vertices use non-negative IDs; term vertices use negative IDs (see note above).
  private[clustering] def isDocumentVertex(v: (VertexId, _)): Boolean = v._1 >= 0

  private[clustering] def isTermVertex(v: (VertexId, _)): Boolean = v._1 < 0

  /**
   * Compute gamma_{wjk}, a distribution over topics k.
   *
   * @param docTopicCounts   N_{kj}: topic counts for the document vertex (length K)
   * @param termTopicCounts  N_{wk}: topic counts for the term vertex (length K)
   * @param totalTopicCounts N_k: global topic counts (length K)
   * @param vocabSize        W: number of terms in the vocabulary
   * @param eta              topic concentration (prior on topics' distributions over terms)
   * @param alpha            document concentration (prior on documents' distributions over topics)
   * @return normalized distribution over the K topics (sums to 1)
   */
  private[clustering] def computePTopic(
      docTopicCounts: TopicCounts,
      termTopicCounts: TopicCounts,
      totalTopicCounts: TopicCounts,
      vocabSize: Int,
      eta: Double,
      alpha: Double): TopicCounts = {
    val K = docTopicCounts.length
    val N_j = docTopicCounts.data
    val N_w = termTopicCounts.data
    val N = totalTopicCounts.data
    // The -1.0 adjustments follow the EM formulation of Asuncion et al., 2009 (Section 2.2).
    val eta1 = eta - 1.0
    val alpha1 = alpha - 1.0
    val Weta1 = vocabSize * eta1
    var sum = 0.0
    val gamma_wj = new Array[Double](K)
    // Plain while-loop over the raw arrays to avoid boxing in this hot path.
    var k = 0
    while (k < K) {
      val gamma_wjk = (N_w(k) + eta1) * (N_j(k) + alpha1) / (N(k) + Weta1)
      gamma_wj(k) = gamma_wjk
      sum += gamma_wjk
      k += 1
    }
    // normalize (divides the breeze vector in place and returns it)
    BDV(gamma_wj) /= sum
  }
}
| spark0001/spark2.1.1 | mllib/src/main/scala/org/apache/spark/mllib/clustering/LDA.scala | Scala | apache-2.0 | 15,317 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.kafka010
import java.{util => ju}
import java.util.{Locale, Optional, UUID}
import scala.collection.JavaConverters._
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.producer.ProducerConfig
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{AnalysisException, DataFrame, SaveMode, SparkSession, SQLContext}
import org.apache.spark.sql.execution.streaming.{Sink, Source}
import org.apache.spark.sql.sources._
import org.apache.spark.sql.sources.v2.{ContinuousReadSupport, DataSourceOptions, MicroBatchReadSupport, StreamWriteSupport}
import org.apache.spark.sql.sources.v2.reader.streaming.ContinuousInputPartitionReader
import org.apache.spark.sql.sources.v2.writer.streaming.StreamWriter
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types.StructType
/**
* The provider class for all Kafka readers and writers. It is designed such that it throws
* IllegalArgumentException when the Kafka Dataset is created, so that it can catch
* missing options even before the query is started.
*/
private[kafka010] class KafkaSourceProvider extends DataSourceRegister
    with StreamSourceProvider
    with StreamSinkProvider
    with RelationProvider
    with CreatableRelationProvider
    with StreamWriteSupport
    with ContinuousReadSupport
    with MicroBatchReadSupport
    with Logging {
  import KafkaSourceProvider._

  override def shortName(): String = "kafka"

  /**
   * Returns the name and schema of the source. In addition, it also verifies whether the options
   * are correct and sufficient to create the [[KafkaSource]] when the query is started.
   */
  override def sourceSchema(
      sqlContext: SQLContext,
      schema: Option[StructType],
      providerName: String,
      parameters: Map[String, String]): (String, StructType) = {
    validateStreamOptions(parameters)
    // The Kafka read schema is fixed; a user-supplied schema is rejected outright.
    require(schema.isEmpty, "Kafka source has a fixed schema and cannot be set with a custom one")
    (shortName(), KafkaOffsetReader.kafkaSchema)
  }

  override def createSource(
      sqlContext: SQLContext,
      metadataPath: String,
      schema: Option[StructType],
      providerName: String,
      parameters: Map[String, String]): Source = {
    validateStreamOptions(parameters)
    // Each running query should use its own group id. Otherwise, the query may be only assigned
    // partial data since Kafka will assign partitions to multiple consumers having the same group
    // id. Hence, we should generate a unique id for each query.
    val uniqueGroupId = s"spark-kafka-source-${UUID.randomUUID}-${metadataPath.hashCode}"
    val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase(Locale.ROOT), v) }
    // Options prefixed with "kafka." are passed through to the Kafka consumer with the
    // prefix stripped; drop(6) removes exactly the 6 characters of "kafka.".
    val specifiedKafkaParams =
      parameters
        .keySet
        .filter(_.toLowerCase(Locale.ROOT).startsWith("kafka."))
        .map { k => k.drop(6).toString -> parameters(k) }
        .toMap
    // Streaming reads default to the latest offsets when no starting offset is given.
    val startingStreamOffsets = KafkaSourceProvider.getKafkaOffsetRangeLimit(caseInsensitiveParams,
      STARTING_OFFSETS_OPTION_KEY, LatestOffsetRangeLimit)
    val kafkaOffsetReader = new KafkaOffsetReader(
      strategy(caseInsensitiveParams),
      kafkaParamsForDriver(specifiedKafkaParams),
      parameters,
      driverGroupIdPrefix = s"$uniqueGroupId-driver")
    new KafkaSource(
      sqlContext,
      kafkaOffsetReader,
      kafkaParamsForExecutors(specifiedKafkaParams, uniqueGroupId),
      parameters,
      metadataPath,
      startingStreamOffsets,
      failOnDataLoss(caseInsensitiveParams))
  }

  /**
   * Creates a [[org.apache.spark.sql.sources.v2.reader.streaming.MicroBatchReader]] to read batches
   * of Kafka data in a micro-batch streaming query.
   */
  override def createMicroBatchReader(
      schema: Optional[StructType],
      metadataPath: String,
      options: DataSourceOptions): KafkaMicroBatchReader = {
    val parameters = options.asMap().asScala.toMap
    validateStreamOptions(parameters)
    // Each running query should use its own group id. Otherwise, the query may be only assigned
    // partial data since Kafka will assign partitions to multiple consumers having the same group
    // id. Hence, we should generate a unique id for each query.
    val uniqueGroupId = s"spark-kafka-source-${UUID.randomUUID}-${metadataPath.hashCode}"
    val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase(Locale.ROOT), v) }
    // Same "kafka."-prefix stripping as in createSource; kept duplicated per entry point.
    val specifiedKafkaParams =
      parameters
        .keySet
        .filter(_.toLowerCase(Locale.ROOT).startsWith("kafka."))
        .map { k => k.drop(6).toString -> parameters(k) }
        .toMap
    val startingStreamOffsets = KafkaSourceProvider.getKafkaOffsetRangeLimit(caseInsensitiveParams,
      STARTING_OFFSETS_OPTION_KEY, LatestOffsetRangeLimit)
    val kafkaOffsetReader = new KafkaOffsetReader(
      strategy(caseInsensitiveParams),
      kafkaParamsForDriver(specifiedKafkaParams),
      parameters,
      driverGroupIdPrefix = s"$uniqueGroupId-driver")
    new KafkaMicroBatchReader(
      kafkaOffsetReader,
      kafkaParamsForExecutors(specifiedKafkaParams, uniqueGroupId),
      options,
      metadataPath,
      startingStreamOffsets,
      failOnDataLoss(caseInsensitiveParams))
  }

  /**
   * Creates a [[ContinuousInputPartitionReader]] to read
   * Kafka data in a continuous streaming query.
   */
  override def createContinuousReader(
      schema: Optional[StructType],
      metadataPath: String,
      options: DataSourceOptions): KafkaContinuousReader = {
    val parameters = options.asMap().asScala.toMap
    validateStreamOptions(parameters)
    // Each running query should use its own group id. Otherwise, the query may be only assigned
    // partial data since Kafka will assign partitions to multiple consumers having the same group
    // id. Hence, we should generate a unique id for each query.
    val uniqueGroupId = s"spark-kafka-source-${UUID.randomUUID}-${metadataPath.hashCode}"
    val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase(Locale.ROOT), v) }
    val specifiedKafkaParams =
      parameters
        .keySet
        .filter(_.toLowerCase(Locale.ROOT).startsWith("kafka."))
        .map { k => k.drop(6).toString -> parameters(k) }
        .toMap
    val startingStreamOffsets = KafkaSourceProvider.getKafkaOffsetRangeLimit(caseInsensitiveParams,
      STARTING_OFFSETS_OPTION_KEY, LatestOffsetRangeLimit)
    val kafkaOffsetReader = new KafkaOffsetReader(
      strategy(caseInsensitiveParams),
      kafkaParamsForDriver(specifiedKafkaParams),
      parameters,
      driverGroupIdPrefix = s"$uniqueGroupId-driver")
    new KafkaContinuousReader(
      kafkaOffsetReader,
      kafkaParamsForExecutors(specifiedKafkaParams, uniqueGroupId),
      parameters,
      metadataPath,
      startingStreamOffsets,
      failOnDataLoss(caseInsensitiveParams))
  }

  /**
   * Returns a new base relation with the given parameters.
   *
   * @note The parameters' keywords are case insensitive and this insensitivity is enforced
   *       by the Map that is passed to the function.
   */
  override def createRelation(
      sqlContext: SQLContext,
      parameters: Map[String, String]): BaseRelation = {
    validateBatchOptions(parameters)
    val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase(Locale.ROOT), v) }
    val specifiedKafkaParams =
      parameters
        .keySet
        .filter(_.toLowerCase(Locale.ROOT).startsWith("kafka."))
        .map { k => k.drop(6).toString -> parameters(k) }
        .toMap
    // Batch reads default to [earliest, latest]; validateBatchOptions has already rejected
    // the impossible combinations, so these asserts are internal sanity checks only.
    val startingRelationOffsets = KafkaSourceProvider.getKafkaOffsetRangeLimit(
      caseInsensitiveParams, STARTING_OFFSETS_OPTION_KEY, EarliestOffsetRangeLimit)
    assert(startingRelationOffsets != LatestOffsetRangeLimit)
    val endingRelationOffsets = KafkaSourceProvider.getKafkaOffsetRangeLimit(caseInsensitiveParams,
      ENDING_OFFSETS_OPTION_KEY, LatestOffsetRangeLimit)
    assert(endingRelationOffsets != EarliestOffsetRangeLimit)
    new KafkaRelation(
      sqlContext,
      strategy(caseInsensitiveParams),
      sourceOptions = parameters,
      specifiedKafkaParams = specifiedKafkaParams,
      failOnDataLoss = failOnDataLoss(caseInsensitiveParams),
      startingOffsets = startingRelationOffsets,
      endingOffsets = endingRelationOffsets)
  }

  override def createSink(
      sqlContext: SQLContext,
      parameters: Map[String, String],
      partitionColumns: Seq[String],
      outputMode: OutputMode): Sink = {
    // The "topic" option, when present, is the default topic for rows without a topic column.
    val defaultTopic = parameters.get(TOPIC_OPTION_KEY).map(_.trim)
    val specifiedKafkaParams = kafkaParamsForProducer(parameters)
    new KafkaSink(sqlContext,
      new ju.HashMap[String, Object](specifiedKafkaParams.asJava), defaultTopic)
  }

  override def createRelation(
      outerSQLContext: SQLContext,
      mode: SaveMode,
      parameters: Map[String, String],
      data: DataFrame): BaseRelation = {
    // Kafka is append-only: overwriting or ignoring existing data is not meaningful.
    mode match {
      case SaveMode.Overwrite | SaveMode.Ignore =>
        throw new AnalysisException(s"Save mode $mode not allowed for Kafka. " +
          s"Allowed save modes are ${SaveMode.Append} and " +
          s"${SaveMode.ErrorIfExists} (default).")
      case _ => // good
    }
    val topic = parameters.get(TOPIC_OPTION_KEY).map(_.trim)
    val specifiedKafkaParams = kafkaParamsForProducer(parameters)
    KafkaWriter.write(outerSQLContext.sparkSession, data.queryExecution,
      new ju.HashMap[String, Object](specifiedKafkaParams.asJava), topic)

    /* This method is suppose to return a relation that reads the data that was written.
     * We cannot support this for Kafka. Therefore, in order to make things consistent,
     * we return an empty base relation.
     */
    new BaseRelation {
      override def sqlContext: SQLContext = unsupportedException
      override def schema: StructType = unsupportedException
      override def needConversion: Boolean = unsupportedException
      override def sizeInBytes: Long = unsupportedException
      override def unhandledFilters(filters: Array[Filter]): Array[Filter] = unsupportedException
      private def unsupportedException =
        throw new UnsupportedOperationException("BaseRelation from Kafka write " +
          "operation is not usable.")
    }
  }

  override def createStreamWriter(
      queryId: String,
      schema: StructType,
      mode: OutputMode,
      options: DataSourceOptions): StreamWriter = {
    import scala.collection.JavaConverters._
    val spark = SparkSession.getActiveSession.get
    // DataSourceOptions.get returns an Optional; orElse(null) + Option() maps absent to None.
    val topic = Option(options.get(TOPIC_OPTION_KEY).orElse(null)).map(_.trim)
    // We convert the options argument from V2 -> Java map -> scala mutable -> scala immutable.
    val producerParams = kafkaParamsForProducer(options.asMap.asScala.toMap)
    KafkaWriter.validateQuery(
      schema.toAttributes, new java.util.HashMap[String, Object](producerParams.asJava), topic)
    new KafkaStreamWriter(topic, producerParams, schema)
  }

  // Picks the consumer strategy from the (lowercased) options. The .get is safe only because
  // validateGeneralOptions has already verified exactly one strategy key is present.
  private def strategy(caseInsensitiveParams: Map[String, String]) =
    caseInsensitiveParams.find(x => STRATEGY_OPTION_KEYS.contains(x._1)).get match {
      case ("assign", value) =>
        AssignStrategy(JsonUtils.partitions(value))
      case ("subscribe", value) =>
        SubscribeStrategy(value.split(",").map(_.trim()).filter(_.nonEmpty))
      case ("subscribepattern", value) =>
        SubscribePatternStrategy(value.trim())
      case _ =>
        // Should never reach here as we are already matching on
        // matched strategy names
        throw new IllegalArgumentException("Unknown option")
    }

  // failOnDataLoss defaults to true when the option is absent.
  private def failOnDataLoss(caseInsensitiveParams: Map[String, String]) =
    caseInsensitiveParams.getOrElse(FAIL_ON_DATA_LOSS_OPTION_KEY, "true").toBoolean

  private def validateGeneralOptions(parameters: Map[String, String]): Unit = {
    // Validate source options
    val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase(Locale.ROOT), v) }
    val specifiedStrategies =
      caseInsensitiveParams.filter { case (k, _) => STRATEGY_OPTION_KEYS.contains(k) }.toSeq
    if (specifiedStrategies.isEmpty) {
      throw new IllegalArgumentException(
        "One of the following options must be specified for Kafka source: "
          + STRATEGY_OPTION_KEYS.mkString(", ") + ". See the docs for more details.")
    } else if (specifiedStrategies.size > 1) {
      throw new IllegalArgumentException(
        "Only one of the following options can be specified for Kafka source: "
          + STRATEGY_OPTION_KEYS.mkString(", ") + ". See the docs for more details.")
    }

    // NOTE: the result of this match is never read; it is evaluated purely for its
    // validation side effects (throwing on malformed strategy values).
    val strategy = caseInsensitiveParams.find(x => STRATEGY_OPTION_KEYS.contains(x._1)).get match {
      case ("assign", value) =>
        if (!value.trim.startsWith("{")) {
          throw new IllegalArgumentException(
            "No topicpartitions to assign as specified value for option " +
              s"'assign' is '$value'")
        }
      case ("subscribe", value) =>
        val topics = value.split(",").map(_.trim).filter(_.nonEmpty)
        if (topics.isEmpty) {
          throw new IllegalArgumentException(
            "No topics to subscribe to as specified value for option " +
              s"'subscribe' is '$value'")
        }
      case ("subscribepattern", value) =>
        // (the map lookup here is equivalent to just using `value`)
        val pattern = caseInsensitiveParams("subscribepattern").trim()
        if (pattern.isEmpty) {
          throw new IllegalArgumentException(
            "Pattern to subscribe is empty as specified value for option " +
              s"'subscribePattern' is '$value'")
        }
      case _ =>
        // Should never reach here as we are already matching on
        // matched strategy names
        throw new IllegalArgumentException("Unknown option")
    }

    // Validate minPartitions value if present
    if (caseInsensitiveParams.contains(MIN_PARTITIONS_OPTION_KEY)) {
      val p = caseInsensitiveParams(MIN_PARTITIONS_OPTION_KEY).toInt
      if (p <= 0) throw new IllegalArgumentException("minPartitions must be positive")
    }

    // Validate user-specified Kafka options
    if (caseInsensitiveParams.contains(s"kafka.${ConsumerConfig.GROUP_ID_CONFIG}")) {
      throw new IllegalArgumentException(
        s"Kafka option '${ConsumerConfig.GROUP_ID_CONFIG}' is not supported as " +
          s"user-specified consumer groups are not used to track offsets.")
    }

    if (caseInsensitiveParams.contains(s"kafka.${ConsumerConfig.AUTO_OFFSET_RESET_CONFIG}")) {
      throw new IllegalArgumentException(
        s"""
           |Kafka option '${ConsumerConfig.AUTO_OFFSET_RESET_CONFIG}' is not supported.
           |Instead set the source option '$STARTING_OFFSETS_OPTION_KEY' to 'earliest' or 'latest'
           |to specify where to start. Structured Streaming manages which offsets are consumed
           |internally, rather than relying on the kafkaConsumer to do it. This will ensure that no
           |data is missed when new topics/partitions are dynamically subscribed. Note that
           |'$STARTING_OFFSETS_OPTION_KEY' only applies when a new Streaming query is started, and
           |that resuming will always pick up from where the query left off. See the docs for more
           |details.
         """.stripMargin)
    }

    if (caseInsensitiveParams.contains(s"kafka.${ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG}")) {
      throw new IllegalArgumentException(
        s"Kafka option '${ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG}' is not supported as keys "
          + "are deserialized as byte arrays with ByteArrayDeserializer. Use DataFrame operations "
          + "to explicitly deserialize the keys.")
    }

    if (caseInsensitiveParams.contains(s"kafka.${ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG}"))
    {
      throw new IllegalArgumentException(
        s"Kafka option '${ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG}' is not supported as "
          + "values are deserialized as byte arrays with ByteArrayDeserializer. Use DataFrame "
          + "operations to explicitly deserialize the values.")
    }

    val otherUnsupportedConfigs = Seq(
      ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, // committing correctly requires new APIs in Source
      ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG) // interceptors can modify payload, so not safe

    otherUnsupportedConfigs.foreach { c =>
      if (caseInsensitiveParams.contains(s"kafka.$c")) {
        throw new IllegalArgumentException(s"Kafka option '$c' is not supported")
      }
    }

    if (!caseInsensitiveParams.contains(s"kafka.${ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG}")) {
      throw new IllegalArgumentException(
        s"Option 'kafka.${ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG}' must be specified for " +
          s"configuring Kafka consumer")
    }
  }

  // NOTE(review): despite the parameter name, callers pass the raw (not lowercased) options
  // map, so a mixed-case "endingOffsets" key would not be caught by the check below — confirm.
  // validateGeneralOptions lowercases internally, so the rest is unaffected.
  private def validateStreamOptions(caseInsensitiveParams: Map[String, String]) = {
    // Stream specific options
    caseInsensitiveParams.get(ENDING_OFFSETS_OPTION_KEY).map(_ =>
      throw new IllegalArgumentException("ending offset not valid in streaming queries"))
    validateGeneralOptions(caseInsensitiveParams)
  }

  private def validateBatchOptions(caseInsensitiveParams: Map[String, String]) = {
    // Batch specific options
    // A batch query must have a concrete window: starting offsets may not be "latest",
    // and ending offsets may not be "earliest" (neither wholly nor per partition).
    KafkaSourceProvider.getKafkaOffsetRangeLimit(
      caseInsensitiveParams, STARTING_OFFSETS_OPTION_KEY, EarliestOffsetRangeLimit) match {
      case EarliestOffsetRangeLimit => // good to go
      case LatestOffsetRangeLimit =>
        throw new IllegalArgumentException("starting offset can't be latest " +
          "for batch queries on Kafka")
      case SpecificOffsetRangeLimit(partitionOffsets) =>
        partitionOffsets.foreach {
          case (tp, off) if off == KafkaOffsetRangeLimit.LATEST =>
            throw new IllegalArgumentException(s"startingOffsets for $tp can't " +
              "be latest for batch queries on Kafka")
          case _ => // ignore
        }
    }

    KafkaSourceProvider.getKafkaOffsetRangeLimit(
      caseInsensitiveParams, ENDING_OFFSETS_OPTION_KEY, LatestOffsetRangeLimit) match {
      case EarliestOffsetRangeLimit =>
        throw new IllegalArgumentException("ending offset can't be earliest " +
          "for batch queries on Kafka")
      case LatestOffsetRangeLimit => // good to go
      case SpecificOffsetRangeLimit(partitionOffsets) =>
        partitionOffsets.foreach {
          case (tp, off) if off == KafkaOffsetRangeLimit.EARLIEST =>
            throw new IllegalArgumentException(s"ending offset for $tp can't be " +
              "earliest for batch queries on Kafka")
          case _ => // ignore
        }
    }

    validateGeneralOptions(caseInsensitiveParams)

    // Don't want to throw an error, but at least log a warning.
    if (caseInsensitiveParams.get("maxoffsetspertrigger").isDefined) {
      logWarning("maxOffsetsPerTrigger option ignored in batch queries")
    }
  }
}
private[kafka010] object KafkaSourceProvider extends Logging {
  // Exactly one of these three keys must be supplied to select the consumer strategy.
  private val STRATEGY_OPTION_KEYS = Set("subscribe", "subscribepattern", "assign")
  // Option keys below are stored lowercased; lookups happen against lowercased maps.
  private[kafka010] val STARTING_OFFSETS_OPTION_KEY = "startingoffsets"
  private[kafka010] val ENDING_OFFSETS_OPTION_KEY = "endingoffsets"
  private val FAIL_ON_DATA_LOSS_OPTION_KEY = "failondataloss"
  private val MIN_PARTITIONS_OPTION_KEY = "minpartitions"
  val TOPIC_OPTION_KEY = "topic"

  val INSTRUCTION_FOR_FAIL_ON_DATA_LOSS_FALSE =
    """
      |Some data may have been lost because they are not available in Kafka any more; either the
      | data was aged out by Kafka or the topic may have been deleted before all the data in the
      | topic was processed. If you want your streaming query to fail on such cases, set the source
      | option "failOnDataLoss" to "true".
    """.stripMargin

  val INSTRUCTION_FOR_FAIL_ON_DATA_LOSS_TRUE =
    """
      |Some data may have been lost because they are not available in Kafka any more; either the
      | data was aged out by Kafka or the topic may have been deleted before all the data in the
      | topic was processed. If you don't want your streaming query to fail on such cases, set the
      | source option "failOnDataLoss" to "false".
    """.stripMargin

  private val deserClassName = classOf[ByteArrayDeserializer].getName

  /**
   * Parses the starting/ending offsets option into a [[KafkaOffsetRangeLimit]]:
   * "latest" / "earliest" (case-insensitive) map to the corresponding limits, any other
   * string is parsed as per-partition JSON offsets, and absence yields `defaultOffsets`.
   */
  def getKafkaOffsetRangeLimit(
      params: Map[String, String],
      offsetOptionKey: String,
      defaultOffsets: KafkaOffsetRangeLimit): KafkaOffsetRangeLimit = {
    params.get(offsetOptionKey).map(_.trim) match {
      case Some(offset) if offset.toLowerCase(Locale.ROOT) == "latest" =>
        LatestOffsetRangeLimit
      case Some(offset) if offset.toLowerCase(Locale.ROOT) == "earliest" =>
        EarliestOffsetRangeLimit
      case Some(json) => SpecificOffsetRangeLimit(JsonUtils.partitionOffsets(json))
      case None => defaultOffsets
    }
  }

  def kafkaParamsForDriver(specifiedKafkaParams: Map[String, String]): ju.Map[String, Object] =
    ConfigUpdater("source", specifiedKafkaParams)
      .set(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, deserClassName)
      .set(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, deserClassName)
      // Set to "earliest" to avoid exceptions. However, KafkaSource will fetch the initial
      // offsets by itself instead of counting on KafkaConsumer.
      .set(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
      // So that consumers in the driver does not commit offsets unnecessarily
      .set(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
      // So that the driver does not pull too much data
      // NOTE(review): `new java.lang.Integer(...)` is deprecated on newer JDKs;
      // Integer.valueOf(1) would be the modern equivalent.
      .set(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, new java.lang.Integer(1))
      // If buffer config is not set, set it to reasonable value to work around
      // buffer issues (see KAFKA-3135)
      .setIfUnset(ConsumerConfig.RECEIVE_BUFFER_CONFIG, 65536: java.lang.Integer)
      .build()

  def kafkaParamsForExecutors(
      specifiedKafkaParams: Map[String, String],
      uniqueGroupId: String): ju.Map[String, Object] =
    ConfigUpdater("executor", specifiedKafkaParams)
      .set(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, deserClassName)
      .set(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, deserClassName)
      // Make sure executors do only what the driver tells them.
      .set(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none")
      // So that consumers in executors do not mess with any existing group id
      .set(ConsumerConfig.GROUP_ID_CONFIG, s"$uniqueGroupId-executor")
      // So that consumers in executors does not commit offsets unnecessarily
      .set(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
      // If buffer config is not set, set it to reasonable value to work around
      // buffer issues (see KAFKA-3135)
      .setIfUnset(ConsumerConfig.RECEIVE_BUFFER_CONFIG, 65536: java.lang.Integer)
      .build()

  /** Class to conveniently update Kafka config params, while logging the changes */
  private case class ConfigUpdater(module: String, kafkaParams: Map[String, String]) {
    private val map = new ju.HashMap[String, Object](kafkaParams.asJava)

    // Always overwrites; logs the previous user-specified value, if any.
    def set(key: String, value: Object): this.type = {
      map.put(key, value)
      logDebug(s"$module: Set $key to $value, earlier value: ${kafkaParams.getOrElse(key, "")}")
      this
    }

    // Writes only when the key is absent, preserving user-specified values.
    def setIfUnset(key: String, value: Object): ConfigUpdater = {
      if (!map.containsKey(key)) {
        map.put(key, value)
        logDebug(s"$module: Set $key to $value")
      }
      this
    }

    def build(): ju.Map[String, Object] = map
  }

  private[kafka010] def kafkaParamsForProducer(
      parameters: Map[String, String]): Map[String, String] = {
    val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase(Locale.ROOT), v) }
    if (caseInsensitiveParams.contains(s"kafka.${ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG}")) {
      throw new IllegalArgumentException(
        s"Kafka option '${ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG}' is not supported as keys "
          + "are serialized with ByteArraySerializer.")
    }

    if (caseInsensitiveParams.contains(s"kafka.${ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG}"))
    {
      throw new IllegalArgumentException(
        s"Kafka option '${ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG}' is not supported as "
          + "value are serialized with ByteArraySerializer.")
    }
    // Strip the 6-character "kafka." prefix and force the byte-array serializers.
    parameters
      .keySet
      .filter(_.toLowerCase(Locale.ROOT).startsWith("kafka."))
      .map { k => k.drop(6).toString -> parameters(k) }
      .toMap + (ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG -> classOf[ByteArraySerializer].getName,
      ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG -> classOf[ByteArraySerializer].getName)
  }
}
| bravo-zhang/spark | external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaSourceProvider.scala | Scala | apache-2.0 | 25,520 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import scala.util.control.NonFatal
case class ArgsException(message: String) extends RuntimeException(message)
/**
 * The args class does a simple command line parsing. The rules are:
 * keys start with one or more "-". Each key has zero or more values
 * following.
 */
object Args {
  /**
   * Split on whitespace and then parse.
   */
  def apply(argString: String): Args = Args(argString.split("\\\\s+"))

  /**
   * parses keys as starting with a dash, except single dashed digits.
   * All following non-dashed args are a list of values.
   * If the list starts with non-dashed args, these are associated with the
   * empty string: ""
   */
  def apply(args: Iterable[String]): Args = {
    // NOTE(review): this local helper appears unused in the body below — confirm before removing.
    def startingDashes(word: String) = word.takeWhile { _ == '-' }.length
    new Args(
      //Fold into a list of (arg -> List[values])
      // Accumulator is a list of (key -> reversed values); the head is the key currently
      // collecting values. Dashed tokens (other than negative numbers) start a new key.
      args
        .filter{ a => !a.matches("\\\\s*") }
        .foldLeft(List("" -> List[String]())) { (acc, arg) =>
          val noDashes = arg.dropWhile{ _ == '-' }
          if (arg == noDashes || isNumber(arg))
            (acc.head._1 -> (arg :: acc.head._2)) :: acc.tail
          else
            (noDashes -> List()) :: acc
        }
        //Now reverse the values to keep the same order
        .map { case (key, value) => key -> value.reverse }.toMap)
  }

  // True iff the whole token parses as a Double (so "-1" is treated as a value, not a key).
  def isNumber(arg: String): Boolean = {
    try {
      arg.toDouble
      true
    } catch {
      case e: NumberFormatException => false
    }
  }

  /**
   * By default, scalding will use reflection to try and identify classes to tokenize. Set to false to disable
   */
  val jobClassReflection = "scalding.job.classreflection"
}
class Args(val m: Map[String, List[String]]) extends java.io.Serializable {
//Replace or add a given key+args pair:
def +(keyvals: (String, Iterable[String])): Args = new Args(m + (keyvals._1 -> keyvals._2.toList))

/**
 * Does this Args contain a given key?
 */
def boolean(key: String): Boolean = m.contains(key)

/**
 * Get the list of values associated with a given key.
 * if the key is absent, return the empty list. NOTE: empty
 * does not mean the key is absent, it could be a key without
 * a value. Use boolean() to check existence.
 */
def list(key: String): List[String] = m.get(key).getOrElse(List())

/**
 * This is a synonym for required
 */
def apply(key: String): String = required(key)

/**
 * Gets the list of positional arguments
 */
// Positional (non-keyed) arguments are stored under the empty-string key.
def positional: List[String] = list("")
/**
* return required positional value.
*/
def required(position: Int): String = positional match {
case l if l.size > position => l(position)
case _ => throw ArgsException("Please provide " + (position + 1) + " positional arguments")
}
/**
* This is a synonym for required
*/
def apply(position: Int): String = required(position)
override def equals(other: Any): Boolean = {
if (other.isInstanceOf[Args]) {
other.asInstanceOf[Args].m.equals(m)
} else {
false
}
}
override def hashCode(): Int = m.hashCode()
/**
* Equivalent to .optional(key).getOrElse(default)
*/
def getOrElse(key: String, default: String): String = optional(key).getOrElse(default)
/**
* return exactly one value for a given key.
* If there is more than one value, you get an exception
*/
def required(key: String): String = list(key) match {
case List() => throw ArgsException("Please provide a value for --" + key)
case List(a) => a
case _ => throw ArgsException("Please only provide a single value for --" + key)
}
def toList: List[String] = {
m.foldLeft(List[String]()) { (args, kvlist) =>
val k = kvlist._1
val values = kvlist._2
if (k != "") {
//Make sure positional args are first
args ++ ((("--" + k) :: values))
} else {
// These are positional args (no key), put them first:
values ++ args
}
}
}
/**
* Asserts whether all the args belong to the given set of accepted arguments.
* If an arg does not belong to the given set, you get an error.
*/
def restrictTo(acceptedArgs: Set[String]): Unit = {
val invalidArgs = m.keySet.filter(!_.startsWith("scalding.")) -- (acceptedArgs + "" + "tool.graph" + "hdfs" + "local")
if (!invalidArgs.isEmpty) throw ArgsException("Invalid args: " + invalidArgs.map("--" + _).mkString(", "))
}
// TODO: if there are spaces in the keys or values, this will not round-trip
override def toString: String = toList.mkString(" ")
/**
* If there is zero or one element, return it as an Option.
* If there is a list of more than one item, you get an error
*/
def optional(key: String): Option[String] = list(key) match {
case List() => None
case List(a) => Some(a)
case _ => throw ArgsException("Please provide at most one value for --" + key)
}
def int(key: String, default: Int): Int = {
optional(key).map(value => try value.toInt catch {
case NonFatal(_) => throw ArgsException(s"Invalid value ${value} for -- ${key}")
}).getOrElse(default)
}
def int(key: String): Int = {
val value = required(key)
try value.toInt catch {
case NonFatal(_) => throw ArgsException(s"Invalid value ${value} for -- ${key}")
}
}
def long(key: String, default: Long): Long = {
optional(key).map(value => try value.toLong catch {
case NonFatal(_) => throw ArgsException(s"Invalid value ${value} for -- ${key}")
}).getOrElse(default)
}
def long(key: String): Long = {
val value = required(key)
try value.toLong catch {
case NonFatal(_) => throw ArgsException(s"Invalid value ${value} for -- ${key}")
}
}
def float(key: String, default: Float): Float = {
optional(key).map(value => try value.toFloat catch {
case NonFatal(_) => throw ArgsException(s"Invalid value ${value} for -- ${key}")
}).getOrElse(default)
}
def float(key: String): Float = {
val value = required(key)
try value.toFloat catch {
case NonFatal(_) => throw ArgsException(s"Invalid value ${value} for -- ${key}")
}
}
def double(key: String, default: Double): Double = {
optional(key).map(value => try value.toDouble catch {
case NonFatal(_) => throw ArgsException(s"Invalid value ${value} for -- ${key}")
}).getOrElse(default)
}
def double(key: String): Double = {
val value = required(key)
try value.toDouble catch {
case NonFatal(_) => throw ArgsException(s"Invalid value ${value} for -- ${key}")
}
}
}
| tdyas/scalding | scalding-args/src/main/scala/com/twitter/scalding/Args.scala | Scala | apache-2.0 | 7,118 |
/**
* Copyright (c) 2002-2012 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.pipes.matching
import org.neo4j.graphdb.{NotFoundException, Relationship, Node, PropertyContainer}
import collection.Map
/**
 * Pairs one element of the query pattern with the value (graph node,
 * relationship wrapper, or other entity) currently bound to it during
 * pattern matching.
 */
case class MatchingPair(patternElement: PatternElement, entity: Any) {
  // True if x equals either side of this pair.
  // NOTE(review): the last two disjuncts repeat the first two with operands
  // flipped; they only matter if some entity implements an asymmetric
  // equals — confirm before simplifying.
  def matches(x: Any) = x == entity || x == patternElement || entity == x || patternElement == x
  /** Renders as "<patternKey>=<value>", preferring the entity's "name" property when present. */
  override def toString = {
    val value = entity match {
      case propC: PropertyContainer => try {
        propC.getProperty("name").toString
      } catch {
        // No "name" property on this entity; fall back to its default rendering.
        case e: NotFoundException => propC.toString
      }
      case null => "null"
      case x => x.toString
    }
    patternElement.key + "=" + value
  }
  /**
   * True when this pair's entity is compatible with whatever is already bound
   * to the same pattern key in `boundNodes`, or when nothing is bound yet.
   * NOTE(review): the inner match is not exhaustive (e.g. a Node paired with a
   * relationship falls through and throws MatchError) — confirm such
   * combinations cannot occur at the call sites.
   */
  def matchesBoundEntity(boundNodes: Map[String, MatchingPair]): Boolean = boundNodes.get(patternElement.key) match {
    case Some(pinnedNode) => (entity, pinnedNode.entity) match {
      case (a: Node, b: Node) => a == b
      case (a: SingleGraphRelationship, b: Relationship) => a.rel == b
      case (a: Relationship, b: SingleGraphRelationship) => a == b.rel
      case (a: VariableLengthGraphRelationship, b: VariableLengthGraphRelationship) => a.path == b.path
      case (a: VariableLengthGraphRelationship, b) => false
      case (a, b: VariableLengthGraphRelationship) => false
    }
    case None => true
  }
  // The accessors below cast unconditionally: they assume this pair holds a
  // PatternNode bound to a graph Node. Callers are responsible for only
  // invoking them in that state.
  def getGraphRelationships(pRel: PatternRelationship): Seq[GraphRelationship] = patternElement.asInstanceOf[PatternNode].getGraphRelationships(entity.asInstanceOf[Node], pRel)
  def getPatternAndGraphPoint: (PatternNode, Node) = (patternElement.asInstanceOf[PatternNode], entity.asInstanceOf[Node])
  def patternNode = patternElement.asInstanceOf[PatternNode]
}
| dksaputra/community | cypher/src/main/scala/org/neo4j/cypher/internal/pipes/matching/MatchingPair.scala | Scala | gpl-3.0 | 2,436 |
package com.eharmony.aloha.models.vw.jni
import java.io.FileInputStream
import com.eharmony.aloha.id.ModelId
import com.eharmony.aloha.io.vfs.{File, Vfs1, Vfs2}
import com.eharmony.aloha.models.reg.ConstantDeltaSpline
import org.apache.commons.{vfs => vfs1, vfs2}
import org.junit.Assert._
import org.junit.{BeforeClass, Test}
import spray.json.{JsObject, pimpString}
/**
* These tests are now designed to pass if the VW model cannot be created in the BeforeClass method.
* This is due to Travis not working as we expect it to. Because cat /proc/version doesn't match
* the purported os the VW JNI library doesn't know which system dependent version of the lib
* to load and these tests will consequently fail.
*
* Created by jmorra on 7/10/15.
*/
/**
 * Shared fixtures for the JSON serialization tests.  The VW binary model is
 * created once per JVM by the @BeforeClass hook below.
 */
object VwJniModelJsonTest {
  @BeforeClass def createModel(): Unit = VwJniModelTest.createModel()
  // Base64 form of the binary VW model; lazy so the file is only read after
  // createModel() has run.
  lazy val base64EncodedModelString = VwJniModel.readBinaryVwModelToB64String(new FileInputStream(VwJniModelTest.VwModelFile))
  val vfs = vfs2.VFS.getManager
  // The same model/spec resolved through commons-vfs2.
  val vfsModel = Vfs2(vfs2.VFS.getManager.resolveFile(VwJniModelTest.VwModelPath))
  val vfsSpec = Vfs2(vfs2.VFS.getManager.resolveFile("res:com/eharmony/aloha/models/vw/jni/good.logistic.aloha.js"))
  // Spline used by the tests exercising the optional "spline" JSON section.
  val cds = ConstantDeltaSpline(0, 1, IndexedSeq(0.25, 0.75))
}
class VwJniModelJsonTest {
  import VwJniModelJsonTest._

  /**
   * Expected JSON for a model serialized without notes or a spline.  Shared by
   * the three tests that only vary the Vfs implementation used to read the
   * model.  Lazy so the base64 model string (produced by the companion's
   * @BeforeClass hook) is not read before the hook has run.
   */
  private lazy val basicExpected =
    ("""
      |{
      |  "modelType": "VwJNI",
      |  "modelId": { "id": 0, "name": "model name" },
      |  "features": {
      |    "height_mm": "Seq((\"1800\", 1.0))"
      |  },
      |  "namespaces": {
      |    "personal_features": [ "height_mm" ]
      |  },
      |  "vw": {
      |    "params": "--quiet -t",
      |    "model": """".stripMargin.trim + base64EncodedModelString + """"
      |  }
      |}
    """).stripMargin.parseJson

  /**
   * Asserts that `actual` matches `expected` after dropping the
   * non-deterministic "creationDate" field from the "vw" section.
   * Consolidates the trailer previously duplicated in every test.
   */
  private def assertSameModelJson(expected: spray.json.JsValue, actual: spray.json.JsValue): Unit = {
    val fields = actual.asJsObject.fields
    val act = JsObject(fields + ("vw" -> JsObject(fields("vw").asJsObject.fields - "creationDate")))
    assertEquals(expected, act)
  }

  /** Serializing via a commons-vfs2 handle produces the expected JSON. */
  @Test def testGoodModel() = {
    val actual = VwJniModel.json(vfsSpec, vfsModel, ModelId(0, "model name"), Some("--quiet -t"))
    assertSameModelJson(basicExpected, actual)
  }

  /** Serializing via a commons-vfs1 handle produces the same JSON. */
  @Test def testGoodModelViaVfs1() = {
    val vfs1Model = Vfs1(vfs1.VFS.getManager.resolveFile(VwJniModelTest.VwModelPath))
    val actual = VwJniModel.json(vfsSpec, vfs1Model, ModelId(0, "model name"), Some("--quiet -t"))
    assertSameModelJson(basicExpected, actual)
  }

  /** Serializing via a plain java.io.File handle produces the same JSON. */
  @Test def testGoodModelViaFile() = {
    val vfsFile = File(new java.io.File(VwJniModelTest.VwModelPath))
    val actual = VwJniModel.json(vfsSpec, vfsFile, ModelId(0, "model name"), Some("--quiet -t"))
    assertSameModelJson(basicExpected, actual)
  }

  /** A "notes" section appears when notes are supplied. */
  @Test def withNotes() = {
    val expected =
      ("""
        |{
        |  "modelType": "VwJNI",
        |  "modelId": { "id": 0, "name": "model name" },
        |  "features": {
        |    "height_mm": "Seq((\"1800\", 1.0))"
        |  },
        |  "notes": [
        |    "This is a note"
        |  ],
        |  "namespaces": {
        |    "personal_features": [ "height_mm" ]
        |  },
        |  "vw": {
        |    "params": "--quiet -t",
        |    "model": """".stripMargin.trim + base64EncodedModelString + """"
        |  }
        |}
      """).stripMargin.parseJson
    val actual = VwJniModel.json(vfsSpec, vfsModel, ModelId(0, "model name"), Some("--quiet -t"), false, None, Some(Seq("This is a note")))
    assertSameModelJson(expected, actual)
  }

  /** A "spline" section appears when a spline is supplied. */
  @Test def withSpline() = {
    val expected =
      ("""
        |{
        |  "modelType": "VwJNI",
        |  "modelId": { "id": 0, "name": "model name" },
        |  "features": {
        |    "height_mm": "Seq((\"1800\", 1.0))"
        |  },
        |  "spline": {
        |    "min": 0.0,
        |    "max": 1.0,
        |    "knots": [0.25, 0.75]
        |  },
        |  "namespaces": {
        |    "personal_features": [ "height_mm" ]
        |  },
        |  "vw": {
        |    "params": "--quiet -t",
        |    "model": """".stripMargin.trim + base64EncodedModelString + """"
        |  }
        |}
      """).stripMargin.parseJson
    val actual = VwJniModel.json(vfsSpec, vfsModel, ModelId(0, "model name"), Some("--quiet -t"), false, None, None, Some(cds))
    assertSameModelJson(expected, actual)
  }

  /** Notes and spline sections can be emitted together. */
  @Test def withNotesAndSpline() = {
    val expected =
      ("""
        |{
        |  "modelType": "VwJNI",
        |  "modelId": { "id": 0, "name": "model name" },
        |  "features": {
        |    "height_mm": "Seq((\"1800\", 1.0))"
        |  },
        |  "notes": [
        |    "This is a note"
        |  ],
        |  "spline": {
        |    "min": 0.0,
        |    "max": 1.0,
        |    "knots": [0.25, 0.75]
        |  },
        |  "namespaces": {
        |    "personal_features": [ "height_mm" ]
        |  },
        |  "vw": {
        |    "params": "--quiet -t",
        |    "model": """".stripMargin.trim + base64EncodedModelString + """"
        |  }
        |}
      """).stripMargin.parseJson
    val actual = VwJniModel.json(vfsSpec, vfsModel, ModelId(0, "model name"), Some("--quiet -t"), false, None, Some(Seq("This is a note")), Some(cds))
    assertSameModelJson(expected, actual)
  }
}
| eHarmony/aloha | aloha-vw-jni/src/test/scala/com/eharmony/aloha/models/vw/jni/VwJniModelJsonTest.scala | Scala | mit | 7,354 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.features.kryo.json
import org.geotools.factory.CommonFactoryFinder
import org.geotools.filter.expression.PropertyAccessors
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.features.kryo.KryoFeatureSerializer
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
/**
 * Exercises JsonPathPropertyAccessor: evaluating JSON-path expressions
 * (e.g. "$.json.foo") against both in-memory and kryo-serialized simple
 * features, directly and from within ECQL filters.
 */
class JsonPathPropertyAccessorTest extends Specification {
  // Run examples in declaration order instead of specs2's default concurrent execution.
  sequential
  val ff = CommonFactoryFinder.getFilterFactory2
  // "json=true" marks the first attribute as JSON; "s" is a plain String
  // attribute used to check path evaluation against non-JSON-typed values.
  val sft = SimpleFeatureTypes.createType("json", "json:String:json=true,s:String,dtg:Date,*geom:Point:srid=4326")
  "JsonPathPropertyAccessor" should {
    // The accessor must be discoverable through the geotools PropertyAccessors lookup.
    "be available on the classpath" in {
      import scala.collection.JavaConverters._
      val accessors =
        PropertyAccessors.findPropertyAccessors(new ScalaSimpleFeature("", sft), "$.json.foo", classOf[String], null)
      accessors must not(beNull)
      accessors.asScala must contain(JsonPathPropertyAccessor)
    }
    "access json values in simple features" in {
      val property = ff.property("$.json.foo")
      val sf = new ScalaSimpleFeature("", sft)
      sf.setAttribute(0, """{ "foo" : "bar" }""")
      property.evaluate(sf) mustEqual "bar"
      // Re-evaluating after mutation must pick up the new value (no stale caching).
      sf.setAttribute(0, """{ "foo" : "baz" }""")
      property.evaluate(sf) mustEqual "baz"
    }
    // Keys containing spaces require the bracketed ['...'] path syntax.
    "access json values in simple features with spaces in the json path" in {
      val property = ff.property("""$.json.['foo path']""")
      val sf = new ScalaSimpleFeature("", sft)
      sf.setAttribute(0, """{ "foo path" : "bar" }""")
      property.evaluate(sf) mustEqual "bar"
      sf.setAttribute(0, """{ "foo path" : "baz" }""")
      property.evaluate(sf) mustEqual "baz"
    }
    "access nested json values in simple features with a json path" in {
      val property = ff.property("""$.json.foo.bar""")
      val sf = new ScalaSimpleFeature("", sft)
      sf.setAttribute(0, """{ "foo" : { "bar" : 0 } }""")
      property.evaluate(sf) mustEqual 0
      sf.setAttribute(0, """{ "foo" : { "bar" : "baz" } }""")
      property.evaluate(sf) mustEqual "baz"
    }
    // Attribute "s" is not declared with json=true, but path evaluation still works on its string content.
    "access non-json strings in simple features" in {
      val property = ff.property("$.s.foo")
      val sf = new ScalaSimpleFeature("", sft)
      sf.setAttribute(1, """{ "foo" : "bar" }""")
      property.evaluate(sf) mustEqual "bar"
      sf.setAttribute(1, """{ "foo" : "baz" }""")
      property.evaluate(sf) mustEqual "baz"
    }
    // Same behaviour when the feature is backed by a kryo-serialized buffer.
    "access json values in kryo serialized simple features" in {
      val property = ff.property("$.json.foo")
      val serializer = new KryoFeatureSerializer(sft)
      val sf = serializer.getReusableFeature
      sf.setBuffer(serializer.serialize(new ScalaSimpleFeature("", sft, Array("""{ "foo" : "bar" }""", null, null, null))))
      property.evaluate(sf) mustEqual "bar"
      sf.setBuffer(serializer.serialize(new ScalaSimpleFeature("", sft, Array("""{ "foo" : "baz" }""", null, null, null))))
      property.evaluate(sf) mustEqual "baz"
    }
    "access json values with spaces in kryo serialized simple features" in {
      val property = ff.property("$.json.['foo path']")
      val serializer = new KryoFeatureSerializer(sft)
      val sf = serializer.getReusableFeature
      sf.setBuffer(serializer.serialize(new ScalaSimpleFeature("", sft, Array("""{ "foo path" : "bar" }""", null, null, null))))
      property.evaluate(sf) mustEqual "bar"
      sf.setBuffer(serializer.serialize(new ScalaSimpleFeature("", sft, Array("""{ "foo path" : "baz" }""", null, null, null))))
      property.evaluate(sf) mustEqual "baz"
    }
    // JSON paths can be embedded in ECQL as double-quoted property names.
    "accept json path in ECQL" in {
      val expression = ECQL.toFilter(""""$.json.foo" = 'bar'""")
      val sf = new ScalaSimpleFeature("", sft)
      sf.setAttribute(0, """{ "foo" : "bar" }""")
      expression.evaluate(sf) must beTrue
      sf.setAttribute(0, """{ "foo" : "baz" }""")
      expression.evaluate(sf) must beFalse
    }
    // Malformed or unmatched paths evaluate to null (and to false inside a filter)
    // for both plain and kryo-backed features.
    "return null for invalid paths" in {
      val sf0 = {
        val sf = new ScalaSimpleFeature("", sft)
        sf.setAttribute(0, """{ "foo" : "bar" }""")
        sf
      }
      val sf1 = {
        val serializer = new KryoFeatureSerializer(sft)
        val sf = serializer.getReusableFeature
        sf.setBuffer(serializer.serialize(sf0))
        sf
      }
      forall(Seq(sf0, sf1)) { sf =>
        forall(Seq("$baz", "$.baz", "baz", "$.baz/a")) { path =>
          ff.property(path).evaluate(sf) must beNull
          ECQL.toFilter(s""""$path" = 'bar'""").evaluate(sf) must beFalse
        }
      }
    }
  }
}
| tkunicki/geomesa | geomesa-features/geomesa-feature-kryo/src/test/scala/org/locationtech/geomesa/features/kryo/json/JsonPathPropertyAccessorTest.scala | Scala | apache-2.0 | 5,160 |
package rx.lang.scala.completeness
import java.util.Calendar
/**
 * Generates markdown comparison tables between the RxScala classes and their
 * RxJava counterparts. Run `sbt 'test:run rx.lang.scala.completeness.CompletenessTables'`
 * to regenerate them.  (The previous scaladoc pointed at the non-existent
 * `CompletenessTest`; the footer printed below names this object, which is
 * the actual main class.)
 */
object CompletenessTables {
  /**
   * CompletenessKits describing each Scala/Java type pair to compare.
   */
  val completenessKits = List(
    new ObservableCompletenessKit,
    new BlockingObservableCompletenessKit,
    new TestSchedulerCompletenessKit,
    new TestSubscriberCompletenessKit)
  /**
   * Maps each Java method of the kit to its Scala counterpart, substituting a
   * "[**TODO: missing**]" marker when the counterpart is not actually
   * implemented.  Bracketed cells are free-form notes and pass through as-is.
   */
  def setTodoForMissingMethods(completenessKit: CompletenessKit): Map[String, String] = {
    val actualMethods = completenessKit.rxScalaPublicInstanceAndCompanionMethods.toSet
    for ((javaM, scalaM) <- completenessKit.correspondence) yield
      (javaM, if (actualMethods.contains(scalaM) || scalaM.charAt(0) == '[') scalaM else "[**TODO: missing**]")
  }
  /**
   * Rewrites a Scala signature into the equivalent Java syntax.
   * Order matters: brackets are converted before the Array rewrite so that
   * "Array[T]" (already "Array<T>" at that point) becomes "T[]".
   */
  def scalaToJavaSignature(s: String) =
    s.replaceAllLiterally("_ <:", "? extends")
      .replaceAllLiterally("_ >:", "? super")
      .replaceAllLiterally("[", "<")
      .replaceAllLiterally("]", ">")
      .replaceAllLiterally("Array<T>", "T[]")
  /**
   * HTML-escapes angle brackets so a full signature can be embedded in an
   * HTML `title` attribute.
   * Fixed: the replacements were identity no-ops ("<" -> "<", ">" -> ">"),
   * which left raw angle brackets in the generated tooltips.
   */
  def escapeJava(s: String) =
    s.replaceAllLiterally("<", "&lt;")
      .replaceAllLiterally(">", "&gt;")
  /** Prints the full comparison page: front matter, one table per kit, and a generation footer. */
  def printMarkdownCorrespondenceTables() {
    println("""
---
layout: comparison
title: Comparison of Scala Classes and Java Classes
---
Note:
* These tables contain both static methods and instance methods.
* If a signature is too long, move your mouse over it to get the full signature.
""")
    completenessKits.foreach(printMarkdownCorrespondenceTable)
    val completenessTablesClassName = getClass.getCanonicalName.dropRight(1) // Drop "$"
    println(s"\\nThese tables were generated on ${Calendar.getInstance().getTime}.")
    println(s"**Do not edit**. Instead, edit `${completenessTablesClassName}` and run `sbt 'test:run ${completenessTablesClassName}'` to generate these tables.")
  }
  /**
   * Prints the markdown table for a single kit: a header followed by one row
   * per Java method name (overloads grouped into a single cell).
   */
  def printMarkdownCorrespondenceTable(completenessKit: CompletenessKit): Unit = {
    // Group overloads by Java method name (everything before '('), with all
    // "averageX" variants collapsed into one "average" row.
    def groupingKey(p: (String, String)): (String, String) =
      (if (p._1.startsWith("average")) "average" else p._1.takeWhile(_ != '('), p._2)
    // Long signatures collapse to "name(...)" with the full (escaped) signature
    // shown in an HTML tooltip.
    def formatJavaCol(name: String, alternatives: Iterable[String]): String = {
      alternatives.toList.sorted.map(scalaToJavaSignature).map(s => {
        if (s.length > 64) {
          val toolTip = escapeJava(s)
          "<span title=\\"" + toolTip + "\\"><code>" + name + "(...)</code></span>"
        } else {
          "`" + s + "`"
        }
      }).mkString("<br/>")
    }
    // Bracketed cells are notes and are emitted without code formatting.
    def formatScalaCol(s: String): String =
      if (s.startsWith("[") && s.endsWith("]")) s.drop(1).dropRight(1) else "`" + s + "`"
    val ps = setTodoForMissingMethods(completenessKit)
    println(s"""
               |## Comparison of Scala ${completenessKit.rxScalaType.typeSymbol.name} and Java ${completenessKit.rxJavaType.typeSymbol.name}
               |
               || Java Method | Scala Method |
               ||-------------|--------------|""".stripMargin)
    (for (((javaName, scalaCol), pairs) <- ps.groupBy(groupingKey).toList.sortBy(_._1._1)) yield {
      "| " + formatJavaCol(javaName, pairs.map(_._1)) + " | " + formatScalaCol(scalaCol) + " |"
    }).foreach(println)
  }
  def main(args: Array[String]): Unit = {
    printMarkdownCorrespondenceTables()
  }
}
| zjrstar/RxScala | src/test/scala-2.11/rx/lang/scala/completeness/CompletenessTables.scala | Scala | apache-2.0 | 3,336 |
/*
* Copyright (C) 2016-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.javadsl.persistence.jdbc.testkit
import com.lightbend.lagom.javadsl.persistence.jdbc.JdbcPersistenceSpec
import com.lightbend.lagom.javadsl.persistence.testkit.AbstractEmbeddedPersistentActorSpec
/**
 * Concrete test suite running the shared AbstractEmbeddedPersistentActorSpec
 * test cases against the JDBC persistence setup provided by JdbcPersistenceSpec.
 */
class EmbeddedJdbcPersistentActorSpec extends JdbcPersistenceSpec with AbstractEmbeddedPersistentActorSpec
| edouardKaiser/lagom | persistence-jdbc/javadsl/src/test/scala/com/lightbend/lagom/javadsl/persistence/jdbc/testkit/EmbeddedJdbcPersistentActorSpec.scala | Scala | apache-2.0 | 410 |
package com.sksamuel.elastic4s.admin
import com.sksamuel.elastic4s.{Indexes, IndexesAndTypes}
/**
 * DSL entry points that build index-administration request objects: refresh,
 * stats, type/alias/index existence, open/close, segments, flush, cache
 * clearing, rollover, shrink, index-level settings updates and shard-store
 * queries.  Each method only constructs a request value; execution happens
 * elsewhere.
 */
trait IndexAdminApi {

  // The varargs and Iterable overloads funnel into the Indexes-based form.
  def refreshIndex(first: String, rest: String*): RefreshIndexRequest = refreshIndex(first +: rest)
  def refreshIndex(indexes: Iterable[String]): RefreshIndexRequest = refreshIndex(Indexes(indexes))
  def refreshIndex(indexes: Indexes): RefreshIndexRequest = RefreshIndexRequest(indexes.values)

  // Defaults to all indexes when none are named.
  def indexStats(indexes: Indexes = Indexes.All): IndexStatsRequest = IndexStatsRequest(indexes)
  def indexStats(first: String, rest: String*): IndexStatsRequest = indexStats(first +: rest)

  def typesExist(indexesAndTypes: IndexesAndTypes) = TypesExistsRequest(indexesAndTypes.indexes, indexesAndTypes.types)
  def typesExist(types: String*): TypesExistExpectsIn = typesExist(types)
  def typesExist(types: Iterable[String]): TypesExistExpectsIn = new TypesExistExpectsIn(types)

  /** Builder step: supplies the indexes for a previously given set of types, e.g. typesExist("t").in("idx"). */
  class TypesExistExpectsIn(types: Iterable[String]) {
    def in(indexes: String*): TypesExistsRequest = TypesExistsRequest(indexes, types.toSeq)
  }

  def closeIndex(first: String, rest: String*): CloseIndexRequest = CloseIndexRequest(first +: rest)
  def openIndex(first: String, rest: String*): OpenIndexRequest = OpenIndexRequest(first +: rest)

  def getSegments(indexes: Indexes): GetSegmentsRequest = GetSegmentsRequest(indexes)
  def getSegments(first: String, rest: String*): GetSegmentsRequest = getSegments(first +: rest)

  def flushIndex(indexes: Iterable[String]): FlushIndexRequest = FlushIndexRequest(indexes.toSeq)
  def flushIndex(indexes: String*): FlushIndexRequest = flushIndex(indexes)

  def indexExists(index: String): IndicesExistsRequest = IndicesExistsRequest(index)
  def indicesExists(indices: Indexes): IndicesExistsRequest = IndicesExistsRequest(indices)
  def aliasExists(alias: String): AliasExistsRequest = AliasExistsRequest(alias)

  // clearCache and clearIndex are synonyms: both build a ClearCacheRequest.
  def clearCache(first: String, rest: String*): ClearCacheRequest = clearCache(first +: rest)
  def clearCache(indexes: Iterable[String]): ClearCacheRequest = ClearCacheRequest(indexes.toSeq)
  def clearIndex(first: String, rest: String*): ClearCacheRequest = clearIndex(first +: rest)
  def clearIndex(indexes: Iterable[String]): ClearCacheRequest = ClearCacheRequest(indexes.toSeq)

  def rolloverIndex(alias: String): RolloverIndexRequest = RolloverIndexRequest(alias)

  // Kept for source compatibility; identical to shrinkIndex.
  @deprecated("use shrinkIndex(source, target)", "6.1.2")
  def shrink(source: String, target: String): ShrinkIndexRequest = ShrinkIndexRequest(source, target)
  def shrinkIndex(source: String, target: String): ShrinkIndexRequest = ShrinkIndexRequest(source, target)

  def updateIndexLevelSettings(first: String, rest: String*): UpdateIndexLevelSettingsRequest =
    updateIndexLevelSettings(first +: rest)
  def updateIndexLevelSettings(indexes: Iterable[String]): UpdateIndexLevelSettingsRequest =
    updateIndexLevelSettings(Indexes(indexes))
  def updateIndexLevelSettings(indexes: Indexes): UpdateIndexLevelSettingsRequest =
    UpdateIndexLevelSettingsRequest(indexes.values)

  def indexShardStores(first: String, rest: String*): IndexShardStoreRequest = indexShardStores(first +: rest)
  def indexShardStores(indexes: Iterable[String]): IndexShardStoreRequest = indexShardStores(Indexes(indexes))
  def indexShardStores(indexes: Indexes): IndexShardStoreRequest = IndexShardStoreRequest(indexes)
}
| Tecsisa/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/admin/IndexAdminApi.scala | Scala | apache-2.0 | 3,443 |
// Filters: print every regular file in the current directory whose name ends in "log".
val archivos = new java.io.File("./").listFiles
archivos
  .filter(archivo => archivo.isFile && archivo.getName.endsWith("log"))
  .foreach(println)
| romanarranz/NTP | S5/estructurasForFiltros.scala | Scala | mit | 157 |
import sbt._
import Keys._
import Status.{ publishStatus }
import com.typesafe.sbt.{ SbtGhPages, SbtGit, SbtSite, site => sbtsite }
import SbtSite.{ site, SiteKeys }
import SbtGhPages.{ ghpages, GhPagesKeys => ghkeys }
import SbtGit.{ git, GitKeys }
import sbtsite.SphinxSupport
import SiteKeys.{ makeSite, siteMappings }
import Sxr.sxr
import SiteMap.Entry
/**
 * Build definitions for generating and publishing the sbt documentation site
 * (Sphinx docs, scaladoc "api", sxr sources) into the gh-pages repository,
 * one directory per version, with "release"/"snapshot" symlinks.
 */
object Docs {
  // Files copied verbatim to the website root (see ghPagesSettings: CNAME, robots.txt).
  val rootFiles = SettingKey[Seq[File]]("root-files", "Location of file that will be copied to the website root.")
  // When true, the published version also becomes the "release" alias and root index.
  val latestRelease = SettingKey[Boolean]("latest-release")
  // Sphinx build artifacts that must never be published.
  val siteExcludes = Set(".buildinfo", "objects.inv")
  def siteInclude(f: File) = !siteExcludes.contains(f.getName)
  // Base URL used by Sphinx pages to link back to their sources on GitHub.
  def siteSourceBase(siteSourceVersion: String) = s"https://github.com/sbt/sbt/raw/$siteSourceVersion/src/sphinx/"
  val sbtSiteBase = uri("http://www.scala-sbt.org/")
  // Well-known paths/symlink names under the site root.
  val SnapshotPath = "snapshot"
  val ReleasePath = "release"
  val DocsPath = "docs"
  val IndexHtml = "index.html"
  val HomeHtml = "home.html"
  // Matches "major.minor.patch" with an optional "-qualifier" suffix.
  val VersionPattern = """(\\d+)\\.(\\d+)\\.(\\d+)(-.+)?""".r.pattern
  // All site-related settings: sbt-site, Sphinx, scaladoc, sxr and gh-pages.
  def settings: Seq[Setting[_]] =
    site.settings ++
      site.sphinxSupport(DocsPath) ++
      site.includeScaladoc("api") ++
      siteIncludeSxr("sxr") ++
      ghPagesSettings ++
      Seq(
        SphinxSupport.sphinxEnv in SphinxSupport.Sphinx <<= sphinxEnvironmentVariables,
        SphinxSupport.sphinxIncremental in SphinxSupport.Sphinx := true,
        // TODO: set to true with newer sphinx plugin release
        SphinxSupport.enableOutput in SphinxSupport.generatePdf := false
      )
  // gh-pages publication: target repo, local checkout location, synch task,
  // root files and the latest-release command.
  def ghPagesSettings = ghpages.settings ++ Seq(
    git.remoteRepo := "git@github.com:sbt/sbt.github.com.git",
    localRepoDirectory,
    ghkeys.synchLocal <<= synchLocalImpl,
    rootFiles := {
      val base = (sourceDirectory in SphinxSupport.Sphinx).value
      Seq("CNAME", "robots.txt").map(base / _)
    },
    latestRelease in ThisBuild := false,
    commands += setLatestRelease,
    GitKeys.gitBranch in ghkeys.updatedRepository := Some("master")
  )
  def localRepoDirectory = ghkeys.repository := {
    // distinguish between building to update the site or not so that CI jobs
    // that don't commit+publish don't leave uncommitted changes in the working directory
    val status = if (isSnapshot.value) "snapshot" else "public"
    Path.userHome / ".sbt" / "ghpages" / status / organization.value / name.value
  }
  // Adds the sxr output to the site under the given prefix.
  def siteIncludeSxr(prefix: String) = Seq(
    mappings in sxr <<= sxr.map(dir => Path.allSubpaths(dir).toSeq),
    site.addMappingsToSiteDir(mappings in sxr, prefix)
  )
  // Environment variables consumed by the Sphinx build (version strings and
  // the source-link base URL).
  def sphinxEnvironmentVariables = (scalaVersion, version, isSnapshot) map { (scalaV, sbtV, snap) =>
    // sphinx's terminology: major.minor
    def release(v: String): String = CrossVersion.partialVersion(v) match {
      case Some((major, minor)) => major + "." + minor
      case None => v
    }
    val siteVersion = sbtV.takeWhile(_ != '-')
    val siteSourceVersion = if (snap) release(siteVersion) else siteVersion
    Map[String, String](
      "sbt.full.version" -> sbtV,
      "sbt.partial.version" -> release(sbtV),
      "sbt.site.version" -> siteVersion,
      "sbt.site.source.base" -> siteSourceBase(siteSourceVersion),
      "sbt.binary.version" -> CrossVersion.binarySbtVersion(sbtV),
      "scala.full.version" -> scalaV,
      "scala.partial.version" -> release(scalaV),
      "scala.binary.version" -> CrossVersion.binaryScalaVersion(scalaV)
    )
  }
  // Copies the generated site into the local gh-pages checkout under the
  // current version, refreshes versions.js, and (for the latest release)
  // regenerates the root index, alias symlink and site maps.
  def synchLocalImpl = (ghkeys.privateMappings, ghkeys.updatedRepository, version, isSnapshot, latestRelease, streams, rootFiles) map {
    (mappings, repo, v, snap, latest, s, roots) =>
      val versioned = repo / v
      // Rebuild this version's directory from scratch.
      IO.delete(versioned)
      val toCopy = for ((file, target) <- mappings if siteInclude(file)) yield (file, versioned / target)
      IO.copy(toCopy)
      for (f <- roots)
        IO.copyFile(f, repo / f.getName)
      // .nojekyll stops GitHub Pages from running the site through Jekyll.
      IO.touch(repo / ".nojekyll")
      IO.write(repo / "versions.js", versionsJs(sortVersions(collectVersions(repo))))
      if (!snap && latest)
        RootIndex(versioned / DocsPath / "home.html", repo / IndexHtml)
      if (snap || latest)
        linkSite(repo, v, if (snap) SnapshotPath else ReleasePath, s.log)
      s.log.info("Copied site to " + versioned)
      if (latest) {
        val (index, siteMaps) = SiteMap.generate(repo, sbtSiteBase, gzip = true, siteEntry(v), s.log)
        s.log.info(s"Generated site map index: $index")
        s.log.debug(s"Generated site maps: ${siteMaps.mkString("\\n\\t", "\\n\\t", "")}")
      }
      repo
  }
  // Site-map entry (change frequency, priority) for each published file path.
  // CurrentVersion is capitalized deliberately so it acts as a stable
  // identifier inside the patterns below.
  def siteEntry(CurrentVersion: String)(file: File, relPath: String): Option[Entry] =
    {
      val apiOrSxr = """([^/]+)/(api|sxr)/.*""".r
      val docs = """([^/]+)/docs/.*""".r
      val old077 = """0\\.7\\.7/.*""".r
      val manualRedirects = """[^/]+\\.html""".r
      val snapshot = """(.+-SNAPSHOT|snapshot)/.+/.*""".r
      // highest priority is the home page
      // X/docs/ are higher priority than X/(api|sxr)/
      // release/ is slighty higher priority than <releaseVersion>/
      // non-current releases are low priority
      // 0.7.7 documentation is very low priority
      // snapshots docs are very low priority
      // the manual redirects from the old version of the site have no priority at all
      relPath match {
        case "index.html" => Some(Entry("weekly", 1.0))
        case docs(ReleasePath) => Some(Entry("weekly", 0.9))
        case docs(CurrentVersion) => Some(Entry("weekly", 0.8))
        case apiOrSxr(ReleasePath, _) => Some(Entry("weekly", 0.6))
        case apiOrSxr(CurrentVersion, _) => Some(Entry("weekly", 0.5))
        case snapshot(_) => Some(Entry("weekly", 0.02))
        case old077() => Some(Entry("never", 0.01))
        case docs(_) => Some(Entry("never", 0.2))
        case apiOrSxr(_, _) => Some(Entry("never", 0.1))
        case x => Some(Entry("never", 0.0))
      }
    }
  // JavaScript snippet listing all published versions, consumed by the site.
  def versionsJs(vs: Seq[String]): String = "var availableDocumentationVersions = " + vs.mkString("['", "', '", "']")
  // names of all directories that are explicit versions
  def collectVersions(base: File): Seq[String] = (base * versionFilter).get.map(_.getName)
  // Newest version first, ordered by numeric components.
  def sortVersions(vs: Seq[String]): Seq[String] = vs.sortBy(versionComponents).reverse
  def versionComponents(v: String): Option[(Int, Int, Int, Option[String])] = {
    val m = VersionPattern.matcher(v)
    if (m.matches())
      Some((m.group(1).toInt, m.group(2).toInt, m.group(3).toInt, Option(m.group(4))))
    else
      None
  }
  def versionFilter = new PatternFilter(VersionPattern) && DirectoryFilter
  // Replaces the `from` symlink (e.g. "release") so it points at version `to`.
  def linkSite(base: File, to: String, from: String, log: Logger) {
    val current = base / to
    assert(current.isDirectory, "Versioned site not present at " + current.getAbsolutePath)
    val symlinkDir = base / from
    symlinkDir.delete()
    symlink(path = to, file = symlinkDir, log = log)
  }
  // TODO: platform independence/use symlink from Java 7
  // NOTE(review): `error` here is the long-deprecated Predef.error; prefer
  // sys.error when this file is next touched.
  def symlink(path: String, file: File, log: Logger): Unit =
    "ln" :: "-s" :: path :: file.getAbsolutePath :: Nil ! log match {
      case 0 => ()
      case code => error("Could not create symbolic link '" + file.getAbsolutePath + "' with path " + path)
    }
  // Command that flips latestRelease to true for the remainder of the session.
  def setLatestRelease = Command.command("latest-release-docs") { state =>
    Project.extract(state).append((latestRelease in ThisBuild := true) :: Nil, state)
  }
}
/**
 * Builds the site root index.html from a versioned "home" page: relative
 * links are retargeted into release/docs/ and the search element is removed.
 */
object RootIndex {
  import Docs._
  import org.jsoup._
  /** Parses `versionIndex` as HTML, rewrites it, and writes the result to `to`. */
  def apply(versionIndex: File, to: File) {
    val doc = Jsoup.parse(versionIndex, "UTF-8")
    rewriteLinks(doc)
    removeSearch(doc)
    IO.write(to, doc.outerHtml)
  }
  // Relative links (other than pure fragments) are pointed into release/docs/;
  // absolute URLs and in-page anchors are left untouched.
  def retargetIndexLink(original: String): String =
    if (isAbsolute(original) || original.startsWith("#"))
      original
    else
      ReleasePath + "/docs/" + original
  def isAbsolute(s: String): Boolean = (new java.net.URI(s)).isAbsolute
  // Rewrites href attributes on all elements and src attributes on scripts.
  def rewriteLinks(doc: nodes.Document) {
    rewriteLinks(doc, "*", "href")
    rewriteLinks(doc, "script", "src")
  }
  def rewriteLinks(doc: nodes.Document, elemName: String, attrName: String): Unit =
    for (elem <- select(doc, elemName + "[" + attrName + "]"))
      elem.attr(attrName, retargetIndexLink(elem.attr(attrName)))
  // Removes all elements carrying the "search" CSS class.
  def removeSearch(doc: nodes.Document): Unit =
    doc.select(".search").remove()
  // Scala-friendly iterator over a jsoup CSS selection.
  def select(doc: nodes.Document, s: String) =
    {
      import collection.JavaConverters._
      doc.select(s).iterator.asScala
    }
} | niktrop/sbt | project/Docs.scala | Scala | bsd-3-clause | 8,519 |
package net.scalax.ubw.core
import cats.{ Functor, Monad }
import net.scalax.ubw.core.Channel.PilePipImpl
import scala.language.higherKinds
/** A channel that evaluates a [[Pile]] structure into a value of type `T`
  * (or an [[AtomicException]] when generation fails).
  */
trait InputChannel[T] {
  self =>
  // Generator: given a Pile, yields the piles plus a function from resolved
  // data-pile content to this channel's result T.
  val pilesGen: Channel.PileGen[T]
  /** Pairs this channel with a syntax functor, upgrading it to an [[IOChannel]]. */
  def withSyntax[R[_]](syntax1: PileSyntaxFunctor[T, R]): IOChannel[T, R] = {
    new IOChannel[T, R] {
      override val pilesGen = self.pilesGen
      override val PileSyntaxFunctor = syntax1
    }
  }
  /*def flatMap[S, U](mapPiles: Channel.PileGenImpl[List[DataPile] => S])(cv: (T, List[DataPile] => S) => U): Channel.PileGenImpl[List[DataPile] => U] = {
    val monad = implicitly[Monad[Channel.PileGenImpl]]
    monad.flatMap(pilesGen) { tGen =>
      monad.map(mapPiles) { sGen =>
        { values: List[DataPile] =>
          cv(tGen(values), sGen)
        }
      }
    }
  }*/
  /** Evaluates this channel against several piles by wrapping them into a
    * single PileListImpl and delegating to [[commonResult]].
    * NOTE(review): assumes every element is a CommonPile — the cast fails otherwise; confirm callers.
    */
  def result(piles: List[Pile]): Either[AtomicException, T] = {
    val listPile = new PileListImpl(
      piles,
      piles.map(_.asInstanceOf[CommonPile]),
      { list: List[Any] => list },
      { list: List[Any] => list }
    )
    commonResult(listPile)
  }
  /** Evaluates this channel against one pile: runs the generator, then feeds
    * it a content view built from the pile's leaf-zero data.
    */
  def commonResult(pile: Pile): Either[AtomicException, T] = {
    pilesGen.gen(pile).right.map { s =>
      s.valueFunc(new DataPileContent {
        override val atomicList = pile.leafZero
        // old and new data piles start out identical for a fresh evaluation
        override val oldDataPiles = pile.leafZeroDataPiles
        override val newDataPiles = pile.leafZeroDataPiles
        override val previousContent = Option.empty
      })
    }
  }
}
/** An [[InputChannel]] that additionally knows how to map its result through
  * the effect `R` via a [[PileSyntaxFunctor]], enabling channel composition.
  */
trait IOChannel[T, R[_]] extends InputChannel[T] {
  self =>
  //override val pilesGen: Channel.PileGen[T]
  val PileSyntaxFunctor: PileSyntaxFunctor[T, R]
  /** Attaches a cats Functor for `R`, upgrading this to a [[FoldableChannel]]. */
  def withFunctor(functor: cats.Functor[R]): FoldableChannel[T, R] = {
    // rebind to avoid shadowing inside the anonymous class body
    val functor1 = functor
    new FoldableChannel[T, R] {
      override val pilesGen = self.pilesGen
      override val PileSyntaxFunctor = self.PileSyntaxFunctor
      override val functor = functor1
    }
  }
  /** Chains another channel after this one; the result of `other` is wrapped
    * in this channel's effect `R`.
    */
  def next[U](other: InputChannel[U]): InputChannel[R[U]] = {
    new InputChannel[R[U]] {
      override val pilesGen: Channel.PileGen[R[U]] = {
        PileSyntaxFunctor.reduce(self.pilesGen, other.pilesGen)
      }
    }
  }
  /*def afterResult[E](filter: PileFilter[E]): IOChannel[(T, R[E]), ({ type L[K] = (R[K], R[E]) })#L] = {
    new IOChannel[(T, R[E]), ({ type L[K] = (R[K], R[E]) })#L] {
      override val pilesGen = new Channel.PileGen[(T, R[E])] {
        override def gen(pile: Pile): Either[AtomicException, Channel.PilePip[(T, R[E])]] = {
          self.pilesGen.gen(pile) match {
            case Right(oldPile) =>
              Right(PilePipImpl[DataPileContent => (T, R[E])](oldPile.piles, { dataPiles =>
                oldPile.valueFunc(dataPiles) -> self.PileSyntaxFunctor.pileMap(oldPile.valueFunc(dataPiles), { content =>
                  filter.transform(content.newDataPiles)
                })
              }))
            case Left(e) => Left(e)
          }
        }
      }
      override val PileSyntaxFunctor = new PileSyntaxFunctor[(T, R[E]), ({ type L[K] = (R[K], R[E]) })#L] {
        override def pileMap[U](a: (T, R[E]), pervious: DataPileContent => U): (R[U], R[E]) = {
          self.PileSyntaxFunctor.pileMap(a._1, pervious) -> a._2
        }
      }
    }
  }*/
  /** Runs `filter` over the data piles produced by this channel's evaluation,
    * yielding the filter's output inside this channel's effect `R`.
    */
  def afterResult[E](filter: PileFilter[E]): InputChannel[(R[E])] = {
    new InputChannel[R[E]] {
      override val pilesGen = new Channel.PileGen[R[E]] {
        override def gen(pile: Pile): Either[AtomicException, Channel.PilePip[R[E]]] = {
          self.pilesGen.gen(pile) match {
            case Right(oldPile) =>
              Right(PilePipImpl[DataPileContent => R[E]](oldPile.piles, { dataPiles =>
                // evaluate self first, then map the filter over the resulting content
                val selfResult = oldPile.valueFunc(dataPiles)
                self.PileSyntaxFunctor.pileMap(selfResult, { content =>
                  filter.transform(content.newDataPiles)
                })
              }))
            case Left(e) => Left(e)
          }
        }
      }
    }
  }
}
/** An [[IOChannel]] whose effect `R` also has a cats Functor instance,
  * allowing composed channels to keep composing (results nest as R[H[_]]).
  */
trait FoldableChannel[T, R[_]] extends IOChannel[T, R] {
  self =>
  override val pilesGen: Channel.PileGen[T]
  override val PileSyntaxFunctor: PileSyntaxFunctor[T, R]
  val functor: cats.Functor[R]
  /** Chains an [[IOChannel]] after this one; the composed effect is R stacked
    * on H. The result is an IOChannel (no functor for the stacked effect).
    */
  def next2222[U, H[_]](other: IOChannel[U, H]): IOChannel[R[U], ({ type V[W] = R[H[W]] })#V] = {
    new IOChannel[R[U], ({ type V[W] = R[H[W]] })#V] {
      override val pilesGen: Channel.PileGen[R[U]] = {
        self.PileSyntaxFunctor.reduce(self.pilesGen, other.pilesGen)
      }
      override val PileSyntaxFunctor = new PileSyntaxFunctor[R[U], ({ type V[W] = R[H[W]] })#V] {
        def pileMap[M](a: R[U], pervious: DataPileContent => M): R[H[M]] = {
          // map through the outer effect R, then delegate to other's functor syntax
          self.functor.map(a) { u =>
            other.PileSyntaxFunctor.pileMap(u, pervious)
          }
        }
      }
    }
  }
  /** Like [[next2222]] but `other` is also foldable, so the stacked effect
    * R[H[_]] gets a composed Functor and the result stays a FoldableChannel.
    */
  def next3333[U, H[_]](other: FoldableChannel[U, H]): FoldableChannel[R[U], ({ type V[W] = R[H[W]] })#V] = {
    new FoldableChannel[R[U], ({ type V[W] = R[H[W]] })#V] {
      override val pilesGen: Channel.PileGen[R[U]] = {
        self.PileSyntaxFunctor.reduce(self.pilesGen, other.pilesGen)
      }
      override val PileSyntaxFunctor = new PileSyntaxFunctor[R[U], ({ type V[W] = R[H[W]] })#V] {
        def pileMap[M](a: R[U], pervious: DataPileContent => M): R[H[M]] = {
          self.functor.map(a) { u =>
            other.PileSyntaxFunctor.pileMap(u, pervious)
          }
        }
      }
      override val functor: cats.Functor[({ type V[W] = R[H[W]] })#V] = {
        // Functor composition: map the outer R, then the inner H.
        new Functor[({ type V[W] = R[H[W]] })#V] {
          override def map[A, B](fa: R[H[A]])(f: (A) => B): R[H[B]] = self.functor.map(fa) { s => other.functor.map(s)(f) }
        }
      }
    }
  }
  /*def afterResult[E](filter: PileFilter[E]): InputChannel[(R[(T, E)]), ({ type L[K] = R[R[(K, E)]] })#L] = {
    new FoldableChannel[R[(T, E)], ({ type L[K] = R[R[(K, E)]] })#L] {
      override val pilesGen = new Channel.PileGen[R[(T, E)]] {
        override def gen(pile: Pile): Either[AtomicException, Channel.PilePip[R[(T, E)]]] = {
          self.pilesGen.gen(pile) match {
            case Right(oldPile) =>
              Right(PilePipImpl[DataPileContent => R[(T, E)]](oldPile.piles, { dataPiles =>
                val selfResult = oldPile.valueFunc(dataPiles)
                self.functor.map(self.PileSyntaxFunctor.pileMap(selfResult, { content =>
                  filter.transform(content.newDataPiles)
                })) { s => selfResult -> s }
              }))
            case Left(e) => Left(e)
          }
        }
      }
      override val PileSyntaxFunctor = new PileSyntaxFunctor[R[(T, E)], ({ type L[K] = R[R[(K, E)]] })#L] {
        override def pileMap[U](a: R[(T, E)], pervious: DataPileContent => U): R[R[(U, E)]] = {
          self.functor.map(a) {
            case (t, e) =>
              self.PileSyntaxFunctor.pileMap(t, { content => pervious(content) -> e })
          }
        }
      }
      override val functor: cats.Functor[({ type L[K] = R[R[(K, E)]] })#L] = new cats.Functor[({ type L[K] = R[R[(K, E)]] })#L] {
        override def map[A, B](fa: R[R[(A, E)]])(f: A => B): R[R[(B, E)]] = {
          self.functor.map(fa) { t => self.functor.map(t)(t => f(t._1) -> t._2) }
        }
      }
    }
  }*/
}
/** Marker trait for pile-generation helpers; currently carries no members. */
trait PilesGenHelper {
}
/** Maps a channel result `T` through effect `R` and knows how to compose two
  * pile generators into one producing `R[S]`.
  */
trait PileSyntaxFunctor[T, R[_]] extends PilesGenHelper {
  // Applies `pervious` (sic: "previous" throughout this file) inside the effect R.
  def pileMap[U](a: T, pervious: DataPileContent => U): R[U]
  /** Composes two generators: the second one's result is mapped through R via pileMap. */
  def reduce[S](pervious: Channel.PileGen[T], next: Channel.PileGen[S]): Channel.PileGen[R[S]] = {
    Channel.compose(pervious)(next) { (t, gen) =>
      pileMap(t, gen)
    }
  }
}
package task.airport
package util
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import org.webjars.WebJarAssetLocator
import scala.util.{Failure, Success, Try}
// See: https://github.com/ThoughtWorksInc/akka-http-webjars/blob/master/src/main/scala/com/thoughtworks/akka/http/WebJarsSupport.scala
/** akka-http route serving static assets bundled as WebJars.
  * URL shape: /<webjar-name>/<asset-path>.
  */
trait WebJarsSupport {
  private val webJarAssetLocator = new WebJarAssetLocator
  val webJars: Route =
    path(Segment / Remaining) { (webJar, partialPath) =>
      // getFullPath may throw; capture in Try and translate the outcome.
      Try {
        webJarAssetLocator.getFullPath(webJar, partialPath)
      } match {
        case Success(fullPath) => getFromResource(fullPath)
        // IllegalArgumentException is turned into a rejection so sibling routes
        // can still match (presumably thrown when the asset is missing — TODO confirm).
        case Failure(_: IllegalArgumentException) => reject
        // Any other failure becomes a 500 via failWith.
        case Failure(exception) => failWith(exception)
      }
    }
}
object WebJarsSupport extends WebJarsSupport
| rarrabi/task-airport | jvm/src/main/scala/task/airport/util/WebJarsSupport.scala | Scala | mit | 827 |
import org.scalatest.{Matchers, FunSuite}
/** Unit tests for [[CountingSort.countSort]] over small integer lists. */
class CountingSortTest extends FunSuite with Matchers {
  test("generateCountList should generate") {
    val sorter = new CountingSort
    // unsorted input within range [1, 33] comes back sorted ascending
    sorter.countSort(List(11, 1, 22, 33, 23), 1, 33) should be(List(1, 11, 22, 23, 33))
    // an already-sorted input is returned unchanged
    sorter.countSort(List(1, 2, 3, 4, 5, 6, 7, 8, 9), 1, 9) should be(List(1, 2, 3, 4, 5, 6, 7, 8, 9))
  }
}
| warreee/Algorithm-Implementations | Counting_Sort/Scala/aayushKumarJarvis/CountingSortTest.scala | Scala | mit | 396 |
package ru.wordmetrix.dreamcrammer
import java.io._
import android.content.{Context, Intent}
import ru.wordmetrix.dreamcrammer.db._
import ru.wordmetrix._
import android.view.{Menu, MenuItem, View, ViewGroup, LayoutInflater}
import android.widget.{ToggleButton, TextView, Button, CompoundButton, ImageView, ArrayAdapter, ListView, PopupMenu, AdapterView, GridView}
import android.graphics.{BitmapFactory,Bitmap}
import android.support.v4.util.LruCache
/** Renders a [[Picture]]'s bitmap into the picture item/view layouts. */
class PictureDisplay(context : DreamCrammerBase, picture : Picture) extends BaseDisplay(context) {
  override
  def item(resource : Int = R.layout.pictureitem) = super.item(resource)

  override
  def item(viewgroup : ViewGroup) : ViewGroup = {
    val imageView : ImageView = viewgroup.findViewById(R.id.picture_body).asInstanceOf[ImageView]
    // ticket : Add a message that something is wrong with picture instead of silent droping it
    picture.bodyOption match {
      case Some(body) => try {
        // Fix: use the extracted `body` for the length instead of re-reading
        // picture.body, which was inconsistent with the match and may fetch again.
        imageView.setImageBitmap(Bitmap.createScaledBitmap(BitmapFactory.decodeByteArray(body, 0, body.length), 256, 256, true))
      } catch {
        // decode/scale can fail on corrupt data; log and leave the view empty
        case x : Throwable => log("Bitmap is broken", x)
      }
      case None => log("Bitmap has not been loaded yet")
    }
    viewgroup
  }

  override
  def view(resource : Int = R.layout.pictureview) : ViewGroup = super.view(resource)
}
| electricmind/dreamcrammer | src/main/ru/wordmetrix/dreamcrammer/picturedisplay.scala | Scala | apache-2.0 | 1,620 |
package shared.responses.groups.members
/** Response payload after removing member `userId` from group `name`
  * belonging to `organizationId`.
  */
case class RemoveMemberGroupResponse(
    organizationId: String,
    name: String,
    userId: String
)
| beikern/foulkon-ui | shared/src/main/scala/shared/responses/groups/members/RemoveMemberGroupResponse.scala | Scala | apache-2.0 | 137 |
package com.mooveit.moonitor.principal.actors
import akka.actor._
import akka.remote.RemoteScope
import com.mooveit.moonitor.agent.actors.Agent
import com.mooveit.moonitor.agent.actors.Agent._
import com.mooveit.moonitor.domain.alerts.AlertConfiguration
import com.mooveit.moonitor.domain.metrics.MetricConfiguration
import com.mooveit.moonitor.principal.actors.ConfigurationStore._
import com.mooveit.moonitor.principal.actors.MetricsStore.Save
import com.mooveit.moonitor.principal.actors.Principal._
import com.mooveit.moonitor.principal.actors.Watcher._
/** Actor coordinating monitoring for a single `host`: deploys a remote Agent,
  * a local Watcher, and forwards configuration/metric traffic between them,
  * the metrics store and the configuration store.
  */
class Principal(host: String, store: ActorRef, confStore: ActorRef)
  extends Actor {
  val config = context.system.settings.config
  // Set asynchronously when the config store replies (see receive).
  // NOTE(review): messages needing agent/watcher before those replies would hit
  // a null reference — confirm delivery ordering guarantees.
  private var agent: ActorRef = _
  private var watcher: ActorRef = _
  private val mailInformer = context.actorOf(Props[MailInformer])
  override def preStart() = {
    // Ask for stored metric/alert configuration; the replies trigger
    // agent and watcher creation in receive.
    confStore ! RetrieveMetricsConfig(host)
    confStore ! RetrieveAlertsConfig(host)
  }
  // Intentionally empty: the default postRestart re-runs preStart, which would
  // re-query the configuration store on every restart.
  override def postRestart(reason: Throwable) = {}
  /** Remote deployment target for the Agent, built from agent.* config keys plus `host`. */
  def agentDeployConfig = {
    val protocol = config.getString("agent.protocol")
    val systemName = config.getString("agent.system_name")
    val port = config.getInt("agent.port")
    Deploy(scope = RemoteScope(Address(protocol, systemName, host, port)))
  }
  override def receive = {
    // Config store replies: lazily create the remote agent / local watcher.
    case MetricsConfiguration(metricsConfig) =>
      val deploy = agentDeployConfig
      val props = Agent.props(metricsConfig).withDeploy(deploy)
      agent = context.actorOf(props, s"agent-$host")
    case AlertsConfiguration(alertsConfig) =>
      watcher = context.actorOf(
        Watcher.props(host, alertsConfig, mailInformer), s"watcher-$host")
    // Configuration changes are persisted first, then forwarded.
    case startCollecting @ StartCollecting(mconf) =>
      confStore ! SaveMetric(host, mconf)
      agent ! startCollecting
    case stopCollecting @ StopCollecting(m) =>
      confStore ! RemoveMetric(host, m)
      agent ! stopCollecting
    case startWatching @ StartWatching(aconf) =>
      confStore ! SaveAlert(host, aconf)
      watcher ! startWatching
    case stopWatching @ StopWatching(metric) =>
      confStore ! RemoveAlert(host, metric)
      watcher ! stopWatching
    // Shutdown: stop the remote agent, kill the watcher, then stop self.
    case Stop =>
      agent ! Stop
      watcher ! PoisonPill
      context stop self
    // Collected metrics are persisted and also evaluated for alerts.
    case metricCollected @ MetricCollected(id, result) =>
      store ! Save(host, id, result)
      watcher ! metricCollected
  }
}
object Principal {
  /** Props factory for [[Principal]]. */
  def props(host: String, store: ActorRef, confStore: ActorRef) =
    Props(new Principal(host, store, confStore))
  /** Metric configuration set for a host, as replied by the config store. */
  case class MetricsConfiguration(conf: Iterable[MetricConfiguration])
  object MetricsConfiguration {
    // Varargs convenience overload delegating to the Iterable constructor.
    def apply(confs: MetricConfiguration*): MetricsConfiguration = apply(confs)
  }
  /** Alert configuration set for a host, as replied by the config store. */
  case class AlertsConfiguration(conf: Iterable[AlertConfiguration])
  object AlertsConfiguration {
    // Varargs convenience overload delegating to the Iterable constructor.
    def apply(confs: AlertConfiguration*): AlertsConfiguration = apply(confs)
  }
}
| moove-it/moonitor | principal/src/main/scala/com/mooveit/moonitor/principal/actors/Principal.scala | Scala | mit | 2,875 |
package controllers
import models.APIProvider
import play.api._
import play.api.mvc._
/** Play controller exposing data-population actions backed by [[APIProvider]]. */
object APIProviderController extends Controller {
  /** Runs APIProvider.populateEverything() and returns a 200 acknowledgement. */
  def populateEverything = Action {
    APIProvider.populateEverything()
    Ok("Population under way!")
  }
}
| slacker2/departure-times | app/controllers/APIProviderController.scala | Scala | gpl-3.0 | 251 |
package com.github.alixba.vast
import javax.xml.datatype.{ DatatypeFactory, XMLGregorianCalendar }
import javax.xml.parsers.SAXParserFactory
import scala.language.implicitConversions
import scala.xml._
/** Shared factories reused by all companions (see the "don't recreate Factory
  * every time" note in fromString).
  */
object VASTElementCompanion {
  lazy val saxParserFactory = SAXParserFactory.newInstance()
  // Note: this statement forces saxParserFactory at object initialization.
  saxParserFactory.setNamespaceAware(false)
  lazy val datatypeFactory = DatatypeFactory.newInstance()
}
/** Companion-object contract for VAST elements: parse a T from a String or a
  * scala.xml Node. Mixes in the fromXML* implicit conversions.
  */
trait VASTElementCompanion[T] extends fromXMLImplicits {
  /**
   * Deserializes a String to a T.
   * Makes use of the fromXML method.
   * Why fromXML? because it is way simpler
   * to transform Node -> T than String -> T.
   */
  def fromString(string: String): T = {
    // don't recreate Factory every time
    val saxParser = VASTElementCompanion.saxParserFactory.newSAXParser()
    fromXML(XML.withSAXParser(saxParser).loadString(string))
  }
  /**
   * Deserializes a Node to a T.
   * The highest tag of the Node should match
   * the T.
   *
   * {{{
   *   val elem = <Ad><SomeTags/></Ad>
   *   val ad = Ad.fromXML(elem)
   * }}}
   */
  def fromXML(node: Node): T
}
/** Implicit conversions from scala.xml nodes (and optional nodes) to the
  * primitive/date types used while deserializing VAST elements.
  */
trait fromXMLImplicits {
  implicit def nodeOptToIntOpt(opt: Option[Node]): Option[Int] =
    opt.map(_.text.toInt)
  implicit def nodeOptToStringOpt(opt: Option[Node]): Option[String] =
    opt.map(_.text)
  // only the literal text "true" (case-insensitive) maps to true
  implicit def nodeOptToBooleanOpt(opt: Option[Node]): Option[Boolean] =
    opt.map(_.text.equalsIgnoreCase("true"))
  implicit def nodeOptToCalendarOpt(opt: Option[Node]): Option[XMLGregorianCalendar] =
    opt.map(n ⇒ VASTElementCompanion.datatypeFactory.newXMLGregorianCalendar(n.text))
  implicit def nodeToInt(n: Node): Int =
    n.text.toInt
  implicit def nodeToString(n: Node): String =
    n.text
  implicit def nodeToCalendar(n: Node): XMLGregorianCalendar =
    VASTElementCompanion.datatypeFactory.newXMLGregorianCalendar(n.text)
} | AlixBa/vast | src/main/scala/com/github/alixba/vast/VASTElementCompanion.scala | Scala | mit | 1,851 |
package es.codemotion.akkaships.client
import akka.actor.ActorSystem
import com.typesafe.config.{ConfigValue, ConfigValueFactory}
import es.codemotion.akkaships.client.config.PlayerConfig
import org.apache.log4j.Logger
import scala.collection.JavaConversions._
import scala.language.postfixOps
/** Factory for [[PlayerInitialize]]; inherits base configuration from PlayerConfig. */
object PlayerInitialize extends PlayerConfig{
  override lazy val logger = Logger.getLogger(getClass)
  /** Builds an initializer overriding only the seed-node host list. */
  def apply(seedNodes: java.util.List[String]) =
    new PlayerInitialize(seedNodes)
  /** Builds an initializer with the base configuration unchanged. */
  def apply() = new PlayerInitialize()
}
/** Overlays `properties` on the base PlayerConfig and boots the client-side
  * actor system joining the ships server cluster.
  */
class PlayerInitialize(properties: java.util.Map[String, ConfigValue]) {
  // Auxiliary constructor: only override the server host list.
  def this(serverHosts: java.util.List[String]) =
    this(Map(PlayerConfig.PlayerConfigHosts -> ConfigValueFactory.fromAnyRef(serverHosts)))
  // Auxiliary constructor: use the base configuration as-is.
  def this() = this(Map.empty[String, ConfigValue])
  /** Merges overrides into the base config and starts the actor system. */
  def initPlayer():Unit={
    lazy val logger = PlayerInitialize.logger
    // Fold the override map on top of the base configuration, key by key.
    val finalConfig = properties.foldLeft(PlayerInitialize.config) { case (previousConfig, keyValue) =>
      previousConfig.withValue(keyValue._1, keyValue._2)
    }
    // Initialize the clustered actor system
    val system = ActorSystem("ShipsServerCluster", finalConfig)
    if (logger.isDebugEnabled) {
      system.logConfiguration()
    }
    // NOTE(review): serverNode is currently unused — presumably the path for the
    // yet-to-be-created player actor (see TODO below).
    val serverNode=s"${PlayerInitialize.config.getStringList(PlayerConfig.ServerNode)(0)}/user/server"
    // TODO: create the Player actor connected to the server
  }
} | jjlopezm/Akkaships-Exercise | Client/src/main/scala/es/codemotion/akkaships/client/PlayerInitialize.scala | Scala | apache-2.0 | 1,385 |
//package org.eoin
//
//import org.eoin.Chapter11.Monad
//
//import scala.annotation.tailrec
//
//
///**
// * Created by eoin.parker on 1/13/17.
// */
//object Chapter13 {
//
//// object PastedFromBookText_Ignore {
//// trait IO[F[_], I, +A]
////
//// case class Pure[F[_], I, +A](get: A) extends IO[F,I,A]
////
//// case class Request[F[_], I, +A](
//// expr: F[I],
//// receive: (I) => IO[F, I, A] ) extends IO[F,I,A]
////
//// trait Console[A]
//// case object ReadLine extends Console[Option[String]]
//// case class PrintLine(s: String) extends Console[Unit]
////
////
//// trait Run[F[_]] {
//// def apply[A](expr: F[A]): (A, Run[F])
//// }
////
//// object IO {
//// @annotation.tailrec
//// def run[F[_],I,A](R: Run[F])(io: IO[F,I,A]): A = io match {
//// case Pure(a:A) => a
//// case Request(expr:F[I],recv: Function1[I,IO[F,I,A]]) => {
//// R(expr) match {
//// case (e,r2) => run(r2)(recv(e))
//// }
//// }
//// }
//// }
////
//// trait RunConsoleMock[F[_]] extends Run[Console[F]] {
//// def apply(c: Console[Option[String]]) = c match {
//// case ReadLine => (Some("Hello world!"), this)
//// case PrintLine(_) => ((), this)
//// }
//// }
////
//// trait RunConsole[F[_]] extends Run[Console[F]] {
//// def apply(c: Console) = c match {
//// case ReadLine =>
//// val r = try Some(readLine) catch { case _ => None }
//// (r, this)
//// case PrintLine(s) => (println(s), this)
//// }
//// }
//// }
//
// // the real defn
//// sealed trait IO[F[_],A] { self =>
//// def run: A
//// def map[B](f: A => B): IO[F,B] = unit(f(run) )
//// def flatMap[B](f: A => IO[F,B]): IO[F,B] = f(run)
//// def unit[A](a: => A): IO[F,A] = new IO[F,A] { override def run: A = a }
////
//// }
////
//// case class Pure[A](a: A) extends IO[A] {
//// override def run: A = a
//// }
////
//// case class Request[F[_] : Run,A] (expr: F[A], recvFn: A => (A, F[A]) ) extends IO[F,A] {
//// override def run: A = {
//// val runIt = implicitly[Run[F,A]].apply(expr)
//// runIt._1 // TODO ok to discard _2 ?
//// }
//// }
//
//// sealed trait IO[A] { self =>
//// def run: A
//// def map[B](f: A => B): IO[B] =
//// new IO[B] { def run = f(self.run) }
//// def flatMap[B](f: A => IO[B]): IO[B] =
//// new IO[B] { def run = f(self.run).run }
//// }
//
// trait IO[F[_], +A]
// case class Pure[F[_], +A](get: A) extends IO[F,A]
// case class Request[F[_],I,+A](
// expr: F[I],
// receive: I => IO[F,A]) extends IO[F,A]
// case class BindMore[F[_],A,+B](
// force: () => IO[F,A],
// f: A => IO[F,B]) extends IO[F,B]
// case class BindRequest[F[_],I,A,+B](
// expr: F[I], receive: I => IO[F,A],
// f: A => IO[F,B]) extends IO[F,B]
// case class More[F[_],A](force: () => IO[F,A]) extends IO[F,A]
//
// type PartiallyFixedMonad[F] = Monad[({ type f[a] = IO[F,a]})#f]
//
// object IO {
// //def run[F[_],A](R: Run[F])(io: IO[F,A]): A = ???
// def run[F[_],A](F: Monad[F])(io: IO[F,A]): F[A] = {
// io match {
// case Pure(get) => F.unit(get)
// case Request(expr, receive) =>
// case BindMore(force,f) => F.flatMap(force())(f)
// case BindRequest(expr,f,receive) =>
// case More(force) =>
// }
// }
//
// }
//
// def monad[F[_]] = new PartiallyFixedMonad[F] {
//// override def unit[A](a: => A): IO[F, A] = new IO[F,A] { override def run: A = a }
//// override def flatMap[A, B](ma: IO[F, A])(f: (A) => IO[F, B]): IO[F, B] = ma flatMap(f)
// override def unit[A](a: => A): IO[F, A] = ???
//
// override def flatMap[A, B](ma: IO[F, A])(f: (A) => IO[F, B]): IO[F, B] = ???
// }
//
//
// object IO extends PartiallyFixedMonad[IO] {
// def unit[A](a: => A): IO[A] = new IO[A] { def run = a }
// def flatMap[A,B](fa: IO[A])(f: A => IO[B]) = fa flatMap f
// def apply[A](a: => A): IO[A] = unit(a) // syntax for IO { .. }
//
// def ref[A](a: A): IO[IORef[A]] = IO { new IORef(a) }
// sealed class IORef[A](var value: A) {
// def set(a: A): IO[A] = IO { value = a; a }
// def get: IO[A] = IO { value }
// def modify(f: A => A): IO[A] = get flatMap (a => set(f(a)))
// }
// }
//
//
// import scala.language.higherKinds
//
// trait Run[F[_], A] {
// def apply(expr: F[A]): (A, Run[F, A])
// }
//
// trait Console[A]
// case object ReadLine extends Console[Option[String]]
// case class PrintLine(s: String) extends Console[Unit]
//
//
// //type FixedMonad[F] = Monad[({ type f[a] = IO[F,a]})#f]
//
// object exercise1 {
//
// def monad[F[_]] = new Monad[({ type f[a] = IO[F,a]})#f] {
// override def unit[A](a: => A): IO[F, A] = new IO[F,A] { override def run: A = a }
// override def flatMap[A, B](ma: IO[F, A])(f: (A) => IO[F, B]): IO[F, B] = ma flatMap(f)
// }
//
// }
//
// object exercise2 { //typechecks OK but seems 100% bananas
// //@tailrec
// def console(lines: List[String]) :Run[Console, Option[String]] = new Run[Console, Option[String]] { self =>
// override def apply(expr: Console[Option[String]]) : (Option[String], Run[Console, Option[String]])= {
// if (lines.isEmpty) (None,self)
// else
// expr match {
// case PrintLine(_) => null //ignore TODO
// case ReadLine => (Some(lines.head), console(lines.tail))
// }
// }
// }
//
// }
//
// object exercise3 {
//
// def run[F[_],A](F: Monad[F])(io: IO[F,A]): F[A] = io match {
// case Pure(a:A) => F.unit(a)
// case Request(expr, receive) => F.flatMap(expr) { (a:A) => receive(a)._2 } //discard the _1
// }
// }
//
//
// object exercise4 {
//
// import exercise1._
// val F = monad[java.lang.Runnable]
//
// val ex1 = F.sequence(List.fill(100000)(IO { math.random }))
// }
//
//
// object exercise5 {
// sealed trait Trampoline[+A]
// case class Done[+A](get: A) extends Trampoline[A]
// case class More[+A](force: () => Trampoline[A]) extends Trampoline[A]
// case class Bind[A,+B](force: () => Trampoline[A],
// f: A => Trampoline[B]) extends Trampoline[B]
//
// @tailrec
// def run[A](t: Trampoline[A]): A = t match {
// case Done(get) => get
// case More (force) => run(force())
// case Bind(force: Function0[Trampoline[A]], f ) => run(force())
// }
// }
//
// object exercise6 {
//
// import exercise5._
//
// object TrampolineMonad extends Monad[Trampoline] {
// override def unit[A](a: => A): Trampoline[A] = Done(a)
//
// override def flatMap[A, B](ma: Trampoline[A])(f: (A) => Trampoline[B]): Trampoline[B] = {
// ma match {
// case Done(get) => f(get)
// case More (force) => Bind(force, f)
// case Bind(force: Function0[Trampoline[A]], bindF: Function1[A,Trampoline[B]] ) => Bind(force, bindF andThen f) // compiles but seems wrong
// }
// }
// }
//
// }
//
| eoinparker/FunctionalProgrammingRedBook | src/main/scala/org/eoin/Chapter13.scala | Scala | mit | 7,051 |
package com.twitter.server
import org.scalatest.funsuite.AnyFunSuite
/** Verifies the standard linter rules are registered on a TwitterServer. */
class LintersTest extends AnyFunSuite {
  val server = new TestTwitterServer

  val ruleIdsSet = server.linterRules.map(_.id).toSet

  // Expected rule id paired with the human-readable description used in the test name.
  private val expectedRules = Seq(
    "number-of-statsreceivers" -> "number of StatsReceivers",
    "duplicate-client-stackregistry-names" -> "duplicate client StackRegistry names",
    "duplicate-server-stackregistry-names" -> "duplicate server StackRegistry names",
    "finagle-client-without-metrics" -> "NullStatsReceiver client",
    "finagle-server-without-metrics" -> "NullStatsReceiver server",
    "memcache-client-has-failfast-enabled" -> "Memcache fail fast",
    "multiple-slf4j-implementations" -> "multiple slf4j implementations"
  )

  // Register one test per expected rule; names match the original hand-written tests.
  for ((id, description) <- expectedRules) {
    test(s"Linter has $description rule") {
      assert(ruleIdsSet.contains(id))
    }
  }
}
| twitter/twitter-server | server/src/test/scala/com/twitter/server/LintersTest.scala | Scala | apache-2.0 | 1,110 |
package dev.code_n_roll.gatling.jdbc.simulation
import dev.code_n_roll.gatling.jdbc.Predef._
import dev.code_n_roll.gatling.jdbc.builder.column.ColumnHelper._
import io.gatling.core.Predef._
import io.gatling.core.scenario.Simulation
import scalikejdbc.{GlobalSettings, LoggingSQLAndTimeSettings}
/**
* Created by ronny on 10.05.17.
*/
/** Gatling simulation: creates table `bar` in an in-memory H2 database,
  * inserts ten rows, then checks typed single- and multi-row SELECT results.
  */
class SelectTypedCheckSimulation extends Simulation {
  val jdbcConfig = jdbc.url("jdbc:h2:mem:test;DB_CLOSE_ON_EXIT=FALSE").username("sa").password("sa").driver("org.h2.Driver")
  // Compact, warn-level SQL logging from scalikejdbc.
  GlobalSettings.loggingSQLAndTime = LoggingSQLAndTimeSettings(singleLineMode = true, logLevel = 'warn)
  val testScenario = scenario("createTable").
    exec(jdbc("bar table")
      .create()
      .table("bar")
      .columns(
        column(
          name("abc"),
          dataType("INTEGER"),
          constraint("PRIMARY KEY")
        ),
        column(
          name("foo"),
          dataType("INTEGER")
        )
      )
    // insert rows (n, n) for n = 1..10
    ).repeat(10, "n") {
    exec(jdbc("insertion")
      .insert()
      .into("bar")
      .values("${n}, ${n}")
    )
  }.pause(1).
    // single-row check: row with primary key 4 must be (4, 4)
    exec(jdbc("selectionSingleCheck")
      .select("*")
      .from("bar")
      .where("abc=4")
      .mapResult(rs => Stored(rs.int("abc"), rs.int("foo")))
      .check(singleResponse[Stored].is(Stored(4, 4))
        .saveAs("myResult"))
    ).pause(1).
    // multi-row check: keys 4 and 5 must come back as both stored rows
    exec(jdbc("selectionManyCheck")
      .select("*")
      .from("bar")
      .where("abc=4 OR abc=5")
      .mapResult(rs => Stored(rs.int("abc"), rs.int("foo")))
      .check(manyResponse[Stored].is(List(
        Stored(4, 4),
        Stored(5, 5)))
      )
    )
  //.exec(session => session("something").as[List[Map[String, Any]]])
  // Single user; the run fails if any request fails.
  setUp(testScenario.inject(atOnceUsers(1)))
    .protocols(jdbcConfig)
    .assertions(global.failedRequests.count.is(0))
}
case class Stored(abc: Int, foo: Int) | rbraeunlich/gatling-jdbc | src/test/scala/dev/code_n_roll/gatling/jdbc/simulation/SelectTypedCheckSimulation.scala | Scala | apache-2.0 | 1,852 |
package com.lookout.borderpatrol.test.util
import com.lookout.borderpatrol.test._
import com.lookout.borderpatrol.util.Combinators._
import com.lookout.borderpatrol.util.Helpers._
import com.twitter.finagle.http.{Method, Request}
/** Tests for scrubQueryParams/scrubAString: CR/LF (and their %0d%0a encodings)
  * must be stripped so header-injection payloads cannot pass through params.
  */
class HelpersSpec extends BorderPatrolSuite {
  // Builds a POST request carrying `uri` as a form-encoded body.
  def toPostRequest(uri: String): Request =
    tap(Request(Method.Post, "/"))(req => {
      req.contentType = "application/x-www-form-urlencoded"
      req.contentString = uri
    })
  behavior of "scrubQueryParams"
  it should "scrub the special characters from the encoded query params of GET Request URI" in {
    /* Query encoded GET URLs */
    val req1 = Request("logout?destination=%20/abc%0d%0a%20test:abc%0d%0a&blah=/abc%0d%0atest2:abc2%0d%0a")
    scrubQueryParams(req1.params, "destination") should be(Some("/abc"))
    val req2 = Request("logout?destination=/abc%0d%0atest:abc%0d%0a")
    scrubQueryParams(req2.params, "destination") should be(Some("/abc"))
    val req3 = Request("logout?destination=/abc%0d%0a")
    scrubQueryParams(req3.params, "destination") should be(Some("/abc"))
    val req4 = Request("logout?destination=/%0d%0aabc%0d%0a")
    scrubQueryParams(req4.params, "destination") should be(Some("/"))
    val req6 = Request("logout?destination=abc%20efg")
    scrubQueryParams(req6.params, "destination") should be(Some("abc efg"))
    /* Error conditions */
    scrubQueryParams(req1.params, "foo") should be(None)
    scrubQueryParams(req1.params, null) should be(None)
  }
  it should "scrub the special characters from the query params of GET Request URI" in {
    /* URLs w/o encoding */
    val req11 = Request("logout?destination=/abc\n\rtest:abc\n\r&blah=/abc\n\rtest2:abc2\n\r")
    scrubQueryParams(req11.params, "destination") should be(Some("/abc"))
    val req12 = Request("logout?destination=/abc\n\rtest:abc\n\r")
    scrubQueryParams(req12.params, "destination") should be(Some("/abc"))
    val req13 = Request("logout?destination=/abc\n\r")
    scrubQueryParams(req13.params, "destination") should be(Some("/abc"))
    val req14 = Request("logout?destination=/\n\rabc\n\r")
    scrubQueryParams(req14.params, "destination") should be(Some("/"))
    val req15 = Request("logout?destination=/abc\r")
    scrubQueryParams(req15.params, "destination") should be(Some("/abc"))
    val req16 = Request("logout?destination=/abc\n\r")
    scrubQueryParams(req16.params, "destination") should be(Some("/abc"))
    val req17 = Request("logout?destination=/abc\n")
    scrubQueryParams(req17.params, "destination") should be(Some("/abc"))
    val req18 = Request("logout?destination=\n\r/abc")
    scrubQueryParams(req18.params, "destination") should be(Some("/abc"))
    val req19 = Request("logout?destination=")
    scrubQueryParams(req19.params, "destination") should be(None)
    val req20 = Request("logout?destination=\n\r")
    scrubQueryParams(req20.params, "destination") should be(None)
  }
  it should "scrub the special characters from the encoded query params of POST Request URI" in {
    /* Query encoded POST URLs */
    val req30 = toPostRequest("destination=/abc")
    scrubQueryParams(req30.params, "destination") should be(Some("/abc"))
    val req31 = toPostRequest("destination=/abc%0d%0atest:abc%0d%0a&blah=/abc%0d%0atest2:abc2%0d%0a")
    scrubQueryParams(req31.params, "destination") should be(Some("/abc"))
    val req32 = toPostRequest("destination=/abc%0d%0atest:abc%0d%0a")
    scrubQueryParams(req32.params, "destination") should be(Some("/abc"))
    val req33 = toPostRequest("destination=/abc%0d%0a")
    scrubQueryParams(req33.params, "destination") should be(Some("/abc"))
    val req34 = toPostRequest("destination=/%0d%0aabc%0d%0a")
    scrubQueryParams(req34.params, "destination") should be(Some("/"))
  }
  it should "scrub the special characters from the query params of POST Request URI" in {
    /* URLs w/o POST encoding */
    val req41 = toPostRequest("destination=/abc\n\rtest:abc\n\r&blah=/abc\n\rtest2:abc2\n\r")
    scrubQueryParams(req41.params, "destination") should be(Some("/abc"))
    val req42 = toPostRequest("destination=/abc\n\rtest:abc\n\r")
    scrubQueryParams(req42.params, "destination") should be(Some("/abc"))
    val req43 = toPostRequest("destination=/abc\n\r")
    scrubQueryParams(req43.params, "destination") should be(Some("/abc"))
    val req44 = toPostRequest("destination=/\n\rabc\n\r")
    scrubQueryParams(req44.params, "destination") should be(Some("/"))
    val req45 = toPostRequest("destination=/abc\r")
    scrubQueryParams(req45.params, "destination") should be(Some("/abc"))
    val req46 = toPostRequest("destination=/abc\n\r")
    scrubQueryParams(req46.params, "destination") should be(Some("/abc"))
    val req47 = toPostRequest("destination=/abc\n")
    scrubQueryParams(req47.params, "destination") should be(Some("/abc"))
    val req48 = toPostRequest("destination=\n\r/abc")
    scrubQueryParams(req48.params, "destination") should be(Some("/abc"))
  }
  behavior of "scrubAString"
  it should "scrub a string of special chars" in {
    val dirtyValue = "destination\ndoo"
    scrubAString(dirtyValue) should be (Some("destination"))
    val dirtyValue2 = "\rabc"
    scrubAString(dirtyValue2) should be (Some("abc"))
  }
}
| rikesh-chouhan/borderpatrol | core/src/test/scala/com/lookout/borderpatrol/test/util/HelpersSpec.scala | Scala | mit | 5,241 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert.avro
import java.nio.ByteBuffer
import org.apache.avro.generic.GenericRecord
import org.locationtech.geomesa.convert.EvaluationContext
import org.locationtech.geomesa.convert2.transforms.TransformerFunction.NamedTransformerFunction
import org.locationtech.geomesa.convert2.transforms.{TransformerFunction, TransformerFunctionFactory}
import org.locationtech.geomesa.features.avro.AvroSimpleFeatureUtils
/** Converter functions for Avro input: avroPath navigation plus decoders for
  * the GeoMesa binary encodings of lists, maps and UUIDs.
  */
class AvroFunctionFactory extends TransformerFunctionFactory {
  override def functions: Seq[TransformerFunction] = Seq(avroPath, binaryList, binaryMap, binaryUuid)
  private val avroPath = new AvroPathFn()
  // parses a list encoded by the geomesa avro writer
  private val binaryList = TransformerFunction.pure("avroBinaryList") { args =>
    AvroSimpleFeatureUtils.decodeList(ByteBuffer.wrap(args(0).asInstanceOf[Array[Byte]]))
  }
  // parses a map encoded by the geomesa avro writer
  private val binaryMap = TransformerFunction.pure("avroBinaryMap") { args =>
    AvroSimpleFeatureUtils.decodeMap(ByteBuffer.wrap(args(0).asInstanceOf[Array[Byte]]))
  }
  // parses a uuid encoded by the geomesa avro writer
  private val binaryUuid = TransformerFunction.pure("avroBinaryUuid") { args =>
    AvroSimpleFeatureUtils.decodeUUID(ByteBuffer.wrap(args(0).asInstanceOf[Array[Byte]]))
  }
  /** avroPath(record, pathExpr): evaluates an AvroPath against a GenericRecord,
    * returning null when the path does not resolve.
    */
  class AvroPathFn extends NamedTransformerFunction(Seq("avroPath"), pure = true) {
    // Parsed lazily from args(1) on first eval and cached; getInstance hands out
    // fresh copies so the cache is per-instance.
    private var path: AvroPath = _
    override def getInstance: AvroPathFn = new AvroPathFn()
    override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any = {
      if (path == null) {
        path = AvroPath(args(1).asInstanceOf[String])
      }
      path.eval(args(0).asInstanceOf[GenericRecord]).orNull
    }
  }
}
| elahrvivaz/geomesa | geomesa-convert/geomesa-convert-avro/src/main/scala/org/locationtech/geomesa/convert/avro/AvroFunctionFactory.scala | Scala | apache-2.0 | 2,226 |
package com.iheart.playSwagger
import com.iheart.playSwagger.Domain.SwaggerParameter
import org.joda.time.DateTime
object SwaggerParameterMapper {

  /**
   * Maps a scala type name to the [[SwaggerParameter]] describing it.
   *
   * @param name            the parameter name
   * @param scalaTypeName   fully qualified scala type name, e.g. "scala.Int" or "Option[String]"
   * @param domainNameSpace when set, types inside this namespace are emitted as swagger references
   * @return the corresponding SwaggerParameter
   */
  def mapParam(name: String, scalaTypeName: String, domainNameSpace: Option[String] = None): SwaggerParameter = {

    // extracts T from e.g. "Option[T]" for the given higher-order type name
    def higherOrderType(higherOrder: String, typeName: String): Option[String] =
      s"$higherOrder\\[(\\S+)\\]".r.findFirstMatchIn(typeName).map(_.group(1))

    // extracts the element type from the supported collection types, if any
    def collectionItemType(typeName: String): Option[String] =
      List("Seq", "List", "Set", "Vector").map(higherOrderType(_, typeName)).reduce(_ orElse _)

    def prop(tp: String, format: Option[String] = None, required: Boolean = true) =
      SwaggerParameter(name, `type` = Some(tp), format = format, required = required)

    // normalize away the implicit scala / java.lang prefixes
    val typeName = scalaTypeName.replace("scala.", "").replace("java.lang.", "")

    if (domainNameSpace.exists(typeName.startsWith))
      SwaggerParameter(name, referenceType = Some(typeName))
    else {
      val optionalType = higherOrderType("Option", typeName)
      val itemType = collectionItemType(typeName)
      if (itemType.isDefined)
        SwaggerParameter(name, items = itemType)
      else if (optionalType.isDefined)
        // NOTE(review): the recursive call drops domainNameSpace, so Option[DomainType]
        // is never emitted as a reference — confirm whether that is intended
        mapParam(name, optionalType.get).copy(required = false)
      else
        typeName match {
          case "Int"    ⇒ prop("integer", Some("int32"))
          case "Long"   ⇒ prop("integer", Some("int64"))
          case "Double" ⇒ prop("number", Some("double"))
          case "Float"  ⇒ prop("number", Some("float"))
          // fixed: was "org.jodaTime.DateTime", which never matches — Joda-Time's
          // fully qualified class name is org.joda.time.DateTime
          case "org.joda.time.DateTime" ⇒ prop("integer", Some("epoch"))
          case unknown  ⇒ prop(unknown.toLowerCase())
        }
    }
  }
}
| Product-Foundry/play-swagger | src/main/scala/com/iheart/playSwagger/SwaggerParameterMapper.scala | Scala | apache-2.0 | 1,693 |
package com.github.jlprat.gameserver.fsm.model
/**
 * Companion object for [[DiscardPile]] — factory methods.
 */
object DiscardPile {

  /** A pile containing no cards. */
  def empty = new DiscardPile(Nil)

  /**
   * Builds a discard pile holding a single card.
   *
   * @param playedCard the card to seed the pile with
   * @return a pile whose only card is `playedCard`
   */
  def apply(playedCard: PlayedCard) = new DiscardPile(playedCard :: Nil)
}
/**
 * The cards discarded by all players, most recently played card first.
 *
 * @param cards discarded cards, head is the top of the pile
 */
case class DiscardPile(cards: List[PlayedCard]) {

  /**
   * Places a card on top of the pile.
   *
   * @param playedCard the card to discard
   * @return a new pile with `playedCard` on top
   */
  def ::(playedCard: PlayedCard): DiscardPile = copy(cards = playedCard :: cards)

  /** Number of cards in the pile. */
  def size: Int = cards.length

  /** The top card of the pile, if the pile is non-empty. */
  def topCard: Option[PlayedCard] = cards.headOption
}
| jlprat/akka-gameserver | src/main/scala/com/github/jlprat/gameserver/fsm/model/DiscardPile.scala | Scala | apache-2.0 | 992 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset down to a sample of Scala code snippets that meet specific criteria, giving a basic overview of the dataset's contents without deeper analysis.