| code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64 5-1M) |
|---|---|---|---|---|---|
package alu
import Chisel._
import MicroCodes.MicroCodes._

class ALU extends Module {
  val io = new Bundle {
    val wire_op          = Bits(INPUT, width = 5)
    val wire_data_reg_a  = SInt(INPUT, width = 32)
    val wire_data_reg_b  = SInt(INPUT, width = 32)
    val wire_data_output = SInt(OUTPUT, width = 32)
  }
  // Shift amount comes from operand B.
  val alu_shamt = io.wire_data_reg_b.toUInt
  // Select the result according to the decoded micro-op.
  io.wire_data_output := MuxCase(Bits(0), Array[(Chisel.Bool, UInt)](
    (io.wire_op === MICRO_OP_ADD)  -> (io.wire_data_reg_a + io.wire_data_reg_b).toUInt,
    (io.wire_op === MICRO_OP_SUB)  -> (io.wire_data_reg_a - io.wire_data_reg_b).toUInt,
    (io.wire_op === MICRO_OP_SLL)  -> ((io.wire_data_reg_a << alu_shamt)(31, 0)).toUInt,
    (io.wire_op === MICRO_OP_SRL)  -> (io.wire_data_reg_a >> alu_shamt).toUInt,
    (io.wire_op === MICRO_OP_SRA)  -> (io.wire_data_reg_a.toSInt >> alu_shamt).toUInt,
    (io.wire_op === MICRO_OP_AND)  -> (io.wire_data_reg_a & io.wire_data_reg_b).toUInt,
    (io.wire_op === MICRO_OP_OR)   -> (io.wire_data_reg_a | io.wire_data_reg_b).toUInt,
    (io.wire_op === MICRO_OP_XOR)  -> (io.wire_data_reg_a ^ io.wire_data_reg_b).toUInt,
    (io.wire_op === MICRO_OP_SLT)  -> (io.wire_data_reg_a.toSInt < io.wire_data_reg_b.toSInt).toUInt,
    (io.wire_op === MICRO_OP_SLTU) -> (io.wire_data_reg_a < io.wire_data_reg_b).toUInt,
    (io.wire_op === MICRO_OP_ADDU) -> (io.wire_data_reg_a.toUInt + io.wire_data_reg_b.toUInt),
    (io.wire_op === MICRO_OP_SUBU) -> (io.wire_data_reg_a.toUInt - io.wire_data_reg_b.toUInt)
  ))
}

class ALUTests(c: ALU) extends Tester(c) {
  // math.pow(2, 32).toInt saturates to Int.MaxValue, so this sweeps the ten largest Int values.
  val start_nu = math.pow(2, 32).toInt - 10
  val end_nu   = math.pow(2, 32).toInt
  // Drives both operands and checks the output; only the ADD result is verified here.
  def all_alu_test(reg_a: Int, reg_b: Int) {
    poke(c.io.wire_data_reg_a, reg_a)
    poke(c.io.wire_data_reg_b, reg_b)
    step(1)
    expect(c.io.wire_data_output, reg_a + reg_b)
  }
  for (testReg_a <- start_nu until end_nu) {
    for (testReg_b <- start_nu until end_nu) {
      all_alu_test(testReg_a, testReg_b)
    }
  }
}
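// A minimal driver sketch (not part of the original file) showing how the module and
// tester above could be elaborated and run with Chisel 2's chiselMainTest; the
// command-line flags are illustrative assumptions.
object ALUMain {
  def main(args: Array[String]): Unit = {
    chiselMainTest(Array("--backend", "c", "--genHarness", "--compile", "--test"),
      () => Module(new ALU())) { c => new ALUTests(c) }
  }
}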
| Coxious/MixCPU | project/alu.scala | Scala | gpl-2.0 | 2,105 |
package config
import org.apache.commons.lang3.StringUtils
import org.pac4j.core.authorization.authorizer.ProfileAuthorizer
import org.pac4j.core.context.WebContext
import org.pac4j.core.profile.CommonProfile
class CustomAuthorizer extends ProfileAuthorizer[CommonProfile] {
  def isAuthorized(context: WebContext, profiles: java.util.List[CommonProfile]): Boolean =
    isAnyAuthorized(context, profiles)

  // A profile is authorized only when its username starts with "jle".
  def isProfileAuthorized(context: WebContext, profile: CommonProfile): Boolean = {
    if (profile == null) {
      false
    } else {
      StringUtils.startsWith(profile.getUsername, "jle")
    }
  }
}
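// A hypothetical wiring sketch (not part of the original file): pac4j authorizers are
// registered on the security Config under a name that security filters can reference.
// The name "custom" is an assumption.
object CustomAuthorizerWiring {
  import org.pac4j.core.config.Config
  def buildConfig(): Config = {
    val config = new Config()
    config.addAuthorizer("custom", new CustomAuthorizer())
    config
  }
}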
| ygpark2/play-ain-board | app/config/CustomAuthorizer.scala | Scala | bsd-3-clause | 628 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import java.io.Serializable
import java.lang.reflect.{ Type, ParameterizedType }
import cascading.pipe.Pipe
import cascading.tap.SinkMode
import cascading.tuple.{ Tuple, TupleEntry, Fields }
import com.fasterxml.jackson.core.`type`.TypeReference
import com.fasterxml.jackson.module.scala._
import com.fasterxml.jackson.databind.ObjectMapper
/**
* This Source writes out the TupleEntry as a simple JSON object, using the field
* names as keys and the string representation of the values.
*
* TODO: it would be nice to have a way to add read/write transformations to pipes
* that doesn't require extending the sources and overriding methods.
*
* @param failOnEmptyLines When set to false, it just skips empty lines instead of failing the jobs. Defaults to true
* for backwards compatibility.
*/
case class JsonLine(p: String, fields: Fields = Fields.ALL,
override val sinkMode: SinkMode = SinkMode.REPLACE,
override val transformInTest: Boolean = false,
failOnEmptyLines: Boolean = true)
extends FixedPathSource(p) with TextLineScheme {
import Dsl._
import JsonLine._
override def transformForWrite(pipe: Pipe) = pipe.mapTo(fields -> 'json) {
t: TupleEntry => mapper.writeValueAsString(TupleConverter.ToMap(t))
}
override def transformForRead(pipe: Pipe) = {
@scala.annotation.tailrec
def nestedRetrieval(node: Option[Map[String, AnyRef]], path: List[String]): AnyRef = {
(path, node) match {
case (_, None) => null
case (h :: Nil, Some(fs)) => fs.get(h).orNull
case (h :: tail, Some(fs)) => fs.get(h).orNull match {
case fs: Map[String @unchecked, AnyRef @unchecked] => nestedRetrieval(Option(fs), tail)
case _ => null
}
case (Nil, _) => null
}
}
val splitFields = (0 until fields.size).map { i: Int => fields.get(i).toString.split('.').toList }
pipe.collectTo[String, Tuple]('line -> fields) {
case line: String if failOnEmptyLines || line.trim.nonEmpty =>
val fs: Map[String, AnyRef] = mapper.readValue(line, mapTypeReference)
val values = splitFields.map { nestedRetrieval(Option(fs), _) }
new cascading.tuple.Tuple(values: _*)
}
}
override def toString = "JsonLine(" + p + ", " + fields.toString + ")"
}
/**
* TODO: at the next binary incompatible version remove the AbstractFunction2/scala.Serializable jank which
* was added to get mima to not report binary errors
*/
object JsonLine extends scala.runtime.AbstractFunction5[String, Fields, SinkMode, Boolean, Boolean, JsonLine]
with Serializable with scala.Serializable {
val mapTypeReference = typeReference[Map[String, AnyRef]]
private[this] def typeReference[T: Manifest] = new TypeReference[T] {
override def getType = typeFromManifest(manifest[T])
}
private[this] def typeFromManifest(m: Manifest[_]): Type = {
if (m.typeArguments.isEmpty) { m.runtimeClass }
else new ParameterizedType {
def getRawType = m.runtimeClass
def getActualTypeArguments = m.typeArguments.map(typeFromManifest).toArray
def getOwnerType = null
}
}
val mapper = new ObjectMapper()
mapper.registerModule(DefaultScalaModule)
}
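// A hypothetical usage sketch (not part of the original file): a Scalding job that reads
// a TSV with the named fields and writes each tuple as a JSON object via JsonLine. The
// argument names and field names are assumptions.
class WriteJsonExample(args: Args) extends Job(args) {
  Tsv(args("input"), ('name, 'age))
    .write(JsonLine(args("output")))
}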
| tdyas/scalding | scalding-json/src/main/scala/com/twitter/scalding/JsonLine.scala | Scala | apache-2.0 | 3,800 |
/*
* Copyright (C) 2014 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.cassandra.lucene
import com.google.common.collect.Lists
import org.apache.cassandra.db.marshal._
import org.scalatest.{FunSuite, Matchers}
import scala.collection.JavaConverters._
/** Base test.
*
* @author Andres de la Pena `adelapena@stratio.com`
*/
class BaseScalaTest extends FunSuite with Matchers {
}
object BaseScalaTest {
val utf8 = UTF8Type.instance
val ascii = AsciiType.instance
val int32 = Int32Type.instance
val byte = ByteType.instance
val short = ShortType.instance
val long = LongType.instance
val float = FloatType.instance
val double = DoubleType.instance
val date = SimpleDateType.instance
val integer = IntegerType.instance
val uuid = UUIDType.instance
val lexicalUuid = LexicalUUIDType.instance
val timeUuid = TimeUUIDType.instance
val decimal = DecimalType.instance
val timestamp = TimestampType.instance
val boolean = BooleanType.instance
def set[A](elements: AbstractType[A], multiCell: Boolean): SetType[A] =
SetType.getInstance(elements, multiCell)
def list[A](elements: AbstractType[A], multiCell: Boolean): ListType[A] =
ListType.getInstance(elements, multiCell)
def map[A, B](keys: AbstractType[A], values: AbstractType[B], multiCell: Boolean): MapType[A, B] =
MapType.getInstance(keys, values, multiCell)
def udt(names: List[String], types: List[AbstractType[_]]): UserType =
new UserType(
"ks",
utf8.decompose("cell"),
Lists.newArrayList(names.map(x => utf8.decompose(x)).asJava),
Lists.newArrayList(types.asJava))
def reversed[A](base: AbstractType[A]): ReversedType[A] = ReversedType.getInstance(base)
}
| adelapena/cassandra-lucene-index | plugin/src/test/scala/com/stratio/cassandra/lucene/BaseScalaTest.scala | Scala | apache-2.0 | 2,276 |
/**
* Copyright 2011-2012 eBusiness Information, Groupe Excilys (www.excilys.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.excilys.ebi.gatling.http.check.bodypart
import com.excilys.ebi.gatling.core.check.Matcher
import com.excilys.ebi.gatling.core.session.{ NOOP_EVALUATABLE_STRING, Session }
import com.excilys.ebi.gatling.http.request.HttpPhase.BodyPartReceived
import com.excilys.ebi.gatling.http.response.ExtendedResponse
import com.excilys.ebi.gatling.http.check.HttpCheck
class ChecksumCheck(val algorithm: String, matcher: Matcher[ExtendedResponse, String], saveAs: Option[String]) extends HttpCheck(NOOP_EVALUATABLE_STRING, matcher, saveAs, BodyPartReceived)
| Tjoene/thesis | Case_Programs/gatling-1.4.0/gatling-http/src/main/scala/com/excilys/ebi/gatling/http/check/bodypart/ChecksumCheck.scala | Scala | gpl-2.0 | 1,202 |
package ch.wsl.rest.service
import spray.http.StatusCodes
import akka.actor.Actor
import spray.routing.{HttpService, RejectionHandler}
// we don't implement our route structure directly in the service actor because
// we want to be able to test it independently, without having to spin up an actor
class MainService extends Actor with HttpService with RouteRoot {
// the HttpService trait defines only one abstract member, which
// connects the services environment to the enclosing actor or test
def actorRefFactory = context
// Handle HTTP errors, like page not found, etc.
implicit val myRejectionHandler = RejectionHandler {
  case Nil => complete(StatusCodes.NotFound, "The requested resource could not be found.")
  case t =>
    println(t)
    complete(StatusCodes.BadRequest, "Something went wrong here: " + t)
}
// this actor only runs our route, but you could add
// other things here, like request stream processing
// or timeout handling
def receive = runRoute(route)
}
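// RouteRoot is referenced above but not defined in this file; a minimal hypothetical
// sketch of such a trait with spray-routing (the path and response are assumptions):
trait ExampleRouteRoot extends HttpService {
  val route =
    path("status") {
      get {
        complete("OK")
      }
    }
}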
| minettiandrea/postgres-restify | server/src/main/scala/ch/wsl/rest/service/MainService.scala | Scala | apache-2.0 | 1,036 |
package im.actor.server.enrich
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import im.actor.server
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{ FlatSpecLike, Matchers }
import im.actor.server.{ ActorSpecification, SqlSpecHelpers }
abstract class BaseRichMessageSpec(_system: ActorSystem = { ActorSpecification.createSystem() })
extends server.ActorSuite(_system) with FlatSpecLike with ScalaFutures with Matchers with SqlSpecHelpers {
implicit val materializer = ActorMaterializer()
}
| boneyao/actor-platform | actor-server/actor-tests/src/test/scala/im/actor/server/enrich/BaseRichMessageSpec.scala | Scala | mit | 539 |
/*
* This file is part of the Linux Variability Modeling Tools (LVAT).
*
* Copyright (C) 2010 Steven She <shshe@gsd.uwaterloo.ca>
*
* LVAT is free software: you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the
* Free Software Foundation, either version 3 of the License, or (at your
* option) any later version.
*
* LVAT is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
* more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with LVAT. (See files COPYING and COPYING.LESSER.) If not, see
* <http://www.gnu.org/licenses/>.
*/
package gsd.linux
import org.kiama.rewriting.Rewriter
/**
* A simple Tseitin's Transformation that introduces a new variable for each
* sub-expression.
*
* An improved version should eventually replace the manual transformation done
* in the LVAT BooleanTranslation.
*
* @author Steven She (shshe@gsd.uwaterloo.ca)
*/
object Tseitin {
case class TransformResult(expressions: List[BExpr], generated: List[String])
case class IdGen(start: Int, prefix: String) {
var i = start
def next = { i+=1; prefix + i }
def allIds = (start to i).map { prefix + _ }.toList
}
def transform(in: List[BExpr], prefix: String = "_T", offset: Int = 0): TransformResult = {
val idGen = IdGen(offset, prefix)
def _transform(ein: BExpr): List[BExpr] = {
import gsd.linux.cnf.CNFBuilder._
val e = Rewriter.rewrite(sIffRule <* sImpliesRule)(ein).simplify
def _tt(e: BExpr): Pair[BId, List[BExpr]] = {
e match {
case v: BId => {
val eId = BId(idGen.next)
eId -> List((!eId | v), (eId | !v))
}
case BNot(x) => {
val (xId, xExprs) = _tt(x)
val eId = BId(idGen.next)
eId -> ((!eId | !xId) :: (eId | xId) :: xExprs)
}
case BAnd(x,y) => {
val (xId, xExprs) = _tt(x)
val (yId, yExprs) = _tt(y)
val eId = BId(idGen.next)
eId -> ((!eId | xId) :: (!eId | yId) :: (eId | !xId | !yId) :: xExprs ::: yExprs)
}
case BOr(x,y) => {
val (xId, xExprs) = _tt(x)
val (yId, yExprs) = _tt(y)
val eId = BId(idGen.next)
eId -> ((eId | !xId) :: (eId | !yId) :: (!eId | xId | yId) :: xExprs ::: yExprs)
}
case _ => sys.error("not supported: " + e + " from: " + in)
}
}
val (ttId, ttExprs) = _tt(e)
ttId :: ttExprs
}
val results = in map _transform
TransformResult(results reduceLeft (_ ::: _), idGen.allIds)
}
}
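// A usage sketch (not part of the original file), assuming the BExpr constructors used
// above (BId, BAnd, BOr): transform A & (B | C) into equisatisfiable clauses over the
// generated _T* variables.
object TseitinExample {
  import Tseitin.TransformResult
  def main(args: Array[String]): Unit = {
    val TransformResult(clauses, generated) =
      Tseitin.transform(List(BAnd(BId("A"), BOr(BId("B"), BId("C")))))
    clauses foreach println
    println("generated variables: " + generated.mkString(", "))
  }
}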
| scas-mdd/linux-variability-analysis-tools.fm-translation | src/main/scala/gsd/linux/Tseitin.scala | Scala | gpl-3.0 | 2,848 |
package coursier.cli.install
import java.nio.file.{Path, Paths}
import caseapp.Tag
import cats.data.{Validated, ValidatedNel}
final case class UninstallParams(
dir: Path,
all: Boolean,
verbosity: Int
)
object UninstallParams {
def apply(options: UninstallOptions): ValidatedNel[String, UninstallParams] = {
val dir = options.installDir.filter(_.nonEmpty) match {
case Some(d) => Paths.get(d)
case None => SharedInstallParams.defaultDir
}
val all = options.all
val verbosityV =
if (Tag.unwrap(options.quiet) > 0 && Tag.unwrap(options.verbose) > 0)
Validated.invalidNel("Cannot have both quiet, and verbosity > 0")
else
Validated.validNel(Tag.unwrap(options.verbose) - Tag.unwrap(options.quiet))
verbosityV.map { verbosity =>
UninstallParams(
dir,
all,
verbosity
)
}
}
}
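// A hypothetical usage sketch (not part of the original file): validate parsed options
// and branch on the accumulated errors. `UninstallOptions()` with default values is an
// assumption about the options case class.
object UninstallParamsExample {
  def main(args: Array[String]): Unit = {
    UninstallParams(UninstallOptions()) match {
      case Validated.Valid(params)   => println(s"uninstall dir: ${params.dir}")
      case Validated.Invalid(errors) => errors.toList.foreach(err => System.err.println(err))
    }
  }
}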
| alexarchambault/coursier | modules/cli/src/main/scala/coursier/cli/install/UninstallParams.scala | Scala | apache-2.0 | 890 |
package gv
package isi
package std.conversions
object ImplicitResolutionOrder {
trait P0 extends AnyRef
// with convertible.ImplicitResolutionOrder.Conversions
trait P50 extends AnyRef with P0
with ByteConversions
trait P60 extends AnyRef with P50
with JavaIoConversions
trait P70 extends AnyRef with P60
with ToFutureConversions
with UriConversions
with PathConversions
trait P80 extends AnyRef with P70
with ExecutionContextConversions
with ToTryConversions
trait P90 extends AnyRef with P80
trait P100 extends AnyRef with P90
trait Conversions extends AnyRef with P100
}
| mouchtaris/jleon | src/main/scala-2.12/gv/isi/std/conversions/ImplicitResolutionOrder.scala | Scala | mit | 626 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.io.File
import java.sql.Timestamp
import java.text.SimpleDateFormat
import org.apache.spark.sql.{AnalysisException, Column, DataFrame, QueryTest, Row}
import org.apache.spark.sql.execution.FileSourceScanExec
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.{IntegerType, LongType, StringType, StructField, StructType}
class FileMetadataStructSuite extends QueryTest with SharedSparkSession {
val data0: Seq[Row] = Seq(Row("jack", 24, Row(12345L, "uom")))
val data1: Seq[Row] = Seq(Row("lily", 31, Row(54321L, "ucb")))
val schema: StructType = new StructType()
.add(StructField("name", StringType))
.add(StructField("age", IntegerType))
.add(StructField("info", new StructType()
.add(StructField("id", LongType))
.add(StructField("university", StringType))))
val schemaWithNameConflicts: StructType = new StructType()
.add(StructField("name", StringType))
.add(StructField("age", IntegerType))
.add(StructField("_METADATA", new StructType()
.add(StructField("id", LongType))
.add(StructField("university", StringType))))
private val METADATA_FILE_PATH = "_metadata.file_path"
private val METADATA_FILE_NAME = "_metadata.file_name"
private val METADATA_FILE_SIZE = "_metadata.file_size"
private val METADATA_FILE_MODIFICATION_TIME = "_metadata.file_modification_time"
/**
* This test wrapper will test for both row-based and column-based file formats:
* (json and parquet) with nested schema:
* 1. create df0 and df1 and save them as testFileFormat under /data/f0 and /data/f1
* 2. read the path /data, return the df for further testing
* 3. create actual metadata maps for both files under /data/f0 and /data/f1 for further testing
*
* The final df will have data:
* jack | 24 | {12345, uom}
* lily | 31 | {54321, ucb}
*
* The schema of the df will be the `fileSchema` provided to this method
*
* This test wrapper will provide a `df` and actual metadata map `f0`, `f1`
*/
private def metadataColumnsTest(
testName: String, fileSchema: StructType)
(f: (DataFrame, Map[String, Any], Map[String, Any]) => Unit): Unit = {
Seq("json", "parquet").foreach { testFileFormat =>
test(s"metadata struct ($testFileFormat): " + testName) {
withTempDir { dir =>
import scala.collection.JavaConverters._
// 1. create df0 and df1 and save under /data/f0 and /data/f1
val df0 = spark.createDataFrame(data0.asJava, fileSchema)
val f0 = new File(dir, "data/f0").getCanonicalPath
df0.coalesce(1).write.format(testFileFormat).save(f0)
val df1 = spark.createDataFrame(data1.asJava, fileSchema)
val f1 = new File(dir, "data/f1").getCanonicalPath
df1.coalesce(1).write.format(testFileFormat).save(f1)
// 2. read both f0 and f1
val df = spark.read.format(testFileFormat).schema(fileSchema)
.load(new File(dir, "data").getCanonicalPath + "/*")
val realF0 = new File(dir, "data/f0").listFiles()
.filter(_.getName.endsWith(s".$testFileFormat")).head
val realF1 = new File(dir, "data/f1").listFiles()
.filter(_.getName.endsWith(s".$testFileFormat")).head
// 3. create f0 and f1 metadata data
val f0Metadata = Map(
METADATA_FILE_PATH -> realF0.toURI.toString,
METADATA_FILE_NAME -> realF0.getName,
METADATA_FILE_SIZE -> realF0.length(),
METADATA_FILE_MODIFICATION_TIME -> new Timestamp(realF0.lastModified())
)
val f1Metadata = Map(
METADATA_FILE_PATH -> realF1.toURI.toString,
METADATA_FILE_NAME -> realF1.getName,
METADATA_FILE_SIZE -> realF1.length(),
METADATA_FILE_MODIFICATION_TIME -> new Timestamp(realF1.lastModified())
)
f(df, f0Metadata, f1Metadata)
}
}
}
}
metadataColumnsTest("read partial/all metadata struct fields", schema) { (df, f0, f1) =>
// read all available metadata struct fields
checkAnswer(
df.select("name", "age", "info",
METADATA_FILE_NAME, METADATA_FILE_PATH,
METADATA_FILE_SIZE, METADATA_FILE_MODIFICATION_TIME),
Seq(
Row("jack", 24, Row(12345L, "uom"),
f0(METADATA_FILE_NAME), f0(METADATA_FILE_PATH),
f0(METADATA_FILE_SIZE), f0(METADATA_FILE_MODIFICATION_TIME)),
Row("lily", 31, Row(54321L, "ucb"),
f1(METADATA_FILE_NAME), f1(METADATA_FILE_PATH),
f1(METADATA_FILE_SIZE), f1(METADATA_FILE_MODIFICATION_TIME))
)
)
// read a part of metadata struct fields
checkAnswer(
df.select("name", "info.university", METADATA_FILE_NAME, METADATA_FILE_SIZE),
Seq(
Row("jack", "uom", f0(METADATA_FILE_NAME), f0(METADATA_FILE_SIZE)),
Row("lily", "ucb", f1(METADATA_FILE_NAME), f1(METADATA_FILE_SIZE))
)
)
}
metadataColumnsTest("read metadata struct fields with random ordering", schema) { (df, f0, f1) =>
// read a part of metadata struct fields with random ordering
checkAnswer(
df.select(METADATA_FILE_NAME, "name", METADATA_FILE_SIZE, "info.university"),
Seq(
Row(f0(METADATA_FILE_NAME), "jack", f0(METADATA_FILE_SIZE), "uom"),
Row(f1(METADATA_FILE_NAME), "lily", f1(METADATA_FILE_SIZE), "ucb")
)
)
}
metadataColumnsTest("read metadata struct fields with expressions", schema) { (df, f0, f1) =>
checkAnswer(
df.select(
// substring of file name
substring(col(METADATA_FILE_NAME), 1, 3),
// format timestamp
date_format(col(METADATA_FILE_MODIFICATION_TIME), "yyyy-MM")
.as("_file_modification_year_month"),
// convert to kb
col(METADATA_FILE_SIZE).divide(lit(1024)).as("_file_size_kb"),
// get the file format
substring_index(col(METADATA_FILE_PATH), ".", -1).as("_file_format")
),
Seq(
Row(
f0(METADATA_FILE_NAME).toString.substring(0, 3), // sql substring vs scala substring
new SimpleDateFormat("yyyy-MM").format(f0(METADATA_FILE_MODIFICATION_TIME)),
f0(METADATA_FILE_SIZE).asInstanceOf[Long] / 1024.toDouble,
f0(METADATA_FILE_PATH).toString.split("\\.").takeRight(1).head
),
Row(
f1(METADATA_FILE_NAME).toString.substring(0, 3), // sql substring vs scala substring
new SimpleDateFormat("yyyy-MM").format(f1(METADATA_FILE_MODIFICATION_TIME)),
f1(METADATA_FILE_SIZE).asInstanceOf[Long] / 1024.toDouble,
f1(METADATA_FILE_PATH).toString.split("\\.").takeRight(1).head
)
)
)
}
metadataColumnsTest("select all will not select metadata struct fields", schema) { (df, _, _) =>
checkAnswer(
df.select("*"),
Seq(
Row("jack", 24, Row(12345L, "uom")),
Row("lily", 31, Row(54321L, "ucb"))
)
)
}
metadataColumnsTest("metadata will not overwrite user data",
schemaWithNameConflicts) { (df, _, _) =>
// the user data has the schema: name, age, _metadata.id, _metadata.university
// select user data
checkAnswer(
df.select("name", "age", "_METADATA", "_metadata"),
Seq(
Row("jack", 24, Row(12345L, "uom"), Row(12345L, "uom")),
Row("lily", 31, Row(54321L, "ucb"), Row(54321L, "ucb"))
)
)
// select metadata will fail when analysis
val ex = intercept[AnalysisException] {
df.select("name", METADATA_FILE_NAME).collect()
}
assert(ex.getMessage.contains("No such struct field file_name in id, university"))
}
metadataColumnsTest("select only metadata", schema) { (df, f0, f1) =>
checkAnswer(
df.select(METADATA_FILE_NAME, METADATA_FILE_PATH,
METADATA_FILE_SIZE, METADATA_FILE_MODIFICATION_TIME),
Seq(
Row(f0(METADATA_FILE_NAME), f0(METADATA_FILE_PATH),
f0(METADATA_FILE_SIZE), f0(METADATA_FILE_MODIFICATION_TIME)),
Row(f1(METADATA_FILE_NAME), f1(METADATA_FILE_PATH),
f1(METADATA_FILE_SIZE), f1(METADATA_FILE_MODIFICATION_TIME))
)
)
checkAnswer(
df.select("_metadata"),
Seq(
Row(Row(f0(METADATA_FILE_PATH), f0(METADATA_FILE_NAME),
f0(METADATA_FILE_SIZE), f0(METADATA_FILE_MODIFICATION_TIME))),
Row(Row(f1(METADATA_FILE_PATH), f1(METADATA_FILE_NAME),
f1(METADATA_FILE_SIZE), f1(METADATA_FILE_MODIFICATION_TIME)))
)
)
}
metadataColumnsTest("select and re-select", schema) { (df, f0, f1) =>
checkAnswer(
df.select("name", "age", "info",
METADATA_FILE_NAME, METADATA_FILE_PATH,
METADATA_FILE_SIZE, METADATA_FILE_MODIFICATION_TIME)
.select("name", "file_path"), // cast _metadata.file_path as file_path
Seq(
Row("jack", f0(METADATA_FILE_PATH)),
Row("lily", f1(METADATA_FILE_PATH))
)
)
}
metadataColumnsTest("alias", schema) { (df, f0, f1) =>
val aliasDF = df.select(
Column("name").as("myName"),
Column("age").as("myAge"),
Column(METADATA_FILE_NAME).as("myFileName"),
Column(METADATA_FILE_SIZE).as("myFileSize")
)
// check schema
val expectedSchema = new StructType()
.add(StructField("myName", StringType))
.add(StructField("myAge", IntegerType))
.add(StructField("myFileName", StringType))
.add(StructField("myFileSize", LongType))
assert(aliasDF.schema.fields.toSet == expectedSchema.fields.toSet)
// check data
checkAnswer(
aliasDF,
Seq(
Row("jack", 24, f0(METADATA_FILE_NAME), f0(METADATA_FILE_SIZE)),
Row("lily", 31, f1(METADATA_FILE_NAME), f1(METADATA_FILE_SIZE))
)
)
}
metadataColumnsTest("filter", schema) { (df, f0, _) =>
val filteredDF = df.select("name", "age", METADATA_FILE_NAME)
.where(Column(METADATA_FILE_NAME) === f0(METADATA_FILE_NAME))
// check the filtered file
val partitions = filteredDF.queryExecution.sparkPlan.collectFirst {
case p: FileSourceScanExec => p.selectedPartitions
}.get
assert(partitions.length == 1) // 1 partition
assert(partitions.head.files.length == 1) // 1 file in that partition
assert(partitions.head.files.head.getPath.toString == f0(METADATA_FILE_PATH)) // the file is f0
// check result
checkAnswer(
filteredDF,
Seq(
// _file_name == f0's name, so we will only have 1 row
Row("jack", 24, f0(METADATA_FILE_NAME))
)
)
}
metadataColumnsTest("filter on metadata and user data", schema) { (df, _, f1) =>
val filteredDF = df.select("name", "age", "info",
METADATA_FILE_NAME, METADATA_FILE_PATH,
METADATA_FILE_SIZE, METADATA_FILE_MODIFICATION_TIME)
// mix metadata column + user column
.where(Column(METADATA_FILE_NAME) === f1(METADATA_FILE_NAME) and Column("name") === "lily")
// only metadata columns
.where(Column(METADATA_FILE_PATH) === f1(METADATA_FILE_PATH))
// only user column
.where("age == 31")
// check the filtered file
val partitions = filteredDF.queryExecution.sparkPlan.collectFirst {
case p: FileSourceScanExec => p.selectedPartitions
}.get
assert(partitions.length == 1) // 1 partition
assert(partitions.head.files.length == 1) // 1 file in that partition
assert(partitions.head.files.head.getPath.toString == f1(METADATA_FILE_PATH)) // the file is f1
// check result
checkAnswer(
filteredDF,
Seq(Row("lily", 31, Row(54321L, "ucb"),
f1(METADATA_FILE_NAME), f1(METADATA_FILE_PATH),
f1(METADATA_FILE_SIZE), f1(METADATA_FILE_MODIFICATION_TIME)))
)
}
Seq(true, false).foreach { caseSensitive =>
metadataColumnsTest(s"upper/lower case when case " +
s"sensitive is $caseSensitive", schemaWithNameConflicts) { (df, f0, f1) =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
// file schema: name, age, _METADATA.id, _METADATA.university
if (caseSensitive) {
// for case sensitive mode:
// _METADATA is user data
// _metadata is metadata
checkAnswer(
df.select("name", "age", "_METADATA", "_metadata"),
Seq(
Row("jack", 24, Row(12345L, "uom"),
Row(f0(METADATA_FILE_PATH), f0(METADATA_FILE_NAME),
f0(METADATA_FILE_SIZE), f0(METADATA_FILE_MODIFICATION_TIME))),
Row("lily", 31, Row(54321L, "ucb"),
Row(f1(METADATA_FILE_PATH), f1(METADATA_FILE_NAME),
f1(METADATA_FILE_SIZE), f1(METADATA_FILE_MODIFICATION_TIME)))
)
)
} else {
// for case insensitive mode:
// _METADATA and _metadata are both user data
// select user data
checkAnswer(
df.select("name", "age",
// user columns
"_METADATA", "_metadata",
"_metadata.ID", "_METADATA.UniVerSity"),
Seq(
Row("jack", 24, Row(12345L, "uom"), Row(12345L, "uom"), 12345L, "uom"),
Row("lily", 31, Row(54321L, "ucb"), Row(54321L, "ucb"), 54321L, "ucb")
)
)
// select metadata will fail when analysis - metadata cannot overwrite user data
val ex = intercept[AnalysisException] {
df.select("name", "_metadata.file_name").collect()
}
assert(ex.getMessage.contains("No such struct field file_name in id, university"))
val ex1 = intercept[AnalysisException] {
df.select("name", "_METADATA.file_NAME").collect()
}
assert(ex1.getMessage.contains("No such struct field file_NAME in id, university"))
}
}
}
}
Seq("true", "false").foreach { offHeapColumnVectorEnabled =>
withSQLConf("spark.sql.columnVector.offheap.enabled" -> offHeapColumnVectorEnabled) {
metadataColumnsTest(s"read metadata with " +
s"offheap set to $offHeapColumnVectorEnabled", schema) { (df, f0, f1) =>
// read all available metadata struct fields
checkAnswer(
df.select("name", "age", "info",
METADATA_FILE_NAME, METADATA_FILE_PATH,
METADATA_FILE_SIZE, METADATA_FILE_MODIFICATION_TIME),
Seq(
Row("jack", 24, Row(12345L, "uom"), f0(METADATA_FILE_NAME), f0(METADATA_FILE_PATH),
f0(METADATA_FILE_SIZE), f0(METADATA_FILE_MODIFICATION_TIME)),
Row("lily", 31, Row(54321L, "ucb"), f1(METADATA_FILE_NAME), f1(METADATA_FILE_PATH),
f1(METADATA_FILE_SIZE), f1(METADATA_FILE_MODIFICATION_TIME))
)
)
// read a part of metadata struct fields
checkAnswer(
df.select("name", "info.university", METADATA_FILE_NAME, METADATA_FILE_SIZE),
Seq(
Row("jack", "uom", f0(METADATA_FILE_NAME), f0(METADATA_FILE_SIZE)),
Row("lily", "ucb", f1(METADATA_FILE_NAME), f1(METADATA_FILE_SIZE))
)
)
}
}
}
Seq("true", "false").foreach { enabled =>
withSQLConf("spark.sql.optimizer.nestedSchemaPruning.enabled" -> enabled) {
metadataColumnsTest(s"read metadata with " +
s"nestedSchemaPruning set to $enabled", schema) { (df, f0, f1) =>
// read a part of data: schema pruning
checkAnswer(
df.select("name", "info.university", METADATA_FILE_NAME, METADATA_FILE_SIZE),
Seq(
Row("jack", "uom", f0(METADATA_FILE_NAME), f0(METADATA_FILE_SIZE)),
Row("lily", "ucb", f1(METADATA_FILE_NAME), f1(METADATA_FILE_SIZE))
)
)
}
}
}
metadataColumnsTest("prune metadata schema in projects", schema) { (df, f0, f1) =>
val prunedDF = df.select("name", "age", "info.id", METADATA_FILE_NAME)
val fileSourceScanMetaCols = prunedDF.queryExecution.sparkPlan.collectFirst {
case p: FileSourceScanExec => p.metadataColumns
}.get
assert(fileSourceScanMetaCols.size == 1)
assert(fileSourceScanMetaCols.head.name == "file_name")
checkAnswer(
prunedDF,
Seq(Row("jack", 24, 12345L, f0(METADATA_FILE_NAME)),
Row("lily", 31, 54321L, f1(METADATA_FILE_NAME)))
)
}
metadataColumnsTest("prune metadata schema in filters", schema) { (df, f0, f1) =>
val prunedDF = df.select("name", "age", "info.id")
.where(col(METADATA_FILE_PATH).contains("data/f0"))
val fileSourceScanMetaCols = prunedDF.queryExecution.sparkPlan.collectFirst {
case p: FileSourceScanExec => p.metadataColumns
}.get
assert(fileSourceScanMetaCols.size == 1)
assert(fileSourceScanMetaCols.head.name == "file_path")
checkAnswer(
prunedDF,
Seq(Row("jack", 24, 12345L))
)
}
metadataColumnsTest("prune metadata schema in projects and filters", schema) { (df, f0, f1) =>
val prunedDF = df.select("name", "age", "info.id", METADATA_FILE_SIZE)
.where(col(METADATA_FILE_PATH).contains("data/f0"))
val fileSourceScanMetaCols = prunedDF.queryExecution.sparkPlan.collectFirst {
case p: FileSourceScanExec => p.metadataColumns
}.get
assert(fileSourceScanMetaCols.size == 2)
assert(fileSourceScanMetaCols.map(_.name).toSet == Set("file_size", "file_path"))
checkAnswer(
prunedDF,
Seq(Row("jack", 24, 12345L, f0(METADATA_FILE_SIZE)))
)
}
}
| shaneknapp/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileMetadataStructSuite.scala | Scala | apache-2.0 | 18,362 |
package java.util
trait ListIterator[E] extends Iterator[E] {
def add(e: E): Unit
def hasPrevious(): Boolean
def previous(): E
def previousIndex(): Int
def nextIndex(): Int
def set(e: E): Unit
}
| doron123/scala-js | javalib/src/main/scala/java/util/ListIterator.scala | Scala | bsd-3-clause | 208 |
package com.sfxcode.sapphire.core.demo.login.controller
import com.sfxcode.sapphire.core.controller.ViewController
import com.sfxcode.sapphire.core.demo.login.LoginApplicationController
import com.sfxcode.sapphire.core.demo.login.model.User
import com.sfxcode.sapphire.core.value._
import javafx.event.ActionEvent
import javafx.scene.control.CheckBox
class ProfileController extends ViewController {
lazy val userAdapter: FXBeanAdapter[User] = FXBeanAdapter[User](this)
override def didGainVisibility() {
super.didGainVisibility()
val bindings = KeyBindings("email", "phone", "address", "subscribed")
bindings.add("user", "User: ${_self.name()} Mailsize: (${_self.email().length()})")
userAdapter.addBindings(bindings)
userAdapter.set(applicationController().applicationUser.get)
}
def actionLogout(event: ActionEvent) {
userAdapter.revert()
userAdapter.unset()
applicationController().applicationUser = None
applicationController().showLogin()
}
def applicationController(): LoginApplicationController = registeredBean[LoginApplicationController].get
def actionUpdate(event: ActionEvent) {
debugUserData()
}
def debugUserData() {
val maybeTextField = locateTextField("user")
println(maybeTextField)
val checkBoxOption = locate[CheckBox]("#subscribed")
checkBoxOption.foreach(cb => println(cb.selectedProperty.get))
println(applicationController().applicationUser.get.bean)
}
}
| sfxcode/sapphire-core | demos/login/src/main/scala/com/sfxcode/sapphire/core/demo/login/controller/ProfileController.scala | Scala | apache-2.0 | 1,470 |
object DepMethTypes {
trait Foo {
type Bar
def bar: Bar
}
object AFoo extends Foo {
type Bar = String
def bar = ""
}
val x: Foo = null
def bar(foo: Foo): foo.Bar = foo.bar /* Expression of type DepMethTypes.Foo#Bar doesn't conform to expected type foo.type#Bar */
val s: String = /*start*/bar(AFoo)/*end*/ /* Expression of type foo.type#Bar doesn't conform to expected type String */
}
//DepMethTypes.AFoo.Bar
| ilinum/intellij-scala | testdata/typeInference/bugs5/SCL5048.scala | Scala | apache-2.0 | 443 |
/*
* Copyright 2017 Georgi Krastev
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink
package api.scala.derived.typeinfo
import api.common.ExecutionConfig
import api.common.typeinfo.TypeInformation
import api.java.typeutils.ObjectArrayTypeInfo
import api.scala.typeutils._
import api.scala.derived.typeutils._
import scala.collection.SortedMap
import scala.collection.generic._
import scala.collection.immutable
import scala.collection.mutable
import scala.reflect._
/** `TypeInformation` instances for `Array`, `Traversable` and `Map`. */
private[typeinfo] abstract class MkTypeInfo4_Traversable extends MkTypeInfo5_Value {
/** Creates `TypeInformation` for a (non-primitive) `Array`. */
implicit def mkArrayTypeInfo[E](implicit tiE: TypeInformation[E]): MkTypeInfo[Array[E]] =
this(ObjectArrayTypeInfo.getInfoFor(tiE))
/** Creates `TypeInformation` for a `Traversable` collection. */
implicit def mkTraversableTypeInfo[T[e] <: Traversable[e], E](
implicit
tiE: TypeInformation[E],
tag: ClassTag[T[E]],
cbf: CanBuild[E, T[E]],
gen: T[E] <:< GenericTraversableTemplate[E, T]
): MkTypeInfo[T[E]] = this {
// Workaround `CanBuildFrom` not being `Serializable`.
new TraversableTypeInfo[T[E], E](tag.runtimeClass.asInstanceOf[Class[T[E]]], tiE) {
val empty = cbf().result()
def createSerializer(config: ExecutionConfig) =
new TraversableSerializer[T[E], E](elementTypeInfo.createSerializer(config)) {
override def toString = s"TraversableSerializer[$elementSerializer]"
def getCbf = new CanBuildFrom[T[E], E, T[E]] {
def apply(from: T[E]) = gen(from).genericBuilder
def apply() = apply(empty)
}
}
}
}
/** Creates `TypeInformation` for an (immutable) `Map`. */
implicit def mkMapTypeInfo[K, V](
implicit tiKV: TypeInformation[(K, V)]
): MkTypeInfo[Map[K, V]] = mkInjectTypeInfo(
Inject(_.toSeq, (seq: Seq[(K, V)]) => seq.toMap), seqTypeInfo, classTag
)
/** Creates `TypeInformation` for a `mutable.Map`. */
implicit def mkMutableMapTypeInfo[K, V](
implicit tiKV: TypeInformation[(K, V)]
): MkTypeInfo[mutable.Map[K, V]] = mkInjectTypeInfo(
Inject(_.toSeq, (seq: Seq[(K, V)]) => mutable.Map(seq: _*)), seqTypeInfo, classTag
)
/** Creates `TypeInformation` for a `SortedMap`. */
implicit def mkSortedMapTypeInfo[K, V](
implicit tiKV: TypeInformation[(K, V)], ord: Ordering[K]
): MkTypeInfo[SortedMap[K, V]] = mkInjectTypeInfo(
Inject(_.toSeq, (seq: Seq[(K, V)]) => SortedMap(seq: _*)), seqTypeInfo, classTag
)
/** Creates `TypeInformation` for an `immutable.TreeMap`. */
implicit def mkImmutableTreeMapTypeInfo[K, V](
implicit tiKV: TypeInformation[(K, V)], ord: Ordering[K]
): MkTypeInfo[immutable.TreeMap[K, V]] = mkInjectTypeInfo(
Inject(_.toSeq, (seq: Seq[(K, V)]) => immutable.TreeMap(seq: _*)), seqTypeInfo, classTag
)
/** Creates `TypeInformation` for an `immutable.HashMap`. */
implicit def mkImmutableHashMapTypeInfo[K, V](
implicit tiKV: TypeInformation[(K, V)]
): MkTypeInfo[immutable.HashMap[K, V]] = mkInjectTypeInfo(
Inject(_.toSeq, (seq: Seq[(K, V)]) => immutable.HashMap(seq: _*)), seqTypeInfo, classTag
)
/** Creates `TypeInformation` for a `mutable.HashMap`. */
implicit def mkMutableHashMapTypeInfo[K, V](
implicit tiKV: TypeInformation[(K, V)]
): MkTypeInfo[mutable.HashMap[K, V]] = mkInjectTypeInfo(
Inject(_.toSeq, (seq: Seq[(K, V)]) => mutable.HashMap(seq: _*)), seqTypeInfo, classTag
)
/** Used to inject `Map` types into `Seq`. */
private def seqTypeInfo[E](implicit tiE: TypeInformation[E]): TypeInformation[Seq[E]] =
mkTraversableTypeInfo[Seq, E].apply()
}
| joroKr21/flink-shapeless | src/main/scala/org/apache/flink/api/scala/derived/typeinfo/MkTypeInfo4_Traversable.scala | Scala | apache-2.0 | 4,259 |
package io.github.tailhq.dynaml.models.gp
import io.github.tailhq.dynaml.kernels.LocalScalarKernel
import org.apache.log4j.Logger
/**
* @author tailhq date: 11/8/16.
*
* Abstract implementation of a multi-task Gaussian process,
* as outlined in Lawrence et al. 2012 on arXiv.
* @tparam I The index set of the GP.
*/
class MTGPRegressionModel[I](
cov: LocalScalarKernel[(I, Int)],
n: LocalScalarKernel[(I, Int)],
data: Seq[Stream[(I, Double)]],
num: Int, numOutputs: Int) extends
AbstractGPRegressionModel[
Seq[Stream[(I, Double)]],
(I, Int)](cov, n, data, num*numOutputs) {
assert(
data.length == numOutputs,
"Number of outputs in data should match numOutputs constructor variable"
)
private val logger = Logger.getLogger(this.getClass)
val noutputs = numOutputs
/**
* Convert from the underlying data structure to
* Seq[(I, Y)] where I is the index set of the GP
* and Y is the value/label type.
**/
override def dataAsSeq(data: Seq[Stream[(I, Double)]]): Seq[((I, Int), Double)] =
data.zipWithIndex.map((patternSet) =>
patternSet._1.map(patternAndLabel => ((patternAndLabel._1, patternSet._2), patternAndLabel._2))
).reduceLeft((s1, s2) => s1 ++ s2)
}
| mandar2812/DynaML | dynaml-core/src/main/scala/io/github/tailhq/dynaml/models/gp/MTGPRegressionModel.scala | Scala | apache-2.0 | 1,232 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.integrationtest
import java.io.{Closeable, File, PrintWriter}
import java.nio.file.{Files, Path}
import java.util.concurrent.CountDownLatch
import scala.collection.JavaConverters._
import scala.util.Try
import io.fabric8.kubernetes.client.dsl.ExecListener
import okhttp3.Response
import org.apache.commons.io.output.ByteArrayOutputStream
import org.apache.hadoop.util.VersionInfo
import org.apache.spark.{SPARK_VERSION, SparkException}
import org.apache.spark.internal.Logging
import org.apache.spark.util.{Utils => SparkUtils}
object Utils extends Logging {
def getExamplesJarName(): String = {
val scalaVersion = scala.util.Properties.versionNumberString
.split("\\.")
.take(2)
.mkString(".")
s"spark-examples_$scalaVersion-${SPARK_VERSION}.jar"
}
def tryWithResource[R <: Closeable, T](createResource: => R)(f: R => T): T = {
val resource = createResource
try f.apply(resource) finally resource.close()
}
def executeCommand(cmd: String*)(
implicit podName: String,
kubernetesTestComponents: KubernetesTestComponents): String = {
val out = new ByteArrayOutputStream()
val pod = kubernetesTestComponents
.kubernetesClient
.pods()
.withName(podName)
// Avoid timing issues by looking for open/close
class ReadyListener extends ExecListener {
val openLatch: CountDownLatch = new CountDownLatch(1)
val closeLatch: CountDownLatch = new CountDownLatch(1)
override def onOpen(response: Response) {
openLatch.countDown()
}
override def onClose(a: Int, b: String) {
closeLatch.countDown()
}
override def onFailure(e: Throwable, r: Response) {
}
def waitForInputStreamToConnect(): Unit = {
openLatch.await()
}
def waitForClose(): Unit = {
closeLatch.await()
}
}
val listener = new ReadyListener()
val watch = pod
.readingInput(System.in)
.writingOutput(out)
.writingError(System.err)
.withTTY()
.usingListener(listener)
.exec(cmd.toArray: _*)
// under load sometimes the stdout isn't connected by the time we try to read from it.
listener.waitForInputStreamToConnect()
listener.waitForClose()
watch.close()
out.flush()
val result = out.toString()
result
}
def createTempFile(contents: String, hostPath: String): String = {
val filename = try {
val f = File.createTempFile("tmp", ".txt", new File(hostPath))
f.deleteOnExit()
new PrintWriter(f) {
try {
write(contents)
} finally {
close()
}
}
f.getName
} catch {
case e: Exception => e.printStackTrace(); throw e;
}
filename
}
def getExamplesJarAbsolutePath(sparkHomeDir: Path): String = {
val jarName = getExamplesJarName()
val jarPathsFound = Files
.walk(sparkHomeDir)
.filter(Files.isRegularFile(_))
.filter((f: Path) => {f.toFile.getName == jarName})
// we should not have more than one here under current test build dir
// we only need one though
val jarPath = jarPathsFound
.iterator()
.asScala
.map(_.toAbsolutePath.toString)
.toArray
.headOption
jarPath match {
case Some(jar) => jar
case _ => throw new SparkException(s"No valid $jarName file was found " +
s"under spark home test dir ${sparkHomeDir.toAbsolutePath}!")
}
}
def isHadoop3(): Boolean = {
VersionInfo.getVersion.startsWith("3")
}
}
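// A small usage sketch (not part of the original file) for tryWithResource above: the
// resource is closed even if the body throws. The file path is an assumption.
object TryWithResourceExample {
  def main(args: Array[String]): Unit = {
    val bytesRead = Utils.tryWithResource(new java.io.FileInputStream("/tmp/example.txt")) { in =>
      in.read(new Array[Byte](16))
    }
    println(s"read $bytesRead bytes")
  }
}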
| dbtsai/spark | resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/Utils.scala | Scala | apache-2.0 | 4,377 |
package spire
package macros
import org.scalatest.FunSuite
import org.scalatest.Matchers
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import org.scalacheck.{Arbitrary, Gen}
class CheckedTest extends FunSuite with GeneratorDrivenPropertyChecks with Matchers {
import Checked.checked
import Arbitrary.arbitrary
case class NotZero[A](value: A)
implicit def arbNotZeroLong = Arbitrary(arbitrary[Long] filter (_ != 0L) map (NotZero(_)))
implicit def arbNotZeroInt = Arbitrary(arbitrary[Int] filter (_ != 0L) map (NotZero(_)))
def checkForLongOverflow(value: BigInt, check: => Long) = {
if (value.isValidLong) {
check should equal (value.toLong)
} else {
an[ArithmeticException] should be thrownBy { check }
}
}
def checkForIntOverflow(value: BigInt, check: => Int) = {
if (value.isValidInt) {
check should equal (value.toInt)
} else {
an[ArithmeticException] should be thrownBy { check }
}
}
test("Negate of Int.MinValue overflows") {
val x = Int.MinValue
an[ArithmeticException] should be thrownBy { checked(-x) }
}
test("Int negate overflow throws arithmetic exception") {
forAll("x") { (x: Int) =>
checkForIntOverflow(-BigInt(x), checked(-x))
}
}
test("Int addition overflow throws arithmetic exception") {
forAll("x", "y") { (x: Int, y: Int) =>
checkForIntOverflow(BigInt(x) + BigInt(y), checked(x + y))
}
}
test("Int subtraction overflow throws arithmetic exception") {
forAll("x", "y") { (x: Int, y: Int) =>
checkForIntOverflow(BigInt(x) - BigInt(y), checked(x - y))
}
}
test("Int multiplication overflow throws arithmetic exception") {
forAll("x", "y") { (x: Int, y: Int) =>
checkForIntOverflow(BigInt(x) * BigInt(y), checked(x * y))
}
}
test("Int division overflow throws arithmetic exception") {
forAll("x", "y") { (x: Int, y: NotZero[Int]) =>
checkForIntOverflow(BigInt(x) / BigInt(y.value), checked(x / y.value))
}
}
def distSq(x: Long, y: Long): BigInt = BigInt(x) * BigInt(x) + BigInt(y) * BigInt(y)
test("Int euclidean square distance overflow throws arithmetic exception") {
forAll("x", "y") { (x: Int, y: Int) =>
checkForIntOverflow(distSq(x, y), checked(x * x + y * y))
}
}
test("Negate of Long.MinValue overflows") {
val x = Long.MinValue
an[ArithmeticException] should be thrownBy { checked(-x) }
}
test("Long negate overflow throws arithmetic exception") {
forAll("x") { (x: Long) =>
checkForLongOverflow(-BigInt(x), checked(-x))
}
}
test("Long addition overflow throws arithmetic exception") {
forAll("x", "y") { (x: Long, y: Long) =>
checkForLongOverflow(BigInt(x) + BigInt(y), checked(x + y))
}
}
test("Long subtraction overflow throws arithmetic exception") {
forAll("x", "y") { (x: Long, y: Long) =>
checkForLongOverflow(BigInt(x) - BigInt(y), checked(x - y))
}
}
test("Long multiplication overflow throws arithmetic exception") {
forAll("x", "y") { (x: Long, y: Long) =>
checkForLongOverflow(BigInt(x) * BigInt(y), checked(x * y))
}
}
test("Long division overflow throws arithmetic exception") {
forAll("x", "y") { (x: Long, y: NotZero[Long]) =>
checkForLongOverflow(BigInt(x) / BigInt(y.value), checked(x / y.value))
}
}
test("Long euclidean square distance overflow throws arithmetic exception") {
forAll("x", "y") { (x: Long, y: Long) =>
checkForLongOverflow(distSq(x, y), checked(x * x + y * y))
}
}
test("Int upgrades to Long for overflow checks when mixed in binary op") {
Checked.option {
val x = 2L
val y = Int.MaxValue
x + y
} should equal(Some(Int.MaxValue.toLong + 2))
Checked.option {
val x = 2L
val y = Int.MaxValue
y + x
} should equal(Some(Int.MaxValue.toLong + 2))
an[ArithmeticException] should be thrownBy (checked {
val x = Long.MaxValue
val y = 2
x * y
})
an[ArithmeticException] should be thrownBy (checked {
val x = Long.MaxValue
val y = 2
y * x
})
}
test("Byte and Short upgrade to Int when mixed") {
an[ArithmeticException] should be thrownBy (checked {
val x = Int.MaxValue
val y = (2: Byte)
x * y
})
an[ArithmeticException] should be thrownBy (checked {
val x = Int.MaxValue
val y = (2: Byte)
y * x
})
an[ArithmeticException] should be thrownBy (checked {
val x = Int.MaxValue
val y = (2: Short)
x * y
})
an[ArithmeticException] should be thrownBy (checked {
val x = Int.MaxValue
val y = (2: Short)
y * x
})
}
}
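// A brief illustration (not part of the original test file) of the two entry points the
// tests above exercise: `checked` throws ArithmeticException on overflow, while
// `Checked.option` returns None instead. Local vals are used so the compiler does not
// constant-fold the expression before the macro sees it.
object CheckedExample {
  import Checked.checked
  def main(args: Array[String]): Unit = {
    val x = Int.MaxValue
    val y = 1
    println(Checked.option(x + y)) // None: the addition overflows
    try checked(x + y)
    catch { case _: ArithmeticException => println("overflow detected") }
  }
}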
| rklaehn/spire | macros/src/test/scala/spire/macros/CheckedTest.scala | Scala | mit | 4,723 |
package net.vanfleteren.objectvalidation
import org.scalactic.{Bad, Every, Fail, Good, One, Or, Pass}
/**
* Methods returning T Or Every[Error] should be used first on the actual value being validated.
* The methods returning ValidationError can be chained on them later using when.
*
* For example:
*
* val x = Some("string")
* validate(require(x).when(notEmpty))
*
* will yield a Good[String] only when the passed option is defined and holds a non-empty string.
*
*/
trait DefaultValidations {
type ValidationError = org.scalactic.Validation[Error]
/**
* The given value should be valid according to the passed validator.
* @param value the value to validate
* @param validator the actual Validator that will do the validation
* @tparam T the type of the value
*/
def valid[T](value: T)(implicit validator: T => T Or Every[Error]): T Or Every[Error] = {
validator(value)
}
/**
* If the option is defined, it should be valid. If it is None, it is also considered valid.
*/
def valid[T](t: Option[T])(implicit validator: T => T Or Every[Error]): Option[T] Or Every[Error] = {
if (t.isDefined) {
validator(t.get).map(Some(_))
} else {
Good(None)
}
}
/**
* Always passes validation
*/
def ok[T](t: T): T Or One[Error] = {
Good(t)
}
/**
* The passed option must be defined.
*/
def require[T](value: Option[T]): T Or One[Error] = {
value.map(Good(_)).getOrElse(Bad(One(Error("required"))))
}
/**
* The passed option is always good. Acts the same as ok, but might be clearer to use.
*/
def optional[T](value: Option[T]): Option[T] Or One[Error] = {
Good(value)
}
/**
* Only executes the passed validation if the option is defined.
* For example:
*
* Since minLength expects an actual String and not an Option, you need to wrap minLength in opt
*
* validate(optional(Some("string)).when(opt(minLength(2)))
*
*
* @param validation
* @param value
* @tparam T
* @return
*/
def opt[T](validation: T => ValidationError)(value: Option[T]): ValidationError = {
if (value.isDefined) {
validation(value.get)
} else {
Pass
}
}
/**
* String should be minimum length
* @param length
* @param value
* @return
*/
def minLength(length: Int)(value: String): ValidationError = {
if (value.length >= length) {
Pass
} else {
Fail(Error(s"Length must be at least $length"))
}
}
/**
* String should not be empty
* @param value
* @return
*/
def notEmpty(value: String): ValidationError = {
if (value != null && value.trim.length > 0) {
Pass
} else {
Fail(Error("Must not be empty"))
}
}
/**
* The value to be validated should be one of the passed values
*/
def isOneOf[T](values:Set[T])(value:T): ValidationError = {
if(values.contains(value)) {
Pass
} else {
Fail(Error(s"$value is not a valid value"))
}
}
}
object DefaultValidations extends DefaultValidations
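// A usage sketch mirroring the trait's own Scaladoc (not part of the original file). It
// assumes the Error type defined elsewhere in this package and Scalactic's Accumulation
// import, which provides the `when` chaining described above.
object DefaultValidationsExample extends DefaultValidations {
  import org.scalactic.Accumulation._
  def main(args: Array[String]): Unit = {
    val name: Option[String] = Some("Ada")
    // Good("Ada") only when the option is defined and the string is not empty.
    println(require(name).when(notEmpty))
    // Bad(...) because the option is empty.
    println(require(None: Option[String]).when(notEmpty))
  }
}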
| cvanfleteren/objectvalidation | src/main/scala/net/vanfleteren/objectvalidation/DefaultValidations.scala | Scala | apache-2.0 | 3,073 |
// Copyright 2011-2012 James Michael Callahan
// See LICENSE-2.0 file for licensing information.
package org.scalagfx.math
//--------------------------------------------------------------------------------------------------
// P O S 2 D
//--------------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------------
// Supported Subset of Operations:
//
// P + S -> P P - S -> P P * S -> P P / S -> P
// V + S -> V V - S -> V V * S -> V V / S -> V
//
// --- P - P -> V --- ---
// P + V -> P P - V -> P P * V -> P P / V -> P
// V + V -> V V - V -> V V * V -> V V / V -> V
//
// S = Scalar(Double), P = Position(Pos2d), V = Vector(Vec2d)
//--------------------------------------------------------------------------------------------------
/** Companion object for Pos2d. */
object Pos2d
{
//------------------------------------------------------------------------------------------------
// C R E A T I O N
//------------------------------------------------------------------------------------------------
/** Create a new position from components. */
def apply(x: Double, y: Double) =
new Pos2d(x, y)
/** Create a new position in which all components are the same scalar value. */
def apply(s: Double) =
new Pos2d(s, s)
/** The origin. */
val origin: Pos2d =
Pos2d(0.0)
//------------------------------------------------------------------------------------------------
// C O M P A R I S O N
//------------------------------------------------------------------------------------------------
/** The component-wise comparison of whether two positions are within a given epsilon. */
def equiv(a: Pos2d, b: Pos2d, epsilon: Double): Boolean =
a.equiv(b, epsilon)
/** The component-wise comparison of whether two positions are within a type specific
* epsilon. */
def equiv(a: Pos2d, b: Pos2d): Boolean =
(a equiv b)
/** The component-wise minimum of two positions. */
def min(a: Pos2d, b: Pos2d): Pos2d =
compwise(a, b, scala.math.min(_, _))
/** The component-wise maximum of two positions. */
def max(a: Pos2d, b: Pos2d): Pos2d =
compwise(a, b, scala.math.max(_, _))
//------------------------------------------------------------------------------------------------
// I N T E R P O L A T I O N
//------------------------------------------------------------------------------------------------
/** Linearly interpolate between two positions. */
def lerp(a: Pos2d, b: Pos2d, t: Double): Pos2d =
compwise(a, b, Scalar.lerp(_, _, t))
/** Smooth-step interpolate between two positions. */
def smoothlerp(a: Pos2d, b: Pos2d, t: Double): Pos2d =
compwise(a, b, Scalar.smoothlerp(_, _, t))
//------------------------------------------------------------------------------------------------
// U T I L I T Y
//------------------------------------------------------------------------------------------------
/** Create a position whose components are generated by applying the given binary operator
* to each of the corresponding components of the given two positions. */
def compwise(a: Pos2d, b: Pos2d, f: (Double, Double) => Double): Pos2d =
Pos2d(f(a.x, b.x), f(a.y, b.y))
}
/** An immutable 2-dimensional vector of Double element type used to represent a position in
* space for use in computational geometry applications.
*
* This is not meant to be a general purpose vector, but rather to only defined the limited
* set of operations which make geometric sense. This allows Scala type checking to catch
* many of the most common errors where scalars, vectors or positions are being accidently
* used in a way that is geometrically meaningless. */
class Pos2d(val x: Double, val y: Double) extends Vector2dLike
{
type Self = Pos2d
//------------------------------------------------------------------------------------------------
// C O M P O N E N T O P S
//------------------------------------------------------------------------------------------------
/** A copy of this position in which the X component has been replaced with the given
* value. */
def newX(v: Double): Pos2d =
Pos2d(v, y)
/** A copy of this position in which the Y component has been replaced with the given value. */
def newY(v: Double): Pos2d =
Pos2d(x, v)
/** A copy of this position in which the component with the given index has been replaced. */
def newComp(i: Int, v: Double) =
i match {
case 0 => Pos2d(v, y)
case 1 => Pos2d(x, v)
case _ => throw new IllegalArgumentException("Invalid index (" + i + ")!")
}
//------------------------------------------------------------------------------------------------
// U N A R Y O P S
//------------------------------------------------------------------------------------------------
/** A position reflected about the origin. */
def negated: Pos2d = Pos2d(-x, -y)
//------------------------------------------------------------------------------------------------
// O P E R A T O R S
//------------------------------------------------------------------------------------------------
/** The addition of a scalar to all components of this position. */
def + (scalar: Double): Pos2d = Pos2d(x+scalar, y+scalar)
/** The component-wise addition of a vector with this position. */
def + (that: Vec2d): Pos2d = Pos2d(x+that.x, y+that.y)
/** The subtraction of a scalar value to all components of this position. */
def - (scalar: Double): Pos2d = Pos2d(x-scalar, y-scalar)
/** The component-wise subtraction a vector from this position. */
def - (that: Vec2d): Pos2d = Pos2d(x-that.x, y-that.y)
/** The vector from the given position to this position. */
def - (that: Pos2d): Vec2d = Vec2d(x-that.x, y-that.y)
/** The product of a scalar value with all components of this position. */
def * (scalar: Double): Pos2d = Pos2d(x*scalar, y*scalar)
/** The component-wise multiplication of a vector with this position. */
def * (that: Vec2d): Pos2d = Pos2d(x*that.x, y*that.y)
/** The quotient of dividing all components of this position by a scalar value. */
def / (scalar: Double): Pos2d = Pos2d(x/scalar, y/scalar)
/** The component-wise division of this position by a vector. */
def / (that: Vec2d): Pos2d = Pos2d(x/that.x, y/that.y)
//------------------------------------------------------------------------------------------------
// C O M P A R I S O N
//------------------------------------------------------------------------------------------------
/** Compares this position to the specified value for equality. */
override def equals(that: Any): Boolean =
that match {
case that: Pos2d =>
(that canEqual this) && (x == that.x) && (y == that.y)
case _ => false
}
/** A method that should be called from every well-designed equals method that is open
* to be overridden in a subclass. */
def canEqual(that: Any): Boolean =
that.isInstanceOf[Pos2d]
/** Returns a hash code value for the object. */
override def hashCode: Int =
43 * (41 + x.##) + y.##
//------------------------------------------------------------------------------------------------
// U T I L I T Y
//------------------------------------------------------------------------------------------------
/** Tests whether the given predicate holds true for all of the corresponding components
* of this and the given position. */
def forall(that: Pos2d)(p: (Double, Double) => Boolean): Boolean =
p(x, that.x) && p(y, that.y)
/** Tests whether the given predicate holds true for any of the corresponding components
* of this and the given position. */
def forany(that: Pos2d)(p: (Double, Double) => Boolean): Boolean =
p(x, that.x) || p(y, that.y)
/** Builds a new position by applying a function to each component of this position. */
def map(f: (Double) => Double): Pos2d =
Pos2d(f(x), f(y))
//------------------------------------------------------------------------------------------------
// C O N V E R S I O N
//------------------------------------------------------------------------------------------------
/** Convert to a string representation. */
override def toString() =
"Pos2d(%.2f, %.2f)".format(x, y)
}
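
// A minimal usage sketch, assuming the companion factory `Pos2d(x, y)` and the `Vec2d` class
// referenced above exist as used in this file; it only exercises operations defined here.
object Pos2dUsageSketch {
  def demo(): Unit = {
    val a = Pos2d(1.0, 2.0)
    val b = Pos2d(4.0, 6.0)
    val v: Vec2d = b - a                         // the difference of two positions is a vector
    val c: Pos2d = a + v                         // adding a vector to a position yields a position
    val midpoint: Pos2d = Pos2d.lerp(a, b, 0.5)  // linear interpolation between two positions
    println(v.toString + " " + c + " " + midpoint)
  }
}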
|
JimCallahan/Graphics
|
src/org/scalagfx/math/Pos2d.scala
|
Scala
|
apache-2.0
| 9,662
|
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.internal.cypher.acceptance
import org.neo4j.cypher.{SyntaxException, NewPlannerTestSupport, ExecutionEngineFunSuite}
class SkipLimitAcceptanceTest extends ExecutionEngineFunSuite {
test("SKIP should not allow identifiers") {
intercept[SyntaxException](execute("MATCH (n) RETURN n SKIP n.count"))
}
test("LIMIT should not allow identifiers") {
intercept[SyntaxException](execute("MATCH (n) RETURN n LIMIT n.count"))
}
test("SKIP with an expression that does not depend on identifiers should work") {
1 to 10 foreach { _ => createNode() }
val query = "MATCH (n) RETURN n SKIP toInt(rand()*9)"
val result = execute(query)
result.toList should not be empty
}
test("LIMIT with an expression that does not depend on identifiers should work") {
1 to 3 foreach { _ => createNode() }
val query = "MATCH (n) RETURN n LIMIT toInt(ceil(1.7))"
val result = execute(query)
result.toList should have size 2
}
}
|
HuangLS/neo4j
|
community/cypher/acceptance/src/test/scala/org/neo4j/internal/cypher/acceptance/SkipLimitAcceptanceTest.scala
|
Scala
|
apache-2.0
| 1,775
|
import sbt._
import de.wayofquality.sbt.testlogconfig.TestLogConfig.autoImport._
import blended.sbt.Dependencies
object BlendedAkka extends ProjectFactory {
private[this] val helper = new ProjectSettings(
projectName = "blended.akka",
description = "Provide OSGi services and API to use Actors in OSGi bundles with a shared ActorSystem.",
deps = Seq(
Dependencies.orgOsgi,
Dependencies.akkaActor,
Dependencies.domino,
Dependencies.scalatest % "test",
Dependencies.logbackCore % "test",
Dependencies.logbackClassic % "test"
),
adaptBundle = b => b.copy(
bundleActivator = s"${b.bundleSymbolicName}.internal.BlendedAkkaActivator",
exportPackage = Seq(
b.bundleSymbolicName,
s"${b.bundleSymbolicName}.protocol"
)
)
) {
override def settings: Seq[sbt.Setting[_]] = defaultSettings ++ Seq(
Test / testlogDefaultLevel := "INFO",
Test / testlogLogPackages ++= Map(
"blended" -> "TRACE"
)
)
}
override val project = helper.baseProject.dependsOn(
BlendedUtilLogging.project,
BlendedContainerContextApi.project,
BlendedDomino.project,
BlendedTestsupport.project % "test"
)
}
|
lefou/blended
|
project/BlendedAkka.scala
|
Scala
|
apache-2.0
| 1,224
|
package org.reactivecouchbase.rs.scaladsl
/**
* Handles the retries on error. Similar to `RetryBuilder` of java client of couchbase.
*/
object Retries {
import scala.concurrent._
import scala.concurrent.duration._
import akka.pattern.after
import akka.actor.Scheduler
/**
   * Tries to invoke a future block as many times as asked on occurrence of a specific type of error. A fixed
   * delay/interval is applied before each retry.
   * Note that if the errors keep occurring beyond the maximum allowed number of attempts, the message of the last
   * error is reported inside a CannotRetryException.
*
* @param retries Number of retries to be made
* @param delay Interval between two consecutive retries
* @param errFilter Function to filter a specific type of error
* @param f Block of code to be retried, so this should be the couchbase query to retry on failure
* @param ec Execution context
* @param s Scheduler
* @tparam T Type of result
* @return Either the result inside a future or an exception
*/
def retryOnError[T](
retries: Int = 1,
delay: FiniteDuration = 0.millis,
errFilter: Throwable => Boolean = _ => true
)(
f: => Future[T]
)(
implicit ec: ExecutionContext,
s: Scheduler
): Future[T] = {
f recoverWith {
case e if errFilter(e) && retries > 0 =>
        after(delay, s)(retryOnError(retries - 1, delay, errFilter)(f)) // preserve the caller's error filter across retries
case e if errFilter(e) && retries == 0 =>
Future.failed(CannotRetryException(s"All retries failed because of ${e.getMessage}"))
}
}
}
/**
  * Exception thrown if the error persists even after the maximum number of retries.
  *
  * @param message Exception message
  */
case class CannotRetryException(message: String) extends RuntimeException(message)
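
// A minimal usage sketch, assuming an akka ActorSystem is available to provide the Scheduler and
// that `query` stands in for the couchbase call to be retried; both names are illustrative only.
object RetriesUsageSketch {
  import scala.concurrent.{ExecutionContext, Future}
  import scala.concurrent.duration._
  import akka.actor.{ActorSystem, Scheduler}

  def lookup(system: ActorSystem, query: () => Future[String]): Future[String] = {
    implicit val ec: ExecutionContext = system.dispatcher
    implicit val scheduler: Scheduler = system.scheduler
    // Retry up to 3 times, pausing 200ms between attempts, but only for timeout errors.
    Retries.retryOnError(
      retries = 3,
      delay = 200.millis,
      errFilter = _.isInstanceOf[java.util.concurrent.TimeoutException]
    )(query())
  }
}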
|
ReactiveCouchbase/reactivecouchbase-rs-core
|
src/main/scala/org/reactivecouchbase/rs/scaladsl/Retries.scala
|
Scala
|
apache-2.0
| 1,859
|
package core.clustering.model
import core.DataSet
import core.plotting.Plotting
import core.util.Distances.DistanceFunc
import org.jfree.chart.{ChartFactory, ChartFrame}
import org.jfree.data.xy.{XYSeries, XYSeriesCollection}
/**
  * This class represents a centroid-based clustering model: the resulting clusters,
  * the cluster index assigned to each instance, and the data set it was built from.
  */
class CentroidModel(clusters: List[CentroidCluster],
assignments :Array[Int],
dataSet: DataSet) extends Plotting{
  /**
   * Returns the clusters of the model.
   *
   * @return the list of clusters.
   */
def getClusters: List[CentroidCluster] = this.clusters
  /**
   * Returns the cluster assignments.
   *
   * @return the index of the cluster assigned to each instance of the data set.
   */
def getAssignments : Array[Int] = this.assignments
  def evaluateInterClusterDistances(distanceFunc: DistanceFunc): Double = {
    val visited = new Array[Boolean](clusters.size)
    var distanceSum = 0.0
    for (i <- clusters.indices) {
      visited(i) = true // mark i as visited so each pair of centroids is summed only once
      for (j <- clusters.indices)
        if (i != j && !visited(j))
          distanceSum += distanceFunc(clusters(i).getCentroid, clusters(j).getCentroid)
    }
    distanceSum / clusters.size
  }
def evaluateIntraClusterDistances(distanceFunc: DistanceFunc): Double ={
val distanceSum = clusters.map(x => x.distancesSum(distanceFunc)).sum
distanceSum/dataSet.data.size
}
  /**
   * Renders a 2D scatter plot of the clusters and their centroids.
   *
   * @param name title of the chart.
   * @param dimensionX index of the dimension plotted on the X axis.
   * @param dimensionY index of the dimension plotted on the Y axis.
   */
def render2D(name:String, dimensionX: Int, dimensionY: Int){
    // Create one XY series per cluster
val xyCollection = new XYSeriesCollection()
var index = 0
for(cluster <- clusters){
val series = new XYSeries("Cluster_"+index)
index = index +1
      // Iterate over the members of the cluster
for(index <- cluster.getMembers){
val x = dataSet.data(index)(dimensionX)
val y = dataSet.data(index)(dimensionY)
series.add(x,y)
}
xyCollection.addSeries(series)
}
    // Now create a single series holding only the centroids
val centroidSeries = new XYSeries("Centroids")
for(cluster <- clusters){
val cX = cluster.getCentroid(dimensionX)
val cY = cluster.getCentroid(dimensionY)
centroidSeries.add(cX, cY)
}
xyCollection.addSeries(centroidSeries)
val chart = ChartFactory.createScatterPlot(name, "Dim_"+dimensionX, "Dim_"+dimensionY, xyCollection)
// create and display a frame...
val frame = new ChartFrame("First", chart)
frame.pack()
frame.setVisible(true)
}
}
|
fernandoj92/ScalaML
|
src/main/scala/core/clustering/model/CentroidModel.scala
|
Scala
|
gpl-3.0
| 2,370
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.data.storage.s3
import java.io.ByteArrayInputStream
import org.apache.predictionio.data.storage.Model
import org.apache.predictionio.data.storage.Models
import org.apache.predictionio.data.storage.StorageClientConfig
import com.amazonaws.services.s3.AmazonS3
import com.amazonaws.services.s3.model.DeleteObjectRequest
import com.amazonaws.services.s3.model.GetObjectRequest
import com.amazonaws.services.s3.model.ObjectMetadata
import com.amazonaws.services.s3.model.PutObjectRequest
import com.amazonaws.services.s3.model.S3Object
import com.google.common.io.ByteStreams
import grizzled.slf4j.Logging
class S3Models(s3Client: AmazonS3, config: StorageClientConfig, prefix: String)
extends Models with Logging {
def insert(i: Model): Unit = {
    def putModel(bucketName: String, key: String): Option[Model] = {
val data = i.models
val metadata: ObjectMetadata = new ObjectMetadata()
metadata.setContentLength(data.length)
val req = new PutObjectRequest(bucketName, key, new ByteArrayInputStream(data), metadata)
try {
s3Client.putObject(req)
} catch {
case e: Throwable => error(s"Failed to insert a model to s3://${bucketName}/${key}", e)
}
None
}
    doAction(i.id, putModel)
}
def get(id: String): Option[Model] = {
def getModel(bucketName: String, key: String): Option[Model] = {
      val s3object: S3Object = s3Client.getObject(new GetObjectRequest(bucketName, key))
val is = s3object.getObjectContent
try {
Some(Model(
id = id,
models = ByteStreams.toByteArray(is)))
} catch {
case e: Throwable =>
error(s"Failed to get a model from s3://${bucketName}/${key}", e)
None
} finally {
is.close()
}
}
doAction(id, getModel)
}
def delete(id: String): Unit = {
def deleteModel(bucketName: String, key: String): Option[Model] = {
try {
s3Client.deleteObject(new DeleteObjectRequest(bucketName, key))
} catch {
case e: Throwable => error(s"Failed to delete s3://${bucketName}/${key}", e)
}
None
}
doAction(id, deleteModel)
}
def doAction(id: String, action: (String, String) => Option[Model]): Option[Model] = {
config.properties.get("BUCKET_NAME") match {
case Some(bucketName) =>
val key = config.properties.get("BASE_PATH") match {
case Some(basePath) => s"${basePath}/${prefix}${id}"
case None => s"${prefix}${id}"
}
action(bucketName, key)
case None =>
        error("S3 BUCKET_NAME is not configured.")
None
}
}
}
|
PredictionIO/PredictionIO
|
storage/s3/src/main/scala/org/apache/predictionio/data/storage/s3/S3Models.scala
|
Scala
|
apache-2.0
| 3,477
|
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package tools.nsc
import Properties.{versionMsg, residentPromptString}
import scala.reflect.internal.util.FakePos
import scala.tools.nsc.reporters.Reporter
import scala.tools.util.SystemExit
abstract class Driver {
val prompt = residentPromptString
var reporter: Reporter = _
protected var command: CompilerCommand = _
protected var settings: Settings = _
/** Forward errors to the (current) reporter. */
protected def scalacError(msg: String): Unit = {
reporter.error(FakePos("scalac"), msg + "\\n scalac -help gives more information")
}
/** True to continue compilation. */
protected def processSettingsHook(): Boolean = {
if (settings.version) { reporter echo versionMsg ; false }
else !reporter.hasErrors
}
protected def newCompiler(): Global
protected def doCompile(compiler: Global): Unit = {
if (command.files.isEmpty) {
reporter.echo(command.usageMsg)
reporter.echo(compiler.pluginOptionsHelp)
} else {
val run = new compiler.Run()
run compile command.files
reporter.finish()
}
}
def process(args: Array[String]): Boolean = {
val ss = new Settings(scalacError)
reporter = Reporter(ss)
command = new CompilerCommand(args.toList, ss)
settings = command.settings
if (processSettingsHook()) {
val compiler = newCompiler()
reporter = compiler.reporter // adopt the compiler's reporter, which may be custom
try {
if (reporter.hasErrors)
reporter.flush()
else if (command.shouldStopWithInfo)
reporter.echo(command.getInfoMessage(compiler))
else
doCompile(compiler)
} catch {
case _: SystemExit => // user requested to bail
case ex: Throwable =>
compiler.reportThrowable(ex)
ex match {
case FatalError(msg) => // signals that we should fail compilation.
case _ => throw ex // unexpected error, tell the outside world.
}
}
} else if (reporter.hasErrors) reporter.flush()
!reporter.hasErrors
}
def main(args: Array[String]): Unit = System.exit(if (process(args)) 0 else 1)
}
|
scala/scala
|
src/compiler/scala/tools/nsc/Driver.scala
|
Scala
|
apache-2.0
| 2,483
|
package nlpdata.util
import cats.Show
import cats.Order
import cats.Monoid
import scala.language.implicitConversions
/* Welcome to the new world.
* The world of ad-hoc refinement types requiring nothing more from the user than a single method call.
* NO MORE WILL YOU BE UNCERTAIN, ON THE FIRST LINE OF YOUR METHOD,
* WHETHER THE STRING WAS GUARANTEED TO BE LOWERCASE.
* FOR YOU HAVE GUARANTEED IT ALREADY IN THE TYPE SYSTEM.
* This is your weapon. This is your LowerCaseString.
* Wield it with pride.
* NOTE: there are projects to help you do refinement typing...but they seem a bit heavier weight for client code...idk
* Anyway, don't try to read the code just below. The point is that you can write:
* import nlpdata.util.LowerCaseStrings._
* and then you get the _.lowerCase method on strings, which yields a LowerCaseString,
* as well as an implicit conversion from LowerCaseString back to String.
* In addition, certain uses of existing methods on String will preserve LowerCaseString (as of now, just +);
* if you want there to be more, feel free to let me (Julian) know and I can add them here.
* I know it seems like weird extra complication, but honestly I was already having bugs from not lowercasing strings,
* despite sprinkling calls to .toLowerCase around so much that the code had gotten noticeably harder to read.
*/
sealed trait LowerCaseStringCapsule0 {
type LowerCaseString
sealed trait LowerCaseStringOps {
def lowerCase(s: String): LowerCaseString
def +(s1: LowerCaseString, s2: LowerCaseString): LowerCaseString
def contains(s1: LowerCaseString, s2: LowerCaseString): Boolean
def startsWith(s1: LowerCaseString, s2: LowerCaseString): Boolean
def endsWith(s1: LowerCaseString, s2: LowerCaseString): Boolean
def substring(s: LowerCaseString, beginIndex: Int): LowerCaseString
def substring(s: LowerCaseString, beginIndex: Int, endIndex: Int): LowerCaseString
}
val LowerCaseStringOpsImpl: LowerCaseStringOps
implicit def lowerCaseStringToString(lcs: LowerCaseString): String
}
sealed trait LowerCaseStringCapsule extends LowerCaseStringCapsule0 {
implicit def wrapLowerCaseString(lcs: LowerCaseString): LowerCaseStringWrapper
implicit def wrapStringToMakeLowerCase(s: String): StringToLowerCaseWrapper
implicit def lowerCaseStringShow: Show[LowerCaseString]
implicit def lowerCaseStringMonoid: Monoid[LowerCaseString]
implicit def lowerCaseStringOrder: Order[LowerCaseString]
}
protected[util] object LowerCaseStringsImpl extends LowerCaseStringCapsule {
override type LowerCaseString = String
override object LowerCaseStringOpsImpl extends LowerCaseStringOps {
override def lowerCase(s: String): LowerCaseString = s.toLowerCase
override def +(s1: LowerCaseString, s2: LowerCaseString) = s1 + s2
override def contains(s1: LowerCaseString, s2: LowerCaseString) =
s1 contains s2
override def startsWith(s1: LowerCaseString, s2: LowerCaseString) =
s1 startsWith s2
override def endsWith(s1: LowerCaseString, s2: LowerCaseString) =
s1 endsWith s2
override def substring(s: LowerCaseString, beginIndex: Int) =
s.substring(beginIndex)
override def substring(s: LowerCaseString, beginIndex: Int, endIndex: Int) =
s.substring(beginIndex, endIndex)
}
override implicit def lowerCaseStringToString(lcs: LowerCaseString): String =
lcs
override implicit def wrapLowerCaseString(lcs: LowerCaseString) =
new LowerCaseStringWrapper(lcs.asInstanceOf[LowerCaseStrings.LowerCaseString]) // upcasted version of this object
override implicit def wrapStringToMakeLowerCase(s: String) =
new StringToLowerCaseWrapper(s)
override implicit val lowerCaseStringShow: Show[LowerCaseString] =
new Show[LowerCaseString] {
override def show(lcs: LowerCaseString): String = lcs.toString
}
override implicit val lowerCaseStringMonoid: Monoid[LowerCaseString] =
new Monoid[LowerCaseString] {
def empty: LowerCaseString = ""
def combine(x: LowerCaseString, y: LowerCaseString): LowerCaseString =
x + y
override def combineAll(xs: TraversableOnce[LowerCaseString]): LowerCaseString = {
val sb = new StringBuilder
xs.foreach(sb.append)
sb.toString
}
}
override implicit val lowerCaseStringOrder: Order[LowerCaseString] =
new Order[LowerCaseString] {
override def eqv(x: String, y: String): Boolean = x == y
override def compare(x: String, y: String): Int =
if (x eq y) 0 else x compareTo y
}
}
// take value with opaque-sealed type from the package object
import LowerCaseStrings.LowerCaseString
protected[util] class LowerCaseStringWrapper(val lcs: LowerCaseString) extends AnyVal {
def +(other: LowerCaseString): LowerCaseString =
LowerCaseStrings.LowerCaseStringOpsImpl.+(lcs, other)
def contains(other: LowerCaseString): Boolean =
LowerCaseStrings.LowerCaseStringOpsImpl.contains(lcs, other)
def startsWith(other: LowerCaseString): Boolean =
LowerCaseStrings.LowerCaseStringOpsImpl.startsWith(lcs, other)
def endsWith(other: LowerCaseString): Boolean =
LowerCaseStrings.LowerCaseStringOpsImpl.endsWith(lcs, other)
def substring(beginIndex: Int): LowerCaseString =
LowerCaseStrings.LowerCaseStringOpsImpl.substring(lcs, beginIndex)
def substring(beginIndex: Int, endIndex: Int): LowerCaseString =
LowerCaseStrings.LowerCaseStringOpsImpl.substring(lcs, beginIndex, endIndex)
}
protected[util] class StringToLowerCaseWrapper(val s: String) extends AnyVal {
def lowerCase = LowerCaseStrings.LowerCaseStringOpsImpl.lowerCase(s)
}
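
// A minimal usage sketch, assuming the `LowerCaseStrings` value exposed by the package object is the
// intended entry point, as the header comment above describes (that comment also notes that `+`
// preserves LowerCaseString).
object LowerCaseStringUsageSketch {
  import LowerCaseStrings.{wrapStringToMakeLowerCase, lowerCaseStringToString, lowerCaseStringOrder}

  def demo(): Unit = {
    val greeting: LowerCaseString = "Hello World".lowerCase // guaranteed lowercase by its type
    val plain: String = greeting                            // implicit conversion back to String
    val same: Boolean = lowerCaseStringOrder.eqv(greeting, "hello world".lowerCase)
    println(plain + " " + same)
  }
}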
|
julianmichael/nlpdata
|
nlpdata/src/nlpdata/util/LowerCaseString.scala
|
Scala
|
mit
| 5,631
|
/* Copyright 2009-2021 EPFL, Lausanne */
import stainless.lang._
object ArrayNested1 {
def test(): Int = {
var a = Array(1, 2, 0)
def nested(): Unit = {
require(a.length == 3)
a = a.updated(1, 5)
}
nested()
a(1)
} ensuring(_ == 5)
}
|
epfl-lara/stainless
|
frontends/benchmarks/imperative/valid/ArrayNested1.scala
|
Scala
|
apache-2.0
| 277
|
package com.databricks.spark.sql.perf.mllib.feature
import org.apache.spark.ml
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.ml.PipelineStage
import org.apache.spark.sql._
import org.apache.spark.sql.functions._
import com.databricks.spark.sql.perf.mllib.OptionImplicits._
import com.databricks.spark.sql.perf.mllib.data.DataGenerator
import com.databricks.spark.sql.perf.mllib.{BenchmarkAlgorithm, MLBenchContext, TestFromTraining}
/** Object for testing VectorAssembler performance */
object VectorAssembler extends BenchmarkAlgorithm with TestFromTraining {
private def getInputCols(numInputCols: Int): Array[String] = {
Array.tabulate(numInputCols)(i => s"c${i}")
}
override def trainingDataSet(ctx: MLBenchContext): DataFrame = {
import ctx.params._
require(numInputCols.get <= numFeatures.get,
s"numInputCols (${numInputCols}) cannot be greater than numFeatures (${numFeatures}).")
val df = DataGenerator.generateContinuousFeatures(
ctx.sqlContext,
numExamples,
ctx.seed(),
numPartitions,
numFeatures)
val slice = udf { (v: Vector, numSlices: Int) =>
val data = v.toArray
val n = data.length.toLong
(0 until numSlices).map { i =>
val start = ((i * n) / numSlices).toInt
val end = ((i + 1) * n / numSlices).toInt
Vectors.dense(data.slice(start, end))
}
}
val inputCols = getInputCols(numInputCols.get)
df.select(slice(col("features"), lit(numInputCols.get)).as("slices"))
.select((0 until numInputCols.get).map(i => col("slices")(i).as(inputCols(i))): _*)
}
override def getPipelineStage(ctx: MLBenchContext): PipelineStage = {
import ctx.params._
val inputCols = getInputCols(numInputCols.get)
new ml.feature.VectorAssembler()
.setInputCols(inputCols)
}
}
|
databricks/spark-sql-perf
|
src/main/scala/com/databricks/spark/sql/perf/mllib/feature/VectorAssembler.scala
|
Scala
|
apache-2.0
| 1,854
|
package xfp.simulation
import xfp.fixedpoint.{FixedPointFormat => FPFormat, Interval}
object TestExamples extends App with Simulation {
//val err = findMaxError(bspline1_1D, bspline1_1I, FPFormat(16, 14), Interval(0.0, 1.0), 15, 0.1)
var err = findLowerBound(bspline1_1D, bspline1_1I, FPFormat(16, 14), Interval(0.0, 1.0), 15)
println("max error %1.8f".format(err._1))
err = findLowerBound(bspline1_2D, bspline1_2I, FPFormat(16, 14), Interval(0.0, 1.0), 15)
println("max error %1.8f".format(err._1))
err = findLowerBound(bspline1_3D, bspline1_3I, FPFormat(16, 14), Interval(0.0, 1.0), 15)
println("max error %1.8f".format(err._1))
err = findLowerBound(bspline1_4D, bspline1_4I, FPFormat(16, 14), Interval(0.0, 1.0), 15)
println("max error %1.8f".format(err._1))
println("\n bspline2: ")
err = findLowerBound(bspline2_1D, bspline2_1I, FPFormat(16, 14), Interval(0.0, 1.0), 15)
println("max error %1.8f".format(err._1))
err = findLowerBound(bspline2_2D, bspline2_2I, FPFormat(16, 14), Interval(0.0, 1.0), 15)
println("max error %1.8f".format(err._1))
err = findLowerBound(bspline2_3D, bspline2_3I, FPFormat(16, 14), Interval(0.0, 1.0), 15)
println("max error %1.8f".format(err._1))
err = findLowerBound(bspline2_4D, bspline2_4I, FPFormat(16, 14), Interval(0.0, 1.0), 15)
println("max error %1.8f".format(err._1))
err = findLowerBound(bspline2_5D, bspline2_5I, FPFormat(16, 14), Interval(0.0, 1.0), 15)
println("max error %1.8f".format(err._1))
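
  // A note on the integer variants below (an inference from the code, not stated by the original
  // authors): each *_I function appears to be a fixed-point translation of the corresponding *_D
  // double-precision function, in the spirit of the FPFormat(16, 14) formats used above. In the
  // Int versions 1.0 is encoded as 16384 = 2^14; the Long versions further below use larger scale
  // factors, and the shift amounts vary with the format assigned to each intermediate result. The
  // bspline1/bspline2 expressions are algebraic rearrangements of uniform cubic B-spline basis
  // segments, with 0.1666 approximating 1/6.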
def logPolynomial1D(x: Double): Double = x - 0.5*(x*x) + 0.333*(x*x*x) - 0.25*(x*x*x*x)
def logPolynomial1I(x: Int): Int = {
val tmp0 = ((x * x) >> 14)
val tmp1 = ((16384 * tmp0) >> 14)
val tmp2 = ((x << 1) - tmp1)
val tmp3 = ((x * x) >> 14)
val tmp4 = ((tmp3 * x) >> 14)
val tmp5 = ((21823 * tmp4) >> 15)
val tmp6 = (tmp2 + tmp5)
val tmp7 = ((x * x) >> 14)
val tmp8 = ((tmp7 * x) >> 14)
val tmp9 = ((tmp8 * x) >> 14)
val tmp10 = ((16384 * tmp9) >> 15)
val tmp11 = (tmp6 - tmp10)
return tmp11
}
def logPolynomial2D(x: Double): Double = x * (1.0 + x * (-0.5 + x * (0.333 - 0.25 * x)))
def logPolynomial2I(x: Int): Int = {
val tmp0 = ((16384 * x) >> 15)
val tmp1 = ((21823 - (tmp0 << 1)) >> 1)
val tmp2 = ((x * tmp1) >> 14)
val tmp3 = (-16384 + tmp2)
val tmp4 = ((x * tmp3) >> 14)
val tmp5 = (((16384 << 1) + tmp4) >> 1)
val tmp6 = ((x * tmp5) >> 13)
return tmp6
}
def logPolynomial3D(x: Double): Double = (((-0.5 * (x * x)) + (x + ((x * (x * x)) * 0.333))) + (-0.25 * (x * (x * (x * x)))))
def logPolynomial3I(x: Int): Int = {
val tmp0 = ((x * x) >> 14)
val tmp1 = ((-16384 * tmp0) >> 14)
val tmp2 = ((x * x) >> 14)
val tmp3 = ((x * tmp2) >> 14)
val tmp4 = ((tmp3 * 21823) >> 15)
val tmp5 = (((x << 1) + tmp4) >> 1)
val tmp6 = (tmp1 + (tmp5 << 1))
val tmp7 = ((x * x) >> 14)
val tmp8 = ((x * tmp7) >> 14)
val tmp9 = ((x * tmp8) >> 14)
val tmp10 = ((-16384 * tmp9) >> 15)
val tmp11 = (tmp6 + tmp10)
return tmp11
}
def logPolynomial4D(x: Double): Double = (((-0.5 * (x * x)) + x) + ((0.333 * (x * (x * x))) + ((-0.25 * (x * x)) * (x * x))))
def logPolynomial4I(x: Int): Int = {
val tmp0 = ((x * x) >> 14)
val tmp1 = ((-16384 * tmp0) >> 14)
val tmp2 = (tmp1 + (x << 1))
val tmp3 = ((x * x) >> 14)
val tmp4 = ((x * tmp3) >> 14)
val tmp5 = ((21823 * tmp4) >> 15)
val tmp6 = ((x * x) >> 14)
val tmp7 = ((-16384 * tmp6) >> 15)
val tmp8 = ((x * x) >> 14)
val tmp9 = ((tmp7 * tmp8) >> 14)
val tmp10 = (tmp5 + tmp9)
val tmp11 = (tmp2 + tmp10)
return tmp11
}
def bspline1_1D(x: Double): Double = ((((3.0 * (x * (x * x))) + (-6.0 * (x * x))) * 0.1666) + (4.0 * 0.1666))
def bspline1_1I(x: Int): Int = {
val tmp0 = ((x * x) >> 14)
val tmp1 = ((x * tmp0) >> 14)
val tmp2 = ((24576 * tmp1) >> 14)
val tmp3 = ((x * x) >> 14)
val tmp4 = ((-24576 * tmp3) >> 14)
val tmp5 = (tmp2 + (tmp4 << 1))
val tmp6 = ((tmp5 * 21837) >> 15)
val tmp7 = ((16384 * 21837) >> 14)
val tmp8 = (tmp6 + tmp7)
return tmp8
}
def bspline1_2D(x: Double): Double = ((((3.0 * (x * (x * x))) + (-6.0 * (x * x))) + 4.0) * 0.1666)
def bspline1_2I(x: Int): Int = {
val tmp0 = ((x * x) >> 14)
val tmp1 = ((x * tmp0) >> 14)
val tmp2 = ((24576 * tmp1) >> 14)
val tmp3 = ((x * x) >> 14)
val tmp4 = ((-24576 * tmp3) >> 14)
val tmp5 = (tmp2 + (tmp4 << 1))
val tmp6 = ((tmp5 + (16384 << 1)) >> 1)
val tmp7 = ((tmp6 * 21837) >> 14)
return tmp7
}
def bspline1_3D(x: Double): Double = ((4.0 + (((3.0 * x) + -6.0) * (x * x))) * 0.1666)
def bspline1_3I(x: Int): Int = {
val tmp0 = ((24576 * x) >> 14)
val tmp1 = ((tmp0 + (-24576 << 1)) >> 1)
val tmp2 = ((x * x) >> 14)
val tmp3 = ((tmp1 * tmp2) >> 13)
val tmp4 = (((16384 << 1) + tmp3) >> 1)
val tmp5 = ((tmp4 * 21837) >> 14)
return tmp5
}
def bspline1_4D(x: Double): Double = ((4.0 + (((-6.0 * x) + (x * (3.0 * x))) * x)) * 0.1666)
def bspline1_4I(x: Int): Int = {
val tmp0 = ((-24576 * x) >> 14)
val tmp1 = ((24576 * x) >> 14)
val tmp2 = ((x * tmp1) >> 14)
val tmp3 = ((tmp0 << 1) + tmp2)
val tmp4 = ((tmp3 * x) >> 14)
val tmp5 = (((16384 << 1) + tmp4) >> 1)
val tmp6 = ((tmp5 * 21837) >> 14)
return tmp6
}
def bspline2_1D(x: Double): Double = (0.1666 * (((-3.0 * (x * (x * x))) + ((3.0 * (x * x)) + (3.0 * x))) + 1.0))
def bspline2_1I(x: Int): Int = {
val tmp0 = ((x * x) >> 14)
val tmp1 = ((x * tmp0) >> 14)
val tmp2 = ((-24576 * tmp1) >> 14)
val tmp3 = ((x * x) >> 14)
val tmp4 = ((24576 * tmp3) >> 14)
val tmp5 = ((24576 * x) >> 14)
val tmp6 = ((tmp4 + tmp5) >> 1)
val tmp7 = (tmp2 + (tmp6 << 1))
val tmp8 = (((tmp7 << 1) + 16384) >> 2)
val tmp9 = ((21837 * tmp8) >> 14)
return tmp9
}
def bspline2_2D(x: Double): Double = (((((-3.0 * (x * (x * x))) + (3.0 * (x * x))) + (3.0 * x)) + 1.0) * 0.1666)
def bspline2_2I(x: Int): Int = {
val tmp0 = ((x * x) >> 14)
val tmp1 = ((x * tmp0) >> 14)
val tmp2 = ((-24576 * tmp1) >> 14)
val tmp3 = ((x * x) >> 14)
val tmp4 = ((24576 * tmp3) >> 14)
val tmp5 = ((tmp2 + tmp4) << 2)
val tmp6 = ((24576 * x) >> 14)
val tmp7 = ((tmp5 + (tmp6 << 2)) >> 2)
val tmp8 = (((tmp7 << 1) + 16384) >> 2)
val tmp9 = ((tmp8 * 21837) >> 14)
return tmp9
}
def bspline2_3D(x: Double): Double = (((3.0 * x) + (1.0 + (x * (x + (x * (x * -3.0)))))) * 0.1666)
def bspline2_3I(x: Int): Int = {
val tmp0 = ((24576 * x) >> 14)
val tmp1 = ((x * -24576) >> 14)
val tmp2 = ((x * tmp1) >> 14)
val tmp3 = ((x + (tmp2 << 1)) >> 1)
val tmp4 = ((x * tmp3) >> 14)
val tmp5 = (16384 + (tmp4 << 1))
val tmp6 = (((tmp0 << 1) + tmp5) >> 1)
val tmp7 = ((tmp6 * 21837) >> 15)
return tmp7
}
def bspline2_4D(x: Double): Double = (0.1666 * (1.0 + (x * ((x + (-3.0 * (x * x))) + 3.0))))
def bspline2_4I(x: Int): Int = {
val tmp0 = ((x * x) >> 14)
val tmp1 = ((-24576 * tmp0) >> 14)
val tmp2 = ((x + (tmp1 << 1)) >> 1)
val tmp3 = (tmp2 + 24576)
val tmp4 = ((x * tmp3) >> 13)
val tmp5 = ((16384 + tmp4) >> 1)
val tmp6 = ((21837 * tmp5) >> 15)
return tmp6
}
def bspline2_5D(x: Double): Double = ((1.0 + ((3.0 + (((x * -3.0) * x) + x)) * x)) * 0.1666)
def bspline2_5I(x: Int): Int = {
val tmp0 = ((x * -24576) >> 14)
val tmp1 = ((tmp0 * x) >> 14)
val tmp2 = (((tmp1 << 1) + x) >> 1)
val tmp3 = (24576 + tmp2)
val tmp4 = ((tmp3 * x) >> 13)
val tmp5 = ((16384 + tmp4) >> 1)
val tmp6 = ((tmp5 * 21837) >> 15)
return tmp6
}
def field_dc_motor_1D(i_f: Double, ia: Double, omega: Double): Double = {
val theta, a, b, c, rho = 1.0; val epsilon = 0.1
(1/((epsilon + (theta * ia))) * (((theta * ((a + b) * (i_f * ia))) +
(theta * (rho * i_f))) + -(c * (((i_f * i_f) * omega) * theta))))
}
def field_dc_motor_1I(i_f: Int, ia: Int, omega: Int): Int = {
val theta, a, b, c, rho = 16384; val epsilon = 26214
val tmp0 = ((theta * ia) >> 14)
val tmp1 = ((epsilon + (tmp0 << 4)) >> 4)
val tmp2 = ((16384 << 13) / tmp1)
val tmp3 = ((a + b) >> 1)
val tmp4 = ((i_f * ia) >> 15)
val tmp5 = ((tmp3 * tmp4) >> 14)
val tmp6 = ((theta * tmp5) >> 14)
val tmp7 = ((rho * i_f) >> 14)
val tmp8 = ((theta * tmp7) >> 14)
val tmp9 = (((tmp6 << 2) + tmp8) >> 2)
val tmp10 = ((i_f * i_f) >> 15)
val tmp11 = ((tmp10 * omega) >> 14)
val tmp12 = ((tmp11 * theta) >> 14)
val tmp13 = ((c * tmp12) >> 14)
val tmp14 = -(tmp13)
val tmp15 = (((tmp9 << 1) + tmp14) >> 1)
val tmp16 = ((tmp2 * tmp15) >> 15)
return tmp16 //10
}
def field_dc_motor_2D(i_f: Double, ia: Double, omega: Double): Double = {
val theta, a, b, c, rho = 1.0; val epsilon = 0.1
(1/((epsilon + (theta * ia))) * ((theta * (((a + b) * (i_f * ia)) +
(rho * i_f))) + -(c * (((i_f * i_f) * omega) * theta))))
}
def field_dc_motor_2I(i_f: Int, ia: Int, omega: Int): Int = {
val theta, a, b, c, rho = 16384; val epsilon = 26214
val tmp0 = ((theta * ia) >> 14)
val tmp1 = ((epsilon + (tmp0 << 4)) >> 4)
val tmp2 = ((16384 << 13) / tmp1)
val tmp3 = ((a + b) >> 1)
val tmp4 = ((i_f * ia) >> 15)
val tmp5 = ((tmp3 * tmp4) >> 14)
val tmp6 = ((rho * i_f) >> 14)
val tmp7 = (((tmp5 << 2) + tmp6) >> 2)
val tmp8 = ((theta * tmp7) >> 14)
val tmp9 = ((i_f * i_f) >> 15)
val tmp10 = ((tmp9 * omega) >> 14)
val tmp11 = ((tmp10 * theta) >> 14)
val tmp12 = ((c * tmp11) >> 14)
val tmp13 = -(tmp12)
val tmp14 = (((tmp8 << 1) + tmp13) >> 1)
val tmp15 = ((tmp2 * tmp14) >> 15)
return tmp15 //10
}
def field_dc_motor_3D(i_f: Double, ia: Double, omega: Double): Double = {
val theta, a, b, c, rho = 1.0; val epsilon = 0.1
(1/((epsilon + (theta * ia))) * ((theta * ((((b + a) * i_f) * ia) +
(rho * i_f))) + -(c * (((i_f * i_f) * omega) * theta))))
}
def field_dc_motor_3I(i_f: Int, ia: Int, omega: Int): Int = {
val theta, a, b, c, rho = 16384; val epsilon = 26214
val tmp0 = ((theta * ia) >> 14)
val tmp1 = ((epsilon + (tmp0 << 4)) >> 4)
val tmp2 = ((16384 << 13) / tmp1)
val tmp3 = ((b + a) >> 1)
val tmp4 = ((tmp3 * i_f) >> 14)
val tmp5 = ((tmp4 * ia) >> 15)
val tmp6 = ((rho * i_f) >> 14)
val tmp7 = (((tmp5 << 2) + tmp6) >> 2)
val tmp8 = ((theta * tmp7) >> 14)
val tmp9 = ((i_f * i_f) >> 15)
val tmp10 = ((tmp9 * omega) >> 14)
val tmp11 = ((tmp10 * theta) >> 14)
val tmp12 = ((c * tmp11) >> 14)
val tmp13 = -(tmp12)
val tmp14 = (((tmp8 << 1) + tmp13) >> 1)
val tmp15 = ((tmp2 * tmp14) >> 15)
return tmp15 //10
}
def field_dc_motor_4D(i_f: Double, ia: Double, omega: Double): Double = {
val theta, a, b, c, rho = 1.0; val epsilon = 0.1
((1/((epsilon + (theta * ia))) * (theta * (((b + a) * (ia * i_f)) +
(rho * i_f)))) + (1/((epsilon + (theta * ia))) * -(c * ((i_f * i_f) * (omega * theta)))))
}
def field_dc_motor_4I(i_f: Int, ia: Int, omega: Int): Int = {
val theta, a, b, c, rho = 16384; val epsilon = 26214
val tmp0 = ((theta * ia) >> 14)
val tmp1 = ((epsilon + (tmp0 << 4)) >> 4)
val tmp2 = ((16384 << 13) / tmp1)
val tmp3 = ((b + a) >> 1)
val tmp4 = ((ia * i_f) >> 15)
val tmp5 = ((tmp3 * tmp4) >> 14)
val tmp6 = ((rho * i_f) >> 14)
val tmp7 = (((tmp5 << 2) + tmp6) >> 2)
val tmp8 = ((theta * tmp7) >> 14)
val tmp9 = ((tmp2 * tmp8) >> 15)
val tmp10 = ((theta * ia) >> 14)
val tmp11 = ((epsilon + (tmp10 << 4)) >> 4)
val tmp12 = ((16384 << 13) / tmp11)
val tmp13 = ((i_f * i_f) >> 15)
val tmp14 = ((omega * theta) >> 14)
val tmp15 = ((tmp13 * tmp14) >> 14)
val tmp16 = ((c * tmp15) >> 14)
val tmp17 = -(tmp16)
val tmp18 = ((tmp12 * tmp17) >> 15)
val tmp19 = (((tmp9 << 1) + tmp18) >> 1)
return tmp19 //10
}
def field_dc_motor_5D(i_f: Double, ia: Double, omega: Double): Double = {
val theta, a, b, c, rho = 1.0; val epsilon = 0.1
(1/(((theta * ia) + epsilon)) * ((theta * ((((b * i_f) + (i_f * a)) * ia) +
(rho * i_f))) + -(c * (((i_f * i_f) * omega) * theta))))
}
def field_dc_motor_5I(i_f: Int, ia: Int, omega: Int): Int = {
val theta, a, b, c, rho = 16384; val epsilon = 26214
val tmp0 = ((theta * ia) >> 14)
val tmp1 = (((tmp0 << 4) + epsilon) >> 4)
val tmp2 = ((16384 << 13) / tmp1)
val tmp3 = ((b * i_f) >> 14)
val tmp4 = ((i_f * a) >> 14)
val tmp5 = ((tmp3 + tmp4) >> 1)
val tmp6 = ((tmp5 * ia) >> 15)
val tmp7 = ((rho * i_f) >> 14)
val tmp8 = (((tmp6 << 2) + tmp7) >> 2)
val tmp9 = ((theta * tmp8) >> 14)
val tmp10 = ((i_f * i_f) >> 15)
val tmp11 = ((tmp10 * omega) >> 14)
val tmp12 = ((tmp11 * theta) >> 14)
val tmp13 = ((c * tmp12) >> 14)
val tmp14 = -(tmp13)
val tmp15 = (((tmp9 << 1) + tmp14) >> 1)
val tmp16 = ((tmp2 * tmp15) >> 15)
return tmp16 //10
}
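
  // The hermite_* pairs below are different parenthesizations of the (probabilists') Hermite
  // polynomial He_8(x) = x^8 - 28*x^6 + 210*x^4 - 420*x^2 + 105.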
def hermite_1D(x: Double): Double =(((((((((((x * x) * x) * x) * x) * x) * x) * x) + (-28.0 * (((((x * x) * x) * x) * x) * x))) + (210.0 * (((x * x) * x) * x))) + (-420.0 * (x * x))) + 105.0)
def hermite_1I(x: Long): Long = {
val tmp0 = ((x * x) >> 30)
val tmp1 = ((tmp0 * x) >> 30)
val tmp2 = ((tmp1 * x) >> 31)
val tmp3 = ((tmp2 * x) >> 30)
val tmp4 = ((tmp3 * x) >> 30)
val tmp5 = ((tmp4 * x) >> 31)
val tmp6 = ((tmp5 * x) >> 30)
val tmp7 = ((x * x) >> 30)
val tmp8 = ((tmp7 * x) >> 30)
val tmp9 = ((tmp8 * x) >> 31)
val tmp10 = ((tmp9 * x) >> 30)
val tmp11 = ((tmp10 * x) >> 30)
val tmp12 = ((-1879048192L * tmp11) >> 31)
val tmp13 = (((tmp6 << 2) + tmp12) >> 2)
val tmp14 = ((x * x) >> 30)
val tmp15 = ((tmp14 * x) >> 30)
val tmp16 = ((tmp15 * x) >> 31)
val tmp17 = ((1761607680L * tmp16) >> 31)
val tmp18 = (((tmp13 << 5) + tmp17) >> 5)
val tmp19 = ((x * x) >> 30)
val tmp20 = ((-1761607680L * tmp19) >> 31)
val tmp21 = (((tmp18 << 11) + tmp20) >> 11)
val tmp22 = (((tmp21 << 20) + 1761607680L) >> 20)
return tmp22
}
def hermite_2D(x: Double): Double = ((((((((((x * x) * x) * x) * x) * (x * x)) * x) + (-28.0 * (((((x * x) * x) * x) * x) * x))) + (210.0 * (((x * x) * x) * x))) + (-420.0 * (x * x))) + 105.0)
def hermite_2I(x: Long): Long = {
val tmp0 = ((x * x) >> 30)
val tmp1 = ((tmp0 * x) >> 30)
val tmp2 = ((tmp1 * x) >> 31)
val tmp3 = ((tmp2 * x) >> 30)
val tmp4 = ((x * x) >> 30)
val tmp5 = ((tmp3 * tmp4) >> 31)
val tmp6 = ((tmp5 * x) >> 30)
val tmp7 = ((x * x) >> 30)
val tmp8 = ((tmp7 * x) >> 30)
val tmp9 = ((tmp8 * x) >> 31)
val tmp10 = ((tmp9 * x) >> 30)
val tmp11 = ((tmp10 * x) >> 30)
val tmp12 = ((-1879048192L * tmp11) >> 31)
val tmp13 = (((tmp6 << 2) + tmp12) >> 2)
val tmp14 = ((x * x) >> 30)
val tmp15 = ((tmp14 * x) >> 30)
val tmp16 = ((tmp15 * x) >> 31)
val tmp17 = ((1761607680L * tmp16) >> 31)
val tmp18 = (((tmp13 << 5) + tmp17) >> 5)
val tmp19 = ((x * x) >> 30)
val tmp20 = ((-1761607680L * tmp19) >> 31)
val tmp21 = (((tmp18 << 11) + tmp20) >> 11)
val tmp22 = (((tmp21 << 20) + 1761607680L) >> 20)
return tmp22
}
def hermite_3D(x: Double): Double = ((((((((((x * x) * x) * x) * x) * x) * (x * x)) + (-28.0 * (((((x * x) * x) * x) * x) * x))) + (210.0 * (((x * x) * x) * x))) + (-420.0 * (x * x))) + 105.0)
def hermite_3I(x: Long): Long = {
val tmp0 = ((x * x) >> 30)
val tmp1 = ((tmp0 * x) >> 30)
val tmp2 = ((tmp1 * x) >> 31)
val tmp3 = ((tmp2 * x) >> 30)
val tmp4 = ((tmp3 * x) >> 30)
val tmp5 = ((x * x) >> 30)
val tmp6 = ((tmp4 * tmp5) >> 31)
val tmp7 = ((x * x) >> 30)
val tmp8 = ((tmp7 * x) >> 30)
val tmp9 = ((tmp8 * x) >> 31)
val tmp10 = ((tmp9 * x) >> 30)
val tmp11 = ((tmp10 * x) >> 30)
val tmp12 = ((-1879048192L * tmp11) >> 31)
val tmp13 = (((tmp6 << 2) + tmp12) >> 2)
val tmp14 = ((x * x) >> 30)
val tmp15 = ((tmp14 * x) >> 30)
val tmp16 = ((tmp15 * x) >> 31)
val tmp17 = ((1761607680L * tmp16) >> 31)
val tmp18 = (((tmp13 << 5) + tmp17) >> 5)
val tmp19 = ((x * x) >> 30)
val tmp20 = ((-1761607680L * tmp19) >> 31)
val tmp21 = (((tmp18 << 11) + tmp20) >> 11)
val tmp22 = (((tmp21 << 20) + 1761607680L) >> 20)
return tmp22
}
def hermite_4D(x: Double): Double = ((((((((((x * x) * x) * x) * x) * x) * x) * x) + ((-28.0 * (((((x * x) * x) * x) * x) * x)) + (210.0 * (((x * x) * x) * x)))) + (-420.0 * (x * x))) + 105.0)
def hermite_4I(x: Long): Long = {
val tmp0 = ((x * x) >> 30)
val tmp1 = ((tmp0 * x) >> 30)
val tmp2 = ((tmp1 * x) >> 31)
val tmp3 = ((tmp2 * x) >> 30)
val tmp4 = ((tmp3 * x) >> 30)
val tmp5 = ((tmp4 * x) >> 31)
val tmp6 = ((tmp5 * x) >> 30)
val tmp7 = ((x * x) >> 30)
val tmp8 = ((tmp7 * x) >> 30)
val tmp9 = ((tmp8 * x) >> 31)
val tmp10 = ((tmp9 * x) >> 30)
val tmp11 = ((tmp10 * x) >> 30)
val tmp12 = ((-1879048192L * tmp11) >> 31)
val tmp13 = ((x * x) >> 30)
val tmp14 = ((tmp13 * x) >> 30)
val tmp15 = ((tmp14 * x) >> 31)
val tmp16 = ((1761607680L * tmp15) >> 31)
val tmp17 = (((tmp12 << 3) + tmp16) >> 3)
val tmp18 = (((tmp6 << 2) + tmp17) >> 2)
val tmp19 = ((x * x) >> 30)
val tmp20 = ((-1761607680L * tmp19) >> 31)
val tmp21 = (((tmp18 << 11) + tmp20) >> 11)
val tmp22 = (((tmp21 << 20) + 1761607680L) >> 20)
return tmp22
}
def hermite_5D(x: Double): Double = (((((((((x * x) * (x * x)) * x) * x) * x) * x) + (-28.0 * (((((x * x) * x) * x) * x) * x))) + ((210.0 * (((x * x) * x) * x)) + (-420.0 * (x * x)))) + 105.0)
def hermite_5I(x: Long): Long = {
val tmp0 = ((x * x) >> 30)
val tmp1 = ((x * x) >> 30)
val tmp2 = ((tmp0 * tmp1) >> 31)
val tmp3 = ((tmp2 * x) >> 30)
val tmp4 = ((tmp3 * x) >> 30)
val tmp5 = ((tmp4 * x) >> 31)
val tmp6 = ((tmp5 * x) >> 30)
val tmp7 = ((x * x) >> 30)
val tmp8 = ((tmp7 * x) >> 30)
val tmp9 = ((tmp8 * x) >> 31)
val tmp10 = ((tmp9 * x) >> 30)
val tmp11 = ((tmp10 * x) >> 30)
val tmp12 = ((-1879048192L * tmp11) >> 31)
val tmp13 = (((tmp6 << 2) + tmp12) >> 2)
val tmp14 = ((x * x) >> 30)
val tmp15 = ((tmp14 * x) >> 30)
val tmp16 = ((tmp15 * x) >> 31)
val tmp17 = ((1761607680L * tmp16) >> 31)
val tmp18 = ((x * x) >> 30)
val tmp19 = ((-1761607680L * tmp18) >> 31)
val tmp20 = (((tmp17 << 6) + tmp19) >> 5)
val tmp21 = (((tmp13 << 6) + tmp20) >> 6)
val tmp22 = (((tmp21 << 20) + 1761607680L) >> 20)
return tmp22
}
def hermite_6D(x: Double): Double = ((((((x * ((((x * x) * x) * x) * x)) * x) * x) + (-28.0 * (((((x * x) * x) * x) * x) * x))) + ((((x * x) * x) * (x * 210.0)) + (-420.0 * (x * x)))) + 105.0)
def hermite_6I(x: Long): Long = {
val tmp0 = ((x * x) >> 30)
val tmp1 = ((tmp0 * x) >> 30)
val tmp2 = ((tmp1 * x) >> 31)
val tmp3 = ((tmp2 * x) >> 30)
val tmp4 = ((x * tmp3) >> 30)
val tmp5 = ((tmp4 * x) >> 31)
val tmp6 = ((tmp5 * x) >> 30)
val tmp7 = ((x * x) >> 30)
val tmp8 = ((tmp7 * x) >> 30)
val tmp9 = ((tmp8 * x) >> 31)
val tmp10 = ((tmp9 * x) >> 30)
val tmp11 = ((tmp10 * x) >> 30)
val tmp12 = ((-1879048192L * tmp11) >> 31)
val tmp13 = (((tmp6 << 2) + tmp12) >> 2)
val tmp14 = ((x * x) >> 30)
val tmp15 = ((tmp14 * x) >> 30)
val tmp16 = ((x * 1761607680L) >> 31)
val tmp17 = ((tmp15 * tmp16) >> 31)
val tmp18 = ((x * x) >> 30)
val tmp19 = ((-1761607680L * tmp18) >> 31)
val tmp20 = (((tmp17 << 6) + tmp19) >> 5)
val tmp21 = (((tmp13 << 6) + tmp20) >> 6)
val tmp22 = (((tmp21 << 20) + 1761607680L) >> 20)
return tmp22
}
def hermite_7D(x: Double): Double = ((((((((x * x) * x) * x) * x) * x) * (x * x)) + (-28.0 * (((((x * x) * x) * x) * x) * x))) + (((-420.0 * (x * x)) + (210.0 * ((x * x) * (x * x)))) + 105.0))
def hermite_7I(x: Long): Long = {
val tmp0 = ((x * x) >> 30)
val tmp1 = ((tmp0 * x) >> 30)
val tmp2 = ((tmp1 * x) >> 31)
val tmp3 = ((tmp2 * x) >> 30)
val tmp4 = ((tmp3 * x) >> 30)
val tmp5 = ((x * x) >> 30)
val tmp6 = ((tmp4 * tmp5) >> 31)
val tmp7 = ((x * x) >> 30)
val tmp8 = ((tmp7 * x) >> 30)
val tmp9 = ((tmp8 * x) >> 31)
val tmp10 = ((tmp9 * x) >> 30)
val tmp11 = ((tmp10 * x) >> 30)
val tmp12 = ((-1879048192L * tmp11) >> 31)
val tmp13 = (((tmp6 << 2) + tmp12) >> 2)
val tmp14 = ((x * x) >> 30)
val tmp15 = ((-1761607680L * tmp14) >> 31)
val tmp16 = ((x * x) >> 30)
val tmp17 = ((x * x) >> 30)
val tmp18 = ((tmp16 * tmp17) >> 31)
val tmp19 = ((1761607680L * tmp18) >> 31)
val tmp20 = ((tmp15 + (tmp19 << 6)) >> 5)
val tmp21 = (((tmp20 << 14) + 1761607680L) >> 14)
val tmp22 = (((tmp13 << 6) + tmp21) >> 6)
return tmp22
}
def hermite_8D(x: Double): Double = (((((((x * (x * x)) * x) * x) * x) * (x * x)) + (-28.0 * (((((x * x) * x) * x) * x) * x))) + (((-420.0 * (x * x)) + (210.0 * ((x * (x * x)) * x))) + 105.0))
def hermite_8I(x: Long): Long = {
val tmp0 = ((x * x) >> 30)
val tmp1 = ((x * tmp0) >> 30)
val tmp2 = ((tmp1 * x) >> 31)
val tmp3 = ((tmp2 * x) >> 30)
val tmp4 = ((tmp3 * x) >> 30)
val tmp5 = ((x * x) >> 30)
val tmp6 = ((tmp4 * tmp5) >> 31)
val tmp7 = ((x * x) >> 30)
val tmp8 = ((tmp7 * x) >> 30)
val tmp9 = ((tmp8 * x) >> 31)
val tmp10 = ((tmp9 * x) >> 30)
val tmp11 = ((tmp10 * x) >> 30)
val tmp12 = ((-1879048192L * tmp11) >> 31)
val tmp13 = (((tmp6 << 2) + tmp12) >> 2)
val tmp14 = ((x * x) >> 30)
val tmp15 = ((-1761607680L * tmp14) >> 31)
val tmp16 = ((x * x) >> 30)
val tmp17 = ((x * tmp16) >> 30)
val tmp18 = ((tmp17 * x) >> 31)
val tmp19 = ((1761607680L * tmp18) >> 31)
val tmp20 = ((tmp15 + (tmp19 << 6)) >> 5)
val tmp21 = (((tmp20 << 14) + 1761607680L) >> 14)
val tmp22 = (((tmp13 << 6) + tmp21) >> 6)
return tmp22
}
def hermite_9D(x: Double): Double = ((((((((x * x) * x) * x) * x) * x) * (x * x)) + (-28.0 * (((((x * x) * x) * x) * x) * x))) + (((-420.0 * (x * x)) + (210.0 * (((x * x) * x) * x))) + 105.0))
def hermite_9I(x: Long): Long = {
val tmp0 = ((x * x) >> 30)
val tmp1 = ((tmp0 * x) >> 30)
val tmp2 = ((tmp1 * x) >> 31)
val tmp3 = ((tmp2 * x) >> 30)
val tmp4 = ((tmp3 * x) >> 30)
val tmp5 = ((x * x) >> 30)
val tmp6 = ((tmp4 * tmp5) >> 31)
val tmp7 = ((x * x) >> 30)
val tmp8 = ((tmp7 * x) >> 30)
val tmp9 = ((tmp8 * x) >> 31)
val tmp10 = ((tmp9 * x) >> 30)
val tmp11 = ((tmp10 * x) >> 30)
val tmp12 = ((-1879048192L * tmp11) >> 31)
val tmp13 = (((tmp6 << 2) + tmp12) >> 2)
val tmp14 = ((x * x) >> 30)
val tmp15 = ((-1761607680L * tmp14) >> 31)
val tmp16 = ((x * x) >> 30)
val tmp17 = ((tmp16 * x) >> 30)
val tmp18 = ((tmp17 * x) >> 31)
val tmp19 = ((1761607680L * tmp18) >> 31)
val tmp20 = ((tmp15 + (tmp19 << 6)) >> 5)
val tmp21 = (((tmp20 << 14) + 1761607680L) >> 14)
val tmp22 = (((tmp13 << 6) + tmp21) >> 6)
return tmp22
}
def multi_poly_1D(a: Double, b: Double, c: Double) = ((((c + (a * b)) + 10.0) * (((a * c) + b) + 30.0)) * (((b * c) + a) + 20.0))
def multi_poly_1I(a: Long, b: Long, c: Long): Long = {
val tmp0 = ((a * b) >> 30)
val tmp1 = ((c + (tmp0 << 3)) >> 3)
val tmp2 = (((tmp1 << 3) + 1342177280L) >> 3)
val tmp3 = ((a * c) >> 30)
val tmp4 = (((tmp3 << 3) + b) >> 4)
val tmp5 = (((tmp4 << 3) + 2013265920L) >> 3)
val tmp6 = ((tmp2 * tmp5) >> 30)
val tmp7 = ((b * c) >> 30)
val tmp8 = (((tmp7 << 3) + a) >> 3)
val tmp9 = (((tmp8 << 2) + 1342177280L) >> 2)
val tmp10 = ((tmp6 * tmp9) >> 31)
return tmp10
}
def multi_poly_2D(a: Double, b: Double, c: Double) = (((c + (a * b)) + 10.0) * ((((a * c) + b) + 30.0) * (((b * c) + a) + 20.0)))
def multi_poly_2I(a: Long, b: Long, c: Long): Long = {
val tmp0 = ((a * b) >> 30)
val tmp1 = ((c + (tmp0 << 3)) >> 3)
val tmp2 = (((tmp1 << 3) + 1342177280L) >> 3)
val tmp3 = ((a * c) >> 30)
val tmp4 = (((tmp3 << 3) + b) >> 4)
val tmp5 = (((tmp4 << 3) + 2013265920L) >> 3)
val tmp6 = ((b * c) >> 30)
val tmp7 = (((tmp6 << 3) + a) >> 3)
val tmp8 = (((tmp7 << 2) + 1342177280L) >> 2)
val tmp9 = ((tmp5 * tmp8) >> 31)
val tmp10 = ((tmp2 * tmp9) >> 30)
return tmp10
}
def multi_poly_3D(a: Double, b: Double, c: Double) = (((((a * b) + c) + 10.0) * (((a * c) + b) + 30.0)) * ((b * c) + (a + 20.0)))
def multi_poly_3I(a: Long, b: Long, c: Long): Long = {
val tmp0 = ((a * b) >> 30)
val tmp1 = (((tmp0 << 3) + c) >> 3)
val tmp2 = (((tmp1 << 3) + 1342177280L) >> 3)
val tmp3 = ((a * c) >> 30)
val tmp4 = (((tmp3 << 3) + b) >> 4)
val tmp5 = (((tmp4 << 3) + 2013265920L) >> 3)
val tmp6 = ((tmp2 * tmp5) >> 30)
val tmp7 = ((b * c) >> 30)
val tmp8 = ((a + (1342177280L << 1)) >> 1)
val tmp9 = (((tmp7 << 2) + tmp8) >> 2)
val tmp10 = ((tmp6 * tmp9) >> 31)
return tmp10
}
def multi_poly_4D(a: Double, b: Double, c: Double) = (((((c + (a * b)) + 10.0) * ((a * c) + b)) + (((c + (a * b)) + 10.0) * 30.0)) * ((b * c) + (a + 20.0)))
def multi_poly_4I(a: Long, b: Long, c: Long): Long = {
val tmp0 = ((a * b) >> 30)
val tmp1 = ((c + (tmp0 << 3)) >> 3)
val tmp2 = (((tmp1 << 3) + 1342177280L) >> 3)
val tmp3 = ((a * c) >> 30)
val tmp4 = (((tmp3 << 3) + b) >> 4)
val tmp5 = ((tmp2 * tmp4) >> 30)
val tmp6 = ((a * b) >> 30)
val tmp7 = ((c + (tmp6 << 3)) >> 3)
val tmp8 = (((tmp7 << 3) + 1342177280L) >> 3)
val tmp9 = ((tmp8 * 2013265920L) >> 31)
val tmp10 = (((tmp5 << 2) + tmp9) >> 2)
val tmp11 = ((b * c) >> 30)
val tmp12 = ((a + (1342177280L << 1)) >> 1)
val tmp13 = (((tmp11 << 2) + tmp12) >> 2)
val tmp14 = ((tmp10 * tmp13) >> 31)
return tmp14
}
def multi_poly_5D(a: Double, b: Double, c: Double) = (((((a * b) + c) * (((a * c) + b) + 30.0)) + (10.0 * (((c * a) + b) + 30.0))) * ((b * c) + (a + 20.0)))
def multi_poly_5I(a: Long, b: Long, c: Long): Long = {
val tmp0 = ((a * b) >> 30)
val tmp1 = (((tmp0 << 3) + c) >> 3)
val tmp2 = ((a * c) >> 30)
val tmp3 = (((tmp2 << 3) + b) >> 4)
val tmp4 = (((tmp3 << 3) + 2013265920L) >> 3)
val tmp5 = ((tmp1 * tmp4) >> 30)
val tmp6 = ((c * a) >> 30)
val tmp7 = (((tmp6 << 3) + b) >> 4)
val tmp8 = (((tmp7 << 3) + 2013265920L) >> 3)
val tmp9 = ((1342177280L * tmp8) >> 30)
val tmp10 = (((tmp5 << 3) + tmp9) >> 3)
val tmp11 = ((b * c) >> 30)
val tmp12 = ((a + (1342177280L << 1)) >> 1)
val tmp13 = (((tmp11 << 2) + tmp12) >> 2)
val tmp14 = ((tmp10 * tmp13) >> 31)
return tmp14
}
def multi_poly_6D(a: Double, b: Double, c: Double) = (((((c + (b * a)) + 10.0) * ((a * c) + b)) + (((b * a) + (c + 10.0)) * 30.0)) * ((c * b) + (20.0 + a)))
def multi_poly_6I(a: Long, b: Long, c: Long): Long = {
val tmp0 = ((b * a) >> 30)
val tmp1 = ((c + (tmp0 << 3)) >> 3)
val tmp2 = (((tmp1 << 3) + 1342177280L) >> 3)
val tmp3 = ((a * c) >> 30)
val tmp4 = (((tmp3 << 3) + b) >> 4)
val tmp5 = ((tmp2 * tmp4) >> 30)
val tmp6 = ((b * a) >> 30)
val tmp7 = ((c + 1342177280L) >> 1)
val tmp8 = (((tmp6 << 2) + tmp7) >> 2)
val tmp9 = ((tmp8 * 2013265920L) >> 31)
val tmp10 = (((tmp5 << 2) + tmp9) >> 2)
val tmp11 = ((c * b) >> 30)
val tmp12 = (((1342177280L << 1) + a) >> 1)
val tmp13 = (((tmp11 << 2) + tmp12) >> 2)
val tmp14 = ((tmp10 * tmp13) >> 31)
return tmp14
}
def multi_poly_7D(a: Double, b: Double, c: Double) = ((((((c + (b * a)) + 10.0) * (a * c)) + (((c + (b * a)) + 10.0) * b)) + (30.0 * ((a * b) + (c + 10.0)))) * ((b * c) + (a + 20.0)))
def multi_poly_7I(a: Long, b: Long, c: Long): Long = {
val tmp0 = ((b * a) >> 30)
val tmp1 = ((c + (tmp0 << 3)) >> 3)
val tmp2 = (((tmp1 << 3) + 1342177280L) >> 3)
val tmp3 = ((a * c) >> 30)
val tmp4 = ((tmp2 * tmp3) >> 31)
val tmp5 = ((b * a) >> 30)
val tmp6 = ((c + (tmp5 << 3)) >> 3)
val tmp7 = (((tmp6 << 3) + 1342177280L) >> 3)
val tmp8 = ((tmp7 * b) >> 30)
val tmp9 = (((tmp4 << 4) + tmp8) >> 4)
val tmp10 = ((a * b) >> 30)
val tmp11 = ((c + 1342177280L) >> 1)
val tmp12 = (((tmp10 << 2) + tmp11) >> 2)
val tmp13 = ((2013265920L * tmp12) >> 31)
val tmp14 = (((tmp9 << 2) + tmp13) >> 2)
val tmp15 = ((b * c) >> 30)
val tmp16 = ((a + (1342177280L << 1)) >> 1)
val tmp17 = (((tmp15 << 2) + tmp16) >> 2)
val tmp18 = ((tmp14 * tmp17) >> 31)
return tmp18
}
def multi_poly_8D(a: Double, b: Double, c: Double) = ((((((c + (b * a)) + 10.0) * (a * c)) + (b * ((c + (b * a)) + 10.0))) * ((b * c) + (a + 20.0))) + ((30.0 * ((a * b) + (c + 10.0))) * ((b * c) + (a + 20.0))))
def multi_poly_8I(a: Long, b: Long, c: Long): Long = {
val tmp0 = ((b * a) >> 30)
val tmp1 = ((c + (tmp0 << 3)) >> 3)
val tmp2 = (((tmp1 << 3) + 1342177280L) >> 3)
val tmp3 = ((a * c) >> 30)
val tmp4 = ((tmp2 * tmp3) >> 31)
val tmp5 = ((b * a) >> 30)
val tmp6 = ((c + (tmp5 << 3)) >> 3)
val tmp7 = (((tmp6 << 3) + 1342177280L) >> 3)
val tmp8 = ((b * tmp7) >> 30)
val tmp9 = (((tmp4 << 4) + tmp8) >> 4)
val tmp10 = ((b * c) >> 30)
val tmp11 = ((a + (1342177280L << 1)) >> 1)
val tmp12 = (((tmp10 << 2) + tmp11) >> 2)
val tmp13 = ((tmp9 * tmp12) >> 31)
val tmp14 = ((a * b) >> 30)
val tmp15 = ((c + 1342177280L) >> 1)
val tmp16 = (((tmp14 << 2) + tmp15) >> 2)
val tmp17 = ((2013265920L * tmp16) >> 31)
val tmp18 = ((b * c) >> 30)
val tmp19 = ((a + (1342177280L << 1)) >> 1)
val tmp20 = (((tmp18 << 2) + tmp19) >> 2)
val tmp21 = ((tmp17 * tmp20) >> 31)
val tmp22 = (((tmp13 << 2) + tmp21) >> 2)
return tmp22
}
def multi_poly_9D(a: Double, b: Double, c: Double) = ((((((c + (b * a)) * a) + (10.0 * a)) * c) + ((((c + (b * a)) + 10.0) * b) + ((30.0 * (a * b)) + (30.0 * (c + 10.0))))) * ((c * b) + (a + 20.0)))
def multi_poly_9I(a: Long, b: Long, c: Long): Long = {
val tmp0 = ((b * a) >> 30)
val tmp1 = ((c + (tmp0 << 3)) >> 3)
val tmp2 = ((tmp1 * a) >> 30)
val tmp3 = ((1342177280L * a) >> 30)
val tmp4 = (((tmp2 << 3) + tmp3) >> 3)
val tmp5 = ((tmp4 * c) >> 31)
val tmp6 = ((b * a) >> 30)
val tmp7 = ((c + (tmp6 << 3)) >> 3)
val tmp8 = (((tmp7 << 3) + 1342177280L) >> 3)
val tmp9 = ((tmp8 * b) >> 30)
val tmp10 = ((a * b) >> 30)
val tmp11 = ((2013265920L * tmp10) >> 31)
val tmp12 = ((c + 1342177280L) >> 1)
val tmp13 = ((2013265920L * tmp12) >> 31)
val tmp14 = (((tmp11 << 2) + tmp13) >> 2)
val tmp15 = ((tmp9 + (tmp14 << 2)) >> 2)
val tmp16 = (((tmp5 << 2) + tmp15) >> 2)
val tmp17 = ((c * b) >> 30)
val tmp18 = ((a + (1342177280L << 1)) >> 1)
val tmp19 = (((tmp17 << 2) + tmp18) >> 2)
val tmp20 = ((tmp16 * tmp19) >> 31)
return tmp20
}
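
  // The doppler_* pairs below appear to evaluate c*f / (c + u)^2 with c = 331.4 + 0.6*t (a common
  // linear approximation of the speed of sound at temperature t), i.e. the magnitude of the
  // derivative of the Doppler-shifted frequency c*f / (c + u) with respect to u, under different
  // association orders.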
def doppler_1D(t: Double, f: Double, u: Double): Double = {
//println("t: " + t + " f:" + f + " u:" + u)
val x1 = ((331.4 + (0.6 * t)) * f)
val x2 = 1/((((331.4 + (0.6 * t)) + u) * ((331.4 + (0.6 * t)) + u)))
val result = x1 * x2
//println("x1 " + x1 + " x2 " + x2 + " result " + result)
result
}
def doppler_1I(t: Long, f: Long, u: Long): Long = {
val tmp0 = ((1288490189 * t) >> 30)
val tmp1 = (((1389992346 << 4) + tmp0) >> 4)
val tmp2 = ((tmp1 * f) >> 30)
val tmp3 = ((tmp2 * 1073741824) >> 30)
val tmp4 = ((1288490189 * t) >> 30)
val tmp5 = (((1389992346 << 4) + tmp4) >> 4)
val tmp6 = (((tmp5 << 2) + u) >> 2)
val tmp7 = ((1288490189 * t) >> 30)
val tmp8 = (((1389992346 << 4) + tmp7) >> 4)
val tmp9 = (((tmp8 << 2) + u) >> 2)
val tmp10 = ((tmp6 * tmp9) >> 31)
//println("t: " + t + " f:" + f + " u:" + u)
//println(tmp10)
val tmp11 = ((tmp3 << 27) / tmp10)
return tmp11 //22
}
def doppler_2D(t: Double, f: Double, u: Double): Double = {
(((331.4 + (0.6 * t)) * f) * 1/(((331.4 + ((0.6 * t) + u)) * (331.4 + ((0.6 * t) + u)))))
}
def doppler_2I(t: Long, f: Long, u: Long): Long = {
val tmp0 = ((1288490189 * t) >> 30)
val tmp1 = (((1389992346 << 4) + tmp0) >> 4)
val tmp2 = ((tmp1 * f) >> 30)
val tmp3 = ((tmp2 * 1073741824) >> 30)
val tmp4 = ((1288490189 * t) >> 30)
val tmp5 = ((tmp4 + (u << 2)) >> 3)
val tmp6 = (((1389992346 << 1) + tmp5) >> 1)
val tmp7 = ((1288490189 * t) >> 30)
val tmp8 = ((tmp7 + (u << 2)) >> 3)
val tmp9 = (((1389992346 << 1) + tmp8) >> 1)
val tmp10 = ((tmp6 * tmp9) >> 31)
val tmp11 = ((tmp3 << 27) / tmp10)
return tmp11 //22
}
def doppler_3D(t: Double, f: Double, u: Double): Double = {
(((331.4 + (t * 0.6)) * f) * (1/(((331.4 + (t * 0.6)) + u)) * 1/(((331.4 + (t * 0.6)) + u))))
}
def doppler_3I(t: Long, f: Long, u: Long): Long = {
val tmp0 = ((t * 1288490189) >> 30)
val tmp1 = (((1389992346 << 4) + tmp0) >> 4)
val tmp2 = ((tmp1 * f) >> 30)
val tmp3 = ((t * 1288490189) >> 30)
val tmp4 = (((1389992346 << 4) + tmp3) >> 4)
val tmp5 = (((tmp4 << 2) + u) >> 2)
val tmp6 = ((1073741824 << 23) / tmp5)
val tmp7 = ((tmp6 * 1073741824) >> 30)
val tmp8 = ((t * 1288490189) >> 30)
val tmp9 = (((1389992346 << 4) + tmp8) >> 4)
val tmp10 = (((tmp9 << 2) + u) >> 2)
val tmp11 = ((tmp7 << 22) / tmp10)
val tmp12 = ((tmp2 * tmp11) >> 16)
return tmp12 //23
}
def doppler_4D(t: Double, f: Double, u: Double): Double = {
((1/(((331.4 + ((0.6 * t) + u)) * (331.4 + (u + (0.6 * t))))) * ((0.6 * t) + 331.4)) * f)
}
def doppler_4I(t: Long, f: Long, u: Long): Long = {
val tmp0 = ((1288490189 * t) >> 30)
val tmp1 = ((tmp0 + (u << 2)) >> 3)
val tmp2 = (((1389992346 << 1) + tmp1) >> 1)
val tmp3 = ((1288490189 * t) >> 30)
val tmp4 = (((u << 2) + tmp3) >> 3)
val tmp5 = (((1389992346 << 1) + tmp4) >> 1)
val tmp6 = ((tmp2 * tmp5) >> 31)
val tmp7 = ((1073741824 << 14) / tmp6)
val tmp8 = ((1288490189 * t) >> 30)
val tmp9 = ((tmp8 + (1389992346 << 4)) >> 4)
val tmp10 = ((tmp7 * tmp9) >> 22)
val tmp11 = ((tmp10 * f) >> 25)
return tmp11 // 22
}
}
|
malyzajko/xfp
|
analysis_tool/src/xfp/simulation/TestExamples.scala
|
Scala
|
bsd-3-clause
| 34,262
|
package arrxml.arrow
import annotation.implicitNotFound
@implicitNotFound(msg = "No instance in scope for ArrowIf[${=>>}].")
trait ArrowIf[=>>[-_, +_]] extends ArrowList[=>>] {
/**
* if lifted to arrows
*/
def ifA[A, B, C](predicate : A =>> B)(a1 : A =>> C)(a2 : A =>> C) : A =>> C
/**
* shortcut: @ ifP p = ifA (isA p) @
*/
def ifP[A, B](predicate : A ⇒ Boolean)(a1 : A =>> B)(a2 : A =>> B) : A =>> B =
ifA(isA(predicate))(a1)(a2)
/**
* negation: @ neg f = ifA f none this @
*/
def neg[A, B](a : A =>> B) : A =>> A =
ifA(a)(none[A, A])(self)
/**
* @ f \\`when\\` g @ : when the predicate g holds, f is applied, else the identity filter this
*/
def when[A, B](a : A =>> A)(predicate : A =>> B) : (A =>> A) =
ifA(predicate)(a)(self)
/**
* shortcut: @ f \\`whenP\\` p = f \\`when\\` (isA p) @
*/
def whenP[A](a : A =>> A)(predicate : A ⇒ Boolean) : (A =>> A) =
ifP(predicate)(a)(self)
/**
* @ f \\`whenNot\\` g @ : when the predicate g does not hold, f is applied, else the identity filter this
*/
def whenNot[A, B](a : A =>> A)(predicate : A =>> B) : (A =>> A) =
ifA(predicate)(self)(a)
/**
* like 'whenP'
*/
def whenNotP[A](a : A =>> A)(predicate : A ⇒ Boolean) : (A =>> A) =
ifP(predicate)(self)(a)
/**
* @ g \\`guards\\` f @ : when the predicate g holds, f is applied, else none
*/
def guards[A, B, C](predicate : A =>> B)(a : A =>> C) : (A =>> C) =
ifA(predicate)(a)(none[A, C])
/**
* like 'whenP'
*/
def guardsP[A, B](predicate : A ⇒ Boolean)(a : A =>> B) : (A =>> B) =
ifP(predicate)(a)(none[A, B])
/**
* shortcut for @ f `guards` this @
*/
def filterA[A, B](predicate : A =>> B) : (A =>> A) =
ifA(predicate)(self)(none[A, A])
/**
* @ f \\`containing\\` g @ : keep only those results from f for which g holds
*
* definition: @ f \\`containing\\` g = f >>> g \\`guards\\` this @
*/
def containing[A, B, C](a1 : A =>> B)(a2 : B =>> C) : (A =>> B) =
>>>(a1, guards(a2)(self))
/**
* @ f \\`notContaining\\` g @ : keep only those results from f for which g does not hold
*
* definition: @ f \\`notContaining\\` g = f >>> ifA g none this @
*/
def notContaining[A, B, C](a1 : A =>> B)(a2 : B =>> C) : (A =>> B) =
>>>(a1, ifA(a2)(none[B, B])(self))
/**
* @ f \\`orElse\\` g @ : directional choice: if f succeeds, the result of f is the result, else g is applied
*/
def orElse[A, B](a1 : A =>> B)(a2 : A =>> B) : (A =>> B) // TODO implementation
/**
*
* generalisation of 'orElse' for multi way branches like in case expressions.
*
* An auxiliary data type 'IfThen' with an infix constructor ':->' is used for writing multi way branches
*
* example: @ choiceA [ p1 :-> e1, p2 :-> e2, this :-> default ] @
*/
def choiceA[A, B, C] : List[IfThen[A =>> B, A =>> C]] ⇒ (A =>> C)
/**
   * tag a value with Left or Right: if the arrow succeeds, the input is tagged with Left, else with Right
*/
def tagA[A, B](a : A =>> B) : (A =>> Either[A, A]) = {
val left : A ⇒ Either[A, A] = Left.apply[A, A] _
val right : A ⇒ Either[A, A] = Right.apply[A, A] _
ifA(a)(arr(left))(arr(right))
}
/**
* split a list value with an arrow and returns a pair of lists.
* This is the arrow version of 'span'. The arrow is deterministic.
*
* example: @ runLA (spanA (isA (\\/= \\'-\\'))) \\"abc-def\\" @ gives @ [(\\"abc\\",\\"-def\\")] @ as result
*/
def spanA[A](a : A =>> A) : (List[A] =>> (List[A], List[A])) = {
val predicate = >>>(arrL((l : List[A]) ⇒ l.take(1)), a)
val split = combine(arr((l : List[A]) ⇒ l.head), >>>(arr((l : List[A]) ⇒ l.tail), spanA(a)))
val rejoin = arr((tup : (A, (List[A], List[A]))) ⇒ (tup._1 :: tup._2._1, tup._2._2))
val elseCase = arr((l : List[A]) ⇒ (List.empty[A], l))
ifA(predicate)(>>>(split, rejoin))(elseCase)
}
/**
* partition a list of values into a pair of lists
*
    * This is the arrow version of 'Data.List.partition'.
*/
def partitionA[A](a : A =>> A) : List[A] =>> (List[A], List[A]) = {
val part = listA(>>>(arrL((as : List[A]) ⇒ as), tagA(a)))
def untag(l : List[Either[A, A]]) = {
val (l1, l2) = l.partition(_.isLeft)
(l1.map(_.fold(((a : A) ⇒ a), ((a : A) ⇒ a))), l2.map(_.fold(((a : A) ⇒ a), ((a : A) ⇒ a))))
}
>>>(part, arr(untag _))
}
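   /*
    * Sketch of the intended behaviour (hypothetical values): with a predicate arrow such as
    * isA((i : Int) ⇒ i % 2 == 0), applying partitionA to List(1, 2, 3, 4, 5) is expected to
    * yield (List(2, 4), List(1, 3, 5)): elements for which the arrow succeeds go into the
    * first list, the rest into the second.
    */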
}
object ArrowIf {
@inline def apply[F[-_, +_]](implicit ev : ArrowIf[F]) : ArrowIf[F] = ev
}
trait ToArrowIfOps {
// For endoarrows.
implicit class ArrowIfOps0[=>>[-_, +_], A](v : A =>> A) {
final def when[B](predicate : A =>> B)(implicit ev : ArrowIf[=>>]) = ev.when(v)(predicate)
final def whenP(predicate : A ⇒ Boolean)(implicit ev : ArrowIf[=>>]) = ev.whenP(v)(predicate)
final def whenNot[B](predicate : A =>> B)(implicit ev : ArrowIf[=>>]) = ev.whenNot(v)(predicate)
final def whenNotP(predicate : A ⇒ Boolean)(implicit ev : ArrowIf[=>>]) = ev.whenNotP(v)(predicate)
}
implicit class ArrowIfOps1[=>>[-_, +_], A, B](v : A =>> B) {
final def guards[C](a : A =>> C)(implicit ev : ArrowIf[=>>]) = ev.guards(v)(a)
}
}
trait ToArrowIfFuncOps {
implicit class ArrowIfFuncOps[=>>[-_, +_], A](v : A ⇒ Boolean) {
final def guardsP[B](a : A =>> B)(implicit ev : ArrowIf[=>>]) = ev.guardsP(v)(a)
}
}
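/** Auxiliary data type used by 'choiceA' for multi-way branches (see the comment on 'choiceA' above). */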
class IfThen[A, B]
|
ChrisNeveu/ArrXml
|
src/main/scala/arrxml/arrow/ArrowIf.scala
|
Scala
|
bsd-3-clause
| 5,514
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.stream.table
import org.apache.flink.api.common.time.Time
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.planner.runtime.utils.StreamingWithStateTestBase.StateBackendMode
import org.apache.flink.table.planner.runtime.utils.TestData._
import org.apache.flink.table.planner.runtime.utils.TimeTestUtil.TimestampAndWatermarkWithOffset
import org.apache.flink.table.planner.runtime.utils.{StreamingWithStateTestBase, TestingAppendSink}
import org.apache.flink.table.planner.utils.Top3
import org.apache.flink.types.Row
import org.junit.Assert._
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import java.math.BigDecimal
@RunWith(classOf[Parameterized])
class GroupWindowTableAggregateITCase(mode: StateBackendMode)
extends StreamingWithStateTestBase(mode) {
val data = List(
(1L, 1, "Hi"),
(2L, 2, "Hello"),
(4L, 2, "Hello"),
(8L, 3, "Hello world"),
(16L, 3, "Hello world"))
val data2 = List(
(1L, 1, 1d, 1f, new BigDecimal("1"), "Hi"),
(2L, 2, 2d, 2f, new BigDecimal("2"), "Hallo"),
(3L, 2, 2d, 2f, new BigDecimal("2"), "Hello"),
(4L, 5, 5d, 5f, new BigDecimal("5"), "Hello"),
(7L, 3, 3d, 3f, new BigDecimal("3"), "Hello"),
(8L, 3, 3d, 3f, new BigDecimal("3"), "Hello world"),
(16L, 4, 4d, 4f, new BigDecimal("4"), "Hello world"),
(32L, 4, 4d, 4f, new BigDecimal("4"), null.asInstanceOf[String]))
@Test
def testProcessingTimeSlidingGroupWindowOverCount(): Unit = {
tEnv.getConfig.setIdleStateRetentionTime(Time.hours(1), Time.hours(2))
val stream = failingDataSource(tupleData3)
val table = stream.toTable(tEnv, 'int, 'long, 'string, 'proctime.proctime)
val top3 = new Top3
val windowedTable = table
.window(Slide over 4.rows every 2.rows on 'proctime as 'w)
.groupBy('w, 'long)
.flatAggregate(call(top3, 'int) as ('x, 'y))
.select('long, 'x, 'y)
val sink = new TestingAppendSink
windowedTable.toAppendStream[Row].addSink(sink)
env.execute()
val expected = Seq("4,8,8", "4,9,9", "4,10,10", "5,12,12", "5,13,13", "5,14,14",
"6,17,17", "6,18,18", "6,19,19", "6,19,19", "6,20,20", "6,21,21")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
@Test
def testEventTimeSessionGroupWindowOverTime(): Unit = {
    // To verify the "merge" functionality, we create this test with the following characteristics:
// 1. set the Parallelism to 1, and have the test data out of order
// 2. create a waterMark with 10ms offset to delay the window emission by 10ms
val sessionWindowTestData = List(
(1L, 1, "Hello"),
(2L, 2, "Hello"),
(8L, 8, "Hello"),
(9L, 9, "Hello World"),
(4L, 4, "Hello"),
(16L, 16, "Hello"))
val top3 = new Top3
val stream = failingDataSource(sessionWindowTestData)
.assignTimestampsAndWatermarks(new TimestampAndWatermarkWithOffset[(Long, Int, String)](10L))
val table = stream.toTable(tEnv, 'long, 'int, 'string, 'rowtime.rowtime)
val windowedTable = table
.window(Session withGap 5.milli on 'rowtime as 'w)
.groupBy('w, 'string)
.flatAggregate(top3('int))
.select('string, 'f0, 'f1)
val sink = new TestingAppendSink
windowedTable.toAppendStream[Row].addSink(sink)
env.execute()
val expected = Seq("Hello,2,2", "Hello,4,4", "Hello,8,8", "Hello World,9,9", "Hello,16,16")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
@Test
def testAllProcessingTimeTumblingGroupWindowOverCount(): Unit = {
tEnv.getConfig.setIdleStateRetentionTime(Time.hours(1), Time.hours(2))
val stream = failingDataSource(tupleData3)
val table = stream.toTable(tEnv, 'int, 'long, 'string, 'proctime.proctime)
val top3 = new Top3
val windowedTable = table
.window(Tumble over 7.rows on 'proctime as 'w)
.groupBy('w)
.flatAggregate(top3('int))
.select('f0, 'f1)
val sink = new TestingAppendSink
windowedTable.toAppendStream[Row].addSink(sink)
env.execute()
val expected = Seq("5,5", "6,6", "7,7", "12,12", "13,13", "14,14", "19,19", "20,20", "21,21")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
@Test
def testEventTimeTumblingWindow(): Unit = {
val stream = failingDataSource(tupleData3)
.assignTimestampsAndWatermarks(new TimestampAndWatermarkWithOffset[(Int, Long, String)](0L))
val table = stream.toTable(tEnv, 'int, 'long, 'string, 'rowtime.rowtime)
val top3 = new Top3
val windowedTable = table
.window(Tumble over 10.milli on 'rowtime as 'w)
.groupBy('w, 'long)
.flatAggregate(top3('int) as ('x, 'y))
.select('w.start, 'w.end, 'long, 'x, 'y + 1)
val sink = new TestingAppendSink
windowedTable.toAppendStream[Row].addSink(sink)
env.execute()
val expected = Seq(
"1970-01-01T00:00,1970-01-01T00:00:00.010,1,1,2",
"1970-01-01T00:00,1970-01-01T00:00:00.010,2,2,3",
"1970-01-01T00:00,1970-01-01T00:00:00.010,2,3,4",
"1970-01-01T00:00,1970-01-01T00:00:00.010,3,4,5",
"1970-01-01T00:00,1970-01-01T00:00:00.010,3,5,6",
"1970-01-01T00:00,1970-01-01T00:00:00.010,3,6,7",
"1970-01-01T00:00,1970-01-01T00:00:00.010,4,7,8",
"1970-01-01T00:00,1970-01-01T00:00:00.010,4,8,9",
"1970-01-01T00:00,1970-01-01T00:00:00.010,4,9,10",
"1970-01-01T00:00:00.010,1970-01-01T00:00:00.020,4,10,11",
"1970-01-01T00:00:00.010,1970-01-01T00:00:00.020,5,13,14",
"1970-01-01T00:00:00.010,1970-01-01T00:00:00.020,5,14,15",
"1970-01-01T00:00:00.010,1970-01-01T00:00:00.020,5,15,16",
"1970-01-01T00:00:00.010,1970-01-01T00:00:00.020,6,17,18",
"1970-01-01T00:00:00.010,1970-01-01T00:00:00.020,6,18,19",
"1970-01-01T00:00:00.010,1970-01-01T00:00:00.020,6,19,20",
"1970-01-01T00:00:00.020,1970-01-01T00:00:00.030,6,21,22",
"1970-01-01T00:00:00.020,1970-01-01T00:00:00.030,6,20,21")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
@Test
def testGroupWindowWithoutKeyInProjection(): Unit = {
val data = List(
(1L, 1, "Hi", 1, 1),
(2L, 2, "Hello", 2, 2),
(4L, 2, "Hello", 2, 2),
(8L, 3, "Hello world", 3, 3),
(16L, 3, "Hello world", 3, 3))
val stream = failingDataSource(data)
val table = stream.toTable(tEnv, 'long, 'int, 'string, 'int2, 'int3, 'proctime.proctime)
val top3 = new Top3
val windowedTable = table
.window(Slide over 2.rows every 1.rows on 'proctime as 'w)
.groupBy('w, 'int2, 'int3, 'string)
.flatAggregate(top3('int))
.select('f0, 'f1)
val sink = new TestingAppendSink
windowedTable.toAppendStream[Row].addSink(sink)
env.execute()
val expected = Seq("2,2", "2,2", "3,3", "3,3")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
// ----------------------------------------------------------------------------------------------
// Sliding windows
// ----------------------------------------------------------------------------------------------
@Test
def testAllEventTimeSlidingGroupWindowOverTime(): Unit = {
// please keep this test in sync with the bounded variant
val stream = failingDataSource(data2)
.assignTimestampsAndWatermarks(
new TimestampAndWatermarkWithOffset[(Long, Int, Double, Float, BigDecimal, String)](0L))
val table = stream.toTable(tEnv, 'long.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
val top3 = new Top3
val windowedTable = table
.window(Slide over 5.milli every 2.milli on 'long as 'w)
.groupBy('w)
.flatAggregate(top3('int))
.select('f0, 'f1, 'w.start, 'w.end, 'w.rowtime)
val sink = new TestingAppendSink
windowedTable.toAppendStream[Row].addSink(sink)
env.execute()
val expected = Seq(
"1,1,1969-12-31T23:59:59.998,1970-01-01T00:00:00.003,1970-01-01T00:00:00.002",
"2,2,1969-12-31T23:59:59.998,1970-01-01T00:00:00.003,1970-01-01T00:00:00.002",
"2,2,1970-01-01T00:00,1970-01-01T00:00:00.005,1970-01-01T00:00:00.004",
"5,5,1970-01-01T00:00,1970-01-01T00:00:00.005,1970-01-01T00:00:00.004",
"2,2,1970-01-01T00:00,1970-01-01T00:00:00.005,1970-01-01T00:00:00.004",
"2,2,1970-01-01T00:00:00.002,1970-01-01T00:00:00.007,1970-01-01T00:00:00.006",
"2,2,1970-01-01T00:00:00.002,1970-01-01T00:00:00.007,1970-01-01T00:00:00.006",
"5,5,1970-01-01T00:00:00.002,1970-01-01T00:00:00.007,1970-01-01T00:00:00.006",
"3,3,1970-01-01T00:00:00.004,1970-01-01T00:00:00.009,1970-01-01T00:00:00.008",
"3,3,1970-01-01T00:00:00.004,1970-01-01T00:00:00.009,1970-01-01T00:00:00.008",
"5,5,1970-01-01T00:00:00.004,1970-01-01T00:00:00.009,1970-01-01T00:00:00.008",
"3,3,1970-01-01T00:00:00.006,1970-01-01T00:00:00.011,1970-01-01T00:00:00.010",
"3,3,1970-01-01T00:00:00.006,1970-01-01T00:00:00.011,1970-01-01T00:00:00.010",
"3,3,1970-01-01T00:00:00.008,1970-01-01T00:00:00.013,1970-01-01T00:00:00.012",
"4,4,1970-01-01T00:00:00.012,1970-01-01T00:00:00.017,1970-01-01T00:00:00.016",
"4,4,1970-01-01T00:00:00.014,1970-01-01T00:00:00.019,1970-01-01T00:00:00.018",
"4,4,1970-01-01T00:00:00.016,1970-01-01T00:00:00.021,1970-01-01T00:00:00.020",
"4,4,1970-01-01T00:00:00.028,1970-01-01T00:00:00.033,1970-01-01T00:00:00.032",
"4,4,1970-01-01T00:00:00.030,1970-01-01T00:00:00.035,1970-01-01T00:00:00.034",
"4,4,1970-01-01T00:00:00.032,1970-01-01T00:00:00.037,1970-01-01T00:00:00.036")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
@Test
def testEventTimeSlidingGroupWindowOverTimeOverlappingSplitPane(): Unit = {
// please keep this test in sync with the bounded variant
val stream = failingDataSource(data2)
.assignTimestampsAndWatermarks(
new TimestampAndWatermarkWithOffset[(Long, Int, Double, Float, BigDecimal, String)](0L))
val table = stream.toTable(tEnv, 'long.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
val top3 = new Top3
val windowedTable = table
.window(Slide over 5.milli every 4.milli on 'long as 'w)
.groupBy('w, 'string)
.flatAggregate(top3('int))
.select('string, 'f0, 'f1, 'w.start, 'w.end)
val sink = new TestingAppendSink
windowedTable.toAppendStream[Row].addSink(sink)
env.execute()
val expected = Seq(
"Hello,2,2,1970-01-01T00:00,1970-01-01T00:00:00.005",
"Hello,5,5,1970-01-01T00:00,1970-01-01T00:00:00.005",
"Hallo,2,2,1970-01-01T00:00,1970-01-01T00:00:00.005",
"Hello world,3,3,1970-01-01T00:00:00.004,1970-01-01T00:00:00.009",
"Hello world,3,3,1970-01-01T00:00:00.008,1970-01-01T00:00:00.013",
"Hello,3,3,1970-01-01T00:00:00.004,1970-01-01T00:00:00.009",
"Hi,1,1,1970-01-01T00:00,1970-01-01T00:00:00.005",
"Hello,5,5,1970-01-01T00:00:00.004,1970-01-01T00:00:00.009",
"Hello world,4,4,1970-01-01T00:00:00.012,1970-01-01T00:00:00.017",
"null,4,4,1970-01-01T00:00:00.028,1970-01-01T00:00:00.033",
"Hello world,4,4,1970-01-01T00:00:00.016,1970-01-01T00:00:00.021",
"null,4,4,1970-01-01T00:00:00.032,1970-01-01T00:00:00.037")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
@Test
def testEventTimeSlidingGroupWindowOverTimeNonOverlappingSplitPane(): Unit = {
// please keep this test in sync with the bounded variant
val stream = failingDataSource(data2)
.assignTimestampsAndWatermarks(
new TimestampAndWatermarkWithOffset[(Long, Int, Double, Float, BigDecimal, String)](0L))
val table = stream.toTable(tEnv, 'long.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
val top3 = new Top3
val windowedTable = table
.window(Slide over 3.milli every 10.milli on 'long as 'w)
.groupBy('w, 'string)
.flatAggregate(top3('int))
.select('string, 'f0, 'f1, 'w.start, 'w.end)
val sink = new TestingAppendSink
windowedTable.toAppendStream[Row].addSink(sink)
env.execute()
val expected = Seq(
"null,4,4,1970-01-01T00:00:00.030,1970-01-01T00:00:00.033",
"Hallo,2,2,1970-01-01T00:00,1970-01-01T00:00:00.003",
"Hi,1,1,1970-01-01T00:00,1970-01-01T00:00:00.003")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
@Test
def testEventTimeGroupWindowWithoutExplicitTimeField(): Unit = {
val stream = failingDataSource(data2)
.assignTimestampsAndWatermarks(
new TimestampAndWatermarkWithOffset[(Long, Int, Double, Float, BigDecimal, String)](0L))
.map(t => (t._2, t._6))
val table = stream.toTable(tEnv, 'int, 'string, 'rowtime.rowtime)
val top3 = new Top3
val windowedTable = table
.window(Slide over 3.milli every 10.milli on 'rowtime as 'w)
.groupBy('w, 'string)
.flatAggregate(top3('int))
.select('string, 'f0, 'f1, 'w.start, 'w.end)
val sink = new TestingAppendSink
windowedTable.toAppendStream[Row].addSink(sink)
env.execute()
val expected = Seq(
"Hallo,2,2,1970-01-01T00:00,1970-01-01T00:00:00.003",
"Hi,1,1,1970-01-01T00:00,1970-01-01T00:00:00.003",
"null,4,4,1970-01-01T00:00:00.030,1970-01-01T00:00:00.033")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
}
|
apache/flink
|
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/runtime/stream/table/GroupWindowTableAggregateITCase.scala
|
Scala
|
apache-2.0
| 14,170
|
/*
* Copyright 2013 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.storehaus.algebra.reporting
import com.twitter.util.{ Await, Future }
import com.twitter.storehaus.{ReadableStore, ReadableStoreProxy}
import org.scalacheck.Properties
import org.scalacheck.Prop._
object ReportingReadableStoreProperties extends Properties("ReportingReadableStore") {
/**
* get returns none when not in either store
*/
class DummyReporter[K, V](val self: ReadableStore[K, V])
extends ReadableStoreProxy[K, V] with ReadableStoreReporter[ReadableStore[K, V], K, V] {
def traceMultiGet[K1 <: K](
ks: Set[K1], request: Map[K1, Future[Option[V]]]): Map[K1, Future[Unit]] =
request.mapValues(_.unit)
def traceGet(k: K, request: Future[Option[V]]): Future[Unit] = request.unit
}
def buildStoreRunQueries[K, V](
mA: Map[K, V], others: Set[K], builder: (ReadableStore[K, V]) => ReadableStore[K, V]
): Seq[(Option[V], Option[V])] = {
val baseStore = ReadableStore.fromMap(mA)
val wrappedStore = builder(baseStore)
val expanded: Set[K] = mA.keySet ++ others
    // We convert to a List; otherwise the results of the map stay a Set and we lose data
expanded.toList.map{k: K => (mA.get(k), Await.result(wrappedStore.get(k)))}
}
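  // For instance (hypothetical values): with mA = Map(1 -> "a", 2 -> "a") both keys produce the
  // identical pair (Some("a"), Some("a")); keeping the results in a Set would collapse them and
  // silently drop one query result from the comparison.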
def buildStoreRunMultiGetQueries[K, V](
mA: Map[K, V], others: Set[K], builder: (ReadableStore[K, V]) => ReadableStore[K, V]
): (Map[K, Option[V]], Map[K, Option[V]]) = {
val baseStore = ReadableStore.fromMap(mA)
val wrappedStore = builder(baseStore)
val expanded: Set[K] = mA.keySet ++ others
    // We convert to a List; otherwise the results of the map stay a Set and we lose data
(expanded.map{k => (k, mA.get(k))}.toMap,
wrappedStore.multiGet(expanded).map { case (k, futureV) => (k, Await.result(futureV)) })
}
property("Stats store matches raw get for all queries") =
forAll { (mA: Map[Int, String], others: Set[Int]) =>
def reporter(store: ReadableStore[Int, String]): DummyReporter[Int, String] =
new DummyReporter[Int, String](store)
val queryResults = buildStoreRunQueries(mA, others, reporter)
queryResults.forall{case (a, b) => a == b}
}
property("Present/Absent count matches") = forAll { (mA: Map[Int, String], others: Set[Int]) =>
var presentCount = 0
var absentCount = 0
def reporter(store: ReadableStore[Int, String]): DummyReporter[Int, String] =
new DummyReporter[Int, String](store) {
override def traceGet(k: Int, request: Future[Option[String]]) = {
request.map {
case Some(_) => presentCount += 1
case None => absentCount += 1
}
}
}
val queryResults = buildStoreRunQueries(mA, others, reporter)
val wrappedResults = queryResults.map(_._2)
wrappedResults.collect{case Some(b) => b}.size == presentCount &&
wrappedResults.collect{case None => 1}.size == absentCount
}
property("Stats store matches raw get for multiget all queries") =
forAll { (mA: Map[Int, String], others: Set[Int]) =>
def reporter(store: ReadableStore[Int, String]): DummyReporter[Int, String] =
new DummyReporter[Int, String](store)
val (mapRes, storeResults) = buildStoreRunMultiGetQueries(mA, others, reporter)
mapRes.size == storeResults.size &&
mapRes.keySet.forall(k => mapRes.get(k) == storeResults.get(k))
}
property("Present/Absent count matches in multiget") =
forAll { (mA: Map[Int, String], others: Set[Int]) =>
var presentCount = 0
var absentCount = 0
def reporter(store: ReadableStore[Int, String]): DummyReporter[Int, String] =
new DummyReporter[Int, String](store) {
override def traceMultiGet[K1 <: Int](
ks: Set[K1], request: Map[K1, Future[Option[String]]]) = {
request.mapValues{fOptV =>
fOptV.map {
case Some(_) => presentCount += 1
case None => absentCount += 1
}
}
}
}
val (_, storeResults) = buildStoreRunMultiGetQueries(mA, others, reporter)
storeResults.values.collect{case Some(b) => b}.size == presentCount &&
storeResults.values.collect{case None => 1}.size == absentCount
}
}
|
twitter/storehaus
|
storehaus-algebra/src/test/scala/com/twitter/storehaus/algebra/reporting/ReportingReadableStoreProperties.scala
|
Scala
|
apache-2.0
| 4,888
|
package im.actor.server.persist.auth
import java.time.{ ZoneOffset, LocalDateTime }
import scala.concurrent.ExecutionContext
import im.actor.server.db.ActorPostgresDriver.api._
import im.actor.server.model.{ AuthTransaction, AuthTransactionBase }
private[auth] abstract class AuthTransactionRepoBase[T](tag: Tag, tname: String) extends Table[T](tag, tname) {
def transactionHash = column[String]("transaction_hash", O.PrimaryKey)
def appId = column[Int]("app_id")
def apiKey = column[String]("api_key")
def deviceHash = column[Array[Byte]]("device_hash")
def deviceTitle = column[String]("device_title")
def accessSalt = column[String]("access_salt")
def deviceInfo = column[Array[Byte]]("device_info")
def isChecked = column[Boolean]("is_checked")
def deletedAt = column[Option[LocalDateTime]]("deleted_at")
}
final class AuthTransactionTable(tag: Tag) extends AuthTransactionRepoBase[AuthTransaction](tag, "auth_transactions") {
def * = (
transactionHash,
appId,
apiKey,
deviceHash,
deviceTitle,
accessSalt,
deviceInfo,
isChecked,
deletedAt
) <> (AuthTransaction.tupled, AuthTransaction.unapply)
}
object AuthTransactionRepo {
val transactions = TableQuery[AuthTransactionTable]
val active = transactions.filter(_.deletedAt.isEmpty)
def find(transactionHash: String) =
active.filter(_.transactionHash === transactionHash).result.headOption
def findChildren(transactionHash: String)(implicit ec: ExecutionContext): DBIO[Option[AuthTransactionBase]] =
for {
email ← AuthEmailTransactionRepo.find(transactionHash)
phone ← AuthPhoneTransactionRepo.find(transactionHash)
username ← AuthUsernameTransactionRepo.find(transactionHash)
} yield (email, phone, username) match {
case (Some(e), None, None) ⇒ email
case (None, Some(p), None) ⇒ phone
case (None, None, Some(p)) ⇒ username
case _ ⇒ None
}
def delete(transactionHash: String) =
transactions.filter(_.transactionHash === transactionHash).map(_.deletedAt).update(Some(LocalDateTime.now(ZoneOffset.UTC)))
def updateSetChecked(transactionHash: String) =
transactions.filter(_.transactionHash === transactionHash).map(_.isChecked).update(true)
}
|
EaglesoftZJ/actor-platform
|
actor-server/actor-persist/src/main/scala/im/actor/server/persist/auth/AuthTransactionRepo.scala
|
Scala
|
agpl-3.0
| 2,277
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.types
import java.util.Locale
import scala.annotation.tailrec
import scala.reflect.runtime.universe.typeTag
import org.apache.spark.annotation.Stable
import org.apache.spark.sql.catalyst.expressions.{Expression, Literal}
import org.apache.spark.sql.errors.QueryCompilationErrors
import org.apache.spark.sql.internal.SQLConf
/**
* The data type representing `java.math.BigDecimal` values.
* A Decimal that must have fixed precision (the maximum number of digits) and scale (the number
* of digits on right side of dot).
*
* The precision can be up to 38, scale can also be up to 38 (less or equal to precision).
*
* The default precision and scale is (10, 0).
*
* Please use `DataTypes.createDecimalType()` to create a specific instance.
*
* @since 1.3.0
*/
@Stable
case class DecimalType(precision: Int, scale: Int) extends FractionalType {
DecimalType.checkNegativeScale(scale)
if (scale > precision) {
throw QueryCompilationErrors.decimalCannotGreaterThanPrecisionError(scale, precision)
}
if (precision > DecimalType.MAX_PRECISION) {
throw QueryCompilationErrors.decimalOnlySupportPrecisionUptoError(
DecimalType.simpleString, DecimalType.MAX_PRECISION)
}
// default constructor for Java
def this(precision: Int) = this(precision, 0)
def this() = this(10)
private[sql] type InternalType = Decimal
@transient private[sql] lazy val tag = typeTag[InternalType]
private[sql] val numeric = Decimal.DecimalIsFractional
private[sql] val fractional = Decimal.DecimalIsFractional
private[sql] val ordering = Decimal.DecimalIsFractional
private[sql] val asIntegral = Decimal.DecimalAsIfIntegral
override private[sql] def exactNumeric = DecimalExactNumeric
override def typeName: String = s"decimal($precision,$scale)"
override def toString: String = s"DecimalType($precision,$scale)"
override def sql: String = typeName.toUpperCase(Locale.ROOT)
/**
* Returns whether this DecimalType is wider than `other`. If yes, it means `other`
   * can be cast into `this` safely without losing any precision or range.
*/
private[sql] def isWiderThan(other: DataType): Boolean = isWiderThanInternal(other)
@tailrec
private def isWiderThanInternal(other: DataType): Boolean = other match {
case dt: DecimalType =>
(precision - scale) >= (dt.precision - dt.scale) && scale >= dt.scale
case dt: IntegralType =>
isWiderThanInternal(DecimalType.forType(dt))
case _ => false
}
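  // For example, DecimalType(10, 2) is wider than DecimalType(5, 1): it has at least as many
  // integral digits (10 - 2 = 8 >= 5 - 1 = 4) and at least as large a scale (2 >= 1).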
/**
* Returns whether this DecimalType is tighter than `other`. If yes, it means `this`
   * can be cast into `other` safely without losing any precision or range.
*/
private[sql] def isTighterThan(other: DataType): Boolean = isTighterThanInternal(other)
@tailrec
private def isTighterThanInternal(other: DataType): Boolean = other match {
case dt: DecimalType =>
(precision - scale) <= (dt.precision - dt.scale) && scale <= dt.scale
case dt: IntegralType =>
isTighterThanInternal(DecimalType.forType(dt))
case _ => false
}
/**
* The default size of a value of the DecimalType is 8 bytes when precision is at most 18,
* and 16 bytes otherwise.
*/
override def defaultSize: Int = if (precision <= Decimal.MAX_LONG_DIGITS) 8 else 16
override def simpleString: String = s"decimal($precision,$scale)"
private[spark] override def asNullable: DecimalType = this
}
/**
* Extra factory methods and pattern matchers for Decimals.
*
* @since 1.3.0
*/
@Stable
object DecimalType extends AbstractDataType {
import scala.math.min
val MAX_PRECISION = 38
val MAX_SCALE = 38
val SYSTEM_DEFAULT: DecimalType = DecimalType(MAX_PRECISION, 18)
val USER_DEFAULT: DecimalType = DecimalType(10, 0)
val MINIMUM_ADJUSTED_SCALE = 6
// The decimal types compatible with other numeric types
private[sql] val BooleanDecimal = DecimalType(1, 0)
private[sql] val ByteDecimal = DecimalType(3, 0)
private[sql] val ShortDecimal = DecimalType(5, 0)
private[sql] val IntDecimal = DecimalType(10, 0)
private[sql] val LongDecimal = DecimalType(20, 0)
private[sql] val FloatDecimal = DecimalType(14, 7)
private[sql] val DoubleDecimal = DecimalType(30, 15)
private[sql] val BigIntDecimal = DecimalType(38, 0)
private[sql] def forType(dataType: DataType): DecimalType = dataType match {
case ByteType => ByteDecimal
case ShortType => ShortDecimal
case IntegerType => IntDecimal
case LongType => LongDecimal
case FloatType => FloatDecimal
case DoubleType => DoubleDecimal
}
private[sql] def fromLiteral(literal: Literal): DecimalType = literal.value match {
case v: Short => fromDecimal(Decimal(BigDecimal(v)))
case v: Int => fromDecimal(Decimal(BigDecimal(v)))
case v: Long => fromDecimal(Decimal(BigDecimal(v)))
case _ => forType(literal.dataType)
}
private[sql] def fromDecimal(d: Decimal): DecimalType = DecimalType(d.precision, d.scale)
private[sql] def bounded(precision: Int, scale: Int): DecimalType = {
DecimalType(min(precision, MAX_PRECISION), min(scale, MAX_SCALE))
}
private[sql] def checkNegativeScale(scale: Int): Unit = {
if (scale < 0 && !SQLConf.get.allowNegativeScaleOfDecimalEnabled) {
throw QueryCompilationErrors.negativeScaleNotAllowedError(scale)
}
}
/**
   * Scale adjustment implementation is based on Hive's, which is itself inspired by
   * SQL Server's. In particular, when a result precision is greater than
* {@link #MAX_PRECISION}, the corresponding scale is reduced to prevent the integral part of a
* result from being truncated.
*
* This method is used only when `spark.sql.decimalOperations.allowPrecisionLoss` is set to true.
*/
private[sql] def adjustPrecisionScale(precision: Int, scale: Int): DecimalType = {
// Assumptions:
checkNegativeScale(scale)
assert(precision >= scale)
if (precision <= MAX_PRECISION) {
// Adjustment only needed when we exceed max precision
DecimalType(precision, scale)
} else if (scale < 0) {
// Decimal can have negative scale (SPARK-24468). In this case, we cannot allow a precision
// loss since we would cause a loss of digits in the integer part.
// In this case, we are likely to meet an overflow.
DecimalType(MAX_PRECISION, scale)
} else {
// Precision/scale exceed maximum precision. Result must be adjusted to MAX_PRECISION.
val intDigits = precision - scale
// If original scale is less than MINIMUM_ADJUSTED_SCALE, use original scale value; otherwise
// preserve at least MINIMUM_ADJUSTED_SCALE fractional digits
val minScaleValue = Math.min(scale, MINIMUM_ADJUSTED_SCALE)
// The resulting scale is the maximum between what is available without causing a loss of
// digits for the integer part of the decimal and the minimum guaranteed scale, which is
// computed above
val adjustedScale = Math.max(MAX_PRECISION - intDigits, minScaleValue)
DecimalType(MAX_PRECISION, adjustedScale)
}
}
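  // Worked example of the adjustment above: for an intermediate result with precision 44 and
  // scale 10, the integral part needs 44 - 10 = 34 digits, the minimum guaranteed scale is
  // min(10, MINIMUM_ADJUSTED_SCALE) = 6, and the adjusted scale is max(38 - 34, 6) = 6,
  // so the returned type is DecimalType(38, 6).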
override private[sql] def defaultConcreteType: DataType = SYSTEM_DEFAULT
override private[sql] def acceptsType(other: DataType): Boolean = {
other.isInstanceOf[DecimalType]
}
override private[sql] def simpleString: String = "decimal"
private[sql] object Fixed {
def unapply(t: DecimalType): Option[(Int, Int)] = Some((t.precision, t.scale))
}
private[sql] object Expression {
def unapply(e: Expression): Option[(Int, Int)] = e.dataType match {
case t: DecimalType => Some((t.precision, t.scale))
case _ => None
}
}
/**
   * Returns whether dt is a DecimalType that fits inside an int
*/
def is32BitDecimalType(dt: DataType): Boolean = {
dt match {
case t: DecimalType =>
t.precision <= Decimal.MAX_INT_DIGITS
case _ => false
}
}
/**
   * Returns whether dt is a DecimalType that fits inside a long
*/
def is64BitDecimalType(dt: DataType): Boolean = {
dt match {
case t: DecimalType =>
t.precision <= Decimal.MAX_LONG_DIGITS
case _ => false
}
}
/**
   * Returns whether dt is a DecimalType that doesn't fit inside a long
*/
def isByteArrayDecimalType(dt: DataType): Boolean = {
dt match {
case t: DecimalType =>
t.precision > Decimal.MAX_LONG_DIGITS
case _ => false
}
}
def unapply(t: DataType): Boolean = t.isInstanceOf[DecimalType]
def unapply(e: Expression): Boolean = e.dataType.isInstanceOf[DecimalType]
}
|
ueshin/apache-spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/types/DecimalType.scala
|
Scala
|
apache-2.0
| 9,328
|
/*
* This file is part of EasyForger which is released under GPLv3 License.
* See file LICENSE.txt or go to http://www.gnu.org/licenses/gpl-3.0.en.html for full license details.
*/
package com.easyforger.recipes
import net.minecraft.block.Block
import net.minecraft.enchantment.Enchantment
import net.minecraft.init.{Blocks, Items}
import net.minecraft.item.{EnumDyeColor, Item, ItemStack}
trait RecipeOps extends RecipeSourceOps with CraftingSourceOps {
implicit class RecipeIntOps(quantity: Int) {
def *(item: Item): ItemStack = new ItemStack(item, quantity)
def *(block: Block): ItemStack = new ItemStack(block, quantity)
}
implicit def recipeToSmelting(recipe: Recipe): SmeltingRecipe = SmeltingRecipe(recipe, 1)
implicit def recipeToCrafting(recipe: Recipe): CraftingRecipe = CraftingRecipe(Set(recipe.source), None, recipe.result)
def enchanted(itemStack: ItemStack, enchantment: Enchantment, level: Int): ItemStack = {
itemStack.addEnchantment(enchantment, level)
itemStack
}
def smelting(smelts: SmeltingRecipe*): Unit = Smelting.smelting(smelts: _*)
def crafting(crafts: CraftingRecipe*): Unit = Crafting.crafting(crafts: _*)
}
object RecipeOps {
val errorShort = 'E
val blockShorts = Map(
Blocks.BED -> 'b,
Blocks.CAKE -> 'c,
Blocks.BREWING_STAND -> 'b,
Blocks.REEDS -> 'r,
Blocks.ACACIA_DOOR -> 'a,
Blocks.JUNGLE_DOOR -> 'j,
Blocks.OAK_DOOR -> 'o,
Blocks.DARK_OAK_DOOR -> 'd,
Blocks.BIRCH_DOOR -> 'b,
Blocks.SPRUCE_DOOR -> 's
)
def shortForItem(item: Item): Symbol =
new ItemStack(item).getDisplayName.toLowerCase.charAt(0).symbol
def shortForSpecialItem(item: Item, meta: Int): Symbol =
if (item == Items.DYE)
EnumDyeColor.byMetadata(meta).getName.toLowerCase.charAt(0).symbol
else
errorShort
def shortForBlock(block: Block): Symbol =
Item.getItemFromBlock(block) match {
case Items.AIR => blockShorts.getOrElse(block, errorShort)
case item: Any => shortForItem(item)
}
def calcMCParamsArray(craftRecipe: CraftingRecipe): Array[Object] = {
val params = craftRecipe.shape
.map(_.trim.replace(" ", "").replace('.', ' ').split("\\n"))
.getOrElse(Array.empty)
    // convert to a List so that duplicate acronyms are kept; deduplicating them would end up hiding errors
val acronyms = craftRecipe.sources.toList.flatMap { recipeSource =>
Seq(new Character(recipeSource.acronym.flatMap(_.name.headOption).getOrElse('E')), recipeSource.itemStack)
}
params ++ acronyms
}
implicit class SymbolOps(char: Char) {
val symbol = Symbol(char.toString)
}
}
|
easyforger/easyforger
|
src/main/scala/com/easyforger/recipes/RecipeOps.scala
|
Scala
|
gpl-3.0
| 2,632
|
package jp.opap.material.facade
import jp.opap.material.facade.RepositoryDataEventEmitter.{Progress, ProgressListener}
import scala.beans.BeanProperty
import scala.collection.mutable
class RepositoryDataEventEmitter() {
var _isRunning: Boolean = false
def setRunning(value: Boolean): Unit = synchronized {
_isRunning = value
}
def getRunning: Boolean = synchronized {
_isRunning
}
protected val listeners: mutable.Set[ProgressListener] = mutable.Set()
def subscribe(listener: ProgressListener): Unit = synchronized {
if (_isRunning)
this.listeners += listener
}
def unsubscribe(listener: ProgressListener): Unit = synchronized {
this.listeners.remove(listener)
}
def publish(progress: Progress): Unit = synchronized {
this.listeners.foreach(listener => listener.onUpdate(progress))
}
def finish(): Unit = synchronized {
this.listeners.foreach(listener => listener.onFinish())
this.setRunning(false)
}
}
object RepositoryDataEventEmitter {
trait ProgressListener {
def onUpdate(progress: Progress): Unit
def onFinish(): Unit
}
case class Progress(@BeanProperty current: Int, @BeanProperty max: Int, @BeanProperty processing: String, @BeanProperty name: String)
}
|
opap-jp/material-explorer
|
rest/src/main/scala/jp/opap/material/facade/RepositoryDataEventEmitter.scala
|
Scala
|
mit
| 1,248
|
package me.reminisce.gameboard.questions
import akka.actor.Props
import me.reminisce.database.MongoCollections
import me.reminisce.database.MongoDBEntities.FBPage
import me.reminisce.database.MongoDBFormats._
import me.reminisce.gameboard.board.GameboardEntities._
import me.reminisce.gameboard.questions.QuestionGenerator._
import reactivemongo.api.DefaultDB
import reactivemongo.api.collections.bson.BSONCollection
import reactivemongo.bson.BSONDocument
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Random, Success}
/**
* Factory for [[me.reminisce.gameboard.questions.WhichPageDidYouLike]]
*/
object WhichPageDidYouLike {
/**
* Creates a WhichPageDidYouLike question generator
*
* @param database database from which to take the data
* @return props for the created actor
*/
def props(database: DefaultDB): Props =
Props(new WhichPageDidYouLike(database))
}
/**
* WhichPageDidYouLike question generator
*
* @param db database from which to take the data
*/
class WhichPageDidYouLike(db: DefaultDB) extends QuestionGenerator {
/**
   * Entry point for this actor, handles the CreateQuestion(userId, itemId, maybeDifficulty) message by getting the
   * necessary items from the database and creating a question. If some items do not conform to what is expected,
   * are missing, or an error occurs while contacting the database, the error is reported to the client.
*
* @return Nothing
*/
def receive: Receive = {
case CreateQuestion(userId, itemId, maybeDifficulty) =>
val client = sender()
val pagesCollection = db[BSONCollection](MongoCollections.fbPages)
val likesCollection = db[BSONCollection](MongoCollections.fbPageLikes)
fetchPage(pagesCollection, itemId, client) {
case Some(page) =>
fetchLikedPages(likesCollection, userId, client) {
list =>
val ids = list.map(fbPageLike => fbPageLike.pageId)
val queryNotLiked = BSONDocument(
"pageId" -> BSONDocument("$nin" -> ids)
)
getDocuments[FBPage](db, pagesCollection, queryNotLiked, 40).onComplete {
case Success(listPages) =>
if (listPages.length < 3) {
client ! NotEnoughData(s"Unable to create question : not enough not liked pages.")
} else {
val choices = getChoices(maybeDifficulty, listPages)
val possibilities = (page :: choices).map {
pge =>
val url = pge.photos.flatMap(_.source)
Possibility(pge.name.get, url, "Page", Some(pge.pageId))
}
possibilities.headOption match {
case Some(answer) =>
val shuffled = Random.shuffle(possibilities)
val gameQuestion = MultipleChoiceQuestion(userId, MultipleChoice, MCWhichPageDidYouLike, None, shuffled, shuffled.indexOf(answer))
client ! FinishedQuestionCreation(gameQuestion)
case None =>
client ! NotEnoughData(s"This should not happen, but the possibilities were empty.")
}
}
case Failure(e) =>
client ! MongoDBError(s"${e.getMessage}")
case any =>
client ! MongoDBError(s"Unknown error : $any.")
}
}
case None =>
client ! NotEnoughData(s"Page not found. $itemId")
}
case any =>
log.error(s"WhichPageDidYouLike received a unexpected message $any")
}
/**
* Get 3 choices. The size of the pool from which there are chosen follows:
* y = -30*difficulty + 40 (This is arbitrary)
*
* @param maybeDifficulty User difficulty for question
* @param listPages List of potential FBpage as choices
* @return 3 FBpages as a List
*/
private def getChoices(maybeDifficulty: Option[Double], listPages: List[FBPage]): List[FBPage] = {
val sortedList = listPages.sortBy(-_.likesNumber)
val pool = Random.shuffle(sortedList.take((-30*maybeDifficulty.getOrElse(0.0)+40).toInt))
pool.take(3)
}
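  // For example, following the formula above: with the default difficulty (0.0) the pool is the
  // 40 most liked pages, while with difficulty 1.0 it shrinks to the 10 most liked pages;
  // 3 choices are then drawn at random from that pool.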
}
|
reminisceme/game-creator
|
src/main/scala/me/reminisce/gameboard/questions/WhichPageDidYouLike.scala
|
Scala
|
apache-2.0
| 4,289
|
package org.jetbrains.plugins.scala
package codeInsight
package template
package macros
import com.intellij.codeInsight.CodeInsightBundle
import com.intellij.codeInsight.lookup.{LookupElement, LookupElementBuilder}
import com.intellij.codeInsight.template._
import com.intellij.openapi.project.Project
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.psi.{PsiClass, PsiDocumentManager}
import org.jetbrains.plugins.scala.codeInsight.template.impl.ScalaCodeContextType
import org.jetbrains.plugins.scala.codeInsight.template.util.MacroUtil
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypedDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTypeDefinition
import org.jetbrains.plugins.scala.lang.psi.types.result.TypingContext
import org.jetbrains.plugins.scala.lang.psi.types.{ScType, ScTypeExt}
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult
import _root_.scala.collection.mutable.ArrayBuffer
/**
* User: Alexander Podkhalyuzin
* Date: 30.01.2009
*/
/**
 * This class provides a macro for live templates. It returns elements
 * of a given class type (or class types).
*/
class ScalaVariableOfTypeMacro extends ScalaMacro {
def getPresentableName: String = "Scala variable of type macro"
override def innerCalculateLookupItems(exprs: Array[Expression], context: ExpressionContext): Array[LookupElement] = {
calculateLookupItems(exprs.map(_.calculateResult(context).toString), context, showOne = false)
}
def calculateLookupItems(exprs: Array[String], context: ExpressionContext, showOne: Boolean): Array[LookupElement] = {
if (!validExprs(exprs)) return null
val offset = context.getStartOffset
val editor = context.getEditor
val array = new ArrayBuffer[LookupElement]
val file = PsiDocumentManager.getInstance(editor.getProject).getPsiFile(editor.getDocument)
PsiDocumentManager.getInstance(editor.getProject).commitDocument(editor.getDocument)
file match {
case file: ScalaFile =>
val element = file.findElementAt(offset)
val variants = MacroUtil.getVariablesForScope(element).filter(r => {
val clazz = PsiTreeUtil.getParentOfType(r.element, classOf[PsiClass])
if (clazz == null) true
else {
clazz.qualifiedName match {
case "scala.Predef" => false
case "scala" => false
case _ => true
}
}
})
for (variant <- variants) {
variant.getElement match {
case typed: ScTypedDefinition =>
for (t <- typed.getType(TypingContext.empty))
addLookupItems(exprs, context, variant, t, file.getProject, array)
case _ =>
}
}
case _ =>
}
if (array.length < 2 && !showOne) return null
array.toArray
}
def innerCalculateResult(exprs: Array[Expression], context: ExpressionContext): Result = {
if (!validExprs(exprs)) return null
val offset = context.getStartOffset
val editor = context.getEditor
val file = PsiDocumentManager.getInstance(editor.getProject).getPsiFile(editor.getDocument)
PsiDocumentManager.getInstance(editor.getProject).commitDocument(editor.getDocument)
file match {
case file: ScalaFile =>
val element = file.findElementAt(offset)
val variants = MacroUtil.getVariablesForScope(element).filter(r => {
val clazz = PsiTreeUtil.getParentOfType(r.element, classOf[PsiClass])
if (clazz == null) true
else {
clazz.qualifiedName match {
case "scala.Predef" => false
case "scala" => false
case _ => true
}
}
})
for (variant <- variants) {
variant.getElement match {
case typed: ScTypedDefinition =>
for (t <- typed.getType(TypingContext.empty))
getResult(exprs, context, variant, t, file.getProject).map(return _)
case _ =>
}
}
null
case _ => null
}
}
override def isAcceptableInContext(context: TemplateContextType): Boolean = context.isInstanceOf[ScalaCodeContextType]
override def calculateQuickResult(p1: Array[Expression], p2: ExpressionContext): Result = null
def getDescription: String = CodeInsightBundle.message("macro.variable.of.type")
def getName: String = "scalaVariableOfType"
override def getDefaultValue: String = "x"
def validExprs(exprs: Array[Expression]): Boolean = validExprsCount(exprs.length)
def validExprs(exprs: Array[String]): Boolean = validExprsCount(exprs.length)
def validExprsCount(exprsCount: Int): Boolean = exprsCount != 0
def getResult(exprs: Array[Expression],
context: ExpressionContext,
variant: ScalaResolveResult,
scType: ScType,
project: Project): Option[Result] = {
exprs.apply(0).calculateResult(context).toString match {
case "" =>
Some(new TextResult(variant.getElement.name))
case ScalaVariableOfTypeMacro.iterableId =>
if (scType.canonicalText.startsWith("_root_.scala.Array")) Some(new TextResult(variant.getElement.name))
else scType.extractClass.collect {
case x: ScTypeDefinition if x.functionsByName("foreach").nonEmpty => new TextResult(variant.getElement.name)
}
case _ =>
val qualName = scType.extractClass match {
case Some(x) => x.qualifiedName
case None => ""
}
exprs.find(expr => qualName == expr.calculateResult(context).toString)
.map(_ => new TextResult(variant.getElement.name))
}
}
def addLookupItems(exprs: Array[String],
context: ExpressionContext,
variant: ScalaResolveResult,
scType: ScType,
project: Project,
array: ArrayBuffer[LookupElement]) {
exprs.apply(0) match {
case "" =>
val item = LookupElementBuilder.create(variant.getElement, variant.getElement.name).
withTypeText(scType.presentableText)
array += item
case ScalaVariableOfTypeMacro.iterableId if scType.canonicalText.startsWith("_root_.scala.Array") =>
array += LookupElementBuilder.create(variant.getElement, variant.getElement.name)
case ScalaVariableOfTypeMacro.iterableId =>
scType.extractClass match {
case Some(x: ScTypeDefinition) if x.functionsByName("foreach").nonEmpty =>
array += LookupElementBuilder.create(variant.getElement, variant.getElement.name)
case _ =>
}
case _ =>
for (expr <- exprs) {
if ((scType.extractClass match {
case Some(x) => x.qualifiedName
case None => ""
}) == expr) array += LookupElementBuilder.create(variant.getElement, variant.getElement.name)
}
}
}
}
object ScalaVariableOfTypeMacro {
val iterableId = "foreach"
}
|
ilinum/intellij-scala
|
src/org/jetbrains/plugins/scala/codeInsight/template/macros/ScalaVariableOfTypeMacro.scala
|
Scala
|
apache-2.0
| 7,126
|
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package matryoshka
import slamdata.Predef._
import matryoshka.implicits._
import scalaz._
import simulacrum._
@typeclass trait EqualT[T[_[_]]] {
def equal[F[_]: Functor](tf1: T[F], tf2: T[F])(implicit del: Delay[Equal, F]):
Boolean
def equalT[F[_]: Functor](delay: Delay[Equal, F]): Equal[T[F]] =
Equal.equal[T[F]](equal[F](_, _)(Functor[F], delay))
}
@SuppressWarnings(Array("org.wartremover.warts.PublicInference"))
object EqualT {
def recursiveT[T[_[_]]: RecursiveT]: EqualT[T] = new EqualT[T] {
def equal[F[_]: Functor]
(tf1: T[F], tf2: T[F])
(implicit del: Delay[Equal, F]) =
del(equalT[F](del)).equal(tf1.project, tf2.project)
}
}
|
slamdata/matryoshka
|
core/shared/src/main/scala/matryoshka/EqualT.scala
|
Scala
|
apache-2.0
| 1,287
|
package com.github.al.roulette.player.impl
import java.util.UUID
import akka.NotUsed
import akka.actor.Scheduler
import com.github.al.logging.EventStreamLogging
import com.github.al.logging.LoggedServerServiceCall.logged
import com.github.al.persistence.{PersistentEntityRegistrySugar, Retrying}
import com.github.al.roulette.player.api
import com.github.al.roulette.player.api._
import com.lightbend.lagom.scaladsl.api.ServiceCall
import com.lightbend.lagom.scaladsl.api.broker.Topic
import com.lightbend.lagom.scaladsl.api.transport.NotFound
import com.lightbend.lagom.scaladsl.broker.TopicProducer
import com.lightbend.lagom.scaladsl.persistence.{EventStreamElement, PersistentEntityRegistry}
import com.lightbend.lagom.scaladsl.server.ServerServiceCall
import com.typesafe.scalalogging.LazyLogging
import scala.concurrent.duration.DurationInt
import scala.concurrent.{ExecutionContext, Future}
import scala.language.postfixOps
class PlayerServiceImpl(override val entityRegistry: PersistentEntityRegistry, playerRepository: PlayerRepository)
(implicit val executionContext: ExecutionContext, scheduler: Scheduler)
extends PlayerService
with PersistentEntityRegistrySugar
with Retrying
with EventStreamLogging
with LazyLogging {
override def registerPlayer: ServiceCall[Player, PlayerId] = logged {
ServerServiceCall { player =>
val id = UUID.randomUUID()
entityRef[PlayerEntity](id)
.ask(CreatePlayer(PlayerState(player.playerName)))
.map(_ => PlayerId(id))
}
}
override def login: ServiceCall[PlayerCredentials, PlayerAccessToken] = {
ServerServiceCall { credentials =>
for {
playerId <- retry(playerRepository.getPlayerIdByName(credentials.playerName), delay = 300 millis, timeout = 3 seconds)
accessToken <- entityRef[PlayerEntity](playerId).ask(IssueAccessToken)
} yield PlayerAccessToken(accessToken)
}
}
override def getPlayer(id: UUID): ServiceCall[NotUsed, Player] = logged {
ServerServiceCall { _: NotUsed =>
entityRef[PlayerEntity](id).ask(GetPlayer).map {
case Some(playerState) => Player(playerState.playerName)
case None => throw NotFound(s"Player $id not found")
}
}
}
override def playerEvents: Topic[api.PlayerEvent] = TopicProducer.singleStreamWithOffset { offset =>
entityRegistry.eventStream(PlayerEvent.Tag, offset)
.filter(_.event.isInstanceOf[PlayerCreated])
.mapAsync(1)(logEventStreamElementAsync).mapAsync(1)({
case EventStreamElement(playerId, PlayerCreated(_), _offset) =>
Future.successful(api.PlayerRegistered(playerId) -> _offset)
})
}
}
|
andrei-l/reactive-roulette
|
player-impl/src/main/scala/com/github/al/roulette/player/impl/PlayerServiceImpl.scala
|
Scala
|
mit
| 2,679
|
package magento
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import io.gatling.jdbc.Predef._
import io.gatling.http.Headers.Names._
import scala.concurrent.duration._
import bootstrap._
import assertions._
import Headers._
object AnonymousBrowserScenario {
var feedCategoriesURL = csv("magento_category_urls.csv")
var feedProductsURL = csv("magento_product_urls.csv")
val homepage =
exec(http("Get Homepage")
.get("/")
.headers(headers_1)
)
.pause(2)
val categories =
feed(feedCategoriesURL.random)
.repeat(3) {
exec(http("Get Catalog Page")
.get("${magento_category_url}")
.headers(headers_1a)
)
.pause(1)
}
.pause(2)
val products =
feed(feedProductsURL.random)
.exec(http("Get Product Page")
.get("${magento_product_url}")
.headers(headers_1a)
)
val scn = scenario("Anonymous Browser")
.exec( homepage, categories, products )
}
|
candes/magento-gatling2
|
user-files/simulations/magento/AnonymousBrowserScenario.scala
|
Scala
|
apache-2.0
| 1,153
|
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.model
import munit.FunSuite
class ClampSuite extends FunSuite {
val step = 60000L
val dataTags = Map("name" -> "cpu", "node" -> "i-1")
val inputTS = TimeSeries(
dataTags,
new ArrayTimeSeq(
DsType.Gauge,
0L,
step,
Array[Double](1.0, 1.5, 1.6, 1.7, 1.4, 1.3, 1.2, 1.0, 0.0, 0.0)
)
)
val des = StatefulExpr.Des(DataExpr.Sum(Query.Equal("name", "cpu")), 2, 0.1, 0.02)
def eval(expr: TimeSeriesExpr, data: List[List[Datapoint]]): List[List[TimeSeries]] = {
var state = Map.empty[StatefulExpr, Any]
data.map { ts =>
val t = ts.head.timestamp
val context = EvalContext(t, t + step, step, state)
val rs = expr.eval(context, ts)
state = rs.state
rs.data
}
}
test("clamp-min") {
val s = 0L
val e = 10L * step
val context = EvalContext(s, e, step, Map.empty)
val clamp = MathExpr.ClampMin(DataExpr.Sum(Query.Equal("name", "cpu")), 1.1)
val actual = clamp.eval(context, List(inputTS)).data.head.data.bounded(s, e).data
val expected = Array[Double](1.1, 1.5, 1.6, 1.7, 1.4, 1.3, 1.2, 1.1, 1.1, 1.1)
assertEquals(actual.toSeq, expected.toSeq)
}
test("clamp-max") {
val s = 0L
val e = 10L * step
val context = EvalContext(s, e, step, Map.empty)
val clamp = MathExpr.ClampMax(DataExpr.Sum(Query.Equal("name", "cpu")), 1.1)
val actual = clamp.eval(context, List(inputTS)).data.head.data.bounded(s, e).data
val expected = Array[Double](1.0, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.0, 0.0, 0.0)
assertEquals(actual.toSeq, expected.toSeq)
}
}
|
copperlight/atlas
|
atlas-core/src/test/scala/com/netflix/atlas/core/model/ClampSuite.scala
|
Scala
|
apache-2.0
| 2,213
|
package feature
/** Exceptions for invalid [[Feature]]s */
object Exceptions {
/** Exception indicating invalid CDS size */
class CDSSizeException(message: String) extends IllegalArgumentException(message)
}
|
pamelarussell/sgxlib
|
src/main/scala/feature/Exceptions.scala
|
Scala
|
mit
| 216
|
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.execution.Ack
import monix.execution.Ack.Continue
import monix.execution.exceptions.DummyException
import monix.reactive.{Observable, Observer}
import scala.concurrent.duration._
import scala.concurrent.duration.Duration.Zero
import scala.concurrent.{Future, Promise}
import scala.util.Success
object FilterSuite extends BaseOperatorSuite {
def count(sourceCount: Int) = {
sourceCount
}
def sum(sourceCount: Int): Long =
sourceCount.toLong * (sourceCount + 1)
def createObservable(sourceCount: Int) = {
require(sourceCount > 0, "sourceCount should be strictly positive")
Some {
val o =
if (sourceCount == 1)
Observable.now(2L).filter(_ % 2 == 0)
else
Observable.range(1, sourceCount.toLong * 2 + 1, 1).filter(_ % 2 == 0)
Sample(o, count(sourceCount), sum(sourceCount), Zero, Zero)
}
}
def observableInError(sourceCount: Int, ex: Throwable) = {
require(sourceCount > 0, "sourceCount should be strictly positive")
Some {
val ex = DummyException("dummy")
val o =
if (sourceCount == 1)
createObservableEndingInError(Observable.now(2L), ex)
.filter(_ % 2 == 0)
else
createObservableEndingInError(Observable.range(1, sourceCount.toLong * 2 + 1, 1), ex)
.filter(_ % 2 == 0)
Sample(o, count(sourceCount), sum(sourceCount), Zero, Zero)
}
}
def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = {
require(sourceCount > 0, "sourceCount should be strictly positive")
Some {
val o =
if (sourceCount == 1)
Observable.now(1L).filter(_ => throw ex)
else
Observable.range(1, sourceCount.toLong * 2 + 1, 1).filter { x =>
if (x == sourceCount * 2)
throw ex
else
x % 2 == 0
}
Sample(o, count(sourceCount - 1), sum(sourceCount - 1), Zero, Zero)
}
}
override def cancelableObservables(): Seq[Sample] = {
val sample = Observable
.range(0, 10)
.delayOnNext(1.second)
.filter(_ % 2 == 0)
Seq(Sample(sample, 0, 0, 0.seconds, 0.seconds))
}
test("should not do back-pressure for onComplete, for 1 element") { implicit s =>
val p = Promise[Continue.type]()
var wasCompleted = false
createObservable(1) match {
case Some(Sample(obs, _, _, waitForFirst, waitForNext)) =>
var onNextReceived = false
obs.unsafeSubscribeFn(new Observer[Long] {
def onNext(elem: Long): Future[Ack] = { onNextReceived = true; p.future }
def onError(ex: Throwable): Unit = throw new IllegalStateException()
def onComplete(): Unit = wasCompleted = true
})
s.tick(waitForFirst)
assert(onNextReceived)
assert(wasCompleted)
p.success(Continue)
s.tick(waitForNext)
}
}
test("withFilter syntax works") { implicit s =>
val source = Observable.range(1, 1000)
val filtered = for {
element <- source if element % 2 == 1
} yield element
val f = filtered.toListL.runToFuture
s.tick()
assertEquals(f.value, Some(Success(List.range(1, 1000, 2))))
}
}
|
monifu/monix
|
monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/FilterSuite.scala
|
Scala
|
apache-2.0
| 3,924
|
package com.github.aselab.activerecord.io
import com.github.aselab.activerecord._
import inner._
import validations._
import reflections._
object FormSerializer {
import ReflectionUtil._
def assignFunc(value: Any, fieldInfo: FieldInfo): Any =
formAssignFunc(value, fieldInfo)((v: Any, k: FieldInfo) => v)
def formAssignFunc(value: Any, fieldInfo: FieldInfo)(assignFunc: (Any, FieldInfo) => Any): Any = {
(value, fieldInfo) match {
case (v: FormSerializer, FieldInfo(name, klass, _, isSeq, _)) if classOf[FormSerializer].isAssignableFrom(klass) && !isSeq =>
value
case (v, FieldInfo(name, klass, _, isSeq, _)) if classOf[FormSerializer].isAssignableFrom(klass) && !isSeq =>
val companion = classToCompanion(klass).asInstanceOf[FormSupport[ActiveModel]]
companion.unsafeAssign(v.asInstanceOf[Map[String, Any]], assignFunc)
case (v, FieldInfo(name, klass, _, isSeq, _)) if classOf[FormSerializer].isAssignableFrom(klass) && isSeq =>
if (v.asInstanceOf[List[_]].headOption.exists(e => classOf[FormSerializer].isAssignableFrom(e.getClass))) {
value
} else {
val companion = classToCompanion(klass).asInstanceOf[FormSupport[ActiveModel]]
v.asInstanceOf[List[Map[String, Any]]].map(companion.unsafeAssign(_, assignFunc))
}
case _ => value
}
}
}
trait FormSerializer extends IO { self: ProductModel =>
import ReflectionUtil._
override def toFieldType(value: Any, fieldInfo: FieldInfo): Any = {
if (fieldInfo.isOption) {
value match {
case Some(v) => Some(v)
case null | None | "" => None
case v => Some(v)
}
} else { value }
}
def toFormValues: Map[String, String] = toFormValues(None)
def toFormValues(prefix: Option[String]): Map[String, String] = {
def serialize(c: Class[_], value: Any, key: String): Map[String, String] =
if (classOf[FormSerializer].isAssignableFrom(c)) {
value.asInstanceOf[FormSerializer].toFormValues(Some(key))
} else {
Map(FormConverter.get(c).map(key -> _.serialize(value)).getOrElse(
throw ActiveRecordException.unsupportedType(key)
))
}
toMap.flatMap { case (k, v) =>
val info = _companion.fieldInfo(k)
val key = prefix.map(FormUtil.join(_, k)).getOrElse(k)
if (info.isSeq) {
v.asInstanceOf[Seq[_]].zipWithIndex.flatMap { case (value, index) =>
serialize(info.fieldType, value, FormUtil.join(key, index))
}
} else {
serialize(info.fieldType, v, key)
}
}
}
def assignFormValues(data: Map[String, String]): this.type = {
unsafeAssign(_companion.fieldInfo.flatMap {
case (name, info) =>
lazy val converter = FormConverter.get(info.fieldType).getOrElse(
throw ActiveRecordException.unsupportedType(name)
)
def deserialize(data: Map[String, String], key: String) = if (info.isModel) {
val companion = classToCompanion(info.fieldType).asInstanceOf[FormSupport[ActiveModel]]
val map = data.collect {
case (k, v) if k.startsWith(key + "[") => FormUtil.shift(k) -> v
}
        if (!(info.isOption && map.isEmpty)) Some(companion.bind(map)) else None
} else {
data.get(key).collect {
case v if !(info.isOption && v.isEmpty) => converter.deserialize(v)
case _ => ""
}
}
try {
if (info.isSeq) {
val dataList = Stream.from(0).map(i => data.collect {
case (k, v) if k.startsWith("%s[%d]".format(name, i)) => FormUtil.shift(k) -> v
}.toMap).takeWhile(_.nonEmpty)
Some(name -> dataList.zipWithIndex.flatMap {
case (d, i) => deserialize(d, i.toString)
}.toList)
} else {
deserialize(data, name).map(name -> _)
}
} catch {
case e: Throwable =>
this.errors.add(name, Validator.ERROR_PREFIX + "invalid")
None
}
}, FormSerializer.assignFunc(_, _))
}
def formErrors: Seq[ValidationError] = {
val nestErrors = _companion.validatableFields.flatMap { f =>
f.toSeq[FormSerializer](this).zipWithIndex.flatMap { case (m, i) =>
m.formErrors.map {
case e if e.isGlobal => e.copy(model = this.getClass, key = f.name + e.key)
case e if f.isSeq => e.copy(key = FormUtil.join(f.name, i, e.key))
case e => e.copy(key = FormUtil.join(f.name, e.key))
}
}
}
errors.toSeq ++ nestErrors
}
override def validate(): Boolean = super.validate && formErrors.isEmpty
}
trait FormSupport[T <: ActiveModel] { self: ProductModelCompanion[T] =>
import ReflectionUtil._
type C = ActiveModelCompanion[ActiveModel]
def isRequired(name: String): Boolean = {
def inner(c: C, names: Seq[String]): Boolean = {
(names.headOption, names.tail) match {
case (Some(name), tail) =>
c.fieldInfo.get(name).map { info =>
if (tail.isEmpty) {
info.isRequired
} else {
inner(classToCompanion(info.fieldType).asInstanceOf[C], tail)
}
}.getOrElse(false)
case _ => false
}
}
inner(this.asInstanceOf[C], FormUtil.split(name).filterNot(s => s.isEmpty || s.matches("^[0-9]+$")))
}
def assignValue(value: Any, fieldInfo: FieldInfo): Any = {
(value, fieldInfo) match {
case (v: FormSerializer, FieldInfo(name, klass, _, isSeq, _)) if classOf[FormSerializer].isAssignableFrom(klass) && !isSeq =>
value
case (v, FieldInfo(name, klass, _, isSeq, _)) if classOf[FormSerializer].isAssignableFrom(klass) && !isSeq =>
val companion = classToCompanion(klass).asInstanceOf[FormSupport[ActiveModel]]
companion.unsafeAssign(v.asInstanceOf[Map[String, Any]], (v, k) => v)
case (v, FieldInfo(name, klass, _, isSeq, _)) if classOf[FormSerializer].isAssignableFrom(klass) && isSeq =>
if (v.asInstanceOf[List[_]].headOption.exists(e => classOf[FormSerializer].isAssignableFrom(e.getClass))) {
value
} else {
val companion = classToCompanion(klass).asInstanceOf[FormSupport[ActiveModel]]
v.asInstanceOf[List[Map[String, Any]]].map(companion.unsafeAssign(_, (v, k) => v))
}
case _ => value
}
}
def bind(data: Map[String, String])(implicit source: T = self.newInstance): T = {
source.assignFormValues(data)
source
}
def unsafeAssign(data: Map[String, Any], assignFunc: (Any, FieldInfo) => Any)(implicit source: T = self.newInstance): T = {
source.unsafeAssign(data, assignFunc)
source
}
def unbind(m: T): Map[String, String] = m.toFormValues
}
object FormUtil {
/** a[b][c] => b[c] */
  def shift(s: String): String = s.replaceFirst("""[^\[]+\[([^\[\]]+)\]""", "$1")
/** a[b][c] => a, b, c */
  def split(s: String): Seq[String] = s.replaceAll("""\[([^\[\]]*)\]""", ",$1").split(",")
/** a, b, c[d] => a[b][c][d] */
def join(a: String, b: Any*): String =
a + b.flatMap(s => split(s.toString)).map("[%s]".format(_)).mkString
}
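// Hedged usage sketch (not part of the original file): exercises the FormUtil helpers
// exactly as documented in the comments above. The object name "FormUtilUsageExample"
// is an assumption introduced for illustration only.
object FormUtilUsageExample {
  def demo(): Unit = {
    require(FormUtil.shift("a[b][c]") == "b[c]")
    require(FormUtil.split("a[b][c]") == Seq("a", "b", "c"))
    require(FormUtil.join("a", "b", "c[d]") == "a[b][c][d]")
  }
}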
|
xdougx/scala-activerecord
|
activerecord/src/main/scala/io/FormSupport.scala
|
Scala
|
mit
| 7,176
|
package org.jetbrains.sbt.project.data.service
import java.io.File
import java.util
import com.intellij.facet.ModifiableFacetModel
import com.intellij.openapi.externalSystem.model.project.{ModuleData, ProjectData}
import com.intellij.openapi.externalSystem.model.{DataNode, Key, ProjectKeys}
import com.intellij.openapi.externalSystem.service.project.IdeModifiableModelsProvider
import com.intellij.openapi.externalSystem.service.project.manage.AbstractProjectDataService
import com.intellij.openapi.externalSystem.util.{DisposeAwareProjectChange, ExternalSystemApiUtil}
import com.intellij.openapi.module.Module
import com.intellij.openapi.project.Project
import com.intellij.openapi.roots.ModifiableRootModel
import com.intellij.openapi.roots.impl.libraries.LibraryEx
import com.intellij.openapi.roots.libraries.Library
import com.intellij.util.CommonProcessors.CollectProcessor
import org.jetbrains.plugins.scala.project.Platform.{Dotty, Scala}
import org.jetbrains.plugins.scala.project.{DottyLibraryName, Platform, ScalaLanguageLevel, ScalaLibraryName, ScalaLibraryProperties, ScalaLibraryType}
import scala.collection.JavaConverters._
/**
* @author Pavel Fatin
*/
abstract class AbstractDataService[E, I](key: Key[E]) extends AbstractProjectDataService[E, I] {
def createImporter(toImport: Seq[DataNode[E]],
projectData: ProjectData,
project: Project,
modelsProvider: IdeModifiableModelsProvider): Importer[E]
def getTargetDataKey: Key[E] = key
override final def importData(toImport: util.Collection[DataNode[E]],
projectData: ProjectData,
project: Project,
modelsProvider: IdeModifiableModelsProvider): Unit =
createImporter(toImport.asScala.toSeq, projectData, project, modelsProvider).importData()
}
/**
* The purposes of this trait are the following:
 * - Encapsulate logic necessary for importing specified data
* - Wrap "unsafe" methods from IdeModifiableModelsProvider
* - Collect import parameters as class fields to eliminate necessity of
* dragging them into each and every method of ProjectDataService
* - Abstract from External System's API which is rather unstable
*/
trait Importer[E] {
val dataToImport: Seq[DataNode[E]]
val projectData: ProjectData
val project: Project
val modelsProvider: IdeModifiableModelsProvider
def importData(): Unit
// IdeModifiableModelsProvider wrappers
def findIdeModule(name: String): Option[Module] =
Option(modelsProvider.findIdeModule(name))
def findIdeModule(data: ModuleData): Option[Module] =
Option(modelsProvider.findIdeModule(data))
def getModifiableFacetModel(module: Module): ModifiableFacetModel =
modelsProvider.getModifiableFacetModel(module)
def getModifiableLibraryModel(library: Library): Library.ModifiableModel =
modelsProvider.getModifiableLibraryModel(library)
def getModifiableRootModel(module: Module): ModifiableRootModel =
modelsProvider.getModifiableRootModel(module)
def getModules: Array[Module] =
modelsProvider.getModules
// FIXME: should be implemented in External System
def getModifiableLibraryModelEx(library: Library): LibraryEx.ModifiableModelEx =
modelsProvider.getModifiableLibraryModel(library).asInstanceOf[LibraryEx.ModifiableModelEx]
// Utility methods
def getIdeModuleByNode(node: DataNode[_]): Option[Module] =
for {
moduleData <- Option(node.getData(ProjectKeys.MODULE))
module <- findIdeModule(moduleData)
} yield module
def getScalaLibraries: Set[Library] =
modelsProvider.getAllLibraries.filter(l => Option(l.getName).exists(_.contains(ScalaLibraryName))).toSet
def getScalaLibraries(module: Module, platform: Platform): Set[Library] = {
val libraryName = platform match {
case Scala => ScalaLibraryName
case Dotty => DottyLibraryName
}
val collector = new CollectProcessor[Library]()
getModifiableRootModel(module).orderEntries().librariesOnly().forEachLibrary(collector)
collector.getResults.asScala
.toSet
.filter(l => Option(l.getName).exists(_.contains(libraryName)))
}
def executeProjectChangeAction(action: => Unit): Unit =
ExternalSystemApiUtil.executeProjectChangeAction(new DisposeAwareProjectChange(project) {
override def execute(): Unit = action
})
def setScalaSdk(library: Library,
platform: Platform,
languageLevel: ScalaLanguageLevel,
compilerClasspath: Seq[File]): Unit = {
val properties = new ScalaLibraryProperties()
properties.platform = platform
properties.languageLevel = languageLevel
properties.compilerClasspath = compilerClasspath
val model = getModifiableLibraryModelEx(library)
model.setKind(ScalaLibraryType.instance.getKind)
model.setProperties(properties)
}
}
abstract class AbstractImporter[E](val dataToImport: Seq[DataNode[E]],
val projectData: ProjectData,
val project: Project,
val modelsProvider: IdeModifiableModelsProvider) extends Importer[E]
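// Hedged illustration (not part of the original file): a minimal, do-nothing importer
// showing how AbstractImporter is intended to be subclassed. The name "NoOpImporter"
// is an assumption for this sketch only.
class NoOpImporter[E](dataToImport: Seq[DataNode[E]],
                      projectData: ProjectData,
                      project: Project,
                      modelsProvider: IdeModifiableModelsProvider)
  extends AbstractImporter[E](dataToImport, projectData, project, modelsProvider) {
  // A real importer would inspect dataToImport and mutate models via modelsProvider here.
  override def importData(): Unit = ()
}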
|
triplequote/intellij-scala
|
scala/scala-impl/src/org/jetbrains/sbt/project/data/service/AbstractDataService.scala
|
Scala
|
apache-2.0
| 5,245
|
package recsys
import java.io.File
import java.util.{Map => JMap}
import com.datastax.driver.core.Cluster
import org.constretto.Constretto
import org.constretto.Constretto._
import org.constretto.Converter._
object Config {
implicit val cassandraConfConverter = fromObject { obj =>
val servers = obj[List[String]]("servers")
val port = obj.get[Int]("port").getOrElse(9042)
CassandraConf(servers, port)
}
implicit val configConverter = fromObject { obj =>
val cassandra = obj[CassandraConf]("cassandra")
Config(cassandra)
}
}
object Sites {
implicit val cassandraSiteConverter = fromObject{ obj =>
CassandraSite(obj[String]("keyspace"), obj[String]("table-prefix"))
}
implicit val siteConverter = fromObject{ obj =>
SiteConfig(
obj[String]("name"),
obj[CassandraSite]("cassandra")
)
}
def apply(file: File): Sites = {
val con = Constretto(List(json(file.toURI.toString, "sites")))
Sites(con[Map[String, SiteConfig]]("sites"))
}
}
case class CassandraConf(servers: List[String], port: Int) {
lazy val cluster = {
val builder = Cluster.builder()
servers.foreach(builder.addContactPoint)
builder.withPort(port)
.withoutJMXReporting()
.withoutMetrics()
.build()
}
def getSession(ks: String) = cluster.connect(ks)
}
case class Config(cassandra: CassandraConf)
case class CassandraSite(keyspace: String, tablePrefix: String)
case class SiteConfig(name: String, cassandra: CassandraSite)
case class Sites(underlying: Map[String, SiteConfig]) {
def get(name: String): Option[SiteConfig] = underlying.get(name)
}
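// Hedged usage sketch (not part of the original file). The file name "sites.json" and the
// site key "blog" are assumptions; the JSON layout Constretto expects is determined by the
// converters above (a "sites" object whose entries carry "name" and a "cassandra" block
// with "keyspace" and "table-prefix").
object SitesUsageExample {
  def lookup(): Option[SiteConfig] =
    Sites(new File("sites.json")).get("blog")
}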
|
przemek1990/spark-cassandra
|
src/main/scala/recsys/Config.scala
|
Scala
|
apache-2.0
| 1,630
|
package be.wegenenverkeer.atomium.client
import be.wegenenverkeer.atomium.japi
import com.github.tomakehurst.wiremock.WireMockServer
import com.github.tomakehurst.wiremock.client.WireMock
import com.github.tomakehurst.wiremock.client.WireMock._
import com.github.tomakehurst.wiremock.common.SingleRootFileSource
import com.github.tomakehurst.wiremock.core.WireMockConfiguration._
import org.scalatest.{BeforeAndAfterAll, Suite}
/**
* Created by Karel Maesen, Geovise BVBA on 17/11/15.
*/
trait WithWireMock extends BeforeAndAfterAll {
self: Suite =>
import be.wegenenverkeer.atomium.client.ImplicitConversions._
def fileSource: String
def mappings = new SingleRootFileSource(fileSource)
  // we take a different port than in the java-client module, because tests unfortunately continue to overlap with the java-client module
val port: Int = 8089
lazy val server = new WireMockServer(wireMockConfig.port(port).fileSource(mappings))
def mkClientAcceptingXml = new japi.client.AtomiumClient.Builder()
.setBaseUrl(s"http://localhost:$port/")
.setAcceptXml()
.build
.asScala
def mkClientAcceptingJson = new japi.client.AtomiumClient.Builder()
.setBaseUrl(s"http://localhost:$port/")
.setAcceptJson()
.build
.asScala
def resetWireMock() {
WireMock.resetToDefault()
}
override def beforeAll() {
server.start()
configureFor("localhost", port)
resetToDefault()
}
override def afterAll() {
server.shutdown()
Thread.sleep(1000)
}
}
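// Hedged usage sketch (not part of the original file): a hypothetical suite mixing in the
// trait. The suite name and the mappings directory are assumptions for illustration only.
class ExampleWireMockSpec extends org.scalatest.FunSuite with WithWireMock {
  override def fileSource: String = "src/test/resources/wiremock-mappings"
  test("a JSON-accepting client can be built against the stubbed server") {
    val client = mkClientAcceptingJson
    assert(client != null)
  }
}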
|
joachimvda/atomium
|
modules/client-scala/src/test/scala/be/wegenenverkeer/atomium/client/WithWireMock.scala
|
Scala
|
mit
| 1,513
|
import _root_.io.gatling.core.scenario.Simulation
import ch.qos.logback.classic.{Level, LoggerContext}
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
/**
* Performance test for the TeamProject entity.
*/
class TeamProjectGatlingTest extends Simulation {
val context: LoggerContext = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext]
// Log all HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("TRACE"))
// Log failed HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("DEBUG"))
val baseURL = Option(System.getProperty("baseURL")) getOrElse """http://localhost:8080"""
val httpConf = http
.baseURL(baseURL)
.inferHtmlResources()
.acceptHeader("*/*")
.acceptEncodingHeader("gzip, deflate")
.acceptLanguageHeader("fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3")
.connectionHeader("keep-alive")
.userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:33.0) Gecko/20100101 Firefox/33.0")
val headers_http = Map(
"Accept" -> """application/json"""
)
val headers_http_authentication = Map(
"Content-Type" -> """application/json""",
"Accept" -> """application/json"""
)
val headers_http_authenticated = Map(
"Accept" -> """application/json""",
"Authorization" -> "${access_token}"
)
val scn = scenario("Test the TeamProject entity")
.exec(http("First unauthenticated request")
.get("/api/account")
.headers(headers_http)
.check(status.is(401))).exitHereIfFailed
.pause(10)
.exec(http("Authentication")
.post("/api/authenticate")
.headers(headers_http_authentication)
.body(StringBody("""{"username":"admin", "password":"admin"}""")).asJSON
.check(header.get("Authorization").saveAs("access_token"))).exitHereIfFailed
.pause(1)
.exec(http("Authenticated request")
.get("/api/account")
.headers(headers_http_authenticated)
.check(status.is(200)))
.pause(10)
.repeat(2) {
exec(http("Get all teamProjects")
.get("/gamecraftteam/api/team-projects")
.headers(headers_http_authenticated)
.check(status.is(200)))
.pause(10 seconds, 20 seconds)
.exec(http("Create new teamProject")
.post("/gamecraftteam/api/team-projects")
.headers(headers_http_authenticated)
.body(StringBody("""{"id":null, "teamId":null, "projectId":null}""")).asJSON
.check(status.is(201))
.check(headerRegex("Location", "(.*)").saveAs("new_teamProject_url"))).exitHereIfFailed
.pause(10)
.repeat(5) {
exec(http("Get created teamProject")
.get("/gamecraftteam${new_teamProject_url}")
.headers(headers_http_authenticated))
.pause(10)
}
.exec(http("Delete created teamProject")
.delete("/gamecraftteam${new_teamProject_url}")
.headers(headers_http_authenticated))
.pause(10)
}
val users = scenario("Users").exec(scn)
setUp(
users.inject(rampUsers(Integer.getInteger("users", 100)) over (Integer.getInteger("ramp", 1) minutes))
).protocols(httpConf)
}
|
iMartinezMateu/gamecraft
|
gamecraft-team/src/test/gatling/user-files/simulations/TeamProjectGatlingTest.scala
|
Scala
|
mit
| 3,458
|
/*
* Copyright (C) 2012 The Regents of The University California.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shark.memstore2
import java.nio.ByteBuffer
import org.scalatest.FunSuite
import spark.{JavaSerializer, KryoSerializer}
class TablePartitionSuite extends FunSuite {
test("serialize TablePartition backed by non-direct ByteBuffer using Java") {
val col1 = Array[Byte](0, 1, 2)
val col2 = Array[Byte](1, 2, 3)
val tp = new TablePartition(3, Array(ByteBuffer.wrap(col1), ByteBuffer.wrap(col2)))
val ser = new JavaSerializer
val bytes = ser.newInstance().serialize(tp)
val tp1 = ser.newInstance().deserialize[TablePartition](bytes)
assert(tp1.numRows === 3)
assert(tp1.columns(0).remaining() == 3)
assert(tp1.columns(0).get() == 0)
assert(tp1.columns(0).get() == 1)
assert(tp1.columns(0).get() == 2)
assert(tp1.columns(1).remaining() == 3)
assert(tp1.columns(1).get() == 1)
assert(tp1.columns(1).get() == 2)
assert(tp1.columns(1).get() == 3)
}
test("serialize TablePartition backed by direct ByteBuffer using Java") {
val col1 = ByteBuffer.allocateDirect(3)
col1.put(0.toByte)
col1.put(1.toByte)
col1.put(2.toByte)
col1.rewind()
val col2 = ByteBuffer.allocateDirect(3)
col2.put(1.toByte)
col2.put(2.toByte)
col2.put(3.toByte)
col2.rewind()
val tp = new TablePartition(3, Array(col1, col2))
val ser = new JavaSerializer
val bytes = ser.newInstance().serialize(tp)
val tp1 = ser.newInstance().deserialize[TablePartition](bytes)
assert(tp1.numRows === 3)
assert(tp1.columns(0).remaining() == 3)
assert(tp1.columns(0).get() == 0)
assert(tp1.columns(0).get() == 1)
assert(tp1.columns(0).get() == 2)
assert(tp1.columns(1).remaining() == 3)
assert(tp1.columns(1).get() == 1)
assert(tp1.columns(1).get() == 2)
assert(tp1.columns(1).get() == 3)
}
test("serialize TablePartition backed by non-direct ByteBuffer using Kryo") {
val col1 = Array[Byte](0, 1, 2)
val col2 = Array[Byte](1, 2, 3)
val tp = new TablePartition(3, Array(ByteBuffer.wrap(col1), ByteBuffer.wrap(col2)))
val ser = new KryoSerializer
val bytes = ser.newInstance().serialize(tp)
val tp1 = ser.newInstance().deserialize[TablePartition](bytes)
assert(tp1.numRows === 3)
assert(tp1.columns(0).remaining() == 3)
assert(tp1.columns(0).get() == 0)
assert(tp1.columns(0).get() == 1)
assert(tp1.columns(0).get() == 2)
assert(tp1.columns(1).remaining() == 3)
assert(tp1.columns(1).get() == 1)
assert(tp1.columns(1).get() == 2)
assert(tp1.columns(1).get() == 3)
}
test("serialize TablePartition backed by direct ByteBuffer using Kryo") {
val col1 = ByteBuffer.allocateDirect(3)
col1.put(0.toByte)
col1.put(1.toByte)
col1.put(2.toByte)
col1.rewind()
val col2 = ByteBuffer.allocateDirect(3)
col2.put(1.toByte)
col2.put(2.toByte)
col2.put(3.toByte)
col2.rewind()
val tp = new TablePartition(3, Array(col1, col2))
val ser = new KryoSerializer
val bytes = ser.newInstance().serialize(tp)
val tp1 = ser.newInstance().deserialize[TablePartition](bytes)
assert(tp1.numRows === 3)
assert(tp1.columns(0).remaining() == 3)
assert(tp1.columns(0).get() == 0)
assert(tp1.columns(0).get() == 1)
assert(tp1.columns(0).get() == 2)
assert(tp1.columns(1).remaining() == 3)
assert(tp1.columns(1).get() == 1)
assert(tp1.columns(1).get() == 2)
assert(tp1.columns(1).get() == 3)
}
}
|
vax11780/shark
|
src/test/scala/shark/memstore2/TablePartitionSuite.scala
|
Scala
|
apache-2.0
| 4,098
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.datamap
import java.io.{ByteArrayInputStream, DataOutputStream, ObjectInputStream, ObjectOutputStream}
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import com.sun.xml.internal.messaging.saaj.util.ByteOutputStream
import org.apache.hadoop.conf.Configuration
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datamap.{DataMapDistributable, DataMapMeta, Segment}
import org.apache.carbondata.core.datamap.dev.{DataMapBuilder, DataMapModel, DataMapWriter}
import org.apache.carbondata.core.datamap.dev.fgdatamap.{FineGrainBlocklet, FineGrainDataMap, FineGrainDataMapFactory}
import org.apache.carbondata.core.datastore.FileReader
import org.apache.carbondata.core.datastore.block.SegmentProperties
import org.apache.carbondata.core.datastore.compression.SnappyCompressor
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.datastore.page.ColumnPage
import org.apache.carbondata.core.features.TableOperation
import org.apache.carbondata.core.indexstore.PartitionSpec
import org.apache.carbondata.core.indexstore.blockletindex.BlockletDataMapDistributable
import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, DataMapSchema}
import org.apache.carbondata.core.metadata.CarbonMetadata
import org.apache.carbondata.core.scan.expression.Expression
import org.apache.carbondata.core.scan.expression.conditional.EqualToExpression
import org.apache.carbondata.core.scan.filter.intf.ExpressionType
import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf
import org.apache.carbondata.core.util.{ByteUtil, CarbonProperties}
import org.apache.carbondata.core.util.path.CarbonTablePath
import org.apache.carbondata.events.Event
import org.apache.carbondata.spark.testsuite.datacompaction.CompactionSupportGlobalSortBigFileTest
class FGDataMapFactory(carbonTable: CarbonTable,
dataMapSchema: DataMapSchema) extends FineGrainDataMapFactory(carbonTable, dataMapSchema) {
/**
* Return a new write for this datamap
*/
override def createWriter(segment: Segment, dataWritePath: String, segmentProperties: SegmentProperties): DataMapWriter = {
new FGDataMapWriter(carbonTable, segment, dataWritePath, dataMapSchema)
}
/**
* Get the datamap for segmentId
*/
override def getDataMaps(segment: Segment): java.util.List[FineGrainDataMap] = {
val path = CarbonTablePath.getSegmentPath(carbonTable.getTablePath, segment.getSegmentNo)
val file = FileFactory.getCarbonFile(path+ "/" +dataMapSchema.getDataMapName)
val files = file.listFiles()
files.map { f =>
val dataMap: FineGrainDataMap = new FGDataMap()
dataMap.init(new DataMapModel(f.getCanonicalPath, new Configuration(false)))
dataMap
}.toList.asJava
}
/**
* Get datamap for distributable object.
*/
override def getDataMaps(distributable: DataMapDistributable): java.util.List[FineGrainDataMap]= {
val mapDistributable = distributable.asInstanceOf[BlockletDataMapDistributable]
val dataMap: FineGrainDataMap = new FGDataMap()
dataMap.init(new DataMapModel(mapDistributable.getFilePath, new Configuration(false)))
Seq(dataMap).asJava
}
/**
* Get all distributable objects of a segmentId
*
* @return
*/
override def toDistributable(segment: Segment): java.util.List[DataMapDistributable] = {
val path = carbonTable.getTablePath
val file = FileFactory.getCarbonFile(
path+ "/" +dataMapSchema.getDataMapName + "/" + segment.getSegmentNo)
val files = file.listFiles()
files.map { f =>
val d: DataMapDistributable = new BlockletDataMapDistributable(f.getCanonicalPath)
d.setSegment(segment)
d.setDataMapSchema(getDataMapSchema)
d
}.toList.asJava
}
/**
*
* @param event
*/
override def fireEvent(event: Event):Unit = {
???
}
/**
* Clear all datamaps from memory
*/
override def clear(): Unit = {
}
/**
* Return metadata of this datamap
*/
override def getMeta: DataMapMeta = {
new DataMapMeta(carbonTable.getIndexedColumns(dataMapSchema),
List(ExpressionType.EQUALS, ExpressionType.IN).asJava)
}
/**
* delete datamap of the segment
*/
override def deleteDatamapData(segment: Segment): Unit = {
}
/**
* delete datamap data if any
*/
override def deleteDatamapData(): Unit = {
}
/**
* defines the features scopes for the datamap
*/
override def willBecomeStale(operation: TableOperation): Boolean = {
false
}
override def createBuilder(segment: Segment,
shardName: String, segmentProperties: SegmentProperties): DataMapBuilder = {
???
}
/**
* Get the datamap for segmentId
*/
override def getDataMaps(segment: Segment,
partitions: java.util.List[PartitionSpec]): java.util.List[FineGrainDataMap] = {
getDataMaps(segment)
}
}
class FGDataMap extends FineGrainDataMap {
var maxMin: ArrayBuffer[(Int, (Array[Byte], Array[Byte]), Long, Int)] = _
var FileReader: FileReader = _
var filePath: String = _
val compressor = new SnappyCompressor
var taskName:String = _
/**
* It is called to load the data map to memory or to initialize it.
*/
override def init(dataMapModel: DataMapModel): Unit = {
this.filePath = dataMapModel.getFilePath
val carbonFile = FileFactory.getCarbonFile(filePath)
taskName = carbonFile.getName
val size = carbonFile.getSize
FileReader = FileFactory.getFileHolder(FileFactory.getFileType(filePath))
val footerLen = FileReader.readInt(filePath, size - 4)
val bytes = FileReader.readByteArray(filePath, size - footerLen - 4, footerLen)
val in = new ByteArrayInputStream(compressor.unCompressByte(bytes))
val obj = new ObjectInputStream(in)
maxMin = obj.readObject()
.asInstanceOf[ArrayBuffer[(Int, (Array[Byte], Array[Byte]), Long, Int)]]
}
/**
* Prune the datamap with filter expression. It returns the list of
* blocklets where these filters can exist.
*
* @param filterExp
* @return
*/
override def prune(
filterExp: FilterResolverIntf,
segmentProperties: SegmentProperties,
partitions: java.util.List[PartitionSpec]): java.util.List[FineGrainBlocklet] = {
val buffer: ArrayBuffer[Expression] = new ArrayBuffer[Expression]()
val expression = filterExp.getFilterExpression
getEqualToExpression(expression, buffer)
val value = buffer.map { f =>
f.getChildren.get(1).evaluate(null).getString
}
val meta = findMeta(value(0).getBytes)
meta.map { f =>
readAndFindData(f, value(0).getBytes())
}.filter(_.isDefined).map(_.get).asJava
}
private def readAndFindData(meta: (Int, (Array[Byte], Array[Byte]), Long, Int),
value: Array[Byte]): Option[FineGrainBlocklet] = {
val bytes = FileReader.readByteArray(filePath, meta._3, meta._4)
val outputStream = new ByteArrayInputStream(compressor.unCompressByte(bytes))
val obj = new ObjectInputStream(outputStream)
val blockletsData = obj.readObject()
.asInstanceOf[ArrayBuffer[(Array[Byte], Seq[Seq[Int]], Seq[Int])]]
import scala.collection.Searching._
val searching = blockletsData
.search[(Array[Byte], Seq[Seq[Int]], Seq[Int])]((value, Seq(Seq(0)), Seq(0)))(new Ordering[
(Array[Byte], Seq[Seq[Int]], Seq[Int])] {
override def compare(x: (Array[Byte], Seq[Seq[Int]], Seq[Int]),
y: (Array[Byte], Seq[Seq[Int]], Seq[Int])) = {
ByteUtil.UnsafeComparer.INSTANCE.compareTo(x._1, y._1)
}
})
if (searching.insertionPoint >= 0) {
val f = blockletsData(searching.insertionPoint)
val pages = f._3.zipWithIndex.map { p =>
val pg = new FineGrainBlocklet.Page
pg.setPageId(p._1)
pg.setRowId(f._2(p._2).toArray)
pg
}
Some(new FineGrainBlocklet(taskName, meta._1.toString, pages.toList.asJava))
} else {
None
}
}
private def findMeta(value: Array[Byte]) = {
val tuples = maxMin.filter { f =>
ByteUtil.UnsafeComparer.INSTANCE.compareTo(value, f._2._1) >= 0 &&
ByteUtil.UnsafeComparer.INSTANCE.compareTo(value, f._2._2) <= 0
}
tuples
}
def getEqualToExpression(expression: Expression, buffer: ArrayBuffer[Expression]): Unit = {
if (expression.isInstanceOf[EqualToExpression]) {
buffer += expression
} else {
if (expression.getChildren != null) {
expression.getChildren.asScala.map { f =>
if (f.isInstanceOf[EqualToExpression]) {
buffer += f
}
getEqualToExpression(f, buffer)
}
}
}
}
/**
* Clear complete index table and release memory.
*/
override def clear():Unit = {
}
override def isScanRequired(filterExp: FilterResolverIntf): Boolean = ???
/**
* clears all the resources for datamaps
*/
override def finish() = {
}
override def getNumberOfEntries: Int = 1
}
class FGDataMapWriter(carbonTable: CarbonTable,
segment: Segment, shardName: String, dataMapSchema: DataMapSchema)
extends DataMapWriter(carbonTable.getTablePath, dataMapSchema.getDataMapName,
carbonTable.getIndexedColumns(dataMapSchema), segment, shardName) {
var taskName: String = _
val fgwritepath = dataMapPath
var stream: DataOutputStream = _
val blockletList = new ArrayBuffer[(Array[Byte], Seq[Int], Seq[Int])]()
val maxMin = new ArrayBuffer[(Int, (Array[Byte], Array[Byte]), Long, Int)]()
var position: Long = 0
val compressor = new SnappyCompressor
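  // File layout produced by this writer (inferred from finish() and FGDataMap.init()):
  //   [compressed blocklet index blocks ...][compressed maxMin footer][4-byte footer length]
  // FGDataMap.init() reads the trailing int first, then decompresses the footer to recover maxMin.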
/**
* Start of new block notification.
*
* @param blockId file name of the carbondata file
*/
override def onBlockStart(blockId: String): Unit = {
this.taskName = shardName
if (stream == null) {
val path = fgwritepath.substring(0, fgwritepath.lastIndexOf("/"))
FileFactory.mkdirs(path)
stream = FileFactory
.getDataOutputStream(fgwritepath)
}
}
/**
* End of block notification
*/
override def onBlockEnd(blockId: String): Unit = {
}
/**
* Start of new blocklet notification.
*
* @param blockletId sequence number of blocklet in the block
*/
override def onBlockletStart(blockletId: Int): Unit = {
}
/**
* End of blocklet notification
*
* @param blockletId sequence number of blocklet in the block
*/
override def onBlockletEnd(blockletId: Int): Unit = {
val sorted = blockletList
.sortWith((l, r) => ByteUtil.UnsafeComparer.INSTANCE.compareTo(l._1, r._1) <= 0)
var oldValue: (Array[Byte], Seq[Seq[Int]], Seq[Int]) = null
var addedLast: Boolean = false
val blockletListUpdated = new ArrayBuffer[(Array[Byte], Seq[Seq[Int]], Seq[Int])]()
// Merge all same column values to single row.
sorted.foreach { f =>
if (oldValue != null) {
if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(f._1, oldValue._1) == 0) {
oldValue = (oldValue._1, oldValue._2 ++ Seq(f._2), oldValue._3 ++ f._3)
addedLast = false
} else {
blockletListUpdated += oldValue
oldValue = (f._1, Seq(f._2), f._3)
addedLast = true
}
} else {
oldValue = (f._1, Seq(f._2), f._3)
addedLast = false
}
}
if (!addedLast && oldValue != null) {
blockletListUpdated += oldValue
}
val out = new ByteOutputStream()
val outStream = new ObjectOutputStream(out)
outStream.writeObject(blockletListUpdated)
outStream.close()
val bytes = compressor.compressByte(out.getBytes)
stream.write(bytes)
maxMin +=
((blockletId, (blockletListUpdated.head._1, blockletListUpdated.last
._1), position, bytes.length))
position += bytes.length
blockletList.clear()
}
/**
* Add the column pages row to the datamap, order of pages is same as `indexColumns` in
* DataMapMeta returned in DataMapFactory.
*
* Implementation should copy the content of `pages` as needed, because `pages` memory
* may be freed after this method returns, if using unsafe column page.
*/
override def onPageAdded(blockletId: Int,
pageId: Int,
pageSize: Int,
pages: Array[ColumnPage]): Unit = {
val size = pages(0).getPageSize
val list = new ArrayBuffer[(Array[Byte], Int)]()
var i = 0
while (i < size) {
val bytes = pages(0).getBytes(i)
val newBytes = new Array[Byte](bytes.length - 2)
System.arraycopy(bytes, 2, newBytes, 0, newBytes.length)
list += ((newBytes, i))
i = i + 1
}
// Sort based on the column data in order to create index.
val sorted = list
.sortWith((l, r) => ByteUtil.UnsafeComparer.INSTANCE.compareTo(l._1, r._1) <= 0)
var oldValue: (Array[Byte], Seq[Int], Seq[Int]) = null
var addedLast: Boolean = false
// Merge all same column values to single row.
sorted.foreach { f =>
if (oldValue != null) {
if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(f._1, oldValue._1) == 0) {
oldValue = (oldValue._1, oldValue._2 ++ Seq(f._2), oldValue._3)
addedLast = false
} else {
blockletList += oldValue
oldValue = (f._1, Seq(f._2), Seq(pageId))
addedLast = true
}
} else {
oldValue = (f._1, Seq(f._2), Seq(pageId))
addedLast = false
}
}
if (!addedLast && oldValue != null) {
blockletList += oldValue
}
}
/**
* This is called during closing of writer.So after this call no more data will be sent to this
* class.
*/
override def finish(): Unit = {
FileFactory.mkdirs(fgwritepath)
val out = new ByteOutputStream()
val outStream = new ObjectOutputStream(out)
outStream.writeObject(maxMin)
outStream.close()
val bytes = compressor.compressByte(out.getBytes)
stream.write(bytes)
stream.writeInt(bytes.length)
stream.close()
}
}
class FGDataMapTestCase extends QueryTest with BeforeAndAfterAll {
val file2 = resourcesPath + "/compaction/fil2.csv"
override protected def beforeAll(): Unit = {
//n should be about 5000000 of reset if size is default 1024
val n = 150000
CompactionSupportGlobalSortBigFileTest.createFile(file2, n * 4, n)
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.ENABLE_QUERY_STATISTICS, "true")
sql("DROP TABLE IF EXISTS normal_test")
sql(
"""
| CREATE TABLE normal_test(id INT, name STRING, city STRING, age INT)
| STORED AS carbondata
| TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='LOCAL_SORT')
""".stripMargin)
sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE normal_test OPTIONS('header'='false')")
}
test("test fg datamap") {
sql("DROP TABLE IF EXISTS datamap_test")
sql(
"""
| CREATE TABLE datamap_test(id INT, name STRING, city STRING, age INT)
| STORED AS carbondata
| TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='LOCAL_SORT')
""".stripMargin)
val table = CarbonMetadata.getInstance().getCarbonTable("default_datamap_test")
// register datamap writer
sql(
s"""
| CREATE DATAMAP ggdatamap ON TABLE datamap_test
| USING '${classOf[FGDataMapFactory].getName}'
| DMPROPERTIES('index_columns'='name')
""".stripMargin)
sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE datamap_test OPTIONS('header'='false')")
checkAnswer(sql("select * from datamap_test where name='n502670'"),
sql("select * from normal_test where name='n502670'"))
}
test("test fg datamap with 2 datamaps ") {
sql("DROP TABLE IF EXISTS datamap_test")
sql(
"""
| CREATE TABLE datamap_test(id INT, name STRING, city STRING, age INT)
| STORED AS carbondata
| TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='LOCAL_SORT')
""".stripMargin)
val table = CarbonMetadata.getInstance().getCarbonTable("default_datamap_test")
// register datamap writer
sql(
s"""
| CREATE DATAMAP ggdatamap1 ON TABLE datamap_test
| USING '${classOf[FGDataMapFactory].getName}'
| DMPROPERTIES('index_columns'='name')
""".stripMargin)
sql(
s"""
| CREATE DATAMAP ggdatamap2 ON TABLE datamap_test
| USING '${classOf[FGDataMapFactory].getName}'
| DMPROPERTIES('index_columns'='city')
""".stripMargin)
sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE datamap_test OPTIONS('header'='false')")
checkAnswer(sql("select * from datamap_test where name='n502670' and city='c2670'"),
sql("select * from normal_test where name='n502670' and city='c2670'"))
checkAnswer(sql("select * from datamap_test where name='n502670' or city='c2670'"),
sql("select * from normal_test where name='n502670' or city='c2670'"))
}
test("test invisible datamap during query") {
val tableName = "datamap_testFG"
val dataMapName1 = "datamap1"
val dataMapName2 = "datamap2"
sql(s"DROP TABLE IF EXISTS $tableName")
sql(
s"""
| CREATE TABLE $tableName(id INT, name STRING, city STRING, age INT)
| STORED AS carbondata
| TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='LOCAL_SORT')
""".stripMargin)
// register datamap writer
sql(
s"""
| CREATE DATAMAP $dataMapName1
| ON TABLE $tableName
| USING '${classOf[FGDataMapFactory].getName}'
| DMPROPERTIES('index_columns'='name')
""".stripMargin)
sql(
s"""
| CREATE DATAMAP $dataMapName2
| ON TABLE $tableName
| USING '${classOf[FGDataMapFactory].getName}'
| DMPROPERTIES('index_columns'='city')
""".stripMargin)
sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE $tableName OPTIONS('header'='false')")
val df1 = sql(s"EXPLAIN EXTENDED SELECT * FROM $tableName WHERE name='n502670' AND city='c2670'").collect()
assert(df1(0).getString(0).contains("FG DataMap"))
assert(df1(0).getString(0).contains(dataMapName1))
assert(df1(0).getString(0).contains(dataMapName2))
// make datamap1 invisible
sql(s"SET ${CarbonCommonConstants.CARBON_DATAMAP_VISIBLE}default.$tableName.$dataMapName1 = false")
val df2 = sql(s"EXPLAIN EXTENDED SELECT * FROM $tableName WHERE name='n502670' AND city='c2670'").collect()
val e = intercept[Exception] {
assert(df2(0).getString(0).contains(dataMapName1))
}
assert(e.getMessage.contains("did not contain \"" + dataMapName1))
assert(df2(0).getString(0).contains(dataMapName2))
checkAnswer(sql(s"SELECT * FROM $tableName WHERE name='n502670' AND city='c2670'"),
sql("SELECT * FROM normal_test WHERE name='n502670' AND city='c2670'"))
// also make datamap2 invisible
sql(s"SET ${CarbonCommonConstants.CARBON_DATAMAP_VISIBLE}default.$tableName.$dataMapName2 = false")
checkAnswer(sql(s"SELECT * FROM $tableName WHERE name='n502670' AND city='c2670'"),
sql("SELECT * FROM normal_test WHERE name='n502670' AND city='c2670'"))
val df3 = sql(s"EXPLAIN EXTENDED SELECT * FROM $tableName WHERE name='n502670' AND city='c2670'").collect()
val e31 = intercept[Exception] {
assert(df3(0).getString(0).contains(dataMapName1))
}
assert(e31.getMessage.contains("did not contain \"" + dataMapName1))
val e32 = intercept[Exception] {
assert(df3(0).getString(0).contains(dataMapName2))
}
assert(e32.getMessage.contains("did not contain \"" + dataMapName2))
// make datamap1,datamap2 visible
sql(s"SET ${CarbonCommonConstants.CARBON_DATAMAP_VISIBLE}default.$tableName.$dataMapName1 = true")
sql(s"SET ${CarbonCommonConstants.CARBON_DATAMAP_VISIBLE}default.$tableName.$dataMapName2 = true")
checkAnswer(sql(s"SELECT * FROM $tableName WHERE name='n502670' AND city='c2670'"),
sql("SELECT * FROM normal_test WHERE name='n502670' AND city='c2670'"))
val df4 = sql(s"EXPLAIN EXTENDED SELECT * FROM $tableName WHERE name='n502670' AND city='c2670'").collect()
assert(df4(0).getString(0).contains(dataMapName1))
assert(df4(0).getString(0).contains(dataMapName2))
}
override protected def afterAll(): Unit = {
defaultConfig()
// CompactionSupportGlobalSortBigFileTest.deleteFile(file2)
// sql("DROP TABLE IF EXISTS normal_test")
// sql("DROP TABLE IF EXISTS datamap_test")
// sql("DROP TABLE IF EXISTS datamap_testFG")
// CarbonProperties.getInstance()
// .addProperty(CarbonCommonConstants.ENABLE_QUERY_STATISTICS,
// CarbonCommonConstants.ENABLE_QUERY_STATISTICS_DEFAULT)
}
}
|
jackylk/incubator-carbondata
|
integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/FGDataMapTestCase.scala
|
Scala
|
apache-2.0
| 21,538
|
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.util
import scala.reflect.ClassTag
// TODO Why insist on AnyRef? Why not just Any? This would make app.nlp.DocumentProcessor a little cleaner. -akm
/** Provides member "attr" which is a map from a class to an attribute value (instance of that class).
This is used to attach arbitrary "attributes" to objects that inherit from this trait.
Conveniently these attributes do not need to be pre-compiled into the object as class members,
and yet when fetched they are returned with the correct Scala type known.
For example, attributes are used to attach a part-of-speech label to a cc.factorie.app.nlp.Token,
to attach a ParseTree to a Sentence, and coreference information to a Document.
Basic example usage: object foo extends Attr; foo.attr += "bar"; require(foo.attr[String] == "bar"); foo.attr.remove[String].
@author Andrew McCallum */
trait Attr extends Serializable {
/** A collection of attributes, keyed by the attribute class. */
def getAttr = attr
object attr extends Serializable {
private var _attr: Array[AnyRef] = new Array[AnyRef](2)
/** The number of attributes present. */
def length: Int = { var i = 0; while ((i < _attr.length) && (_attr(i) ne null)) i += 1; i }
/** The capacity of the array holding the attributes. May be more than "length", the number of attributes present. */
def capacity: Int = _attr.length
    private def setCapacity(cap:Int): Unit = { val ta = new Array[AnyRef](cap); System.arraycopy(_attr, 0, ta, 0, math.min(cap, _attr.length)); _attr = ta }
/** Make sure there is capacity of at least "cap" */
    def ensureCapacity(cap:Int): Unit = if (cap > _attr.length) { val ta = new Array[AnyRef](cap); System.arraycopy(_attr, 0, ta, 0, _attr.length); _attr = ta }
/** Increase capacity by "incr". */
def increaseCapacity(incr:Int): Unit = { val ta = new Array[AnyRef](_attr.length+incr); System.arraycopy(_attr, 0, ta, 0, _attr.length); _attr = ta }
/** Remove the attribute at index i. */
def removeIndex(i:Int): Unit = {
val len = length
if (i == len - 1) _attr(i) = null
else {
System.arraycopy(_attr, i+1, _attr, i, len-i-1)
_attr(len-1) = null
}
}
/** Re-allocate the attribute array to remove any excess capacity */
def trimCapacity(): Unit = { val l = length; if (l < _attr.length) setCapacity(l) }
/** Add the given attribute, with key equal to its class. */
def +=[C<:AnyRef](value:C): C = {
var i = 0
val key = value.getClass
while (i < _attr.length && (_attr(i) ne null) && _attr(i).getClass != key)
i += 1
if (i == _attr.length)
increaseCapacity(1)
_attr(i) = value
value
}
/** Returns the index of the last attribute whose class is assignable from the argument.
Attributes occur in the order in which they were inserted.
Note this means you can add a:MyClass, then add b:SubclassOfMyClass, then index[MyClass] will return the index of "b". */
@inline final def index(key:Class[_]): Int = {
var i = _attr.length - 1
while (i >= 0) {
if ((_attr(i) ne null) && key.isAssignableFrom(_attr(i).getClass))
return i
i -= 1
}
-1
}
/** Returns the index of the last attribute whose class is assignable from the argument.
Attributes occur in the order in which they were inserted.
Note this means you can add a:MyClass, then add b:SubclassOfMyClass, then index[MyClass] will return the index of "b". */
@inline final def index[C<:AnyRef]()(implicit m: ClassTag[C]): Int = index(m.runtimeClass)
/** Return the index of the last attribute whose class is exactly the argument.
Attributes occur in the order in which they were inserted. */
@inline final def indexExactly(key:Class[_]): Int = {
var i = _attr.length - 1
while (i >= 0) {
        if ((_attr(i) ne null) && (key eq _attr(i).getClass)) return i
i -= 1
}
-1
}
/** Return true if there is an attribute of class equal to or subclass of the argument. */
def contains[C<:AnyRef]()(implicit m: ClassTag[C]): Boolean = index(m.runtimeClass) >= 0
/** Return true if there is an attribute of class equal to or subclass of the argument. */
def contains(key:Class[_]): Boolean = index(key) >= 0
/** Return true if there is an attribute of class exactly equal to the argument. */
def containsExactly[C<:AnyRef]()(implicit m: ClassTag[C]): Boolean = indexExactly(m.runtimeClass) >= 0
/** Return true if there is an attribute of class exactly equal to the argument. */
def containsExactly(key: Class[_]): Boolean = indexExactly(key) >= 0
/** Returns a sequence of all attributes with classes assignable to C (i.e. that are either C or a subclass of C). */
def all[C<:AnyRef]()(implicit m: ClassTag[C]): Seq[C] = {
val key = m.runtimeClass
val result = new scala.collection.mutable.ArrayBuffer[C]
var i = 0
while (i < _attr.length) {
if ((_attr(i) ne null) && key.isAssignableFrom(_attr(i).getClass)) result += _attr(i).asInstanceOf[C]
i += 1
}
result
}
/** Remove all attributes with class matching or subclass of C.
For example, to remove all attributes call remove[AnyRef].
If call results in no removals, will not throw an Error. */
def remove[C<:AnyRef](implicit m: ClassTag[C]): Unit = {
val key = m.runtimeClass
var i = 0
while (i < _attr.length) {
if ((_attr(i) ne null) && key.isAssignableFrom(_attr(i).getClass)) removeIndex(i)
else i += 1
}
}
/** Return a sequence of all attributes */
def values: Seq[AnyRef] = {
val result = new scala.collection.mutable.ArrayBuffer[AnyRef]
var i = 0
while (i < _attr.length) {
if (_attr(i) ne null) result += _attr(i)
i += 1
}
result
}
/** Fetch the first value associated with the given class. If none present, return null. */
def apply[C<:AnyRef]()(implicit m: ClassTag[C]): C = {
var i = index(m.runtimeClass)
if (i >= 0) _attr(i).asInstanceOf[C] else null.asInstanceOf[C]
}
/** Fetch the first value associated with the given class. If none present, return null. */
def apply[C<:AnyRef](key:Class[C]): C ={
var i = index(key)
if (i >= 0) _attr(i).asInstanceOf[C] else null.asInstanceOf[C]
}
/** Fetch the first attribute who class is exactly equal to the given class. If none present, return null. */
def exactly[C<:AnyRef]()(implicit m: ClassTag[C]): C = {
var i = indexExactly(m.runtimeClass)
if (i >= 0) _attr(i).asInstanceOf[C] else null.asInstanceOf[C]
}
def get[C<:AnyRef](implicit m: ClassTag[C]): Option[C] = {
val result = this.apply[C]
if (result ne null) Option(result) else None
}
def getOrElse[C<:AnyRef](defaultValue:C)(implicit m: ClassTag[C]): C = {
val result = this.apply[C]
if (result ne null) result else defaultValue
}
def getOrElseUpdate[C<:AnyRef](defaultValue: =>C)(implicit m: ClassTag[C]): C = {
val result = this.apply[C]
if (result ne null) result else {
val value = defaultValue
this += value
value
}
}
override def toString = values.mkString(" ")
}
}
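// Hedged usage sketch (not part of the original file): mirrors the example given in the
// Scaladoc of the Attr trait above. The object name "AttrUsageExample" is an assumption.
object AttrUsageExample {
  def demo(): Unit = {
    object foo extends Attr
    foo.attr += "bar"                    // attach a String attribute, keyed by its class
    require(foo.attr[String] == "bar")   // fetch it back by class
    foo.attr.remove[String]              // remove all String attributes
    require(!foo.attr.contains[String]())
  }
}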
|
strubell/factorie
|
src/main/scala/cc/factorie/util/Attr.scala
|
Scala
|
apache-2.0
| 8,121
|
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.feats
import io.truthencode.ddo.activation.AtWillEvent
import io.truthencode.ddo.model.classes.HeroicCharacterClass
import io.truthencode.ddo.model.classes.HeroicCharacterClass.Monk
import io.truthencode.ddo.support.requisite.{FeatRequisiteImpl, GrantsToClass, RequiresAllOfClass}
import java.time.Duration
/**
* At the cost of 10 Ki, a monk can use this ability to make horizontal leaps, closing to targets,
* traversing chasms, or zipping past enemies with less chance of being detected. There is a
 * cool-down of three seconds on this feat, and it can only be used on yourself.
*/
trait AbundantStep
extends FeatRequisiteImpl with ActiveFeat with AtWillEvent with GrantsToClass
with RequiresAllOfClass {
override def grantToClass: Seq[(HeroicCharacterClass, Int)] =
List((Monk, 12))
override def allOfClass: Seq[(HeroicCharacterClass, Int)] =
List((Monk, 12))
override def coolDown: Option[Duration] = Some(Duration.ofSeconds(3))
}
|
adarro/ddo-calc
|
subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/feats/AbundantStep.scala
|
Scala
|
apache-2.0
| 1,677
|
package net.cucumbersome
import org.scalatest.{MustMatchers, WordSpec}
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
class UnitSpec extends WordSpec with MustMatchers{
def futureValue[A](f: Future[A]): A = Await.result(f, 5 seconds)
}
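// Hedged usage sketch (not part of the original file): a hypothetical spec built on the
// helper above; the class name and the example values are assumptions.
class FutureValueExampleSpec extends UnitSpec {
  "futureValue" must {
    "unwrap an already completed future" in {
      futureValue(Future.successful(42)) mustBe 42
    }
  }
}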
|
CucumisSativus/rpgRollerBackend
|
src/test/scala/net/cucumbersome/UnitSpec.scala
|
Scala
|
mit
| 268
|
package com.twitter.finagle.http
import com.twitter.finagle.{Filter, Service}
import com.twitter.finagle.util.LoadService
import com.twitter.util.Future
import java.net.URI
import org.jboss.netty.handler.codec.http.{DefaultHttpResponse, HttpHeaders,
HttpRequest, HttpResponse, HttpResponseStatus, HttpVersion}
import java.util.logging.Logger
/**
* A service that dispatches incoming requests to registered handlers.
* In order to choose which handler to dispatch the request to, we take the path of the request and match it with
* the patterns of the pre-registered handlers. The pattern matching follows these rules:
* - Patterns ending with "/" use prefix matching. Eg: the pattern "foo/bar/" matches these paths:
* "foo/bar", "foo/bar/", "foo/bar/baz", etc.
* - Patterns not ending with "/" use exact matching. Eg: the pattern "foo/bar" ONLY matches these two paths:
* "foo/bar" and "foo/bar/"
* - Exact matching overrides prefix matching.
* - When multiple prefix matches exist, the longest pattern wins.
*/
class HttpMuxer(protected[this] val handlers: Seq[(String, Service[HttpRequest, HttpResponse])])
extends Service[HttpRequest, HttpResponse] {
def this() = this(Seq[(String, Service[HttpRequest, HttpResponse])]())
private[this] val sorted: Seq[(String, Service[HttpRequest, HttpResponse])] =
handlers.sortBy { case (pattern, _) => pattern.length } reverse
def patterns = sorted map { case(p, _) => p }
/**
* Create a new Mux service with the specified pattern added. If the pattern already exists, overwrite existing value.
* Pattern ending with "/" indicates prefix matching; otherwise exact matching.
*/
def withHandler(pattern: String, service: Service[HttpRequest, HttpResponse]): HttpMuxer = {
val norm = normalize(pattern)
new HttpMuxer(handlers.filterNot { case (pat, _) => pat == norm } :+ (norm, service))
}
/**
* Extract path from HttpRequest; look for a matching pattern; if found, dispatch the
* HttpRequest to the registered service; otherwise create a NOT_FOUND response
*/
def apply(request: HttpRequest): Future[HttpResponse] = {
val u = request.getUri
val uri = u.indexOf('?') match {
case -1 => u
case n => u.substring(0, n)
}
val path = normalize(new URI(uri).getPath)
// find the longest prefix of path; patterns are already sorted by length in descending order.
val matching = sorted.find { case (pattern, _) =>
(pattern.endsWith("/") && path.startsWith(pattern)) || // prefix
(!pattern.endsWith("/") && path == pattern) // exact match
}
matching match {
case Some((_, service)) => service(request)
case None =>
val response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.NOT_FOUND)
response.setHeader(HttpHeaders.Names.CONTENT_LENGTH, 0.toString)
Future.value(response)
}
}
/**
* - ensure path starts with "/"
* - get rid of excessive "/"s. For example "/a//b///c/" => "/a/b/c/"
*/
private[this] def normalize(path: String) = {
val suffix = if (path.endsWith("/")) "/" else ""
val p = path.split("/") filterNot(_.isEmpty) mkString "/"
"/" + p + suffix
}
}
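// Hedged usage sketch (not part of the original file): builds a muxer with one prefix
// handler, following the matching rules documented on the class above. The names
// "pingService" and the "/ping/" pattern are assumptions for illustration only.
object HttpMuxerUsageExample {
  private val pingService = new Service[HttpRequest, HttpResponse] {
    def apply(request: HttpRequest): Future[HttpResponse] =
      Future.value(new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK))
  }
  // "/ping/" ends with "/", so it prefix-matches "/ping", "/ping/" and "/ping/metrics";
  // a pattern without the trailing slash would match only that exact path (plus a trailing "/").
  val muxer: HttpMuxer = new HttpMuxer().withHandler("/ping/", pingService)
}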
/**
* Singleton default multiplex service
*/
object HttpMuxer extends Service[HttpRequest, HttpResponse] {
@volatile private[this] var underlying = new HttpMuxer()
override def apply(request: HttpRequest): Future[HttpResponse] =
underlying(request)
/**
* add handlers to mutate dispatching strategies.
*/
def addHandler(pattern: String, service: Service[HttpRequest, HttpResponse]) = synchronized {
underlying = underlying.withHandler(pattern, service)
}
private[this] val nettyToFinagle =
Filter.mk[HttpRequest, HttpResponse, Request, Response] { (req, service) =>
service(Request(req)) map { _.httpResponse }
}
def addRichHandler(pattern: String, service: Service[Request, Response]) =
addHandler(pattern, nettyToFinagle andThen service)
def patterns = underlying.patterns
private[this] val log = Logger.getLogger(getClass.getName)
for (handler <- LoadService[HttpMuxHandler]()) {
log.info("HttpMuxer[%s] = %s(%s)".format(handler.pattern, handler.getClass.getName, handler))
addHandler(handler.pattern, handler)
}
}
/**
* Trait HttpMuxHandler is used for service-loading HTTP handlers.
*/
trait HttpMuxHandler extends Service[HttpRequest, HttpResponse] {
/** The pattern on to bind this handler to */
val pattern: String
}
|
stevegury/finagle
|
finagle-http/src/main/scala/com/twitter/finagle/http/HttpMuxer.scala
|
Scala
|
apache-2.0
| 4,534
|
package org.jetbrains.plugins.scala
package lang
package psi
import com.intellij.lang.ASTNode
import com.intellij.openapi.editor.Document
import com.intellij.openapi.progress.ProgressManager
import com.intellij.openapi.util.TextRange
import com.intellij.psi._
import com.intellij.psi.impl.source.codeStyle.CodeEditUtil
import com.intellij.psi.scope._
import com.intellij.psi.stubs.StubElement
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.editor.importOptimizer._
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.ScalaElementTypes
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.ScReferenceElement
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScBlockStatement
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScTypeAliasDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.usages.{ImportExprUsed, ImportSelectorUsed, ImportUsed, ImportWildcardSelectorUsed}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.{ScImportExpr, ScImportSelector, ScImportStmt}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.packaging.ScPackaging
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTypeDefinition
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.types.ScDesignatorType
import org.jetbrains.plugins.scala.lang.psi.types.result.TypingContext
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaNamesUtil
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
trait ScImportsHolder extends ScalaPsiElement {
def getImportStatements: Seq[ScImportStmt] = {
this match {
case s: ScalaStubBasedElementImpl[_] =>
val stub: StubElement[_] = s.getStub
if (stub != null) {
return stub.getChildrenByType(ScalaElementTypes.IMPORT_STMT, JavaArrayFactoryUtil.ScImportStmtFactory).toSeq
}
case _ =>
}
findChildrenByClassScala(classOf[ScImportStmt]).toSeq
}
override def processDeclarations(processor: PsiScopeProcessor,
state : ResolveState,
lastParent: PsiElement,
place: PsiElement): Boolean = {
if (lastParent != null) {
var run = ScalaPsiUtil.getPrevStubOrPsiElement(lastParent)
// updateResolveCaches()
while (run != null) {
ProgressManager.checkCanceled()
if (run.isInstanceOf[ScImportStmt] &&
!run.processDeclarations(processor, state, lastParent, place)) return false
run = ScalaPsiUtil.getPrevStubOrPsiElement(run)
}
}
true
}
def getImportsForLastParent(lastParent: PsiElement): Seq[ScImportStmt] = {
val buffer: ArrayBuffer[ScImportStmt] = new ArrayBuffer[ScImportStmt]()
if (lastParent != null) {
var run = ScalaPsiUtil.getPrevStubOrPsiElement(lastParent)
while (run != null) {
ProgressManager.checkCanceled()
run match {
case importStmt: ScImportStmt => buffer += importStmt
case _ =>
}
run = ScalaPsiUtil.getPrevStubOrPsiElement(run)
}
}
buffer.toSeq
}
def getAllImportUsed: mutable.Set[ImportUsed] = {
val res: mutable.Set[ImportUsed] = new mutable.HashSet[ImportUsed]
def processChild(element: PsiElement) {
for (child <- element.getChildren) {
child match {
case imp: ScImportExpr =>
if (/*!imp.singleWildcard && */imp.selectorSet.isEmpty) {
res += ImportExprUsed(imp)
}
else if (imp.singleWildcard) {
res += ImportWildcardSelectorUsed(imp)
}
for (selector <- imp.selectors) {
res += ImportSelectorUsed(selector)
}
case _ => processChild(child)
}
}
}
processChild(this)
res
}
def importStatementsInHeader: Seq[ScImportStmt] = {
val buf = new ArrayBuffer[ScImportStmt]
for (child <- getChildren) {
child match {
case x: ScImportStmt => buf += x
case p: ScPackaging if !p.isExplicit && buf.isEmpty => return p.importStatementsInHeader
case _: ScTypeDefinition | _: ScPackaging => return buf.toSeq
case _ =>
}
}
buf.toSeq
}
def addImportForClass(clazz: PsiClass, ref: PsiElement = null) {
ref match {
case ref: ScReferenceElement =>
if (!ref.isValid || ref.isReferenceTo(clazz)) return
ref.bind() match {
case Some(ScalaResolveResult(t: ScTypeAliasDefinition, subst)) if t.typeParameters.isEmpty =>
for (tp <- t.aliasedType(TypingContext.empty)) {
tp match {
case ScDesignatorType(c: PsiClass) if c == clazz => return
case _ =>
}
}
case _ =>
}
case _ =>
}
addImportForPath(clazz.qualifiedName, ref)
}
def addImportForPsiNamedElement(elem: PsiNamedElement, ref: PsiElement, cClass: Option[PsiClass] = None) {
def needImport = ref match {
case null => true
case ref: ScReferenceElement => ref.isValid && !ref.isReferenceTo(elem)
case _ => false
}
ScalaNamesUtil.qualifiedName(elem) match {
case Some(qual) if needImport => addImportForPath(qual, ref)
case _ =>
}
}
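  // Inserts import statements for the given fully qualified paths. Paths that resolve to the
  // enclosing package are skipped, and ScalaImportOptimizer is used to merge the new imports into
  // an existing import range, or to insert a fresh block when code precedes the imports.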
def addImportsForPaths(paths: Seq[String], refsContainer: PsiElement = null): Unit = {
import ScalaImportOptimizer._
def samePackage(path: String) = {
val ref = ScalaPsiElementFactory.createReferenceFromText(path, this.getManager)
val pathQualifier = Option(ref).flatMap(_.qualifier.map(_.getText)).getOrElse("")
val ourPackageName: Option[String] =
Option(PsiTreeUtil.getParentOfType(this, classOf[ScPackaging], false)).map(_.fullPackageName)
ourPackageName.contains(pathQualifier)
}
getFirstChild match {
case pack: ScPackaging if !pack.isExplicit && children.filterByType(classOf[ScImportStmt]).isEmpty =>
pack.addImportsForPaths(paths, refsContainer)
return
case _ =>
}
val file = this.getContainingFile match {
case sf: ScalaFile => sf
case _ => return
}
val documentManager = PsiDocumentManager.getInstance(getProject)
val document: Document = documentManager.getDocument(file)
val settings = OptimizeImportSettings(getProject)
val optimizer: ScalaImportOptimizer = findOptimizerFor(file) match {
case Some(o: ScalaImportOptimizer) => o
case _ => return
}
def replaceWithNewInfos(range: TextRange, infosToAdd: Seq[ImportInfo]): Unit = {
val rangeMarker = document.createRangeMarker(range)
documentManager.doPostponedOperationsAndUnblockDocument(document)
val newRange = new TextRange(rangeMarker.getStartOffset, rangeMarker.getEndOffset)
optimizer.replaceWithNewImportInfos(newRange, infosToAdd, settings, document)
documentManager.commitDocument(document)
}
val importInfosToAdd = paths.filterNot(samePackage).flatMap { path =>
val importText = s"import $path"
val place = getImportStatements.lastOption.getOrElse(getFirstChild.getNextSibling)
val importStmt = ScalaPsiElementFactory.createImportFromTextWithContext(importText, this, place)
createInfo(importStmt)
}
val importRanges = optimizer.collectImportRanges(this, namesAtRangeStart, createInfo(_))
val needToInsertFirst =
if (importRanges.isEmpty) true
else refsContainer == null && hasCodeBeforeImports
if (needToInsertFirst) {
val dummyImport = ScalaPsiElementFactory.createImportFromText("import dummy._", getManager)
val usedNames = collectUsedImportedNames(this)
val inserted = insertFirstImport(dummyImport, getFirstChild).asInstanceOf[ScImportStmt]
val range = inserted.getTextRange
val namesAtStart = namesAtRangeStart(inserted)
val rangeInfo = RangeInfo(namesAtStart, importInfosToAdd, usedImportedNames = usedNames, isLocal = false)
val infosToAdd = optimizedImportInfos(rangeInfo, settings)
replaceWithNewInfos(range, infosToAdd)
}
else {
val sortedRanges = importRanges.toSeq.sortBy(_._1.getStartOffset)
val selectedRange =
if (refsContainer != null && ScalaCodeStyleSettings.getInstance(getProject).isAddImportMostCloseToReference)
sortedRanges.reverse.find(_._1.getEndOffset < refsContainer.getTextRange.getStartOffset)
else sortedRanges.headOption
selectedRange match {
case Some((range, RangeInfo(names, importInfos, usedImportedNames, _))) =>
val buffer = importInfos.to[ArrayBuffer]
importInfosToAdd.foreach { infoToAdd =>
insertInto(buffer, infoToAdd, usedImportedNames, settings)
}
updateRootPrefix(buffer, names)
replaceWithNewInfos(range, buffer)
case _ =>
}
}
}
def addImportForPath(path: String, ref: PsiElement = null): Unit = {
addImportsForPaths(Seq(path), ref)
}
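  // False as soon as an import statement is seen before any block statement; true otherwise,
  // i.e. when some code (or nothing at all) precedes the imports.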
private def hasCodeBeforeImports: Boolean = {
val firstChild = childBeforeFirstImport.getOrElse(getFirstChild)
var nextChild = firstChild
while (nextChild != null) {
nextChild match {
case _: ScImportStmt => return false
case _: ScBlockStatement => return true
case _ => nextChild = nextChild.getNextSibling
}
}
true
}
protected def insertFirstImport(importSt: ScImportStmt, first: PsiElement): PsiElement = {
childBeforeFirstImport match {
case Some(elem) if first != null && elem.getTextRange.getEndOffset > first.getTextRange.getStartOffset =>
addImportAfter(importSt, elem)
case _ => addBefore(importSt, first)
}
}
protected def childBeforeFirstImport: Option[PsiElement] = {
Option(getNode.findChildByType(ScalaTokenTypes.tLBRACE)).map(_.getPsi)
}
def addImport(element: PsiElement): PsiElement = {
CodeEditUtil.addChildren(getNode, element.getNode, element.getNode, null).getPsi
}
def addImportBefore(element: PsiElement, anchor: PsiElement): PsiElement = {
val anchorNode = anchor.getNode
CodeEditUtil.addChildren(getNode, element.getNode, element.getNode, anchorNode).getPsi
}
def addImportAfter(element: PsiElement, anchor: PsiElement): PsiElement = {
if (anchor.getNode == getNode.getLastChildNode) return addImport(element)
addImportBefore(element, anchor.getNode.getTreeNext.getPsi)
}
def plainDeleteImport(stmt: ScImportExpr) {
stmt.deleteExpr()
}
def plainDeleteSelector(sel: ScImportSelector) {
sel.deleteSelector()
}
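  // Removes the whole import statement together with a trailing semicolon, and shrinks the
  // surrounding whitespace to at most one blank line, using the helpers defined below.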
def deleteImportStmt(stmt: ScImportStmt) {
def remove(node: ASTNode) = getNode.removeChild(node)
def shortenWhitespace(node: ASTNode) {
if (node == null) return
if (node.getText.count(_ == '\n') >= 2) {
val nl = ScalaPsiElementFactory.createNewLine(getManager, node.getText.replaceFirst("[\n]", ""))
getNode.replaceChild(node, nl.getNode)
}
}
def removeWhitespace(node: ASTNode) {
if (node == null) return
if (node.getPsi.isInstanceOf[PsiWhiteSpace]) {
if (node.getText.count(_ == '\n') < 2) remove(node)
else shortenWhitespace(node)
}
}
def removeSemicolonAndWhitespace(node: ASTNode) {
if (node == null) return
if (node.getElementType == ScalaTokenTypes.tSEMICOLON) {
removeWhitespace(node.getTreeNext)
remove(node)
}
else removeWhitespace(node)
}
val node = stmt.getNode
val next = node.getTreeNext
val prev = node.getTreePrev
removeSemicolonAndWhitespace(next)
remove(node)
shortenWhitespace(prev)
}
}
|
igrocki/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/psi/ScImportsHolder.scala
|
Scala
|
apache-2.0
| 11,958
|
/**
* Created by android on 22/2/15.
*/
import java.sql.Timestamp
import Models._
import scala.slick.driver.DerbyDriver.simple._
object Tables {
class Positions(tag: Tag) extends Table[Position](tag, "Positions") {
def name = column[String]("positionName", O.NotNull)
def description = column[String]("description", O.NotNull)
def positionId = column[Int]("positionId", O.PrimaryKey, O.AutoInc)
def * = (name, description, positionId.?) <> (Position.tupled, Position.unapply)
}
val positions = TableQuery[Positions]
class Players(tag: Tag) extends Table[Player](tag, "Players") {
def name = column[String]("playerName", O.NotNull)
def positionId = column[Int]("positionId", O.NotNull)
def playerId = column[Int]("playerId", O.PrimaryKey, O.AutoInc)
def * = (name, positionId, playerId.?) <> (Player.tupled, Player.unapply)
def positionIdFk = foreignKey("player_positionId_fk", positionId, positions)(_.positionId)
}
val players = TableQuery[Players]
class Matches(tag: Tag) extends Table[Match](tag, "Matches") {
def tournamentName = column[String]("tournamentName", O.NotNull)
def dateAndTime = column[Timestamp]("dateAndTime", O.NotNull)
def venue = column[String]("venue", O.NotNull)
def opponentTeam = column[String]("opponentTeam", O.NotNull)
def matchId = column[Int]("matchId", O.PrimaryKey, O.AutoInc)
def * = (tournamentName, dateAndTime, venue, opponentTeam, matchId.?) <> (Match.tupled, Match.unapply)
}
val matches = TableQuery[Matches]
class Teams(tag: Tag) extends Table[Team](tag, "Teams") {
def matchId = column[Int]("matchId", O.NotNull)
def playerId = column[Int]("playerId", O.NotNull)
def teamId = column[Int]("teamId", O.PrimaryKey, O.AutoInc)
def * = (matchId, playerId, teamId.?) <> (Team.tupled, Team.unapply)
def matchIdFk = foreignKey("teams_matchId_fk", matchId, matches)(_.matchId)
def playerIdFk = foreignKey("teams_playerId_fk", playerId, players)(_.playerId)
}
val teams = TableQuery[Teams]
}
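// Hypothetical usage sketch, not part of the original file. It assumes a Slick 2.x Database
// instance `db` and a `Position(name, description, id)` case class in Models with this field order:
//   db.withSession { implicit session =>
//     (Tables.positions.ddl ++ Tables.players.ddl).create
//     Tables.positions += Position("Goalkeeper", "Guards the goal", None)
//   }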
|
pamu/Slick-Demos
|
src/main/scala/Tables.scala
|
Scala
|
apache-2.0
| 2,055
|
package com.sksamuel.elastic4s.jackson
import com.fasterxml.jackson.core.JsonParser
import com.fasterxml.jackson.databind.module.SimpleModule
import com.fasterxml.jackson.databind.{DeserializationContext, JsonDeserializer, JsonMappingException, ObjectMapper}
import com.fasterxml.jackson.module.scala.experimental.ScalaObjectMapper
import com.sksamuel.elastic4s.requests.common.RefreshPolicy
import com.sksamuel.elastic4s.testkit.DockerTests
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import scala.util.Success
class ElasticJacksonIndexableTest extends AnyWordSpec with Matchers with DockerTests {
import ElasticJackson.Implicits._
"ElasticJackson implicits" should {
"index a case class" in {
client.execute {
bulk(
indexInto("jacksontest").source(Character("tyrion", "game of thrones")).withId("1"),
indexInto("jacksontest").source(Character("hank", "breaking bad")).withId("2"),
indexInto("jacksontest").source(Location("dorne", "game of thrones")).withId("3")
).refresh(RefreshPolicy.WaitFor)
}.await
}
"read a case class" in {
val resp = client.execute {
search("jacksontest").query("breaking")
}.await.result
resp.to[Character] shouldBe List(Character("hank", "breaking bad"))
}
"populate special fields" in {
val resp = client.execute {
search("jacksontest").query("breaking")
}.await.result
// should populate _id, _index and _type for us from the search result
resp.safeTo[CharacterWithIdTypeAndIndex] shouldBe
List(Success(CharacterWithIdTypeAndIndex("2", "jacksontest", "_doc", "hank", "breaking bad")))
}
"support custom mapper" in {
implicit val custom: ObjectMapper with ScalaObjectMapper = new ObjectMapper with ScalaObjectMapper
val module = new SimpleModule
module.addDeserializer(classOf[String], new JsonDeserializer[String] {
override def deserialize(p: JsonParser, ctxt: DeserializationContext): String = sys.error("boom")
})
custom.registerModule(module)
val resp = client.execute {
search("jacksontest").query("breaking")
}.await.result
// if our custom mapper has been picked up, then it should throw an exception when deserializing
intercept[JsonMappingException] {
resp.to[Character].toList
}
}
}
}
case class Character(name: String, show: String)
case class CharacterWithIdTypeAndIndex(_id: String, _index: String, _type: String, name: String, show: String)
case class Location(name: String, show: String)
|
sksamuel/elastic4s
|
elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/jackson/ElasticJacksonIndexableTest.scala
|
Scala
|
apache-2.0
| 2,638
|
package org.reactress
import collection._
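/** A mutable object taking part in reactive dataflow: `bindSubscription` lets it keep track of the
  * subscriptions created on its behalf (as `SubscriptionSet` below does), and `onMutated` is a hook
  * presumably invoked after a mutation (semantics inferred from this file only). */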
trait ReactMutable {
def bindSubscription(s: Reactive.Subscription): Reactive.Subscription = s
def onMutated(): Unit = {}
}
object ReactMutable {
trait SubscriptionSet extends ReactMutable {
val subscriptions = mutable.Set[Reactive.Subscription]()
def clearSubscriptions() {
for (s <- subscriptions) s.unsubscribe()
subscriptions.clear()
}
override def bindSubscription(s: Reactive.Subscription) = new Reactive.Subscription {
subscriptions += this
def unsubscribe() {
s.unsubscribe()
subscriptions -= this
}
}
}
}
|
axel22/reactive-collections
|
src/main/scala/org/reactress/ReactMutable.scala
|
Scala
|
bsd-3-clause
| 648
|
package com.cloudray.scalapress.plugin.listings.controller.renderer
import com.cloudray.scalapress.item.Item
import com.cloudray.scalapress.util.{Scalate, UrlGenerator}
import com.cloudray.scalapress.framework.ScalapressContext
/** @author Stephen Samuel */
object ListingCompleteRenderer {
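  // Builds the listing's absolute URL from the installation domain and renders the
  // listing-complete SSP template with it.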
def render(context: ScalapressContext, listing: Item) = {
val url = "http://" + context.installationDao.get.domain + UrlGenerator.url(listing)
Scalate.layout("/com/cloudray/scalapress/plugin/listings/listingcomplete.ssp", Map("url" -> url))
}
}
|
vidyacraghav/scalapress
|
src/main/scala/com/cloudray/scalapress/plugin/listings/controller/renderer/ListingCompleteRenderer.scala
|
Scala
|
apache-2.0
| 551
|
package hevs.especial.generator
import java.io.File
import java.util.Date
import hevs.especial.dsl.components.{Component, ComponentManager, HwImplemented}
import hevs.especial.simulation.QemuLogger
import hevs.especial.utils._
/**
* Output C/C++ code generator corresponding to the DSL program.
*
* To generate the C/C++ code of a program, the [[Resolver]] is used to resolve the graph, and then the code for each
 * connected component can be generated in the right order. Each component is responsible for generating its
 * own code. The generated file is divided into different sections, and every component can add a piece of code to
 * each of them, depending on its needs.
 *
 * The `while` loop is divided into 3 sections: 1) Read inputs, 2) Loop logic and 3) Update outputs.
 * Comments are added to the generated code if [[Settings.GEN_VERBOSE_CODE]] is set.
*
* @author Christopher Metrailler (mei@hevs.ch)
* @version 2.0
*/
class CodeGenerator extends Pipeline[Resolver.O, String] {
/** Output path of the generated code. */
private final val OUTPUT_PATH = "output/%s/"
/**
* Define all sections of the code that compose the file generated C/C++ file.
 * The order is important and corresponds to the generation order.
*/
private final val codeSections = Seq(
// (hw: hw_implemented) => hw.getIncludeCode, // Must remove duplicates files
(hw: HwImplemented) => hw.getGlobalCode,
(hw: HwImplemented) => hw.getFunctionsDefinitions,
(hw: HwImplemented) => hw.getInitCode,
(hw: HwImplemented) => hw.getBeginOfMainAfterInit,
(hw: HwImplemented) => hw.getLoopableCode,
(hw: HwImplemented) => hw.getExitCode
)
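  // Illustrative sketch (not generated output) of the file layout these sections produce:
  //   #include "..."                      // section 0: includes, duplicates removed
  //   <global code>, <function defs>      // sections 1-2
  //   void initOutputs() { ... }          // section 3: output init, emitted before the general init()
  //   void init() { ... }
  //   int main() {
  //     initOutputs(); init();            // section 4: code after init
  //     while(1) { ... }                  // section 5: read inputs, loop logic, update outputs
  //     ...                               // section 6: exit code
  //   }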
/**
* Generate the C/C++ code from the DSL program using the order given by the resolver.
*
 * If the resolver fails, the code generator is not called. The code generator writes the file to the output
 * directory. The output of the pipeline is the path of the generated file.
*
* @param ctx the context of the program with the logger
* @param input the result of the resolver
* @return the path of the generated file
*/
def run(ctx: Context)(input: Resolver.O): String = {
    // Check whether the output directory already exists
    val dirPath = String.format(OUTPUT_PATH, ctx.progName)
    val dir: RichFile = new File(dirPath)
    dir.createFolder() // Created only if it does not exist yet
// Generate the C file to the output folder
val path = dirPath + ctx.progName + ".cpp"
val f: RichFile = new File(path)
val code = generateCode(ctx)(input)
    val res = f.write(code) // Whether the write succeeded or not
if (res)
ctx.log.info(s"Code generated to '$path'.")
else
ctx.log.error(s"Unable to save the generated code to '$path'.")
path // The file path as output
}
/**
* Generate the C/C++ source code as a String.
*
* @param ctx the context of the program
* @param resolve the resolver output
* @return the C/C++ code generated as a String (not formatted)
*/
private def generateCode(ctx: Context)(resolve: Resolver.O): String = {
// Order the result of the resolver by pass number (sort by key value).
    // Each pass number has a sequence of components to generate.
val ordered = resolve.toSeq.sortBy(_._1)
// List with components only, ordered for the code generation
val cps = ordered flatMap (x => x._2)
    val firstLogicIdx = ordered.head._2.size // Number of input components (first pass)
val nbrOfOutputs = ordered.last._2.size // Number of output (last pass)
val firstOutputIndex = cps.size - nbrOfOutputs
    // Generate each code phase for each component
ctx.log.info(s"Generate the code for ${cps.size} components with ${codeSections.size} sections.")
// File preamble
val result = new StringBuilder
result ++= beginFile(ctx.progName)
// Add include files and remove duplicates files
result ++= beginSection(0)
result ++= includeFiles(cps)
result ++= endSection()
// Generic code sections for all components
for (sec <- codeSections.zipWithIndex) {
val idx = sec._2 + 1 // Section 0 already done
idx match {
case 4 =>
          result ++= beginMain
          if (ctx.isQemuLoggerEnabled)
            result ++= QemuLogger.addStartEvent + "\n"
          result ++= beginMainInit
          if (ctx.isQemuLoggerEnabled)
            result ++= QemuLogger.addEndInitEvent + "\n"
        case 5 =>
          if (ctx.isQemuLoggerEnabled)
            result ++= QemuLogger.addLoopStartEvent + "\n"
case _ =>
}
result ++= beginSection(idx) // Print the section name
// Add static code when sections start
idx match {
case 3 =>
// First init all outputs
result ++= beginOutputInit
result ++= initOutputs()
          result ++= endInit + "\n"
// General init
result ++= beginInit
case 5 => result ++= beginMainLoop
case _ =>
}
// Apply the current section function on all components
cps.zipWithIndex map { c =>
val cpNbr = c._2 // Iteration number
val cp = c._1 // Component to generate
// While loop code section for the first component
if (idx == 5 && cpNbr == 0) {
// QEMU logger. Ack event to start one loop iteration.
if (ctx.isQemuLoggerEnabled)
result ++= "\\n" + QemuLogger.addLoopTickEvent + "\\n"
if (Settings.GEN_VERBOSE_CODE)
result ++= "// 1) Read inputs"
result ++= "\\n"
}
if (idx == 5 && cpNbr == firstLogicIdx) {
if (Settings.GEN_VERBOSE_CODE)
result ++= "\\n// 2) Loop logic"
result ++= "\\n"
}
if (idx == 5 && cpNbr == firstOutputIndex) {
if (Settings.GEN_VERBOSE_CODE)
result ++= "\\n// 3) Update outputs"
result ++= "\\n"
}
// Add the component code for the section (if defined)
sec._1(cp.asInstanceOf[HwImplemented]) match {
case Some(code) =>
result ++= code + "\\n"
case None =>
}
}
// Add static code when sections end
idx match {
case 3 => result ++= endInit
case 5 => result ++= endMainLoop
case _ =>
}
result ++= endSection() // Print the end of the section
}
if (ctx.isQemuLoggerEnabled)
result ++= QemuLogger.addLoopExitEvent
// End of the file
result ++= endMain
result ++= endFile(ctx.progName)
result.result()
}
// Include all necessary header files, needed by the components. Remove duplicate files.
private def includeFiles(cps: Seq[Component]): String = {
// List of list of all files to include
val incs = for (c <- cps) yield c.asInstanceOf[HwImplemented].getIncludeCode
// Remove duplicates files contains in the flatten list using `distinct`
    val files = for (f <- incs.flatten.distinct) yield String.format("#include \"%s\"", f)
    files.mkString("\n") + "\n"
}
// Init all outputs before the general init
private def initOutputs(): String = {
val ret = new StringBuilder
val outputs = ComponentManager.findConnectedOutputHardware
outputs map { cp => cp.asInstanceOf[HwImplemented].getInitCode match {
// Add the code only if defined
      case Some(code) => ret ++= code + "\n"
case None =>
}
}
ret.result()
}
/* Static code definitions */
private final def beginSection(idx: Int) = Settings.GEN_VERBOSE_CODE match {
// Print the name of the section only if the output is verbose
case true => "//*// Section %02d\\n".format(idx)
case _ => ""
}
private final def endSection() = Settings.GEN_VERBOSE_CODE match {
case true => "//*// ----------\\n\\n"
case _ => "\\n"
}
// Header of the file. Comments describing the program and the version used.
private final def beginFile(progName: String) = {
val file = s"Code for '$progName'."
val out = new StringBuilder
out ++= "/*" + "\\n"
out ++= " " + "*".*(80) + "\\n"
out ++= s" $file\\n"
out ++= " " + "-".*(file.length) + "\\n"
out ++= s" Generated automatically on ${new Date()}.\\n"
out ++= s" $Version\\n"
out ++= " " + "*".*(80) + "\\n"
out ++= " */\\n\\n"
out.result()
}
private final val beginInit = "void init() {\\n"
private final val endInit = "}\\n"
private final val beginOutputInit = "void initOutputs() {\\n"
private final val beginMain = "int main() {\\n"
private final val beginMainInit = "initOutputs();\\ninit();\\n\\n"
private final val beginMainLoop = "while(1) {\\n"
private final val endMainLoop = "}\\n"
private final val endMain = "}\\n"
private final def endFile(fileName: String) = s"// END of file '$fileName.cpp'"
}
|
hevs-isi/especial-frontend
|
src/main/scala/hevs/especial/generator/CodeGenerator.scala
|
Scala
|
mit
| 8,817
|
import org.scalatest.FunSuite
import org.tribbloid.ispark.display.dsl.Display
import Display.Markdown
import org.tribbloid.ispark.display.{MIME, Data}
/**
* Created by peng on 1/6/15.
*/
class TestData extends FunSuite {
test("plain string") {
val text = "I'm a string"
val data = Data.parse(text)
println(data)
assert(data.items.map(_._1).contains(MIME.`text/plain`))
assert(data.items.size === 1)
}
test("HTML") {
val html =
<table>
<tr>
<th>Header 1</th>
<th>Header 2</th>
</tr>
<tr>
<td>row 1, cell 1</td>
<td>row 1, cell 2</td>
</tr>
<tr>
<td>row 2, cell 1</td>
<td>row 2, cell 2</td>
</tr>
</table>
val data = Data.parse(html)
println(data)
assert(data.items.map(_._1).contains(MIME.`text/plain`))
assert(data.items.map(_._1).contains(MIME.`text/html`))
assert(data.items.size === 2)
}
test("Markdown") {
val md = Markdown(
"""
|### title
""".stripMargin)
val data = Data.parse(md)
println(data)
assert(data.items.map(_._1).contains(MIME.`text/plain`))
assert(data.items.map(_._1).contains(MIME.`text/html`))
assert(data.items.size === 2)
}
}
|
tribbloid/ISpark
|
display/src/test/scala/TestData.scala
|
Scala
|
apache-2.0
| 1,277
|
package intellij.haskell.editor
import com.intellij.lang.ASTNode
import com.intellij.lang.folding.{FoldingBuilderEx, FoldingDescriptor}
import com.intellij.openapi.editor.Document
import com.intellij.openapi.project.DumbAware
import com.intellij.openapi.util.TextRange
import com.intellij.psi.PsiElement
import com.intellij.psi.util.PsiTreeUtil
import intellij.haskell.HaskellFile
import intellij.haskell.psi._
class HaskellFoldingBuilder extends FoldingBuilderEx with DumbAware {
override def buildFoldRegions(root: PsiElement, document: Document, quick: Boolean): Array[FoldingDescriptor] = {
root match {
case file: HaskellFile =>
HaskellPsiUtil.findImportDeclarationsBlock(file).map(createFoldingDescriptor).getOrElse(Array()) ++
HaskellPsiUtil.findFileHeader(file).map(createFoldingDescriptor).getOrElse(Array()) ++
HaskellPsiUtil.findTopLevelExpressions(file).flatMap(createFoldingDescriptor)
case _ => FoldingDescriptor.EMPTY
}
}
private def createFoldingDescriptor(element: PsiElement): Array[FoldingDescriptor] = {
Array(new FoldingDescriptor(element, createFoldingTextRange(element)))
}
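  // The folding range drops a trailing newline token so that collapsing a region does not swallow
  // the line break that follows it.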
private def createFoldingTextRange(element: PsiElement) = {
if (PsiTreeUtil.lastChild(element).getNode.getElementType == HaskellTypes.HS_NEWLINE) {
new TextRange(element.getTextRange.getStartOffset, element.getTextRange.getEndOffset - 1)
} else {
new TextRange(element.getTextRange.getStartOffset, element.getTextRange.getEndOffset)
}
}
override def isCollapsedByDefault(node: ASTNode): Boolean = {
val foldingSettings = HaskellFoldingSettings.getInstance()
if (node.getElementType == HaskellTypes.HS_IMPORT_DECLARATIONS) {
foldingSettings.isCollapseImports
} else if (node.getElementType == HaskellTypes.HS_FILE_HEADER) {
foldingSettings.isCollapseImports
} else if (node.getElementType == HaskellTypes.HS_EXPRESSION) {
foldingSettings.isCollapseTopLevelExpression
} else false
}
override def getPlaceholderText(node: ASTNode): String = {
node.getPsi match {
case _: HaskellImportDeclarations => "import ..."
case _: HaskellFileHeader => "{-# ... #-}"
case e: HaskellExpression => Option(e.getFirstChild.getText).getOrElse("") + " ..."
case _ => null
}
}
}
|
rikvdkleij/intellij-haskell
|
src/main/scala/intellij/haskell/editor/HaskellFoldingBuilder.scala
|
Scala
|
apache-2.0
| 2,328
|
/*
* Copyright 2010 Gregor N. Purdy, Sr.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.exoprax.graph
object Graph2 {
abstract sealed class EA[A, +B](a: A)
private case class Edge[A, +B](a: A, c: B, b: A) extends EA[A, B](a)
private case class Node[A](a: A) extends EA[A, Unit](a)
class EdgeAssoc2[A, B](x: A, y: B) {
def -> [C](z: C): Tuple3[A, B, C] = Tuple3(x, y, z)
}
class EdgeAssoc[A](x: A) {
def -- [B](y: B): EdgeAssoc2[A, B] = new EdgeAssoc2(x, y)
}
implicit def any2EdgeAssoc[A](x: A): EdgeAssoc[A] = new EdgeAssoc(x)
implicit def tuple2EA[A](x: (A, A)): EA[A, Unit] = Edge(x._1, (), x._2)
implicit def tuple3EA[A, B](x: (A, B, A)): EA[A, B] = Edge(x._1, x._2, x._3)
implicit def node2EA[A](x: Node[Node[A]]): EA[A, Unit] = x.a
implicit def any2EA[A](x: A): EA[A, Unit] = Node(x)
def apply[A, B](edges: EA[A, B]*): Graph2[A, B] = {
var v = Set[A]()
var e = Map[A, Map[A, B]]()
def addEdge(x: Edge[A, B]) = {
v = v + x.a
v = v + x.b
if (e.contains(x.a)) {
if (e(x.a).contains(x.b)) {
throw new RuntimeException("Already have an edge from " + x.a + " to " + x.b)
}
else {
          e = e + ((x.a, e(x.a) + ((x.b, x.c)))) // keep the edges already recorded from x.a
}
}
else {
e = e + ((x.a, Map[A, B]((x.b, x.c))))
}
}
def addNode(x: Node[A]) {
v = v + x.a
}
for (x <- edges) {
x match {
case e@Edge(a, c, b) => addEdge(e)
case Node(n@Node(a)) => addNode(n)
case n@Node(a) => addNode(n)
}
}
return new Graph2(v, e)
}
}
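// Illustrative sketch of the edge DSL declared above; the values are made up:
//   Graph2("a" -- 1.5 -> "b", "b" -- 0.5 -> "c")   // labelled edges via EdgeAssoc / tuple3EA
//   Graph2(("a", "b"), ("b", "c"))                 // unlabelled edges via tuple2EA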
class Graph2[A, +B](v: Set[A], e: Map[A, Map[A, B]]) {
  override def toString = "V = { " + v.mkString(", ") + " }\nE = {" + edgeStringList + " }"
def edgeStringList = {
val el = for (x <- e; y <- x._2) yield (x._1, y._2, y._1)
val esl = el.map { x => x._2 match { case () => x._1 + " -> " + x._3 ; case _ => x._1 + " -- " + x._2 + " -> " + x._3 } }
esl.mkString(", ")
}
}
|
gnp/GraphScala
|
src/main/scala/org/exoprax/graph/Graph2.scala
|
Scala
|
apache-2.0
| 2,614
|
package redis
import scala.concurrent._
import scala.concurrent.stm._
import redis.api.connection.Select
import scala.concurrent.duration._
class RedisPoolSpec extends RedisStandaloneServer {
sequential
"basic pool test" should {
"ok" in {
val redisPool = RedisClientPool(Seq(RedisServer( port = port,db = Some(0)), RedisServer( port = port,db = Some(1)), RedisServer( port = port,db = Some(3))))
val key = "keyPoolDb0"
redisPool.set(key, 0)
val r = for {
getDb1 <- redisPool.get(key)
getDb2 <- redisPool.get(key)
getDb0 <- redisPool.get[String](key)
select <- Future.sequence(redisPool.broadcast(Select(0)))
getKey1 <- redisPool.get[String](key)
getKey2 <- redisPool.get[String](key)
getKey0 <- redisPool.get[String](key)
} yield {
getDb1 must beNone
getDb2 must beNone
getDb0 must beSome("0")
select mustEqual Seq(true, true, true)
getKey1 must beSome("0")
getKey2 must beSome("0")
getKey0 must beSome("0")
}
Await.result(r, timeOut)
}
"check status" in {
val redisPool = RedisClientPool(Seq(RedisServer( port = port,db = Some(0)), RedisServer( port = port,db = Some(1)), RedisServer(port = 3333,db = Some(3))))
val key = "keyPoolDb0"
awaitAssert(redisPool.redisConnectionPool.size mustEqual 2,20 second)
redisPool.set(key, 0)
val r = for {
getDb1 <- redisPool.get(key)
getDb0 <- redisPool.get[String](key)
select <- Future.sequence(redisPool.broadcast(Select(0)))
getKey1 <- redisPool.get[String](key)
getKey0 <- redisPool.get[String](key)
} yield {
getDb1 must beNone
getDb0 must beSome("0")
select mustEqual Seq(true, true)
getKey1 must beSome("0")
getKey0 must beSome("0")
}
Await.result(r, timeOut)
}
}
}
|
npeters/rediscala
|
src/test/scala/redis/RedisPoolSpec.scala
|
Scala
|
apache-2.0
| 1,931
|
import sbt.Keys._
import sbt._
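/** Because `trigger = allRequirements`, sbt enables this plugin automatically for every project
  * that satisfies its (default) plugin requirements, so these settings apply without an explicit
  * `enablePlugins` call. */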
object CompileSettings extends AutoPlugin {
override def trigger = allRequirements
override def projectSettings = Seq(
scalacOptions ++= Seq(
"-optimise",
"-deprecation", // Emit warning and location for usages of deprecated APIs.
"-feature", // Emit warning and location for usages of features that should be imported explicitly.
"-unchecked", // Enable additional warnings where generated code depends on assumptions.
"-Xfatal-warnings", // Fail the compilation if there are any warnings.
"-Xlint", // Enable recommended additional warnings.
"-Ywarn-adapted-args", // Warn if an argument list is modified to match the receiver.
"-Ywarn-dead-code", // Warn when dead code is identified.
"-Ywarn-inaccessible", // Warn about inaccessible types in method signatures.
"-Ywarn-nullary-override", // Warn when non-nullary overrides nullary, e.g. def foo() over def foo.
"-Ywarn-numeric-widen", // Warn when numerics are widened.
"-Yinline-warnings", //
"-language:postfixOps", // See the Scala docs for value scala.language.postfixOps for a discussion
"-target:jvm-1.8" // force use jvm 1.8
),
javacOptions in compile ++= Seq("-target", "1.8", "-source", "1.8"), // force use jvm 1.8
compileOrder in Compile := CompileOrder.Mixed,
compileOrder in Test := CompileOrder.Mixed,
testOptions in Test += Tests.Argument(TestFrameworks.Specs2, "junitxml", "console"),
scalacOptions in Test ~= { (options: Seq[String]) =>
options filterNot (_ == "-Ywarn-dead-code") // Allow dead code in tests (to support using mockito).
},
parallelExecution in Test := false,
unmanagedBase := baseDirectory.value / "project/lib")
}
|
yuikns/psjrs-ra
|
project/CompileSettings.scala
|
Scala
|
apache-2.0
| 1,773
|
package db
import java.util.UUID
import anorm._
import io.apibuilder.api.v0.models.User
import io.apibuilder.internal.v0.models.{Task, TaskDataDiffVersion}
import org.joda.time.DateTime
import org.postgresql.util.PSQLException
import org.scalatestplus.play.PlaySpec
import org.scalatestplus.play.guice.GuiceOneAppPerSuite
import play.api.db._
class TasksDaoSpec extends PlaySpec with GuiceOneAppPerSuite with db.Helpers {
private[this] def setDeletedAt(task: Task, days: Int): Unit = {
val query = s"""
update tasks set deleted_at = timezone('utc', now()) - interval '$days days' where guid = {guid}::uuid
"""
injector.instanceOf[DBApi].database("default").withConnection { implicit c =>
SQL(query).on(Symbol("guid") -> task.guid).execute()
}
}
private[this] lazy val user: User = createRandomUser()
private[this] def createTaskDataDiffVersion(
oldGuid: UUID = UUID.randomUUID,
newGuid: UUID = UUID.randomUUID,
numberAttempts: Int = 0
): Task = {
val guid = injector.instanceOf[DBApi].database("default").withConnection { implicit c =>
tasksDao.insert(c, user, TaskDataDiffVersion(oldGuid, newGuid))
}
val task = tasksDao.findByGuid(guid).getOrElse {
sys.error("failed to find task")
}
(0 to numberAttempts).foreach { _ =>
tasksDao.incrementNumberAttempts(user, task)
}
tasksDao.findByGuid(guid).getOrElse {
sys.error("failed to create task")
}
}
"findByGuid" in {
val oldGuid = UUID.randomUUID
val newGuid = UUID.randomUUID
createTaskDataDiffVersion(oldGuid, newGuid).data must be(TaskDataDiffVersion(oldGuid, newGuid))
}
"softDelete" in {
val task = createTaskDataDiffVersion()
tasksDao.softDelete(user, task)
tasksDao.findByGuid(task.guid) must be(None)
}
"incrementNumberAttempts" in {
val task = createTaskDataDiffVersion()
val original = task.numberAttempts
tasksDao.incrementNumberAttempts(user, task)
tasksDao.findByGuid(task.guid).getOrElse {
sys.error("failed to find task")
}.numberAttempts must be(original + 1)
}
"recordError" in {
val task = createTaskDataDiffVersion()
tasksDao.recordError(user, task, "Test")
tasksDao.findByGuid(task.guid).getOrElse {
sys.error("failed to find task")
}.lastError must be(Some("Test"))
}
"findAll" must {
"nOrFewerAttempts" in {
val task = createTaskDataDiffVersion(numberAttempts = 2)
tasksDao.findAll(
guid = Some(task.guid),
nOrFewerAttempts = Some(task.numberAttempts)
).map(_.guid) must be(Seq(task.guid))
tasksDao.findAll(
guid = Some(task.guid),
nOrFewerAttempts = Some(task.numberAttempts - 1)
).map(_.guid) must be(Nil)
}
"nOrMoreAttempts" in {
val task = createTaskDataDiffVersion(numberAttempts = 2)
tasksDao.findAll(
guid = Some(task.guid),
nOrMoreAttempts = Some(task.numberAttempts)
).map(_.guid) must be(Seq(task.guid))
tasksDao.findAll(
guid = Some(task.guid),
nOrMoreAttempts = Some(task.numberAttempts + 1)
).map(_.guid) must be(Nil)
}
"nOrMoreMinutesOld" in {
val task = createTaskDataDiffVersion()
tasksDao.findAll(
guid = Some(task.guid),
createdOnOrBefore = Some(DateTime.now.plusHours(1))
).map(_.guid) must be(Seq(task.guid))
tasksDao.findAll(
guid = Some(task.guid),
createdOnOrBefore = Some(DateTime.now.minusHours(1))
).map(_.guid) must be(Nil)
}
"nOrMoreMinutesYoung" in {
val task = createTaskDataDiffVersion()
tasksDao.findAll(
guid = Some(task.guid),
createdOnOrAfter = Some(DateTime.now.minusHours(1))
).map(_.guid) must be(Seq(task.guid))
tasksDao.findAll(
guid = Some(task.guid),
createdOnOrAfter = Some(DateTime.now.plusHours(1))
).map(_.guid) must be(Nil)
}
"isDeleted" in {
val task = createTaskDataDiffVersion()
tasksDao.findAll(
guid = Some(task.guid),
isDeleted = Some(false)
).map(_.guid) must be(Seq(task.guid))
tasksDao.findAll(
guid = Some(task.guid),
isDeleted = Some(true)
).map(_.guid) must be(Nil)
tasksDao.findAll(
guid = Some(task.guid),
isDeleted = None
).map(_.guid) must be(Seq(task.guid))
tasksDao.softDelete(user, task)
tasksDao.findAll(
guid = Some(task.guid),
isDeleted = Some(false)
).map(_.guid) must be(Nil)
tasksDao.findAll(
guid = Some(task.guid),
isDeleted = Some(true)
).map(_.guid) must be(Seq(task.guid))
tasksDao.findAll(
guid = Some(task.guid),
isDeleted = None
).map(_.guid) must be(Seq(task.guid))
}
"deletedAtLeastNDaysAgo" in {
val task = createTaskDataDiffVersion()
tasksDao.findAll(
guid = Some(task.guid),
isDeleted = None,
deletedAtLeastNDaysAgo = Some(0)
) must be(Nil)
tasksDao.softDelete(user, task)
tasksDao.findAll(
guid = Some(task.guid),
isDeleted = None,
deletedAtLeastNDaysAgo = Some(90)
).map(_.guid) must be(Nil)
setDeletedAt(task, 89)
tasksDao.findAll(
guid = Some(task.guid),
isDeleted = None,
deletedAtLeastNDaysAgo = Some(90)
) must be(Nil)
setDeletedAt(task, 91)
tasksDao.findAll(
guid = Some(task.guid),
isDeleted = None,
deletedAtLeastNDaysAgo = Some(90)
).map(_.guid) must be(Seq(task.guid))
}
}
"purge" must {
"raises error if recently deleted" in {
val task = createTaskDataDiffVersion()
tasksDao.softDelete(user, task)
val ex = intercept[PSQLException] {
tasksDao.purge(user, task)
}
println(ex.getMessage)
ex.getMessage.contains("ERROR: Physical deletes on this table can occur only after 1 month of deleting the records") must be(true)
}
"purges if old" in {
val task = createTaskDataDiffVersion()
tasksDao.softDelete(user, task)
setDeletedAt(task, 45)
tasksDao.purge(user, task)
tasksDao.findAll(
guid = Some(task.guid),
isDeleted = None
) must be(Nil)
}
}
}
|
gheine/apidoc
|
api/test/db/TasksDaoSpec.scala
|
Scala
|
mit
| 6,316
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.json
import java.io.ByteArrayOutputStream
import scala.collection.mutable.ArrayBuffer
import scala.util.Try
import com.fasterxml.jackson.core._
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.Utils
/**
* Constructs a parser for a given schema that translates a json string to an [[InternalRow]].
*/
class JacksonParser(
schema: StructType,
val options: JSONOptions) extends Logging {
import JacksonUtils._
import com.fasterxml.jackson.core.JsonToken._
// A `ValueConverter` is responsible for converting a value from `JsonParser`
// to a value in a field for `InternalRow`.
private type ValueConverter = JsonParser => AnyRef
// `ValueConverter`s for the root schema for all fields in the schema
private val rootConverter = makeRootConverter(schema)
private val factory = new JsonFactory()
options.setJacksonOptions(factory)
/**
* Create a converter which converts the JSON documents held by the `JsonParser`
* to a value according to a desired schema. This is a wrapper for the method
* `makeConverter()` to handle a row wrapped with an array.
*/
private def makeRootConverter(st: StructType): JsonParser => Seq[InternalRow] = {
val elementConverter = makeConverter(st)
val fieldConverters = st.map(_.dataType).map(makeConverter).toArray
(parser: JsonParser) => parseJsonToken[Seq[InternalRow]](parser, st) {
case START_OBJECT => convertObject(parser, st, fieldConverters) :: Nil
// SPARK-3308: support reading top level JSON arrays and take every element
// in such an array as a row
//
// For example, we support, the JSON data as below:
//
// [{"a":"str_a_1"}]
// [{"a":"str_a_2"}, {"b":"str_b_3"}]
//
// resulting in:
//
// List([str_a_1,null])
// List([str_a_2,null], [null,str_b_3])
//
case START_ARRAY =>
val array = convertArray(parser, elementConverter)
// Here, as we support reading top level JSON arrays and take every element
// in such an array as a row, this case is possible.
if (array.numElements() == 0) {
Nil
} else {
array.toArray[InternalRow](schema).toSeq
}
}
}
/**
* Create a converter which converts the JSON documents held by the `JsonParser`
* to a value according to a desired schema.
*/
def makeConverter(dataType: DataType): ValueConverter = dataType match {
case BooleanType =>
(parser: JsonParser) => parseJsonToken[java.lang.Boolean](parser, dataType) {
case VALUE_TRUE => true
case VALUE_FALSE => false
}
case ByteType =>
(parser: JsonParser) => parseJsonToken[java.lang.Byte](parser, dataType) {
case VALUE_NUMBER_INT => parser.getByteValue
}
case ShortType =>
(parser: JsonParser) => parseJsonToken[java.lang.Short](parser, dataType) {
case VALUE_NUMBER_INT => parser.getShortValue
}
case IntegerType =>
(parser: JsonParser) => parseJsonToken[java.lang.Integer](parser, dataType) {
case VALUE_NUMBER_INT => parser.getIntValue
}
case LongType =>
(parser: JsonParser) => parseJsonToken[java.lang.Long](parser, dataType) {
case VALUE_NUMBER_INT => parser.getLongValue
}
case FloatType =>
(parser: JsonParser) => parseJsonToken[java.lang.Float](parser, dataType) {
case VALUE_NUMBER_INT | VALUE_NUMBER_FLOAT =>
parser.getFloatValue
case VALUE_STRING =>
// Special case handling for NaN and Infinity.
parser.getText match {
case "NaN" => Float.NaN
case "Infinity" => Float.PositiveInfinity
case "-Infinity" => Float.NegativeInfinity
case other => throw new RuntimeException(s"Cannot parse $other as FloatType.")
}
}
case DoubleType =>
(parser: JsonParser) => parseJsonToken[java.lang.Double](parser, dataType) {
case VALUE_NUMBER_INT | VALUE_NUMBER_FLOAT =>
parser.getDoubleValue
case VALUE_STRING =>
// Special case handling for NaN and Infinity.
parser.getText match {
case "NaN" => Double.NaN
case "Infinity" => Double.PositiveInfinity
case "-Infinity" => Double.NegativeInfinity
case other => throw new RuntimeException(s"Cannot parse $other as DoubleType.")
}
}
case StringType =>
(parser: JsonParser) => parseJsonToken[UTF8String](parser, dataType) {
case VALUE_STRING =>
UTF8String.fromString(parser.getText)
case _ =>
// Note that it always tries to convert the data as string without the case of failure.
val writer = new ByteArrayOutputStream()
Utils.tryWithResource(factory.createGenerator(writer, JsonEncoding.UTF8)) {
generator => generator.copyCurrentStructure(parser)
}
UTF8String.fromBytes(writer.toByteArray)
}
case TimestampType =>
(parser: JsonParser) => parseJsonToken[java.lang.Long](parser, dataType) {
case VALUE_STRING =>
val stringValue = parser.getText
// This one will lose microseconds parts.
// See https://issues.apache.org/jira/browse/SPARK-10681.
Long.box {
Try(options.timestampFormat.parse(stringValue).getTime * 1000L)
.getOrElse {
// If it fails to parse, then tries the way used in 2.0 and 1.x for backwards
// compatibility.
DateTimeUtils.stringToTime(stringValue).getTime * 1000L
}
}
case VALUE_NUMBER_INT =>
parser.getLongValue * 1000000L
}
case DateType =>
(parser: JsonParser) => parseJsonToken[java.lang.Integer](parser, dataType) {
case VALUE_STRING =>
val stringValue = parser.getText
// This one will lose microseconds parts.
          // See https://issues.apache.org/jira/browse/SPARK-10681.
Int.box {
Try(DateTimeUtils.millisToDays(options.dateFormat.parse(stringValue).getTime))
.orElse {
// If it fails to parse, then tries the way used in 2.0 and 1.x for backwards
// compatibility.
Try(DateTimeUtils.millisToDays(DateTimeUtils.stringToTime(stringValue).getTime))
}
.getOrElse {
// In Spark 1.5.0, we store the data as number of days since epoch in string.
// So, we just convert it to Int.
stringValue.toInt
}
}
}
case BinaryType =>
(parser: JsonParser) => parseJsonToken[Array[Byte]](parser, dataType) {
case VALUE_STRING => parser.getBinaryValue
}
case dt: DecimalType =>
(parser: JsonParser) => parseJsonToken[Decimal](parser, dataType) {
case (VALUE_NUMBER_INT | VALUE_NUMBER_FLOAT) =>
Decimal(parser.getDecimalValue, dt.precision, dt.scale)
}
case st: StructType =>
val fieldConverters = st.map(_.dataType).map(makeConverter).toArray
(parser: JsonParser) => parseJsonToken[InternalRow](parser, dataType) {
case START_OBJECT => convertObject(parser, st, fieldConverters)
}
case at: ArrayType =>
val elementConverter = makeConverter(at.elementType)
(parser: JsonParser) => parseJsonToken[ArrayData](parser, dataType) {
case START_ARRAY => convertArray(parser, elementConverter)
}
case mt: MapType =>
val valueConverter = makeConverter(mt.valueType)
(parser: JsonParser) => parseJsonToken[MapData](parser, dataType) {
case START_OBJECT => convertMap(parser, valueConverter)
}
case udt: UserDefinedType[_] =>
makeConverter(udt.sqlType)
case _ =>
(parser: JsonParser) =>
// Here, we pass empty `PartialFunction` so that this case can be
// handled as a failed conversion. It will throw an exception as
// long as the value is not null.
parseJsonToken[AnyRef](parser, dataType)(PartialFunction.empty[JsonToken, AnyRef])
}
/**
* This method skips `FIELD_NAME`s at the beginning, and handles nulls ahead before trying
* to parse the JSON token using given function `f`. If the `f` failed to parse and convert the
* token, call `failedConversion` to handle the token.
*/
private def parseJsonToken[R >: Null](
parser: JsonParser,
dataType: DataType)(f: PartialFunction[JsonToken, R]): R = {
parser.getCurrentToken match {
case FIELD_NAME =>
// There are useless FIELD_NAMEs between START_OBJECT and END_OBJECT tokens
parser.nextToken()
parseJsonToken[R](parser, dataType)(f)
case null | VALUE_NULL => null
case other => f.applyOrElse(other, failedConversion(parser, dataType))
}
}
/**
* This function throws an exception for failed conversion, but returns null for empty string,
* to guard the non string types.
*/
private def failedConversion[R >: Null](
parser: JsonParser,
dataType: DataType): PartialFunction[JsonToken, R] = {
case VALUE_STRING if parser.getTextLength < 1 =>
// If conversion is failed, this produces `null` rather than throwing exception.
// This will protect the mismatch of types.
null
case token =>
// We cannot parse this token based on the given data type. So, we throw a
// RuntimeException and this exception will be caught by `parse` method.
throw new RuntimeException(
s"Failed to parse a value for data type ${dataType.catalogString} (current token: $token).")
}
/**
* Parse an object from the token stream into a new Row representing the schema.
* Fields in the json that are not defined in the requested schema will be dropped.
*/
private def convertObject(
parser: JsonParser,
schema: StructType,
fieldConverters: Array[ValueConverter]): InternalRow = {
val row = new GenericInternalRow(schema.length)
while (nextUntil(parser, JsonToken.END_OBJECT)) {
schema.getFieldIndex(parser.getCurrentName) match {
case Some(index) =>
row.update(index, fieldConverters(index).apply(parser))
case None =>
parser.skipChildren()
}
}
row
}
/**
* Parse an object as a Map, preserving all fields.
*/
private def convertMap(
parser: JsonParser,
fieldConverter: ValueConverter): MapData = {
val keys = ArrayBuffer.empty[UTF8String]
val values = ArrayBuffer.empty[Any]
while (nextUntil(parser, JsonToken.END_OBJECT)) {
keys += UTF8String.fromString(parser.getCurrentName)
values += fieldConverter.apply(parser)
}
ArrayBasedMapData(keys.toArray, values.toArray)
}
/**
* Parse an object as a Array.
*/
private def convertArray(
parser: JsonParser,
fieldConverter: ValueConverter): ArrayData = {
val values = ArrayBuffer.empty[Any]
while (nextUntil(parser, JsonToken.END_ARRAY)) {
values += fieldConverter.apply(parser)
}
new GenericArrayData(values.toArray)
}
/**
* Parse the JSON input to the set of [[InternalRow]]s.
*
* @param recordLiteral an optional function that will be used to generate
* the corrupt record text instead of record.toString
*/
def parse[T](
record: T,
createParser: (JsonFactory, T) => JsonParser,
recordLiteral: T => UTF8String): Seq[InternalRow] = {
try {
Utils.tryWithResource(createParser(factory, record)) { parser =>
// a null first token is equivalent to testing for input.trim.isEmpty
// but it works on any token stream and not just strings
parser.nextToken() match {
case null => Nil
case _ => rootConverter.apply(parser) match {
case null => throw new RuntimeException("Root converter returned null")
case rows => rows
}
}
}
} catch {
case e @ (_: RuntimeException | _: JsonProcessingException) =>
// JSON parser currently doesn't support partial results for corrupted records.
// For such records, all fields other than the field configured by
// `columnNameOfCorruptRecord` are set to `null`.
throw BadRecordException(() => recordLiteral(record), () => None, e)
}
}
}
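// Hypothetical usage sketch; `schema`, `options` and the input string are assumptions, and
// CreateJacksonParser is the companion helper used by callers elsewhere in Spark:
//   val parser = new JacksonParser(schema, options)
//   val rows: Seq[InternalRow] = parser.parse(json, CreateJacksonParser.string, UTF8String.fromString)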
|
ddna1021/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JacksonParser.scala
|
Scala
|
apache-2.0
| 13,520
|
package edu.cmu.lti.nlp.amr
import scala.collection.mutable.Map
import scala.collection.mutable.Set
import scala.collection.mutable.ArrayBuffer
/****************************** Align Spans *****************************/
object AlignSpans {
def logUnalignedConcepts(node: Node) {
if (node.spans.size == 0) {
logger(1, "WARNING: Unaligned concept "+node.concept)
}
for ((_, child) <- node.topologicalOrdering) {
logUnalignedConcepts(child)
}
}
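    // Builds one Span per aligned concept (or per "special" concept subtree, see specialConcepts
    // below) and fills spanAlignments with, for every token, the index of the span covering it.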
def alignSpans(sentence: Array[String], /*stemmedSentence: Array[List[String]],*/ graph: Graph, wordAlignments: Array[Option[Node]]) : Array[Option[Int]] = {
val spanAlignments = new Array[Option[Int]](sentence.size)
for (i <- Range(0, sentence.size)) {
spanAlignments(i) = None
}
createSpans(sentence, /*stemmedSentence,*/ graph.root, wordAlignments, spanAlignments, None, graph.spans)
logger(3, graph.spans.toString)
return spanAlignments
}
val specialRelations1 : List[String] = List(":ARG.*-of")
val specialRelations2 : List[String] = List(":unit")
val specialConcepts : Set[String] = Set(
"name","country","person","date-entity","organization","city","thing","company","monetary-quantity","continent","mass-quantity","religious-group","political-party","distance-quantity","criminal-organization","research-institute","date-interval","temporal-quantity","world-region","ethnic-group","university")
// "govern-01"
def createSpans(sentence: Array[String], /*stemmedSentence: Array[List[String]],*/ node: Node, wordAlignments: Array[Option[Node]], spanAlignments: Array[Option[Int]], spanIndex: Option[Int], spans: ArrayBuffer[Span]) : Option[Span] = {
// Returns the span for 'node'
//Span(var start: Int, var end: Int, var nodeIds: List[String], var words: String, var amr: Node
//Node(var id: String, name: Option[String], concept: String, var relations: List[(String, Node)], var topologicalOrdering: List[(String, Node)], var variableRelations: List[(String, Var)], var alignment: Option[Int], var span: Option[Int])
var mySpan = Span(sentence.size, 0, List(node.id), "", Node("", node.name, node.concept, List[(String, Node)](), List[(String, Node)](), List[(String, Var)](), None, ArrayBuffer()), false) // will update later
var valid = false
if (specialConcepts contains node.concept) {
var mySpanIndex = spanIndex
if (spanIndex == None) {
mySpanIndex = Some(spans.size)
spans.append(mySpan) // so we can pass a valid spanIndex
}
for ((relation, child) <- node.topologicalOrdering) {
val span = createSpans(sentence, /*stemmedSentence,*/ child, wordAlignments, spanAlignments, mySpanIndex, spans)
if (span.size != 0) {
val Some(Span(start,end,nodeIds,_,amr,_)) = span // TODO: is this code right?
mySpan.start = min(mySpan.start, start)
mySpan.end = max(mySpan.end, end)
mySpan.nodeIds = mySpan.nodeIds ::: nodeIds
mySpan.amr.topologicalOrdering = (relation, amr) :: mySpan.amr.topologicalOrdering
mySpan.amr.relations = (relation, amr) :: mySpan.amr.relations
}
}
mySpan.amr.topologicalOrdering = mySpan.amr.topologicalOrdering.reverse
mySpan.amr.relations = mySpan.amr.relations.reverse
// TODO: check that the span is valid and update spanAlignments
valid = true
for (i <- Range(mySpan.start, mySpan.end)) {
if (spanAlignments(i) != None) {
if (spanAlignments(i) != mySpanIndex) {
valid = false // there's a word in the span aligned to a different span, so this is not a valid span
}
}
}
mySpan.words = sentence.slice(mySpan.start, mySpan.end).mkString(" ")
if (spanIndex == None) { // we need to save the span
val Some(index) = mySpanIndex
spans(index) = mySpan
}
if (mySpanIndex != None) { // replaces node.spans = mySpanIndex
val Some(myspanindex) = mySpanIndex
if (node.spans.size == 0) {
node.spans += myspanindex
} else {
node.spans(0) = myspanindex
}
}
} else {
if (node.alignment != None) {
val Some(alignment) = node.alignment
mySpan.start = alignment
mySpan.end = alignment + 1
mySpan.words = sentence(alignment)
if (spanIndex == None) { // we need to insert the span ourselves
spanAlignments(alignment) = Some(spans.size)
spans.append(mySpan)
} else {
spanAlignments(alignment) = spanIndex // index to this span
}
if (node.spans.size == 0) {
node.spans += spans.size
} else {
node.spans(0) = spans.size // TODO: check to see if there are other spans already?
}
valid = true
}
for ((relation, child) <- node.topologicalOrdering) {
createSpans(sentence, /*stemmedSentence,*/ child, wordAlignments, spanAlignments, None, spans)
}
}
return if(valid) { Some(mySpan) } else { None}
}
}
|
hopshackle/wordAlignment
|
src/edu/cmu/lti/nlp/amr/AlignSpans.scala
|
Scala
|
bsd-2-clause
| 5,747
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spark
import java.io.{FileWriter, PrintWriter, File}
import scala.io.Source
import com.google.common.io.Files
import org.scalatest.FunSuite
import org.apache.hadoop.io._
import org.apache.hadoop.io.compress.{DefaultCodec, CompressionCodec, GzipCodec}
import SparkContext._
class FileSuite extends FunSuite with LocalSparkContext {
test("text files") {
sc = new SparkContext("local", "test")
val tempDir = Files.createTempDir()
val outputDir = new File(tempDir, "output").getAbsolutePath
val nums = sc.makeRDD(1 to 4)
nums.saveAsTextFile(outputDir)
// Read the plain text file and check it's OK
val outputFile = new File(outputDir, "part-00000")
val content = Source.fromFile(outputFile).mkString
assert(content === "1\n2\n3\n4\n")
// Also try reading it in as a text file RDD
assert(sc.textFile(outputDir).collect().toList === List("1", "2", "3", "4"))
}
test("text files (compressed)") {
sc = new SparkContext("local", "test")
val tempDir = Files.createTempDir()
val normalDir = new File(tempDir, "output_normal").getAbsolutePath
val compressedOutputDir = new File(tempDir, "output_compressed").getAbsolutePath
val codec = new DefaultCodec()
val data = sc.parallelize("a" * 10000, 1)
data.saveAsTextFile(normalDir)
data.saveAsTextFile(compressedOutputDir, classOf[DefaultCodec])
val normalFile = new File(normalDir, "part-00000")
val normalContent = sc.textFile(normalDir).collect
assert(normalContent === Array.fill(10000)("a"))
val compressedFile = new File(compressedOutputDir, "part-00000" + codec.getDefaultExtension)
val compressedContent = sc.textFile(compressedOutputDir).collect
assert(compressedContent === Array.fill(10000)("a"))
assert(compressedFile.length < normalFile.length)
}
test("SequenceFiles") {
sc = new SparkContext("local", "test")
val tempDir = Files.createTempDir()
val outputDir = new File(tempDir, "output").getAbsolutePath
val nums = sc.makeRDD(1 to 3).map(x => (x, "a" * x)) // (1,a), (2,aa), (3,aaa)
nums.saveAsSequenceFile(outputDir)
// Try reading the output back as a SequenceFile
val output = sc.sequenceFile[IntWritable, Text](outputDir)
assert(output.map(_.toString).collect().toList === List("(1,a)", "(2,aa)", "(3,aaa)"))
}
test("SequenceFile (compressed)") {
sc = new SparkContext("local", "test")
val tempDir = Files.createTempDir()
val normalDir = new File(tempDir, "output_normal").getAbsolutePath
val compressedOutputDir = new File(tempDir, "output_compressed").getAbsolutePath
val codec = new DefaultCodec()
val data = sc.parallelize(Seq.fill(100)("abc"), 1).map(x => (x, x))
data.saveAsSequenceFile(normalDir)
data.saveAsSequenceFile(compressedOutputDir, Some(classOf[DefaultCodec]))
val normalFile = new File(normalDir, "part-00000")
val normalContent = sc.sequenceFile[String, String](normalDir).collect
assert(normalContent === Array.fill(100)("abc", "abc"))
val compressedFile = new File(compressedOutputDir, "part-00000" + codec.getDefaultExtension)
val compressedContent = sc.sequenceFile[String, String](compressedOutputDir).collect
assert(compressedContent === Array.fill(100)("abc", "abc"))
assert(compressedFile.length < normalFile.length)
}
test("SequenceFile with writable key") {
sc = new SparkContext("local", "test")
val tempDir = Files.createTempDir()
val outputDir = new File(tempDir, "output").getAbsolutePath
val nums = sc.makeRDD(1 to 3).map(x => (new IntWritable(x), "a" * x))
nums.saveAsSequenceFile(outputDir)
// Try reading the output back as a SequenceFile
val output = sc.sequenceFile[IntWritable, Text](outputDir)
assert(output.map(_.toString).collect().toList === List("(1,a)", "(2,aa)", "(3,aaa)"))
}
test("SequenceFile with writable value") {
sc = new SparkContext("local", "test")
val tempDir = Files.createTempDir()
val outputDir = new File(tempDir, "output").getAbsolutePath
val nums = sc.makeRDD(1 to 3).map(x => (x, new Text("a" * x)))
nums.saveAsSequenceFile(outputDir)
// Try reading the output back as a SequenceFile
val output = sc.sequenceFile[IntWritable, Text](outputDir)
assert(output.map(_.toString).collect().toList === List("(1,a)", "(2,aa)", "(3,aaa)"))
}
test("SequenceFile with writable key and value") {
sc = new SparkContext("local", "test")
val tempDir = Files.createTempDir()
val outputDir = new File(tempDir, "output").getAbsolutePath
val nums = sc.makeRDD(1 to 3).map(x => (new IntWritable(x), new Text("a" * x)))
nums.saveAsSequenceFile(outputDir)
// Try reading the output back as a SequenceFile
val output = sc.sequenceFile[IntWritable, Text](outputDir)
assert(output.map(_.toString).collect().toList === List("(1,a)", "(2,aa)", "(3,aaa)"))
}
test("implicit conversions in reading SequenceFiles") {
sc = new SparkContext("local", "test")
val tempDir = Files.createTempDir()
val outputDir = new File(tempDir, "output").getAbsolutePath
val nums = sc.makeRDD(1 to 3).map(x => (x, "a" * x)) // (1,a), (2,aa), (3,aaa)
nums.saveAsSequenceFile(outputDir)
// Similar to the tests above, we read a SequenceFile, but this time we pass type params
// that are convertable to Writable instead of calling sequenceFile[IntWritable, Text]
val output1 = sc.sequenceFile[Int, String](outputDir)
assert(output1.collect().toList === List((1, "a"), (2, "aa"), (3, "aaa")))
// Also try having one type be a subclass of Writable and one not
val output2 = sc.sequenceFile[Int, Text](outputDir)
assert(output2.map(_.toString).collect().toList === List("(1,a)", "(2,aa)", "(3,aaa)"))
val output3 = sc.sequenceFile[IntWritable, String](outputDir)
assert(output3.map(_.toString).collect().toList === List("(1,a)", "(2,aa)", "(3,aaa)"))
}
test("object files of ints") {
sc = new SparkContext("local", "test")
val tempDir = Files.createTempDir()
val outputDir = new File(tempDir, "output").getAbsolutePath
val nums = sc.makeRDD(1 to 4)
nums.saveAsObjectFile(outputDir)
// Try reading the output back as an object file
val output = sc.objectFile[Int](outputDir)
assert(output.collect().toList === List(1, 2, 3, 4))
}
test("object files of complex types") {
sc = new SparkContext("local", "test")
val tempDir = Files.createTempDir()
val outputDir = new File(tempDir, "output").getAbsolutePath
val nums = sc.makeRDD(1 to 3).map(x => (x, "a" * x))
nums.saveAsObjectFile(outputDir)
// Try reading the output back as an object file
val output = sc.objectFile[(Int, String)](outputDir)
assert(output.collect().toList === List((1, "a"), (2, "aa"), (3, "aaa")))
}
test("write SequenceFile using new Hadoop API") {
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat
sc = new SparkContext("local", "test")
val tempDir = Files.createTempDir()
val outputDir = new File(tempDir, "output").getAbsolutePath
val nums = sc.makeRDD(1 to 3).map(x => (new IntWritable(x), new Text("a" * x)))
nums.saveAsNewAPIHadoopFile[SequenceFileOutputFormat[IntWritable, Text]](
outputDir)
val output = sc.sequenceFile[IntWritable, Text](outputDir)
assert(output.map(_.toString).collect().toList === List("(1,a)", "(2,aa)", "(3,aaa)"))
}
test("read SequenceFile using new Hadoop API") {
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat
sc = new SparkContext("local", "test")
val tempDir = Files.createTempDir()
val outputDir = new File(tempDir, "output").getAbsolutePath
val nums = sc.makeRDD(1 to 3).map(x => (new IntWritable(x), new Text("a" * x)))
nums.saveAsSequenceFile(outputDir)
val output =
sc.newAPIHadoopFile[IntWritable, Text, SequenceFileInputFormat[IntWritable, Text]](outputDir)
assert(output.map(_.toString).collect().toList === List("(1,a)", "(2,aa)", "(3,aaa)"))
}
test("file caching") {
sc = new SparkContext("local", "test")
val tempDir = Files.createTempDir()
val out = new FileWriter(tempDir + "/input")
out.write("Hello world!\n")
out.write("What's up?\n")
out.write("Goodbye\n")
out.close()
val rdd = sc.textFile(tempDir + "/input").cache()
assert(rdd.count() === 3)
assert(rdd.count() === 3)
assert(rdd.count() === 3)
}
}
|
wgpshashank/spark
|
core/src/test/scala/spark/FileSuite.scala
|
Scala
|
apache-2.0
| 9,254
|
/*
* Copyright (c) 2017 Magomed Abdurakhmanov, Hypertino
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*
*/
package com.hypertino.facade.modules
import com.hypertino.facade.ConfigsFactory
import com.hypertino.facade.raml.RamlConfiguration
import com.typesafe.config.Config
import scaldi.Module
class RamlConfigModule extends Module {
bind[RamlConfiguration] identifiedBy 'raml to ConfigsFactory.ramlConfig(inject[Config])
}
|
hypertino/hyperfacade
|
src/main/scala/com/hypertino/facade/modules/RamlConfigModule.scala
|
Scala
|
mpl-2.0
| 588
|
/*
Copyright 2013, 2014 NICTA
This file is part of t3as (Text Analysis As A Service).
t3as is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
t3as is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with t3as. If not, see <http://www.gnu.org/licenses/>.
*/
package org.t3as.patClas.parse
import scala.language.postfixOps
import scala.xml.Node
import org.t3as.patClas.common.IPCUtil.IPCEntry
import org.t3as.patClas.common.TreeNode
import org.t3as.patClas.common.IPCUtil.ipcToText
object IPCParser {
/*
* hText (hierarchy text) is the textBody of this node appended to that of all its ancestors.
* This is indexed for searching, but not stored in Lucene or in the database.
*/
case class IPCNode(ipcEntry: IPCEntry, hText: String)
def parse(n: Node) = n \ "ipcEntry" map (n => mkTree(n, 0, ""))
def mkTree(n: Node, level: Int, hText: String): TreeNode[IPCNode] = {
val e = ipcNode(n, level, hText);
new TreeNode(e, n \ "ipcEntry" map (n => mkTree(n, level + 1, e.hText)))
}
def ipcNode(n: Node, level: Int, hText: String) = {
def attrOption(n: Node, name: String) = n.attribute(name).map(_(0).text)
def attr(n: Node, name: String) = attrOption(n, name).getOrElse(throw new Exception("ipcEntry missing @" + name))
// preserve XML elements (contains presentation elements e.g. <emdash/> and marked up refs to other classification codes)
val textBody = n \ "textBody" toString
IPCNode(IPCEntry(None, 0, level,
// attr(n, "entryType"), always K, so omit
attr(n, "kind"),
attr(n, "symbol"),
attrOption(n, "endSymbol"),
textBody
),
hText + " " + ipcToText(textBody))
}
}
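// Hedged usage sketch (not part of the original file): shows how hText accumulates
// ancestor text. The XML fragment below is a guessed, minimal shape of an IPC scheme
// file; only the "ipcEntry", "kind", "symbol" and "textBody" names read above are relied on.
object IPCParserExample {
  def main(args: Array[String]): Unit = {
    val scheme =
      <scheme>
        <ipcEntry kind="s" symbol="A">
          <textBody>HUMAN NECESSITIES</textBody>
          <ipcEntry kind="c" symbol="A01">
            <textBody>AGRICULTURE</textBody>
          </ipcEntry>
        </ipcEntry>
      </scheme>
    val trees = IPCParser.parse(scheme)
    // The nested entry's IPCNode.hText contains the text of both levels.
    println(s"parsed ${trees.size} top-level entries")
  }
}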
|
NICTA/t3as-pat-clas
|
pat-clas-parse/src/main/scala/org/t3as/patClas/parse/IPCParser.scala
|
Scala
|
gpl-3.0
| 2,152
|
package org.precompiler.scala101.ch03
/**
*
* @author Richard Li
*/
object AccessModifier {
def main(args: Array[String]): Unit = {
val microwave = new Microwave
//Compile error
//microwave.turnTable;
}
}
class Microwave {
def start(): Unit = println("started")
def stop(): Unit = println("stopped")
private def turnTable(): Unit = println("turning table")
}
class Vehicle {
//protected is class level - from an instance method
//of a class we can access the members decorated as protected on
//ANY instance of the same class
protected def checkEngine() {}
}
class Car extends Vehicle {
def start(): Unit = {
checkEngine()
}
def tow(vehicle: Vehicle): Unit = {
//Compile error
//vehicle.checkEngine()
}
def tow(car: Car): Unit = {
car.checkEngine()
}
}
class GasStation {
def fillGas(vehicle: Vehicle): Unit = {
//Compile error
//vehicle.checkEngine()
}
}
|
precompiler/scala-101
|
learning-scala/src/main/scala/org/precompiler/scala101/ch03/AccessModifier.scala
|
Scala
|
apache-2.0
| 940
|
package models
import anorm._
import anorm.SqlParser._
import play.api.db._
import play.api.Play.current
case class Task(id: Long, label: String)
object Task {
val task = {
get[Long]("id") ~
get[String]("label") map {
case id~label => Task(id, label)
}
}
def all(): List[Task] = DB.withConnection {
implicit c =>
SQL("select * from task").as(task *)
}
def create(label: String) {
DB.withConnection(
implicit c =>
SQL("insert into task (label) values ({label})").on(
'label -> label
).executeUpdate()
)
}
def delete(id: Long) {
DB.withConnection(
implicit c =>
SQL("delete from task where id = {id}").on(
'id -> id
).executeUpdate()
)
}
}
|
ericmoritz/play-example
|
todolist/app/models/Task.scala
|
Scala
|
mit
| 730
|
/*
* Copyright (c) 2015 Andreas Wolf
*
* See the LICENSE file in the project root for further copyright information.
*/
package info.andreaswolf.roadhopper.simulation
import org.slf4j.LoggerFactory
import scala.collection.mutable
class SimulationRepository {
type T = Simulation
val log = LoggerFactory.getLogger(this.getClass)
private val simulations = new mutable.HashMap[String, T]()
def add(sim: T) = {
simulations.put(sim.identifier, sim)
log.debug(s"Added simulation with id ${sim.identifier}")
}
def getByIdentifier[T](id: String) = simulations.get(id).get
def has(id: String) = simulations.contains(id)
}
|
andreaswolf/roadhopper
|
src/main/scala/info/andreaswolf/roadhopper/simulation/SimulationRepository.scala
|
Scala
|
mit
| 640
|
package builder.api_json
import core.{Importer, ServiceFetcher, TypesProvider, TypesProviderEnum, TypesProviderModel, TypesProviderUnion, VersionMigration}
import lib.{Methods, Primitives, ServiceConfiguration, Text, UrlKey}
import io.apibuilder.spec.v0.models._
import lib.Methods.{MethodsNotAcceptingBodies, supportsBody}
import play.api.libs.json._
import scala.util.{Failure, Success, Try}
case class ServiceBuilder(
migration: VersionMigration
) {
def apply(
config: ServiceConfiguration,
apiJson: String,
fetcher: ServiceFetcher
): Service = {
val jsValue = Json.parse(apiJson)
apply(config, InternalServiceForm(jsValue, fetcher))
}
def apply(
config: ServiceConfiguration,
internal: InternalServiceForm
): Service = {
val name = internal.name.getOrElse(sys.error("Missing name"))
val key = internal.key.getOrElse { UrlKey.generate(name) }
val namespace = internal.namespace.getOrElse { config.applicationNamespace(key) }
val resolver = TypeResolver(
defaultNamespace = Some(namespace),
RecursiveTypesProvider(internal)
)
val imports = internal.imports.map { ImportBuilder(internal.fetcher, _) }.sortWith(_.uri.toLowerCase < _.uri.toLowerCase)
val headers = internal.headers.map { HeaderBuilder(resolver, _) }
val enums = internal.enums.map { EnumBuilder(_) }.sortWith(_.name.toLowerCase < _.name.toLowerCase)
val interfaces = internal.interfaces.map { InterfaceBuilder(_) }.sortWith(_.name.toLowerCase < _.name.toLowerCase)
val unions = internal.unions.map { UnionBuilder(_) }.sortWith(_.name.toLowerCase < _.name.toLowerCase)
val models = internal.models.map { ModelBuilder(_) }.sortWith(_.name.toLowerCase < _.name.toLowerCase)
val resources = internal.resources.map { ResourceBuilder(resolver, _) }.sortWith(_.`type`.toLowerCase < _.`type`.toLowerCase)
val attributes = internal.attributes.map { AttributeBuilder(_) }
val annotations = internal.annotations.map{ AnnotationsBuilder(_) }
val info = internal.info match {
case None => Info(
contact = None,
license = None
)
case Some(i) => InfoBuilder(i)
}
Service(
apidoc = internal.apidoc.flatMap(_.version) match {
case Some(v) => Apidoc(version = v)
case None => Apidoc(version = io.apibuilder.spec.v0.Constants.Version)
},
info = info,
name = name,
namespace = namespace,
organization = Organization(key = config.orgKey),
application = Application(key = key),
version = config.version,
imports = imports,
description = internal.description,
baseUrl = internal.baseUrl,
enums = enums,
interfaces = interfaces,
unions = unions,
models = models,
headers = headers,
resources = resources,
attributes = attributes,
annotations = annotations ++ imports.flatMap(_.annotations).distinct
)
}
object ResourceBuilder {
private[api_json] case class Resolution(
`enum`: Option[TypesProviderEnum] = None,
model: Option[TypesProviderModel] = None,
union: Option[TypesProviderUnion] = None
) {
private[this] val all = Seq(`enum`, model, union).flatten
assert(all.size <= 1, s"Cannot have more than 1 resolved item: $all")
def isEmpty: Boolean = all.isEmpty
}
// @scala.annotation.tailrec
def resolve(
resolver: TypesProvider,
name: String
): Resolution = {
resolver.enums.find(o => o.name == name || o.fullName == name) match {
case Some(enum) => {
Resolution(enum = Some(enum))
}
case None => {
resolver.models.find(o => o.name == name || o.fullName == name) match {
case Some(model) => {
Resolution(model = Some(model))
}
case None => {
resolver.unions.find(o => o.name == name || o.fullName == name) match {
case Some(union) => {
Resolution(union = Some(union))
}
case None => {
Resolution()
}
}
}
}
}
}
}
def apply(
resolver: TypeResolver,
internal: InternalResourceForm
): Resource = {
val resolution = resolve(resolver.provider, internal.datatype.name)
resolution.enum match {
case Some(enum) => {
val resourcePath = internal.path.getOrElse("/" + enum.plural)
Resource(
`type` = internal.datatype.label,
plural = enum.plural,
path = Some(resourcePath),
description = internal.description,
deprecation = internal.deprecation.map(DeprecationBuilder(_)),
operations = internal.operations.map(op => OperationBuilder(op, resourcePath, resolver)),
attributes = internal.attributes.map { AttributeBuilder(_) }
)
}
case None => {
resolution.model match {
case Some(model) => {
val resourcePath = internal.path.getOrElse("/" + model.plural)
Resource(
`type` = internal.datatype.label,
plural = model.plural,
path = Some(resourcePath),
description = internal.description,
deprecation = internal.deprecation.map(DeprecationBuilder(_)),
operations = internal.operations.map(op => OperationBuilder(op, resourcePath, resolver, model = Some(model))),
attributes = internal.attributes.map { AttributeBuilder(_) }
)
}
case None => {
resolution.union match {
case Some(union) => {
val resourcePath = internal.path.getOrElse("/" + union.plural)
Resource(
`type` = internal.datatype.label,
plural = union.plural,
path = Some(resourcePath),
description = internal.description,
deprecation = internal.deprecation.map(DeprecationBuilder(_)),
operations = internal.operations.map(op => OperationBuilder(op, resourcePath, resolver, union = Some(union))),
attributes = internal.attributes.map { AttributeBuilder(_) }
)
}
case None => {
val resourcePath = internal.path.getOrElse("")
Resource(
`type` = internal.datatype.label,
plural = Text.pluralize(internal.datatype.name),
path = Some(resourcePath),
description = internal.description,
deprecation = internal.deprecation.map(DeprecationBuilder(_)),
operations = internal.operations.map(op => OperationBuilder(op, resourcePath, resolver)),
attributes = internal.attributes.map { AttributeBuilder(_) }
)
}
}
}
}
}
}
}
}
object OperationBuilder {
/**
* Historically ApiBuilder placed parameters by default for GET and DELETE in
* the query parameters. Maintain this convention to avoid breaking APIs
*/
private[this] def defaultParameterLocation(verb: String): ParameterLocation = {
if (supportsBody(verb)) {
verb.toUpperCase().trim match {
case "DELETE" => ParameterLocation.Query
case _ => ParameterLocation.Form
}
} else {
ParameterLocation.Query
}
}
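// Illustration of the mapping above (assumes the usual Methods.supportsBody behaviour):
//   GET    -> ParameterLocation.Query  (no request body)
//   DELETE -> ParameterLocation.Query  (kept for backwards compatibility)
//   POST   -> ParameterLocation.Form
//   PUT    -> ParameterLocation.Form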
def apply(
internal: InternalOperationForm,
resourcePath: String,
resolver: TypeResolver,
model: Option[TypesProviderModel] = None,
union: Option[TypesProviderUnion] = None
): Operation = {
val method = internal.method.getOrElse("")
val defaultLocation = if (internal.body.isDefined) {
ParameterLocation.Query
} else {
internal.method match {
case None => ParameterLocation.Form
case Some(m) => defaultParameterLocation(m)
}
}
val pathParameters = internal.namedPathParameters.map { name =>
internal.parameters.find(_.name.contains(name)) match {
case None => {
val datatypeLabel: String = model.flatMap(_.fields.find(_.name == name)) match {
case Some(field) => field.`type`
case None => {
union.flatMap(commonField(resolver.provider, _, name)).getOrElse {
Primitives.String.toString
}
}
}
ParameterBuilder.fromPath(name, datatypeLabel)
}
case Some(declared) => {
// Path parameter was declared in the parameters
// section. Use the explicit information provided in the
// specification
ParameterBuilder(declared, ParameterLocation.Path)
}
}
}
val internalParams = internal.parameters.filter(p => !pathParameters.exists(_.name == p.name.get)).map { p =>
ParameterBuilder(p, defaultLocation)
}
val fullPath = Seq(
resourcePath,
internal.path.getOrElse("")
).filter(!_.isEmpty).mkString("") match {
case "" => "/"
case p => p
}
Operation(
method = Method(method),
path = fullPath,
description = internal.description,
deprecation = internal.deprecation.map(DeprecationBuilder(_)),
body = internal.body.map { BodyBuilder(_) },
parameters = pathParameters ++ internalParams,
responses = internal.responses.map { ResponseBuilder(resolver, _) },
attributes = internal.attributes.map { AttributeBuilder(_) }
)
}
/**
* If all types agree on the datatype for the field with the specified name,
* returns that field's type. Otherwise, returns None
*/
private def commonField(resolver: TypesProvider, union: TypesProviderUnion, fieldName: String): Option[String] = {
val fieldTypes: Seq[String] = union.types.map { u =>
Primitives(u.`type`) match {
case Some(p) => p.toString
case None => {
resolver.models.find { m => m.name == u.`type` || m.fullName == u.`type` } match {
case None => {
Primitives.String.toString
}
case Some(m) => {
m.fields.find(_.name == fieldName) match {
case None => {
Primitives.String.toString
}
case Some(f) => {
f.`type`
}
}
}
}
}
}
}
fieldTypes.distinct.toList match {
case single :: Nil => Some(single)
case _ => None
}
}
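// Hedged example: for a hypothetical union whose member types each declare `guid: uuid`,
// commonField(resolver, union, "guid") yields Some("uuid"); if the member types disagree
// on that field's type, it yields None.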
}
object BodyBuilder {
def apply(ib: InternalBodyForm): Body = {
ib.datatype match {
case Left(errs) => sys.error("Body missing type: " + errs.mkString(", "))
case Right(datatype) => Body(
`type` = datatype.label,
description = ib.description,
deprecation = ib.deprecation.map(DeprecationBuilder(_)),
attributes = ib.attributes.map { AttributeBuilder(_) }
)
}
}
}
object DeprecationBuilder {
def apply(internal: InternalDeprecationForm): Deprecation = {
Deprecation(description = internal.description)
}
}
object EnumBuilder {
def apply(ie: InternalEnumForm): Enum = {
Enum(
name = ie.name,
plural = ie.plural,
description = ie.description,
deprecation = ie.deprecation.map(DeprecationBuilder(_)),
values = ie.values.map { iv =>
EnumValue(
name = iv.name.get,
value = iv.value,
description = iv.description,
deprecation = iv.deprecation.map(DeprecationBuilder(_))
)
},
attributes = ie.attributes.map { AttributeBuilder(_) }
)
}
}
object UnionBuilder {
def apply(internal: InternalUnionForm): Union = {
Union(
name = internal.name,
plural = internal.plural,
discriminator = internal.discriminator,
description = internal.description,
deprecation = internal.deprecation.map(DeprecationBuilder(_)),
interfaces = internal.interfaces,
types = internal.types.map { it =>
val typ = rightOrError(it.datatype)
UnionType(
`type` = typ.label,
description = it.description,
deprecation = it.deprecation.map(DeprecationBuilder(_)),
default = it.default,
discriminatorValue = Some(
it.discriminatorValue.getOrElse(typ.name)
)
)
},
attributes = internal.attributes.map { AttributeBuilder(_) }
)
}
}
object HeaderBuilder {
def apply(resolver: TypeResolver, ih: InternalHeaderForm): Header = {
Header(
name = ih.name.get,
`type` = rightOrError(ih.datatype).label,
required = ih.required,
description = ih.description,
deprecation = ih.deprecation.map(DeprecationBuilder(_)),
default = ih.default,
attributes = ih.attributes.map(AttributeBuilder(_)),
)
}
}
object InfoBuilder {
def apply(internal: InternalInfoForm): Info = {
Info(
contact = internal.contact.flatMap { c =>
if (c.name.isEmpty && c.email.isEmpty && c.url.isEmpty) {
None
} else {
Some(
Contact(
name = c.name,
url = c.url,
email = c.email
)
)
}
},
license = internal.license.map { l =>
License(
name = l.name.getOrElse {
sys.error("License is missing name")
},
url = l.url
)
}
)
}
}
object ImportBuilder {
def apply(fetcher: ServiceFetcher, internal: InternalImportForm): Import = {
Importer(fetcher, internal.uri.get).fetched match {
case Left(errors) => {
sys.error("Errors in import: " + errors)
}
case Right(service) => {
Import(
uri = internal.uri.get,
organization = service.organization,
application = service.application,
namespace = service.namespace,
version = service.version,
enums = service.enums.map(_.name),
unions = service.unions.map(_.name),
interfaces = service.interfaces.map(_.name),
models = service.models.map(_.name),
annotations = service.annotations
)
}
}
}
}
object InterfaceBuilder {
def apply(im: InternalInterfaceForm): Interface = {
Interface(
name = im.name,
plural = im.plural,
description = im.description,
deprecation = im.deprecation.map(DeprecationBuilder(_)),
fields = im.fields.map { FieldBuilder(_) },
attributes = im.attributes.map { AttributeBuilder(_) }
)
}
}
object ModelBuilder {
def apply(im: InternalModelForm): Model = {
Model(
name = im.name,
plural = im.plural,
description = im.description,
deprecation = im.deprecation.map(DeprecationBuilder(_)),
fields = im.fields.map { FieldBuilder(_) },
interfaces = im.interfaces,
attributes = im.attributes.map { AttributeBuilder(_) }
)
}
}
object ResponseBuilder {
def apply(resolver: TypeResolver, internal: InternalResponseForm): Response = {
Response(
code = Try(internal.code.toInt) match {
case Success(code) => ResponseCodeInt(code)
case Failure(_) => ResponseCodeOption(internal.code)
},
`type` = rightOrError(internal.datatype).label,
headers = internal.headers.map { HeaderBuilder(resolver, _) }.toList match {
case Nil => None
case headers => Some(headers)
},
description = internal.description,
deprecation = internal.deprecation.map(DeprecationBuilder(_)),
attributes = Some(internal.attributes.map { AttributeBuilder(_) })
)
}
}
object ParameterBuilder {
def fromPath(name: String, datatypeLabel: String): Parameter = {
Parameter(
name = name,
`type` = datatypeLabel,
location = ParameterLocation.Path,
required = true
)
}
def apply(internal: InternalParameterForm, defaultLocation: ParameterLocation): Parameter = {
Parameter(
name = internal.name.get,
`type` = rightOrError(internal.datatype).label,
location = internal.location.map(ParameterLocation(_)).getOrElse(defaultLocation),
description = internal.description,
deprecation = internal.deprecation.map(DeprecationBuilder(_)),
required = if (migration.makeFieldsWithDefaultsRequired()) {
internal.default match {
case None => internal.required
case Some(_) => true
}
} else {
internal.required
},
default = internal.default,
minimum = internal.minimum,
maximum = internal.maximum,
example = internal.example
)
}
}
object FieldBuilder {
def apply(
internal: InternalFieldForm
): Field = {
Field(
name = internal.name.get,
`type` = rightOrError(internal.datatype).label,
description = internal.description,
deprecation = internal.deprecation.map(DeprecationBuilder(_)),
default = internal.default,
required = if (migration.makeFieldsWithDefaultsRequired()) {
internal.default match {
case None => internal.required
case Some(_) => true
}
} else {
internal.required
},
minimum = internal.minimum,
maximum = internal.maximum,
example = internal.example,
attributes = internal.attributes.map { AttributeBuilder(_) },
annotations = internal.annotations
)
}
}
object AttributeBuilder {
def apply(
internal: InternalAttributeForm
): Attribute = {
Attribute(
name = internal.name.get,
value = internal.value.get,
description = internal.description,
deprecation = internal.deprecation.map(DeprecationBuilder(_))
)
}
}
object AnnotationsBuilder {
def apply(
internal: InternalAnnotationForm
): Annotation = {
Annotation(
name = internal.name,
description = internal.description,
deprecation = internal.deprecation.map(DeprecationBuilder(_))
)
}
}
private[this] def rightOrError[T](value: Either[Seq[String], T]): T = {
value match {
case Left(errors) => sys.error("Unexpected errors: " + errors.mkString(", "))
case Right(v) => v
}
}
}
|
apicollective/apibuilder
|
core/src/main/scala/core/builder/api_json/ServiceBuilder.scala
|
Scala
|
mit
| 19,208
|
package articles.controllers
import articles.models.Article
import articles.services.ArticleService
import commons.repositories.ActionRunner
import play.api.libs.json.{Format, JsArray, Json}
import play.api.mvc.{AbstractController, Action, AnyContent, ControllerComponents}
import scala.concurrent.ExecutionContext
import commons.models._
case class PageRequest(limit: Long, offset: Long, orderings: List[Ordering])
case class Page[Model](models: Seq[Model], count: Long)
case class ArticlePage(articles: Seq[Article], articlesCount: Long)
class ArticlePageJsonMappings
object ArticlePageJsonMappings {
import mappings.ArticleJsonMappings._
implicit val articlePageFormat: Format[ArticlePage] = Json.format[ArticlePage]
}
class ArticleController(actionRunner: ActionRunner,
articleService: ArticleService,
components: ControllerComponents,
implicit private val ec: ExecutionContext) extends AbstractController(components) {
import mappings.ArticleJsonMappings._
import ArticlePageJsonMappings._
def all(pageRequest: PageRequest): Action[AnyContent] = {
Action.async {
actionRunner.runInTransaction(articleService.all(pageRequest))
.map(page => ArticlePage(page.models, page.count))
.map(Json.toJson(_))
.map(Ok(_))
}
}
}
|
Dasiu/play-framework-test-project
|
app/articles/controllers/ArticleController.scala
|
Scala
|
mit
| 1,354
|
package com.wuyuntao.aeneas.migration.example.migrations
import com.wuyuntao.aeneas.migration.Migration
import com.wuyuntao.aeneas.migration.dsl.DbModifier
class CreateEventTable extends Migration {
def version = 20151030182648236L
def up(db: DbModifier) = {
db.executeSql("""CREATE TABLE events (
| entity_id timeuuid,
| partition_nr bigint,
| sequence_nr bigint,
| event_type text,
| event_id timeuuid,
| timestamp timestamp,
| PRIMARY KEY ((entity_id, partition_nr), sequence_nr)
|) WITH CLUSTERING ORDER BY (sequence_nr ASC)
|""".stripMargin)
}
def down(db: DbModifier) = {
db.executeSql("DROP TABLE events")
}
}
|
wuyuntao/Aeneas
|
aeneas-migration-example/src/main/scala/com/wuyuntao/aeneas/migration/example/migrations/V20151030182648236_CreateEventTable.scala
|
Scala
|
apache-2.0
| 713
|
// Copyright © 2012 Esko Luontola <www.orfjackal.net>
// This software is released under the Apache License 2.0.
// The license text is at http://www.apache.org/licenses/LICENSE-2.0
package net.orfjackal.svngitsaclue
import org.eclipse.jgit.awtui.CommitGraphPane
import java.awt.BorderLayout
import org.eclipse.jgit.storage.file.FileRepositoryBuilder
import org.eclipse.jgit.revplot.PlotWalk
import javax.swing.{JScrollPane, JPanel, JFrame}
object GitCommitGraphSpike {
def main(args: Array[String]) {
val repository = new FileRepositoryBuilder()
.setGitDir(GitRepositorySpike.RepositoryDir)
.readEnvironment()
.build()
val walk = new PlotWalk(repository)
walk.markStart(walk.parseCommit(repository.resolve("HEAD")))
val commitGraph = new CommitGraphPane
val commits = commitGraph.getCommitList
commits.source(walk)
commits.fillTo(Integer.MAX_VALUE)
val root = new JPanel(new BorderLayout())
root.add(new JScrollPane(commitGraph), BorderLayout.CENTER)
val frame = new JFrame("JGit")
frame.setContentPane(root)
frame.pack()
frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE)
frame.setLocationByPlatform(true)
frame.setVisible(true)
}
}
|
orfjackal/svn-gits-a-clue
|
src/test/scala/net/orfjackal/svngitsaclue/GitCommitGraphSpike.scala
|
Scala
|
apache-2.0
| 1,246
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.utils.serializer.converters
import com.google.protobuf.ByteString
import scala.collection.JavaConverters._
import scala.reflect.runtime.universe
import com.intel.analytics.bigdl.nn._
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity, DataFormat}
import com.intel.analytics.bigdl.optim.Regularizer
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.serializer._
import com.intel.analytics.bigdl.utils.{MultiShape, SingleShape, Shape => BigDLShape}
import com.intel.analytics.bigdl.serialization.Bigdl._
import com.intel.analytics.bigdl.serialization.Bigdl.AttrValue.ArrayValue
import scala.collection.mutable
import scala.reflect.ClassTag
/**
* Trait which defines how to get an attribute value from saved protobuf data and how to
* convert a BigDL object into protobuf-format attribute data
*/
trait DataConverter {
/**
* Get attribute value from protobuf attribute data
* @tparam T data type
* @param context deserialization context
* @param attribute protobuf generated Attribute instance
* @return BigDL compatible param value
*/
def getAttributeValue[T : ClassTag](context: DeserializeContext,
attribute: AttrValue)(
implicit ev: TensorNumeric[T]) : AnyRef
/**
* Set attribute value to protobuf format
* @tparam T data type
* @param context serialization context
* @param attributeBuilder the attribute value writable instance
* @param value the value to be written to protobuf file
* @param valueType the type of the value to help set the data type
*/
def setAttributeValue[T : ClassTag](context: SerializeContext[T],
attributeBuilder : AttrValue.Builder, value: Any,
valueType: universe.Type = null)
(implicit ev: TensorNumeric[T]) : Unit
protected def getLock: Object = ModuleSerializer._lock
}
/**
* General implementation of [[DataConverter]]; it provides the conversion entry point for all types
*/
object DataConverter extends DataConverter{
private val typePlaceHolder = universe.typeOf[DataConverter]
// Customized data converter map, key is the string representation of user defined class type
private val customizedConverter = new mutable.HashMap[String, DataConverter]
def registerConverter(tpe : String, converter : DataConverter) : Unit = {
require(!customizedConverter.contains(tpe), s"converter for $tpe already exists!")
customizedConverter(tpe) = converter
}
private def getRuntimeType[T : ClassTag](value : Any) (implicit ev: TensorNumeric[T])
: universe.Type = {
getLock.synchronized {
if (value.isInstanceOf[Tensor[_]]) {
ModuleSerializer.tensorType
} else if (value.isInstanceOf[AbstractModule[_, _, _]]) {
ModuleSerializer.abstractModuleType
} else if (value.isInstanceOf[Regularizer[_]]) {
ModuleSerializer.regularizerType
} else if (value.isInstanceOf[InitializationMethod]) {
universe.typeOf[InitializationMethod]
} else if (value.isInstanceOf[VariableFormat]) {
universe.typeOf[VariableFormat]
} else if (value.isInstanceOf[DataFormat]) {
universe.typeOf[DataFormat]
} else if (value.isInstanceOf[BigDLShape]) {
universe.typeOf[BigDLShape]
} else {
val cls = value.getClass
val runtimeMirror = universe.runtimeMirror(getClass.getClassLoader)
val clsSymbol = runtimeMirror.classSymbol(cls)
clsSymbol.toType
}
}
}
override def getAttributeValue[T : ClassTag](context: DeserializeContext, attribute: AttrValue)
(implicit ev: TensorNumeric[T]) : AnyRef = {
attribute.getDataType match {
case DataType.INT32 => Integer.valueOf(attribute.getInt32Value)
case DataType.INT64 => Long.box(attribute.getInt64Value)
case DataType.DOUBLE => Double.box(attribute.getDoubleValue)
case DataType.FLOAT => Float.box(attribute.getFloatValue)
case DataType.STRING => attribute.getStringValue
case DataType.BOOL => Boolean.box(attribute.getBoolValue)
case DataType.REGULARIZER => RegularizerConverter.getAttributeValue(context, attribute)
case DataType.TENSOR => TensorConverter.getAttributeValue(context, attribute)
case DataType.VARIABLE_FORMAT =>
VariableFormatConverter.getAttributeValue(context, attribute)
case DataType.INITMETHOD => InitMethodConverter.getAttributeValue(context, attribute)
case DataType.MODULE => ModuleConverter.getAttributeValue(context, attribute)
case DataType.NAME_ATTR_LIST => NameListConverter.getAttributeValue(context, attribute)
case DataType.ARRAY_VALUE => ArrayConverter.getAttributeValue(context, attribute)
case DataType.DATA_FORMAT => DataFormatConverter.getAttributeValue(context, attribute)
case DataType.CUSTOM => CustomConverterDelegator.getAttributeValue(context, attribute)
case DataType.SHAPE => ShapeConverter.getAttributeValue(context, attribute)
case _ => throw new IllegalArgumentException
(s"${attribute.getDataType} can not be recognized")
}
}
override def setAttributeValue[T : ClassTag](
context: SerializeContext[T], attributeBuilder: AttrValue.Builder,
value: Any, valueType : universe.Type = typePlaceHolder)
(implicit ev: TensorNumeric[T]): Unit = {
getLock.synchronized {
// to make it compatible with Java types
if (valueType =:= universe.typeOf[Int] ||
valueType =:= universe.typeOf[java.lang.Integer]) {
attributeBuilder.setDataType(DataType.INT32)
attributeBuilder.setInt32Value(value.asInstanceOf[Int])
} else if (valueType =:= universe.typeOf[Long] ||
valueType =:= universe.typeOf[java.lang.Long]) {
attributeBuilder.setDataType(DataType.INT64)
attributeBuilder.setInt64Value(value.asInstanceOf[Long])
} else if (valueType =:= universe.typeOf[Float] ||
valueType =:= universe.typeOf[java.lang.Float]) {
attributeBuilder.setDataType(DataType.FLOAT)
attributeBuilder.setFloatValue(value.asInstanceOf[Float])
} else if (valueType =:= universe.typeOf[Double] ||
valueType =:= universe.typeOf[java.lang.Double]) {
attributeBuilder.setDataType(DataType.DOUBLE)
attributeBuilder.setDoubleValue(value.asInstanceOf[Double])
} else if (valueType =:= universe.typeOf[String] ||
valueType =:= universe.typeOf[java.lang.String]) {
attributeBuilder.setDataType(DataType.STRING)
attributeBuilder.setStringValue(value.asInstanceOf[String])
} else if (valueType =:= universe.typeOf[Boolean] ||
valueType =:= universe.typeOf[java.lang.Boolean]) {
attributeBuilder.setDataType(DataType.BOOL)
attributeBuilder.setBoolValue(value.asInstanceOf[Boolean])
} else if (valueType =:= universe.typeOf[VariableFormat]) {
VariableFormatConverter.setAttributeValue(context, attributeBuilder, value)
} else if (valueType =:= universe.typeOf[InitializationMethod]) {
InitMethodConverter.setAttributeValue(context, attributeBuilder, value)
} else if (valueType.toString == ModuleSerializer.regularizerType.toString) {
RegularizerConverter.setAttributeValue(context, attributeBuilder, value)
} else if (valueType <:< universe.typeOf[Tensor[_]]) {
TensorConverter.setAttributeValue(context, attributeBuilder, value)
} else if (valueType.toString == ModuleSerializer.tType.toString) {
if (ev == com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric.NumericDouble) {
attributeBuilder.setDataType(DataType.DOUBLE)
attributeBuilder.setDoubleValue(value.asInstanceOf[Double])
} else {
attributeBuilder.setDataType(DataType.FLOAT)
attributeBuilder.setFloatValue(value.asInstanceOf[Float])
}
} else if (valueType.toString == ModuleSerializer.abstractModuleType.toString
|| valueType.toString == ModuleSerializer.tensorModuleType.toString
|| valueType.toString == ModuleSerializer.moduleType.toString
|| valueType.toString == ModuleSerializer.boundedModuleType.toString
|| valueType <:< universe.typeOf[AbstractModule[_, _, _]]
) {
ModuleConverter.setAttributeValue(context, attributeBuilder, value)
} else if (value.isInstanceOf[mutable.Map[_, _]]) {
NameListConverter.setAttributeValue(context, attributeBuilder, value)
} else if (valueType <:< universe.typeOf[Array[_]] ||
valueType.typeSymbol == universe.typeOf[Array[_]].typeSymbol) {
ArrayConverter.setAttributeValue(context, attributeBuilder, value, valueType)
} else if (valueType =:= universe.typeOf[DataFormat]) {
DataFormatConverter.setAttributeValue(context, attributeBuilder, value)
} else if (valueType =:= universe.typeOf[BigDLShape]) {
ShapeConverter.setAttributeValue(context, attributeBuilder, value)
} else {
CustomConverterDelegator.setAttributeValue(context, attributeBuilder, value, valueType)
}
}
}
/**
* DataConverter for name list
*/
object NameListConverter extends DataConverter {
override def getAttributeValue[T: ClassTag]
(context: DeserializeContext, attribute: AttrValue)(implicit ev: TensorNumeric[T]): AnyRef = {
val nameListMap = new mutable.HashMap[String, mutable.Map[String, Any]]()
val listMap = new mutable.HashMap[String, Any]()
val nameAttrListValue = attribute.getNameAttrListValue
val listName = nameAttrListValue.getName
nameAttrListValue.getAttrMap.asScala.foreach(attributePair => {
val name = attributePair._1
val attrValue = attributePair._2
val convertedObj = DataConverter.getAttributeValue(context, attrValue)
listMap(name) = convertedObj
})
nameListMap(listName) = listMap
nameListMap
}
override def setAttributeValue[T: ClassTag](context: SerializeContext[T],
attributeBuilder: AttrValue.Builder,
value: Any, valueType: universe.Type = null)(implicit ev: TensorNumeric[T]): Unit = {
attributeBuilder.setDataType(DataType.NAME_ATTR_LIST)
val listMap = value.asInstanceOf[mutable.Map[String, mutable.Map[String, Any]]]
val (name, nameListMap) = listMap.head
val nameAttrList = NameAttrList.newBuilder
nameAttrList.setName(name)
nameListMap.foreach(attributePair => {
val name = attributePair._1
val obj = attributePair._2
val nestedAttr = AttrValue.newBuilder
DataConverter.setAttributeValue(context, nestedAttr, obj, getRuntimeType(obj))
nameAttrList.putAttr(name, nestedAttr.build)
})
attributeBuilder.setNameAttrListValue(nameAttrList.build)
}
}
/**
* DataConverter for array containers. Unlike Array[AttrValue], this is an array of values
* of one specific type.
* For each specific type, wrap the value as the corresponding attribute and call the related converter
*/
object ArrayConverter extends DataConverter {
override def getAttributeValue[T: ClassTag]
(context: DeserializeContext, attribute: AttrValue)(implicit ev: TensorNumeric[T]): AnyRef = {
val valueArray = attribute.getArrayValue
val size = valueArray.getSize
if (size == -1) {
return null
}
val listType = valueArray.getDatatype
val arr = listType match {
case DataType.INT32 =>
if (size == 0) {
return new Array[Int](0)
}
valueArray.getI32List.asScala.toArray.map(_.intValue)
case DataType.INT64 =>
if (size == 0) {
return new Array[Long](0)
}
valueArray.getI64List.asScala.toArray.map(_.longValue())
case DataType.DOUBLE =>
if (size == 0) {
return new Array[Double](0)
}
valueArray.getDblList.asScala.toArray.map(_.doubleValue())
case DataType.FLOAT =>
if (size == 0) {
return new Array[Float](0)
}
valueArray.getFltList.asScala.toArray.map(_.floatValue())
case DataType.STRING =>
if (size == 0) {
return new Array[String](0)
}
valueArray.getStrList.asScala.toArray
case DataType.BOOL =>
if (size == 0) {
return new Array[Boolean](0)
}
valueArray.getBooleanList.asScala.toArray.map(_.booleanValue())
case DataType.REGULARIZER =>
val regularizers = new Array[Regularizer[T]](size)
val regList = valueArray.getRegularizerList.asScala
var i = 0
regList.foreach(reg => {
val attrValue = AttrValue.newBuilder
attrValue.setDataType(DataType.REGULARIZER)
attrValue.setRegularizerValue(reg)
regularizers(i) = RegularizerConverter.
getAttributeValue(context, attrValue.build).asInstanceOf[Regularizer[T]]
i += 1
})
regularizers
case DataType.TENSOR =>
val tensors = new Array[Tensor[T]](size)
val tensorList = valueArray.getTensorList.asScala
var i = 0
tensorList.foreach(tensor => {
val attrValue = AttrValue.newBuilder
attrValue.setDataType(DataType.TENSOR)
attrValue.setTensorValue(tensor)
tensors(i) = TensorConverter.
getAttributeValue(context, attrValue.build).asInstanceOf[Tensor[T]]
i += 1
})
tensors
case DataType.VARIABLE_FORMAT =>
val formats = new Array[VariableFormat](size)
val formatList = valueArray.getVariableFormatList.asScala
var i = 0
formatList.foreach(format => {
val attrValue = AttrValue.newBuilder
attrValue.setDataType(DataType.VARIABLE_FORMAT)
attrValue.setVariableFormatValue(format)
formats(i) = VariableFormatConverter.
getAttributeValue(context, attrValue.build).asInstanceOf[VariableFormat]
i += 1
})
formats
case DataType.INITMETHOD =>
val methods = new Array[InitializationMethod](size)
val methodList = valueArray.getInitMethodList.asScala
var i = 0
methodList.foreach(method => {
val attrValue = AttrValue.newBuilder
attrValue.setDataType(DataType.INITMETHOD)
attrValue.setInitMethodValue(method)
methods(i) = InitMethodConverter.getAttributeValue(context, attrValue.build)
.asInstanceOf[InitializationMethod]
i += 1
})
methods
case DataType.MODULE =>
val modules = new Array[AbstractModule[Activity, Activity, T]](size)
val moduleList = valueArray.getBigDLModuleList.asScala
var i = 0
moduleList.foreach(module => {
val attrValue = AttrValue.newBuilder
attrValue.setDataType(DataType.MODULE)
attrValue.setBigDLModuleValue(module)
modules(i) = ModuleConverter.getAttributeValue(context, attrValue.build)
.asInstanceOf[AbstractModule[Activity, Activity, T]]
i += 1
})
modules
case DataType.NAME_ATTR_LIST =>
val nameArray = new Array[Map[String, Map[String, Any]]](size)
val nameAttriLists = valueArray.getNameAttrListList.asScala
var i = 0
nameAttriLists.foreach(nameList => {
val attrValue = AttrValue.newBuilder
attrValue.setDataType(DataType.NAME_ATTR_LIST)
attrValue.setNameAttrListValue(nameList)
nameArray(i) = NameListConverter.getAttributeValue(context, attrValue.build)
.asInstanceOf[Map[String, Map[String, Any]]]
i += 1
})
nameArray
case DataType.DATA_FORMAT =>
val dataFormats = new Array[DataFormat](size)
val dataFormatList = valueArray.getDataFormatList.asScala
var i = 0
dataFormatList.foreach(format => {
val attrValue = AttrValue.newBuilder
attrValue.setDataType(DataType.DATA_FORMAT)
attrValue.setDataFormatValue(format)
dataFormats(i) = DataFormatConverter.
getAttributeValue(context, attrValue.build).asInstanceOf[DataFormat]
i += 1
})
dataFormats
case DataType.CUSTOM =>
val customValues = new Array[Any](size)
val customValueList = valueArray.getCustomList.asScala
var i = 0
customValueList.foreach(custom => {
val attrValue = AttrValue.newBuilder
attrValue.setDataType(DataType.CUSTOM)
attrValue.setCustomValue(custom)
customValues(i) = CustomConverterDelegator.
getAttributeValue(context, attrValue.build)
i += 1
})
customValues
case DataType.SHAPE =>
valueArray.getShapeList.asScala.map(shape => {
val attrValue = AttrValue.newBuilder
attrValue.setDataType(DataType.SHAPE)
attrValue.setShape(shape)
ShapeConverter.getAttributeValue(context, attrValue.build).asInstanceOf[BigDLShape]
}).toArray
}
arr
}
override def setAttributeValue[T: ClassTag](context: SerializeContext[T],
attributeBuilder: AttrValue.Builder,
value: Any, valueType: universe.Type = null)(implicit ev: TensorNumeric[T]): Unit = {
attributeBuilder.setDataType(DataType.ARRAY_VALUE)
getLock.synchronized {
val arrayBuilder = ArrayValue.newBuilder
arrayBuilder.setSize(-1)
if (valueType =:= universe.typeOf[Array[Int]]) {
arrayBuilder.setDatatype(DataType.INT32)
if (value != null) {
val int32s = value.asInstanceOf[Array[Int]]
int32s.foreach(i32 => arrayBuilder.addI32(i32))
arrayBuilder.setSize(int32s.size)
}
} else if (valueType =:= universe.typeOf[Array[Long]]) {
arrayBuilder.setDatatype(DataType.INT64)
if (value != null) {
val int64s = value.asInstanceOf[Array[Long]]
int64s.foreach(i64 => arrayBuilder.addI64(i64))
arrayBuilder.setSize(int64s.size)
}
} else if (valueType =:= universe.typeOf[Array[Float]]) {
arrayBuilder.setDatatype(DataType.FLOAT)
if (value != null) {
val flts = value.asInstanceOf[Array[Float]]
flts.foreach(flt => arrayBuilder.addFlt(flt))
arrayBuilder.setSize(flts.size)
}
} else if (valueType =:= universe.typeOf[Array[Double]]) {
arrayBuilder.setDatatype(DataType.DOUBLE)
if (value != null) {
val dbs = value.asInstanceOf[Array[Double]]
dbs.foreach(dbl => arrayBuilder.addDbl(dbl))
arrayBuilder.setSize(dbs.size)
}
} else if (valueType =:= universe.typeOf[Array[Boolean]]) {
arrayBuilder.setDatatype(DataType.BOOL)
if (value != null) {
val bls = value.asInstanceOf[Array[Boolean]]
bls.foreach(bl => arrayBuilder.addBoolean(bl))
arrayBuilder.setSize(bls.size)
}
} else if (valueType =:= universe.typeOf[Array[String]]) {
arrayBuilder.setDatatype(DataType.STRING)
if (value != null) {
val strs = value.asInstanceOf[Array[String]]
strs.foreach(str => arrayBuilder.addStr(str))
arrayBuilder.setSize(strs.size)
}
} else if (valueType <:< universe.typeOf[Array[_ <: Regularizer[_ <: Any]]]) {
arrayBuilder.setDatatype(DataType.REGULARIZER)
if (value != null) {
val regularizers = value.asInstanceOf[Array[Regularizer[T]]]
regularizers.foreach(reg => {
val attrValueBuilder = AttrValue.newBuilder
RegularizerConverter.setAttributeValue(context, attrValueBuilder, reg)
arrayBuilder.addRegularizer(attrValueBuilder.getRegularizerValue)
})
arrayBuilder.setSize(regularizers.size)
}
} else if (valueType <:< universe.
typeOf[Array[_ <: Tensor[_ <: Any]]]) {
arrayBuilder.setDatatype(DataType.TENSOR)
if (value != null) {
val tensors = value.asInstanceOf[Array[Tensor[T]]]
tensors.foreach(tensor => {
val attrValueBuilder = AttrValue.newBuilder
TensorConverter.setAttributeValue(context, attrValueBuilder, tensor)
arrayBuilder.addTensor(attrValueBuilder.getTensorValue)
})
arrayBuilder.setSize(tensors.size)
}
} else if (valueType =:= universe.typeOf[Array[VariableFormat]]) {
arrayBuilder.setDatatype(DataType.VARIABLE_FORMAT)
if (value != null) {
val formats = value.asInstanceOf[Array[VariableFormat]]
formats.foreach(format => {
val attrValueBuilder = AttrValue.newBuilder
VariableFormatConverter.setAttributeValue(context, attrValueBuilder, format)
arrayBuilder.addVariableFormat(attrValueBuilder.getVariableFormatValue)
})
arrayBuilder.setSize(formats.size)
}
} else if (valueType =:= universe.typeOf[Array[InitializationMethod]]) {
arrayBuilder.setDatatype(DataType.INITMETHOD)
if (value != null) {
val methods = value.asInstanceOf[Array[InitializationMethod]]
methods.foreach(method => {
val attrValueBuilder = AttrValue.newBuilder
InitMethodConverter.setAttributeValue(context, attrValueBuilder, method)
arrayBuilder.addInitMethod(attrValueBuilder.getInitMethodValue)
})
arrayBuilder.setSize(methods.size)
}
} else if (valueType <:< universe.
typeOf[Array[_ <: AbstractModule[_ <: Activity, _ <: Activity, _ <: Any]]]) {
arrayBuilder.setDatatype(DataType.MODULE)
if (value != null) {
val modules = value.asInstanceOf[Array[_ <: AbstractModule[Activity, Activity, T]]]
modules.foreach(module => {
val attrValueBuilder = AttrValue.newBuilder
ModuleConverter.setAttributeValue(context, attrValueBuilder, module)
arrayBuilder.addBigDLModule(attrValueBuilder.getBigDLModuleValue)
})
arrayBuilder.setSize(modules.size)
}
} else if (value.isInstanceOf[Array[Map[_, _]]]) {
arrayBuilder.setDatatype(DataType.NAME_ATTR_LIST)
value.asInstanceOf[Array[Map[String, Any]]].foreach(map => {
val attrValueBuilder = AttrValue.newBuilder
NameListConverter.setAttributeValue(context, attrValueBuilder, map)
arrayBuilder.addNameAttrList(attrValueBuilder.getNameAttrListValue)
})
} else if (valueType =:= universe.typeOf[Array[DataFormat]]) {
arrayBuilder.setDatatype(DataType.DATA_FORMAT)
if (value != null) {
val formats = value.asInstanceOf[Array[DataFormat]]
formats.foreach(format => {
val attrValueBuilder = AttrValue.newBuilder
DataFormatConverter.setAttributeValue(context, attrValueBuilder, format)
arrayBuilder.addDataFormat(attrValueBuilder.getDataFormatValue)
})
arrayBuilder.setSize(formats.size)
}
} else if (valueType =:= universe.typeOf[Array[BigDLShape]]) {
arrayBuilder.setDatatype(DataType.SHAPE)
if (value != null) {
val shapes = value.asInstanceOf[Array[BigDLShape]]
shapes.foreach(shape => {
val attrValueBuilder = AttrValue.newBuilder
ShapeConverter.setAttributeValue(context, attrValueBuilder, shape)
arrayBuilder.addShape(attrValueBuilder.getShape)
})
arrayBuilder.setSize(shapes.size)
}
} else {
arrayBuilder.setDatatype(DataType.CUSTOM)
if (value != null) {
val customValues = value.asInstanceOf[Array[Any]]
customValues.foreach(custom => {
val attrValueBuilder = AttrValue.newBuilder
CustomConverterDelegator.setAttributeValue(context, attrValueBuilder, custom)
arrayBuilder.addCustom(attrValueBuilder.getCustomValue)
})
arrayBuilder.setSize(customValues.size)
}
}
attributeBuilder.setArrayValue(arrayBuilder.build)
}
}
}
/**
* DataConverter for custom values
*/
object CustomConverterDelegator extends DataConverter {
override def getAttributeValue[T: ClassTag](context: DeserializeContext, attribute: AttrValue)
(implicit ev: TensorNumeric[T]): AnyRef = {
val subType = attribute.getSubType
require(customizedConverter.contains(subType), s"unrecognized type $subType")
val customConverter = customizedConverter.get(subType).get
customConverter.getAttributeValue(context, attribute)
}
override def setAttributeValue[T: ClassTag](context: SerializeContext[T],
attributeBuilder: AttrValue.Builder,
value: Any, valueType: universe.Type)(implicit ev: TensorNumeric[T]): Unit = {
require(customizedConverter.contains(valueType.toString), s"unrecognized type $valueType")
val customConverter = customizedConverter.get(valueType.toString).get
attributeBuilder.setDataType(DataType.CUSTOM)
attributeBuilder.setSubType(valueType.toString)
customConverter.setAttributeValue(context, attributeBuilder, value, valueType)
}
}
}
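// Hedged sketch (not part of the original file): how a user-defined type could be plugged
// into serialization via DataConverter.registerConverter. `MyThreshold`, its converter and
// the string-based encoding are assumptions for illustration only, not BigDL API.
private[converters] case class MyThreshold(value: Double)

private[converters] object MyThresholdConverter extends DataConverter {
  override def getAttributeValue[T: ClassTag](context: DeserializeContext, attribute: AttrValue)(
    implicit ev: TensorNumeric[T]): AnyRef =
    // CustomConverterDelegator has already dispatched on the custom sub type;
    // this sketch simply decodes the payload stored below.
    MyThreshold(attribute.getStringValue.toDouble)

  override def setAttributeValue[T: ClassTag](context: SerializeContext[T],
    attributeBuilder: AttrValue.Builder, value: Any, valueType: universe.Type = null)(
    implicit ev: TensorNumeric[T]): Unit =
    // Re-using the string field keeps the sketch small; a real converter would typically
    // build a dedicated protobuf message for the custom value.
    attributeBuilder.setStringValue(value.asInstanceOf[MyThreshold].value.toString)
}
// Registration (normally done once at start-up) keys the converter by the type's string form:
//   DataConverter.registerConverter(universe.typeOf[MyThreshold].toString, MyThresholdConverter)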
|
yiheng/BigDL
|
spark/dl/src/main/scala/com/intel/analytics/bigdl/utils/serializer/converters/DataConverter.scala
|
Scala
|
apache-2.0
| 26,607
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.dataSet
import org.apache.calcite.plan.{RelOptRuleCall, Convention, RelOptRule, RelTraitSet}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.convert.ConverterRule
import org.apache.calcite.rel.logical.LogicalUnion
import org.apache.calcite.rel.rules.UnionToDistinctRule
import org.apache.flink.table.plan.nodes.dataset.{DataSetConvention, DataSetUnion}
class DataSetUnionRule
extends ConverterRule(
classOf[LogicalUnion],
Convention.NONE,
DataSetConvention.INSTANCE,
"DataSetUnionRule")
{
/**
* Only translate UNION ALL.
* Note: A distinct Union is translated into
* an Aggregate on top of a UNION ALL by [[UnionToDistinctRule]]
*/
override def matches(call: RelOptRuleCall): Boolean = {
val union: LogicalUnion = call.rel(0).asInstanceOf[LogicalUnion]
union.all
}
def convert(rel: RelNode): RelNode = {
val union: LogicalUnion = rel.asInstanceOf[LogicalUnion]
val traitSet: RelTraitSet = rel.getTraitSet.replace(DataSetConvention.INSTANCE)
val convLeft: RelNode = RelOptRule.convert(union.getInput(0), DataSetConvention.INSTANCE)
val convRight: RelNode = RelOptRule.convert(union.getInput(1), DataSetConvention.INSTANCE)
new DataSetUnion(
rel.getCluster,
traitSet,
convLeft,
convRight,
rel.getRowType)
}
}
object DataSetUnionRule {
val INSTANCE: RelOptRule = new DataSetUnionRule
}
|
DieBauer/flink
|
flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/rules/dataSet/DataSetUnionRule.scala
|
Scala
|
apache-2.0
| 2,270
|
package com.overviewdocs.util
import java.util.Locale
object SupportedLanguages {
// Supported languages, in ISO639-2 format
val languageCodes = Set(
"ar",
"ca",
"cs",
"de",
"en",
"es",
"fr",
"sv",
"nl",
"no",
"it",
"pt",
"ro",
"ru"
)
val languages : Set[SupportedLanguage] = languageCodes.map(SupportedLanguage)
val defaultLanguage : SupportedLanguage = SupportedLanguage("en")
/** A list of languages, sorted by their display names in callerLocale */
def languagesSortedInCallerLocale(callerLocale: Locale) : Seq[SupportedLanguage] = {
languages.toIndexedSeq.sortBy((lang) => lang.displayLanguage(callerLocale))
}
}
|
overview/overview-server
|
common/src/main/scala/com/overviewdocs/util/SupportedLanguages.scala
|
Scala
|
agpl-3.0
| 701
|
package unfiltered.oauth2
import unfiltered.request._
import unfiltered.response._
import unfiltered.filter.Plan
/**
* After your application has obtained an access token, your app can use it to access APIs by
* including it in either an access_token query parameter or an Authorization: Bearer header.
*
* To call an API using the HTTP header:
*
* GET /api/1/feeds.js HTTP/1.1
* Host: www.example.com
* Authorization: Bearer vF9dft4qmT
*/
case class Protection(source: AuthSource) extends ProtectionLike {
/** List of ResourceServer token schemes. By default,
 * this includes BearerAuth, which extracts Bearer tokens from a header,
 * QParamBearerAuth, which extracts Bearer tokens from request params, and
 * MacAuth, which extracts tokens using the `Mac` authentication encoding */
val schemes = Seq(BearerAuth, QParamBearerAuth, MacAuth)
}
/** Provides OAuth2 protection implementation.
* Extend this trait to customize query string `oauth_token`, etc. */
trait ProtectionLike extends Plan {
import javax.servlet.http.HttpServletRequest
/** Provides a means of verifying a deserialized access token */
val source: AuthSource
/** Provides a list of schemes used for decoding access tokens in request */
val schemes: Seq[AuthScheme]
final def intent = ((schemes map { _.intent(this) }) :\ fallback) { _ orElse _ }
/** If no authentication token is provided at all, demand the first
* authentication scheme of all that are supported */
def fallback: Plan.Intent = {
case r =>
schemes.head.errorResponse(Unauthorized, "", r)
}
/** Returns access token response to client */
def authenticate[T <: HttpServletRequest](
token: AccessToken, request: HttpRequest[T])(errorResp: (String => ResponseFunction[Any])) =
source.authenticateToken(token, request) match {
case Left(msg) => errorResp(msg)
case Right((user, clientId, scopes)) =>
request.underlying.setAttribute(OAuth2.XAuthorizedIdentity, user.id)
request.underlying.setAttribute(OAuth2.XAuthorizedClientIdentity, clientId)
request.underlying.setAttribute(OAuth2.XAuthorizedScopes, scopes)
Pass
}
}
/** Represents the authorization source that issued the access token. */
trait AuthSource {
/** Given an deserialized access token and request, extract the resource owner, client id, and list of scopes
* associated with the request, if there is an error return it represented as a string message
* to return the oauth client */
def authenticateToken[T](
token: AccessToken,
request: HttpRequest[T]): Either[String, (ResourceOwner, String, Seq[String])]
/**
* Auth sources which
*/
def realm: Option[String] = None
}
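/** Hedged sketch (not part of the original file): a minimal AuthSource backed by a
 * hypothetical in-memory token table. `findToken` and the data it would return are
 * assumptions; a real source would consult the authorization server's token store. */
object InMemoryAuthSource extends AuthSource {
  // token value -> (resource owner, client id, granted scopes); empty in this sketch
  private def findToken(value: String): Option[(ResourceOwner, String, Seq[String])] = None

  def authenticateToken[T](
    token: AccessToken,
    request: HttpRequest[T]): Either[String, (ResourceOwner, String, Seq[String])] =
    token match {
      case BearerToken(value) => findToken(value).toRight("invalid or expired token")
      case _ => Left("unsupported token type")
    }
}
// A resource server plan could then be constructed as `Protection(InMemoryAuthSource)`.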
/** Represents the scheme used for decoding access tokens from a given requests. */
trait AuthScheme {
def intent(protection: ProtectionLike): Plan.Intent
def errorString(status: String, description: String) =
"""error="%s" error_description="%s" """.trim format(status, description)
/** The WWW-Authenticate challenge returned to the client in a 401 response
* for invalid requests */
val challenge: String
/**
* An error header, consisting of the challenge and possibly an error and error_description attribute
* (this depends on the authentication scheme).
*/
def errorHeader(error: Option[String] = None, description: Option[String] = None) = {
val attrs = List("error" -> error, "error_description" -> description).collect { case (key, Some(value)) => key -> value }
attrs.tail.foldLeft(
attrs.headOption.foldLeft(challenge) { case (current, (key, value)) => """%s %s="%s"""".format(current, key, value) }
) { case (current, (key, value)) => current + ",%s=\"%s\"".format(key, value) }
}
/**
* The response for failed authentication attempts. Intended to be overridden by authentication schemes that have
* differing requirements.
*/
val failedAuthenticationResponse: (String => ResponseFunction[Any]) = { msg =>
Unauthorized ~> WWWAuthenticate(errorHeader(Some("invalid_token"), Some(msg))) ~>
ResponseString(errorString("invalid_token", msg))
}
/** Return a function representing an error response */
def errorResponse[T](status: Status, description: String,
request: HttpRequest[T]): ResponseFunction[Any] = (status, description) match {
case (Unauthorized, "") => Unauthorized ~> WWWAuthenticate(challenge) ~> ResponseString(challenge)
case (Unauthorized, _) => failedAuthenticationResponse(description)
case (BadRequest, _) => status ~> ResponseString(errorString("invalid_request", description))
case (Forbidden, _) => status ~> ResponseString(errorString("insufficient_scope", description))
case _ => status ~> ResponseString(errorString(status.toString, description))
}
}
trait AccessToken
case class BearerToken(value: String) extends AccessToken
/** Represents Bearer auth encoded in a header.
* see also [[http://tools.ietf.org/html/draft-ietf-oauth-v2-bearer-14]] */
trait BearerAuth extends AuthScheme {
val challenge = "Bearer"
val defaultBearerHeader = """Bearer ([\w\d!#$%&'\(\)\*+\-\.\/:<=>?@\[\]^_`{|}~\\,;]+)""".r
/** bearer header format */
def header = defaultBearerHeader
object BearerHeader {
val HeaderPattern = header
def unapply(hval: String) = hval match {
case HeaderPattern(token) => Some(token)
case _ => None
}
}
def intent(protection: ProtectionLike) = {
case Authorization(BearerHeader(token)) & request =>
protection.authenticate(BearerToken(token), request) { failedAuthenticationResponse }
}
}
object BearerAuth extends BearerAuth {}
/** Represents Bearer auth encoded in query params.
* see also [[http://tools.ietf.org/html/draft-ietf-oauth-v2-bearer-14]] */
trait QParamBearerAuth extends AuthScheme {
val challenge = "Bearer"
val defaultQueryParam = "access_token"
def queryParam = defaultQueryParam
object BearerParam {
def unapply(params: Map[String, Seq[String]]) = params(queryParam).headOption
}
def intent(protection: ProtectionLike) = {
case Params(BearerParam(token)) & request =>
protection.authenticate(BearerToken(token), request) { failedAuthenticationResponse }
}
}
object QParamBearerAuth extends QParamBearerAuth {}
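/* Editor's note: an illustrative comparison (request paths and token values below are
 * hypothetical, not from the original source). The two Bearer schemes differ only in
 * where the token is read from:
 * {{{
 *   // BearerAuth        GET /photos          with header  Authorization: Bearer some-token
 *   // QParamBearerAuth  GET /photos?access_token=some-token
 * }}}
 */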
/** Represents MAC auth. */
trait MacAuth extends AuthScheme {
import unfiltered.mac.{ Mac, MacAuthorization }
val challenge = "MAC"
/** The algorithm used to sign the request */
def algorithm: String
/** Given a token value, returns the associated token secret */
def tokenSecret(key: String): Option[String]
def intent(protection: ProtectionLike) = {
case MacAuthorization(id, nonce, bodyhash, ext, mac) & req =>
try {
tokenSecret(id) match {
case Some(key) =>
// compare a signed request with the signature provided
Mac.sign(req, nonce, ext, bodyhash, key, algorithm).fold({ err =>
errorResponse(Unauthorized, err, req)
}, { sig =>
if(sig == mac) protection.authenticate(MacAuthToken(id, key, nonce, bodyhash, ext), req) {
failedAuthenticationResponse
}
else errorResponse(Unauthorized, "invalid MAC signature", req)
})
case _ =>
errorResponse(Unauthorized, "invalid token", req)
}
} catch {
case _: Exception =>
errorResponse(Unauthorized, "invalid MAC header.", req)
}
}
/**
* Whereas the Bearer token is supposed to return an error code in the error attribute and a human-readable
* error description in the error_description attribute of the WWW-Authenticate header, for the MAC
* authentication scheme, a human-readable error message may be supplied in the error attribute
* (see [[http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-00#section-4.1]] )
*/
override val failedAuthenticationResponse: (String => ResponseFunction[Any]) = { msg =>
Unauthorized ~> WWWAuthenticate(errorHeader(Some(msg))) ~> ResponseString("""error="%s"""".format(msg))
}
}
object MacAuth extends MacAuth {
def algorithm = "hmac-sha-1"
def tokenSecret(key: String) = None
}
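/* Editor's sketch (the object name and secret store below are hypothetical, not part of
 * the original source): a concrete MAC scheme only needs to supply the signing algorithm
 * and a lookup from token id to token secret.
 * {{{
 *   object InMemoryMacAuth extends MacAuth {
 *     private val secrets = Map("client-1" -> "s3cret")
 *     def algorithm = "hmac-sha-1"
 *     def tokenSecret(key: String) = secrets.get(key)
 *   }
 * }}}
 */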
case class MacAuthToken(id: String,
secret: String,
nonce: String,
bodyhash: Option[String],
ext: Option[String]
) extends AccessToken
|
jarin/unfiltered
|
oauth2/src/main/scala/protections.scala
|
Scala
|
mit
| 8,402
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.recommendation
import java.{util => ju}
import java.io.IOException
import scala.collection.mutable
import scala.reflect.ClassTag
import scala.util.{Sorting, Try}
import scala.util.hashing.byteswap64
import com.github.fommil.netlib.BLAS.{getInstance => blas}
import org.apache.hadoop.fs.Path
import org.json4s.DefaultFormats
import org.json4s.JsonDSL._
import org.apache.spark.{Dependency, Partitioner, ShuffleDependency, SparkContext}
import org.apache.spark.annotation.{DeveloperApi, Since}
import org.apache.spark.internal.Logging
import org.apache.spark.ml.{Estimator, Model}
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared._
import org.apache.spark.ml.util._
import org.apache.spark.mllib.linalg.CholeskyDecomposition
import org.apache.spark.mllib.optimization.NNLS
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.Utils
import org.apache.spark.util.collection.{OpenHashMap, OpenHashSet, SortDataFormat, Sorter}
import org.apache.spark.util.random.XORShiftRandom
/**
* Common params for ALS and ALSModel.
*/
private[recommendation] trait ALSModelParams extends Params with HasPredictionCol {
/**
* Param for the column name for user ids. Ids must be integers. Other
* numeric types are supported for this column, but will be cast to integers as long as they
* fall within the integer value range.
* Default: "user"
* @group param
*/
val userCol = new Param[String](this, "userCol", "column name for user ids. Ids must be within " +
"the integer value range.")
/** @group getParam */
def getUserCol: String = $(userCol)
/**
* Param for the column name for item ids. Ids must be integers. Other
* numeric types are supported for this column, but will be cast to integers as long as they
* fall within the integer value range.
* Default: "item"
* @group param
*/
val itemCol = new Param[String](this, "itemCol", "column name for item ids. Ids must be within " +
"the integer value range.")
/** @group getParam */
def getItemCol: String = $(itemCol)
/**
* Attempts to safely cast a user/item id to an Int. Throws an exception if the value is
* out of integer range.
*/
protected val checkedCast = udf { (n: Double) =>
if (n > Int.MaxValue || n < Int.MinValue) {
throw new IllegalArgumentException(s"ALS only supports values in Integer range for columns " +
s"${$(userCol)} and ${$(itemCol)}. Value $n was out of Integer range.")
} else {
n.toInt
}
}
}
/**
* Common params for ALS.
*/
private[recommendation] trait ALSParams extends ALSModelParams with HasMaxIter with HasRegParam
with HasPredictionCol with HasCheckpointInterval with HasSeed {
/**
* Param for rank of the matrix factorization (positive).
* Default: 10
* @group param
*/
val rank = new IntParam(this, "rank", "rank of the factorization", ParamValidators.gtEq(1))
/** @group getParam */
def getRank: Int = $(rank)
/**
* Param for number of user blocks (positive).
* Default: 10
* @group param
*/
val numUserBlocks = new IntParam(this, "numUserBlocks", "number of user blocks",
ParamValidators.gtEq(1))
/** @group getParam */
def getNumUserBlocks: Int = $(numUserBlocks)
/**
* Param for number of item blocks (positive).
* Default: 10
* @group param
*/
val numItemBlocks = new IntParam(this, "numItemBlocks", "number of item blocks",
ParamValidators.gtEq(1))
/** @group getParam */
def getNumItemBlocks: Int = $(numItemBlocks)
/**
* Param to decide whether to use implicit preference.
* Default: false
* @group param
*/
val implicitPrefs = new BooleanParam(this, "implicitPrefs", "whether to use implicit preference")
/** @group getParam */
def getImplicitPrefs: Boolean = $(implicitPrefs)
/**
* Param for the alpha parameter in the implicit preference formulation (nonnegative).
* Default: 1.0
* @group param
*/
val alpha = new DoubleParam(this, "alpha", "alpha for implicit preference",
ParamValidators.gtEq(0))
/** @group getParam */
def getAlpha: Double = $(alpha)
/**
* Param for the column name for ratings.
* Default: "rating"
* @group param
*/
val ratingCol = new Param[String](this, "ratingCol", "column name for ratings")
/** @group getParam */
def getRatingCol: String = $(ratingCol)
/**
* Param for whether to apply nonnegativity constraints.
* Default: false
* @group param
*/
val nonnegative = new BooleanParam(
this, "nonnegative", "whether to use nonnegative constraint for least squares")
/** @group getParam */
def getNonnegative: Boolean = $(nonnegative)
/**
* Param for StorageLevel for intermediate datasets. Pass in a string representation of
* `StorageLevel`. Cannot be "NONE".
* Default: "MEMORY_AND_DISK".
*
* @group expertParam
*/
val intermediateStorageLevel = new Param[String](this, "intermediateStorageLevel",
"StorageLevel for intermediate datasets. Cannot be 'NONE'.",
(s: String) => Try(StorageLevel.fromString(s)).isSuccess && s != "NONE")
/** @group expertGetParam */
def getIntermediateStorageLevel: String = $(intermediateStorageLevel)
/**
* Param for StorageLevel for ALS model factors. Pass in a string representation of
* `StorageLevel`.
* Default: "MEMORY_AND_DISK".
*
* @group expertParam
*/
val finalStorageLevel = new Param[String](this, "finalStorageLevel",
"StorageLevel for ALS model factors.",
(s: String) => Try(StorageLevel.fromString(s)).isSuccess)
/** @group expertGetParam */
def getFinalStorageLevel: String = $(finalStorageLevel)
setDefault(rank -> 10, maxIter -> 10, regParam -> 0.1, numUserBlocks -> 10, numItemBlocks -> 10,
implicitPrefs -> false, alpha -> 1.0, userCol -> "user", itemCol -> "item",
ratingCol -> "rating", nonnegative -> false, checkpointInterval -> 10,
intermediateStorageLevel -> "MEMORY_AND_DISK", finalStorageLevel -> "MEMORY_AND_DISK")
/**
* Validates and transforms the input schema.
*
* @param schema input schema
* @return output schema
*/
protected def validateAndTransformSchema(schema: StructType): StructType = {
// user and item will be cast to Int
SchemaUtils.checkNumericType(schema, $(userCol))
SchemaUtils.checkNumericType(schema, $(itemCol))
// rating will be cast to Float
SchemaUtils.checkNumericType(schema, $(ratingCol))
SchemaUtils.appendColumn(schema, $(predictionCol), FloatType)
}
}
/**
* Model fitted by ALS.
*
* @param rank rank of the matrix factorization model
* @param userFactors a DataFrame that stores user factors in two columns: `id` and `features`
* @param itemFactors a DataFrame that stores item factors in two columns: `id` and `features`
*/
@Since("1.3.0")
class ALSModel private[ml] (
@Since("1.4.0") override val uid: String,
@Since("1.4.0") val rank: Int,
@transient val userFactors: DataFrame,
@transient val itemFactors: DataFrame)
extends Model[ALSModel] with ALSModelParams with MLWritable {
/** @group setParam */
@Since("1.4.0")
def setUserCol(value: String): this.type = set(userCol, value)
/** @group setParam */
@Since("1.4.0")
def setItemCol(value: String): this.type = set(itemCol, value)
/** @group setParam */
@Since("1.3.0")
def setPredictionCol(value: String): this.type = set(predictionCol, value)
@Since("2.0.0")
override def transform(dataset: Dataset[_]): DataFrame = {
transformSchema(dataset.schema)
    // Register a UDF that computes the dot product of the user and item factor vectors,
    // then add a new column, named by $(predictionCol), by applying that UDF.
val predict = udf { (userFeatures: Seq[Float], itemFeatures: Seq[Float]) =>
if (userFeatures != null && itemFeatures != null) {
blas.sdot(rank, userFeatures.toArray, 1, itemFeatures.toArray, 1)
} else {
Float.NaN
}
}
dataset
.join(userFactors,
checkedCast(dataset($(userCol)).cast(DoubleType)) === userFactors("id"), "left")
.join(itemFactors,
checkedCast(dataset($(itemCol)).cast(DoubleType)) === itemFactors("id"), "left")
.select(dataset("*"),
predict(userFactors("features"), itemFactors("features")).as($(predictionCol)))
}
@Since("1.3.0")
override def transformSchema(schema: StructType): StructType = {
// user and item will be cast to Int
SchemaUtils.checkNumericType(schema, $(userCol))
SchemaUtils.checkNumericType(schema, $(itemCol))
SchemaUtils.appendColumn(schema, $(predictionCol), FloatType)
}
@Since("1.5.0")
override def copy(extra: ParamMap): ALSModel = {
val copied = new ALSModel(uid, rank, userFactors, itemFactors)
copyValues(copied, extra).setParent(parent)
}
@Since("1.6.0")
override def write: MLWriter = new ALSModel.ALSModelWriter(this)
}
@Since("1.6.0")
object ALSModel extends MLReadable[ALSModel] {
@Since("1.6.0")
override def read: MLReader[ALSModel] = new ALSModelReader
@Since("1.6.0")
override def load(path: String): ALSModel = super.load(path)
private[ALSModel] class ALSModelWriter(instance: ALSModel) extends MLWriter {
override protected def saveImpl(path: String): Unit = {
val extraMetadata = "rank" -> instance.rank
DefaultParamsWriter.saveMetadata(instance, path, sc, Some(extraMetadata))
val userPath = new Path(path, "userFactors").toString
instance.userFactors.write.format("parquet").save(userPath)
val itemPath = new Path(path, "itemFactors").toString
instance.itemFactors.write.format("parquet").save(itemPath)
}
}
private class ALSModelReader extends MLReader[ALSModel] {
/** Checked against metadata when loading model */
private val className = classOf[ALSModel].getName
override def load(path: String): ALSModel = {
val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
implicit val format = DefaultFormats
val rank = (metadata.metadata \\ "rank").extract[Int]
val userPath = new Path(path, "userFactors").toString
val userFactors = sparkSession.read.format("parquet").load(userPath)
val itemPath = new Path(path, "itemFactors").toString
val itemFactors = sparkSession.read.format("parquet").load(itemPath)
val model = new ALSModel(metadata.uid, rank, userFactors, itemFactors)
DefaultParamsReader.getAndSetParams(model, metadata)
model
}
}
}
/**
* Alternating Least Squares (ALS) matrix factorization.
*
* ALS attempts to estimate the ratings matrix `R` as the product of two lower-rank matrices,
* `X` and `Y`, i.e. `X * Yt = R`. Typically these approximations are called 'factor' matrices.
* The general approach is iterative. During each iteration, one of the factor matrices is held
* constant, while the other is solved for using least squares. The newly-solved factor matrix is
* then held constant while solving for the other factor matrix.
*
* This is a blocked implementation of the ALS factorization algorithm that groups the two sets
* of factors (referred to as "users" and "products") into blocks and reduces communication by only
* sending one copy of each user vector to each product block on each iteration, and only for the
* product blocks that need that user's feature vector. This is achieved by pre-computing some
* information about the ratings matrix to determine the "out-links" of each user (which blocks of
* products it will contribute to) and "in-link" information for each product (which of the feature
* vectors it receives from each user block it will depend on). This allows us to send only an
* array of feature vectors between each user block and product block, and have the product block
* find the users' ratings and update the products based on these messages.
*
* For implicit preference data, the algorithm used is based on
* "Collaborative Filtering for Implicit Feedback Datasets", available at
* http://dx.doi.org/10.1109/ICDM.2008.22, adapted for the blocked approach used here.
*
* Essentially instead of finding the low-rank approximations to the rating matrix `R`,
* this finds the approximations for a preference matrix `P` where the elements of `P` are 1 if
* r > 0 and 0 if r <= 0. The ratings then act as 'confidence' values related to strength of
* indicated user
* preferences rather than explicit ratings given to items.
*/
@Since("1.3.0")
class ALS(@Since("1.4.0") override val uid: String) extends Estimator[ALSModel] with ALSParams
with DefaultParamsWritable {
import org.apache.spark.ml.recommendation.ALS.Rating
@Since("1.4.0")
def this() = this(Identifiable.randomUID("als"))
/** @group setParam */
@Since("1.3.0")
def setRank(value: Int): this.type = set(rank, value)
/** @group setParam */
@Since("1.3.0")
def setNumUserBlocks(value: Int): this.type = set(numUserBlocks, value)
/** @group setParam */
@Since("1.3.0")
def setNumItemBlocks(value: Int): this.type = set(numItemBlocks, value)
/** @group setParam */
@Since("1.3.0")
def setImplicitPrefs(value: Boolean): this.type = set(implicitPrefs, value)
/** @group setParam */
@Since("1.3.0")
def setAlpha(value: Double): this.type = set(alpha, value)
/** @group setParam */
@Since("1.3.0")
def setUserCol(value: String): this.type = set(userCol, value)
/** @group setParam */
@Since("1.3.0")
def setItemCol(value: String): this.type = set(itemCol, value)
/** @group setParam */
@Since("1.3.0")
def setRatingCol(value: String): this.type = set(ratingCol, value)
/** @group setParam */
@Since("1.3.0")
def setPredictionCol(value: String): this.type = set(predictionCol, value)
/** @group setParam */
@Since("1.3.0")
def setMaxIter(value: Int): this.type = set(maxIter, value)
/** @group setParam */
@Since("1.3.0")
def setRegParam(value: Double): this.type = set(regParam, value)
/** @group setParam */
@Since("1.3.0")
def setNonnegative(value: Boolean): this.type = set(nonnegative, value)
/** @group setParam */
@Since("1.4.0")
def setCheckpointInterval(value: Int): this.type = set(checkpointInterval, value)
/** @group setParam */
@Since("1.3.0")
def setSeed(value: Long): this.type = set(seed, value)
/** @group expertSetParam */
@Since("2.0.0")
def setIntermediateStorageLevel(value: String): this.type = set(intermediateStorageLevel, value)
/** @group expertSetParam */
@Since("2.0.0")
def setFinalStorageLevel(value: String): this.type = set(finalStorageLevel, value)
/**
* Sets both numUserBlocks and numItemBlocks to the specific value.
*
* @group setParam
*/
@Since("1.3.0")
def setNumBlocks(value: Int): this.type = {
setNumUserBlocks(value)
setNumItemBlocks(value)
this
}
@Since("2.0.0")
override def fit(dataset: Dataset[_]): ALSModel = {
transformSchema(dataset.schema)
import dataset.sparkSession.implicits._
val r = if ($(ratingCol) != "") col($(ratingCol)).cast(FloatType) else lit(1.0f)
val ratings = dataset
.select(checkedCast(col($(userCol)).cast(DoubleType)),
checkedCast(col($(itemCol)).cast(DoubleType)), r)
.rdd
.map { row =>
Rating(row.getInt(0), row.getInt(1), row.getFloat(2))
}
val instrLog = Instrumentation.create(this, ratings)
instrLog.logParams(rank, numUserBlocks, numItemBlocks, implicitPrefs, alpha,
userCol, itemCol, ratingCol, predictionCol, maxIter,
regParam, nonnegative, checkpointInterval, seed)
val (userFactors, itemFactors) = ALS.train(ratings, rank = $(rank),
numUserBlocks = $(numUserBlocks), numItemBlocks = $(numItemBlocks),
maxIter = $(maxIter), regParam = $(regParam), implicitPrefs = $(implicitPrefs),
alpha = $(alpha), nonnegative = $(nonnegative),
intermediateRDDStorageLevel = StorageLevel.fromString($(intermediateStorageLevel)),
finalRDDStorageLevel = StorageLevel.fromString($(finalStorageLevel)),
checkpointInterval = $(checkpointInterval), seed = $(seed))
val userDF = userFactors.toDF("id", "features")
val itemDF = itemFactors.toDF("id", "features")
val model = new ALSModel(uid, $(rank), userDF, itemDF).setParent(this)
instrLog.logSuccess(model)
copyValues(model)
}
@Since("1.3.0")
override def transformSchema(schema: StructType): StructType = {
validateAndTransformSchema(schema)
}
@Since("1.5.0")
override def copy(extra: ParamMap): ALS = defaultCopy(extra)
}
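/* Editor's usage sketch (column names and the trainingDF/testDF DataFrames are
 * hypothetical, not part of the original source): a typical fit/transform round trip
 * with the estimator defined above.
 * {{{
 *   val als = new ALS()
 *     .setRank(10)
 *     .setMaxIter(10)
 *     .setRegParam(0.1)
 *     .setUserCol("userId")
 *     .setItemCol("movieId")
 *     .setRatingCol("rating")
 *   val model = als.fit(trainingDF)            // trainingDF(userId, movieId, rating)
 *   val predictions = model.transform(testDF)  // adds the $(predictionCol) column ("prediction" by default)
 * }}}
 */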
/**
* :: DeveloperApi ::
* An implementation of ALS that supports generic ID types, specialized for Int and Long. This is
* exposed as a developer API for users who do need other ID types. But it is not recommended
* because it increases the shuffle size and memory requirement during training. For simplicity,
* users and items must have the same type. The number of distinct users/items should be smaller
* than 2 billion.
*/
@DeveloperApi
object ALS extends DefaultParamsReadable[ALS] with Logging {
/**
* :: DeveloperApi ::
* Rating class for better code readability.
*/
@DeveloperApi
case class Rating[@specialized(Int, Long) ID](user: ID, item: ID, rating: Float)
@Since("1.6.0")
override def load(path: String): ALS = super.load(path)
/** Trait for least squares solvers applied to the normal equation. */
private[recommendation] trait LeastSquaresNESolver extends Serializable {
/** Solves a least squares problem with regularization (possibly with other constraints). */
def solve(ne: NormalEquation, lambda: Double): Array[Float]
}
/** Cholesky solver for least square problems. */
private[recommendation] class CholeskySolver extends LeastSquaresNESolver {
/**
* Solves a least squares problem with L2 regularization:
*
* min norm(A x - b)^2^ + lambda * norm(x)^2^
*
* @param ne a [[NormalEquation]] instance that contains AtA, Atb, and n (number of instances)
* @param lambda regularization constant
* @return the solution x
*/
override def solve(ne: NormalEquation, lambda: Double): Array[Float] = {
val k = ne.k
// Add scaled lambda to the diagonals of AtA.
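      // Note (editor): ata holds the upper triangle of AtA in packed column-major order,
      // so the diagonal entries sit at offsets 0, 2, 5, 9, ...; the (i += j; j += 1)
      // stride below walks exactly over those positions.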
var i = 0
var j = 2
while (i < ne.triK) {
ne.ata(i) += lambda
i += j
j += 1
}
CholeskyDecomposition.solve(ne.ata, ne.atb)
val x = new Array[Float](k)
i = 0
while (i < k) {
x(i) = ne.atb(i).toFloat
i += 1
}
ne.reset()
x
}
}
/** NNLS solver. */
private[recommendation] class NNLSSolver extends LeastSquaresNESolver {
private var rank: Int = -1
private var workspace: NNLS.Workspace = _
private var ata: Array[Double] = _
private var initialized: Boolean = false
private def initialize(rank: Int): Unit = {
if (!initialized) {
this.rank = rank
workspace = NNLS.createWorkspace(rank)
ata = new Array[Double](rank * rank)
initialized = true
} else {
require(this.rank == rank)
}
}
/**
* Solves a nonnegative least squares problem with L2 regularization:
*
* min_x_ norm(A x - b)^2^ + lambda * n * norm(x)^2^
* subject to x >= 0
*/
override def solve(ne: NormalEquation, lambda: Double): Array[Float] = {
val rank = ne.k
initialize(rank)
fillAtA(ne.ata, lambda)
val x = NNLS.solve(ata, ne.atb, workspace)
ne.reset()
x.map(x => x.toFloat)
}
/**
     * Given the upper triangular part of AtA in packed (column-major) order, compute the full
     * symmetric square matrix that it represents, storing it into the ata workspace and adding
     * lambda to its diagonal.
*/
private def fillAtA(triAtA: Array[Double], lambda: Double) {
var i = 0
var pos = 0
var a = 0.0
while (i < rank) {
var j = 0
while (j <= i) {
a = triAtA(pos)
ata(i * rank + j) = a
ata(j * rank + i) = a
pos += 1
j += 1
}
ata(i * rank + i) += lambda
i += 1
}
}
}
/**
* Representing a normal equation to solve the following weighted least squares problem:
*
   * minimize \sum,,i,, c,,i,, (a,,i,,^T^ x - b,,i,,)^2^ + lambda * x^T^ x.
   *
   * Its normal equation is given by
   *
   * \sum,,i,, c,,i,, (a,,i,, a,,i,,^T^ x - b,,i,, a,,i,,) + lambda * x = 0.
*/
private[recommendation] class NormalEquation(val k: Int) extends Serializable {
/** Number of entries in the upper triangular part of a k-by-k matrix. */
val triK = k * (k + 1) / 2
/** A^T^ * A */
val ata = new Array[Double](triK)
/** A^T^ * b */
val atb = new Array[Double](k)
private val da = new Array[Double](k)
private val upper = "U"
private def copyToDouble(a: Array[Float]): Unit = {
var i = 0
while (i < k) {
da(i) = a(i)
i += 1
}
}
/** Adds an observation. */
def add(a: Array[Float], b: Double, c: Double = 1.0): this.type = {
require(c >= 0.0)
require(a.length == k)
copyToDouble(a)
blas.dspr(upper, k, c, da, 1, ata)
if (b != 0.0) {
blas.daxpy(k, c * b, da, 1, atb, 1)
}
this
}
/** Merges another normal equation object. */
def merge(other: NormalEquation): this.type = {
require(other.k == k)
blas.daxpy(ata.length, 1.0, other.ata, 1, ata, 1)
blas.daxpy(atb.length, 1.0, other.atb, 1, atb, 1)
this
}
/** Resets everything to zero, which should be called after each solve. */
def reset(): Unit = {
ju.Arrays.fill(ata, 0.0)
ju.Arrays.fill(atb, 0.0)
}
}
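  /* Editor's sketch (values are hypothetical, not part of the original source):
   * accumulating two orthonormal observations and solving the resulting normal equation.
   * {{{
   *   val ne = new NormalEquation(2)
   *   ne.add(Array(1.0f, 0.0f), 1.0)
   *   ne.add(Array(0.0f, 1.0f), 2.0)
   *   val x = new CholeskySolver().solve(ne, lambda = 0.0)  // ~ Array(1.0f, 2.0f)
   * }}}
   */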
/**
* :: DeveloperApi ::
* Implementation of the ALS algorithm.
*/
@DeveloperApi
def train[ID: ClassTag]( // scalastyle:ignore
ratings: RDD[Rating[ID]],
rank: Int = 10,
numUserBlocks: Int = 10,
numItemBlocks: Int = 10,
maxIter: Int = 10,
regParam: Double = 1.0,
implicitPrefs: Boolean = false,
alpha: Double = 1.0,
nonnegative: Boolean = false,
intermediateRDDStorageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK,
finalRDDStorageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK,
checkpointInterval: Int = 10,
seed: Long = 0L)(
implicit ord: Ordering[ID]): (RDD[(ID, Array[Float])], RDD[(ID, Array[Float])]) = {
require(intermediateRDDStorageLevel != StorageLevel.NONE,
"ALS is not designed to run without persisting intermediate RDDs.")
val sc = ratings.sparkContext
val userPart = new ALSPartitioner(numUserBlocks)
val itemPart = new ALSPartitioner(numItemBlocks)
val userLocalIndexEncoder = new LocalIndexEncoder(userPart.numPartitions)
val itemLocalIndexEncoder = new LocalIndexEncoder(itemPart.numPartitions)
val solver = if (nonnegative) new NNLSSolver else new CholeskySolver
val blockRatings = partitionRatings(ratings, userPart, itemPart)
.persist(intermediateRDDStorageLevel)
val (userInBlocks, userOutBlocks) =
makeBlocks("user", blockRatings, userPart, itemPart, intermediateRDDStorageLevel)
// materialize blockRatings and user blocks
userOutBlocks.count()
val swappedBlockRatings = blockRatings.map {
case ((userBlockId, itemBlockId), RatingBlock(userIds, itemIds, localRatings)) =>
((itemBlockId, userBlockId), RatingBlock(itemIds, userIds, localRatings))
}
val (itemInBlocks, itemOutBlocks) =
makeBlocks("item", swappedBlockRatings, itemPart, userPart, intermediateRDDStorageLevel)
// materialize item blocks
itemOutBlocks.count()
val seedGen = new XORShiftRandom(seed)
var userFactors = initialize(userInBlocks, rank, seedGen.nextLong())
var itemFactors = initialize(itemInBlocks, rank, seedGen.nextLong())
var previousCheckpointFile: Option[String] = None
val shouldCheckpoint: Int => Boolean = (iter) =>
sc.checkpointDir.isDefined && checkpointInterval != -1 && (iter % checkpointInterval == 0)
val deletePreviousCheckpointFile: () => Unit = () =>
previousCheckpointFile.foreach { file =>
try {
val checkpointFile = new Path(file)
checkpointFile.getFileSystem(sc.hadoopConfiguration).delete(checkpointFile, true)
} catch {
case e: IOException =>
logWarning(s"Cannot delete checkpoint file $file:", e)
}
}
if (implicitPrefs) {
for (iter <- 1 to maxIter) {
userFactors.setName(s"userFactors-$iter").persist(intermediateRDDStorageLevel)
val previousItemFactors = itemFactors
itemFactors = computeFactors(userFactors, userOutBlocks, itemInBlocks, rank, regParam,
userLocalIndexEncoder, implicitPrefs, alpha, solver)
previousItemFactors.unpersist()
itemFactors.setName(s"itemFactors-$iter").persist(intermediateRDDStorageLevel)
// TODO: Generalize PeriodicGraphCheckpointer and use it here.
val deps = itemFactors.dependencies
if (shouldCheckpoint(iter)) {
itemFactors.checkpoint() // itemFactors gets materialized in computeFactors
}
val previousUserFactors = userFactors
userFactors = computeFactors(itemFactors, itemOutBlocks, userInBlocks, rank, regParam,
itemLocalIndexEncoder, implicitPrefs, alpha, solver)
if (shouldCheckpoint(iter)) {
ALS.cleanShuffleDependencies(sc, deps)
deletePreviousCheckpointFile()
previousCheckpointFile = itemFactors.getCheckpointFile
}
previousUserFactors.unpersist()
}
} else {
for (iter <- 0 until maxIter) {
itemFactors = computeFactors(userFactors, userOutBlocks, itemInBlocks, rank, regParam,
userLocalIndexEncoder, solver = solver)
if (shouldCheckpoint(iter)) {
val deps = itemFactors.dependencies
itemFactors.checkpoint()
itemFactors.count() // checkpoint item factors and cut lineage
ALS.cleanShuffleDependencies(sc, deps)
deletePreviousCheckpointFile()
previousCheckpointFile = itemFactors.getCheckpointFile
}
userFactors = computeFactors(itemFactors, itemOutBlocks, userInBlocks, rank, regParam,
itemLocalIndexEncoder, solver = solver)
}
}
val userIdAndFactors = userInBlocks
.mapValues(_.srcIds)
.join(userFactors)
.mapPartitions({ items =>
items.flatMap { case (_, (ids, factors)) =>
ids.view.zip(factors)
}
// Preserve the partitioning because IDs are consistent with the partitioners in userInBlocks
// and userFactors.
}, preservesPartitioning = true)
.setName("userFactors")
.persist(finalRDDStorageLevel)
val itemIdAndFactors = itemInBlocks
.mapValues(_.srcIds)
.join(itemFactors)
.mapPartitions({ items =>
items.flatMap { case (_, (ids, factors)) =>
ids.view.zip(factors)
}
}, preservesPartitioning = true)
.setName("itemFactors")
.persist(finalRDDStorageLevel)
if (finalRDDStorageLevel != StorageLevel.NONE) {
userIdAndFactors.count()
itemFactors.unpersist()
itemIdAndFactors.count()
userInBlocks.unpersist()
userOutBlocks.unpersist()
itemInBlocks.unpersist()
itemOutBlocks.unpersist()
blockRatings.unpersist()
}
(userIdAndFactors, itemIdAndFactors)
}
/**
* Factor block that stores factors (Array[Float]) in an Array.
*/
private type FactorBlock = Array[Array[Float]]
/**
* Out-link block that stores, for each dst (item/user) block, which src (user/item) factors to
* send. For example, outLinkBlock(0) contains the local indices (not the original src IDs) of the
* src factors in this block to send to dst block 0.
*/
private type OutBlock = Array[Array[Int]]
/**
* In-link block for computing src (user/item) factors. This includes the original src IDs
* of the elements within this block as well as encoded dst (item/user) indices and corresponding
* ratings. The dst indices are in the form of (blockId, localIndex), which are not the original
* dst IDs. To compute src factors, we expect receiving dst factors that match the dst indices.
* For example, if we have an in-link record
*
* {srcId: 0, dstBlockId: 2, dstLocalIndex: 3, rating: 5.0},
*
* and assume that the dst factors are stored as dstFactors: Map[Int, Array[Array[Float]]], which
* is a blockId to dst factors map, the corresponding dst factor of the record is dstFactor(2)(3).
*
* We use a CSC-like (compressed sparse column) format to store the in-link information. So we can
* compute src factors one after another using only one normal equation instance.
*
* @param srcIds src ids (ordered)
* @param dstPtrs dst pointers. Elements in range [dstPtrs(i), dstPtrs(i+1)) of dst indices and
* ratings are associated with srcIds(i).
* @param dstEncodedIndices encoded dst indices
* @param ratings ratings
* @see [[LocalIndexEncoder]]
*/
private[recommendation] case class InBlock[@specialized(Int, Long) ID: ClassTag](
srcIds: Array[ID],
dstPtrs: Array[Int],
dstEncodedIndices: Array[Int],
ratings: Array[Float]) {
/** Size of the block. */
def size: Int = ratings.length
require(dstEncodedIndices.length == size)
require(dstPtrs.length == srcIds.length + 1)
}
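  /* Editor's illustration (data below is hypothetical, not part of the original source):
   * with srcIds = [7, 9] and dstPtrs = [0, 2, 3], entries 0 and 1 of dstEncodedIndices
   * and ratings belong to src id 7, and entry 2 belongs to src id 9. */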
/**
* Initializes factors randomly given the in-link blocks.
*
* @param inBlocks in-link blocks
* @param rank rank
* @return initialized factor blocks
*/
private def initialize[ID](
inBlocks: RDD[(Int, InBlock[ID])],
rank: Int,
seed: Long): RDD[(Int, FactorBlock)] = {
// Choose a unit vector uniformly at random from the unit sphere, but from the
// "first quadrant" where all elements are nonnegative. This can be done by choosing
// elements distributed as Normal(0,1) and taking the absolute value, and then normalizing.
    // This appears to create factorizations that have a slightly better reconstruction
    // (<1%) compared to picking elements uniformly at random in [0,1].
inBlocks.map { case (srcBlockId, inBlock) =>
val random = new XORShiftRandom(byteswap64(seed ^ srcBlockId))
val factors = Array.fill(inBlock.srcIds.length) {
val factor = Array.fill(rank)(random.nextGaussian().toFloat)
val nrm = blas.snrm2(rank, factor, 1)
blas.sscal(rank, 1.0f / nrm, factor, 1)
factor
}
(srcBlockId, factors)
}
}
/**
* A rating block that contains src IDs, dst IDs, and ratings, stored in primitive arrays.
*/
private[recommendation] case class RatingBlock[@specialized(Int, Long) ID: ClassTag](
srcIds: Array[ID],
dstIds: Array[ID],
ratings: Array[Float]) {
/** Size of the block. */
def size: Int = srcIds.length
require(dstIds.length == srcIds.length)
require(ratings.length == srcIds.length)
}
/**
* Builder for [[RatingBlock]]. [[mutable.ArrayBuilder]] is used to avoid boxing/unboxing.
*/
private[recommendation] class RatingBlockBuilder[@specialized(Int, Long) ID: ClassTag]
extends Serializable {
private val srcIds = mutable.ArrayBuilder.make[ID]
private val dstIds = mutable.ArrayBuilder.make[ID]
private val ratings = mutable.ArrayBuilder.make[Float]
var size = 0
/** Adds a rating. */
def add(r: Rating[ID]): this.type = {
size += 1
srcIds += r.user
dstIds += r.item
ratings += r.rating
this
}
/** Merges another [[RatingBlockBuilder]]. */
def merge(other: RatingBlock[ID]): this.type = {
size += other.srcIds.length
srcIds ++= other.srcIds
dstIds ++= other.dstIds
ratings ++= other.ratings
this
}
/** Builds a [[RatingBlock]]. */
def build(): RatingBlock[ID] = {
RatingBlock[ID](srcIds.result(), dstIds.result(), ratings.result())
}
}
/**
* Partitions raw ratings into blocks.
*
* @param ratings raw ratings
* @param srcPart partitioner for src IDs
* @param dstPart partitioner for dst IDs
* @return an RDD of rating blocks in the form of ((srcBlockId, dstBlockId), ratingBlock)
*/
private def partitionRatings[ID: ClassTag](
ratings: RDD[Rating[ID]],
srcPart: Partitioner,
dstPart: Partitioner): RDD[((Int, Int), RatingBlock[ID])] = {
/* The implementation produces the same result as the following but generates less objects.
ratings.map { r =>
((srcPart.getPartition(r.user), dstPart.getPartition(r.item)), r)
}.aggregateByKey(new RatingBlockBuilder)(
seqOp = (b, r) => b.add(r),
combOp = (b0, b1) => b0.merge(b1.build()))
.mapValues(_.build())
*/
val numPartitions = srcPart.numPartitions * dstPart.numPartitions
ratings.mapPartitions { iter =>
val builders = Array.fill(numPartitions)(new RatingBlockBuilder[ID])
iter.flatMap { r =>
val srcBlockId = srcPart.getPartition(r.user)
val dstBlockId = dstPart.getPartition(r.item)
val idx = srcBlockId + srcPart.numPartitions * dstBlockId
val builder = builders(idx)
builder.add(r)
if (builder.size >= 2048) { // 2048 * (3 * 4) = 24k
builders(idx) = new RatingBlockBuilder
Iterator.single(((srcBlockId, dstBlockId), builder.build()))
} else {
Iterator.empty
}
} ++ {
builders.view.zipWithIndex.filter(_._1.size > 0).map { case (block, idx) =>
val srcBlockId = idx % srcPart.numPartitions
val dstBlockId = idx / srcPart.numPartitions
((srcBlockId, dstBlockId), block.build())
}
}
}.groupByKey().mapValues { blocks =>
val builder = new RatingBlockBuilder[ID]
blocks.foreach(builder.merge)
builder.build()
}.setName("ratingBlocks")
}
/**
* Builder for uncompressed in-blocks of (srcId, dstEncodedIndex, rating) tuples.
*
* @param encoder encoder for dst indices
*/
private[recommendation] class UncompressedInBlockBuilder[@specialized(Int, Long) ID: ClassTag](
encoder: LocalIndexEncoder)(
implicit ord: Ordering[ID]) {
private val srcIds = mutable.ArrayBuilder.make[ID]
private val dstEncodedIndices = mutable.ArrayBuilder.make[Int]
private val ratings = mutable.ArrayBuilder.make[Float]
/**
* Adds a dst block of (srcId, dstLocalIndex, rating) tuples.
*
* @param dstBlockId dst block ID
* @param srcIds original src IDs
* @param dstLocalIndices dst local indices
* @param ratings ratings
*/
def add(
dstBlockId: Int,
srcIds: Array[ID],
dstLocalIndices: Array[Int],
ratings: Array[Float]): this.type = {
val sz = srcIds.length
require(dstLocalIndices.length == sz)
require(ratings.length == sz)
this.srcIds ++= srcIds
this.ratings ++= ratings
var j = 0
while (j < sz) {
this.dstEncodedIndices += encoder.encode(dstBlockId, dstLocalIndices(j))
j += 1
}
this
}
/** Builds a [[UncompressedInBlock]]. */
def build(): UncompressedInBlock[ID] = {
new UncompressedInBlock(srcIds.result(), dstEncodedIndices.result(), ratings.result())
}
}
/**
* A block of (srcId, dstEncodedIndex, rating) tuples stored in primitive arrays.
*/
private[recommendation] class UncompressedInBlock[@specialized(Int, Long) ID: ClassTag](
val srcIds: Array[ID],
val dstEncodedIndices: Array[Int],
val ratings: Array[Float])(
implicit ord: Ordering[ID]) {
    /** Size of the block. */
def length: Int = srcIds.length
/**
* Compresses the block into an [[InBlock]]. The algorithm is the same as converting a
* sparse matrix from coordinate list (COO) format into compressed sparse column (CSC) format.
* Sorting is done using Spark's built-in Timsort to avoid generating too many objects.
*/
def compress(): InBlock[ID] = {
val sz = length
assert(sz > 0, "Empty in-link block should not exist.")
sort()
val uniqueSrcIdsBuilder = mutable.ArrayBuilder.make[ID]
val dstCountsBuilder = mutable.ArrayBuilder.make[Int]
var preSrcId = srcIds(0)
uniqueSrcIdsBuilder += preSrcId
var curCount = 1
var i = 1
var j = 0
while (i < sz) {
val srcId = srcIds(i)
if (srcId != preSrcId) {
uniqueSrcIdsBuilder += srcId
dstCountsBuilder += curCount
preSrcId = srcId
j += 1
curCount = 0
}
curCount += 1
i += 1
}
dstCountsBuilder += curCount
val uniqueSrcIds = uniqueSrcIdsBuilder.result()
      val numUniqueSrcIds = uniqueSrcIds.length
      val dstCounts = dstCountsBuilder.result()
      val dstPtrs = new Array[Int](numUniqueSrcIds + 1)
      var sum = 0
      i = 0
      while (i < numUniqueSrcIds) {
sum += dstCounts(i)
i += 1
dstPtrs(i) = sum
}
InBlock(uniqueSrcIds, dstPtrs, dstEncodedIndices, ratings)
}
private def sort(): Unit = {
val sz = length
// Since there might be interleaved log messages, we insert a unique id for easy pairing.
val sortId = Utils.random.nextInt()
logDebug(s"Start sorting an uncompressed in-block of size $sz. (sortId = $sortId)")
val start = System.nanoTime()
val sorter = new Sorter(new UncompressedInBlockSort[ID])
sorter.sort(this, 0, length, Ordering[KeyWrapper[ID]])
val duration = (System.nanoTime() - start) / 1e9
logDebug(s"Sorting took $duration seconds. (sortId = $sortId)")
}
}
/**
* A wrapper that holds a primitive key.
*
* @see [[UncompressedInBlockSort]]
*/
private class KeyWrapper[@specialized(Int, Long) ID: ClassTag](
implicit ord: Ordering[ID]) extends Ordered[KeyWrapper[ID]] {
var key: ID = _
override def compare(that: KeyWrapper[ID]): Int = {
ord.compare(key, that.key)
}
def setKey(key: ID): this.type = {
this.key = key
this
}
}
/**
* [[SortDataFormat]] of [[UncompressedInBlock]] used by [[Sorter]].
*/
private class UncompressedInBlockSort[@specialized(Int, Long) ID: ClassTag](
implicit ord: Ordering[ID])
extends SortDataFormat[KeyWrapper[ID], UncompressedInBlock[ID]] {
override def newKey(): KeyWrapper[ID] = new KeyWrapper()
override def getKey(
data: UncompressedInBlock[ID],
pos: Int,
reuse: KeyWrapper[ID]): KeyWrapper[ID] = {
if (reuse == null) {
new KeyWrapper().setKey(data.srcIds(pos))
} else {
reuse.setKey(data.srcIds(pos))
}
}
override def getKey(
data: UncompressedInBlock[ID],
pos: Int): KeyWrapper[ID] = {
getKey(data, pos, null)
}
private def swapElements[@specialized(Int, Float) T](
data: Array[T],
pos0: Int,
pos1: Int): Unit = {
val tmp = data(pos0)
data(pos0) = data(pos1)
data(pos1) = tmp
}
override def swap(data: UncompressedInBlock[ID], pos0: Int, pos1: Int): Unit = {
swapElements(data.srcIds, pos0, pos1)
swapElements(data.dstEncodedIndices, pos0, pos1)
swapElements(data.ratings, pos0, pos1)
}
override def copyRange(
src: UncompressedInBlock[ID],
srcPos: Int,
dst: UncompressedInBlock[ID],
dstPos: Int,
length: Int): Unit = {
System.arraycopy(src.srcIds, srcPos, dst.srcIds, dstPos, length)
System.arraycopy(src.dstEncodedIndices, srcPos, dst.dstEncodedIndices, dstPos, length)
System.arraycopy(src.ratings, srcPos, dst.ratings, dstPos, length)
}
override def allocate(length: Int): UncompressedInBlock[ID] = {
new UncompressedInBlock(
new Array[ID](length), new Array[Int](length), new Array[Float](length))
}
override def copyElement(
src: UncompressedInBlock[ID],
srcPos: Int,
dst: UncompressedInBlock[ID],
dstPos: Int): Unit = {
dst.srcIds(dstPos) = src.srcIds(srcPos)
dst.dstEncodedIndices(dstPos) = src.dstEncodedIndices(srcPos)
dst.ratings(dstPos) = src.ratings(srcPos)
}
}
/**
* Creates in-blocks and out-blocks from rating blocks.
*
* @param prefix prefix for in/out-block names
* @param ratingBlocks rating blocks
* @param srcPart partitioner for src IDs
* @param dstPart partitioner for dst IDs
* @return (in-blocks, out-blocks)
*/
private def makeBlocks[ID: ClassTag](
prefix: String,
ratingBlocks: RDD[((Int, Int), RatingBlock[ID])],
srcPart: Partitioner,
dstPart: Partitioner,
storageLevel: StorageLevel)(
implicit srcOrd: Ordering[ID]): (RDD[(Int, InBlock[ID])], RDD[(Int, OutBlock)]) = {
val inBlocks = ratingBlocks.map {
case ((srcBlockId, dstBlockId), RatingBlock(srcIds, dstIds, ratings)) =>
// The implementation is a faster version of
// val dstIdToLocalIndex = dstIds.toSet.toSeq.sorted.zipWithIndex.toMap
val start = System.nanoTime()
val dstIdSet = new OpenHashSet[ID](1 << 20)
dstIds.foreach(dstIdSet.add)
val sortedDstIds = new Array[ID](dstIdSet.size)
var i = 0
var pos = dstIdSet.nextPos(0)
while (pos != -1) {
sortedDstIds(i) = dstIdSet.getValue(pos)
pos = dstIdSet.nextPos(pos + 1)
i += 1
}
assert(i == dstIdSet.size)
Sorting.quickSort(sortedDstIds)
val dstIdToLocalIndex = new OpenHashMap[ID, Int](sortedDstIds.length)
i = 0
while (i < sortedDstIds.length) {
dstIdToLocalIndex.update(sortedDstIds(i), i)
i += 1
}
logDebug(
"Converting to local indices took " + (System.nanoTime() - start) / 1e9 + " seconds.")
val dstLocalIndices = dstIds.map(dstIdToLocalIndex.apply)
(srcBlockId, (dstBlockId, srcIds, dstLocalIndices, ratings))
}.groupByKey(new ALSPartitioner(srcPart.numPartitions))
.mapValues { iter =>
val builder =
new UncompressedInBlockBuilder[ID](new LocalIndexEncoder(dstPart.numPartitions))
iter.foreach { case (dstBlockId, srcIds, dstLocalIndices, ratings) =>
builder.add(dstBlockId, srcIds, dstLocalIndices, ratings)
}
builder.build().compress()
}.setName(prefix + "InBlocks")
.persist(storageLevel)
val outBlocks = inBlocks.mapValues { case InBlock(srcIds, dstPtrs, dstEncodedIndices, _) =>
val encoder = new LocalIndexEncoder(dstPart.numPartitions)
val activeIds = Array.fill(dstPart.numPartitions)(mutable.ArrayBuilder.make[Int])
var i = 0
val seen = new Array[Boolean](dstPart.numPartitions)
while (i < srcIds.length) {
var j = dstPtrs(i)
ju.Arrays.fill(seen, false)
while (j < dstPtrs(i + 1)) {
val dstBlockId = encoder.blockId(dstEncodedIndices(j))
if (!seen(dstBlockId)) {
activeIds(dstBlockId) += i // add the local index in this out-block
seen(dstBlockId) = true
}
j += 1
}
i += 1
}
activeIds.map { x =>
x.result()
}
}.setName(prefix + "OutBlocks")
.persist(storageLevel)
(inBlocks, outBlocks)
}
/**
* Compute dst factors by constructing and solving least square problems.
*
* @param srcFactorBlocks src factors
* @param srcOutBlocks src out-blocks
* @param dstInBlocks dst in-blocks
* @param rank rank
* @param regParam regularization constant
* @param srcEncoder encoder for src local indices
* @param implicitPrefs whether to use implicit preference
* @param alpha the alpha constant in the implicit preference formulation
* @param solver solver for least squares problems
* @return dst factors
*/
private def computeFactors[ID](
srcFactorBlocks: RDD[(Int, FactorBlock)],
srcOutBlocks: RDD[(Int, OutBlock)],
dstInBlocks: RDD[(Int, InBlock[ID])],
rank: Int,
regParam: Double,
srcEncoder: LocalIndexEncoder,
implicitPrefs: Boolean = false,
alpha: Double = 1.0,
solver: LeastSquaresNESolver): RDD[(Int, FactorBlock)] = {
val numSrcBlocks = srcFactorBlocks.partitions.length
val YtY = if (implicitPrefs) Some(computeYtY(srcFactorBlocks, rank)) else None
val srcOut = srcOutBlocks.join(srcFactorBlocks).flatMap {
case (srcBlockId, (srcOutBlock, srcFactors)) =>
srcOutBlock.view.zipWithIndex.map { case (activeIndices, dstBlockId) =>
(dstBlockId, (srcBlockId, activeIndices.map(idx => srcFactors(idx))))
}
}
val merged = srcOut.groupByKey(new ALSPartitioner(dstInBlocks.partitions.length))
dstInBlocks.join(merged).mapValues {
case (InBlock(dstIds, srcPtrs, srcEncodedIndices, ratings), srcFactors) =>
val sortedSrcFactors = new Array[FactorBlock](numSrcBlocks)
srcFactors.foreach { case (srcBlockId, factors) =>
sortedSrcFactors(srcBlockId) = factors
}
val dstFactors = new Array[Array[Float]](dstIds.length)
var j = 0
val ls = new NormalEquation(rank)
while (j < dstIds.length) {
ls.reset()
if (implicitPrefs) {
ls.merge(YtY.get)
}
var i = srcPtrs(j)
var numExplicits = 0
while (i < srcPtrs(j + 1)) {
val encoded = srcEncodedIndices(i)
val blockId = srcEncoder.blockId(encoded)
val localIndex = srcEncoder.localIndex(encoded)
val srcFactor = sortedSrcFactors(blockId)(localIndex)
val rating = ratings(i)
if (implicitPrefs) {
            // Extension to the original paper to handle b < 0. Confidence is a function of |b|
            // instead, so that it is never negative. c1 is confidence - 1.0.
val c1 = alpha * math.abs(rating)
// For rating <= 0, the corresponding preference is 0. So the term below is only added
// for rating > 0. Because YtY is already added, we need to adjust the scaling here.
if (rating > 0) {
numExplicits += 1
ls.add(srcFactor, (c1 + 1.0) / c1, c1)
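              // Note (editor): combined with the YtY term merged above, this entry
              // contributes c * (srcFactor srcFactor^T) to AtA and c * srcFactor to Atb,
              // where c = 1 + alpha * |rating| is the confidence.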
}
} else {
ls.add(srcFactor, rating)
numExplicits += 1
}
i += 1
}
// Weight lambda by the number of explicit ratings based on the ALS-WR paper.
dstFactors(j) = solver.solve(ls, numExplicits * regParam)
j += 1
}
dstFactors
}
}
/**
* Computes the Gramian matrix of user or item factors, which is only used in implicit preference.
* Caching of the input factors is handled in [[ALS#train]].
*/
private def computeYtY(factorBlocks: RDD[(Int, FactorBlock)], rank: Int): NormalEquation = {
factorBlocks.values.aggregate(new NormalEquation(rank))(
seqOp = (ne, factors) => {
factors.foreach(ne.add(_, 0.0))
ne
},
combOp = (ne1, ne2) => ne1.merge(ne2))
}
/**
* Encoder for storing (blockId, localIndex) into a single integer.
*
* We use the leading bits (including the sign bit) to store the block id and the rest to store
* the local index. This is based on the assumption that users/items are approximately evenly
* partitioned. With this assumption, we should be able to encode two billion distinct values.
*
* @param numBlocks number of blocks
*/
private[recommendation] class LocalIndexEncoder(numBlocks: Int) extends Serializable {
require(numBlocks > 0, s"numBlocks must be positive but found $numBlocks.")
private[this] final val numLocalIndexBits =
math.min(java.lang.Integer.numberOfLeadingZeros(numBlocks - 1), 31)
private[this] final val localIndexMask = (1 << numLocalIndexBits) - 1
/** Encodes a (blockId, localIndex) into a single integer. */
def encode(blockId: Int, localIndex: Int): Int = {
require(blockId < numBlocks)
require((localIndex & ~localIndexMask) == 0)
(blockId << numLocalIndexBits) | localIndex
}
/** Gets the block id from an encoded index. */
@inline
def blockId(encoded: Int): Int = {
encoded >>> numLocalIndexBits
}
/** Gets the local index from an encoded index. */
@inline
def localIndex(encoded: Int): Int = {
encoded & localIndexMask
}
}
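  /* Editor's sketch (values are hypothetical, not part of the original source): encode
   * packs the block id into the high bits and the local index into the low bits.
   * {{{
   *   val enc = new LocalIndexEncoder(10)  // 10 blocks => 4 high bits for the block id
   *   val e = enc.encode(3, 42)
   *   enc.blockId(e)     // 3
   *   enc.localIndex(e)  // 42
   * }}}
   */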
/**
* Partitioner used by ALS. We require that getPartition is a projection. That is, for any key k,
* we have getPartition(getPartition(k)) = getPartition(k). Since the default HashPartitioner
* satisfies this requirement, we simply use a type alias here.
*/
private[recommendation] type ALSPartitioner = org.apache.spark.HashPartitioner
/**
   * Private function to clean up all of the shuffle files from the dependencies and their parents.
*/
private[spark] def cleanShuffleDependencies[T](
sc: SparkContext,
deps: Seq[Dependency[_]],
blocking: Boolean = false): Unit = {
// If there is no reference tracking we skip clean up.
sc.cleaner.foreach { cleaner =>
/**
       * Clean the shuffle and all of its parents.
*/
def cleanEagerly(dep: Dependency[_]): Unit = {
if (dep.isInstanceOf[ShuffleDependency[_, _, _]]) {
val shuffleId = dep.asInstanceOf[ShuffleDependency[_, _, _]].shuffleId
cleaner.doCleanupShuffle(shuffleId, blocking)
}
val rdd = dep.rdd
val rddDeps = rdd.dependencies
if (rdd.getStorageLevel == StorageLevel.NONE && rddDeps != null) {
rddDeps.foreach(cleanEagerly)
}
}
deps.foreach(cleanEagerly)
}
}
}
|
gioenn/xSpark
|
mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala
|
Scala
|
apache-2.0
| 50,863
|
package org.scurator
import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.RetryOneTime
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
/**
*
*/
@RunWith(classOf[JUnitRunner])
class TestImplicitConversion extends BaseSCuratorTest {
"A SCuratorClient" should "support implicitly wrapping a curator client" in {
// Create a curator client
val curatorClient = CuratorFrameworkFactory.newClient(server.getConnectString, new RetryOneTime(1))
curatorClient.start()
// Add needed imports (including implicit conversion)
import org.scurator.SCuratorClient.Implicits._
import org.scurator.components.ExistsRequest
import scala.concurrent.ExecutionContext.Implicits.global
    // Call SCuratorClient methods without manually wrapping
    // If this compiles we are good (not using the compiles matcher so that coverage is still tracked)
val result = curatorClient.exists(ExistsRequest(path = "/test")).futureValue
result should not be 'exists
curatorClient.close()
}
}
|
granthenke/scurator
|
src/test/scala/org/scurator/TestImplicitConversion.scala
|
Scala
|
apache-2.0
| 1,061
|
package io.taig.gandalf.syntax
import io.taig.gandalf.{ Validation, ops }
import shapeless._
import scala.language.implicitConversions
trait dsl {
implicit def dslRuleSyntax[I, O, V <: HList, E](
validation: Validation.Aux[I, O, V, E]
): ops.dsl.logic[I, O, V, E] = new ops.dsl.logic( validation )
implicit def dslTransformationSyntax[I, O, V <: HList, E](
validation: Validation.Aux[I, O, V, E]
): ops.dsl.transformation[I, O, V, E] = new ops.dsl.transformation( validation )
}
object dsl extends dsl
|
Taig/Gandalf
|
core/src/main/scala/io/taig/gandalf/syntax/dsl.scala
|
Scala
|
mit
| 538
|
package se.lu.nateko.cp.meta.persistence.postgres
import java.sql.ResultSet
import se.lu.nateko.cp.meta.api.CloseableIterator
import java.sql.Connection
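/* Editor's usage sketch (connection URL, query and column name are hypothetical, not part
 * of the original source): the iterator owns the connection, statement and result set and
 * closes all three once exhausted or when close() is called explicitly.
 * {{{
 *   val names = new ResultSetIterator[String](
 *     connectionFactory = () => java.sql.DriverManager.getConnection("jdbc:postgresql://host/db"),
 *     resultFactory = rs => rs.getString("name"),
 *     selectQuery = "SELECT name FROM items"
 *   )
 *   try names.foreach(println) finally names.close()
 * }}}
 */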
class ResultSetIterator[T](connectionFactory: () => Connection, resultFactory: ResultSet => T, selectQuery: String) extends CloseableIterator[T]{
private[this] val conn = connectionFactory()
private[this] val st = conn.createStatement()
private[this] val rs = st.executeQuery(selectQuery)
private[this] var doesHaveNext = false
private[this] var closed = false
increment()
final def hasNext: Boolean = !closed && doesHaveNext
final def next(): T =
if(closed || !doesHaveNext){
throw new IllegalStateException("Iterator has no more elements!")
}else try{
val nextItem = resultFactory(rs)
increment()
nextItem
}catch{
case err: Throwable => close(); throw err
}
final def close(): Unit = if(!closed){
try{
rs.close()
st.close()
}finally{
conn.close()
closed = true
}
}
private def increment(): Unit = {
doesHaveNext = rs.next()
if(!doesHaveNext) close()
}
}
|
ICOS-Carbon-Portal/meta
|
src/main/scala/se/lu/nateko/cp/meta/persistence/postgres/ResultSetIterator.scala
|
Scala
|
gpl-3.0
| 1,074
|
/*
*************************************************************************************
* Copyright 2013 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.services.policies
import net.liftweb.common._
import com.normation.rudder.domain.parameters._
import com.normation.eventlog.EventActor
import com.normation.eventlog.ModificationId
import com.normation.rudder.repository.RoParameterRepository
import com.normation.rudder.repository.WoParameterRepository
import com.normation.rudder.batch.AsyncDeploymentAgent
import com.normation.rudder.batch.AutomaticStartDeployment
import com.normation.inventory.domain.NodeId
trait RoParameterService {
/**
* Returns a Global Parameter by its name
*/
def getGlobalParameter(parameterName : ParameterName) : Box[Option[GlobalParameter]]
/**
* Returns all defined Global Parameters
*/
def getAllGlobalParameters() : Box[Seq[GlobalParameter]]
/**
* Returns all parameters applicable for a given node
*/
def getParametersByNode(nodeId: NodeId) : Box[Seq[Parameter]]
}
trait WoParameterService {
/**
* Save a parameter
* Returns the new global Parameter
* Will fail if a parameter with the same name exists
*/
def saveParameter(
parameter : GlobalParameter
, modId : ModificationId
, actor : EventActor
, reason : Option[String]
) : Box[GlobalParameter]
/**
* Updates a parameter
* Returns the new global Parameter
* Will fail if no params with the same name exists
*/
def updateParameter(
parameter : GlobalParameter
, modId : ModificationId
, actor : EventActor
, reason : Option[String]
) : Box[GlobalParameter]
/**
* Delete a global parameter
*/
def delete(parameterName:ParameterName, modId: ModificationId, actor:EventActor, reason:Option[String]) : Box[ParameterName]
}
class RoParameterServiceImpl(
roParamRepo : RoParameterRepository
) extends RoParameterService with Loggable {
/**
* Returns a Global Parameter by its name
*/
def getGlobalParameter(parameterName : ParameterName) : Box[Option[GlobalParameter]] = {
roParamRepo.getGlobalParameter(parameterName) match {
case Full(entry) => Full(Some(entry))
case Empty => Full(None)
case e:Failure =>
logger.error("Error while trying to fetch param %s : %s".format(parameterName.value, e.messageChain))
e
}
}
/**
* Returns all defined Global Parameters
*/
def getAllGlobalParameters() : Box[Seq[GlobalParameter]]= {
roParamRepo.getAllGlobalParameters() match {
case Full(seq) => Full(seq)
case Empty => Full(Seq())
case e:Failure =>
logger.error("Error while trying to fetch all parameters : %s".format(e.messageChain))
e
}
}
/**
* Returns all parameters applicable for a given node
   * Hyper naive implementation: all parameters!
*/
def getParametersByNode(nodeId: NodeId) : Box[Seq[Parameter]] = {
getAllGlobalParameters()
}
}
class WoParameterServiceImpl(
roParamService : RoParameterService
, woParamRepo : WoParameterRepository
, asyncDeploymentAgent : AsyncDeploymentAgent
) extends WoParameterService with Loggable {
/**
* Save a parameter
* Returns the new global Parameter
* Will fail if a parameter with the same name exists
*/
def saveParameter(
parameter : GlobalParameter
, modId : ModificationId
, actor : EventActor
, reason : Option[String]
) : Box[GlobalParameter] = {
woParamRepo.saveParameter(parameter, modId, actor, reason) match {
case e:Failure =>
logger.error("Error while trying to create param %s : %s".format(parameter.name.value, e.messageChain))
e
case Empty =>
logger.error("Error : Empty result when trying to create param %s".format(parameter.name.value))
Failure("Something unexpected happened when trying to create parameter %s".format(parameter.name.value))
case Full(diff) =>
        // Ok, it's been saved, try to fetch the new value
roParamService.getGlobalParameter(parameter.name) match {
case e: EmptyBox=> e
case Full(option) =>
option match {
case Some(entry) =>
logger.debug("Successfully created parameter %s".format(parameter.name.value))
                // launch a deployment
asyncDeploymentAgent ! AutomaticStartDeployment(modId,actor)
Full(entry)
case None =>
logger.error("Could not fetch back newly created global parameter with name %s".format(parameter.name.value))
Failure("Could not fetch back newly created global parameter with name %s".format(parameter.name.value))
}
}
}
}
/**
* Updates a parameter
* Returns the new global Parameter
* Will fail if no params with the same name exists
*/
def updateParameter(
parameter : GlobalParameter
, modId : ModificationId
, actor : EventActor
, reason : Option[String]
) : Box[GlobalParameter] = {
woParamRepo.updateParameter(parameter, modId, actor, reason) match {
case e:Failure =>
logger.error("Error while trying to update param %s : %s".format(parameter.name.value, e.messageChain))
e
case Empty =>
logger.error("Error : Empty result when trying to update param %s".format(parameter.name.value))
Failure("Something unexpected happened when trying to update parameter %s".format(parameter.name.value))
case Full(diff) =>
// Ok, it's been updated, try to fetch the new value
roParamService.getGlobalParameter(parameter.name) match {
case e: EmptyBox=> e
case Full(option) =>
option match {
case Some(entry) =>
logger.debug("Successfully udated parameter %s".format(parameter.name.value))
// launch a deployement
asyncDeploymentAgent ! AutomaticStartDeployment(modId,actor)
Full(entry)
case None =>
logger.error("Could not fetch back updated global parameter with name %s".format(parameter.name.value))
Failure("Could not fetch back updated global parameter with name %s".format(parameter.name.value))
}
}
}
}
def delete(parameterName:ParameterName, modId: ModificationId, actor:EventActor, reason:Option[String]) : Box[ParameterName] = {
woParamRepo.delete(parameterName, modId, actor, reason) match {
case e:Failure =>
logger.error("Error while trying to delete param %s : %s".format(parameterName.value, e.messageChain))
e
case Empty =>
logger.error("Error : Empty result when trying to delete param %s".format(parameterName.value))
Failure("Something unexpected happened when trying to delete parameter %s".format(parameterName.value))
case Full(diff) =>
logger.debug("Successfully deleted parameter %s".format(parameterName.value))
asyncDeploymentAgent ! AutomaticStartDeployment(modId,actor)
Full(parameterName)
}
}
}
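// Hypothetical caller sketch (not part of the original Rudder sources): illustrates how a
// result from WoParameterService.saveParameter might be consumed. The service instance, the
// GlobalParameter value and the reason string are assumptions made for illustration only.
object ParameterServiceUsageSketch {
  def createAndReport(
      service   : WoParameterService
    , parameter : GlobalParameter
    , modId     : ModificationId
    , actor     : EventActor
  ) : String = {
    service.saveParameter(parameter, modId, actor, Some("created from sketch")) match {
      case Full(saved) => "Created parameter %s".format(saved.name.value)
      case f:Failure   => "Creation failed: %s".format(f.messageChain)
      case Empty       => "Creation failed for an unknown reason"
    }
  }
}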
|
Kegeruneku/rudder
|
rudder-core/src/main/scala/com/normation/rudder/services/policies/ParameterService.scala
|
Scala
|
agpl-3.0
| 8,757
|
/* ___ _ ___ _ _ *\
** / __| |/ (_) | | The SKilL Generator **
** \__ \ ' <| | | |__ (c) 2013 University of Stuttgart **
** |___/_|\_\_|_|____| see LICENSE **
\* */
package de.ust.skill.generator.scala.internal
import de.ust.skill.generator.scala.GeneralOutputMaker
trait InternalInstancePropertiesMaker extends GeneralOutputMaker {
abstract override def make {
super.make
val out = open("internal/InternalInstanceProperties.scala")
//package
out.write(s"""package ${packagePrefix}internal""")
out.write("""
/**
* properties that are required on each instance, but are not exported through the interface
*
* @author Timm Felden
*/
trait InternalInstanceProperties {
/**
* mark an instance as deleted
*/
final def delete = setSkillID(0)
/**
* checks for a deleted mark
*/
final def markedForDeletion = 0 == getSkillID
/**
* @return the ID of the instance; if -1, no ID has been assigned yet, if 0, the object will not be written to disk
*/
private[internal] def getSkillID: Int
private[internal] def setSkillID(newID: Int): Unit
}
""")
//class prefix
out.close()
}
}
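// Illustrative sketch (not emitted by this generator): the deletion-marking semantics of the
// generated InternalInstanceProperties trait, with the private[internal] access modifiers
// dropped so the example compiles standalone. The Node class is a made-up stand-in.
object InternalInstancePropertiesSketch {
  trait InstanceProperties {
    final def delete = setSkillID(0)
    final def markedForDeletion = 0 == getSkillID
    def getSkillID: Int
    def setSkillID(newID: Int): Unit
  }
  class Node(private var skillID: Int = -1) extends InstanceProperties {
    def getSkillID: Int = skillID
    def setSkillID(newID: Int): Unit = skillID = newID
  }
  def main(args: Array[String]): Unit = {
    val n = new Node(42)
    n.delete
    println(n.markedForDeletion) // prints true: a skill ID of 0 marks the object as deleted
  }
}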
|
XyzNobody/skill
|
src/main/scala/de/ust/skill/generator/scala/internal/InternalInstancePorpertiesMaker.scala
|
Scala
|
bsd-3-clause
| 1,381
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.mesos
import java.io.File
import java.util.{ArrayList => JArrayList, Collections, List => JList}
import scala.collection.JavaConverters._
import scala.collection.mutable.{HashMap, HashSet}
import org.apache.mesos.Protos.{ExecutorInfo => MesosExecutorInfo, TaskInfo => MesosTaskInfo, _}
import org.apache.mesos.SchedulerDriver
import org.apache.mesos.protobuf.ByteString
import org.apache.spark.{SparkContext, SparkException, TaskState}
import org.apache.spark.deploy.mesos.config.EXECUTOR_URI
import org.apache.spark.executor.MesosExecutorBackend
import org.apache.spark.internal.config
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.ExecutorInfo
import org.apache.spark.util.Utils
/**
* A SchedulerBackend for running fine-grained tasks on Mesos. Each Spark task is mapped to a
* separate Mesos task, allowing multiple applications to share cluster nodes both in space (tasks
* from multiple apps can run on different cores) and in time (a core can switch ownership).
*/
private[spark] class MesosFineGrainedSchedulerBackend(
scheduler: TaskSchedulerImpl,
sc: SparkContext,
master: String)
extends SchedulerBackend
with org.apache.mesos.Scheduler
with MesosSchedulerUtils {
// Stores the slave ids that have launched a Mesos executor.
val slaveIdToExecutorInfo = new HashMap[String, MesosExecutorInfo]
val taskIdToSlaveId = new HashMap[Long, String]
// An ExecutorInfo for our tasks
var execArgs: Array[Byte] = null
var classLoader: ClassLoader = null
// The listener bus to publish executor added/removed events.
val listenerBus = sc.listenerBus
private[mesos] val mesosExecutorCores = sc.conf.getDouble("spark.mesos.mesosExecutor.cores", 1)
// Offer constraints
private[this] val slaveOfferConstraints =
parseConstraintString(sc.conf.get("spark.mesos.constraints", ""))
// reject offers with mismatched constraints in seconds
private val rejectOfferDurationForUnmetConstraints =
getRejectOfferDurationForUnmetConstraints(sc.conf)
private var schedulerDriver: SchedulerDriver = _
@volatile var appId: String = _
override def start() {
classLoader = Thread.currentThread.getContextClassLoader
val driver = createSchedulerDriver(
master,
MesosFineGrainedSchedulerBackend.this,
sc.sparkUser,
sc.appName,
sc.conf,
sc.conf.getOption("spark.mesos.driver.webui.url").orElse(sc.ui.map(_.webUrl)),
Option.empty,
Option.empty,
sc.conf.getOption("spark.mesos.driver.frameworkId")
)
unsetFrameworkID(sc)
startScheduler(driver)
}
/**
* Creates a MesosExecutorInfo that is used to launch a Mesos executor.
*
* @param availableResources Available resources that are offered by Mesos
* @param execId The executor id to assign to this new executor.
* @return A tuple of the new mesos executor info and the remaining available resources.
*/
def createExecutorInfo(
availableResources: JList[Resource],
execId: String): (MesosExecutorInfo, JList[Resource]) = {
val executorSparkHome = sc.conf.getOption("spark.mesos.executor.home")
.orElse(sc.getSparkHome()) // Fall back to driver Spark home for backward compatibility
.getOrElse {
throw new SparkException("Executor Spark home `spark.mesos.executor.home` is not set!")
}
val environment = Environment.newBuilder()
sc.conf.get(config.EXECUTOR_CLASS_PATH).foreach { cp =>
environment.addVariables(
Environment.Variable.newBuilder().setName("SPARK_EXECUTOR_CLASSPATH").setValue(cp).build())
}
val extraJavaOpts = sc.conf.get(config.EXECUTOR_JAVA_OPTIONS).map {
Utils.substituteAppNExecIds(_, appId, execId)
}.getOrElse("")
val prefixEnv = sc.conf.get(config.EXECUTOR_LIBRARY_PATH).map { p =>
Utils.libraryPathEnvPrefix(Seq(p))
}.getOrElse("")
environment.addVariables(
Environment.Variable.newBuilder()
.setName("SPARK_EXECUTOR_OPTS")
.setValue(extraJavaOpts)
.build())
sc.executorEnvs.foreach { case (key, value) =>
environment.addVariables(Environment.Variable.newBuilder()
.setName(key)
.setValue(value)
.build())
}
val command = CommandInfo.newBuilder()
.setEnvironment(environment)
val uri = sc.conf.get(EXECUTOR_URI).orElse(Option(System.getenv("SPARK_EXECUTOR_URI")))
val executorBackendName = classOf[MesosExecutorBackend].getName
if (uri.isEmpty) {
val executorPath = new File(executorSparkHome, "/bin/spark-class").getPath
command.setValue(s"$prefixEnv $executorPath $executorBackendName")
} else {
// Grab everything to the first '.'. We'll use that and '*' to
// glob the directory "correctly".
val basename = uri.get.split('/').last.split('.').head
command.setValue(s"cd ${basename}*; $prefixEnv ./bin/spark-class $executorBackendName")
command.addUris(CommandInfo.URI.newBuilder().setValue(uri.get))
}
val builder = MesosExecutorInfo.newBuilder()
val (resourcesAfterCpu, usedCpuResources) =
partitionResources(availableResources, "cpus", mesosExecutorCores)
val (resourcesAfterMem, usedMemResources) =
partitionResources(resourcesAfterCpu.asJava, "mem", executorMemory(sc))
builder.addAllResources(usedCpuResources.asJava)
builder.addAllResources(usedMemResources.asJava)
sc.conf.getOption("spark.mesos.uris").foreach(setupUris(_, command))
val executorInfo = builder
.setExecutorId(ExecutorID.newBuilder().setValue(execId).build())
.setCommand(command)
.setData(ByteString.copyFrom(createExecArg()))
executorInfo.setContainer(
MesosSchedulerBackendUtil.buildContainerInfo(sc.conf))
(executorInfo.build(), resourcesAfterMem.asJava)
}
/**
* Create and serialize the executor argument to pass to Mesos. Our executor arg is an array
* containing all the spark.* system properties in the form of (String, String) pairs.
*/
private def createExecArg(): Array[Byte] = {
if (execArgs == null) {
val props = new HashMap[String, String]
for ((key, value) <- sc.conf.getAll) {
props(key) = value
}
// Serialize the map as an array of (String, String) pairs
execArgs = Utils.serialize(props.toArray)
}
execArgs
}
override def offerRescinded(d: org.apache.mesos.SchedulerDriver, o: OfferID) {}
override def registered(
driver: org.apache.mesos.SchedulerDriver,
frameworkId: FrameworkID,
masterInfo: MasterInfo) {
inClassLoader() {
appId = frameworkId.getValue
logInfo("Registered as framework ID " + appId)
this.schedulerDriver = driver
markRegistered()
}
}
private def inClassLoader()(fun: => Unit) = {
val oldClassLoader = Thread.currentThread.getContextClassLoader
Thread.currentThread.setContextClassLoader(classLoader)
try {
fun
} finally {
Thread.currentThread.setContextClassLoader(oldClassLoader)
}
}
override def disconnected(d: org.apache.mesos.SchedulerDriver) {}
override def reregistered(d: org.apache.mesos.SchedulerDriver, masterInfo: MasterInfo) {}
private def getTasksSummary(tasks: JArrayList[MesosTaskInfo]): String = {
val builder = new StringBuilder
tasks.asScala.foreach { t =>
builder.append("Task id: ").append(t.getTaskId.getValue).append("\n")
  .append("Slave id: ").append(t.getSlaveId.getValue).append("\n")
  .append("Task resources: ").append(t.getResourcesList).append("\n")
  .append("Executor resources: ").append(t.getExecutor.getResourcesList)
  .append("---------------------------------------------\n")
}
builder.toString()
}
/**
* Method called by Mesos to offer resources on slaves. We respond by asking our active task sets
* for tasks in order of priority. We fill each node with tasks in a round-robin manner so that
* tasks are balanced across the cluster.
*/
override def resourceOffers(d: org.apache.mesos.SchedulerDriver, offers: JList[Offer]) {
inClassLoader() {
// Fail first on offers with unmet constraints
val (offersMatchingConstraints, offersNotMatchingConstraints) =
offers.asScala.partition { o =>
val offerAttributes = toAttributeMap(o.getAttributesList)
val meetsConstraints =
matchesAttributeRequirements(slaveOfferConstraints, offerAttributes)
// add some debug messaging
if (!meetsConstraints) {
val id = o.getId.getValue
logDebug(s"Declining offer: $id with attributes: $offerAttributes")
}
meetsConstraints
}
// These offers do not meet constraints. We don't need to see them again.
// Decline the offer for a long period of time.
offersNotMatchingConstraints.foreach { o =>
d.declineOffer(o.getId, Filters.newBuilder()
.setRefuseSeconds(rejectOfferDurationForUnmetConstraints).build())
}
// Of the matching constraints, see which ones give us enough memory and cores
val (usableOffers, unUsableOffers) = offersMatchingConstraints.partition { o =>
val mem = getResource(o.getResourcesList, "mem")
val cpus = getResource(o.getResourcesList, "cpus")
val slaveId = o.getSlaveId.getValue
val offerAttributes = toAttributeMap(o.getAttributesList)
// check offers for
// 1. Memory requirements
// 2. CPU requirements - need at least 1 for executor, 1 for task
val meetsMemoryRequirements = mem >= executorMemory(sc)
val meetsCPURequirements = cpus >= (mesosExecutorCores + scheduler.CPUS_PER_TASK)
val meetsRequirements =
(meetsMemoryRequirements && meetsCPURequirements) ||
(slaveIdToExecutorInfo.contains(slaveId) && cpus >= scheduler.CPUS_PER_TASK)
val debugstr = if (meetsRequirements) "Accepting" else "Declining"
logDebug(s"$debugstr offer: ${o.getId.getValue} with attributes: "
+ s"$offerAttributes mem: $mem cpu: $cpus")
meetsRequirements
}
// Decline offers we ruled out immediately
unUsableOffers.foreach(o => d.declineOffer(o.getId))
val workerOffers = usableOffers.map { o =>
val cpus = if (slaveIdToExecutorInfo.contains(o.getSlaveId.getValue)) {
getResource(o.getResourcesList, "cpus").toInt
} else {
// If the Mesos executor has not been started on this slave yet, set aside a few
// cores for the Mesos executor by offering fewer cores to the Spark executor
(getResource(o.getResourcesList, "cpus") - mesosExecutorCores).toInt
}
new WorkerOffer(
o.getSlaveId.getValue,
o.getHostname,
cpus)
}.toIndexedSeq
val slaveIdToOffer = usableOffers.map(o => o.getSlaveId.getValue -> o).toMap
val slaveIdToWorkerOffer = workerOffers.map(o => o.executorId -> o).toMap
val slaveIdToResources = new HashMap[String, JList[Resource]]()
usableOffers.foreach { o =>
slaveIdToResources(o.getSlaveId.getValue) = o.getResourcesList
}
val mesosTasks = new HashMap[String, JArrayList[MesosTaskInfo]]
val slavesIdsOfAcceptedOffers = HashSet[String]()
// Call into the TaskSchedulerImpl
val acceptedOffers = scheduler.resourceOffers(workerOffers).filter(!_.isEmpty)
acceptedOffers
.foreach { offer =>
offer.foreach { taskDesc =>
val slaveId = taskDesc.executorId
slavesIdsOfAcceptedOffers += slaveId
taskIdToSlaveId(taskDesc.taskId) = slaveId
val (mesosTask, remainingResources) = createMesosTask(
taskDesc,
slaveIdToResources(slaveId),
slaveId)
mesosTasks.getOrElseUpdate(slaveId, new JArrayList[MesosTaskInfo])
.add(mesosTask)
slaveIdToResources(slaveId) = remainingResources
}
}
// Reply to the offers
val filters = Filters.newBuilder().setRefuseSeconds(1).build() // TODO: lower timeout?
mesosTasks.foreach { case (slaveId, tasks) =>
slaveIdToWorkerOffer.get(slaveId).foreach(o =>
listenerBus.post(SparkListenerExecutorAdded(System.currentTimeMillis(), slaveId,
// TODO: Add support for log urls for Mesos
new ExecutorInfo(o.host, o.cores, Map.empty)))
)
logTrace(s"Launching Mesos tasks on slave '$slaveId', tasks:\\n${getTasksSummary(tasks)}")
d.launchTasks(Collections.singleton(slaveIdToOffer(slaveId).getId), tasks, filters)
}
// Decline offers that weren't used
// NOTE: This logic assumes that we only get a single offer for each host in a given batch
for (o <- usableOffers if !slavesIdsOfAcceptedOffers.contains(o.getSlaveId.getValue)) {
d.declineOffer(o.getId)
}
}
}
/** Turn a Spark TaskDescription into a Mesos task and also resources unused by the task */
def createMesosTask(
task: TaskDescription,
resources: JList[Resource],
slaveId: String): (MesosTaskInfo, JList[Resource]) = {
val taskId = TaskID.newBuilder().setValue(task.taskId.toString).build()
val (executorInfo, remainingResources) = if (slaveIdToExecutorInfo.contains(slaveId)) {
(slaveIdToExecutorInfo(slaveId), resources)
} else {
createExecutorInfo(resources, slaveId)
}
slaveIdToExecutorInfo(slaveId) = executorInfo
val (finalResources, cpuResources) =
partitionResources(remainingResources, "cpus", scheduler.CPUS_PER_TASK)
val taskInfo = MesosTaskInfo.newBuilder()
.setTaskId(taskId)
.setSlaveId(SlaveID.newBuilder().setValue(slaveId).build())
.setExecutor(executorInfo)
.setName(task.name)
.addAllResources(cpuResources.asJava)
.setData(ByteString.copyFrom(TaskDescription.encode(task)))
.build()
(taskInfo, finalResources.asJava)
}
override def statusUpdate(d: org.apache.mesos.SchedulerDriver, status: TaskStatus) {
inClassLoader() {
val tid = status.getTaskId.getValue.toLong
val state = mesosToTaskState(status.getState)
synchronized {
if (TaskState.isFailed(mesosToTaskState(status.getState))
&& taskIdToSlaveId.contains(tid)) {
// We lost the executor on this slave, so remember that it's gone
removeExecutor(taskIdToSlaveId(tid), "Lost executor")
}
if (TaskState.isFinished(state)) {
taskIdToSlaveId.remove(tid)
}
}
scheduler.statusUpdate(tid, state, status.getData.asReadOnlyByteBuffer)
}
}
override def error(d: org.apache.mesos.SchedulerDriver, message: String) {
inClassLoader() {
logError("Mesos error: " + message)
markErr()
scheduler.error(message)
}
}
override def stop() {
if (schedulerDriver != null) {
schedulerDriver.stop()
}
}
override def reviveOffers() {
schedulerDriver.reviveOffers()
}
override def frameworkMessage(
d: org.apache.mesos.SchedulerDriver, e: ExecutorID, s: SlaveID, b: Array[Byte]) {}
/**
* Remove executor associated with slaveId in a thread safe manner.
*/
private def removeExecutor(slaveId: String, reason: String) = {
synchronized {
listenerBus.post(SparkListenerExecutorRemoved(System.currentTimeMillis(), slaveId, reason))
slaveIdToExecutorInfo -= slaveId
}
}
private def recordSlaveLost(
d: org.apache.mesos.SchedulerDriver, slaveId: SlaveID, reason: ExecutorLossReason) {
inClassLoader() {
logInfo("Mesos slave lost: " + slaveId.getValue)
removeExecutor(slaveId.getValue, reason.toString)
scheduler.executorLost(slaveId.getValue, reason)
}
}
override def slaveLost(d: org.apache.mesos.SchedulerDriver, slaveId: SlaveID) {
recordSlaveLost(d, slaveId, SlaveLost())
}
override def executorLost(
d: org.apache.mesos.SchedulerDriver, executorId: ExecutorID, slaveId: SlaveID, status: Int) {
logInfo("Executor lost: %s, marking slave %s as lost".format(executorId.getValue,
slaveId.getValue))
recordSlaveLost(d, slaveId, ExecutorExited(status, exitCausedByApp = true))
}
override def killTask(
taskId: Long, executorId: String, interruptThread: Boolean, reason: String): Unit = {
schedulerDriver.killTask(
TaskID.newBuilder()
.setValue(taskId.toString).build()
)
}
// TODO: query Mesos for number of cores
override def defaultParallelism(): Int = sc.conf.getInt("spark.default.parallelism", 8)
override def applicationId(): String =
Option(appId).getOrElse {
logWarning("Application ID is not initialized yet.")
super.applicationId
}
override def maxNumConcurrentTasks(): Int = {
// TODO SPARK-25074 support this method for MesosFineGrainedSchedulerBackend
0
}
}
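// Standalone illustration (not part of the Spark sources): the offer-acceptance rule applied
// in resourceOffers above, extracted into a pure function. All numeric values in main are
// hypothetical.
object FineGrainedOfferCheckSketch {
  def meetsRequirements(
      offeredMem: Double,
      offeredCpus: Double,
      executorMem: Int,
      mesosExecutorCores: Double,
      cpusPerTask: Int,
      executorAlreadyOnSlave: Boolean): Boolean = {
    val meetsMemoryRequirements = offeredMem >= executorMem
    val meetsCPURequirements = offeredCpus >= (mesosExecutorCores + cpusPerTask)
    // An offer is usable if it can host a fresh executor plus one task, or if an executor is
    // already running on the slave and the offer still has room for one more task.
    (meetsMemoryRequirements && meetsCPURequirements) ||
      (executorAlreadyOnSlave && offeredCpus >= cpusPerTask)
  }
  def main(args: Array[String]): Unit = {
    // 4 CPUs / 4 GB offered, 2 GB executor not yet running on the slave: accepted.
    println(meetsRequirements(4096, 4.0, 2048, 1.0, 1, executorAlreadyOnSlave = false)) // true
    // Only 1 CPU offered and no executor running yet: declined.
    println(meetsRequirements(4096, 1.0, 2048, 1.0, 1, executorAlreadyOnSlave = false)) // false
  }
}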
|
hhbyyh/spark
|
resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackend.scala
|
Scala
|
apache-2.0
| 17,837
|
/*
* Copyright 2016 Coursera Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.coursera.naptime.router2
import com.typesafe.scalalogging.StrictLogging
import org.coursera.common.stringkey.StringKey
import org.coursera.common.stringkey.StringKeyFormat
import org.coursera.naptime.path.ParseFailure
import org.coursera.naptime.path.ParseSuccess
import org.coursera.naptime.resources.CollectionResource
import play.api.http.Status
import play.api.libs.json.Json
import play.api.mvc.Action
import play.api.mvc.BodyParser
import play.api.mvc.BodyParsers
import play.api.mvc.Request
import play.api.mvc.RequestHeader
import play.api.mvc.RequestTaggingHandler
import play.api.mvc.Result
import play.api.mvc.Results
import scala.concurrent.Future
import scala.language.existentials
class NestingCollectionResourceRouter[CollectionResourceType <: CollectionResource[_, _, _]] (
val resourceInstance: CollectionResourceType)
extends ResourceRouter with StrictLogging {
override type ResourceClass = CollectionResourceType
/**
* Helper method to convert a path key to the ancestor keys.
*
* @param pathKey The path key to convert.
*/
protected[this] def pathToAncestor(
pathKey: resourceInstance.PathKey): resourceInstance.AncestorKeys = {
pathKey.tail
}
/**
* Helper method to convert an opt path key to the ancestor keys.
*
* @param pathKey The opt path key to convert.
* @return
*/
protected[this] def optPathToAncestor(
pathKey: resourceInstance.OptPathKey): resourceInstance.AncestorKeys = {
pathKey.tail
}
/**
* Constructs a Map used to tag the request.
*
* Note: because of a limitation of the mocking framework, this code gracefully handles when
* [[resourceInstance.getClass]] returns null.
*
* @param methodName The name of the scala method invoked to handle this request.
* @return a map of request tags identifying the resource class and the handling method.
*/
protected[this] def mkRequestTags(methodName: String): Map[String, String] = {
Map(
Router.NAPTIME_RESOURCE_NAME ->
Option(resourceInstance.getClass).map(_.getName).getOrElse("nullClass"),
Router.NAPTIME_METHOD_NAME -> methodName)
}
override def routeRequest(path: String, requestHeader: RequestHeader): Option[RouteAction] = {
resourceInstance.optParse(path) match {
case ParseFailure | ParseSuccess(Some(_), _) =>
None // This request is not for us.
case ParseSuccess(None, pathKeyOpt) =>
// If the head of the list is defined, convert to the PathKey, else remain as an OptPathKey
// Note: we centralize here the casting required to get the compiler to believe us.
val pathKey: Either[resourceInstance.OptPathKey, resourceInstance.PathKey] =
if (pathKeyOpt.head.isDefined) {
Right((pathKeyOpt.head.get ::: pathKeyOpt.tail).asInstanceOf[resourceInstance.PathKey])
} else {
Left(pathKeyOpt)
}
Some(buildHandler(requestHeader, pathKey))
case null => // Test mocking error.
logger.error(s"Match error routing request $requestHeader with resource $resourceInstance")
throw new MatchError(null)
}
}
private[this] def buildHandler(
requestHeader: RequestHeader,
pathKey: Either[resourceInstance.OptPathKey, resourceInstance.PathKey]): RouteAction = {
requestHeader.method match {
case "GET" => buildGetHandler(requestHeader, pathKey)
case "POST" => buildPostHandler(requestHeader, pathKey)
case "PUT" => buildPutHandler(requestHeader, pathKey)
case "DELETE" => buildDeleteHandler(requestHeader, pathKey)
case "PATCH" => buildPatchHandler(requestHeader, pathKey)
case unknown: String => errorRoute(s"Unknown HTTP method '$unknown'.")
}
}
private[this] def buildGetHandler(
requestHeader: RequestHeader,
pathKey: Either[resourceInstance.OptPathKey, resourceInstance.PathKey]): RouteAction = {
if (pathKey.isRight) {
executeGet(requestHeader, pathKey.right.get)
} else {
val optPathKey = pathKey.left.get
requestHeader.queryString.get("q").map { queryStr =>
if (queryStr.isEmpty) {
errorRoute("Must provide a finder name.")
} else if (queryStr.length != 1) {
errorRoute("Must provide only one finder name.")
} else {
executeFinder(requestHeader, optPathKey, queryStr.head)
}
}.getOrElse {
requestHeader.queryString.get("ids").map { queryStr =>
if (queryStr.isEmpty) {
errorRoute("Must provide an 'ids' query parameter.")
} else if (queryStr.length != 1) {
errorRoute("Must provide only one 'ids' query parameter.")
} else {
val idsOrError = parseIds[resourceInstance.KeyType](
queryStr.head,
resourceInstance.keyFormat.stringKeyFormat)
idsOrError.right.map { ids =>
// Note: we have to cast to get the Scala compiler to believe us, even though
// Intellij sees this cast as redundant.
executeMultiGet(
requestHeader,
optPathKey,
ids.asInstanceOf[Set[resourceInstance.KeyType]])
}.merge
}
}.getOrElse {
executeGetAll(requestHeader, optPathKey)
}
}
}
}
private[this] def buildPostHandler(
requestHeader: RequestHeader,
pathKey: Either[resourceInstance.OptPathKey, resourceInstance.PathKey]): RouteAction = {
if (pathKey.isLeft) {
requestHeader.queryString.get("action").map { queryStr =>
if (queryStr.isEmpty) {
errorRoute("Must provide an action name.")
} else if (queryStr.length == 1) {
executeAction(requestHeader, pathKey.left.get, queryStr.head)
} else {
errorRoute("Must provide only one action name.")
}
}.getOrElse {
executeCreate(requestHeader, pathKey.left.get)
}
} else {
errorRoute("Post only to the collection resource, not individual elements.")
}
}
private[this] def buildPutHandler(
requestHeader: RequestHeader,
pathKey: Either[resourceInstance.OptPathKey, resourceInstance.PathKey]): RouteAction = {
pathKey.right.toOption.map { pathKey =>
executePut(requestHeader, pathKey)
}.getOrElse {
idRequired
}
}
private[this] def buildDeleteHandler(
requestHeader: RequestHeader,
pathKey: Either[resourceInstance.OptPathKey, resourceInstance.PathKey]): RouteAction = {
pathKey.right.toOption.map { pathKey =>
executeDelete(requestHeader, pathKey)
}.getOrElse {
idRequired
}
}
private[this] def buildPatchHandler(
requestHeader: RequestHeader,
pathKey: Either[resourceInstance.OptPathKey, resourceInstance.PathKey]): RouteAction = {
pathKey.right.toOption.map { pathKey =>
executePatch(requestHeader, pathKey)
}.getOrElse {
idRequired
}
}
protected[this] def executeGet(
requestHeader: RequestHeader,
pathKey: resourceInstance.PathKey): RouteAction = {
errorRoute("'get' not implemented")
}
protected[this] def executeMultiGet(
requestHeader: RequestHeader,
optPathKey: resourceInstance.OptPathKey,
ids: Set[resourceInstance.KeyType]): RouteAction = {
errorRoute("'multi-get' not implemented")
}
protected[this] def executeGetAll(
requestHeader: RequestHeader,
optPathKey: resourceInstance.OptPathKey): RouteAction = {
errorRoute("'get-all' not implemented")
}
protected[this] def executeFinder(
requestHeader: RequestHeader,
optPathKey: resourceInstance.OptPathKey,
finderName: String): RouteAction = {
// TODO(saeta): watch out for injection attacks!
errorRoute(s"finder '$finderName' not implemented")
}
protected[this] def executeCreate(
requestHeader: RequestHeader,
optPathKey: resourceInstance.OptPathKey): RouteAction = {
errorRoute("'create' not implemented")
}
protected[this] def executePut(
requestHeader: RequestHeader,
pathKey: resourceInstance.PathKey): RouteAction = {
errorRoute("'put' not implemented")
}
protected[this] def executeDelete(
requestHeader: RequestHeader,
pathKey: resourceInstance.PathKey): RouteAction = {
errorRoute("'delete' not implemented")
}
protected[this] def executePatch(
requestHeader: RequestHeader,
pathKey: resourceInstance.PathKey): RouteAction = {
errorRoute("'patch' not implemented")
}
protected[this] def executeAction(
requestHeader: RequestHeader,
optPathKey: resourceInstance.OptPathKey,
actionName: String): RouteAction = {
// TODO(saeta): watch out for injection attacks!
errorRoute(s"action '$actionName' not implemented")
}
// TODO(saeta): Support populating the Allow header for more useful error responses.
protected[this] def errorRoute(
msg: String,
statusCode: Int = Status.METHOD_NOT_ALLOWED): RouteAction =
NestingCollectionResourceRouter.errorRoute(resourceInstance.getClass, msg, statusCode)
/**
* Helper function to parse ids.
*
* It is not private[this] for testing purposes.
*
* @param queryString The query string to parse into ids.
* @param parser The string key format for the key type [[T]]
* @tparam T The type of keys we are parsing. Note: we use a type parameter to help the scala
* compiler correctly infer the types.
* @return either a Left(error) or a Right(Set(ids))
*/
private[naptime] def parseIds[T](
queryString: String,
parser: StringKeyFormat[T]): Either[RouteAction, Set[T]] = {
var error: Option[RouteAction] = None
// TODO(saeta): check length of idStrings to make sure it's not too long. (Potential DoS.)
val idStrings = queryString.split("(?<!\\\\),")
val ids = idStrings.flatMap { idStr =>
val parsed = parser.reads(StringKey(idStr))
if (parsed.isEmpty) {
error = Some(errorRoute(s"Could not parse key '$idStr'")) // TODO: truncate if too long.
}
parsed
}.toSet
error.toLeft(ids)
}
private[this] val idRequired = errorRoute("Requires ID in path as a path parameter.")
}
object NestingCollectionResourceRouter {
private[naptime] def errorRoute(
resourceClass: Class[_],
msg: String,
statusCode: Int = Status.BAD_REQUEST): RouteAction = {
new Action[Unit] with RequestTaggingHandler {
override def parser: BodyParser[Unit] = BodyParsers.parse.empty
override def apply(request: Request[Unit]): Future[Result] = {
// TODO(saeta): use standardized error response format.
Future.successful(Results.Status(statusCode)(Json.obj("msg" -> s"Routing error: $msg")))
}
override def tagRequest(request: RequestHeader): RequestHeader =
request.copy(tags = request.tags + (Router.NAPTIME_RESOURCE_NAME -> resourceClass.getName))
}
}
}
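// Illustrative sketch (not part of the Naptime sources): behaviour of the negative-lookbehind
// split used by parseIds above, which keeps escaped commas inside a single id. The query
// string below is hypothetical.
object ParseIdsSplitSketch {
  def main(args: Array[String]): Unit = {
    val queryString = """a,b\,c,d"""
    // Split on commas that are not preceded by a backslash.
    val idStrings = queryString.split("""(?<!\\),""")
    println(idStrings.toList) // List(a, b\,c, d)
  }
}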
|
vkuo-coursera/naptime
|
naptime/src/main/scala/org/coursera/naptime/router2/NestingCollectionResourceRouter.scala
|
Scala
|
apache-2.0
| 11,525
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.lang.management.ManagementFactory
import java.lang.reflect.{Field, Modifier}
import java.util.{IdentityHashMap, Random}
import scala.collection.mutable.ArrayBuffer
import scala.runtime.ScalaRunTime
import com.google.common.collect.MapMaker
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.Tests.TEST_USE_COMPRESSED_OOPS_KEY
import org.apache.spark.util.collection.OpenHashSet
/**
* A trait that allows a class to give [[SizeEstimator]] more accurate size estimation.
* When a class extends it, [[SizeEstimator]] will query `estimatedSize` first and use the
* returned value as the size of the object, instead of doing its own estimation work.
* The difference between a [[KnownSizeEstimation]] and
* [[org.apache.spark.util.collection.SizeTracker]] is that, a
* [[org.apache.spark.util.collection.SizeTracker]] still uses [[SizeEstimator]] to
* estimate the size. However, a [[KnownSizeEstimation]] can provide a better estimation without
* using [[SizeEstimator]].
*/
private[spark] trait KnownSizeEstimation {
def estimatedSize: Long
}
/**
* :: DeveloperApi ::
* Estimates the sizes of Java objects (number of bytes of memory they occupy), for use in
* memory-aware caches.
*
* Based on the following JavaWorld article:
* http://www.javaworld.com/javaworld/javaqa/2003-12/02-qa-1226-sizeof.html
*/
@DeveloperApi
object SizeEstimator extends Logging {
/**
* Estimate the number of bytes that the given object takes up on the JVM heap. The estimate
* includes space taken up by objects referenced by the given object, their references, and so on
* and so forth.
*
* This is useful for determining the amount of heap space a broadcast variable will occupy on
* each executor or the amount of space each object will take when caching objects in
* deserialized form. This is not the same as the serialized size of the object, which will
* typically be much smaller.
*/
def estimate(obj: AnyRef): Long = estimate(obj, new IdentityHashMap[AnyRef, AnyRef])
// Sizes of primitive types
private val BYTE_SIZE = 1
private val BOOLEAN_SIZE = 1
private val CHAR_SIZE = 2
private val SHORT_SIZE = 2
private val INT_SIZE = 4
private val LONG_SIZE = 8
private val FLOAT_SIZE = 4
private val DOUBLE_SIZE = 8
// Fields can be primitive types, sizes are: 1, 2, 4, 8. Or fields can be pointers. The size of
// a pointer is 4 or 8 depending on the JVM (32-bit or 64-bit) and UseCompressedOops flag.
// The sizes should be in descending order, as we will use that information for fields placement.
private val fieldSizes = List(8, 4, 2, 1)
// Alignment boundary for objects
// TODO: Is this arch dependent ?
private val ALIGN_SIZE = 8
// A cache of ClassInfo objects for each class
// We use weakKeys to allow GC of dynamically created classes
private val classInfos = new MapMaker().weakKeys().makeMap[Class[_], ClassInfo]()
// Object and pointer sizes are arch dependent
private var is64bit = false
// Size of an object reference
// Based on https://wikis.oracle.com/display/HotSpotInternals/CompressedOops
private var isCompressedOops = false
private var pointerSize = 4
// Minimum size of a java.lang.Object
private var objectSize = 8
initialize()
// Sets object size, pointer size based on architecture and CompressedOops settings
// from the JVM.
private def initialize(): Unit = {
val arch = System.getProperty("os.arch")
is64bit = arch.contains("64") || arch.contains("s390x")
isCompressedOops = getIsCompressedOops
objectSize = if (!is64bit) 8 else {
if (!isCompressedOops) {
16
} else {
12
}
}
pointerSize = if (is64bit && !isCompressedOops) 8 else 4
classInfos.clear()
classInfos.put(classOf[Object], new ClassInfo(objectSize, Nil))
}
private def getIsCompressedOops: Boolean = {
// This is only used by tests to override the detection of compressed oops. The test
// actually uses a system property instead of a SparkConf, so we'll stick with that.
if (System.getProperty(TEST_USE_COMPRESSED_OOPS_KEY) != null) {
return System.getProperty(TEST_USE_COMPRESSED_OOPS_KEY).toBoolean
}
// java.vm.info provides compressed ref info for IBM and OpenJ9 JDKs
val javaVendor = System.getProperty("java.vendor")
if (javaVendor.contains("IBM") || javaVendor.contains("OpenJ9")) {
return System.getProperty("java.vm.info").contains("Compressed Ref")
}
try {
val hotSpotMBeanName = "com.sun.management:type=HotSpotDiagnostic"
val server = ManagementFactory.getPlatformMBeanServer()
// NOTE: This should throw an exception in non-Sun JVMs
// scalastyle:off classforname
val hotSpotMBeanClass = Class.forName("com.sun.management.HotSpotDiagnosticMXBean")
val getVMMethod = hotSpotMBeanClass.getDeclaredMethod("getVMOption",
Class.forName("java.lang.String"))
// scalastyle:on classforname
val bean = ManagementFactory.newPlatformMXBeanProxy(server,
hotSpotMBeanName, hotSpotMBeanClass)
// TODO: We could use reflection on the VMOption returned ?
getVMMethod.invoke(bean, "UseCompressedOops").toString.contains("true")
} catch {
case e: Exception =>
// Guess whether they've enabled UseCompressedOops based on whether maxMemory < 32 GB
val guess = Runtime.getRuntime.maxMemory < (32L*1024*1024*1024)
val guessInWords = if (guess) "yes" else "not"
logWarning("Failed to check whether UseCompressedOops is set; assuming " + guessInWords)
return guess
}
}
/**
* The state of an ongoing size estimation. Contains a stack of objects to visit as well as an
* IdentityHashMap of visited objects, and provides utility methods for enqueueing new objects
* to visit.
*/
private class SearchState(val visited: IdentityHashMap[AnyRef, AnyRef]) {
val stack = new ArrayBuffer[AnyRef]
var size = 0L
def enqueue(obj: AnyRef): Unit = {
if (obj != null && !visited.containsKey(obj)) {
visited.put(obj, null)
stack += obj
}
}
def isFinished(): Boolean = stack.isEmpty
def dequeue(): AnyRef = {
val elem = stack.last
stack.trimEnd(1)
elem
}
}
/**
* Cached information about each class. We remember two things: the "shell size" of the class
* (size of all non-static fields plus the java.lang.Object size), and any fields that are
* pointers to objects.
*/
private class ClassInfo(
val shellSize: Long,
val pointerFields: List[Field]) {}
private def estimate(obj: AnyRef, visited: IdentityHashMap[AnyRef, AnyRef]): Long = {
val state = new SearchState(visited)
state.enqueue(obj)
while (!state.isFinished) {
visitSingleObject(state.dequeue(), state)
}
state.size
}
private def visitSingleObject(obj: AnyRef, state: SearchState): Unit = {
val cls = obj.getClass
if (cls.isArray) {
visitArray(obj, cls, state)
} else if (cls.getName.startsWith("scala.reflect")) {
// Many objects in the scala.reflect package reference global reflection objects which, in
// turn, reference many other large global objects. Do nothing in this case.
} else if (obj.isInstanceOf[ClassLoader] || obj.isInstanceOf[Class[_]]) {
// Hadoop JobConfs created in the interpreter have a ClassLoader, which greatly confuses
// the size estimator since it references the whole REPL. Do nothing in this case. In
// general all ClassLoaders and Classes will be shared between objects anyway.
} else {
obj match {
case s: KnownSizeEstimation =>
state.size += s.estimatedSize
case _ =>
val classInfo = getClassInfo(cls)
state.size += alignSize(classInfo.shellSize)
for (field <- classInfo.pointerFields) {
state.enqueue(field.get(obj))
}
}
}
}
// Estimate the size of arrays larger than ARRAY_SIZE_FOR_SAMPLING by sampling.
private val ARRAY_SIZE_FOR_SAMPLING = 400
private val ARRAY_SAMPLE_SIZE = 100 // should be lower than ARRAY_SIZE_FOR_SAMPLING
private def visitArray(array: AnyRef, arrayClass: Class[_], state: SearchState): Unit = {
val length = ScalaRunTime.array_length(array)
val elementClass = arrayClass.getComponentType()
// Arrays have object header and length field which is an integer
var arrSize: Long = alignSize(objectSize + INT_SIZE)
if (elementClass.isPrimitive) {
arrSize += alignSize(length.toLong * primitiveSize(elementClass))
state.size += arrSize
} else {
arrSize += alignSize(length.toLong * pointerSize)
state.size += arrSize
if (length <= ARRAY_SIZE_FOR_SAMPLING) {
var arrayIndex = 0
while (arrayIndex < length) {
state.enqueue(ScalaRunTime.array_apply(array, arrayIndex).asInstanceOf[AnyRef])
arrayIndex += 1
}
} else {
// Estimate the size of a large array by sampling elements without replacement.
// To exclude the shared objects that the array elements may link, sample twice
// and use the min one to calculate array size.
val rand = new Random(42)
val drawn = new OpenHashSet[Int](2 * ARRAY_SAMPLE_SIZE)
val s1 = sampleArray(array, state, rand, drawn, length)
val s2 = sampleArray(array, state, rand, drawn, length)
val size = math.min(s1, s2)
state.size += math.max(s1, s2) +
(size * ((length - ARRAY_SAMPLE_SIZE) / (ARRAY_SAMPLE_SIZE))).toLong
}
}
}
private def sampleArray(
array: AnyRef,
state: SearchState,
rand: Random,
drawn: OpenHashSet[Int],
length: Int): Long = {
var size = 0L
for (i <- 0 until ARRAY_SAMPLE_SIZE) {
var index = 0
do {
index = rand.nextInt(length)
} while (drawn.contains(index))
drawn.add(index)
val obj = ScalaRunTime.array_apply(array, index).asInstanceOf[AnyRef]
if (obj != null) {
size += SizeEstimator.estimate(obj, state.visited).toLong
}
}
size
}
private def primitiveSize(cls: Class[_]): Int = {
if (cls == classOf[Byte]) {
BYTE_SIZE
} else if (cls == classOf[Boolean]) {
BOOLEAN_SIZE
} else if (cls == classOf[Char]) {
CHAR_SIZE
} else if (cls == classOf[Short]) {
SHORT_SIZE
} else if (cls == classOf[Int]) {
INT_SIZE
} else if (cls == classOf[Long]) {
LONG_SIZE
} else if (cls == classOf[Float]) {
FLOAT_SIZE
} else if (cls == classOf[Double]) {
DOUBLE_SIZE
} else {
throw new IllegalArgumentException(
"Non-primitive class " + cls + " passed to primitiveSize()")
}
}
/**
* Get or compute the ClassInfo for a given class.
*/
private def getClassInfo(cls: Class[_]): ClassInfo = {
// Check whether we've already cached a ClassInfo for this class
val info = classInfos.get(cls)
if (info != null) {
return info
}
val parent = getClassInfo(cls.getSuperclass)
var shellSize = parent.shellSize
var pointerFields = parent.pointerFields
val sizeCount = Array.fill(fieldSizes.max + 1)(0)
// iterate through the fields of this class and gather information.
for (field <- cls.getDeclaredFields) {
if (!Modifier.isStatic(field.getModifiers)) {
val fieldClass = field.getType
if (fieldClass.isPrimitive) {
sizeCount(primitiveSize(fieldClass)) += 1
} else {
// Note: in Java 9+ this would be better with trySetAccessible and canAccess
try {
field.setAccessible(true) // Enable future get()'s on this field
pointerFields = field :: pointerFields
} catch {
// If the field isn't accessible, we can still record the pointer size
// but can't know more about the field, so ignore it
case _: SecurityException =>
// do nothing
// Java 9+ can throw InaccessibleObjectException but the class is Java 9+-only
case re: RuntimeException
if re.getClass.getSimpleName == "InaccessibleObjectException" =>
// do nothing
}
sizeCount(pointerSize) += 1
}
}
}
// Based on the simulated field layout code in Aleksey Shipilev's report:
// http://cr.openjdk.java.net/~shade/papers/2013-shipilev-fieldlayout-latest.pdf
// The code is in Figure 9.
// The simplified idea of field layout consists of 4 parts (see more details in the report):
//
// 1. field alignment: HotSpot lays out the fields aligned by their size.
// 2. object alignment: HotSpot rounds instance size up to 8 bytes
// 3. consistent fields layouts throughout the hierarchy: This means we should layout
// superclass first. And we can use superclass's shellSize as a starting point to layout the
// other fields in this class.
// 4. class alignment: HotSpot rounds field blocks up to HeapOopSize not 4 bytes, confirmed
// with Aleksey. see https://bugs.openjdk.java.net/browse/CODETOOLS-7901322
//
// The real world field layout is much more complicated. There are three kinds of fields
// order in Java 8. And we don't consider the @contended annotation introduced by Java 8.
// see the HotSpot classloader code, layout_fields method for more details.
// hg.openjdk.java.net/jdk8/jdk8/hotspot/file/tip/src/share/vm/classfile/classFileParser.cpp
var alignedSize = shellSize
for (size <- fieldSizes if sizeCount(size) > 0) {
val count = sizeCount(size).toLong
// If there are internal gaps, smaller field can fit in.
alignedSize = math.max(alignedSize, alignSizeUp(shellSize, size) + size * count)
shellSize += size * count
}
// Choose the larger size to be the new shellSize (clearly alignedSize >= shellSize), and
// round up the instance field blocks
shellSize = alignSizeUp(alignedSize, pointerSize)
// Create and cache a new ClassInfo
val newInfo = new ClassInfo(shellSize, pointerFields)
classInfos.put(cls, newInfo)
newInfo
}
private def alignSize(size: Long): Long = alignSizeUp(size, ALIGN_SIZE)
/**
* Compute the aligned size. alignSize must be a power of two (2^n), otherwise the result is
* wrong. When alignSize = 2^n, alignSize - 1 = 2^n - 1, whose binary representation has only
* n trailing 1s (0b00...001...1), so ~(alignSize - 1) is 0b11...110...0. Hence,
* (size + alignSize - 1) & ~(alignSize - 1) sets the last n bits to zero, which yields a
* multiple of alignSize.
*/
private def alignSizeUp(size: Long, alignSize: Int): Long =
(size + alignSize - 1) & ~(alignSize - 1)
}
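// Worked example (not part of the Spark sources): the bit trick used by alignSizeUp above,
// reproduced standalone with hypothetical inputs.
object AlignSizeUpSketch {
  private def alignSizeUp(size: Long, alignSize: Int): Long =
    (size + alignSize - 1) & ~(alignSize - 1)
  def main(args: Array[String]): Unit = {
    // alignSize = 8, so ~(8 - 1) = ...11111000 clears the three lowest bits.
    println(alignSizeUp(13, 8)) // 16: 13 + 7 = 20 = 0b10100, masked down to 0b10000
    println(alignSizeUp(16, 8)) // 16: already a multiple of 8, left unchanged
  }
}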
|
rezasafi/spark
|
core/src/main/scala/org/apache/spark/util/SizeEstimator.scala
|
Scala
|
apache-2.0
| 15,825
|
/*
* Copyright 2015, by Vladimir Kostyukov and Contributors.
*
* This file is a part of a Finch library that may be found at
*
* https://github.com/finagle/finch
*
* Licensed under the Apache License, Version 2.0 (the "License");
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributor(s): -
*/
package io.finch
import io.finch.request.ToRequest
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicLong
import scala.collection.JavaConverters._
import com.twitter.util.Future
/**
* The ''demo'' project shows the usage of Finch's basic blocks for building a purely functional REST API backend
* emulating a set of services working with ''users'' and their ''tickets'' (i.e., cinema tickets).
*
* The following packages represent the backend:
*
* - [[demo.model]] - domain model classes: `User` and `Ticket`
* - [[demo.reader]] - [[io.finch.request.RequestReader]]s for models
* - [[demo.service]] - the application services
* - [[demo.endpoint]] - [[io.finch.route.Router]]s for services (endpoints)
*/
package demo {
import com.twitter.finagle.httpx.Request
import model._
// A custom request type that wraps a `Request`.
// We prefer composition over inheritance.
case class AuthRequest(http: Request)
object AuthRequest {
implicit val toRequest: ToRequest[AuthRequest] =
ToRequest[AuthRequest](_.http)
}
// A thread-safe id generator.
object Id {
private val self = new AtomicLong(0)
def apply(): Long = self.getAndIncrement
}
// An abstraction that represents an async interface to a database.
object Db {
// An underlying map.
private val map = new ConcurrentHashMap[Long, User]().asScala
def select(id: Long): Future[Option[User]] = map.get(id).toFuture
def all: Future[List[User]] = map.values.toList.toFuture
def insert(id: Long, u: User): Future[User] = {
map += (id -> u)
u.toFuture
}
}
}
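// Standalone sketch (hypothetical, not part of the demo): the same "async facade over a
// concurrent map" pattern as Db above, written with explicit Twitter Futures instead of the
// demo's toFuture syntax, so it compiles without the io.finch implicits.
object DbSketch {
  import java.util.concurrent.ConcurrentHashMap
  import scala.collection.JavaConverters._
  import com.twitter.util.Future

  private val map = new ConcurrentHashMap[Long, String]().asScala

  def select(id: Long): Future[Option[String]] = Future.value(map.get(id))
  def insert(id: Long, value: String): Future[String] = {
    map += (id -> value)
    Future.value(value)
  }
}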
|
penland365/finch
|
demo/src/main/scala/io/finch/demo/package.scala
|
Scala
|
apache-2.0
| 2,397
|
import _root_.io.gatling.core.scenario.Simulation
import ch.qos.logback.classic.{Level, LoggerContext}
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
/**
* Performance test for the Country entity.
*/
class CountryGatlingTest extends Simulation {
val context: LoggerContext = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext]
// Log all HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("TRACE"))
// Log failed HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("DEBUG"))
val baseURL = Option(System.getProperty("baseURL")) getOrElse """http://127.0.0.1:8080"""
val httpConf = http
.baseURL(baseURL)
.inferHtmlResources()
.acceptHeader("*/*")
.acceptEncodingHeader("gzip, deflate")
.acceptLanguageHeader("fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3")
.connection("keep-alive")
.userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:33.0) Gecko/20100101 Firefox/33.0")
val headers_http = Map(
"Accept" -> """application/json"""
)
val headers_http_authenticated = Map(
"Accept" -> """application/json""",
"X-CSRF-TOKEN" -> "${csrf_token}"
)
val scn = scenario("Test the Country entity")
.exec(http("First unauthenticated request")
.get("/api/account")
.headers(headers_http)
.check(status.is(401))
.check(headerRegex("Set-Cookie", "CSRF-TOKEN=(.*); [P,p]ath=/").saveAs("csrf_token")))
.pause(10)
.exec(http("Authentication")
.post("/api/authentication")
.headers(headers_http_authenticated)
.formParam("j_username", "admin")
.formParam("j_password", "admin")
.formParam("remember-me", "true")
.formParam("submit", "Login"))
.pause(1)
.exec(http("Authenticated request")
.get("/api/account")
.headers(headers_http_authenticated)
.check(status.is(200))
.check(headerRegex("Set-Cookie", "CSRF-TOKEN=(.*); [P,p]ath=/").saveAs("csrf_token")))
.pause(10)
.repeat(2) {
exec(http("Get all countrys")
.get("/api/countrys")
.headers(headers_http_authenticated)
.check(status.is(200)))
.pause(10 seconds, 20 seconds)
.exec(http("Create new country")
.post("/api/countrys")
.headers(headers_http_authenticated)
.body(StringBody("""{"id":null, "code":"SAMPLE_TEXT", "name":"SAMPLE_TEXT"}""")).asJSON
.check(status.is(201))
.check(headerRegex("Location", "(.*)").saveAs("new_country_url")))
.pause(10)
.repeat(5) {
exec(http("Get created country")
.get("${new_country_url}")
.headers(headers_http_authenticated))
.pause(10)
}
.exec(http("Delete created country")
.delete("${new_country_url}")
.headers(headers_http_authenticated))
.pause(10)
}
val users = scenario("Users").exec(scn)
setUp(
users.inject(rampUsers(100) over (1 minutes))
).protocols(httpConf)
}
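// Illustrative sketch (not part of the test): the Set-Cookie pattern used above by
// headerRegex to capture the CSRF token, exercised against a hypothetical header value.
object CsrfTokenRegexSketch {
  def main(args: Array[String]): Unit = {
    val header = "CSRF-TOKEN=abc123; Path=/"
    val pattern = "CSRF-TOKEN=(.*); [P,p]ath=/".r
    println(pattern.findFirstMatchIn(header).map(_.group(1))) // Some(abc123)
  }
}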
|
sandor-balazs/nosql-java
|
oracle/src/test/gatling/simulations/CountryGatlingTest.scala
|
Scala
|
bsd-2-clause
| 3,308
|
package com.twitter.rowz
import com.twitter.util.Time
object RowState extends Enumeration {
val Normal, Destroyed = Value
}
case class Row(id: Long, name: String, createdAt: Time, updatedAt: Time, state: RowState.Value)
|
twitter/Rowz
|
src/main/scala/com/twitter/rowz/Row.scala
|
Scala
|
apache-2.0
| 225
|
package com.jejking.rprng.api
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.model.HttpEntity.{Chunk, Chunked}
import akka.http.scaladsl.model.{ContentTypes, HttpResponse, StatusCodes}
import akka.http.scaladsl.server.ValidationRejection
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.stream.scaladsl.Source
import akka.util.ByteString
import com.jejking.rprng.rng._
import org.scalamock.scalatest.MockFactory
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import scala.concurrent.Future
/**
* Tests for Routes at HTTP API level.
*/
class RoutesSpec extends AnyFlatSpec with Matchers with ScalaFutures with ScalatestRouteTest with MockFactory with SprayJsonSupport {
import RandomIntegerCollectionResponseProtocol.format
val oneKb = TestUtils.byteStringOfZeroes(1024)
val twoKb = oneKb ++ oneKb
"/byte/block" should "return 1024 'random' bytes" in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForByteBlock _).expects(1024).returning(Future.successful(HttpResponse(entity = oneKb)))
val routes = new Routes(mockStreamsHelper)
Get("/byte/block") ~> routes.byteRoute ~> check {
responseAs[ByteString] shouldEqual oneKb
}
}
"/byte/block/" should "also return 1024 'random' bytes" in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForByteBlock _).expects(1024).returning(Future.successful(HttpResponse(entity = oneKb)))
val routes = new Routes(mockStreamsHelper)
Get("/byte/block/") ~> routes.byteRoute ~> check {
responseAs[ByteString] shouldEqual oneKb
}
}
"/byte/block/2048" should "return 2048 'random' bytes" in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForByteBlock _).expects(2048).returning(Future.successful(HttpResponse(entity = twoKb)))
val routes = new Routes(mockStreamsHelper)
Get("/byte/block/2048") ~> routes.byteRoute ~> check {
responseAs[ByteString] shouldEqual twoKb
}
}
"/byte/block/0" should "result in a ValidationRejection" in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForByteBlock _).expects(*).never()
val routes = new Routes(mockStreamsHelper)
Get("/byte/block/0") ~> routes.byteRoute ~> check {
rejection shouldBe a [ValidationRejection]
}
}
"/byte/block/forty-two" should "be rejected (not matched)" in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForByteBlock _).expects(*).never()
val routes = new Routes(mockStreamsHelper)
Get("/byte/block/forty-two") ~> routes.byteRoute ~> check {
handled shouldBe false
}
}
"/byte/block/-64" should "be rejected" in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForByteBlock _).expects(*).never()
val routes = new Routes(mockStreamsHelper)
Get("/byte/block/-64") ~> routes.byteRoute ~> check {
handled shouldBe false
}
}
"/byte/block/" + (Integer.MAX_VALUE + 1L) should "be rejected" in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForByteBlock _).expects(*).never()
val routes = new Routes(mockStreamsHelper)
Get("/byte/block/" + (Integer.MAX_VALUE + 1L)) ~> routes.byteRoute ~> check {
handled shouldBe false
}
}
"/byte/stream" should "deliver a chunked response" in {
val httpResponse = HttpResponse(StatusCodes.OK).withEntity(Chunked(ContentTypes.`application/octet-stream`, Source.single(Chunk(oneKb))))
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForByteStream _).expects(1024).returning(httpResponse)
val routes = new Routes(mockStreamsHelper)
Get("/byte/stream") ~> routes.byteRoute ~> check {
responseEntity.isChunked() shouldBe true
whenReady(responseEntity.dataBytes.runFold(ByteString.empty)((acc, in) => acc ++ in)) {
bs => bs shouldBe oneKb
}
}
}
"/int/list" should "request 1 list of 100 ints between " + Int.MinValue + " and " + Int.MaxValue in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForIntegerCollection _)
.expects(RandomIntegerCollectionRequest(RandomList))
.returning(Future.successful(RandomIntegerCollectionResponse(List(1 to 100))))
val routes = new Routes(mockStreamsHelper)
Get("/int/list") ~> routes.intRoute ~> check {
handled shouldBe true
val resp: RandomIntegerCollectionResponse = responseAs[RandomIntegerCollectionResponse]
resp.content should have size 1
resp.content.head should have size 100
}
}
"/int/list?size=10" should "deliver 1 list of 10 ints between " + Int.MinValue + " and " + Int.MaxValue in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForIntegerCollection _)
.expects(RandomIntegerCollectionRequest(RandomList, size = 10))
.returning(Future.successful(RandomIntegerCollectionResponse(List(1 to 10))))
val routes = new Routes(mockStreamsHelper)
Get("/int/list?size=10") ~> routes.intRoute ~> check {
handled shouldBe true
val resp: RandomIntegerCollectionResponse = responseAs[RandomIntegerCollectionResponse]
resp.content should have size 1
resp.content.head should have size 10
}
}
"/int/list?size=10&count=2" should "deliver 2 lists of 10 ints between " + Int.MinValue + " and " + Int.MaxValue in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForIntegerCollection _)
.expects(RandomIntegerCollectionRequest(RandomList, size = 10, count = 2))
.returning(Future.successful(RandomIntegerCollectionResponse(List(1 to 10, 1 to 10))))
val routes = new Routes(mockStreamsHelper)
Get("/int/list?size=10&count=2") ~> routes.intRoute ~> check {
handled shouldBe true
val resp: RandomIntegerCollectionResponse = responseAs[RandomIntegerCollectionResponse]
resp.content should have size 2
resp.content.foreach(it => it should have size 10)
}
}
"/int/list?min=0&max=100" should "deliver 1 list of 100 ints between 0 and 100" in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForIntegerCollection _)
.expects(RandomIntegerCollectionRequest(RandomList, minBound = 0, maxBound = 100))
.returning(Future.successful(RandomIntegerCollectionResponse(List(1 to 100))))
val routes = new Routes(mockStreamsHelper)
Get("/int/list?min=0&max=100") ~> routes.route ~> check {
handled shouldBe true
val resp: RandomIntegerCollectionResponse = responseAs[RandomIntegerCollectionResponse]
resp.content should have size 1
resp.content.head should have size 100
resp.content.head.foreach(i => i should (be >= 0 and be <= 100))
}
}
"/int/list?size=10&min=100&max=10" should "result in a 400" in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForIntegerCollection _).expects(*).never()
val routes = new Routes(mockStreamsHelper)
Get("/int/list?min=100&max=10") ~> routes.route ~> check {
response.status shouldBe StatusCodes.BadRequest
}
}
"/int/set" should "give a single set of 100 ints" in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForIntegerCollection _)
.expects(RandomIntegerCollectionRequest(RandomSet))
.returning(Future.successful(RandomIntegerCollectionResponse(Set(1 to 100))))
val routes = new Routes(mockStreamsHelper)
Get("/int/set") ~> routes.intRoute ~> check {
handled shouldBe true
val resp: RandomIntegerCollectionResponse = responseAs[RandomIntegerCollectionResponse]
resp.content should have size 1
resp.content.head.toSet should have size 100
}
}
"/int/set?size=100&min=0&max=50" should "result in a 400" in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForIntegerCollection _).expects(*).never()
val routes = new Routes(mockStreamsHelper)
Get("/int/set?size=100&min=0&max=50") ~> routes.route ~> check {
response.status shouldBe StatusCodes.BadRequest
}
}
"/png" should "return a default sized png" in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForPng _).expects(250, 250)
val routes = new Routes(mockStreamsHelper)
Get("/png") ~> routes.route
}
"/png?width=100&height=200" should "return a png of width 100 and height 200" in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForPng _).expects(100, 200)
val routes = new Routes(mockStreamsHelper)
Get("/png?width=100&height=200") ~> routes.route
}
"/png?width=100" should "return a png of width 100 and default height" in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForPng _).expects(100, 250)
val routes = new Routes(mockStreamsHelper)
Get("/png?width=100") ~> routes.route
}
"/png?height=50" should "ask for a png of default width and height 50" in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForPng _).expects(250, 50)
val routes = new Routes(mockStreamsHelper)
Get("/png?height=50") ~> routes.route
}
"/png?width=0" should "result in a bad request" in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForPng _).expects(*, *).never()
val routes = new Routes(mockStreamsHelper)
Get("/png?width=0") ~> routes.route ~> check {
rejection shouldBe a [ValidationRejection]
}
}
"/png?width=-1" should "result in a bad request" in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForPng _).expects(*, *).never()
val routes = new Routes(mockStreamsHelper)
Get("/png?width=-1") ~> routes.route ~> check {
rejection shouldBe a [ValidationRejection]
}
}
"/png?height=0" should "result in a bad request" in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForPng _).expects(*, *).never()
val routes = new Routes(mockStreamsHelper)
Get("/png?height=0") ~> routes.route ~> check {
rejection shouldBe a [ValidationRejection]
}
}
"/png?height=-1" should "result in a bad request" in {
val mockStreamsHelper = mock[RngStreaming]
(mockStreamsHelper.responseForPng _).expects(*, *).never()
val routes = new Routes(mockStreamsHelper)
Get("/png?height=-1") ~> routes.route ~> check {
rejection shouldBe a [ValidationRejection]
}
}
}
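// A minimal sketch (an assumption, not the original Routes implementation) of how the
// ValidationRejection cases exercised above could be produced with akka-http's
// `parameters` and `validate` directives; the parameter names and 250x250 defaults
// simply mirror the query strings and mock expectations used in these tests:
//
//   val pngRoute: Route =
//     path("png") {
//       parameters("width".as[Int] ? 250, "height".as[Int] ? 250) { (width, height) =>
//         validate(width > 0 && height > 0, "width and height must be positive") {
//           complete(rngStreaming.responseForPng(width, height))
//         }
//       }
//     }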
|
jejking/rprng
|
src/test/scala/com/jejking/rprng/api/RoutesSpec.scala
|
Scala
|
apache-2.0
| 10,673
|
package io.dylemma.spac
package impl
import scala.util.control.NonFatal
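/**
 * Decorates an inner `Transformer.Handler` so that any non-fatal exception thrown while
 * pushing an input or finishing the stream is re-thrown with spac trace information
 * attached: the offending input (or an end-of-input marker) plus the caller's
 * `SpacTraceElement`.
 */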
class TopLevelTransformerHandler[In, Out](inner: Transformer.Handler[In, Out], caller: SpacTraceElement) extends Transformer.Handler[In, Out] {
def push(in: In, out: Transformer.HandlerWrite[Out]) = {
try inner.push(in, out)
catch {
case NonFatal(e) => throw SpacException.addTrace(SpacException.addEarlyTrace(e, SpacTraceElement.InInput(in)), caller)
}
}
def finish(out: Transformer.HandlerWrite[Out]) = {
try inner.finish(out)
catch { case NonFatal(e) => throw SpacException.addTrace(SpacException.addEarlyTrace(e, SpacTraceElement.AtInputEnd), caller) }
}
}
|
dylemma/xml-spac
|
core/src/main/scala/io/dylemma/spac/impl/TopLevelTransformerHandler.scala
|
Scala
|
mit
| 652
|
import io.prediction.controller.LServing
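/**
 * Combines the predictions produced by the engine's algorithms by returning the
 * single PredictedResult with the highest confidence (assumes PredictedResult
 * exposes a numeric `confidence` field, as used below).
 */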
class Serving
extends LServing[Query, PredictedResult] {
override
def serve(query: Query,
predictedResults: Seq[PredictedResult]): PredictedResult = {
predictedResults.maxBy(e => e.confidence)
}
}
|
gongsy945/pio-engine-text-classification-heroku
|
src/main/scala/Serving.scala
|
Scala
|
apache-2.0
| 260
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package water.app
import org.apache.spark.{SparkContext, SparkConf}
/**
 * Publishes helper methods for configuring a Spark context and distributing files to it.
 */
trait SparkContextSupport {
def configure(appName: String = "Sparkling Water Demo"): SparkConf = {
val conf = new SparkConf()
.setAppName(appName)
conf.setIfMissing("spark.master", sys.env.getOrElse("spark.master", "local[*]"))
conf
}
def addFiles(sc: SparkContext, files: String*): Unit = {
files.foreach(f => sc.addFile(f))
}
def absPath(path: String): String = new java.io.File(path).getAbsolutePath
}
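// Hypothetical usage sketch (not part of the original file): an application can mix in
// the trait to build its SparkContext and ship auxiliary files to the executors. The
// object name and file path below are illustrative only.
//
//   object DemoApp extends SparkContextSupport {
//     def main(args: Array[String]): Unit = {
//       val sc = new SparkContext(configure("My Sparkling Water Demo"))
//       addFiles(sc, absPath("examples/smalldata/prostate.csv"))
//       // ... run the job ...
//       sc.stop()
//     }
//   }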
|
printedheart/sparkling-water
|
core/src/main/scala/water/app/SparkContextSupport.scala
|
Scala
|
apache-2.0
| 1,357
|