| code (string, length 5–1M) | repo_name (string, length 5–109) | path (string, length 6–208) | language (1 class) | license (15 classes) | size (int64, 5–1M) |
|---|---|---|---|---|---|
package org.opencompare.api.scala.io
import java.io.File
import org.opencompare.api.scala.PCM
trait PCMLoader {
/**
* Return a list of PCMs from a string representation
*
* @param pcms string representation of one or more PCMs
* @return the list of PCMs represented by pcms
*/
def load(pcms : String) : List[PCM]
/**
* Return a list of PCMs from a file
*
* @param file file to load
* @return the list of PCMs loaded from the file
*/
def load(file : File) : List[PCM]
}
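// Illustrative usage sketch (not part of the original file): "printAll" and its parameters
// are hypothetical; any concrete PCMLoader implementation can be passed in.
object PCMLoaderUsage {
  def printAll(loader: PCMLoader, file: File): Unit = {
    val pcms: List[PCM] = loader.load(file)
    pcms.foreach(pcm => println(pcm))
  }
}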
| OpenCompare/OpenCompare | org.opencompare/api-scala/src/main/scala/org/opencompare/api/scala/io/PCMLoader.scala | Scala | apache-2.0 | 476 |
package latis.dm
import latis.dm.implicits._
import org.junit._
import Assert._
import com.typesafe.scalalogging.LazyLogging
import latis.dm._
import latis.metadata.Metadata
import latis.time.Time
import latis.writer.AsciiWriter
class TestScalar {
@Test
def construct_scalar_from_boolean_true = {
Scalar(1==1) match {
case Text(s) => assertEquals("true", s)
}
}
@Test
def construct_scalar_from_boolean_false = {
Scalar(1!=1) match {
case Text(s) => assertEquals("false", s)
}
}
}
| dlindhol/LaTiS | src/test/scala/latis/dm/TestScalar.scala | Scala | epl-1.0 | 529 |
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScValue
/**
* User: Alexander Podkhalyuzin
* Date: 17.10.2008
*/
trait ScValueStub extends ScValueOrVariableStub[ScValue] {
def isImplicit: Boolean
}
| loskutov/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/stubs/ScValueStub.scala | Scala | apache-2.0 | 293 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.sources
/**
* Defines a logical event-time attribute for a [[TableSource]].
* The event-time attribute can be used for indicating, accessing, and working with Flink's
* event-time.
*
* A [[TableSource]] that implements this interface defines the name of
* the event-time attribute. The attribute must be present in the schema of the [[TableSource]]
* and must be of type [[Long]] or [[java.sql.Timestamp]].
*/
trait DefinedRowtimeAttribute {
/**
* Defines a name of the event-time attribute that represents Flink's event-time, i.e., an
* attribute that is aligned with the watermarks of the
* [[org.apache.flink.streaming.api.datastream.DataStream]] returned by
* [[StreamTableSource.getDataStream()]].
*
* An attribute with the given name must be present in the schema of the [[TableSource]].
* The attribute must be of type [[Long]] or [[java.sql.Timestamp]].
*
* The method should return null if no rowtime attribute is defined.
*
* @return The name of the field that represents the event-time field and which is aligned
* with the watermarks of the [[org.apache.flink.streaming.api.datastream.DataStream]]
* returned by [[StreamTableSource.getDataStream()]].
* The field must be present in the schema of the [[TableSource]] and be of type [[Long]]
* or [[java.sql.Timestamp]].
*/
def getRowtimeAttribute: String
}
/**
* Defines a logical processing-time attribute for a [[TableSource]].
* The processing-time attribute can be used for indicating, accessing, and working with Flink's
* processing-time.
*
* A [[TableSource]] that implements this interface defines the name of
* the processing-time attribute. The attribute will be added to the schema of the
* [[org.apache.flink.table.api.Table]] produced by the [[TableSource]].
*/
trait DefinedProctimeAttribute {
/**
* Defines the name of the processing-time attribute that represents Flink's
* processing-time. Returns null if no processing-time attribute should be available.
*
* The field will be appended to the schema provided by the [[TableSource]].
*/
def getProctimeAttribute: String
}
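// Illustrative sketch (not from the Flink codebase): a class that mixes in both traits,
// exposing an event-time attribute named "eventTime" and no processing-time attribute.
// The class and attribute names are hypothetical; "eventTime" would have to exist in the
// source's schema and be of type Long or java.sql.Timestamp.
class SensorTableSource extends DefinedRowtimeAttribute with DefinedProctimeAttribute {
  override def getRowtimeAttribute: String = "eventTime"
  // Returning null signals that no processing-time attribute is defined.
  override def getProctimeAttribute: String = null
}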
| PangZhi/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/definedTimeAttributes.scala | Scala | apache-2.0 | 3,021 |
package liang.don.dzimageconverter.log.net
import liang.don.dzimageconverter.log.{LoggerInterface, LogLevel}
/**
* .Net (C#) console logger.
*
* @author Don Liang
* @version 0.1, 22/09/2011
*/
trait ConsoleLogger extends LoggerInterface {
override def log(message: String) {
sys.error("[" + getClass.getName + "#log] Not implemented.")
}
override def log(message: String, logLevel: LogLevel.Value) {
sys.error("[" + getClass.getName + "#log] Not implemented.")
}
override def log(message: String, logLevel: LogLevel.Value, exception: Exception) {
sys.error("[" + getClass.getName + "#log] Not implemented.")
}
}
| dl2k84/DeepZoomImageConverter | src/liang/don/dzimageconverter/log/net/ConsoleLogger.scala | Scala | mit | 647 |
package com.twitter.cache
import com.twitter.conversions.time._
import com.twitter.util.{Await, Future}
import org.scalatest.FunSuite
abstract class AbstractLoadingFutureCacheTest extends FunSuite {
// NB we can't reuse AbstractFutureCacheTest since
// loading cache semantics are sufficiently unique
// to merit distinct tests.
def name: String
trait Ctx {
var cacheLoaderCount = 0
val cache: FutureCache[String, Int] with (String => Future[Int])
}
def mkCtx(): Ctx
test(s"$name should return CacheLoader result for unset keys") {
val ctx = mkCtx()
import ctx._
val Some(res) = cache.get("key")
assert(Await.result(res, 2.seconds) == "key".hashCode)
assert(cacheLoaderCount == 1)
}
test(s"$name should call CacheLoader one time for non-evicted keys") {
val ctx = mkCtx()
import ctx._
val Some(res) = cache.get("key")
val Some(res2) = cache.get("key")
val Some(res3) = cache.get("key")
assert(Await.result(res, 2.seconds) == "key".hashCode)
assert(Await.result(res2, 2.seconds) == "key".hashCode)
assert(Await.result(res3, 2.seconds) == "key".hashCode)
assert(cacheLoaderCount == 1)
}
test(s"$name should return set values") {
val ctx = mkCtx()
import ctx._
cache.set("key", Future.value(1234))
val Some(res) = cache.get("key")
val res2 = cache("key")
assert(Await.result(res, 2.seconds) == 1234)
assert(Await.result(res2, 2.seconds) == 1234)
assert(cacheLoaderCount == 0)
}
test(s"$name should evict") {
val ctx = mkCtx()
import ctx._
val f = Future.value(1234)
cache.set("key", f)
val Some(res1) = cache.get("key")
assert(Await.result(res1, 2.seconds) == 1234)
assert(cache.evict("key", f))
val Some(res2) = cache.get("key")
assert(Await.result(res2, 2.seconds) == "key".hashCode)
assert(cacheLoaderCount == 1)
}
test(s"$name eviction should refuse to evict incorrectly") {
val ctx = mkCtx()
import ctx._
val f = Future.value(1234)
cache.set("key", f)
val Some(res1) = cache.get("key")
assert(Await.result(res1, 2.seconds) == 1234)
assert(!cache.evict("key", Future.value(4)))
val Some(res2) = cache.get("key")
assert(Await.result(res2, 2.seconds) == 1234)
assert(cacheLoaderCount == 0)
}
test(s"$name shouldn't update gettable keys") {
val ctx = mkCtx()
import ctx._
val f = Future.value(1234)
cache.set("key", f)
var mod = false
val result = cache.getOrElseUpdate("key") {
mod = true
Future.value(321)
}
assert(Await.result(result, 2.seconds) == 1234)
assert(mod == false)
assert(cacheLoaderCount == 0)
}
test(s"$name should update if ungettable") {
val ctx = mkCtx()
import ctx._
val result = cache.getOrElseUpdate("key") { Future.value(1234) }
assert(Await.result(result, 2.seconds) == 1234)
assert(cacheLoaderCount == 0)
}
}
| folone/util | util-cache/src/test/scala/com/twitter/cache/AbstractLoadingFutureCacheTest.scala | Scala | apache-2.0 | 2,944 |
package com.eevolution.context.dictionary.infrastructure.repository
import com.eevolution.context.dictionary.domain.model.PrintGraph
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: eduardo.moreno@e-evolution.com, http://www.e-evolution.com , http://github.com/e-Evolution
* Created by eduardo.moreno@e-evolution.com , www.e-evolution.com
*/
/**
* Print Graph Mapping
*/
trait PrintGraphMapping {
val queryPrintGraph = quote {
querySchema[PrintGraph]("AD_PrintGraph",
_.printGraphId -> "AD_PrintGraph_ID",
_.tenantId -> "AD_Client_ID",
_.organizationId-> "AD_Org_ID",
_.isActive -> "IsActive",
_.created -> "Created",
_.createdBy -> "CreatedBy",
_.updated -> "Updated",
_.updatedBy -> "UpdatedBy",
_.name -> "Name",
_.description -> "Description",
_.graphType -> "GraphType",
_.descriptionPrintFormatItemId -> "Description_PrintFormatItem_ID",
_.dataPrintFormatItemID -> "Data_PrintFormatItem_ID",
_.data1PrintFormatItemID -> "Data1_PrintFormatItem_ID",
_.data2PrintFormatItemID -> "Data2_PrintFormatItem_ID",
_.data3PrintFormatItemID -> "Data3_PrintFormatItem_ID",
_.data4PrintFormatItemID -> "Data4_PrintFormatItem_ID",
_.printFormatId -> "AD_PrintFormat_ID",
_.uuid -> "UUID")
}
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/PrintGraphMapping.scala | Scala | gpl-3.0 | 2,092 |
package org.katis.capnproto.runtime
class StructSize(val data: Short, val pointers: Short) {
def total(): Int = this.data.toInt + this.pointers.toInt
}
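// Illustrative usage sketch (not part of the original file): a struct with two data words
// and one pointer occupies three words in total.
object StructSizeExample {
  val size = new StructSize(2.toShort, 1.toShort)
  assert(size.total() == 3)
}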
| katis/capnp-scala | runtime/shared/src/main/scala-2.11/org/katis/capnproto/runtime/StructSize.scala | Scala | mit | 156 |
package io.github.lyubent.util
import java.util.Properties
import scala.tools.nsc.io.{File, Path}
object FileUtil {
/**
* Uses the classloader to load the "toxicity.properties" resource and
* retrieve the value of the requested configuration property.
*
* @param property Name of the property to look up.
* @return Value of the requested property.
*/
def getConfigProperty(property: String): String = {
val p = new Properties()
p.load(getClass.getClassLoader.getResourceAsStream("toxicity.properties"))
p.getProperty(property)
}
/**
* Appends content to a file, creating the file first if it does not exist.
*
* @param body Content to be appended to file.
* @param path String path to the file to save to.
*/
def appendToFile(body: String, path: String): Unit = {
val filePath = path
if (!File(filePath).exists)
Path(filePath).createFile()
File(filePath).appendAll(body)
}
}
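// Illustrative usage sketch (not part of the original file): the property key "dataset.path"
// and the output path are hypothetical; getConfigProperty requires "toxicity.properties"
// to be on the classpath.
object FileUtilExample {
  def main(args: Array[String]): Unit = {
    val datasetPath = FileUtil.getConfigProperty("dataset.path")
    FileUtil.appendToFile(s"Dataset located at $datasetPath\n", "/tmp/fileutil-example.log")
  }
}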
| lyubent/LeagueToxicity | src/main/scala/io/github/lyubent/util/FileUtil.scala | Scala | unlicense | 865 |
package scorex.unit
import org.scalatest.FunSuite
import scorex.crypto.Base58
import scorex.state.wallet.Wallet
import scala.util.Random
class WalletSpecification extends FunSuite {
private val walletSize = 10
test("wallet - acc deletion") {
val w = new Wallet(None, "cookies", Base58.decode("FQgbSAm6swGbtqA3NE8PttijPhT4N3Ufh4bHFAkyVnQz").get)
w.generateNewAccounts(walletSize)
assert(w.privateKeyAccounts().size == walletSize)
val head = w.privateKeyAccounts().head
w.deleteAccount(head)
assert(w.privateKeyAccounts().size == walletSize - 1)
w.deleteAccount(w.privateKeyAccounts().head)
assert(w.privateKeyAccounts().size == walletSize - 2)
w.privateKeyAccounts().foreach(w.deleteAccount)
assert(w.privateKeyAccounts().isEmpty)
}
/*
//todo: report MapDb bug with reopening a database
test("reopening"){
val walletFile = new java.io.File(s"/tmp/wallet${Random.nextLong()}.dat")
val w = new Wallet(Some(walletFile), "cookies", Base58.decode("FQgbSAm6swGbtqA3NE8PttijPhT4N3Ufh4bHFAkyVnQz").get)
w.generateNewAccounts(10)
w.close()
assert(w.exists())
val w2 = new Wallet(Some(walletFile), "cookies", Base58.decode("FQgbSAm6swGbtqA3NE8PttijPhT4N3Ufh4bHFAkyVnQz").get)
assert(w2.privateKeyAccounts().head.address != null)
} */
}
| pozharko/Scorex-Lagonaki | src/test/scala/scorex/unit/WalletSpecification.scala | Scala | cc0-1.0 | 1,325 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.io.File
import scala.collection.mutable.ArrayBuffer
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkContext
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTable, CatalogTableType}
import org.apache.spark.sql.execution.command.CreateTableCommand
import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation}
import org.apache.spark.sql.hive.HiveExternalCatalog._
import org.apache.spark.sql.hive.client.HiveClient
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.StaticSQLConf._
import org.apache.spark.sql.test.SQLTestUtils
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
/**
* Tests for persisting tables created though the data sources API into the metastore.
*/
class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
import hiveContext._
import spark.implicits._
var jsonFilePath: String = _
override def beforeAll(): Unit = {
super.beforeAll()
jsonFilePath = Utils.getSparkClassLoader.getResource("sample.json").getFile
}
test("persistent JSON table") {
withTable("jsonTable") {
sql(
s"""CREATE TABLE jsonTable
|USING org.apache.spark.sql.json.DefaultSource
|OPTIONS (
| path '$jsonFilePath'
|)
""".stripMargin)
checkAnswer(
sql("SELECT * FROM jsonTable"),
read.json(jsonFilePath).collect().toSeq)
}
}
test("persistent JSON table with a user specified schema") {
withTable("jsonTable") {
sql(
s"""CREATE TABLE jsonTable (
|a string,
|b String,
|`c_!@(3)` int,
|`<d>` Struct<`d!`:array<int>, `=`:array<struct<Dd2: boolean>>>)
|USING org.apache.spark.sql.json.DefaultSource
|OPTIONS (
| path '$jsonFilePath'
|)
""".stripMargin)
withTempView("expectedJsonTable") {
read.json(jsonFilePath).createOrReplaceTempView("expectedJsonTable")
checkAnswer(
sql("SELECT a, b, `c_!@(3)`, `<d>`.`d!`, `<d>`.`=` FROM jsonTable"),
sql("SELECT a, b, `c_!@(3)`, `<d>`.`d!`, `<d>`.`=` FROM expectedJsonTable"))
}
}
}
test("persistent JSON table with a user specified schema with a subset of fields") {
withTable("jsonTable") {
// This works because JSON objects are self-describing and JSONRelation can get needed
// field values based on field names.
sql(
s"""CREATE TABLE jsonTable (`<d>` Struct<`=`:array<struct<Dd2: boolean>>>, b String)
|USING org.apache.spark.sql.json.DefaultSource
|OPTIONS (
| path '$jsonFilePath'
|)
""".stripMargin)
val innerStruct = StructType(Seq(
StructField("=", ArrayType(StructType(StructField("Dd2", BooleanType, true) :: Nil)))))
val expectedSchema = StructType(Seq(
StructField("<d>", innerStruct, true),
StructField("b", StringType, true)))
assert(expectedSchema === table("jsonTable").schema)
withTempView("expectedJsonTable") {
read.json(jsonFilePath).createOrReplaceTempView("expectedJsonTable")
checkAnswer(
sql("SELECT b, `<d>`.`=` FROM jsonTable"),
sql("SELECT b, `<d>`.`=` FROM expectedJsonTable"))
}
}
}
test("resolve shortened provider names") {
withTable("jsonTable") {
sql(
s"""
|CREATE TABLE jsonTable
|USING org.apache.spark.sql.json
|OPTIONS (
| path '$jsonFilePath'
|)
""".stripMargin)
checkAnswer(
sql("SELECT * FROM jsonTable"),
read.json(jsonFilePath).collect().toSeq)
}
}
test("drop table") {
withTable("jsonTable") {
sql(
s"""
|CREATE TABLE jsonTable
|USING org.apache.spark.sql.json
|OPTIONS (
| path '$jsonFilePath'
|)
""".stripMargin)
checkAnswer(
sql("SELECT * FROM jsonTable"),
read.json(jsonFilePath))
sql("DROP TABLE jsonTable")
intercept[Exception] {
sql("SELECT * FROM jsonTable").collect()
}
assert(
new File(jsonFilePath).exists(),
"The table with specified path is considered as an external table, " +
"its data should not deleted after DROP TABLE.")
}
}
test("check change without refresh") {
withTempPath { tempDir =>
withTable("jsonTable") {
(("a", "b") :: Nil).toDF().toJSON.rdd.saveAsTextFile(tempDir.getCanonicalPath)
sql(
s"""CREATE TABLE jsonTable
|USING org.apache.spark.sql.json
|OPTIONS (
| path '${tempDir.toURI}'
|)
""".stripMargin)
checkAnswer(
sql("SELECT * FROM jsonTable"),
Row("a", "b"))
Utils.deleteRecursively(tempDir)
(("a1", "b1", "c1") :: Nil).toDF().toJSON.rdd.saveAsTextFile(tempDir.getCanonicalPath)
// Schema is cached so the new column does not show. The updated values in existing columns
// will show.
checkAnswer(
sql("SELECT * FROM jsonTable"),
Row("a1", "b1"))
sql("REFRESH TABLE jsonTable")
// After refresh, schema is not changed.
checkAnswer(
sql("SELECT * FROM jsonTable"),
Row("a1", "b1"))
}
}
}
test("drop, change, recreate") {
withTempPath { tempDir =>
(("a", "b") :: Nil).toDF().toJSON.rdd.saveAsTextFile(tempDir.getCanonicalPath)
withTable("jsonTable") {
sql(
s"""CREATE TABLE jsonTable
|USING org.apache.spark.sql.json
|OPTIONS (
| path '${tempDir.toURI}'
|)
""".stripMargin)
checkAnswer(
sql("SELECT * FROM jsonTable"),
Row("a", "b"))
Utils.deleteRecursively(tempDir)
(("a", "b", "c") :: Nil).toDF().toJSON.rdd.saveAsTextFile(tempDir.getCanonicalPath)
sql("DROP TABLE jsonTable")
sql(
s"""CREATE TABLE jsonTable
|USING org.apache.spark.sql.json
|OPTIONS (
| path '${tempDir.toURI}'
|)
""".stripMargin)
// New table should reflect new schema.
checkAnswer(
sql("SELECT * FROM jsonTable"),
Row("a", "b", "c"))
}
}
}
test("invalidate cache and reload") {
withTable("jsonTable") {
sql(
s"""CREATE TABLE jsonTable (`c_!@(3)` int)
|USING org.apache.spark.sql.json.DefaultSource
|OPTIONS (
| path '$jsonFilePath'
|)
""".stripMargin)
withTempView("expectedJsonTable") {
read.json(jsonFilePath).createOrReplaceTempView("expectedJsonTable")
checkAnswer(
sql("SELECT * FROM jsonTable"),
sql("SELECT `c_!@(3)` FROM expectedJsonTable").collect().toSeq)
// Discard the cached relation.
sessionState.refreshTable("jsonTable")
checkAnswer(
sql("SELECT * FROM jsonTable"),
sql("SELECT `c_!@(3)` FROM expectedJsonTable").collect().toSeq)
sessionState.refreshTable("jsonTable")
val expectedSchema = StructType(StructField("c_!@(3)", IntegerType, true) :: Nil)
assert(expectedSchema === table("jsonTable").schema)
}
}
}
test("CTAS") {
withTempPath { tempPath =>
withTable("jsonTable", "ctasJsonTable") {
sql(
s"""CREATE TABLE jsonTable
|USING org.apache.spark.sql.json.DefaultSource
|OPTIONS (
| path '$jsonFilePath'
|)
""".stripMargin)
sql(
s"""CREATE TABLE ctasJsonTable
|USING org.apache.spark.sql.json.DefaultSource
|OPTIONS (
| path '${tempPath.toURI}'
|) AS
|SELECT * FROM jsonTable
""".stripMargin)
assert(table("ctasJsonTable").schema === table("jsonTable").schema)
checkAnswer(
sql("SELECT * FROM ctasJsonTable"),
sql("SELECT * FROM jsonTable").collect())
}
}
}
test("CTAS with IF NOT EXISTS") {
withTempPath { path =>
val tempPath = path.toURI
withTable("jsonTable", "ctasJsonTable") {
sql(
s"""CREATE TABLE jsonTable
|USING org.apache.spark.sql.json.DefaultSource
|OPTIONS (
| path '$jsonFilePath'
|)
""".stripMargin)
sql(
s"""CREATE TABLE ctasJsonTable
|USING org.apache.spark.sql.json.DefaultSource
|OPTIONS (
| path '$tempPath'
|) AS
|SELECT * FROM jsonTable
""".stripMargin)
// Creating the table again should trigger an AnalysisException.
val message = intercept[AnalysisException] {
sql(
s"""CREATE TABLE ctasJsonTable
|USING org.apache.spark.sql.json.DefaultSource
|OPTIONS (
| path '$tempPath'
|) AS
|SELECT * FROM jsonTable
""".stripMargin)
}.getMessage
assert(
message.contains("Table default.ctasJsonTable already exists."),
"We should complain that ctasJsonTable already exists")
// The following statement should be fine if it has IF NOT EXISTS.
// It tries to create a table ctasJsonTable with a new schema.
// The actual table's schema and data should not be changed.
sql(
s"""CREATE TABLE IF NOT EXISTS ctasJsonTable
|USING org.apache.spark.sql.json.DefaultSource
|OPTIONS (
| path '$tempPath'
|) AS
|SELECT a FROM jsonTable
""".stripMargin)
// Discard the cached relation.
sessionState.refreshTable("ctasJsonTable")
// Schema should not be changed.
assert(table("ctasJsonTable").schema === table("jsonTable").schema)
// Table data should not be changed.
checkAnswer(
sql("SELECT * FROM ctasJsonTable"),
sql("SELECT * FROM jsonTable").collect())
}
}
}
test("CTAS a managed table") {
withTable("jsonTable", "ctasJsonTable", "loadedTable") {
sql(
s"""CREATE TABLE jsonTable
|USING org.apache.spark.sql.json.DefaultSource
|OPTIONS (
| path '$jsonFilePath'
|)
""".stripMargin)
val expectedPath = sessionState.catalog.defaultTablePath(TableIdentifier("ctasJsonTable"))
val filesystemPath = new Path(expectedPath)
val fs = filesystemPath.getFileSystem(spark.sessionState.newHadoopConf())
fs.delete(filesystemPath, true)
// It is a managed table when we do not specify the location.
sql(
s"""CREATE TABLE ctasJsonTable
|USING org.apache.spark.sql.json.DefaultSource
|AS
|SELECT * FROM jsonTable
""".stripMargin)
assert(fs.exists(filesystemPath), s"$expectedPath should exist after we create the table.")
sql(
s"""CREATE TABLE loadedTable
|USING org.apache.spark.sql.json.DefaultSource
|OPTIONS (
| path '$expectedPath'
|)
""".stripMargin)
assert(table("ctasJsonTable").schema === table("loadedTable").schema)
checkAnswer(
sql("SELECT * FROM ctasJsonTable"),
sql("SELECT * FROM loadedTable"))
sql("DROP TABLE ctasJsonTable")
assert(!fs.exists(filesystemPath), s"$expectedPath should not exist after we drop the table.")
}
}
test("saveAsTable(CTAS) using append and insertInto when the target table is Hive serde") {
val tableName = "tab1"
withTable(tableName) {
sql(s"CREATE TABLE $tableName STORED AS SEQUENCEFILE AS SELECT 1 AS key, 'abc' AS value")
val df = sql(s"SELECT key, value FROM $tableName")
df.write.insertInto(tableName)
checkAnswer(
sql(s"SELECT * FROM $tableName"),
Row(1, "abc") :: Row(1, "abc") :: Nil
)
}
}
test("SPARK-5839 HiveMetastoreCatalog does not recognize table aliases of data source tables.") {
withTable("savedJsonTable") {
// Save the df as a managed table (by not specifying the path).
(1 to 10)
.map(i => i -> s"str$i")
.toDF("a", "b")
.write
.format("json")
.saveAsTable("savedJsonTable")
checkAnswer(
sql("SELECT * FROM savedJsonTable where savedJsonTable.a < 5"),
(1 to 4).map(i => Row(i, s"str$i")))
checkAnswer(
sql("SELECT * FROM savedJsonTable tmp where tmp.a > 5"),
(6 to 10).map(i => Row(i, s"str$i")))
sessionState.refreshTable("savedJsonTable")
checkAnswer(
sql("SELECT * FROM savedJsonTable where savedJsonTable.a < 5"),
(1 to 4).map(i => Row(i, s"str$i")))
checkAnswer(
sql("SELECT * FROM savedJsonTable tmp where tmp.a > 5"),
(6 to 10).map(i => Row(i, s"str$i")))
}
}
test("save table") {
withTempPath { path =>
val tempPath = path.getCanonicalPath
withTable("savedJsonTable") {
val df = (1 to 10).map(i => i -> s"str$i").toDF("a", "b")
withSQLConf(SQLConf.DEFAULT_DATA_SOURCE_NAME.key -> "json") {
// Save the df as a managed table (by not specifying the path).
df.write.saveAsTable("savedJsonTable")
checkAnswer(sql("SELECT * FROM savedJsonTable"), df)
// We can overwrite it.
df.write.mode(SaveMode.Overwrite).saveAsTable("savedJsonTable")
checkAnswer(sql("SELECT * FROM savedJsonTable"), df)
// When the save mode is Ignore, we will do nothing when the table already exists.
df.select("b").write.mode(SaveMode.Ignore).saveAsTable("savedJsonTable")
// TODO: ResolvedDataSource will convert the schema into nullable = true,
// hence df.schema is not exactly the same as table("savedJsonTable").schema.
// assert(df.schema === table("savedJsonTable").schema)
checkAnswer(sql("SELECT * FROM savedJsonTable"), df)
// Drop table will also delete the data.
sql("DROP TABLE savedJsonTable")
intercept[AnalysisException] {
read.json(
sessionState.catalog.defaultTablePath(TableIdentifier("savedJsonTable")).toString)
}
}
// Create an external table by specifying the path.
withSQLConf(SQLConf.DEFAULT_DATA_SOURCE_NAME.key -> "not a source name") {
df.write
.format("org.apache.spark.sql.json")
.mode(SaveMode.Append)
.option("path", tempPath.toString)
.saveAsTable("savedJsonTable")
checkAnswer(sql("SELECT * FROM savedJsonTable"), df)
}
// Data should not be deleted after we drop the table.
sql("DROP TABLE savedJsonTable")
checkAnswer(read.json(tempPath.toString), df)
}
}
}
test("create external table") {
withTempPath { tempPath =>
withTable("savedJsonTable", "createdJsonTable") {
val df = read.json((1 to 10).map { i =>
s"""{ "a": $i, "b": "str$i" }"""
}.toDS())
withSQLConf(SQLConf.DEFAULT_DATA_SOURCE_NAME.key -> "not a source name") {
df.write
.format("json")
.mode(SaveMode.Append)
.option("path", tempPath.toString)
.saveAsTable("savedJsonTable")
}
withSQLConf(SQLConf.DEFAULT_DATA_SOURCE_NAME.key -> "json") {
sparkSession.catalog.createExternalTable("createdJsonTable", tempPath.toString)
assert(table("createdJsonTable").schema === df.schema)
checkAnswer(sql("SELECT * FROM createdJsonTable"), df)
assert(
intercept[AnalysisException] {
sparkSession.catalog.createExternalTable("createdJsonTable", jsonFilePath.toString)
}.getMessage.contains("Table createdJsonTable already exists."),
"We should complain that createdJsonTable already exists")
}
// Data should not be deleted.
sql("DROP TABLE createdJsonTable")
checkAnswer(read.json(tempPath.toString), df)
// Try to specify the schema.
withSQLConf(SQLConf.DEFAULT_DATA_SOURCE_NAME.key -> "not a source name") {
val schema = StructType(StructField("b", StringType, true) :: Nil)
sparkSession.catalog.createExternalTable(
"createdJsonTable",
"org.apache.spark.sql.json",
schema,
Map("path" -> tempPath.toString))
checkAnswer(
sql("SELECT * FROM createdJsonTable"),
sql("SELECT b FROM savedJsonTable"))
sql("DROP TABLE createdJsonTable")
}
}
}
}
test("path required error") {
assert(
intercept[AnalysisException] {
sparkSession.catalog.createExternalTable(
"createdJsonTable",
"org.apache.spark.sql.json",
Map.empty[String, String])
table("createdJsonTable")
}.getMessage.contains("Unable to infer schema"),
"We should complain that path is not specified.")
sql("DROP TABLE IF EXISTS createdJsonTable")
}
test("scan a parquet table created through a CTAS statement") {
withSQLConf(HiveUtils.CONVERT_METASTORE_PARQUET.key -> "true") {
withTempView("jt") {
(1 to 10).map(i => i -> s"str$i").toDF("a", "b").createOrReplaceTempView("jt")
withTable("test_parquet_ctas") {
sql(
"""CREATE TABLE test_parquet_ctas STORED AS PARQUET
|AS SELECT tmp.a FROM jt tmp WHERE tmp.a < 5
""".stripMargin)
checkAnswer(
sql(s"SELECT a FROM test_parquet_ctas WHERE a > 2 "),
Row(3) :: Row(4) :: Nil)
table("test_parquet_ctas").queryExecution.optimizedPlan match {
case LogicalRelation(p: HadoopFsRelation, _, _, _) => // OK
case _ =>
fail(s"test_parquet_ctas should have be converted to ${classOf[HadoopFsRelation]}")
}
}
}
}
}
test("Pre insert nullability check (ArrayType)") {
withTable("arrayInParquet") {
{
val df = (Tuple1(Seq(Int.box(1), null: Integer)) :: Nil).toDF("a")
val expectedSchema =
StructType(
StructField(
"a",
ArrayType(IntegerType, containsNull = true),
nullable = true) :: Nil)
assert(df.schema === expectedSchema)
df.write
.format("parquet")
.mode(SaveMode.Overwrite)
.saveAsTable("arrayInParquet")
}
{
val df = (Tuple1(Seq(2, 3)) :: Nil).toDF("a")
val expectedSchema =
StructType(
StructField(
"a",
ArrayType(IntegerType, containsNull = false),
nullable = true) :: Nil)
assert(df.schema === expectedSchema)
df.write
.format("parquet")
.mode(SaveMode.Append)
.insertInto("arrayInParquet")
}
(Tuple1(Seq(4, 5)) :: Nil).toDF("a")
.write
.mode(SaveMode.Append)
.saveAsTable("arrayInParquet") // This one internally calls df2.insertInto.
(Tuple1(Seq(Int.box(6), null: Integer)) :: Nil).toDF("a")
.write
.mode(SaveMode.Append)
.saveAsTable("arrayInParquet")
sparkSession.catalog.refreshTable("arrayInParquet")
checkAnswer(
sql("SELECT a FROM arrayInParquet"),
Row(ArrayBuffer(1, null)) ::
Row(ArrayBuffer(2, 3)) ::
Row(ArrayBuffer(4, 5)) ::
Row(ArrayBuffer(6, null)) :: Nil)
}
}
test("Pre insert nullability check (MapType)") {
withTable("mapInParquet") {
{
val df = (Tuple1(Map(1 -> (null: Integer))) :: Nil).toDF("a")
val expectedSchema =
StructType(
StructField(
"a",
MapType(IntegerType, IntegerType, valueContainsNull = true),
nullable = true) :: Nil)
assert(df.schema === expectedSchema)
df.write
.format("parquet")
.mode(SaveMode.Overwrite)
.saveAsTable("mapInParquet")
}
{
val df = (Tuple1(Map(2 -> 3)) :: Nil).toDF("a")
val expectedSchema =
StructType(
StructField(
"a",
MapType(IntegerType, IntegerType, valueContainsNull = false),
nullable = true) :: Nil)
assert(df.schema === expectedSchema)
df.write
.format("parquet")
.mode(SaveMode.Append)
.insertInto("mapInParquet")
}
(Tuple1(Map(4 -> 5)) :: Nil).toDF("a")
.write
.format("parquet")
.mode(SaveMode.Append)
.saveAsTable("mapInParquet") // This one internally calls df2.insertInto.
(Tuple1(Map(6 -> null.asInstanceOf[Integer])) :: Nil).toDF("a")
.write
.format("parquet")
.mode(SaveMode.Append)
.saveAsTable("mapInParquet")
sparkSession.catalog.refreshTable("mapInParquet")
checkAnswer(
sql("SELECT a FROM mapInParquet"),
Row(Map(1 -> null)) ::
Row(Map(2 -> 3)) ::
Row(Map(4 -> 5)) ::
Row(Map(6 -> null)) :: Nil)
}
}
test("SPARK-6024 wide schema support") {
assert(spark.sparkContext.conf.get(SCHEMA_STRING_LENGTH_THRESHOLD) == 4000)
withTable("wide_schema") {
withTempDir { tempDir =>
// We will need 80 splits for this schema if the threshold is 4000.
val schema = StructType((1 to 5000).map(i => StructField(s"c_$i", StringType)))
val tableDesc = CatalogTable(
identifier = TableIdentifier("wide_schema"),
tableType = CatalogTableType.EXTERNAL,
storage = CatalogStorageFormat.empty.copy(
properties = Map("path" -> tempDir.getCanonicalPath)
),
schema = schema,
provider = Some("json")
)
spark.sessionState.catalog.createTable(tableDesc, ignoreIfExists = false)
sessionState.refreshTable("wide_schema")
val actualSchema = table("wide_schema").schema
assert(schema === actualSchema)
}
}
}
test("SPARK-6655 still support a schema stored in spark.sql.sources.schema") {
val tableName = "spark6655"
withTable(tableName) {
val schema = StructType(StructField("int", IntegerType, true) :: Nil)
val hiveTable = CatalogTable(
identifier = TableIdentifier(tableName, Some("default")),
tableType = CatalogTableType.MANAGED,
schema = HiveExternalCatalog.EMPTY_DATA_SCHEMA,
provider = Some("json"),
storage = CatalogStorageFormat(
locationUri = None,
inputFormat = None,
outputFormat = None,
serde = None,
compressed = false,
properties = Map(
"path" -> sessionState.catalog.defaultTablePath(TableIdentifier(tableName)).toString)
),
properties = Map(
DATASOURCE_PROVIDER -> "json",
DATASOURCE_SCHEMA -> schema.json,
"EXTERNAL" -> "FALSE"))
hiveClient.createTable(hiveTable, ignoreIfExists = false)
sessionState.refreshTable(tableName)
val actualSchema = table(tableName).schema
assert(schema === actualSchema)
}
}
test("Saving partitionBy columns information") {
val df = (1 to 10).map(i => (i, i + 1, s"str$i", s"str${i + 1}")).toDF("a", "b", "c", "d")
val tableName = s"partitionInfo_${System.currentTimeMillis()}"
withTable(tableName) {
df.write.format("parquet").partitionBy("d", "b").saveAsTable(tableName)
sessionState.refreshTable(tableName)
val metastoreTable = hiveClient.getTable("default", tableName)
val expectedPartitionColumns = StructType(df.schema("d") :: df.schema("b") :: Nil)
val numPartCols = metastoreTable.properties(DATASOURCE_SCHEMA_NUMPARTCOLS).toInt
assert(numPartCols == 2)
val actualPartitionColumns =
StructType(
(0 until numPartCols).map { index =>
df.schema(metastoreTable.properties(s"$DATASOURCE_SCHEMA_PARTCOL_PREFIX$index"))
})
// Make sure partition columns are correctly stored in metastore.
assert(
expectedPartitionColumns.sameType(actualPartitionColumns),
s"Partitions columns stored in metastore $actualPartitionColumns is not the " +
s"partition columns defined by the saveAsTable operation $expectedPartitionColumns.")
// Check the content of the saved table.
checkAnswer(
table(tableName).select("c", "b", "d", "a"),
df.select("c", "b", "d", "a"))
}
}
test("Saving information for sortBy and bucketBy columns") {
val df = (1 to 10).map(i => (i, i + 1, s"str$i", s"str${i + 1}")).toDF("a", "b", "c", "d")
val tableName = s"bucketingInfo_${System.currentTimeMillis()}"
withTable(tableName) {
df.write
.format("parquet")
.bucketBy(8, "d", "b")
.sortBy("c")
.saveAsTable(tableName)
sessionState.refreshTable(tableName)
val metastoreTable = hiveClient.getTable("default", tableName)
val expectedBucketByColumns = StructType(df.schema("d") :: df.schema("b") :: Nil)
val expectedSortByColumns = StructType(df.schema("c") :: Nil)
val numBuckets = metastoreTable.properties(DATASOURCE_SCHEMA_NUMBUCKETS).toInt
assert(numBuckets == 8)
val numBucketCols = metastoreTable.properties(DATASOURCE_SCHEMA_NUMBUCKETCOLS).toInt
assert(numBucketCols == 2)
val numSortCols = metastoreTable.properties(DATASOURCE_SCHEMA_NUMSORTCOLS).toInt
assert(numSortCols == 1)
val actualBucketByColumns =
StructType(
(0 until numBucketCols).map { index =>
df.schema(metastoreTable.properties(s"$DATASOURCE_SCHEMA_BUCKETCOL_PREFIX$index"))
})
// Make sure bucketBy columns are correctly stored in metastore.
assert(
expectedBucketByColumns.sameType(actualBucketByColumns),
s"Partitions columns stored in metastore $actualBucketByColumns is not the " +
s"partition columns defined by the saveAsTable operation $expectedBucketByColumns.")
val actualSortByColumns =
StructType(
(0 until numSortCols).map { index =>
df.schema(metastoreTable.properties(s"$DATASOURCE_SCHEMA_SORTCOL_PREFIX$index"))
})
// Make sure sortBy columns are correctly stored in metastore.
assert(
expectedSortByColumns.sameType(actualSortByColumns),
s"Partitions columns stored in metastore $actualSortByColumns is not the " +
s"partition columns defined by the saveAsTable operation $expectedSortByColumns.")
// Check the content of the saved table.
checkAnswer(
table(tableName).select("c", "b", "d", "a"),
df.select("c", "b", "d", "a"))
}
}
test("insert into a table") {
def createDF(from: Int, to: Int): DataFrame = {
(from to to).map(i => i -> s"str$i").toDF("c1", "c2")
}
withTable("insertParquet") {
createDF(0, 9).write.format("parquet").saveAsTable("insertParquet")
checkAnswer(
sql("SELECT p.c1, p.c2 FROM insertParquet p WHERE p.c1 > 5"),
(6 to 9).map(i => Row(i, s"str$i")))
intercept[AnalysisException] {
createDF(10, 19).write.format("parquet").saveAsTable("insertParquet")
}
createDF(10, 19).write.mode(SaveMode.Append).format("parquet").saveAsTable("insertParquet")
checkAnswer(
sql("SELECT p.c1, p.c2 FROM insertParquet p WHERE p.c1 > 5"),
(6 to 19).map(i => Row(i, s"str$i")))
createDF(20, 29).write.mode(SaveMode.Append).format("parquet").saveAsTable("insertParquet")
checkAnswer(
sql("SELECT p.c1, c2 FROM insertParquet p WHERE p.c1 > 5 AND p.c1 < 25"),
(6 to 24).map(i => Row(i, s"str$i")))
intercept[AnalysisException] {
createDF(30, 39).write.saveAsTable("insertParquet")
}
createDF(30, 39).write.mode(SaveMode.Append).saveAsTable("insertParquet")
checkAnswer(
sql("SELECT p.c1, c2 FROM insertParquet p WHERE p.c1 > 5 AND p.c1 < 35"),
(6 to 34).map(i => Row(i, s"str$i")))
createDF(40, 49).write.mode(SaveMode.Append).insertInto("insertParquet")
checkAnswer(
sql("SELECT p.c1, c2 FROM insertParquet p WHERE p.c1 > 5 AND p.c1 < 45"),
(6 to 44).map(i => Row(i, s"str$i")))
createDF(50, 59).write.mode(SaveMode.Overwrite).saveAsTable("insertParquet")
checkAnswer(
sql("SELECT p.c1, c2 FROM insertParquet p WHERE p.c1 > 51 AND p.c1 < 55"),
(52 to 54).map(i => Row(i, s"str$i")))
createDF(60, 69).write.mode(SaveMode.Ignore).saveAsTable("insertParquet")
checkAnswer(
sql("SELECT p.c1, c2 FROM insertParquet p"),
(50 to 59).map(i => Row(i, s"str$i")))
createDF(70, 79).write.mode(SaveMode.Overwrite).insertInto("insertParquet")
checkAnswer(
sql("SELECT p.c1, c2 FROM insertParquet p"),
(70 to 79).map(i => Row(i, s"str$i")))
}
}
test("append table using different formats") {
def createDF(from: Int, to: Int): DataFrame = {
(from to to).map(i => i -> s"str$i").toDF("c1", "c2")
}
withTable("appendOrcToParquet") {
createDF(0, 9).write.format("parquet").saveAsTable("appendOrcToParquet")
val e = intercept[AnalysisException] {
createDF(10, 19).write.mode(SaveMode.Append).format("orc").saveAsTable("appendOrcToParquet")
}
assert(e.getMessage.contains(
"The format of the existing table default.appendOrcToParquet is `ParquetFileFormat`. " +
"It doesn't match the specified format `OrcFileFormat`"))
}
withTable("appendParquetToJson") {
createDF(0, 9).write.format("json").saveAsTable("appendParquetToJson")
val e = intercept[AnalysisException] {
createDF(10, 19).write.mode(SaveMode.Append).format("parquet")
.saveAsTable("appendParquetToJson")
}
assert(e.getMessage.contains(
"The format of the existing table default.appendParquetToJson is `JsonFileFormat`. " +
"It doesn't match the specified format `ParquetFileFormat`"))
}
withTable("appendTextToJson") {
createDF(0, 9).write.format("json").saveAsTable("appendTextToJson")
val e = intercept[AnalysisException] {
createDF(10, 19).write.mode(SaveMode.Append).format("text")
.saveAsTable("appendTextToJson")
}
assert(e.getMessage.contains(
"The format of the existing table default.appendTextToJson is `JsonFileFormat`. " +
"It doesn't match the specified format `TextFileFormat`"))
}
}
test("append a table using the same formats but different names") {
def createDF(from: Int, to: Int): DataFrame = {
(from to to).map(i => i -> s"str$i").toDF("c1", "c2")
}
withTable("appendParquet") {
createDF(0, 9).write.format("parquet").saveAsTable("appendParquet")
createDF(10, 19).write.mode(SaveMode.Append).format("org.apache.spark.sql.parquet")
.saveAsTable("appendParquet")
checkAnswer(
sql("SELECT p.c1, p.c2 FROM appendParquet p WHERE p.c1 > 5"),
(6 to 19).map(i => Row(i, s"str$i")))
}
withTable("appendParquet") {
createDF(0, 9).write.format("org.apache.spark.sql.parquet").saveAsTable("appendParquet")
createDF(10, 19).write.mode(SaveMode.Append).format("parquet").saveAsTable("appendParquet")
checkAnswer(
sql("SELECT p.c1, p.c2 FROM appendParquet p WHERE p.c1 > 5"),
(6 to 19).map(i => Row(i, s"str$i")))
}
withTable("appendParquet") {
createDF(0, 9).write.format("org.apache.spark.sql.parquet.DefaultSource")
.saveAsTable("appendParquet")
createDF(10, 19).write.mode(SaveMode.Append)
.format("org.apache.spark.sql.execution.datasources.parquet.DefaultSource")
.saveAsTable("appendParquet")
checkAnswer(
sql("SELECT p.c1, p.c2 FROM appendParquet p WHERE p.c1 > 5"),
(6 to 19).map(i => Row(i, s"str$i")))
}
}
test("SPARK-8156:create table to specific database by 'use dbname' ") {
val df = (1 to 3).map(i => (i, s"val_$i", i * 2)).toDF("a", "b", "c")
spark.sql("""create database if not exists testdb8156""")
spark.sql("""use testdb8156""")
df.write
.format("parquet")
.mode(SaveMode.Overwrite)
.saveAsTable("ttt3")
checkAnswer(
spark.sql("show TABLES in testdb8156").filter("tableName = 'ttt3'"),
Row("testdb8156", "ttt3", false))
spark.sql("""use default""")
spark.sql("""drop database if exists testdb8156 CASCADE""")
}
test("skip hive metadata on table creation") {
withTempDir { tempPath =>
val schema = StructType((1 to 5).map(i => StructField(s"c_$i", StringType)))
val tableDesc1 = CatalogTable(
identifier = TableIdentifier("not_skip_hive_metadata"),
tableType = CatalogTableType.EXTERNAL,
storage = CatalogStorageFormat.empty.copy(
locationUri = Some(tempPath.toURI),
properties = Map("skipHiveMetadata" -> "false")
),
schema = schema,
provider = Some("parquet")
)
spark.sessionState.catalog.createTable(tableDesc1, ignoreIfExists = false)
// As a proxy for verifying that the table was stored in Hive compatible format,
// we verify that each column of the table is of native type StringType.
assert(hiveClient.getTable("default", "not_skip_hive_metadata").schema
.forall(_.dataType == StringType))
val tableDesc2 = CatalogTable(
identifier = TableIdentifier("skip_hive_metadata", Some("default")),
tableType = CatalogTableType.EXTERNAL,
storage = CatalogStorageFormat.empty.copy(
properties = Map("path" -> tempPath.getCanonicalPath, "skipHiveMetadata" -> "true")
),
schema = schema,
provider = Some("parquet")
)
spark.sessionState.catalog.createTable(tableDesc2, ignoreIfExists = false)
// As a proxy for verifying that the table was stored in SparkSQL format,
// we verify that the table has a column type as array of StringType.
assert(hiveClient.getTable("default", "skip_hive_metadata").schema
.forall(_.dataType == ArrayType(StringType)))
}
}
test("CTAS: persisted partitioned data source table") {
withTempPath { dir =>
withTable("t") {
sql(
s"""CREATE TABLE t USING PARQUET
|OPTIONS (PATH '${dir.toURI}')
|PARTITIONED BY (a)
|AS SELECT 1 AS a, 2 AS b
""".stripMargin
)
val metastoreTable = hiveClient.getTable("default", "t")
assert(metastoreTable.properties(DATASOURCE_SCHEMA_NUMPARTCOLS).toInt === 1)
assert(!metastoreTable.properties.contains(DATASOURCE_SCHEMA_NUMBUCKETS))
assert(!metastoreTable.properties.contains(DATASOURCE_SCHEMA_NUMBUCKETCOLS))
assert(!metastoreTable.properties.contains(DATASOURCE_SCHEMA_NUMSORTCOLS))
checkAnswer(table("t"), Row(2, 1))
}
}
}
test("CTAS: persisted bucketed data source table") {
withTempPath { dir =>
withTable("t") {
sql(
s"""CREATE TABLE t USING PARQUET
|OPTIONS (PATH '${dir.toURI}')
|CLUSTERED BY (a) SORTED BY (b) INTO 2 BUCKETS
|AS SELECT 1 AS a, 2 AS b
""".stripMargin
)
val metastoreTable = hiveClient.getTable("default", "t")
assert(!metastoreTable.properties.contains(DATASOURCE_SCHEMA_NUMPARTCOLS))
assert(metastoreTable.properties(DATASOURCE_SCHEMA_NUMBUCKETS).toInt === 2)
assert(metastoreTable.properties(DATASOURCE_SCHEMA_NUMBUCKETCOLS).toInt === 1)
assert(metastoreTable.properties(DATASOURCE_SCHEMA_NUMSORTCOLS).toInt === 1)
checkAnswer(table("t"), Row(1, 2))
}
}
withTempPath { dir =>
withTable("t") {
sql(
s"""CREATE TABLE t USING PARQUET
|OPTIONS (PATH '${dir.toURI}')
|CLUSTERED BY (a) INTO 2 BUCKETS
|AS SELECT 1 AS a, 2 AS b
""".stripMargin
)
val metastoreTable = hiveClient.getTable("default", "t")
assert(!metastoreTable.properties.contains(DATASOURCE_SCHEMA_NUMPARTCOLS))
assert(metastoreTable.properties(DATASOURCE_SCHEMA_NUMBUCKETS).toInt === 2)
assert(metastoreTable.properties(DATASOURCE_SCHEMA_NUMBUCKETCOLS).toInt === 1)
assert(!metastoreTable.properties.contains(DATASOURCE_SCHEMA_NUMSORTCOLS))
checkAnswer(table("t"), Row(1, 2))
}
}
}
test("CTAS: persisted partitioned bucketed data source table") {
withTempPath { dir =>
withTable("t") {
sql(
s"""CREATE TABLE t USING PARQUET
|OPTIONS (PATH '${dir.toURI}')
|PARTITIONED BY (a)
|CLUSTERED BY (b) SORTED BY (c) INTO 2 BUCKETS
|AS SELECT 1 AS a, 2 AS b, 3 AS c
""".stripMargin
)
val metastoreTable = hiveClient.getTable("default", "t")
assert(metastoreTable.properties(DATASOURCE_SCHEMA_NUMPARTCOLS).toInt === 1)
assert(metastoreTable.properties(DATASOURCE_SCHEMA_NUMBUCKETS).toInt === 2)
assert(metastoreTable.properties(DATASOURCE_SCHEMA_NUMBUCKETCOLS).toInt === 1)
assert(metastoreTable.properties(DATASOURCE_SCHEMA_NUMSORTCOLS).toInt === 1)
checkAnswer(table("t"), Row(2, 3, 1))
}
}
}
test("saveAsTable[append]: the column order doesn't matter") {
withTable("saveAsTable_column_order") {
Seq((1, 2)).toDF("i", "j").write.saveAsTable("saveAsTable_column_order")
Seq((3, 4)).toDF("j", "i").write.mode("append").saveAsTable("saveAsTable_column_order")
checkAnswer(
table("saveAsTable_column_order"),
Seq((1, 2), (4, 3)).toDF("i", "j"))
}
}
test("saveAsTable[append]: mismatch column names") {
withTable("saveAsTable_mismatch_column_names") {
Seq((1, 2)).toDF("i", "j").write.saveAsTable("saveAsTable_mismatch_column_names")
val e = intercept[AnalysisException] {
Seq((3, 4)).toDF("i", "k")
.write.mode("append").saveAsTable("saveAsTable_mismatch_column_names")
}
assert(e.getMessage.contains("cannot resolve"))
}
}
test("saveAsTable[append]: too many columns") {
withTable("saveAsTable_too_many_columns") {
Seq((1, 2)).toDF("i", "j").write.saveAsTable("saveAsTable_too_many_columns")
val e = intercept[AnalysisException] {
Seq((3, 4, 5)).toDF("i", "j", "k")
.write.mode("append").saveAsTable("saveAsTable_too_many_columns")
}
assert(e.getMessage.contains("doesn't match"))
}
}
test("create a temp view using hive") {
val tableName = "tab1"
withTable(tableName) {
val e = intercept[AnalysisException] {
sql(
s"""
|CREATE TEMPORARY VIEW $tableName
|(col1 int)
|USING hive
""".stripMargin)
}.getMessage
assert(e.contains("Hive data source can only be used with tables, you can't use it with " +
"CREATE TEMP VIEW USING"))
}
}
test("saveAsTable - source and target are the same table") {
val tableName = "tab1"
withTable(tableName) {
Seq((1, 2)).toDF("i", "j").write.saveAsTable(tableName)
table(tableName).write.mode(SaveMode.Append).saveAsTable(tableName)
checkAnswer(table(tableName),
Seq(Row(1, 2), Row(1, 2)))
table(tableName).write.mode(SaveMode.Ignore).saveAsTable(tableName)
checkAnswer(table(tableName),
Seq(Row(1, 2), Row(1, 2)))
var e = intercept[AnalysisException] {
table(tableName).write.mode(SaveMode.Overwrite).saveAsTable(tableName)
}.getMessage
assert(e.contains(s"Cannot overwrite table default.$tableName that is also being read from"))
e = intercept[AnalysisException] {
table(tableName).write.mode(SaveMode.ErrorIfExists).saveAsTable(tableName)
}.getMessage
assert(e.contains(s"Table `$tableName` already exists"))
}
}
test("insertInto - source and target are the same table") {
val tableName = "tab1"
withTable(tableName) {
Seq((1, 2)).toDF("i", "j").write.saveAsTable(tableName)
table(tableName).write.mode(SaveMode.Append).insertInto(tableName)
checkAnswer(
table(tableName),
Seq(Row(1, 2), Row(1, 2)))
table(tableName).write.mode(SaveMode.Ignore).insertInto(tableName)
checkAnswer(
table(tableName),
Seq(Row(1, 2), Row(1, 2), Row(1, 2), Row(1, 2)))
table(tableName).write.mode(SaveMode.ErrorIfExists).insertInto(tableName)
checkAnswer(
table(tableName),
Seq(Row(1, 2), Row(1, 2), Row(1, 2), Row(1, 2), Row(1, 2), Row(1, 2), Row(1, 2), Row(1, 2)))
val e = intercept[AnalysisException] {
table(tableName).write.mode(SaveMode.Overwrite).insertInto(tableName)
}.getMessage
assert(e.contains(s"Cannot overwrite a path that is also being read from"))
}
}
test("saveAsTable[append]: less columns") {
withTable("saveAsTable_less_columns") {
Seq((1, 2)).toDF("i", "j").write.saveAsTable("saveAsTable_less_columns")
val e = intercept[AnalysisException] {
Seq((4)).toDF("j")
.write.mode("append").saveAsTable("saveAsTable_less_columns")
}
assert(e.getMessage.contains("doesn't match"))
}
}
test("SPARK-15025: create datasource table with path with select") {
withTempPath { dir =>
withTable("t") {
sql(
s"""CREATE TABLE t USING PARQUET
|OPTIONS (PATH '${dir.toURI}')
|AS SELECT 1 AS a, 2 AS b, 3 AS c
""".stripMargin
)
sql("insert into t values (2, 3, 4)")
checkAnswer(table("t"), Seq(Row(1, 2, 3), Row(2, 3, 4)))
val catalogTable = hiveClient.getTable("default", "t")
assert(catalogTable.storage.locationUri.isDefined)
}
}
}
test("SPARK-15269 external data source table creation") {
withTempPath { dir =>
val path = dir.toURI.toString
spark.range(1).write.json(path)
withTable("t") {
sql(s"CREATE TABLE t USING json OPTIONS (PATH '$path')")
sql("DROP TABLE t")
sql(s"CREATE TABLE t USING json AS SELECT 1 AS c")
}
}
}
test("read table with corrupted schema") {
try {
val schema = StructType(StructField("int", IntegerType, true) :: Nil)
val hiveTable = CatalogTable(
identifier = TableIdentifier("t", Some("default")),
tableType = CatalogTableType.MANAGED,
schema = HiveExternalCatalog.EMPTY_DATA_SCHEMA,
provider = Some("json"),
storage = CatalogStorageFormat.empty,
properties = Map(
DATASOURCE_PROVIDER -> "json",
// no DATASOURCE_SCHEMA_NUMPARTS
DATASOURCE_SCHEMA_PART_PREFIX + 0 -> schema.json))
hiveClient.createTable(hiveTable, ignoreIfExists = false)
val e = intercept[AnalysisException] {
sharedState.externalCatalog.getTable("default", "t")
}.getMessage
assert(e.contains(s"Could not read schema from the hive metastore because it is corrupted"))
withDebugMode {
val tableMeta = sharedState.externalCatalog.getTable("default", "t")
assert(tableMeta.identifier == TableIdentifier("t", Some("default")))
assert(tableMeta.properties(DATASOURCE_PROVIDER) == "json")
}
} finally {
hiveClient.dropTable("default", "t", ignoreIfNotExists = true, purge = true)
}
}
test("should keep data source entries in table properties when debug mode is on") {
withDebugMode {
val newSession = sparkSession.newSession()
newSession.sql("CREATE TABLE abc(i int) USING json")
val tableMeta = newSession.sessionState.catalog.getTableMetadata(TableIdentifier("abc"))
assert(tableMeta.properties(DATASOURCE_SCHEMA_NUMPARTS).toInt == 1)
assert(tableMeta.properties(DATASOURCE_PROVIDER) == "json")
}
}
test("Infer schema for Hive serde tables") {
val tableName = "tab1"
val avroSchema =
"""{
| "name": "test_record",
| "type": "record",
| "fields": [ {
| "name": "f0",
| "type": "int"
| }]
|}
""".stripMargin
Seq(true, false).foreach { isPartitioned =>
withTable(tableName) {
val partitionClause = if (isPartitioned) "PARTITIONED BY (ds STRING)" else ""
// Creates the (non-)partitioned Avro table
val plan = sql(
s"""
|CREATE TABLE $tableName
|$partitionClause
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
|STORED AS
| INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
| OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
|TBLPROPERTIES ('avro.schema.literal' = '$avroSchema')
""".stripMargin
).queryExecution.analyzed
assert(plan.isInstanceOf[CreateTableCommand] &&
plan.asInstanceOf[CreateTableCommand].table.dataSchema.nonEmpty)
if (isPartitioned) {
sql(s"INSERT OVERWRITE TABLE $tableName partition (ds='a') SELECT 1")
checkAnswer(spark.table(tableName), Row(1, "a"))
} else {
sql(s"INSERT OVERWRITE TABLE $tableName SELECT 1")
checkAnswer(spark.table(tableName), Row(1))
}
}
}
}
Seq("orc", "parquet", "csv", "json", "text").foreach { format =>
test(s"SPARK-22146: read files containing special characters using $format") {
val nameWithSpecialChars = s"sp&cial%chars"
withTempDir { dir =>
val tmpFile = s"$dir/$nameWithSpecialChars"
spark.createDataset(Seq("a", "b")).write.format(format).save(tmpFile)
val fileContent = spark.read.format(format).load(tmpFile)
checkAnswer(fileContent, Seq(Row("a"), Row("b")))
}
}
}
private def withDebugMode(f: => Unit): Unit = {
val previousValue = sparkSession.sparkContext.conf.get(DEBUG_MODE)
try {
sparkSession.sparkContext.conf.set(DEBUG_MODE, true)
f
} finally {
sparkSession.sparkContext.conf.set(DEBUG_MODE, previousValue)
}
}
}
| ron8hu/spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala | Scala | apache-2.0 | 47,782 |
/**
* Original work: Swagger Codegen (https://github.com/swagger-api/swagger-codegen)
* Copyright 2016 Swagger (http://swagger.io)
*
* Derivative work: Swagger Codegen - Play Scala (https://github.com/mohiva/swagger-codegen-play-scala)
* Modifications Copyright 2016 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.swagger.codegen.core
import scala.util.Try
/**
* A successful API response.
*
* @param code The HTTP status code.
* @param content The response content.
* @param headers The response headers.
* @tparam T The type of the response content.
*/
case class ApiResponse[T](code: Int, content: T, headers: Map[String, Seq[String]] = Map.empty)
/**
* An API error.
*
* @param code The HTTP status code.
* @param message The error message.
* @param content The response content.
* @param cause The cause.
* @param headers The response headers.
* @tparam T The type of the response content.
*/
case class ApiError[T](
code: Int,
message: String,
content: Option[T],
cause: Throwable = null,
headers: Map[String, Seq[String]] = Map.empty)
extends RuntimeException(s"($code) $message.${content.map(s => s" Content : $s").getOrElse("")}", cause)
/**
* A helper that can extract headers from a response.
*
* @param headers A list of headers.
*/
case class ApiHeaderExtractor(headers: Map[String, Seq[String]]) {
/**
* Gets a header as string.
*
* @param name The name of the header to return.
* @param index The index of the value to select in the header sequence.
* @return The header value as string.
*/
def asString(name: String, index: Int = 0): Option[String] = headers.get(name).flatMap { seq =>
Try(seq(index)).toOption
}
/**
* Gets a header as int.
*
* @param name The name of the header to return.
* @param index The index of the value to select in the header sequence.
* @return The header value as int.
*/
def asInt(name: String, index: Int = 0): Option[Int] = castedHeader(name, index, java.lang.Integer.parseInt)
/**
* Gets a header as long.
*
* @param name The name of the header to return.
* @param index The index of the value to select in the header sequence.
* @return The header value as long.
*/
def asLong(name: String, index: Int = 0): Option[Long] = castedHeader(name, index, java.lang.Long.parseLong)
/**
* Gets a header as float.
*
* @param name The name of the header to return.
* @param index The index of the value to select in the header sequence.
* @return The header value as float.
*/
def asFloat(name: String, index: Int = 0): Option[Float] = castedHeader(name, index, java.lang.Float.parseFloat)
/**
* Gets a header as double.
*
* @param name The name of the header to return.
* @param index The index of the value to select in the header sequence.
* @return The header value as double.
*/
def asDouble(name: String, index: Int = 0): Option[Double] = castedHeader(name, index, java.lang.Double.parseDouble)
/**
* Gets a header as boolean.
*
* @param name The name of the header to return.
* @param index The index of the value to select in the header sequence.
* @return The header value as boolean.
*/
def asBoolean(name: String, index: Int = 0): Option[Boolean] = castedHeader(name, index, java.lang.Boolean.parseBoolean)
/**
* Tries to cast the header value to the appropriate type.
*
* @param name The name of the header to cast.
* @param index The index of the value to select in the header sequence.
* @param conversion The cast function.
* @return The header value cast by the given conversion.
*/
private def castedHeader[U](name: String, index: Int, conversion: String => U): Option[U] = {
Try {
asString(name, index).map(conversion)
}.get
}
}
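// Illustrative usage sketch (not part of the original file): header names and values are
// hypothetical. Missing headers yield None.
object ApiHeaderExtractorExample {
  val headers = Map("X-Rate-Limit" -> Seq("100"), "Content-Type" -> Seq("application/json"))
  val extractor = ApiHeaderExtractor(headers)
  val limit: Option[Int] = extractor.asInt("X-Rate-Limit")     // Some(100)
  val missing: Option[Long] = extractor.asLong("X-Request-Id") // None
}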
| akkie/swagger-codegen-play-scala | clientstub/src/main/scala/com/mohiva/swagger/codegen/core/ApiResponse.scala | Scala | apache-2.0 | 4,420 |
// scalastyle:off line.size.limit
/*
* Ported by Alistair Johnson from
* https://github.com/gwtproject/gwt/blob/master/user/test/com/google/gwt/emultest/java/math/MathContextTest.java
*/
// scalastyle:on line.size.limit
package org.scalajs.testsuite.javalib.math
import java.math.MathContext
import java.math.RoundingMode
import org.scalajs.jasminetest.JasmineTest
object MathContextTest extends JasmineTest {
describe("MathContextTest") {
it("testMathContextSingleArgConstructor") {
val mc1 = new MathContext("precision=16 roundingMode=CEILING")
expect(mc1.getPrecision == 16).toBeTruthy
expect(mc1.getRoundingMode == RoundingMode.CEILING).toBeTruthy
val mc2 = new MathContext("precision=17 roundingMode=DOWN")
expect(mc2.getPrecision == 17).toBeTruthy
expect(mc2.getRoundingMode == RoundingMode.DOWN).toBeTruthy
val mc3 = new MathContext("precision=18 roundingMode=FLOOR")
expect(mc3.getPrecision == 18).toBeTruthy
expect(mc3.getRoundingMode == RoundingMode.FLOOR).toBeTruthy
val mc4 = new MathContext("precision=19 roundingMode=HALF_DOWN")
expect(mc4.getPrecision == 19).toBeTruthy
expect(mc4.getRoundingMode == RoundingMode.HALF_DOWN).toBeTruthy
val mc5 = new MathContext("precision=20 roundingMode=HALF_EVEN")
expect(mc5.getPrecision == 20).toBeTruthy
expect(mc5.getRoundingMode == RoundingMode.HALF_EVEN).toBeTruthy
val mc6 = new MathContext("precision=21 roundingMode=HALF_UP")
expect(mc6.getPrecision == 21).toBeTruthy
expect(mc6.getRoundingMode == RoundingMode.HALF_UP).toBeTruthy
val mc7 = new MathContext("precision=22 roundingMode=UNNECESSARY")
expect(mc7.getPrecision == 22).toBeTruthy
expect(mc7.getRoundingMode == RoundingMode.UNNECESSARY).toBeTruthy
val mc8 = new MathContext("precision=23 roundingMode=UP")
expect(mc8.getPrecision == 23).toBeTruthy
expect(mc8.getRoundingMode == RoundingMode.UP).toBeTruthy
expect(() => new MathContext("prcision=27 roundingMode=CEILING")).toThrow
expect(() => new MathContext("precision=26 roundingMoe=CEILING")).toThrow
expect(() => new MathContext("precision=25 roundingMode=CEILINGFAN")).toThrow
expect(() => new MathContext("precision=24 roundingMode=HALF")).toThrow
expect(() => new MathContext("precision=23 roundingMode=UPSIDEDOWN")).toThrow
expect(() => new MathContext("precision=22roundingMode=UP")).toThrow
expect(() => new MathContext("")).toThrow
expect(() => new MathContext(null)).toThrow
}
it("testMathContextConstructorEquality") {
val mc1 = new MathContext(16, RoundingMode.CEILING)
val mc1a = new MathContext("precision=16 roundingMode=CEILING")
expect(mc1 == mc1a).toBeTruthy
val mc2 = new MathContext(17, RoundingMode.DOWN)
val mc2a = new MathContext("precision=17 roundingMode=DOWN")
expect(mc2 == mc2a).toBeTruthy
val mc3 = new MathContext(18, RoundingMode.FLOOR)
val mc3a = new MathContext("precision=18 roundingMode=FLOOR")
expect(mc3 == mc3a).toBeTruthy
val mc4 = new MathContext(19, RoundingMode.HALF_DOWN)
val mc4a = new MathContext("precision=19 roundingMode=HALF_DOWN")
expect(mc4 == mc4a).toBeTruthy
val mc5 = new MathContext(20, RoundingMode.HALF_EVEN)
val mc5a = new MathContext("precision=20 roundingMode=HALF_EVEN")
expect(mc5 == mc5a).toBeTruthy
val mc6 = new MathContext(21, RoundingMode.HALF_UP)
val mc6a = new MathContext("precision=21 roundingMode=HALF_UP")
expect(mc6 == mc6a).toBeTruthy
val mc7 = new MathContext(22, RoundingMode.UNNECESSARY)
val mc7a = new MathContext("precision=22 roundingMode=UNNECESSARY")
expect(mc7 == mc7a).toBeTruthy
val mc8 = new MathContext(23, RoundingMode.UP)
val mc8a = new MathContext("precision=23 roundingMode=UP")
expect(mc8 == mc8a).toBeTruthy
}
}
}
|
renyaoxiang/scala-js
|
test-suite/src/test/scala/org/scalajs/testsuite/javalib/math/MathContextTest.scala
|
Scala
|
bsd-3-clause
| 3,945
|
package org.influxdb.scala
/**
* Combines the AsyncHttpClientComponent and Json4sJsonConverterComponent traits
* for a complete configuration of a standalone influxdb client using the asyncHttp client
* and json4s converters.
* See http://jonasboner.com/2008/10/06/real-world-scala-dependency-injection-di/
*/
trait StandaloneConfig extends AsyncHttpClientComponent with Json4sJsonConverterComponent
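// A generic illustration of the "cake pattern" referenced above. The traits below are
// hypothetical and are not part of the influxdb-scala API; they only sketch how a config
// trait selects concrete component implementations by mixing them together.
object CakePatternSketch {
  trait ClockComponent { def now(): Long }
  trait SystemClockComponent extends ClockComponent { def now(): Long = System.currentTimeMillis() }
  trait GreeterComponent { this: ClockComponent =>
    def greet(name: String): String = s"Hello $name, it is ${now()}"
  }
  // Plays the same role as StandaloneConfig: pick one implementation per component.
  trait DemoConfig extends SystemClockComponent with GreeterComponent
  object Demo extends DemoConfig // Demo.greet("world")
}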
|
fsauer65/influxdb-scala
|
standalone/src/main/scala/org/influxdb/scala/StandaloneConfig.scala
|
Scala
|
mit
| 405
|
package simx.components.ai.atn.core
import simx.components.ai.atn.elements.{Atn, Arc, State}
import simx.core.entity.description.SValSet
import simx.core.ontology.Symbols.debug
import simx.core.ontology.{Symbols, types}
import simx.core.worldinterface.eventhandling.Event
import scala.collection.mutable
import scala.util.control.Breaks._
trait Functions {
import simx.components.ai.atn.elements.Atn._
protected def registerState(stateID: Symbol, atnNetwork: mutable.HashMap[State, List[(Arc, State)]]): State = {
atnNetwork.keySet.find(state => state.id == stateID) match {
case None => {
val newState = new State(stateID)
val newList: List[(Arc, State)] = Nil
atnNetwork += (newState -> newList)
newState
}
case Some(state) => {
state
}
}
}
protected def registerArc(arcID: Symbol, arcIndex: mutable.HashMap[Symbol, Arc]): Arc = {
arcIndex.get(arcID) match {
case None => {
val newArc = new Arc(arcID)
arcIndex += (arcID -> newArc)
newArc
}
case Some(arc) => {
arc
}
}
}
// protected def postAtnCreation(network: mutable.HashMap[State, List[(Arc, State)]]) {
// //registerForAllArcs(registerTime _ :: Nil, network)
// prepareMergeSplitConditions(network)
// }
// private def registerForAllArcs(funcs: List[((Event, ArcRep, StateRep, StateRep, ATNMachine) => List[Event])], network: mutable.HashMap[State, List[(Arc, State)]]){
// network.foreach(entry => entry._2.foreach(trans => funcs.foreach(func => trans._1.functions ::= func)))
// }
// private def prepareMergeSplitConditions(network: mutable.HashMap[State, List[(Arc, State)]]) {
// network.keySet.foreach(state => {
// if(state.isSplit){
// var splitArc: Option[Arc] = None
// var outgoingArcs: List[Arc] = Nil
// //var conditions: List[(Event, StateRep, StateRep, ATNMachine) => Condition.Result] = Nil
// // adding the split condition from the split state to every arc
// val(foundMerge, foundStates, foundArcs, recentArcs) = findMergeTo(state, network)
// foundMerge.collect{case merge => {
// foundArcs.foreach(arc => {
// arc.conditions = state.conditions ::: arc.conditions
// arc.functions ::= registerTime
// })
// val mergeArc = network(merge).head._1
// mergeArc.conditions = isSuccessfullMerge(recentArcs)_ :: mergeArc.conditions
// mergeArc.conditions = state.conditions ::: mergeArc.conditions
// mergeArc.functions ::= doSuccessfullMerge(network.map(e=> e._1).toList)
// recentArcs.foreach(arc => arc.functions ::= arrivedAtMerge(arc.id))
// }}
//
// // getting conditions from all outgoing arcs and adding them to splitTOArc
// network.get(state).collect{case list =>
// list.foreach(trans => {
// // conditions = trans._1.conditions ::: conditions
// if(trans._1.isHiddenSplit) splitArc = Some(trans._1)
// else {
// outgoingArcs = trans._1 :: outgoingArcs
// trans._1.functions ::= wentFromSplit
// }
// })
// if(debug){
// println("Hidden Split Arc: " + splitArc.map(_.id).getOrElse("not found"))
// println("Outgoing Arcs from SplitState: " + outgoingArcs.map(_.id))
// println("-----------------------------------")
// }
// }
// splitArc.foreach(arc => {
// arc.functions ::= registerTime
// arc.conditions = checkForHiddenSplit(outgoingArcs)_ :: arc.conditions
// })
// //outgoingArcs.foreach(arc => arc.conditions ::= alreadyTriggerdInMerge)
// //conditions = state.conditions ::: conditions
// //splitArc.foreach(arc => arc.conditions = conditions)
// }
// })
// }
protected def frontEndRep(network: mutable.HashMap[State, List[(Arc, State)]]) = {
def createFrontEndArcRep(arc: Arc) = {
new ArcRep(arc.id)
}
def createFrontEndStateRep(state: State, arcReps: List[ArcRep]) = {
new GeneralStateRep(
id = state.id,
register = state.register,
arcs = arcReps
)
}
var frontEndRepresentation: Map[Symbol, GeneralStateRep] = Map()
network.foreach(entry => {
var arcReps: List[ArcRep] = Nil
entry._2.foreach(trans => arcReps = createFrontEndArcRep(trans._1) :: arcReps)
frontEndRepresentation += (entry._1.id -> createFrontEndStateRep(entry._1, arcReps))
})
frontEndRepresentation
}
protected def extractInComingArcs(network: mutable.HashMap[State, List[(Arc, State)]]) = {
val result: mutable.HashMap[State, List[Arc]] = mutable.HashMap()
network.keySet.foreach(state => {
var newList: List[Arc] = Nil
network.values.foreach(list => list.foreach(trans => {
if (trans._2 == state) newList = trans._1 :: newList
}))
result += (state -> newList)
})
result
}
protected def clearRegister(reg: SValSet) {
reg - mergeIdentifier
reg - mergeTimestamp
}
}
|
simulator-x/atn
|
src/simx/components/ai/atn/core/Functions.scala
|
Scala
|
apache-2.0
| 5,249
|
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*************************************************************************************
*/
package com.normation.eventlog
import org.joda.time.DateTime
import org.joda.time.format._
import scala.collection._
import scala.xml._
import java.security.Principal
import com.normation.utils.HashcodeCaching
final case class EventActor(name:String) extends HashcodeCaching
/**
 * A type that describes the category an event belongs to.
*/
trait EventLogCategory
private[eventlog] final case object UnknownLogCategory extends EventLogCategory
/**
 * Defines the event log type that will be serialized;
 * using the event class name minus "EventLog" is OK.
 * It is a PartialFunction so that pattern matching is no longer a bottleneck
 * (a single pattern match with too many cases tends to perform poorly).
*/
trait EventLogType extends PartialFunction[String, EventLogType] {
def serialize : String
override def isDefinedAt(x : String) : Boolean = {
serialize == x
}
def apply(x : String) = this
}
trait EventLogFilter extends PartialFunction[(EventLogType, EventLogDetails) , EventLog] {
/**
* An EventLogType used as identifier for that type of event.
* Must be unique among all events.
* Most of the time, the event class name plus Type is OK.
*/
val eventType : EventLogType
override def isDefinedAt(x : (EventLogType, EventLogDetails)) : Boolean = {
eventType == x._1
}
/**
 * This is used to simply build the event log object from its type and details.
*/
def apply(x : (EventLogType, EventLogDetails)) : EventLog
}
/**
* An EventLog is an object tracing activities on an entity.
* It has an id (generated by the serialisation method), a type, a creation date,
* a principal (the actor doing the action), a cause, a severity (like in syslog) and some raw data
*/
trait EventLog {
def eventDetails : EventLogDetails
def id : Option[Int] = eventDetails.id // autogenerated id, by the serialization system
//event log type is given by the implementation class.
//we only precise the category.
/**
* Big category of the event
*/
def eventLogCategory : EventLogCategory
/**
* An EventLogType used as identifier for that type of event.
* Must be unique among all events.
* Most of the time, the event class name plus Type is OK.
*/
def eventType : EventLogType
def principal : EventActor = eventDetails.principal
def creationDate : DateTime = eventDetails.creationDate
/**
 * When we create the EventLog, it usually doesn't have an id yet, so the cause cannot be set.
 * That's why we have the EventLogTree that holds the hierarchy of EventLogs, the cause being used only when deserializing the object.
*/
def cause : Option[Int] = eventDetails.cause
def severity : Int = eventDetails.severity
/**
* Some more (technical) details about the event, in a semi-structured
* format (XML).
*
* Usually, the rawData will be computed from the fields when serializing,
* and be used to fill the fields when deserializing
*/
def details : NodeSeq = eventDetails.details
/**
* Return a copy of the object with the cause set to given Id
*/
def copySetCause(causeId:Int) : EventLog
}
/**
 * The unspecialized Event Log. Used as a container when deserializing data, to be specialized later by the EventLogSpecializers.
*/
case class UnspecializedEventLog(
override val eventDetails : EventLogDetails
) extends EventLog with HashcodeCaching {
override val eventType = UnspecializedEventLog.eventType
override val eventLogCategory = UnknownLogCategory
override def copySetCause(causeId:Int) = this.copy(eventDetails.copy(cause = Some(causeId)))
}
object UnspecializedEventLog extends EventLogFilter {
override val eventType = UnknownEventLogType
override def apply(x : (EventLogType, EventLogDetails)) : UnspecializedEventLog = UnspecializedEventLog(x._2)
}
object EventLog {
def withContent(nodes:NodeSeq) = <entry>{nodes}</entry>
val emptyDetails = withContent(NodeSeq.Empty)
}
case object UnknownEventLogType extends EventLogType {
def serialize = "UnknownType"
}
/**
* This case class holds all the important information
* about the EventLog
*/
final case class EventLogDetails(
val id : Option[Int] = None
, val principal : EventActor
, val creationDate : DateTime = DateTime.now()
, val cause : Option[Int] = None
, val severity : Int = 100
, val reason : Option[String]
, val details : NodeSeq
) extends HashcodeCaching
|
fanf/rudder-commons
|
eventlog-api/src/main/scala/com/normation/eventlog/EventLog.scala
|
Scala
|
apache-2.0
| 5,243
|
package chapter.eighteen
object ExerciseFive extends App {
}
|
deekim/impatient-scala
|
src/main/scala/chapter/eighteen/ExerciseFive.scala
|
Scala
|
apache-2.0
| 64
|
package saveyourstack
import scalaz._
import Scalaz._
import org.joda.time._
import scala.annotation.tailrec
object Dates {
def generateDates(sd: DateTime, ed: DateTime): List[DateTime] = {
if (sd isBefore ed)
sd :: generateDates(sd.plusDays(1), ed)
else
List(sd)
}
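  // Note that generateDates above is not tail recursive (the `::` happens after the
  // recursive call returns), so a long enough date range will overflow the stack.
  // generateDatesSafe below moves the recursion into tail position with an accumulator
  // and could be annotated with @tailrec.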
def generateDatesSafe(sd: DateTime, ed: DateTime): List[DateTime] = {
def rec(s: DateTime, l: List[DateTime]): List[DateTime] =
if (s isBefore ed)
rec(s.plusDays(1), s :: l)
else
l
rec(sd, List())
}
}
object MyTrampoline {
implicit val m: Monad[VTrampoline] = new Monad[VTrampoline] {
def point[A](a: => A): VTrampoline[A] = NoMore(a)
def bind[A,B](vt: VTrampoline[A])(f: A => VTrampoline[B]): VTrampoline[B] = vt.flatMap(f)
}
case class More[A,B](a: () => VTrampoline[A], f: A => VTrampoline[B]) extends VTrampoline[B]
case class NoMore[A](a: A) extends VTrampoline[A]
trait VTrampoline[A] {
def flatMap[B](f: A => VTrampoline[B]): VTrampoline[B] =
More(() => this, f)
@tailrec
final def gogo(): A = this match {
case NoMore(a) => a
case More(ta, f) => ta() match {
case NoMore(a) => f(a).gogo
case More(tb, ff) =>
tb().flatMap(b => ff(b).flatMap(f)).gogo
}
}
}
}
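// A minimal sketch (not part of the original talk code): left-nesting many flatMaps only
// allocates More(...) nodes, and gogo() unwinds them iteratively, so the deep chain below
// finishes without a StackOverflowError.
object MyTrampolineExample extends App {
  import MyTrampoline._
  val deep: VTrampoline[Int] =
    (1 to 100000).foldLeft(NoMore(0): VTrampoline[Int]) { (acc, _) =>
      acc.flatMap(n => NoMore(n + 1))
    }
  println(deep.gogo()) // 100000
}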
object FileSystemStuff {
sealed trait FS
case class File(s: String) extends FS
case class Directory(s: String, l: List[FS]) extends FS {
override def toString(): String =
s + " children size = " + l.size
}
def generateFakeFiles(h: Int, w: Int): FS = {
def rec(h: Int): FS = h match {
case 0 => Directory(h.toString, (0 to w).map(i => File(i.toString)).toList) //we're done
case 1 => Directory(h.toString, (0 to w).map(_ => rec(h-1)).toList)
case _ => Directory(h.toString, List(rec(h-1)))
}
rec(h)
}
import scalaz.Free._
def generateDeepFakeFilesTrampolined(h: Int, w: Int): FS = {
def rec(h: Int): Trampoline[FS] = h match {
case 0 => Trampoline.done(Directory(h.toString, (0 to w).map(i => File("filefile")).toList))
case 1 => (0 to w)
.map(_ => rec(h -1))
.toList
.sequence //sequence goes from F[G[A]] to G[F[A]], so in this case a List[Trampoline[FS]] to Trampoline[List[FS]]
.map(l => Directory(h.toString, l)) //map on the Trampoline to get access to the thunked recursive call
case _ => rec(h-1).map(n => Directory(h.toString, List(n)))
}
rec(h).run
}
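  // findDepth and countEntries below avoid deep call stacks by carrying an explicit work
  // list of the remaining nodes, so every recursive call stays in tail position even for
  // arbitrarily deep directory trees.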
def findDepth(f: FS): Int = {
def rec(files: List[(Int,FS)], s: Set[Int]): Int = files match {
case (ctr, Directory(n, subfiles)) :: tail => rec(subfiles.map(f => (ctr+1,f)) ::: tail, s + ctr)
case (ctr, File(n)) :: tail => rec(tail, s + ctr)
case _ => s.max
}
rec(List((0,f)), Set())
}
def countEntries(f: FS): Int = {
def rec(ctr: Int, files: List[FS]): Int = files match {
case Directory(n, subfiles) :: tail => rec(ctr+1, subfiles ::: tail)
case File(n) :: tail => rec(ctr, tail)
case _ => ctr
}
rec(0,List(f))
}
}
object MonadTransformerProblems {
import MyTrampoline._
def handleFiles(): Unit = {
val a = (0 to 10000).map(ii => State[Int,Int](i => (i,ii)) ).foldLeft( State[Int,Int](i => (i,0)) )( (s,a) => s.flatMap(i => a.map(ii => (ii+i) )))
val b = (0 to 10000).map(ii => StateT[Free.Trampoline,Int,Int](i => Trampoline.done((i,ii))) ).foldLeft( StateT[Free.Trampoline, Int,Int](i => Trampoline.done((i,0))) )( (s,a) => s.flatMap(i => a.map(ii => (ii+i) )))
val c = (0 to 10000).map(ii => JDState[Free.Trampoline,Int,Int](Trampoline.done((i:Int) => Trampoline.done((i,ii)))) ).foldLeft( JDState[Free.Trampoline, Int,Int](Trampoline.done(i => Trampoline.done((i,0)))) )( (s,a) => s.flatMap(i => a))
val e = (0 to 10000).map(ii => liftToJDVT(ii) ).foldLeft( liftToJDVT(1) )( (s,a) => s.flatMap(i => a))
//a and b will fail. c won't
val res = c.sf.map(i => i(0)).join.run //(0).run //weird to pull out a bit
val d = (0 to 10000).map(ii => State[Int,Int](i => (i,ii)).liftF ).foldLeft( State[Int,Int](i => (i,0)).liftF )( (s,a) => s.flatMap(i => a.map(ii => (ii+i) )))
val otherResult = d.foldRun(0)( (a,b) => b(a))
(0 to 10000).map(ii => StateT[Free.Trampoline,Int,Int](i => Trampoline.done((i,ii))) ).foldLeft( StateT[Free.Trampoline, Int,Int](i => Trampoline.done((i,0))) )( (s,a) => s.flatMap(i => a.map(ii => (ii+i) )))
}
def liftToJDVT[A](a: A): JDState[VTrampoline, A, A] = JDState(NoMore(((aa:A) => NoMore((aa,a)))))
  // John De Goes had a great idea: if F[_] is stack safe and we do all our binding *before* we pass a lambda to the state constructor, we're OK
case class JDState[F[_], S, A](sf: F[S => F[(S,A)]]) {
def flatMap[B](f: A => JDState[F, S, B])(implicit M: Monad[F]): JDState[F, S, B] =
JDState[F, S, B](((s1: S) => {
sf.flatMap(sfa => {
println("sfa = " + sfa)
sfa(s1).flatMap(t =>
f(t._2).sf.flatMap(z => z(s1))
)
})
}).point[F])
}
}
|
bwmcadams/lambdaconf-2015
|
speakers/vmarquez/src/main/scala/saveyourstack/Stack.scala
|
Scala
|
artistic-2.0
| 5,151
|
package com.td.dedupe
import java.io.File
import java.awt.image.BufferedImage
import java.awt.Color
import javax.imageio.ImageIO
object ImageDeDupe {
def main(args: Array[String]) {
println("Begin Scanning given directories")
val files = args flatMap {
dirName => new File(dirName) match {
case file: File => if (file.exists && file.isDirectory) file.listFiles() else List.empty
}
}
val imageMap = files.map {
file => println("Reading: " + file.getAbsolutePath);
(file.getAbsolutePath, getLuminance(resizeImage(file.getAbsolutePath, 32, 32)))
}
println("Comparing Images")
imageMap.foreach {
file1 =>
imageMap.splitAt(imageMap.indexOf(file1))._2.foreach {
file2 => if (file1._1 != file2._1 && compareImages(file1._2, file2._2))
println("Duplicate: " + file1._1 + " and " + file2._1)
}
}
println("Done")
}
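  /** Compares two luminance vectors via Pearson correlation; images whose correlation
    * exceeds 0.95 are reported as duplicates. */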
def compareImages(file1: Seq[Double], file2: Seq[Double]): Boolean = {
val mean1 = file1.sum / file1.size
val mean2 = file2.sum / file2.size
val denom = Math.sqrt(file1.map(x => Math.pow((x - mean1), 2)).sum * file2.map(x => Math.pow((x - mean2), 2)).sum);
file1.zip(file2).map {
case (lum1: Double, lum2: Double) => (lum1 - mean1) * (lum2 - mean2)
}.sum / denom > 0.95
}
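  /** Per-pixel luma using the Rec. 601 weights: 0.299 R + 0.587 G + 0.114 B. */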
def getLuminance(image: BufferedImage): Seq[Double] = {
(for (i <- 0 to image.getHeight - 1; j <- 0 to (image.getWidth - 1)) yield {
val c = new Color(image.getRGB(i, j));
0.299 * c.getRed + 0.587 * c.getGreen + 0.114 * c.getBlue
}).toList
}
def resizeImage(path: String, width: Integer, height: Integer): BufferedImage = {
val originalImage = ImageIO.read(new File(path));
val resizedImage = new BufferedImage(width, height, BufferedImage.TYPE_INT_ARGB);
val g = resizedImage.createGraphics();
g.drawImage(originalImage, 0, 0, width, height, null);
g.dispose();
resizedImage;
}
}
|
tonyd3/image-de-dupe
|
src/main/scala/com/td/dedupe/ImageDeDupe.scala
|
Scala
|
mit
| 1,967
|
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.2
* @date Sun Dec 28 21:52:38 EST 2014
* @see LICENSE (MIT style license file).
*/
// FIX: needs improved optimization
package scalation.analytics.classifier
import scala.math.{exp, log}
import scalation.analytics.LogisticFunction
import scalation.linalgebra.{MatrixD, VectorD, VectoD, VectorI}
import scalation.minima.QuasiNewton
import scalation.plot.Plot
import LogisticFunction.sigmoid
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `LogisticRegression` class supports (binomial) logistic regression. In this
* case, 'x' may be multi-dimensional '[1, x_1, ... x_k]'. Fit the parameter
* vector 'b' in the logistic regression equation
* <p>
* y = b dot x + e = b_0 + b_1 * x_1 + ... b_k * x_k + e
* <p>
* where 'e' represents the residuals (the part not explained by the model)
* and 'y' is now binary.
* @see see.stanford.edu/materials/lsoeldsee263/05-ls.pdf
* @param x the input/design matrix augmented with a first column of ones
* @param y the binary response vector, y_i in {0, 1}
* @param fn the names for all factors
* @param cn the names for both classes
*/
class LogisticRegression (x: MatrixD, y: VectorI, fn: Array [String], cn: Array [String] = Array ("no", "yes"))
extends ClassifierReal (x, y, fn, 2, cn)
{
if (y != null && x.dim1 != y.dim) flaw ("constructor", "dimensions of x and y are incompatible")
private val DEBUG = false // debug flag
private val k = x.dim2 - 1 // number of variables
private val r_df = (n-1.0) / (n-k-1.0) // ratio of degrees of freedom
private var b: VectorD = null // parameter vector (b_0, b_1, ... b_k)
private var n_dev = -1.0 // null dev: -2LL, for null model (intercept only)
private var r_dev = -1.0 // residual dev: -2LL, for full model
private var aic = -1.0 // Akaike’s Information Criterion
    private var pseudo_rSq = -1.0 // McFadden's pseudo R-squared
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** For a given parameter vector 'b', compute '-2 * Log-Likelihood (-2LL)'.
* '-2LL' is the standard measure that follows a Chi-Square distribution.
* @see www.stat.cmu.edu/~cshalizi/350/lectures/26/lecture-26.pdf
* @see www.statisticalhorizons.com/wp-content/uploads/Allison.StatComp.pdf
* @param b the parameters to fit
*/
def ll (b: VectorD): Double =
{
var sum = 0.0
for (i <- 0 until x.dim1) {
val bx = b dot x(i)
// sum += y(i) * bx - log (1.0 + exp (bx))
sum += y(i) * bx - bx - log (exp (-bx) + 1.0) // less prone to overflow (infinity)
} // for
-2.0 * sum // set up for minimization
} // ll
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** For a given parameter vector 'b = [b(0)]', compute '-2 * Log-Likelihood (-2LL)'.
* '-2LL' is the standard measure that follows a Chi-Square distribution.
* @see www.stat.cmu.edu/~cshalizi/350/lectures/26/lecture-26.pdf
* @see www.statisticalhorizons.com/wp-content/uploads/Allison.StatComp.pdf
* @param b the parameters to fit
*/
def ll_null (b: VectorD): Double =
{
var sum = 0.0
val bx = b(0) // only use the intercept
for (i <- 0 until x.dim1) {
// sum += y(i) * bx - log (1.0 + exp (bx))
sum += y(i) * bx - bx - log (exp (-bx) + 1.0) // less prone to overflow (infinity)
} // for
-2.0 * sum // set up for minimization
} // ll_null
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** For the full model, train the classifier by fitting the parameter vector
* (b-vector) in the logistic regression equation using maximum likelihood.
* Do this by minimizing '-2LL'.
* FIX: Use improved BFGS implementation or IRWLS
* @see stats.stackexchange.com/questions/81000/calculate-coefficients-in-a-logistic-regression-with-r
* @see en.wikipedia.org/wiki/Iteratively_reweighted_least_squares
* @param testStart starting index of test region (inclusive) used in cross-validation.
* @param testEnd ending index of test region (exclusive) used in cross-validation.
*/
def train (testStart: Int, testEnd: Int) // FIX - use these parameters
{
val b0 = new VectorD (x.dim2) // use b_0 = 0 for starting guess for parameters
val bfgs = new QuasiNewton (ll) // minimizer for -2LL
b = bfgs.solve (b0) // find optimal solution for parameters
r_dev = ll (b) // measure of fitness for full model
aic = r_dev + 2.0 * x.dim2
} // train
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** For the null model, train the classifier by fitting the parameter vector
* (b-vector) in the logistic regression equation using maximum likelihood.
* Do this by minimizing -2LL.
*/
def train_null ()
{
val b0 = new VectorD (x.dim2) // use b0 = 0 for starting guess for parameters
val bfgs = new QuasiNewton (ll_null) // minimizer for -2LL
val b_n = bfgs.solve (b0) // find optimal solution for parameters
n_dev = ll_null (b_n) // measure of fitness for null model
} // train_null
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the fit (parameter vector b, quality of fit). Assumes both
* train_null and train have already been called.
*/
def fit: Tuple5 [VectorD, Double, Double, Double, Double] =
{
pseudo_rSq = 1.0 - r_dev / n_dev
(b, n_dev, r_dev, aic, pseudo_rSq)
} // fit
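    // Worked check against the figures quoted in LogisticRegressionTest below:
    // with n_dev = 43.860 and r_dev = 25.533, pseudo_rSq = 1 - 25.533/43.860 ~ 0.4178,
    // and with x.dim2 = 2, aic = 25.533 + 2*2 = 29.533.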
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Classify the value of y = f(z) by evaluating the formula y = sigmoid (b dot z).
* Return the best class, its name and FIX.
* @param z the new vector to classify
*/
def classify (z: VectoD): (Int, String, Double) =
{
val c = if (sigmoid (b dot z) > 0.5) 1 else 0
(c, cn(c), -1.0) // Fix - need metric
} // classify
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Classify the value of 'y = f(z)' by evaluating the formula 'y = sigmoid (b dot z)',
* for an integer vector.
* @param z the new integer vector to classify
*/
// def classify (z: VectorI): (Int, String, Double) = classify (z.toDouble)
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Reset or re-initialize the frequency tables and the probability tables.
*/
def reset ()
{
// FIX: to be implemented
} // reset
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Perform backward elimination to remove the least predictive variable
* from the model, returning the variable to eliminate, the new parameter
* vector, the new R-squared value and the new F statistic.
* FIX or remove
*/
// def backElim (): Tuple4 [Int, VectorD, Double, Double] =
// {
// var j_max = -1 // index of variable to eliminate
// var b_max: VectorD = null // parameter values for best solution
// var rSq_max = -1.0 // currently maximizing R squared
// var fS_max = -1.0 // could optimize on F statistic
//
// for (j <- 1 to k) {
// val keep = m // i-value large enough to not exclude any rows in slice
// val rg_j = new LogisticRegression (x.sliceExclude (keep, j), y) // regress with x_j removed
// rg_j.train ()
// val (b, rSq, fS, rBar) = rg_j.fit
// if (rSq > rSq_max) { j_max = j; b_max = b; rSq_max = rSq; fS_max = fS}
// } // for
// (j_max, b_max, rSq_max, fS_max)
// } // backElim
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the Variance Inflation Factor (VIF) for each variable to test
* for multi-collinearity by regressing 'xj' against the rest of the variables.
* A VIF over 10 indicates that over 90% of the variance of 'xj' can be predicted
* from the other variables, so 'xj' is a candidate for removal from the model.
* FIX or remove
*/
// def vif: VectorD =
// {
// val vifV = new VectorD (k) // VIF vector
// for (j <- 1 to k) {
// val keep = m // i-value large enough to not exclude any rows in slice
// val x_j = x.col(j) // x_j is jth column in x
// val rg_j = new LogisticRegression (x.sliceExclude (keep, j), x_j) // regress with x_j removed
// rg_j.train ()
// vifV(j-1) = 1.0 / (1.0 - rg_j.fit._2) // store vif for x_1 in vifV(0)
// } // for
// vifV
// } // vif
} // LogisticRegression class
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `LogisticRegressionTest` object tests the `LogisticRegression` class.
* @see www.cookbook-r.com/Statistical_analysis/Logistic_regression/
* Answer: b = (-8.8331, 0.4304),
 * n_dev = 43.860, r_dev = 25.533, aic = 29.533, pseudo_rSq = 0.4178
* > run-main scalation.analytics.classifier.LogisticRegressionTest
*/
object LogisticRegressionTest extends App
{
// 32 data points: One Mpg
val x = new MatrixD ((32, 2), 1.0, 21.0, // 1 - Mazda RX4
1.0, 21.0, // 2 - Mazda RX4 Wa
1.0, 22.8, // 3 - Datsun 710
1.0, 21.4, // 4 - Hornet 4 Drive
1.0, 18.7, // 5 - Hornet Sportabout
1.0, 18.1, // 6 - Valiant
1.0, 14.3, // 7 - Duster 360
1.0, 24.4, // 8 - Merc 240D
1.0, 22.8, // 9 - Merc 230
1.0, 19.2, // 10 - Merc 280
1.0, 17.8, // 11 - Merc 280C
1.0, 16.4, // 12 - Merc 450S
1.0, 17.3, // 13 - Merc 450SL
1.0, 15.2, // 14 - Merc 450SLC
1.0, 10.4, // 15 - Cadillac Fleetwood
1.0, 10.4, // 16 - Lincoln Continental
1.0, 14.7, // 17 - Chrysler Imperial
1.0, 32.4, // 18 - Fiat 128
1.0, 30.4, // 19 - Honda Civic
1.0, 33.9, // 20 - Toyota Corolla
1.0, 21.5, // 21 - Toyota Corona
1.0, 15.5, // 22 - Dodge Challenger
1.0, 15.2, // 23 - AMC Javelin
1.0, 13.3, // 24 - Camaro Z28
1.0, 19.2, // 25 - Pontiac Firebird
1.0, 27.3, // 26 - Fiat X1-9
1.0, 26.0, // 27 - Porsche 914-2
1.0, 30.4, // 28 - Lotus Europa
1.0, 15.8, // 29 - Ford Pantera L
1.0, 19.7, // 30 - Ferrari Dino
1.0, 15.0, // 31 - Maserati Bora
1.0, 21.4) // 32 - Volvo 142E
val y = VectorI (0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0,
0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1)
var z: VectorD = null
println ("x = " + x)
println ("y = " + y)
val fn = Array ("One", "Mpg")
val rg = new LogisticRegression (x, y, fn)
rg.train_null () // train based on null model
rg.train () // train based on full model
val res = rg.fit // obtain results
println ("---------------------------------------------------------------")
println ("Logistic Regression Results")
println ("b = " + res._1)
println ("n_dev = " + res._2)
println ("r_dev = " + res._3)
println ("aic = " + res._4)
println ("pseudo_rSq = " + res._5)
z = VectorD (1.0, 15.0) // classify point z
println ("classify (" + z + ") = " + rg.classify (z))
z = VectorD (1.0, 30.0) // classify point z
println ("classify (" + z + ") = " + rg.classify (z))
} // LogisticRegressionTest object
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `LogisticRegressionTest2` object tests the `LogisticRegression` class.
* @see statmaster.sdu.dk/courses/st111/module03/index.html
* @see www.stat.wisc.edu/~mchung/teaching/.../GLM.logistic.Rpackage.pdf
 * > run-main scalation.analytics.classifier.LogisticRegressionTest2
*/
object LogisticRegressionTest2 extends App
{
// 40 data points: One Low Medium High
val x = new MatrixD ((40, 4), 1.0, 102.0, 89.0, 0.0,
1.0, 7.0, 233.0, 1.0,
1.0, 0.0, 4.0, 41.0,
1.0, 8.0, 37.0, 13.0,
1.0, 40.0, 79.0, 26.0,
1.0, 0.0, 625.0, 156.0,
1.0, 0.0, 12.0, 79.0,
1.0, 0.0, 3.0, 119.0,
1.0, 115.0, 136.0, 65.0,
1.0, 428.0, 416.0, 435.0,
1.0, 34.0, 174.0, 56.0,
1.0, 0.0, 0.0, 37.0,
1.0, 97.0, 162.0, 89.0,
1.0, 56.0, 47.0, 132.0,
1.0, 1214.0, 1515.0, 324.0,
1.0, 30.0, 103.0, 161.0,
1.0, 8.0, 11.0, 158.0,
1.0, 52.0, 155.0, 144.0,
1.0, 142.0, 119.0, 24.0,
1.0, 1370.0, 2968.0, 1083.0,
1.0, 790.0, 161.0, 231.0,
1.0, 1142.0, 157.0, 131.0,
1.0, 0.0, 2.0, 49.0,
1.0, 0.0, 0.0, 50.0,
1.0, 5.0, 68.0, 49.0,
1.0, 0.0, 0.0, 48.0,
1.0, 0.0, 6.0, 40.0,
1.0, 1.0, 8.0, 64.0,
1.0, 0.0, 998.0, 551.0,
1.0, 253.0, 99.0, 60.0,
1.0, 1395.0, 799.0, 244.0,
1.0, 0.0, 0.0, 50.0,
1.0, 1.0, 68.0, 145.0,
1.0, 1318.0, 1724.0, 331.0,
1.0, 0.0, 0.0, 79.0,
1.0, 3.0, 31.0, 37.0,
1.0, 195.0, 108.0, 206.0,
1.0, 0.0, 15.0, 121.0,
1.0, 0.0, 278.0, 513.0,
1.0, 0.0, 0.0, 253.0)
val y = VectorI (0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1,
1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1)
val fn = Array ("One", "Low", "Medium", "High")
val cn = Array ("no", "yes")
println ("x = " + x)
println ("y = " + y)
// val rg = new LogisticRegression (x(0 until x.dim1, 0 until 2), y, cn)
val rg = new LogisticRegression (x, y, fn, cn)
rg.train_null () // train based on null model
rg.train () // train based on full model
val res = rg.fit // obtain results
println ("---------------------------------------------------------------")
println ("Logistic Regression Results")
println ("b = " + res._1)
println ("n_dev = " + res._2)
println ("r_dev = " + res._3)
println ("aic = " + res._4)
println ("pseudo_rSq = " + res._5)
val z = VectorD (1.0, 100.0, 100.0, 100.0) // classify point z
println ("classify (" + z + ") = " + rg.classify (z))
// new Plot (x.col(1), y, yyp)
// new Plot (x.col(2), y, yyp)
} // LogisticRegressionTest2 object
|
NBKlepp/fda
|
scalation_1.2/src/main/scala/scalation/analytics/classifier/LogisticRegression.scala
|
Scala
|
mit
| 17,881
|
package com.github.al.roulette.load.impl
import java.time.Duration
import java.util.UUID
import java.util.concurrent._
import java.util.concurrent.atomic.AtomicInteger
import akka.actor.Scheduler
import akka.http.javadsl.model.headers
import akka.stream.scaladsl.{Flow, Source}
import akka.{Done, NotUsed}
import com.github.al.roulette.bet.api.{Bet, BetService}
import com.github.al.roulette.game.api.{Game, GameEvent, GameId, GameService}
import com.github.al.roulette.load.api.{LoadTestParameters, LoadTestService}
import com.github.al.roulette.load.impl.FutureExtension._
import com.github.al.roulette.player.api._
import com.github.al.roulette.winnings.api.WinningsService
import com.github.al.roulette.{bet, game}
import com.lightbend.lagom.scaladsl.api.ServiceCall
import com.lightbend.lagom.scaladsl.pubsub.{PubSubRegistry, TopicId}
import com.typesafe.scalalogging.LazyLogging
import scala.concurrent.duration.DurationDouble
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.language.{implicitConversions, postfixOps}
import scala.util.{Random, Try}
class LoadTestServiceImpl(gameService: GameService, betService: BetService,
playerService: PlayerService, winningsService: WinningsService,
pubSubRegistry: PubSubRegistry, scheduler: Scheduler)
(implicit executionContext: ExecutionContext)
extends LoadTestService with LazyLogging {
private final val GameDefaultDuration = Duration.ofSeconds(10)
private lazy val loadTestEventsTopic = pubSubRegistry.refFor(TopicId[LoadTestEvent])
private lazy val throttlingAccumulator = ThrottlingAccumulator(scheduler, logMessage)
private def logMessage: String => Unit = msg => {
loadTestEventsTopic.publish(LoadTestEvent(msg))
logger.info(msg)
}
private final val PlayersCounter = new AtomicInteger(0)
private final val GamesCounter = new AtomicInteger(0)
private final val GamesToPlay = new ArrayBlockingQueue[UUID](50)
private final val FinishedGames = ConcurrentHashMap.newKeySet[UUID]()
gameService.gameEvents.subscribe.atLeastOnce(Flow[GameEvent].map {
case e: game.api.GameStarted => GamesToPlay.add(e.gameId); Done
case e: game.api.GameFinished => FinishedGames.add(e.gameId); Done
case _ => Done
})
override def startLoadTest: ServiceCall[LoadTestParameters, Source[String, NotUsed]] = {
GamesToPlay.clear()
FinishedGames.clear()
ServiceCall { parameters =>
scheduler.scheduleOnce(1 second)(startLoadTest(parameters))
Future.successful(loadTestEventsTopic.subscriber.map(_.msg))
}
}
private def startLoadTest(parameters: LoadTestParameters): Unit = {
loadTestEventsTopic.publish(LoadTestEvent(s"Load test started with next parameters: $parameters"))
val playerIds: IndexedSeq[(String, Future[PlayerId])] = createPlayers(parameters.numberOfPlayers)
val playerIdsWithAccessToken: Future[IndexedSeq[(PlayerId, PlayerAccessToken)]] = loginPlayers(playerIds)
val gameIds: Future[IndexedSeq[GameId]] = createGames(parameters.numberOfConcurrentGames)
startPlacingBets(playerIdsWithAccessToken, parameters.numberOfBetsToPlace)
}
private def loginPlayers(playerIdsFuturesSequence: IndexedSeq[(String, Future[PlayerId])]) = {
val playerIdToAccessTokenSequenceOfFutureTries = for {
(playerName, playerIdFuture) <- playerIdsFuturesSequence
accessTokenFuture = playerService.login.invoke(PlayerCredentials(playerName))
playerIdToAccessToken = for {playerId <- playerIdFuture; accessToken <- accessTokenFuture} yield playerId -> accessToken
} yield playerIdToAccessToken.toFutureTry
val playerIdToAccessTokenFutureTriesSequence = Future.sequence(playerIdToAccessTokenSequenceOfFutureTries)
val playerIdToAccessTokenFutureSequence = playerIdToAccessTokenFutureTriesSequence.getSuccessfulFutures(enqueueMsg("Successfully created and logged in a user"))
playerIdToAccessTokenFutureTriesSequence.forAllFailureFutures(msg => enqueueMsg(s"Failed to create and login a user:$msg"))
playerIdToAccessTokenFutureSequence
}
private def createPlayers(numberOfPlayers: Int): IndexedSeq[(String, Future[PlayerId])] = {
val playerIdsFuturesSequence = PlayersCounter.get() until numberOfPlayers map {
playerName => s"$playerName" -> playerService.registerPlayer.invoke(Player(s"$playerName"))
}
playerIdsFuturesSequence
}
private def createGames(numberOfConcurrentGames: Int) = {
val gameIdsSequenceOfFutureTries = for {
gameName <- GamesCounter.get() until GamesCounter.addAndGet(numberOfConcurrentGames)
gameIdFuture = gameService.createGame.invoke(Game(s"$gameName", GameDefaultDuration))
} yield gameIdFuture.toFutureTry
val gameIdsFutureTriesSequence = Future.sequence(gameIdsSequenceOfFutureTries)
val gameIdsFutureSequence = gameIdsFutureTriesSequence.getSuccessfulFutures(enqueueMsg("Successfully created a game"))
gameIdsFutureTriesSequence.forAllFailureFutures(msg => enqueueMsg(s"Failed to create a game:$msg"))
gameIdsFutureSequence
}
private def startPlacingBets(playerIdToAccessTokenFutureSequence: Future[IndexedSeq[(PlayerId, PlayerAccessToken)]],
numberOfBetsToPlace: Int) = {
val betsFuture = for {
playerIdsToAccessTokensSequence <- playerIdToAccessTokenFutureSequence
bets <- placeBets(playerIdsToAccessTokensSequence, numberOfBetsToPlace)
} yield bets
betsFuture.forAllFailureFutures(msg => enqueueMsg(s"Failed to put a bet:$msg"))
Await.ready(betsFuture.getSuccessfulFutures(enqueueMsg("Bet has been successfully put")), 30 seconds)
}
private def placeBets(playerIdsToAccessTokens: Seq[(PlayerId, PlayerAccessToken)], numberOfBets: Int): Future[IndexedSeq[Try[NotUsed]]] = {
def pollForNextNotPlayedGame: Option[UUID] =
Try(GamesToPlay.poll(15, TimeUnit.SECONDS)).toOption match {
case option@Some(gameId) => if (!FinishedGames.contains(gameId)) option else pollForNextNotPlayedGame
case none => none
}
val bets = for {
_ <- 0 to numberOfBets
gameIdOption = pollForNextNotPlayedGame
(playerId, accessToken) = playerIdsToAccessTokens(Random.nextInt(playerIdsToAccessTokens.length))
bet = gameIdOption match {
case Some(gameId) =>
val placeBetResult = placeBet(gameId, playerId.playerId, accessToken.token)
if (!FinishedGames.contains(gameId)) GamesToPlay.add(gameId)
placeBetResult
case None => Future.successful(NotUsed)
}
} yield bet.toFutureTry
Future.sequence(bets)
}
private def placeBet(gameId: String, playerId: String, playerAccessToken: String): Future[NotUsed] = {
def randomBet: Bet = {
Random.nextInt(3) match {
case 0 => Bet(Some(Random.nextInt(37)), bet.api.Number, Random.nextInt(100))
case 1 => Bet(None, bet.api.Odd, Random.nextInt(4000))
case 2 => Bet(None, bet.api.Even, Random.nextInt(2500))
}
}
val jwtAuthorizationHeader = headers.Authorization.oauth2(playerAccessToken)
betService
.placeBet(gameId)
.handleRequestHeader(header => header.addHeader(jwtAuthorizationHeader.name(), jwtAuthorizationHeader.value()))
.invoke(randomBet)
}
private def enqueueMsg(msg: String): Unit = throttlingAccumulator.enqueue(msg)
private implicit def stringToUUID(s: String): UUID = UUID.fromString(s)
private implicit def uuidToString(uuid: UUID): String = uuid.toString
}
|
andrei-l/reactive-roulette
|
load-test-impl/src/main/scala/com/github/al/roulette/load/impl/LoadTestServiceImpl.scala
|
Scala
|
mit
| 7,525
|
/*
* Shadowsocks - A shadowsocks client for Android
* Copyright (C) 2014 <max.c.lv@gmail.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
 *                            ___====-_  _-====___
 *                      _--^^^#####//      \\#####^^^--_
 *                   _-^##########// (    ) \\##########^-_
 *                  -############//  |\^^/|  \\############-
 *                _/############//   (@::@)   \\############\_
 *               /#############((     \\//     ))#############\
 *              -###############\\    (oo)    //###############-
 *             -#################\\  / VV \  //#################-
 *            -###################\\/      \//###################-
 *           _#/|##########/\######(   /\   )######/\##########|\#_
 *           |/ |#/\#/\#/\/  \#/\##\  |  |  /##/\#/  \/\#/\#/\#| \|
 *           `  |/  V  V  `   V  \#\| |  | |/#/  V   '  V  V  \|  '
 *              `   `  `      `   / | |  | | \   '      '  '   '
 *                               (  | |  | |  )
 *                              __\ | |  | | /__
 *                             (vvv(VVV)(VVV)vvv)
*
* HERE BE DRAGONS
*
*/
package com.github.shadowsocks.database
import com.j256.ormlite.field.{DataType, DatabaseField}
class Profile {
@DatabaseField(generatedId = true)
var id: Int = 0
@DatabaseField
var name: String = "Untitled"
@DatabaseField
var host: String = ""
@DatabaseField
var localPort: Int = 1080
@DatabaseField
var remotePort: Int = 8388
@DatabaseField
var password: String = ""
@DatabaseField
var method: String = "aes-256-cfb"
@DatabaseField
var route: String = "all"
@DatabaseField
var proxyApps: Boolean = false
@DatabaseField
var bypass: Boolean = false
@DatabaseField
var udpdns: Boolean = false
@DatabaseField
var auth: Boolean = false
@DatabaseField
var ipv6: Boolean = false
@DatabaseField(dataType = DataType.LONG_STRING)
var individual: String = ""
@DatabaseField
var tx: Long = 0
@DatabaseField
var rx: Long = 0
@DatabaseField
val date: java.util.Date = new java.util.Date()
@DatabaseField
var userOrder: Long = _
}
|
magic282/shadowsocks-android
|
src/main/scala/com/github/shadowsocks/database/Profile.scala
|
Scala
|
gpl-3.0
| 2,789
|
package cas.analysis.estimation
import cas.analysis.subject.Subject
import cas.analysis.subject.components.Description
import cas.math.Mathf.sigmoid
import cas.utils.StdImplicits.RightBiasedEither
import cas.utils.UtilAliases.ErrorMsg
class SubjectsClassificator(weights: Array[Double],
loyaltyEstim: StaticLoyaltyEstimator,
relevanceEstim: ContinuousInvRelEstimator,
correctnessEstim: CorrectnessEstimator,
estimWeight: Double = 0.5,
threshold: Double = 0.5) extends ActualityEstimator(new EstimatorConfigs(estimWeight)) {
require(weights.length > 3)
override def estimateActuality(subj: Subject): Either[String, Double] = for {
loyalty <- loyaltyEstim.estimateActuality(subj)
rel <- relevanceEstim.estimateActuality(subj)
corr <- correctnessEstim.estimateActuality(subj)
} yield {
1.0 - sigmoid(weights(0) + weights(1) * loyalty + weights(2) * rel + weights(3) * corr)
}
def predictClass(subj: Subject): Either[ErrorMsg, SubjectClass.Value] = for {
loyalty <- loyaltyEstim.estimateActuality(subj)
rel <- relevanceEstim.estimateActuality(subj)
corr <- correctnessEstim.estimateActuality(subj)
} yield {
SubjectClass.fromBoolean(sigmoid(weights(0) + weights(1) * loyalty + weights(2) * rel + weights(3) * corr) >= threshold)
}
}
object SubjectClass extends Enumeration {
type SubjectClass = Value
val delete = Value("delete")
val stay = Value("stay")
def fromBoolean(clazz: Boolean) = if (clazz) delete else stay
def fromInt(clazz: Int) = if (clazz > 0) delete else stay
def toInt(clazz: SubjectClass) = if (clazz == delete) 1 else 0
}
object ClassificationError extends Enumeration {
import cas.analysis.estimation.SubjectClass.SubjectClass
type ClassificationError = Value
val TP = Value("TP")
val TN = Value("TN")
val FP = Value("FP")
val FN = Value("FN")
val True = Value("True")
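  // "delete" is treated as the positive class: predicting delete for a subject that should
  // stay is a false positive (FP), while missing a subject that should be deleted is a
  // false negative (FN).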
def fromSubjClass(predicted: SubjectClass, actual: SubjectClass) = {
if (predicted == SubjectClass.delete && actual == SubjectClass.stay) FP
else {
if (predicted != actual) FN
else if (actual == SubjectClass.stay) TN else TP
}
}
}
|
bk0606/CAS
|
src/main/scala/cas/analysis/estimation/SubjectsClassificator.scala
|
Scala
|
mit
| 2,263
|
package com.sksamuel.elastic4s.requests.cat
import com.sksamuel.elastic4s.Indexes
import com.sksamuel.elastic4s.requests.common.HealthStatus
import com.sksamuel.exts.OptionImplicits._
trait CatsApi {
def catAliases(): CatAliases = CatAliases()
def catAllocation(): CatAllocation = CatAllocation()
def catCount(): CatCount = CatCount()
def catCount(first: String, rest: String*): CatCount = CatCount(first +: rest)
def catHealth(): CatHealth = CatHealth()
def catIndices(): CatIndexes = CatIndexes(None, None)
def catIndices(health: HealthStatus): CatIndexes = CatIndexes(health.some, None)
def catIndices(indexPattern: String): CatIndexes = CatIndexes(None, indexPattern.some)
def catMaster(): CatMaster = CatMaster()
def catNodes(): CatNodes = CatNodes()
def catPlugins(): CatPlugins = CatPlugins()
def catSegments(indices: Indexes = Indexes.All): CatSegments = CatSegments(indices)
def catShards(): CatShards = CatShards()
def catThreadPool(): CatThreadPool = CatThreadPool()
}
case class CatSegments(indices: Indexes)
case class CatPlugins()
case class CatShards()
case class CatCount(indices: Seq[String] = Nil)
case class CatNodes()
case class CatHealth()
case class CatThreadPool()
case class CatAllocation()
case class CatAliases()
case class CatMaster()
case class CatIndexes(health: Option[HealthStatus], indexPattern: Option[String])
|
stringbean/elastic4s
|
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/cat/CatsApi.scala
|
Scala
|
apache-2.0
| 1,472
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.expressions.utils
import org.apache.flink.api.common.typeinfo.Types
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.table.dataformat.Decimal
import org.apache.flink.table.typeutils.DecimalTypeInfo
import org.apache.flink.table.util.DateTimeTestUtil._
import org.apache.flink.types.Row
abstract class ScalarOperatorsTestBase extends ExpressionTestBase {
override def testData: Row = {
val testData = new Row(18)
testData.setField(0, 1: Byte)
testData.setField(1, 1: Short)
testData.setField(2, 1)
testData.setField(3, 1L)
testData.setField(4, 1.0f)
testData.setField(5, 1.0d)
testData.setField(6, true)
testData.setField(7, 0.0d)
testData.setField(8, 5)
testData.setField(9, 10)
testData.setField(10, "String")
testData.setField(11, false)
testData.setField(12, null)
testData.setField(13, Row.of("foo", null))
testData.setField(14, null)
testData.setField(15, UTCDate("1996-11-10"))
testData.setField(16, Decimal.castFrom("0.00000000", 19, 8))
testData.setField(17, Decimal.castFrom("10.0", 19, 1))
testData
}
override def typeInfo: RowTypeInfo = {
new RowTypeInfo(
/* 0 */ Types.BYTE,
/* 1 */ Types.SHORT,
/* 2 */ Types.INT,
/* 3 */ Types.LONG,
/* 4 */ Types.FLOAT,
/* 5 */ Types.DOUBLE,
/* 6 */ Types.BOOLEAN,
/* 7 */ Types.DOUBLE,
/* 8 */ Types.INT,
/* 9 */ Types.INT,
/* 10 */ Types.STRING,
/* 11 */ Types.BOOLEAN,
/* 12 */ Types.BOOLEAN,
/* 13 */ Types.ROW(Types.STRING, Types.STRING),
/* 14 */ Types.STRING,
/* 15 */ Types.SQL_DATE,
/* 16 */ DecimalTypeInfo.of(19, 8),
/* 17 */ DecimalTypeInfo.of(19, 1)
)
}
}
|
shaoxuan-wang/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/expressions/utils/ScalarOperatorsTestBase.scala
|
Scala
|
apache-2.0
| 2,603
|
package pictureshow
object Files {
import java.io.{File => JFile}
/** build path from parts */
def path(parts: Seq[String]): String = parts map { _.trim } mkString(System.getProperty("file.separator"))
/** creator from path */
def apply(path: String): Option[JFile] = new JFile(path) match {
case f:JFile if(f.exists) => Some(f)
case _ => None
}
/** creator from parts */
def apply(parts: Seq[String]): Option[JFile] = apply(path(parts))
  /** recursively lists file paths matching the given predicate */
def ls(path: String)(f: String => Boolean): List[String] = {
val root = new java.io.File(path)
(root.isDirectory match {
case true => (List[String]() /: (root.listFiles.toList map { _.getPath })) ((s, p) => ls(p)(f) ::: s)
case _ => root.getPath :: Nil
}).filter(f)
}
}
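// A small usage sketch (not part of the original): recursively list every .scala file
// under a directory; the "src" path is just an assumed example location.
object FilesExample extends App {
  Files.ls("src")(_.endsWith(".scala")) foreach println
}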
|
softprops/picture-show
|
core/src/main/scala/Files.scala
|
Scala
|
mit
| 799
|
package rww.ontology
import org.w3.banana.PointedGraph
import org.w3.banana.plantain.Plantain.ops._
import rww.Rdf._
import rww.ui.rdf.NPGPath
/**
* Created by hjs on 07/05/2015.
*/
case class ContactLocation(npg: NPGPath) {
val ct = ContactPrefix[Rdf]
def address = (npg /-> ct.address) map { Address(_) }
def phone = (npg /-> ct.phone)
// map (_.pointer) collect {
// case URI(u) => {
// val uu = URI(u)
// uu.getScheme.toLowerCase match {
// case "tel" => uu.getAuthority
// case _ => "(unknown)"
// }
// }
// case Literal(lexicalForm, xsd.string, _) => lexicalForm
// }
}
|
read-write-web/rww-scala-js
|
src/main/scala/rww/ontology/ContactLocation.scala
|
Scala
|
apache-2.0
| 631
|
/*
* Copyright 2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package bootstrap.liftweb {
import _root_.net.liftweb.util._
import _root_.net.liftweb.http._
import _root_.net.liftweb.http.provider.HTTPRequest
import _root_.net.liftweb.sitemap._
import _root_.net.liftweb.sitemap.Loc._
import Helpers._
import _root_.net.liftweb.mapper.{DB, ConnectionManager, Schemifier, DefaultConnectionIdentifier, ConnectionIdentifier,StandardDBVendor}
import _root_.java.sql.{Connection, DriverManager}
import _root_.fbc.example.model._
import _root_.javax.servlet.http.{HttpServletRequest}
import _root_.net.liftweb.common._
import net.liftweb.ext_api.facebook.FacebookConnect
/**
* A class that's instantiated early and run. It allows the application
* to modify lift's environment
*/
class Boot {
def boot {
if (!DB.jndiJdbcConnAvailable_?)
DB.defineConnectionManager(DefaultConnectionIdentifier, DBVendor)
// where to search snippet
LiftRules.addToPackages("fbc.example")
Schemifier.schemify(true, Schemifier.infoF _, User)
// Build SiteMap
val entries = Menu(Loc("Home", List("index"), "Home")) :: User.sitemap
LiftRules.setSiteMap(SiteMap(entries:_*))
/*
* Show the spinny image when an Ajax call starts
*/
LiftRules.ajaxStart =
Full(() => LiftRules.jsArtifacts.show("ajax-loader").cmd)
/*
* Make the spinny image go away when it ends
*/
LiftRules.ajaxEnd =
Full(() => LiftRules.jsArtifacts.hide("ajax-loader").cmd)
LiftRules.early.append(makeUtf8)
LiftRules.loggedInTest = Full(() => User.loggedIn_?)
S.addAround(DB.buildLoanWrapper)
//this is optional. Provides SSO for users already logged in to facebook.com
S.addAround(List(new LoanWrapper{
def apply[N](f: => N):N = {
if (!User.loggedIn_?){
for (c <- FacebookConnect.client; user <- User.findByFbId(c.session.uid)){
User.logUserIn(user)
}
}
f
}
}))
//this is really important for fb connect
LiftRules.useXhtmlMimeType = false
LiftRules.liftRequest.append {
case Req("xd_receiver" :: Nil, _, _) => false
}
}
/**
* Force the request to be UTF-8
*/
private def makeUtf8(req: HTTPRequest): Unit = {req.setCharacterEncoding("UTF-8")}
}
object DBVendor extends StandardDBVendor("org.h2.Driver",
"jdbc:h2:mem:lift;DB_CLOSE_DELAY=-1",
Empty,
Empty)
}
|
wsaccaco/lift
|
examples/hellofbc/src/main/scala/bootstrap/liftweb/Boot.scala
|
Scala
|
apache-2.0
| 3,114
|
import java.io.File
import java.io.FileInputStream
import java.io.PrintWriter
import org.apache.poi.xssf.usermodel.{XSSFCell, XSSFRow, XSSFSheet, XSSFWorkbook}
import org.apache.poi.ss.usermodel.{Sheet, Row, Cell}
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.language.postfixOps
import scala.util.matching.Regex
import scala.xml
/**
 * Transforms xlsx files to xml.
 * usage: excel2xml <src> <dest> [sheet]
 * src: an xlsx file, or a directory whose xlsx files are processed in turn.
 * dest: the destination directory for the output files
* @author zzc
* @version 1.0.0
* @since 4/4/2016
*/
object Excel2Xml extends App {
if (args.length >= 2) {
var sheetName: String = null
if( args.length > 2)
sheetName = args(2)
excel2Xml(new File(args(0)), new File(args(1)), sheetName)
} else
println("usage: excel2xml <src> <dest> [sheet]")
def excel2Xml(src: File, dst: File, sheetName: String): Unit = {
if (!src.exists) {
println("source file or directory not found.")
return
}
if (!dst.exists) {
println("target directory not found.")
return
}
    val r = """[^~\.].*\.xlsx$""".r
def h(f: File) = { transform(f, dst, sheetName) }
def recursiveHandleFiles(f: File): Unit = {
if (f.isDirectory)
f.listFiles.foreach(recursiveHandleFiles)
else if (r.findFirstIn(f.getName).isDefined)
h(f)
}
recursiveHandleFiles(src)
}
def transform(f: File, dstDir: File, sheetName: String): Unit = {
new XSSFWorkbook(new FileInputStream(f)).iterator.asScala.foreach(txSheet)
def txSheet(s: Sheet): Unit = {
if( sheetName != null && !sheetName.equals(s.getSheetName) )
return
println("transforming sheet " + s.getSheetName)
// transform the first row
def txFirstRow(r: Row) : Array[String] = {
val ths: Array[String] = new Array(r.getLastCellNum.toInt)
def txCell(c: Cell) = {
ths(c.getColumnIndex) = c.getStringCellValue
}
r.cellIterator().asScala.foreach(txCell)
ths
}
val ths = txFirstRow(s.getRow(0))
// transform the data rows
val sb: StringBuilder = StringBuilder.newBuilder
def txRow(r: Row) = {
def txCell(c: Cell) = {
def cell2string(): String = {
c.getCellType match {
case Cell.CELL_TYPE_BLANK =>
"<" + ths(c.getColumnIndex) + ">" + "</" + ths(c.getColumnIndex) + ">"
case Cell.CELL_TYPE_BOOLEAN =>
<e>{ c.getBooleanCellValue }</e>.
copy(label = ths(c.getColumnIndex)).toString
case Cell.CELL_TYPE_ERROR =>
<e>{ c.getErrorCellValue }</e>.
copy(label = ths(c.getColumnIndex)).toString
case Cell.CELL_TYPE_FORMULA =>
"<" + ths(c.getColumnIndex) + ">" + "</" + ths(c.getColumnIndex) + ">"
case Cell.CELL_TYPE_NUMERIC =>
if ((c.getNumericCellValue % 1) == 0)
<e>{ c.getNumericCellValue.toInt }</e>.
copy(label = ths(c.getColumnIndex)).toString
else
<e>{ c.getNumericCellValue }</e>.
copy(label = ths(c.getColumnIndex)).toString
case Cell.CELL_TYPE_STRING =>
<e>{ c.getStringCellValue }</e>.
copy(label = ths(c.getColumnIndex)).toString
}
} // toString
sb ++= cell2string()
} // txCell
sb ++= "<value>"
r.iterator.asScala.foreach(txCell)
sb ++= "</value>"
} // txRow
sb ++= """<?xml version="1.0" encoding="utf-8"?> <root>"""
s.iterator.asScala.filter(_.getRowNum>1).foreach(txRow)
sb ++= "</root>"
new PrintWriter(new File(dstDir, s.getSheetName + ".xml")) {
write(sb.toString); close
}
}
}
}
|
zhongzichang/excel2xml
|
src/main/scala/execl2xml.scala
|
Scala
|
gpl-3.0
| 3,929
|
package com.nulabinc.backlog.migration.common.codec
trait Decoder[A, B] {
def decode(a: A): B
}
|
nulab/backlog-migration-common
|
core/src/main/scala/com/nulabinc/backlog/migration/common/codec/Decoder.scala
|
Scala
|
mit
| 99
|
package com.github.fellowship_of_the_bus
package tdtd
package game
import scala.math._
import IDMap._
import GameMap._
object Projectile {
val width = 1.0f
val height = 1.0f
def apply(x: Float, y: Float, tar: Enemy, tower: Tower) = {
tower.id match {
case NetTowerID => new Net(x, y, tar, tower)
case _ => new Projectile(x, y, tar, tower)
}
}
def apply(x: Float, y: Float, dir: Int, tower: Tower) = {
new Steam(x, y, dir, tower)
}
}
class Projectile (x: Float, y: Float, val tar: Enemy, val tower:Tower) extends GameObject(x,y) {
val width = Projectile.width
val height = Projectile.height
val dmg = tower.damage
val speed = tower.speed
val aoe = tower.aoe
val id = tower.kind.projectileID
def explode() : Explosion = {
if (aoe != 0) {
new Explosion(tar.r, tar.c, aoe, map)
} else {
null
}
}
def tick() = {
val rVec = tar.r - r
val cVec = tar.c - c
val dist = sqrt((rVec * rVec) + (cVec * cVec)).asInstanceOf[Float]
var totalDmg = 0.0f
var money = 0
var kills = 0
if (dist < speed) {
val enemies = map.aoe(tar.r, tar.c, aoe)
for (e <- enemies) {
val data = e.hit(dmg)
totalDmg += data.dmg
money += data.money
if (data.money != 0) {
kills += 1
}
}
tower.kills += kills
tower.dmgDone += totalDmg
inactivate
(money, explode())
} else {
val theta = atan2(rVec, cVec)
rotation = toDegrees(theta).asInstanceOf[Float] + 90f
r += (rVec / dist) * speed
c += (cVec / dist) * speed
(0, null)
}
}
}
class Steam(x: Float, y: Float, val dir: Int, tower:Tower) extends Projectile(x, y, null, tower) {
var place: Tile = null
var nTiles = 0
override def tick() = {
if (dir == Right) {
c = c + speed
} else if (dir == Left) {
c = c - speed
} else if (dir == Up) {
r = r - speed
} else {// down
r = r + speed
}
val nextPlace = map(r,c)
nextPlace match {
case Some(tile) =>
var money = 0
if (place != tile) {
nTiles += 1
if (nTiles < 4) {
var totalDmg = 0.0f
var kills = 0
val enemies = tile.enemies
for (e <- enemies) {
val data = e.hit(dmg)
totalDmg += data.dmg
money += data.money
if (data.money != 0) {
kills += 1
}
}
tower.kills += kills
tower.dmgDone += totalDmg
place = tile
} else {
inactivate
}
}
(money, null)
case _ =>
inactivate
(0, null)
}
}
}
class Net(x: Float, y: Float, tar: Enemy, tower: Tower) extends Projectile(x, y, tar, tower) {
override def tick = {
val rVec = tar.r - r
val cVec = tar.c - c
val dist = sqrt((rVec * rVec) + (cVec * cVec)).asInstanceOf[Float]
if (dist < speed) {
val enemies = map.aoe(tar.r, tar.c, aoe)
for (e <- enemies) {
e.slow(new SlowEffect(0, 40))
}
inactivate
} else {
val theta = atan2(rVec, cVec)
rotation = toDegrees(theta).asInstanceOf[Float] + 90f
r += (rVec / dist) * speed
c += (cVec / dist) * speed
}
(0, null)
}
}
|
Fellowship-of-the-Bus/tdtd
|
src/main/scala/game/Projectile.scala
|
Scala
|
apache-2.0
| 3,360
|
/* sbt -- Simple Build Tool
* Copyright 2008, 2009, 2010 Mark Harrah
*/
package sbt
// The following code is based on scala.tools.nsc.reporters.{AbstractReporter, ConsoleReporter, Reporter}
// Copyright 2002-2009 LAMP/EPFL
// see licenses/LICENSE_Scala
// Original author: Martin Odersky
import xsbti.{Maybe,Position,Problem,Reporter,Severity}
import java.io.File
import java.util.EnumMap
import scala.collection.mutable
import LoggerReporter._
import Logger.{m2o,o2m,position,problem}
import Severity.{Error,Info => SInfo,Warn}
object LoggerReporter
{
final class PositionKey(pos: Position)
{
def offset = pos.offset
def sourceFile = pos.sourceFile
override def equals(o: Any) =
o match { case pk: PositionKey => equalsKey(pk); case _ => false }
def equalsKey(o: PositionKey) =
m2o(pos.offset) == m2o(o.offset) &&
m2o(pos.sourceFile) == m2o(o.sourceFile)
    override def hashCode =
      m2o(pos.offset).hashCode * 31 +
        m2o(pos.sourceFile).hashCode
}
def countElementsAsString(n: Int, elements: String): String =
n match {
case 0 => "no " + elements + "s"
case 1 => "one " + elements
case 2 => "two " + elements + "s"
case 3 => "three " + elements + "s"
case 4 => "four " + elements + "s"
case _ => "" + n + " " + elements + "s"
}
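  // Worked examples, derived from the cases above:
  //   countElementsAsString(0, "error")   == "no errors"
  //   countElementsAsString(1, "warning") == "one warning"
  //   countElementsAsString(5, "warning") == "5 warnings"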
}
class LoggerReporter(maximumErrors: Int, log: Logger) extends xsbti.Reporter
{
val positions = new mutable.HashMap[PositionKey, Severity]
val count = new EnumMap[Severity, Int](classOf[Severity])
private[this] val allProblems = new mutable.ListBuffer[Problem]
reset()
def reset()
{
count.put(Warn, 0)
count.put(SInfo, 0)
count.put(Error, 0)
positions.clear()
allProblems.clear()
}
def hasWarnings = count.get(Warn) > 0
def hasErrors = count.get(Error) > 0
def problems: Array[Problem] = allProblems.toArray
def printSummary()
{
val warnings = count.get(Severity.Warn)
if(warnings > 0)
log.warn(countElementsAsString(warnings, "warning") + " found")
val errors = count.get(Severity.Error)
if(errors > 0)
log.error(countElementsAsString(errors, "error") + " found")
}
def inc(sev: Severity) = count.put(sev, count.get(sev) + 1)
def display(pos: Position, msg: String, severity: Severity)
{
inc(severity)
if(severity != Error || maximumErrors <= 0 || count.get(severity) <= maximumErrors)
print(severityLogger(severity), pos, msg)
}
def severityLogger(severity: Severity): (=> String) => Unit =
m =>
{
(severity match
{
case Error => log.error(m)
case Warn => log.warn(m)
case SInfo => log.info(m)
})
}
def print(log: (=> String) => Unit, pos: Position, msg: String)
{
if(pos.sourcePath.isEmpty && pos.line.isEmpty)
log(msg)
else
{
val sourcePrefix = m2o(pos.sourcePath).getOrElse("")
val lineNumberString = m2o(pos.line).map(":" + _ + ":").getOrElse(":") + " "
log(sourcePrefix + lineNumberString + msg)
val lineContent = pos.lineContent
if(!lineContent.isEmpty)
{
log(lineContent)
for(space <- m2o(pos.pointerSpace))
log(space + "^") // pointer to the column position of the error/warning
}
}
}
def log(pos: Position, msg: String, severity: Severity): Unit =
{
allProblems += problem("", pos, msg, severity)
severity match
{
case Warn | Error =>
{
if(!testAndLog(pos, severity))
display(pos, msg, severity)
}
case _ => display(pos, msg, severity)
}
}
def testAndLog(pos: Position, severity: Severity): Boolean =
{
if(pos.offset.isEmpty || pos.sourceFile.isEmpty)
false
else
{
val key = new PositionKey(pos)
if(positions.get(key).map(_.ordinal >= severity.ordinal).getOrElse(false))
true
else
{
positions(key) = severity
false
}
}
}
}
|
jroper/sbt
|
compile/LoggerReporter.scala
|
Scala
|
bsd-3-clause
| 3,740
|
package com.enkidu.lignum.parsers.ast.expression.discardable.literals
case class FloatLiteral(value: String) extends Literal
|
marek1840/java-parser
|
src/main/scala/com/enkidu/lignum/parsers/ast/expression/discardable/literals/FloatLiteral.scala
|
Scala
|
mit
| 126
|
/**
* Swaggy Jenkins
* Jenkins API clients generated from Swagger / Open API specification
*
* The version of the OpenAPI document: 1.1.2-pre.0
* Contact: blah@cliffano.com
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
package io.swagger.client.model
import play.api.libs.json._
case class HudsonassignedLabels (
`class`: Option[String]
)
object HudsonassignedLabels {
implicit val format: Format[HudsonassignedLabels] = Json.format
}
|
cliffano/swaggy-jenkins
|
clients/scala-lagom-server/generated/src/main/scala/io/swagger/client/model/HudsonassignedLabels.scala
|
Scala
|
mit
| 585
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.jdbc
import java.sql.DriverManager
import java.util.Properties
import scala.collection.JavaConverters.propertiesAsScalaMapConverter
import org.scalatest.BeforeAndAfter
import org.apache.spark.SparkException
import org.apache.spark.sql.{AnalysisException, DataFrame, Row, SaveMode}
import org.apache.spark.sql.catalyst.parser.ParseException
import org.apache.spark.sql.execution.datasources.jdbc.{JDBCOptions, JdbcUtils}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
class JDBCWriteSuite extends SharedSparkSession with BeforeAndAfter {
val url = "jdbc:h2:mem:testdb2"
var conn: java.sql.Connection = null
val url1 = "jdbc:h2:mem:testdb3"
var conn1: java.sql.Connection = null
val properties = new Properties()
properties.setProperty("user", "testUser")
properties.setProperty("password", "testPass")
properties.setProperty("rowId", "false")
val testH2Dialect = new JdbcDialect {
override def canHandle(url: String) : Boolean = url.startsWith("jdbc:h2")
override def isCascadingTruncateTable(): Option[Boolean] = Some(false)
}
before {
Utils.classForName("org.h2.Driver")
conn = DriverManager.getConnection(url)
conn.prepareStatement("create schema test").executeUpdate()
conn1 = DriverManager.getConnection(url1, properties)
conn1.prepareStatement("create schema test").executeUpdate()
conn1.prepareStatement("drop table if exists test.people").executeUpdate()
conn1.prepareStatement(
"create table test.people (name TEXT(32) NOT NULL, theid INTEGER NOT NULL)").executeUpdate()
conn1.prepareStatement("insert into test.people values ('fred', 1)").executeUpdate()
conn1.prepareStatement("insert into test.people values ('mary', 2)").executeUpdate()
conn1.prepareStatement("drop table if exists test.people1").executeUpdate()
conn1.prepareStatement(
"create table test.people1 (name TEXT(32) NOT NULL, theid INTEGER NOT NULL)").executeUpdate()
conn1.commit()
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW PEOPLE
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$url1', dbtable 'TEST.PEOPLE', user 'testUser', password 'testPass')
""".stripMargin.replaceAll("\\n", " "))
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW PEOPLE1
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$url1', dbtable 'TEST.PEOPLE1', user 'testUser', password 'testPass')
""".stripMargin.replaceAll("\\n", " "))
}
after {
conn.close()
conn1.close()
}
private lazy val arr2x2 = Array[Row](Row.apply("dave", 42), Row.apply("mary", 222))
private lazy val arr1x2 = Array[Row](Row.apply("fred", 3))
private lazy val schema2 = StructType(
StructField("name", StringType) ::
StructField("id", IntegerType) :: Nil)
private lazy val arr2x3 = Array[Row](Row.apply("dave", 42, 1), Row.apply("mary", 222, 2))
private lazy val schema3 = StructType(
StructField("name", StringType) ::
StructField("id", IntegerType) ::
StructField("seq", IntegerType) :: Nil)
private lazy val schema4 = StructType(
StructField("NAME", StringType) ::
StructField("ID", IntegerType) :: Nil)
test("Basic CREATE") {
val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
df.write.jdbc(url, "TEST.BASICCREATETEST", new Properties())
assert(2 === spark.read.jdbc(url, "TEST.BASICCREATETEST", new Properties()).count())
assert(
2 === spark.read.jdbc(url, "TEST.BASICCREATETEST", new Properties()).collect()(0).length)
}
test("Basic CREATE with illegal batchsize") {
val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
(-1 to 0).foreach { size =>
val properties = new Properties()
properties.setProperty(JDBCOptions.JDBC_BATCH_INSERT_SIZE, size.toString)
val e = intercept[IllegalArgumentException] {
df.write.mode(SaveMode.Overwrite).jdbc(url, "TEST.BASICCREATETEST", properties)
}.getMessage
assert(e.contains(s"Invalid value `$size` for parameter `batchsize`"))
}
}
test("Basic CREATE with batchsize") {
val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
(1 to 3).foreach { size =>
val properties = new Properties()
properties.setProperty(JDBCOptions.JDBC_BATCH_INSERT_SIZE, size.toString)
df.write.mode(SaveMode.Overwrite).jdbc(url, "TEST.BASICCREATETEST", properties)
assert(2 === spark.read.jdbc(url, "TEST.BASICCREATETEST", new Properties()).count())
}
}
test("CREATE with ignore") {
val df = spark.createDataFrame(sparkContext.parallelize(arr2x3), schema3)
val df2 = spark.createDataFrame(sparkContext.parallelize(arr1x2), schema2)
df.write.mode(SaveMode.Ignore).jdbc(url1, "TEST.DROPTEST", properties)
assert(2 === spark.read.jdbc(url1, "TEST.DROPTEST", properties).count())
assert(3 === spark.read.jdbc(url1, "TEST.DROPTEST", properties).collect()(0).length)
df2.write.mode(SaveMode.Ignore).jdbc(url1, "TEST.DROPTEST", properties)
assert(2 === spark.read.jdbc(url1, "TEST.DROPTEST", properties).count())
assert(3 === spark.read.jdbc(url1, "TEST.DROPTEST", properties).collect()(0).length)
}
test("CREATE with overwrite") {
val df = spark.createDataFrame(sparkContext.parallelize(arr2x3), schema3)
val df2 = spark.createDataFrame(sparkContext.parallelize(arr1x2), schema2)
df.write.jdbc(url1, "TEST.DROPTEST", properties)
assert(2 === spark.read.jdbc(url1, "TEST.DROPTEST", properties).count())
assert(3 === spark.read.jdbc(url1, "TEST.DROPTEST", properties).collect()(0).length)
df2.write.mode(SaveMode.Overwrite).jdbc(url1, "TEST.DROPTEST", properties)
assert(1 === spark.read.jdbc(url1, "TEST.DROPTEST", properties).count())
assert(2 === spark.read.jdbc(url1, "TEST.DROPTEST", properties).collect()(0).length)
}
test("CREATE then INSERT to append") {
val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
val df2 = spark.createDataFrame(sparkContext.parallelize(arr1x2), schema2)
df.write.jdbc(url, "TEST.APPENDTEST", new Properties())
df2.write.mode(SaveMode.Append).jdbc(url, "TEST.APPENDTEST", new Properties())
assert(3 === spark.read.jdbc(url, "TEST.APPENDTEST", new Properties()).count())
assert(2 === spark.read.jdbc(url, "TEST.APPENDTEST", new Properties()).collect()(0).length)
}
test("SPARK-18123 Append with column names with different cases") {
val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
val df2 = spark.createDataFrame(sparkContext.parallelize(arr1x2), schema4)
df.write.jdbc(url, "TEST.APPENDTEST", new Properties())
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
val m = intercept[AnalysisException] {
df2.write.mode(SaveMode.Append).jdbc(url, "TEST.APPENDTEST", new Properties())
}.getMessage
assert(m.contains("Column \\"NAME\\" not found"))
}
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
df2.write.mode(SaveMode.Append).jdbc(url, "TEST.APPENDTEST", new Properties())
assert(3 === spark.read.jdbc(url, "TEST.APPENDTEST", new Properties()).count())
assert(2 === spark.read.jdbc(url, "TEST.APPENDTEST", new Properties()).collect()(0).length)
}
}
test("Truncate") {
JdbcDialects.registerDialect(testH2Dialect)
val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
val df2 = spark.createDataFrame(sparkContext.parallelize(arr1x2), schema2)
val df3 = spark.createDataFrame(sparkContext.parallelize(arr2x3), schema3)
df.write.jdbc(url1, "TEST.TRUNCATETEST", properties)
df2.write.mode(SaveMode.Overwrite).option("truncate", true)
.jdbc(url1, "TEST.TRUNCATETEST", properties)
assert(1 === spark.read.jdbc(url1, "TEST.TRUNCATETEST", properties).count())
assert(2 === spark.read.jdbc(url1, "TEST.TRUNCATETEST", properties).collect()(0).length)
val m = intercept[AnalysisException] {
df3.write.mode(SaveMode.Overwrite).option("truncate", true)
.jdbc(url1, "TEST.TRUNCATETEST", properties)
}.getMessage
assert(m.contains("Column \\"seq\\" not found"))
assert(0 === spark.read.jdbc(url1, "TEST.TRUNCATETEST", properties).count())
JdbcDialects.unregisterDialect(testH2Dialect)
}
test("createTableOptions") {
JdbcDialects.registerDialect(testH2Dialect)
val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
val m = intercept[org.h2.jdbc.JdbcSQLException] {
df.write.option("createTableOptions", "ENGINE tableEngineName")
.jdbc(url1, "TEST.CREATETBLOPTS", properties)
}.getMessage
assert(m.contains("Class \\"TABLEENGINENAME\\" not found"))
JdbcDialects.unregisterDialect(testH2Dialect)
}
test("Incompatible INSERT to append") {
val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
val df2 = spark.createDataFrame(sparkContext.parallelize(arr2x3), schema3)
df.write.jdbc(url, "TEST.INCOMPATIBLETEST", new Properties())
val m = intercept[AnalysisException] {
df2.write.mode(SaveMode.Append).jdbc(url, "TEST.INCOMPATIBLETEST", new Properties())
}.getMessage
assert(m.contains("Column \\"seq\\" not found"))
}
test("INSERT to JDBC Datasource") {
sql("INSERT INTO TABLE PEOPLE1 SELECT * FROM PEOPLE")
assert(2 === spark.read.jdbc(url1, "TEST.PEOPLE1", properties).count())
assert(2 === spark.read.jdbc(url1, "TEST.PEOPLE1", properties).collect()(0).length)
}
test("INSERT to JDBC Datasource with overwrite") {
sql("INSERT INTO TABLE PEOPLE1 SELECT * FROM PEOPLE")
sql("INSERT OVERWRITE TABLE PEOPLE1 SELECT * FROM PEOPLE")
assert(2 === spark.read.jdbc(url1, "TEST.PEOPLE1", properties).count())
assert(2 === spark.read.jdbc(url1, "TEST.PEOPLE1", properties).collect()(0).length)
}
test("save works for format(\\"jdbc\\") if url and dbtable are set") {
val df = sqlContext.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
df.write.format("jdbc")
.options(Map("url" -> url, "dbtable" -> "TEST.SAVETEST"))
.save()
assert(2 === sqlContext.read.jdbc(url, "TEST.SAVETEST", new Properties).count)
assert(
2 === sqlContext.read.jdbc(url, "TEST.SAVETEST", new Properties).collect()(0).length)
}
test("save API with SaveMode.Overwrite") {
val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
val df2 = spark.createDataFrame(sparkContext.parallelize(arr1x2), schema2)
df.write.format("jdbc")
.option("url", url1)
.option("dbtable", "TEST.SAVETEST")
.options(properties.asScala)
.save()
df2.write.mode(SaveMode.Overwrite).format("jdbc")
.option("url", url1)
.option("dbtable", "TEST.SAVETEST")
.options(properties.asScala)
.save()
assert(1 === spark.read.jdbc(url1, "TEST.SAVETEST", properties).count())
assert(2 === spark.read.jdbc(url1, "TEST.SAVETEST", properties).collect()(0).length)
}
test("save errors if url is not specified") {
val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
val e = intercept[RuntimeException] {
df.write.format("jdbc")
.option("dbtable", "TEST.SAVETEST")
.options(properties.asScala)
.save()
}.getMessage
assert(e.contains("Option 'url' is required"))
}
test("save errors if dbtable is not specified") {
val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
val e1 = intercept[RuntimeException] {
df.write.format("jdbc")
.option("url", url1)
.options(properties.asScala)
.save()
}.getMessage
assert(e1.contains("Option 'dbtable' or 'query' is required"))
val e2 = intercept[RuntimeException] {
df.write.format("jdbc")
.option("url", url1)
.options(properties.asScala)
.option("query", "select * from TEST.SAVETEST")
.save()
}.getMessage
val msg = "Option 'dbtable' is required. Option 'query' is not applicable while writing."
assert(e2.contains(msg))
}
test("save errors if wrong user/password combination") {
val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
val e = intercept[org.h2.jdbc.JdbcSQLException] {
df.write.format("jdbc")
.option("dbtable", "TEST.SAVETEST")
.option("url", url1)
.save()
}.getMessage
assert(e.contains("Wrong user name or password"))
}
test("save errors if partitionColumn and numPartitions and bounds not set") {
val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
val e = intercept[java.lang.IllegalArgumentException] {
df.write.format("jdbc")
.option("dbtable", "TEST.SAVETEST")
.option("url", url1)
.option("partitionColumn", "foo")
.save()
}.getMessage
assert(e.contains("When reading JDBC data sources, users need to specify all or none " +
"for the following options: 'partitionColumn', 'lowerBound', 'upperBound', and " +
"'numPartitions'"))
}
test("SPARK-18433: Improve DataSource option keys to be more case-insensitive") {
val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
df.write.format("jdbc")
.option("Url", url1)
.option("dbtable", "TEST.SAVETEST")
.options(properties.asScala)
.save()
}
test("SPARK-18413: Use `numPartitions` JDBCOption") {
val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
val e = intercept[IllegalArgumentException] {
df.write.format("jdbc")
.option("dbtable", "TEST.SAVETEST")
.option("url", url1)
.option("user", "testUser")
.option("password", "testPass")
.option(s"${JDBCOptions.JDBC_NUM_PARTITIONS}", "0")
.save()
}.getMessage
assert(e.contains("Invalid value `0` for parameter `numPartitions` in table writing " +
"via JDBC. The minimum value is 1."))
}
test("SPARK-19318 temporary view data source option keys should be case-insensitive") {
withTempView("people_view") {
sql(
s"""
|CREATE TEMPORARY VIEW people_view
|USING org.apache.spark.sql.jdbc
|OPTIONS (uRl '$url1', DbTaBlE 'TEST.PEOPLE1', User 'testUser', PassWord 'testPass')
""".stripMargin.replaceAll("\\n", " "))
sql("INSERT OVERWRITE TABLE PEOPLE_VIEW SELECT * FROM PEOPLE")
assert(sql("select * from people_view").count() == 2)
}
}
test("SPARK-10849: test schemaString - from createTableColumnTypes option values") {
def testCreateTableColDataTypes(types: Seq[String]): Unit = {
val colTypes = types.zipWithIndex.map { case (t, i) => (s"col$i", t) }
val schema = colTypes
.foldLeft(new StructType())((schema, colType) => schema.add(colType._1, colType._2))
val createTableColTypes =
colTypes.map { case (col, dataType) => s"$col $dataType" }.mkString(", ")
val df = spark.createDataFrame(sparkContext.parallelize(Seq(Row.empty)), schema)
val expectedSchemaStr =
colTypes.map { case (col, dataType) => s""""$col" $dataType """ }.mkString(", ")
assert(JdbcUtils.schemaString(df, url1, Option(createTableColTypes)) == expectedSchemaStr)
}
testCreateTableColDataTypes(Seq("boolean"))
testCreateTableColDataTypes(Seq("tinyint", "smallint", "int", "bigint"))
testCreateTableColDataTypes(Seq("float", "double"))
testCreateTableColDataTypes(Seq("string", "char(10)", "varchar(20)"))
testCreateTableColDataTypes(Seq("decimal(10,0)", "decimal(10,5)"))
testCreateTableColDataTypes(Seq("date", "timestamp"))
testCreateTableColDataTypes(Seq("binary"))
}
test("SPARK-10849: create table using user specified column type and verify on target table") {
def testUserSpecifiedColTypes(
df: DataFrame,
createTableColTypes: String,
expectedTypes: Map[String, String]): Unit = {
df.write
.mode(SaveMode.Overwrite)
.option("createTableColumnTypes", createTableColTypes)
.jdbc(url1, "TEST.DBCOLTYPETEST", properties)
// verify the data types of the created table by reading the database catalog of H2
val query =
"""
|(SELECT column_name, type_name, character_maximum_length
| FROM information_schema.columns WHERE table_name = 'DBCOLTYPETEST')
""".stripMargin
val rows = spark.read.jdbc(url1, query, properties).collect()
rows.foreach { row =>
val typeName = row.getString(1)
// For CHAR and VARCHAR, we also compare the max length
if (typeName.contains("CHAR")) {
val charMaxLength = row.getInt(2)
assert(expectedTypes(row.getString(0)) == s"$typeName($charMaxLength)")
} else {
assert(expectedTypes(row.getString(0)) == typeName)
}
}
}
val data = Seq[Row](Row(1, "dave", "Boston"))
val schema = StructType(
StructField("id", IntegerType) ::
StructField("first#name", StringType) ::
StructField("city", StringType) :: Nil)
val df = spark.createDataFrame(sparkContext.parallelize(data), schema)
// out-of-order
val expected1 = Map("id" -> "BIGINT", "first#name" -> "VARCHAR(123)", "city" -> "CHAR(20)")
testUserSpecifiedColTypes(df, "`first#name` VARCHAR(123), id BIGINT, city CHAR(20)", expected1)
// partial schema
val expected2 = Map("id" -> "INTEGER", "first#name" -> "VARCHAR(123)", "city" -> "CHAR(20)")
testUserSpecifiedColTypes(df, "`first#name` VARCHAR(123), city CHAR(20)", expected2)
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
// should still respect the original column names
val expected = Map("id" -> "INTEGER", "first#name" -> "VARCHAR(123)", "city" -> "CLOB")
testUserSpecifiedColTypes(df, "`FiRsT#NaMe` VARCHAR(123)", expected)
}
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
val schema = StructType(
StructField("id", IntegerType) ::
StructField("First#Name", StringType) ::
StructField("city", StringType) :: Nil)
val df = spark.createDataFrame(sparkContext.parallelize(data), schema)
val expected = Map("id" -> "INTEGER", "First#Name" -> "VARCHAR(123)", "city" -> "CLOB")
testUserSpecifiedColTypes(df, "`First#Name` VARCHAR(123)", expected)
}
}
test("SPARK-10849: jdbc CreateTableColumnTypes option with invalid data type") {
val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
val msg = intercept[ParseException] {
df.write.mode(SaveMode.Overwrite)
.option("createTableColumnTypes", "name CLOB(2000)")
.jdbc(url1, "TEST.USERDBTYPETEST", properties)
}.getMessage()
assert(msg.contains("DataType clob(2000) is not supported."))
}
test("SPARK-10849: jdbc CreateTableColumnTypes option with invalid syntax") {
val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
val msg = intercept[ParseException] {
df.write.mode(SaveMode.Overwrite)
.option("createTableColumnTypes", "`name char(20)") // incorrectly quoted column
.jdbc(url1, "TEST.USERDBTYPETEST", properties)
}.getMessage()
assert(msg.contains("extraneous input"))
}
test("SPARK-10849: jdbc CreateTableColumnTypes duplicate columns") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
val msg = intercept[AnalysisException] {
df.write.mode(SaveMode.Overwrite)
.option("createTableColumnTypes", "name CHAR(20), id int, NaMe VARCHAR(100)")
.jdbc(url1, "TEST.USERDBTYPETEST", properties)
}.getMessage()
assert(msg.contains(
"Found duplicate column(s) in the createTableColumnTypes option value: `name`"))
}
}
test("SPARK-10849: jdbc CreateTableColumnTypes invalid columns") {
// schema2 has the column "id" and "name"
val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
val msg = intercept[AnalysisException] {
df.write.mode(SaveMode.Overwrite)
.option("createTableColumnTypes", "firstName CHAR(20), id int")
.jdbc(url1, "TEST.USERDBTYPETEST", properties)
}.getMessage()
assert(msg.contains("createTableColumnTypes option column firstName not found in " +
"schema struct<name:string,id:int>"))
}
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
val msg = intercept[AnalysisException] {
df.write.mode(SaveMode.Overwrite)
.option("createTableColumnTypes", "id int, Name VARCHAR(100)")
.jdbc(url1, "TEST.USERDBTYPETEST", properties)
}.getMessage()
assert(msg.contains("createTableColumnTypes option column Name not found in " +
"schema struct<name:string,id:int>"))
}
}
test("SPARK-19726: INSERT null to a NOT NULL column") {
val e = intercept[SparkException] {
sql("INSERT INTO PEOPLE1 values (null, null)")
}.getMessage
assert(e.contains("NULL not allowed for column \\"NAME\\""))
}
ignore("SPARK-23856 Spark jdbc setQueryTimeout option") {
// The behaviour of the option `queryTimeout` depends on how JDBC drivers implement the API
// `setQueryTimeout`. For example, in the h2 JDBC driver, `executeBatch` invokes multiple
// INSERT queries in a batch and `setQueryTimeout` means that the driver checks the timeout
// of each query. In the PostgreSQL JDBC driver, `setQueryTimeout` means that the driver
    // checks the timeout of an entire batch on the driver side. So, the test below fails because
// this test suite depends on the h2 JDBC driver and the JDBC write path internally
// uses `executeBatch`.
val errMsg = intercept[SparkException] {
spark.range(10000000L).selectExpr("id AS k", "id AS v").coalesce(1).write
.mode(SaveMode.Overwrite)
.option("queryTimeout", 1)
.option("batchsize", Int.MaxValue)
.jdbc(url1, "TEST.TIMEOUTTEST", properties)
}.getMessage
assert(errMsg.contains("Statement was canceled or the session timed out"))
}
}
|
pgandhi999/spark
|
sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala
|
Scala
|
apache-2.0
| 23,202
|
/*
* Copyright 2013 TeamNexus
*
* TeamNexus Licenses this file to you under the MIT License (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://opensource.org/licenses/mit-license.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License
*/
package com.nexus.webserver
import io.netty.channel.ChannelHandlerContext
import io.netty.handler.codec.http.{QueryStringDecoder, FullHttpRequest}
import java.net.InetSocketAddress
import com.nexus.util.Utils
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder
/**
 * Wraps an incoming Netty HTTP request and exposes convenience accessors for the remote
 * address, HTTP version, method, sanitized path, headers, query-string parameters and POST data.
 *
 * @author jk-5
 */
class WebServerRequest(private final val ctx: ChannelHandlerContext, private final val request: FullHttpRequest) {
private final val queryStringDecoder = new QueryStringDecoder(this.request.getUri)
private final val postParameters = new HttpPostRequestDecoder(this.request)
private final val params = this.queryStringDecoder.parameters()
def getAddress = this.ctx.channel().remoteAddress().asInstanceOf[InetSocketAddress].getAddress
def getHttpVersion = this.request.getProtocolVersion
def getMethod = this.request.getMethod
def getPath = Utils.sanitizeURI(this.request.getUri)
def isHeaderPresent(key:String) = this.request.headers().contains(key)
def isParameterPresent(key:String) = this.params.containsKey(key)
def getHeader(key:String): Option[String] = this.request.headers().get(key) match{
case s: String => Some(s)
case _ => None
}
def getParameter(key:String): Option[String] = if(this.params.get(key) == null || this.params.get(key).size() == 0) None else Some(this.params.get(key).get(0))
def getPostData = this.postParameters
def getContext = this.ctx
def getHttpRequest = this.request
//TODO: enable me!
//def getUserFromParameter(key:String) = User.fromID(this.getParameter(key).toInt)
}
|
crvidya/nexus-scala
|
src/main/scala/com/nexus/webserver/WebServerRequest.scala
|
Scala
|
mit
| 2,189
|
class SCL2055 {
def m2(p: String) = p
def m2(p: => Unit) = 1
/*start*/m2()/*end*/
}
//Int
|
ilinum/intellij-scala
|
testdata/typeInference/bugs5/SCL2055.scala
|
Scala
|
apache-2.0
| 95
|
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.effect
/**
* Trait indicating effect or item may occur via random loot
*/
trait RandomLoot
|
adarro/ddo-calc
|
subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/effect/RandomLoot.scala
|
Scala
|
apache-2.0
| 794
|
package com.arcusys.valamis.lesson.service
import com.arcusys.learn.liferay.LiferayClasses.LNoSuchEntryException
import com.arcusys.learn.liferay.constants.ContentTypesHelper
import com.arcusys.learn.liferay.services.{ AssetEntryLocalServiceHelper, ClassNameLocalServiceHelper, CounterLocalServiceHelper }
import com.arcusys.learn.liferay.util.IndexerRegistryUtilHelper
import com.arcusys.valamis.lesson.model.BaseManifest
import com.arcusys.valamis.lesson.scorm.model
import com.arcusys.valamis.lesson.scorm.model.manifest
import com.arcusys.valamis.lesson.scorm.model.manifest.Manifest
import com.arcusys.valamis.lesson.scorm.storage.ScormPackagesStorage
import com.arcusys.valamis.lesson.tincan.model.TincanManifest
import com.arcusys.valamis.lesson.tincan.storage.TincanPackageStorage
import com.escalatesoft.subcut.inject.{ BindingModule, Injectable }
class AssetHelper(implicit val bindingModule: BindingModule) extends Injectable {
lazy val scormRepository = inject[ScormPackagesStorage]
lazy val tincanRepository = inject[TincanPackageStorage]
lazy val packageService = inject[ValamisPackageService]
def deletePackageAssetEntry(entryId: Long, manifest: BaseManifest) {
try {
if (AssetEntryLocalServiceHelper.getAssetEntry(entryId) != null) {
val indexer = IndexerRegistryUtilHelper.getIndexer(classOf[model.manifest.Manifest])
indexer.delete(manifest)
AssetEntryLocalServiceHelper.deleteAssetEntry(entryId)
}
} catch {
case e: LNoSuchEntryException => System.out.println("Asset not found")
}
}
def addTincanPackageAssetEntry(userId: Long, groupId: Long, packageId: Long, title: String, summary: Option[String]): Long = {
val assetRefId = createAsset(userId, groupId, title, summary)
tincanRepository.setAssetRefID(packageId, assetRefId)
val newManifest = packageService.getPackage(packageId)
val indexer = IndexerRegistryUtilHelper.getIndexer(classOf[TincanManifest])
if (indexer != null) indexer.reindex(newManifest)
assetRefId
}
def addScormPackageAssetEntry(userId: Long, groupId: Long, packageId: Long, title: String, summary: Option[String]): Long = {
val assetRefId = createAsset(userId, groupId, title, summary)
scormRepository.setAssetRefID(packageId, assetRefId)
val newManifest = packageService.getPackage(packageId)
val indexer = IndexerRegistryUtilHelper.getIndexer(classOf[manifest.Manifest])
if (indexer != null) indexer.reindex(newManifest)
assetRefId
}
private def createAsset(userId: Long, groupId: Long, title: String, description: Option[String]): Long = {
val content: String = ""
val categoriesIds: Array[Long] = null
val classNameId = ClassNameLocalServiceHelper.getClassNameId(classOf[manifest.Manifest].getName)
val classPK = CounterLocalServiceHelper.increment
val entry = AssetEntryLocalServiceHelper.updateEntry(userId, groupId, classOf[Manifest].getName,
classPK, "", classNameId, categoriesIds, null, visible = true, null,
null, null, null, ContentTypesHelper.TEXT_HTML, title,
content, description.getOrElse(""), null, null, 0, 0, null, sync = false)
entry.setClassPK(entry.getPrimaryKey)
AssetEntryLocalServiceHelper.updateAssetEntry(entry)
entry.getPrimaryKey
}
}
|
ViLPy/Valamis
|
valamis-lesson/src/main/scala/com/arcusys/valamis/lesson/service/AssetHelper.scala
|
Scala
|
lgpl-3.0
| 3,291
|
/**
* Copyright 2015 Peter Nerg
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package javascalautils.converters.s2j
import org.scalatest.funsuite.AnyFunSuite
import javascalautils.{ None => JNone, Option => JOption, Some => JSome }
import javascalautils.converters.s2j.Implicits._
import scala.util.{ Try, Success, Failure }
/**
* Test suite for Implicits scala.Try/Success/Failure conversions.
* @author Peter Nerg
*/
class ImplicitsTrySuite extends AnyFunSuite {
val expected = "Failure is not an Option"
test("Scala Failure as Java") {
val jfailure = Failure(new Exception(expected)).asJava
assert(jfailure.isFailure)
assertResult(expected)(jfailure.failed().get.getMessage)
}
test("Scala Success as Java") {
val jSuccess = Success(expected).asJava
assert(jSuccess.isSuccess)
assertResult(expected)(jSuccess.get)
}
test("Scala Try-Success as Java") {
val success: Try[String] = Success(expected)
val jSuccess = success.asJava
assert(jSuccess.isSuccess)
assertResult(expected)(jSuccess.get)
}
test("Test Try-Failure with Failure") {
val failure: Try[String] = Failure(new Exception(expected))
val jfailure = failure.asJava
assert(jfailure.isFailure)
assertResult(expected)(jfailure.failed().get.getMessage)
}
}
|
pnerg/java-scala-util-converter
|
src/test/scala/javascalautils/converters/s2j/ImplicitsTrySuite.scala
|
Scala
|
apache-2.0
| 1,824
|
package loaders
import java.io.FileInputStream
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import nodes.images._
import pipelines.CKMConf
import utils.{Image, ImageMetadata, LabeledImage, RowMajorArrayVectorizedImage, ColumnMajorArrayVectorizedImage}
/**
* Loads images from the CIFAR-10 Dataset.
*/
object CifarWhitenedLoader {
// We hardcode this because these are properties of the CIFAR-10 dataset.
val xDim = 32
val yDim = 32
val numChannels = 3
val labelSize = 1
def apply(sc: SparkContext, path: String): Dataset= {
val featurized = CKMFeatureLoader(sc, path, "cifar_whitened")
val trainFeatures= featurized.XTrain
val testFeatures = featurized.XTest
val yTrain = featurized.yTrain
val yTest = featurized.yTest
val trainImages:RDD[Image] = trainFeatures.map(CKMLayerLoader.convertVectorToImage(_, xDim, yDim, numChannels))
val testImages:RDD[Image] = testFeatures.map(CKMLayerLoader.convertVectorToImage(_, xDim, yDim, numChannels))
val trainLabeledImages:RDD[LabeledImage] = trainImages.zip(yTrain).map(x => LabeledImage(x._1, x._2))
val testLabeledImages:RDD[LabeledImage] = testImages.zip(yTest).map(x => LabeledImage(x._1, x._2))
Dataset(trainLabeledImages, testLabeledImages)
}
}
|
Vaishaal/ckm
|
keystone_pipeline/src/main/scala/loaders/CifarWhitenedLoader.scala
|
Scala
|
apache-2.0
| 1,299
|
/*
*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.locationtech.geomesa.utils.stats
import java.util.concurrent.atomic.AtomicLong
import com.typesafe.scalalogging.slf4j.Logging
trait MethodProfiling {
import java.lang.System.{currentTimeMillis => ctm}
def profile[R](code: => R)(implicit timing: Timing): R = {
val (startTime, r) = (ctm, code)
timing.occurrence(ctm - startTime)
r
}
def profile[R](code: => R, identifier: String)(implicit timings: Timings) = {
val (startTime, r) = (ctm, code)
timings.occurrence(identifier, ctm - startTime)
r
}
}
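// Usage sketch (hypothetical caller, not part of the original file); it assumes an implicit
// Timings instance in scope, as the second overload of `profile` above requires:
//   object QueryService extends MethodProfiling {
//     implicit val timings: Timings = new TimingsImpl
//     def lookup(id: String): Int = profile(expensiveLookup(id), "lookup")  // expensiveLookup is hypothetical
//   }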
/**
* Class to hold timing results
*/
class Timing extends Serializable {
private var total = 0L
private var count = 0L
/**
* Updates this instance with a new timing
*
* @param time
* @return
*/
def occurrence(time: Long): Unit = {
total += time
count += 1
this
}
/**
* Gets the total time
*
* @return
*/
def time: Long = total
/**
* Gets the number of event occurrences
*
* @return
*/
def occurrences: Long = count
/**
* Computes the average for this instance
*
* @return
*/
def average(): Double = total / count.toDouble
}
trait Timings extends Serializable {
/**
* Updates the given identifier with a new timing
*
* @param identifier
* @param time
*/
def occurrence(identifier: String, time: Long): Unit
/**
* Gets the total time for the given identifier
*
* @param identifier
* @return
*/
def time(identifier: String): Long
/**
* Gets the total occurrences for the given identifier
*
* @param identifier
* @return
*/
def occurrences(identifier: String): Long
/**
* Creates a printed string with the computed averages
*
* @return
*/
def averageOccurrences(): String
/**
* Creates a printed string with the computed averages
*
* @return
*/
def averageTimes(): String
}
/**
* Class to hold timing results. Thread-safe.
*/
class TimingsImpl extends Timings {
private val map = scala.collection.mutable.Map.empty[String, Timing]
override def occurrence(identifier: String, time: Long): Unit = {
val timing = map.synchronized(map.getOrElseUpdate(identifier, new Timing))
timing.synchronized(timing.occurrence(time))
}
override def time(identifier: String): Long =
map.synchronized(map.getOrElseUpdate(identifier, new Timing)).time
override def occurrences(identifier: String): Long =
map.synchronized(map.getOrElseUpdate(identifier, new Timing)).occurrences
override def averageOccurrences(): String = if (map.isEmpty) {
"No occurrences"
} else {
val entries = map.synchronized(map.toList).sortBy(_._1)
val total = entries.map(_._2.occurrences).sum
val percentOccurrences = entries.map { case (id, timing) =>
s"$id: ${(timing.occurrences * 100 / total.toDouble).formatted("%.1f%%")}"
}
percentOccurrences.mkString(s"Total occurrences: $total. Percent of occurrences - ", ", ", "")
}
override def averageTimes(): String = if (map.isEmpty) {
"No occurrences"
} else {
val entries = map.synchronized(map.toList).sortBy(_._1)
val total = entries.map(_._2.time).sum
val percentTimes = entries.map { case (id, timing) =>
timing.synchronized(s"$id: ${(timing.time * 100 / total.toDouble).formatted("%.1f%%")}" +
s" ${timing.occurrences} times at ${timing.average.formatted("%.4f")} ms avg")
}
percentTimes.mkString(s"Total time: $total ms. Percent of time - ", ", ", "")
}
}
/**
* Useful for sharing timings between instances of a certain class
*
* @param moduloToLog
*/
class AutoLoggingTimings(moduloToLog: Int = 1000) extends TimingsImpl with Logging {
val count = new AtomicLong()
override def occurrence(identifier: String, time: Long) = {
super.occurrence(identifier, time)
if (count.incrementAndGet() % moduloToLog == 0) {
logger.debug(averageTimes())
}
}
}
class NoOpTimings extends Timings {
override def occurrence(identifier: String, time: Long) = {}
override def occurrences(identifier: String) = 0L
override def time(identifier: String) = 0L
override def averageTimes() = ""
override def averageOccurrences() = ""
}
|
kevinwheeler/geomesa
|
geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/stats/MethodProfiling.scala
|
Scala
|
apache-2.0
| 4,849
|
package gapt.proofs.context.facet
import gapt.expr.Const
/** Inductive types, for each type we store its list of constructors. */
case class StructurallyInductiveTypes( constructors: Map[String, Vector[Const]] ) {
def +( ty: String, ctrs: Vector[Const] ) =
copy( constructors + ( ty -> ctrs ) )
override def toString: String = constructors.toSeq.sortBy( _._1 ).
    map { case ( t, cs ) => s"$t: ${cs.mkString( ", " )}" }.mkString( "\n" )
}
object StructurallyInductiveTypes {
implicit val structIndTysFacet: Facet[StructurallyInductiveTypes] = Facet( StructurallyInductiveTypes( Map() ) )
}
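// Example (hypothetical Const values zeroC and succC), registering the natural numbers:
//   StructurallyInductiveTypes( Map() ) + ( "nat", Vector( zeroC, succC ) )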
|
gapt/gapt
|
core/src/main/scala/gapt/proofs/context/facet/StructurallyInductiveTypes.scala
|
Scala
|
gpl-3.0
| 605
|
package org.bitcoins.core.script.bitwise
import org.bitcoins.core.script.constant._
import org.bitcoins.core.script.control.{ ControlOperationsInterpreter, OP_VERIFY }
import org.bitcoins.core.script.result._
import org.bitcoins.core.script.{ ExecutedScriptProgram, ExecutionInProgressScriptProgram, PreExecutionScriptProgram, ScriptProgram }
import org.bitcoins.core.util.BitcoinSLogger
/**
* Created by chris on 1/6/16.
*/
sealed abstract class BitwiseInterpreter {
private def logger = BitcoinSLogger.logger
/** Returns 1 if the inputs are exactly equal, 0 otherwise. */
def opEqual(program: ScriptProgram): ScriptProgram = {
require(program.script.headOption.contains(OP_EQUAL), "Script operation must be OP_EQUAL")
if (program.stack.size < 2) {
ScriptProgram(program, ScriptErrorInvalidStackOperation)
} else {
val h = program.stack.head
val h1 = program.stack.tail.head
val result = (h, h1) match {
case (OP_0, ScriptNumber.zero) | (ScriptNumber.zero, OP_0) =>
OP_0.underlying == ScriptNumber.zero.toLong
case (OP_FALSE, ScriptNumber.zero) | (ScriptNumber.zero, OP_FALSE) =>
OP_FALSE.underlying == ScriptNumber.zero.toLong
case (OP_TRUE, ScriptNumber.one) | (ScriptNumber.one, OP_TRUE) =>
OP_TRUE.underlying == ScriptNumber.one.toLong
case (OP_1, ScriptNumber.one) | (ScriptNumber.one, OP_1) =>
OP_1.underlying == ScriptNumber.one.toLong
case _ => h.bytes == h1.bytes
}
val scriptBoolean = if (result) OP_TRUE else OP_FALSE
ScriptProgram(program, scriptBoolean :: program.stack.tail.tail, program.script.tail)
}
}
/** Same as [[OP_EQUAL]], but runs [[OP_VERIFY]] afterward. */
def opEqualVerify(program: ScriptProgram): ScriptProgram = {
require(program.script.headOption.contains(OP_EQUALVERIFY), "Script operation must be OP_EQUALVERIFY")
if (program.stack.size > 1) {
//first replace OP_EQUALVERIFY with OP_EQUAL and OP_VERIFY
val simpleScript = OP_EQUAL :: OP_VERIFY :: program.script.tail
val newProgram: ScriptProgram = opEqual(ScriptProgram(program, program.stack, simpleScript))
ControlOperationsInterpreter.opVerify(newProgram) match {
case p: PreExecutionScriptProgram => p
case p: ExecutedScriptProgram =>
if (p.error.isDefined) ScriptProgram(p, ScriptErrorEqualVerify)
else p
case p: ExecutionInProgressScriptProgram => p
}
} else {
logger.error("OP_EQUALVERIFY requires at least 2 elements on the stack")
ScriptProgram(program, ScriptErrorInvalidStackOperation)
}
}
}
object BitwiseInterpreter extends BitwiseInterpreter
|
Christewart/bitcoin-s-core
|
src/main/scala/org/bitcoins/core/script/bitwise/BitwiseInterpreter.scala
|
Scala
|
mit
| 2,699
|
package net.stsmedia.akka.http.support
import akka.http.marshalling.{Marshaller, ToEntityMarshaller}
import akka.http.model.HttpCharsets
import akka.http.model.MediaTypes.`application/json`
import akka.http.unmarshalling.{FromEntityUnmarshaller, Unmarshaller}
import akka.stream.FlowMaterializer
import spray.json._
import scala.concurrent.ExecutionContext
import scala.language.implicitConversions
/**
* A trait providing automatic to and from JSON marshalling/unmarshalling using an in-scope *spray-json* protocol.
*/
trait SprayJsonSupport {
implicit def sprayJsonUnmarshallerConverter[T](reader: RootJsonReader[T])(implicit ec: ExecutionContext, mat: FlowMaterializer): FromEntityUnmarshaller[T] =
sprayJsonUnmarshaller(reader, ec, mat)
implicit def sprayJsonUnmarshaller[T](implicit reader: RootJsonReader[T], ec: ExecutionContext, mat: FlowMaterializer): FromEntityUnmarshaller[T] =
sprayJsValueUnmarshaller.map(jsonReader[T].read)
implicit def sprayJsValueUnmarshaller(implicit ec: ExecutionContext, mat: FlowMaterializer): FromEntityUnmarshaller[JsValue] =
Unmarshaller.byteStringUnmarshaller.mapWithCharset { (data, charset) ⇒
val input =
if (charset == HttpCharsets.`UTF-8`) ParserInput(data.toArray)
else ParserInput(data.decodeString(charset.nioCharset.name)) // FIXME
JsonParser(input)
}.filterMediaType(`application/json`)
implicit def sprayJsonMarshallerConverter[T](writer: RootJsonWriter[T])(implicit printer: JsonPrinter = PrettyPrinter, ec: ExecutionContext): ToEntityMarshaller[T] =
sprayJsonMarshaller[T](writer, printer, ec)
implicit def sprayJsonMarshaller[T](implicit writer: RootJsonWriter[T], printer: JsonPrinter = PrettyPrinter, ec: ExecutionContext): ToEntityMarshaller[T] =
sprayJsValueMarshaller[T].compose(writer.write)
implicit def sprayJsValueMarshaller[T](implicit writer: RootJsonWriter[T], printer: JsonPrinter = PrettyPrinter, ec: ExecutionContext): ToEntityMarshaller[JsValue] =
Marshaller.StringMarshaller.wrap(`application/json`)(printer.apply)
}
object SprayJsonSupport extends SprayJsonSupport
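// Minimal usage sketch (hypothetical Person type, not part of this file); it assumes a spray-json
// RootJsonFormat in implicit scope, from which the marshallers above are derived:
//   case class Person(name: String, age: Int)
//   object PersonJsonProtocol extends spray.json.DefaultJsonProtocol {
//     implicit val personFormat = jsonFormat2(Person)
//   }
//   // A route class that mixes in SprayJsonSupport and imports PersonJsonProtocol._ can then
//   // unmarshal request entities into Person and marshal Person values back to application/json.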
|
stsmedia/akka-http-server
|
src/main/scala/net/stsmedia/akka/http/support/SprayJsonSupport.scala
|
Scala
|
mit
| 2,112
|
package aerospiker
package task
package monix
import _root_.monix.eval.Task
import _root_.monix.execution.Cancelable
import io.circe.{ Decoder, Encoder }
import scala.collection.generic.CanBuildFrom
trait Aerospike {
def get[A: Decoder](settings: Settings, binNames: String*): Action[Task, A]
def put[A: Encoder](settings: Settings, bins: A): Action[Task, Unit]
def delete(settings: Settings): Action[Task, Boolean]
def all[C[_], A](settings: Settings, binNames: String*)(
implicit
decoder: Decoder[A],
cbf: CanBuildFrom[Nothing, (Key, Option[Record[A]]), C[(Key, Option[Record[A]])]]
): Action[Task, C[(Key, Option[Record[A]])]]
def exists(settings: Settings): Action[Task, Boolean]
}
object Aerospike {
def apply(): Aerospike = new Impl with Functions0
class Impl extends Aerospike { self: Functions =>
def get[A: Decoder](settings: Settings, binNames: String*): Action[Task, A] =
Action[Task, A] { implicit c =>
Task.async[A] { (s, cb) =>
s.execute(new Runnable {
override def run(): Unit =
getAsync[A](_.fold(cb.onError, cb.onSuccess), settings, binNames: _*)
})
Cancelable.empty
}
}
def put[A: Encoder](settings: Settings, bins: A): Action[Task, Unit] =
Action[Task, Unit] { implicit c =>
Task.async[Unit] { (s, cb) =>
s.execute(new Runnable {
override def run(): Unit =
putAsync[A](_.fold(cb.onError, cb.onSuccess), settings, bins)
})
Cancelable.empty
}
}
// def puts[A](settings: Settings, kv: Map[String, A])(implicit encoder: Encoder[A]): Action[Task, Seq[String]] =
// kv.toList.traverse { case (k: String, v: A) => put(settings.copy(key = k), v).map(_ => k) }
def delete(settings: Settings): Action[Task, Boolean] =
Action[Task, Boolean] { implicit c =>
Task.async[Boolean] { (s, cb) =>
s.execute(new Runnable {
override def run(): Unit =
deleteAsync(_.fold(cb.onError, cb.onSuccess), settings)
})
Cancelable.empty
}
}
// def deletes(settings: Settings, keys: Seq[String]): Action[Task, Seq[String]] =
// keys.toList.traverse(k => delete(settings.copy(key = k)).map(_ => k))
def all[C[_], A](settings: Settings, binNames: String*)(
implicit
decoder: Decoder[A],
cbf: CanBuildFrom[Nothing, (Key, Option[Record[A]]), C[(Key, Option[Record[A]])]]
): Action[Task, C[(Key, Option[Record[A]])]] =
Action[Task, C[(Key, Option[Record[A]])]] { implicit c =>
Task.async[C[(Key, Option[Record[A]])]] { (s, cb) =>
s.execute(new Runnable {
override def run(): Unit =
allAsync[C, A](_.fold(cb.onError, cb.onSuccess), settings, binNames: _*)
})
Cancelable.empty
}
}
def exists(settings: Settings): Action[Task, Boolean] =
Action[Task, Boolean] { implicit c =>
Task.async[Boolean] { (s, cb) =>
s.execute(new Runnable {
override def run(): Unit =
existsAsync(_.fold(cb.onError, cb.onSuccess), settings)
})
Cancelable.empty
}
}
}
}
|
tkrs/aerospiker
|
task/src/main/scala/aerospiker/task/monix/Aerospike.scala
|
Scala
|
mit
| 3,254
|
package io.taig.android.resource
/**
* Helper class to create Android quantity Strings
*/
case class Quantity(message: Int, count: Int)
|
Taig/Toolbelt
|
resource/src/main/scala/io/taig/android/resource/Quantity.scala
|
Scala
|
mit
| 141
|
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.receiver.scribe
import com.twitter.app.{App, Flaggable}
import com.twitter.conversions.time._
import com.twitter.finagle.ThriftMux
import com.twitter.finagle.stats.{Stat, DefaultStatsReceiver, StatsReceiver}
import com.twitter.finagle.util.{DefaultTimer, InetSocketAddressUtil}
import com.twitter.logging.Logger
import com.twitter.scrooge.BinaryThriftStructSerializer
import com.twitter.util.{Base64StringEncoder, Closable, Future, NonFatal, Return, Throw, Time}
import com.twitter.zipkin.collector.{QueueFullException, SpanReceiver}
import com.twitter.zipkin.conversions.thrift._
import com.twitter.zipkin.thriftscala.{LogEntry, ResultCode, Scribe, Span => ThriftSpan, ZipkinCollector}
import com.twitter.zipkin.zookeeper._
import com.twitter.zk.ZkClient
import java.net.{InetSocketAddress, URI}
/**
* A SpanReceiverFactory that should be mixed into a base ZipkinCollector. This
* provides `newScribeSpanReceiver` which will create a `ScribeSpanReceiver`
* listening on a configurable port (-zipkin.receiver.scribe.port) and announced to
* ZooKeeper via a given path (-zipkin.receiver.scribe.zk.path). If a path is not
* explicitly provided no announcement will be made (this is helpful for instance
* during development). This factory must also be mixed into an App trait along with
* a ZooKeeperClientFactory.
*/
trait ScribeSpanReceiverFactory { self: App with ZooKeeperClientFactory =>
val scribeAddr = flag(
"zipkin.receiver.scribe.addr",
new InetSocketAddress(1490),
"the address to listen on")
val scribeCategories = flag(
"zipkin.receiver.scribe.categories",
Seq("zipkin"),
"a whitelist of categories to process")
val scribeZkPath = flag(
"zipkin.receiver.scribe.zk.path",
"/com/twitter/zipkin/receiver/scribe",
"the zookeeper path to announce on. blank does not announce")
def newScribeSpanReceiver(
process: Seq[ThriftSpan] => Future[Unit],
stats: StatsReceiver = DefaultStatsReceiver.scope("ScribeSpanReceiver")
): SpanReceiver = new SpanReceiver {
val zkNode: Option[Closable] = scribeZkPath.get.map { path =>
val addr = InetSocketAddressUtil.toPublic(scribeAddr()).asInstanceOf[InetSocketAddress]
val nodeName = "%s:%d".format(addr.getHostName, addr.getPort)
zkClient.createEphemeral(path + "/" + nodeName, nodeName.getBytes)
}
val service = ThriftMux.serveIface(
scribeAddr(),
new ScribeReceiver(scribeCategories().toSet, process, stats))
val closer: Closable = zkNode map { Closable.sequence(_, service) } getOrElse { service }
def close(deadline: Time): Future[Unit] = closeAwaitably { closer.close(deadline) }
}
}
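// Illustrative launch flags for a collector mixing in this factory (values are examples only;
// the flag names and defaults are declared above):
//   -zipkin.receiver.scribe.addr=0.0.0.0:1490
//   -zipkin.receiver.scribe.categories=zipkin
//   -zipkin.receiver.scribe.zk.path=/com/twitter/zipkin/receiver/scribe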
class ScribeReceiver(
categories: Set[String],
process: Seq[ThriftSpan] => Future[Unit],
stats: StatsReceiver = DefaultStatsReceiver.scope("ScribeReceiver")
) extends Scribe[Future] {
private[this] val deserializer = new BinaryThriftStructSerializer[ThriftSpan] {
override val encoder = Base64StringEncoder
val codec = ThriftSpan
}
private[this] val log = Logger.get
private[this] val tryLater = Future.value(ResultCode.TryLater)
private[this] val ok = Future.value(ResultCode.Ok)
private[this] val logCallStat = stats.stat("logCallBatches")
private[this] val pushbackCounter = stats.counter("pushBack")
private[this] val errorStats = stats.scope("processingError")
private[this] val fatalStats = stats.scope("fatalException")
private[this] val batchesProcessedStat = stats.stat("processedBatches")
private[this] val messagesStats = stats.scope("messages")
private[this] val totalMessagesCounter = messagesStats.counter("total")
private[this] val InvalidMessagesCounter = messagesStats.counter("invalid")
private[this] val categoryCounters = categories.map { category =>
val cat = category.toLowerCase
(cat, messagesStats.scope("perCategory").counter(cat))
}.toMap
private[this] def entryToSpan(entry: LogEntry): Option[ThriftSpan] = try {
val span = Stat.time(stats.stat("deserializeSpan")) { deserializer.fromString(entry.message) }
Some(span)
} catch {
case e: Exception => {
// scribe doesn't have any ResultCode.ERROR or similar
// let's just swallow this invalid msg
log.warning(e, "Invalid msg: %s", entry.message)
InvalidMessagesCounter.incr()
None
}
}
def log(entries: Seq[LogEntry]): Future[ResultCode] = {
logCallStat.add(entries.size)
val spans = entries flatMap { entry =>
totalMessagesCounter.incr()
categoryCounters.get(entry.category.toLowerCase) flatMap { counter =>
counter.incr()
entryToSpan(entry)
}
}
if (spans.isEmpty) ok else {
process(spans) transform {
case Return(_) =>
batchesProcessedStat.add(spans.size)
ok
case Throw(NonFatal(e)) =>
if (!e.isInstanceOf[QueueFullException])
log.warning("Exception in process(): %s".format(e.getMessage))
errorStats.counter(e.getClass.getName).incr()
pushbackCounter.incr()
tryLater
case Throw(e) =>
fatalStats.counter(e.getClass.getName).incr()
Future.exception(e)
}
}
}
}
|
zhoffice/zipkin
|
zipkin-receiver-scribe/src/main/scala/com/twitter/zipkin/receiver/scribe/ScribeSpanReceiver.scala
|
Scala
|
apache-2.0
| 5,812
|
package org.schedoscope.scheduler.driver
import java.io.File
import org.scalatest.{FlatSpec, Matchers}
import org.schedoscope.dsl.transformations.ShellTransformation
import org.schedoscope.test.resources.LocalTestResources
import org.schedoscope.test.resources.TestDriverRunCompletionHandlerCallCounter._
import scala.io.Source
class ShellDriverTest extends FlatSpec with Matchers {
lazy val driver = new LocalTestResources().driverFor[ShellTransformation]("shell")
"ShellDriver" should "have transformation name shell" in {
driver.transformationName shouldBe "shell"
}
it should "execute shell transformations synchronously" in {
val driverRunState = driver.runAndWait(ShellTransformation("ls -l > /dev/null"))
driverRunState shouldBe a[DriverRunSucceeded[_]]
}
it should "execute another shell transformations synchronously" in {
val driverRunState = driver.runAndWait(ShellTransformation("ls -ld > /dev/null"))
driverRunState shouldBe a[DriverRunSucceeded[_]]
}
it should "pass environment to the shell" in {
val file = File.createTempFile("_schedoscope", ".sh")
file.deleteOnExit()
val driverRunState = driver.runAndWait(
ShellTransformation("echo $testvar" + s">${file.getAbsolutePath()}")
.configureWith(Map("testvar" -> "foobar"))
.asInstanceOf[ShellTransformation])
Source.fromFile(file).getLines.next shouldBe "foobar"
driverRunState shouldBe a[DriverRunSucceeded[_]]
}
it should "execute shell transformations and return errors when running synchronously" in {
val driverRunState = driver.runAndWait(ShellTransformation("exit 1"))
driverRunState shouldBe a[DriverRunFailed[_]]
}
it should "call its DriverRunCompletitionHandlers' driverRunCompleted upon request" in {
val runHandle = driver.run(ShellTransformation("#"))
while (driver.getDriverRunState(runHandle).isInstanceOf[DriverRunOngoing[_]]) {}
driver.driverRunCompleted(runHandle)
driverRunCompletedCalled(runHandle, driver.getDriverRunState(runHandle)) shouldBe true
}
it should "call its DriverRunCompletitionHandlers' driverRunStarted upon request" in {
val runHandle = driver.run(ShellTransformation("#"))
driver.driverRunStarted(runHandle)
driverRunStartedCalled(runHandle) shouldBe true
}
}
|
utzwestermann/schedoscope
|
schedoscope-core/src/test/scala/org/schedoscope/scheduler/driver/ShellDriverTest.scala
|
Scala
|
apache-2.0
| 2,315
|
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package kafka.manager
import java.util.Properties
import java.util.concurrent.atomic.AtomicBoolean
import com.typesafe.config.{Config, ConfigFactory}
import kafka.manager.features.KMDeleteTopicFeature
import kafka.manager.model.{Kafka_0_8_1_1, ActorModel}
import kafka.manager.utils.CuratorAwareTest
import kafka.manager.model.ActorModel.{KafkaManagedConsumer, ZKManagedConsumer, TopicList}
import kafka.test.{NewKafkaManagedConsumer, SimpleProducer, HighLevelConsumer, SeededBroker}
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Try
/**
* @author hiral
*/
class TestKafkaManager extends CuratorAwareTest with BaseTest {
private[this] val seededTopic = "km-api-test"
private[this] val broker = new SeededBroker(seededTopic,4)
private[this] val kafkaServerZkPath = broker.getZookeeperConnectionString
private[this] val akkaConfig: Properties = new Properties()
akkaConfig.setProperty("pinned-dispatcher.type","PinnedDispatcher")
akkaConfig.setProperty("pinned-dispatcher.executor","thread-pool-executor")
akkaConfig.setProperty(KafkaManager.ZkHosts,testServer.getConnectString)
akkaConfig.setProperty(KafkaManager.BrokerViewUpdateSeconds,"1")
akkaConfig.setProperty(KafkaManager.KafkaManagerUpdateSeconds,"1")
akkaConfig.setProperty(KafkaManager.DeleteClusterUpdateSeconds,"1")
akkaConfig.setProperty(KafkaManager.ConsumerPropertiesFile,"conf/consumer.properties")
private[this] val config : Config = ConfigFactory.parseProperties(akkaConfig)
private[this] val kafkaManager : KafkaManager = new KafkaManager(config)
private[this] val duration = FiniteDuration(10,SECONDS)
private[this] val createTopicNameA = "km-unit-test-a"
private[this] val createTopicNameB = "km-unit-test-b"
private[this] val createLogkafkaLogkafkaId = "km-unit-test-logkafka-logkafka_id"
private[this] val createLogkafkaLogPath = "/km-unit-test-logkafka-logpath"
private[this] val createLogkafkaTopic = "km-unit-test-logkafka-topic"
private[this] var hlConsumer : Option[HighLevelConsumer] = None
private[this] var hlConsumerThread : Option[Thread] = None
private[this] val hlShutdown = new AtomicBoolean(false)
private[this] var newConsumer : Option[NewKafkaManagedConsumer] = None
private[this] var newConsumerThread : Option[Thread] = None
private[this] val newShutdown = new AtomicBoolean(false)
private[this] var simpleProducer : Option[SimpleProducer] = None
private[this] var simpleProducerThread : Option[Thread] = None
override protected def beforeAll() : Unit = {
super.beforeAll()
Thread.sleep(2000)
hlConsumer = Option(broker.getHighLevelConsumer)
hlConsumerThread = Option(new Thread() {
override def run(): Unit = {
while(!hlShutdown.get()) {
hlConsumer.map(_.read { ba =>
Option(ba).map(asString).foreach( s => println(s"hl consumer read message : $s"))
})
Thread.sleep(500)
}
}
})
hlConsumerThread.foreach(_.start())
newConsumer = Option(broker.getNewConsumer)
newConsumerThread = Option(new Thread() {
override def run(): Unit = {
while(!newShutdown.get()) {
newConsumer.map(_.read { message =>
Option(message).foreach( s => println(s"new consumer read message : $s"))
})
Thread.sleep(500)
}
}
})
newConsumerThread.foreach(_.start())
simpleProducer = Option(broker.getSimpleProducer)
simpleProducerThread = Option(new Thread() {
override def run(): Unit = {
var count = 0
while(!hlShutdown.get()) {
simpleProducer.foreach { p =>
p.send(s"simple message $count")
count+=1
Thread.sleep(500)
}
}
}
})
simpleProducerThread.foreach(_.start())
Thread.sleep(1000)
}
override protected def afterAll(): Unit = {
Try(newShutdown.set(true))
Try(hlShutdown.set(true))
Try(simpleProducerThread.foreach(_.interrupt()))
Try(hlConsumerThread.foreach(_.interrupt()))
Try(hlConsumer.foreach(_.close()))
Try(newConsumerThread.foreach(_.interrupt()))
Try(newConsumer.foreach(_.close()))
if(kafkaManager!=null) {
kafkaManager.shutdown()
}
Try(broker.shutdown())
super.afterAll()
}
private[this] def getTopicList() : TopicList = {
val future = kafkaManager.getTopicList("dev")
val result = Await.result(future,duration)
result.toOption.get
}
test("add cluster") {
val future = kafkaManager.addCluster("dev","0.8.2.0",kafkaServerZkPath, jmxEnabled = false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(kafkaManager.defaultTuning))
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
}
test("create topic") {
val futureA = kafkaManager.createTopic("dev",createTopicNameA,4,1)
val resultA = Await.result(futureA,duration)
val futureB = kafkaManager.createTopic("dev",createTopicNameB,4,1)
val resultB = Await.result(futureB,duration)
assert(resultA.isRight === true)
assert(resultB.isRight === true)
Thread.sleep(2000)
}
test("fail to create topic again") {
val future = kafkaManager.createTopic("dev",createTopicNameA,4,1)
val result = Await.result(future,duration)
assert(result.isLeft === true)
Thread.sleep(2000)
}
test("get topic list") {
val future = kafkaManager.getTopicList("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
assert(result.toOption.get.list.nonEmpty === true)
}
test("query request for invalid cluster") {
val future = kafkaManager.getTopicList("blah")
val result = Await.result(future,duration)
assert(result.isLeft === true)
assert(result.swap.toOption.get.msg.contains("blah") === true)
}
test("get broker list") {
val future = kafkaManager.getBrokerList("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
assert(result.toOption.nonEmpty === true)
}
test("get topic identity") {
val future = kafkaManager.getTopicList("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
assert(result.toOption.get.list.nonEmpty === true)
result.toOption.get.list.foreach { topic =>
val future2 = kafkaManager.getTopicIdentity("dev",topic)
val result2 = Await.result(future2, duration)
assert(result2.isRight === true)
}
//seeded topic should have offsets
val future2 = kafkaManager.getTopicIdentity("dev",seededTopic)
val result2 = Await.result(future2, duration)
assert(result2.isRight === true)
assert(result2.toOption.get.summedTopicOffsets > 0)
}
test("get cluster list") {
val future = kafkaManager.getClusterList
val result = Await.result(future,duration)
assert(result.isRight === true)
assert(result.toOption.get.active.nonEmpty === true)
}
test("get cluster view") {
val future = kafkaManager.getClusterView("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
}
test("get cluster config") {
val future = kafkaManager.getClusterConfig("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
}
test("get cluster context") {
val future = kafkaManager.getClusterContext("dev")
val result = Await.result(future,duration)
assert(result.isRight === true, s"Failed : ${result}")
assert(result.toOption.get.clusterFeatures.features(KMDeleteTopicFeature))
}
test("get consumer list passive mode") {
val future = kafkaManager.getConsumerListExtended("dev")
val result = Await.result(future,duration)
assert(result.isRight === true, s"Failed : ${result}")
assert(result.toOption.get.clusterContext.config.activeOffsetCacheEnabled === false, s"Failed : ${result}")
assert(result.toOption.get.list.map(_._1).contains((newConsumer.get.groupId, KafkaManagedConsumer)), s"Failed : ${result}")
assert(result.toOption.get.list.map(_._1).contains((hlConsumer.get.groupId, ZKManagedConsumer)), s"Failed : ${result}")
}
test("get consumer identity passive mode for old consumer") {
val future = kafkaManager.getConsumerIdentity("dev", hlConsumer.get.groupId, "ZK")
val result = Await.result(future,duration)
assert(result.isRight === true, s"Failed : ${result}")
assert(result.toOption.get.clusterContext.config.activeOffsetCacheEnabled === false, s"Failed : ${result}")
assert(result.toOption.get.topicMap.head._1 === seededTopic, s"Failed : ${result}")
}
test("get consumer identity passive mode for new consumer") {
val future = kafkaManager.getConsumerIdentity("dev", newConsumer.get.groupId, "KF")
val result = Await.result(future,duration)
assert(result.isRight === true, s"Failed : ${result}")
assert(result.toOption.get.clusterContext.config.activeOffsetCacheEnabled === false, s"Failed : ${result}")
assert(result.toOption.get.topicMap.head._1 === seededTopic, s"Failed : ${result}")
}
test("run preferred leader election") {
val topicList = getTopicList()
val future = kafkaManager.runPreferredLeaderElection("dev",topicList.list.toSet)
val result = Await.result(future,duration)
//TODO: this is a failure since there is nothing to do, need a better test
assert(result.isLeft === true)
Thread.sleep(2000)
}
test("get preferred leader election") {
val future = kafkaManager.getPreferredLeaderElection("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
println(result.toOption.get)
}
test("generate partition assignments") {
val topicList = getTopicList()
val future = kafkaManager.generatePartitionAssignments("dev",topicList.list.toSet,Set(0))
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
}
test("run reassign partitions") {
val topicList = getTopicList()
val future = kafkaManager.runReassignPartitions("dev",topicList.list.toSet)
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
}
test("get reassign partitions") {
val future = kafkaManager.getReassignPartitions("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
}
test("add topic partitions") {
val tiFuture= kafkaManager.getTopicIdentity("dev",createTopicNameA)
val tiOrError = Await.result(tiFuture, duration)
assert(tiOrError.isRight, "Failed to get topic identity!")
val ti = tiOrError.toOption.get
val future = kafkaManager.addTopicPartitions("dev",createTopicNameA,Seq(0),ti.partitions + 1,ti.readVersion)
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
//check new partition num
{
val tiFuture= kafkaManager.getTopicIdentity("dev",createTopicNameA)
val tiOrError = Await.result(tiFuture, duration)
assert(tiOrError.isRight, "Failed to get topic identity!")
val ti = tiOrError.toOption.get
assert(ti.partitions === 5)
}
}
test("add multiple topics partitions") {
val tiFutureA = kafkaManager.getTopicIdentity("dev",createTopicNameA)
val tiFutureB = kafkaManager.getTopicIdentity("dev",createTopicNameB)
val tiOrErrorA = Await.result(tiFutureA,duration)
val tiOrErrorB = Await.result(tiFutureB,duration)
assert(tiOrErrorA.isRight, "Failed to get topic identity for topic A!")
assert(tiOrErrorB.isRight, "Failed to get topic identity for topic B!")
val tiA = tiOrErrorA.toOption.get
val tiB = tiOrErrorB.toOption.get
val newPartitionNum = tiA.partitions + 1
val future = kafkaManager.addMultipleTopicsPartitions("dev",Seq(createTopicNameA, createTopicNameB),Set(0),newPartitionNum,Map(createTopicNameA->tiA.readVersion,createTopicNameB->tiB.readVersion))
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
{
val tiFutureA = kafkaManager.getTopicIdentity("dev",createTopicNameA)
val tiFutureB = kafkaManager.getTopicIdentity("dev",createTopicNameB)
val tiOrErrorA = Await.result(tiFutureA,duration)
val tiOrErrorB = Await.result(tiFutureB,duration)
assert(tiOrErrorA.isRight, "Failed to get topic identity for topic A!")
assert(tiOrErrorB.isRight, "Failed to get topic identity for topic B!")
val tiA = tiOrErrorA.toOption.get
val tiB = tiOrErrorB.toOption.get
assert(tiA.partitions === newPartitionNum)
assert(tiB.partitions === newPartitionNum)
}
}
test("update topic config") {
val tiFuture= kafkaManager.getTopicIdentity("dev",createTopicNameA)
val tiOrError = Await.result(tiFuture, duration)
assert(tiOrError.isRight, "Failed to get topic identity!")
val ti = tiOrError.toOption.get
val config = new Properties()
config.put(kafka.manager.utils.zero82.LogConfig.RententionMsProp,"1800000")
val configReadVersion = ti.configReadVersion
val future = kafkaManager.updateTopicConfig("dev",createTopicNameA,config,configReadVersion)
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
//check new topic config
{
val tiFuture= kafkaManager.getTopicIdentity("dev",createTopicNameA)
val tiOrError = Await.result(tiFuture, duration)
assert(tiOrError.isRight, "Failed to get topic identity!")
val ti = tiOrError.toOption.get
assert(ti.configReadVersion > configReadVersion)
assert(ti.config.toMap.apply(kafka.manager.utils.zero82.LogConfig.RententionMsProp) === "1800000")
}
}
test("delete topic") {
val futureA = kafkaManager.deleteTopic("dev",createTopicNameA)
val resultA = Await.result(futureA,duration)
assert(resultA.isRight === true, resultA)
Thread.sleep(2000)
val futureA2 = kafkaManager.getTopicList("dev")
val resultA2 = Await.result(futureA2,duration)
assert(resultA2.isRight === true, resultA2)
assert(resultA2.toOption.get.deleteSet(createTopicNameA),"Topic not in delete set")
val futureB = kafkaManager.deleteTopic("dev",createTopicNameB)
val resultB = Await.result(futureB,duration)
assert(resultB.isRight === true, resultB)
Thread.sleep(2000)
val futureB2 = kafkaManager.getTopicList("dev")
val resultB2 = Await.result(futureB2,duration)
assert(resultB2.isRight === true, resultB2)
assert(resultB2.toOption.get.deleteSet(createTopicNameB),"Topic not in delete set")
}
test("fail to delete non-existent topic") {
val future = kafkaManager.deleteTopic("dev","delete_me")
val result = Await.result(future,duration)
assert(result.isLeft === true)
}
test("update cluster zkhost") {
val future = kafkaManager.updateCluster("dev","0.8.2.0",testServer.getConnectString, jmxEnabled = false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxSsl = false, jmxPass = None, tuning = Option(defaultTuning))
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
val future2 = kafkaManager.getClusterList
val result2 = Await.result(future2,duration)
assert(result2.isRight === true)
assert((result2.toOption.get.pending.nonEmpty === true) ||
(result2.toOption.get.active.find(c => c.name == "dev").get.curatorConfig.zkConnect === testServer.getConnectString))
Thread.sleep(2000)
}
test("disable cluster") {
val future = kafkaManager.disableCluster("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
val future2 = kafkaManager.getClusterList
val result2 = Await.result(future2,duration)
assert(result2.isRight === true)
assert((result2.toOption.get.pending.nonEmpty === true) ||
(result2.toOption.get.active.find(c => c.name == "dev").get.enabled === false))
Thread.sleep(2000)
}
test("enable cluster") {
val future = kafkaManager.enableCluster("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
}
test("update cluster version") {
val future = kafkaManager.updateCluster("dev","0.8.1.1",testServer.getConnectString, jmxEnabled = false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(defaultTuning))
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
val future2 = kafkaManager.getClusterList
val result2 = Await.result(future2,duration)
assert(result2.isRight === true)
assert((result2.toOption.get.pending.nonEmpty === true) ||
(result2.toOption.get.active.find(c => c.name == "dev").get.version === Kafka_0_8_1_1))
Thread.sleep(2000)
}
test("delete topic not supported prior to 0.8.2.0") {
val future = kafkaManager.deleteTopic("dev",createTopicNameA)
val result = Await.result(future,duration)
assert(result.isLeft === true, result)
assert(result.swap.toOption.get.msg.contains("not supported"))
Thread.sleep(2000)
}
test("update cluster logkafka enabled and activeOffsetCache enabled") {
val future = kafkaManager.updateCluster("dev","0.8.2.0",testServer.getConnectString, jmxEnabled = false, pollConsumers = true, filterConsumers = true, logkafkaEnabled = true, activeOffsetCacheEnabled = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(defaultTuning))
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
val future2 = kafkaManager.getClusterList
val result2 = Await.result(future2,duration)
assert(result2.isRight === true)
assert((result2.toOption.get.active.find(c => c.name == "dev").get.logkafkaEnabled === true) &&
(result2.toOption.get.active.find(c => c.name == "dev").get.activeOffsetCacheEnabled === true))
Thread.sleep(2000)
}
/*
test("get consumer list active mode") {
val future = kafkaManager.getConsumerListExtended("dev")
val result = Await.result(future,duration)
assert(result.isRight === true, s"Failed : ${result}")
assert(result.toOption.get.clusterContext.config.activeOffsetCacheEnabled === false, s"Failed : ${result}")
assert(result.toOption.get.list.head._1 === hlConsumer.get.groupId, s"Failed : ${result}")
}
test("get consumer identity active mode") {
val future = kafkaManager.getConsumerIdentity("dev", hlConsumer.get.groupId)
val result = Await.result(future,duration)
assert(result.isRight === true, s"Failed : ${result}")
assert(result.toOption.get.clusterContext.config.activeOffsetCacheEnabled === false, s"Failed : ${result}")
assert(result.toOption.get.topicMap.head._1 === seededTopic, s"Failed : ${result}")
}*/
test("create logkafka") {
val config = new Properties()
config.put(kafka.manager.utils.logkafka82.LogConfig.TopicProp,createLogkafkaTopic)
val future = kafkaManager.createLogkafka("dev",createLogkafkaLogkafkaId,createLogkafkaLogPath,config)
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
}
test("get logkafka identity") {
val future = kafkaManager.getLogkafkaLogkafkaIdList("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
assert(result.toOption.get.list.nonEmpty === true)
result.toOption.get.list.foreach { logkafka_id =>
val future2 = kafkaManager.getLogkafkaIdentity("dev",logkafka_id)
val result2 = Await.result(future2, duration)
assert(result2.isRight === true)
}
}
test("update logkafka config") {
val liFuture= kafkaManager.getLogkafkaIdentity("dev",createLogkafkaLogkafkaId)
val liOrError = Await.result(liFuture, duration)
assert(liOrError.isRight, "Failed to get logkafka identity!")
val li = liOrError.toOption.get
val config = new Properties()
config.put(kafka.manager.utils.logkafka82.LogConfig.TopicProp,createLogkafkaTopic)
config.put(kafka.manager.utils.logkafka82.LogConfig.PartitionProp,"1")
val future = kafkaManager.updateLogkafkaConfig("dev",createLogkafkaLogkafkaId,createLogkafkaLogPath,config)
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(1000)
//check new logkafka config
{
val liFuture= kafkaManager.getLogkafkaIdentity("dev",createLogkafkaLogkafkaId)
val liOrError = Await.result(liFuture, duration)
assert(liOrError.isRight, "Failed to get logkafka identity!")
val li = liOrError.toOption.get
assert(li.identityMap.get(createLogkafkaLogPath).get._1.get.apply(kafka.manager.utils.logkafka82.LogConfig.PartitionProp) === "1")
}
}
test("delete logkafka") {
val future = kafkaManager.deleteLogkafka("dev",createLogkafkaLogkafkaId,createLogkafkaLogPath)
val result = Await.result(future,duration)
assert(result.isRight === true, result)
Thread.sleep(2000)
val liFuture= kafkaManager.getLogkafkaIdentity("dev",createLogkafkaLogkafkaId)
val liOrError = Await.result(liFuture, duration)
assert(liOrError.isRight, "Failed to get logkafka identity!")
val li = liOrError.toOption.get
assert(li.identityMap.get(createLogkafkaLogPath) === None)
Thread.sleep(2000)
}
test("delete cluster") {
//first have to disable in order to delete
{
val future = kafkaManager.disableCluster("dev")
val result = Await.result(future, duration)
assert(result.isRight === true)
Thread.sleep(2000)
}
val future = kafkaManager.deleteCluster("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
val future2 = kafkaManager.getClusterList
val result2 = Await.result(future2,duration)
assert(result2.isRight === true)
assert(result2.toOption.get.pending.isEmpty === true)
assert(result2.toOption.get.active.isEmpty === true)
}
}
|
herokumx/heroku-kafka-manager
|
test/kafka/manager/TestKafkaManager.scala
|
Scala
|
apache-2.0
| 22,307
|
package com.monkeygroover.frontend
import akka.actor.ActorSystem
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.server.{Directives, Route}
import akka.stream.Materializer
import com.monkeygroover.commands._
/**
* Created by monkeygroover on 09/10/15.
*/
abstract class RestRoutes(implicit system: ActorSystem, materializer: Materializer) extends Directives {
lazy val routes = addData ~ updateData ~ deleteData ~ getData ~ getHistory
val addData =
post {
path("customer" / Segment) { customerId =>
entity(as[PartialRecord]) { record =>
addOperation(customerId, record)
}
}
}
def addOperation(customerId: String, record: PartialRecord): Route
val getData =
get {
path("customer" / Segment) { customerId =>
getOperation(customerId)
}
}
def getOperation(customerId: String): Route
val getHistory =
get {
path("customer" / Segment / "history") { customerId =>
getHistoryOperation(customerId)
}
}
def getHistoryOperation(customerId: String): Route
val updateData =
patch {
path("customer" / Segment / Segment) { (customerId, uuid) =>
entity(as[UpdateRecord]) { update =>
updateOperation(customerId, uuid, update)
}
}
}
def updateOperation(customerId: String, uuid: String, update: UpdateRecord): Route
val deleteData =
post {
path("customer" / Segment / Segment) { (customerId, uuid) =>
deleteOperation(customerId, uuid)
}
}
def deleteOperation(customerId: String, uuid: String): Route
}
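// Hedged sketch (added; not part of the original file): a concrete frontend only has to supply
// the abstract *Operation methods, since the Route values above already wire up the HTTP verbs
// and paths. `complete` is inherited from akka-http's Directives; the stub responses below are
// illustrative only.
//
//   class StubRoutes(implicit system: ActorSystem, mat: Materializer) extends RestRoutes {
//     def addOperation(customerId: String, record: PartialRecord): Route = complete("added")
//     def getOperation(customerId: String): Route = complete("data")
//     def getHistoryOperation(customerId: String): Route = complete("history")
//     def updateOperation(customerId: String, uuid: String, update: UpdateRecord): Route = complete("updated")
//     def deleteOperation(customerId: String, uuid: String): Route = complete("deleted")
//   }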
|
monkeygroover/cluster-sharding
|
rest/src/main/scala/com/monkeygroover/frontend/RestRoutes.scala
|
Scala
|
gpl-2.0
| 1,676
|
package models
case class Page[A](items: Seq[A], page: Int, size: Int, offset: Long, total: Long) {
lazy val prev = Option(page - 1).filter(_ >= 0)
lazy val next = Option(page + 1).filter(_ => (offset + items.size) < total)
def isEmpty: Boolean = items.isEmpty
def map[B](f: (A) => B): Page[B] = this.copy(items.map(f))
}
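// Hedged usage sketch (added; not part of the original file): for page index 1 of a 5-item
// result set fetched 2 at a time, the navigation helpers behave as follows.
//   val page = Page(items = Seq("c", "d"), page = 1, size = 2, offset = 2, total = 5)
//   page.prev                      // Some(0)
//   page.next                      // Some(2), because offset (2) + items.size (2) < total (5)
//   page.map(_.toUpperCase).items  // Seq("C", "D")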
|
studiocredo/ticket-reservation
|
app/models/Page.scala
|
Scala
|
apache-2.0
| 333
|
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.repository.ldap
import com.normation.rudder.domain.nodes._
import com.normation.inventory.domain.NodeId
import com.normation.rudder.domain.queries.Query
import net.liftweb.common._
import com.normation.eventlog.EventActor
import com.normation.utils.HashcodeCaching
import scala.collection.SortedMap
import com.normation.rudder.services.marshalling.DirectiveUnserialisation
import com.normation.rudder.services.marshalling.ActiveTechniqueUnserialisation
import com.normation.rudder.services.marshalling.ActiveTechniqueCategoryUnserialisation
import com.normation.rudder.repository.ParseActiveTechniqueLibrary
import com.normation.utils.XmlUtils
import com.normation.rudder.repository.NodeGroupCategoryContent
import java.io.File
import java.io.FileInputStream
import com.normation.utils.UuidRegex
import com.normation.utils.Control.sequence
import com.normation.rudder.repository.ImportTechniqueLibrary
import com.normation.rudder.domain.policies.ActiveTechniqueId
import com.normation.rudder.domain.policies.DirectiveId
import com.unboundid.ldap.sdk.RDN
import com.normation.rudder.domain.RudderDit
import com.normation.ldap.sdk.LDAPConnectionProvider
import com.normation.utils.ScalaReadWriteLock
import com.unboundid.ldap.sdk.DN
import com.normation.rudder.domain.policies.ActiveTechniqueCategoryId
import com.normation.cfclerk.domain.TechniqueName
import com.normation.ldap.sdk.RwLDAPConnection
import org.joda.time.DateTime
import org.joda.time.format.ISODateTimeFormat
import com.normation.rudder.domain.RudderLDAPConstants._
import com.normation.inventory.ldap.core.LDAPConstants.A_OC
import com.normation.ldap.sdk.GeneralizedTime
import com.normation.rudder.repository.ImportGroupLibrary
import com.normation.rudder.repository.NodeGroupLibraryArchiveId
import com.normation.rudder.domain.policies.GroupTarget
trait LDAPImportLibraryUtil extends Loggable {
//move user lib to archive branch
def moveToArchive(connection:RwLDAPConnection, sourceLibraryDN:DN, targetArchiveDN:DN) : Box[Unit] = {
for {
      ok <- connection.move(sourceLibraryDN, targetArchiveDN.getParent, Some(targetArchiveDN.getRDN)) ?~! "Error when archiving current Library with DN '%s' to LDAP".format(targetArchiveDN)
} yield {
{}
}
}
//copy back system categories/groups if includeSystem is FALSE
def copyBackSystemEntrie(con:RwLDAPConnection, sourceLibraryDN:DN, targetArchiveDN:DN) : Box[Unit] = {
//the only hard part could be for system group in non system categories, because
//we may miss a parent. But it should not be allowed, so we consider such cases
//as errors
import com.normation.ldap.sdk._
import com.normation.ldap.sdk.BuildFilter.EQ
//a method that change the root of a dn from archive to user lib root
def setUserLibRoot(dn:DN) : Option[DN] = {
def recParent(x:DN) : Option[List[RDN]] = {
if(null == x) None
else if(x == targetArchiveDN) Some(Nil)
else recParent(x.getParent).map( x.getRDN :: _ )
}
def recBuildDN(root:DN, rdns:List[RDN]) : DN = rdns match {
case Nil => root
case h::t => recBuildDN(new DN(h,root),t)
}
val relatives = recParent(dn)
relatives.map( rdns => recBuildDN(sourceLibraryDN, rdns.reverse))
}
val entries = con.searchSub(targetArchiveDN, EQ(A_IS_SYSTEM,true.toLDAPString))
val allDNs = entries.map( _.dn ).toSet
//update DN to UserLib DN, remove root entry and entries without parent in that set
val updatedDNEntries = (entries.collect {
case entry if(entry.dn == targetArchiveDN) =>
logger.trace("Skipping root entry, already taken into account")
None
case entry if(allDNs.exists( _ == entry.dn.getParent)) =>
//change the DN to user lib
setUserLibRoot(entry.dn) match {
case None =>
logger.error("Ignoring entry with DN '%s' because it does not belong to archive '%s'".format(entry.dn, targetArchiveDN))
None
case Some(dn) =>
Some(LDAPEntry(dn, entry.attributes))
}
case entry =>
logger.error("Error when trying to save entry '%s' marked as system: its parent is not available, perhaps it is not marked as system?".format(entry.dn))
None
}).flatten
//actually save system entries in User Lib
(sequence(updatedDNEntries.sortWith( (x,y) => DN.compare(x.dn.toString, y.dn.toString) < 0)) {
entry => con.save(entry) ?~! "Error when copying back system entry '%s' from archive '%s'".format(entry.dn, targetArchiveDN)
    }).map { _ => () }
}
//restore in case of error
def restoreArchive(con:RwLDAPConnection, sourceLibraryDN:DN, targetArchiveDN:DN) : Box[Unit] = {
for {
delete <- if(con.exists(sourceLibraryDN)) {
con.delete(sourceLibraryDN)
} else Full("ok")
movedBack <- con.move(targetArchiveDN, sourceLibraryDN.getParent, Some(sourceLibraryDN.getRDN))
} yield {
() // unit is expected
}
}
}
class ImportGroupLibraryImpl(
rudderDit : RudderDit
, ldap : LDAPConnectionProvider[RwLDAPConnection]
, mapper : LDAPEntityMapper
, groupLibMutex: ScalaReadWriteLock //that's a scala-level mutex to have some kind of consistency with LDAP
) extends ImportGroupLibrary with LDAPImportLibraryUtil {
/**
* That method swap an existing active technique library in LDAP
* to a new one.
*
* In case of error, we try to restore the old technique library.
*/
def swapGroupLibrary(rootCategory:NodeGroupCategoryContent, includeSystem:Boolean = false) : Box[Unit] = {
/*
     * High level behaviour:
     * - check that the Group Library respects global rules
     *   (no two categories or groups with the same name, etc.)
     *   If not, remove duplicates with error logs (because perhaps they are not duplicates,
     *   the user will want to know)
* - move current group lib elsewhere in the LDAP
* (with the root and the system)
* - create back the root
* - copy back system if kept
* - save all categories, groups
* - if everything goes well, delete the old group library
* - else, rollback: delete new group lib, move back old group lib
*
*/
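    // Added summary comment (not in the original source): in code terms, the flow below is
    // checkUserLibConsistance(rootCategory) -> groupLibMutex.writeLock { atomicSwap(cleanLib) },
    // where atomicSwap = moveToArchive + saveUserLib (+ copyBackSystemEntrie when system entries
    // are excluded from the import), with restoreArchive as the rollback path; the temporary
    // archive entry is deleted once the swap succeeds.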
    // as atomic as we can :)
//don't bother with system and consistency here, it is taken into account elsewhere
def atomicSwap(userLib:NodeGroupCategoryContent) : Box[NodeGroupLibraryArchiveId] = {
//save the new one
//we need to keep the git commit id
def saveUserLib(con:RwLDAPConnection, userLib:NodeGroupCategoryContent) : Box[Unit] = {
def recSaveUserLib(parentDN:DN, content:NodeGroupCategoryContent) : Box[Unit] = {
//start with the category
        //then with the groups of that category
//then recurse on sub-categories
val categoryEntry = mapper.nodeGroupCategory2ldap(content.category, parentDN)
for {
category <- con.save(categoryEntry) ?~! "Error when persisting category with DN '%s' in LDAP".format(categoryEntry.dn)
groups <- sequence(content.groups.toSeq) { nodeGroup =>
val nodeGroupEntry = rudderDit.GROUP.groupModel(
nodeGroup.id.value,
categoryEntry.dn,
nodeGroup.name,
nodeGroup.description,
nodeGroup.query,
nodeGroup.isDynamic,
nodeGroup.serverList,
nodeGroup.isEnabled,
nodeGroup.isSystem
)
con.save(nodeGroupEntry,true) ?~! "Error when persisting group entry with DN '%s' in LDAP".format(nodeGroupEntry.dn)
}
subCategories <- sequence(content.categories.toSeq) { cat =>
recSaveUserLib(categoryEntry.dn, cat)
}
} yield {
() // unit is expected
}
}
recSaveUserLib(rudderDit.GROUP.dn.getParent,userLib)
}
val archiveId = NodeGroupLibraryArchiveId(DateTime.now().toString(ISODateTimeFormat.dateTime))
val targetArchiveDN = rudderDit.ARCHIVES.groupLibDN(archiveId)
//the sequence of operation to actually perform the swap with rollback
for {
con <- ldap
archived <- moveToArchive(con, rudderDit.GROUP.dn, targetArchiveDN)
finished <- {
(for {
saved <- saveUserLib(con, userLib)
system <-if(includeSystem) Full("OK")
else copyBackSystemEntrie(con, rudderDit.GROUP.dn, targetArchiveDN) ?~! "Error when copying back system entries in the imported library"
} yield {
system
}) match {
case Full(unit) => Full(unit)
case eb:EmptyBox =>
logger.error("Error when trying to load archived active technique library. Rollbaching to previous one.")
restoreArchive(con, rudderDit.GROUP.dn, targetArchiveDN) match {
                case eb2: EmptyBox => eb ?~! "Error when trying to restore archive with ID '%s' for the group library".format(archiveId.value)
                case Full(_) => eb ?~! "Error when trying to import the group library. A rollback to previous state was executed"
}
}
}
} yield {
archiveId
}
}
/**
   * Check that the user lib matches our global rules:
   * - two NodeGroups can't have the same name (arbitrarily skip the second one)
   * - two categories can not have the same name (arbitrarily skip the second one)
   * - all ids must be unique
   * + remove system library entries if we don't want them
*/
def checkUserLibConsistance(userLib:NodeGroupCategoryContent) : Box[NodeGroupCategoryContent] = {
import scala.collection.mutable.{Set,Map}
val nodeGroupIds = Set[NodeGroupId]()
val nodeGroupNames = Map[String, NodeGroupId]()
val categoryIds = Set[NodeGroupCategoryId]()
val categoryNames = Map[String, NodeGroupCategoryId]()
def sanitizeNodeGroup(nodeGroup:NodeGroup) : Option[NodeGroup] = {
if(nodeGroup.isSystem && includeSystem == false) None
else if(nodeGroupIds.contains(nodeGroup.id)) {
logger.error("Ignoring Active Technique because is ID was already processed: " + nodeGroup)
None
} else nodeGroupNames.get(nodeGroup.name) match {
case Some(id) =>
logger.error("Ignoring Active Technique with ID '%s' because it references technique with name '%s' already referenced by active technique with ID '%s'".format(
nodeGroup.id.value, nodeGroup.name, id.value
))
None
case None =>
Some(nodeGroup)
}
}
def recSanitizeCategory(content:NodeGroupCategoryContent, isRoot:Boolean = false) : Option[NodeGroupCategoryContent] = {
val cat = content.category
if( !isRoot && content.category.isSystem && includeSystem == false) None
else if(categoryIds.contains(cat.id)) {
logger.error("Ignoring Active Technique Category because its ID was already processed: " + cat)
None
} else if(cat.name == null || cat.name.size < 1) {
logger.error("Ignoring Active Technique Category because its name is empty: " + cat)
None
} else categoryNames.get(cat.name) match { //name is mandatory
case Some(id) =>
logger.error("Ignoring Active Technique Categor with ID '%s' because its name is '%s' already referenced by category with ID '%s'".format(
cat.id.value, cat.name, id.value
))
None
      case None => //OK, process groups and sub-categories!
categoryIds += cat.id
categoryNames += (cat.name -> cat.id)
val subCategories = content.categories.flatMap( recSanitizeCategory(_) ).toSet
val subNodeGroups = content.groups.flatMap( sanitizeNodeGroup(_) ).toSet
          //remove targets pointing to groups that were not kept
val directiveTargetInfos = cat.items.filter { info =>
info.target match {
case GroupTarget(id) => subNodeGroups.exists(g => g.id == id )
case x => true
}
}
Some(content.copy(
category = cat.copy(
children = subCategories.toList.map( _.category.id )
, items = directiveTargetInfos
)
, categories = subCategories
, groups = subNodeGroups
))
}
}
Box(recSanitizeCategory(userLib, true)) ?~! "Error when trying to sanitize serialised user library for consistency errors"
}
//all the logic for a library swap.
for {
cleanLib <- checkUserLibConsistance(rootCategory)
moved <- groupLibMutex.writeLock { atomicSwap(cleanLib) } ?~! "Error when swapping serialised library and existing one in LDAP"
} yield {
//delete archive - not a real error if fails
val dn = rudderDit.ARCHIVES.groupLibDN(moved)
(for {
con <- ldap
deleted <- con.delete(dn)
} yield {
deleted
}) match {
case eb:EmptyBox =>
logger.warn("Error when deleting archived library in LDAP with DN '%s'".format(dn))
case _ => //
}
() // unit is expected
}
}
}
|
jooooooon/rudder
|
rudder-core/src/main/scala/com/normation/rudder/repository/ldap/LDAPSwapGroupLibrary.scala
|
Scala
|
agpl-3.0
| 16,134
|
/*
* Copyright (c) 2013 Habla Computing
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.hablapps.updatable
import scala.language.higherKinds
import scala.reflect.{ ClassTag, classTag }
import scala.reflect.runtime.universe
import universe.{ typeOf, TypeTag, WeakTypeTag }
trait WithElemClass[C[_]] {
def classOfElements(i: C[_]): Class[_]
}
class FilterConsWithClass[C1[_]: Modifiable, C2[_]: WithElemClass, V](c: C1[C2[V]]) {
def filterSubtypesWithElemClass[U <: V: ClassTag] =
(imodifiable.filter(c) {
classTag[U].runtimeClass isAssignableFrom classOfElements(_)
}).asInstanceOf[C1[C2[U]]]
}
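// Hedged usage note (added; not part of the original file): given Modifiable evidence for the
// outer container C1 and WithElemClass evidence for the inner container C2, the class above keeps
// only the inner collections whose runtime element class is a subtype of U, e.g. (names are
// illustrative only):
//   val mixed: List[Bag[Animal]] = ...
//   new FilterConsWithClass(mixed).filterSubtypesWithElemClass[Dog]   // typed as List[Bag[Dog]]
// The refined result type relies on the unchecked asInstanceOf cast in filterSubtypesWithElemClass.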
|
hablapps/updatable
|
src/main/scala/org/hablapps/updatable/filter.scala
|
Scala
|
apache-2.0
| 1,143
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.io.File
import java.util.UUID
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkConf
import org.apache.spark.sql.streaming.StreamTest
class StreamMetadataSuite extends StreamTest {
override def sparkConf: SparkConf =
super.sparkConf
.setAppName("test")
.set("spark.sql.parquet.columnarReaderBatchSize", "4096")
.set("spark.sql.sources.useV1SourceList", "avro")
.set("spark.sql.extensions", "com.intel.oap.ColumnarPlugin")
.set("spark.sql.execution.arrow.maxRecordsPerBatch", "4096")
//.set("spark.shuffle.manager", "org.apache.spark.shuffle.sort.ColumnarShuffleManager")
.set("spark.memory.offHeap.enabled", "true")
.set("spark.memory.offHeap.size", "50m")
.set("spark.sql.join.preferSortMergeJoin", "false")
.set("spark.sql.columnar.codegen.hashAggregate", "false")
.set("spark.oap.sql.columnar.wholestagecodegen", "false")
.set("spark.sql.columnar.window", "false")
.set("spark.unsafe.exceptionOnMemoryLeak", "false")
//.set("spark.sql.columnar.tmp_dir", "/codegen/nativesql/")
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
test("writing and reading") {
withTempDir { dir =>
val id = UUID.randomUUID.toString
val metadata = StreamMetadata(id)
val file = new Path(new File(dir, "test").toString)
StreamMetadata.write(metadata, file, hadoopConf)
val readMetadata = StreamMetadata.read(file, hadoopConf)
assert(readMetadata.nonEmpty)
assert(readMetadata.get.id === id)
}
}
ignore("read Spark 2.1.0 format") {
// query-metadata-logs-version-2.1.0.txt has the execution metadata generated by Spark 2.1.0
assert(
readForResource("query-metadata-logs-version-2.1.0.txt") ===
StreamMetadata("d366a8bf-db79-42ca-b5a4-d9ca0a11d63e"))
}
private def readForResource(fileName: String): StreamMetadata = {
val input = getClass.getResource(s"/structured-streaming/$fileName")
StreamMetadata.read(new Path(input.toString), hadoopConf).get
}
private val hadoopConf = new Configuration()
}
|
Intel-bigdata/OAP
|
oap-native-sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/StreamMetadataSuite.scala
|
Scala
|
apache-2.0
| 3,057
|
package nak
import org.scalatest.FunSpec
import nak.data.FeatureObservation
/**
* Make sure NakContext functions work as expected.
*/
class NakContextSpec extends FunSpec {
import NakContext._
describe("feature observation condensing") {
it ("should merge counts and sort") {
val orig =
Seq(FeatureObservation(1,1.0),
FeatureObservation(0,1.0),
FeatureObservation(1,2.0))
val goal =
Seq(FeatureObservation(0,1.0),
FeatureObservation(1,3.0))
assert(goal === condense(orig))
}
}
}
|
scalanlp/nak
|
src/test/scala/nak/NakContextTest.scala
|
Scala
|
apache-2.0
| 579
|
/*
* Copyright (c) 2011-13 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless.examples
/**
* Sized collection examples.
*
* @author Miles Sabin
*/
object SizedExamples extends App {
import shapeless._
import syntax.sized._
def sequence[T](lo : List[Option[T]]) = if (lo.exists(_.isEmpty)) None else Some(lo.map(_.get))
  def row(cols : Seq[String]) = cols.mkString("\"", "\", \"", "\"")
def csv[N <: Nat](hdrs : Sized[Seq[String], N], rows : List[Sized[Seq[String], N]]) =
row(hdrs) :: rows.map(row(_))
def fullyStatic: Unit = {
val hdrs = Sized("Title", "Author") // Sized[IndexedSeq[String], _2]
val rows = List( // List[Sized[IndexedSeq[String], _2]]
Sized("Types and Programming Languages", "Benjamin Pierce"),
Sized("The Implementation of Functional Programming Languages", "Simon Peyton-Jones")
)
// hdrs and rows statically known to have the same number of columns
val formatted = csv(hdrs, rows)
formatted foreach println // Compiles
println
// extendedHdrs has the wrong number of columns for rows
val extendedHdrs = Sized("Title", "Author", "ISBN") // Sized[IndexedSeq[Int], _3]
    //val badFormatted = csv(extendedHdrs, rows) // Does not compile
// Extend the rows to match ...
val extendedRows = rows map (_ :+ "-") // List[Sized[IndexedSeq[String], _3]]
val extendedFormatted = csv(extendedHdrs, extendedRows) // Compiles
extendedFormatted foreach println
}
def mixedDynamicStatic: Unit = {
val hdrs = List("Title", "Author")
val rows = List(
List("Types and Programming Languages", "Benjamin Pierce"),
List("The Implementation of Functional Programming Languages", "Simon Peyton-Jones")
)
for {
shdrs <- hdrs.sized(2)
srows <- sequence(rows map (_.sized(2)))
} {
      // If we get here then our lists are statically known to be
// of the appropriate sizes
val formatted = csv(shdrs, srows)
formatted foreach println
}
println
// extendedHdrs has the wrong number of columns for rows
val extendedHdrs = List("Title", "Author", "ISBN")
for {
shdrs <- extendedHdrs.sized(2) // This will be empty ...
srows <- sequence(rows map (_.sized(2)))
} {
// ... hence, not reached
val formatted = csv(shdrs, srows)
formatted foreach println
}
// Extend the rows to match ...
val extendedRows = rows map (_ :+ "-")
for {
shdrs <- extendedHdrs.sized(3)
srows <- sequence(extendedRows map (_.sized(3)))
} {
// ... reached this time
val formatted = csv(shdrs, srows)
formatted foreach println
}
}
println("Fully static: ")
fullyStatic
println
println("Mixed dynamic/static")
mixedDynamicStatic
}
|
lambdista/shapeless
|
examples/src/main/scala/shapeless/examples/sized.scala
|
Scala
|
apache-2.0
| 3,498
|
package scoobie.doobie
import _root_.doobie.imports._
import org.specs2._
import scoobie.doobie.mysql._
import scoobie.dsl.schemaless.ansi.sql
import scoobie.dsl.schemaless.ansi.sql._
import scalaz.NonEmptyList
import scalaz.concurrent.Task
/**
* Created by jbarber on 5/14/16.
*/
class MySqlTest extends Specification {
implicit val logger = scoobie.doobie.log.verboseTestLogger
val xa: Transactor[Task] = DriverManagerTransactor[Task](
"com.mysql.cj.jdbc.Driver", "jdbc:mysql://localhost/world?serverTimezone=America/Chicago", "root", ""
)
def is =
s2"""
Building queries should work properly
semi-complex select $semiComplexSelectResult
where in select $selectWhereInResult
record life-cycle ${endToEndTest.transact(xa).unsafePerformSync}
"""
case class Country(name: String, gnp: Int, code: String)
lazy val semiComplexSelect =
(
select(
p"c1.name",
(p"c1.gnp" + 5) as "c1gnp",
p"c1.code",
p"c2.name",
p"c2.gnp",
p"c2.code"
) from (
p"country" as "c1"
) innerJoin (
p"country" as "c2" on (
p"c2.code" === func"reverse" (p"c1.code")
)
) where (
sql.not(p"c2.code" === "USA") and
p"c1.lifeexpectancy" > 50
)
).build
.query[(Country, Country)]
.list
.transact(xa)
.unsafePerformSync
lazy val selectWhereIn = {
val codes = NonEmptyList("TUV", "YUG")
(
select(p"name") from p"country" where (p"code" in ("USA", "BRA", codes))
).build
.query[String]
.list
.transact(xa)
.unsafePerformSync
}
lazy val endToEndTest = {
for {
inserted <- (insertInto(p"city") values(
p"id" ==> 4080,
p"name" ==> "test",
p"countrycode" ==> "SHT",
p"district" ==> "District of unlawful testing",
p"population" ==> 1
)).update.run
select1 <- (select(p"district") from p"city" where (p"id" === 4080))
.build
.query[String]
.option
updated <- (update(p"city") set (p"population" ==> 10) where (p"id" === 4080))
.build
.update
.run
select2 <- (select(p"population") from p"city" where (p"id" === 4080))
.build
.query[Int]
.option
deleted <- (deleteFrom(p"city") where (p"id" === 4080))
.update
.run
select3 <- (select(p"population") from p"city" where (p"id" === 4080))
.build
.query[Int]
.option
} yield {
inserted must beEqualTo(1)
select1 must beEqualTo(Some("District of unlawful testing"))
updated must beEqualTo(1)
select2 must beEqualTo(Some(10))
deleted must beEqualTo(1)
select3 must beEqualTo(None)
}
}
// id integer NO
// name varchar
// countrycode c
// district varc
// population in
def semiComplexSelectResult = {
semiComplexSelect must haveSize(10)
}
def selectWhereInResult = {
selectWhereIn must haveSize(4)
}
}
|
Jacoby6000/Scala-SQL-AST
|
doobie/mysql/src/it/scala/scoobie/doobie/MySqlTest.scala
|
Scala
|
mit
| 3,437
|
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.iterators
import java.text.DecimalFormat
import com.google.common.collect.HashBasedTable
import com.typesafe.scalalogging.LazyLogging
import com.vividsolutions.jts.geom.Envelope
import org.geotools.data._
import org.geotools.data.simple.SimpleFeatureIterator
import org.geotools.filter.text.ecql.ECQL
import org.geotools.filter.visitor.ExtractBoundsFilterVisitor
import org.geotools.geometry.jts.ReferencedEnvelope
import org.geotools.referencing.crs.DefaultGeographicCRS
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.data.{AccumuloDataStore, AccumuloFeatureStore}
import org.locationtech.geomesa.accumulo.index.QueryHints
import org.locationtech.geomesa.utils.geotools.Conversions.RichSimpleFeature
import org.locationtech.geomesa.utils.geotools.GridSnap
import org.opengis.feature.simple.SimpleFeature
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class LiveDensityIteratorTest extends Specification with LazyLogging {
sequential
/**
* WARNING: this test runs against a live accumulo instance
*/
val params = Map(
"instanceId" -> "mycloud",
"zookeepers" -> "zoo1,zoo2,zoo3",
"user" -> "myuser",
"password" -> "mypass",
"auths" -> "user,admin",
"visibilities" -> "",
"tableName" -> "mytable",
"featureEncoding" -> "avro")
val sftName = "fr"
val bbox = new { val lowx = -78.57; val lowy = 40.96; val highx = -77.23; val highy = 42.29 }
val dates = "'2013-01-06T00:00:00.000Z' and '2013-01-09T00:00:00.000Z'"
val size = new { val width = 300; val height = 150 }
var snap: GridSnap = null
val map = HashBasedTable.create[Double, Double, Long]()
def getDataStore: AccumuloDataStore = {
DataStoreFinder.getDataStore(params).asInstanceOf[AccumuloDataStore]
}
def printFeatures(featureIterator: SimpleFeatureIterator): Unit = {
val features = new Iterator[SimpleFeature] {
def hasNext = {
val next = featureIterator.hasNext
if (!next)
featureIterator.close
next
}
def next = {
featureIterator.next
}
}.toList
logger.debug("dates: {}", dates)
logger.debug(s"total points: ${features.size}")
logger.debug(s"unique points: ${features.groupBy(_.getDefaultGeometry).size}")
val weights = features.map(_.getProperty("weight").getValue.toString.toDouble)
logger.debug(s"total weight: ${weights.sum}")
logger.debug(s"max weight: ${weights.max}")
features.foreach {
f =>
val point = f.point
map.put(point.getY, point.getX, map.get(point.getY, point.getX) + f.getProperty("weight").getValue.toString.toDouble.toLong)
}
logger.debug(s"max joined weight: ${map.values().max}")
val output = new StringBuilder()
val df = new DecimalFormat("0")
map.rowMap().foreach {
case (rowIdx, cols) =>
cols.foreach {
case (colIdx, v) =>
if (v == 0) {
output.append(" ")
} else {
output.append(df.format(v))
}
}
        output.append("\n")
}
logger.trace(output.toString())
}
def getQuery(query: String, width: Int, height: Int): Query = {
val q = new Query(sftName, ECQL.toFilter(query))
val geom = q.getFilter.accept(ExtractBoundsFilterVisitor.BOUNDS_VISITOR, null).asInstanceOf[Envelope]
val env = new ReferencedEnvelope(geom, DefaultGeographicCRS.WGS84)
q.getHints.put(QueryHints.DENSITY_BBOX_KEY, env)
q.getHints.put(QueryHints.WIDTH_KEY, width)
q.getHints.put(QueryHints.HEIGHT_KEY, height)
// re-create the snap and populate each point
snap = new GridSnap(env, width, height)
var i = 0
while(i < width) {
var j = 0
while(j < height) {
map.put(snap.y(j), snap.x(i), 0)
j = j + 1
}
i = i + 1
}
q
}
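  // Added note (not in the original source): DENSITY_BBOX_KEY together with WIDTH_KEY/HEIGHT_KEY
  // is what asks GeoMesa to run the density iterator for this query; the GridSnap built from the
  // same envelope and dimensions lets printFeatures accumulate weights on the identical grid.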
"AccumuloDataStore" should {
"connect to accumulo" in {
skipped("Meant for integration testing")
val ds = getDataStore
val query = getQuery(s"(dtg between $dates) and BBOX(geom, ${bbox.lowx}, ${bbox.lowy}, ${bbox.highx}, ${bbox.highy})", size.width, size.height)
// get the feature store used to query the GeoMesa data
val featureStore = ds.getFeatureSource(sftName).asInstanceOf[AccumuloFeatureStore]
// execute the query
val results = featureStore.getFeatures(query)
// loop through all results
printFeatures(results.features)
success
}
}
}
|
vpipkt/geomesa
|
geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/iterators/LiveDensityIteratorTest.scala
|
Scala
|
apache-2.0
| 5,137
|
package water.sparkling
import water.sparkling.demo.RDDFrameExtractor
import org.apache.spark.rdd.RDD
import water.fvec.Vec.VectorGroup
import water.fvec.{NewChunk, AppendableVec,Frame}
import water.{H2O, DRemoteTask}
import org.apache.spark.Partition
import scala.reflect.runtime.universe.TypeTag
/** A frame extractor which goes around the H2O cloud and forces
 * each node to load data from a specified part of the RDD.
*
* NOTE: this does not work since RDD cannot be shared among multiple processes
*/
object DistributedFrameExtractor extends RDDFrameExtractor {
def apply[S <: Product : TypeTag](rdd: RDD[org.apache.spark.sql.Row]): Frame = {
val sc = rdd.context
val vg = new VectorGroup()
val vec = new AppendableVec(vg.addVec())
val result = sc.runJob(rdd, (partIt:Iterator[org.apache.spark.sql.Row]) => {
val a = new NewChunk(vec, 1)
a.close(null)
a
})
//new PartitionExtractor(rdd).invokeOnAllNodes()
result.foreach(println)
null
}
/**
* Distributed task to extract a partition data into frame.
*/
class PartitionExtractor(val rdd: RDD[org.apache.spark.sql.Row]) extends DRemoteTask[PartitionExtractor] {
def lcompute():Unit = {
// Connect to Spark cloud
val sc = rdd.context
println(sc)
//sc.get
// Get create RDD and query for its partition data assigned for this node
tryComplete()
}
private def isMyPartition(p: Partition):Boolean = (p.index % H2O.CLOUD.size() == H2O.SELF.index())
def reduce(drt: PartitionExtractor):Unit = {}
}
override def name: String = "distributed"
}
|
h2oai/h2o-sparkling
|
src/main/scala/water/sparkling/DistributedFrameExtractor.scala
|
Scala
|
apache-2.0
| 1,668
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.lang.{Long => JLong}
import java.net.InetAddress
import java.util
import java.util.Collections
import kafka.api.{ApiVersion, KAFKA_0_10_2_IV0}
import kafka.cluster.Replica
import kafka.controller.KafkaController
import kafka.coordinator.group.GroupCoordinator
import kafka.coordinator.transaction.TransactionCoordinator
import kafka.log.{Log, TimestampOffset}
import kafka.network.RequestChannel
import kafka.network.RequestChannel.SendResponse
import kafka.security.auth.Authorizer
import kafka.server.QuotaFactory.QuotaManagers
import kafka.utils.{MockTime, TestUtils}
import kafka.zk.KafkaZkClient
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.UnsupportedVersionException
import org.apache.kafka.common.memory.MemoryPool
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.record.RecordBatch
import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse
import org.apache.kafka.common.requests.UpdateMetadataRequest.{Broker, EndPoint}
import org.apache.kafka.common.requests.WriteTxnMarkersRequest.TxnMarkerEntry
import org.apache.kafka.common.requests._
import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol}
import org.apache.kafka.common.utils.Utils
import org.easymock.{Capture, EasyMock, IAnswer}
import org.junit.Assert.{assertEquals, assertTrue}
import org.junit.{After, Test}
import scala.collection.JavaConverters._
import scala.collection.Map
class KafkaApisTest {
private val requestChannel = EasyMock.createNiceMock(classOf[RequestChannel])
private val requestChannelMetrics = EasyMock.createNiceMock(classOf[RequestChannel.Metrics])
private val replicaManager = EasyMock.createNiceMock(classOf[ReplicaManager])
private val groupCoordinator = EasyMock.createNiceMock(classOf[GroupCoordinator])
private val adminManager = EasyMock.createNiceMock(classOf[AdminManager])
private val txnCoordinator = EasyMock.createNiceMock(classOf[TransactionCoordinator])
private val controller = EasyMock.createNiceMock(classOf[KafkaController])
private val zkClient = EasyMock.createNiceMock(classOf[KafkaZkClient])
private val metrics = new Metrics()
private val brokerId = 1
private val metadataCache = new MetadataCache(brokerId)
private val authorizer: Option[Authorizer] = None
private val clientQuotaManager = EasyMock.createNiceMock(classOf[ClientQuotaManager])
private val clientRequestQuotaManager = EasyMock.createNiceMock(classOf[ClientRequestQuotaManager])
private val replicaQuotaManager = EasyMock.createNiceMock(classOf[ReplicationQuotaManager])
private val quotas = QuotaManagers(clientQuotaManager, clientQuotaManager, clientRequestQuotaManager,
replicaQuotaManager, replicaQuotaManager, replicaQuotaManager, None)
private val fetchManager = EasyMock.createNiceMock(classOf[FetchManager])
private val brokerTopicStats = new BrokerTopicStats
private val clusterId = "clusterId"
private val time = new MockTime
@After
def tearDown() {
quotas.shutdown()
metrics.close()
}
def createKafkaApis(interBrokerProtocolVersion: ApiVersion = ApiVersion.latestVersion): KafkaApis = {
val properties = TestUtils.createBrokerConfig(brokerId, "zk")
properties.put(KafkaConfig.InterBrokerProtocolVersionProp, interBrokerProtocolVersion.toString)
properties.put(KafkaConfig.LogMessageFormatVersionProp, interBrokerProtocolVersion.toString)
new KafkaApis(requestChannel,
replicaManager,
adminManager,
groupCoordinator,
txnCoordinator,
controller,
zkClient,
brokerId,
new KafkaConfig(properties),
metadataCache,
metrics,
authorizer,
quotas,
fetchManager,
brokerTopicStats,
clusterId,
time,
null
)
}
@Test
def testOffsetCommitWithInvalidPartition(): Unit = {
val topic = "topic"
setupBasicMetadataCache(topic, numPartitions = 1)
def checkInvalidPartition(invalidPartitionId: Int): Unit = {
EasyMock.reset(replicaManager, clientRequestQuotaManager, requestChannel)
val invalidTopicPartition = new TopicPartition(topic, invalidPartitionId)
val partitionOffsetCommitData = new OffsetCommitRequest.PartitionData(15L, "")
val (offsetCommitRequest, request) = buildRequest(new OffsetCommitRequest.Builder("groupId",
Map(invalidTopicPartition -> partitionOffsetCommitData).asJava))
val capturedResponse = expectNoThrottling()
EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel)
createKafkaApis().handleOffsetCommitRequest(request)
val response = readResponse(ApiKeys.OFFSET_COMMIT, offsetCommitRequest, capturedResponse)
.asInstanceOf[OffsetCommitResponse]
assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response.responseData().get(invalidTopicPartition))
}
checkInvalidPartition(-1)
checkInvalidPartition(1) // topic has only one partition
}
@Test
def testTxnOffsetCommitWithInvalidPartition(): Unit = {
val topic = "topic"
setupBasicMetadataCache(topic, numPartitions = 1)
def checkInvalidPartition(invalidPartitionId: Int): Unit = {
EasyMock.reset(replicaManager, clientRequestQuotaManager, requestChannel)
val invalidTopicPartition = new TopicPartition(topic, invalidPartitionId)
val partitionOffsetCommitData = new TxnOffsetCommitRequest.CommittedOffset(15L, "")
val (offsetCommitRequest, request) = buildRequest(new TxnOffsetCommitRequest.Builder("txnlId", "groupId",
15L, 0.toShort, Map(invalidTopicPartition -> partitionOffsetCommitData).asJava))
val capturedResponse = expectNoThrottling()
EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel)
createKafkaApis().handleTxnOffsetCommitRequest(request)
val response = readResponse(ApiKeys.TXN_OFFSET_COMMIT, offsetCommitRequest, capturedResponse)
.asInstanceOf[TxnOffsetCommitResponse]
assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response.errors().get(invalidTopicPartition))
}
checkInvalidPartition(-1)
checkInvalidPartition(1) // topic has only one partition
}
@Test
def testAddPartitionsToTxnWithInvalidPartition(): Unit = {
val topic = "topic"
setupBasicMetadataCache(topic, numPartitions = 1)
def checkInvalidPartition(invalidPartitionId: Int): Unit = {
EasyMock.reset(replicaManager, clientRequestQuotaManager, requestChannel)
val invalidTopicPartition = new TopicPartition(topic, invalidPartitionId)
val (addPartitionsToTxnRequest, request) = buildRequest(new AddPartitionsToTxnRequest.Builder(
"txnlId", 15L, 0.toShort, List(invalidTopicPartition).asJava))
val capturedResponse = expectNoThrottling()
EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel)
createKafkaApis().handleAddPartitionToTxnRequest(request)
val response = readResponse(ApiKeys.ADD_PARTITIONS_TO_TXN, addPartitionsToTxnRequest, capturedResponse)
.asInstanceOf[AddPartitionsToTxnResponse]
assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response.errors().get(invalidTopicPartition))
}
checkInvalidPartition(-1)
checkInvalidPartition(1) // topic has only one partition
}
@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleAddOffsetToTxnRequestWhenInterBrokerProtocolNotSupported(): Unit = {
createKafkaApis(KAFKA_0_10_2_IV0).handleAddOffsetsToTxnRequest(null)
}
@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleAddPartitionsToTxnRequestWhenInterBrokerProtocolNotSupported(): Unit = {
createKafkaApis(KAFKA_0_10_2_IV0).handleAddPartitionToTxnRequest(null)
}
@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleTxnOffsetCommitRequestWhenInterBrokerProtocolNotSupported(): Unit = {
    createKafkaApis(KAFKA_0_10_2_IV0).handleTxnOffsetCommitRequest(null)
}
@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleEndTxnRequestWhenInterBrokerProtocolNotSupported(): Unit = {
createKafkaApis(KAFKA_0_10_2_IV0).handleEndTxnRequest(null)
}
@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleWriteTxnMarkersRequestWhenInterBrokerProtocolNotSupported(): Unit = {
createKafkaApis(KAFKA_0_10_2_IV0).handleWriteTxnMarkersRequest(null)
}
@Test
def shouldRespondWithUnsupportedForMessageFormatOnHandleWriteTxnMarkersWhenMagicLowerThanRequired(): Unit = {
val topicPartition = new TopicPartition("t", 0)
val (writeTxnMarkersRequest, request) = createWriteTxnMarkersRequest(Utils.mkList(topicPartition))
val expectedErrors = Map(topicPartition -> Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT).asJava
val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()
EasyMock.expect(replicaManager.getMagic(topicPartition))
.andReturn(Some(RecordBatch.MAGIC_VALUE_V1))
EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
EasyMock.replay(replicaManager, replicaQuotaManager, requestChannel)
createKafkaApis().handleWriteTxnMarkersRequest(request)
val markersResponse = readResponse(ApiKeys.WRITE_TXN_MARKERS, writeTxnMarkersRequest, capturedResponse)
.asInstanceOf[WriteTxnMarkersResponse]
assertEquals(expectedErrors, markersResponse.errors(1))
}
@Test
def shouldRespondWithUnknownTopicWhenPartitionIsNotHosted(): Unit = {
val topicPartition = new TopicPartition("t", 0)
val (writeTxnMarkersRequest, request) = createWriteTxnMarkersRequest(Utils.mkList(topicPartition))
val expectedErrors = Map(topicPartition -> Errors.UNKNOWN_TOPIC_OR_PARTITION).asJava
val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()
EasyMock.expect(replicaManager.getMagic(topicPartition))
.andReturn(None)
EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
EasyMock.replay(replicaManager, replicaQuotaManager, requestChannel)
createKafkaApis().handleWriteTxnMarkersRequest(request)
val markersResponse = readResponse(ApiKeys.WRITE_TXN_MARKERS, writeTxnMarkersRequest, capturedResponse)
.asInstanceOf[WriteTxnMarkersResponse]
assertEquals(expectedErrors, markersResponse.errors(1))
}
@Test
def shouldRespondWithUnsupportedMessageFormatForBadPartitionAndNoErrorsForGoodPartition(): Unit = {
val tp1 = new TopicPartition("t", 0)
val tp2 = new TopicPartition("t1", 0)
val (writeTxnMarkersRequest, request) = createWriteTxnMarkersRequest(Utils.mkList(tp1, tp2))
val expectedErrors = Map(tp1 -> Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT, tp2 -> Errors.NONE).asJava
val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()
val responseCallback: Capture[Map[TopicPartition, PartitionResponse] => Unit] = EasyMock.newCapture()
EasyMock.expect(replicaManager.getMagic(tp1))
.andReturn(Some(RecordBatch.MAGIC_VALUE_V1))
EasyMock.expect(replicaManager.getMagic(tp2))
.andReturn(Some(RecordBatch.MAGIC_VALUE_V2))
EasyMock.expect(replicaManager.appendRecords(EasyMock.anyLong(),
EasyMock.anyShort(),
EasyMock.eq(true),
EasyMock.eq(false),
EasyMock.anyObject(),
EasyMock.capture(responseCallback),
EasyMock.anyObject(),
EasyMock.anyObject())).andAnswer(new IAnswer[Unit] {
override def answer(): Unit = {
responseCallback.getValue.apply(Map(tp2 -> new PartitionResponse(Errors.NONE)))
}
})
EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
EasyMock.replay(replicaManager, replicaQuotaManager, requestChannel)
createKafkaApis().handleWriteTxnMarkersRequest(request)
val markersResponse = readResponse(ApiKeys.WRITE_TXN_MARKERS, writeTxnMarkersRequest, capturedResponse)
.asInstanceOf[WriteTxnMarkersResponse]
assertEquals(expectedErrors, markersResponse.errors(1))
EasyMock.verify(replicaManager)
}
@Test
def shouldRespondWithUnknownTopicOrPartitionForBadPartitionAndNoErrorsForGoodPartition(): Unit = {
val tp1 = new TopicPartition("t", 0)
val tp2 = new TopicPartition("t1", 0)
val (writeTxnMarkersRequest, request) = createWriteTxnMarkersRequest(Utils.mkList(tp1, tp2))
val expectedErrors = Map(tp1 -> Errors.UNKNOWN_TOPIC_OR_PARTITION, tp2 -> Errors.NONE).asJava
val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()
val responseCallback: Capture[Map[TopicPartition, PartitionResponse] => Unit] = EasyMock.newCapture()
EasyMock.expect(replicaManager.getMagic(tp1))
.andReturn(None)
EasyMock.expect(replicaManager.getMagic(tp2))
.andReturn(Some(RecordBatch.MAGIC_VALUE_V2))
EasyMock.expect(replicaManager.appendRecords(EasyMock.anyLong(),
EasyMock.anyShort(),
EasyMock.eq(true),
EasyMock.eq(false),
EasyMock.anyObject(),
EasyMock.capture(responseCallback),
EasyMock.anyObject(),
EasyMock.anyObject())).andAnswer(new IAnswer[Unit] {
override def answer(): Unit = {
responseCallback.getValue.apply(Map(tp2 -> new PartitionResponse(Errors.NONE)))
}
})
EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
EasyMock.replay(replicaManager, replicaQuotaManager, requestChannel)
createKafkaApis().handleWriteTxnMarkersRequest(request)
val markersResponse = readResponse(ApiKeys.WRITE_TXN_MARKERS, writeTxnMarkersRequest, capturedResponse)
.asInstanceOf[WriteTxnMarkersResponse]
assertEquals(expectedErrors, markersResponse.errors(1))
EasyMock.verify(replicaManager)
}
@Test
def shouldAppendToLogOnWriteTxnMarkersWhenCorrectMagicVersion(): Unit = {
val topicPartition = new TopicPartition("t", 0)
val request = createWriteTxnMarkersRequest(Utils.mkList(topicPartition))._2
EasyMock.expect(replicaManager.getMagic(topicPartition))
.andReturn(Some(RecordBatch.MAGIC_VALUE_V2))
EasyMock.expect(replicaManager.appendRecords(EasyMock.anyLong(),
EasyMock.anyShort(),
EasyMock.eq(true),
EasyMock.eq(false),
EasyMock.anyObject(),
EasyMock.anyObject(),
EasyMock.anyObject(),
EasyMock.anyObject()))
EasyMock.replay(replicaManager)
createKafkaApis().handleWriteTxnMarkersRequest(request)
EasyMock.verify(replicaManager)
}
@Test
def testReadUncommittedConsumerListOffsetLimitedAtHighWatermark(): Unit = {
testConsumerListOffsetLimit(IsolationLevel.READ_UNCOMMITTED)
}
@Test
def testReadCommittedConsumerListOffsetLimitedAtLastStableOffset(): Unit = {
testConsumerListOffsetLimit(IsolationLevel.READ_COMMITTED)
}
private def testConsumerListOffsetLimit(isolationLevel: IsolationLevel): Unit = {
val tp = new TopicPartition("foo", 0)
val timestamp: JLong = time.milliseconds()
val limitOffset = 15L
val replica = EasyMock.mock(classOf[Replica])
val log = EasyMock.mock(classOf[Log])
EasyMock.expect(replicaManager.getLeaderReplicaIfLocal(tp)).andReturn(replica)
if (isolationLevel == IsolationLevel.READ_UNCOMMITTED)
EasyMock.expect(replica.highWatermark).andReturn(LogOffsetMetadata(messageOffset = limitOffset))
else
EasyMock.expect(replica.lastStableOffset).andReturn(LogOffsetMetadata(messageOffset = limitOffset))
EasyMock.expect(replicaManager.getLog(tp)).andReturn(Some(log))
EasyMock.expect(log.fetchOffsetsByTimestamp(timestamp)).andReturn(Some(TimestampOffset(timestamp = timestamp, offset = limitOffset)))
val capturedResponse = expectNoThrottling()
EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel, replica, log)
val builder = ListOffsetRequest.Builder.forConsumer(true, isolationLevel)
.setTargetTimes(Map(tp -> timestamp).asJava)
val (listOffsetRequest, request) = buildRequest(builder)
createKafkaApis().handleListOffsetRequest(request)
val response = readResponse(ApiKeys.LIST_OFFSETS, listOffsetRequest, capturedResponse).asInstanceOf[ListOffsetResponse]
assertTrue(response.responseData.containsKey(tp))
val partitionData = response.responseData.get(tp)
assertEquals(Errors.NONE, partitionData.error)
assertEquals(ListOffsetResponse.UNKNOWN_OFFSET, partitionData.offset)
assertEquals(ListOffsetResponse.UNKNOWN_TIMESTAMP, partitionData.timestamp)
}
@Test
def testReadUncommittedConsumerListOffsetEarliestOffsetEqualsHighWatermark(): Unit = {
testConsumerListOffsetEarliestOffsetEqualsLimit(IsolationLevel.READ_UNCOMMITTED)
}
@Test
def testReadCommittedConsumerListOffsetEarliestOffsetEqualsLastStableOffset(): Unit = {
testConsumerListOffsetEarliestOffsetEqualsLimit(IsolationLevel.READ_COMMITTED)
}
private def testConsumerListOffsetEarliestOffsetEqualsLimit(isolationLevel: IsolationLevel): Unit = {
val tp = new TopicPartition("foo", 0)
val limitOffset = 15L
val replica = EasyMock.mock(classOf[Replica])
val log = EasyMock.mock(classOf[Log])
EasyMock.expect(replicaManager.getLeaderReplicaIfLocal(tp)).andReturn(replica)
if (isolationLevel == IsolationLevel.READ_UNCOMMITTED)
EasyMock.expect(replica.highWatermark).andReturn(LogOffsetMetadata(messageOffset = limitOffset))
else
EasyMock.expect(replica.lastStableOffset).andReturn(LogOffsetMetadata(messageOffset = limitOffset))
EasyMock.expect(replicaManager.getLog(tp)).andReturn(Some(log))
EasyMock.expect(log.fetchOffsetsByTimestamp(ListOffsetRequest.EARLIEST_TIMESTAMP))
.andReturn(Some(TimestampOffset(timestamp = ListOffsetResponse.UNKNOWN_TIMESTAMP, offset = limitOffset)))
val capturedResponse = expectNoThrottling()
EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel, replica, log)
val builder = ListOffsetRequest.Builder.forConsumer(true, isolationLevel)
.setTargetTimes(Map(tp -> (ListOffsetRequest.EARLIEST_TIMESTAMP: JLong)).asJava)
val (listOffsetRequest, request) = buildRequest(builder)
createKafkaApis().handleListOffsetRequest(request)
val response = readResponse(ApiKeys.LIST_OFFSETS, listOffsetRequest, capturedResponse).asInstanceOf[ListOffsetResponse]
assertTrue(response.responseData.containsKey(tp))
val partitionData = response.responseData.get(tp)
assertEquals(Errors.NONE, partitionData.error)
assertEquals(limitOffset, partitionData.offset)
assertEquals(ListOffsetResponse.UNKNOWN_TIMESTAMP, partitionData.timestamp)
}
@Test
def testReadUncommittedConsumerListOffsetLatest(): Unit = {
testConsumerListOffsetLatest(IsolationLevel.READ_UNCOMMITTED)
}
@Test
def testReadCommittedConsumerListOffsetLatest(): Unit = {
testConsumerListOffsetLatest(IsolationLevel.READ_COMMITTED)
}
/**
* Verifies that the metadata response is correct if the broker listeners are inconsistent (i.e. one broker has
* more listeners than another) and the request is sent on the listener that exists in both brokers.
*/
@Test
def testMetadataRequestOnSharedListenerWithInconsistentListenersAcrossBrokers(): Unit = {
val (plaintextListener, _) = updateMetadataCacheWithInconsistentListeners()
val response = sendMetadataRequestWithInconsistentListeners(plaintextListener)
assertEquals(Set(0, 1), response.brokers.asScala.map(_.id).toSet)
}
/*
* Verifies that the metadata response is correct if the broker listeners are inconsistent (i.e. one broker has
* more listeners than another) and the request is sent on the listener that exists in one broker.
*/
@Test
def testMetadataRequestOnDistinctListenerWithInconsistentListenersAcrossBrokers(): Unit = {
val (_, anotherListener) = updateMetadataCacheWithInconsistentListeners()
val response = sendMetadataRequestWithInconsistentListeners(anotherListener)
assertEquals(Set(0), response.brokers.asScala.map(_.id).toSet)
}
/**
* Return pair of listener names in the metadataCache: PLAINTEXT and LISTENER2 respectively.
*/
private def updateMetadataCacheWithInconsistentListeners(): (ListenerName, ListenerName) = {
val plaintextListener = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)
val anotherListener = new ListenerName("LISTENER2")
val brokers = Set(
new Broker(0, Seq(new EndPoint("broker0", 9092, SecurityProtocol.PLAINTEXT, plaintextListener),
new EndPoint("broker0", 9093, SecurityProtocol.PLAINTEXT, anotherListener)).asJava, "rack"),
new Broker(1, Seq(new EndPoint("broker1", 9092, SecurityProtocol.PLAINTEXT, plaintextListener)).asJava,
"rack")
)
val updateMetadataRequest = new UpdateMetadataRequest.Builder(ApiKeys.UPDATE_METADATA.latestVersion, 0,
0, Map.empty[TopicPartition, UpdateMetadataRequest.PartitionState].asJava, brokers.asJava).build()
metadataCache.updateCache(correlationId = 0, updateMetadataRequest)
(plaintextListener, anotherListener)
}
private def sendMetadataRequestWithInconsistentListeners(requestListener: ListenerName): MetadataResponse = {
val capturedResponse = expectNoThrottling()
EasyMock.replay(clientRequestQuotaManager, requestChannel)
val (metadataRequest, requestChannelRequest) = buildRequest(MetadataRequest.Builder.allTopics, requestListener)
createKafkaApis().handleTopicMetadataRequest(requestChannelRequest)
readResponse(ApiKeys.METADATA, metadataRequest, capturedResponse).asInstanceOf[MetadataResponse]
}
private def testConsumerListOffsetLatest(isolationLevel: IsolationLevel): Unit = {
val tp = new TopicPartition("foo", 0)
val latestOffset = 15L
val replica = EasyMock.mock(classOf[Replica])
val log = EasyMock.mock(classOf[Log])
EasyMock.expect(replicaManager.getLeaderReplicaIfLocal(tp)).andReturn(replica)
if (isolationLevel == IsolationLevel.READ_UNCOMMITTED)
EasyMock.expect(replica.highWatermark).andReturn(LogOffsetMetadata(messageOffset = latestOffset))
else
EasyMock.expect(replica.lastStableOffset).andReturn(LogOffsetMetadata(messageOffset = latestOffset))
val capturedResponse = expectNoThrottling()
EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel, replica, log)
val builder = ListOffsetRequest.Builder.forConsumer(true, isolationLevel)
.setTargetTimes(Map(tp -> (ListOffsetRequest.LATEST_TIMESTAMP: JLong)).asJava)
val (listOffsetRequest, request) = buildRequest(builder)
createKafkaApis().handleListOffsetRequest(request)
val response = readResponse(ApiKeys.LIST_OFFSETS, listOffsetRequest, capturedResponse).asInstanceOf[ListOffsetResponse]
assertTrue(response.responseData.containsKey(tp))
val partitionData = response.responseData.get(tp)
assertEquals(Errors.NONE, partitionData.error)
assertEquals(latestOffset, partitionData.offset)
assertEquals(ListOffsetResponse.UNKNOWN_TIMESTAMP, partitionData.timestamp)
}
private def createWriteTxnMarkersRequest(partitions: util.List[TopicPartition]) = {
val requestBuilder = new WriteTxnMarkersRequest.Builder(Utils.mkList(
new TxnMarkerEntry(1, 1.toShort, 0, TransactionResult.COMMIT, partitions)))
buildRequest(requestBuilder)
}
private def buildRequest[T <: AbstractRequest](builder: AbstractRequest.Builder[T],
listenerName: ListenerName = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)): (T, RequestChannel.Request) = {
val request = builder.build()
val buffer = request.serialize(new RequestHeader(builder.apiKey, request.version, "", 0))
// read the header from the buffer first so that the body can be read next from the Request constructor
val header = RequestHeader.parse(buffer)
val context = new RequestContext(header, "1", InetAddress.getLocalHost, KafkaPrincipal.ANONYMOUS,
listenerName, SecurityProtocol.PLAINTEXT)
(request, new RequestChannel.Request(processor = 1, context = context, startTimeNanos = 0, MemoryPool.NONE, buffer,
requestChannelMetrics))
}
private def readResponse(api: ApiKeys, request: AbstractRequest, capturedResponse: Capture[RequestChannel.Response]): AbstractResponse = {
val response = capturedResponse.getValue
assertTrue(s"Unexpected response type: ${response.getClass}", response.isInstanceOf[SendResponse])
val sendResponse = response.asInstanceOf[SendResponse]
val send = sendResponse.responseSend
val channel = new ByteBufferChannel(send.size)
send.writeTo(channel)
channel.close()
channel.buffer.getInt() // read the size
ResponseHeader.parse(channel.buffer)
val struct = api.responseSchema(request.version).read(channel.buffer)
AbstractResponse.parseResponse(api, struct)
}
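  // Stubs the client request quota manager so the request is never throttled, and captures the response
  // that KafkaApis sends back on the request channel so tests can inspect it.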
private def expectNoThrottling(): Capture[RequestChannel.Response] = {
EasyMock.expect(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(EasyMock.anyObject[RequestChannel.Request]()))
.andReturn(0)
EasyMock.expect(clientRequestQuotaManager.throttle(EasyMock.anyObject[RequestChannel.Request](), EasyMock.eq(0),
EasyMock.anyObject[RequestChannel.Response => Unit]()))
val capturedResponse = EasyMock.newCapture[RequestChannel.Response]()
EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
capturedResponse
}
private def setupBasicMetadataCache(topic: String, numPartitions: Int): Unit = {
val replicas = List(0.asInstanceOf[Integer]).asJava
val partitionState = new UpdateMetadataRequest.PartitionState(1, 0, 1, replicas, 0, replicas, Collections.emptyList())
val plaintextListener = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)
val broker = new Broker(0, Seq(new EndPoint("broker0", 9092, SecurityProtocol.PLAINTEXT, plaintextListener)).asJava, "rack")
val partitions = (0 until numPartitions).map(new TopicPartition(topic, _) -> partitionState).toMap
val updateMetadataRequest = new UpdateMetadataRequest.Builder(ApiKeys.UPDATE_METADATA.latestVersion, 0,
0, partitions.asJava, Set(broker).asJava).build()
metadataCache.updateCache(correlationId = 0, updateMetadataRequest)
}
}
|
richhaase/kafka
|
core/src/test/scala/unit/kafka/server/KafkaApisTest.scala
|
Scala
|
apache-2.0
| 27,241
|
package feh.util
import scala.concurrent.duration.Duration
trait Debugging {
outer =>
def debug: Boolean
protected implicit class DebugLogWrapper[R](r: => R){
def debugLog(f: R => String): R = {
val a = r
outer.debugLog(f(a))
a
}
def debugLog(msg: String): R = debugLog(_ => msg + ": " + r)
def debugLogElapsedTime(msg: Duration => String): R = {
val (res, time) = elapsed(r)
outer.debugLog(msg(time))
res
}
}
def debugMessagePrefix: String
protected def debugLog(a: => Any) = if(debug) println(debugMessagePrefix + a)
}
trait GlobalDebugging extends Debugging{
protected def setup: DebuggingSetup
def debug: Boolean = setup.debug
}
trait DebuggingSetup{
def debug: Boolean
}
trait GlobalDebuggingSetup extends DebuggingSetup{
private var _debug: Boolean = false
def debug: Boolean = _debug
def apply = debug
def update(d: Boolean) = _debug = d
}
trait ThreadLocalDebuggingSetup extends ThreadLocal[Boolean] with DebuggingSetup{
def debug: Boolean = get()
}
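// Illustrative usage sketch: a component mixes in Debugging, supplies `debug` and `debugMessagePrefix`,
// and wraps expressions with `debugLog`. The `Parser` class is made up for the example.
object DebuggingUsageExample {
  class Parser extends Debugging {
    def debug = true
    def debugMessagePrefix = "[Parser] "
    def parse(s: String): Int =
      s.trim.toInt.debugLog(n => s"parsed '$s' as $n") // prints "[Parser] parsed '...' as ..." when debug is on
  }
  def main(args: Array[String]): Unit =
    println(new Parser().parse(" 42"))
}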
|
fehu/util
|
src/main/scala/feh/util/Debugging.scala
|
Scala
|
mit
| 1,055
|
package com.sjsu.bikeshare.web
import javax.inject.Inject
import org.springframework.social.facebook.api.Facebook
import org.springframework.social.facebook.api.PagedList
import org.springframework.social.facebook.api.Post
import org.springframework.stereotype.Controller
import org.springframework.ui.Model
import org.springframework.web.bind.annotation.RequestMapping
import org.springframework.web.bind.annotation.RequestMethod
//remove if not needed
import scala.collection.JavaConversions._
import com.sjsu.bikeshare.domain._
@Controller
@RequestMapping(value = Array("/api/v1/connect/facebook"))
class FaceBookController @Inject() (private val facebook: Facebook) {
  @RequestMapping(method = Array(RequestMethod.GET))
  def helloFacebook(model: Model): String = {
    if (!facebook.isAuthorized) {
      return "redirect:/api/v1/users/userlogin"
    }
    val userLogin = new UserLogin()
    // Fetch the profile once instead of issuing a Graph API call per field
    val userProfile = facebook.userOperations().getUserProfile
    val email = userProfile.getEmail
    val name = userProfile.getName
    println(s"Facebook login: id=${userProfile.getId}, name=$name, email=$email")
    // val homeFeed = facebook.feedOperations().getHomeFeed
    // model.addAttribute("feed", homeFeed)
    userLogin.setEmail(email)
    userLogin.setName(name)
    model.addAttribute("userLogin", userLogin)
    "homepage"
  }
}
|
komaldedhia/cmpe-273-project
|
src/main/scala/com/sjsu/bikeshare/web/FaceBookController.scala
|
Scala
|
mit
| 1,559
|
package org.pico.statsd.impl
import java.io.ByteArrayOutputStream
class AccessibleByteArrayOutputStream(capacity: Int) extends ByteArrayOutputStream(capacity) {
def byteArray: Array[Byte] = buf
def takeWindow(): ByteArrayWindow = {
val window = ByteArrayWindow(buf, 0, count)
buf = new Array[Byte](capacity)
count = 0
window
}
def drop(n: Int): Unit = {
System.arraycopy(buf, n, buf, 0, count - n)
count = (count - n) max 0
}
}
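// Illustrative usage sketch (the metric strings are made up for the example): accumulate a few lines,
// read the internal buffer without copying, then drop the bytes that were already consumed.
object AccessibleByteArrayOutputStreamExample {
  def main(args: Array[String]): Unit = {
    val out = new AccessibleByteArrayOutputStream(64)
    out.write("gauge:1|g\n".getBytes("UTF-8"))
    out.write("count:2|c\n".getBytes("UTF-8"))
    // byteArray exposes the backing buffer directly; only the first size() bytes are valid
    println(new String(out.byteArray, 0, out.size(), "UTF-8"))
    out.drop("gauge:1|g\n".getBytes("UTF-8").length) // discard the first line, keep the rest
    println(new String(out.byteArray, 0, out.size(), "UTF-8"))
  }
}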
|
pico-works/pico-statsd
|
pico-statsd/src/main/scala/org/pico/statsd/impl/AccessibleByteArrayOutputStream.scala
|
Scala
|
mit
| 466
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.security.auth
import java.util
import java.util.concurrent.locks.ReentrantReadWriteLock
import com.typesafe.scalalogging.Logger
import kafka.api.KAFKA_2_0_IV1
import kafka.network.RequestChannel.Session
import kafka.security.auth.SimpleAclAuthorizer.VersionedAcls
import kafka.server.KafkaConfig
import kafka.utils.CoreUtils.{inReadLock, inWriteLock}
import kafka.utils._
import kafka.zk.{AclChangeNotificationHandler, AclChangeSubscription, KafkaZkClient, ZkAclChangeStore, ZkAclStore}
import org.apache.kafka.common.errors.UnsupportedVersionException
import org.apache.kafka.common.resource.PatternType
import org.apache.kafka.common.security.auth.KafkaPrincipal
import org.apache.kafka.common.utils.{SecurityUtils, Time}
import scala.collection.JavaConverters._
import scala.util.Random
object SimpleAclAuthorizer {
  //Optional override for the ZooKeeper cluster where ACLs will be stored; if not specified, ACLs are stored in the
  //same ZooKeeper cluster that stores all other Kafka broker info.
val ZkUrlProp = "authorizer.zookeeper.url"
val ZkConnectionTimeOutProp = "authorizer.zookeeper.connection.timeout.ms"
val ZkSessionTimeOutProp = "authorizer.zookeeper.session.timeout.ms"
val ZkMaxInFlightRequests = "authorizer.zookeeper.max.in.flight.requests"
//List of users that will be treated as super users and will have access to all the resources for all actions from all hosts, defaults to no super users.
val SuperUsersProp = "super.users"
  //If set to true, the authorizer allows access to everyone when no ACLs are found for a resource. Defaults to false.
val AllowEveryoneIfNoAclIsFoundProp = "allow.everyone.if.no.acl.found"
case class VersionedAcls(acls: Set[Acl], zkVersion: Int)
}
class SimpleAclAuthorizer extends Authorizer with Logging {
private val authorizerLogger = Logger("kafka.authorizer.logger")
private var superUsers = Set.empty[KafkaPrincipal]
private var shouldAllowEveryoneIfNoAclIsFound = false
private var zkClient: KafkaZkClient = _
private var aclChangeListeners: Iterable[AclChangeSubscription] = Iterable.empty
private var extendedAclSupport: Boolean = _
@volatile
private var aclCache = new scala.collection.immutable.TreeMap[Resource, VersionedAcls]()(ResourceOrdering)
private val lock = new ReentrantReadWriteLock()
// The maximum number of times we should try to update the resource acls in zookeeper before failing;
// This should never occur, but is a safeguard just in case.
protected[auth] var maxUpdateRetries = 10
private val retryBackoffMs = 100
private val retryBackoffJitterMs = 50
/**
* Guaranteed to be called before any authorize call is made.
*/
override def configure(javaConfigs: util.Map[String, _]) {
val configs = javaConfigs.asScala
val props = new java.util.Properties()
configs.foreach { case (key, value) => props.put(key, value.toString) }
superUsers = configs.get(SimpleAclAuthorizer.SuperUsersProp).collect {
case str: String if str.nonEmpty => str.split(";").map(s => SecurityUtils.parseKafkaPrincipal(s.trim)).toSet
}.getOrElse(Set.empty[KafkaPrincipal])
shouldAllowEveryoneIfNoAclIsFound = configs.get(SimpleAclAuthorizer.AllowEveryoneIfNoAclIsFoundProp).exists(_.toString.toBoolean)
// Use `KafkaConfig` in order to get the default ZK config values if not present in `javaConfigs`. Note that this
// means that `KafkaConfig.zkConnect` must always be set by the user (even if `SimpleAclAuthorizer.ZkUrlProp` is also
// set).
val kafkaConfig = KafkaConfig.fromProps(props, doLog = false)
val zkUrl = configs.get(SimpleAclAuthorizer.ZkUrlProp).map(_.toString).getOrElse(kafkaConfig.zkConnect)
val zkConnectionTimeoutMs = configs.get(SimpleAclAuthorizer.ZkConnectionTimeOutProp).map(_.toString.toInt).getOrElse(kafkaConfig.zkConnectionTimeoutMs)
val zkSessionTimeOutMs = configs.get(SimpleAclAuthorizer.ZkSessionTimeOutProp).map(_.toString.toInt).getOrElse(kafkaConfig.zkSessionTimeoutMs)
val zkMaxInFlightRequests = configs.get(SimpleAclAuthorizer.ZkMaxInFlightRequests).map(_.toString.toInt).getOrElse(kafkaConfig.zkMaxInFlightRequests)
val time = Time.SYSTEM
zkClient = KafkaZkClient(zkUrl, kafkaConfig.zkEnableSecureAcls, zkSessionTimeOutMs, zkConnectionTimeoutMs,
zkMaxInFlightRequests, time, "kafka.security", "SimpleAclAuthorizer")
zkClient.createAclPaths()
extendedAclSupport = kafkaConfig.interBrokerProtocolVersion >= KAFKA_2_0_IV1
loadCache()
startZkChangeListeners()
}
override def authorize(session: Session, operation: Operation, resource: Resource): Boolean = {
if (resource.patternType != PatternType.LITERAL) {
throw new IllegalArgumentException("Only literal resources are supported. Got: " + resource.patternType)
}
val principal = session.principal
val host = session.clientAddress.getHostAddress
val acls = getMatchingAcls(resource.resourceType, resource.name)
// Check if there is any Deny acl match that would disallow this operation.
val denyMatch = aclMatch(operation, resource, principal, host, Deny, acls)
// Check if there are any Allow ACLs which would allow this operation.
// Allowing read, write, delete, or alter implies allowing describe.
// See #{org.apache.kafka.common.acl.AclOperation} for more details about ACL inheritance.
val allowOps = operation match {
case Describe => Set[Operation](Describe, Read, Write, Delete, Alter)
case DescribeConfigs => Set[Operation](DescribeConfigs, AlterConfigs)
case _ => Set[Operation](operation)
}
val allowMatch = allowOps.exists(operation => aclMatch(operation, resource, principal, host, Allow, acls))
    // We allow an operation if the principal is a super user, or if no ACLs are found and the broker is configured
    // to allow everyone when no ACLs are found, or if no deny ACL matches and at least one allow ACL matches.
val authorized = isSuperUser(operation, resource, principal, host) ||
isEmptyAclAndAuthorized(operation, resource, principal, host, acls) ||
(!denyMatch && allowMatch)
logAuditMessage(principal, authorized, operation, resource, host)
authorized
}
def isEmptyAclAndAuthorized(operation: Operation, resource: Resource, principal: KafkaPrincipal, host: String, acls: Set[Acl]): Boolean = {
if (acls.isEmpty) {
authorizerLogger.debug(s"No acl found for resource $resource, authorized = $shouldAllowEveryoneIfNoAclIsFound")
shouldAllowEveryoneIfNoAclIsFound
} else false
}
def isSuperUser(operation: Operation, resource: Resource, principal: KafkaPrincipal, host: String): Boolean = {
if (superUsers.contains(principal)) {
authorizerLogger.debug(s"principal = $principal is a super user, allowing operation without checking acls.")
true
} else false
}
private def aclMatch(operation: Operation, resource: Resource, principal: KafkaPrincipal, host: String, permissionType: PermissionType, acls: Set[Acl]): Boolean = {
acls.find { acl =>
acl.permissionType == permissionType &&
(acl.principal == principal || acl.principal == Acl.WildCardPrincipal) &&
(operation == acl.operation || acl.operation == All) &&
(acl.host == host || acl.host == Acl.WildCardHost)
}.exists { acl =>
authorizerLogger.debug(s"operation = $operation on resource = $resource from host = $host is $permissionType based on acl = $acl")
true
}
}
override def addAcls(acls: Set[Acl], resource: Resource) {
if (acls != null && acls.nonEmpty) {
if (!extendedAclSupport && resource.patternType == PatternType.PREFIXED) {
throw new UnsupportedVersionException(s"Adding ACLs on prefixed resource patterns requires " +
s"${KafkaConfig.InterBrokerProtocolVersionProp} of $KAFKA_2_0_IV1 or greater")
}
inWriteLock(lock) {
updateResourceAcls(resource) { currentAcls =>
currentAcls ++ acls
}
}
}
}
override def removeAcls(aclsTobeRemoved: Set[Acl], resource: Resource): Boolean = {
inWriteLock(lock) {
updateResourceAcls(resource) { currentAcls =>
currentAcls -- aclsTobeRemoved
}
}
}
override def removeAcls(resource: Resource): Boolean = {
inWriteLock(lock) {
val result = zkClient.deleteResource(resource)
updateCache(resource, VersionedAcls(Set(), 0))
updateAclChangedFlag(resource)
result
}
}
override def getAcls(resource: Resource): Set[Acl] = {
inReadLock(lock) {
aclCache.get(resource).map(_.acls).getOrElse(Set.empty[Acl])
}
}
override def getAcls(principal: KafkaPrincipal): Map[Resource, Set[Acl]] = {
inReadLock(lock) {
aclCache.mapValues { versionedAcls =>
versionedAcls.acls.filter(_.principal == principal)
}.filter { case (_, acls) =>
acls.nonEmpty
}
}
}
def getMatchingAcls(resourceType: ResourceType, resourceName: String): Set[Acl] = {
inReadLock(lock) {
val wildcard = aclCache.get(Resource(resourceType, Acl.WildCardResource, PatternType.LITERAL))
.map(_.acls)
.getOrElse(Set.empty[Acl])
val literal = aclCache.get(Resource(resourceType, resourceName, PatternType.LITERAL))
.map(_.acls)
.getOrElse(Set.empty[Acl])
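      // ResourceOrdering sorts PREFIXED entries by name in reverse, so every ACL whose pattern could be a
      // prefix of resourceName lies in this slice of the cache; the startsWith filter keeps only true prefixes.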
val prefixed = aclCache.range(
Resource(resourceType, resourceName, PatternType.PREFIXED),
Resource(resourceType, resourceName.substring(0, 1), PatternType.PREFIXED)
)
.filterKeys(resource => resourceName.startsWith(resource.name))
.flatMap { case (resource, versionedAcls) => versionedAcls.acls }
.toSet
prefixed ++ wildcard ++ literal
}
}
override def getAcls(): Map[Resource, Set[Acl]] = {
inReadLock(lock) {
aclCache.mapValues(_.acls)
}
}
def close() {
aclChangeListeners.foreach(listener => listener.close())
if (zkClient != null) zkClient.close()
}
private def loadCache() {
inWriteLock(lock) {
ZkAclStore.stores.foreach(store => {
val resourceTypes = zkClient.getResourceTypes(store.patternType)
for (rType <- resourceTypes) {
val resourceType = ResourceType.fromString(rType)
val resourceNames = zkClient.getResourceNames(store.patternType, resourceType)
for (resourceName <- resourceNames) {
val resource = new Resource(resourceType, resourceName, store.patternType)
val versionedAcls = getAclsFromZk(resource)
updateCache(resource, versionedAcls)
}
}
})
}
}
private def startZkChangeListeners(): Unit = {
aclChangeListeners = ZkAclChangeStore.stores
.map(store => store.createListener(AclChangedNotificationHandler, zkClient))
}
private def logAuditMessage(principal: KafkaPrincipal, authorized: Boolean, operation: Operation, resource: Resource, host: String) {
def logMessage: String = {
val authResult = if (authorized) "Allowed" else "Denied"
s"Principal = $principal is $authResult Operation = $operation from host = $host on resource = $resource"
}
if (authorized) authorizerLogger.debug(logMessage)
else authorizerLogger.info(logMessage)
}
/**
* Safely updates the resources ACLs by ensuring reads and writes respect the expected zookeeper version.
* Continues to retry until it successfully updates zookeeper.
*
* Returns a boolean indicating if the content of the ACLs was actually changed.
*
* @param resource the resource to change ACLs for
* @param getNewAcls function to transform existing acls to new ACLs
* @return boolean indicating if a change was made
*/
private def updateResourceAcls(resource: Resource)(getNewAcls: Set[Acl] => Set[Acl]): Boolean = {
var currentVersionedAcls =
if (aclCache.contains(resource))
getAclsFromCache(resource)
else
getAclsFromZk(resource)
var newVersionedAcls: VersionedAcls = null
var writeComplete = false
var retries = 0
while (!writeComplete && retries <= maxUpdateRetries) {
val newAcls = getNewAcls(currentVersionedAcls.acls)
val (updateSucceeded, updateVersion) =
if (newAcls.nonEmpty) {
zkClient.conditionalSetOrCreateAclsForResource(resource, newAcls, currentVersionedAcls.zkVersion)
} else {
trace(s"Deleting path for $resource because it had no ACLs remaining")
(zkClient.conditionalDelete(resource, currentVersionedAcls.zkVersion), 0)
}
if (!updateSucceeded) {
trace(s"Failed to update ACLs for $resource. Used version ${currentVersionedAcls.zkVersion}. Reading data and retrying update.")
Thread.sleep(backoffTime)
currentVersionedAcls = getAclsFromZk(resource)
retries += 1
} else {
newVersionedAcls = VersionedAcls(newAcls, updateVersion)
writeComplete = updateSucceeded
}
}
if(!writeComplete)
throw new IllegalStateException(s"Failed to update ACLs for $resource after trying a maximum of $maxUpdateRetries times")
if (newVersionedAcls.acls != currentVersionedAcls.acls) {
debug(s"Updated ACLs for $resource to ${newVersionedAcls.acls} with version ${newVersionedAcls.zkVersion}")
updateCache(resource, newVersionedAcls)
updateAclChangedFlag(resource)
true
} else {
debug(s"Updated ACLs for $resource, no change was made")
updateCache(resource, newVersionedAcls) // Even if no change, update the version
false
}
}
private def getAclsFromCache(resource: Resource): VersionedAcls = {
aclCache.getOrElse(resource, throw new IllegalArgumentException(s"ACLs do not exist in the cache for resource $resource"))
}
private def getAclsFromZk(resource: Resource): VersionedAcls = {
zkClient.getVersionedAclsForResource(resource)
}
private def updateCache(resource: Resource, versionedAcls: VersionedAcls) {
if (versionedAcls.acls.nonEmpty) {
aclCache = aclCache + (resource -> versionedAcls)
} else {
aclCache = aclCache - resource
}
}
private def updateAclChangedFlag(resource: Resource) {
zkClient.createAclChangeNotification(resource)
}
private def backoffTime = {
retryBackoffMs + Random.nextInt(retryBackoffJitterMs)
}
object AclChangedNotificationHandler extends AclChangeNotificationHandler {
override def processNotification(resource: Resource) {
inWriteLock(lock) {
val versionedAcls = getAclsFromZk(resource)
updateCache(resource, versionedAcls)
}
}
}
// Orders by resource type, then resource pattern type and finally reverse ordering by name.
private object ResourceOrdering extends Ordering[Resource] {
def compare(a: Resource, b: Resource): Int = {
val rt = a.resourceType compare b.resourceType
if (rt != 0)
rt
else {
val rnt = a.patternType compareTo b.patternType
if (rnt != 0)
rnt
else
(a.name compare b.name) * -1
}
}
}
}
|
Ishiihara/kafka
|
core/src/main/scala/kafka/security/auth/SimpleAclAuthorizer.scala
|
Scala
|
apache-2.0
| 15,896
|
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.hbase.tools
import com.beust.jcommander.JCommander
import org.locationtech.geomesa.hbase.tools.data._
import org.locationtech.geomesa.hbase.tools.export.{HBaseBinExportCommand, HBaseExportCommand}
import org.locationtech.geomesa.hbase.tools.ingest.HBaseIngestCommand
import org.locationtech.geomesa.hbase.tools.stats._
import org.locationtech.geomesa.hbase.tools.status._
import org.locationtech.geomesa.tools.export.GenerateAvroSchemaCommand
import org.locationtech.geomesa.tools.status._
import org.locationtech.geomesa.tools.{Command, ConvertCommand, Runner}
object HBaseRunner extends Runner {
override val name: String = "geomesa-hbase"
override def createCommands(jc: JCommander): Seq[Command] = Seq(
new HBaseCreateSchemaCommand,
new HBaseDeleteCatalogCommand,
new HBaseDeleteFeaturesCommand,
new HBaseDescribeSchemaCommand,
new EnvironmentCommand,
new HBaseExplainCommand,
new HBaseExportCommand,
new HelpCommand(this, jc),
new HBaseIngestCommand,
new HBaseKeywordsCommand,
new HBaseGetTypeNamesCommand,
new HBaseRemoveSchemaCommand,
new HBaseVersionRemoteCommand,
new VersionCommand,
new HBaseGetSftConfigCommand,
new GenerateAvroSchemaCommand,
new HBaseStatsAnalyzeCommand,
new HBaseStatsBoundsCommand,
new HBaseStatsCountCommand,
new HBaseStatsTopKCommand,
new HBaseStatsHistogramCommand,
new ConvertCommand,
new HBaseBinExportCommand,
new ConfigureCommand,
new ClasspathCommand
)
override def environmentErrorInfo(): Option[String] = {
if (sys.env.get("HBASE_HOME").isEmpty || sys.env.get("HADOOP_HOME").isEmpty) {
Option("Warning: you have not set HBASE_HOME and/or HADOOP_HOME as environment variables." +
"\\nGeoMesa tools will not run without the appropriate HBase and Hadoop jars in the tools classpath." +
"\\nPlease ensure that those jars are present in the classpath by running 'geomesa-hbase classpath'." +
"\\nTo take corrective action, please place the necessary jar files in the lib directory of geomesa-tools.")
} else { None }
}
}
|
ronq/geomesa
|
geomesa-hbase/geomesa-hbase-tools/src/main/scala/org/locationtech/geomesa/hbase/tools/HBaseRunner.scala
|
Scala
|
apache-2.0
| 2,605
|
/*
* This file is part of Evo2DSim.
*
* Evo2DSim is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Evo2DSim is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Evo2DSim. If not, see <http://www.gnu.org/licenses/>.
*/
package org.vastness.evo2dsim.core.environment
import org.vastness.evo2dsim.core.simulator.{AgentID, Agent, Simulator}
import scala.concurrent.promise
import org.vastness.evo2dsim.core.gui.EnvironmentManager
import scala.collection.Map
import org.jbox2d.common.Vec2
import org.vastness.evo2dsim.core.evolution.genomes.Genome
import org.vastness.evo2dsim.core.data.{Recordable, Recorder, RecordLevel}
import scala.collection.parallel.ParSeq
import scalax.file.Path
import org.vastness.evo2dsim.core.evolution.Evolution.Generation
import spire.math._
import spire.implicits._
/**
* Implements the very basics for an environment
*/
abstract class Environment(val timeStep: Int, val steps: Int) {
def origin: Vec2
def halfSize: Float
def spawnSize: Float
def signallingStrategy: Option[Double] = None
var generation: Int = 0
var group: Int = 0
var iteration = 0
def newRandomPosition: Vec2 = {
origin add new Vec2(randomFloat * spawnSize, randomFloat * spawnSize)
}
protected def randomFloat: Float = (sim.random.nextFloat * 2) - 1
// Angle in Radian
def newRandomAngle: Float = sim.random.nextFloat * 2 * pi[Float]
private var _stepCounter = 0
def stepCounter = _stepCounter
val sim = new Simulator(scala.util.Random.nextLong())
var agents = Map.empty[AgentID, Agent]
val p = promise[Environment]()
var running = true
var recording: Boolean = false
var recorders: ParSeq[Recorder] = ParSeq.empty
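  // Advances the physics simulation by one timestep, drives any active recorders, and completes the
  // promise once the configured number of steps has run.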
def updateSimulation() {
sim.step(timeStep/1000.0f)
_stepCounter += 1
if(recording) recorders map {_.step()}
if(steps == stepCounter) {
running = false
EnvironmentManager.remove(this)
p success this
}
}
def run(){
while(running){
updateSimulation()
}
    // Only fail the promise if the run loop exited without completing it (normally `updateSimulation` succeeds it)
    if (!p.isCompleted) p failure new IllegalStateException("Environment stopped before completing all steps")
}
def initializeStatic()
def initializeAgents(genomes: Generation)
def startRecording(rL: RecordLevel, iteration: Int, baseDir: Path) {
recording = true
var tempRecorders = Seq.empty[Recorder]
if(rL.record(RecordLevel.Agents)) {
tempRecorders ++= ( for((id, a) <- agents) yield {
createRecorder(baseDir, iteration, "agent", id, a)
} ).toSeq
}
if(rL.record(RecordLevel.Controller)) {
tempRecorders ++= ( for((id, a) <- agents) yield {
createRecorder(baseDir, iteration, "controller", id, a.controller)
} ).toSeq
}
recorders = tempRecorders.par
}
def agentBySimpleID(id: Int) = {
agents.find(_._1.id == id).map(_._2)
}
private def createRecorder(baseDir: Path, iteration: Int, name: String, id: AgentID, r: Recordable) = {
val dir: Path = baseDir / (s"${id.generation}/${id.group}/$iteration", '/')
if(dir.nonExistent) dir.createDirectory(createParents = true)
Recorder(dir, s"${id.id}_$name", r)
}
}
|
vchuravy/Evo2DSim
|
core/src/main/scala/org/vastness/evo2dsim/core/environment/Environment.scala
|
Scala
|
mit
| 3,505
|
package pipelines
import org.apache.spark.rdd.RDD
/**
* An Unsupervised estimator. An estimator can emit a transformer via a call to its fit method on unlabeled data.
*
* @tparam I Input type.
* @tparam O Output type.
*/
abstract class UnsupervisedEstimator[I, O] extends Serializable with PipelineStage[I, O] {
def fit(in: RDD[I]): Transformer[I, O]
}
/**
* A Supervised estimator. A supervised estimator emits a transformer via a call to its fit method on labeled data.
*
* @tparam I Input type.
* @tparam O Output type.
* @tparam L Label type.
*/
abstract class SupervisedEstimator[I, O, L] extends Serializable with PipelineStage[I, O] {
def fit(data: RDD[I], labels: RDD[L]): Transformer[I, O]
}
/**
* An unsupervised estimator that carries its data with it.
* @param e Estimator.
* @param data Data.
* @tparam I Input type.
* @tparam O Output type.
*/
private case class UnsupervisedEstimatorWithData[I, O](
e: UnsupervisedEstimator[I, O],
data: RDD[I]) extends PipelineStage[I, O]
/**
* A supervised estimator that carries its data with it.
* @param e Estimator.
* @param data Data.
* @param labels Labels.
* @tparam I Input type.
* @tparam O Output type.
* @tparam L Label type.
*/
private case class SupervisedEstimatorWithData[I, O, L](
e: SupervisedEstimator[I, O, L],
data: RDD[I],
labels: RDD[L]) extends PipelineStage[I, O]
|
concretevitamin/keystone
|
src/main/scala/pipelines/Estimator.scala
|
Scala
|
apache-2.0
| 1,391
|
/**
* Created by rewrite on 04/02/16.
*/
import scala.util.Random
class Player(name : String) {
var coinOption : String = ""
def setCoinOption(opponentFlip : String) : Unit = {
coinOption = if (opponentFlip.equals("Tails")) "Heads" else "Tails"
}
def getRandCoinOption : String = {
val r = new Random
coinOption = if (r.nextInt(2) == 0) "Heads" else "Tails"
coinOption
}
def didPlayerWin(winningFlip : String) : Unit = {
val maybe = if (winningFlip == coinOption) "indeed" else "not"
print(s"$name did have $coinOption. He did $maybe win.\n")
}
}
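// Illustrative usage sketch (player names are made up for the example): two players take opposite
// sides of the coin, the coin is flipped, and each reports whether they won.
object CoinFlipExample {
  def main(args: Array[String]): Unit = {
    val alice = new Player("Alice")
    val bob = new Player("Bob")
    val aliceCall = alice.getRandCoinOption // Alice picks a random side
    bob.setCoinOption(aliceCall)            // Bob takes the opposite side
    val flip = if (new Random().nextInt(2) == 0) "Heads" else "Tails"
    println(s"The coin landed on $flip.")
    alice.didPlayerWin(flip)
    bob.didPlayerWin(flip)
  }
}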
|
cirquit/Personal-Repository
|
Scala/coinflip-0.1/src/main/scala/Player.scala
|
Scala
|
mit
| 633
|
/**
* Copyright (c) 2016, Cloudera, Inc. All Rights Reserved.
*
* Cloudera, Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"). You may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* This software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for
* the specific language governing permissions and limitations under the
* License.
*/
package com.cloudera.sparkts.models
import breeze.linalg._
import com.cloudera.sparkts.Lag
import com.cloudera.sparkts.MatrixUtil.{matToRowArrs, toBreeze}
import org.apache.commons.math3.stat.regression.OLSMultipleLinearRegression
/**
* Models a time series as a function of itself (autoregressive terms) and exogenous variables, which
* are lagged up to degree xMaxLag.
*/
object AutoregressionX {
/**
* Fit an autoregressive model with additional exogenous variables. The model predicts a value
* at time t of a dependent variable, Y, as a function of previous values of Y, and a combination
* of previous values of exogenous regressors X_i, and current values of exogenous regressors X_i.
* This is a generalization of an AR model, which is simply an ARX with no exogenous regressors.
* The fitting procedure here is the same, using least squares. Note that all lags up to the
* maxlag are included. In the case of the dependent variable the max lag is 'yMaxLag', while
* for the exogenous variables the max lag is 'xMaxLag', with which each column in the original
* matrix provided is lagged accordingly.
*
* @param y the dependent variable, time series
* @param x a matrix of exogenous variables
* @param yMaxLag the maximum lag order for the dependent variable
* @param xMaxLag the maximum lag order for exogenous variables
* @param includeOriginalX a boolean flag indicating if the non-lagged exogenous variables should
* be included. Default is true
* @param noIntercept a boolean flag indicating if the intercept should be dropped. Default is
* false
* @return an ARXModel, which is an autoregressive model with exogenous variables
*/
def fitModel(
y: Vector[Double],
x: Matrix[Double],
yMaxLag: Int,
xMaxLag: Int,
includeOriginalX: Boolean = true,
noIntercept: Boolean = false): ARXModel = {
val maxLag = max(yMaxLag, xMaxLag)
val arrY = y.toArray
// Make left hand side, note that we must drop the first maxLag terms
val trimY = arrY.drop(maxLag)
// Create predictors
val predictors = assemblePredictors(arrY, matToRowArrs(x), yMaxLag, xMaxLag, includeOriginalX)
val regression = new OLSMultipleLinearRegression()
regression.setNoIntercept(noIntercept) // drop intercept in regression
regression.newSampleData(trimY, predictors)
val params = regression.estimateRegressionParameters()
val (c, coeffs) = if (noIntercept) (0.0, params) else (params.head, params.tail)
new ARXModel(c, coeffs, yMaxLag, xMaxLag, includeOriginalX)
}
private[sparkts] def assemblePredictors(
y: Array[Double],
x: Array[Array[Double]],
yMaxLag: Int,
xMaxLag: Int,
includeOriginalX: Boolean = true): Array[Array[Double]] = {
val maxLag = max(yMaxLag, xMaxLag)
// AR terms from dependent variable (autoregressive portion)
val arY = Lag.lagMatTrimBoth(y, yMaxLag)
// exogenous variables lagged as appropriate
val laggedX = Lag.lagMatTrimBoth(x, xMaxLag)
// adjust difference in size for arY and laggedX so that they match up
val arYAdj = arY.drop(maxLag - yMaxLag)
val laggedXAdj = laggedX.drop(maxLag - xMaxLag)
val trimmedX = if (includeOriginalX) x.drop(maxLag) else Array[Array[Double]]()
// combine matrices by concatenating column-wise
Array(arYAdj, laggedXAdj, trimmedX).transpose.map(_.reduceLeft(_ ++_))
}
}
/**
* An autoregressive model with exogenous variables.
*
* @param c An intercept term, zero if none desired.
* @param coefficients The coefficients for the various terms. The order of coefficients is as
* follows:
* - Autoregressive terms for the dependent variable, in increasing order of lag
* - For each column in the exogenous matrix (in their original order), the
* lagged terms in increasing order of lag (excluding the non-lagged versions).
* - The coefficients associated with the non-lagged exogenous matrix
* @param yMaxLag The maximum lag order for the dependent variable.
* @param xMaxLag The maximum lag order for exogenous variables.
* @param includesOriginalX A boolean flag indicating if the non-lagged exogenous variables should
* be included.
*/
class ARXModel(
val c: Double,
val coefficients: Array[Double],
val yMaxLag: Int,
val xMaxLag: Int,
includesOriginalX: Boolean) {
def predict(y: Vector[Double], x: Matrix[Double]): Vector[Double] = {
val predictors = AutoregressionX.assemblePredictors(y.toArray, matToRowArrs(x), yMaxLag,
xMaxLag, includesOriginalX)
val results = DenseVector.zeros[Double](predictors.length)
for ((rowArray, rowIndex) <- predictors.zipWithIndex) {
results(rowIndex) = c
for ((value, colIndex) <- rowArray.zipWithIndex) {
results(rowIndex) += value * coefficients(colIndex)
}
}
results
}
}
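// Illustrative usage sketch (the synthetic series and coefficients are made up for the example):
// fit an ARX(1, 1) model where y depends on its own previous value and one exogenous regressor.
object AutoregressionXExample {
  def main(args: Array[String]): Unit = {
    val n = 100
    val x = DenseMatrix.tabulate[Double](n, 1)((i, _) => math.sin(i / 5.0))
    val y = DenseVector.zeros[Double](n)
    for (t <- 1 until n) y(t) = 0.2 + 0.5 * y(t - 1) + 0.8 * x(t, 0)
    val model = AutoregressionX.fitModel(y, x, yMaxLag = 1, xMaxLag = 1)
    println(s"intercept = ${model.c}, coefficients = ${model.coefficients.mkString(", ")}")
    val fitted = model.predict(y, x) // in-sample one-step predictions, aligned after the first maxLag values
    println(s"first fitted value = ${fitted(0)}")
  }
}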
|
cloudera/spark-timeseries
|
src/main/scala/com/cloudera/sparkts/models/AutoregressionX.scala
|
Scala
|
apache-2.0
| 5,607
|
package org.nexbook.app
import com.softwaremill.macwire._
import org.nexbook.core.Handler
import org.nexbook.neworderhandler.{NewOrderHandler, OrderHandler}
import org.nexbook.orderbookresponsehandler.handler.{FixMessageResponseSender, JsonFileLogger, TradeDatabaseSaver}
import org.nexbook.orderbookresponsehandler.response.OrderBookResponse
import org.nexbook.orderchange.{DbUpdateOrderChangeHandler, OrderChangeCommand}
/**
* Created by milczu on 12/21/15.
*/
trait DelegatorsProvider extends BasicComponentProvider {
def orderHandlers: List[NewOrderHandler] = List(wire[OrderHandler])
def orderResponseHandlers: List[Handler[OrderBookResponse]] = {
var handlers: List[Handler[OrderBookResponse]] = List(wire[JsonFileLogger])
if(AppConfig.dbPersist) {
handlers = wire[TradeDatabaseSaver] :: handlers
}
if(AppConfig.runningMode == Live) {
handlers = wire[FixMessageResponseSender] :: handlers
}
handlers.reverse
}
def orderChangeCommandHandlers: List[Handler[OrderChangeCommand]] = if(AppConfig.dbPersist) List(wire[DbUpdateOrderChangeHandler]) else List()
}
|
milczarekIT/nexbook
|
src/main/scala/org/nexbook/app/DelegatorsProvider.scala
|
Scala
|
apache-2.0
| 1,091
|
package dispatch.github.specs
import org.specs2.mutable._
import dispatch._
import dispatch.github._
class GhUserSpec extends Specification {
"When retrieving anonymous github user profile" should {
"return something when the user is valid" in {
val userRes = GhUser.get_user("juandebravo")
val user = userRes()
(user must not beNull)
user.id must beEqualTo(367029)
user.`type` must beEqualTo("User")
}
"return None when the user is invalid" in {
val userRes = GhUser.get_user("juandebravoInvalidName")
val user = userRes.completeOption
user should be(None)
}
}
}
|
andreazevedo/dispatch-github
|
src/test/scala/GhUserSpec.scala
|
Scala
|
mit
| 703
|
/*
* Copyright 2016 rdbc contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.rdbc.pgsql.core.internal.fsm.streaming
import io.rdbc.pgsql.core.ChannelWriter
import io.rdbc.pgsql.core.internal.{PgMsgHandler, PgRowPublisher}
import io.rdbc.pgsql.core.internal.fsm._
import io.rdbc.pgsql.core.internal.protocol.messages.backend.CloseComplete
import scala.concurrent.ExecutionContext
private[core] class StrmWaitingAfterClose(onIdle: => Unit,
publisher: PgRowPublisher)
(implicit out: ChannelWriter, ec: ExecutionContext)
extends State {
protected val msgHandler: PgMsgHandler = {
case CloseComplete =>
goto(
new WaitingForReady(
onIdle = onIdle,
onFailure = publisher.failure
))
}
private def sendFailureToClient(ex: Throwable): Unit = {
publisher.failure(ex)
}
protected def onNonFatalError(ex: Throwable): StateAction = {
goto(State.Streaming.queryFailed(txMgmt = true, publisher.portalName) {
sendFailureToClient(ex)
})
}
protected def onFatalError(ex: Throwable): Unit = {
sendFailureToClient(ex)
}
}
|
rdbc-io/rdbc-pgsql
|
rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmWaitingAfterClose.scala
|
Scala
|
apache-2.0
| 1,713
|
package ml.combust.mleap.runtime.function
import ml.combust.mleap.runtime.frame.Row
import scala.language.implicitConversions
/** Trait for a LeapFrame selector.
*
 * A selector generates values based on other values found
 * in a [[Row]]. The name parameters
 * of a selector specify which columns of the row to get
 * the values from.
 *
 * Currently there are two supported selectors: a field selector and
 * an array selector.
*
* [[FieldSelector]] selects the value of a given field.
* [[StructSelector]] creates an array from the values of a given set of fields.
*/
sealed trait Selector
/** Companion object for selectors.
*
* Provides implicit conversions for convenience.
*/
object Selector {
/** Create a [[FieldSelector]] for a given name.
*
* @param name name of field
* @return field selector
*/
implicit def apply(name: String): FieldSelector = FieldSelector(name)
  /** Create a [[StructSelector]] for a given list of names.
*
    * @param names field names used to construct the array
* @return array selector
*/
implicit def apply(names: Seq[String]): StructSelector = StructSelector(names)
}
/** Class for a selector that extracts the value of a field from a [[Row]].
*
* @param field name of field to extract
*/
case class FieldSelector(field: String) extends Selector
/** Class for a selector that constructs an array from values in a [[Row]].
*
* @param fields names of fields used to construct array
*/
case class StructSelector(fields: Seq[String]) extends Selector
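/** A minimal usage sketch with illustrative column names; it relies only on the
  * implicit conversions defined in the companion object above.
  */
private[function] object SelectorUsageSketch {
  // A single name becomes a FieldSelector via Selector.apply(name: String).
  val single: Selector = Selector("age")
  // A sequence of names becomes a StructSelector via Selector.apply(names: Seq[String]).
  val combined: Selector = Selector(Seq("first_name", "last_name"))
}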
|
combust-ml/mleap
|
mleap-runtime/src/main/scala/ml/combust/mleap/runtime/function/Selector.scala
|
Scala
|
apache-2.0
| 1,569
|
import sbt._
import Keys._
import org.scalatra.sbt._
import org.scalatra.sbt.PluginKeys._
import sbt.ScalaVersion
import twirl.sbt.TwirlPlugin._
import com.typesafe.sbteclipse.plugin.EclipsePlugin.EclipseKeys
object MyBuild extends Build {
val Organization = "jp.sf.amateras"
val Name = "gitbucket"
val Version = "0.0.1"
val ScalaVersion = "2.10.3"
val ScalatraVersion = "2.2.1"
lazy val project = Project (
"gitbucket",
file("."),
settings = Defaults.defaultSettings ++ ScalatraPlugin.scalatraWithJRebel ++ Seq(
organization := Organization,
name := Name,
version := Version,
scalaVersion := ScalaVersion,
resolvers ++= Seq(
Classpaths.typesafeReleases,
"amateras-repo" at "http://amateras.sourceforge.jp/mvn/"
),
scalacOptions := Seq("-deprecation"),
libraryDependencies ++= Seq(
"org.eclipse.jgit" % "org.eclipse.jgit.http.server" % "3.0.0.201306101825-r",
"org.scalatra" %% "scalatra" % ScalatraVersion,
"org.scalatra" %% "scalatra-specs2" % ScalatraVersion % "test",
"org.scalatra" %% "scalatra-json" % ScalatraVersion,
"org.json4s" %% "json4s-jackson" % "3.2.5",
"jp.sf.amateras" %% "scalatra-forms" % "0.0.8",
"commons-io" % "commons-io" % "2.4",
"org.pegdown" % "pegdown" % "1.4.1",
"org.apache.commons" % "commons-compress" % "1.5",
"org.apache.commons" % "commons-email" % "1.3.1",
"org.apache.httpcomponents" % "httpclient" % "4.3",
"com.typesafe.slick" %% "slick" % "1.0.1",
"com.novell.ldap" % "jldap" % "2009-10-07",
"com.h2database" % "h2" % "1.3.173",
"ch.qos.logback" % "logback-classic" % "1.0.13" % "runtime",
"org.eclipse.jetty" % "jetty-webapp" % "8.1.8.v20121106" % "container;provided",
"org.eclipse.jetty.orbit" % "javax.servlet" % "3.0.0.v201112011016" % "container;provided;test" artifacts (Artifact("javax.servlet", "jar", "jar")),
"junit" % "junit" % "4.11" % "test"
),
EclipseKeys.withSource := true,
javacOptions in compile ++= Seq("-target", "6", "-source", "6"),
testOptions in Test += Tests.Argument(TestFrameworks.Specs2, "junitxml", "console"),
packageOptions += Package.MainClass("JettyLauncher")
) ++ seq(Twirl.settings: _*)
)
}
|
chu888chu888/gitbucket
|
project/build.scala
|
Scala
|
apache-2.0
| 2,336
|
package com.twitter.diffy.proxy
import java.net.SocketAddress
import com.twitter.diffy.analysis.{DifferenceAnalyzer, JoinedDifferences, InMemoryDifferenceCollector}
import com.twitter.diffy.lifter.{HttpLifter, Message}
import com.twitter.finagle.{Http, Filter}
import com.twitter.finagle.http.{Method, Request}
import com.twitter.util.{Try, Future}
import org.jboss.netty.handler.codec.http.{HttpResponse, HttpRequest}
trait HttpDifferenceProxy extends DifferenceProxy {
val servicePort: SocketAddress
val lifter = new HttpLifter(settings.excludeHttpHeadersComparison)
override type Req = HttpRequest
override type Rep = HttpResponse
override type Srv = HttpService
override def serviceFactory(serverset: String, label: String) =
HttpService(Http.newClient(serverset, label).toService)
override lazy val server = Http.serve(servicePort, proxy)
override def liftRequest(req: HttpRequest): Future[Message] =
lifter.liftRequest(req)
override def liftResponse(resp: Try[HttpResponse]): Future[Message] =
lifter.liftResponse(resp)
}
object SimpleHttpDifferenceProxy {
/**
* Side effects can be dangerous if replayed on production backends. This
* filter ignores all POST, PUT, and DELETE requests if the
* "allowHttpSideEffects" flag is set to false.
*/
lazy val httpSideEffectsFilter =
Filter.mk[HttpRequest, HttpResponse, HttpRequest, HttpResponse] { (req, svc) =>
val hasSideEffects =
Set(Method.Post, Method.Put, Method.Delete).contains(Request(req).method)
if (hasSideEffects) DifferenceProxy.NoResponseException else svc(req)
}
}
/**
* A Twitter-specific difference proxy that adds custom filters to unpickle
* TapCompare traffic from TFE and optionally drop requests that have side
* effects
* @param settings The settings needed by DifferenceProxy
*/
case class SimpleHttpDifferenceProxy (
settings: Settings,
collector: InMemoryDifferenceCollector,
joinedDifferences: JoinedDifferences,
analyzer: DifferenceAnalyzer)
extends HttpDifferenceProxy
{
import SimpleHttpDifferenceProxy._
override val servicePort = settings.servicePort
override val proxy =
Filter.identity andThenIf
(!settings.allowHttpSideEffects, httpSideEffectsFilter) andThen
super.proxy
}
|
fengshao0907/diffy
|
src/main/scala/com/twitter/diffy/proxy/HttpDifferenceProxy.scala
|
Scala
|
apache-2.0
| 2,292
|
/*
* (c) Copyright 2014 LinkedIn Corp. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.linkedin.rookboom.util
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import java.lang.reflect.{Type, ParameterizedType}
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.core.`type`.TypeReference
import java.io.{InputStream, File, OutputStream}
import java.io.StringWriter
/**
* Helper functions to deal with JSON.
* @author Dmitriy Yefremov
*/
object JsonUtils {
val mapper = new ObjectMapper()
mapper.registerModule(DefaultScalaModule)
def serialize(value: Any): String = {
val writer = new StringWriter()
mapper.writeValue(writer, value)
writer.toString
}
def serialize(value: Any, file: File) {
mapper.writeValue(file, value)
}
def serialize(value: Any, out: OutputStream) {
mapper.writeValue(out, value)
}
def deserialize[T: Manifest](value: String): T = {
mapper.readValue(value, typeReference[T])
}
def deserialize[T: Manifest](in: InputStream): T = {
mapper.readValue(in, typeReference[T])
}
def deserialize[T: Manifest](file: File): T = {
mapper.readValue(file, typeReference[T])
}
private[this] def typeReference[T: Manifest] = new TypeReference[T] {
override def getType = typeFromManifest(manifest[T])
}
private[this] def typeFromManifest(m: Manifest[_]): Type = {
if (m.typeArguments.isEmpty) {
m.runtimeClass
}
else new ParameterizedType {
def getRawType = m.runtimeClass
def getActualTypeArguments = m.typeArguments.map(typeFromManifest).toArray
def getOwnerType = null
}
}
}
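/**
 * A minimal round-trip sketch with illustrative data; it assumes jackson-module-scala
 * resolves the generic collection types through the Manifest-based TypeReference above.
 */
object JsonUtilsRoundTripSketch {
  def roundTrip(): Boolean = {
    // Serialize a nested Scala collection to a JSON string...
    val json = JsonUtils.serialize(Map("evens" -> List(2, 4, 6)))
    // ...and read it back with the full generic type preserved.
    val back = JsonUtils.deserialize[Map[String, List[Int]]](json)
    back("evens") == List(2, 4, 6)
  }
}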
|
linkedin/RookBoom
|
services/src/main/scala/com/linkedin/rookboom/util/JsonUtils.scala
|
Scala
|
apache-2.0
| 2,200
|
package dao
import com.google.inject.ImplementedBy
import dao.impl.RoleDAOImpl
import model.Role
import scala.concurrent.Future
/**
* Performs role database actions
*
* @author Camilo Sampedro <camilo.sampedro@udea.edu.co>
*/
@ImplementedBy(classOf[RoleDAOImpl])
trait RoleDAO {
/**
* Adds a new Role
*
* @param role Role to add
* @return Result String
*/
def add(role: Role): Future[String]
/**
    * Gets a Role by its ID
    *
    * @param roleId Role's ID
    * @return Some Role if found, None if it is not found.
*/
def get(roleId: Int): Future[Option[Role]]
/**
    * Deletes a Role from the database.
*
* @param roleId Role's ID
* @return Operation result
*/
def delete(roleId: Int): Future[Int]
/**
    * Lists all the Roles in the database.
*
* @return All Roles found.
*/
def listAll: Future[Seq[Role]]
}
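/**
 * A hedged caller sketch; the object and method names here are illustrative, and an
 * ExecutionContext is assumed to be available wherever the DAO is consumed.
 */
object RoleDAOUsageSketch {
  import scala.concurrent.ExecutionContext.Implicits.global

  // Maps the DAO's Future-based lookup into a human-readable message.
  def describeRole(roleDAO: RoleDAO, roleId: Int): Future[String] =
    roleDAO.get(roleId).map {
      case Some(_) => s"Found role $roleId"
      case None    => s"No role with id $roleId"
    }
}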
|
ProjectAton/AtonLab
|
app/dao/RoleDAO.scala
|
Scala
|
gpl-3.0
| 892
|
package impulsestorm.stargame.lib
import akka.actor._
import akka.util.duration._
import akka.util.Timeout
import net.liftweb.common.{SimpleActor, Logger}
import java.util.Date
import akka.pattern.ask
class StateSupervisor(
newStateMaster: (String, ActorContext) => Option[ActorRef],
SMTimeout: Int = 300)
extends Actor with Logger
{
implicit val timeout = Timeout(5 seconds)
object CleanOldActorsMsg
private var activeSMs =
scala.collection.mutable.HashMap[String, (Long, ActorRef)]()
  // gets the state master if it can, spawning one if needed; fails otherwise.
  // on a successful get, touches the timestamp
def getStateMaster(id: String) = {
def touchAndGive(sMaster: ActorRef) = {
activeSMs.update(id, ((new Date).getTime, sMaster))
Some(sMaster)
}
activeSMs.get(id) match {
case Some((_, sMaster)) => touchAndGive(sMaster) // already in map
case None => newStateMaster(id, context) match {
case Some(sMaster) => touchAndGive(sMaster) // successful spawn
case None => None // failure to spawn SM
}
}
}
def receive = {
case msg: FwdedMsg => getStateMaster(msg.stateId) match {
// if can get stateMaster, forward, otherwise, drop
case Some(sMaster) => sMaster ! msg
case None => {
error("Requested: %s, but no such game".format(msg.stateId))
msg.sender ! NoSuchGame
}
}
case CleanOldActorsMsg => cleanOldActors()
case _ => error("Unknown message received by StateSupervisor")
}
def cleanOldActors() = {
val now = (new Date).getTime
// filter out all the ones which are more stale than SMTimeout
val killMap = activeSMs.filter( kvtup => now - kvtup._2._1 > SMTimeout)
killMap.values.foreach(dateSMasterTuple => {
val sMaster = dateSMasterTuple._2
// send the PrepareShutdownMsg and stop the actors.
(sMaster ? PrepareShutdown) onComplete {
case Left(throwable) => throw throwable
case Right(OK) => {
sMaster ! PoisonPill
}
}
})
info("Cleaned old actors: " + killMap.keys.toString)
killMap.keys.foreach(activeSMs.remove)
}
}
trait FwdedMsg {
val stateId: String
val sender: SimpleActor[Any]
}
case class Subscribe(stateId: String, sender: SimpleActor[Any])
extends FwdedMsg
case class Unsubscribe(stateId: String, sender: SimpleActor[Any])
extends FwdedMsg
object NoSuchGame
object PrepareShutdown
object OK
// Coordinates mutation, persistence, and notification of ONE state
trait StateMaster[StateType <: State[StateType]] extends Actor {
var state: StateType
var listeners: List[SimpleActor[Any]] = Nil
def receive = {
case Subscribe(id, sender) =>
listeners = sender :: listeners // set
case Unsubscribe(id, sender) =>
listeners = listeners.filter(_!=sender) // set
case PrepareShutdown =>
saveToStorage()
sender ! OK
}
// non-atomic saving to durable storage
def saveToStorage()
}
trait State[T] {
def updated() : T
}
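// A minimal illustrative State implementation; the counter field is an assumption
// made only for this sketch of how `updated` produces the next immutable state.
case class CounterState(count: Int) extends State[CounterState] {
  def updated(): CounterState = copy(count = count + 1)
}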
|
tommycli/stargame
|
src/main/scala/impulsestorm/stargame/lib/StateCoordination.scala
|
Scala
|
agpl-3.0
| 3,105
|
/*
* TimelineRenderingImpl.scala
* (Mellite)
*
* Copyright (c) 2012-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.mellite.impl.timeline
import de.sciss.lucre.synth.Txn
import de.sciss.mellite.TimelineTool.{Add, Cursor, Fade, Gain, Move, Mute, Patch, Resize}
import de.sciss.mellite.impl.timeline.tool.{AddImpl, AuditionImpl, CursorImpl, FadeImpl, GainImpl, MoveImpl, MuteImpl, PatchImpl, ResizeImpl}
import de.sciss.mellite.{TimelineTool, TimelineTrackCanvas, TimelineView, UniverseHandler}
object TimelineToolImpl extends TimelineTool.Companion {
def install(): Unit =
TimelineTool.peer = this
def cursor [T <: Txn[T]](canvas: TimelineTrackCanvas[T]): TimelineTool[T, Cursor ] = new CursorImpl (canvas)
def move [T <: Txn[T]](canvas: TimelineTrackCanvas[T]): TimelineTool[T, Move ] = new MoveImpl (canvas)
def resize [T <: Txn[T]](canvas: TimelineTrackCanvas[T]): TimelineTool[T, Resize ] = new ResizeImpl (canvas)
def gain [T <: Txn[T]](canvas: TimelineTrackCanvas[T]): TimelineTool[T, Gain ] = new GainImpl (canvas)
def mute [T <: Txn[T]](canvas: TimelineTrackCanvas[T]): TimelineTool[T, Mute ] = new MuteImpl (canvas)
def fade [T <: Txn[T]](canvas: TimelineTrackCanvas[T]): TimelineTool[T, Fade ] = new FadeImpl (canvas)
def function[T <: Txn[T]](canvas: TimelineTrackCanvas[T], view: TimelineView[T])
(implicit handler: UniverseHandler[T]): TimelineTool[T, Add] =
new AddImpl(canvas, view)
def patch [T <: Txn[T]](canvas: TimelineTrackCanvas[T]): TimelineTool[T, Patch[T]] = new PatchImpl (canvas)
def audition[T <: Txn[T]](canvas: TimelineTrackCanvas[T], view: TimelineView[T]): TimelineTool[T, Unit] =
new AuditionImpl(canvas, view)
}
|
Sciss/Mellite
|
app/src/main/scala/de/sciss/mellite/impl/timeline/TimelineToolImpl.scala
|
Scala
|
agpl-3.0
| 1,955
|
package rta.parser.triggers
import fastparse.noApi._
import java.nio.charset.Charset
import rta.common.Uses
import rta.model.triggers.Implicits._
import rta.model.triggers.Trigger
import rta.parser.TriggerParser
object BluetoothParser extends TriggerParser[Bluetooth] {
import white._
def Prefix: String = Uses.categoryOf[Bluetooth]
def state: P[Trigger.Condition[BluetoothState]] = {
mapParser(BluetoothState.namesToValuesMap).map(v => Trigger.Condition[BluetoothState](_ == v))
}
def connection: P[Trigger.Condition[BluetoothConnection]] = {
lazy val Name = String.filter(_.getBytes(Charset.forName("UTF-8")).length <= 248)
P("connected".withWS ~ "to".withWS ~ (MacAddress.map(v => Trigger.Condition[BluetoothConnection] {
case BluetoothConnection.Connected(_, address) => v == address
case _ => false
}) | (Name map (v => Trigger.Condition[BluetoothConnection] {
case BluetoothConnection.Connected(name, _) => v == name
case _ => false
}))) | ("disconnected".withWS ~ "from".withWS ~ (
MacAddress.map(v => Trigger.Condition[BluetoothConnection] {
case BluetoothConnection.Disconnected => true
case BluetoothConnection.Connected(_, address) => v != address
}) | Name.map(v => Trigger.Condition[BluetoothConnection] {
case BluetoothConnection.Disconnected => true
case BluetoothConnection.Connected(name, _) => v != name
})))
)
}
def Main: P[Trigger.Standalone[_ <: Bluetooth]] = state | connection
}
|
kjanosz/RuleThemAll
|
app/src/main/scala/rta/parser/triggers/BluetoothParser.scala
|
Scala
|
apache-2.0
| 1,533
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.util
import java.io.File
import org.scalatest.Suite
import org.apache.spark.SparkContext
import org.apache.spark.ml.{PredictionModel, Transformer}
import org.apache.spark.ml.linalg.Vector
import org.apache.spark.sql.{DataFrame, Dataset, Encoder, Row}
import org.apache.spark.sql.execution.streaming.MemoryStream
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.streaming.StreamTest
import org.apache.spark.sql.test.TestSparkSession
import org.apache.spark.util.Utils
trait MLTest extends StreamTest with TempDirectory { self: Suite =>
@transient var sc: SparkContext = _
@transient var checkpointDir: String = _
protected override def createSparkSession: TestSparkSession = {
new TestSparkSession(new SparkContext("local[2]", "MLlibUnitTest", sparkConf))
}
override def beforeAll(): Unit = {
super.beforeAll()
sc = spark.sparkContext
checkpointDir = Utils.createDirectory(tempDir.getCanonicalPath, "checkpoints").toString
sc.setCheckpointDir(checkpointDir)
}
override def afterAll() {
try {
Utils.deleteRecursively(new File(checkpointDir))
} finally {
super.afterAll()
}
}
private[util] def testTransformerOnStreamData[A : Encoder](
dataframe: DataFrame,
transformer: Transformer,
firstResultCol: String,
otherResultCols: String*)
(globalCheckFunction: Seq[Row] => Unit): Unit = {
val columnNames = dataframe.schema.fieldNames
val stream = MemoryStream[A]
val columnsWithMetadata = dataframe.schema.map { structField =>
col(structField.name).as(structField.name, structField.metadata)
}
val streamDF = stream.toDS().toDF(columnNames: _*).select(columnsWithMetadata: _*)
val data = dataframe.as[A].collect()
val streamOutput = transformer.transform(streamDF)
.select(firstResultCol, otherResultCols: _*)
testStream(streamOutput) (
AddData(stream, data: _*),
CheckAnswer(globalCheckFunction)
)
}
private[util] def testTransformerOnDF(
dataframe: DataFrame,
transformer: Transformer,
firstResultCol: String,
otherResultCols: String*)
(globalCheckFunction: Seq[Row] => Unit): Unit = {
val dfOutput = transformer.transform(dataframe)
val outputs = dfOutput.select(firstResultCol, otherResultCols: _*).collect()
globalCheckFunction(outputs)
}
def testTransformer[A : Encoder](
dataframe: DataFrame,
transformer: Transformer,
firstResultCol: String,
otherResultCols: String*)
(checkFunction: Row => Unit): Unit = {
testTransformerByGlobalCheckFunc(
dataframe,
transformer,
firstResultCol,
otherResultCols: _*) { rows: Seq[Row] => rows.foreach(checkFunction(_)) }
}
def testTransformerByGlobalCheckFunc[A : Encoder](
dataframe: DataFrame,
transformer: Transformer,
firstResultCol: String,
otherResultCols: String*)
(globalCheckFunction: Seq[Row] => Unit): Unit = {
testTransformerOnStreamData(dataframe, transformer, firstResultCol,
otherResultCols: _*)(globalCheckFunction)
testTransformerOnDF(dataframe, transformer, firstResultCol,
otherResultCols: _*)(globalCheckFunction)
}
def testTransformerByInterceptingException[A : Encoder](
dataframe: DataFrame,
transformer: Transformer,
expectedMessagePart : String,
firstResultCol: String) {
def hasExpectedMessage(exception: Throwable): Boolean =
exception.getMessage.contains(expectedMessagePart) ||
(exception.getCause != null && exception.getCause.getMessage.contains(expectedMessagePart))
withClue(s"""Expected message part "${expectedMessagePart}" is not found in DF test.""") {
val exceptionOnDf = intercept[Throwable] {
testTransformerOnDF(dataframe, transformer, firstResultCol)(_ => Unit)
}
assert(hasExpectedMessage(exceptionOnDf))
}
withClue(s"""Expected message part "${expectedMessagePart}" is not found in stream test.""") {
val exceptionOnStreamData = intercept[Throwable] {
testTransformerOnStreamData(dataframe, transformer, firstResultCol)(_ => Unit)
}
assert(hasExpectedMessage(exceptionOnStreamData))
}
}
def testPredictionModelSinglePrediction(model: PredictionModel[Vector, _],
dataset: Dataset[_]): Unit = {
model.transform(dataset).select(model.getFeaturesCol, model.getPredictionCol)
.collect().foreach {
case Row(features: Vector, prediction: Double) =>
assert(prediction === model.predict(features))
}
}
}
|
brad-kaiser/spark
|
mllib/src/test/scala/org/apache/spark/ml/util/MLTest.scala
|
Scala
|
apache-2.0
| 5,404
|
package ch.ninecode.cim
import org.apache.spark.rdd.RDD
import ch.ninecode.model.Element
case class CIMDifferenceRaw (options: CIMDifferenceOptions) extends CIMDifferenceProcessor
{
def delete (prefix: String, doomed: Element, suffix: String): Option[String] =
{
val strings = doomed.export.split("\\n")
Some(strings.mkString(prefix, "\\n", suffix))
}
def add (prefix: String, newby: Element, suffix: String): Option[String] =
{
val strings = newby.export.split("\\n")
Some(strings.mkString(prefix, "\\n", suffix))
}
def diff (prefix: String, left: Element, right: Element, suffix: String): Option[String] =
{
val ls = left.export
val rs = right.export
if (ls == rs)
None
else
{
val lefts = ls.split("\\n")
val rights = rs.split("\\n")
val l = for (left <- lefts; if !rights.contains(left))
yield left
val r = for (right <- rights; if !lefts.contains(right))
yield right
Some(Array.concat(l.map(x => s"-$x"), r.map(x => s"+$x")).mkString(prefix, "\\n", suffix))
}
}
def process (elements: (String, (Option[Element], Option[Element]))): Option[String] =
{
val (id, (left, right)) = elements
left match
{
case Some(l) =>
right match
{
case Some(r) =>
diff(s"modify ${l.baseclass} $id:\\n", l, r, "")
case None =>
delete(s"delete ${l.baseclass} $id:\\n", l, "")
}
case None =>
right match
{
case Some(r) =>
add(s"add ${r.baseclass} $id:\\n", r, "")
case None =>
None
}
}
}
def execute (join: RDD[(String, (Option[Element], Option[Element]))]): Unit =
{
val diff = join.flatMap(process)
save(diff, options.output)
}
}
|
derrickoswald/CIMScala
|
CIMDifference/src/main/scala/ch/ninecode/cim/CIMDifferenceRaw.scala
|
Scala
|
mit
| 2,098
|
object Main {
class C[T] {}
class D[T] {}
class E {}
def foo[T, V <: T](c: C[T], d: D[V]) {print(1)}
def foo[T](c : C[T], t: T) {print(2)}
def bar {
/* line: 6 */foo(new C[E], new D[E])
}
}
|
ilinum/intellij-scala
|
testdata/resolve2/bug3/SCL3898.scala
|
Scala
|
apache-2.0
| 209
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver
import java.io.{File, FilenameFilter}
import java.net.URL
import java.nio.charset.StandardCharsets
import java.sql.{Date, DriverManager, SQLException, Statement}
import java.util.{Locale, UUID}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.concurrent.duration._
import scala.io.Source
import scala.util.{Random, Try}
import com.google.common.io.Files
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.hive.jdbc.HiveDriver
import org.apache.hive.service.auth.PlainSaslHelper
import org.apache.hive.service.cli.{FetchOrientation, FetchType, GetInfoType}
import org.apache.hive.service.cli.thrift.ThriftCLIServiceClient
import org.apache.thrift.protocol.TBinaryProtocol
import org.apache.thrift.transport.TSocket
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.{SparkException, SparkFunSuite}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.hive.HiveUtils
import org.apache.spark.sql.hive.test.HiveTestUtils
import org.apache.spark.sql.internal.StaticSQLConf.HIVE_THRIFT_SERVER_SINGLESESSION
import org.apache.spark.sql.test.ProcessTestUtils.ProcessOutputCapturer
import org.apache.spark.util.{ThreadUtils, Utils}
object TestData {
def getTestDataFilePath(name: String): URL = {
Thread.currentThread().getContextClassLoader.getResource(s"data/files/$name")
}
val smallKv = getTestDataFilePath("small_kv.txt")
val smallKvWithNull = getTestDataFilePath("small_kv_with_null.txt")
}
class HiveThriftBinaryServerSuite extends HiveThriftJdbcTest {
override def mode: ServerMode.Value = ServerMode.binary
private def withCLIServiceClient(f: ThriftCLIServiceClient => Unit): Unit = {
// Transport creation logic below mimics HiveConnection.createBinaryTransport
val rawTransport = new TSocket("localhost", serverPort)
val user = System.getProperty("user.name")
val transport = PlainSaslHelper.getPlainTransport(user, "anonymous", rawTransport)
val protocol = new TBinaryProtocol(transport)
val client = new ThriftCLIServiceClient(new ThriftserverShimUtils.Client(protocol))
transport.open()
try f(client) finally transport.close()
}
test("GetInfo Thrift API") {
withCLIServiceClient { client =>
val user = System.getProperty("user.name")
val sessionHandle = client.openSession(user, "")
assertResult("Spark SQL", "Wrong GetInfo(CLI_DBMS_NAME) result") {
client.getInfo(sessionHandle, GetInfoType.CLI_DBMS_NAME).getStringValue
}
assertResult("Spark SQL", "Wrong GetInfo(CLI_SERVER_NAME) result") {
client.getInfo(sessionHandle, GetInfoType.CLI_SERVER_NAME).getStringValue
}
assertResult(true, "Spark version shouldn't be \\"Unknown\\"") {
val version = client.getInfo(sessionHandle, GetInfoType.CLI_DBMS_VER).getStringValue
logInfo(s"Spark version: $version")
version != "Unknown"
}
}
}
test("SPARK-16563 ThriftCLIService FetchResults repeat fetching result") {
withCLIServiceClient { client =>
val user = System.getProperty("user.name")
val sessionHandle = client.openSession(user, "")
withJdbcStatement("test_16563") { statement =>
val queries = Seq(
"CREATE TABLE test_16563(key INT, val STRING)",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test_16563")
queries.foreach(statement.execute)
val confOverlay = new java.util.HashMap[java.lang.String, java.lang.String]
val operationHandle = client.executeStatement(
sessionHandle,
"SELECT * FROM test_16563",
confOverlay)
// Fetch result first time
assertResult(5, "Fetching result first time from next row") {
val rows_next = client.fetchResults(
operationHandle,
FetchOrientation.FETCH_NEXT,
1000,
FetchType.QUERY_OUTPUT)
rows_next.numRows()
}
// Fetch result second time from first row
assertResult(5, "Repeat fetching result from first row") {
val rows_first = client.fetchResults(
operationHandle,
FetchOrientation.FETCH_FIRST,
1000,
FetchType.QUERY_OUTPUT)
rows_first.numRows()
}
}
}
}
test("Support beeline --hiveconf and --hivevar") {
withJdbcStatement() { statement =>
executeTest(hiveConfList)
executeTest(hiveVarList)
def executeTest(hiveList: String): Unit = {
hiveList.split(";").foreach{ m =>
val kv = m.split("=")
// select "${a}"; ---> avalue
val resultSet = statement.executeQuery("select \\"${" + kv(0) + "}\\"")
resultSet.next()
assert(resultSet.getString(1) === kv(1))
}
}
}
}
test("JDBC query execution") {
withJdbcStatement("test") { statement =>
val queries = Seq(
"SET spark.sql.shuffle.partitions=3",
"CREATE TABLE test(key INT, val STRING)",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test",
"CACHE TABLE test")
queries.foreach(statement.execute)
assertResult(5, "Row count mismatch") {
val resultSet = statement.executeQuery("SELECT COUNT(*) FROM test")
resultSet.next()
resultSet.getInt(1)
}
}
}
test("Checks Hive version") {
withJdbcStatement() { statement =>
val resultSet = statement.executeQuery("SET spark.sql.hive.version")
resultSet.next()
assert(resultSet.getString(1) === "spark.sql.hive.version")
assert(resultSet.getString(2) === HiveUtils.builtinHiveVersion)
}
}
test("SPARK-3004 regression: result set containing NULL") {
withJdbcStatement("test_null") { statement =>
val queries = Seq(
"CREATE TABLE test_null(key INT, val STRING)",
s"LOAD DATA LOCAL INPATH '${TestData.smallKvWithNull}' OVERWRITE INTO TABLE test_null")
queries.foreach(statement.execute)
val resultSet = statement.executeQuery("SELECT * FROM test_null WHERE key IS NULL")
(0 until 5).foreach { _ =>
resultSet.next()
assert(resultSet.getInt(1) === 0)
assert(resultSet.wasNull())
}
assert(!resultSet.next())
}
}
test("SPARK-4292 regression: result set iterator issue") {
withJdbcStatement("test_4292") { statement =>
val queries = Seq(
"CREATE TABLE test_4292(key INT, val STRING)",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test_4292")
queries.foreach(statement.execute)
val resultSet = statement.executeQuery("SELECT key FROM test_4292")
Seq(238, 86, 311, 27, 165).foreach { key =>
resultSet.next()
assert(resultSet.getInt(1) === key)
}
}
}
test("SPARK-4309 regression: Date type support") {
withJdbcStatement("test_date") { statement =>
val queries = Seq(
"CREATE TABLE test_date(key INT, value STRING)",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test_date")
queries.foreach(statement.execute)
assertResult(Date.valueOf("2011-01-01")) {
val resultSet = statement.executeQuery(
"SELECT CAST('2011-01-01' as date) FROM test_date LIMIT 1")
resultSet.next()
resultSet.getDate(1)
}
}
}
test("SPARK-4407 regression: Complex type support") {
withJdbcStatement("test_map") { statement =>
val queries = Seq(
"CREATE TABLE test_map(key INT, value STRING)",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test_map")
queries.foreach(statement.execute)
assertResult("""{238:"val_238"}""") {
val resultSet = statement.executeQuery("SELECT MAP(key, value) FROM test_map LIMIT 1")
resultSet.next()
resultSet.getString(1)
}
assertResult("""["238","val_238"]""") {
val resultSet = statement.executeQuery(
"SELECT ARRAY(CAST(key AS STRING), value) FROM test_map LIMIT 1")
resultSet.next()
resultSet.getString(1)
}
}
}
test("SPARK-12143 regression: Binary type support") {
withJdbcStatement("test_binary") { statement =>
val queries = Seq(
"CREATE TABLE test_binary(key INT, value STRING)",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test_binary")
queries.foreach(statement.execute)
val expected: Array[Byte] = "val_238".getBytes
assertResult(expected) {
val resultSet = statement.executeQuery(
"SELECT CAST(value as BINARY) FROM test_binary LIMIT 1")
resultSet.next()
resultSet.getObject(1)
}
}
}
test("test multiple session") {
import org.apache.spark.sql.internal.SQLConf
var defaultV1: String = null
var defaultV2: String = null
var data: ArrayBuffer[Int] = null
withMultipleConnectionJdbcStatement("test_map", "db1.test_map2")(
// create table
{ statement =>
val queries = Seq(
"CREATE TABLE test_map(key INT, value STRING)",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test_map",
"CACHE TABLE test_table AS SELECT key FROM test_map ORDER BY key DESC",
"CREATE DATABASE db1")
queries.foreach(statement.execute)
val plan = statement.executeQuery("explain select * from test_table")
plan.next()
plan.next()
assert(plan.getString(1).contains("Scan In-memory table `test_table`"))
val rs1 = statement.executeQuery("SELECT key FROM test_table ORDER BY KEY DESC")
val buf1 = new collection.mutable.ArrayBuffer[Int]()
while (rs1.next()) {
buf1 += rs1.getInt(1)
}
rs1.close()
val rs2 = statement.executeQuery("SELECT key FROM test_map ORDER BY KEY DESC")
val buf2 = new collection.mutable.ArrayBuffer[Int]()
while (rs2.next()) {
buf2 += rs2.getInt(1)
}
rs2.close()
assert(buf1 === buf2)
data = buf1
},
// first session, we get the default value of the session status
{ statement =>
val rs1 = statement.executeQuery(s"SET ${SQLConf.SHUFFLE_PARTITIONS.key}")
rs1.next()
defaultV1 = rs1.getString(1)
assert(defaultV1 != "200")
rs1.close()
val rs2 = statement.executeQuery("SET hive.cli.print.header")
rs2.next()
defaultV2 = rs2.getString(1)
        assert(defaultV2 != "true")
rs2.close()
},
// second session, we update the session status
{ statement =>
val queries = Seq(
s"SET ${SQLConf.SHUFFLE_PARTITIONS.key}=291",
"SET hive.cli.print.header=true"
)
queries.map(statement.execute)
val rs1 = statement.executeQuery(s"SET ${SQLConf.SHUFFLE_PARTITIONS.key}")
rs1.next()
assert("spark.sql.shuffle.partitions" === rs1.getString(1))
assert("291" === rs1.getString(2))
rs1.close()
val rs2 = statement.executeQuery("SET hive.cli.print.header")
rs2.next()
assert("hive.cli.print.header" === rs2.getString(1))
assert("true" === rs2.getString(2))
rs2.close()
},
// third session, we get the latest session status, supposed to be the
// default value
{ statement =>
val rs1 = statement.executeQuery(s"SET ${SQLConf.SHUFFLE_PARTITIONS.key}")
rs1.next()
assert(defaultV1 === rs1.getString(1))
rs1.close()
val rs2 = statement.executeQuery("SET hive.cli.print.header")
rs2.next()
assert(defaultV2 === rs2.getString(1))
rs2.close()
},
// try to access the cached data in another session
{ statement =>
// Cached temporary table can't be accessed by other sessions
intercept[SQLException] {
statement.executeQuery("SELECT key FROM test_table ORDER BY KEY DESC")
}
val plan = statement.executeQuery("explain select key from test_map ORDER BY key DESC")
plan.next()
plan.next()
assert(plan.getString(1).contains("Scan In-memory table `test_table`"))
val rs = statement.executeQuery("SELECT key FROM test_map ORDER BY KEY DESC")
val buf = new collection.mutable.ArrayBuffer[Int]()
while (rs.next()) {
buf += rs.getInt(1)
}
rs.close()
assert(buf === data)
},
// switch another database
{ statement =>
statement.execute("USE db1")
// there is no test_map table in db1
intercept[SQLException] {
statement.executeQuery("SELECT key FROM test_map ORDER BY KEY DESC")
}
statement.execute("CREATE TABLE test_map2(key INT, value STRING)")
},
// access default database
{ statement =>
// current database should still be `default`
intercept[SQLException] {
statement.executeQuery("SELECT key FROM test_map2")
}
statement.execute("USE db1")
// access test_map2
statement.executeQuery("SELECT key from test_map2")
}
)
}
// This test often hangs and then times out, leaving the hanging processes.
// Let's ignore it and improve the test.
ignore("test jdbc cancel") {
withJdbcStatement("test_map") { statement =>
val queries = Seq(
"CREATE TABLE test_map(key INT, value STRING)",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test_map")
queries.foreach(statement.execute)
implicit val ec = ExecutionContext.fromExecutorService(
ThreadUtils.newDaemonSingleThreadExecutor("test-jdbc-cancel"))
try {
// Start a very-long-running query that will take hours to finish, then cancel it in order
// to demonstrate that cancellation works.
val f = Future {
statement.executeQuery(
"SELECT COUNT(*) FROM test_map " +
List.fill(10)("join test_map").mkString(" "))
}
// Note that this is slightly race-prone: if the cancel is issued before the statement
// begins executing then we'll fail with a timeout. As a result, this fixed delay is set
// slightly more conservatively than may be strictly necessary.
Thread.sleep(1000)
statement.cancel()
val e = intercept[SparkException] {
ThreadUtils.awaitResult(f, 3.minute)
}.getCause
assert(e.isInstanceOf[SQLException])
assert(e.getMessage.contains("cancelled"))
// Cancellation is a no-op if spark.sql.hive.thriftServer.async=false
statement.executeQuery("SET spark.sql.hive.thriftServer.async=false")
try {
val sf = Future {
statement.executeQuery(
"SELECT COUNT(*) FROM test_map " +
List.fill(4)("join test_map").mkString(" ")
)
}
// Similarly, this is also slightly race-prone on fast machines where the query above
// might race and complete before we issue the cancel.
Thread.sleep(1000)
statement.cancel()
val rs1 = ThreadUtils.awaitResult(sf, 3.minute)
rs1.next()
assert(rs1.getInt(1) === math.pow(5, 5))
rs1.close()
val rs2 = statement.executeQuery("SELECT COUNT(*) FROM test_map")
rs2.next()
assert(rs2.getInt(1) === 5)
rs2.close()
} finally {
statement.executeQuery("SET spark.sql.hive.thriftServer.async=true")
}
} finally {
ec.shutdownNow()
}
}
}
test("test add jar") {
withMultipleConnectionJdbcStatement("smallKV", "addJar")(
{
statement =>
val jarFile = HiveTestUtils.getHiveHcatalogCoreJar.getCanonicalPath
statement.executeQuery(s"ADD JAR $jarFile")
},
{
statement =>
val queries = Seq(
"CREATE TABLE smallKV(key INT, val STRING)",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE smallKV",
"""CREATE TABLE addJar(key string)
|ROW FORMAT SERDE 'org.apache.hive.hcatalog.data.JsonSerDe'
""".stripMargin)
queries.foreach(statement.execute)
statement.executeQuery(
"""
|INSERT INTO TABLE addJar SELECT 'k1' as key FROM smallKV limit 1
""".stripMargin)
val actualResult =
statement.executeQuery("SELECT key FROM addJar")
val actualResultBuffer = new collection.mutable.ArrayBuffer[String]()
while (actualResult.next()) {
actualResultBuffer += actualResult.getString(1)
}
actualResult.close()
val expectedResult =
statement.executeQuery("SELECT 'k1'")
val expectedResultBuffer = new collection.mutable.ArrayBuffer[String]()
while (expectedResult.next()) {
expectedResultBuffer += expectedResult.getString(1)
}
expectedResult.close()
assert(expectedResultBuffer === actualResultBuffer)
}
)
}
test("Checks Hive version via SET -v") {
withJdbcStatement() { statement =>
val resultSet = statement.executeQuery("SET -v")
val conf = mutable.Map.empty[String, String]
while (resultSet.next()) {
conf += resultSet.getString(1) -> resultSet.getString(2)
}
if (HiveUtils.isHive23) {
assert(conf.get(HiveUtils.FAKE_HIVE_VERSION.key) === Some("2.3.6"))
} else {
assert(conf.get(HiveUtils.FAKE_HIVE_VERSION.key) === Some("1.2.1"))
}
}
}
test("Checks Hive version via SET") {
withJdbcStatement() { statement =>
val resultSet = statement.executeQuery("SET")
val conf = mutable.Map.empty[String, String]
while (resultSet.next()) {
conf += resultSet.getString(1) -> resultSet.getString(2)
}
if (HiveUtils.isHive23) {
assert(conf.get(HiveUtils.FAKE_HIVE_VERSION.key) === Some("2.3.6"))
} else {
assert(conf.get(HiveUtils.FAKE_HIVE_VERSION.key) === Some("1.2.1"))
}
}
}
test("SPARK-11595 ADD JAR with input path having URL scheme") {
withJdbcStatement("test_udtf") { statement =>
try {
val jarPath = "../hive/src/test/resources/TestUDTF.jar"
val jarURL = s"file://${System.getProperty("user.dir")}/$jarPath"
Seq(
s"ADD JAR $jarURL",
s"""CREATE TEMPORARY FUNCTION udtf_count2
|AS 'org.apache.spark.sql.hive.execution.GenericUDTFCount2'
""".stripMargin
).foreach(statement.execute)
val rs1 = statement.executeQuery("DESCRIBE FUNCTION udtf_count2")
assert(rs1.next())
assert(rs1.getString(1) === "Function: udtf_count2")
assert(rs1.next())
assertResult("Class: org.apache.spark.sql.hive.execution.GenericUDTFCount2") {
rs1.getString(1)
}
assert(rs1.next())
assert(rs1.getString(1) === "Usage: N/A.")
val dataPath = "../hive/src/test/resources/data/files/kv1.txt"
Seq(
"CREATE TABLE test_udtf(key INT, value STRING)",
s"LOAD DATA LOCAL INPATH '$dataPath' OVERWRITE INTO TABLE test_udtf"
).foreach(statement.execute)
val rs2 = statement.executeQuery(
"SELECT key, cc FROM test_udtf LATERAL VIEW udtf_count2(value) dd AS cc")
assert(rs2.next())
assert(rs2.getInt(1) === 97)
assert(rs2.getInt(2) === 500)
assert(rs2.next())
assert(rs2.getInt(1) === 97)
assert(rs2.getInt(2) === 500)
} finally {
statement.executeQuery("DROP TEMPORARY FUNCTION udtf_count2")
}
}
}
test("SPARK-11043 check operation log root directory") {
val expectedLine =
"Operation log root directory is created: " + operationLogPath.getAbsoluteFile
val bufferSrc = Source.fromFile(logPath)
Utils.tryWithSafeFinally {
assert(bufferSrc.getLines().exists(_.contains(expectedLine)))
} {
bufferSrc.close()
}
}
test("SPARK-23547 Cleanup the .pipeout file when the Hive Session closed") {
def pipeoutFileList(sessionID: UUID): Array[File] = {
lScratchDir.listFiles(new FilenameFilter {
override def accept(dir: File, name: String): Boolean = {
name.startsWith(sessionID.toString) && name.endsWith(".pipeout")
}
})
}
withCLIServiceClient { client =>
val user = System.getProperty("user.name")
val sessionHandle = client.openSession(user, "")
val sessionID = sessionHandle.getSessionId
if (HiveUtils.isHive23) {
assert(pipeoutFileList(sessionID).length == 2)
} else {
assert(pipeoutFileList(sessionID).length == 1)
}
client.closeSession(sessionHandle)
assert(pipeoutFileList(sessionID).length == 0)
}
}
test("SPARK-24829 Checks cast as float") {
withJdbcStatement() { statement =>
val resultSet = statement.executeQuery("SELECT CAST('4.56' AS FLOAT)")
resultSet.next()
assert(resultSet.getString(1) === "4.56")
}
}
test("SPARK-28463: Thriftserver throws BigDecimal incompatible with HiveDecimal") {
withJdbcStatement() { statement =>
val rs = statement.executeQuery("SELECT CAST(1 AS decimal(38, 18))")
assert(rs.next())
assert(rs.getBigDecimal(1) === new java.math.BigDecimal("1.000000000000000000"))
}
}
}
class SingleSessionSuite extends HiveThriftJdbcTest {
override def mode: ServerMode.Value = ServerMode.binary
override protected def extraConf: Seq[String] =
s"--conf ${HIVE_THRIFT_SERVER_SINGLESESSION.key}=true" :: Nil
test("share the temporary functions across JDBC connections") {
withMultipleConnectionJdbcStatement()(
{ statement =>
val jarPath = "../hive/src/test/resources/TestUDTF.jar"
val jarURL = s"file://${System.getProperty("user.dir")}/$jarPath"
// Configurations and temporary functions added in this session should be visible to all
// the other sessions.
Seq(
"SET foo=bar",
s"ADD JAR $jarURL",
s"""CREATE TEMPORARY FUNCTION udtf_count2
|AS 'org.apache.spark.sql.hive.execution.GenericUDTFCount2'
""".stripMargin
).foreach(statement.execute)
},
{ statement =>
try {
val rs1 = statement.executeQuery("SET foo")
assert(rs1.next())
assert(rs1.getString(1) === "foo")
assert(rs1.getString(2) === "bar")
val rs2 = statement.executeQuery("DESCRIBE FUNCTION udtf_count2")
assert(rs2.next())
assert(rs2.getString(1) === "Function: udtf_count2")
assert(rs2.next())
assertResult("Class: org.apache.spark.sql.hive.execution.GenericUDTFCount2") {
rs2.getString(1)
}
assert(rs2.next())
assert(rs2.getString(1) === "Usage: N/A.")
} finally {
statement.executeQuery("DROP TEMPORARY FUNCTION udtf_count2")
}
}
)
}
test("unable to changing spark.sql.hive.thriftServer.singleSession using JDBC connections") {
withJdbcStatement() { statement =>
// JDBC connections are not able to set the conf spark.sql.hive.thriftServer.singleSession
val e = intercept[SQLException] {
statement.executeQuery("SET spark.sql.hive.thriftServer.singleSession=false")
}.getMessage
assert(e.contains(
"Cannot modify the value of a static config: spark.sql.hive.thriftServer.singleSession"))
}
}
test("share the current database and temporary tables across JDBC connections") {
withMultipleConnectionJdbcStatement()(
{ statement =>
statement.execute("CREATE DATABASE IF NOT EXISTS db1")
},
{ statement =>
val rs1 = statement.executeQuery("SELECT current_database()")
assert(rs1.next())
assert(rs1.getString(1) === "default")
statement.execute("USE db1")
val rs2 = statement.executeQuery("SELECT current_database()")
assert(rs2.next())
assert(rs2.getString(1) === "db1")
statement.execute("CREATE TEMP VIEW tempView AS SELECT 123")
},
{ statement =>
// the current database is set to db1 by another JDBC connection.
val rs1 = statement.executeQuery("SELECT current_database()")
assert(rs1.next())
assert(rs1.getString(1) === "db1")
val rs2 = statement.executeQuery("SELECT * from tempView")
assert(rs2.next())
assert(rs2.getString(1) === "123")
statement.execute("USE default")
statement.execute("DROP VIEW tempView")
statement.execute("DROP DATABASE db1 CASCADE")
}
)
}
}
class HiveThriftHttpServerSuite extends HiveThriftJdbcTest {
override def mode: ServerMode.Value = ServerMode.http
test("JDBC query execution") {
withJdbcStatement("test") { statement =>
val queries = Seq(
"SET spark.sql.shuffle.partitions=3",
"CREATE TABLE test(key INT, val STRING)",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test",
"CACHE TABLE test")
queries.foreach(statement.execute)
assertResult(5, "Row count mismatch") {
val resultSet = statement.executeQuery("SELECT COUNT(*) FROM test")
resultSet.next()
resultSet.getInt(1)
}
}
}
test("Checks Hive version") {
withJdbcStatement() { statement =>
val resultSet = statement.executeQuery("SET spark.sql.hive.version")
resultSet.next()
assert(resultSet.getString(1) === "spark.sql.hive.version")
assert(resultSet.getString(2) === HiveUtils.builtinHiveVersion)
}
}
test("SPARK-24829 Checks cast as float") {
withJdbcStatement() { statement =>
val resultSet = statement.executeQuery("SELECT CAST('4.56' AS FLOAT)")
resultSet.next()
assert(resultSet.getString(1) === "4.56")
}
}
}
object ServerMode extends Enumeration {
val binary, http = Value
}
abstract class HiveThriftJdbcTest extends HiveThriftServer2Test {
Utils.classForName(classOf[HiveDriver].getCanonicalName)
private def jdbcUri = if (mode == ServerMode.http) {
s"""jdbc:hive2://localhost:$serverPort/
|default?
|hive.server2.transport.mode=http;
|hive.server2.thrift.http.path=cliservice;
|${hiveConfList}#${hiveVarList}
""".stripMargin.split("\\n").mkString.trim
} else {
s"jdbc:hive2://localhost:$serverPort/?${hiveConfList}#${hiveVarList}"
}
def withMultipleConnectionJdbcStatement(tableNames: String*)(fs: (Statement => Unit)*) {
val user = System.getProperty("user.name")
val connections = fs.map { _ => DriverManager.getConnection(jdbcUri, user, "") }
val statements = connections.map(_.createStatement())
try {
statements.zip(fs).foreach { case (s, f) => f(s) }
} finally {
tableNames.foreach { name =>
// TODO: Need a better way to drop the view.
if (name.toUpperCase(Locale.ROOT).startsWith("VIEW")) {
statements(0).execute(s"DROP VIEW IF EXISTS $name")
} else {
statements(0).execute(s"DROP TABLE IF EXISTS $name")
}
}
statements.foreach(_.close())
connections.foreach(_.close())
}
}
def withDatabase(dbNames: String*)(fs: (Statement => Unit)*) {
val user = System.getProperty("user.name")
val connections = fs.map { _ => DriverManager.getConnection(jdbcUri, user, "") }
val statements = connections.map(_.createStatement())
try {
statements.zip(fs).foreach { case (s, f) => f(s) }
} finally {
dbNames.foreach { name =>
statements(0).execute(s"DROP DATABASE IF EXISTS $name")
}
statements.foreach(_.close())
connections.foreach(_.close())
}
}
def withJdbcStatement(tableNames: String*)(f: Statement => Unit) {
withMultipleConnectionJdbcStatement(tableNames: _*)(f)
}
}
abstract class HiveThriftServer2Test extends SparkFunSuite with BeforeAndAfterAll with Logging {
def mode: ServerMode.Value
private val CLASS_NAME = HiveThriftServer2.getClass.getCanonicalName.stripSuffix("$")
private val LOG_FILE_MARK = s"starting $CLASS_NAME, logging to "
protected val startScript = "../../sbin/start-thriftserver.sh".split("/").mkString(File.separator)
protected val stopScript = "../../sbin/stop-thriftserver.sh".split("/").mkString(File.separator)
private var listeningPort: Int = _
protected def serverPort: Int = listeningPort
protected val hiveConfList = "a=avalue;b=bvalue"
protected val hiveVarList = "c=cvalue;d=dvalue"
protected def user = System.getProperty("user.name")
protected var warehousePath: File = _
protected var metastorePath: File = _
protected def metastoreJdbcUri = s"jdbc:derby:;databaseName=$metastorePath;create=true"
private val pidDir: File = Utils.createTempDir(namePrefix = "thriftserver-pid")
protected var logPath: File = _
protected var operationLogPath: File = _
protected var lScratchDir: File = _
private var logTailingProcess: Process = _
private var diagnosisBuffer: ArrayBuffer[String] = ArrayBuffer.empty[String]
protected def extraConf: Seq[String] = Nil
protected def serverStartCommand(port: Int) = {
val portConf = if (mode == ServerMode.binary) {
ConfVars.HIVE_SERVER2_THRIFT_PORT
} else {
ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT
}
val driverClassPath = {
      // Writes a temporary log4j.properties and prepends it to the driver classpath, so that it
// overrides all other potential log4j configurations contained in other dependency jar files.
val tempLog4jConf = Utils.createTempDir().getCanonicalPath
Files.write(
"""log4j.rootCategory=DEBUG, console
|log4j.appender.console=org.apache.log4j.ConsoleAppender
|log4j.appender.console.target=System.err
|log4j.appender.console.layout=org.apache.log4j.PatternLayout
|log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n
""".stripMargin,
new File(s"$tempLog4jConf/log4j.properties"),
StandardCharsets.UTF_8)
tempLog4jConf
}
s"""$startScript
| --master local
| --hiveconf ${ConfVars.METASTORECONNECTURLKEY}=$metastoreJdbcUri
| --hiveconf ${ConfVars.METASTOREWAREHOUSE}=$warehousePath
| --hiveconf ${ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST}=localhost
| --hiveconf ${ConfVars.HIVE_SERVER2_TRANSPORT_MODE}=$mode
| --hiveconf ${ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION}=$operationLogPath
| --hiveconf ${ConfVars.LOCALSCRATCHDIR}=$lScratchDir
| --hiveconf $portConf=$port
| --driver-class-path $driverClassPath
| --driver-java-options -Dlog4j.debug
| --conf spark.ui.enabled=false
| ${extraConf.mkString("\\n")}
""".stripMargin.split("\\\\s+").toSeq
}
/**
* String to scan for when looking for the thrift binary endpoint running.
* This can change across Hive versions.
*/
val THRIFT_BINARY_SERVICE_LIVE = "Starting ThriftBinaryCLIService on port"
/**
* String to scan for when looking for the thrift HTTP endpoint running.
* This can change across Hive versions.
*/
val THRIFT_HTTP_SERVICE_LIVE = "Started ThriftHttpCLIService in http"
val SERVER_STARTUP_TIMEOUT = 3.minutes
private def startThriftServer(port: Int, attempt: Int) = {
warehousePath = Utils.createTempDir()
warehousePath.delete()
metastorePath = Utils.createTempDir()
metastorePath.delete()
operationLogPath = Utils.createTempDir()
operationLogPath.delete()
lScratchDir = Utils.createTempDir()
lScratchDir.delete()
logPath = null
logTailingProcess = null
val command = serverStartCommand(port)
diagnosisBuffer ++=
s"""
|### Attempt $attempt ###
|HiveThriftServer2 command line: $command
|Listening port: $port
|System user: $user
""".stripMargin.split("\\n")
logInfo(s"Trying to start HiveThriftServer2: port=$port, mode=$mode, attempt=$attempt")
logPath = {
val lines = Utils.executeAndGetOutput(
command = command,
extraEnvironment = Map(
// Disables SPARK_TESTING to exclude log4j.properties in test directories.
"SPARK_TESTING" -> "0",
// But set SPARK_SQL_TESTING to make spark-class happy.
"SPARK_SQL_TESTING" -> "1",
// Points SPARK_PID_DIR to SPARK_HOME, otherwise only 1 Thrift server instance can be
// started at a time, which is not Jenkins friendly.
"SPARK_PID_DIR" -> pidDir.getCanonicalPath),
redirectStderr = true)
logInfo(s"COMMAND: $command")
logInfo(s"OUTPUT: $lines")
lines.split("\\n").collectFirst {
case line if line.contains(LOG_FILE_MARK) => new File(line.drop(LOG_FILE_MARK.length))
}.getOrElse {
throw new RuntimeException("Failed to find HiveThriftServer2 log file.")
}
}
val serverStarted = Promise[Unit]()
// Ensures that the following "tail" command won't fail.
logPath.createNewFile()
val successLines = Seq(THRIFT_BINARY_SERVICE_LIVE, THRIFT_HTTP_SERVICE_LIVE)
logTailingProcess = {
val command = s"/usr/bin/env tail -n +0 -f ${logPath.getCanonicalPath}".split(" ")
// Using "-n +0" to make sure all lines in the log file are checked.
val builder = new ProcessBuilder(command: _*)
val captureOutput = (line: String) => diagnosisBuffer.synchronized {
diagnosisBuffer += line
successLines.foreach { r =>
if (line.contains(r)) {
serverStarted.trySuccess(())
}
}
}
val process = builder.start()
new ProcessOutputCapturer(process.getInputStream, captureOutput).start()
new ProcessOutputCapturer(process.getErrorStream, captureOutput).start()
process
}
ThreadUtils.awaitResult(serverStarted.future, SERVER_STARTUP_TIMEOUT)
}
private def stopThriftServer(): Unit = {
    // The `spark-daemon.sh` script uses kill, which is not synchronous, so we have to wait for a while.
Utils.executeAndGetOutput(
command = Seq(stopScript),
extraEnvironment = Map("SPARK_PID_DIR" -> pidDir.getCanonicalPath))
Thread.sleep(3.seconds.toMillis)
warehousePath.delete()
warehousePath = null
metastorePath.delete()
metastorePath = null
operationLogPath.delete()
operationLogPath = null
lScratchDir.delete()
lScratchDir = null
Option(logPath).foreach(_.delete())
logPath = null
Option(logTailingProcess).foreach(_.destroy())
logTailingProcess = null
}
private def dumpLogs(): Unit = {
logError(
s"""
|=====================================
|HiveThriftServer2Suite failure output
|=====================================
|${diagnosisBuffer.mkString("\\n")}
|=========================================
|End HiveThriftServer2Suite failure output
|=========================================
""".stripMargin)
}
override protected def beforeAll(): Unit = {
super.beforeAll()
// Chooses a random port between 10000 and 19999
listeningPort = 10000 + Random.nextInt(10000)
diagnosisBuffer.clear()
// Retries up to 3 times with different port numbers if the server fails to start
(1 to 3).foldLeft(Try(startThriftServer(listeningPort, 0))) { case (started, attempt) =>
started.orElse {
listeningPort += 1
stopThriftServer()
Try(startThriftServer(listeningPort, attempt))
}
}.recover {
case cause: Throwable =>
dumpLogs()
throw cause
}.get
logInfo(s"HiveThriftServer2 started successfully")
}
override protected def afterAll(): Unit = {
try {
stopThriftServer()
logInfo("HiveThriftServer2 stopped")
} finally {
super.afterAll()
}
}
}
|
kiszk/spark
|
sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala
|
Scala
|
apache-2.0
| 37,162
|
package fpinscala.answers.localeffects
import fpinscala.answers.monads._
object Mutable {
def quicksort(xs: List[Int]): List[Int] = if (xs.isEmpty) xs else {
val arr = xs.toArray
def swap(x: Int, y: Int) = {
val tmp = arr(x)
arr(x) = arr(y)
arr(y) = tmp
}
def partition(l: Int, r: Int, pivot: Int) = {
val pivotVal = arr(pivot)
swap(pivot, r)
var j = l
for (i <- l until r) if (arr(i) < pivotVal) {
swap(i, j)
j += 1
}
swap(j, r)
j
}
def qs(l: Int, r: Int): Unit = if (l < r) {
val pi = partition(l, r, l + (r - l) / 2)
qs(l, pi - 1)
qs(pi + 1, r)
}
qs(0, arr.length - 1)
arr.toList
}
}
sealed trait ST[S,A] { self =>
protected def run(s: S): (A,S)
def map[B](f: A => B): ST[S,B] = new ST[S,B] {
def run(s: S) = {
val (a, s1) = self.run(s)
(f(a), s1)
}
}
def flatMap[B](f: A => ST[S,B]): ST[S,B] = new ST[S,B] {
def run(s: S) = {
val (a, s1) = self.run(s)
f(a).run(s1)
}
}
}
object ST {
def apply[S,A](a: => A) = {
lazy val memo = a
new ST[S,A] {
def run(s: S) = (memo, s)
}
}
def runST[A](st: RunnableST[A]): A =
st[Unit].run(())._1
}
sealed trait STRef[S,A] {
protected var cell: A
def read: ST[S,A] = ST(cell)
def write(a: => A): ST[S,Unit] = new ST[S,Unit] {
def run(s: S) = {
cell = a
((), s)
}
}
}
object STRef {
def apply[S,A](a: A): ST[S, STRef[S,A]] = ST(new STRef[S,A] {
var cell = a
})
}
trait RunnableST[A] {
def apply[S]: ST[S,A]
}
// Scala requires an implicit Manifest for constructing arrays.
sealed abstract class STArray[S,A](implicit manifest: Manifest[A]) {
protected def value: Array[A]
def size: ST[S,Int] = ST(value.size)
  // Write a value at the given index of the array
def write(i: Int, a: A): ST[S,Unit] = new ST[S,Unit] {
def run(s: S) = {
value(i) = a
((), s)
}
}
// Read the value at the given index of the array
def read(i: Int): ST[S,A] = ST(value(i))
// Turn the array into an immutable list
def freeze: ST[S,List[A]] = ST(value.toList)
def fill(xs: Map[Int,A]): ST[S,Unit] =
xs.foldRight(ST[S,Unit](())) {
case ((k, v), st) => st flatMap (_ => write(k, v))
}
def swap(i: Int, j: Int): ST[S,Unit] = for {
x <- read(i)
y <- read(j)
_ <- write(i, y)
_ <- write(j, x)
} yield ()
}
object STArray {
// Construct an array of the given size filled with the value v
def apply[S,A:Manifest](sz: Int, v: A): ST[S, STArray[S,A]] =
ST(new STArray[S,A] {
lazy val value = Array.fill(sz)(v)
})
def fromList[S,A:Manifest](xs: List[A]): ST[S, STArray[S,A]] =
ST(new STArray[S,A] {
lazy val value = xs.toArray
})
}
object Immutable {
def noop[S] = ST[S,Unit](())
def partition[S](a: STArray[S,Int], l: Int, r: Int, pivot: Int): ST[S,Int] = for {
vp <- a.read(pivot)
_ <- a.swap(pivot, r)
j <- STRef(l)
_ <- (l until r).foldLeft(noop[S])((s, i) => for {
_ <- s
vi <- a.read(i)
_ <- if (vi < vp) (for {
vj <- j.read
_ <- a.swap(i, vj)
_ <- j.write(vj + 1)
} yield ()) else noop[S]
} yield ())
x <- j.read
_ <- a.swap(x, r)
} yield x
def qs[S](a: STArray[S,Int], l: Int, r: Int): ST[S, Unit] = if (l < r) for {
pi <- partition(a, l, r, l + (r - l) / 2)
_ <- qs(a, l, pi - 1)
_ <- qs(a, pi + 1, r)
} yield () else noop[S]
def quicksort(xs: List[Int]): List[Int] =
if (xs.isEmpty) xs else ST.runST(new RunnableST[List[Int]] {
def apply[S] = for {
arr <- STArray.fromList(xs)
size <- arr.size
_ <- qs(arr, 0, size - 1)
sorted <- arr.freeze
} yield sorted
})
}
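// Small illustrative check (not in the original source): the pure, ST-based
// quicksort and the locally-mutable one should agree on any input.
object QuicksortCheck {
  def agree(xs: List[Int]): Boolean =
    Mutable.quicksort(xs) == Immutable.quicksort(xs)
}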
import scala.collection.mutable.HashMap
sealed trait STMap[S,K,V] {
protected def table: HashMap[K,V]
def size: ST[S,Int] = ST(table.size)
// Get the value under a key
def apply(k: K): ST[S,V] = ST(table(k))
// Get the value under a key, or None if the key does not exist
def get(k: K): ST[S, Option[V]] = ST(table.get(k))
// Add a value under a key
def +=(kv: (K, V)): ST[S,Unit] = ST(table += kv)
// Remove a key
def -=(k: K): ST[S,Unit] = ST(table -= k)
}
object STMap {
def empty[S,K,V]: ST[S, STMap[S,K,V]] = ST(new STMap[S,K,V] {
val table = HashMap.empty[K,V]
})
def fromMap[S,K,V](m: Map[K,V]): ST[S, STMap[S,K,V]] = ST(new STMap[S,K,V] {
val table = (HashMap.newBuilder[K,V] ++= m).result
})
}
|
peterbecich/fpinscala
|
answers/src/main/scala/fpinscala/localeffects/LocalEffects.scala
|
Scala
|
mit
| 4,558
|
/*
* Copyright (c) 2013-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.collectors
package scalastream
package sinks
import java.util.Properties
import org.apache.kafka.clients.producer._
import model._
import scala.collection.JavaConverters._
/**
* Kafka Sink for the Scala collector
*/
class KafkaSink(
kafkaConfig: Kafka,
bufferConfig: BufferConfig,
topicName: String
) extends Sink {
// Records must not exceed MaxBytes - 1MB
val MaxBytes = 1000000L
private val kafkaProducer = createProducer
/**
* Creates a new Kafka Producer with the given
* configuration options
*
* @return a new Kafka Producer
*/
private def createProducer: KafkaProducer[String, Array[Byte]] = {
log.info(s"Create Kafka Producer to brokers: ${kafkaConfig.brokers}")
val props = new Properties()
props.put("bootstrap.servers", kafkaConfig.brokers)
props.put("acks", "all")
props.put("retries", kafkaConfig.retries.toString)
props.put("buffer.memory", bufferConfig.byteLimit.toString)
props.put("linger.ms", bufferConfig.timeLimit.toString)
props.put("key.serializer",
"org.apache.kafka.common.serialization.StringSerializer")
props.put("value.serializer",
"org.apache.kafka.common.serialization.ByteArraySerializer")
props.putAll(kafkaConfig.producerConf.getOrElse(Map()).asJava)
new KafkaProducer[String, Array[Byte]](props)
}
/**
* Store raw events to the topic
*
* @param events The list of events to send
* @param key The partition key to use
*/
override def storeRawEvents(events: List[Array[Byte]], key: String): List[Array[Byte]] = {
log.debug(s"Writing ${events.size} Thrift records to Kafka topic $topicName at key $key")
events.foreach { event =>
kafkaProducer.send(
new ProducerRecord(topicName, key, event),
new Callback {
override def onCompletion(metadata: RecordMetadata, e: Exception): Unit =
if (e != null) log.error(s"Sending event failed: ${e.getMessage}")
}
)
}
Nil
}
}
|
RetentionGrid/snowplow
|
2-collectors/scala-stream-collector/kafka/src/main/scala/com.snowplowanalytics.snowplow.collectors.scalastream/sinks/KafkaSink.scala
|
Scala
|
apache-2.0
| 2,736
|
/*
* Copyright 2017-2022 John Snow Labs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.johnsnowlabs.nlp.annotators
import com.johnsnowlabs.collections.SearchTrie
import com.johnsnowlabs.nlp.AnnotatorType._
import com.johnsnowlabs.nlp._
import com.johnsnowlabs.nlp.serialization.StructFeature
import org.apache.spark.ml.param.{BooleanParam, Param}
import org.apache.spark.ml.util.Identifiable
import scala.annotation.{tailrec => tco}
import scala.collection.mutable.ArrayBuffer
/**
* Instantiated model of the [[TextMatcher]].
* For usage and examples see the documentation of the main class.
*
 * @param uid internally required UID to make it writable
* @groupname anno Annotator types
* @groupdesc anno Required input and expected output annotator types
* @groupname Ungrouped Members
* @groupname param Parameters
* @groupname setParam Parameter setters
* @groupname getParam Parameter getters
* @groupname Ungrouped Members
* @groupprio param 1
* @groupprio anno 2
* @groupprio Ungrouped 3
* @groupprio setParam 4
* @groupprio getParam 5
* @groupdesc param A list of (hyper-)parameter keys this annotator can take. Users can set and get the parameter values through setters and getters, respectively.
*/
class TextMatcherModel(override val uid: String) extends AnnotatorModel[TextMatcherModel] with HasSimpleAnnotate[TextMatcherModel] {
/** Output annotator type : CHUNK
*
* @group anno
**/
override val outputAnnotatorType: AnnotatorType = CHUNK
/** input annotator type : DOCUMENT, TOKEN
*
* @group anno
**/
override val inputAnnotatorTypes: Array[AnnotatorType] = Array(DOCUMENT, TOKEN)
/** searchTrie for Searching words
*
* @group param
**/
val searchTrie = new StructFeature[SearchTrie](this, "searchTrie")
/** whether to merge overlapping matched chunks. Defaults false
*
* @group param
**/
val mergeOverlapping = new BooleanParam(this, "mergeOverlapping", "whether to merge overlapping matched chunks. Defaults false")
/** Value for the entity metadata field
*
* @group param
**/
val entityValue = new Param[String](this, "entityValue", "Value for the entity metadata field")
/** Whether the TextMatcher should take the CHUNK from TOKEN or not
*
* @group param
**/
val buildFromTokens = new BooleanParam(this, "buildFromTokens", "Whether the TextMatcher should take the CHUNK from TOKEN or not")
/** SearchTrie of Tokens
*
* @group setParam
**/
def setSearchTrie(value: SearchTrie): this.type = set(searchTrie, value)
/** Whether to merge overlapping matched chunks. Defaults false
*
* @group setParam
**/
def setMergeOverlapping(v: Boolean): this.type = set(mergeOverlapping, v)
/** Whether to merge overlapping matched chunks. Defaults false
*
* @group getParam
**/
def getMergeOverlapping: Boolean = $(mergeOverlapping)
/** Setter for Value for the entity metadata field
*
* @group setParam
**/
def setEntityValue(v: String): this.type = set(entityValue, v)
/** Getter for Value for the entity metadata field
*
* @group getParam
**/
def getEntityValue: String = $(entityValue)
  /** internal constructor for writable annotator */
def this() = this(Identifiable.randomUID("ENTITY_EXTRACTOR"))
/** Setter for buildFromTokens param
*
* @group setParam
**/
def setBuildFromTokens(v: Boolean): this.type = set(buildFromTokens, v)
/** Getter for buildFromTokens param
*
* @group getParam
**/
def getBuildFromTokens: Boolean = $(buildFromTokens)
setDefault(inputCols, Array(TOKEN))
setDefault(mergeOverlapping, false)
setDefault(entityValue, "entity")
@tco final protected def collapse(rs: List[(Int, Int)], sep: List[(Int, Int)] = Nil): List[(Int, Int)] = rs match {
case x :: y :: rest =>
if (y._1 > x._2) collapse(y :: rest, x :: sep)
else collapse((x._1, x._2 max y._2) :: rest, sep)
case _ =>
(rs ::: sep).reverse
}
protected def merge(rs: List[(Int,Int)]): List[(Int,Int)] = collapse(rs.sortBy(_._1))
/**
*
* Searches entities and stores them in the annotation. Defines annotator phrase matching depending on whether we are using SBD or not
*
* @return Extracted Entities
*/
override def annotate(annotations: Seq[Annotation]): Seq[Annotation] = {
val result = ArrayBuffer[Annotation]()
val sentences = annotations.filter(_.annotatorType == AnnotatorType.DOCUMENT)
sentences.zipWithIndex.foreach{case (sentence, sentenceIndex) =>
val tokens = annotations.filter( token =>
token.annotatorType == AnnotatorType.TOKEN &&
token.begin >= sentence.begin &&
token.end <= sentence.end)
val foundTokens = $$(searchTrie).search(tokens.map(_.result)).toList
val finalTokens = if($(mergeOverlapping)) merge(foundTokens) else foundTokens
for ((begin, end) <- finalTokens) {
val firstTokenBegin = tokens(begin).begin
val lastTokenEnd = tokens(end).end
/** token indices are not relative to sentence but to document, adjust offset accordingly */
val normalizedText = if(!$(buildFromTokens)) sentence.result.substring(firstTokenBegin - sentence.begin, lastTokenEnd - sentence.begin + 1)
else tokens.filter(t => t.begin >= firstTokenBegin && t.end <= lastTokenEnd).map(_.result).mkString(" ")
val annotation = Annotation(
outputAnnotatorType,
firstTokenBegin,
lastTokenEnd,
normalizedText,
Map("entity"->$(entityValue), "sentence" -> sentenceIndex.toString, "chunk" -> result.length.toString)
)
result.append(annotation)
}
}
result
}
}
trait ReadablePretrainedTextMatcher extends ParamsAndFeaturesReadable[TextMatcherModel] with HasPretrained[TextMatcherModel] {
override val defaultModelName = None
override def pretrained(): TextMatcherModel = super.pretrained()
override def pretrained(name: String): TextMatcherModel = super.pretrained(name)
override def pretrained(name: String, lang: String): TextMatcherModel = super.pretrained(name, lang)
override def pretrained(name: String, lang: String, remoteLoc: String): TextMatcherModel = super.pretrained(name, lang, remoteLoc)
}
/**
* This is the companion object of [[TextMatcherModel]]. Please refer to that class for the documentation.
*/
object TextMatcherModel extends ReadablePretrainedTextMatcher
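// Hedged configuration sketch (not part of the original file): a TextMatcherModel is
// normally produced by fitting the TextMatcher approach or loaded via pretrained();
// here we only exercise the setters defined above. The column names and the "ORG"
// entity value are illustrative assumptions.
object TextMatcherModelExample {
  val configured: TextMatcherModel = new TextMatcherModel()
    .setInputCols("document", "token")
    .setOutputCol("entity")
    .setEntityValue("ORG")
    .setMergeOverlapping(true)
    .setBuildFromTokens(false)
}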
|
JohnSnowLabs/spark-nlp
|
src/main/scala/com/johnsnowlabs/nlp/annotators/TextMatcherModel.scala
|
Scala
|
apache-2.0
| 7,033
|
package com.twitter.finagle
import java.net.{InetAddress, InetSocketAddress}
import java.util.{Map => JMap}
import scala.collection.JavaConverters._
import scala.util.control.NoStackTrace
/**
* An [[Address]] represents the physical location of a single host or
* endpoint. It also includes [[Addr.Metadata]] (typically set by [[Namer]]s
* and [[Resolver]]s) that provides additional configuration to client stacks.
*
* Note that a bound [[Addr]] contains a set of [[Address]]es and [[Addr.Metadata]]
* that pertains to the entire set.
*/
sealed trait Address
object Address {
private[finagle] val failing: Address =
Address.Failed(new IllegalArgumentException("failing") with NoStackTrace)
/**
* An address represented by an Internet socket address.
*/
case class Inet(
addr: InetSocketAddress,
metadata: Addr.Metadata)
extends Address
/**
* An address that fails with the given `cause`.
*/
case class Failed(cause: Throwable) extends Address
/** Create a new [[Address]] with given [[java.net.InetSocketAddress]]. */
def apply(addr: InetSocketAddress): Address =
Address.Inet(addr, Addr.Metadata.empty)
/** Create a new [[Address]] with given `host` and `port`. */
def apply(host: String, port: Int): Address =
Address(new InetSocketAddress(host, port))
/** Create a new loopback [[Address]] with the given `port`. */
def apply(port: Int): Address =
Address(new InetSocketAddress(InetAddress.getLoopbackAddress, port))
}
package exp {
object Address {
/** Create a new [[Address]] with the given [[com.twitter.finagle.ServiceFactory]]. */
def apply[Req, Rep](factory: com.twitter.finagle.ServiceFactory[Req, Rep]): Address =
Address.ServiceFactory(factory, Addr.Metadata.empty)
/**
* An endpoint address represented by a [[com.twitter.finagle.ServiceFactory]]
* that implements the endpoint.
*/
case class ServiceFactory[Req, Rep](
factory: com.twitter.finagle.ServiceFactory[Req, Rep],
metadata: Addr.Metadata)
extends Address
}
}
/**
* A Java adaptation of the [[com.twitter.finagle.Address]] companion object.
*/
object Addresses {
/**
* @see com.twitter.finagle.Address.Inet
*/
def newInetAddress(ia: InetSocketAddress): Address =
Address.Inet(ia, Addr.Metadata.empty)
/**
* @see com.twitter.finagle.Address.Inet
*/
def newInetAddress(ia: InetSocketAddress, metadata: JMap[String, Any]): Address =
Address.Inet(ia, metadata.asScala.toMap)
/**
* @see com.twitter.finagle.Address.Failed
*/
def newFailedAddress(cause: Throwable): Address =
Address.Failed(cause)
}
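// Illustrative construction examples (not part of the original file); the host and
// port values are arbitrary.
object AddressExamples {
  val byHostAndPort: Address = Address("localhost", 8080) // Inet address with empty metadata
  val loopbackOnly: Address = Address(9090)               // loopback address on the given port
  val failed: Address = Addresses.newFailedAddress(new Exception("endpoint unavailable"))
}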
|
adriancole/finagle
|
finagle-core/src/main/scala/com/twitter/finagle/Address.scala
|
Scala
|
apache-2.0
| 2,661
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver
import java.util.UUID
import org.apache.commons.lang3.exception.ExceptionUtils
import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType
import org.apache.hive.service.cli._
import org.apache.hive.service.cli.operation.GetTableTypesOperation
import org.apache.hive.service.cli.session.HiveSession
import org.apache.spark.internal.Logging
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.catalyst.catalog.CatalogTableType
import org.apache.spark.util.{Utils => SparkUtils}
/**
* Spark's own GetTableTypesOperation
*
* @param sqlContext SQLContext to use
* @param parentSession a HiveSession from SessionManager
*/
private[hive] class SparkGetTableTypesOperation(
val sqlContext: SQLContext,
parentSession: HiveSession)
extends GetTableTypesOperation(parentSession)
with SparkOperation
with Logging {
override def runInternal(): Unit = {
statementId = UUID.randomUUID().toString
val logMsg = "Listing table types"
logInfo(s"$logMsg with $statementId")
setState(OperationState.RUNNING)
// Always use the latest class loader provided by executionHive's state.
val executionHiveClassLoader = sqlContext.sharedState.jarClassLoader
Thread.currentThread().setContextClassLoader(executionHiveClassLoader)
if (isAuthV2Enabled) {
authorizeMetaGets(HiveOperationType.GET_TABLETYPES, null)
}
HiveThriftServer2.eventManager.onStatementStart(
statementId,
parentSession.getSessionHandle.getSessionId.toString,
logMsg,
statementId,
parentSession.getUsername)
try {
val tableTypes = CatalogTableType.tableTypes.map(tableTypeString).toSet
tableTypes.foreach { tableType =>
rowSet.addRow(Array[AnyRef](tableType))
}
setState(OperationState.FINISHED)
} catch {
case e: Throwable =>
logError(s"Error executing get table types operation with $statementId", e)
setState(OperationState.ERROR)
e match {
case hiveException: HiveSQLException =>
HiveThriftServer2.eventManager.onStatementError(
statementId, hiveException.getMessage, SparkUtils.exceptionString(hiveException))
throw hiveException
case _ =>
val root = ExceptionUtils.getRootCause(e)
HiveThriftServer2.eventManager.onStatementError(
statementId, root.getMessage, SparkUtils.exceptionString(root))
throw new HiveSQLException("Error getting table types: " + root.toString, root)
}
}
HiveThriftServer2.eventManager.onStatementFinish(statementId)
}
}
|
ConeyLiu/spark
|
sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetTableTypesOperation.scala
|
Scala
|
apache-2.0
| 3,476
|
package com.socrata.geospace.lib.shapefile
import java.io.File
import com.socrata.geospace.lib.Utils._
import com.socrata.geospace.lib.errors.InvalidShapefileSet
import com.typesafe.scalalogging.slf4j.Logging
import org.opengis.referencing.NoSuchAuthorityCodeException
import org.geoscript.feature._
import org.geoscript.layer._
import org.geoscript.projection._
import org.geotools.factory.Hints
import org.geoscript.projection.Projection
import org.geotools.referencing.ReferencingFactoryFinder
import org.opengis.referencing.crs.CoordinateReferenceSystem
import org.geoscript.feature.schemaBuilder._
trait ShapeReader extends Logging {
def read(file: File): AnyRef
def validate(file: File): AnyRef
/**
   * Performs feature and schema reprojections; does not handle exceptions.
*/
final def doProjections(projection: Projection, file: File): (Traversable[Feature], Schema) = {
val shapeFile = Shapefile(file)
try{
logger.info("Reprojecting shapefile schema and {} features to {}",
shapeFile.features.size.toString,
projection.getName)
logMemoryUsage("Before reprojecting features...")
var i = 0
// projecting features
val features: Traversable[Feature] = shapeFile.features.map { feature =>
i += 1
if (i % 1000 == 0) checkFreeMemAndDie(runGC = true)
reproject(feature, projection)
}
// projecting schema
val schema: Schema = reproject(shapeFile.schema, projection)
logMemoryUsage("Done with reprojection")
(features, schema)
} finally {
// Geotools holds a lock on the .shp file if the above blows up.
// Releasing resources cleanly in case of an exception.
// TODO : We still aren't 100% sure this actually works
shapeFile.getDataStore.dispose
}
}
/**
* Provides the projection object necessary to re-project when parsing shape-files
*/
def getTargetProjection(epsgCode: String, forceLonLat: Boolean): Either[InvalidShapefileSet, Projection] =
try {
val hints = if (forceLonLat) new Hints(Hints.FORCE_LONGITUDE_FIRST_AXIS_ORDER, true) else new Hints()
val factory = ReferencingFactoryFinder.getCRSAuthorityFactory("EPSG", hints)
Right(factory.createCoordinateReferenceSystem(epsgCode))
} catch {
case notFound: NoSuchAuthorityCodeException =>
Left(InvalidShapefileSet("Unable to find target projection: " + notFound.getMessage))
}
}
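// Hedged usage sketch (not from the original file): a throwaway ShapeReader used only
// to resolve a target projection. The "EPSG:4326" code string and the pass-through
// read/validate bodies are assumptions made for illustration.
object ProjectionLookupExample extends ShapeReader {
  def read(file: File): AnyRef = file
  def validate(file: File): AnyRef = file
  // Left(InvalidShapefileSet(...)) if the EPSG code is unknown, Right(projection) otherwise
  def wgs84LonLat: Either[InvalidShapefileSet, Projection] =
    getTargetProjection("EPSG:4326", forceLonLat = true)
}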
|
socrata-platform/geospace
|
src/main/scala/com/socrata/geospace/lib/shapefile/ShapeFileReader.scala
|
Scala
|
apache-2.0
| 2,506
|
package pregnaware.database
import java.time.LocalDate
import akka.util.Timeout
import pregnaware.{DbTest, UnitSpec}
import pregnaware.user.entities.WrappedFriend
import scala.concurrent.duration._
import scala.concurrent.{Future, Await, ExecutionContext}
import scala.util.{Failure, Success}
class FriendTest extends UnitSpec {
self =>
private implicit val timeout = 10.seconds
private implicit val executor = scala.concurrent.ExecutionContext.global
private val dbWrapper = new DatabaseWrapper() {
override implicit def executor: ExecutionContext = self.executor
override implicit def timeout: Timeout = self.timeout
}
"User" should "be added" taggedAs(DbTest) in {
val user1 = Await.result(dbWrapper.addUser("TEST_1", "TEST_EMAIL_1", "TEST_PASSWORD_1"), timeout)
user1.displayName should be("TEST_1")
user1.email should be("TEST_EMAIL_1")
user1.passwordHash should be("TEST_PASSWORD_1")
user1.babyNames should be(empty)
user1.friends should be(empty)
user1.dueDate should not be defined
val user2 = Await.result(dbWrapper.addUser("TEST_2", "TEST_EMAIL_2", "TEST_PASSWORD_2"), timeout)
user2.displayName should be("TEST_2")
user2.email should be("TEST_EMAIL_2")
user2.passwordHash should be("TEST_PASSWORD_2")
user2.babyNames should be(empty)
user2.friends should be(empty)
user2.dueDate should not be defined
}
"User" should "add friend" taggedAs(DbTest) in {
val user1 = Await.result(dbWrapper.getUser("TEST_EMAIL_1"), timeout).get
val user2 = Await.result(dbWrapper.getUser("TEST_EMAIL_2"), timeout).get
// Add due dates and names to the users, and ensure they cannot be accessed by another user
// until the friendship is confirmed
val user1DueDate = LocalDate.of(2016, 1, 1)
val user2DueDate = LocalDate.of(2016, 2, 2)
Await.result(dbWrapper.setDueDate(user1.userId, user1DueDate), timeout)
Await.result(dbWrapper.setDueDate(user2.userId, user2DueDate), timeout)
Await.result(dbWrapper.addName(user1.userId, user1.userId, "NAME1", true), timeout)
Await.result(dbWrapper.addName(user2.userId, user2.userId, "NAME2", true), timeout)
val friendToBe : WrappedFriend = Await.result(dbWrapper.addFriend(user1.userId, user2.userId), timeout)
friendToBe.userId should be(user2.userId)
friendToBe.displayName should be ("TEST_2")
friendToBe.email should be ("TEST_EMAIL_2")
friendToBe.dueDate should not be defined
friendToBe.babyNames should be (empty)
val user1PostFriend = Await.result(dbWrapper.getUser("TEST_EMAIL_1"), timeout).get
val user2PostFriend = Await.result(dbWrapper.getUser("TEST_EMAIL_2"), timeout).get
user1PostFriend.friendRequestsReceived should be (empty)
user1PostFriend.friendRequestsSent should not be empty
user1PostFriend.friendRequestsSent should have size 1
user1PostFriend.friendRequestsSent.head.userId should be (user2.userId)
user1PostFriend.friendRequestsSent.head.displayName should be ("TEST_2")
user1PostFriend.friendRequestsSent.head.email should be ("TEST_EMAIL_2")
user1PostFriend.friendRequestsSent.head.dueDate should not be defined
user1PostFriend.friendRequestsSent.head.babyNames should be (empty)
user2PostFriend.friendRequestsSent should be (empty)
user2PostFriend.friendRequestsReceived should not be empty
user2PostFriend.friendRequestsReceived should have size 1
user2PostFriend.friendRequestsReceived.head.userId should be (user1.userId)
user2PostFriend.friendRequestsReceived.head.displayName should be ("TEST_1")
user2PostFriend.friendRequestsReceived.head.email should be ("TEST_EMAIL_1")
user2PostFriend.friendRequestsReceived.head.dueDate should not be defined
user2PostFriend.friendRequestsReceived.head.babyNames should be (empty)
// User2 now confirms User1 as a friend
val confirmedFriend = Await.result(dbWrapper.confirmFriend(user2.userId, user1.userId), timeout)
confirmedFriend.userId should be (user1.userId)
confirmedFriend.displayName should be ("TEST_1")
val user1PostFriendAccept = Await.result(dbWrapper.getUser("TEST_EMAIL_1"), timeout).get
val user2PostFriendAccept = Await.result(dbWrapper.getUser("TEST_EMAIL_2"), timeout).get
user1PostFriendAccept.friends should not be empty
user1PostFriendAccept.friends should have size 1
user1PostFriendAccept.friends.head.userId should be (user2PostFriend.userId)
user1PostFriendAccept.friends.head.displayName should be ("TEST_2")
user1PostFriendAccept.friends.head.email should be ("TEST_EMAIL_2")
user1PostFriendAccept.friends.head.dueDate should be (Some(user2DueDate))
user1PostFriendAccept.friends.head.babyNames.head.name should be ("NAME2")
user1PostFriendAccept.friendRequestsSent should be (empty)
user1PostFriendAccept.friendRequestsReceived should be (empty)
user2PostFriendAccept.friends should not be empty
user2PostFriendAccept.friends should have size 1
user2PostFriendAccept.friends.head.userId should be (user1PostFriend.userId)
user2PostFriendAccept.friends.head.displayName should be ("TEST_1")
user2PostFriendAccept.friends.head.email should be ("TEST_EMAIL_1")
user2PostFriendAccept.friends.head.dueDate should be (Some(user1DueDate))
user2PostFriendAccept.friends.head.babyNames.head.name should be ("NAME1")
user2PostFriendAccept.friendRequestsSent should be (empty)
user2PostFriendAccept.friendRequestsReceived should be (empty)
}
"User" should "delete friend" taggedAs(DbTest) in {
val user1 = Await.result(dbWrapper.getUser("TEST_EMAIL_1"), timeout).get
val user2 = Await.result(dbWrapper.getUser("TEST_EMAIL_2"), timeout).get
Await.ready(dbWrapper.deleteFriend(user1.userId, user2.userId), timeout)
val user1PostFriendDelete = Await.result(dbWrapper.getUser("TEST_EMAIL_1"), timeout).get
val user2PostFriendDelete = Await.result(dbWrapper.getUser("TEST_EMAIL_2"), timeout).get
user1PostFriendDelete.friends should be(empty)
user2PostFriendDelete.friends should be(empty)
}
// This cleans up the users created in this test
"Users" should "be deleted" taggedAs(DbTest) in {
Seq("TEST_EMAIL_1", "TEST_EMAIL_2").foreach { email =>
val deleteFut = dbWrapper.getUser(email).flatMap {
case None => Future.successful(())
case Some(user) => dbWrapper.deleteUser(user.userId)
}
Await.ready(deleteFut, timeout)
Await.result(dbWrapper.getUser(email), timeout) should not be defined
}
}
}
|
jds106/pregnaware
|
service/src/test/scala/pregnaware/database/FriendTest.scala
|
Scala
|
mit
| 6,540
|
/*
* Contributions:
* Jean-Francois GUENA: implement "suffixed collection name" feature (issue #39 partially fulfilled)
* ...
*/
package akka.contrib.persistence.mongodb
class CasbahJournalLoadSpec extends JournalLoadSpec(classOf[CasbahPersistenceExtension],"casbah")
//class CasbahSuffixJournalLoadSpec extends JournalLoadSpec(classOf[CasbahPersistenceExtension],"casbah", SuffixCollectionNamesTest.extendedConfig)
|
alari/akka-persistence-mongo
|
casbah/src/test/scala/akka/contrib/persistence/mongodb/CasbahJournalLoadSpec.scala
|
Scala
|
apache-2.0
| 424
|
package com.twitter.finagle.protobuf.rpc
import java.util.concurrent._
import java.util.concurrent.atomic._
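// NOTE: this snippet relies on several imports that are not shown here: the specs2
// SpecificationWithJUnit base class, finagle's ClientBuilder/ServerBuilder,
// com.twitter.util.Duration, the protobuf RpcCallback/RpcChannel/RpcController types,
// and the generated WeatherService message classes.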
object RpcProtobufSpec extends SpecificationWithJUnit {
def CLIENT_TIMEOUT_SECONDS = 1
def THREAD_COUNT = 40
def REQ_PER_THREAD = 100
def port = 8080
def executorService = Executors.newFixedThreadPool(4)
def factory = new RpcFactoryImpl()
def serverBuilder = ServerBuilder.get().maxConcurrentRequests(10)
def clientBuilder = ClientBuilder
.get()
.hosts(String.format("localhost:%s", port.toString()))
.hostConnectionLimit(1)
.retries(2)
.requestTimeout(Duration(CLIENT_TIMEOUT_SECONDS, TimeUnit.SECONDS))
"A client" should {
val totalRequests = new AtomicInteger()
val service = new SampleWeatherServiceImpl(80, null)
val server = factory.createServer(serverBuilder.asInstanceOf[ServerBuilder[(String, com.google.protobuf.Message),(String, com.google.protobuf.Message),Any,Any,Any]], port, service, executorService)
val stub = factory.createStub(clientBuilder.asInstanceOf[ClientBuilder[(String, com.google.protobuf.Message),(String, com.google.protobuf.Message),Any,Any,Any]], WeatherService.newStub(null).asInstanceOf[{ def newStub(c: RpcChannel): WeatherService }], executorService)
val finishBarrier = new CyclicBarrier(THREAD_COUNT + 1)
val startBarrier = new CyclicBarrier(THREAD_COUNT)
for (i <- 0 until THREAD_COUNT) {
new Thread(new Runnable() {
def run() {
startBarrier.await();
try {
for (k <- 0 until REQ_PER_THREAD) {
makeRequest(service, stub, totalRequests)
}
}
finally {
finishBarrier.await(60l, TimeUnit.SECONDS)
}
}
}).start()
}
finishBarrier.await(60l, TimeUnit.SECONDS)
server.close(Duration(1, TimeUnit.SECONDS))
"receive THREAD_COUNT * REQ_PER_THREAD responses." in {
THREAD_COUNT * REQ_PER_THREAD mustEqual totalRequests.get()
}
}
def makeRequest(service: SampleWeatherServiceImpl, stub: WeatherService, totalRequests: AtomicInteger) {
val controller = factory.createController().asInstanceOf[RpcControllerWithOnFailureCallback]
val l = new java.util.concurrent.CountDownLatch(1);
val request = GetWeatherForecastRequest.newBuilder().setZip("80301").build()
stub.getWeatherForecast(controller.onFailure(new RpcCallback[Throwable]() {
def run(e: Throwable) {
l.countDown()
}
}), request, new RpcCallback[GetWeatherForecastResponse]() {
def run(resp: GetWeatherForecastResponse) {
totalRequests.incrementAndGet()
l.countDown()
}
});
l.await(CLIENT_TIMEOUT_SECONDS + 2, TimeUnit.SECONDS)
}
}
class SampleWeatherServiceImpl(val temperature: Int, val getHistoricWeather: Callable[Any]) extends WeatherService {
def getTemperature() = temperature
def getWeatherForecast(controller: RpcController, request: GetWeatherForecastRequest, done:
RpcCallback[GetWeatherForecastResponse]) {
done.run(GetWeatherForecastResponse.newBuilder().setTemp(temperature).build())
}
def getHistoricWeather(controller: RpcController, request: GetHistoricWeatherRequest,
done: RpcCallback[GetHistoricWeatherResponse]) {
if (getHistoricWeather != null) {
getHistoricWeather.call()
}
}
}
|
jamescway/finagle
|
finagle-protobuf/src/test/scala/com/twitter/finagle/protobuf/rpc/LoadSpec.scala
|
Scala
|
apache-2.0
| 3,754
|
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
/**
 * Trait that causes the nested suites of any suite it is mixed into to be run sequentially even if
 * a <code>Distributor</code> is passed to <code>runNestedSuites</code>. This trait overrides the
 * <code>runNestedSuites</code> method and forwards every parameter passed to it to a superclass invocation
 * of <code>runNestedSuites</code>, except it always passes <code>None</code> for the <code>Distributor</code>.
 * Mix this trait into any suite whose nested suites need to be run sequentially even when the rest of the
 * run is being executed concurrently.
*/
trait SequentialNestedSuiteExecution extends SuiteMixin { this: Suite =>
/**
   * This trait's implementation of <code>runNestedSuites</code> invokes <code>runNestedSuites</code> on <code>super</code>,
* passing in <code>None</code> for the <code>Distributor</code>.
*
* @param args the <code>Args</code> for this run
* @return a <code>Status</code> object that indicates when all nested suites started by this method have completed, and whether or not a failure occurred.
*
* @throws NullPointerException if any passed parameter is <code>null</code>.
*/
abstract override protected def runNestedSuites(args: Args): Status = {
if (args == null)
throw new NullPointerException("args was null")
    super.runNestedSuites(args.copy(distributor = None))
}
}
|
hubertp/scalatest
|
src/main/scala/org/scalatest/SequentialNestedSuiteExecution.scala
|
Scala
|
apache-2.0
| 1,973
|
/**
* Copyright (C) 2020 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.xforms
import cats.data.NonEmptyList
import cats.syntax.option._
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Future, Promise}
import org.scalajs.macrotaskexecutor.MacrotaskExecutor.Implicits._
import scala.scalajs.js.timers
import scala.scalajs.js.timers.SetTimeoutHandle
// Basic event queue
//
// - holds events until they are scheduled
// - sends them in groups via `eventsReady` when the schedule is ready
// - handles two different delays: "incremental" and "non-incremental" events
//
// This implementation removes support for multiple "forms", specifically the ability to
// update the queue with remaining events. Instead, use multiple queues to handle multiple
// forms.
trait AjaxEventQueue[EventType] {
import Private._
// The event queue calls this when events are ready, that is when a delay has passed
// and they are ready to be dispatched as a group. The implementation of this function
// can return some of the events to the queue.
def eventsReady(eventsReversed: NonEmptyList[EventType]): Unit
def canSendEvents: Boolean
// Configurable delays
val shortDelay : FiniteDuration
val incrementalDelay: FiniteDuration
def newestEventTime: Long = state.newestEventTime // used by heartbeat only
def eventsReversed : List[EventType] = state.events
def isEmpty : Boolean = state.events.isEmpty
def addEventAndUpdateQueueSchedule(event: EventType, incremental: Boolean): Unit = {
addEvent(event, incremental)
updateQueueSchedule()
}
def updateQueueSchedule(): Unit =
state = state.copy(
schedule = updatedQueueSchedule(state.schedule) match {
case newScheduleOpt @ Some(EventSchedule(_, _, done)) =>
done foreach { _ =>
if (canSendEvents) {
val events = state.events
state = emptyState
NonEmptyList.fromList(events) foreach eventsReady
} else {
// We expect that once `canSendEvents` becomes true again, `updateQueueSchedule()`
// will be called again. A new schedule will be created then.
state = state.copy(schedule = None)
}
}
newScheduleOpt
case None =>
state.schedule
}
)
def debugScheduledTime: Option[Long] =
state.schedule map (_.time)
def debugPrintEventQueue(): Unit =
println(s"Event queue: ${state.events.reverse mkString ", "}")
object Private {
case class EventSchedule(
handle : SetTimeoutHandle,
time : Long,
done : Future[Unit]
)
case class State(
events : List[EventType],
hasNonIncremental: Boolean,
oldestEventTime : Long,
newestEventTime : Long,
schedule : Option[EventSchedule]
)
def emptyState: State =
State(
events = Nil,
hasNonIncremental = false,
oldestEventTime = 0,
newestEventTime = 0,
schedule = None
)
var state: State = emptyState
def addEvent(event: EventType, incremental: Boolean): Unit = {
val currentTime = System.currentTimeMillis()
state = state.copy(
events = event :: state.events,
hasNonIncremental = state.hasNonIncremental | ! incremental,
oldestEventTime = if (state.events.isEmpty) currentTime else state.oldestEventTime,
newestEventTime = currentTime
)
}
// Return `None` if we don't need to create a new schedule
def updatedQueueSchedule(existingSchedule: Option[EventSchedule]): Option[EventSchedule] = {
val currentTime = System.currentTimeMillis()
val newScheduleTimeAtLeastCurrentTime = {
val newScheduleDelay =
if (state.hasNonIncremental)
shortDelay
else
incrementalDelay
(state.oldestEventTime + newScheduleDelay.toMillis) max currentTime
}
// There is only *one* timer set at a time at most
def createNewSchedule = {
val p = Promise[Unit]()
EventSchedule(
handle = timers.setTimeout(newScheduleTimeAtLeastCurrentTime - currentTime) { p.success(())},
time = newScheduleTimeAtLeastCurrentTime,
done = p.future
)
}
existingSchedule match {
case Some(existingSchedule) if newScheduleTimeAtLeastCurrentTime < existingSchedule.time =>
timers.clearTimeout(existingSchedule.handle)
createNewSchedule.some
case None if state.events.nonEmpty =>
createNewSchedule.some
case _ =>
None
}
}
}
}
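// Minimal illustrative implementation (not part of the original file): a queue that
// simply prints each batch. The delays, the object name and the String event type
// are assumptions made for this sketch.
object PrintingEventQueue extends AjaxEventQueue[String] {
  import java.util.concurrent.TimeUnit
  val shortDelay       : FiniteDuration = FiniteDuration(100, TimeUnit.MILLISECONDS)
  val incrementalDelay : FiniteDuration = FiniteDuration(500, TimeUnit.MILLISECONDS)
  def canSendEvents: Boolean = true
  def eventsReady(eventsReversed: NonEmptyList[String]): Unit =
    println(s"dispatching ${eventsReversed.size} event(s): ${eventsReversed.toList.reverse.mkString(", ")}")
}
// Usage: PrintingEventQueue.addEventAndUpdateQueueSchedule("value-changed", incremental = true)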
|
orbeon/orbeon-forms
|
xforms-web/src/main/scala/org/orbeon/xforms/AjaxEventQueue.scala
|
Scala
|
lgpl-2.1
| 5,343
|
package org.yotchang4s.ch2.thread
import org.yotchang4s.ch2._
import org.yotchang4s.ch2.board.BoardId
case class ThreadId(value: (BoardId, String)) extends Identity[(BoardId, String)]
trait Thread extends Entity[ThreadId] {
val subject: String
val resCount: Int
}
|
yotchang4s/gikolet
|
src/org/yotchang4s/ch2/thread/Thread.scala
|
Scala
|
bsd-3-clause
| 270
|
package com.twitter.finagle.http
import com.twitter.finagle.{ChannelException, FailureFlags}
import java.net.SocketAddress
/** The Message was too long to be handled correctly */
final class TooLongMessageException private(
ex: Option[Exception],
remote: Option[SocketAddress],
private[finagle] val flags: Long)
extends ChannelException(ex, remote) with FailureFlags[TooLongMessageException] {
protected def copyWithFlags(flags: Long): TooLongMessageException =
new TooLongMessageException(ex, remote, flags)
}
object TooLongMessageException {
def apply(ex: Exception, remote: SocketAddress): TooLongMessageException =
new TooLongMessageException(Option(ex), Option(remote), FailureFlags.NonRetryable)
def apply(ex: Exception): TooLongMessageException = apply(ex, null)
}
|
koshelev/finagle
|
finagle-base-http/src/main/scala/com/twitter/finagle/http/TooLongMessageException.scala
|
Scala
|
apache-2.0
| 804
|
package tadp_grupo5
case class Sucursal (volumenDeposito : Int, pais : String){
var paquetesPorSalir : List[Paquete] = List()
var paquetesPorEntrar : List[Paquete] = List()
var transportes : List[Transporte] = List()
var enviosRealizados: List [Envio] = List()
var pedidosPendientes: List [Paquete] = List()
def capacidad : Int = volumenDeposito - paquetesPorEntrar.map(_.volumen).sum - paquetesPorSalir.map(_.volumen).sum
  def actualizarTransportes(transporteAnterior: Transporte, transporteNuevo: Transporte) = { // replace the old transport reference with a new one
    var filtrados : List[Transporte] = transportes.filterNot(_.equals(transporteAnterior)) // drop the transport's previous state
    transportes = filtrados :+ transporteNuevo // add the transport's new state
}
def esCasaCentral : Boolean = this match {
case _ : CasaCentral => true
case _ => false
}
def asignarPaquete(paquete: Paquete) = {
var transportesValidos: List[Transporte] = filtrarTransportesValidos(paquete,transportePuedeLlevar)
if(!transportesValidos.isEmpty){
var trans: Transporte = transportesValidos.head.agregarPedido(paquete)
actualizarTransportes(transportesValidos.head, trans)
      pedidosPendientes = pedidosPendientes.filterNot(_.equals(paquete)) // remove the order from the pending list
}
else pedidosPendientes = pedidosPendientes :+ paquete
}
def asignarPendientes()= {
if(!pedidosPendientes.isEmpty) pedidosPendientes.foreach(x => asignarPaquete(x))
}
def notificarPaqueteAEntrar(paquete : Paquete) {
validarCapacidad(paquete)
paquetesPorEntrar = paquetesPorEntrar :+ paquete
}
def notificarPaqueteASalir(paquete : Paquete) {
validarCapacidad(paquete)
paquetesPorSalir = paquetesPorSalir :+ paquete
asignarPaquete(paquete)
asignarPendientes
}
def validarCapacidad(paquete : Paquete) = if (capacidad < paquete.volumen) throw new SucursalSinCapacidad()
def descargarEnvio(envio: Envio) = {
for (pedido <- envio.paquetes) descargarPedido(pedido)
if(envio.sucursalOrigen == this){
enviosRealizados = enviosRealizados :+ envio
var unTransporte = envio.transporte.vaciarTransporte
actualizarTransportes(envio.transporte, unTransporte)
}
}
def descargarPedido(pedido : Paquete){
if(pedido.sucursalDestino == this){
paquetesPorEntrar = paquetesPorEntrar.filterNot(_== pedido)
} else paquetesPorSalir = paquetesPorSalir.filterNot(_== pedido)
}
def filtrarTransportes(f: Transporte => Boolean): List[Transporte] = {
transportes.filter(x => f(x))
}
val transporteCargado: Transporte => Boolean = !_.pedidos.isEmpty
val transportePuedeLlevar: (Transporte,Paquete) => Boolean = (transporte,paquete) =>
try { transporte.puedeLlevar(paquete) }
catch {
case tex : TransporteException => false
case ex : Exception => throw ex
}
def filtrarTransportesValidos : (Paquete, (Transporte,Paquete) => Boolean) => List[Transporte] = {
(paquete,f) => for { transporte <- transportes if(f(transporte,paquete))} yield transporte
}
def despacharEnvios = {
filtrarTransportes(transporteCargado).foreach(_.hacerEnvio)
}
}
class CasaCentral(volumenDeposito : Int, override val pais : String) extends Sucursal(volumenDeposito, pais)
case class SucursalSinCapacidad() extends Exception
|
JonaC22/TADP_2C2014_GRUPO_5
|
TP2_Scala/Objeto-Funcional/src/tadp_grupo5/Sucursal.scala
|
Scala
|
mit
| 3,427
|
/**
* Licensed to the Minutemen Group under one or more contributor license
* agreements. See the COPYRIGHT file distributed with this work for
* additional information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package silhouette.provider.oauth2
import java.net.URI
import java.nio.file.Paths
import silhouette.http.BodyWrites._
import silhouette.http.Method.GET
import silhouette.http.client.{ Request, Response }
import silhouette.http.{ Body, Status }
import silhouette.provider.oauth2.InstagramProvider._
import silhouette.provider.oauth2.OAuth2Provider.UnexpectedResponse
import silhouette.provider.social.SocialProvider.ProfileError
import silhouette.provider.social.{ CommonSocialProfile, ProfileRetrievalException }
import silhouette.specs2.BaseFixture
import silhouette.{ ConfigURI, LoginInfo }
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
/**
* Test case for the [[InstagramProvider]] class.
*/
class InstagramProviderSpec extends OAuth2ProviderSpec {
"The `retrieveProfile` method" should {
"fail with ProfileRetrievalException if API returns error" in new Context {
val apiResult = ErrorJson.asJson
val httpResponse = Response(Status.`Bad Request`, Body.from(apiResult))
val httpRequest = Request(GET, DefaultApiURI.format(oAuth2Info.accessToken))
httpClient.execute(httpRequest) returns Future.successful(httpResponse)
failed[ProfileRetrievalException](provider.retrieveProfile(oAuth2Info)) {
case e =>
e.getMessage must equalTo(ProfileError.format(provider.id))
e.getCause.getMessage must equalTo(UnexpectedResponse.format(
provider.id,
apiResult,
Status.`Bad Request`
))
}
}
"fail with ProfileRetrievalException if an unexpected error occurred" in new Context {
val httpResponse = mock[Response].smart
val httpRequest = Request(GET, DefaultApiURI.format(oAuth2Info.accessToken))
httpResponse.status returns Status.`Internal Server Error`
httpResponse.body throws new RuntimeException("")
httpClient.execute(httpRequest) returns Future.successful(httpResponse)
failed[ProfileRetrievalException](provider.retrieveProfile(oAuth2Info)) {
case e => e.getMessage must equalTo(ProfileError.format(ID))
}
}
"use the overridden API URI" in new Context {
val uri = DefaultApiURI.copy(uri = DefaultApiURI.uri + "&new")
val apiResult = UserProfileJson.asJson
val httpResponse = Response(Status.OK, Body.from(apiResult))
val httpRequest = Request(GET, uri.format(oAuth2Info.accessToken))
config.apiURI returns Some(uri)
httpClient.execute(httpRequest) returns Future.successful(httpResponse)
await(provider.retrieveProfile(oAuth2Info))
there was one(httpClient).execute(httpRequest)
}
"return the social profile" in new Context {
val apiResult = UserProfileJson.asJson
val httpResponse = Response(Status.OK, Body.from(apiResult))
val httpRequest = Request(GET, DefaultApiURI.format(oAuth2Info.accessToken))
httpClient.execute(httpRequest) returns Future.successful(httpResponse)
profile(provider.retrieveProfile(oAuth2Info)) { p =>
p must be equalTo CommonSocialProfile(
loginInfo = LoginInfo(provider.id, "1574083"),
fullName = Some("Apollonia Vanova"),
avatarUri = Some(new URI("http://distillery.s3.amazonaws.com/profiles/profile_1574083_75sq_1295469061.jpg"))
)
}
}
}
/**
* Defines the context for the abstract OAuth2 provider spec.
*
* @return The Context to use for the abstract OAuth2 provider spec.
*/
override protected def context: BaseContext = new Context {}
/**
* The context.
*/
trait Context extends BaseContext {
/**
* Paths to the Json fixtures.
*/
override val ErrorJson = BaseFixture.load(Paths.get("instagram.error.json"))
override val AccessTokenJson = BaseFixture.load(Paths.get("instagram.access.token.json"))
override val UserProfileJson = BaseFixture.load(Paths.get("instagram.profile.json"))
/**
* The OAuth2 config.
*/
override lazy val config = spy(OAuth2Config(
authorizationURI = Some(ConfigURI("https://api.instagram.com/oauth/authorize")),
accessTokenURI = ConfigURI("https://api.instagram.com/oauth/access_token"),
redirectURI = Some(ConfigURI("https://minutemen.group")),
clientID = "my.client.id",
clientSecret = "my.client.secret",
scope = Some("basic")
))
/**
* The provider to test.
*/
lazy val provider = new InstagramProvider(httpClient, stateHandler, clock, config)
}
}
|
mohiva/silhouette
|
modules/provider-oauth2/src/test/scala/silhouette/provider/oauth2/InstagramProviderSpec.scala
|
Scala
|
apache-2.0
| 5,290
|