code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/* sbt -- Simple Build Tool
* Copyright 2009 Mark Harrah
*/
package xsbt
import java.lang.{Process => JProcess, ProcessBuilder => JProcessBuilder}
import java.io.{Closeable, File, IOException}
import java.io.{BufferedReader, InputStream, InputStreamReader, OutputStream, PipedInputStream, PipedOutputStream}
import java.net.URL
import OutputStreamBuilder.{fileInput, fileOutput, urlInput}
/** Methods for constructing simple commands that can then be combined. */
object Process
{
  // Implicit conversions allow plain strings, sequences, files, URLs and XML
  // elements to be used wherever a ProcessBuilder is expected (e.g. "ls" #| "grep x").
  implicit def apply(command: String): ProcessBuilder = apply(command, None)
  implicit def apply(command: Seq[String]): ProcessBuilder = apply (command.toArray, None)
  /** Creates a ProcessBuilder from a command name and an explicit argument list. */
  def apply(command: String, arguments: Seq[String]): ProcessBuilder = apply(command :: arguments.toList, None)
  /** create ProcessBuilder with working dir set to File and extra environment variables */
  def apply(command: String, cwd: File, extraEnv: (String,String)*): ProcessBuilder =
    apply(command, Some(cwd), extraEnv : _*)
  /** create ProcessBuilder with working dir optionally set to File and extra environment variables.
   * NOTE(review): the command string is split on runs of whitespace, so arguments
   * that themselves contain spaces cannot be expressed through this overload —
   * use the Seq[String] overload for those. */
  def apply(command: String, cwd: Option[File], extraEnv: (String,String)*): ProcessBuilder =
    apply(command.split("""\s+"""), cwd, extraEnv : _*)
  /** create ProcessBuilder with working dir optionally set to File and extra environment variables */
  def apply(command: Seq[String], cwd: Option[File], extraEnv: (String,String)*): ProcessBuilder = {
    val jpb = new JProcessBuilder(command.toArray : _*)
    cwd.foreach(jpb directory _)
    extraEnv.foreach { case (k, v) => jpb.environment.put(k, v) }
    apply(jpb)
  }
  /** Wraps an already-configured java.lang.ProcessBuilder. */
  implicit def apply(builder: JProcessBuilder): ProcessBuilder = new SimpleProcessBuilder(builder)
  implicit def apply(file: File): FilePartialBuilder = new FileBuilder(file)
  implicit def apply(url: URL): URLPartialBuilder = new URLBuilder(url)
  /** Interprets the trimmed text content of an XML element as a command line. */
  implicit def apply(command: scala.xml.Elem): ProcessBuilder = apply(command.text.trim)
  implicit def applySeq[T](builders: Seq[T])(implicit convert: T => SourcePartialBuilder): Seq[SourcePartialBuilder] = builders.map(convert)
  /** A dummy process that does nothing and exits with code 0 (for true) or 1 (for false). */
  def apply(value: Boolean): ProcessBuilder = apply(value.toString, if(value) 0 else 1)
  /** A dummy process with the given name whose exit code is computed on demand. */
  def apply(name: String, exitValue: => Int): ProcessBuilder = new DummyProcessBuilder(name, exitValue)
  /** Concatenates the given sources, analogous to the Unix `cat` command. */
  def cat(file: SourcePartialBuilder, files: SourcePartialBuilder*): ProcessBuilder = cat(file :: files.toList)
  def cat(files: Seq[SourcePartialBuilder]): ProcessBuilder =
  {
    require(!files.isEmpty)
    // #&& chains the individual cats so that a failure stops the remaining sources.
    files.map(_.cat).reduceLeft(_ #&& _)
  }
}
/** A partially constructed command that can act as a source of output, which
 * may be redirected to a file/stream or piped into another process.
 * NOTE(review): `NotNull` is from an old Scala version (deprecated and later
 * removed); this code predates that removal. */
trait SourcePartialBuilder extends NotNull
{
  /** Writes the output stream of this process to the given file. */
  def #> (f: File): ProcessBuilder = toFile(f, false)
  /** Appends the output stream of this process to the given file. */
  def #>> (f: File): ProcessBuilder = toFile(f, true)
  /** Writes the output stream of this process to the given OutputStream. The
   * argument is call-by-name, so the stream is recreated, written, and closed each
   * time this process is executed. */
  def #>(out: => OutputStream): ProcessBuilder = #> (new OutputStreamBuilder(out))
  /** Pipes the output stream of this process into the input of `b`. */
  def #>(b: ProcessBuilder): ProcessBuilder = SequentialProcessBuilder.piped(toSource, b, false, true)
  // Shared implementation for #> and #>> (append flag selects the mode).
  private def toFile(f: File, append: Boolean) = #> (fileOutput(f, append))
  /** This source as a runnable process that emits its contents on standard output. */
  def cat = toSource
  protected def toSource: ProcessBuilder
}
/** A partially constructed command that can act as a sink, accepting input
 * from a file, URL, stream, or another process. */
trait SinkPartialBuilder extends NotNull
{
  /** Reads the given file into the input stream of this process. */
  def #< (f: File): ProcessBuilder = #< (fileInput(f))
  /** Reads the given URL into the input stream of this process. */
  def #< (f: URL): ProcessBuilder = #< (urlInput(f))
  /** Reads the given InputStream into the input stream of this process. The
   * argument is call-by-name, so the stream is recreated, read, and closed each
   * time this process is executed. */
  def #<(in: => InputStream): ProcessBuilder = #< (new InputStreamBuilder(in))
  /** Pipes the output of `b` into the input stream of this process. */
  def #<(b: ProcessBuilder): ProcessBuilder = SequentialProcessBuilder.piped(b, toSink, false, false)
  protected def toSink: ProcessBuilder
}
/** A URL can only act as an input source. */
trait URLPartialBuilder extends SourcePartialBuilder
/** A file can act as both an input source and an output sink; #<< appends
 * the given source to this file instead of overwriting it. */
trait FilePartialBuilder extends SinkPartialBuilder with SourcePartialBuilder
{
  def #<<(f: File): ProcessBuilder
  def #<<(u: URL): ProcessBuilder
  def #<<(i: => InputStream): ProcessBuilder
  def #<<(p: ProcessBuilder): ProcessBuilder
}
/** Represents a process that is running or has finished running.
 * It may be a compound process with several underlying native processes (such as 'a #&& b`).*/
trait Process extends NotNull
{
  /** Blocks until this process exits and returns the exit code.*/
  def exitValue(): Int
  /** Destroys this process. */
  def destroy(): Unit
}
/** Represents a runnable process. */
trait ProcessBuilder extends SourcePartialBuilder with SinkPartialBuilder
{
  /** Starts the process represented by this builder, blocks until it exits, and returns the exit code. Standard output and error are
  * sent to the console.*/
  def ! : Int
  /** Starts the process represented by this builder, blocks until it exits, and returns the exit code. Standard output and error are
  * sent to the console. The newly started process reads from standard input of the current process if `connectInput` is true.*/
  def !< : Int
  /** Starts the process represented by this builder. Standard output and error are sent to the console.*/
  def run(): Process
  /** Starts the process represented by this builder. I/O is handled by the given ProcessIO instance.*/
  def run(io: ProcessIO): Process
  /** Starts the process represented by this builder. Standard output and error are sent to the console.
  * The newly started process reads from standard input of the current process if `connectInput` is true.*/
  def run(connectInput: Boolean): Process
  /** Constructs a command that runs this command first and then `other` if this command succeeds.*/
  def #&& (other: ProcessBuilder): ProcessBuilder
  /** Constructs a command that runs this command first and then `other` if this command does not succeed.*/
  def #|| (other: ProcessBuilder): ProcessBuilder
  /** Constructs a command that will run this command and pipes the output to `other`. `other` must be a simple command.*/
  def #| (other: ProcessBuilder): ProcessBuilder
  /** Constructs a command that will run this command and then `other`. The exit code will be the exit code of `other`.*/
  def ## (other: ProcessBuilder): ProcessBuilder
  /** True if this builder can be the target of a pipe (the right side of #|). */
  def canPipeTo: Boolean
}
/** Each method will be called in a separate thread.
 * Removed the dataset-metadata text that was fused onto the closing brace of
 * this class in the source dump — it was not valid Scala.
 * @param writeInput feeds the process's standard input; receives the process's OutputStream
 * @param processOutput consumes the process's standard output stream
 * @param processError consumes the process's standard error stream
 */
final class ProcessIO(val writeInput: OutputStream => Unit, val processOutput: InputStream => Unit, val processError: InputStream => Unit) extends NotNull
{
  /** Returns a copy of this ProcessIO with the standard output handler replaced. */
  def withOutput(process: InputStream => Unit): ProcessIO = new ProcessIO(writeInput, process, processError)
  /** Returns a copy of this ProcessIO with the standard error handler replaced. */
  def withError(process: InputStream => Unit): ProcessIO = new ProcessIO(writeInput, processOutput, process)
  /** Returns a copy of this ProcessIO with the standard input writer replaced. */
  def withInput(write: OutputStream => Unit): ProcessIO = new ProcessIO(write, processOutput, processError)
}
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn.ops
import com.intel.analytics.bigdl.dllib.nn.abstractnn.Activity
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath._
import com.intel.analytics.bigdl.dllib.utils.{T, Table}
import com.intel.analytics.bigdl.dllib.utils.{Engine, OptimizerV1, OptimizerV2}
import scala.concurrent.Future
import scala.reflect.ClassTag
/**
 * This operation creates a new tensor by replicating input multiples times.
 * The output tensor's i'th dimension has input.dims(i) * multiples[i] elements,
 * and the values of input are replicated multiples[i] times along the 'i'th dimension.
 *
 * For example, tiling [a b c d] by [1, 2] produces [a b c d a b c d].
 *
 * @param ev numeric implementation for element type T
 * @tparam T Numeric type. Only support float/double now
 */
class Tile[T: ClassTag]()(implicit ev: TensorNumeric[T])
  extends Operation[Table, Tensor[_], T] {

  // Per-replica copy tasks submitted to the model thread pool; lazily
  // (re)allocated whenever the multiple for the current dimension changes.
  @transient
  private var results: Array[Future[Unit]] = _

  /** Expects a Table of (input tensor, 1-D Int tensor of per-dimension multiples). */
  def updateOutput(inputs: Table): Tensor[_] = {
    val input = inputs[Tensor[Tensor[NumericWildcard]]](1)
    val multiples = inputs[Tensor[Int]](2)
    // An empty multiples tensor means "no tiling": pass the input through.
    if (multiples.isEmpty) {
      output = input
      return output
    }

    require(input.nDimension() == multiples.size(1),
      "Length of multiples must be the same as the number of dimensions in input")

    output.asInstanceOf[Tensor[Tensor[NumericWildcard]]].resizeAs(input).copy(input)
    // Tile one dimension at a time; each pass uses the previous pass's result
    // (a clone of `output`) as the copy source while `output` grows in place.
    for (j <- 1 to input.nDimension()) {
      val currentOutput = output.clone()
      val mult = multiples(Array(j))
      val newSize = output.size()
      newSize(j - 1) = newSize(j - 1) * mult
      output.resize(newSize)
      var offset = 1
      var i = 0
      while (i < mult) {
        val _offset = offset
        if (results == null || results.length != mult) {
          results = new Array[Future[Unit]](mult)
        }
        // NOTE(review): the futures stored in `results` are never awaited in this
        // method — verify that Engine.model.invoke completes (or is joined)
        // before `output` is consumed downstream.
        results(i) = Engine.model.invoke(() => {
          val target = this.output.narrow(j, _offset,
            currentOutput.size(j))
          if (target.isContiguous() || j > 2) {
            // Copy directly when target is Contiguous or dimension is larger than 2
            // in which case the contiguous region in target tensor is fairly small in practice
            target.asInstanceOf[Tensor[NumericWildcard]]
              .copy(currentOutput.asInstanceOf[Tensor[NumericWildcard]])
          } else {
            // Divide target into contiguous frames when target isn't contiguous
            var f = 1
            while (f <= target.size(1)) {
              val curFrame = target.select(1, f)
              val outputFrame = currentOutput.select(1, f)
              require(curFrame.isContiguous())
              require(outputFrame.isContiguous())
              curFrame.asInstanceOf[Tensor[NumericWildcard]]
                .copy(outputFrame.asInstanceOf[Tensor[NumericWildcard]])
              f += 1
            }
          }
        })
        i += 1
        offset += currentOutput.size(j)
      }
    }
    output
  }
}
object Tile {
  /** Builds a [[Tile]] instance wrapped as a generic Activity-to-Activity operation. */
  def apply[T: ClassTag]()(implicit ev: TensorNumeric[T]): Operation[Activity, Activity, T] = {
    val tile = new Tile[T]()
    ModuleToOperation[T](tile)
  }
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/nn/ops/Tile.scala | Scala | apache-2.0 | 3,787 |
package controllers
import java.util.UUID
import javax.inject.Inject
import com.mohiva.play.silhouette.api.{Environment, Silhouette}
import com.mohiva.play.silhouette.impl.authenticators.JWTAuthenticator
import forms.BookClubForm
import models.services.BookClubService
import models.{BookClub, User}
import play.api.i18n.MessagesApi
import play.api.libs.concurrent.Execution.Implicits._
import play.api.libs.json.Json
import scala.concurrent.Future
/** REST controller for book clubs: list, fetch, and create — all endpoints
 * require a Silhouette JWT-authenticated user. */
class BookClubController @Inject()(val bookClubService: BookClubService,
                                   val messagesApi: MessagesApi,
                                   val env: Environment[User, JWTAuthenticator])
  extends Silhouette[User, JWTAuthenticator] {

  import models.BookClub.bookClubFormat
  import models.Book.bookFormat
  import models.User.jsonFormat

  /** Lists the book clubs visible to the authenticated user as JSON. */
  def index = SecuredAction.async { implicit request =>
    bookClubService.list(request.identity.userID).map { bookClubs =>
      Ok(Json.toJson(bookClubs))
    }
  }

  /** Returns a single book club with its members and books, or 404 if absent. */
  def find(id: UUID) = SecuredAction.async { implicit request =>
    bookClubService.find(id)(request.identity.userID).map {
      case (Some(bookClub), users, books) => {
        Ok(Json.obj("bookClub" -> bookClub, "users" -> users, "books" -> books))
      }
      case _ => NotFound
    }.recover {
      // NOTE(review): the underlying exception is discarded here; consider logging it.
      case error => InternalServerError("BookClub controller error")
    }
  }

  /** Creates a book club from the JSON request body, owned by the current user. */
  def create = SecuredAction.async(parse.json) { implicit request =>
    request.body.validate[BookClubForm.Data].map { bookData =>
      val bookClub = BookClub(
        id = UUID.randomUUID,
        name = bookData.name,
        about = bookData.about)
      for {
        createdBookClub <- bookClubService.save(bookClub)(request.identity.userID)
      } yield {
        Ok(Json.toJson(createdBookClub))
      }
    }.recoverTotal {
      // Fixed copy-paste error message: this endpoint creates a book club, not a book.
      case error =>
        Future.successful(InternalServerError("Can't create book club"))
    }
  }
}
| slopyjoe/book-club | app/controllers/BookClubController.scala | Scala | apache-2.0 | 1,914 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.redis.data.util
import java.nio.charset.StandardCharsets
import org.locationtech.geomesa.index.metadata.{KeyValueStoreMetadata, MetadataSerializer}
import org.locationtech.geomesa.utils.collection.CloseableIterator
import org.locationtech.geomesa.utils.io.WithClose
import redis.clients.jedis.JedisPool
/**
 * Redis-backed metadata implementation. All metadata rows for a table are
 * stored as fields of a single redis hash keyed by the table name.
 *
 * @param connection connection pool
 * @param table metadata table name (also the redis hash key)
 * @param serializer serializer for metadata values
 * @tparam T metadata value type
 */
class RedisBackedMetadata[T](connection: JedisPool, table: String, val serializer: MetadataSerializer[T])
  extends KeyValueStoreMetadata[T] {

  import scala.collection.JavaConverters._

  // The single redis hash key under which all metadata rows for this table live.
  val key: Array[Byte] = table.getBytes(StandardCharsets.UTF_8)

  /** Writes the given (field, value) pairs into the hash, batching multi-row writes. */
  override protected def write(rows: Seq[(Array[Byte], Array[Byte])]): Unit = {
    if (rows.lengthCompare(1) == 0) {
      // single row - skip building the intermediate map
      val (k, v) = rows.head
      WithClose(connection.getResource)(_.hset(key, k, v))
    } else {
      val map = new java.util.HashMap[Array[Byte], Array[Byte]](rows.size)
      rows.foreach { case (k, v) => map.put(k, v) }
      WithClose(connection.getResource)(_.hset(key, map))
    }
  }

  /** Deletes the given fields from the hash. */
  override protected def delete(rows: Seq[Array[Byte]]): Unit =
    WithClose(connection.getResource)(_.hdel(key, rows: _*))

  /** Reads a single field, or None if it does not exist. */
  override protected def scanValue(row: Array[Byte]): Option[Array[Byte]] =
    Option(WithClose(connection.getResource)(_.hget(key, row)))

  /** Scans all fields, optionally filtered by a key prefix.
   * NOTE(review): HGETALL loads the entire hash into memory before filtering —
   * presumably fine for metadata-sized hashes, but worth confirming. */
  override protected def scanRows(prefix: Option[Array[Byte]]): CloseableIterator[(Array[Byte], Array[Byte])] = {
    val all = WithClose(connection.getResource)(_.hgetAll(key)).asScala.iterator
    prefix match {
      case None => CloseableIterator(all)
      case Some(p) => CloseableIterator(all.filter { case (k, _) => k.startsWith(p) })
    }
  }

  /** Creates a backup metadata instance pointing at a timestamped table name. */
  override protected def createEmptyBackup(timestamp: String): RedisBackedMetadata[T] =
    new RedisBackedMetadata(connection, s"${table}_${timestamp}_bak", serializer)

  // Redis hashes are created implicitly on first write, so there is nothing
  // to check or create, and the pooled connections are owned by the caller.
  override protected def checkIfTableExists: Boolean = true
  override protected def createTable(): Unit = {}
  override def close(): Unit = {}
}
| elahrvivaz/geomesa | geomesa-redis/geomesa-redis-datastore/src/main/scala/org/locationtech/geomesa/redis/data/util/RedisBackedMetadata.scala | Scala | apache-2.0 | 2,643 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.batch.sql
import java.sql.{Date, Time, Timestamp}
import java.util
import org.apache.flink.api.scala._
import org.apache.flink.api.scala.util.CollectionDataSets
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.{TableEnvironment, ValidationException}
import org.apache.flink.table.expressions.utils.SplitUDF
import org.apache.flink.table.functions.ScalarFunction
import org.apache.flink.table.runtime.batch.table.OldHashCode
import org.apache.flink.table.runtime.utils.TableProgramsTestBase.TableConfigMode
import org.apache.flink.table.runtime.utils.{TableProgramsCollectionTestBase, TableProgramsTestBase}
import org.apache.flink.test.util.TestBaseUtils
import org.apache.flink.types.Row
import org.junit._
import org.junit.Assert.assertEquals
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import scala.collection.JavaConverters._
/**
 * Integration tests for SQL Calc (projection / filter / scalar-expression)
 * translation on the batch Table API. Each test registers a dataset as a
 * table, runs a SQL query, and compares the collected result as text.
 */
@RunWith(classOf[Parameterized])
class CalcITCase(
    configMode: TableConfigMode)
  extends TableProgramsCollectionTestBase(configMode) {

  @Test
  def testSelectStarFromTable(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)

    val sqlQuery = "SELECT * FROM MyTable"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv).as('a, 'b, 'c)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "1,1,Hi\\n" + "2,2,Hello\\n" + "3,2,Hello world\\n" +
      "4,3,Hello world, how are you?\\n" + "5,3,I am fine.\\n" + "6,3,Luke Skywalker\\n" +
      "7,4,Comment#1\\n" + "8,4,Comment#2\\n" + "9,4,Comment#3\\n" + "10,4,Comment#4\\n" +
      "11,5,Comment#5\\n" + "12,5,Comment#6\\n" + "13,5,Comment#7\\n" + "14,5,Comment#8\\n" +
      "15,5,Comment#9\\n" + "16,6,Comment#10\\n" + "17,6,Comment#11\\n" + "18,6,Comment#12\\n" +
      "19,6,Comment#13\\n" + "20,6,Comment#14\\n" + "21,6,Comment#15\\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  @Test
  def testSelectStarFromNestedTable(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)

    val sqlQuery = "SELECT * FROM MyTable"

    val ds = CollectionDataSets.getSmallNestedTupleDataSet(env).toTable(tEnv).as('a, 'b)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "(1,1),one\\n" + "(2,2),two\\n" + "(3,3),three\\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  @Test
  def testSelectStarFromDataSet(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)

    val sqlQuery = "SELECT * FROM MyTable"

    // Registers the DataSet directly (no intermediate Table) with named fields.
    val ds = CollectionDataSets.get3TupleDataSet(env)
    tEnv.registerDataSet("MyTable", ds, 'a, 'b, 'c)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "1,1,Hi\\n" + "2,2,Hello\\n" + "3,2,Hello world\\n" +
      "4,3,Hello world, how are you?\\n" + "5,3,I am fine.\\n" + "6,3,Luke Skywalker\\n" +
      "7,4,Comment#1\\n" + "8,4,Comment#2\\n" + "9,4,Comment#3\\n" + "10,4,Comment#4\\n" +
      "11,5,Comment#5\\n" + "12,5,Comment#6\\n" + "13,5,Comment#7\\n" + "14,5,Comment#8\\n" +
      "15,5,Comment#9\\n" + "16,6,Comment#10\\n" + "17,6,Comment#11\\n" + "18,6,Comment#12\\n" +
      "19,6,Comment#13\\n" + "20,6,Comment#14\\n" + "21,6,Comment#15\\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  @Test
  def testSimpleSelectAll(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)

    val sqlQuery = "SELECT a, b, c FROM MyTable"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv).as('a, 'b, 'c)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "1,1,Hi\\n" + "2,2,Hello\\n" + "3,2,Hello world\\n" +
      "4,3,Hello world, how are you?\\n" + "5,3,I am fine.\\n" + "6,3,Luke Skywalker\\n" +
      "7,4,Comment#1\\n" + "8,4,Comment#2\\n" + "9,4,Comment#3\\n" + "10,4,Comment#4\\n" +
      "11,5,Comment#5\\n" + "12,5,Comment#6\\n" + "13,5,Comment#7\\n" + "14,5,Comment#8\\n" +
      "15,5,Comment#9\\n" + "16,6,Comment#10\\n" + "17,6,Comment#11\\n" + "18,6,Comment#12\\n" +
      "19,6,Comment#13\\n" + "20,6,Comment#14\\n" + "21,6,Comment#15\\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  @Test
  def testSelectWithNaming(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)

    // Exercises quoted identifiers containing special and non-ASCII characters.
    val sqlQuery = "SELECT `1-_./Ü`, b FROM (SELECT _1 as `1-_./Ü`, _2 as b FROM MyTable)"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "1,1\\n" + "2,2\\n" + "3,2\\n" + "4,3\\n" + "5,3\\n" + "6,3\\n" + "7,4\\n" +
      "8,4\\n" + "9,4\\n" + "10,4\\n" + "11,5\\n" + "12,5\\n" + "13,5\\n" + "14,5\\n" + "15,5\\n" +
      "16,6\\n" + "17,6\\n" + "18,6\\n" + "19,6\\n" + "20,6\\n" + "21,6\\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  @Test(expected = classOf[ValidationException])
  def testInvalidFields(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)

    // 'foo' is not a registered field, so validation must fail.
    val sqlQuery = "SELECT a, foo FROM MyTable"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv).as('a, 'b, 'c)
    tEnv.registerTable("MyTable", ds)

    tEnv.sqlQuery(sqlQuery)
  }

  @Test
  def testAllRejectingFilter(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)

    val sqlQuery = "SELECT * FROM MyTable WHERE false"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "\\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  @Test
  def testAllPassingFilter(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)

    val sqlQuery = "SELECT * FROM MyTable WHERE true"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "1,1,Hi\\n" + "2,2,Hello\\n" + "3,2,Hello world\\n" + "4,3,Hello world, " +
      "how are you?\\n" + "5,3,I am fine.\\n" + "6,3,Luke Skywalker\\n" + "7,4," +
      "Comment#1\\n" + "8,4,Comment#2\\n" + "9,4,Comment#3\\n" + "10,4,Comment#4\\n" + "11,5," +
      "Comment#5\\n" + "12,5,Comment#6\\n" + "13,5,Comment#7\\n" + "14,5,Comment#8\\n" + "15,5," +
      "Comment#9\\n" + "16,6,Comment#10\\n" + "17,6,Comment#11\\n" + "18,6,Comment#12\\n" + "19," +
      "6,Comment#13\\n" + "20,6,Comment#14\\n" + "21,6,Comment#15\\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  @Test
  def testFilterOnString(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)

    val sqlQuery = "SELECT * FROM MyTable WHERE c LIKE '%world%'"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv).as('a, 'b, 'c)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "3,2,Hello world\\n" + "4,3,Hello world, how are you?\\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  @Test
  def testFilterOnInteger(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)

    val sqlQuery = "SELECT * FROM MyTable WHERE MOD(a,2)=0"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv).as('a, 'b, 'c)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "2,2,Hello\\n" + "4,3,Hello world, how are you?\\n" +
      "6,3,Luke Skywalker\\n" + "8,4," + "Comment#2\\n" + "10,4,Comment#4\\n" +
      "12,5,Comment#6\\n" + "14,5,Comment#8\\n" + "16,6," +
      "Comment#10\\n" + "18,6,Comment#12\\n" + "20,6,Comment#14\\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  @Test
  def testDisjunctivePredicate(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)

    val sqlQuery = "SELECT * FROM MyTable WHERE a < 2 OR a > 20"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv).as('a, 'b, 'c)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "1,1,Hi\\n" + "21,6,Comment#15\\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  @Test
  def testFilterWithAnd(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)

    val sqlQuery = "SELECT * FROM MyTable WHERE MOD(a,2)<>0 AND MOD(b,2)=0"

    val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv).as('a, 'b, 'c)
    tEnv.registerTable("MyTable", ds)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "3,2,Hello world\\n" + "7,4,Comment#1\\n" +
      "9,4,Comment#3\\n" + "17,6,Comment#11\\n" +
      "19,6,Comment#13\\n" + "21,6,Comment#15\\n"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  @Test
  def testAdvancedDataTypes(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)

    // Mixes temporal fields from the dataset with temporal SQL literals.
    val sqlQuery = "SELECT a, b, c, DATE '1984-07-12', TIME '14:34:24', " +
      "TIMESTAMP '1984-07-12 14:34:24' FROM MyTable"

    val ds = env.fromElements((
      Date.valueOf("1984-07-12"),
      Time.valueOf("14:34:24"),
      Timestamp.valueOf("1984-07-12 14:34:24")))
    tEnv.registerDataSet("MyTable", ds, 'a, 'b, 'c)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "1984-07-12,14:34:24,1984-07-12 14:34:24.0," +
      "1984-07-12,14:34:24,1984-07-12 14:34:24.0"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  @Test
  def testValueConstructor(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)

    val sqlQuery = "SELECT (a, b, c), ARRAY[12, b], MAP[a, c] FROM MyTable " +
      "WHERE (a, b, c) = ('foo', 12, TIMESTAMP '1984-07-12 14:34:24')"

    val rowValue = ("foo", 12, Timestamp.valueOf("1984-07-12 14:34:24"))

    val ds = env.fromElements(rowValue)
    tEnv.registerDataSet("MyTable", ds, 'a, 'b, 'c)

    val result = tEnv.sqlQuery(sqlQuery)

    val expected = "foo,12,1984-07-12 14:34:24.0,[12, 12],{foo=1984-07-12 14:34:24.0}"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)

    // Compare actual object to avoid undetected Calcite flattening
    val resultRow = results.asJava.get(0)
    assertEquals(rowValue._1, resultRow.getField(0).asInstanceOf[Row].getField(0))
    assertEquals(rowValue._2, resultRow.getField(1).asInstanceOf[Array[Integer]](1))
    assertEquals(rowValue._3,
      resultRow.getField(2).asInstanceOf[util.Map[String, Timestamp]].get(rowValue._1))
  }

  @Test
  def testUserDefinedScalarFunction(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)

    // Registering twice under the same name: the second registration wins.
    tEnv.registerFunction("hashCode", OldHashCode)
    tEnv.registerFunction("hashCode", MyHashCode)

    val ds = env.fromElements("a", "b", "c")
    tEnv.registerDataSet("MyTable", ds, 'text)

    val result = tEnv.sqlQuery("SELECT hashCode(text) FROM MyTable")

    val expected = "97\\n98\\n99"

    val results = result.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  @Test
  def testFunctionWithUnicodeParameters(): Unit = {
    val data = List(
      ("a\\u0001b", "c\\"d", "e\\\\\\"\\u0004f"), // uses Java/Scala escaping
      ("x\\u0001y", "y\\"z", "z\\\\\\"\\u0004z")
    )

    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env)

    val splitUDF0 = new SplitUDF(deterministic = true)
    val splitUDF1 = new SplitUDF(deterministic = false)

    tEnv.registerFunction("splitUDF0", splitUDF0)
    tEnv.registerFunction("splitUDF1", splitUDF1)

    // uses SQL escaping (be aware that even Scala multi-line strings parse backslash!)
    val sqlQuery = s"""
      |SELECT
      | splitUDF0(a, U&'${'\\\\'}0001', 0) AS a0,
      | splitUDF1(a, U&'${'\\\\'}0001', 0) AS a1,
      | splitUDF0(b, U&'"', 1) AS b0,
      | splitUDF1(b, U&'"', 1) AS b1,
      | splitUDF0(c, U&'${'\\\\'}${'\\\\'}"${'\\\\'}0004', 0) AS c0,
      | splitUDF1(c, U&'${'\\\\'}"#0004' UESCAPE '#', 0) AS c1
      |FROM T1
      |""".stripMargin

    val t1 = env.fromCollection(data).toTable(tEnv, 'a, 'b, 'c)

    tEnv.registerTable("T1", t1)

    val results = tEnv.sqlQuery(sqlQuery).toDataSet[Row].collect()

    val expected = List("a,a,d,d,e,e", "x,x,z,z,z,z").mkString("\\n")
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }
}
/** Scalar UDF returning the JVM hash code of its string argument. */
object MyHashCode extends ScalarFunction {
  def eval(s: String): Int = {
    val input: String = s
    input.hashCode
  }
}
object CalcITCase {

  /** Supplies the table-config variations that parameterize the suite. */
  @Parameterized.Parameters(name = "Table config = {0}")
  def parameters(): util.Collection[Array[java.lang.Object]] = {
    val configs: Seq[Array[AnyRef]] = Seq(
      Array(TableProgramsTestBase.DEFAULT),
      Array(TableProgramsTestBase.NO_NULL))
    configs.asJava
  }
}
| yew1eb/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/batch/sql/CalcITCase.scala | Scala | apache-2.0 | 15,020 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.connector
import java.util
import java.util.Collections
import test.org.apache.spark.sql.connector.catalog.functions.{JavaAverage, JavaLongAdd, JavaStrLen}
import test.org.apache.spark.sql.connector.catalog.functions.JavaLongAdd.{JavaLongAddDefault, JavaLongAddMagic, JavaLongAddMismatchMagic, JavaLongAddStaticMagic}
import test.org.apache.spark.sql.connector.catalog.functions.JavaStrLen._
import org.apache.spark.SparkException
import org.apache.spark.sql.{AnalysisException, Row}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.CodegenObjectFactoryMode.{FALLBACK, NO_CODEGEN}
import org.apache.spark.sql.connector.catalog.{BasicInMemoryTableCatalog, Identifier, InMemoryCatalog, SupportsNamespaces}
import org.apache.spark.sql.connector.catalog.functions.{AggregateFunction, _}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
class DataSourceV2FunctionSuite extends DatasourceV2SQLBase {
  // Shared empty property map used when creating catalog namespaces.
  private val emptyProps: util.Map[String, String] = Collections.emptyMap[String, String]

  /** Registers `fn` under `ident` in the in-memory 'testcat' catalog. */
  private def addFunction(ident: Identifier, fn: UnboundFunction): Unit = {
    catalog("testcat").asInstanceOf[InMemoryCatalog].createFunction(ident, fn)
  }
  // Calling an unregistered catalog function should fail analysis.
  test("undefined function") {
    assert(intercept[AnalysisException](
      sql("SELECT testcat.non_exist('abc')").collect()
    ).getMessage.contains("Undefined function"))
  }

  // Function lookup against a catalog that is not a FunctionCatalog must fail.
  test("non-function catalog") {
    withSQLConf("spark.sql.catalog.testcat" -> classOf[BasicInMemoryTableCatalog].getName) {
      assert(intercept[AnalysisException](
        sql("SELECT testcat.strlen('abc')").collect()
      ).getMessage.contains("is not a FunctionCatalog"))
    }
  }

  test("built-in with non-function catalog should still work") {
    withSQLConf(SQLConf.DEFAULT_CATALOG.key -> "testcat",
      "spark.sql.catalog.testcat" -> classOf[BasicInMemoryTableCatalog].getName) {
      checkAnswer(sql("SELECT length('abc')"), Row(3))
    }
  }

  test("built-in with default v2 function catalog") {
    withSQLConf(SQLConf.DEFAULT_CATALOG.key -> "testcat") {
      checkAnswer(sql("SELECT length('abc')"), Row(3))
    }
  }

  test("looking up higher-order function with non-session catalog") {
    checkAnswer(sql("SELECT transform(array(1, 2, 3), x -> x + 1)"),
      Row(Array(2, 3, 4)) :: Nil)
  }

  test("built-in override with default v2 function catalog") {
    // a built-in function with the same name should take higher priority
    withSQLConf(SQLConf.DEFAULT_CATALOG.key -> "testcat") {
      addFunction(Identifier.of(Array.empty, "length"), new JavaStrLen(new JavaStrLenNoImpl))
      checkAnswer(sql("SELECT length('abc')"), Row(3))
    }
  }

  // If the catalog function were selected, JavaStrLenNoImpl would throw;
  // a correct answer proves the built-in won.
  test("built-in override with non-session catalog") {
    addFunction(Identifier.of(Array.empty, "length"), new JavaStrLen(new JavaStrLenNoImpl))
    checkAnswer(sql("SELECT length('abc')"), Row(3))
  }

  test("temp function override with default v2 function catalog") {
    val className = "test.org.apache.spark.sql.JavaStringLength"
    sql(s"CREATE FUNCTION length AS '$className'")

    withSQLConf(SQLConf.DEFAULT_CATALOG.key -> "testcat") {
      addFunction(Identifier.of(Array.empty, "length"), new JavaStrLen(new JavaStrLenNoImpl))
      checkAnswer(sql("SELECT length('abc')"), Row(3))
    }
  }

  test("view should use captured catalog and namespace for function lookup") {
    val viewName = "my_view"
    withView(viewName) {
      // Create the view while testcat/ns with a working my_avg is current.
      withSQLConf(SQLConf.DEFAULT_CATALOG.key -> "testcat") {
        catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
        addFunction(Identifier.of(Array("ns"), "my_avg"), IntegralAverage)
        sql("USE ns")
        sql(s"CREATE TEMPORARY VIEW $viewName AS SELECT my_avg(col1) FROM values (1), (2), (3)")
      }
      // change default catalog and namespace and add a function with the same name but with no
      // implementation
      withSQLConf(SQLConf.DEFAULT_CATALOG.key -> "testcat2") {
        catalog("testcat2").asInstanceOf[SupportsNamespaces]
          .createNamespace(Array("ns2"), emptyProps)
        addFunction(Identifier.of(Array("ns2"), "my_avg"), NoImplAverage)
        sql("USE ns2")
        // The view must still resolve my_avg from its captured catalog/namespace.
        checkAnswer(sql(s"SELECT * FROM $viewName"), Row(2.0) :: Nil)
      }
    }
  }

  test("scalar function: with default produceResult method") {
    catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
    addFunction(Identifier.of(Array("ns"), "strlen"), StrLen(StrLenDefault))
    checkAnswer(sql("SELECT testcat.ns.strlen('abc')"), Row(3) :: Nil)
  }
test("scalar function: with default produceResult method w/ expression") {
catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
addFunction(Identifier.of(Array("ns"), "strlen"), StrLen(StrLenDefault))
checkAnswer(sql("SELECT testcat.ns.strlen(substr('abcde', 3))"), Row(3) :: Nil)
}
test("scalar function: lookup magic method") {
catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
addFunction(Identifier.of(Array("ns"), "strlen"), StrLen(StrLenMagic))
checkAnswer(sql("SELECT testcat.ns.strlen('abc')"), Row(3) :: Nil)
}
test("scalar function: lookup magic method w/ expression") {
catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
addFunction(Identifier.of(Array("ns"), "strlen"), StrLen(StrLenMagic))
checkAnswer(sql("SELECT testcat.ns.strlen(substr('abcde', 3))"), Row(3) :: Nil)
}
test("scalar function: bad magic method") {
catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
addFunction(Identifier.of(Array("ns"), "strlen"), StrLen(StrLenBadMagic))
assert(intercept[SparkException](sql("SELECT testcat.ns.strlen('abc')").collect())
.getMessage.contains("Cannot find a compatible"))
}
test("scalar function: bad magic method with default impl") {
catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
addFunction(Identifier.of(Array("ns"), "strlen"), StrLen(StrLenBadMagicWithDefault))
checkAnswer(sql("SELECT testcat.ns.strlen('abc')"), Row(3) :: Nil)
}
test("scalar function: no implementation found") {
catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
addFunction(Identifier.of(Array("ns"), "strlen"), StrLen(StrLenNoImpl))
intercept[SparkException](sql("SELECT testcat.ns.strlen('abc')").collect())
}
test("scalar function: invalid parameter type or length") {
catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
addFunction(Identifier.of(Array("ns"), "strlen"), StrLen(StrLenDefault))
assert(intercept[AnalysisException](sql("SELECT testcat.ns.strlen(42)"))
.getMessage.contains("Expect StringType"))
assert(intercept[AnalysisException](sql("SELECT testcat.ns.strlen('a', 'b')"))
.getMessage.contains("Expect exactly one argument"))
}
test("scalar function: default produceResult in Java") {
catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
addFunction(Identifier.of(Array("ns"), "strlen"),
new JavaStrLen(new JavaStrLenDefault))
checkAnswer(sql("SELECT testcat.ns.strlen('abc')"), Row(3) :: Nil)
}
test("scalar function: magic method in Java") {
catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
addFunction(Identifier.of(Array("ns"), "strlen"),
new JavaStrLen(new JavaStrLenMagic))
checkAnswer(sql("SELECT testcat.ns.strlen('abc')"), Row(3) :: Nil)
}
test("scalar function: static magic method in Java") {
catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
addFunction(Identifier.of(Array("ns"), "strlen"),
new JavaStrLen(new JavaStrLenStaticMagic))
checkAnswer(sql("SELECT testcat.ns.strlen('abc')"), Row(3) :: Nil)
}
test("scalar function: magic method should take higher precedence in Java") {
catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
addFunction(Identifier.of(Array("ns"), "strlen"),
new JavaStrLen(new JavaStrLenBoth))
// to differentiate, the static method returns string length + 100
checkAnswer(sql("SELECT testcat.ns.strlen('abc')"), Row(103) :: Nil)
}
test("scalar function: bad static magic method should fallback to non-static") {
catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
addFunction(Identifier.of(Array("ns"), "strlen"),
new JavaStrLen(new JavaStrLenBadStaticMagic))
checkAnswer(sql("SELECT testcat.ns.strlen('abc')"), Row(103) :: Nil)
}
test("scalar function: no implementation found in Java") {
catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
addFunction(Identifier.of(Array("ns"), "strlen"),
new JavaStrLen(new JavaStrLenNoImpl))
assert(intercept[AnalysisException](sql("SELECT testcat.ns.strlen('abc')").collect())
.getMessage.contains("neither implement magic method nor override 'produceResult'"))
}
test("SPARK-35390: scalar function w/ bad input types") {
catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
addFunction(Identifier.of(Array("ns"), "strlen"), StrLen(StrLenBadInputTypes))
assert(intercept[AnalysisException](sql("SELECT testcat.ns.strlen('abc')").collect())
.getMessage.contains("parameters returned from 'inputTypes()'"))
}
test("SPARK-35390: scalar function w/ mismatch type parameters from magic method") {
catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
addFunction(Identifier.of(Array("ns"), "add"), new JavaLongAdd(new JavaLongAddMismatchMagic))
assert(intercept[AnalysisException](sql("SELECT testcat.ns.add(1L, 2L)").collect())
.getMessage.contains("neither implement magic method nor override 'produceResult'"))
}
test("SPARK-35390: scalar function w/ type coercion") {
catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
addFunction(Identifier.of(Array("ns"), "add"), new JavaLongAdd(new JavaLongAddDefault(false)))
addFunction(Identifier.of(Array("ns"), "add2"), new JavaLongAdd(new JavaLongAddMagic(false)))
addFunction(Identifier.of(Array("ns"), "add3"),
new JavaLongAdd(new JavaLongAddStaticMagic(false)))
Seq("add", "add2", "add3").foreach { name =>
checkAnswer(sql(s"SELECT testcat.ns.$name(42, 58)"), Row(100) :: Nil)
checkAnswer(sql(s"SELECT testcat.ns.$name(42L, 58)"), Row(100) :: Nil)
checkAnswer(sql(s"SELECT testcat.ns.$name(42, 58L)"), Row(100) :: Nil)
// can't cast date time interval to long
assert(intercept[AnalysisException](
sql(s"SELECT testcat.ns.$name(date '2021-06-01' - date '2011-06-01', 93)").collect())
.getMessage.contains("due to data type mismatch"))
}
}
test("SPARK-35389: magic function should handle null arguments") {
catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
addFunction(Identifier.of(Array("ns"), "strlen"), new JavaStrLen(new JavaStrLenMagicNullSafe))
addFunction(Identifier.of(Array("ns"), "strlen2"),
new JavaStrLen(new JavaStrLenStaticMagicNullSafe))
Seq("strlen", "strlen2").foreach { name =>
checkAnswer(sql(s"SELECT testcat.ns.$name(CAST(NULL as STRING))"), Row(0) :: Nil)
}
}
test("SPARK-35389: magic function should handle null primitive arguments") {
catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
addFunction(Identifier.of(Array("ns"), "add"), new JavaLongAdd(new JavaLongAddMagic(false)))
addFunction(Identifier.of(Array("ns"), "static_add"),
new JavaLongAdd(new JavaLongAddMagic(false)))
Seq("add", "static_add").foreach { name =>
Seq(true, false).foreach { codegenEnabled =>
val codeGenFactoryMode = if (codegenEnabled) FALLBACK else NO_CODEGEN
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> codegenEnabled.toString,
SQLConf.CODEGEN_FACTORY_MODE.key -> codeGenFactoryMode.toString) {
checkAnswer(sql(s"SELECT testcat.ns.$name(CAST(NULL as BIGINT), 42L)"), Row(null) :: Nil)
checkAnswer(sql(s"SELECT testcat.ns.$name(42L, CAST(NULL as BIGINT))"), Row(null) :: Nil)
checkAnswer(sql(s"SELECT testcat.ns.$name(42L, 58L)"), Row(100) :: Nil)
checkAnswer(sql(s"SELECT testcat.ns.$name(CAST(NULL as BIGINT), CAST(NULL as BIGINT))"),
Row(null) :: Nil)
}
}
}
}
test("bad bound function (neither scalar nor aggregate)") {
catalog("testcat").asInstanceOf[SupportsNamespaces].createNamespace(Array("ns"), emptyProps)
addFunction(Identifier.of(Array("ns"), "strlen"), StrLen(BadBoundFunction))
assert(intercept[AnalysisException](sql("SELECT testcat.ns.strlen('abc')"))
.getMessage.contains("does not implement ScalarFunction or AggregateFunction"))
}
test("aggregate function: lookup int average") {
import testImplicits._
val t = "testcat.ns.t"
withTable(t) {
addFunction(Identifier.of(Array("ns"), "avg"), IntegralAverage)
(1 to 100).toDF("i").write.saveAsTable(t)
checkAnswer(sql(s"SELECT testcat.ns.avg(i) from $t"), Row(50) :: Nil)
}
}
test("aggregate function: lookup long average") {
import testImplicits._
val t = "testcat.ns.t"
withTable(t) {
addFunction(Identifier.of(Array("ns"), "avg"), IntegralAverage)
(1L to 100L).toDF("i").write.saveAsTable(t)
checkAnswer(sql(s"SELECT testcat.ns.avg(i) from $t"), Row(50) :: Nil)
}
}
test("aggregate function: lookup double average in Java") {
import testImplicits._
val t = "testcat.ns.t"
withTable(t) {
addFunction(Identifier.of(Array("ns"), "avg"), new JavaAverage)
Seq(1.toDouble, 2.toDouble, 3.toDouble).toDF("i").write.saveAsTable(t)
checkAnswer(sql(s"SELECT testcat.ns.avg(i) from $t"), Row(2.0) :: Nil)
}
}
test("aggregate function: lookup int average w/ expression") {
import testImplicits._
val t = "testcat.ns.t"
withTable(t) {
addFunction(Identifier.of(Array("ns"), "avg"), IntegralAverage)
(1 to 100).toDF("i").write.saveAsTable(t)
checkAnswer(sql(s"SELECT testcat.ns.avg(i * 10) from $t"), Row(505) :: Nil)
}
}
test("aggregate function: unsupported input type") {
import testImplicits._
val t = "testcat.ns.t"
withTable(t) {
addFunction(Identifier.of(Array("ns"), "avg"), IntegralAverage)
Seq(1.toShort, 2.toShort).toDF("i").write.saveAsTable(t)
assert(intercept[AnalysisException](sql(s"SELECT testcat.ns.avg(i) from $t"))
.getMessage.contains("Unsupported non-integral type: ShortType"))
}
}
test("SPARK-35390: aggregate function w/ type coercion") {
import testImplicits._
withTable("t1", "t2") {
addFunction(Identifier.of(Array("ns"), "avg"), UnboundDecimalAverage)
(1 to 100).toDF().write.saveAsTable("testcat.ns.t1")
checkAnswer(sql("SELECT testcat.ns.avg(value) from testcat.ns.t1"),
Row(BigDecimal(50.5)) :: Nil)
(1 to 100).map(BigDecimal(_)).toDF().write.saveAsTable("testcat.ns.t2")
checkAnswer(sql("SELECT testcat.ns.avg(value) from testcat.ns.t2"),
Row(BigDecimal(50.5)) :: Nil)
// can't cast interval to decimal
assert(intercept[AnalysisException](sql("SELECT testcat.ns.avg(*) from values" +
" (date '2021-06-01' - date '2011-06-01'), (date '2000-01-01' - date '1900-01-01')"))
.getMessage.contains("due to data type mismatch"))
}
}
/**
 * Test helper: an UnboundFunction that binds to the supplied `impl` only when
 * called with exactly one `StringType` argument.
 *
 * The exception messages below are asserted on by the "invalid parameter type
 * or length" test, so they must stay stable.
 */
private case class StrLen(impl: BoundFunction) extends UnboundFunction {
  override def description(): String =
    """strlen: returns the length of the input string
      | strlen(string) -> int""".stripMargin

  override def name(): String = "strlen"

  override def bind(inputType: StructType): BoundFunction = {
    // Reject anything but a single argument (stray trailing semicolon removed).
    if (inputType.fields.length != 1) {
      throw new UnsupportedOperationException("Expect exactly one argument")
    }
    inputType.fields(0).dataType match {
      case StringType => impl
      case _ =>
        throw new UnsupportedOperationException("Expect StringType")
    }
  }
}
/** String-length impl that exercises the default row-based `produceResult` path. */
private case object StrLenDefault extends ScalarFunction[Int] {
  override def name(): String = "strlen_default"
  override def inputTypes(): Array[DataType] = Array(StringType)
  override def resultType(): DataType = IntegerType

  // Read the single string column and return its character count.
  override def produceResult(input: InternalRow): Int = input.getString(0).length
}
/** String-length impl resolved through the `invoke` "magic method" (no produceResult). */
case object StrLenMagic extends ScalarFunction[Int] {
  override def inputTypes(): Array[DataType] = Array(StringType)
  override def resultType(): DataType = IntegerType
  override def name(): String = "strlen_magic"

  // The UTF8String parameter matches Spark's internal string representation,
  // which is what makes this method eligible for magic-method lookup.
  def invoke(input: UTF8String): Int = {
    input.toString.length
  }
}
/**
 * Deliberately broken magic method: the parameter is `java.lang.String` rather
 * than `UTF8String`, so lookup fails (tests assert "Cannot find a compatible").
 */
case object StrLenBadMagic extends ScalarFunction[Int] {
  override def inputTypes(): Array[DataType] = Array(StringType)
  override def resultType(): DataType = IntegerType
  override def name(): String = "strlen_bad_magic"

  def invoke(input: String): Int = {
    input.length
  }
}
/**
 * Broken magic method (String instead of UTF8String) but with a `produceResult`
 * override, so evaluation should fall back to the row-based path.
 */
case object StrLenBadMagicWithDefault extends ScalarFunction[Int] {
  override def inputTypes(): Array[DataType] = Array(StringType)
  override def resultType(): DataType = IntegerType
  // NOTE(review): name duplicates StrLenBadMagic's "strlen_bad_magic" —
  // presumably irrelevant to these tests, but confirm it is intentional.
  override def name(): String = "strlen_bad_magic"

  def invoke(input: String): Int = {
    input.length
  }

  override def produceResult(input: InternalRow): Int = {
    val s = input.getString(0)
    s.length
  }
}
// Provides neither a magic `invoke` method nor a `produceResult` override, so
// any attempt to evaluate it must fail at runtime.
private case object StrLenNoImpl extends ScalarFunction[Int] {
  override def inputTypes(): Array[DataType] = Array(StringType)
  override def resultType(): DataType = IntegerType
  override def name(): String = "strlen_noimpl"
}
// input type doesn't match arguments accepted by `UnboundFunction.bind`
// (bind accepts a single string, but inputTypes() declares two parameters —
// the SPARK-35390 test asserts on the resulting analysis error).
private case object StrLenBadInputTypes extends ScalarFunction[Int] {
  override def inputTypes(): Array[DataType] = Array(StringType, IntegerType)
  override def resultType(): DataType = IntegerType
  override def name(): String = "strlen_bad_input_types"
}
// Implements the raw BoundFunction interface but neither ScalarFunction nor
// AggregateFunction, which the "bad bound function" test expects to be rejected.
private case object BadBoundFunction extends BoundFunction {
  override def inputTypes(): Array[DataType] = Array(StringType)
  override def resultType(): DataType = IntegerType
  override def name(): String = "bad_bound_func"
}
/**
 * Unbound integral average: binds to IntAverage for int input, LongAverage for
 * bigint input, and rejects all other types (the "unsupported input type" test
 * asserts on the message produced below).
 */
object IntegralAverage extends UnboundFunction {
  override def name(): String = "iavg"

  override def bind(inputType: StructType): BoundFunction = {
    if (inputType.fields.length > 1) {
      throw new UnsupportedOperationException("Too many arguments")
    }
    // Guard added: a zero-field struct previously fell through to fields(0)
    // and raised ArrayIndexOutOfBoundsException instead of a useful error.
    if (inputType.fields.isEmpty) {
      throw new UnsupportedOperationException("Expect exactly one argument")
    }
    inputType.fields(0).dataType match {
      case _: IntegerType => IntAverage
      case _: LongType => LongAverage
      case dataType =>
        throw new UnsupportedOperationException(s"Unsupported non-integral type: $dataType")
    }
  }

  override def description(): String =
    """iavg: produces an average using integer division, ignoring nulls
      | iavg(int) -> int
      | iavg(bigint) -> bigint""".stripMargin
}
/**
 * Bound integer average. The aggregation state is (runningTotal, nonNullCount);
 * null inputs are skipped and the result uses integer division.
 */
object IntAverage extends AggregateFunction[(Int, Int), Int] {
  override def name(): String = "iavg"
  override def inputTypes(): Array[DataType] = Array(IntegerType)
  override def resultType(): DataType = IntegerType

  override def newAggregationState(): (Int, Int) = (0, 0)

  override def update(state: (Int, Int), input: InternalRow): (Int, Int) = {
    if (input.isNullAt(0)) {
      state
    } else {
      val value = input.getInt(0)
      val (total, count) = state
      // A fresh state (count == 0) restarts from this value; otherwise accumulate.
      if (count == 0) (value, 1) else (total + value, count + 1)
    }
  }

  override def merge(leftState: (Int, Int), rightState: (Int, Int)): (Int, Int) =
    (leftState._1 + rightState._1, leftState._2 + rightState._2)

  // Integer division; throws ArithmeticException when no non-null rows were seen.
  override def produceResult(state: (Int, Int)): Int = state._1 / state._2
}
/**
 * Bound bigint average. State is (runningTotal, nonNullCount); nulls are
 * skipped and the result uses integer (long) division.
 */
object LongAverage extends AggregateFunction[(Long, Long), Long] {
  override def name(): String = "iavg"
  override def inputTypes(): Array[DataType] = Array(LongType)
  override def resultType(): DataType = LongType

  override def newAggregationState(): (Long, Long) = (0L, 0L)

  override def update(state: (Long, Long), input: InternalRow): (Long, Long) = {
    if (input.isNullAt(0)) {
      state
    } else {
      val l = input.getLong(0)
      state match {
        // Fresh state: start the running total from this value.
        case (_, 0L) =>
          (l, 1)
        case (total, count) =>
          (total + l, count + 1L)
      }
    }
  }

  override def merge(leftState: (Long, Long), rightState: (Long, Long)): (Long, Long) = {
    (leftState._1 + rightState._1, leftState._2 + rightState._2)
  }

  // Long division; throws ArithmeticException when no non-null rows were seen.
  override def produceResult(state: (Long, Long)): Long = state._1 / state._2
}
/**
 * Unbound decimal average used by the SPARK-35390 type-coercion test: accepts
 * any numeric input (coerced to decimal) and, purely for testing, day-time
 * intervals as well.
 */
object UnboundDecimalAverage extends UnboundFunction {
  override def name(): String = "decimal_avg"

  override def bind(inputType: StructType): BoundFunction = {
    if (inputType.fields.length > 1) {
      throw new UnsupportedOperationException("Too many arguments")
    }
    // Guard added: a zero-field struct previously fell through to fields(0)
    // and raised ArrayIndexOutOfBoundsException instead of a useful error.
    if (inputType.fields.isEmpty) {
      throw new UnsupportedOperationException("Expect exactly one argument")
    }
    // put interval type here for testing purpose
    inputType.fields(0).dataType match {
      case _: NumericType | _: DayTimeIntervalType => DecimalAverage
      case dataType =>
        throw new UnsupportedOperationException(s"Unsupported input type: $dataType")
    }
  }

  override def description(): String =
    "decimal_avg: produces an average using decimal division"
}
/**
 * Bound decimal average. State is (runningTotal, nonNullCount); nulls are
 * skipped and the result uses decimal division.
 */
object DecimalAverage extends AggregateFunction[(Decimal, Int), Decimal] {
  override def name(): String = "decimal_avg"
  override def inputTypes(): Array[DataType] = Array(DecimalType.SYSTEM_DEFAULT)
  override def resultType(): DataType = DecimalType.SYSTEM_DEFAULT

  override def newAggregationState(): (Decimal, Int) = (Decimal.ZERO, 0)

  override def update(state: (Decimal, Int), input: InternalRow): (Decimal, Int) = {
    if (input.isNullAt(0)) {
      state
    } else {
      val l = input.getDecimal(0, DecimalType.SYSTEM_DEFAULT.precision,
        DecimalType.SYSTEM_DEFAULT.scale)
      state match {
        // Fresh state: start the running total from this value.
        case (_, d) if d == 0 =>
          (l, 1)
        case (total, count) =>
          (total + l, count + 1)
      }
    }
  }

  override def merge(leftState: (Decimal, Int), rightState: (Decimal, Int)): (Decimal, Int) = {
    (leftState._1 + rightState._1, leftState._2 + rightState._2)
  }

  override def produceResult(state: (Decimal, Int)): Decimal = state._1 / Decimal(state._2)
}
/**
 * Unbound function whose bind() always fails; used to prove that view
 * resolution never reaches this function when the captured one is used.
 */
object NoImplAverage extends UnboundFunction {
  override def name(): String = "no_impl_avg"
  override def description(): String = name()

  override def bind(inputType: StructType): BoundFunction = {
    // Plain literal: the original used an `s"..."` interpolator with nothing
    // to interpolate.
    throw new UnsupportedOperationException("Not implemented")
  }
}
}
| hvanhovell/spark | sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2FunctionSuite.scala | Scala | apache-2.0 | 24,186 |
package feh.tec.rubik
import feh.tec.rubik.RubikCube._
import feh.util._
/**
 * Immutable Rubik's Cube.
 *
 * Equality is defined solely by `cubesPositions`; `parent` (rotation history)
 * and `description` are metadata and deliberately ignored. `hashCode` is
 * overridden to match.
 */
case class RubikCubeInstance[T: WithSideName](cubesPositions: Map[CubeId, CubeWithOrientation[T]],
                                              parent: Option[RubikCubeInstance[T]],
                                              description: RubikCube.Description)
  extends RubikCube[T, RubikCubeInstance[T]]
{
  type ThisType = RubikCubeInstance[T]

  /** View of the cube as side -> (x, y) position -> cube, derived from `cubesPositions`. */
  def rawSides: Map[SideName, Map[(Int, Int), CubeWithOrientation[T]]] =
    SideName.values.toSeq.zipMap{ sideName => RubikCube.sideCubes(sideName).mapValues(cubesPositions) }.toMap

  /** Returns a new instance with `side` rotated 90 degrees, keeping `this` as its parent. */
  def rotate(side: SideName): RubikCubeInstance[T] = {
    val upd = rotateUpdate(side).map{ case Update(c, o, pos) => pos -> CubeWithOrientation(c, o) }
    RubikCubeInstance(cubesPositions ++ upd, Some(this), RubikCube.Rotation(RotationAngle.Rot90, side))
  }

  def findCubeById(cId: CubeId) = cubesPositions.values.find(_.cube.cubeId == cId)

  // Already immutable, so the snapshot is the instance itself.
  def snapshot = this

  override def equals(obj: scala.Any) = canEqual(obj) && (obj match{
    case that: RubikCubeInstance[T] => this.cubesPositions == that.cubesPositions
    // Guard added: the original match had no fallback, so a canEqual-positive
    // value of another runtime type would have thrown a MatchError.
    case _ => false
  })

  override def hashCode() = cubesPositions.hashCode()
}
object RubikCubeInstance{

  /**
   * Mutable wrapper around an immutable [[RubikCubeInstance]]: each rotation
   * replaces the wrapped instance in place, so `this` is returned for chaining.
   */
  class MutableContainer[T](protected var instance: RubikCubeInstance[T]) extends RubikCube[T, MutableContainer[T]]
  {
    type ThisType = MutableContainer[T]

    def set(i: RubikCubeInstance[T]) = instance = i
    def get = instance

    /** rotate a side 90 degrees clockwise */
    def rotate(sideName: SideName) = {
      instance = instance.rotate(sideName)
      this
    }

    def cubesPositions = instance.cubesPositions

    // Snapshot hands out the current immutable instance.
    def snapshot = get
  }
}
} | fehu/int-sis--AStar | rubik/src/main/scala/feh/tec/rubik/RubikCubeInstance.scala | Scala | mit | 1,720 |
package scala.meta.internal.classpath
import java.io.File
import java.io.FilterInputStream
import java.io.InputStream
import java.nio.file.Files
import java.nio.file.Path
import java.util.zip.ZipEntry
import java.util.zip.ZipFile
import scala.collection.mutable
import scala.meta.io.AbsolutePath
/** Represents an entry in a classpath that is either a package, a file on disk or a zip entry. */
sealed abstract class ClasspathElement {
  // Path of this element relative to the classpath root, e.g. "scala/Predef.class".
  def relativeUri: String
}
/** A classpath entry that can be read as an InputStream. */
// Concrete cases: UncompressedClassfile (file on disk) and CompressedClassfile (zip entry).
sealed abstract class Classfile extends ClasspathElement {

  /**
   * Returns an input stream to read the bytes of this classpath entry.
   *
   * @note
   *   The caller is responsible for closing the InputStream.
   */
  def openInputStream(): InputStream
}
/** A classpath entry that is a directory. */
final case class Classdir(relativeUri: String) extends ClasspathElement {

  /**
   * Looks up `filename` inside this package: first among the eagerly
   * registered `members`, then by probing each JEP-220 module root on disk.
   */
  def resolve(filename: String): Option[ClasspathElement] = {
    members.get(filename).orElse {
      val uri = relativeUri + filename
      modules.iterator
        .map(_.resolve(uri))
        .find(Files.exists(_))
        .map(x => UncompressedClassfile(uri, AbsolutePath(x)))
    }
  }

  // Child entries keyed by simple file/package name; mutable because it is
  // populated incrementally while the classpath is indexed.
  val members = mutable.Map.empty[String, ClasspathElement]

  /**
   * Java 9+ modules for this package based on JEP-220
   *
   * Details: https://bugs.openjdk.java.net/browse/JDK-8066492
   *
   * For example, the package "java/lang/" will have a module "/modules/java.base" which is the root
   * directory containing classfiles for JDK libraries like `java/lang/Thread#`.
   */
  var modules: List[Path] = Nil
}
/** A classpath entry that is a classfile on disk. */
final case class UncompressedClassfile(relativeUri: String, path: AbsolutePath) extends Classfile {
  // A fresh stream per call; the caller closes it (see Classfile.openInputStream).
  def openInputStream(): InputStream =
    Files.newInputStream(path.toNIO)
}
/** A classpath entry that is a classfile inside a jar file. */
final case class CompressedClassfile(entry: ZipEntry, zip: File) extends Classfile {
  override def relativeUri: String = entry.getName

  def openInputStream(): InputStream = {
    // Open a fresh ZipFile handle per call so streams are independent of each
    // other; closing the returned stream closes the underlying zip as well.
    val openFile = new ZipFile(zip)
    val delegate = openFile.getInputStream(entry)
    new FilterInputStream(delegate) {
      override def close(): Unit = openFile.close()
    }
  }
}
| scalameta/scalameta | semanticdb/metacp/src/main/scala/scala/meta/internal/classpath/ClasspathElement.scala | Scala | bsd-3-clause | 2,277 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.it.i18n
import play.api.test.{ PlaySpecification, WithApplication }
import play.api.mvc.Controller
import play.api.i18n._
/**
 * Integration specs for Play's i18n message lookup, covering both the Scala
 * MessagesApi and the Java play.i18n.MessagesApi. Runs sequentially because
 * each example boots its own application.
 */
class MessagesSpec extends PlaySpecification with Controller {
  sequential

  // Default language used implicitly by the Scala-side examples.
  implicit val lang = Lang("en-US")

  "Messages" should {
    "provide default messages" in new WithApplication(_.requireExplicitBindings()) {
      val messagesApi = app.injector.instanceOf[MessagesApi]
      val javaMessagesApi = app.injector.instanceOf[play.i18n.MessagesApi]
      val msg = messagesApi("constraint.email")
      val javaMsg = javaMessagesApi.get(new play.i18n.Lang(lang), "constraint.email")
      msg must ===("Email")
      // Scala and Java APIs must resolve the same message text.
      msg must ===(javaMsg)
    }
    "permit default override" in new WithApplication(_.requireExplicitBindings()) {
      val messagesApi = app.injector.instanceOf[MessagesApi]
      val msg = messagesApi("constraint.required")
      // "Required!" presumably comes from a messages file in the test
      // resources that overrides the framework default — confirm.
      msg must ===("Required!")
    }
  }

  "Messages@Java" should {
    import play.i18n._
    import java.util
    val enUS: Lang = new play.i18n.Lang(play.api.i18n.Lang("en-US"))
    "allow translation without parameters" in new WithApplication() {
      val messagesApi = app.injector.instanceOf[MessagesApi]
      val msg = messagesApi.get(enUS, "constraint.email")
      msg must ===("Email")
    }
    "allow translation with any non-list parameter" in new WithApplication() {
      val messagesApi = app.injector.instanceOf[MessagesApi]
      val msg = messagesApi.get(enUS, "constraint.min", "Croissant")
      msg must ===("Minimum value: Croissant")
    }
    "allow translation with any list parameter" in new WithApplication() {
      val messagesApi = app.injector.instanceOf[MessagesApi]
      val msg = {
        // A list argument is accepted as a single varargs element.
        val list: util.ArrayList[String] = new util.ArrayList[String]()
        list.add("Croissant")
        messagesApi.get(enUS, "constraint.min", list)
      }
      msg must ===("Minimum value: Croissant")
    }
  }
}
| aradchykov/playframework | framework/src/play-integration-test/src/test/scala/play/it/i18n/MessagesSpec.scala | Scala | apache-2.0 | 2,017 |
import scala.quoted.*
object Macros {

  /** At expansion, prints how tuple type `A` matches against tuple pattern type `B`. */
  inline def matches[A, B]: Unit = ${ matchesExpr[A, B] }

  private def matchesExpr[A, B](using a: Type[A], b: Type[B])(using Quotes) : Expr[Unit] = {
    import quotes.reflect.*

    // Invokes the compiler-internal quoted type matcher directly (hence the
    // cast to QuoteMatching); a successful match yields the extracted hole
    // types, which are rendered as strings for the printout.
    val res = quotes.asInstanceOf[scala.quoted.runtime.QuoteMatching].TypeMatch.unapply[Tuple, Tuple](a)(using b).map { tup =>
      tup.toArray.toList.map {
        case r: Type[_] =>
          s"Type(${TypeTree.of(using r).show})"
      }
    }

    // The generated code prints scrutinee, pattern and match result at run time.
    '{
      println("Scrutinee: " + ${Expr(TypeTree.of[A].show)})
      println("Pattern: " + ${Expr(TypeTree.of[B].show)})
      println("Result: " + ${Expr(res.toString)})
      println()
    }
  }
}
| dotty-staging/dotty | tests/run-macros/quote-type-matcher/quoted_1.scala | Scala | apache-2.0 | 673 |
/*
* Copyright (c) 2013. Regents of the University of California
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.avocado.calls.reads
import org.bdgenomics.adam.avro.ADAMRecord
import org.bdgenomics.adam.models.ADAMVariantContext
import org.bdgenomics.avocado.calls.VariantCallCompanion
import org.bdgenomics.avocado.partitioners.PartitionSet
import org.bdgenomics.avocado.stats.AvocadoConfigAndStats
import org.apache.commons.configuration.SubnodeConfiguration
import org.apache.spark.{ SparkContext, Logging }
import org.apache.spark.rdd.RDD
/** Companion/factory for [[ReadCallUnspecified]]. */
object ReadCallUnspecified extends VariantCallCompanion {

  val callName = "ReadUnspecified"

  // Configuration, statistics and partitions are ignored: this call is a
  // placeholder with no tunable behavior.
  def apply(stats: AvocadoConfigAndStats,
            config: SubnodeConfiguration,
            partitions: PartitionSet): ReadCallUnspecified = {
    new ReadCallUnspecified()
  }
}
/**
 * Abstract class for calling variants on reads.
 *
 * This is a non-callable placeholder: `call` always throws and `isCallable`
 * reports false, so pipelines must select a concrete read caller instead.
 */
class ReadCallUnspecified extends ReadCall {
  val companion = ReadCallUnspecified

  /**
   * Empty calling method.
   */
  def call(pileupGroups: RDD[ADAMRecord]): RDD[ADAMVariantContext] = {
    // Always throws; reaching this indicates a mis-configured pipeline.
    throw new IllegalArgumentException(companion.callName + " is not callable.")
  }

  // Call is generic, so is not callable
  override def isCallable() = false
}
| hammerlab/avocado | avocado-core/src/main/scala/org/bdgenomics/avocado/calls/reads/ReadCallUnspecified.scala | Scala | apache-2.0 | 1,781 |
package mesosphere.marathon.state
import java.io.{ ByteArrayInputStream, ObjectInputStream }
import javax.inject.Inject
import mesosphere.marathon.Protos.StorageVersion
import mesosphere.marathon.metrics.Metrics
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.state.StorageVersions._
import mesosphere.marathon.tasks.TaskTracker
import mesosphere.marathon.tasks.TaskTracker.InternalApp
import mesosphere.marathon.{ BuildInfo, MarathonConf }
import mesosphere.util.Logging
import mesosphere.util.ThreadPoolContext.context
import mesosphere.util.state.{ PersistentStoreManagement, PersistentEntity, PersistentStore }
import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.concurrent.{ Await, Future }
import scala.util.{ Failure, Success }
class Migration @Inject() (
store: PersistentStore,
appRepo: AppRepository,
groupRepo: GroupRepository,
config: MarathonConf,
metrics: Metrics) extends Logging {
//scalastyle:off magic.number
type MigrationAction = (StorageVersion, () => Future[Any])
/**
 * All the migrations, that have to be applied.
 * They get applied after the master has been elected.
 */
// Each entry pairs the storage version a change targets with the change
// itself (deferred behind a thunk); only entries newer than the currently
// stored version are executed, in ascending version order.
def migrations: List[MigrationAction] = List(
  StorageVersions(0, 5, 0) -> {
    // 0.5.0: app ids became lower-case, '-'-separated root paths.
    () => changeApps(app => app.copy(id = app.id.toString.toLowerCase.replaceAll("_", "-").toRootPath))
  },
  StorageVersions(0, 7, 0) -> {
    // 0.7.0: canonicalize task and app ids, then move all apps into the root group.
    () =>
      for {
        _ <- changeTasks(app => new InternalApp(app.appName.canonicalPath(), app.tasks, app.shutdown))
        _ <- changeApps(app => app.copy(id = app.id.canonicalPath()))
        _ <- putAppsIntoGroup()
      } yield ()
  }
)
// Applies every migration step newer than `from`, returning the versions that
// were applied.
// NOTE(review): `map` starts each step's Future immediately, so the steps run
// concurrently rather than strictly one after another — confirm this is
// intended, since later steps may assume earlier ones have completed.
def applyMigrationSteps(from: StorageVersion): Future[List[StorageVersion]] = {
  val result = migrations.filter(_._1 > from).sortBy(_._1).map {
    case (migrateVersion, change) =>
      log.info(
        s"Migration for storage: ${from.str} to current: ${current.str}: " +
          s"apply change for version: ${migrateVersion.str} "
      )
      change.apply().map(_ => migrateVersion)
  }
  Future.sequence(result)
}
// Gives stores that require setup a chance to initialize before migration;
// plain stores need nothing.
def initializeStore(): Future[Unit] = store match {
  case manager: PersistentStoreManagement => manager.initialize()
  case _: PersistentStore => Future.successful(())
}
// Runs the full migration pipeline (store init -> pending steps -> persist
// the new version) and blocks until it finishes. The unbounded Await is at
// the program edge: migration must complete before Marathon can serve.
def migrate(): StorageVersion = {
  val result = for {
    _ <- initializeStore()
    changes <- currentStorageVersion.flatMap(applyMigrationSteps)
    storedVersion <- storeCurrentVersion
  } yield storedVersion
  result.onComplete {
    case Success(version) => log.info(s"Migration successfully applied for version ${version.str}")
    case Failure(ex) => log.error("Migration failed!", ex)
  }
  Await.result(result, Duration.Inf)
}
// Key under which the storage format version is persisted.
private val storageVersionName = "internal:storage:version"

// Reads the persisted storage version; a missing entry means a fresh store,
// reported as version 0.0.0 so every migration step applies.
def currentStorageVersion: Future[StorageVersion] = {
  store.load(storageVersionName).map {
    case Some(variable) => StorageVersion.parseFrom(variable.bytes.toArray)
    case None => StorageVersions.empty
  }
}
// Persists the running build's storage version, updating the existing entry
// if present or creating it otherwise, and yields the stored version.
def storeCurrentVersion: Future[StorageVersion] = {
  val bytes = StorageVersions.current.toByteArray
  store.load(storageVersionName).flatMap {
    case Some(entity) => store.update(entity.withNewContent(bytes))
    case None => store.create(storageVersionName, bytes)
  }.map{ _ => StorageVersions.current }
}
// specific migration helper methods
// Loads every app definition, applies `fn`, and stores the transformed
// definitions back; completes when all stores have finished.
private def changeApps(fn: AppDefinition => AppDefinition): Future[Any] =
  appRepo.apps().flatMap { apps =>
    Future.sequence(apps.map(app => appRepo.store(fn(app))))
  }
// Rewrites the persisted task state of every known app with `fn`, converting
// from the pre-0.7 (legacy) serialization format on the way in.
private def changeTasks(fn: InternalApp => InternalApp): Future[Any] = {
  val taskTracker = new TaskTracker(store, config, metrics)
  // Loads an app's tasks from the legacy "tasks:<appId>" entry.
  // NOTE(review): blocks the calling thread per app via Await — presumably
  // acceptable only because migration runs once at startup.
  def fetchApp(appId: PathId): Option[InternalApp] = {
    Await.result(store.load("tasks:" + appId.safePath), config.zkTimeoutDuration).map { entity =>
      val source = new ObjectInputStream(new ByteArrayInputStream(entity.bytes.toArray))
      val fetchedTasks = taskTracker.legacyDeserialize(appId, source).map {
        case (key, task) =>
          // Promote the most recent obsolete status entry to the task's
          // current status, then drop the obsolete list.
          val builder = task.toBuilder.clearOBSOLETEStatuses()
          task.getOBSOLETEStatusesList.asScala.lastOption.foreach(builder.setStatus)
          key -> builder.build()
      }
      new InternalApp(appId, fetchedTasks, false)
    }
  }
  // Persists all tasks of the (already transformed) app in the new format.
  def storeApp(app: InternalApp): Future[Seq[PersistentEntity]] = {
    Future.sequence(app.tasks.values.toSeq.map(taskTracker.store(app.appName, _)))
  }
  appRepo.allPathIds().flatMap { apps =>
    val res = apps.flatMap(fetchApp).map{ app => storeApp(fn(app)) }
    Future.sequence(res)
  }
}
// Folds every known app (with canonicalized id) into the root group and
// persists the result.
private def putAppsIntoGroup(): Future[Any] = {
  // flatMap (was map): the original returned Future[Future[_]], so migrate()
  // could report success before the updated group was actually stored.
  groupRepo.group("root").map(_.getOrElse(Group.empty)).flatMap { group =>
    appRepo.apps().flatMap { apps =>
      val updatedGroup = apps.foldLeft(group) { (group, app) =>
        val updatedApp = app.copy(id = app.id.canonicalPath())
        group.updateApp(updatedApp.id, _ => updatedApp, Timestamp.now())
      }
      groupRepo.store("root", updatedGroup)
    }
  }
}
}
/**
 * Helpers for creating, parsing and comparing protobuf [[StorageVersion]]s.
 */
object StorageVersions {
  // Matches "major.minor.patch" with an arbitrary suffix (e.g. "0.8.2-SNAPSHOT").
  // Fixed: the previous pattern double-escaped the backslashes, which inside a
  // triple-quoted string matches a literal backslash + 'd' and can never match
  // a real version string.
  val VersionRegex = """^(\d+)\.(\d+)\.(\d+).*""".r

  /** Builds a StorageVersion from its three components. */
  def apply(major: Int, minor: Int, patch: Int): StorageVersion = {
    StorageVersion
      .newBuilder()
      .setMajor(major)
      .setMinor(minor)
      .setPatch(patch)
      .build()
  }

  /** The storage version corresponding to the running Marathon build. */
  def current: StorageVersion = {
    BuildInfo.version match {
      case VersionRegex(major, minor, patch) =>
        StorageVersions(
          major.toInt,
          minor.toInt,
          patch.toInt
        )
      case unparsable =>
        // Fail with a descriptive error instead of a bare MatchError.
        throw new IllegalArgumentException(s"Can not parse storage version from build version: $unparsable")
    }
  }

  /** Lexicographic ordering on (major, minor, patch). */
  implicit class OrderedStorageVersion(val version: StorageVersion) extends AnyVal with Ordered[StorageVersion] {
    override def compare(that: StorageVersion): Int = {
      // Compare the next component only when the previous ones are equal.
      def by(left: Int, right: Int, fn: => Int): Int = if (left.compareTo(right) != 0) left.compareTo(right) else fn
      by(version.getMajor, that.getMajor, by(version.getMinor, that.getMinor, by(version.getPatch, that.getPatch, 0)))
    }

    def str: String = s"Version(${version.getMajor}, ${version.getMinor}, ${version.getPatch})"
  }

  def empty: StorageVersion = StorageVersions(0, 0, 0)
}
| MrMarvin/marathon | src/main/scala/mesosphere/marathon/state/Migration.scala | Scala | apache-2.0 | 6,290 |
package net.countercraft.movecraft.utils
import com.google.common.collect.ImmutableSet
import com.alexknvl.shipcraft.MaterialDataPredicate
import net.minecraft.server.v1_12_R1.Item
import net.minecraft.server.v1_12_R1.ItemStack
import org.bukkit.Material
import org.bukkit.material.MaterialData
import scala.annotation.tailrec
/** Human-readable display names for Bukkit materials and block data values. */
object BlockNames {

  /**
   * Converts arbitrary text to "Proper Case": each letter run starts with an
   * upper-case letter, non-letters become spaces, and runs of whitespace are
   * collapsed to a single space.
   */
  def properCase(text: String): String = {
    // Walks the char array in place: upper-cases the first letter after a
    // non-letter, lower-cases subsequent letters, and blanks out non-letters.
    @tailrec def go(ix: Int, makeUpper: Boolean, chars: Array[Char]): Array[Char] =
      if (ix < chars.length) {
        val ch =
          if (makeUpper) Character.toUpperCase(chars(ix))
          else Character.toLowerCase(chars(ix))
        val isLetter = Character.isLetter(ch)
        chars(ix) = if (isLetter) ch else ' '
        go(ix + 1, !isLetter, chars)
      } else chars
    // Fix: the pattern previously used doubled backslashes ("\\\\s"), which
    // matched literal "\s" text instead of whitespace; "\\s\\s+" collapses
    // runs of 2+ whitespace characters as intended.
    new String(go(0, makeUpper = true, text.toCharArray)).replaceAll("\\s\\s+", " ").trim
  }

  /** Display names of all materials (with and without data) a predicate matches. */
  def materialDataPredicateNames(predicate: MaterialDataPredicate): Set[String] =
    predicate.allMaterials.map(itemName) ++ predicate.allMaterialDataPairs.map(itemName)

  private def itemName(material: Material, data: Byte, hasData: Boolean) = {
    // Prefer the item-stack display name; fall back to the enum name when the
    // lookup throws or yields an empty string.
    var tmp =
      try new ItemStack(Item.getById(material.getId), 1, data).getName
      catch { case ignored: Exception => null }
    if (tmp == null || tmp.isEmpty) tmp = material.name
    // Strip a trailing " On"/" Off" state suffix.
    // Fix: same doubled-backslash escaping bug as in properCase.
    tmp = properCase(tmp).replaceAll("\\s(On|Off)$", "")
    // Without a data value the default color prefix is meaningless; drop "White ".
    if (!hasData && tmp.startsWith("White "))
      tmp = tmp.substring(6)
    tmp
  }

  def itemName(materialData: MaterialData): String =
    itemName(materialData.getItemType, materialData.getData, hasData = true)

  def itemName(material: Material, data: Byte): String =
    itemName(material, data, hasData = true)

  def itemName(material: Material): String =
    itemName(material, 0.toByte, hasData = false)

  // Materials whose 16 data values are just colors; they get a single name.
  private val COLORED_MATERIALS = Set(
    Material.WOOL, Material.CARPET, Material.STAINED_GLASS,
    Material.STAINED_GLASS_PANE, Material.STAINED_CLAY)

  /** All display names for a material, one per data value unless it is a color. */
  def itemNames(blk: Material): Set[String] = {
    // Wool, Carpet, Stained Glass, Glass Pane, Clay
    if (COLORED_MATERIALS.contains(blk)) Set(itemName(blk))
    else {
      val builder = Set.newBuilder[String]
      for (data <- 0 until 16) {
        builder += itemName(blk, data.toByte, hasData = true)
      }
      builder.result()
    }
  }

  def itemNames(blk: MaterialData): Set[String] = Set(itemName(blk))
}
/**
* Copyright (C) 2015 DANS - Data Archiving and Networked Services (info@dans.knaw.nl)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy
import java.io.File
import com.yourmediashelf.fedora.client.FedoraCredentials
/** Domain model and configuration for the EASY file-system RDB update tool. */
package object fsrdb {

  /**
   * Runtime configuration.
   *
   * @param fedoraCredentials credentials for the Fedora repository
   * @param databaseUrl       url of the target database
   * @param databaseUser      database user name
   * @param databasePassword  database password (masked in toString)
   * @param datasetPidsFile   optional file identifying the datasets to process
   * @param datasetPids       optional explicit list of dataset pids
   */
  case class Settings(fedoraCredentials: FedoraCredentials,
                      databaseUrl: String,
                      databaseUser: String,
                      databasePassword: String,
                      datasetPidsFile: Option[File] = None,
                      datasetPids: Option[List[String]] = None) {
    // Renders settings for logging; both passwords are replaced with "****".
    override def toString: String = {
      s"FS-RDB.Settings(Database($databaseUrl, $databaseUser, ****), " +
        s"Fedora(${ fedoraCredentials.getBaseUrl }, ${ fedoraCredentials.getUsername }, ****), " +
        s"${ datasetPidsFile.map(file => s"Pids file: $file")
          .orElse(datasetPids.map(pids => s"Pids: ${pids.mkString("[", ", ", "]")}"))
          .getOrElse("<no input specified>") })"
    }
  }

  // Common shape of a file-system item belonging to a dataset.
  abstract class Item(val pid: String,
                      val parentSid: String,
                      val datasetSid: String,
                      val path: String)

  /** A folder within a dataset. */
  case class FolderItem(override val pid: String,
                        override val parentSid: String,
                        override val datasetSid: String,
                        override val path: String,
                        name: String) extends Item(pid, parentSid, datasetSid, path)

  /** A file within a dataset, including its metadata and access settings. */
  case class FileItem(override val pid: String,
                      override val parentSid: String,
                      override val datasetSid: String,
                      override val path: String,
                      filename: String,
                      size: Long,
                      mimetype: String,
                      creatorRole: String,
                      visibleTo: String,
                      accessibleTo: String,
                      sha1Checksum: Option[String]) extends Item(pid, parentSid, datasetSid, path)
}
| DANS-KNAW/easy-update-fs-rdb | lib/src/main/scala/nl.knaw.dans.easy.fsrdb/package.scala | Scala | apache-2.0 | 2,538 |
/* This file is part of gnieh-coroutines.
*
* See the NOTICE file distributed with this work for copyright information.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gnieh
import scala.util._
import continuations._
/**
 * User-facing entry points for the coroutines compiler plugin. Except for the
 * Coroutine-accepting `wrap` overload, these methods are placeholders: per
 * their exception messages, calls to them are only meaningful when compiled
 * with the Scala coroutines plugin enabled, and executing them without the
 * plugin throws NoSuchMethodException. Do not change their bodies or
 * signatures without updating the plugin accordingly.
 */
package object coroutines {

  // Placeholder: coroutine body entry point; only valid under the plugin.
  def fun[Param, Ret](p: Param): Ret = throw new NoSuchMethodException("this code has to be compiled with the Scala coroutines plugin enabled")

  // Placeholder: yields a value from a coroutine; only valid under the plugin.
  def yld[T, V](v: T): V = throw new NoSuchMethodException("this code has to be compiled with the Scala coroutines plugin enabled")

  // Placeholder: creates a coroutine from a function; only valid under the plugin.
  def create[Param, Ret](fun: Param => Ret): Coroutine[Param, Ret] = throw new NoSuchMethodException("this code has to be compiled with the Scala coroutines plugin enabled")

  // Placeholder: wraps a coroutine function; only valid under the plugin.
  def wrap[Param, Ret](fun: Param => Ret): Param => Ret = throw new NoSuchMethodException("this code has to be compiled with the Scala coroutines plugin enabled")

  // Runtime (non-placeholder) overload: adapts an existing coroutine to a
  // plain function by resuming it on each call.
  def wrap[Param,Ret](cor: Coroutine[Param,Ret]) = (p: Param) => cor.resume(p)

  /** this allows use of yield from within a while loop
   * see http://stackoverflow.com/questions/2201882/implementing-yield-yield-return-using-scala-continuations/2218589#2218589
   */
  def cowhile[Ret](cond: Boolean)(body: =>Ret): Ret = throw new NoSuchMethodException("this code has to be compiled with the Scala coroutines plugin enabled")
}
// vim: set ts=4 sw=4 et:
| satabin/gnieh-coroutines | library/gnieh/coroutines/package.scala | Scala | apache-2.0 | 1,828 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.blockchain
import org.scalatest.{Matchers, WordSpec}
/** Verifies that resource-directory listing finds the bundled test resources. */
class GetResourcesFromDirectoryFnTest extends WordSpec with Matchers {
  "GetResourcesFromDirectoryFn" should {
    "list all the files in a specified resource folder" in {
      // Resolve the files packaged under the /testResources classpath folder.
      val resourceFiles = GetResourcesFromDirectoryFn("/testResources")
      resourceFiles.isEmpty shouldBe false
      resourceFiles.head.getName shouldBe "sample.txt"
    }
  }
}
| CodeSmell/stream-reactor | kafka-connect-blockchain/src/test/scala/com/datamountaineer/streamreactor/connect/blockchain/GetResourcesFromDirectoryFnTest.scala | Scala | apache-2.0 | 1,051 |
package net.softler.processor
import akka.http.scaladsl.model.{HttpResponse, ResponseEntity, StatusCodes}
import akka.stream.Materializer
import net.softler.exception._
/**
* The response processor is handling all common [[StatusCodes]]
* The processor can be overridden with any logic you want
* In the future there a multiple response processors planed
* (e.g. some with retry logic and so on...)
*/
/**
 * The response processor is handling all common [[StatusCodes]].
 * The processor can be overridden with any logic you want.
 * In the future multiple response processors are planned
 * (e.g. some with retry logic and so on...)
 */
trait ResponseProcessor {

  // A partial handler mapping responses of one status class to their entity
  // (or throwing). `process` chains the handlers together.
  type ResponseHandler = PartialFunction[HttpResponse, ResponseEntity]

  /**
   * Success handler override this when you want to match only a single status
   */
  def success(implicit materializer: Materializer): ResponseHandler

  /**
   * Client error handler (e.g. Bad Request) override this when you want to match only a single status
   */
  def clientError(implicit materializer: Materializer): ResponseHandler

  /**
   * Error handler (e.g. Internal Server error) override this when you want to match only a single status
   */
  def error(implicit materializer: Materializer): ResponseHandler

  /**
   * Informational handler (e.g. Continue...) override this when you want to match only a single status
   */
  def informational(implicit materializer: Materializer): ResponseHandler

  /**
   * Redirect handler (e.g. Redirect...) override this when you want to match your own redirection logic
   */
  def redirect(implicit materializer: Materializer): ResponseHandler

  /**
   * Default response handler override this when you want to match your own custom codes
   */
  def default(implicit materializer: Materializer): ResponseHandler

  /**
   * Processor which runs the handlers from top to bottom
   * The materializer is necessary to discard the underlying response entity (onError)
   * See this: https://doc.akka.io/docs/akka-http/current/scala/http/implications-of-streaming-http-entity.html
   */
  def process(response: HttpResponse)(implicit materializer: Materializer): ResponseEntity
}
object ResponseProcessor {

  /**
   * The default http response processor.
   * A successful response yields its entity unchanged; for every other status
   * class the entity bytes are discarded (releasing the underlying stream)
   * and a typed rest exception describing the status is thrown.
   */
  implicit object DefaultProcessor extends ResponseProcessor {

    override def success(implicit materializer: Materializer): ResponseHandler = {
      case HttpResponse(_: StatusCodes.Success, _, entity, _) => entity
    }

    override def clientError(implicit materializer: Materializer): ResponseHandler = {
      case response @ HttpResponse(status: StatusCodes.ClientError, _, entity, _) =>
        response.discardEntityBytes()
        val message =
          s"Client error occurred for status code [${status.intValue}] with response entity [${entity.toString}]"
        throw ClientErrorRestException(message)
    }

    override def error(implicit materializer: Materializer): ResponseHandler = {
      case response @ HttpResponse(status: StatusCodes.ServerError, _, entity, _) =>
        response.discardEntityBytes()
        val message =
          s"Server error occurred for status code [${status.intValue}] with response entity [${entity.toString}]"
        throw ServerErrorRestException(message)
    }

    override def informational(implicit materializer: Materializer): ResponseHandler = {
      case response @ HttpResponse(status: StatusCodes.Informational, _, entity, _) =>
        response.discardEntityBytes()
        val message =
          s"Information error occurred for status code [${status.intValue}] with response entity [${entity.toString}]"
        throw InformationalErrorRestException(message)
    }

    /**
     * Redirect handler (e.g. Redirect...) override this when you want to match your own redirection logic
     */
    override def redirect(implicit materializer: Materializer): ResponseHandler = {
      case response @ HttpResponse(status: StatusCodes.Redirection, _, entity, _) =>
        response.discardEntityBytes()
        val message =
          s"Redirection error occurred for status code [${status.intValue}] with response entity [${entity.toString}]"
        throw RedirectionErrorRestException(message)
    }

    /**
     * Fallback for any status code not covered by the handlers above.
     */
    override def default(implicit materializer: Materializer): ResponseHandler = {
      case response @ HttpResponse(status, _, entity, _) =>
        response.discardEntityBytes()
        val message =
          s"Unknown response for status code [${status.intValue}] with response entity [${entity.toString}]"
        throw CustomRestException(message)
    }

    override def process(response: HttpResponse)(
        implicit materializer: Materializer): ResponseEntity = {
      // Try the handlers in order, from success down to the catch-all default.
      val handler = success orElse clientError orElse error orElse informational orElse redirect orElse default
      handler(response)
    }
  }
}
| Freshwood/akka-http-rest-client | client/akka-http/src/main/scala/net/softler/processor/ResponseProcessor.scala | Scala | mit | 4,570 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.keras.layers
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.Shape
import com.intel.analytics.bigdl.dllib.keras.autograd.Parameter
import com.intel.analytics.bigdl.dllib.keras.Model
import com.intel.analytics.bigdl.dllib.keras.Sequential
import com.intel.analytics.bigdl.dllib.keras.serializer.ModuleSerializationTest
// Tests for the ExpandDim keras-style layer: inserting a singleton dimension.
class Expand_dimSpec extends KerasBaseSpec {
  // seq.add(new Expand_dim[Float](dim = 0))
  "ExpandDim0" should "be test" in {
    val input = Parameter[Float](inputShape = Shape(3), name = "input1")
    val expand = new ExpandDim[Float](dim = 0).from(input)
    val seq = Model(input, expand)
    // Expanding at dim 0 turns a length-3 vector into a 1 x 3 tensor.
    seq.getOutputShape().toSingle().toArray should be(Array(1, 3))
    val inputData = Tensor[Float](Array(3)).randn()
    val out = seq.forward(inputData).toTensor[Float]
    assert(out.size().deep == Array(1, 3).deep)
    // NOTE(review): almostEqual returns a Boolean that is discarded here, so
    // this value comparison is never enforced — wrap it in assert(...).
    out.toTensor[Float].almostEqual(inputData.addSingletonDimension(dim = 1), 1e-4)
  }

  "ExpandDim1" should "be test" in {
    val input = Parameter[Float](inputShape = Shape(3, 4), name = "input2")
    val expand = new ExpandDim[Float](dim = 1).from(input)
    val seq = Model(input, expand)
    seq.getOutputShape().toSingle().toArray should be(Array(3, 1, 4))
    // NOTE(review): this fixture's shape (2, 1, 6) matches neither the
    // declared inputShape (3, 4) nor the asserted output size (3, 1, 4)
    // below — presumably a copy-paste slip; confirm the intended shape.
    val inputData = Tensor[Float](Array(2, 1, 6)).rand()
    val out = seq.forward(inputData).toTensor[Float]
    assert(out.size().deep == Array(3, 1, 4).deep)
    // NOTE(review): result of almostEqual discarded here as well (see above).
    out.toTensor[Float].almostEqual(inputData.addSingletonDimension(dim = 1), 1e-4)
  }
}
/** Round-trips an ExpandDim layer through serialization and compares outputs. */
class ExpandDimSerialTest extends ModuleSerializationTest {
  override def test(): Unit = {
    val layer = new ExpandDim[Float](inputShape = Shape(3, 2))
    layer.build(Shape(2, 3, 2))
    // Random sample matching the built (batch = 2) input shape.
    val sampleInput = Tensor[Float](2, 3, 2).rand()
    runSerializationTest(layer, sampleInput)
  }
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/keras/layers/ExpandDimSpec.scala | Scala | apache-2.0 | 2,417 |
/*
Copyright (c) 2014 by Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ml.dmlc.xgboost4j.scala.example.util
import ml.dmlc.xgboost4j.java.XGBoostError
import ml.dmlc.xgboost4j.scala.{ DMatrix, EvalTrait }
import org.apache.commons.logging.{ Log, LogFactory }
/**
 * Custom evaluation metric: misclassification rate of binary predictions
 * thresholded at 0.5.
 */
class CustomEval extends EvalTrait {

  private val logger: Log = LogFactory.getLog(classOf[CustomEval])

  /**
   * get evaluate metric
   *
   * @return evalMetric
   */
  override def getMetric: String = "custom_error"

  /**
   * evaluate with predicts and data
   *
   * @param predicts predictions as array
   * @param dmat data matrix to evaluate
   * @return fraction of misclassified rows, or -1 if labels cannot be read
   */
  override def eval(predicts: Array[Array[Float]], dmat: DMatrix): Float = {
    val labels: Array[Float] =
      try dmat.getLabel
      catch {
        case ex: XGBoostError =>
          logger.error(ex)
          return -1f
      }
    // A row is wrong when its 0.5-thresholded prediction disagrees with the label.
    val misclassified = (0 until predicts.length).count { i =>
      (labels(i) == 0.0 && predicts(i)(0) > 0.5) ||
        (labels(i) == 1.0 && predicts(i)(0) <= 0.5)
    }
    misclassified.toFloat / labels.length
  }
}
| yuikns/iphigenia | src/main/scala/ml/dmlc/xgboost4j/scala/example/util/CustomEval.scala | Scala | mit | 1,716 |
package org.locationtech.geomesa.process
import com.google.common.hash.Hashing
import org.geotools.data.DataUtilities
import org.geotools.data.simple.SimpleFeatureCollection
import org.geotools.feature.collection.DelegateSimpleFeatureIterator
import org.geotools.feature.simple.{SimpleFeatureBuilder, SimpleFeatureTypeBuilder}
import org.geotools.process.ProcessException
import org.geotools.process.factory.{DescribeParameter, DescribeProcess, DescribeResult}
import org.geotools.process.vector.VectorProcess
/**
 * Mixin implementing a vector process that appends a "hash" attribute to each
 * feature, derived from hashing one configured attribute modulo a
 * caller-supplied divisor. Subclasses decide how the bucket index is rendered
 * via [[transformHash]] and must register the "hash" attribute on the schema
 * in [[augmentSft]].
 */
trait HashAttribute {

  import org.locationtech.geomesa.utils.geotools.Conversions._

  import scala.collection.JavaConversions._

  // NOTE: Guava's goodFastHash is only guaranteed stable within a single JVM
  // run, so the produced hashes must not be persisted or compared across
  // processes.
  val hashFn = Hashing.goodFastHash(64)

  /** Turns a bucket index in [0, modulo) into the value stored under "hash". */
  def transformHash(hash: Int): AnyRef

  // note - augmentSft needs to add an attribute called 'hash'
  def augmentSft(sft: SimpleFeatureTypeBuilder): Unit

  @throws(classOf[ProcessException])
  @DescribeResult(name = "result", description = "Output collection")
  def execute(@DescribeParameter(name = "data", description = "Input features")
              obsFeatures: SimpleFeatureCollection,
              @DescribeParameter(name = "attribute", description = "The attribute to hash on")
              attribute: String,
              @DescribeParameter(name = "modulo", description = "The divisor")
              modulo: Integer): SimpleFeatureCollection = {
    val sft = obsFeatures.getSchema
    val sftBuilder = new SimpleFeatureTypeBuilder()
    sftBuilder.init(sft)
    augmentSft(sftBuilder)

    val targetSft = sftBuilder.buildFeatureType()
    val hashIndex = targetSft.indexOf("hash")
    val featureBuilder = new SimpleFeatureBuilder(targetSft)
    val results =
      obsFeatures.features().map { sf =>
        featureBuilder.reset()
        featureBuilder.init(sf)
        // Missing attributes hash as the empty string rather than failing.
        val attr = Option(sf.getAttribute(attribute)).map(_.toString).getOrElse("")
        // Fix: widen to Long before abs — math.abs(Int.MinValue) is still
        // negative, which previously could yield a negative bucket index.
        // All other inputs produce the same bucket as before.
        val hash = (math.abs(hashFn.hashString(attr).asInt().toLong) % modulo).toInt
        featureBuilder.set(hashIndex, transformHash(hash))
        featureBuilder.buildFeature(sf.getID)
      }
    DataUtilities.collection(new DelegateSimpleFeatureIterator(results))
  }
}
@DescribeProcess(
  title = "Hash Attribute Process",
  description = "Adds an attribute to each SimpleFeature that hashes the configured attribute modulo the configured param"
)
// Concrete process exposing the raw bucket index as an Integer "hash" attribute.
class HashAttributeProcess extends VectorProcess with HashAttribute {
  /** Stores the bucket index itself, boxed for the feature attribute slot. */
  override def transformHash(hash: Int): AnyRef = Int.box(hash)

  /** Registers the Integer-typed "hash" attribute on the output schema. */
  override def augmentSft(sftBuilder: SimpleFeatureTypeBuilder): Unit = {
    sftBuilder.add("hash", classOf[Integer])
  }
}
@DescribeProcess(
  title = "Hash Attribute Color Process",
  description = "Adds an attribute to each SimpleFeature that hashes the configured attribute modulo the configured param and emits a color"
)
// Variant that maps the bucket index onto a fixed palette of hex colors,
// e.g. for styling features by hash bucket.
class HashAttributeColorProcess extends VectorProcess with HashAttribute {
  // Fixed palette; buckets wrap around via modulo below.
  val colors =
    Array[String](
      "#6495ED",
      "#B0C4DE",
      "#00FFFF",
      "#9ACD32",
      "#00FA9A",
      "#FFF8DC",
      "#F5DEB3")

  // Assumes `hash` is non-negative (as produced by HashAttribute.execute);
  // a negative value would throw ArrayIndexOutOfBoundsException.
  override def transformHash(hash: Int): AnyRef = colors(hash % colors.length)

  /** Registers the String-typed "hash" attribute on the output schema. */
  override def augmentSft(sftBuilder: SimpleFeatureTypeBuilder): Unit = {
    sftBuilder.add("hash", classOf[String])
  }
}
/** Sample predicate used in the slides: true exactly for even integers. */
object Foo {
  def isGood(x: Int): Boolean = x % 2 == 0
}
// Slide placeholder: the second parameter slot is intentionally left as a
// comment — the exercise is to decide what "something" (the filtering
// criterion) should be. As written this snippet does not compile.
object ArrayUtils {
  def filter(xs: Array[Int], /* something */): Array[Int] = ???
}
// Simplified re-declaration of the standard library's Function1 for teaching:
// a value that can be applied to a T to produce an R.
trait Function1[T, R] {
  def apply(x: T): R
}
| agconti/scala-school | 04-functions-as-values/slides/slide022.scala | Scala | mit | 201 |
package com.coiney.akka.rabbit.actors
import akka.actor.ActorSystem
import akka.testkit.{TestProbe, ImplicitSender, TestKit}
import com.coiney.akka.rabbit.protocol._
import org.scalacheck.Gen.Choose
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import org.scalatest.{BeforeAndAfterEach, BeforeAndAfterAll, Matchers, WordSpecLike}
import scala.concurrent.duration._
// Integration-style tests for the rabbit RPC system: a server consuming from a
// queue and one or more clients publishing requests and awaiting responses.
class RPCSystemSpec(_actorSystem: ActorSystem) extends TestKit(_actorSystem)
                                               with ImplicitSender
                                               with WordSpecLike
                                               with Matchers
                                               with BeforeAndAfterAll
                                               with BeforeAndAfterEach
                                               with GeneratorDrivenPropertyChecks
                                               with RabbitSpec
                                               with ChannelKeeperSpec {

  def this() = this(ActorSystem("RPCServerSpec"))

  // Test processor: echoes each request body back reversed; on processing
  // failure the exception message is returned instead.
  val rabbitRPCProcessor = new RabbitRPCProcessor {
    override def process(hd: HandleDelivery): RabbitRPCResult = {
      val message = new String(hd.body)
      val result = message.reverse
      RabbitRPCResult(Some(result.getBytes("UTF-8")))
    }

    override def recover(hd: HandleDelivery, cause: Throwable): RabbitRPCResult = {
      RabbitRPCResult(Some(cause.getMessage.getBytes("UTF-8")))
    }
  }

  "An RPC System" when {

    "single server and single requester" should {

      "correctly respond in a request-single response configuration" in {
        val rpcQueue = randomQueue
        // create the server
        rabbitSystem waitFor rabbitSystem.createRPCServer(connectionKeeper, rabbitRPCProcessor, rpcQueue)
        // create the client
        val rpcClient = rabbitSystem waitFor rabbitSystem.createRPCClient(connectionKeeper)
        // For arbitrary payloads: request once, expect one reversed response.
        forAll { (s: String) =>
          rpcClient ! RabbitRPCRequest(List(Publish("", rpcQueue.name, s.getBytes("UTF-8"))), 1)
          val RabbitRPCResponse(List(HandleDelivery(_, _, _, body))) = receiveOne(2.seconds)
          new String(body) should be(s.reverse)
        }
      }

    }

    "single server and multiple requesters" should {

      "correctly respond to clients in a request-single response configuration" in {
        val rpcQueue = randomQueue
        // Two independent probes share one client actor; each must get its
        // own response routed back to it.
        val probe1 = TestProbe()
        val probe2 = TestProbe()
        // create the server
        rabbitSystem waitFor rabbitSystem.createRPCServer(connectionKeeper, rabbitRPCProcessor, rpcQueue)
        // create the clients
        val rpcClient = rabbitSystem waitFor rabbitSystem.createRPCClient(connectionKeeper)
        forAll { (s: String) =>
          probe1.send(rpcClient, RabbitRPCRequest(List(Publish("", rpcQueue.name, s.getBytes("UTF-8"))), 1))
          probe2.send(rpcClient, RabbitRPCRequest(List(Publish("", rpcQueue.name, s.getBytes("UTF-8"))), 1))
          val RabbitRPCResponse(List(HandleDelivery(_, _, _, body1))) = probe1.receiveOne(2.seconds)
          val RabbitRPCResponse(List(HandleDelivery(_, _, _, body2))) = probe2.receiveOne(2.seconds)
          new String(body1) should be(s.reverse)
          new String(body2) should be(s.reverse)
        }
      }

    }

    "single server and multiple clients" should {

      "correctly respond to clients in a request-single response configuration" in {
        val rpcQueue = randomQueue
        // create the server
        rabbitSystem waitFor rabbitSystem.createRPCServer(connectionKeeper, rabbitRPCProcessor, rpcQueue)
        // create the clients
        val rpcClient1 = rabbitSystem waitFor rabbitSystem.createRPCClient(connectionKeeper)
        val rpcClient2 = rabbitSystem waitFor rabbitSystem.createRPCClient(connectionKeeper)
        forAll { (s1: String, s2: String) =>
          rpcClient1 ! RabbitRPCRequest(List(Publish("", rpcQueue.name, s1.getBytes("UTF-8"))), 1)
          rpcClient2 ! RabbitRPCRequest(List(Publish("", rpcQueue.name, s2.getBytes("UTF-8"))), 1)
          // Both responses arrive at the implicit sender; order is not
          // guaranteed, so membership is checked rather than position.
          val Seq(RabbitRPCResponse(List(HandleDelivery(_, _, _, body1))), RabbitRPCResponse(List(HandleDelivery(_, _, _, body2)))) = receiveN(2)
          val responses = List(new String(body1), new String(body2))
          responses should contain(s1.reverse)
          responses should contain(s2.reverse)
        }
      }

    }
  }
}
| Coiney/akka-rabbit | akka-rabbit-core/src/test/scala/com/coiney/akka/rabbit/actors/RPCSystemSpec.scala | Scala | bsd-3-clause | 4,405 |
package org.jetbrains.plugins.scala.lang
package resolve
package processor
package precedence
import java.util
import com.intellij.psi.util.PsiTreeUtil.getContextOfType
import com.intellij.psi.{PsiElement, PsiPackage}
import com.intellij.util.containers.SmartHashSet
import org.jetbrains.plugins.scala.lang.psi.api.base.ScStableCodeReferenceElement
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScPackaging
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.ScImportExpr
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.usages._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScObject
import scala.annotation.tailrec
/**
* User: Alexander Podkhalyuzin
* Date: 01.12.11
*/
//todo: logic is too complicated, too many connections between classes. Rewrite?
// Tracks resolve candidates while honoring Scala's precedence rules: a result
// with higher precedence evicts previously collected lower-precedence results.
// State is held in mutable java.util sets; addResults is the only mutation
// entry point callers should use.
trait PrecedenceHelper {

  import PrecedenceHelper._

  def getPlace: PsiElement
  protected lazy val placePackageName: String = ResolveUtils.getPlacePackage(getPlace)

  // Records the best (highest) precedence seen so far per name.
  protected val holder: TopPrecedenceHolder

  protected def nameUniquenessStrategy: NameUniquenessStrategy

  // All currently accepted results at the top precedence level.
  protected val levelSet : util.Set[ScalaResolveResult] = new SmartHashSet()
  // Names accepted so far (across levels / within the current level).
  protected val uniqueNamesSet : util.Set[ScalaResolveResult] = new UniqueNamesSet(nameUniquenessStrategy)
  protected val levelUniqueNamesSet: util.Set[ScalaResolveResult] = new UniqueNamesSet(nameUniquenessStrategy)

  // Resets all accumulated state.
  protected def clear(): Unit = {
    levelUniqueNamesSet.clear()
    uniqueNamesSet.clear()
    levelSet.clear()
  }

  // Packages enclosing the resolve place; imports from them are redundant.
  private lazy val suspiciousPackages: Set[String] = collectPackages(getPlace)

  // True when the results come from an always-available or enclosing package
  // import (java.lang, scala, scala.Predef, or a package containing the place).
  protected def ignored(results: Seq[ScalaResolveResult]): Boolean =
    results.headOption.flatMap(findQualifiedName)
      .exists((IgnoredPackages ++ suspiciousPackages).contains)

  protected def isCheckForEqualPrecedence = true

  protected def clearLevelQualifiedSet(result: ScalaResolveResult) {
    levelUniqueNamesSet.clear()
  }

  protected def getLevelSet(result: ScalaResolveResult): util.Set[ScalaResolveResult] = levelSet

  /**
   * Do not add ResolveResults through candidatesSet. It may break precedence. Use this method instead.
   */
  protected def addResult(result: ScalaResolveResult): Boolean = addResults(Seq(result))

  // Adds a group of results that share one representative (the head).
  // Returns false when the group is discarded because a result with higher
  // (or conflicting equal) precedence was already collected.
  protected def addResults(results: Seq[ScalaResolveResult]): Boolean = {
    if (results.isEmpty) return true
    val result: ScalaResolveResult = results.head
    lazy val levelSet = getLevelSet(result)

    // Registers the representative's name and all results at the current level.
    def addResults() {
      levelUniqueNamesSet.add(result)
      val iterator = results.iterator
      while (iterator.hasNext) {
        levelSet.add(iterator.next())
      }
    }

    val currentPrecedence = precedence(result)
    val topPrecedence = holder(result)
    if (currentPrecedence < topPrecedence) return false
    else if (currentPrecedence == topPrecedence && levelSet.isEmpty) return false
    else if (currentPrecedence == topPrecedence) {
      // Same precedence as the best so far: accept only if the name is new.
      if (isCheckForEqualPrecedence &&
        (levelUniqueNamesSet.contains(result) || uniqueNamesSet.contains(result))) {
        return false
      } else if (uniqueNamesSet.contains(result)) return false
      if (!ignored(results)) addResults()
    } else {
      // Strictly higher precedence: evict every previously collected result
      // that the new one shadows, then record the new level.
      if (uniqueNamesSet.contains(result)) {
        return false
      } else {
        if (!ignored(results)) {
          holder(result) = currentPrecedence
          val levelSetIterator = levelSet.iterator()
          while (levelSetIterator.hasNext) {
            val next = levelSetIterator.next()
            if (holder.filterNot(next, result)(precedence)) {
              levelSetIterator.remove()
            }
          }
          clearLevelQualifiedSet(result)
          addResults()
        }
      }
    }
    true
  }

  // Precedence of a result at the current place; prefix completions get a
  // dedicated precedence value.
  protected def precedence(result: ScalaResolveResult): Int =
    if (result.prefixCompletion) PrecedenceTypes.PREFIX_COMPLETION
    else result.getPrecedence(getPlace, placePackageName)
}
object PrecedenceHelper {

  // Imports from these packages are always implicitly available and therefore
  // never considered when weighing precedence.
  private val IgnoredPackages: Set[String] =
    Set("java.lang", "scala", "scala.Predef")

  /** Fully qualified names of every packaging enclosing `element`. */
  private def collectPackages(element: PsiElement): Set[String] = {
    @tailrec
    def loop(current: PsiElement, acc: Set[String]): Set[String] =
      getContextOfType(current, true, classOf[ScPackaging]) match {
        case packaging: ScPackaging => loop(packaging, acc + packaging.fullPackageName)
        case null                   => acc
      }

    loop(element, Set.empty)
  }

  /** Qualified name of the package or object the result was imported from, if any. */
  private def findQualifiedName(result: ScalaResolveResult): Option[String] =
    for {
      reference <- findImportReference(result)
      resolved  <- reference.bind()
      name <- resolved.element match {
        case p: PsiPackage => Some(p.getQualifiedName)
        case o: ScObject   => Some(o.qualifiedName)
        case _             => None
      }
    } yield name

  /** The qualifier of the single import that produced `result`, when exactly one was used. */
  private def findImportReference(result: ScalaResolveResult): Option[ScStableCodeReferenceElement] =
    result.importsUsed.toSeq match {
      case Seq(single) =>
        val importExpression = single match {
          case ImportExprUsed(expr)             => expr
          case ImportSelectorUsed(selector)     => getContextOfType(selector, true, classOf[ScImportExpr])
          case ImportWildcardSelectorUsed(expr) => expr
        }
        Some(importExpression.qualifier)
      case _ => None
    }
}
package sampleclean.eval
import sampleclean.clean.deduplication.join._
import sampleclean.clean.deduplication.blocker._
import sampleclean.clean.deduplication.matcher._
import sampleclean.clean.deduplication._
import sampleclean.api.SampleCleanContext
import org.apache.spark.sql.{SchemaRDD, Row}
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import sampleclean.clean.featurize.AnnotatedSimilarityFeaturizer
import sampleclean.clean.featurize.AnnotatedSimilarityFeaturizer._
import org.apache.spark.rdd.RDD
import edu.stanford.math.plex._
// Tunes a deduplication similarity threshold by running a persistent-homology
// (Rips complex) analysis over the pairwise dissimilarity matrix of a sample.
private [sampleclean] class PersistentHomologyThresholdTuner(scc: SampleCleanContext,
                              simfeature: AnnotatedSimilarityFeaturizer) extends Serializable {

  // Similarity of a pair of rows under the configured featurizer. `rows` is
  // expected to hold the two rows of the candidate pair; note that for the
  // diagonal (a row paired with itself) the set collapses to one element.
  def rowsToSimilarity[K,V](rows:Set[Any], params: collection.immutable.Map[K,V]=null):Double = {
    return simfeature.getSimilarityDouble(rows.asInstanceOf[Set[Row]],params)._2
  }

  // Builds the full pairwise dissimilarity matrix of the sample, feeds it to
  // javaPlex, and derives a threshold from the first persistence interval.
  // NOTE(review): the cartesian product plus collect() materializes an O(n^2)
  // matrix on the driver — only feasible for small samples.
  // NOTE(review): intervals(0) throws if computeIntervals returns no
  // intervals; consider guarding with a fallback threshold.
  def tuneThreshold(sampleTableName: String):Double = {
    val data = scc.getCleanSample(sampleTableName)

    //todo add error handling clean up
    var tokenWeights = collection.immutable.Map[String, Double]()
    var tokenCounts = collection.immutable.Map[String, Int]()
    // IDF-style token weights: log10(N / documentCount(token)).
    tokenCounts = computeTokenCount(data.map(simfeature.tokenizer.tokenize(_, simfeature.getCols())))
    tokenWeights = tokenCounts.map(x => (x._1, math.log10(data.count.toDouble / x._2)))

    //map reduce tasks to get the data into the matrix
    // Rows are keyed and ordered by their first column so the matrix layout
    // is deterministic; entries are dissimilarities (1 - similarity).
    val dataMatrix = data.rdd
      .cartesian(data.rdd)
      .map(x => (x._1(0).toString(),
        (x._2(0).toString(),
        rowsToSimilarity(x.productIterator.toSet, tokenWeights))))
      .groupByKey()
      .sortByKey()
      .map(x => x._2.toArray
        .sortBy(_._1)
        .map(x => (1.0 - x._2)))
      .collect()
      .toArray

    // Compute the persistence intervals of the Rips filtration.
    var pdatarelative = Plex.DistanceData(dataMatrix);
    var ripsrelative = Plex.RipsStream(0.01,3,1.0,pdatarelative);
    var intervals = Plex.Persistence().computeIntervals(ripsrelative);

    // Debug output of every interval (dimension and endpoints).
    for(i <- intervals)
    {
      println(i.dimension+" " + i.toDouble().mkString(" "))
    }

    // The death value of the first interval, converted back from
    // dissimilarity to a similarity threshold.
    return 1.0 - intervals(0).toDouble()(1)
  }

  // Number of candidate pairs (including self-pairs) above the threshold.
  // NOTE(review): also O(n^2) via cartesian; uses null token weights, so the
  // similarity here may differ from the weighted one used in tuneThreshold —
  // confirm that is intended.
  def getCandidatePairsCount(sampleTableName: String, thresh:Double):Long = {
    val data = scc.getCleanSample(sampleTableName)
    return data.rdd.cartesian(data.rdd).map(x => rowsToSimilarity(x.productIterator.toSet)).filter(x => x > thresh).count()
  }

  // Per-token document frequency: in how many rows each distinct token occurs.
  def computeTokenCount(data: RDD[(Seq[String])]): collection.immutable.Map[String, Int] = {
    val m = data.flatMap{
      case tokens =>
        for (x <- tokens.distinct)
          yield (x, 1)
    }.reduceByKeyLocally(_ + _)
    collection.immutable.Map(m.toList: _*)
  }
}
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views
import models.registrationprogress.{Completed, NotStarted, Started}
import org.scalatestplus.mockito.MockitoSugar
import play.api.mvc.Call
import play.twirl.api.HtmlFormat
import utils.AmlsViewSpec
/**
 * View spec for the registration-progress section template: for each
 * completion status (NotStarted / Started / Completed) the rendered section
 * must show the expected link text and status label.
 */
class SectionSpec extends AmlsViewSpec with MockitoSugar {

  // CSS selector suffix for the section's link; prefixed with "#<section-id>" below.
  val sectionPath = "-status a"
  // CSS selector for the status label element of a section.
  val statusPath = "div span.section-status"

  trait ViewFixture extends Fixture {
    // Request carrying a token so the view can be rendered in tests.
    implicit val requestWithToken = addTokenForView()
  }

  "The section view" when {

    "status is NotStarted" must {
      "show Add [SectionName] link text" in new ViewFixture {
        // "hvd" renders with element id "high-value-dealer" (asserted via the selector below).
        override def view: HtmlFormat.Appendable = views.html.registrationprogress.section("hvd", NotStarted, mock[Call])

        doc.select(s"#high-value-dealer${sectionPath}").text() must be("Add high value dealer")
      }

      "show Not started info text" in new ViewFixture {
        override def view: HtmlFormat.Appendable = views.html.registrationprogress.section("hvd", NotStarted, mock[Call])

        doc.select(statusPath).text() must be("Not started")
      }
    }

    "status is Started" must {
      // A started-but-unfinished section still shows the "Add" link wording.
      "show Add [SectionName] link text" in new ViewFixture {
        override def view: HtmlFormat.Appendable = views.html.registrationprogress.section("hvd", Started, mock[Call])

        doc.select(s"#high-value-dealer${sectionPath}").text() must be("Add high value dealer")
      }

      "show Incomplete info text" in new ViewFixture {
        override def view: HtmlFormat.Appendable = views.html.registrationprogress.section("hvd", Started, mock[Call])

        doc.select(statusPath).text() must be ("Incomplete")
      }
    }

    "status is Complete" must {
      "show Edit [SectionName] link text" in new ViewFixture {
        override def view: HtmlFormat.Appendable = views.html.registrationprogress.section("hvd", Completed, mock[Call])

        // ownText() ignores nested elements' text, unlike text() used above.
        doc.select(s"#high-value-dealer${sectionPath}").first().ownText() must be("Edit high value dealer")
      }

      "show Complete info text" in new ViewFixture {
        override def view: HtmlFormat.Appendable = views.html.registrationprogress.section("hvd", Completed, mock[Call])

        doc.select(statusPath).first().ownText() must be ("Completed")
      }
    }
  }
}
| hmrc/amls-frontend | test/views/registrationprogress/sectionSpec.scala | Scala | apache-2.0 | 2,831 |
package com.twitter.util.registry
import java.util.logging.Logger
/**
 * Utility for library owners to register information about their libraries in
 * the registry.
 */
object Library {
  private[this] val log = Logger.getLogger(getClass.getName)

  // Sentinel value stored at the library's prefix to mark it as registered.
  private[registry] val Registered = "__registered"

  /**
   * Registers your library with util-registry under the "library" namespace.
   *
   * May only be called once with a given `name`. `params` and `name` must abide
   * by the guidelines for keys and values set in [[Registry]].
   *
   * @return None if a library has already been registered with the given `name`,
   * or a [[Roster]] for resetting existing fields in the map otherwise.
   */
  def register(name: String, params: Map[String, String]): Option[Roster] = {
    val registry = GlobalRegistry.get
    val prefix = Seq("library", name)

    // `put` returns the previous value; a non-empty result means the name is taken.
    registry.put(prefix, Registered) match {
      case Some(previous) =>
        // Restore the first registrant's value and refuse the duplicate.
        registry.put(prefix, previous)
        log.warning(s"""Tried to register a second library named "$name"""")
        None
      case None =>
        for ((key, value) <- params) {
          registry.put(prefix :+ key, value)
        }
        Some(new Roster(prefix, params.keySet, log))
    }
  }
}
/**
 * Can change the value of params that were already set in the registry, but cannot
 * add new ones.
 */
class Roster private[registry](scope: Seq[String], keys: Set[String], log: Logger) {
  private[this] val registry = GlobalRegistry.get

  /**
   * Changes a key's value if it already exists in the registry.
   *
   * Only works in the existing `scope`, and only for keys that were present
   * at registration time.
   *
   * `key` and `value` must abide by the guidelines for keys and values set
   * in [[Registry]].
   *
   * @return true iff `key` was registered and a previous value was replaced
   */
  def update(key: String, value: String): Boolean = {
    if (!keys(key)) {
      false
    } else {
      val fullKey = scope :+ key
      registry.put(fullKey, value) match {
        case Some(_) => true
        case None =>
          // TODO: it's impossible to remove bad entries with the current registry API
          // but we should be OK because this is impossible in theory
          val serialized = s""""(${fullKey.mkString(",")})""""
          log.warning(
            s"expected there to be a value at key $serialized in registry but it was empty.")
          false
      }
    }
  }
}
| BuoyantIO/twitter-util | util-registry/src/main/scala/com/twitter/util/registry/Library.scala | Scala | apache-2.0 | 2,296 |
package net.liftmodules.staticsitemap
import net.liftweb.sitemap.{Menu, Loc}
import net.liftweb.sitemap.Loc.LocParam
import net.liftweb.util.NamedPartialFunction
import net.liftweb.common.Box
import net.liftweb.sitemap.Menu.Menuable
import net.liftmodules.staticsitemap.path.PathParts
/**
 * A Route without a parameter.
 *
 * @param name A unique name for this route.
 * @param url The url that should match this route.
 * @param templatePath The path to the template that this Route is intended to render
 * @param linkText The text for the link in the generated SiteMap (probably won't use this, but
 *                 I've included it for flexibility)
 * @param params Loc parameters attached to the generated Loc.
 * @param postExtractionHooks Hooks applied to the extracted (unit) parameter after URL matching.
 */
case class ParameterlessRoute(
  override val name: String,
  url: String,
  templatePath: PathParts,
  override val linkText: Loc.LinkText[Unit],
  override val params: List[LocParam[Unit]],
  postExtractionHooks: Seq[NamedPartialFunction[Box[Unit], Unit]] = Nil
  ) extends Menuable(
    name,
    linkText,
    Route.locPathFor(templatePath),
    false,
    params,
    Nil
  ) with Route[Unit] {
  route =>

  // Loc implementation that fires every applicable post-extraction hook.
  lazy val linkable =
    new StaticUrlPFLoc(name, templatePath, url, linkText, params) {
      override def postExtraction(param: Box[Unit]) {
        postExtractionHooks foreach {
          f =>
            if (f.isDefinedAt(param)) {
              f.apply(param)
            }
        }
      }
    }

  override lazy val toMenu: Menu = {
    Menu(linkable, submenus: _*)
  }

  override def >>(appendParam: Loc.LocParam[Unit]): Menuable = this >+ appendParam

  // FIX: these now use `copy` so `postExtractionHooks` is preserved; the previous
  // implementations reconstructed the route and silently dropped the hooks.
  def >+(appendParam: Loc.LocParam[Unit]): ParameterlessRoute =
    copy(params = params ::: List(appendParam))

  def >++(appendParams: List[Loc.LocParam[Unit]]): ParameterlessRoute =
    copy(params = params ::: appendParams)

  def >::(prependParam: Loc.LocParam[Unit]): ParameterlessRoute =
    copy(params = prependParam :: params)

  def >:::(prependParams: List[Loc.LocParam[Unit]]): ParameterlessRoute =
    copy(params = prependParams ::: params)

  // FIX: arguments now line up with the labels in the format string
  // (name and url were previously swapped).
  override def toString =
    "ParameterlessRoute(name=\"%s\", url=\"%s\", template=\"%s\", params=%s)"
      .format(name, url, templatePath, params)
}
| jeffmay/lift-staticsitemap | src/main/scala/net/liftmodules/staticsitemap/ParameterlessRoute.scala | Scala | apache-2.0 | 2,294 |
/**
* ____ __ ____ ____ ____,,___ ____ __ __ ____
* ( _ \\ /__\\ (_ )(_ _)( ___)/ __) ( _ \\( )( )( _ \\ Read
* ) / /(__)\\ / /_ _)(_ )__) \\__ \\ )___/ )(__)( ) _ < README.txt
* (_)\\_)(__)(__)(____)(____)(____)(___/ (__) (______)(____/ LICENSE.txt
*/
package razie.wiki.model
import razie.diesel.dom.WikiDomain
import razie.hosting.WikiReactors
import razie.wiki.util.DslProps
import scala.concurrent.{Future, Promise}
import scala.util.Try
/** a hosted wiki instance, i.e. independent hosted website.
  *
  * It has its own index, domain and is independent of other wikis
  *
  * It has its own users and admins/mods etc
  *
  * Wikis can mixin other wikis - linearized multiple inheritance.
  */
trait Reactor {
  // Completed by implementations when the reactor finished initializing —
  // TODO confirm: no completion site is visible in this file.
  val ready: Promise[Boolean] = Promise[Boolean]()

  // Unique name of this reactor / realm.
  def realm: String
  // Reactors consulted when something is not found in this one.
  def fallBacks: List[Reactor]
  // The wiki entry that defines this reactor, if any.
  def we: Option[WikiEntry]

  def wiki: WikiInst
  def domain: WikiDomain

  /** list of supers - all mixins reactors linearized */
  val supers: Array[String]

  /** all mixed in reactors, linearized */
  val mixins: Mixins[Reactor]

  // Wiki entry of the club associated with this reactor, if configured.
  def club: Option[WikiEntry]
  // Roles / admin emails configured in the reactor's website properties.
  def userRoles: List[String]
  def adminEmails: List[String]

  /** Admin:UserHome if user or Admin:Home or Reactor:realm if nothing else is defined */
  def mainPage(au: Option[WikiUser]): WID

  def websiteProps: DslProps

  //todo fallback also in real time to rk, per prop
  // todo listen to updates and reload
  def props: DslProps

  /** the membership level of the owner (see if it's paid etc) */
  def membershipLevel:Option[String]
}
/** a hosted wiki instance, i.e. independent hosted website.
  *
  * It has its own index, domain and is independent of other wikis
  *
  * It has its own users and admins/mods etc
  *
  * Wikis can mixin other wikis - linearized multiple inheritance.
  */
abstract class ReactorImpl (val realm:String, val fallBacks:List[Reactor] = Nil, val we:Option[WikiEntry]) extends Reactor {
  val mixins = new Mixins[Reactor](fallBacks)

  // NOTE(review): `club` reads from `props` while the roles/emails read from
  // `websiteProps`; both are lazy, so nothing resolves until first access.
  lazy val club = props.wprop("club").flatMap(Wikis.find)
  lazy val userRoles = websiteProps.prop("userRoles").toList.flatMap(_.split(','))
  lazy val adminEmails = websiteProps.prop("adminEmails").toList.flatMap(_.split(','))

  // list of super reactors linearized
  val supers : Array[String] = {
    // the root "rk" reactor is its own only super; everyone else flattens mixins
    if(realm == WikiReactors.RK) Array(WikiReactors.RK)
    else mixins.flattened.map(_.realm).toArray
  }

  /** Admin:UserHome if user or Admin:Home or Reactor:realm if nothing else is defined */
  def mainPage(au:Option[WikiUser]) = {
    def dflt = WID("Admin", "UserHome").r(realm).page.map(_.wid)

    // Earlier per-role resolution kept for reference:
//    val p = au.flatMap {user=>
//      if(adminEmails.contains(Dec(user.email)))
//        WID("Admin", "AdminHome").r(realm).page.map(_.wid) orElse dflt
//      else
//        club.flatMap(c=> user.myPages(c.realm, "Club").find(_.uwid.id == c._id)).flatMap {uw=>
//          WID("Admin", uw.role+"Home").r(realm).page.map(_.wid) orElse dflt
//        } orElse
//        dflt

    // Currently: any logged-in user gets the default UserHome page (if it exists),
    // then fall back to Admin:Home, then to the reactor's own page.
    val p = au.flatMap {user=>
      dflt
    } orElse WID("Admin", "Home").r(realm).page.map(_.wid) getOrElse WID("Reactor", realm).r(realm)
    p
  }

  // Builds a DslProps view over the given section(s) of the reactor's page,
  // falling back to the global defaults when the page cannot be found.
  private def sectionProps(section:String) = {
    we.orElse(WID("Reactor", realm).r(realm).page).map{p=>
      new DslProps(Some(p), section)
    } getOrElse
      WikiReactors.fallbackProps
  }

  /* deprecated - use props */
  lazy val websiteProps = sectionProps("website,properties")// :: sectionProps("properties")

  //todo fallback also in real time to rk, per prop
  // todo listen to updates and reload
  lazy val props = sectionProps ("properties,website") //:: sectionProps("website")

  /** the membership level of the owner (see if it's paid etc) */
  override def membershipLevel:Option[String] = {
    we.flatMap(_.owner).map(_.membershipLevel)
  }
}
| razie/diesel-hydra | diesel/src/main/scala/razie/wiki/model/Reactor.scala | Scala | apache-2.0 | 3,835 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle
import _root_.scalariform.parser.CompilationUnit
import _root_.scalariform.lexer.Token
// scalastyle:off regex
object PrintAst {

  /** Manual entry point: prints the Scalariform token lines and AST of a small snippet. */
  def main(args: Array[String]): Unit = {
    // This literal is fed to the parser verbatim; do not reformat it.
    val source = """package foobar
class Foobar {
  def foobar() = {
    val f1 = 5
    val f2 = 5 :: Nil
    println("it=" + it.toList)
  }
}
"""
    printAst(source)
  }

  /** Parses `source` with Scalariform and dumps its tokens-per-line and AST to stdout. */
  def printAst(source: String): Unit = {
    val cu = new CheckerUtils()
    val lines = Checker.parseLines(source)
    cu.parseScalariform(source) match {
      case None => println("Parse error")
      case Some(ast) => printAst(lines, ast.ast)
    }
  }

  // Currently unused helper, kept for debugging individual tokens.
  private def lineNumber(lines: Lines, token: Token) = lines.toLineColumn(token.offset)

  /** Prints the tokens grouped per source line (ascending), then the full AST. */
  private def printAst(lines: Lines, ast: CompilationUnit): Unit = {
    val lineMap = ast.tokens.groupBy(t => lines.toLineColumn(t.offset).get.line)
    // `sorted` replaces sortWith((a, b) => a < b); direct map application
    // replaces the partial `lineMap.get(ln).get` chain (keys come from the map itself).
    for (ln <- lineMap.keys.toList.sorted) {
      println(("%02d" format ln) + ": " + lineMap(ln))
    }
    println("ast=" + ast)
  }
}
| kahosato/scalastyle | src/main/scala/org/scalastyle/PrintAst.scala | Scala | apache-2.0 | 1,830 |
package com.github.j5ik2o.forseti.domain.accessToken
import java.time.ZonedDateTime
import com.github.j5ik2o.forseti.domain.Scope
import com.github.j5ik2o.forseti.domain.client.ClientId
import com.github.j5ik2o.forseti.domain.support.Entity
import com.github.j5ik2o.forseti.domain.user.UserId
import scala.concurrent.duration.Duration
import scalaz.Maybe
/**
 * An access token entity issued to a client, optionally bound to a user,
 * carrying the token value, scope, expiry and optional refresh-token data.
 */
trait AccessToken extends Entity[AccessTokenId] {
  // Client the token was issued to.
  val clientId: ClientId
  // User the token is bound to, if any (client-only tokens have none).
  val userId: Maybe[UserId]
  // Kind of token (see AccessTokenType).
  val tokenType: AccessTokenType.Value
  // The token string itself.
  val value: String
  // Lifetime of the access token.
  val expiresIn: Duration
  // Optional refresh token and its lifetime.
  val refreshToken: Maybe[String]
  val refreshTokenExpiresIn: Maybe[Duration]
  // Scope granted to this token.
  val scope: Scope
  // Creation / last-update timestamps.
  val createAt: ZonedDateTime
  val updateAt: ZonedDateTime
}
object AccessToken {

  /** Creates an [[AccessToken]] backed by the private default implementation. */
  def apply(
      id: AccessTokenId,
      clientId: ClientId,
      userId: Maybe[UserId],
      tokenType: AccessTokenType.Value,
      value: String,
      expiresIn: Duration,
      refreshToken: Maybe[String],
      refreshTokenExpiresIn: Maybe[Duration],
      scope: Scope,
      createAt: ZonedDateTime,
      updateAt: ZonedDateTime
  ): AccessToken =
    Default(
      id = id,
      clientId = clientId,
      userId = userId,
      tokenType = tokenType,
      value = value,
      expiresIn = expiresIn,
      refreshToken = refreshToken,
      refreshTokenExpiresIn = refreshTokenExpiresIn,
      scope = scope,
      createAt = createAt,
      updateAt = updateAt
    )

  // Plain case-class carrier for the AccessToken contract; hidden so callers
  // only ever see the AccessToken trait.
  private case class Default(
      id: AccessTokenId,
      clientId: ClientId,
      userId: Maybe[UserId],
      tokenType: AccessTokenType.Value,
      value: String,
      expiresIn: Duration,
      refreshToken: Maybe[String],
      refreshTokenExpiresIn: Maybe[Duration],
      scope: Scope,
      createAt: ZonedDateTime,
      updateAt: ZonedDateTime
  ) extends AccessToken
}
| j5ik2o/forseti | domain/src/main/scala/com/github/j5ik2o/forseti/domain/accessToken/AccessToken.scala | Scala | mit | 1,690 |
package modules
import com.google.inject.AbstractModule
import models.daos.{ AuthTokenDAO, AuthTokenDAOImpl }
import models.services.{ AuthTokenService, AuthTokenServiceImpl }
import net.codingwell.scalaguice.ScalaModule
import play.api.ApplicationLoader
/**
 * The base Guice module.
 */
class BaseModule extends AbstractModule with ScalaModule {

  /**
   * Configures the module by binding interfaces to their implementations.
   */
  def configure(): Unit = {
    // FIX: removed a stray, effect-free reference to `ApplicationLoader`
    // that had been left in the method body (a discarded pure expression).
    bind[AuthTokenDAO].to[AuthTokenDAOImpl]
    bind[AuthTokenService].to[AuthTokenServiceImpl]
  }
}
| stanikol/walnuts | server/app/modules/BaseModule.scala | Scala | apache-2.0 | 544 |
package bytecode
import sai.bytecode.Method
import sai.bytecode.instruction.Instruction
object BasicBlocks {

  /**
   * Partitions the instructions of `method` into basic blocks,
   * creating one block per leader instruction.
   */
  def apply(method: Method): List[BasicBlock] = {
    for (instruction <- method.instructions if isLeader(instruction))
      yield new BasicBlock(method, leader = instruction)
  }

  /**
   * An instruction is a leader unless the successors of all its predecessors
   * are exactly this one instruction (i.e. control can only fall through to it
   * from a single place).
   */
  private def isLeader(i: Instruction) = {
    // `List(i)` replaces the unidiomatic constructor call `::(i, Nil)` (same value).
    i.predecessors.flatMap(_.successors) != List(i)
  }
}
| oliverhaase/sai | src/sai/bytecode/BasicBlocks.scala | Scala | mit | 399 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala.manual
import java.io._
import java.util.Random
import org.apache.flink.api.common.ExecutionConfig
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.common.typeutils.CompositeType
import org.apache.flink.api.java.typeutils.runtime.RuntimeSerializerFactory
import org.apache.flink.api.scala._
import org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync
import org.apache.flink.runtime.memory.MemoryManagerBuilder
import org.apache.flink.runtime.operators.sort.UnilateralSortMerger
import org.apache.flink.runtime.operators.testutils.DummyInvokable
import org.apache.flink.util.{MutableObjectIterator, TestLogger}
import org.junit.Assert._
/**
 * Manual integration test: sorts millions of generated string tuples with
 * Flink's UnilateralSortMerger and verifies the order against Unix `sort`.
 */
class MassiveCaseClassSortingITCase extends TestLogger {

  // Fixed seed so the generated input is reproducible across runs.
  val SEED : Long = 347569784659278346L

  def testStringTuplesSorting() {

    val NUM_STRINGS = 3000000
    var input: File = null
    var sorted: File = null

    try {
      // Generate the input file of whitespace-separated tuples.
      input = generateFileWithStringTuples(NUM_STRINGS,
        "http://some-uri.com/that/is/a/common/prefix/to/all")

      sorted = File.createTempFile("sorted_strings", "txt")

      // Ground truth: sort the same file with the system `sort` under LC_ALL=C
      // so the byte ordering matches the binary comparison of the sorter.
      val command = Array("/bin/bash", "-c", "export LC_ALL=\"C\" && cat \""
        + input.getAbsolutePath + "\" | sort > \"" + sorted.getAbsolutePath + "\"")

      var p: Process = null
      try {
        p = Runtime.getRuntime.exec(command)
        val retCode = p.waitFor()
        if (retCode != 0) {
          throw new Exception("Command failed with return code " + retCode)
        }
        p = null
      }
      finally {
        // Only destroys the process if it did not complete normally.
        if (p != null) {
          p.destroy()
        }
      }

      var sorter: UnilateralSortMerger[StringTuple] = null
      var reader: BufferedReader = null
      var verifyReader: BufferedReader = null

      try {
        reader = new BufferedReader(new FileReader(input))
        val inputIterator = new StringTupleReader(reader)

        // Build serializer/comparator for StringTuple; compare on fields 0 and 1
        // (key1, key2), both ascending.
        val typeInfo = implicitly[TypeInformation[StringTuple]]
          .asInstanceOf[CompositeType[StringTuple]]

        val serializer = typeInfo.createSerializer(new ExecutionConfig)
        val comparator = typeInfo.createComparator(
          Array(0, 1),
          Array(true, true),
          0,
          new ExecutionConfig)

        val mm = MemoryManagerBuilder.newBuilder.setMemorySize(1024 * 1024).build
        val ioMan = new IOManagerAsync()

        sorter = new UnilateralSortMerger[StringTuple](mm, ioMan, inputIterator,
          new DummyInvokable(),
          new RuntimeSerializerFactory[StringTuple](serializer, classOf[StringTuple]),
          comparator, 1.0, 4, 0.8f, true /*use large record handler*/, false)

        val sortedData = sorter.getIterator
        reader.close()

        // Walk the `sort`-produced file and Flink's sorted stream in lockstep.
        verifyReader = new BufferedReader(new FileReader(sorted))
        val verifyIterator = new StringTupleReader(verifyReader)

        var num = 0
        var hasMore = true

        while (hasMore) {
          val next = verifyIterator.next(null)

          if (next != null ) {
            num += 1

            val nextFromFlinkSort = sortedData.next(null)

            assertNotNull(nextFromFlinkSort)

            assertEquals(next.key1, nextFromFlinkSort.key1)
            assertEquals(next.key2, nextFromFlinkSort.key2)

            // assert array equals does not work here
            assertEquals(next.value.length, nextFromFlinkSort.value.length)
            for (i <- 0 until next.value.length) {
              assertEquals(next.value(i), nextFromFlinkSort.value(i))
            }
          }
          else {
            hasMore = false
          }
        }

        // Both sides must be exhausted and the record count must match.
        assertNull(sortedData.next(null))
        assertEquals(NUM_STRINGS, num)
      }
      finally {
        if (reader != null) {
          reader.close()
        }
        if (verifyReader != null) {
          verifyReader.close()
        }
        if (sorter != null) {
          sorter.close()
        }
      }
    }
    catch {
      // NOTE(review): failures are printed but swallowed — the test cannot fail
      // via exceptions here; it relies on the JUnit asserts above.
      case e: Exception => {
        System.err.println(e.getMessage)
        e.printStackTrace()
        e.getMessage
      }
    }
    finally {
      if (input != null) {
        input.delete()
      }
      if (sorted != null) {
        sorted.delete()
      }
    }
  }

  /**
   * Writes `numStrings` lines to a temp file; each line has 2-6 space-separated
   * components, every component starting with `prefix` followed by 10-29 random
   * printable characters. Returns the generated file.
   */
  private def generateFileWithStringTuples(numStrings: Int, prefix: String): File = {
    val rnd = new Random(SEED)
    val bld = new StringBuilder()
    val f = File.createTempFile("strings", "txt")

    var wrt: BufferedWriter = null
    try {
      wrt = new BufferedWriter(new FileWriter(f))

      for (i <- 0 until numStrings) {
        bld.setLength(0)
        val numComps = rnd.nextInt(5) + 2

        for (z <- 0 until numComps) {
          if (z > 0) {
            bld.append(' ')
          }
          bld.append(prefix)

          val len = rnd.nextInt(20) + 10
          for (k <- 0 until len) {
            // Characters in the printable ASCII range [40, 120).
            val c = (rnd.nextInt(80) + 40).toChar
            bld.append(c)
          }
        }
        val str = bld.toString
        wrt.write(str)
        wrt.newLine()
      }
    }
    finally {
      wrt.close()
    }

    f
  }
}
object MassiveCaseClassSortingITCase {
  /** Manual entry point: runs the string-tuple sorting test once. */
  def main(args: Array[String]) {
    val testCase = new MassiveCaseClassSortingITCase()
    testCase.testStringTuplesSorting()
  }
}
// Record sorted on (key1, key2); `value` keeps every whitespace-split field of the
// source line (including the keys). Note: the generated equals compares the Array
// by reference, which is why the test compares elements individually.
case class StringTuple(key1: String, key2: String, value: Array[String])
/** Reads whitespace-separated StringTuples line-by-line from `reader`. */
class StringTupleReader(val reader: BufferedReader) extends MutableObjectIterator[StringTuple] {

  /**
   * Reads and parses the next line, or returns null at end of input
   * (null is the end-of-data signal of the MutableObjectIterator contract).
   */
  private def readNext(): StringTuple = {
    val line = reader.readLine()
    if (line == null) {
      null
    } else {
      // key1/key2 are the first two fields; `value` keeps the whole split line.
      val parts = line.split(" ")
      StringTuple(parts(0), parts(1), parts)
    }
  }

  // The reuse object is ignored: a fresh tuple is created for every record.
  // Both overloads previously duplicated the parse logic verbatim.
  override def next(reuse: StringTuple): StringTuple = readNext()

  override def next(): StringTuple = readNext()
}
| hequn8128/flink | flink-tests/src/test/scala/org/apache/flink/api/scala/manual/MassiveCaseClassSortingITCase.scala | Scala | apache-2.0 | 6,943 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.controller.test
import scala.concurrent.duration.DurationInt
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import spray.http.StatusCodes._
import spray.httpx.SprayJsonSupport._
import spray.json.DefaultJsonProtocol._
import spray.json._
import whisk.core.controller.WhiskActionsApi
import whisk.core.entity._
import whisk.core.entitlement.Resource
import whisk.core.entitlement.Privilege._
import scala.concurrent.Await
import scala.language.postfixOps
import whisk.http.ErrorResponse
import whisk.http.Messages
/**
* Tests Packages API.
*
* Unit tests of the controller service as a standalone component.
* These tests exercise a fresh instance of the service object in memory -- these
* tests do NOT communication with a whisk deployment.
*
*
* @Idioglossia
* "using Specification DSL to write unit tests, as in should, must, not, be"
* "using Specs2RouteTest DSL to chain HTTP requests for unit testing, as in ~>"
*/
@RunWith(classOf[JUnitRunner])
class PackageActionsApiTests extends ControllerTestCommon with WhiskActionsApi {
/** Package Actions API tests */
behavior of "Package Actions API"
val creds = WhiskAuth(Subject(), AuthKey()).toIdentity
val namespace = EntityPath(creds.subject.asString)
val collectionPath = s"/${EntityPath.DEFAULT}/${collection.path}"
def aname = MakeName.next("package_action_tests")
//// GET /actions/package/
it should "list all actions in package" in {
implicit val tid = transid()
val provider = WhiskPackage(namespace, aname)
val actions = (1 to 2).map { _ =>
WhiskAction(provider.fullPath, aname, jsDefault("??"))
}
put(entityStore, provider)
actions foreach { put(entityStore, _) }
whisk.utils.retry {
Get(s"$collectionPath/${provider.name}/") ~> sealRoute(routes(creds)) ~> check {
status should be(OK)
val response = responseAs[List[JsObject]]
actions.length should be(response.length)
actions forall { a => response contains a.summaryAsJson } should be(true)
}
}
}
it should "list all actions in package binding" in {
implicit val tid = transid()
val provider = WhiskPackage(namespace, aname)
val reference = WhiskPackage(namespace, aname, provider.bind)
val actions = (1 to 2).map { _ =>
WhiskAction(provider.fullPath, aname, jsDefault("??"))
}
put(entityStore, provider)
put(entityStore, reference)
actions foreach { put(entityStore, _) }
whisk.utils.retry {
Get(s"$collectionPath/${reference.name}/") ~> sealRoute(routes(creds)) ~> check {
status should be(OK)
val response = responseAs[List[JsObject]]
actions.length should be(response.length)
actions forall { a => response contains a.summaryAsJson } should be(true)
}
}
}
it should "include action in package when listing all actions" in {
implicit val tid = transid()
val provider = WhiskPackage(namespace, aname, None)
val action1 = WhiskAction(namespace, aname, jsDefault("??"), Parameters(), ActionLimits())
val action2 = WhiskAction(provider.fullPath, aname, jsDefault("??"))
put(entityStore, provider)
put(entityStore, action1)
put(entityStore, action2)
whisk.utils.retry {
Get(s"$collectionPath") ~> sealRoute(routes(creds)) ~> check {
status should be(OK)
val response = responseAs[List[JsObject]]
response.length should be(2)
response contains action1.summaryAsJson should be(true)
response contains action2.summaryAsJson should be(true)
}
}
}
it should "reject ambiguous list actions in package without trailing slash" in {
implicit val tid = transid()
val provider = WhiskPackage(namespace, aname, None)
put(entityStore, provider)
whisk.utils.retry {
Get(s"$collectionPath/${provider.name}") ~> sealRoute(routes(creds)) ~> check {
status should be(Conflict)
}
}
}
it should "reject invalid verb on get package actions" in {
implicit val tid = transid()
val provider = WhiskPackage(namespace, aname, None)
put(entityStore, provider)
Delete(s"$collectionPath/${provider.name}/") ~> sealRoute(routes(creds)) ~> check {
status should be(NotFound)
}
}
//// PUT /actions/package/name
it should "put action in package" in {
implicit val tid = transid()
val provider = WhiskPackage(namespace, aname)
val action = WhiskAction(provider.fullPath, aname, jsDefault("??"))
val content = WhiskActionPut(Some(action.exec))
put(entityStore, provider)
Put(s"$collectionPath/${provider.name}/${action.name}", content) ~> sealRoute(routes(creds)) ~> check {
deleteAction(action.docid)
status should be(OK)
val response = responseAs[WhiskAction]
response should be(WhiskAction(action.namespace, action.name, action.exec,
action.parameters, action.limits, action.version,
action.publish, action.annotations ++ Parameters(WhiskAction.execFieldName, NODEJS6)))
}
}
it should "reject put action in package that does not exist" in {
implicit val tid = transid()
val provider = WhiskPackage(namespace, aname)
val action = WhiskAction(provider.fullPath, aname, jsDefault("??"))
val content = WhiskActionPut(Some(action.exec))
Put(s"$collectionPath/${provider.name}/${action.name}", content) ~> sealRoute(routes(creds)) ~> check {
status should be(NotFound)
}
}
it should "reject put action in package binding where package doesn't exist" in {
implicit val tid = transid()
val provider = WhiskPackage(namespace, aname, None, publish = true)
val binding = WhiskPackage(namespace, aname, provider.bind)
val content = WhiskActionPut(Some(jsDefault("??")))
put(entityStore, binding)
Put(s"$collectionPath/${binding.name}/$aname", content) ~> sealRoute(routes(creds)) ~> check {
status should be(BadRequest)
}
}
it should "reject put action in package binding" in {
implicit val tid = transid()
val provider = WhiskPackage(namespace, aname, None, publish = true)
val binding = WhiskPackage(namespace, aname, provider.bind)
val content = WhiskActionPut(Some(jsDefault("??")))
put(entityStore, provider)
put(entityStore, binding)
Put(s"$collectionPath/${binding.name}/$aname", content) ~> sealRoute(routes(creds)) ~> check {
status should be(BadRequest)
}
}
it should "reject put action in package owned by different subject" in {
implicit val tid = transid()
val provider = WhiskPackage(EntityPath(Subject().asString), aname, publish = true)
val content = WhiskActionPut(Some(jsDefault("??")))
put(entityStore, provider)
Put(s"/${provider.namespace}/${collection.path}/${provider.name}/$aname", content) ~> sealRoute(routes(creds)) ~> check {
status should be(Forbidden)
}
}
//// DEL /actions/package/name
it should "delete action in package" in {
implicit val tid = transid()
val provider = WhiskPackage(namespace, aname)
val action = WhiskAction(provider.fullPath, aname, jsDefault("??"))
put(entityStore, provider)
put(entityStore, action)
// it should "reject delete action in package owned by different subject" in {
val auser = WhiskAuth(Subject(), AuthKey()).toIdentity
Delete(s"/${provider.namespace}/${collection.path}/${provider.name}/${action.name}") ~> sealRoute(routes(auser)) ~> check {
status should be(Forbidden)
}
Delete(s"$collectionPath/${provider.name}/${action.name}") ~> sealRoute(routes(creds)) ~> check {
status should be(OK)
val response = responseAs[WhiskAction]
response should be(action)
}
}
it should "reject delete action in package that does not exist" in {
implicit val tid = transid()
val provider = WhiskPackage(namespace, aname)
val action = WhiskAction(provider.fullPath, aname, jsDefault("??"))
put(entityStore, action)
Delete(s"$collectionPath/${provider.name}/${action.name}") ~> sealRoute(routes(creds)) ~> check {
status should be(NotFound)
}
}
it should "reject delete non-existent action in package" in {
implicit val tid = transid()
val provider = WhiskPackage(namespace, aname)
val action = WhiskAction(provider.fullPath, aname, jsDefault("??"))
put(entityStore, provider)
Delete(s"$collectionPath/${provider.name}/${action.name}") ~> sealRoute(routes(creds)) ~> check {
status should be(NotFound)
}
}
it should "reject delete action in package binding where package doesn't exist" in {
implicit val tid = transid()
val provider = WhiskPackage(namespace, aname, None, publish = true)
val binding = WhiskPackage(namespace, aname, provider.bind)
val content = WhiskActionPut(Some(jsDefault("??")))
put(entityStore, binding)
Delete(s"$collectionPath/${binding.name}/$aname") ~> sealRoute(routes(creds)) ~> check {
status should be(BadRequest)
}
}
it should "reject delete action in package binding" in {
implicit val tid = transid()
val provider = WhiskPackage(namespace, aname, None, publish = true)
val binding = WhiskPackage(namespace, aname, provider.bind)
val content = WhiskActionPut(Some(jsDefault("??")))
put(entityStore, provider)
put(entityStore, binding)
Delete(s"$collectionPath/${binding.name}/$aname") ~> sealRoute(routes(creds)) ~> check {
status should be(BadRequest)
}
}
it should "reject delete action in package owned by different subject" in {
implicit val tid = transid()
val provider = WhiskPackage(EntityPath(Subject().asString), aname, publish = true)
val action = WhiskAction(provider.fullPath, aname, jsDefault("??"))
put(entityStore, provider)
put(entityStore, action)
Delete(s"/${provider.namespace}/${collection.path}/${provider.name}/${action.name}") ~> sealRoute(routes(creds)) ~> check {
status should be(Forbidden)
}
}
//// GET /actions/package/name
it should "get action in package" in {
implicit val tid = transid()
val provider = WhiskPackage(namespace, aname, None, Parameters("p", "P"), publish = true)
val action = WhiskAction(provider.fullPath, aname, jsDefault("??"), Parameters("a", "A"))
put(entityStore, provider)
put(entityStore, action)
whisk.utils.retry {
Get(s"$collectionPath/${provider.name}/${action.name}") ~> sealRoute(routes(creds)) ~> check {
status should be(OK)
val response = responseAs[WhiskAction]
response should be(action inherit provider.parameters)
}
}
}
it should "get action in package binding with public package" in {
implicit val tid = transid()
val auser = WhiskAuth(Subject(), AuthKey()).toIdentity
val provider = WhiskPackage(namespace, aname, None, publish = true)
val binding = WhiskPackage(EntityPath(auser.subject.asString), aname, provider.bind, Parameters("b", "B"))
val action = WhiskAction(provider.fullPath, aname, jsDefault("??"))
put(entityStore, provider)
put(entityStore, binding)
put(entityStore, action)
whisk.utils.retry {
Get(s"$collectionPath/${binding.name}/${action.name}") ~> sealRoute(routes(auser)) ~> check {
status should be(OK)
val response = responseAs[WhiskAction]
response should be(action inherit (provider.parameters ++ binding.parameters))
}
}
}
it should "get action in package binding with public package with overriding parameters" in {
implicit val tid = transid()
val auser = WhiskAuth(Subject(), AuthKey()).toIdentity
val provider = WhiskPackage(namespace, aname, None, Parameters("p", "P"), publish = true)
val binding = WhiskPackage(EntityPath(auser.subject.asString), aname, provider.bind, Parameters("b", "B"))
val action = WhiskAction(provider.fullPath, aname, jsDefault("??"), Parameters("a", "A") ++ Parameters("b", "b"))
put(entityStore, provider)
put(entityStore, binding)
put(entityStore, action)
whisk.utils.retry {
Get(s"$collectionPath/${binding.name}/${action.name}") ~> sealRoute(routes(auser)) ~> check {
status should be(OK)
val response = responseAs[WhiskAction]
response should be(action inherit (provider.parameters ++ binding.parameters))
}
}
}
// NOTE: does not work because the entitlement model does not allow for an explicit
// check on either one or both of the binding and package, hence `ignore`.
ignore should "get action in package binding with explicit entitlement grant" in {
  implicit val tid = transid()
  val auser = WhiskAuth(Subject(), AuthKey()).toIdentity
  // Private package: access would normally be rejected for auser.
  val provider = WhiskPackage(namespace, aname, None, Parameters("p", "P"), publish = false)
  val binding = WhiskPackage(EntityPath(auser.subject.asString), aname, provider.bind, Parameters("b", "B"))
  val action = WhiskAction(provider.fullPath, aname, jsDefault("??"), Parameters("a", "A"))
  put(entityStore, provider)
  put(entityStore, binding)
  put(entityStore, action)
  // Grant auser explicit READ on the provider package before issuing the request.
  val pkgaccess = Resource(provider.namespace, PACKAGES, Some(provider.name.asString))
  Await.result(entitlementProvider.grant(auser.subject, READ, pkgaccess), 1 second)
  Get(s"$collectionPath/${binding.name}/${action.name}") ~> sealRoute(routes(auser)) ~> check {
    status should be(OK)
    val response = responseAs[WhiskAction]
    response should be(action inherit (provider.parameters ++ binding.parameters))
  }
}
// The action document exists but its enclosing package was never stored: 404.
it should "reject get action in package that does not exist" in {
  implicit val tid = transid()
  val provider = WhiskPackage(namespace, aname)
  val action = WhiskAction(provider.fullPath, aname, jsDefault("??"))
  // Intentionally store only the action, not the package.
  put(entityStore, action)
  Get(s"$collectionPath/${provider.name}/${action.name}") ~> sealRoute(routes(creds)) ~> check {
    status should be(NotFound)
  }
}

// The package exists but the action inside it was never stored: 404.
it should "reject get non-existent action in package" in {
  implicit val tid = transid()
  val provider = WhiskPackage(namespace, aname)
  val action = WhiskAction(provider.fullPath, aname, jsDefault("??"))
  // Intentionally store only the package, not the action.
  put(entityStore, provider)
  Get(s"$collectionPath/${provider.name}/${action.name}") ~> sealRoute(routes(creds)) ~> check {
    status should be(NotFound)
  }
}
// The binding itself was never stored, so resolving the action through it must 404.
// (Removed the unused local `val name = aname` present in the original.)
it should "reject get action in package binding that does not exist" in {
  implicit val tid = transid()
  val auser = WhiskAuth(Subject(), AuthKey()).toIdentity
  val provider = WhiskPackage(namespace, aname, None, Parameters("p", "P"), publish = true)
  val binding = WhiskPackage(EntityPath(auser.subject.asString), aname, provider.bind, Parameters("b", "B"))
  val action = WhiskAction(provider.fullPath, aname, jsDefault("??"), Parameters("a", "A"))
  // Intentionally do not store the binding.
  put(entityStore, provider)
  put(entityStore, action)
  Get(s"$collectionPath/${binding.name}/${action.name}") ~> sealRoute(routes(auser)) ~> check {
    status should be(NotFound)
  }
}
// The binding exists but its referenced package was never stored. The route
// answers Forbidden rather than NotFound so it does not leak whether the
// provider package exists.
// (Removed the unused local `val name = aname` present in the original.)
it should "reject get action in package binding with package that does not exist" in {
  implicit val tid = transid()
  val auser = WhiskAuth(Subject(), AuthKey()).toIdentity
  val provider = WhiskPackage(namespace, aname, None, Parameters("p", "P"), publish = true)
  val binding = WhiskPackage(EntityPath(auser.subject.asString), aname, provider.bind, Parameters("b", "B"))
  val action = WhiskAction(provider.fullPath, aname, jsDefault("??"), Parameters("a", "A"))
  // Intentionally do not store the provider package.
  put(entityStore, binding)
  put(entityStore, action)
  Get(s"$collectionPath/${binding.name}/${action.name}") ~> sealRoute(routes(auser)) ~> check {
    status should be(Forbidden) // do not leak that package does not exist
  }
}
// Provider and binding exist but the action was never stored: 404.
// (Removed the unused local `val name = aname` present in the original.)
it should "reject get non-existing action in package binding" in {
  implicit val tid = transid()
  val auser = WhiskAuth(Subject(), AuthKey()).toIdentity
  val provider = WhiskPackage(namespace, aname, None, Parameters("p", "P"), publish = true)
  val binding = WhiskPackage(EntityPath(auser.subject.asString), aname, provider.bind, Parameters("b", "B"))
  val action = WhiskAction(provider.fullPath, aname, jsDefault("??"), Parameters("a", "A"))
  // Intentionally do not store the action.
  put(entityStore, provider)
  put(entityStore, binding)
  Get(s"$collectionPath/${binding.name}/${action.name}") ~> sealRoute(routes(auser)) ~> check {
    status should be(NotFound)
  }
}
// Everything exists, but the provider package is private (publish = false), so
// the non-owner reading through a binding is rejected with Forbidden.
it should "reject get action in package binding with private package" in {
  implicit val tid = transid()
  val auser = WhiskAuth(Subject(), AuthKey()).toIdentity
  val provider = WhiskPackage(namespace, aname, None, Parameters("p", "P"), publish = false)
  val binding = WhiskPackage(EntityPath(auser.subject.asString), aname, provider.bind, Parameters("b", "B"))
  val action = WhiskAction(provider.fullPath, aname, jsDefault("??"), Parameters("a", "A"))
  put(entityStore, provider)
  put(entityStore, binding)
  put(entityStore, action)
  Get(s"$collectionPath/${binding.name}/${action.name}") ~> sealRoute(routes(auser)) ~> check {
    status should be(Forbidden)
  }
}
//// POST /actions/name
// The package owner may always invoke an action in their own package; the
// route answers 202 Accepted with an activation id.
it should "allow owner to invoke an action in package" in {
  implicit val tid = transid()
  val provider = WhiskPackage(namespace, aname)
  val action = WhiskAction(provider.fullPath, aname, jsDefault("??"))
  val content = JsObject("xxx" -> "yyy".toJson)
  put(entityStore, provider)
  put(entityStore, action)
  Post(s"$collectionPath/${provider.name}/${action.name}", content) ~> sealRoute(routes(creds)) ~> check {
    status should be(Accepted)
    val response = responseAs[JsObject]
    // Async invoke: the body carries the activation id, not the result.
    response.fields("activationId") should not be None
  }
}

// A different subject (auser) invokes directly in the owner's namespace; this
// is allowed because the package is public.
it should "allow non-owner to invoke an action in public package" in {
  implicit val tid = transid()
  val auser = WhiskAuth(Subject(), AuthKey()).toIdentity
  val provider = WhiskPackage(namespace, aname, publish = true)
  val action = WhiskAction(provider.fullPath, aname, jsDefault("??"))
  val content = JsObject("xxx" -> "yyy".toJson)
  put(entityStore, provider)
  put(entityStore, action)
  // Note: fully-qualified path into the owner's namespace, not $collectionPath.
  Post(s"/$namespace/${collection.path}/${provider.name}/${action.name}", content) ~> sealRoute(routes(auser)) ~> check {
    status should be(Accepted)
    val response = responseAs[JsObject]
    response.fields("activationId") should not be None
  }
}

// Invocation through the subject's own binding onto a public package.
it should "invoke action in package binding with public package" in {
  implicit val tid = transid()
  val auser = WhiskAuth(Subject(), AuthKey()).toIdentity
  val provider = WhiskPackage(namespace, aname, publish = true)
  val reference = WhiskPackage(EntityPath(auser.subject.asString), aname, provider.bind)
  val action = WhiskAction(provider.fullPath, aname, jsDefault("??"))
  val content = JsObject("x" -> "x".toJson, "z" -> "Z".toJson)
  put(entityStore, provider)
  put(entityStore, reference)
  put(entityStore, action)
  Post(s"$collectionPath/${reference.name}/${action.name}", content) ~> sealRoute(routes(auser)) ~> check {
    status should be(Accepted)
    val response = responseAs[JsObject]
    response.fields("activationId") should not be None
  }
}
// NOTE: does not work because the entitlement model does not allow for an explicit
// check on either one or both of the binding and package, hence `ignore`.
ignore should "invoke action in package binding with explicit entitlement grant even if package is not public" in {
  implicit val tid = transid()
  val auser = WhiskAuth(Subject(), AuthKey()).toIdentity
  // Private package; invocation would normally be rejected for auser.
  val provider = WhiskPackage(namespace, aname, publish = false)
  val reference = WhiskPackage(EntityPath(auser.subject.asString), aname, provider.bind)
  val action = WhiskAction(provider.fullPath, aname, jsDefault("??"))
  val content = JsObject("x" -> "x".toJson, "z" -> "Z".toJson)
  put(entityStore, provider)
  put(entityStore, reference)
  put(entityStore, action)
  // Grant auser explicit ACTIVATE on the provider package before invoking.
  val pkgaccess = Resource(provider.namespace, PACKAGES, Some(provider.name.asString))
  Await.result(entitlementProvider.grant(auser.subject, ACTIVATE, pkgaccess), 1 second)
  Post(s"$collectionPath/${reference.name}/${action.name}", content) ~> sealRoute(routes(auser)) ~> check {
    status should be(Accepted)
    val response = responseAs[JsObject]
    response.fields("activationId") should not be None
  }
}
// A non-owner invoking an action in a private package is rejected.
it should "reject non-owner invoking an action in private package" in {
  implicit val tid = transid()
  val auser = WhiskAuth(Subject(), AuthKey()).toIdentity
  val provider = WhiskPackage(namespace, aname, publish = false)
  val action = WhiskAction(provider.fullPath, aname, jsDefault("??"))
  val content = JsObject("xxx" -> "yyy".toJson)
  put(entityStore, provider)
  put(entityStore, action)
  // Fully-qualified path into the owner's namespace, issued as auser.
  Post(s"/$namespace/${collection.path}/${provider.name}/${action.name}", content) ~> sealRoute(routes(auser)) ~> check {
    status should be(Forbidden)
  }
}

// The action document exists but the enclosing package was never stored: 404.
it should "reject invoking an action in package that does not exist" in {
  implicit val tid = transid()
  val provider = WhiskPackage(namespace, aname, publish = false)
  val action = WhiskAction(provider.fullPath, aname, jsDefault("??"))
  val content = JsObject("xxx" -> "yyy".toJson)
  // Intentionally store only the action, not the package.
  put(entityStore, action)
  Post(s"$collectionPath/${provider.name}/${action.name}", content) ~> sealRoute(routes(creds)) ~> check {
    status should be(NotFound)
  }
}
// The package exists but the action inside it was never stored: 404.
// Fixed: the original stored the action instead of the package, which made this
// test an exact duplicate of "reject invoking an action in package that does
// not exist" and never exercised the missing-action path. The GET analog
// ("reject get non-existent action in package") stores only the provider.
it should "reject invoking a non-existent action in package" in {
  implicit val tid = transid()
  val provider = WhiskPackage(namespace, aname, publish = false)
  val action = WhiskAction(provider.fullPath, aname, jsDefault("??"))
  val content = JsObject("xxx" -> "yyy".toJson)
  // Intentionally store only the package, not the action.
  put(entityStore, provider)
  Post(s"$collectionPath/${provider.name}/${action.name}", content) ~> sealRoute(routes(creds)) ~> check {
    status should be(NotFound)
  }
}
// Invocation through a binding is rejected when the bound package is private.
it should "reject invoke action in package binding with private package" in {
  implicit val tid = transid()
  val auser = WhiskAuth(Subject(), AuthKey()).toIdentity
  val provider = WhiskPackage(namespace, aname, publish = false)
  val reference = WhiskPackage(EntityPath(auser.subject.asString), aname, provider.bind)
  val action = WhiskAction(provider.fullPath, aname, jsDefault("??"))
  val content = JsObject("x" -> "x".toJson, "z" -> "Z".toJson)
  put(entityStore, provider)
  put(entityStore, reference)
  put(entityStore, action)
  Post(s"$collectionPath/${reference.name}/${action.name}", content) ~> sealRoute(routes(auser)) ~> check {
    status should be(Forbidden)
  }
}
// A corrupted provider (package) record must surface as a 500 with the
// canonical corrupted-entity message when deleting an action inside it.
it should "report proper error when provider record is corrupted on delete" in {
  implicit val tid = transid()
  val provider = BadEntity(namespace, aname)
  val entity = BadEntity(provider.namespace.addPath(provider.name), aname)
  put(entityStore, provider)
  put(entityStore, entity)
  Delete(s"$collectionPath/${provider.name}/${entity.name}") ~> sealRoute(routes(creds)) ~> check {
    // Added: assert the HTTP status too; every sibling corrupted-record test
    // checks InternalServerError alongside the error message.
    status should be(InternalServerError)
    responseAs[ErrorResponse].error shouldBe Messages.corruptedEntity
  }
}
// A corrupted action record inside a valid package must yield 500 on delete.
it should "report proper error when record is corrupted on delete" in {
  implicit val tid = transid()
  val provider = WhiskPackage(namespace, aname)
  val entity = BadEntity(provider.fullPath, aname)
  // Third argument presumably controls post-test cleanup/garbage collection of
  // the stored document — confirm against the test helper's signature.
  put(entityStore, provider, false)
  put(entityStore, entity, false)
  Delete(s"$collectionPath/${provider.name}/${entity.name}") ~> sealRoute(routes(creds)) ~> check {
    status should be(InternalServerError)
    responseAs[ErrorResponse].error shouldBe Messages.corruptedEntity
  }
}

// A corrupted provider record must yield 500 for the owner on GET, while a
// different subject still gets Forbidden (authorization is checked first and
// the corruption is not leaked).
it should "report proper error when provider record is corrupted on get" in {
  implicit val tid = transid()
  val provider = BadEntity(namespace, aname)
  val entity = BadEntity(provider.namespace.addPath(provider.name), aname)
  put(entityStore, provider)
  put(entityStore, entity)
  Get(s"$collectionPath/${provider.name}/${entity.name}") ~> sealRoute(routes(creds)) ~> check {
    status should be(InternalServerError)
    responseAs[ErrorResponse].error shouldBe Messages.corruptedEntity
  }
  // Same resource fetched by a non-owner: rejected before the corruption matters.
  val auser = WhiskAuth(Subject(), AuthKey()).toIdentity
  Get(s"/${provider.namespace}/${collection.path}/${provider.name}/${entity.name}") ~> sealRoute(routes(auser)) ~> check {
    status should be(Forbidden)
    responseAs[ErrorResponse].error shouldBe Messages.notAuthorizedtoOperateOnResource
  }
}

// A corrupted action record inside a valid package must yield 500 on GET.
it should "report proper error when record is corrupted on get" in {
  implicit val tid = transid()
  val provider = WhiskPackage(namespace, aname)
  val entity = BadEntity(provider.fullPath, aname)
  put(entityStore, provider)
  put(entityStore, entity)
  Get(s"$collectionPath/${provider.name}/${entity.name}") ~> sealRoute(routes(creds)) ~> check {
    status should be(InternalServerError)
    responseAs[ErrorResponse].error shouldBe Messages.corruptedEntity
  }
}
// A corrupted provider record must yield 500 when updating an action in it.
it should "report proper error when provider record is corrupted on put" in {
  implicit val tid = transid()
  val provider = BadEntity(namespace, aname)
  val entity = BadEntity(provider.namespace.addPath(provider.name), aname)
  put(entityStore, provider)
  put(entityStore, entity)
  // Empty update payload; the failure occurs before the content is applied.
  val content = WhiskActionPut()
  Put(s"$collectionPath/${provider.name}/${entity.name}", content) ~> sealRoute(routes(creds)) ~> check {
    status should be(InternalServerError)
    responseAs[ErrorResponse].error shouldBe Messages.corruptedEntity
  }
}

// A corrupted action record inside a valid package must yield 500 on put.
it should "report proper error when record is corrupted on put" in {
  implicit val tid = transid()
  val provider = WhiskPackage(namespace, aname)
  val entity = BadEntity(provider.fullPath, aname)
  put(entityStore, provider)
  put(entityStore, entity)
  val content = WhiskActionPut()
  Put(s"$collectionPath/${provider.name}/${entity.name}", content) ~> sealRoute(routes(creds)) ~> check {
    status should be(InternalServerError)
    responseAs[ErrorResponse].error shouldBe Messages.corruptedEntity
  }
}
}
| lzbj/openwhisk | tests/src/test/scala/whisk/core/controller/test/PackageActionsApiTests.scala | Scala | apache-2.0 | 28,997 |
package org.rovak.steamclient.steam3.handlers
import akka.actor.Actor
import rovak.steamkit.steam.IPacketMsg
import org.rovak.steamclient.steam3.SteamClient
/**
 * Base trait for Steam message-handler actors. Concrete handlers supply the
 * actor behavior via `wrappedReceive` and packet handling via
 * `wrappedHandleMessage`.
 */
trait MessageHandlerStack extends Actor {

  /** Handles one raw packet message; implemented by the concrete handler. */
  def wrappedHandleMessage(message: IPacketMsg)

  /** The actor behavior contributed by the concrete handler. */
  def wrappedReceive: Receive

  /**
   * Delegates every message to `wrappedReceive` when it is defined there,
   * otherwise marks it unhandled. Uses `applyOrElse` instead of the manual
   * `isDefinedAt`/apply pair: same semantics, single dispatch.
   */
  def receive = {
    case x => wrappedReceive.applyOrElse(x, unhandled)
  }

  /** Entry point used to funnel a packet into the concrete handler. */
  def internalHandleMessage(message: IPacketMsg) = {
    wrappedHandleMessage(message)
  }
}
| Rovak/scala-steamkit | steamkit/src/main/scala/org/rovak/steamclient/steam3/handlers/MessageHandlerStack.scala | Scala | mit | 482 |
package com.example.infrastructure
import org.apache.spark.streaming.{Duration => SparkDuration}
import scala.concurrent.duration.Duration
/** Implicit conversions shared by the streaming infrastructure. */
object Utils {
  /**
   * Implicitly converts a Scala [[scala.concurrent.duration.Duration]] to
   * Spark Streaming's Duration. Precision is truncated to whole milliseconds
   * via `toMillis`.
   */
  implicit def scalaDurationToSparkDuration(time: Duration): SparkDuration =
    new SparkDuration(time.toMillis)
}
| btrofimov/spark-enterprise-example | nonblocking-bulletinboard/bl-processor/src/main/scala/com/example/infrastructure/Utils.scala | Scala | apache-2.0 | 276 |
package com.datawizards.dmg.dialects
import com.datawizards.dmg.TestModel._
import com.datawizards.dmg.{DataModelGenerator, DataModelGeneratorBaseTest}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class GenerateJavaClassTest extends DataModelGeneratorBaseTest {
test("Simple model") {
val expected =
"""public class Person {
| private String name;
| private Integer age;
|
| public Person() {}
|
| public Person(String name, Integer age) {
| this.name = name;
| this.age = age;
| }
|
| public String getName() {
| return name;
| }
|
| public void setName(String name) {
| this.name = name;
| }
|
| public Integer getAge() {
| return age;
| }
|
| public void setAge(Integer age) {
| this.age = age;
| }
|}""".stripMargin
assertResultIgnoringNewLines(expected) {
DataModelGenerator.generate[Person](JavaDialect)
}
}
test("ClassWithAllSimpleTypes") {
val expected =
"""public class ClassWithAllSimpleTypes {
| private String strVal;
| private Integer intVal;
| private Long longVal;
| private Double doubleVal;
| private Float floatVal;
| private Short shortVal;
| private Boolean booleanVal;
| private Byte byteVal;
| private java.util.Date dateVal;
| private java.sql.Timestamp timestampVal;
|
| public ClassWithAllSimpleTypes() {}
|
| public ClassWithAllSimpleTypes(String strVal, Integer intVal, Long longVal, Double doubleVal, Float floatVal, Short shortVal, Boolean booleanVal, Byte byteVal, java.util.Date dateVal, java.sql.Timestamp timestampVal) {
| this.strVal = strVal;
| this.intVal = intVal;
| this.longVal = longVal;
| this.doubleVal = doubleVal;
| this.floatVal = floatVal;
| this.shortVal = shortVal;
| this.booleanVal = booleanVal;
| this.byteVal = byteVal;
| this.dateVal = dateVal;
| this.timestampVal = timestampVal;
| }
|
| public String getStrVal() {
| return strVal;
| }
|
| public void setStrVal(String strVal) {
| this.strVal = strVal;
| }
|
| public Integer getIntVal() {
| return intVal;
| }
|
| public void setIntVal(Integer intVal) {
| this.intVal = intVal;
| }
|
| public Long getLongVal() {
| return longVal;
| }
|
| public void setLongVal(Long longVal) {
| this.longVal = longVal;
| }
|
| public Double getDoubleVal() {
| return doubleVal;
| }
|
| public void setDoubleVal(Double doubleVal) {
| this.doubleVal = doubleVal;
| }
|
| public Float getFloatVal() {
| return floatVal;
| }
|
| public void setFloatVal(Float floatVal) {
| this.floatVal = floatVal;
| }
|
| public Short getShortVal() {
| return shortVal;
| }
|
| public void setShortVal(Short shortVal) {
| this.shortVal = shortVal;
| }
|
| public Boolean getBooleanVal() {
| return booleanVal;
| }
|
| public void setBooleanVal(Boolean booleanVal) {
| this.booleanVal = booleanVal;
| }
|
| public Byte getByteVal() {
| return byteVal;
| }
|
| public void setByteVal(Byte byteVal) {
| this.byteVal = byteVal;
| }
|
| public java.util.Date getDateVal() {
| return dateVal;
| }
|
| public void setDateVal(java.util.Date dateVal) {
| this.dateVal = dateVal;
| }
|
| public java.sql.Timestamp getTimestampVal() {
| return timestampVal;
| }
|
| public void setTimestampVal(java.sql.Timestamp timestampVal) {
| this.timestampVal = timestampVal;
| }
|}""".stripMargin
assertResultIgnoringNewLines(expected) {
DataModelGenerator.generate[ClassWithAllSimpleTypes](JavaDialect)
}
}
test("Array type") {
val expected =
"""public class CV {
| private java.util.List<String> skills;
| private java.util.List<Integer> grades;
|
| public CV() {}
|
| public CV(java.util.List<String> skills, java.util.List<Integer> grades) {
| this.skills = skills;
| this.grades = grades;
| }
|
| public java.util.List<String> getSkills() {
| return skills;
| }
|
| public void setSkills(java.util.List<String> skills) {
| this.skills = skills;
| }
|
| public java.util.List<Integer> getGrades() {
| return grades;
| }
|
| public void setGrades(java.util.List<Integer> grades) {
| this.grades = grades;
| }
|}""".stripMargin
assertResultIgnoringNewLines(expected) {
DataModelGenerator.generate[CV](JavaDialect)
}
}
test("Nested array type") {
val expected =
"""public class NestedArray {
| private java.util.List<java.util.List<String>> nested;
| private java.util.List<java.util.List<java.util.List<Integer>>> nested3;
|
| public NestedArray() {}
|
| public NestedArray(java.util.List<java.util.List<String>> nested, java.util.List<java.util.List<java.util.List<Integer>>> nested3) {
| this.nested = nested;
| this.nested3 = nested3;
| }
|
| public java.util.List<java.util.List<String>> getNested() {
| return nested;
| }
|
| public void setNested(java.util.List<java.util.List<String>> nested) {
| this.nested = nested;
| }
|
| public java.util.List<java.util.List<java.util.List<Integer>>> getNested3() {
| return nested3;
| }
|
| public void setNested3(java.util.List<java.util.List<java.util.List<Integer>>> nested3) {
| this.nested3 = nested3;
| }
|}""".stripMargin
assertResultIgnoringNewLines(expected) {
DataModelGenerator.generate[NestedArray](JavaDialect)
}
}
test("Struct types") {
val expected =
"""public class Book {
| private String title;
| private Integer year;
| private com.datawizards.dmg.Person owner;
| private java.util.List<com.datawizards.dmg.Person> authors;
|
| public Book() {}
|
| public Book(String title, Integer year, com.datawizards.dmg.Person owner, java.util.List<com.datawizards.dmg.Person> authors) {
| this.title = title;
| this.year = year;
| this.owner = owner;
| this.authors = authors;
| }
|
| public String getTitle() {
| return title;
| }
|
| public void setTitle(String title) {
| this.title = title;
| }
|
| public Integer getYear() {
| return year;
| }
|
| public void setYear(Integer year) {
| this.year = year;
| }
|
| public com.datawizards.dmg.Person getOwner() {
| return owner;
| }
|
| public void setOwner(com.datawizards.dmg.Person owner) {
| this.owner = owner;
| }
|
| public java.util.List<com.datawizards.dmg.Person> getAuthors() {
| return authors;
| }
|
| public void setAuthors(java.util.List<com.datawizards.dmg.Person> authors) {
| this.authors = authors;
| }
|}""".stripMargin
assertResultIgnoringNewLines(expected) {
DataModelGenerator.generate[Book](JavaDialect)
}
}
test("Map type") {
val expected =
"""public class ClassWithMap {
| private java.util.Map<Integer, Boolean> map;
|
| public ClassWithMap() {}
|
| public ClassWithMap(java.util.Map<Integer, Boolean> map) {
| this.map = map;
| }
|
| public java.util.Map<Integer, Boolean> getMap() {
| return map;
| }
|
| public void setMap(java.util.Map<Integer, Boolean> map) {
| this.map = map;
| }
|}""".stripMargin
assertResultIgnoringNewLines(expected) {
DataModelGenerator.generate[ClassWithMap](JavaDialect)
}
}
test("ClassWithArrayByte") {
val expected =
"""public class ClassWithArrayByte {
| private java.util.List<Byte> arr;
|
| public ClassWithArrayByte() {}
|
| public ClassWithArrayByte(java.util.List<Byte> arr) {
| this.arr = arr;
| }
|
| public java.util.List<Byte> getArr() {
| return arr;
| }
|
| public void setArr(java.util.List<Byte> arr) {
| this.arr = arr;
| }
|}""".stripMargin
assertResultIgnoringNewLines(expected) {
DataModelGenerator.generate[ClassWithArrayByte](JavaDialect)
}
}
test("ClassWithBigInteger") {
val expected =
"""public class ClassWithBigInteger {
| private java.math.BigInteger n1;
|
| public ClassWithBigInteger() {}
|
| public ClassWithBigInteger(java.math.BigInteger n1) {
| this.n1 = n1;
| }
|
| public java.math.BigInteger getN1() {
| return n1;
| }
|
| public void setN1(java.math.BigInteger n1) {
| this.n1 = n1;
| }
|}""".stripMargin
assertResultIgnoringNewLines(expected) {
DataModelGenerator.generate[ClassWithBigInteger](JavaDialect)
}
}
test("ClassWithBigDecimal") {
val expected =
"""public class ClassWithBigDecimal {
| private java.math.BigDecimal n1;
|
| public ClassWithBigDecimal() {}
|
| public ClassWithBigDecimal(java.math.BigDecimal n1) {
| this.n1 = n1;
| }
|
| public java.math.BigDecimal getN1() {
| return n1;
| }
|
| public void setN1(java.math.BigDecimal n1) {
| this.n1 = n1;
| }
|}""".stripMargin
assertResultIgnoringNewLines(expected) {
DataModelGenerator.generate[ClassWithBigDecimal](JavaDialect)
}
}
} | mateuszboryn/data-model-generator | src/test/scala/com/datawizards/dmg/dialects/GenerateJavaClassTest.scala | Scala | apache-2.0 | 11,389 |
package org.bitcoins.core.protocol.blockchain
import org.bitcoins.core.crypto.DoubleSha256Digest
import org.bitcoins.core.number.UInt32
import org.bitcoins.core.protocol.NetworkElement
import org.bitcoins.core.serializers.blockchain.RawBlockHeaderSerializer
import org.bitcoins.core.util.{ BitcoinSUtil, CryptoUtil, BitcoinSLogger, Factory }
/**
* Created by chris on 5/19/16.
* Nodes collect new transactions into a block, hash them into a hash tree,
* and scan through nonce values to make the block's hash satisfy proof-of-work
* requirements. When they solve the proof-of-work, they broadcast the block
* to everyone and the block is added to the block chain. The first transaction
* in the block is a special one that creates a new coin owned by the creator
* of the block.
* Bitcoin Developer reference link
* https://bitcoin.org/en/developer-reference#block-headers
* Bitcoin Core implementation:
* https://github.com/bitcoin/bitcoin/blob/master/src/primitives/block.h#L20
*/
sealed trait BlockHeader extends NetworkElement {

  /**
   * The block version number indicates which set of block validation rules to follow.
   * See BIP9 for more information on what version numbers signify:
   * https://github.com/bitcoin/bips/blob/master/bip-0009.mediawiki
   *
   * @return the version number for this block
   */
  def version: UInt32

  /**
   * A SHA256(SHA256()) hash in internal byte order of the previous block's header.
   * This ensures no previous block can be changed without also changing this block's header.
   *
   * @return the previous block's hash
   */
  def previousBlockHash: DoubleSha256Digest

  /**
   * A SHA256(SHA256()) hash in internal byte order.
   * The merkle root is derived from the hashes of all transactions included in this block,
   * ensuring that none of those transactions can be modified without modifying the header.
   * https://bitcoin.org/en/developer-reference#merkle-trees
   *
   * @return the merkle root of the merkle tree
   */
  def merkleRootHash: DoubleSha256Digest

  /**
   * The block time is a Unix epoch time when the miner started hashing the header (according to the miner).
   * Must be greater than or equal to the median time of the previous 11 blocks.
   * Full nodes will not accept blocks with headers more than two hours in the future according to their clock.
   *
   * @return the time when the miner started solving the block
   */
  def time: UInt32

  /**
   * An encoded version of the target threshold this block's header hash must be less than or equal to.
   * See the nBits format described at https://bitcoin.org/en/developer-reference#target-nbits
   *
   * @return the compact-encoded proof-of-work target
   */
  def nBits: UInt32

  /**
   * An arbitrary number miners change to modify the header hash in order to produce a hash below the target threshold.
   * If all 32-bit values are tested, the time can be updated or the coinbase
   * transaction can be changed and the merkle root updated.
   *
   * @return the nonce used to try and solve a block
   */
  def nonce: UInt32

  /** Returns the block's hash: double SHA256 of the serialized header. Recomputed on every call. */
  def hash: DoubleSha256Digest = CryptoUtil.doubleSHA256(bytes)

  /** Wire-format serialization of this header. */
  override def bytes: Seq[Byte] = RawBlockHeaderSerializer.write(this)
}
/**
* Companion object used for creating BlockHeaders
*/
object BlockHeader extends Factory[BlockHeader] {
private sealed case class BlockHeaderImpl(version: UInt32, previousBlockHash: DoubleSha256Digest,
merkleRootHash: DoubleSha256Digest, time: UInt32, nBits: UInt32, nonce: UInt32) extends BlockHeader
def apply(version: UInt32, previousBlockHash: DoubleSha256Digest, merkleRootHash: DoubleSha256Digest,
time: UInt32, nBits: UInt32, nonce: UInt32): BlockHeader = {
BlockHeaderImpl(version, previousBlockHash, merkleRootHash, time, nBits, nonce)
}
def fromBytes(bytes: Seq[Byte]): BlockHeader = RawBlockHeaderSerializer.read(bytes)
} | Christewart/bitcoin-s-core | src/main/scala/org/bitcoins/core/protocol/blockchain/BlockHeader.scala | Scala | mit | 3,939 |
class InnerCompanionTraitRename {
sealed trait Mode
object Mode {
case object DropAllCreate extends Mode
case object DropCreate extends Mode
case object Create extends Mode
case object None extends Mode
}
class Instance
( mode : /*caret*/Mode = Mode.None )
}
/*
class InnerCompanionTraitRename {
sealed trait NameAfterRename
object NameAfterRename {
case object DropAllCreate extends NameAfterRename
case object DropCreate extends NameAfterRename
case object Create extends NameAfterRename
case object None extends NameAfterRename
}
class Instance
( mode : /*caret*/NameAfterRename = NameAfterRename.None )
}
*/ | ilinum/intellij-scala | testdata/rename/class/InnerCompanionTraitRename.scala | Scala | apache-2.0 | 665 |
/*
* Copyright (C) 2007-2008 Artima, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Example code from:
*
* Programming in Scala (First Edition, Version 6)
* by Martin Odersky, Lex Spoon, Bill Venners
*
* http://booksites.artima.com/programming_in_scala
*/
/** Book example: overriding a `val` from a parametric field (Programming in Scala, ch. 10). */
object Ex8 {

  /** Base class with a default, non-dangerous disposition. */
  class Cat {
    val dangerous = false
  }

  /** Overrides `dangerous` with a constructor parameter; `age` is private state. */
  class Tiger(param1: Boolean, param2: Int) extends Cat {
    override val dangerous = param1
    private var age = param2
  }

  // Fixed: Scala's procedure syntax `def main(...) { ... }` is deprecated;
  // declare the Unit result type explicitly.
  def main(args: Array[String]): Unit = {
    val tiger = new Tiger(true, 12)
    println("tiger [" + tiger + "]")
  }
}
| peachyy/scalastu | compo-inherit/Ex8.scala | Scala | apache-2.0 | 1,109 |
package im.mange.little
import javax.servlet.http.HttpServlet
import org.eclipse.jetty.server._
import org.eclipse.jetty.servlet.{ServletContextHandler, ServletHolder}
//TODO: merge wiht LittleServer
/**
 * Minimal embedded Jetty server hosting servlets under the root ("/") context.
 * Removed the large blocks of dead, commented-out builder/shutdown code from
 * the original; runtime behavior is unchanged.
 *
 * @param port      TCP port to listen on
 * @param autoStart when true (the default), `start()` is invoked from the constructor
 */
class LittleServletServer(port: Int, autoStart: Boolean = true) {

  private val server = buildServer
  private val context = createContext
  server.setHandler(context)

  /** Registers a servlet at the given path spec (e.g. "/api/*"). */
  def add(path: String, servlet: HttpServlet) {
    context.addServlet(new ServletHolder(servlet), path)
  }

  /**
   * Starts Jetty, logs the outcome, and rethrows any startup failure.
   * The poll loop is a defensive wait until the server reports started.
   */
  def start() {
    try {
      server.start()
      println("### Started web server on port %d...".format(port))
      while (!server.isStarted) Thread.sleep(100)
    } catch {
      case e: Exception => {
        println("### Failed to start web server on port %d".format(port))
        e.printStackTrace()
        throw e
      }
    }
  }

  /** Stops Jetty, waiting up to 10 seconds; logs loudly if it never stops. */
  def stop() {
    server.stop()
    val end = System.currentTimeMillis() + 10000
    while (!server.isStopped && end > System.currentTimeMillis()) Thread.sleep(100)
    if (!server.isStopped) println("!!!!!!! SERVER FAILED TO STOP !!!!!!!")
  }

  // Builds a plain HTTP server bound to `port` with a single connector.
  private def buildServer = {
    val server = new Server
    val httpConnector = new ServerConnector(server)
    httpConnector.setPort(port)
    server.setConnectors(Array(httpConnector))
    server
  }

  // Creates the root servlet context attached to this server.
  private def createContext = {
    val context = new ServletContextHandler
    context.setServer(server)
    context.setContextPath("/")
    context
  }

  if (autoStart) start()
}
| alltonp/little-server | src/main/scala/im/mange/little/LittleServletServer.scala | Scala | apache-2.0 | 1,981 |
package org.scaladebugger.api.profiles.traits.requests.classes
import org.scaladebugger.api.lowlevel.JDIArgument
import org.scaladebugger.api.lowlevel.classes.ClassUnloadRequestInfo
import org.scaladebugger.api.lowlevel.events.data.JDIEventDataResult
import org.scaladebugger.api.pipelines.Pipeline
import org.scaladebugger.api.pipelines.Pipeline.IdentityPipeline
import org.scaladebugger.api.profiles.traits.info.events.ClassUnloadEventInfo
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import scala.util.{Failure, Success, Try}
class ClassUnloadRequestSpec extends ParallelMockFunSpec
{
// Shared throwable used to simulate a failed request creation.
private val TestThrowable = new Throwable

// Pipeline that is parent to the one that just streams the event
private val TestPipelineWithData = Pipeline.newPipeline(
  classOf[ClassUnloadRequest#ClassUnloadEventAndData]
)

// Stub profile whose request creation always succeeds with the shared
// pipeline; members the tests never touch are left unimplemented (???).
private val successClassUnloadProfile = new Object with ClassUnloadRequest {
  override def tryGetOrCreateClassUnloadRequestWithData(
    extraArguments: JDIArgument*
  ): Try[IdentityPipeline[ClassUnloadEventAndData]] = {
    Success(TestPipelineWithData)
  }

  override def removeClassUnloadRequestWithArgs(
    extraArguments: JDIArgument*
  ): Option[ClassUnloadRequestInfo] = ???

  override def removeAllClassUnloadRequests(): Seq[ClassUnloadRequestInfo] = ???

  override def isClassUnloadRequestWithArgsPending(
    extraArguments: JDIArgument*
  ): Boolean = ???

  override def classUnloadRequests: Seq[ClassUnloadRequestInfo] = ???
}

// Stub profile whose request creation always fails with TestThrowable.
private val failClassUnloadProfile = new Object with ClassUnloadRequest {
  override def tryGetOrCreateClassUnloadRequestWithData(
    extraArguments: JDIArgument*
  ): Try[IdentityPipeline[ClassUnloadEventAndData]] = {
    Failure(TestThrowable)
  }

  override def removeClassUnloadRequestWithArgs(
    extraArguments: JDIArgument*
  ): Option[ClassUnloadRequestInfo] = ???

  override def removeAllClassUnloadRequests(): Seq[ClassUnloadRequestInfo] = ???

  override def isClassUnloadRequestWithArgsPending(
    extraArguments: JDIArgument*
  ): Boolean = ???

  override def classUnloadRequests: Seq[ClassUnloadRequestInfo] = ???
}
describe("ClassUnloadRequest") {
describe("#tryGetOrCreateClassUnloadRequest") {
it("should return a pipeline with the event data results filtered out") {
val expected = mock[ClassUnloadEventInfo]
// Data to be run through pipeline
val data = (expected, Seq(mock[JDIEventDataResult]))
var actual: ClassUnloadEventInfo = null
successClassUnloadProfile.tryGetOrCreateClassUnloadRequest().get.foreach(actual = _)
// Funnel the data through the parent pipeline that contains data to
// demonstrate that the pipeline with just the event is merely a
// mapping on top of the pipeline containing the data
TestPipelineWithData.process(data)
actual should be (expected)
}
it("should capture any exception as a failure") {
val expected = TestThrowable
// Data to be run through pipeline
val data = (mock[ClassUnloadEventInfo], Seq(mock[JDIEventDataResult]))
var actual: Throwable = null
failClassUnloadProfile.tryGetOrCreateClassUnloadRequest().failed.foreach(actual = _)
actual should be (expected)
}
}
describe("#getOrCreateClassUnloadRequest") {
it("should return a pipeline of events if successful") {
val expected = mock[ClassUnloadEventInfo]
// Data to be run through pipeline
val data = (expected, Seq(mock[JDIEventDataResult]))
var actual: ClassUnloadEventInfo = null
successClassUnloadProfile.getOrCreateClassUnloadRequest().foreach(actual = _)
// Funnel the data through the parent pipeline that contains data to
// demonstrate that the pipeline with just the event is merely a
// mapping on top of the pipeline containing the data
TestPipelineWithData.process(data)
actual should be (expected)
}
it("should throw the exception if unsuccessful") {
intercept[Throwable] {
failClassUnloadProfile.getOrCreateClassUnloadRequest()
}
}
}
describe("#getOrCreateClassUnloadRequestWithData") {
it("should return a pipeline of events and data if successful") {
// Data to be run through pipeline
val expected = (mock[ClassUnloadEventInfo], Seq(mock[JDIEventDataResult]))
var actual: (ClassUnloadEventInfo, Seq[JDIEventDataResult]) = null
successClassUnloadProfile
.getOrCreateClassUnloadRequestWithData()
.foreach(actual = _)
// Funnel the data through the parent pipeline that contains data to
// demonstrate that the pipeline with just the event is merely a
// mapping on top of the pipeline containing the data
TestPipelineWithData.process(expected)
actual should be (expected)
}
it("should throw the exception if unsuccessful") {
intercept[Throwable] {
failClassUnloadProfile.getOrCreateClassUnloadRequestWithData()
}
}
}
}
}
| ensime/scala-debugger | scala-debugger-api/src/test/scala/org/scaladebugger/api/profiles/traits/requests/classes/ClassUnloadRequestSpec.scala | Scala | apache-2.0 | 5,172 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}
/**
 * Mixin for test suites that need a local [[SparkContext]] and
 * [[SQLContext]]. The contexts are created once before all tests in the
 * suite and torn down after all tests complete.
 */
trait MLlibTestSparkContext extends BeforeAndAfterAll { self: Suite =>
  // @transient so the contexts are never serialized along with the suite.
  @transient var sc: SparkContext = _
  @transient var sqlContext: SQLContext = _

  override def beforeAll(): Unit = {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("MLlibUnitTest")
    sc = new SparkContext(conf)
    // Clear any active SQLContext leaked by a previously run suite before
    // registering our own.
    SQLContext.clearActive()
    sqlContext = new SQLContext(sc)
    SQLContext.setActive(sqlContext)
  }

  override def afterAll(): Unit = {
    try {
      sqlContext = null
      SQLContext.clearActive()
      if (sc != null) {
        sc.stop()
      }
      sc = null
    } finally {
      // Always run ScalaTest's own teardown, even if stopping Spark failed.
      super.afterAll()
    }
  }
}
| yu-iskw/criteo-display-advertising-challenge-with-spark | src/test/scala/org/apache/spark/util/MLlibTestSparkContext.scala | Scala | apache-2.0 | 1,621 |
package svez.akka.stream.stages
import akka.NotUsed
import akka.stream.scaladsl.Flow
import cats.data.Ior
import scala.concurrent.{ExecutionContext, Future}
/**
 * Akka Streams [[Flow]] stages that transform one side of a cats [[Ior]]
 * ("inclusive or") element, either synchronously or asynchronously.
 */
object ior {
  /** Maps the left side of each element; the right side passes through untouched. */
  def mapLeft[L, R, O](f: L ⇒ O): Flow[Ior[L, R], Ior[O, R], NotUsed] =
    Flow[Ior[L, R]].map { _.leftMap(f) }

  /** Maps the right side of each element (Ior's `map` is right-biased). */
  def mapRight[L, R, O](f: R ⇒ O): Flow[Ior[L, R], Ior[L, O], NotUsed] =
    Flow[Ior[L, R]].map { _.map(f) }

  /**
   * Asynchronously maps the left side with at most `parallelism` futures in
   * flight; Right-only elements are passed through without invoking `f`.
   */
  def mapAsyncLeft[L, R, O](parallelism: Int)(f: L ⇒ Future[O])(implicit ec: ExecutionContext): Flow[Ior[L, R], Ior[O, R], NotUsed] =
    Flow[Ior[L, R]].mapAsync(parallelism) {
      case Ior.Left(a) ⇒ f(a).map(Ior.Left(_))
      case Ior.Both(a, b) ⇒ f(a).map(Ior.both(_, b))
      case r@Ior.Right(_) ⇒ Future.successful(r)
    }

  /**
   * Asynchronously maps the right side with at most `parallelism` futures in
   * flight; Left-only elements are passed through without invoking `f`.
   */
  def mapAsyncRight[L, R, O](parallelism: Int)(f: R ⇒ Future[O])(implicit ec: ExecutionContext): Flow[Ior[L, R], Ior[L, O], NotUsed] =
    Flow[Ior[L, R]].mapAsync(parallelism) {
      case Ior.Right(b) ⇒ f(b).map(Ior.Right(_))
      case Ior.Both(a, b) ⇒ f(b).map(Ior.both(a, _))
      case l@Ior.Left(_) ⇒ Future.successful(l)
    }
} | svezfaz/akka-stream-fp | core/src/main/scala/svez/akka/stream/stages/ior.scala | Scala | apache-2.0 | 1098 |
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.github.microburn.integration.support.kanban
import java.util.Date
import net.liftmodules.ng.Angular.NgModel
import net.liftweb.actor.{LAFuture, LiftActor}
import net.liftweb.common._
import org.github.microburn.domain.actors._
import org.github.microburn.domain.{MajorSprintDetails, SprintDetails, UserStory}
import scala.collection.immutable.TreeMap
import scala.concurrent.duration.FiniteDuration
/**
 * Actor that simulates Scrum-style sprints on top of a kanban board. It
 * keeps an in-memory, id-ordered map of sprint details, forwards every
 * mutation to the underlying [[ProjectActor]], and answers queries about
 * the last (active) sprint.
 */
class ScrumSimulatorActor(boardStateProvider: BoardStateProvider, projectActor: ProjectActor)
                         (initializationTimeout: FiniteDuration) extends LiftActor {
  import org.github.microburn.util.concurrent.ActorEnrichments._
  import org.github.microburn.util.concurrent.BoxEnrichments._
  import org.github.microburn.util.concurrent.FutureEnrichments._

  // All sprints known so far, keyed by numeric sprint id (TreeMap keeps them ordered).
  private var currentSprints: TreeMap[Int, SprintDetails] = TreeMap.empty

  // Kick off state initialization as soon as the actor is constructed.
  this ! Init

  override protected def messageHandler: PartialFunction[Any, Unit] = {
    case Init =>
      val lastSprints = for {
        projectState <- (projectActor ?? GetFullProjectState).mapTo[FullProjectState]
      } yield {
        val sprintsSeq = projectState.sprints.map {
          case SprintIdWithDetails(sprintId, details, _) =>
            (sprintId, details)
        }
        TreeMap(sprintsSeq: _*)
      }
      // Block until the sprints are fully loaded, so that no other action can
      // be handled before initialization completes.
      currentSprints = lastSprints.await(initializationTimeout)
    case FetchCurrentSprintsBoardState =>
      // Only an active last sprint has a board state worth fetching.
      val fetchedStateFuture = lastActive.map { last =>
        boardStateProvider.currentUserStories.map { userStories =>
          FetchedBoardState(last, userStories)
        }
      }.toFutureOfOption
      reply(fetchedStateFuture)
    case StartSprint(name, start, end) =>
      val startFuture =
        if (lastActive.isDefined)
          LAFuture[Box[Any]](() => throw new IllegalArgumentException("You must finish current sprint before start new"))
        else
          doStartSprint(name, start, end)
      reply(startFuture)
    case NextSprint(major, userStories) =>
      val future = (for {
        validatedFullDetails <- SprintDetails.create(major)
      } yield {
        // Next id is last + 1, or 0 when this is the very first sprint.
        val next = optionalLastNumericalSprint.map(_.next(validatedFullDetails)).getOrElse(AutoIncSprintDetails.zero(validatedFullDetails))
        currentSprints += next.id -> next.details
        projectActor !< CreateNewSprint(next.id, major, userStories, new Date)
      }).toFutureOfBox
      reply(future)
    case FinishCurrentActiveSprint =>
      val finishFuture = lastActive.map { sprint =>
        this ?? FinishSprint(sprint.id)
      }.toFutureOfOption
      reply(finishFuture)
    case FinishSprint(sprintId) =>
      reply(updateSprintDetails(sprintId, _.finish))
    case RemoveSprint(sprintId) =>
      reply(updateSprintDetails(sprintId, _.markRemoved))
    case UpdateStartDate(sprintId, start) =>
      reply(updateSprintDetails(sprintId, _.updateStartDate(start)))
    case UpdateEndDate(sprintId, end) =>
      reply(updateSprintDetails(sprintId, _.updateEndDate(end)))
    case DefineBaseStoryPoints(sprintId, base) =>
      reply(updateSprintDetails(sprintId, _.defineBaseStoryPoints(BigDecimal(base))))
  }

  // The sprint with the highest id, but only while it is still active.
  private def lastActive: Option[AutoIncSprintDetails] = {
    optionalLastNumericalSprint.filter(_.isActive)
  }

  // The sprint with the highest numeric id, if any sprints exist.
  private def optionalLastNumericalSprint: Option[AutoIncSprintDetails] = {
    currentSprints.lastOption.map(AutoIncSprintDetails.apply _ tupled)
  }

  private def doStartSprint(name: String, start: Date, end: Date): LAFuture[Any] = {
    for {
      userStories <- boardStateProvider.currentUserStories
      details = MajorSprintDetails(name, start, end)
      // Send to ourselves so that any fetch is guaranteed to see an
      // up-to-date currentSprints state.
      createResult <- this ?? NextSprint(details, userStories)
    } yield createResult
  }

  // Applies `f` to the stored details of `sprintId`; on success the new
  // details are cached locally and persisted via the project actor.
  private def updateSprintDetails(sprintId: Int, f: SprintDetails => Box[SprintDetails]): LAFuture[Box[Any]] = {
    (for {
      details <- currentSprints.get(sprintId).toBox or
        Failure(s"Cannot find sprint with given id $sprintId")
      updatedDetails <- f(details)
    } yield {
      currentSprints = currentSprints.updated(sprintId, updatedDetails)
      projectActor ?? UpdateSprintDetails(sprintId, updatedDetails, new Date)
    }).toFutureOfBox
  }

  private case object Init
  private case class NextSprint(details: MajorSprintDetails, userStories: Seq[UserStory])
}
/** Sprint details paired with an auto-incrementing numeric identifier. */
case class AutoIncSprintDetails(id: Int, details: SprintDetails) {
  /** True while the wrapped sprint is still active. */
  def isActive: Boolean = details.isActive

  /** Builds the successor sprint: the id bumped by one, carrying new details. */
  def next(details: SprintDetails): AutoIncSprintDetails =
    copy(id = id + 1, details = details)
}
object AutoIncSprintDetails {
  /** The very first sprint of a project: numeric ids start at zero. */
  def zero(details: SprintDetails): AutoIncSprintDetails = AutoIncSprintDetails(0, details)
}
/** Request: fetch the board state of the currently active sprint, if any. */
case object FetchCurrentSprintsBoardState

/** Snapshot of a sprint's board: its id, details and current user stories. */
case class FetchedBoardState(sprintId: Int, details: MajorSprintDetails, userStories: Seq[UserStory]) {
  override def toString: String = s"id: $sprintId, details: $details, user stories count: ${userStories.size}"
}

object FetchedBoardState {
  /** Builds a snapshot from an id+details pair, reducing details to their major form. */
  def apply(idWithDetails: AutoIncSprintDetails, userStories: Seq[UserStory]): FetchedBoardState = {
    FetchedBoardState(idWithDetails.id, idWithDetails.details.toMajor, userStories)
  }
}

// Messages understood by ScrumSimulatorActor.
// NOTE(review): extending NgModel (lift-ng) suggests these are bound directly
// from the Angular front end — confirm against the UI code.
case class StartSprint(name: String, start: Date, end: Date) extends NgModel
case object FinishCurrentActiveSprint
case class FinishSprint(id: Int) extends NgModel
case class RemoveSprint(id: Int) extends NgModel
case class UpdateStartDate(id: Int, startDate: Date) extends NgModel
case class UpdateEndDate(id: Int, endDate: Date) extends NgModel
case class DefineBaseStoryPoints(id: Int, baseStoryPoints: Double) extends NgModel | arkadius/micro-burn | src/main/scala/org/github/microburn/integration/support/kanban/ScrumSimulatorActor.scala | Scala | apache-2.0 | 6,366 |
package io.getquill
import com.datastax.driver.core.BoundStatement
import com.datastax.driver.core.Row
import com.typesafe.config.Config
import io.getquill.util.LoadConfig
import io.getquill.context.cassandra.CassandraSessionContext
import scala.collection.JavaConverters._
import com.datastax.driver.core.Cluster
/**
 * Synchronous Cassandra context: queries and actions are executed with the
 * blocking driver API and results are returned directly rather than wrapped
 * in a Future, as reflected by the Run*Result type members below.
 */
class CassandraSyncContext[N <: NamingStrategy](
  cluster: Cluster,
  keyspace: String,
  preparedStatementCacheSize: Long
)
  extends CassandraSessionContext[N](cluster, keyspace, preparedStatementCacheSize) {

  // Convenience constructors that resolve connection settings from config.
  def this(config: CassandraContextConfig) = this(config.cluster, config.keyspace, config.preparedStatementCacheSize)
  def this(config: Config) = this(CassandraContextConfig(config))
  def this(configPrefix: String) = this(LoadConfig(configPrefix))

  // Result types are plain values, not Futures.
  override type RunQueryResult[T] = List[T]
  override type RunQuerySingleResult[T] = T
  override type RunActionResult = Unit
  override type RunBatchActionResult = Unit

  /** Runs a CQL query and maps every returned row through `extractor`. */
  def executeQuery[T](cql: String, prepare: BoundStatement => BoundStatement = identity, extractor: Row => T = identity[Row] _): List[T] =
    session.execute(prepare(super.prepare(cql)))
      .all.asScala.toList.map(extractor)

  /** Runs a query expected to produce exactly one row. */
  def executeQuerySingle[T](cql: String, prepare: BoundStatement => BoundStatement = identity, extractor: Row => T = identity[Row] _): T =
    handleSingleResult(executeQuery(cql, prepare, extractor))

  /** Executes a CQL statement for its side effect, discarding the result set. */
  // NOTE(review): the type parameter T is unused here.
  def executeAction[T](cql: String, prepare: BoundStatement => BoundStatement = identity): Unit = {
    session.execute(prepare(super.prepare(cql)))
    ()
  }

  /** Executes each batch group by running its prepare functions one by one. */
  def executeBatchAction(groups: List[BatchGroup]): Unit =
    groups.foreach {
      case BatchGroup(cql, prepare) =>
        prepare.foreach(executeAction(cql, _))
    }
}
| jcranky/quill | quill-cassandra/src/main/scala/io/getquill/CassandraSyncContext.scala | Scala | apache-2.0 | 1,756 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kudu.index
import java.nio.charset.StandardCharsets
import java.util.Collections
import org.apache.kudu.client.{CreateTableOptions, PartialRow}
import org.locationtech.geomesa.index.api._
import org.locationtech.geomesa.index.conf.splitter.DefaultSplitter
import org.locationtech.geomesa.kudu.KuduValue
import org.locationtech.geomesa.kudu.schema.KuduIndexColumnAdapter.{FeatureIdAdapter, ZColumnAdapter}
object Z2ColumnMapper {
  // Column layout for z2-indexed tables: the 64-bit z value, then the feature id.
  private val columns = Seq(ZColumnAdapter, FeatureIdAdapter)
}
/**
 * Maps z2-indexed rows to Kudu columns: a single 64-bit z-value column plus
 * the feature id. Tables are hash-partitioned on feature id (one bucket per
 * z-shard) and range-partitioned on the z column.
 */
class Z2ColumnMapper(index: GeoMesaFeatureIndex[_, _]) extends KuduColumnMapper(index, Z2ColumnMapper.columns) {

  /** Builds the table partitioning: optional hash partitions plus z-range splits. */
  override def configurePartitions(): CreateTableOptions = {
    import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType

    val options = new CreateTableOptions()

    // add hash splits based on our shards, which we don't need to actually store as a separate column
    val shards = index.sft.getZShards
    if (shards > 1) {
      options.addHashPartitions(Collections.singletonList(FeatureIdAdapter.name), shards)
    }

    options.setRangePartitionColumns(Collections.singletonList(ZColumnAdapter.name))

    val bitSplits = {
      val configured = DefaultSplitter.Parser.z2Splits(splitters)
      if (configured.isEmpty) { Seq(0L, Long.MaxValue) } else {
        // add upper and lower bounds as our splits don't have endpoints
        val builder = Seq.newBuilder[Long]
        builder.sizeHint(configured.length + 2)
        builder += 0L
        builder ++= configured.sorted
        builder += Long.MaxValue
        builder.result.distinct
      }
    }

    // each consecutive pair of split points becomes one range partition
    bitSplits.sliding(2).foreach { case Seq(lo, hi) =>
      val lower = tableSchema.newPartialRow()
      val upper = tableSchema.newPartialRow()
      lower.addLong(0, lo)
      upper.addLong(0, hi)
      options.addRangePartition(lower, upper)
    }

    options
  }

  /** Converts a single-row key/value into Kudu column values (z value + feature id). */
  override def createKeyValues(value: SingleRowKeyValue[_]): Seq[KuduValue[_]] = {
    val fid = KuduValue(new String(value.id, StandardCharsets.UTF_8), FeatureIdAdapter)
    value.key match {
      case z: Long => Seq(KuduValue(z, ZColumnAdapter), fid)
      case _ => throw new IllegalStateException(s"Expected z value but got '${value.key}'")
    }
  }

  /** Translates abstract scan ranges into Kudu partial-row bounds over the z column. */
  override def toRowRanges(ranges: Seq[ScanRange[_]],
                           tieredKeyRanges: Seq[ByteRange]): Seq[(Option[PartialRow], Option[PartialRow])] = {

    // lower bound row: the z value with an empty feature id
    def lower(z: Long): Some[PartialRow] = {
      val row = tableSchema.newPartialRow()
      ZColumnAdapter.writeToRow(row, z)
      FeatureIdAdapter.writeToRow(row, "")
      Some(row)
    }

    // upper bound row: z + 1, presumably because Kudu treats the upper bound
    // as exclusive — confirm against the Kudu scan API
    def upper(z: Long): Some[PartialRow] = {
      val row = tableSchema.newPartialRow()
      // note: shouldn't have to worry about overflow, as our z2/xz2 curves don't use the full 64 bits
      ZColumnAdapter.writeToRow(row, z + 1L)
      FeatureIdAdapter.writeToRow(row, "")
      Some(row)
    }

    ranges.asInstanceOf[Seq[ScanRange[Long]]].map {
      case BoundedRange(lo, hi) => (lower(lo), upper(hi))
      case UnboundedRange(_) => (None, None)
      case r => throw new IllegalArgumentException(s"Unexpected range type $r")
    }
  }
}
| locationtech/geomesa | geomesa-kudu/geomesa-kudu-datastore/src/main/scala/org/locationtech/geomesa/kudu/index/Z2ColumnMapper.scala | Scala | apache-2.0 | 3,654 |
/*
* Copyright (C) 2010 Lalit Pant <pant.lalit@gmail.com>
*
* The contents of this file are subject to the GNU General Public License
* Version 3 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.gnu.org/copyleft/gpl.html
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
*/
package net.kogics.kojo.story
import java.awt.Desktop
import java.net.URL
import javax.swing._
import javax.swing.event._
import scala.util.control.NonFatal
/**
 * Hyperlink listener for story pages. Clicked links are dispatched to one of
 * three handlers:
 *  - http://localpage/&lt;page&gt;#&lt;para&gt; navigates within the current story,
 *  - http://runhandler/&lt;handler&gt;/&lt;data&gt; invokes a registered story handler,
 *  - anything else http is opened in the system browser.
 * Hovering a link mirrors its target in the status bar.
 */
class LinkListener(st: StoryTeller) extends HyperlinkListener {
  // Numeric page link: http://localpage/<pageNum>[#<paraNum>]
  val linkRegex = """(?i)http://localpage/(\d+)#?(\d*)""".r
  // Named page link: http://localpage/<page-name>[#<paraNum>]
  val linkPnameRegex = """(?i)http://localpage/([\w-]+)#?(\d*)""".r
  // Handler link: http://runHandler/<handler>[/<data>]
  val handlerLinkRegex = """(?i)http://runHandler/(\w+)\/?(\w*)""".r

  /**
   * Extracts a (page, paragraph) pair from a localpage url. The paragraph
   * defaults to 1 when absent. Throws IllegalArgumentException for malformed
   * urls or unknown page names.
   */
  def localpageLocation(url: String): (Int, Int) = {
    url match {
      case linkRegex(page, para) =>
        (page.toInt, if (para == "") 1 else para.toInt)
      case linkPnameRegex(pageName, para) =>
        st.pageNumber(pageName) match {
          case Some(pageNum) => (pageNum, if (para == "") 1 else para.toInt)
          case None => throw new IllegalArgumentException()
        }
      case _ =>
        throw new IllegalArgumentException()
    }
  }

  // extract handler and data from runhandler url.
  def handlerData(url: String): (String, String) = {
    url.trim match {
      case handlerLinkRegex(handler, data) =>
        (handler, data)
      case _ =>
        throw new IllegalArgumentException()
    }
  }

  // satisfy url click
  def gotoUrl(url: URL): Unit = {
    if (url.getProtocol == "http") {
      url.getHost.toLowerCase match {
        case "localpage" =>
          try {
            val (page, para) = localpageLocation(url.toString)
            st.viewPage(page, para)
          }
          catch {
            case ex: IllegalArgumentException =>
              st.showStatusError("Invalid page/view in Link - " + url.toString)
            // NonFatal so that fatal errors (OOM etc.) are not swallowed
            // into the status bar.
            case NonFatal(t) =>
              st.showStatusError("Problem handling Url - %s: %s" format(url.toString, t.getMessage))
          }
        case "runhandler" =>
          try {
            val (handler, data) = handlerData(url.toString)
            st.handleLink(handler, data)
          }
          catch {
            case ex: IllegalArgumentException =>
              st.showStatusError("Invalid RunHandler Url - " + url.toString)
            case NonFatal(t) =>
              st.showStatusError("Problem handling Url - %s: %s" format(url.toString, t.getMessage))
          }
        case _ =>
          // Any other http link opens in the default system browser.
          Desktop.getDesktop().browse(url.toURI)
      }
    }
    else {
      st.showStatusError("Trying to use link with unsupported protocol - " + url.getProtocol)
    }
  }

  /** Routes swing hyperlink events: click → navigate, enter/exit → status bar. */
  def hyperlinkUpdate(e: HyperlinkEvent): Unit = {
    if (e.getEventType == HyperlinkEvent.EventType.ACTIVATED) {
      gotoUrl(e.getURL)
    }
    else if (e.getEventType == HyperlinkEvent.EventType.ENTERED) {
      st.showStatusMsg(e.getURL.toString, false)
    }
    else if (e.getEventType == HyperlinkEvent.EventType.EXITED) {
      st.clearStatusBar()
    }
  }

  // for tests
  private [story] def setStory(story: Story) {
    st.currStory = Some(story)
  }
}
| vnkmr7620/kojo | KojoEnv/src/net/kogics/kojo/story/LinkListener.scala | Scala | gpl-3.0 | 3,332 |
package dbtarzan.db
/**
 * A pair of characters used to quote database identifiers (for example
 * [table] or "table") so that reserved words and special characters are
 * accepted by the database.
 */
case class IdentifierDelimiters(start: Char, end: Char) {
  /** Wraps the identifier in the start/end delimiter characters. */
  // Interpolation instead of `start + identifier + end`, which relied on the
  // deprecated any2stringadd Char-to-String concatenation.
  def withDelimiters(identifier: String) : String = s"$start$identifier$end"
}
/** Delimiter pairs for common identifier-quoting styles. */
object IdentifierDelimitersValues {
  /** The [identifier] quoting style. */
  val squareBrackets: IdentifierDelimiters = IdentifierDelimiters(start = '[', end = ']')
  /** The "identifier" quoting style. */
  val doubleQuotes: IdentifierDelimiters = IdentifierDelimiters(start = '"', end = '"')
}
/** Schema/catalog pair locating database objects; either part may be absent. */
case class DBDefinition(schema : Option[Schema], catalog : Option[String])
/** Per-query settings: identifier delimiters, schema/catalog, and an optional field-size cap. */
case class QueryAttributes(delimiters : Option[IdentifierDelimiters], definition : DBDefinition, maxFieldSize: Option[Int])
object QueryAttributes {
  /** Attributes with nothing set: no delimiters, no schema/catalog, no field-size limit. */
  def none(): QueryAttributes = QueryAttributes(None, DBDefinition(None, None), None)
} | aferrandi/dbtarzan | src/main/scala/dbtarzan/db/QueryAttributes.scala | Scala | apache-2.0 | 663 |
package GraphX
import org.apache.spark.graphx._
/**
* Created by troy on 21/02/16.
*/
class Util {
def removeLoop(graph: Graph[Int, Int]): Graph[Int, PartitionID] = {
val convert = graph.edges.filter{s => s.srcId != s.dstId}
val raw_graph: Graph[Int, PartitionID] =
Graph.fromEdges(convert, 0)
raw_graph.cache()
}
  /**
   * Derives clustering coefficients from precomputed triplet and triangle
   * graphs.
   *
   * @param triplets graph whose vertex attribute carries the triplet count in
   *                 its second tuple slot (assumption from usage below —
   *                 confirm against the producer of this graph)
   * @param triangles graph whose vertex attribute carries the triangle count
   *                  in its second tuple slot
   * @return (graph with (triangles, triplets, localCC) per vertex,
   *         global clustering coefficient, average clustering coefficient)
   */
  def calculateCC(triplets: Graph[(Int, Int), Int], triangles: Graph[(List[Int], Int, Int), Int]):
  (Graph[(Int, Int, Double), PartitionID], Double, Double) = {
    // Join triangle counts onto the triplet graph: (triangleCount, tripletCount).
    val triangleAndTriplets = triplets.joinVertices(triangles.vertices)((id, trip, trian) => (trian._2, trip._2))
    // Local CC per vertex = triangles / triplets, 0.0 when the vertex has no triplets.
    val localClusteringCoefficient = triangleAndTriplets.mapVertices{(id, attr) =>
      (attr._1, attr._2, if (attr._2 == 0) 0.0 else attr._1.toDouble / attr._2.toDouble)}
    val GloAveCC: (Double, Double) = GlobalAndAverageCC(localClusteringCoefficient)
    (localClusteringCoefficient, GloAveCC._1, GloAveCC._2)
  }
  /**
   * Computes the global and average clustering coefficients from a graph
   * whose vertex attributes were produced by calculateCC:
   * (triangleCount, tripletCount, localCC).
   */
  def GlobalAndAverageCC(graph: Graph[(Int, Int, Double), PartitionID]): (Double, Double) = {
    // Project out the three per-vertex attributes.
    val triangles = graph.mapVertices((id, attr) => attr._1)
    val triplets = graph.mapVertices((id, attr) => attr._2)
    val localCC = graph.mapVertices((id, attr) => attr._3)
    val numTriangles = triangles.vertices.values.sum()
    val numTriplets = triplets.vertices.values.sum()
    // Global CC = 3 * triangles / triplets.
    // NOTE(review): if the per-vertex triangle counts already count each
    // triangle once per corner, multiplying by 3 over-counts by a factor of
    // three — confirm how the triangle counts were produced upstream.
    val globalClusteringCoefficient = (3 * numTriangles) / numTriplets
    // Average CC = mean of the per-vertex local coefficients.
    val averageClusteringCoefficient = localCC.vertices.values.sum() / localCC.vertices.values.count()
    (globalClusteringCoefficient, averageClusteringCoefficient)
  }
} | MagicTroy/Clustering-Coefficient | CC-Final-Version/Util.scala | Scala | apache-2.0 | 1883 |
package packagesAPI
import scala.concurrent.duration._
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import io.gatling.jdbc.Predef._
/**
 * Gatling load test: after a 5 second pause, 100 users are ramped up over
 * 5 seconds, each issuing a single GET /catalogues/api/v2/packages request.
 */
class GetPackages3 extends Simulation {
  // Protocol settings shared by every request in this simulation.
  val httpProtocol = http
    .baseURL("http://sp.int3.sonata-nfv.eu:4002")
    .inferHtmlResources()
    .acceptHeader("*/*")
    .userAgentHeader("curl/7.35.0")

  // NOTE(review): uri1 is not referenced anywhere in this scenario.
  val uri1 = "http://sp.int3.sonata-nfv.eu:4002/catalogues/api/v2/packages"
  val testHeaders = Map("Content-Type" -> "application/json")

  val scn = scenario("GetPackages3")
    .exec(http("packages_3")
      .get("/catalogues/api/v2/packages")
      .headers(testHeaders)
    )

  setUp(scn.inject(
    nothingFor(5 seconds),
    //atOnceUsers(1000))
    rampUsers(100) over (5 seconds))
  )
    .protocols(httpProtocol)
}
| sonata-nfv/son-qual | qual-stress-gtkapi/src/test/scala/3_packages_API/Recorder3.scala | Scala | apache-2.0 | 788 |
/*
* Copyright 2013 - 2020 Outworkers Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.outworkers.phantom.builder.query.execution
import com.datastax.driver.core.{Session, Statement}
import com.outworkers.phantom.ResultSet
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor}
import scala.collection.compat._
/**
 * Query interface for executables that produce a collection `M` of result
 * sets inside the effect type `F` (for example a batch of statements run
 * together).
 */
trait MultiQueryInterface[M[X] <: IterableOnce[X], F[_]] {

  /**
   * Executes the underlying queries and collects every result set.
   *
   * @param session The implicit session provided by a [[com.outworkers.phantom.connectors.Connector]].
   * @param ctx The implicit Scala execution context.
   * @return An asynchronous result wrapping the Datastax result sets.
   */
  def future()(
    implicit session: Session,
    ctx: ExecutionContext
  ): F[M[ResultSet]]

  /**
   * This will convert the underlying call to Cassandra done with Google Guava ListenableFuture to a consumable
   * Scala Future that will be completed once the operation is completed on the
   * database end.
   *
   * The execution context of the transformation is provided by phantom via
   * based on the execution engine used.
   *
   * @param modifyStatement The function allowing to modify underlying [[Statement]]
   * @param session The implicit session provided by a [[com.outworkers.phantom.connectors.Connector]].
   * @param executor The implicit Scala executor.
   * @return An asynchronous Scala future wrapping the Datastax result set.
   */
  def future(modifyStatement: Statement => Statement)(
    implicit session: Session,
    executor: ExecutionContext
  ): F[M[ResultSet]]
}
/**
 * Query interface for a single executable CQL query. The concrete effect
 * type `F` is abstracted via a [[GuavaAdapter]], which converts the driver's
 * Guava futures into `F`.
 */
abstract class QueryInterface[F[_]]()(implicit adapter: GuavaAdapter[F]) {

  /** The underlying CQL query to execute. */
  def executableQuery: ExecutableCqlQuery

  /**
   * Default asynchronous query execution method. This will convert the underlying
   * call to Cassandra done with Google Guava ListenableFuture to a consumable
   * Scala Future that will be completed once the operation is completed on the
   * database end.
   *
   * The execution context of the transformation is provided by phantom via
   * based on the execution engine used.
   *
   * @param session The implicit session provided by a [[com.outworkers.phantom.connectors.Connector]].
   * @param ec The implicit Scala execution context.
   * @return An asynchronous Scala future wrapping the Datastax result set.
   */
  def future()(
    implicit session: Session,
    ec: ExecutionContext
  ): F[ResultSet] = {
    adapter.fromGuava(executableQuery)
  }

  /**
   * This will convert the underlying call to Cassandra done with Google Guava ListenableFuture to a consumable
   * Scala Future that will be completed once the operation is completed on the
   * database end.
   *
   * The execution context of the transformation is provided by phantom via
   * based on the execution engine used.
   *
   * @param modifyStatement The function allowing to modify underlying [[Statement]]
   * @param session The implicit session provided by a [[com.outworkers.phantom.connectors.Connector]].
   * @param executor The implicit Scala executor.
   * @return An asynchronous Scala future wrapping the Datastax result set.
   */
  def future(modifyStatement: Statement => Statement)(
    implicit session: Session,
    executor: ExecutionContext
  ): F[ResultSet] = adapter.fromGuava(modifyStatement(executableQuery.statement()))
}
| outworkers/phantom | phantom-dsl/src/main/scala/com/outworkers/phantom/builder/query/execution/QueryInterface.scala | Scala | apache-2.0 | 3,616 |
/*
* Copyright 2015 eleflow.com.br.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.param.shared
import org.apache.spark.ml.param.{Param, Params}
/**
* Created by dirceu on 17/05/16.
*/
private[ml] trait HasGamma extends Params {

  /**
   * Param for the Holt Winters gamma parameter.
   * (The previous doc, "Param for label column name", was a copy/paste error.)
   *
   * @group param
   */
  final val gamma: Param[Double] =
    new Param[Double](this, "gamma", "Holt Winters gamma param")

  // Default gamma is 0.0.
  setDefault(gamma, 0d)

  /** @group getParam */
  final def getGamma: Double = $(gamma)
}
| eleflow/uberdata | iuberdata_core/src/main/scala/org/apache/spark/ml/param/shared/HasGamma.scala | Scala | apache-2.0 | 1,043 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.evaluation
import org.apache.spark.annotation.Since
import org.apache.spark.internal.Logging
import org.apache.spark.mllib.evaluation.binary._
import org.apache.spark.rdd.{RDD, UnionRDD}
import org.apache.spark.sql.DataFrame
/**
* Evaluator for binary classification.
*
* @param scoreAndLabels an RDD of (score, label) pairs.
* @param numBins if greater than 0, then the curves (ROC curve, PR curve) computed internally
* will be down-sampled to this many "bins". If 0, no down-sampling will occur.
* This is useful because the curve contains a point for each distinct score
* in the input, and this could be as large as the input itself -- millions of
* points or more, when thousands may be entirely sufficient to summarize
* the curve. After down-sampling, the curves will instead be made of approximately
* `numBins` points instead. Points are made from bins of equal numbers of
* consecutive points. The size of each bin is
* `floor(scoreAndLabels.count() / numBins)`, which means the resulting number
* of bins may not exactly equal numBins. The last bin in each partition may
* be smaller as a result, meaning there may be an extra sample at
* partition boundaries.
*/
@Since("1.0.0")
class BinaryClassificationMetrics @Since("1.3.0") (
@Since("1.3.0") val scoreAndLabels: RDD[(Double, Double)],
@Since("1.3.0") val numBins: Int) extends Logging {
require(numBins >= 0, "numBins must be nonnegative")
  /**
   * Defaults `numBins` to 0.
   */
  @Since("1.0.0")
  def this(scoreAndLabels: RDD[(Double, Double)]) = this(scoreAndLabels, 0)

  /**
   * An auxiliary constructor taking a DataFrame.
   * @param scoreAndLabels a DataFrame with two double columns: score and label
   */
  private[mllib] def this(scoreAndLabels: DataFrame) =
    this(scoreAndLabels.rdd.map(r => (r.getDouble(0), r.getDouble(1))))

  /**
   * Unpersist intermediate RDDs used in the computation.
   */
  @Since("1.0.0")
  def unpersist() {
    cumulativeCounts.unpersist()
  }

  /**
   * Returns thresholds in descending order.
   */
  @Since("1.0.0")
  def thresholds(): RDD[Double] = cumulativeCounts.map(_._1)

  /**
   * Returns the receiver operating characteristic (ROC) curve,
   * which is an RDD of (false positive rate, true positive rate)
   * with (0.0, 0.0) prepended and (1.0, 1.0) appended to it.
   * @see http://en.wikipedia.org/wiki/Receiver_operating_characteristic
   */
  @Since("1.0.0")
  def roc(): RDD[(Double, Double)] = {
    val rocCurve = createCurve(FalsePositiveRate, Recall)
    val sc = confusions.context
    // The (0,0) and (1,1) endpoints are added as single-partition RDDs.
    val first = sc.makeRDD(Seq((0.0, 0.0)), 1)
    val last = sc.makeRDD(Seq((1.0, 1.0)), 1)
    new UnionRDD[(Double, Double)](sc, Seq(first, rocCurve, last))
  }

  /**
   * Computes the area under the receiver operating characteristic (ROC) curve.
   */
  @Since("1.0.0")
  def areaUnderROC(): Double = AreaUnderCurve.of(roc())

  /**
   * Returns the precision-recall curve, which is an RDD of (recall, precision),
   * NOT (precision, recall), with (0.0, 1.0) prepended to it.
   * @see http://en.wikipedia.org/wiki/Precision_and_recall
   */
  @Since("1.0.0")
  def pr(): RDD[(Double, Double)] = {
    val prCurve = createCurve(Recall, Precision)
    val sc = confusions.context
    val first = sc.makeRDD(Seq((0.0, 1.0)), 1)
    first.union(prCurve)
  }

  /**
   * Computes the area under the precision-recall curve.
   */
  @Since("1.0.0")
  def areaUnderPR(): Double = AreaUnderCurve.of(pr())

  /**
   * Returns the (threshold, F-Measure) curve.
   * @param beta the beta factor in F-Measure computation.
   * @return an RDD of (threshold, F-Measure) pairs.
   * @see http://en.wikipedia.org/wiki/F1_score
   */
  @Since("1.0.0")
  def fMeasureByThreshold(beta: Double): RDD[(Double, Double)] = createCurve(FMeasure(beta))

  /**
   * Returns the (threshold, F-Measure) curve with beta = 1.0.
   */
  @Since("1.0.0")
  def fMeasureByThreshold(): RDD[(Double, Double)] = fMeasureByThreshold(1.0)

  /**
   * Returns the (threshold, precision) curve.
   */
  @Since("1.0.0")
  def precisionByThreshold(): RDD[(Double, Double)] = createCurve(Precision)

  /**
   * Returns the (threshold, recall) curve.
   */
  @Since("1.0.0")
  def recallByThreshold(): RDD[(Double, Double)] = createCurve(Recall)
// Computed once and cached: `confusions` is derived from `cumulativeCounts`,
// which is persisted below because every curve/area method re-reads it.
private lazy val (
  cumulativeCounts: RDD[(Double, BinaryLabelCounter)],
  confusions: RDD[(Double, BinaryConfusionMatrix)]) = {
  // Create a bin for each distinct score value, count positives and negatives within each bin,
  // and then sort by score values in descending order.
  val counts = scoreAndLabels.combineByKey(
    createCombiner = (label: Double) => new BinaryLabelCounter(0L, 0L) += label,
    mergeValue = (c: BinaryLabelCounter, label: Double) => c += label,
    mergeCombiners = (c1: BinaryLabelCounter, c2: BinaryLabelCounter) => c1 += c2
  ).sortByKey(ascending = false)
  val binnedCounts =
    // Only down-sample if bins is > 0
    if (numBins == 0) {
      // Use original directly
      counts
    } else {
      val countsSize = counts.count()
      // Group the iterator into chunks of about countsSize / numBins points,
      // so that the resulting number of bins is about numBins
      var grouping = countsSize / numBins
      if (grouping < 2) {
        // numBins was more than half of the size; no real point in down-sampling to bins
        logInfo(s"Curve is too small ($countsSize) for $numBins bins to be useful")
        counts
      } else {
        if (grouping >= Int.MaxValue) {
          // Iterator.grouped takes an Int, so the chunk size must fit in one.
          logWarning(
            s"Curve too large ($countsSize) for $numBins bins; capping at ${Int.MaxValue}")
          grouping = Int.MaxValue
        }
        counts.mapPartitions(_.grouped(grouping.toInt).map { pairs =>
          // The score of the combined point will be just the first one's score
          val firstScore = pairs.head._1
          // The point will contain all counts in this chunk
          val agg = new BinaryLabelCounter()
          pairs.foreach(pair => agg += pair._2)
          (firstScore, agg)
        })
      }
    }
  // Per-partition label totals, collected to the driver so that each partition
  // can be offset by the cumulative total of all preceding partitions.
  val agg = binnedCounts.values.mapPartitions { iter =>
    val agg = new BinaryLabelCounter()
    iter.foreach(agg += _)
    Iterator(agg)
  }.collect()
  // scanLeft yields one entry per partition prefix; clone() keeps the running
  // accumulator from being shared across entries.
  val partitionwiseCumulativeCounts =
    agg.scanLeft(new BinaryLabelCounter())((agg, c) => agg.clone() += c)
  val totalCount = partitionwiseCumulativeCounts.last
  logInfo(s"Total counts: $totalCount")
  val cumulativeCounts = binnedCounts.mapPartitionsWithIndex(
    (index: Int, iter: Iterator[(Double, BinaryLabelCounter)]) => {
      // cumCount is mutated in place while scanning the partition; each emitted
      // pair clones it to freeze the running total at that score. This relies
      // on the iterator being consumed in order — do not parallelize this map.
      val cumCount = partitionwiseCumulativeCounts(index)
      iter.map { case (score, c) =>
        cumCount += c
        (score, cumCount.clone())
      }
    }, preservesPartitioning = true)
  cumulativeCounts.persist()
  val confusions = cumulativeCounts.map { case (score, cumCount) =>
    (score, BinaryConfusionMatrixImpl(cumCount, totalCount).asInstanceOf[BinaryConfusionMatrix])
  }
  (cumulativeCounts, confusions)
}
/** Creates a curve of (threshold, metric), keyed by the score threshold. */
private def createCurve(y: BinaryClassificationMetricComputer): RDD[(Double, Double)] =
  confusions.map { case (threshold, confusion) =>
    (threshold, y(confusion))
  }
/** Creates a curve of (metricX, metricY); the score threshold itself is dropped. */
private def createCurve(
    x: BinaryClassificationMetricComputer,
    y: BinaryClassificationMetricComputer): RDD[(Double, Double)] =
  confusions.map { case (_, confusion) =>
    (x(confusion), y(confusion))
  }
}
| gioenn/xSpark | mllib/src/main/scala/org/apache/spark/mllib/evaluation/BinaryClassificationMetrics.scala | Scala | apache-2.0 | 8,548 |
package com.overviewdocs.metadata
import org.specs2.matcher.JsonMatchers
import org.specs2.mutable.Specification
import org.specs2.specification.Scope
import play.api.libs.json.{JsObject,Json}
/** Specs for [[MetadataSchema]]: JSON serialization, parsing, inference and the empty schema. */
class MetadataSchemaSpec extends Specification with JsonMatchers {
  trait BaseScope extends Scope

  "#toJson" should {
    trait ToJsonScope extends BaseScope {
      val version: Int = 1
      val fields: Vector[MetadataField] = Vector()
      // Serializes a schema built from the scope's version/fields to a JSON String.
      def json: String = MetadataSchema(version, fields).toJson.toString
    }
    "include a version" in new ToJsonScope {
      json must /("version" -> 1)
    }
    "include a String field" in new ToJsonScope {
      override val fields = Vector(MetadataField("foo", MetadataFieldType.String, MetadataFieldDisplay.TextInput))
      json must /("fields") /#(0) /("name" -> "foo")
      json must /("fields") /#(0) /("type" -> "String")
      json must /("fields") /#(0) /("display" -> "TextInput")
    }
  }

  "::fromJson" should {
    trait FromJsonScope extends BaseScope {
      // Parses a raw JSON String into a MetadataSchema.
      def from(json: String) = MetadataSchema.fromJson(Json.parse(json))
    }
    "parse the version" in new FromJsonScope {
      from("""{"version": 1,"fields":[]}""").version must beEqualTo(1)
    }
    "fail on too-high version" in new FromJsonScope {
      from("""{"version":2,"fields":[]}""") must throwA[IllegalArgumentException]
    }
    "fail when version is missing" in new FromJsonScope {
      from("""{"fields":[{"name":"foo","type":"String"}]}""") must throwA[IllegalArgumentException]
    }
    "parse a String field" in new FromJsonScope {
      from("""{"version":1,"fields":[{"name":"foo","type":"String","display":"TextInput"}]}""").fields must beEqualTo(Vector(
        MetadataField("foo", MetadataFieldType.String, MetadataFieldDisplay.TextInput)
      ))
    }
    "parse a Div field" in new FromJsonScope {
      from("""{"version":1,"fields":[{"name":"foo","type":"String","display":"Div"}]}""").fields must beEqualTo(Vector(
        MetadataField("foo", MetadataFieldType.String, MetadataFieldDisplay.Div)
      ))
    }
    "parse a Pre field" in new FromJsonScope {
      from("""{"version":1,"fields":[{"name":"foo","type":"String","display":"Pre"}]}""").fields must beEqualTo(Vector(
        MetadataField("foo", MetadataFieldType.String, MetadataFieldDisplay.Pre)
      ))
    }
    "default to String+TextInput" in new FromJsonScope {
      // Vector for consistency with the other field-equality expectations above.
      from("""{"version":1,"fields":[{"name":"foo"}]}""").fields must beEqualTo(Vector(
        MetadataField("foo", MetadataFieldType.String, MetadataFieldDisplay.TextInput)
      ))
    }
    "parse fields in order" in new FromJsonScope {
      from("""{"version":1,"fields":[{"name":"foo","type":"String","display":"TextInput"},{"name":"bar","type":"String","display":"TextInput"}]}""").fields must beEqualTo(Vector(
        MetadataField("foo", MetadataFieldType.String, MetadataFieldDisplay.TextInput),
        MetadataField("bar", MetadataFieldType.String, MetadataFieldDisplay.TextInput)
      ))
    }
    "not parse an invalid type" in new FromJsonScope {
      from("""{"version":1,"fields":[{"name":"foo","type":"string"}]}""") must throwA[IllegalArgumentException]
    }
    "not parse an invalid display" in new FromJsonScope {
      from("""{"version":1,"fields":[{"name":"foo","display":"textinput"}]}""") must throwA[IllegalArgumentException]
    }
    "provide an implicit Reads for parsing" in {
      import MetadataSchema.Json.reads
      val result = Json.parse("""{"version":1,"fields":[{"name":"foo","type":"String","display":"TextInput"}]}""").as[MetadataSchema]
      result must beEqualTo(MetadataSchema(1, Vector(MetadataField("foo", MetadataFieldType.String, MetadataFieldDisplay.TextInput))))
    }
  }

  "::inferFromMetadataJson" should {
    trait InferScope extends BaseScope {
      // Infers a schema directly from a metadata JSON object.
      def from(jsonString: String) = MetadataSchema.inferFromMetadataJson(Json.parse(jsonString).as[JsObject])
    }
    "give version 1" in new InferScope {
      from("""{"foo":"bar"}""").version must beEqualTo(1)
    }
    "parse String fields" in new InferScope {
      val result = from("""{"foo":"bar","moo":"mar"}""")
      result.fields must containTheSameElementsAs(Vector(
        MetadataField("foo", MetadataFieldType.String, MetadataFieldDisplay.TextInput),
        MetadataField("moo", MetadataFieldType.String, MetadataFieldDisplay.TextInput)
      ))
    }
    "parse empty JSON" in new InferScope {
      from("{}") must beEqualTo(MetadataSchema.empty)
    }
  }

  "::empty" should {
    trait EmptyScope extends BaseScope {
      val subject = MetadataSchema.empty
    }
    "use version 1" in new EmptyScope {
      subject.version must beEqualTo(1)
    }
    "not have any fields" in new EmptyScope {
      subject.fields must beEqualTo(Vector())
    }
  }
}
| overview/overview-server | common/src/test/scala/com/overviewdocs/metadata/MetadataSchemaSpec.scala | Scala | agpl-3.0 | 4,792 |
package org.smitt.conf
import akka.actor.Props
import org.smitt.Endpoint
/** Configuration hook supplying messenger-actor Props. */
trait MessengerConf {
  /**
   * Akka Props for the e-mail messenger actor bound to `endpoint`.
   * NOTE(review): semantics inferred from the name only — confirm with implementors.
   */
  def emailMessengerProps(endpoint: Endpoint): Props
}
| sergius/smitt | src/main/scala/org/smitt/conf/MessengerConf.scala | Scala | mit | 154 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.tree
import java.io.IOException
import scala.collection.mutable
import scala.collection.JavaConverters._
import org.apache.spark.Logging
import org.apache.spark.annotation.Experimental
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.configuration.Strategy
import org.apache.spark.mllib.tree.configuration.Algo._
import org.apache.spark.mllib.tree.configuration.QuantileStrategy._
import org.apache.spark.mllib.tree.impl.{BaggedPoint, DecisionTreeMetadata, NodeIdCache,
TimeTracker, TreePoint}
import org.apache.spark.mllib.tree.impurity.Impurities
import org.apache.spark.mllib.tree.model._
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.Utils
/**
* :: Experimental ::
* A class that implements a [[http://en.wikipedia.org/wiki/Random_forest Random Forest]]
* learning algorithm for classification and regression.
* It supports both continuous and categorical features.
*
* The settings for featureSubsetStrategy are based on the following references:
* - log2: tested in Breiman (2001)
* - sqrt: recommended by Breiman manual for random forests
* - The defaults of sqrt (classification) and onethird (regression) match the R randomForest
* package.
* @see [[http://www.stat.berkeley.edu/~breiman/randomforest2001.pdf Breiman (2001)]]
* @see [[http://www.stat.berkeley.edu/~breiman/Using_random_forests_V3.1.pdf Breiman manual for
* random forests]]
*
* @param strategy The configuration parameters for the random forest algorithm which specify
* the type of algorithm (classification, regression, etc.), feature type
* (continuous, categorical), depth of the tree, quantile calculation strategy,
* etc.
* @param numTrees If 1, then no bootstrapping is used. If > 1, then bootstrapping is done.
* @param featureSubsetStrategy Number of features to consider for splits at each node.
* Supported: "auto", "all", "sqrt", "log2", "onethird".
* If "auto" is set, this parameter is set based on numTrees:
* if numTrees == 1, set to "all";
* if numTrees > 1 (forest) set to "sqrt" for classification and
* to "onethird" for regression.
* @param seed Random seed for bootstrapping and choosing feature subsets.
*/
@Experimental
private class RandomForest (
    private val strategy: Strategy,
    private val numTrees: Int,
    featureSubsetStrategy: String,
    private val seed: Int)
  extends Serializable with Logging {

  /*
    ALGORITHM
    This is a sketch of the algorithm to help new developers.

    The algorithm partitions data by instances (rows).
    On each iteration, the algorithm splits a set of nodes. In order to choose the best split
    for a given node, sufficient statistics are collected from the distributed data.
    For each node, the statistics are collected to some worker node, and that worker selects
    the best split.

    This setup requires discretization of continuous features. This binning is done in the
    findSplitsBins() method during initialization, after which each continuous feature becomes
    an ordered discretized feature with at most maxBins possible values.

    The main loop in the algorithm operates on a queue of nodes (nodeQueue). These nodes
    lie at the periphery of the tree being trained. If multiple trees are being trained at once,
    then this queue contains nodes from all of them. Each iteration works roughly as follows:
      On the master node:
        - Some number of nodes are pulled off of the queue (based on the amount of memory
          required for their sufficient statistics).
        - For random forests, if featureSubsetStrategy is not "all," then a subset of candidate
          features are chosen for each node. See method selectNodesToSplit().
      On worker nodes, via method findBestSplits():
        - The worker makes one pass over its subset of instances.
        - For each (tree, node, feature, split) tuple, the worker collects statistics about
          splitting. Note that the set of (tree, node) pairs is limited to the nodes selected
          from the queue for this iteration. The set of features considered can also be limited
          based on featureSubsetStrategy.
        - For each node, the statistics for that node are aggregated to a particular worker
          via reduceByKey(). The designated worker chooses the best (feature, split) pair,
          or chooses to stop splitting if the stopping criteria are met.
      On the master node:
        - The master collects all decisions about splitting nodes and updates the model.
        - The updated model is passed to the workers on the next iteration.
    This process continues until the node queue is empty.

    Most of the methods in this implementation support the statistics aggregation, which is
    the heaviest part of the computation. In general, this implementation is bound by either
    the cost of statistics computation on workers or by communicating the sufficient statistics.
  */

  strategy.assertValid()
  require(numTrees > 0, s"RandomForest requires numTrees > 0, but was given numTrees = $numTrees.")
  require(RandomForest.supportedFeatureSubsetStrategies.contains(featureSubsetStrategy),
    s"RandomForest given invalid featureSubsetStrategy: $featureSubsetStrategy." +
    s" Supported values: ${RandomForest.supportedFeatureSubsetStrategies.mkString(", ")}.")

  /**
   * Method to train a decision tree model over an RDD
   * @param input Training data: RDD of [[org.apache.spark.mllib.regression.LabeledPoint]]
   * @return a random forest model that can be used for prediction
   */
  def run(input: RDD[LabeledPoint]): RandomForestModel = {

    val timer = new TimeTracker()
    timer.start("total")
    timer.start("init")

    val retaggedInput = input.retag(classOf[LabeledPoint])
    val metadata =
      DecisionTreeMetadata.buildMetadata(retaggedInput, strategy, numTrees, featureSubsetStrategy)
    logDebug("algo = " + strategy.algo)
    logDebug("numTrees = " + numTrees)
    logDebug("seed = " + seed)
    logDebug("maxBins = " + metadata.maxBins)
    logDebug("featureSubsetStrategy = " + featureSubsetStrategy)
    logDebug("numFeaturesPerNode = " + metadata.numFeaturesPerNode)
    logDebug("subsamplingRate = " + strategy.subsamplingRate)

    // Find the splits and the corresponding bins (interval between the splits) using a sample
    // of the input data.
    timer.start("findSplitsBins")
    val (splits, bins) = DecisionTree.findSplitsBins(retaggedInput, metadata)
    timer.stop("findSplitsBins")
    logDebug("numBins: feature: number of bins")
    logDebug(Range(0, metadata.numFeatures).map { featureIndex =>
      s"\\t$featureIndex\\t${metadata.numBins(featureIndex)}"
    }.mkString("\\n"))

    // Bin feature values (TreePoint representation).
    // Cache input RDD for speedup during multiple passes.
    val treeInput = TreePoint.convertToTreeRDD(retaggedInput, bins, metadata)

    // Bootstrapping (sampling with replacement) only makes sense for a forest.
    val withReplacement = numTrees > 1

    val baggedInput = BaggedPoint.convertToBaggedRDD(treeInput,
      strategy.subsamplingRate, numTrees,
      withReplacement, seed).persist(StorageLevel.MEMORY_AND_DISK)

    // depth of the decision tree
    val maxDepth = strategy.maxDepth
    require(maxDepth <= 30,
      s"DecisionTree currently only supports maxDepth <= 30, but was given maxDepth = $maxDepth.")

    // Max memory usage for aggregates
    // TODO: Calculate memory usage more precisely.
    val maxMemoryUsage: Long = strategy.maxMemoryInMB * 1024L * 1024L
    logDebug("max memory usage for aggregates = " + maxMemoryUsage + " bytes.")
    val maxMemoryPerNode = {
      val featureSubset: Option[Array[Int]] = if (metadata.subsamplingFeatures) {
        // Find numFeaturesPerNode largest bins to get an upper bound on memory usage.
        Some(metadata.numBins.zipWithIndex.sortBy(- _._1)
          .take(metadata.numFeaturesPerNode).map(_._2))
      } else {
        None
      }
      RandomForest.aggregateSizeForNode(metadata, featureSubset) * 8L
    }
    require(maxMemoryPerNode <= maxMemoryUsage,
      s"RandomForest/DecisionTree given maxMemoryInMB = ${strategy.maxMemoryInMB}," +
      " which is too small for the given features." +
      s" Minimum value = ${maxMemoryPerNode / (1024L * 1024L)}")

    timer.stop("init")

    /*
     * The main idea here is to perform group-wise training of the decision tree nodes thus
     * reducing the passes over the data from (# nodes) to (# nodes / maxNumberOfNodesPerGroup).
     * Each data sample is handled by a particular node (or it reaches a leaf and is not used
     * in lower levels).
     */

    // Create an RDD of node Id cache.
    // At first, all the rows belong to the root nodes (node Id == 1).
    val nodeIdCache = if (strategy.useNodeIdCache) {
      Some(NodeIdCache.init(
        data = baggedInput,
        numTrees = numTrees,
        checkpointInterval = strategy.checkpointInterval,
        initVal = 1))
    } else {
      None
    }

    // FIFO queue of nodes to train: (treeIndex, node)
    val nodeQueue = new mutable.Queue[(Int, Node)]()

    val rng = new scala.util.Random()
    rng.setSeed(seed)

    // Allocate and queue root nodes.
    val topNodes: Array[Node] = Array.fill[Node](numTrees)(Node.emptyNode(nodeIndex = 1))
    Range(0, numTrees).foreach(treeIndex => nodeQueue.enqueue((treeIndex, topNodes(treeIndex))))

    while (nodeQueue.nonEmpty) {
      // Collect some nodes to split, and choose features for each node (if subsampling).
      // Each group of nodes may come from one or multiple trees, and at multiple levels.
      val (nodesForGroup, treeToNodeToIndexInfo) =
        RandomForest.selectNodesToSplit(nodeQueue, maxMemoryUsage, metadata, rng)
      // Sanity check (should never occur):
      assert(nodesForGroup.size > 0,
        s"RandomForest selected empty nodesForGroup. Error for unknown reason.")

      // Choose node splits, and enqueue new nodes as needed.
      timer.start("findBestSplits")
      DecisionTree.findBestSplits(baggedInput, metadata, topNodes, nodesForGroup,
        treeToNodeToIndexInfo, splits, bins, nodeQueue, timer, nodeIdCache = nodeIdCache)
      timer.stop("findBestSplits")
    }

    baggedInput.unpersist()

    timer.stop("total")

    logInfo("Internal timing for DecisionTree:")
    logInfo(s"$timer")

    // Delete any remaining checkpoints used for node Id cache.
    if (nodeIdCache.nonEmpty) {
      try {
        nodeIdCache.get.deleteAllCheckpoints()
      } catch {
        case e: IOException =>
          // Best-effort cleanup: a failed checkpoint delete should not fail training.
          logWarning(s"delete all checkpoints failed. Error reason: ${e.getMessage}")
      }
    }

    val trees = topNodes.map(topNode => new DecisionTreeModel(topNode, strategy.algo))
    new RandomForestModel(strategy.algo, trees)
  }

}
object RandomForest extends Serializable with Logging {

  /**
   * Method to train a decision tree model for binary or multiclass classification.
   *
   * @param input Training dataset: RDD of [[org.apache.spark.mllib.regression.LabeledPoint]].
   *              Labels should take values {0, 1, ..., numClasses-1}.
   * @param strategy Parameters for training each tree in the forest.
   * @param numTrees Number of trees in the random forest.
   * @param featureSubsetStrategy Number of features to consider for splits at each node.
   *                              Supported: "auto", "all", "sqrt", "log2", "onethird".
   *                              If "auto" is set, this parameter is set based on numTrees:
   *                                if numTrees == 1, set to "all";
   *                                if numTrees > 1 (forest) set to "sqrt".
   * @param seed Random seed for bootstrapping and choosing feature subsets.
   * @return a random forest model that can be used for prediction
   */
  def trainClassifier(
      input: RDD[LabeledPoint],
      strategy: Strategy,
      numTrees: Int,
      featureSubsetStrategy: String,
      seed: Int): RandomForestModel = {
    require(strategy.algo == Classification,
      s"RandomForest.trainClassifier given Strategy with invalid algo: ${strategy.algo}")
    val rf = new RandomForest(strategy, numTrees, featureSubsetStrategy, seed)
    rf.run(input)
  }

  /**
   * Method to train a decision tree model for binary or multiclass classification.
   *
   * @param input Training dataset: RDD of [[org.apache.spark.mllib.regression.LabeledPoint]].
   *              Labels should take values {0, 1, ..., numClasses-1}.
   * @param numClasses number of classes for classification.
   * @param categoricalFeaturesInfo Map storing arity of categorical features.
   *                                E.g., an entry (n -> k) indicates that feature n is categorical
   *                                with k categories indexed from 0: {0, 1, ..., k-1}.
   * @param numTrees Number of trees in the random forest.
   * @param featureSubsetStrategy Number of features to consider for splits at each node.
   *                              Supported: "auto", "all", "sqrt", "log2", "onethird".
   *                              If "auto" is set, this parameter is set based on numTrees:
   *                                if numTrees == 1, set to "all";
   *                                if numTrees > 1 (forest) set to "sqrt".
   * @param impurity Criterion used for information gain calculation.
   *                 Supported values: "gini" (recommended) or "entropy".
   * @param maxDepth Maximum depth of the tree.
   *                 E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.
   *                 (suggested value: 4)
   * @param maxBins maximum number of bins used for splitting features
   *                (suggested value: 100)
   * @param seed Random seed for bootstrapping and choosing feature subsets.
   * @return a random forest model that can be used for prediction
   */
  def trainClassifier(
      input: RDD[LabeledPoint],
      numClasses: Int,
      categoricalFeaturesInfo: Map[Int, Int],
      numTrees: Int,
      featureSubsetStrategy: String,
      impurity: String,
      maxDepth: Int,
      maxBins: Int,
      seed: Int = Utils.random.nextInt()): RandomForestModel = {
    val impurityType = Impurities.fromString(impurity)
    val strategy = new Strategy(Classification, impurityType, maxDepth,
      numClasses, maxBins, Sort, categoricalFeaturesInfo)
    trainClassifier(input, strategy, numTrees, featureSubsetStrategy, seed)
  }

  /**
   * Java-friendly API for [[org.apache.spark.mllib.tree.RandomForest$#trainClassifier]]
   */
  def trainClassifier(
      input: JavaRDD[LabeledPoint],
      numClasses: Int,
      categoricalFeaturesInfo: java.util.Map[java.lang.Integer, java.lang.Integer],
      numTrees: Int,
      featureSubsetStrategy: String,
      impurity: String,
      maxDepth: Int,
      maxBins: Int,
      seed: Int): RandomForestModel = {
    trainClassifier(input.rdd, numClasses,
      categoricalFeaturesInfo.asInstanceOf[java.util.Map[Int, Int]].asScala.toMap,
      numTrees, featureSubsetStrategy, impurity, maxDepth, maxBins, seed)
  }

  /**
   * Method to train a decision tree model for regression.
   *
   * @param input Training dataset: RDD of [[org.apache.spark.mllib.regression.LabeledPoint]].
   *              Labels are real numbers.
   * @param strategy Parameters for training each tree in the forest.
   * @param numTrees Number of trees in the random forest.
   * @param featureSubsetStrategy Number of features to consider for splits at each node.
   *                              Supported: "auto", "all", "sqrt", "log2", "onethird".
   *                              If "auto" is set, this parameter is set based on numTrees:
   *                                if numTrees == 1, set to "all";
   *                                if numTrees > 1 (forest) set to "onethird".
   * @param seed Random seed for bootstrapping and choosing feature subsets.
   * @return a random forest model that can be used for prediction
   */
  def trainRegressor(
      input: RDD[LabeledPoint],
      strategy: Strategy,
      numTrees: Int,
      featureSubsetStrategy: String,
      seed: Int): RandomForestModel = {
    require(strategy.algo == Regression,
      s"RandomForest.trainRegressor given Strategy with invalid algo: ${strategy.algo}")
    val rf = new RandomForest(strategy, numTrees, featureSubsetStrategy, seed)
    rf.run(input)
  }

  /**
   * Method to train a decision tree model for regression.
   *
   * @param input Training dataset: RDD of [[org.apache.spark.mllib.regression.LabeledPoint]].
   *              Labels are real numbers.
   * @param categoricalFeaturesInfo Map storing arity of categorical features.
   *                                E.g., an entry (n -> k) indicates that feature n is categorical
   *                                with k categories indexed from 0: {0, 1, ..., k-1}.
   * @param numTrees Number of trees in the random forest.
   * @param featureSubsetStrategy Number of features to consider for splits at each node.
   *                              Supported: "auto", "all", "sqrt", "log2", "onethird".
   *                              If "auto" is set, this parameter is set based on numTrees:
   *                                if numTrees == 1, set to "all";
   *                                if numTrees > 1 (forest) set to "onethird".
   * @param impurity Criterion used for information gain calculation.
   *                 Supported values: "variance".
   * @param maxDepth Maximum depth of the tree.
   *                 E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.
   *                 (suggested value: 4)
   * @param maxBins maximum number of bins used for splitting features
   *                (suggested value: 100)
   * @param seed Random seed for bootstrapping and choosing feature subsets.
   * @return a random forest model that can be used for prediction
   */
  def trainRegressor(
      input: RDD[LabeledPoint],
      categoricalFeaturesInfo: Map[Int, Int],
      numTrees: Int,
      featureSubsetStrategy: String,
      impurity: String,
      maxDepth: Int,
      maxBins: Int,
      seed: Int = Utils.random.nextInt()): RandomForestModel = {
    val impurityType = Impurities.fromString(impurity)
    val strategy = new Strategy(Regression, impurityType, maxDepth,
      0, maxBins, Sort, categoricalFeaturesInfo)
    trainRegressor(input, strategy, numTrees, featureSubsetStrategy, seed)
  }

  /**
   * Java-friendly API for [[org.apache.spark.mllib.tree.RandomForest$#trainRegressor]]
   */
  def trainRegressor(
      input: JavaRDD[LabeledPoint],
      categoricalFeaturesInfo: java.util.Map[java.lang.Integer, java.lang.Integer],
      numTrees: Int,
      featureSubsetStrategy: String,
      impurity: String,
      maxDepth: Int,
      maxBins: Int,
      seed: Int): RandomForestModel = {
    trainRegressor(input.rdd,
      categoricalFeaturesInfo.asInstanceOf[java.util.Map[Int, Int]].asScala.toMap,
      numTrees, featureSubsetStrategy, impurity, maxDepth, maxBins, seed)
  }

  /**
   * List of supported feature subset sampling strategies.
   */
  val supportedFeatureSubsetStrategies: Array[String] =
    Array("auto", "all", "sqrt", "log2", "onethird")

  /** Per-node bookkeeping: position within the current group plus optional feature subset. */
  private[tree] class NodeIndexInfo(
      val nodeIndexInGroup: Int,
      val featureSubset: Option[Array[Int]]) extends Serializable

  /**
   * Pull nodes off of the queue, and collect a group of nodes to be split on this iteration.
   * This tracks the memory usage for aggregates and stops adding nodes when too much memory
   * will be needed; this allows an adaptive number of nodes since different nodes may require
   * different amounts of memory (if featureSubsetStrategy is not "all").
   *
   * @param nodeQueue  Queue of nodes to split.
   * @param maxMemoryUsage  Bound on size of aggregate statistics.
   * @return  (nodesForGroup, treeToNodeToIndexInfo).
   *          nodesForGroup holds the nodes to split: treeIndex --> nodes in tree.
   *
   *          treeToNodeToIndexInfo holds indices selected features for each node:
   *            treeIndex --> (global) node index --> (node index in group, feature indices).
   *          The (global) node index is the index in the tree; the node index in group is the
   *           index in [0, numNodesInGroup) of the node in this group.
   *          The feature indices are None if not subsampling features.
   */
  private[tree] def selectNodesToSplit(
      nodeQueue: mutable.Queue[(Int, Node)],
      maxMemoryUsage: Long,
      metadata: DecisionTreeMetadata,
      rng: scala.util.Random): (Map[Int, Array[Node]], Map[Int, Map[Int, NodeIndexInfo]]) = {
    // Collect some nodes to split:
    //  nodesForGroup(treeIndex) = nodes to split
    val mutableNodesForGroup = new mutable.HashMap[Int, mutable.ArrayBuffer[Node]]()
    val mutableTreeToNodeToIndexInfo =
      new mutable.HashMap[Int, mutable.HashMap[Int, NodeIndexInfo]]()
    var memUsage: Long = 0L
    var numNodesInGroup = 0
    // NOTE(review): if the very first node's aggregates exceed maxMemoryUsage, no node is
    // dequeued and the caller's sanity assert fires — confirm whether a minimum of one node
    // should be forced (as in later Spark versions).
    while (nodeQueue.nonEmpty && memUsage < maxMemoryUsage) {
      val (treeIndex, node) = nodeQueue.head
      // Choose subset of features for node (if subsampling).
      val featureSubset: Option[Array[Int]] = if (metadata.subsamplingFeatures) {
        // TODO: Use more efficient subsampling?  (use selection-and-rejection or reservoir)
        Some(rng.shuffle(Range(0, metadata.numFeatures).toList)
          .take(metadata.numFeaturesPerNode).toArray)
      } else {
        None
      }
      // Check if enough memory remains to add this node to the group.
      val nodeMemUsage = RandomForest.aggregateSizeForNode(metadata, featureSubset) * 8L
      if (memUsage + nodeMemUsage <= maxMemoryUsage) {
        nodeQueue.dequeue()
        mutableNodesForGroup.getOrElseUpdate(treeIndex, new mutable.ArrayBuffer[Node]()) += node
        mutableTreeToNodeToIndexInfo
          .getOrElseUpdate(treeIndex, new mutable.HashMap[Int, NodeIndexInfo]())(node.id)
          = new NodeIndexInfo(numNodesInGroup, featureSubset)
      }
      numNodesInGroup += 1
      memUsage += nodeMemUsage
    }
    // Convert mutable maps to immutable ones.
    val nodesForGroup: Map[Int, Array[Node]] = mutableNodesForGroup.mapValues(_.toArray).toMap
    val treeToNodeToIndexInfo = mutableTreeToNodeToIndexInfo.mapValues(_.toMap).toMap
    (nodesForGroup, treeToNodeToIndexInfo)
  }

  /**
   * Get the number of values to be stored for this node in the bin aggregates.
   * @param featureSubset  Indices of features which may be split at this node.
   *                       If None, then use all features.
   */
  private[tree] def aggregateSizeForNode(
      metadata: DecisionTreeMetadata,
      featureSubset: Option[Array[Int]]): Long = {
    val totalBins = if (featureSubset.nonEmpty) {
      featureSubset.get.map(featureIndex => metadata.numBins(featureIndex).toLong).sum
    } else {
      metadata.numBins.map(_.toLong).sum
    }
    if (metadata.isClassification) {
      metadata.numClasses * totalBins
    } else {
      // Regression stores 3 statistics (count, sum, sum of squares) per bin.
      3 * totalBins
    }
  }
}
| trueyao/spark-lever | mllib/src/main/scala/org/apache/spark/mllib/tree/RandomForest.scala | Scala | apache-2.0 | 24,240 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.util.{Locale, ServiceConfigurationError, ServiceLoader}
import scala.collection.JavaConverters._
import scala.util.{Failure, Success, Try}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkException
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogStorageFormat, CatalogTable, CatalogUtils}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, TypeUtils}
import org.apache.spark.sql.connector.catalog.TableProvider
import org.apache.spark.sql.errors.{QueryCompilationErrors, QueryExecutionErrors}
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.command.DataWritingCommand
import org.apache.spark.sql.execution.datasources.csv.CSVFileFormat
import org.apache.spark.sql.execution.datasources.jdbc.JdbcRelationProvider
import org.apache.spark.sql.execution.datasources.json.JsonFileFormat
import org.apache.spark.sql.execution.datasources.orc.OrcFileFormat
import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat
import org.apache.spark.sql.execution.datasources.v2.FileDataSourceV2
import org.apache.spark.sql.execution.datasources.v2.orc.OrcDataSourceV2
import org.apache.spark.sql.execution.metric.SQLMetric
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.sources.{RateStreamProvider, TextSocketSourceProvider}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources._
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types.{DataType, StructField, StructType}
import org.apache.spark.sql.util.SchemaUtils
import org.apache.spark.util.{HadoopFSUtils, ThreadUtils, Utils}
/**
* The main class responsible for representing a pluggable Data Source in Spark SQL. In addition to
* acting as the canonical set of parameters that can describe a Data Source, this class is used to
* resolve a description to a concrete implementation that can be used in a query plan
* (either batch or streaming) or to write out data using an external library.
*
* From an end user's perspective a DataSource description can be created explicitly using
* [[org.apache.spark.sql.DataFrameReader]] or CREATE TABLE USING DDL. Additionally, this class is
* used when resolving a description from a metastore to a concrete implementation.
*
* Many of the arguments to this class are optional, though depending on the specific API being used
* these optional arguments might be filled in during resolution using either inference or external
* metadata. For example, when reading a partitioned table from a file system, partition columns
* will be inferred from the directory layout even if they are not specified.
*
* @param paths A list of file system paths that hold data. These will be globbed before if
* the "__globPaths__" option is true, and will be qualified. This option only works
* when reading from a [[FileFormat]].
* @param userSpecifiedSchema An optional specification of the schema of the data. When present
* we skip attempting to infer the schema.
* @param partitionColumns A list of column names that the relation is partitioned by. This list is
* generally empty during the read path, unless this DataSource is managed
* by Hive. In these cases, during `resolveRelation`, we will call
* `getOrInferFileFormatSchema` for file based DataSources to infer the
* partitioning. In other cases, if this list is empty, then this table
* is unpartitioned.
* @param bucketSpec An optional specification for bucketing (hash-partitioning) of the data.
* @param catalogTable Optional catalog table reference that can be used to push down operations
* over the datasource to the catalog service.
*/
case class DataSource(
    sparkSession: SparkSession,
    className: String,
    paths: Seq[String] = Nil,
    userSpecifiedSchema: Option[StructType] = None,
    partitionColumns: Seq[String] = Seq.empty,
    bucketSpec: Option[BucketSpec] = None,
    options: Map[String, String] = Map.empty,
    catalogTable: Option[CatalogTable] = None) extends Logging {

  /** Name, full schema and partition columns of a resolved streaming source. */
  case class SourceInfo(name: String, schema: StructType, partitionColumns: Seq[String])

  lazy val providingClass: Class[_] = {
    val cls = DataSource.lookupDataSource(className, sparkSession.sessionState.conf)
    // `providingClass` is used for resolving data source relation for catalog tables.
    // As now catalog for data source V2 is under development, here we fall back all the
    // [[FileDataSourceV2]] to [[FileFormat]] to guarantee the current catalog works.
    // [[FileDataSourceV2]] will still be used if we call the load()/save() method in
    // [[DataFrameReader]]/[[DataFrameWriter]], since they use method `lookupDataSource`
    // instead of `providingClass`.
    // Use constructor-based instantiation: `Class.newInstance` is deprecated (it bypasses
    // exception wrapping) and `providingInstance()` below already uses a constructor.
    cls.getDeclaredConstructor().newInstance() match {
      case f: FileDataSourceV2 => f.fallbackFileFormat
      case _ => cls
    }
  }

  /** Instantiates the (possibly fallen-back) provider class via its no-arg constructor. */
  private def providingInstance() = providingClass.getConstructor().newInstance()

  /** Session Hadoop configuration with this DataSource's options layered on top. */
  private def newHadoopConfiguration(): Configuration =
    sparkSession.sessionState.newHadoopConfWithOptions(options)

  lazy val sourceInfo: SourceInfo = sourceSchema()
  private val caseInsensitiveOptions = CaseInsensitiveMap(options)
  private val equality = sparkSession.sessionState.conf.resolver

  /**
   * Whether or not paths should be globbed before being used to access files.
   */
  def globPaths: Boolean = {
    options.get(DataSource.GLOB_PATHS_KEY)
      .map(_ == "true")
      .getOrElse(true)
  }

  // Eagerly validate the bucketing/sorting column lists at construction time so that a
  // duplicated column name fails fast rather than during planning.
  bucketSpec.map { bucket =>
    SchemaUtils.checkColumnNameDuplication(
      bucket.bucketColumnNames, "in the bucket definition", equality)
    SchemaUtils.checkColumnNameDuplication(
      bucket.sortColumnNames, "in the sort definition", equality)
  }

  /**
   * Get the schema of the given FileFormat, if provided by `userSpecifiedSchema`, or try to infer
   * it. In the read path, only managed tables by Hive provide the partition columns properly when
   * initializing this class. All other file based data sources will try to infer the partitioning,
   * and then cast the inferred types to user specified dataTypes if the partition columns exist
   * inside `userSpecifiedSchema`, otherwise we can hit data corruption bugs like SPARK-18510.
   * This method will try to skip file scanning whether `userSpecifiedSchema` and
   * `partitionColumns` are provided. Here are some code paths that use this method:
   *   1. `spark.read` (no schema): Most amount of work. Infer both schema and partitioning columns
   *   2. `spark.read.schema(userSpecifiedSchema)`: Parse partitioning columns, cast them to the
   *      dataTypes provided in `userSpecifiedSchema` if they exist or fallback to inferred
   *      dataType if they don't.
   *   3. `spark.readStream.schema(userSpecifiedSchema)`: For streaming use cases, users have to
   *      provide the schema. Here, we also perform partition inference like 2, and try to use
   *      dataTypes in `userSpecifiedSchema`. All subsequent triggers for this stream will re-use
   *      this information, therefore calls to this method should be very cheap, i.e. there won't
   *      be any further inference in any triggers.
   *
   * @param format the file format object for this DataSource
   * @param getFileIndex [[InMemoryFileIndex]] for getting partition schema and file list
   * @return A pair of the data schema (excluding partition columns) and the schema of the partition
   *         columns.
   */
  private def getOrInferFileFormatSchema(
      format: FileFormat,
      getFileIndex: () => InMemoryFileIndex): (StructType, StructType) = {
    // Lazy so that no listing/inference happens unless a branch below actually needs it.
    lazy val tempFileIndex = getFileIndex()

    val partitionSchema = if (partitionColumns.isEmpty) {
      // Try to infer partitioning, because no DataSource in the read path provides the partitioning
      // columns properly unless it is a Hive DataSource
      tempFileIndex.partitionSchema
    } else {
      // maintain old behavior before SPARK-18510. If userSpecifiedSchema is empty used inferred
      // partitioning
      if (userSpecifiedSchema.isEmpty) {
        val inferredPartitions = tempFileIndex.partitionSchema
        inferredPartitions
      } else {
        val partitionFields = partitionColumns.map { partitionColumn =>
          userSpecifiedSchema.flatMap(_.find(c => equality(c.name, partitionColumn))).orElse {
            val inferredPartitions = tempFileIndex.partitionSchema
            val inferredOpt = inferredPartitions.find(p => equality(p.name, partitionColumn))
            if (inferredOpt.isDefined) {
              logDebug(
                s"""Type of partition column: $partitionColumn not found in specified schema
                   |for $format.
                   |User Specified Schema
                   |=====================
                   |${userSpecifiedSchema.orNull}
                   |
                   |Falling back to inferred dataType if it exists.
                 """.stripMargin)
            }
            inferredOpt
          }.getOrElse {
            throw QueryCompilationErrors.partitionColumnNotSpecifiedError(
              format.toString, partitionColumn)
          }
        }
        StructType(partitionFields)
      }
    }

    val dataSchema = userSpecifiedSchema.map { schema =>
      StructType(schema.filterNot(f => partitionSchema.exists(p => equality(p.name, f.name))))
    }.orElse {
      // Remove "path" option so that it is not added to the paths returned by
      // `tempFileIndex.allFiles()`.
      format.inferSchema(
        sparkSession,
        caseInsensitiveOptions - "path",
        tempFileIndex.allFiles())
    }.getOrElse {
      throw QueryCompilationErrors.dataSchemaNotSpecifiedError(format.toString)
    }

    // We just print a warning message if the data schema and partition schema have the duplicate
    // columns. This is because we allow users to do so in the previous Spark releases and
    // we have the existing tests for the cases (e.g., `ParquetHadoopFsRelationSuite`).
    // See SPARK-18108 and SPARK-21144 for related discussions.
    try {
      SchemaUtils.checkColumnNameDuplication(
        (dataSchema ++ partitionSchema).map(_.name),
        "in the data schema and the partition schema",
        equality)
    } catch {
      case e: AnalysisException => logWarning(e.getMessage)
    }

    (dataSchema, partitionSchema)
  }

  /** Returns the name and schema of the source that can be used to continually read data. */
  private def sourceSchema(): SourceInfo = {
    providingInstance() match {
      case s: StreamSourceProvider =>
        val (name, schema) = s.sourceSchema(
          sparkSession.sqlContext, userSpecifiedSchema, className, caseInsensitiveOptions)
        SourceInfo(name, schema, Nil)

      case format: FileFormat =>
        val path = caseInsensitiveOptions.getOrElse("path", {
          throw QueryExecutionErrors.dataPathNotSpecifiedError()
        })

        // Check whether the path exists if it is not a glob pattern.
        // For glob pattern, we do not check it because the glob pattern might only make sense
        // once the streaming job starts and some upstream source starts dropping data.
        val hdfsPath = new Path(path)
        if (!globPaths || !SparkHadoopUtil.get.isGlobPath(hdfsPath)) {
          val fs = hdfsPath.getFileSystem(newHadoopConfiguration())
          if (!fs.exists(hdfsPath)) {
            throw QueryCompilationErrors.dataPathNotExistError(path)
          }
        }

        val isSchemaInferenceEnabled = sparkSession.sessionState.conf.streamingSchemaInference
        val isTextSource = providingClass == classOf[text.TextFileFormat]
        // If the schema inference is disabled, only text sources require schema to be specified
        if (!isSchemaInferenceEnabled && !isTextSource && userSpecifiedSchema.isEmpty) {
          throw QueryExecutionErrors.createStreamingSourceNotSpecifySchemaError()
        }

        val (dataSchema, partitionSchema) = getOrInferFileFormatSchema(format, () => {
          // The operations below are expensive therefore try not to do them if we don't need to,
          // e.g., in streaming mode, we have already inferred and registered partition columns,
          // we will never have to materialize the lazy val below
          val globbedPaths =
            checkAndGlobPathIfNecessary(checkEmptyGlobPath = false, checkFilesExist = false)
          createInMemoryFileIndex(globbedPaths)
        })
        val forceNullable =
          sparkSession.sessionState.conf.getConf(SQLConf.FILE_SOURCE_SCHEMA_FORCE_NULLABLE)
        val sourceDataSchema = if (forceNullable) dataSchema.asNullable else dataSchema
        SourceInfo(
          s"FileSource[$path]",
          StructType(sourceDataSchema ++ partitionSchema),
          partitionSchema.fieldNames)

      case _ =>
        throw QueryExecutionErrors.streamedOperatorUnsupportedByDataSourceError(
          className, "reading")
    }
  }

  /** Returns a source that can be used to continually read data. */
  def createSource(metadataPath: String): Source = {
    providingInstance() match {
      case s: StreamSourceProvider =>
        s.createSource(
          sparkSession.sqlContext,
          metadataPath,
          userSpecifiedSchema,
          className,
          caseInsensitiveOptions)

      case format: FileFormat =>
        val path = caseInsensitiveOptions.getOrElse("path", {
          throw QueryExecutionErrors.dataPathNotSpecifiedError()
        })
        new FileStreamSource(
          sparkSession = sparkSession,
          path = path,
          fileFormatClassName = className,
          schema = sourceInfo.schema,
          partitionColumns = sourceInfo.partitionColumns,
          metadataPath = metadataPath,
          options = caseInsensitiveOptions)

      case _ =>
        throw QueryExecutionErrors.streamedOperatorUnsupportedByDataSourceError(
          className, "reading")
    }
  }

  /** Returns a sink that can be used to continually write data. */
  def createSink(outputMode: OutputMode): Sink = {
    providingInstance() match {
      case s: StreamSinkProvider =>
        s.createSink(sparkSession.sqlContext, caseInsensitiveOptions, partitionColumns, outputMode)

      case fileFormat: FileFormat =>
        val path = caseInsensitiveOptions.getOrElse("path", {
          throw QueryExecutionErrors.dataPathNotSpecifiedError()
        })
        // The file sink only supports appending new files; other modes would require
        // rewriting existing output.
        if (outputMode != OutputMode.Append) {
          throw QueryCompilationErrors.dataSourceOutputModeUnsupportedError(className, outputMode)
        }
        new FileStreamSink(sparkSession, path, fileFormat, partitionColumns, caseInsensitiveOptions)

      case _ =>
        throw QueryExecutionErrors.streamedOperatorUnsupportedByDataSourceError(
          className, "writing")
    }
  }

  /**
   * Create a resolved [[BaseRelation]] that can be used to read data from or write data into this
   * [[DataSource]]
   *
   * @param checkFilesExist Whether to confirm that the files exist when generating the
   *                        non-streaming file based datasource. StructuredStreaming jobs already
   *                        list file existence, and when generating incremental jobs, the batch
   *                        is considered as a non-streaming file based data source. Since we know
   *                        that files already exist, we don't need to check them again.
   */
  def resolveRelation(checkFilesExist: Boolean = true): BaseRelation = {
    val relation = (providingInstance(), userSpecifiedSchema) match {
      // TODO: Throw when too much is given.
      case (dataSource: SchemaRelationProvider, Some(schema)) =>
        dataSource.createRelation(sparkSession.sqlContext, caseInsensitiveOptions, schema)
      case (dataSource: RelationProvider, None) =>
        dataSource.createRelation(sparkSession.sqlContext, caseInsensitiveOptions)
      case (_: SchemaRelationProvider, None) =>
        throw QueryCompilationErrors.schemaNotSpecifiedForSchemaRelationProviderError(className)
      case (dataSource: RelationProvider, Some(schema)) =>
        val baseRelation =
          dataSource.createRelation(sparkSession.sqlContext, caseInsensitiveOptions)
        if (baseRelation.schema != schema) {
          throw QueryCompilationErrors.userSpecifiedSchemaMismatchActualSchemaError(
            schema, baseRelation.schema)
        }
        baseRelation

      // We are reading from the results of a streaming query. Load files from the metadata log
      // instead of listing them using HDFS APIs.
      case (format: FileFormat, _)
          if FileStreamSink.hasMetadata(
            caseInsensitiveOptions.get("path").toSeq ++ paths,
            newHadoopConfiguration(),
            sparkSession.sessionState.conf) =>
        val basePath = new Path((caseInsensitiveOptions.get("path").toSeq ++ paths).head)
        val fileCatalog = new MetadataLogFileIndex(sparkSession, basePath,
          caseInsensitiveOptions, userSpecifiedSchema)
        val dataSchema = userSpecifiedSchema.orElse {
          // Remove "path" option so that it is not added to the paths returned by
          // `fileCatalog.allFiles()`.
          format.inferSchema(
            sparkSession,
            caseInsensitiveOptions - "path",
            fileCatalog.allFiles())
        }.getOrElse {
          throw QueryCompilationErrors.dataSchemaNotSpecifiedError(
            format.toString, fileCatalog.allFiles().mkString(","))
        }
        HadoopFsRelation(
          fileCatalog,
          partitionSchema = fileCatalog.partitionSchema,
          dataSchema = dataSchema,
          bucketSpec = None,
          format,
          caseInsensitiveOptions)(sparkSession)

      // This is a non-streaming file based datasource.
      case (format: FileFormat, _) =>
        val useCatalogFileIndex = sparkSession.sqlContext.conf.manageFilesourcePartitions &&
          catalogTable.isDefined && catalogTable.get.tracksPartitionsInCatalog &&
          catalogTable.get.partitionColumnNames.nonEmpty
        val (fileCatalog, dataSchema, partitionSchema) = if (useCatalogFileIndex) {
          val defaultTableSize = sparkSession.sessionState.conf.defaultSizeInBytes
          val index = new CatalogFileIndex(
            sparkSession,
            catalogTable.get,
            catalogTable.get.stats.map(_.sizeInBytes.toLong).getOrElse(defaultTableSize))
          (index, catalogTable.get.dataSchema, catalogTable.get.partitionSchema)
        } else {
          val globbedPaths = checkAndGlobPathIfNecessary(
            checkEmptyGlobPath = true, checkFilesExist = checkFilesExist)
          val index = createInMemoryFileIndex(globbedPaths)
          val (resultDataSchema, resultPartitionSchema) =
            getOrInferFileFormatSchema(format, () => index)
          (index, resultDataSchema, resultPartitionSchema)
        }
        HadoopFsRelation(
          fileCatalog,
          partitionSchema = partitionSchema,
          dataSchema = dataSchema.asNullable,
          bucketSpec = bucketSpec,
          format,
          caseInsensitiveOptions)(sparkSession)

      case _ =>
        throw QueryCompilationErrors.invalidDataSourceError(className)
    }

    relation match {
      case hs: HadoopFsRelation =>
        SchemaUtils.checkSchemaColumnNameDuplication(
          hs.dataSchema,
          "in the data schema",
          equality)
        SchemaUtils.checkSchemaColumnNameDuplication(
          hs.partitionSchema,
          "in the partition schema",
          equality)
        DataSourceUtils.verifySchema(hs.fileFormat, hs.dataSchema)
      case _ =>
        SchemaUtils.checkSchemaColumnNameDuplication(
          relation.schema,
          "in the data schema",
          equality)
    }

    relation
  }

  /**
   * Creates a command node to write the given [[LogicalPlan]] out to the given [[FileFormat]].
   * The returned command is unresolved and need to be analyzed.
   */
  private def planForWritingFileFormat(
      format: FileFormat, mode: SaveMode, data: LogicalPlan): InsertIntoHadoopFsRelationCommand = {
    // Don't glob path for the write path.  The contracts here are:
    //  1. Only one output path can be specified on the write path;
    //  2. Output path must be a legal HDFS style file system path;
    //  3. It's OK that the output path doesn't exist yet;
    val allPaths = paths ++ caseInsensitiveOptions.get("path")
    val outputPath = if (allPaths.length == 1) {
      val path = new Path(allPaths.head)
      val fs = path.getFileSystem(newHadoopConfiguration())
      path.makeQualified(fs.getUri, fs.getWorkingDirectory)
    } else {
      throw QueryExecutionErrors.multiplePathsSpecifiedError(allPaths)
    }

    val caseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis
    PartitioningUtils.validatePartitionColumn(data.schema, partitionColumns, caseSensitive)

    val fileIndex = catalogTable.map(_.identifier).map { tableIdent =>
      sparkSession.table(tableIdent).queryExecution.analyzed.collect {
        case LogicalRelation(t: HadoopFsRelation, _, _, _) => t.location
      }.head
    }
    // For partitioned relation r, r.schema's column ordering can be different from the column
    // ordering of data.logicalPlan (partition columns are all moved after data column).  This
    // will be adjusted within InsertIntoHadoopFsRelation.
    InsertIntoHadoopFsRelationCommand(
      outputPath = outputPath,
      staticPartitions = Map.empty,
      ifPartitionNotExists = false,
      partitionColumns = partitionColumns.map(UnresolvedAttribute.quoted),
      bucketSpec = bucketSpec,
      fileFormat = format,
      options = options,
      query = data,
      mode = mode,
      catalogTable = catalogTable,
      fileIndex = fileIndex,
      outputColumnNames = data.output.map(_.name))
  }

  /**
   * Writes the given [[LogicalPlan]] out to this [[DataSource]] and returns a [[BaseRelation]] for
   * the following reading.
   *
   * @param mode The save mode for this writing.
   * @param data The input query plan that produces the data to be written. Note that this plan
   *             is analyzed and optimized.
   * @param outputColumnNames The original output column names of the input query plan. The
   *                          optimizer may not preserve the output column's names' case, so we need
   *                          this parameter instead of `data.output`.
   * @param physicalPlan The physical plan of the input query plan. We should run the writing
   *                     command with this physical plan instead of creating a new physical plan,
   *                     so that the metrics can be correctly linked to the given physical plan and
   *                     shown in the web UI.
   */
  def writeAndRead(
      mode: SaveMode,
      data: LogicalPlan,
      outputColumnNames: Seq[String],
      physicalPlan: SparkPlan,
      metrics: Map[String, SQLMetric]): BaseRelation = {
    val outputColumns = DataWritingCommand.logicalPlanOutputWithNames(data, outputColumnNames)
    disallowWritingIntervals(outputColumns.map(_.dataType))
    providingInstance() match {
      case dataSource: CreatableRelationProvider =>
        dataSource.createRelation(
          sparkSession.sqlContext, mode, caseInsensitiveOptions, Dataset.ofRows(sparkSession, data))
      case format: FileFormat =>
        val cmd = planForWritingFileFormat(format, mode, data)
        val resolvedPartCols = cmd.partitionColumns.map { col =>
          // The partition columns created in `planForWritingFileFormat` should always be
          // `UnresolvedAttribute` with a single name part.
          assert(col.isInstanceOf[UnresolvedAttribute])
          val unresolved = col.asInstanceOf[UnresolvedAttribute]
          assert(unresolved.nameParts.length == 1)
          val name = unresolved.nameParts.head
          outputColumns.find(a => equality(a.name, name)).getOrElse {
            throw QueryCompilationErrors.cannotResolveAttributeError(name, data)
          }
        }
        val resolved = cmd.copy(
          partitionColumns = resolvedPartCols,
          outputColumnNames = outputColumnNames)
        resolved.run(sparkSession, physicalPlan)
        DataWritingCommand.propogateMetrics(sparkSession.sparkContext, resolved, metrics)
        // Replace the schema with that of the DataFrame we just wrote out to avoid re-inferring
        copy(userSpecifiedSchema = Some(outputColumns.toStructType.asNullable)).resolveRelation()
      case _ =>
        sys.error(s"${providingClass.getCanonicalName} does not allow create table as select.")
    }
  }

  /**
   * Returns a logical plan to write the given [[LogicalPlan]] out to this [[DataSource]].
   */
  def planForWriting(mode: SaveMode, data: LogicalPlan): LogicalPlan = {
    disallowWritingIntervals(data.schema.map(_.dataType))
    providingInstance() match {
      case dataSource: CreatableRelationProvider =>
        SaveIntoDataSourceCommand(data, dataSource, caseInsensitiveOptions, mode)
      case format: FileFormat =>
        DataSource.validateSchema(data.schema)
        planForWritingFileFormat(format, mode, data)
      case _ =>
        sys.error(s"${providingClass.getCanonicalName} does not allow create table as select.")
    }
  }

  /** Returns an [[InMemoryFileIndex]] that can be used to get partition schema and file list. */
  private def createInMemoryFileIndex(globbedPaths: Seq[Path]): InMemoryFileIndex = {
    val fileStatusCache = FileStatusCache.getOrCreate(sparkSession)
    new InMemoryFileIndex(
      sparkSession, globbedPaths, options, userSpecifiedSchema, fileStatusCache)
  }

  /**
   * Checks and returns files in all the paths.
   */
  private def checkAndGlobPathIfNecessary(
      checkEmptyGlobPath: Boolean,
      checkFilesExist: Boolean): Seq[Path] = {
    val allPaths = caseInsensitiveOptions.get("path") ++ paths
    DataSource.checkAndGlobPathIfNecessary(allPaths.toSeq, newHadoopConfiguration(),
      checkEmptyGlobPath, checkFilesExist, enableGlobbing = globPaths)
  }

  /** Rejects writes whose output schema contains interval types (not storable externally). */
  private def disallowWritingIntervals(dataTypes: Seq[DataType]): Unit = {
    dataTypes.foreach(TypeUtils.invokeOnceForInterval(_) {
      throw QueryCompilationErrors.cannotSaveIntervalIntoExternalStorageError()
    })
  }
}
object DataSource extends Logging {

  /** A map to maintain backward compatibility in case we move data sources around. */
  private val backwardCompatibilityMap: Map[String, String] = {
    val jdbc = classOf[JdbcRelationProvider].getCanonicalName
    val json = classOf[JsonFileFormat].getCanonicalName
    val parquet = classOf[ParquetFileFormat].getCanonicalName
    val csv = classOf[CSVFileFormat].getCanonicalName
    val libsvm = "org.apache.spark.ml.source.libsvm.LibSVMFileFormat"
    val orc = "org.apache.spark.sql.hive.orc.OrcFileFormat"
    val nativeOrc = classOf[OrcFileFormat].getCanonicalName
    val socket = classOf[TextSocketSourceProvider].getCanonicalName
    val rate = classOf[RateStreamProvider].getCanonicalName

    Map(
      "org.apache.spark.sql.jdbc" -> jdbc,
      "org.apache.spark.sql.jdbc.DefaultSource" -> jdbc,
      "org.apache.spark.sql.execution.datasources.jdbc.DefaultSource" -> jdbc,
      "org.apache.spark.sql.execution.datasources.jdbc" -> jdbc,
      "org.apache.spark.sql.json" -> json,
      "org.apache.spark.sql.json.DefaultSource" -> json,
      "org.apache.spark.sql.execution.datasources.json" -> json,
      "org.apache.spark.sql.execution.datasources.json.DefaultSource" -> json,
      "org.apache.spark.sql.parquet" -> parquet,
      "org.apache.spark.sql.parquet.DefaultSource" -> parquet,
      "org.apache.spark.sql.execution.datasources.parquet" -> parquet,
      "org.apache.spark.sql.execution.datasources.parquet.DefaultSource" -> parquet,
      "org.apache.spark.sql.hive.orc.DefaultSource" -> orc,
      "org.apache.spark.sql.hive.orc" -> orc,
      "org.apache.spark.sql.execution.datasources.orc.DefaultSource" -> nativeOrc,
      "org.apache.spark.sql.execution.datasources.orc" -> nativeOrc,
      "org.apache.spark.ml.source.libsvm.DefaultSource" -> libsvm,
      "org.apache.spark.ml.source.libsvm" -> libsvm,
      "com.databricks.spark.csv" -> csv,
      "org.apache.spark.sql.execution.streaming.TextSocketSourceProvider" -> socket,
      "org.apache.spark.sql.execution.streaming.RateSourceProvider" -> rate
    )
  }

  /**
   * Class that were removed in Spark 2.0. Used to detect incompatibility libraries for Spark 2.0.
   */
  private val spark2RemovedClasses = Set(
    "org.apache.spark.sql.DataFrame",
    "org.apache.spark.sql.sources.HadoopFsRelationProvider",
    "org.apache.spark.Logging")

  /** Given a provider name, look up the data source class definition. */
  def lookupDataSource(provider: String, conf: SQLConf): Class[_] = {
    // Resolve legacy aliases first, then handle the special-cased short names.
    val provider1 = backwardCompatibilityMap.getOrElse(provider, provider) match {
      case name if name.equalsIgnoreCase("orc") &&
          conf.getConf(SQLConf.ORC_IMPLEMENTATION) == "native" =>
        classOf[OrcDataSourceV2].getCanonicalName
      case name if name.equalsIgnoreCase("orc") &&
          conf.getConf(SQLConf.ORC_IMPLEMENTATION) == "hive" =>
        "org.apache.spark.sql.hive.orc.OrcFileFormat"
      case "com.databricks.spark.avro" if conf.replaceDatabricksSparkAvroEnabled =>
        "org.apache.spark.sql.avro.AvroFileFormat"
      case name => name
    }
    val provider2 = s"$provider1.DefaultSource"
    val loader = Utils.getContextOrSparkClassLoader
    val serviceLoader = ServiceLoader.load(classOf[DataSourceRegister], loader)

    try {
      serviceLoader.asScala.filter(_.shortName().equalsIgnoreCase(provider1)).toList match {
        // the provider format did not match any given registered aliases
        case Nil =>
          try {
            Try(loader.loadClass(provider1)).orElse(Try(loader.loadClass(provider2))) match {
              case Success(dataSource) =>
                // Found the data source using fully qualified path
                dataSource
              case Failure(error) =>
                if (provider1.startsWith("org.apache.spark.sql.hive.orc")) {
                  throw QueryCompilationErrors.orcNotUsedWithHiveEnabledError()
                } else if (provider1.toLowerCase(Locale.ROOT) == "avro" ||
                  provider1 == "com.databricks.spark.avro" ||
                  provider1 == "org.apache.spark.sql.avro") {
                  throw QueryCompilationErrors.failedToFindAvroDataSourceError(provider1)
                } else if (provider1.toLowerCase(Locale.ROOT) == "kafka") {
                  throw QueryCompilationErrors.failedToFindKafkaDataSourceError(provider1)
                } else {
                  throw QueryExecutionErrors.failedToFindDataSourceError(provider1, error)
                }
            }
          } catch {
            case e: NoClassDefFoundError => // This one won't be caught by Scala NonFatal
              // NoClassDefFoundError's class name uses "/" rather than "." for packages
              val className = e.getMessage.replaceAll("/", ".")
              if (spark2RemovedClasses.contains(className)) {
                throw QueryExecutionErrors.removedClassInSpark2Error(className, e)
              } else {
                throw e
              }
          }
        case head :: Nil =>
          // there is exactly one registered alias
          head.getClass
        case sources =>
          // There are multiple registered aliases for the input. If there is single datasource
          // that has "org.apache.spark" package in the prefix, we use it considering it is an
          // internal datasource within Spark.
          val sourceNames = sources.map(_.getClass.getName)
          val internalSources = sources.filter(_.getClass.getName.startsWith("org.apache.spark"))
          if (internalSources.size == 1) {
            logWarning(s"Multiple sources found for $provider1 (${sourceNames.mkString(", ")}), " +
              s"defaulting to the internal datasource (${internalSources.head.getClass.getName}).")
            internalSources.head.getClass
          } else {
            throw QueryCompilationErrors.findMultipleDataSourceError(provider1, sourceNames)
          }
      }
    } catch {
      case e: ServiceConfigurationError if e.getCause.isInstanceOf[NoClassDefFoundError] =>
        // NoClassDefFoundError's class name uses "/" rather than "." for packages
        val className = e.getCause.getMessage.replaceAll("/", ".")
        if (spark2RemovedClasses.contains(className)) {
          throw QueryExecutionErrors.incompatibleDataSourceRegisterError(e)
        } else {
          throw e
        }
    }
  }

  /**
   * Returns an optional [[TableProvider]] instance for the given provider. It returns None if
   * there is no corresponding Data Source V2 implementation, or the provider is configured to
   * fallback to Data Source V1 code path.
   */
  def lookupDataSourceV2(provider: String, conf: SQLConf): Option[TableProvider] = {
    val useV1Sources = conf.getConf(SQLConf.USE_V1_SOURCE_LIST).toLowerCase(Locale.ROOT)
      .split(",").map(_.trim)
    val cls = lookupDataSource(provider, conf)
    // `Class.newInstance` is deprecated; instantiate through the no-arg constructor instead.
    cls.getDeclaredConstructor().newInstance() match {
      case d: DataSourceRegister if useV1Sources.contains(d.shortName()) => None
      case t: TableProvider
          if !useV1Sources.contains(cls.getCanonicalName.toLowerCase(Locale.ROOT)) =>
        Some(t)
      case _ => None
    }
  }

  /**
   * The key in the "options" map for deciding whether or not to glob paths before use.
   */
  val GLOB_PATHS_KEY = "__globPaths__"

  /**
   * Checks and returns files in all the paths.
   */
  private[sql] def checkAndGlobPathIfNecessary(
      pathStrings: Seq[String],
      hadoopConf: Configuration,
      checkEmptyGlobPath: Boolean,
      checkFilesExist: Boolean,
      numThreads: Integer = 40,
      enableGlobbing: Boolean): Seq[Path] = {
    val qualifiedPaths = pathStrings.map { pathString =>
      val path = new Path(pathString)
      val fs = path.getFileSystem(hadoopConf)
      path.makeQualified(fs.getUri, fs.getWorkingDirectory)
    }

    // Split the paths into glob and non glob paths, because we don't need to do an existence check
    // for globbed paths.
    val (globPaths, nonGlobPaths) = qualifiedPaths.partition(SparkHadoopUtil.get.isGlobPath)

    val globbedPaths =
      try {
        ThreadUtils.parmap(globPaths, "globPath", numThreads) { globPath =>
          val fs = globPath.getFileSystem(hadoopConf)
          val globResult = if (enableGlobbing) {
            SparkHadoopUtil.get.globPath(fs, globPath)
          } else {
            qualifiedPaths
          }

          if (checkEmptyGlobPath && globResult.isEmpty) {
            throw QueryCompilationErrors.dataPathNotExistError(globPath.toString)
          }

          globResult
        }.flatten
      } catch {
        // `parmap` wraps worker exceptions in SparkException; rethrow the original cause.
        case e: SparkException => throw e.getCause
      }

    if (checkFilesExist) {
      try {
        ThreadUtils.parmap(nonGlobPaths, "checkPathsExist", numThreads) { path =>
          val fs = path.getFileSystem(hadoopConf)
          if (!fs.exists(path)) {
            throw QueryCompilationErrors.dataPathNotExistError(path.toString)
          }
        }
      } catch {
        case e: SparkException => throw e.getCause
      }
    }

    val allPaths = globbedPaths ++ nonGlobPaths
    if (checkFilesExist) {
      val (filteredOut, filteredIn) = allPaths.partition { path =>
        HadoopFSUtils.shouldFilterOutPathName(path.getName)
      }
      if (filteredIn.isEmpty) {
        // Use real newlines ("\n"), not the literal two characters "\" + "n", so the
        // ignored-path list is rendered one path per line in the logs.
        logWarning(
          s"All paths were ignored:\n  ${filteredOut.mkString("\n  ")}")
      } else {
        logDebug(
          s"Some paths were ignored:\n  ${filteredOut.mkString("\n  ")}")
      }
    }

    allPaths
  }

  /**
   * When creating a data source table, the `path` option has a special meaning: the table location.
   * This method extracts the `path` option and treat it as table location to build a
   * [[CatalogStorageFormat]]. Note that, the `path` option is removed from options after this.
   */
  def buildStorageFormatFromOptions(options: Map[String, String]): CatalogStorageFormat = {
    val path = CaseInsensitiveMap(options).get("path")
    val optionsWithoutPath = options.filterKeys(_.toLowerCase(Locale.ROOT) != "path")
    CatalogStorageFormat.empty.copy(
      locationUri = path.map(CatalogUtils.stringToURI), properties = optionsWithoutPath.toMap)
  }

  /**
   * Called before writing into a FileFormat based data source to make sure the
   * supplied schema is not empty.
   * @param schema
   */
  def validateSchema(schema: StructType): Unit = {
    def hasEmptySchema(schema: StructType): Boolean = {
      schema.size == 0 || schema.exists {
        case StructField(_, b: StructType, _, _) => hasEmptySchema(b)
        case _ => false
      }
    }

    if (hasEmptySchema(schema)) {
      throw QueryCompilationErrors.writeEmptySchemasUnsupportedByDataSourceError()
    }
  }
}
| BryanCutler/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala | Scala | apache-2.0 | 38,329 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.util
import scala.collection.JavaConverters._
import scala.collection.mutable
import org.apache.hadoop.fs.s3a.Constants.{ACCESS_KEY, ENDPOINT, SECRET_KEY}
import org.apache.spark.sql.hive.{CarbonMetaData, CarbonRelation, DictionaryMap}
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.metadata.encoder.Encoding
import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, TableInfo}
import org.apache.carbondata.core.metadata.schema.table.column.{CarbonColumn, ColumnSchema}
import org.apache.carbondata.core.util.CarbonUtil
case class TransformHolder(rdd: Any, mataData: CarbonMetaData)
/**
 * Carbon/Spark interop helpers: Spark-side metadata construction, raw DDL
 * schema rendering, delimiter escaping and S3 credential-key selection.
 */
object CarbonSparkUtil {

  /**
   * Builds the Spark-side [[CarbonMetaData]] for `carbonTable`: dimension and
   * measure column names, the per-dimension dictionary flags and whether the
   * table carries an aggregation datamap.
   */
  def createSparkMeta(carbonTable: CarbonTable): CarbonMetaData = {
    val dimensionsAttr = carbonTable.getDimensionByTableName(carbonTable.getTableName)
      .asScala.map(_.getColName)
    val measureAttr = carbonTable.getMeasureByTableName(carbonTable.getTableName)
      .asScala.map(_.getColName)
    // A dimension counts as dictionary-encoded only for a plain dictionary:
    // direct-dictionary (date/time) and complex-typed columns are excluded.
    val dictionary =
      carbonTable.getDimensionByTableName(carbonTable.getTableName).asScala.map { f =>
        (f.getColName.toLowerCase,
          f.hasEncoding(Encoding.DICTIONARY) && !f.hasEncoding(Encoding.DIRECT_DICTIONARY) &&
            !f.getDataType.isComplexType)
      }
    CarbonMetaData(dimensionsAttr,
      measureAttr,
      carbonTable,
      DictionaryMap(dictionary.toMap),
      CarbonUtil.hasAggregationDataMap(carbonTable))
  }

  /** Wraps a [[TableInfo]] into a [[CarbonRelation]] for the given table path. */
  def createCarbonRelation(tableInfo: TableInfo, tablePath: String): CarbonRelation = {
    val table = CarbonTable.buildFromTableInfo(tableInfo)
    CarbonRelation(
      tableInfo.getDatabaseName,
      tableInfo.getFactTable.getTableName,
      CarbonSparkUtil.createSparkMeta(table),
      table)
  }

  /**
   * Returns the formatted column comment (` comment "<text>"`) if a comment is
   * present in the column properties, otherwise the empty string ("").
   *
   * @param carbonColumn the column of carbonTable
   * @return formatted comment clause or ""
   */
  def getColumnComment(carbonColumn: CarbonColumn): String = {
    val columnProperties = carbonColumn.getColumnProperties
    if (columnProperties != null) {
      val comment: String = columnProperties.get(CarbonCommonConstants.COLUMN_COMMENT)
      // Fix: the original guard tested `comment != null` twice; a single
      // null-check is equivalent and sufficient.
      if (comment != null) {
        return " comment \"" + comment + "\""
      }
    }
    ""
  }

  /**
   * Renders the table's raw DDL schema: every visible column, in schema-ordinal
   * order, as `` `name` type [comment] `` joined with commas.
   *
   * @param carbonRelation logical plan for one carbon table
   * @return comma-joined schema string
   */
  def getRawSchema(carbonRelation: CarbonRelation): String = {
    val fields = new Array[String](
      carbonRelation.dimensionsAttr.size + carbonRelation.measureAttr.size)
    val carbonTable = carbonRelation.carbonTable
    val columnSchemas: mutable.Buffer[ColumnSchema] = carbonTable.getTableInfo.getFactTable
      .getListOfColumns.asScala
      .filter(cSchema => !cSchema.isInvisible && cSchema.getSchemaOrdinal != -1)
      .sortWith(_.getSchemaOrdinal < _.getSchemaOrdinal)
    val columnList = columnSchemas.toList.asJava
    // Dimensions and measures are rendered identically, so the two formerly
    // duplicated loops are merged; each field lands in the slot dictated by
    // its schema ordinal, preserving the table's declared column order.
    (carbonRelation.dimensionsAttr ++ carbonRelation.measureAttr).foreach { attr =>
      val carbonColumn = carbonTable.getColumnByName(carbonRelation.tableName, attr.name)
      val columnComment = getColumnComment(carbonColumn)
      fields(columnList.indexOf(carbonColumn.getColumnSchema)) =
        '`' + attr.name + '`' + ' ' + attr.dataType.catalogString + columnComment
    }
    fields.mkString(",")
  }

  /**
   * Adds an escape prefix for delimiters that are regex meta-characters so
   * they can be used verbatim inside UDF split expressions.
   *
   * @param delimiter A delimiter is a sequence of one or more characters
   *                  used to specify the boundary between separate fields
   * @return the (possibly escape-prefixed) delimiter
   */
  def delimiterConverter4Udf(delimiter: String): String = delimiter match {
    case "|" | "*" | "." | ":" | "^" | "\\" | "$" | "+" | "?" | "(" | ")" | "{" | "}" | "[" | "]" =>
      "\\\\" + delimiter
    case _ =>
      delimiter
  }

  /**
   * Returns the (accessKeyProperty, secretKeyProperty, endPointProperty)
   * property-name triple matching the store path's S3 scheme (s3a / s3n / s3).
   *
   * @throws Exception when the path does not start with a recognized S3 prefix
   */
  def getKeyOnPrefix(path: String): (String, String, String) = {
    val prefix = "spark.hadoop."
    val endPoint = prefix + ENDPOINT
    if (path.startsWith(CarbonCommonConstants.S3A_PREFIX)) {
      (prefix + ACCESS_KEY, prefix + SECRET_KEY, endPoint)
    } else if (path.startsWith(CarbonCommonConstants.S3N_PREFIX)) {
      (prefix + CarbonCommonConstants.S3N_ACCESS_KEY,
        prefix + CarbonCommonConstants.S3N_SECRET_KEY, endPoint)
    } else if (path.startsWith(CarbonCommonConstants.S3_PREFIX)) {
      (prefix + CarbonCommonConstants.S3_ACCESS_KEY,
        prefix + CarbonCommonConstants.S3_SECRET_KEY, endPoint)
    } else {
      throw new Exception("Incorrect Store Path")
    }
  }

  /**
   * Treats the optional fourth CLI argument as the S3 endpoint when it looks
   * like a host name (contains ".com"); otherwise returns "".
   */
  def getS3EndPoint(args: Array[String]): String = {
    if (args.length >= 4 && args(3).contains(".com")) args(3)
    else ""
  }
}
| manishgupta88/carbondata | integration/spark2/src/main/scala/org/apache/carbondata/spark/util/CarbonSparkUtil.scala | Scala | apache-2.0 | 5,911 |
package org.jetbrains.plugins.scala.lang.resolve2
/**
* Pavel.Fatin, 02.02.2010
*/
class ImportAliasTest extends ResolveTestBase {
  /** All fixture files for this suite live under `import/alias/`. */
  override def folderPath: String = {
    super.folderPath + "import/alias/"
  }
  //TODO importexclude
  // def testExclude = doTest
  //TODO importexclude
  // def testExcludeOnRename = doTest
  // Each test resolves the fixture file named after the method
  // (ResolveTestBase convention).
  def testHide = doTest
  def testRename = doTest
} | triggerNZ/intellij-scala | test/org/jetbrains/plugins/scala/lang/resolve2/ImportAliasTest.scala | Scala | apache-2.0 | 381 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.charts.template
import com.dongxiguo.fastring.Fastring.Implicits._
import io.gatling.charts.report.Container.{ Group, Request }
// Generates the shared JavaScript menu helpers embedded in report pages:
// builds the left-hand navigation tree from the page's `stats` object and
// wires up expand/collapse plus "active item" highlighting.
private[charts] class MenuTemplate {
  /**
   * Returns the JavaScript source as a `Fastring`. `$Group` / `$Request` are
   * interpolated from [[io.gatling.charts.report.Container]] so the generated
   * code matches the content-type tags present in the stats data. Everything
   * inside the interpolated string is emitted verbatim into the page.
   */
  def getOutput: Fastring = fast"""
function getItemLink(item){
	return item.pathFormatted + '.html';
}
function setDetailsLinkUrl(){
    $$.each(stats.contents, function (name, data) {
        $$('#details_link').attr('href', getItemLink(data));
        return false;
    });
}
var MENU_ITEM_MAX_LENGTH = 50;
function menuItem(item, level, parent, group) {
    if (group)
        var style = 'group';
    else
        var style = '';
    if (item.name.length > MENU_ITEM_MAX_LENGTH) {
        var title = ' title="' + item.name + '"';
        var displayName = item.name.substr(0, MENU_ITEM_MAX_LENGTH) + '...';
    }
    else {
        var title = '';
        var displayName = item.name;
    }
    if (parent) {
        if (level == 0)
            var childOfRoot = 'child-of-ROOT ';
        else
            var childOfRoot = '';
        var style = ' class="' + childOfRoot + 'child-of-menu-' + parent + '"';
    } else
        var style = '';
    if (group)
        var expandButton = '<span id="menu-' + item.pathFormatted + '" style="margin-left: ' + (level * 10) + 'px;" class="expand-button">&nbsp;</span>';
    else
        var expandButton = '<span id="menu-' + item.pathFormatted + '" style="margin-left: ' + (level * 10) + 'px;" class="expand-button hidden">&nbsp;</span>';
    return '<li' + style + '><div class="item">' + expandButton + '<a href="' + getItemLink(item) + '"' + title + '>' + displayName + '</a></div></li>';
}
function menuItemsForGroup(group, level, parent) {
    var items = '';
    if (level > 0)
        items += menuItem(group, level - 1, parent, true);
    $$.each(group.contents, function (contentName, content) {
        if (content.type == '$Group')
            items += menuItemsForGroup(content, level + 1, group.pathFormatted);
        else if (content.type == '$Request')
            items += menuItem(content, level, group.pathFormatted);
    });
    return items;
}
function setDetailsMenu(){
    $$('.nav ul').append(menuItemsForGroup(stats, 0));
    $$('.nav').expandable();
}
function setGlobalMenu(){
    $$('.nav ul').append('<li><div class="item"><a href="#active_users">Active Users</a></div></li> \\\\
        <li><div class="item"><a href="#requests">Requests / sec</a></div></li> \\\\
        <li><div class="item"><a href="#responses">Responses / sec</a></div></li>');
}
function getLink(link){
    var a = link.split('/');
    return (a.length<=1)? link : a[a.length-1];
}
function setActiveMenu(){
    $$('.nav a').each(function(){
        if(!$$(this).hasClass('expand-button') && $$(this).attr('href') == getLink(window.location.pathname)){
            $$(this).parents('li').addClass('on');
            return false;
        }
    });
}
"""
}
| MykolaB/gatling | gatling-charts/src/main/scala/io/gatling/charts/template/MenuTemplate.scala | Scala | apache-2.0 | 3,516 |
package akka.http.scaladsl.marshallers.argonaut
import argonaut._
import akka.http.scaladsl.model.{HttpCharsets, MediaTypes}
import akka.http.scaladsl.model.MediaTypes.`application/json`
import akka.http.scaladsl.marshalling.{ToEntityMarshaller, Marshaller}
import akka.http.scaladsl.unmarshalling.{FromEntityUnmarshaller, Unmarshaller}
import scala.language.implicitConversions
trait ArgonautSupport {
  /** Unmarshals a JSON entity into `T` via the implicit Argonaut decoder.
    * Decode failures surface as `IllegalArgumentException` through the
    * `collapseDecodeResult` conversion below. */
  implicit def argonautUnmarshaller[T](
    implicit e: DecodeJson[T]
  ): FromEntityUnmarshaller[T] =
    argonautJsonUnmarshaller.map(e.decodeJson(_))
  /** Unmarshals an `application/json` entity into a raw Argonaut `Json` tree,
    * decoding bytes with the entity's charset (UTF-8 fast path). Parse
    * failures surface as `IllegalArgumentException` via `collapseEither`. */
  implicit val argonautJsonUnmarshaller: FromEntityUnmarshaller[Json] =
    Unmarshaller.byteStringUnmarshaller
      .forContentTypes(`application/json`)
      .mapWithCharset { (data, charset) =>
        val input =
          if (charset == HttpCharsets.`UTF-8`) data.utf8String
          else data.decodeString(charset.nioCharset.name)
        Parse.parse(input)
      }
  /** Marshals any `T` with an Argonaut encoder; rendering is controlled by
    * `p` (compact `nospaces` unless `prettifier` is overridden). */
  implicit def argonautMarshaller[T](
    implicit e: EncodeJson[T],
    p: Pretty = prettifier
  ): ToEntityMarshaller[T] =
    argonautJsonMarshaller.compose(e.encode)
  /** Marshals a raw `Json` tree as an `application/json` entity. */
  implicit def argonautJsonMarshaller(
    implicit p: Pretty = prettifier
  ): ToEntityMarshaller[Json] =
    Marshaller.StringMarshaller.wrap(MediaTypes.`application/json`)(p)
  // Implicitly unwraps a DecodeResult, throwing on failure; this is what lets
  // `argonautUnmarshaller` pass `e.decodeJson(_)` where a plain `T` is needed.
  private implicit def collapseDecodeResult[T](d: DecodeResult[T]): T =
    d.result match {
      case Left((msg, cursor)) =>
        throw new IllegalArgumentException(s"$msg - $cursor")
      case Right(t) => t
    }
  // Same idea for the Either returned by `Parse.parse`.
  private implicit def collapseEither[T](d: Either[String, T]): T = d match {
    case Left(msg) => throw new IllegalArgumentException(msg)
    case Right(t) => t
  }
  // Rendering function used by the marshallers; override `prettifier` in a
  // subtrait to change the output format (e.g. `_.spaces2`).
  type Pretty = Json => String
  protected val prettifier: Pretty = _.nospaces
}
object ArgonautSupport extends ArgonautSupport
| vikraman/akka-argonaut | src/main/scala/akka/http/scaladsl/marshallers/argonaut/ArgonautSupport.scala | Scala | mit | 1,814 |
package com.rasterfoundry.database
import com.rasterfoundry.common.Generators.Implicits._
import com.rasterfoundry.database.Implicits._
import com.rasterfoundry.datamodel._
import cats.implicits._
import doobie.implicits._
import org.scalacheck.Prop.forAll
import org.scalatestplus.scalacheck.Checkers
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
/** Property-based round-trip tests for [[ToolDao]] (list / update). */
class ToolDaoSpec
    extends AnyFunSuite
    with Matchers
    with Checkers
    with DBTestConfig
    with PropTestHelpers {

  // Smoke test: the base query must type-check and execute.
  test("selection types") {
    ToolDao.query.list.transact(xa).unsafeRunSync.length should be >= 0
  }

  // Property: every tool inserted for a user comes back from an
  // "owned"-scoped auth query, with titles preserved.
  test("list tools") {
    check {
      forAll {
        (
            userCreate: User.Create,
            orgCreate: Organization.Create,
            platform: Platform,
            toolCreates: List[Tool.Create]
        ) =>
          {
            val toolListIO = for {
              (dbUser, _, _) <- insertUserOrgPlatform(
                userCreate,
                orgCreate,
                platform
              )
              dbTools <- toolCreates traverse { toolCreate =>
                ToolDao.insert(toolCreate, dbUser)
              }
              listed <- ToolDao
                .authQuery(
                  dbUser,
                  ObjectType.Template,
                  ownershipTypeO = Some("owned")
                )
                .list
            } yield (dbTools, listed)

            val (inserted, listed) =
              toolListIO.transact(xa).unsafeRunSync

            assert(
              toolCreates.length == inserted.length,
              "all of the tools were inserted into the db"
            )
            assert(
              inserted.length == listed.length,
              "counts of inserted and listed tools match"
            )
            assert(Set(toolCreates map { _.title }: _*) == Set(listed map {
              _.title
            }: _*), "titles of listed tools are the same as the Tool.Creates")
            true
          }
      }
    }
  }

  // Property: after updating tool 1 with tool 2's fields, fetching tool 1
  // reflects tool 2's data field-by-field.
  // (Typos "udpate" in the assertion messages below have been corrected.)
  test("update a tool") {
    check {
      forAll {
        (
            userCreate: User.Create,
            orgCreate: Organization.Create,
            platform: Platform,
            toolCreate1: Tool.Create
        ) =>
          {
            val updateIO = for {
              (dbUser, _, _) <- insertUserOrgPlatform(
                userCreate,
                orgCreate,
                platform
              )
              dbTool1 <- ToolDao.insert(toolCreate1, dbUser)
              dbTool2 <- ToolDao.insert(toolCreate1, dbUser)
              _ <- ToolDao.update(dbTool2, dbTool1.id)
              fetched <- ToolDao.query.filter(dbTool1.id).select
            } yield (dbTool2, fetched)

            val (updateTool, fetched) =
              updateIO.transact(xa).unsafeRunSync

            assert(
              fetched.title == updateTool.title,
              "Title of fetched tool should be the title of the update tool"
            )
            assert(
              fetched.description == updateTool.description,
              "Description of fetched tool should be the description of the update tool"
            )
            assert(
              fetched.requirements == updateTool.requirements,
              "Requirements of fetched tool should be the requirements of the update tool"
            )
            assert(
              fetched.visibility == updateTool.visibility,
              "Visibility of fetched tool should be the visibility of the update tool"
            )
            assert(
              fetched.compatibleDataSources == updateTool.compatibleDataSources,
              "CompatibleDataSources of fetched tool should be the compatibleDataSources of the update tool"
            )
            assert(
              fetched.stars == updateTool.stars,
              "Stars of fetched tool should be the stars of the update tool"
            )
            assert(
              fetched.singleSource == updateTool.singleSource,
              "SingleSource of fetched tool should be the singleSource of the update tool"
            )
            true
          }
      }
    }
  }
}
| raster-foundry/raster-foundry | app-backend/db/src/test/scala/com/azavea/rf/database/ToolDaoSpec.scala | Scala | apache-2.0 | 4,162 |
package users.models
import authentication.models.api.PlainTextPassword
import commons.models.Login
// Registration payload: the requested login name together with the password
// exactly as typed by the user (wrapped as `PlainTextPassword`).
private[users] case class UserRegistration(login: Login, password: PlainTextPassword) {
}
| Dasiu/play-framework-test-project | app/users/models/UserRegistration.scala | Scala | mit | 192 |
/**
* Copyright (C) 2017 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.xforms
import org.scalajs.dom
import org.scalajs.dom.raw
import scala.scalajs.js
import scala.scalajs.js.annotation.{JSExportAll, JSExportTopLevel}
// Client-side store of control values as we believe the server knows them.
@JSExportTopLevel("ORBEON.xforms.ServerValueStore")
@JSExportAll
object ServerValueStore {

  // Pairs the DOM element a value was recorded against with that value, so a
  // stale entry (element since replaced in the DOM) can be detected later.
  private case class StoredValue(elem: raw.Element, value: String)

  private var valuesById = Map[String, StoredValue]()

  // Record the value the server is believed to know for the control `id`.
  // Does nothing when the element is not in the DOM (callers are not always
  // sure it exists) or when the value is `undefined` (e.g. triggers).
  def set(id: String, valueOrUndef: js.UndefOr[String]): Unit = {
    val elemOpt  = Option(dom.document.getElementById(id))
    val valueOpt = valueOrUndef.toOption
    for {
      elem  <- elemOpt
      value <- valueOpt
    } valuesById += id -> StoredValue(elem, value)
  }

  // Value of the control as known by the server, or `null` when unknown.
  // An entry recorded against an element no longer in the DOM is treated as
  // obsolete: it is removed and `null` is returned.
  def get(id: String): String =
    valuesById.get(id) match {
      case Some(StoredValue(elem, value)) if elem eq dom.document.getElementById(id) =>
        value
      case Some(_) =>
        remove(id)
        null
      case None =>
        null
    }

  // Forget the stored value for one specific control.
  def remove(id: String): Unit =
    valuesById -= id

  // Drop every entry whose element has been replaced or removed from the DOM.
  def purgeExpired(): Unit =
    for ((id, StoredValue(elem, _)) <- valuesById.iterator)
      if (elem ne dom.document.getElementById(id))
        remove(id)
}
| brunobuzzi/orbeon-forms | xforms/js/src/main/scala/org/orbeon/xforms/ServerValueStore.scala | Scala | lgpl-2.1 | 2,437 |
package org.falcon.main
import org.falcon.streaming.Collector
import org.falcon.util.Util
/**
* Project: falcon
* Package: org.falcon.main
*
* Author: Sergio Álvarez
* Date: 09/2013
*/
object Main {

  /** Entry point: runs the collector only when CLI parsing succeeds. */
  def main(args: Array[String]): Unit = {
    // `foreach` instead of `map`: `run` is executed purely for its side
    // effects and the Option produced by `map` was being discarded anyway.
    CLIParser.parse(args, Configuration()) foreach { config => run(config) }
  }

  /** Prints the startup banner, applies `config` and starts collecting. */
  def run(config: Configuration) = {
    println("========================================")
    println(" falcon ")
    println("========================================")
    println()
    Util.loadConfiguration(config)
    val collector = new Collector()
    collector.collect
  }
}
| sergio-alvarez/falcon | src/main/scala/org/falcon/main/Main.scala | Scala | apache-2.0 | 647 |
package leibniz
package internal
import leibniz.inhabitance.{Contractible, Inhabited, SingletonOf}
import scala.reflect.macros.blackbox
import scala.reflect.macros.whitebox
import scala.tools.nsc.ast.NodePrinters
import scala.reflect.internal.Types
@SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
// Functionality shared by the blackbox and whitebox macro bundles below:
// type classification, `Eq`-based singleton widening and a fold over the
// shape of concrete types.
sealed abstract class Shared[C <: blackbox.Context] {
  val c: C
  import c.universe._
  import definitions.{ NothingClass, AnyClass }
  final val NothingType: Type = NothingClass.toType
  final val AnyType: Type = AnyClass.toType
  final val AnyRefType: Type = typeOf[AnyRef]
  // Canonical inhabitant witnesses (as trees) for common primitive types,
  // used by `MacroUtil.mkInhabited` as a fallback lookup.
  final val inhabitants: Map[Type, Tree] = Map(
    typeOf[Unit] -> q"()",
    typeOf[Boolean] -> q"false",
    typeOf[Byte] -> q"0.toByte",
    typeOf[Short] -> q"0.toShort",
    typeOf[Int] -> q"0",
    typeOf[Long] -> q"0L",
    typeOf[Float] -> q"0.0f",
    typeOf[Double] -> q"0.0d",
    typeOf[Symbol] -> q"'a",
    typeOf[String] -> q""" "" """)
  final val EqType: Symbol = typeOf[Eq[_]].typeSymbol
  final val EqTypeConstructor: Type = typeOf[Eq[_]].typeConstructor
  // Searches for an implicit `Eq[tpe]` and, when one is found, returns the
  // evidence tree together with the type argument of the located instance
  // (the "cosingleton", i.e. the widened type the Eq was declared for).
  def findCosingleton(tpe: Type): Option[(c.Tree, c.Type)] = {
    val exactEq = c.inferImplicitValue(appliedType(EqTypeConstructor, tpe), silent = true)
    if (exactEq != EmptyTree) {
      exactEq.tpe match {
        case TypeRef(_, EqType, List(result)) => Some((exactEq, result))
        case _ => None
      }
    } else None
  }
  /**
   * 0, null, "a", a.type | supported for types with Eq
   *
   * ExistentialType | nominalRef[ExistentialType]("ExistentialType"): T[ExistentialType]
   * ClassName | nominalRef[ClassName]("ClassName"): T[ClassName]
   * F[A] | nominalRef1[F, A]("F", T[A]): T[F[A]]
   * F[G] | nominalRef2[G, A]("F"
   *
   * A with B | unsupported
   *
   * A { ... } | unsupported
   * A forSome { ... } | unsupported
   */
  // Algebra consumed by `foldConcreteType`: one case per supported type shape.
  trait TypeRefAlg[A] {
    def nominal(tpe: Type, name: String, args: List[A]): A
    def singleton(tpe: Type, parent: A, value: Tree, eq: Tree): A
  }
  // Structural fold over a concrete type: Nothing/Any and class type
  // references go through `nominal`; singleton and constant types are widened
  // via their Eq instance and go through `singleton`. Aborts on anything else.
  def foldConcreteType[T](tpe: Type)(alg: TypeRefAlg[T]): T = tpe.dealias match {
    case t if (t <:< NothingType) && (NothingType <:< t) =>
      alg.nominal(NothingType, "scala.Nothing", Nil)
    case t if (t <:< AnyType) && (AnyType <:< t) =>
      alg.nominal(AnyType, "scala.Any", Nil)
    case tpe@SingleType(_, path) =>
      findCosingleton(tpe) match {
        case Some((eq, cosingleton)) =>
          val parent = foldConcreteType[T](cosingleton)(alg)
          alg.singleton(tpe, parent, q"$path.asInstanceOf[$tpe]", eq)
        case None =>
          c.abort(c.enclosingPosition, s"Could not widen a singleton $tpe: no Eq[$tpe] found.")
      }
    case tpe@ConstantType(value) =>
      findCosingleton(tpe) match {
        case Some((eq, cosingleton)) =>
          val parent = foldConcreteType[T](cosingleton)(alg)
          alg.singleton(tpe, parent, q"$value.asInstanceOf[$tpe]", eq)
        case None =>
          c.abort(c.enclosingPosition, s"Could not widen a singleton $tpe: no Eq[$tpe] found.")
      }
    case tpe: TypeRef if tpe.sym.isClass =>
      val args = tpe.typeArgs.map(foldConcreteType[T](_)(alg))
      alg.nominal(tpe, tpe.typeSymbol.fullName, args)
    case x =>
      c.abort(c.enclosingPosition, s"$tpe is not a concrete type (${x.getClass}).")
    // case RefinedType(parents, decls) =>
    //      // a with b { }
    //      if(decls.nonEmpty)
    //        c.abort(c.enclosingPosition, "Refinements with non-empty scope are not yet supported.")
    //
    //      parents.map(go).reduce(alg.intersection).asInstanceOf[T[A]]
  }
  // A type is "concrete" when it is Nothing, Any, a non-parameter singleton,
  // a constant, or a class type ref whose arguments are all concrete.
  def isConcreteType(tpe: Type): Boolean = tpe.dealias match {
    case t if (t <:< NothingType) && (NothingType <:< t) => true
    case t if (t <:< AnyType) && (AnyType <:< t) => true
    case SingleType(_, v) if !v.isParameter => true
    case ConstantType(c) => true
    case t: TypeRef if t.typeSymbol.isClass => t.typeArgs.forall(isConcreteType)
    case _ => false
  }
  // Two fresh, unrelated probe classes used to test how F[_] behaves when
  // applied to distinct arguments.
  final class Hidden1
  final class Hidden2
  // F is constant when F[Hidden1] =:= F[Hidden2] (the argument is ignored).
  def isConstant[F[_]](F: c.WeakTypeTag[F[_]]): Boolean = {
    val applied1 = c.universe.appliedType(F.tpe, weakTypeOf[Hidden1])
    val applied2 = c.universe.appliedType(F.tpe, weakTypeOf[Hidden2])
    applied1 =:= applied2
  }
  // F is injective when distinct arguments give distinct, concrete results.
  def isInjective[F[_]](F: c.WeakTypeTag[F[_]]): Boolean = {
    val applied1 = c.universe.appliedType(F.tpe, weakTypeOf[Hidden1])
    val applied2 = c.universe.appliedType(F.tpe, weakTypeOf[Hidden2])
    !(applied1 =:= applied2) && isConcreteType(applied1) && isConcreteType(applied2)
  }
}
final class Whitebox(val c: whitebox.Context) extends Shared[whitebox.Context] {
  import c.universe._
  import internal._
  import definitions.NothingClass

  /**
   * Materializes a `Cosingleton` witness for `A`. The widened type is the one
   * carried by the implicit `Eq` instance found for `A`; when no such
   * instance exists the macro expansion is aborted with an error.
   */
  def cosingleton[A : c.WeakTypeTag]: c.Tree = {
    val tpe = weakTypeOf[A]
    findCosingleton(tpe) match {
      case None =>
        c.abort(c.enclosingPosition, s"Could not find a cosingleton for $tpe.")
      case Some((eqEvidence, widened)) =>
        q"""_root_.leibniz.Cosingleton.witness[$widened, $tpe]($eqEvidence)"""
    }
  }
}
/** Blackbox macro bundle materializing leibniz evidence values. */
final class MacroUtil(val c: blackbox.Context) extends Shared[blackbox.Context] {
  import c.universe._
  import internal._

  val typeIdTpe = typeOf[TypeId[_]].typeConstructor

  /**
   * Builds a `TypeId[tpe]` tree by folding over the concrete structure of
   * `tpe`: class references become `$nominal` nodes, singleton/constant types
   * become `$singleton` nodes layered on their widened parent.
   */
  def makeConcreteType(tpe: Type): c.Tree = {
    val (_, tree) = foldConcreteType[(Type, Tree)](tpe)(new TypeRefAlg[(Type, Tree)] {
      def nominal(tpe: Type, name: String, args: List[(Type, Tree)]): (Type, Tree) = {
        val tree = q"""
          _root_.leibniz.TypeId.$$nominal[$tpe](
            $name,
            _root_.scala.Array[_root_.leibniz.TypeId[_]](..${args.map(_._2)}))
        """
        (tpe, tree)
      }
      def singleton(tpe: Type, parent: (Type, Tree), value: Tree, eq: Tree): (Type, Tree) = {
        val (parentType, parentTree) = parent
        val tree = q"""
          _root_.leibniz.TypeId.$$singleton[$parentType, $tpe](
            $parentTree, $value.asInstanceOf[$tpe], $eq)
        """
        (tpe, tree)
      }
    })
    tree
  }

  /** Materializes `TypeId[A]`. */
  def mkTypeId[A : c.WeakTypeTag]: c.Tree =
    makeConcreteType(weakTypeOf[A])

  /**
   * Materializes `Inhabited[A]` for singleton/constant/this types (their own
   * value is the witness), falling back to the `inhabitants` table for
   * common primitives; aborts otherwise.
   */
  def mkInhabited[A](implicit A: c.WeakTypeTag[A]): c.Tree =
    weakTypeOf[A].dealias match {
      case tpe@SingleType(_, path) =>
        q"""_root_.leibniz.inhabitance.Inhabited.value[$tpe]($path.asInstanceOf[$tpe])"""
      case tpe@ConstantType(value) =>
        q"""_root_.leibniz.inhabitance.Inhabited.value[$tpe]($value)"""
      case tpe@ThisType(_) =>
        q"""_root_.leibniz.inhabitance.Inhabited.value[$tpe](this)"""
      case tpe =>
        inhabitants.find { case (t, _) => t <:< tpe } match {
          case Some((_, tree)) => q"""_root_.leibniz.inhabitance.Inhabited.value[$tpe]($tree)"""
          case None => c.abort(c.enclosingPosition, s"Can't prove that $tpe is inhabited.")
        }
    }

  /** Materializes `Uninhabited[A]`; only `Nothing` is currently provable. */
  def mkUninhabited[A](implicit A: c.WeakTypeTag[A]): c.Tree =
    weakTypeOf[A].dealias match {
      case tpe if tpe <:< NothingType =>
        q"""_root_.leibniz.inhabitance.Uninhabited.witness[$tpe](a => a)"""
      case tpe@TypeRef(pre, sym, args) =>
        // TODO: sealed, uninhabitable-children analysis could go here.
        c.abort(c.enclosingPosition, s"Can't prove that $tpe is uninhabited (yet).")
      case tpe =>
        c.abort(c.enclosingPosition, s"Can't prove that $tpe is uninhabited.")
    }

  /** Materializes `Injective[F]` when F provably maps distinct args to distinct types. */
  def mkInjective[F[_]](implicit F: c.WeakTypeTag[F[_]]): c.Tree =
    if (isInjective[F](F))
      q"_root_.leibniz.variance.Injective.force[${F.tpe}](_root_.leibniz.internal.Unsafe.unsafe)"
    else
      c.abort(c.enclosingPosition, s"Can't prove that ${F.tpe} is injective.")

  /** Materializes `Constant[F]` when F provably ignores its type argument. */
  def mkConstant[F[_]](implicit F: c.WeakTypeTag[F[_]]): c.Tree =
    if (isConstant[F](F))
      q"_root_.leibniz.variance.Constant.force[${F.tpe}](_root_.leibniz.internal.Unsafe.unsafe)"
    else
      // Fix: this message previously said "injective" (copy-paste from
      // mkInjective), which misreported the failed proof.
      c.abort(c.enclosingPosition, s"Can't prove that ${F.tpe} is constant.")

  /** Materializes `Apart[A, B]` when both types are concrete and not mutually subtypes. */
  def mkApart[A : c.WeakTypeTag, B : c.WeakTypeTag]: c.Tree = {
    val ta = weakTypeOf[A]
    val tb = weakTypeOf[B]
    if (isConcreteType(ta) && isConcreteType(tb) && !(ta <:< tb && tb <:< ta)) {
      val ca = makeConcreteType(ta)
      val cb = makeConcreteType(tb)
      q"_root_.leibniz.internal.Unsafe.apart[$ta, $tb]($ca, $cb)"
    } else {
      c.abort(c.enclosingPosition, s"Could not prove that $ta =!= $tb.")
    }
  }

  /** Materializes `WeakApart[A, B]` under the same conditions as `mkApart`. */
  def mkWeakApart[A : c.WeakTypeTag, B : c.WeakTypeTag]: c.Tree = {
    val ta = weakTypeOf[A]
    val tb = weakTypeOf[B]
    if (isConcreteType(ta) && isConcreteType(tb) && !(ta <:< tb && tb <:< ta)) {
      q"""_root_.leibniz.internal.Unsafe.weakApart[$ta, $tb]"""
    } else {
      c.abort(c.enclosingPosition, s"Could not prove that $ta =!= $tb.")
    }
  }
} | alexknvl/leibniz | src/main/scala/leibniz/internal/MacroUtil.scala | Scala | mit | 9,851 |
package Tutorial
import Chisel._
import Node._
import Literal._
import scala.collection.mutable.HashMap
import scala.collection.mutable.ArrayBuffer
// K-means reduce engine (Chisel hardware generator): accumulates incoming
// points into a per-centeroid partial-sum memory via an external adder, then
// streams the accumulated sums out once the last centeroid has been updated.
// The statement ORDER below is load-bearing (last-connect semantics in
// Chisel), so the wiring is documented but deliberately left untouched.
class KReduce extends gComponentLeaf (() => new KEngineIO_t)(() => new KEngineIO_t) (ArrayBuffer(("partialAccumulatorMem", () => new pointMemInput_t , () => new pointMemOutput_t), ("add", () => new FU_in_t , () => new FU_out_t))) with include {
  val numOfThreads = 1
  // Sentinel "no thread selected" value: one past the last valid thread id.
  val NONE_SELECTED = UFix(numOfThreads,log2Up(numOfThreads)+1)
  val WaitForInputValid = UFix(0, 8)
  val WaitForOutputReady = UFix(255, 8)
  val WaitForReady = UFix(0, 1)
  val WaitForValid = UFix(1, 1)
  val inputTag = Vec(numOfThreads) {Reg(UFix(width=5))}
  val State = Vec(numOfThreads) {Reg(UFix(width=8), resetVal=WaitForInputValid)}
  val EmitReturnState = Vec(numOfThreads) {Reg(UFix(width=8), resetVal=WaitForInputValid)}
  //val outstandingOffs = Vec(numOfThreads) {Reg(resetVal=UFix(0, 5))}
  val AllOffloadsReady = Bool()
  val AllOffloadsValid = Vec(numOfThreads) {Bool()}
  /*******************Thread states*********************************/
  // Per-thread request/response substate: WaitForReady (may issue offload
  // requests) or WaitForValid (awaiting all offload responses).
  val subStateTh = Vec(numOfThreads) {Reg(resetVal=WaitForReady)}
  def myOff = io.elements.find(_._1 == "off").getOrElse(elseV)._2
  val pMemOut = Vec (numOfThreads) {Reg(new pointMemOutput_t)} //Global variable
  val pMemInGlobal = Vec (numOfThreads) {Reg(new pointMemInput_t)} //Global variable
  val centeroidIndex = Vec (numOfThreads) {Reg(UFix(width = 32), resetVal=UFix(0, width = 32))} //Global variable
  val inputReg = Vec(numOfThreads) {Reg(new KEngineIO_t)}
  val outputReg = Vec(numOfThreads) {Reg(new KEngineIO_t)}
  // Bind the two offload ports (partial-sum memory and FP adder) out of the
  // generic "off" bundle by name.
  def mymyOffpartialAccumulatorMem = myOff.asInstanceOf[Bundle].elements.find(_._1 == "partialAccumulatorMem").getOrElse(elseV)._2
  val partialAccumulatorMemPort = new gOffBundleND(() => new pointMemInput_t, () => new pointMemOutput_t)
  partialAccumulatorMemPort <> mymyOffpartialAccumulatorMem
  def mymyOffadd = myOff.asInstanceOf[Bundle].elements.find(_._1 == "add").getOrElse(elseV)._2
  val addPort = new gOffBundleND(() => new FU_in_t, () => new FU_out_t)
  addPort <> mymyOffadd
  // FSM state encodings, interleaved with the operand wires each state drives.
  val GS_RECEIVE = UFix(1)
  val pMemIn1 = new pointMemInput_t
  val GS_ADD_X = UFix(2)
  val addIn1 = new FU_in_t
  val addOut1 = new FU_out_t
  val GS_ADD_Y = UFix(3)
  val addIn2 = new FU_in_t
  val addOut2 = new FU_out_t
  val GS_ADD_Z = UFix(4)
  val addIn3 = new FU_in_t
  val addOut3 = new FU_out_t
  val GS_WRITE = UFix(5)
  val pMemIn2 = new pointMemInput_t
  val GS_SEND = UFix(6)
  /******************Winner threads*********************************/
  // Round-robin arbitration: rThread may issue requests, vThread has all its
  // responses back, sThread can accept a new input transaction.
  val rThreadEncoder = new RREncode (numOfThreads)
  val rThread = rThreadEncoder.io.chosen
  Range(0, numOfThreads, 1).map(i =>
    rThreadEncoder.io.valid(i) := (subStateTh(i) === WaitForReady))
  rThreadEncoder.io.ready := (rThread != NONE_SELECTED)
  val vThreadEncoder = new RREncode (numOfThreads)
  val vThread = vThreadEncoder.io.chosen
  Range(0, numOfThreads, 1).map(i =>
    vThreadEncoder.io.valid(i) := (subStateTh(i) === WaitForValid) && AllOffloadsValid(i))
  vThreadEncoder.io.ready := vThread != NONE_SELECTED
  val sThreadEncoder = new RREncode (numOfThreads)
  val sThread = sThreadEncoder.io.chosen
  Range(0, numOfThreads, 1).map(i => sThreadEncoder.io.valid(i) := (subStateTh(i) === WaitForReady) && (State(i) === WaitForInputValid))
  sThreadEncoder.io.ready := sThread != NONE_SELECTED
  Range(0, numOfThreads, 1).foreach(i => subStateTh(i) := MuxCase(subStateTh(i), Seq((AllOffloadsReady && UFix(i) === rThread && State(i) != WaitForInputValid && State(i) != WaitForOutputReady , WaitForValid), (UFix(i) === vThread, WaitForReady))))
  partialAccumulatorMemPort.rep.ready := Bool(true)
  addPort.rep.ready := Bool(true)
  /******************Ready stage handler************************/
  // Sticky per-port bookkeeping so a thread advances only after EVERY port it
  // requested has accepted (ready) its request.
  val partialAccumulatorMemPortHadReadyRequest = Reg(resetVal=Bool(false))
  val partialAccumulatorMem_ready_received = Reg(resetVal=Bool(false))
  val addPortHadReadyRequest = Reg(resetVal=Bool(false))
  val add_ready_received = Reg(resetVal=Bool(false))
  AllOffloadsReady :=
  (partialAccumulatorMemPort.req.ready || partialAccumulatorMem_ready_received || (!partialAccumulatorMemPortHadReadyRequest && !partialAccumulatorMemPort.req.valid)) &&
  (addPort.req.ready || add_ready_received || (!addPortHadReadyRequest && !addPort.req.valid)) &&
  Bool(true)
  partialAccumulatorMem_ready_received := !(AllOffloadsReady) && (partialAccumulatorMem_ready_received || partialAccumulatorMemPort.req.ready)
  partialAccumulatorMemPortHadReadyRequest := !AllOffloadsReady && (partialAccumulatorMemPortHadReadyRequest || partialAccumulatorMemPort.req.valid)
  add_ready_received := !(AllOffloadsReady) && (add_ready_received || addPort.req.ready)
  addPortHadReadyRequest := !AllOffloadsReady && (addPortHadReadyRequest || addPort.req.valid)
  /******************Valid stage handler************************/
  // Mirror bookkeeping per thread for the response side: a thread is "valid"
  // once every port it requested has replied with a matching tag.
  val partialAccumulatorMemPortHadValidRequest = Vec(numOfThreads) {Reg(resetVal=Bool(false))}
  val partialAccumulatorMem_valid_received = Vec(numOfThreads) {Reg(resetVal=Bool(false))}
  val addPortHadValidRequest = Vec(numOfThreads) {Reg(resetVal=Bool(false))}
  val add_valid_received = Vec(numOfThreads) {Reg(resetVal=Bool(false))}
  for (i <- 0 to numOfThreads-1) {
    AllOffloadsValid(i) :=
    ((partialAccumulatorMemPort.rep.valid && (partialAccumulatorMemPort.rep.tag === UFix(i, 5)))|| partialAccumulatorMem_valid_received(i) || !partialAccumulatorMemPortHadValidRequest(i)) &&
    ((addPort.rep.valid && (addPort.rep.tag === UFix(i, 5)))|| add_valid_received(i) || !addPortHadValidRequest(i)) &&
    Bool(true)
    partialAccumulatorMem_valid_received(i) := !(vThread === UFix(i, 5)) && ((partialAccumulatorMem_valid_received(i)) || (partialAccumulatorMemPort.rep.valid && partialAccumulatorMemPort.rep.tag === UFix(i, 5)))
    partialAccumulatorMemPortHadValidRequest(i) := !(vThread === UFix(i,5)) && (partialAccumulatorMemPortHadValidRequest(i) || (UFix(i,5)===rThread && partialAccumulatorMemPort.req.valid)/*(partialAccumulatorMemPortHadReadyRequest && AllOffloadsReady && (UFix(i,5) === rThread))*/)
    add_valid_received(i) := !(vThread === UFix(i, 5)) && ((add_valid_received(i)) || (addPort.rep.valid && addPort.rep.tag === UFix(i, 5)))
    addPortHadValidRequest(i) := !(vThread === UFix(i,5)) && (addPortHadValidRequest(i) || (UFix(i,5)===rThread && addPort.req.valid)/*(addPortHadReadyRequest && AllOffloadsReady && (UFix(i,5) === rThread))*/)
  }
  // Per-state operand wiring (read partial sum; add x/y/z; write-back).
  pMemIn1.addr:=inputReg(rThread).centeroidIndex
  pMemIn1.data:=inputReg(rThread).point
  pMemIn1.rw:=READ
  addIn1.in1:=pMemOut(rThread).data.x
  addIn1.in2:=inputReg(rThread).point.x
  addOut1 := addPort.rep.bits
  addIn2.in1:=pMemOut(rThread).data.y
  addIn2.in2:=inputReg(rThread).point.y
  addOut2 := addPort.rep.bits
  addIn3.in1:=pMemOut(rThread).data.z
  addIn3.in2:=inputReg(rThread).point.z
  addOut3 := addPort.rep.bits
  pMemIn2.addr:=inputReg(rThread).centeroidIndex
  pMemIn2.rw:=WRITE
  pMemIn2.data:=pMemInGlobal(rThread).data
  val finishCenteroidIndex=NUM_OF_CENTEROIDS-UFix(1, width = 32)
  // Request muxing: which operand bundle each port sees depends on the
  // requesting thread's FSM state.
  partialAccumulatorMemPort.req.tag := rThread
  partialAccumulatorMemPort.req.valid := (rThread != NONE_SELECTED) && !partialAccumulatorMem_valid_received(rThread) && ( (rThread != NONE_SELECTED && State(rThread) === GS_RECEIVE) || (rThread != NONE_SELECTED && State(rThread) === GS_WRITE) || (rThread != NONE_SELECTED && State(rThread) === GS_SEND))
  partialAccumulatorMemPort.req.bits := MuxCase(UFix(0, 32),Seq( ((rThread != NONE_SELECTED && State(rThread) === GS_RECEIVE),pMemIn1) , ((rThread != NONE_SELECTED && State(rThread) === GS_WRITE),pMemIn2) , ((rThread != NONE_SELECTED && State(rThread) === GS_SEND),pMemInGlobal(rThread))))
  addPort.req.tag := rThread
  addPort.req.valid := (rThread != NONE_SELECTED) && !add_valid_received(rThread) && ( (rThread != NONE_SELECTED && State(rThread) === GS_ADD_X) || (rThread != NONE_SELECTED && State(rThread) === GS_ADD_Y) || (rThread != NONE_SELECTED && State(rThread) === GS_ADD_Z))
  addPort.req.bits := MuxCase(UFix(0, 32),Seq( ((rThread != NONE_SELECTED && State(rThread) === GS_ADD_X),addIn1) , ((rThread != NONE_SELECTED && State(rThread) === GS_ADD_Y),addIn2) , ((rThread != NONE_SELECTED && State(rThread) === GS_ADD_Z),addIn3)))
  // Accept a new input transaction for the selected start thread.
  when (sThread != NONE_SELECTED && io.in.valid) {
    inputReg(sThread) := io.in.bits
    inputTag(sThread) := io.in.tag
    State(sThread) := GS_RECEIVE
  }
  when (rThread != NONE_SELECTED && State(rThread) === WaitForOutputReady && io.out.ready) {
    State(rThread) := EmitReturnState(rThread)
  }
  // FSM transitions, taken when the selected thread's responses are in:
  // RECEIVE -> ADD_X -> ADD_Y -> ADD_Z -> WRITE -> (SEND | idle).
  when (vThread != NONE_SELECTED && State(vThread) === GS_RECEIVE){
    pMemOut(vThread) := partialAccumulatorMemPort.rep.bits
    State(vThread):=GS_ADD_X
  }
  when (vThread != NONE_SELECTED && State(vThread) === GS_ADD_X){
    pMemInGlobal(vThread).data.x:=addOut1.out
    State(vThread):=GS_ADD_Y
  }
  when (vThread != NONE_SELECTED && State(vThread) === GS_ADD_Y){
    pMemInGlobal(vThread).data.y:=addOut2.out
    State(vThread):=GS_ADD_Z
  }
  when (vThread != NONE_SELECTED && State(vThread) === GS_ADD_Z){
    pMemInGlobal(vThread).data.z:=addOut3.out
    State(vThread):=GS_WRITE
  }
  when (vThread != NONE_SELECTED && State(vThread) === GS_WRITE){
    pMemOut(vThread) := partialAccumulatorMemPort.rep.bits
    when (inputReg(vThread).centeroidIndex===finishCenteroidIndex) {
      State(vThread):=GS_SEND
      centeroidIndex(vThread):=UFix(0, width = 32)
    }
    .otherwise {
      State(vThread):=WaitForInputValid
    }
  }
  // SEND: stream every accumulated centeroid out, one per round trip.
  when (vThread != NONE_SELECTED && State(vThread) === GS_SEND){
    pMemInGlobal(vThread).addr:=centeroidIndex(vThread)
    pMemInGlobal(vThread).rw:=READ
    pMemOut(vThread) := partialAccumulatorMemPort.rep.bits
    outputReg(vThread).point:=pMemOut(vThread).data
    centeroidIndex(vThread):=centeroidIndex(vThread)+UFix(1, width = 32)
    when (centeroidIndex(vThread)===NUM_OF_CENTEROIDS) {
      centeroidIndex(vThread):=UFix(0, width = 32)
      EmitReturnState(vThread):=WaitForInputValid
    }
    .otherwise {
      EmitReturnState(vThread):=State(vThread)
    }
    State(vThread) := WaitForOutputReady
  }
  // Output/input handshake wiring.
  io.out.tag := inputTag(rThread)
  io.out.bits := outputReg(rThread)
  io.out.valid := rThread != NONE_SELECTED && State(rThread) === WaitForOutputReady
  io.in.ready := sThread != NONE_SELECTED
}
| seyedmaysamlavasani/GorillaPP | chisel/Gorilla++/emulator/src/KReduce.scala | Scala | bsd-3-clause | 10,399 |
package com.tribbloids.spookystuff.utils
import com.tribbloids.spookystuff.utils.lifespan.LifespanContext
/**
 * A thread-local value seeded from the current [[LifespanContext]].
 * Doubles as a 0-arity function (`() => A`) that reads the per-thread value.
 */
case class ThreadLocal[A](init: LifespanContext => A) extends java.lang.ThreadLocal[A] with (() => A) {

  /** Computed lazily on each thread's first access, from that thread's context. */
  override def initialValue: A = init(LifespanContext())

  /** Function0-style access; delegates to `java.lang.ThreadLocal.get`. */
  def apply: A = get
}
| tribbloid/spookystuff | mldsl/src/main/scala/com/tribbloids/spookystuff/utils/ThreadLocal.scala | Scala | apache-2.0 | 322 |
/**
* Copyright (C) 2013 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.fr
import org.dom4j.{QName, Document}
import org.orbeon.oxf.pipeline.api.{PipelineContext}
import org.orbeon.oxf.processor.SimpleProcessor
import org.orbeon.oxf.properties.{PropertySet, Properties}
import org.orbeon.oxf.util.XPath
import org.orbeon.oxf.xml.{XMLReceiver, TransformerUtils, Dom4j}
import org.orbeon.saxon.dom4j.DocumentWrapper
import org.orbeon.saxon.om.NodeInfo
import org.orbeon.scaxon.XML._
// Processor to replace or add resources based on properties
//
// An property looks like: oxf.fr.resource.*.*.en.detail.labels.save
//
// NOTE: We used to do this in XSLT, but when it came to implement *adding* missing resources, the level of complexity
// increased too much and readability would have suffered so we rewrote in Scala.
// Orbeon pipeline processor: reads the resources document ("data" input) and the
// app/form identification ("instance" input), patches the resources in place from
// configuration properties, and streams the result to the XML receiver.
class ResourcesPatcher extends SimpleProcessor {
// Entry point called by the pipeline engine to produce the "data" output.
def generateData(pipelineContext: PipelineContext, xmlReceiver: XMLReceiver): Unit = {
// Read inputs
val resourcesDocument = readInputAsDOM4J(pipelineContext, "data")
val instanceElement = new DocumentWrapper(readInputAsDOM4J(pipelineContext, "instance"), null, XPath.GlobalConfiguration) \ *
// app/form select which oxf.fr.resource.<app>.<form>.* properties apply.
val app = instanceElement \ "app" stringValue
val form = instanceElement \ "form" stringValue
// Transform and write out the document
// NOTE: transform mutates resourcesDocument in place before serialization.
ResourcesPatcher.transform(resourcesDocument, app, form)(Properties.instance.getPropertySet)
TransformerUtils.writeDom4j(resourcesDocument, xmlReceiver)
}
}
object ResourcesPatcher {
// Patches `resourcesDocument` in place: for every property named
// oxf.fr.resource.<app>.<form>.<lang>.<path...>, ensures the element at <path>
// exists under each matching <resource xml:lang="..."> root and sets its text
// to the property's value. A <lang> of "*" applies to all existing languages.
def transform(resourcesDocument: Document, app: String, form: String)(implicit properties: PropertySet): Unit = {
val resourcesElement = new DocumentWrapper(resourcesDocument, null, XPath.GlobalConfiguration) \ *
// All property names under the oxf.fr.resource.<app>.<form> prefix.
val propertyNames = properties.propertiesStartsWith("oxf.fr.resource" :: app :: form :: Nil mkString ".")
// In 4.6 summary/detail buttons are at the top level
def filterPathForBackwardCompatibility(path: Seq[String]) = path take 2 match {
case Seq("detail" | "summary", "buttons") ⇒ path drop 1
case _ ⇒ path
}
// One (language-or-"*", element path, new value) triple per property.
val langPathValue =
for {
name ← propertyNames
tokens = name split """\."""
lang = tokens(5) // token 5 of the property name is the language part
path = filterPathForBackwardCompatibility(tokens drop 6) mkString "/"
value = properties.getString(name)
if value ne null // got one case where this happened
} yield
(lang, path, value)
// Return all languages or the language specified if it exists
// For now we don't support creating new top-level resource elements for new languages.
def findConcreteLanguages(langOrWildcard: String) = {
val allLanguages =
eval(resourcesElement, "resource/@xml:lang/string()").asInstanceOf[Seq[String]]
val filtered =
if (langOrWildcard == "*")
allLanguages
else
allLanguages filter (_ == langOrWildcard)
filtered.distinct // there *shouldn't* be duplicate languages in the source
}
// All <resource> roots for a concrete language, unwrapped to dom4j elements.
def resourceElementsForLang(lang: String) =
eval(resourcesElement, s"resource[@xml:lang = '$lang']").asInstanceOf[Seq[NodeInfo]] map unwrapElement
// Update or create elements and set values
for {
(langOrWildcard, path, value) ← langPathValue
lang ← findConcreteLanguages(langOrWildcard)
rootForLang ← resourceElementsForLang(lang)
} locally {
Dom4j.ensurePath(rootForLang, path split "/" map QName.get).setText(value)
}
}
} | wesley1001/orbeon-forms | src/main/scala/org/orbeon/oxf/fr/ResourcesPatcher.scala | Scala | lgpl-2.1 | 4,179 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600j.v3
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600j.v3.retriever.CT600JBoxRetriever
// CT600J v3: one tax-avoidance scheme reference number (SRN) box. Validation
// rules depend on the neighbouring scheme number/date boxes passed in by subclasses.
abstract class SchemeReferenceNumberBox extends CtBoxIdentifier("Scheme reference number") with CtOptionalString with Input with ValidatableBox[CT600JBoxRetriever] {
// Cross-field validation:
// - previous number and previous date both empty -> this box must be blank too
// - this box's own date present               -> this box is mandatory and must match the SRN pattern
// - otherwise                                 -> optional, but must match the SRN pattern when filled in
def validateSchemeReferenceNumber(previousSchemeNumberBox: CtOptionalString, previousSchemeDateBox: CtOptionalDate, schemeDateBox: CtOptionalDate) = (previousSchemeNumberBox.value, previousSchemeDateBox.value, schemeDateBox.value) match {
case (None, None, _) => validateStringAsBlank(id, this)
case (_, _, Some(_)) => validateAsMandatory(this) ++ validateOptionalStringByRegex(id, this, taxAvoidanceSchemeNumberRegex)
case _ => validateOptionalStringByRegex(id, this, taxAvoidanceSchemeNumberRegex)
}
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600j/v3/SchemeReferenceNumberBox.scala | Scala | apache-2.0 | 1,416 |
package org.bm
import scala.collection.immutable.Queue
/**
 * Type aliases shared across the logviewer code base.
 * @author Baptiste Morin
 */
package object logviewer {
// A single lexical token from a log line.
type Token = String
// Tokens in the order they were read (FIFO).
type Tokens = Queue[String]
// A parsed log message as field-name -> field-value pairs.
type ParsedMessage = Map[String, String]
}
| morinb/logviewer | src/main/scala/org/bm/logviewer/package.scala | Scala | apache-2.0 | 221 |
package net.fwbrasil.smirror
// Reflection fixture: constructor mixes a val param, plain params, a primitive,
// an array, AnyRef/Any, and a default value; m7 adds method-level defaults.
class SParameterSpecTestClass(
val m1: Int,
m2: String,
m3: Array[Int],
m4: Object,
m5: Any,
m6: String = "a") {
// m10's default refers to the instance's m1, so resolving it requires an instance.
def m7(m8: Exception, m9: Any = 1, m10: Int = m1) = null
}
class SParameterSpec extends SMirrorSpec {
"Contructor parameters" should "be reflected" in
test[SParameterSpecTestClass] { (sClass, jClass) =>
val params = sClass.constructors.head.parameters.map {
parameter =>
(parameter.name,
parameter.sClass,
parameter.defaultValueOption)
}
params.toSet should equal(Set(
("m1", sClassOf[Int], None),
("m2", sClassOf[String], None),
("m3", sClassOf[Array[Int]], None),
("m4", sClassOf[Object], None),
("m5", sClassOf[Any], None),
("m6", sClassOf[String], Some("a"))))
}
"Method parameters" should "be reflected" in
test[SParameterSpecTestClass] { (sClass, jClass) =>
val instance = new SParameterSpecTestClass(99, "b", Array(1), new Object, "c")
val params = sClass.methods.head.parameters.map {
parameter =>
(parameter.name,
parameter.sClass,
parameter.defaultValueOption(instance))
}
params.toSet should equal(Set(
("m8", sClassOf[Exception], None),
("m9", sClassOf[Any], Some(1)),
("m10", sClassOf[Int], Some(99))))
}
} | fwbrasil/smirror | src/test/scala/net/fwbrasil/smirror/SParameterSpec.scala | Scala | lgpl-2.1 | 1,290 |
package io.skysail.api.osgi.events.impl
import org.apache.felix.bundlerepository.RepositoryAdmin
import io.skysail.api.osgi.bundlerepository.RepositoryService
class SkysailObrCommands(repoService: RepositoryService) {
//private val app = new ObrApplication(null, null, null, repoAdmin)
def listRepos(): Unit = {
//val repos = new RepoService().repoAdmin.
// repos
// //.map(_.toString)
// //.sorted
// .foreach(key =>
// //log.info(s"$key.name => '${key.uri}'"
// )
// )
}
// def search(searchFor: String): Unit = {
// log.info("Search Repositories:")
// log.info("====================")
//
// val filter = "(|(presentationname=*)(symbolicname=*))"
// val resources = repoAdmin.discoverResources(filter)
// resources
// //.map(_.toString)
// //.sorted
// .foreach(key =>
// log.info(s"$key")// => '${key.getSymbolicName}'")
// )
//
// }
} | evandor/skysail-server | skysail.api/src/io/skysail/api/osgi/events/impl/SkysailObrCommands.scala | Scala | apache-2.0 | 926 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc.interpreter
package shell
// REPL completion interface: given a buffer and a cursor offset within it,
// produce completion candidates.
trait Completion {
// Convenience overload with candidate filtering enabled.
final def complete(buffer: String, cursor: Int): CompletionResult = complete(buffer, cursor, filter = true)
def complete(buffer: String, cursor: Int, filter: Boolean): CompletionResult
}
// Null-object implementation: never offers any candidates.
object NoCompletion extends Completion {
def complete(buffer: String, cursor: Int, filter: Boolean) = NoCompletions
}
case class CompletionResult(line: String, cursor: Int, candidates: List[CompletionCandidate], typeAtCursor: String = "", typedTree: String = "") {
  /** Returns this result when it has candidates; otherwise the lazily-built fallback. */
  final def orElse(other: => CompletionResult): CompletionResult =
    if (candidates.isEmpty) other else this
}
object CompletionResult {
// Canonical empty result; identical to the NoCompletions sentinel below.
val empty: CompletionResult = NoCompletions
}
// Sentinel "no result": empty line, cursor -1, no candidates.
object NoCompletions extends CompletionResult("", -1, Nil, "", "")
// Combines several completers: scanning left to right, the first one that
// returns candidates wins (CompletionResult.orElse short-circuits the rest).
case class MultiCompletion(underlying: Completion*) extends Completion {
override def complete(buffer: String, cursor: Int, filter: Boolean) =
underlying.foldLeft(CompletionResult.empty)((r,c) => r.orElse(c.complete(buffer, cursor, filter)))
}
| scala/scala | src/repl-frontend/scala/tools/nsc/interpreter/shell/Completion.scala | Scala | apache-2.0 | 1,325 |
package spatial.tests
import org.scalatest.{Matchers, FlatSpec}
import spatial.dsl._
import virtualized._
// Host-side round-trip test for Spatial's binary file I/O: writes arrays of
// several fixed-point/integer widths to disk, reads them back, and verifies
// the data survived unchanged. The Accel block is intentionally empty.
object BinaryFileTest extends SpatialTest {
// Fixed-point aliases: signed 4-bit, unsigned 8-bit, unsigned 16-bit.
type Nibble = FixPt[TRUE,_4,_0]
type UByte = FixPt[FALSE,_8,_0]
type UShort = FixPt[FALSE,_16,_0]
@virtualize def main(): Unit = {
Accel { /* No hardware stuff in this test :( */ }
// Test data per type; signed types are offset/scaled to hit negative values.
val nibbles = Array.tabulate(32){i => i.to[Nibble] }
val bytes = Array.tabulate(256){i => (i - 128).to[Byte] }
val ubytes = Array.tabulate(256){i => i.to[UByte] }
val shorts = Array.tabulate(256){i => ((i - 128)*256).to[Short] }
val ushorts = Array.tabulate(256){i => (i * 256).to[UShort] }
val ints = Array.tabulate(256){i => ((i - 128) * 256 * 256).to[Int] }
val uints = Array.tabulate(256){i => (i * 256).to[UInt32] * 256 }
// Write one binary file per type...
writeBinary(nibbles, "nibbles.dat")
writeBinary(bytes, "bytes.dat")
writeBinary(ubytes, "ubytes.dat")
writeBinary(shorts, "shorts.dat")
writeBinary(ushorts, "ushorts.dat")
writeBinary(ints, "ints.dat")
writeBinary(uints, "uints.dat")
// ...read each file back...
val nibblesIn = loadBinary[Nibble]("nibbles.dat")
val bytesIn = loadBinary[Byte]("bytes.dat")
val ubytesIn = loadBinary[UByte]("ubytes.dat")
val shortsIn = loadBinary[Short]("shorts.dat")
val ushortsIn = loadBinary[UShort]("ushorts.dat")
val intsIn = loadBinary[Int]("ints.dat")
val uintsIn = loadBinary[UInt32]("uints.dat")
// ...and compare every written/read pair.
val nibblesMatch = compare(nibbles, nibblesIn, "Nibbles")
val bytesMatch = compare(bytes, bytesIn, "Bytes")
val ubytesMatch = compare(ubytes, ubytesIn, "UBytes")
val shortsMatch = compare(shorts, shortsIn, "Shorts")
val ushortsMatch = compare(ushorts, ushortsIn, "UShorts")
val intsMatch = compare(ints, intsIn, "Ints")
val uintsMatch = compare(uints, uintsIn, "UInts")
assert(nibblesMatch && bytesMatch && ubytesMatch && shortsMatch && ushortsMatch && intsMatch && uintsMatch, "One or more tests failed")
}
// Compares two arrays; prints "<name>: PASS"/"<name>: FAIL" (and each
// mismatching pair on failure) and returns whether they were equal.
@virtualize def compare[T:Type:Num](a: Array[T], b: Array[T], name: CString): Boolean = {
val matches = a == b
if (!matches) {
println(s"$name: FAIL")
a.zip(b){(x,y) => pack(x,y) }.foreach{x => if (x._1 != x._2) println("expected: " + x._1 + ", result: " + x._2) else () }
}
else {
println(s"$name: PASS")
}
matches
}
}
// ScalaTest entry point: executes the BinaryFileTest Spatial app as a unit test.
class FileIOTests extends FlatSpec with Matchers {
"BinaryFileTest" should "not corrupt data" in { BinaryFileTest.runTest() }
}
| stanford-ppl/spatial-lang | spatial/core/test/spatial/tests/FileIOTests.scala | Scala | mit | 2,462 |
import scala.language.experimental.macros
// NOTE(review): located under test/files/neg — presumably a compiler-warning
// fixture checking that only the genuinely unused macro def is reported; confirm.
object Unused {
// seen as used before expansion
private def usedMacro(): Unit = macro UnusedMacro.usedMacroImpl
// never used
private def unusedMacro(): Unit = macro UnusedMacro.usedMacroImpl
def f() = usedMacro()
}
| martijnhoekstra/scala | test/files/neg/t10296-warn/Unused_2.scala | Scala | apache-2.0 | 273 |
package com.amichalo.mooolelo.protocol
import spray.json._
//FIXME implement it using macros
trait AnyValJsonFormat {
// Builds a format mapping T <-> JSON string via the wrapper's factory/extractor.
// Deserialization fails (spray deserializationError) on any non-string JSON value.
def fromStringJsonFormat[T](factory: String => T, extractor: T => String): RootJsonFormat[T] = new RootJsonFormat[T] {
override def read(json: JsValue): T = json match {
case JsString(value) => factory(value)
case _ => deserializationError("String type expected")
}
override def write(value: T): JsValue = {
JsString(extractor(value))
}
}
// T <-> JSON number narrowed to Int.
// NOTE(review): BigDecimal.toInt truncates non-integral/out-of-range numbers
// silently — confirm that is acceptable for callers.
def fromIntJsonFormat[T](factory: Int => T, extractor: T => Int): RootJsonFormat[T] = new RootJsonFormat[T] {
override def read(json: JsValue): T = json match {
case JsNumber(value) => factory(value.toInt)
case _ => deserializationError("Int type expected")
}
override def write(value: T): JsValue = {
JsNumber(extractor(value))
}
}
// T <-> JSON number narrowed to Long (same truncation caveat as above).
def fromLongJsonFormat[T](factory: Long => T, extractor: T => Long): RootJsonFormat[T] = new RootJsonFormat[T] {
override def read(json: JsValue): T = json match {
case JsNumber(value) => factory(value.toLong)
case _ => deserializationError("Long type expected")
}
override def write(value: T): JsValue = {
JsNumber(extractor(value))
}
}
} | amichalo/mooolelo | src/main/scala/com/amichalo/mooolelo/protocol/AnyValJsonFormat.scala | Scala | apache-2.0 | 1,253 |
package japgolly.scalajs.react.hooks
// DO NOT MANUALLY EDIT
// DO NOT MANUALLY EDIT
//
// THIS IS GENERATED BY RUNNING genHooks IN SBT
//
// DO NOT MANUALLY EDIT
// DO NOT MANUALLY EDIT
import japgolly.scalajs.react.Reusable
import japgolly.scalajs.react.hooks.Hooks.UseCallbackArg
import japgolly.scalajs.react.util.Effect._
import scala.scalajs.js
trait UseCallbackArgInstances {
implicit def c1[A, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A) => Z[Unit]] =
UseCallbackArg[(A) => Z[Unit], js.Function1[A, Unit]](
z => (a) => Z.dispatch(z(a)))(
z => Reusable.byRef(z).withValue((a) => Z.delay(z(a))))
implicit def c2[A, B, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B) => Z[Unit]] =
UseCallbackArg[(A, B) => Z[Unit], js.Function2[A, B, Unit]](
z => (a, b) => Z.dispatch(z(a, b)))(
z => Reusable.byRef(z).withValue((a, b) => Z.delay(z(a, b))))
implicit def c3[A, B, C, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C) => Z[Unit]] =
UseCallbackArg[(A, B, C) => Z[Unit], js.Function3[A, B, C, Unit]](
z => (a, b, c) => Z.dispatch(z(a, b, c)))(
z => Reusable.byRef(z).withValue((a, b, c) => Z.delay(z(a, b, c))))
implicit def c4[A, B, C, D, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C, D) => Z[Unit]] =
UseCallbackArg[(A, B, C, D) => Z[Unit], js.Function4[A, B, C, D, Unit]](
z => (a, b, c, d) => Z.dispatch(z(a, b, c, d)))(
z => Reusable.byRef(z).withValue((a, b, c, d) => Z.delay(z(a, b, c, d))))
implicit def c5[A, B, C, D, E, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C, D, E) => Z[Unit]] =
UseCallbackArg[(A, B, C, D, E) => Z[Unit], js.Function5[A, B, C, D, E, Unit]](
z => (a, b, c, d, e) => Z.dispatch(z(a, b, c, d, e)))(
z => Reusable.byRef(z).withValue((a, b, c, d, e) => Z.delay(z(a, b, c, d, e))))
implicit def c6[A, B, C, D, E, F, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C, D, E, F) => Z[Unit]] =
UseCallbackArg[(A, B, C, D, E, F) => Z[Unit], js.Function6[A, B, C, D, E, F, Unit]](
z => (a, b, c, d, e, f) => Z.dispatch(z(a, b, c, d, e, f)))(
z => Reusable.byRef(z).withValue((a, b, c, d, e, f) => Z.delay(z(a, b, c, d, e, f))))
implicit def c7[A, B, C, D, E, F, G, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C, D, E, F, G) => Z[Unit]] =
UseCallbackArg[(A, B, C, D, E, F, G) => Z[Unit], js.Function7[A, B, C, D, E, F, G, Unit]](
z => (a, b, c, d, e, f, g) => Z.dispatch(z(a, b, c, d, e, f, g)))(
z => Reusable.byRef(z).withValue((a, b, c, d, e, f, g) => Z.delay(z(a, b, c, d, e, f, g))))
implicit def c8[A, B, C, D, E, F, G, H, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C, D, E, F, G, H) => Z[Unit]] =
UseCallbackArg[(A, B, C, D, E, F, G, H) => Z[Unit], js.Function8[A, B, C, D, E, F, G, H, Unit]](
z => (a, b, c, d, e, f, g, h) => Z.dispatch(z(a, b, c, d, e, f, g, h)))(
z => Reusable.byRef(z).withValue((a, b, c, d, e, f, g, h) => Z.delay(z(a, b, c, d, e, f, g, h))))
implicit def c9[A, B, C, D, E, F, G, H, I, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C, D, E, F, G, H, I) => Z[Unit]] =
UseCallbackArg[(A, B, C, D, E, F, G, H, I) => Z[Unit], js.Function9[A, B, C, D, E, F, G, H, I, Unit]](
z => (a, b, c, d, e, f, g, h, i) => Z.dispatch(z(a, b, c, d, e, f, g, h, i)))(
z => Reusable.byRef(z).withValue((a, b, c, d, e, f, g, h, i) => Z.delay(z(a, b, c, d, e, f, g, h, i))))
implicit def c10[A, B, C, D, E, F, G, H, I, J, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C, D, E, F, G, H, I, J) => Z[Unit]] =
UseCallbackArg[(A, B, C, D, E, F, G, H, I, J) => Z[Unit], js.Function10[A, B, C, D, E, F, G, H, I, J, Unit]](
z => (a, b, c, d, e, f, g, h, i, j) => Z.dispatch(z(a, b, c, d, e, f, g, h, i, j)))(
z => Reusable.byRef(z).withValue((a, b, c, d, e, f, g, h, i, j) => Z.delay(z(a, b, c, d, e, f, g, h, i, j))))
implicit def c11[A, B, C, D, E, F, G, H, I, J, K, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K) => Z[Unit]] =
UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K) => Z[Unit], js.Function11[A, B, C, D, E, F, G, H, I, J, K, Unit]](
z => (a, b, c, d, e, f, g, h, i, j, k) => Z.dispatch(z(a, b, c, d, e, f, g, h, i, j, k)))(
z => Reusable.byRef(z).withValue((a, b, c, d, e, f, g, h, i, j, k) => Z.delay(z(a, b, c, d, e, f, g, h, i, j, k))))
implicit def c12[A, B, C, D, E, F, G, H, I, J, K, L, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L) => Z[Unit]] =
UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L) => Z[Unit], js.Function12[A, B, C, D, E, F, G, H, I, J, K, L, Unit]](
z => (a, b, c, d, e, f, g, h, i, j, k, l) => Z.dispatch(z(a, b, c, d, e, f, g, h, i, j, k, l)))(
z => Reusable.byRef(z).withValue((a, b, c, d, e, f, g, h, i, j, k, l) => Z.delay(z(a, b, c, d, e, f, g, h, i, j, k, l))))
implicit def c13[A, B, C, D, E, F, G, H, I, J, K, L, M, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M) => Z[Unit]] =
UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M) => Z[Unit], js.Function13[A, B, C, D, E, F, G, H, I, J, K, L, M, Unit]](
z => (a, b, c, d, e, f, g, h, i, j, k, l, m) => Z.dispatch(z(a, b, c, d, e, f, g, h, i, j, k, l, m)))(
z => Reusable.byRef(z).withValue((a, b, c, d, e, f, g, h, i, j, k, l, m) => Z.delay(z(a, b, c, d, e, f, g, h, i, j, k, l, m))))
implicit def c14[A, B, C, D, E, F, G, H, I, J, K, L, M, N, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M, N) => Z[Unit]] =
UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M, N) => Z[Unit], js.Function14[A, B, C, D, E, F, G, H, I, J, K, L, M, N, Unit]](
z => (a, b, c, d, e, f, g, h, i, j, k, l, m, n) => Z.dispatch(z(a, b, c, d, e, f, g, h, i, j, k, l, m, n)))(
z => Reusable.byRef(z).withValue((a, b, c, d, e, f, g, h, i, j, k, l, m, n) => Z.delay(z(a, b, c, d, e, f, g, h, i, j, k, l, m, n))))
implicit def c15[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O) => Z[Unit]] =
UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O) => Z[Unit], js.Function15[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, Unit]](
z => (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o) => Z.dispatch(z(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o)))(
z => Reusable.byRef(z).withValue((a, b, c, d, e, f, g, h, i, j, k, l, m, n, o) => Z.delay(z(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o))))
implicit def c16[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P) => Z[Unit]] =
UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P) => Z[Unit], js.Function16[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Unit]](
z => (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) => Z.dispatch(z(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p)))(
z => Reusable.byRef(z).withValue((a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) => Z.delay(z(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p))))
implicit def c17[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q) => Z[Unit]] =
UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q) => Z[Unit], js.Function17[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, Unit]](
z => (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q) => Z.dispatch(z(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q)))(
z => Reusable.byRef(z).withValue((a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q) => Z.delay(z(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q))))
implicit def c18[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R) => Z[Unit]] =
UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R) => Z[Unit], js.Function18[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, Unit]](
z => (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r) => Z.dispatch(z(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r)))(
z => Reusable.byRef(z).withValue((a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r) => Z.delay(z(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r))))
implicit def c19[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S) => Z[Unit]] =
UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S) => Z[Unit], js.Function19[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, Unit]](
z => (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s) => Z.dispatch(z(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s)))(
z => Reusable.byRef(z).withValue((a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s) => Z.delay(z(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s))))
implicit def c20[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T) => Z[Unit]] =
UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T) => Z[Unit], js.Function20[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, Unit]](
z => (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) => Z.dispatch(z(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t)))(
z => Reusable.byRef(z).withValue((a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) => Z.delay(z(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t))))
implicit def c21[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U) => Z[Unit]] =
UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U) => Z[Unit], js.Function21[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, Unit]](
z => (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u) => Z.dispatch(z(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u)))(
z => Reusable.byRef(z).withValue((a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u) => Z.delay(z(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u))))
implicit def c22[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, Z[_]](implicit Z: Dispatch[Z]): UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V) => Z[Unit]] =
UseCallbackArg[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V) => Z[Unit], js.Function22[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, Unit]](
z => (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v) => Z.dispatch(z(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v)))(
z => Reusable.byRef(z).withValue((a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v) => Z.delay(z(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v))))
}
| japgolly/scalajs-react | coreGeneric/src/main/scala/japgolly/scalajs/react/hooks/UseCallbackBoilerplate.scala | Scala | apache-2.0 | 11,349 |
package com.bigchange.datamining
import java.io.File
import org.apache.spark.SparkContext
import org.apache.spark.mllib.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.io.Source
/**
* Created by C.J.YOU on 2016/9/13.
*/
object CustomNaiveBayes {
type CNB = (mutable.HashMap[(String,String),Int], mutable.HashMap[String,Int], mutable.HashMap[String,Int])
// Trains Spark MLlib's NaiveBayes on `data`.
// lambda: additive (Laplace) smoothing parameter; modelType is passed straight
// through to MLlib's setModelType.
def naiveBayeModel(lambda: Double, modelType:String, data:RDD[LabeledPoint] ): NaiveBayesModel = {
val model = new NaiveBayes().setLambda(lambda).setModelType(modelType).run(data)
model
}
// Builds counting tables from every training fold EXCEPT the one with key `id`
// (leave-one-out over the folds in `data`). Each row is expected to be
// "f1 f2 f3 f4 label" separated by single spaces.
// Returns (feature+label counts, label counts, feature counts).
// NOTE(review): rows that do not split into exactly 5 tokens hit a MatchError —
// confirm input is always well-formed.
def model(id: Int,data: Map[Int,List[String]]): CNB = {
// Counting tables for the categorical (non-numeric) attributes.
val dHMap = new mutable.HashMap[(String,String),Int]()
val hMap = new mutable.HashMap[String,Int]()
val dMap = new mutable.HashMap[String,Int]()
data.foreach { x =>
val index = x._1
if( index != id) {
x._2.foreach(y => y.split(" ").toList.toArray match {
case Array(f1,f2,f3,f4,la) =>
// Count the label occurrence.
if(hMap.contains(la)) {
hMap.update(la,hMap(la) + 1)
} else {
hMap.put(la,1)
}
// Count each feature both jointly with the label and on its own.
Array(f1,f2,f3,f4).foreach(x => {
if(dHMap.contains((x,la))) dHMap.update((x,la),dHMap((x, la)) + 1)
else {
dHMap.put((x,la),1)
}
if(dMap.contains(x)) {
dMap.update(x,dMap(x) + 1)
} else {
dMap.put(x,1)
}
})
})
}
}
(dHMap, hMap, dMap)
}
/**
 * Scores every hypothesis (label) in the model against the observed features and
 * returns the winning (label, probability) pair.
 *
 * `modelNB` is the triple produced by `model` (the `CNB` alias, spelled out here):
 *   _1: (feature, label) -> joint count
 *   _2: label -> number of training rows carrying that label
 *   _3: feature -> total count (not needed for prediction)
 *
 * For each label h: P(h|D) ~ P(h) * PRODUCT over features f of P(f|h),
 * with P(f|h) = count(f, h) / count(h) and P(h) = count(h) / total.
 * (The debug printlns of the original were removed.)
 */
def predict(data: Array[String],
            modelNB: (mutable.HashMap[(String, String), Int], mutable.HashMap[String, Int], mutable.HashMap[String, Int])) = {
  val listProb = new ListBuffer[(String, Double)]
  val totalH = modelNB._2.values.sum
  modelNB._2.foreach { h =>
    // BUG FIX: the running product must restart at 1.0 for every hypothesis;
    // it previously carried over from the hypothesis scored before it.
    var posb = 1.0
    data.foreach { feature =>
      // P(feature | h); 0 when the pair was never seen in training.
      posb *= modelNB._1.getOrElse((feature, h._1), 0) / (h._2 * 1.0)
    }
    // Multiply in the prior P(h).
    posb *= modelNB._2(h._1) / (totalH * 1.0)
    listProb += ((h._1, posb))
  }
  // Highest posterior wins.
  listProb.sortWith(_._2 > _._2).head
}
// 数值型数据 - 使用高斯分布的方式计算: 概率密度(pdf) D|h
// Gaussian probability density: likelihood of observing value D under a normal
// distribution with the given mean and sample standard deviation (ssd).
def pdf(mean: Double, ssd: Double, D: Double): Double = {
  val z = (D - mean) / ssd
  math.exp(-0.5 * z * z) / (ssd * math.sqrt(2 * math.Pi))
}
// --- Unstructured-text (document classification) helpers ---
/* Naive Bayes text classifier.
   dataDir: training-set directory; each subdirectory is a category holding text files.
   stopWordsPath: stop-word list, one word per line. */
// Per-category word -> probability (or raw count) table.
case class Prob(map:mutable.HashMap[String,AnyVal])
// Total word count for a category.
case class Total(int: Int)
// NOTE(review): this method looks unfinished — `dir` is never used and the
// trailing comment promises building the category list, but no code follows.
def prepareData(dataDir: String, stopWordsPath: String): Unit = {
// Load the stop-word list (one word per line, trimmed).
val stopwords = Source.fromFile(stopWordsPath).getLines().map(_.trim).toList
// 将不是目录的元素过滤掉 (filter out entries that are not directories)
val dir = new File(dataDir)
// 获取分类类别列表 (obtain the list of category names)
}
// train: counts word occurrences per category and derives Laplace-smoothed
// P(word | category) values.
// NOTE(review): results are only held in local maps and the method returns
// Unit, so callers cannot observe the trained model — presumably unfinished;
// confirm the intended API.
def trainData(dataDir: String, categories: List[String], stopWordsList: List[String]): Unit = {
  // Per-category statistics.
  val cateProb = new mutable.HashMap[String, Prob]()
  val cateCounts = new mutable.HashMap[String, Total]()
  // Union of every category's (word, count) pairs.
  var finalVocalRdd: RDD[(String, Int)] = null
  for (category <- categories) {
    // FIX: Source.fromFile cannot expand a "/*" glob pattern; enumerate the
    // category directory's files explicitly instead.
    val categoryDir = new File(dataDir + "/" + category)
    val categoryFiles = Option(categoryDir.listFiles()).map(_.toList).getOrElse(Nil)
    val data = categoryFiles
      .flatMap(file => Source.fromFile(file).getLines())
      .flatMap(_.split(" "))
      .map(_.toLowerCase)
      .filter(word => word != "" && !stopWordsList.contains(word))
      .map((_, 1))
    val rdd = SparkContext.getOrCreate().parallelize(data).reduceByKey(_ + _)
    // FIX: `rdd.++(beforeRdd)` threw a NullPointerException on the first
    // iteration (beforeRdd started as null) and only ever combined the last
    // two categories; accumulate the union across all categories instead.
    finalVocalRdd = if (finalVocalRdd == null) rdd else finalVocalRdd.++(rdd)
    val wordCounts = rdd.collectAsMap().asInstanceOf[mutable.HashMap[String, AnyVal]]
    cateProb += ((category, Prob(wordCounts)))
    cateCounts += ((category, Total(wordCounts.values.asInstanceOf[Iterable[Int]].sum)))
  }
  // Re-reduce after the unions so every word carries one global count.
  val vocabulary = finalVocalRdd.reduceByKey(_ + _).collectAsMap().asInstanceOf[mutable.HashMap[String, Int]]
  // FIX: the intent (per the original comment) is to DROP words seen fewer
  // than 3 times; the old predicate `_._2 < 3` KEPT exactly those rare words.
  val finalVocal = vocabulary.filter(_._2 >= 3)
  val lenVocal = finalVocal.size
  // Laplace smoothing: P(word | category) = (count + 1) / (categoryTotal + |vocab|).
  for (category <- categories) {
    val probs = new mutable.HashMap[String, Double]
    val denominator = cateCounts(category).int + lenVocal
    finalVocal.foreach { case (word, _) =>
      // FIX: an unseen word's raw count is 0; the old default of 1 silently
      // double-counted the smoothing term.
      val count = cateProb(category).map.getOrElse(word, 0).asInstanceOf[Int]
      // FIX: key by word (what `classify` looks up), not by category — the old
      // code collapsed the whole table into one repeatedly-overwritten entry.
      probs += ((word, (count + 1) / (denominator * 1.0)))
    }
    cateProb.update(category, Prob(probs.asInstanceOf[mutable.HashMap[String, AnyVal]]))
  }
}
// Classifies one document file against the trained per-category word
// probabilities and returns the best (category, log-likelihood) pair.
// `vocal` (the global vocabulary) is currently unused but kept for API stability.
def classify(file: String, vocal: mutable.HashMap[String, Int], categories: List[String], stopWordsList: List[String], modelProb: mutable.HashMap[String, Prob]): (String, Double) = {
  // Load and normalize the document's words (lowercase, stop words removed).
  // FIX: close the source once consumed; the file handle was previously leaked.
  val source = Source.fromFile(file)
  val words =
    try source.getLines().flatMap(_.split(" ")).map(_.toLowerCase).filter(w => w != "" && !stopWordsList.contains(w)).toList
    finally source.close()
  // Score each category by summing log-probabilities (a raw product of many
  // small probabilities would underflow).
  val resultList = new ListBuffer[(String, Double)]
  categories.foreach { category =>
    var result = 0.0
    words.foreach { word =>
      // FIX: the old `.get(...).get` chain threw NoSuchElementException for any
      // word absent from the category's model; unknown words are now skipped.
      modelProb.get(category).flatMap(_.map.get(word)).foreach { prob =>
        result += math.log(prob.asInstanceOf[Double])
      }
    }
    resultList += ((category, result))
  }
  // FIX: log-likelihoods are negative, so the best category has the LARGEST
  // score; `sortBy(_._2).head` previously returned the worst one.
  resultList.maxBy(_._2)
}
}
| bigchange/AI | src/main/scala/com/bigchange/datamining/CustomNaiveBayes.scala | Scala | apache-2.0 | 5,976 |
package com.rackspace.prefs
import com.nparry.orderly._
import com.rackspace.prefs.model.DBTables._
import com.rackspace.prefs.model.{DBTables, Preferences, PreferencesMetadata}
import org.joda.time.DateTime
import org.json4s.{JValue, JNothing, DefaultFormats, Formats}
import org.json4s.JsonDSL.WithDouble._
import org.scalatra._
import org.scalatra.json._
import org.scalatra.scalate.ScalateSupport
import org.slf4j.LoggerFactory
import org.apache.commons.validator.routines.UrlValidator
import org.springframework.web.util.UriUtils
import scala.slick.driver.JdbcDriver.simple._
import scala.slick.jdbc.JdbcBackend.Database
import scala.util.control.Breaks._
import collection.JavaConverters._
import javax.servlet.http.HttpServletRequest
import com.fasterxml.jackson.core.{JsonParser, JsonFactory}
import org.json4s.jackson.JsonMethods
// HTTP service exposing per-tenant "preferences" documents: json payloads that are
// validated against a per-slug Orderly/JSON schema and stored via Slick.
// NOTE(review): internal validation helpers use `null` as the "no error" sentinel;
// documented below rather than changed, since the whole chain depends on it.
case class PreferencesService(db: Database) extends ScalatraServlet
    with ScalateSupport
    with JacksonJsonSupport {
    // json4s configuration required by JacksonJsonSupport
    protected implicit val jsonFormats: Formats = DefaultFormats
    // request header (set upstream, e.g. by repose) carrying the tenant's NAST id
    val X_TENANT_ID = "x-tenant-id"
    val logger = LoggerFactory.getLogger(getClass)
    // the service root is not a resource
    get("/") {
        NotFound(jsonifyError("Invalid URI: /"))
    }
    // status endpoint: reports the number of registered preference metadata rows
    get("/status") {
        db.withSession { implicit session =>
            val metadataCount = Query(preferencesMetadata.length).first
            jsonifyStatus(metadataCount)
        }
    }
    // returns the raw json schema registered for the given preference slug
    get("/metadata/:preference_slug/?") {
        val preferenceSlug = params("preference_slug")
        contentType = formats("json")
        getMetadata(preferenceSlug) match {
            case Some(metadata: PreferencesMetadata) => metadata.schema
            case None => NotFound(jsonifyError("Metadata preferences for /" + preferenceSlug + " not found"))
        }
    }
    // anything that's not /metadata* goes here
    // fetches the stored payload for /<slug>/<id>
    get( """^/(?!metadata)([^/]*)/([^/]*)/?$""".r) {
        val uriParts = multiParams("captures")
        val preferenceSlug = uriParts(0)
        val id = uriParts(1)
        contentType = formats("json")
        db.withSession { implicit session =>
            // join preferences with their metadata so the slug can be matched
            val getPayloadQuery = for {
                (prefs, metadata) <- preferences innerJoin preferencesMetadata on (_.preferencesMetadataId === _.id)
                if prefs.id === id && metadata.slug === preferenceSlug
            } yield (prefs.payload)
            getPayloadQuery.list match {
                case List(payload: String) => payload
                case _ => NotFound(jsonifyError("Preferences for " + preferenceSlug + " with id " + id + " not found"))
            }
        }
    }
    // creates or updates the preferences document for /<slug>/<id>; route only
    // matches when the request content type is application/json
    post("/:preference_slug/:id/?", request.getContentType() == "application/json") {
        val preferenceSlug = params("preference_slug")
        val id = params("id")
        val payload = request.body
        val actionResult: ActionResult = getMetadata(preferenceSlug) match {
            case Some(metadata: PreferencesMetadata) =>
                validateAndWritePreference(metadata, preferenceSlug, id, payload)
            case None =>
                BadRequest(jsonifyError("Preferences for /" + preferenceSlug + " does not have any metadata"))
        }
        // log anything that is not a 200/201 so failed writes are traceable
        if ( !(actionResult.status.code == 200 || actionResult.status.code == 201)) {
            logger.error(actionResult.toString)
        }
        actionResult
    }
    /**
     * Validate the preference json payload and write to the database.
     * Validation steps, in order: well-formed json, Orderly schema conformance,
     * container url/name validity, presence of either a default container or all
     * datacenter containers, and NAST id consistency with the request header.
     * @param metadata schema metadata registered for the preference slug
     * @param preferenceSlug url slug identifying the preference type
     * @param id preference owner id
     * @param payload raw json request body
     * @return Ok/Created on success, BadRequest with a json error body otherwise
     */
    def validateAndWritePreference(metadata: PreferencesMetadata, preferenceSlug: String, id: String, payload: String): ActionResult = {
        // check that payload is valid json
        var validateError = validateJson(preferenceSlug, id, payload)
        if (validateError != null) return validateError
        val orderly = Orderly(metadata.schema)
        orderly.validate(payload) match {
            case head :: tail =>
                // give them hints of what's wrong. Only print the first violation.
                return BadRequest(jsonifyError("Preferences for /" + preferenceSlug + "/" + id +
                    " does not validate properly. " + head.path + " " + head.message))
            case Nil => ()
        }
        // parse payload
        val jsonContent = parse(payload)
        val defaultContainer = jsonContent \ "default_archive_container_url"
        // validate default_archive_container_url
        validateError = validateContainer(preferenceSlug, id, defaultContainer)
        if (validateError != null) return validateError
        //validate urls of archive_container_urls if defaultContainer is ok
        val archiveContainers = (jsonContent \ "archive_container_urls").children
        breakable {
            archiveContainers.foreach { container =>
                // validate and break when first validation failure occurred
                validateError = validateContainer(preferenceSlug, id, container)
                if (validateError != null) break
            }
        }
        if (validateError != null) return validateError
        // if container urls and names are ok, validate that either default container is provided
        // or all datacenters container urls are provided
        if ((defaultContainer == JNothing) && (!allDataCenterArePresent(preferenceSlug, id, jsonContent))) {
            // default container was not provided, and not all data centers container urls are provided , bad request
            return BadRequest(jsonifyError("Preferences for /" + preferenceSlug + "/" + id + " must have a default_container_url or must have all datacenter archive_container_urls present." +
                " See Cloud Feeds documentation for a list of valid datacenters."))
        }
        getAlternateId(request) match {
            // write to db if content pass validation
            case Some(alternateIdFromHeader) => {
                //validate alternateId in payload with the the value of alternate id from request header
                val errorMessages = ( defaultContainer :: archiveContainers)
                    .flatMap (container => validateAlternateId(preferenceSlug, id, alternateIdFromHeader, container))
                errorMessages match {
                    case head :: tail => BadRequest(jsonifyError(errorMessages.mkString(";")))
                    case Nil => writePreferenceToDb(metadata, id, payload, alternateIdFromHeader)
                }
            }
            case None => BadRequest(jsonifyError("Preferences for /" + preferenceSlug + "/" + id +
                " must have a x-tenant-id header with NastID as value. "))
        }
    }
    /**
     * Verify that jsonString is valid json
     * @param preferenceSlug url slug, used only to build error messages
     * @param id preference owner id, used only to build error messages
     * @param payload raw json text to check
     * @return null when the payload is valid json, BadRequest otherwise
     */
    def validateJson(preferenceSlug: String, id: String, payload: String): ActionResult = {
        // using Jackson JsonFactory and JsonParser
        var result:ActionResult = null
        val f: JsonFactory = JsonMethods.mapper.getFactory
        try {
            // parse the json
            val p: JsonParser = f.createParser(payload)
            // valid first token of json
            mapper.readValue(p, classOf[JValue])
            // check for more tokens: if there are more token, then jsonString is invalid, return BadRequest;
            // we check for this because the current version of the Jackson parser is forgiving and allow extra token such as
            // ';', ',', or "{}" after the top level closing curly bracelet.
            // valid json should only have one top level valid json body enclosed by opening and closing curly bracelets.
            if (p.nextToken != null) {
                result = BadRequest(jsonifyError("Preferences for /" + preferenceSlug + "/" + id + " have more than one json body."))
            }
        }
        catch {
            // failed to parse jsonString, not valid
            case e: Exception => {
                logger.error(s"Invalid json payload:[$payload] sent for for id:[$id]. Failed parsing with exception: ${e.getMessage}", e)
                result = BadRequest(jsonifyError("Preferences for /" + preferenceSlug + "/" + id + " must have valid json formatted payload. " + e.getMessage))
            }
        }
        result
    }
    /**
     * Validates the alternate id present in the container url with the value present in the
     * request header (x-tenant-id set by repose for that tenant)
     *
     * @param preferenceSlug url slug, used only to build error messages
     * @param id preference owner id, used only to build error messages
     * @param alternateIdFromHeader NAST id taken from the x-tenant-id header
     * @param container container url as a json value; JNothing is treated as "nothing to check"
     * @return error message, or None when the ids match or the url has no recognizable nast id segment
     */
    def validateAlternateId(preferenceSlug: String, id: String, alternateIdFromHeader: String, container: JValue): Option[String] = {
        if (container == JNothing) {
            return None
        }
        val containerUrl = container.extract[String]
        // this pattern will match url of format http[s]://hostname/rootpath/nastId/container_name, and capture container_name
        val patternForContainer = "^https?://[^/]+/[^/]+/([^/]+)/.*$".r
        patternForContainer.findFirstMatchIn( container.extract[String] ) match {
            case Some(m) => {
                // first captured group will contain the nastId
                val alternateIdFromUrl = m.group(1)
                if (alternateIdFromHeader != alternateIdFromUrl) {
                    Some(s"Preferences for /$preferenceSlug/$id has an nast id: $alternateIdFromUrl in url: $containerUrl which doesn't correspond to the tenant. Tenant nast id : $alternateIdFromHeader")
                } else
                    None
            }
            case None => None
        }
    }
    /**
     * Validate that the container url is valid and that the container name is valid
     * @param preferenceSlug url slug, used only to build error messages
     * @param id preference owner id, used only to build error messages
     * @param container container url as a json value; JNothing is skipped
     * @return null when valid or absent, BadRequest describing the failure otherwise
     */
    def validateContainer(preferenceSlug: String, id: String, container: JValue): ActionResult = {
        var result:ActionResult = null
        if (container != JNothing) {
            val validator = new UrlValidator()
            val containerUrl = container.extract[String]
            if (!validator.isValid(containerUrl)) {
                // validate url
                result = BadRequest(jsonifyError("Preferences for /" + preferenceSlug + "/" + id + " has an invalid url: " + containerUrl))
            }
            else {
                // validate container name in the url
                result = validateContainerName(preferenceSlug, id, containerUrl)
            }
        }
        result
    }
    /**
     * Cloud files has the following requirements for container name. This method validates to make sure the container name is compatible
     * with cloud files.
     *
     * The only restrictions on container names is that they cannot contain a forward slash (/) and must be less than 256 bytes in length.
     * Note that the length restriction applies to the name after it has been URL-encoded. For example, a container name of Course Docs
     * would be URL-encoded as Course%20Docs and is therefore 13 bytes in length rather than the expected 11.
     *
     * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Containers-d1e458.html
     *
     * @param preferenceSlug url slug, used only to build error messages
     * @param id preference owner id, used only to build error messages
     * @param containerUrl already url-validated container url
     * @return null when the container name is acceptable, BadRequest otherwise
     */
    def validateContainerName(preferenceSlug: String, id: String, containerUrl: String): ActionResult = {
        // validate container name
        var result:ActionResult = null
        // this pattern will match url of format http[s]://hostname/rootpath/nastId/container_name, and capture container_name
        val patternForContainer = "^https?://[^/]+/[^/]+/[^/]+/(.*)$".r
        val containerName = {
            patternForContainer.findFirstMatchIn(containerUrl) match {
                case Some(m) => m.group(1).replaceAll("/$", "") // get first captured group and remove trailing slash if present
                case None => ""
            }
        }
        if (containerName == "") {
            // container name cannot be empty
            result = BadRequest(jsonifyError("Preferences for /" + preferenceSlug + "/" + id + " is missing container name: " + containerUrl))
        }
        else {
            if (containerName.length() >= 256) {
                logger.debug(s"Encoded container name should be less than 256 bytes in length:[$containerUrl]")
                result = BadRequest(jsonifyError("Preferences for /" + preferenceSlug + "/" + id + " has an encoded container name longer than 255 bytes: " + containerUrl + ". " +
                    "Url must be encoded and should not contain query parameters or url fragments. Encoded container name cannot contain a forward slash(/) and must be less than 256 bytes in length."))
            } else {
                // container name must be less than 256 bytes in length, url encoded, and does not contain '/'
                val msgInvalidUrl =
                    "Preferences for /" + preferenceSlug + "/" + id + " has an invalid url: " + containerUrl + ". " +
                    "Url must be encoded and should not contain query parameters or url fragments. Encoded container name cannot contain a forward slash(/) and must be less than 256 bytes in length."
                try {
                    // check to see if container has special chars and url encoded
                    // first decode the containerName
                    val decoded = UriUtils.decode(containerName, "UTF-8")
                    //If decoding results with the same container name, either it hasnt been encoded or encoding doesnt actually change anything (Ex: a alpha-numeric string like "Tom")
                    if (containerName == decoded) {
                        //To make sure whether encoding changes anything
                        val encode = UriUtils.encodePathSegment(containerName, "UTF-8")
                        // if encoding the container name isn't same as the original, then container has special chars that are not encoded, bad request
                        if (encode != containerName) {
                            logger.debug(s"Encoding the container name isn't same as the original:[$containerUrl]")
                            result = BadRequest(jsonifyError(msgInvalidUrl))
                        }
                    } else {
                        //Since decoded container name is not same as the original, we can think container name is probably already encoded.
                        //But they could have send mixed case where only part of the container name is encoded. So decoding results in a different container
                        //name but that doesnt mean the entire container name is properly encoded.
                        //Removing any hex-characters from original.
                        //Encoding the resultant string should not change it. If it changes, it indicates there are still special chars.
                        val hexStrippedContainerName = containerName.replaceAll("%[a-fA-F0-9][a-fA-F0-9]", "")
                        val encodedHexStrippedContainerName = UriUtils.encodePathSegment(hexStrippedContainerName, "UTF-8")
                        if (hexStrippedContainerName != encodedHexStrippedContainerName) {
                            logger.debug(s"mixed case(partially encoded) container name:[$containerUrl]")
                            // if encoding the container name isn't the same as the original, then container has special chars that are not encoded, bad request
                            result = BadRequest(jsonifyError(msgInvalidUrl))
                        }
                        if (decoded contains '/') {
                            logger.debug(s"Container name contains forward slash(/) which is invalid:[$containerUrl]")
                            // containerName contains '/', bad request
                            result = BadRequest(jsonifyError("Preferences for /" + preferenceSlug + "/" + id + " has an invalid container name containing '/': " + containerUrl + ". " +
                                "Url must be encoded and should not contain query parameters or url fragments."))
                        }
                    }
                }
                catch {
                    case e: Exception => {
                        logger.error(s"Validating container name in the url:[$containerUrl] for id:[$id] failed with exception: ${e.getMessage}", e)
                        result = BadRequest(jsonifyError(msgInvalidUrl + " Reason: " + e.getMessage))
                    }
                }
            }
        }
        result
    }
    /**
     * true if all data centers are present, false otherwise
     * @param preferenceSlug url slug (not used in the computation)
     * @param id preference owner id (not used in the computation)
     * @param preferenceJson parsed preference payload
     * @return true only when archive_container_urls contains entries for iad, dfw, ord, lon, hkg and syd
     */
    def allDataCenterArePresent(preferenceSlug: String, id: String, preferenceJson: JValue): Boolean = {
        // extract "archive_container_urls": { datacenter: url } to Map(String, Any)
        val containerUrlsObj = (preferenceJson \ "archive_container_urls")
        if ( containerUrlsObj == JNothing ) {
            return false
        }
        // check for all data centers and return boolean
        val containerUrls = containerUrlsObj.extract[Map[String, Any]]
        containerUrls.contains("iad") &&
            containerUrls.contains("dfw") &&
            containerUrls.contains("ord") &&
            containerUrls.contains("lon") &&
            containerUrls.contains("hkg") &&
            containerUrls.contains("syd")
    }
    /**
     * Write the preference json to the database
     *
     * @param metadata metadata row identifying the preference type
     * @param id preference owner id
     * @param payload validated json payload to persist
     * @param alternateId tenant NAST id stored alongside the payload
     * @return Ok when an existing row was updated, Created when a new row was inserted
     */
    def writePreferenceToDb(metadata: PreferencesMetadata, id: String, payload: String, alternateId: String): ActionResult = {
        db.withSession { implicit session =>
            val prefsForIdandSlug = preferences.filter(prefs => prefs.id === id && prefs.preferencesMetadataId === metadata.id)
            prefsForIdandSlug.list match {
                case List(_: Preferences) => {
                    // row exists: update payload, timestamp and alternate id
                    preferences
                        .filter(prefs => prefs.id === id && prefs.preferencesMetadataId === metadata.id)
                        .map(prefs => (prefs.payload, prefs.updated, prefs.alternateId))
                        .update(payload, DateTime.now, Some(alternateId))
                    Ok()
                }
                case _ => {
                    // no row yet: insert a new one
                    preferences
                        .map(p => (p.id, p.preferencesMetadataId, p.payload, p.alternateId))
                        .insert(id, metadata.id.get, payload, Some(alternateId))
                    Created()
                }
            }
        }
    }
    // Looks up the metadata row for a slug; None when the slug is unknown.
    def getMetadata(slug: String): Option[PreferencesMetadata] = {
        db.withSession { implicit session =>
            preferencesMetadata.filter(_.slug === slug).list match {
                case List(metadata: PreferencesMetadata) => Some(metadata)
                case _ => None
            }
        }
    }
    // Extracts the tenant NAST id from the x-tenant-id header(s), if present.
    def getAlternateId(request: HttpServletRequest): Option[String] = {
        // this is a temporary hack, until something better comes along
        // to find which one is the NAST tenantId, we pick the longest string
        request.getHeaders(X_TENANT_ID).asScala.reduceLeftOption((str1: String, str2: String) => if (str1.length > str2.length) str1 else str2) match {
            case Some(tenant) => {
                logger.debug("For request " + request.getRequestURI + ", alternateId is " + tenant)
                Some(tenant)
            }
            case _ => None
        }
    }
    // Wraps an error message as {"error": message} pretty-printed json.
    def jsonifyError(errorMessage: String) : String = {
        val json = ("error" -> errorMessage)
        return pretty(render(json))
    }
    // Renders the /status response body.
    def jsonifyStatus(metadataCount: Int) : String = {
        "{ \"metadata-count\": " + metadataCount + " }"
    }
    // Catch-all error handler: log and return 500 with a json error body.
    error {
        case e => {
            logger.error("Request failed with exception", e)
            InternalServerError(jsonifyError("Request failed with exception:" + e + " message:" + e.getMessage))
        }
    }
}
| shintasmith/cloudfeeds-preferences-svc | app/src/main/scala/com/rackspace/prefs/PreferencesService.scala | Scala | apache-2.0 | 20,218 |
package controllers
import java.util.UUID
import javax.inject.Inject
import com.mohiva.play.silhouette.api._
import com.mohiva.play.silhouette.api.services.{ AuthInfoService, AvatarService }
import com.mohiva.play.silhouette.api.util.PasswordHasher
import com.mohiva.play.silhouette.impl.authenticators.SessionAuthenticator
import com.mohiva.play.silhouette.impl.providers._
import forms.SignUpForm
import models.User
import models.services.UserService
import play.api.i18n.Messages
import play.api.libs.concurrent.Execution.Implicits._
import play.api.mvc.Action
import scala.concurrent.Future
/**
* The sign up controller.
*
* @param env The Silhouette environment.
* @param userService The user service implementation.
* @param authInfoService The auth info service implementation.
* @param avatarService The avatar service implementation.
* @param passwordHasher The password hasher implementation.
*/
class SignUpController @Inject() (
  implicit val env: Environment[User, SessionAuthenticator],
  val userService: UserService,
  val authInfoService: AuthInfoService,
  val avatarService: AvatarService,
  val passwordHasher: PasswordHasher)
  extends Silhouette[User, SessionAuthenticator] {
  /**
   * Registers a new user.
   *
   * Flow: bind and validate the form; reject the request if a user with the
   * same email (credentials login info) already exists; otherwise hash the
   * password, build the User, and sequentially fetch the avatar, persist the
   * user and auth info, create a session authenticator and embed it in the
   * redirect response. Sign-up and login events are published on the event bus.
   *
   * @return The result to display.
   */
  def signUp = Action.async { implicit request =>
    SignUpForm.form.bindFromRequest.fold(
      // form validation failed: re-render the sign-up page with errors
      form => Future.successful(BadRequest(views.html.signUp(form))),
      data => {
        val loginInfo = LoginInfo(CredentialsProvider.ID, data.email)
        userService.retrieve(loginInfo).flatMap {
          case Some(user) =>
            // duplicate email: bounce back to the sign-up page with a flash message
            Future.successful(Redirect(routes.ApplicationController.signUp()).flashing("error" -> Messages("user.exists")))
          case None =>
            val authInfo = passwordHasher.hash(data.password)
            val user = User(
              userID = UUID.randomUUID(),
              loginInfo = loginInfo,
              firstName = Some(data.firstName),
              lastName = Some(data.lastName),
              fullName = Some(data.firstName + " " + data.lastName),
              email = Some(data.email),
              avatarURL = None
            )
            // each step depends on the previous one, so they run sequentially
            for {
              avatar <- avatarService.retrieveURL(data.email)
              user <- userService.save(user.copy(avatarURL = avatar))
              authInfo <- authInfoService.save(loginInfo, authInfo)
              authenticator <- env.authenticatorService.create(user.loginInfo)
              value <- env.authenticatorService.init(authenticator)
              result <- env.authenticatorService.embed(value, Future.successful(
                Redirect(routes.ApplicationController.index())
              ))
            } yield {
              env.eventBus.publish(SignUpEvent(user, request, request2lang))
              env.eventBus.publish(LoginEvent(user, request, request2lang))
              result
            }
        }
      }
    )
  }
}
| hariDasu/play-silhouette-seed | app/controllers/SignUpController.scala | Scala | apache-2.0 | 2,950 |
import scala.reflect.runtime.universe._
import scala.tools.reflect.Eval
object Test extends App {
  // Regression test (t5270): compile and run the quoted block at runtime via
  // the reflection toolbox (Eval). Trait Z has a self-type on Y, so its body
  // can read Y's member `y`; mixing Z into X must initialize z = 2 * y = 200,
  // which new X().println() prints.
  reify {
    class Y {
      def y = 100
    }
    trait Z { this: Y =>
      val z = 2 * y
    }
    class X extends Y with Z {
      def println() = Predef.println(z)
    }
    new X().println()
  }.eval
}
} | felixmulder/scala | test/files/run/t5270.scala | Scala | bsd-3-clause | 310 |
import Dependencies._
import sbt.Keys._
import sbt._
// Shared sbt settings: custom version keys (spark/hadoop/jets3t/jline/hive)
// and dependency lists derived from them, used across subprojects.
object Shared {
  // Custom setting keys; "x-" prefix distinguishes them from built-in sbt keys.
  lazy val sparkVersion = SettingKey[String]("x-spark-version")
  lazy val hadoopVersion = SettingKey[String]("x-hadoop-version")
  lazy val jets3tVersion = SettingKey[String]("x-jets3t-version")
  // (organization, version) pair for the jline dependency
  lazy val jlineDef = SettingKey[(String, String)]("x-jline-def")
  lazy val withHive = SettingKey[Boolean]("x-with-hive")
  // Base settings applied to every project: defaults for all version keys.
  lazy val sharedSettings: Seq[Def.Setting[_]] = Seq(
    publishArtifact in Test := false,
    scalaVersion := defaultScalaVersion,
    sparkVersion := defaultSparkVersion,
    hadoopVersion := defaultHadoopVersion,
    jets3tVersion := defaultJets3tVersion,
    // Scala 2.10 bundles its own jline under the scala-lang organization;
    // later Scala versions use the standalone jline artifact.
    jlineDef := (if (defaultScalaVersion.startsWith("2.10")) {
      ("org.scala-lang", defaultScalaVersion)
    } else {
      ("jline", "2.12")
    }),
    withHive := defaultWithHive,
    libraryDependencies += guava
  )
  val gisSettings: Seq[Def.Setting[_]] = Seq(
    libraryDependencies ++= geometryDeps
  )
  // spark-repl dependency, resolved from the configured spark version
  val repl: Seq[Def.Setting[_]] = Seq(
    libraryDependencies <+= (sparkVersion) { sv => sparkRepl(sv) }
  )
  // spark-hive is only added when x-with-hive is enabled
  val hive: Seq[Def.Setting[_]] = Seq(
    libraryDependencies <++= (withHive, sparkVersion) { (wh, sv) =>
      if (wh) List(sparkHive(sv)) else Nil
    }
  )
  // yarn web proxy is not available on hadoop 1.x
  val yarnWebProxy: Seq[Def.Setting[_]] = Seq(
    libraryDependencies <++= (hadoopVersion) { (hv) =>
      if (!hv.startsWith("1")) List(yarnProxy(hv)) else Nil
    }
  )
  // Full spark dependency set, plus jetty on Scala 2.11 where spark marks it provided.
  lazy val sparkSettings: Seq[Def.Setting[_]] = Seq(
    libraryDependencies <++= (scalaVersion, sparkVersion, hadoopVersion, jets3tVersion) { (v, sv, hv, jv) =>
      // jets3t version can be forced via -Djets3t.version, otherwise derived from hadoop
      val jets3tVersion = sys.props.get("jets3t.version") match {
        case Some(jv) => jets3t(Some(jv), None)
        case _ => jets3t(None, Some(hv))
      }
      val jettyVersion = "8.1.14.v20131031"
      val libs = Seq(
        sparkCore(sv),
        sparkYarn(sv),
        sparkSQL(sv),
        hadoopClient(hv),
        jets3tVersion,
        commonsCodec
      ) ++ (
        if (!v.startsWith("2.10")) {
          // in 2.11
          //Boot.scala → HttpServer → eclipse
          // eclipse → provided boohooo :'-(
          Seq(
            "org.eclipse.jetty" % "jetty-http" % jettyVersion,
            "org.eclipse.jetty" % "jetty-continuation" % jettyVersion,
            "org.eclipse.jetty" % "jetty-servlet" % jettyVersion,
            "org.eclipse.jetty" % "jetty-util" % jettyVersion,
            "org.eclipse.jetty" % "jetty-security" % jettyVersion,
            "org.eclipse.jetty" % "jetty-plus" % jettyVersion,
            "org.eclipse.jetty" % "jetty-server" % jettyVersion
          )
        } else Nil
      )
      libs
    }
  ) ++ repl ++ hive ++ yarnWebProxy
}
| antonkulaga/spark-notebook | project/Shared.scala | Scala | apache-2.0 | 2,780 |
package org.jetbrains.plugins.scala.lang.scaladoc
import com.intellij.codeInsight.editorActions.{CommentCompleteHandler, JavaLikeQuoteHandler, QuoteHandler, TypedHandler}
import com.intellij.lang.{CodeDocumentationAwareCommenter, Language, LanguageParserDefinitions}
import com.intellij.lexer.Lexer
import com.intellij.openapi.editor.Editor
import com.intellij.psi.tree.IElementType
import com.intellij.psi.{PsiComment, PsiElement, PsiErrorElement, PsiFile}
import org.jetbrains.plugins.scala.ScalaFileType
/**
* @author Alexander Podkhalyuzin
*/
// Decides whether a Scala block/doc comment is already terminated, so the
// editor's enter-handling knows whether to auto-close it. The token-walking
// logic mirrors com.intellij.codeInsight.editorActions.EnterHandler.
class ScalaIsCommentComplete extends CommentCompleteHandler {
  // Only applies to comments inside Scala files.
  def isApplicable(comment: PsiComment, commenter: CodeDocumentationAwareCommenter): Boolean = {
    comment.getParent.getLanguage == ScalaFileType.SCALA_LANGUAGE
  }
  //same code in com.intellij.codeInsight.editorActions.EnterHandler
  // Re-lexes the comment text (past its prefix) and walks tokens to decide
  // whether the trailing comment-suffix really closes THIS comment, or is
  // swallowed by a string literal / nested comment start.
  def isCommentComplete(comment: PsiComment, commenter: CodeDocumentationAwareCommenter, editor: Editor): Boolean = {
    val commentText: String = comment.getText
    val docComment: Boolean = isDocComment(comment, commenter)
    // doc comments end with e.g. "*/" from the doc suffix, block comments with the block suffix
    val expectedCommentEnd: String = if (docComment) commenter.getDocumentationCommentSuffix else commenter.getBlockCommentSuffix
    if (!commentText.endsWith(expectedCommentEnd)) return false
    val containingFile: PsiFile = comment.getContainingFile
    val language: Language = comment.getParent.getLanguage
    val lexer: Lexer = LanguageParserDefinitions.INSTANCE.forLanguage(language).createLexer(containingFile.getProject)
    val commentPrefix: String = if (docComment) commenter.getDocumentationCommentPrefix else commenter.getBlockCommentPrefix
    // lex only the comment body, skipping the opening prefix
    lexer.start(commentText, if (commentPrefix eq null) 0 else commentPrefix.length, commentText.length)
    val fileTypeHandler: QuoteHandler = TypedHandler.getQuoteHandler(containingFile, editor)
    val javaLikeQuoteHandler: JavaLikeQuoteHandler =
      fileTypeHandler match {
        case quoteHandler: JavaLikeQuoteHandler => quoteHandler
        case _ => null
      }
    while (true) {
      val tokenType: IElementType = lexer.getTokenType
      if (tokenType eq null) {
        // ran out of tokens without reaching a conclusive end
        return false
      }
      // a string-literal token that itself ends with the comment suffix and is
      // followed by a newline counts as a completed comment
      if (javaLikeQuoteHandler != null && javaLikeQuoteHandler.getStringTokenTypes != null &&
        javaLikeQuoteHandler.getStringTokenTypes.contains(tokenType)) {
        val text: String = commentText.substring(lexer.getTokenStart, lexer.getTokenEnd)
        val endOffset: Int = comment.getTextRange.getEndOffset
        if (text.endsWith(expectedCommentEnd) && endOffset < containingFile.getTextLength && containingFile.getText.charAt(endOffset) == '\\n') {
          return true
        }
      }
      var continue = false
      if (lexer.getTokenEnd == commentText.length) {
        // reached the end of the comment text
        if (lexer.getTokenType eq commenter.getLineCommentTokenType) {
          // last token is a line comment: restart lexing past its prefix
          lexer.start(commentText, lexer.getTokenStart + commenter.getLineCommentPrefix.length, commentText.length)
          lexer.advance()
          continue = true
        }
        else if (isInvalidPsi(comment)) {
          return false
        } else {
          return lexer.getTokenEnd - lexer.getTokenStart == 2 //difference from EnterHandler
        }
      }
      if (!continue && (tokenType == commenter.getDocumentationCommentTokenType ||
        tokenType == commenter.getBlockCommentTokenType)) {
        // a new comment starts inside this one: the original is not complete
        return false
      } else if (!continue) {
        lexer.advance()
      }
    }
    false
  }
  // True when the element is a documentation comment per the commenter.
  private def isDocComment(element: PsiElement, commenter: CodeDocumentationAwareCommenter): Boolean = {
    if (!element.isInstanceOf[PsiComment]) return false
    val comment: PsiComment = element.asInstanceOf[PsiComment]
    commenter.isDocumentationComment(comment)
  }
  // True when the first non-empty sibling after the comment is a PSI error
  // element, i.e. the tree after the comment failed to parse.
  private def isInvalidPsi(base: PsiElement): Boolean = {
    var current: PsiElement = base.getNextSibling
    while (current != null) {
      if (current.getTextLength != 0) {
        return current.isInstanceOf[PsiErrorElement]
      }
      current = current.getNextSibling
    }
    false
  }
} | triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/scaladoc/ScalaIsCommentComplete.scala | Scala | apache-2.0 | 3,993 |
package gitbucket.core.plugin
import java.io.{File, FilenameFilter, InputStream}
import java.net.URLClassLoader
import javax.servlet.ServletContext
import javax.servlet.http.{HttpServletRequest, HttpServletResponse}
import gitbucket.core.controller.{Context, ControllerBase}
import gitbucket.core.service.RepositoryService.RepositoryInfo
import gitbucket.core.service.SystemSettingsService.SystemSettings
import gitbucket.core.util.ControlUtil._
import gitbucket.core.util.Directory._
import gitbucket.core.util.JDBCUtil._
import gitbucket.core.util.{Version, Versions}
import org.apache.commons.codec.binary.{Base64, StringUtils}
import org.slf4j.LoggerFactory
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
/**
 * Runtime registry of extension points contributed by plugins: javascript
 * snippets, controllers, images, renderers and git repository routings.
 * Backed by plain mutable collections; registration is performed during
 * plugin initialization (see the companion object).
 */
class PluginRegistry {

  private val plugins = new ListBuffer[PluginInfo]
  private val javaScripts = new ListBuffer[(String, String)]
  private val controllers = new ListBuffer[(ControllerBase, String)]
  // image id -> Base64-encoded image bytes
  private val images = mutable.Map[String, String]()
  // file extension -> renderer; markdown is always renderable
  private val renderers = mutable.Map[String, Renderer]()
  renderers ++= Seq(
    "md" -> MarkdownRenderer, "markdown" -> MarkdownRenderer
  )
  private val repositoryRoutings = new ListBuffer[GitRepositoryRouting]

  /** Registers an installed plugin's descriptor. */
  def addPlugin(pluginInfo: PluginInfo): Unit = {
    plugins += pluginInfo
  }

  /** Returns descriptors of all registered plugins. */
  def getPlugins(): List[PluginInfo] = plugins.toList

  /** Stores an image (as a Base64-encoded string) under the given id. */
  def addImage(id: String, bytes: Array[Byte]): Unit = {
    val encoded = StringUtils.newStringUtf8(Base64.encodeBase64(bytes, false))
    images += ((id, encoded))
  }

  @deprecated("Use addImage(id: String, bytes: Array[Byte]) instead", "3.4.0")
  def addImage(id: String, in: InputStream): Unit = {
    import java.io.ByteArrayOutputStream
    // Read the stream to EOF. The previous implementation allocated
    // in.available() bytes and issued a single read(); available() is only an
    // estimate and read() may return fewer bytes than requested, which could
    // silently truncate the image. Loop until read() returns -1 instead.
    val bytes = using(in){ in =>
      val out = new ByteArrayOutputStream()
      val buffer = new Array[Byte](8192)
      var length = in.read(buffer)
      while(length != -1){
        out.write(buffer, 0, length)
        length = in.read(buffer)
      }
      out.toByteArray
    }
    addImage(id, bytes)
  }

  /**
   * Returns the Base64-encoded image registered under id.
   * Throws NoSuchElementException when the id is unknown (kept for
   * backward compatibility with existing callers).
   */
  def getImage(id: String): String = images(id)

  /** Registers a controller mounted at the given path. */
  def addController(path: String, controller: ControllerBase): Unit = {
    controllers += ((controller, path))
  }

  @deprecated("Use addController(path: String, controller: ControllerBase) instead", "3.4.0")
  def addController(controller: ControllerBase, path: String): Unit = {
    addController(path, controller)
  }

  /** Returns all registered (controller, mount path) pairs. */
  def getControllers(): Seq[(ControllerBase, String)] = controllers.toSeq

  /** Registers a javascript snippet served on request paths matching the given regex. */
  def addJavaScript(path: String, script: String): Unit = {
    javaScripts += ((path, script))
  }

  /** Returns every registered script whose path pattern matches currentPath. */
  def getJavaScript(currentPath: String): List[String] = {
    javaScripts.filter(x => currentPath.matches(x._1)).toList.map(_._2)
  }

  /** Registers a renderer for the given file extension. */
  def addRenderer(extension: String, renderer: Renderer): Unit = {
    renderers += ((extension, renderer))
  }

  /** Returns the renderer for the extension, falling back to DefaultRenderer. */
  def getRenderer(extension: String): Renderer = {
    renderers.getOrElse(extension, DefaultRenderer)
  }

  /** All extensions that currently have a renderer. */
  def renderableExtensions: Seq[String] = renderers.keys.toSeq

  /** Registers a git repository routing. */
  def addRepositoryRouting(routing: GitRepositoryRouting): Unit = {
    repositoryRoutings += routing
  }

  def getRepositoryRoutings(): Seq[GitRepositoryRouting] = {
    repositoryRoutings.toSeq
  }

  /** Finds the routing whose urlPath prefix-matches the given repository path, if any. */
  def getRepositoryRouting(repositoryPath: String): Option[GitRepositoryRouting] = {
    PluginRegistry().getRepositoryRoutings().find {
      case GitRepositoryRouting(urlPath, _, _) => {
        repositoryPath.matches("/" + urlPath + "(/.*)?")
      }
    }
  }

  // Shapes for plugin-contributed request handlers.
  // NOTE(review): currently unused in this class — presumably reserved for a
  // future action-registration API; confirm before removing.
  private case class GlobalAction(
    method: String,
    path: String,
    function: (HttpServletRequest, HttpServletResponse, Context) => Any
  )

  private case class RepositoryAction(
    method: String,
    path: String,
    function: (HttpServletRequest, HttpServletResponse, Context, RepositoryInfo) => Any
  )
}
/**
* Provides entry point to PluginRegistry.
*/
object PluginRegistry {

  private val logger = LoggerFactory.getLogger(classOf[PluginRegistry])

  private val instance = new PluginRegistry()

  /**
   * Returns the PluginRegistry singleton instance.
   */
  def apply(): PluginRegistry = instance

  /**
   * Initializes all installed plugins.
   *
   * Scans PluginHome for *.jar files; for each jar, loads its "Plugin" class
   * with a dedicated URLClassLoader, runs version migrations against the
   * PLUGIN table, calls the plugin's initialize hook and registers it in the
   * singleton registry. A failure in one plugin is logged and does not stop
   * the others (deliberate best-effort: Throwable is caught per jar).
   */
  def initialize(context: ServletContext, settings: SystemSettings, conn: java.sql.Connection): Unit = {
    val pluginDir = new File(PluginHome)
    if(pluginDir.exists && pluginDir.isDirectory){
      pluginDir.listFiles(new FilenameFilter {
        override def accept(dir: File, name: String): Boolean = name.endsWith(".jar")
      }).foreach { pluginJar =>
        val classLoader = new URLClassLoader(Array(pluginJar.toURI.toURL), Thread.currentThread.getContextClassLoader)
        try {
          // Each plugin jar must contain a top-level class named "Plugin"
          val plugin = classLoader.loadClass("Plugin").newInstance().asInstanceOf[Plugin]

          // Migration
          val headVersion = plugin.versions.head
          // Installed version from the PLUGIN table; 0.0 means "not installed yet"
          val currentVersion = conn.find("SELECT * FROM PLUGIN WHERE PLUGIN_ID = ?", plugin.pluginId)(_.getString("VERSION")) match {
            case Some(x) => {
              val dim = x.split("\\.")
              Version(dim(0).toInt, dim(1).toInt)
            }
            case None => Version(0, 0)
          }

          // Run pending migrations, then record the new version in the PLUGIN table
          Versions.update(conn, headVersion, currentVersion, plugin.versions, new URLClassLoader(Array(pluginJar.toURI.toURL))){ conn =>
            currentVersion.versionString match {
              case "0.0" =>
                conn.update("INSERT INTO PLUGIN (PLUGIN_ID, VERSION) VALUES (?, ?)", plugin.pluginId, headVersion.versionString)
              case _ =>
                conn.update("UPDATE PLUGIN SET VERSION = ? WHERE PLUGIN_ID = ?", headVersion.versionString, plugin.pluginId)
            }
          }

          // Initialize
          plugin.initialize(instance, context, settings)
          instance.addPlugin(PluginInfo(
            pluginId    = plugin.pluginId,
            pluginName  = plugin.pluginName,
            version     = plugin.versions.head.versionString,
            description = plugin.description,
            pluginClass = plugin
          ))

        } catch {
          case e: Throwable => {
            logger.error(s"Error during plugin initialization", e)
          }
        }
      }
    }
  }

  /**
   * Calls the shutdown hook of every registered plugin. Failures are logged
   * per plugin and do not prevent the remaining plugins from shutting down.
   */
  def shutdown(context: ServletContext, settings: SystemSettings): Unit = {
    instance.getPlugins().foreach { pluginInfo =>
      try {
        pluginInfo.pluginClass.shutdown(instance, context, settings)
      } catch {
        case e: Exception => {
          logger.error(s"Error during plugin shutdown", e)
        }
      }
    }
  }

}
/**
 * Descriptor of an installed plugin as held by the registry.
 *
 * @param pluginId unique identifier of the plugin
 * @param pluginName human readable plugin name
 * @param version installed (head) version string
 * @param description short description of the plugin
 * @param pluginClass the loaded Plugin instance backing this descriptor
 */
case class PluginInfo(
  pluginId: String,
  pluginName: String,
  version: String,
  description: String,
  pluginClass: Plugin
)
| noc06140728/gitbucket | src/main/scala/gitbucket/core/plugin/PluginRegistory.scala | Scala | apache-2.0 | 6,420 |
package com.codeseq.mcdc
import akka.io.{PipelineContext, PipelinePorts, PipelineFactory}
import akka.util.ByteString
import org.scalatest.{WordSpec, Matchers}
import scala.concurrent.duration._
/**
 * Specification for the memcache binary protocol codec: marshalling of command
 * objects to Requests, Request/Response (de)serialization to ByteString, and
 * the Akka IO pipeline stages (MemcacheFrame, MemcacheMessage).
 *
 * The `//{n` / `//}n` trailers are vim fold markers (see the modeline at EOF).
 */
class protocolSpec extends WordSpec with Matchers with Implicits {
  import MemcacheProtocol._

  "marshalling" should { //{1
    "transform a Get object to a Request" in { //{2
      validateGetRequest(0, get.toRequest)
    } //}2
    "transform a GetQ object to a Request" in { //{2
      validateGetRequest(0x09, getq.toRequest)
    } //}2
    "transform a GetK object to a Request" in { //{2
      validateGetRequest(0x0c, getk.toRequest)
    } //}2
    "transform a GetKQ object to a Request" in { //{2
      validateGetRequest(0x0d, getkq.toRequest)
    } //}2
    "transform an Add object to a Request" in { //{2
      validateAddRequest(0x02, add.toRequest)
    } //}2
    "transform an AddQ object to a Request" in { //{2
      validateAddRequest(0x12, addq.toRequest)
    } //}2
    "transform a Set object to a Request" in { //{2
      validateSetRequest(0x01, set.toRequest)
    } //}2
    "transform a SetQ object to a Request" in { //{2
      validateSetRequest(0x11, setq.toRequest)
    } //}2
    "transform a Delete object to a Request" in { //{2
      validateDeleteRequest(0x04, delete.toRequest)
    } //}2
    "transform a DeleteQ object to a Request" in { //{2
      validateDeleteRequest(0x14, deleteq.toRequest)
    } //}2
    "transform a Replace object to a Request" in { //{2
      validateReplaceRequest(0x03, replace.toRequest)
    } //}2
    "transform a ReplaceQ object to a Request" in { //{2
      validateReplaceRequest(0x13, replaceq.toRequest)
    } //}2
    "transform a Quit object to a Request" in { //{2
      validateEmptyRequest(0x07, quit.toRequest)
    } //}2
    "transform a QuitQ object to a Request" in { //{2
      validateEmptyRequest(0x17, quitq.toRequest)
    } //}2
    "transform a Noop object to a Request" in { //{2
      validateEmptyRequest(0x0a, noop.toRequest)
    } //}2
    "transform a Version object to a Request" in { //{2
      validateEmptyRequest(0x0b, version.toRequest)
    } //}2
    "properly encode a request to a ByteString" in { //{2
      val bs = get.toRequest.toByteString
      bs should be (getRequest)
    } //}2
    "properly decode a ByteString to a Response" in { //{2
      val expected = Response(ResponseHeader(0, 3, 0, 0, 0, 8, 0, 0),
        Some("key"), Some(ByteString("value")), None)
      Response.fromByteString(`key/value-response`) should be (expected)
    } //}2
  } //}1

  val ctx = new PipelineContext { }
  val frame = new MemcacheFrame

  "MemcacheFrame" should { //{1
    "extract nothing" in { //{2
      val PipelinePorts(cmd, evt, mgmt) = PipelineFactory.buildFunctionTriple(ctx, frame)
      val (segmented, _) = evt(ByteString.empty)
      segmented should be ('empty)
    } //}2
    "extract something when that's all there is" in { //{2
      val PipelinePorts(cmd, evt, mgmt) = PipelineFactory.buildFunctionTriple(ctx, frame)
      val (segmented, _) = evt(`key/value-response`)
      segmented should have size (1)
    } //}2
    "extract something when there's more but not enough for two" in { //{2
      val PipelinePorts(cmd, evt, mgmt) = PipelineFactory.buildFunctionTriple(ctx, frame)
      val (segmented, _) = evt(`key/value-response` ++ fragment)
      segmented should have size (1)
    }
    "extract two things when there's two there" in { //{2
      val PipelinePorts(cmd, evt, mgmt) = PipelineFactory.buildFunctionTriple(ctx, frame)
      val (segmented, _) = evt(`key/value-response` ++ `key/value-response`)
      segmented should have size (2)
    }
  } //}1

  // Full pipeline: message decoding stacked on top of framing.
  val message = new MemcacheMessage >> new MemcacheFrame

  "MemcacheMessage" should { //{1
    "create two responses when there's two there" in { //{2
      val PipelinePorts(cmd, evt, mgmt) = PipelineFactory.buildFunctionTriple(ctx, message)
      val (segmented, _) = evt(`key/value-response` ++ `key/value-response`)
      segmented should have size (2)
      segmented foreach { r =>
        r.header.opCode should be (0)
        r.header.keyLength should be (3)
        r.header.extrasLength should be (0)
        r.header.dataType should be (0)
        r.header.status should be (0)
        r.header.totalBodyLength should be (8)
        r.header.opaque should be (0)
        r.header.cas should be (0)
        r.key should be (Some("key"))
        r.value.map(_.utf8String) should be (Some("value"))
        r.extras should be (None)
      }
    }
  } //}1

  // Fixture command objects used by the marshalling specs above.
  val bsValue = ByteString("value")
  val get = Get("key")
  val getq = GetQ("key")
  val getk = GetK("key")
  val getkq = GetKQ("key")
  val add = Add("key", bsValue)
  val addq = AddQ("key", bsValue)
  val set = Set("key", bsValue)
  val setq = SetQ("key", bsValue)
  val replace = Replace("key", bsValue)
  val replaceq = ReplaceQ("key", bsValue)
  val delete = Delete("key")
  val deleteq = DeleteQ("key")
  val quit = Quit()
  val quitq = QuitQ()
  val noop = Noop()
  val version = Version()

  // A wire-format server response carrying key "key" and value "value".
  val `key/value-response` = ByteString(0x81.toByte,            // magic
                                        0,                      // opCode
                                        0, 3,                   // key length
                                        0,                      // extras length
                                        0,                      // data type
                                        0, 0,                   // status
                                        0, 0, 0, 8,             // total body length
                                        0, 0, 0, 0,             // opaque
                                        0, 0, 0, 0, 0, 0, 0, 0, // cas
                                        'k', 'e', 'y',          // key
                                        'v', 'a', 'l', 'u', 'e') // value
  // Deliberately incomplete bytes, used to exercise partial-frame handling.
  val fragment = ByteString(1, 2, 3)
  // The expected wire encoding of `get.toRequest`.
  val getRequest = ByteString(0x80.toByte,            // magic
                              0,                      // opCode
                              0, 3,                   // key length
                              0,                      // extras length
                              0,                      // data type
                              0, 0,                   // vbucket id
                              0, 0, 0, 3,             // total body length
                              0, 0, 0, 0,             // opaque
                              0, 0, 0, 0, 0, 0, 0, 0, // cas
                              'k', 'e', 'y')          // payload

  /** Asserts the shape of a key-only request ("key", no extras). */
  def validateGetRequest(opCode: Byte, req: Request): Unit = {
    val (h, p) = (req.header, req.payload)
    h.opCode should be (opCode)
    h.keyLength should be (3)
    h.extrasLength should be (0)
    h.vBucketId should be (0)
    h.totalBodyLength should be (3)
    h.opaque should be (0)
    h.cas should be (0)
    p should be (ByteString("key"))
  }
  /** Asserts the shape of an Add/AddQ request (8 bytes of extras + key + value). */
  def validateAddRequest(opCode: Byte, req: Request): Unit = {
    val (h, p) = (req.header, req.payload)
    h.opCode should be (opCode)
    h.keyLength should be (3)
    h.extrasLength should be (8)
    h.vBucketId should be (0)
    h.totalBodyLength should be (16)
    h.opaque should be (0)
    h.cas should be (0)
    p should be (extras(1.hour) ++ "key" ++ "value")
  }
  /** Asserts the shape of a Set/SetQ request (same layout as Add). */
  def validateSetRequest(opCode: Byte, req: Request): Unit = {
    val (h, p) = (req.header, req.payload)
    h.opCode should be (opCode)
    h.keyLength should be (3)
    h.extrasLength should be (8)
    h.vBucketId should be (0)
    h.totalBodyLength should be (16)
    h.opaque should be (0)
    h.cas should be (0)
    p should be (extras(1.hour) ++ "key" ++ "value")
  }
  /** Asserts the shape of a Delete/DeleteQ request (key only, no extras). */
  def validateDeleteRequest(opCode: Byte, req: Request): Unit = {
    val (h, p) = (req.header, req.payload)
    h.opCode should be (opCode)
    h.keyLength should be (3)
    h.extrasLength should be (0)
    h.vBucketId should be (0)
    h.totalBodyLength should be (3)
    h.opaque should be (0)
    h.cas should be (0)
    p should be (ByteString("key"))
  }
  /** Asserts the shape of a Replace/ReplaceQ request (same layout as Add). */
  def validateReplaceRequest(opCode: Byte, req: Request): Unit = {
    val (h, p) = (req.header, req.payload)
    h.opCode should be (opCode)
    h.keyLength should be (3)
    h.extrasLength should be (8)
    h.vBucketId should be (0)
    h.totalBodyLength should be (16)
    h.opaque should be (0)
    h.cas should be (0)
    p should be (extras(1.hour) ++ "key" ++ "value")
  }
  /** Asserts the shape of a body-less request (Quit, Noop, Version, ...). */
  def validateEmptyRequest(opCode: Byte, req: Request): Unit = {
    val (h, p) = (req.header, req.payload)
    h.opCode should be (opCode)
    h.keyLength should be (0)
    h.extrasLength should be (0)
    h.vBucketId should be (0)
    h.totalBodyLength should be (0)
    h.opaque should be (0)
    h.cas should be (0)
    p should be (ByteString.empty)
  }
}
// vim:fdl=1:
| derekwyatt/akka-memcached | src/test/scala/com/codeseq/mcdc/protocolSpec.scala | Scala | apache-2.0 | 8,861 |
package io.atal.butterfly
import org.scalatest._
/** Smoke test verifying that the test harness is wired up and runs. */
class ButterflyTest extends FlatSpec {

  "A simple integer" should "be equals with its same value" in {
    val value = 1
    assert(value == value)
  }
}
| Matthieu-Riou/Butterfly | src/test/scala/io/atal/butterfly/ButterflyTest.scala | Scala | mit | 181 |
package com.wavesplatform.mining
import cats.data.NonEmptyList
import com.wavesplatform.block.Block
import com.wavesplatform.features.BlockchainFeatures
import com.wavesplatform.settings.MinerSettings
import com.wavesplatform.state.Blockchain
case class MiningConstraints(total: MiningConstraint, keyBlock: MiningConstraint, micro: MiningConstraint)
object MiningConstraints {
  // Maximum number of script invocations allowed per block (pre-Ride4DApps accounting).
  val MaxScriptRunsInBlock = 100
  // Cumulative script-complexity budget per block; raised after RideV5 (SynchronousCalls).
  object MaxScriptsComplexityInBlock {
    val BeforeRideV5 = 1000000
    val AfterRideV5  = 2500000
  }
  // Transaction-count cap used before the NG feature is active.
  val ClassicAmountOfTxsInBlock = 100
  val MaxTxsSizeInBytes         = 1 * 1024 * 1024 // 1 megabyte

  /**
   * Builds the mining constraints in force at the given height, based on which
   * blockchain features were activated at that height.
   *
   * @param blockchain    source of feature-activation information
   * @param height        height at which the constraints are evaluated
   * @param minerSettings optional miner settings; when present, bounds the
   *                      transaction count of microblocks
   */
  def apply(blockchain: Blockchain, height: Int, minerSettings: Option[MinerSettings] = None): MiningConstraints = {
    val activatedFeatures     = blockchain.activatedFeaturesAt(height)
    val isNgEnabled           = activatedFeatures.contains(BlockchainFeatures.NG.id)
    val isMassTransferEnabled = activatedFeatures.contains(BlockchainFeatures.MassTransfer.id)
    val isScriptEnabled       = activatedFeatures.contains(BlockchainFeatures.SmartAccounts.id)
    val isDAppsEnabled        = activatedFeatures.contains(BlockchainFeatures.Ride4DApps.id)

    // Base limit: byte size once MassTransfer is live, otherwise a plain tx count.
    val total: MiningConstraint =
      if (isMassTransferEnabled) OneDimensionalMiningConstraint(MaxTxsSizeInBytes, TxEstimators.sizeInBytes, "MaxTxsSizeInBytes")
      else {
        val maxTxs = if (isNgEnabled) Block.MaxTransactionsPerBlockVer3 else ClassicAmountOfTxsInBlock
        OneDimensionalMiningConstraint(maxTxs, TxEstimators.one, "MaxTxs")
      }

    new MiningConstraints(
      total =
        if (isDAppsEnabled) {
          // With Ride4DApps, add a complexity budget on top of the base limit.
          val complexityLimit =
            if (blockchain.isFeatureActivated(BlockchainFeatures.SynchronousCalls)) MaxScriptsComplexityInBlock.AfterRideV5
            else MaxScriptsComplexityInBlock.BeforeRideV5
          MultiDimensionalMiningConstraint(
            NonEmptyList
              .of(OneDimensionalMiningConstraint(complexityLimit, TxEstimators.scriptsComplexity, "MaxScriptsComplexityInBlock"), total)
          )
        } else if (isScriptEnabled)
          // With SmartAccounts only, cap the number of script runs instead.
          MultiDimensionalMiningConstraint(
            NonEmptyList.of(OneDimensionalMiningConstraint(MaxScriptRunsInBlock, TxEstimators.scriptRunNumber, "MaxScriptRunsInBlock"), total)
          )
        else total,
      keyBlock =
        // Under NG the key block carries no transactions at all.
        if (isNgEnabled) OneDimensionalMiningConstraint(0, TxEstimators.one, "MaxTxsInKeyBlock")
        else OneDimensionalMiningConstraint(ClassicAmountOfTxsInBlock, TxEstimators.one, "MaxTxsInKeyBlock"),
      micro =
        if (isNgEnabled && minerSettings.isDefined)
          OneDimensionalMiningConstraint(minerSettings.get.maxTransactionsInMicroBlock, TxEstimators.one, "MaxTxsInMicroBlock")
        else MiningConstraint.Unlimited
    )
  }
}
| wavesplatform/Waves | node/src/main/scala/com/wavesplatform/mining/MiningConstraints.scala | Scala | mit | 2,772 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.csv
import java.util.Locale
import scala.util.control.Exception.allCatch
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.analysis.TypeCoercion
import org.apache.spark.sql.catalyst.expressions.ExprUtils
import org.apache.spark.sql.catalyst.util.LegacyDateFormats.FAST_DATE_FORMAT
import org.apache.spark.sql.catalyst.util.TimestampFormatter
import org.apache.spark.sql.types._
/**
 * Infers a Spark SQL schema from tokenized CSV rows. Each token's type is
 * guessed per row and the per-row types are merged into a common type per
 * column; columns whose type stays unresolved (NullType) default to StringType.
 */
class CSVInferSchema(val options: CSVOptions) extends Serializable {

  // Timestamp parser configured from the CSV options; used only for type detection.
  private val timestampParser = TimestampFormatter(
    options.timestampFormat,
    options.zoneId,
    options.locale,
    legacyFormat = FAST_DATE_FORMAT,
    isParsing = true)

  private val decimalParser = if (options.locale == Locale.US) {
    // Special handling the default locale for backward compatibility
    s: String => new java.math.BigDecimal(s)
  } else {
    ExprUtils.getDecimalParser(options.locale)
  }

  /**
   * Similar to the JSON schema inference
   *  1. Infer type of each row
   *  2. Merge row types to find common type
   *  3. Replace any null types with string type
   */
  def infer(
      tokenRDD: RDD[Array[String]],
      header: Array[String]): StructType = {
    val fields = if (options.inferSchemaFlag) {
      val startType: Array[DataType] = Array.fill[DataType](header.length)(NullType)
      val rootTypes: Array[DataType] =
        tokenRDD.aggregate(startType)(inferRowType, mergeRowTypes)
      toStructFields(rootTypes, header)
    } else {
      // By default fields are assumed to be StringType
      header.map(fieldName => StructField(fieldName, StringType, nullable = true))
    }
    StructType(fields)
  }

  /** Pairs each header name with its inferred type, mapping NullType to StringType. */
  def toStructFields(
      fieldTypes: Array[DataType],
      header: Array[String]): Array[StructField] = {
    header.zip(fieldTypes).map { case (thisHeader, rootType) =>
      val dType = rootType match {
        case _: NullType => StringType
        case other => other
      }
      StructField(thisHeader, dType, nullable = true)
    }
  }

  /** Refines the running per-column types with one row of tokens (mutates rowSoFar). */
  def inferRowType(rowSoFar: Array[DataType], next: Array[String]): Array[DataType] = {
    var i = 0
    while (i < math.min(rowSoFar.length, next.length)) { // May have columns on right missing.
      rowSoFar(i) = inferField(rowSoFar(i), next(i))
      i += 1
    }
    rowSoFar
  }

  /** Merges two per-column type arrays, falling back to NullType when incompatible. */
  def mergeRowTypes(first: Array[DataType], second: Array[DataType]): Array[DataType] = {
    first.zipAll(second, NullType, NullType).map { case (a, b) =>
      compatibleType(a, b).getOrElse(NullType)
    }
  }

  /**
   * Infer type of string field. Given known type Double, and a string "1", there is no
   * point checking if it is an Int, as the final type must be Double or higher.
   */
  def inferField(typeSoFar: DataType, field: String): DataType = {
    if (field == null || field.isEmpty || field == options.nullValue) {
      typeSoFar
    } else {
      // Start the try-parse chain at the current type so we never narrow a column.
      val typeElemInfer = typeSoFar match {
        case NullType => tryParseInteger(field)
        case IntegerType => tryParseInteger(field)
        case LongType => tryParseLong(field)
        case _: DecimalType => tryParseDecimal(field)
        case DoubleType => tryParseDouble(field)
        case TimestampType => tryParseTimestamp(field)
        case BooleanType => tryParseBoolean(field)
        case StringType => StringType
        case other: DataType =>
          throw new UnsupportedOperationException(s"Unexpected data type $other")
      }
      compatibleType(typeSoFar, typeElemInfer).getOrElse(StringType)
    }
  }

  // True when the field is one of the configured NaN/Infinity spellings.
  private def isInfOrNan(field: String): Boolean = {
    field == options.nanValue || field == options.negativeInf || field == options.positiveInf
  }

  private def tryParseInteger(field: String): DataType = {
    if ((allCatch opt field.toInt).isDefined) {
      IntegerType
    } else {
      tryParseLong(field)
    }
  }

  private def tryParseLong(field: String): DataType = {
    if ((allCatch opt field.toLong).isDefined) {
      LongType
    } else {
      tryParseDecimal(field)
    }
  }

  private def tryParseDecimal(field: String): DataType = {
    val decimalTry = allCatch opt {
      // The conversion can fail when the `field` is not a form of number.
      val bigDecimal = decimalParser(field)
      // Because many other formats do not support decimal, it reduces the cases for
      // decimals by disallowing values having scale (e.g. `1.1`).
      if (bigDecimal.scale <= 0) {
        // `DecimalType` conversion can fail when
        //   1. The precision is bigger than 38.
        //   2. scale is bigger than precision.
        DecimalType(bigDecimal.precision, bigDecimal.scale)
      } else {
        tryParseDouble(field)
      }
    }
    decimalTry.getOrElse(tryParseDouble(field))
  }

  private def tryParseDouble(field: String): DataType = {
    if ((allCatch opt field.toDouble).isDefined || isInfOrNan(field)) {
      DoubleType
    } else {
      tryParseTimestamp(field)
    }
  }

  private def tryParseTimestamp(field: String): DataType = {
    // This case infers a custom `dataFormat` is set.
    if ((allCatch opt timestampParser.parse(field)).isDefined) {
      TimestampType
    } else {
      tryParseBoolean(field)
    }
  }

  private def tryParseBoolean(field: String): DataType = {
    if ((allCatch opt field.toBoolean).isDefined) {
      BooleanType
    } else {
      stringType()
    }
  }

  // Defining a function to return the StringType constant is necessary in order to work around
  // a Scala compiler issue which leads to runtime incompatibilities with certain Spark versions;
  // see issue #128 for more details.
  private def stringType(): DataType = {
    StringType
  }

  /**
   * Returns the common data type given two input data types so that the return type
   * is compatible with both input data types.
   */
  private def compatibleType(t1: DataType, t2: DataType): Option[DataType] = {
    TypeCoercion.findTightestCommonType(t1, t2).orElse(findCompatibleTypeForCSV(t1, t2))
  }

  /**
   * The following pattern matching represents additional type promotion rules that
   * are CSV specific.
   */
  private val findCompatibleTypeForCSV: (DataType, DataType) => Option[DataType] = {
    case (StringType, t2) => Some(StringType)
    case (t1, StringType) => Some(StringType)

    // These two cases below deal with when `IntegralType` is larger than `DecimalType`.
    case (t1: IntegralType, t2: DecimalType) =>
      compatibleType(DecimalType.forType(t1), t2)
    case (t1: DecimalType, t2: IntegralType) =>
      compatibleType(t1, DecimalType.forType(t2))

    // Double support larger range than fixed decimal, DecimalType.Maximum should be enough
    // in most case, also have better precision.
    case (DoubleType, _: DecimalType) | (_: DecimalType, DoubleType) =>
      Some(DoubleType)

    case (t1: DecimalType, t2: DecimalType) =>
      val scale = math.max(t1.scale, t2.scale)
      val range = math.max(t1.precision - t1.scale, t2.precision - t2.scale)
      if (range + scale > 38) {
        // DecimalType can't support precision > 38
        Some(DoubleType)
      } else {
        Some(DecimalType(range + scale, scale))
      }

    case _ => None
  }
}
| witgo/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/CSVInferSchema.scala | Scala | apache-2.0 | 7,973 |
/*
* Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see http://www.gnu.org/licenses/agpl.html.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package generated.scala
class BooleanUnsupervisedTrainingSet(val _data: BooleanDenseMatrix)
| TiarkRompf/lancet | src/main/scala/generated/scala/BooleanUnsupervisedTrainingSet.scala | Scala | agpl-3.0 | 1,084 |
/*************************************************************************
* *
* This file is part of the 20n/act project. *
* 20n/act enables DNA prediction for synthetic biology/bioengineering. *
* Copyright (C) 2017 20n Labs, Inc. *
* *
* Please direct all queries to act@20n.com. *
* *
* This program is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program. If not, see <http://www.gnu.org/licenses/>. *
* *
*************************************************************************/
package com.act.biointerpretation.l2expansion
import java.io.File
import com.act.workflow.tool_manager.jobs.Job
import com.act.workflow.tool_manager.tool_wrappers.{ScalaJobWrapper, SparkWrapper}
import com.act.workflow.tool_manager.workflow.Workflow
import org.apache.commons.cli.{CommandLine, Options, Option => CliOption}
import org.apache.commons.io.FileUtils
import org.apache.logging.log4j.LogManager
import scala.collection.JavaConverters._
/**
 * Workflow that repeatedly runs RO-based substrate expansions on a Spark
 * cluster. After each expansion it condenses the per-RO prediction files into
 * a single list of unique InChIs, which then seeds the next expansion round.
 */
class SparkSubstrateExpansionDriverWorkflow extends Workflow {
  val DEFAULT_SPARK_MASTER = "spark://spark-master:7077"
  val LOCAL_JAR_PATH = "target/scala-2.10/reachables-assembly-0.1.jar"
  val logger = LogManager.getLogger(getClass.getName)

  override val HELP_MESSAGE = "Workflow for doing substrate expansions with Spark. " +
    "Handles reconverting the predictions to InChIs and starting the next level of expansion."

  // Short command-line option names.
  val OPTION_LICENSE_FILE = "l"
  val OPTION_SUBSTRATES_LIST = "i"
  val OPTION_OUTPUT_DIRECTORY = "o"
  val OPTION_NUMBER_OF_REPEATS = "n"
  val OPTION_SPARK_MASTER = "m"
  val OPTION_ASSEMBLED_JAR_FILE = "j"
  val OPTION_CREATE_JAR_FILE_AT_RUNTIME = "r"

  /** Declares the CLI options understood by this workflow. */
  override def getCommandLineOptions: Options = {
    val options = List[CliOption.Builder](
      CliOption.builder(OPTION_LICENSE_FILE).
        required.
        hasArg.
        longOpt("license-file")
        .desc("A path to the Chemaxon license file to load, mainly for checking license validity"),

      CliOption.builder(OPTION_SUBSTRATES_LIST).
        required.
        hasArg.
        longOpt("substrates-list").
        desc("A list of substrate InChIs onto which to project ROs"),

      CliOption.builder(OPTION_OUTPUT_DIRECTORY).
        required.
        hasArg.
        longOpt("output-directory").
        desc("A directory in which to write per-RO result files"),

      CliOption.builder(OPTION_NUMBER_OF_REPEATS).
        required.
        hasArg.
        longOpt("repeat-number").
        desc("The number of expansions that should be done"),

      CliOption.builder(OPTION_ASSEMBLED_JAR_FILE).
        hasArg.
        longOpt("assembled-jar-file").
        desc("The path to an assembled JAR from this project."),

      CliOption.builder(OPTION_CREATE_JAR_FILE_AT_RUNTIME).
        // Bug fix: this long option previously duplicated "assembled-jar-file",
        // colliding with the -j option's long form in the Options registry.
        longOpt("create-jar-file-at-runtime").
        desc("This toggle tries to use the current project as the assembled JAR file, assembling it if need be."),

      CliOption.builder(OPTION_SPARK_MASTER).
        hasArg.
        longOpt("spark-master").
        desc(s"Spark master for the spark cluster. Defaults to '$DEFAULT_SPARK_MASTER'."),

      CliOption.builder("h").argName("help").desc("Prints this help message").longOpt("help")
    )

    val opts: Options = new Options()
    for (opt <- options) {
      opts.addOption(opt.build)
    }
    opts
  }

  /**
   * Builds the job graph: (optionally) assemble the JAR, then for each
   * iteration run the RO projector on Spark and condense its output into the
   * unique-InChI file that feeds the next iteration.
   *
   * @param cl parsed command line
   * @return the head job of the assembled workflow
   */
  override def defineWorkflow(cl: CommandLine): Job = {
    // Exactly one of "supply a JAR" / "build the JAR at runtime" may be chosen.
    require(!cl.hasOption(OPTION_ASSEMBLED_JAR_FILE) || !cl.hasOption(OPTION_CREATE_JAR_FILE_AT_RUNTIME),
      s"A JAR file must either be explicitly supplied [Option: $OPTION_ASSEMBLED_JAR_FILE <File Path>] or you must " +
        s"indicate that you'd like to try to assemble the JAR file at runtime [Option $OPTION_CREATE_JAR_FILE_AT_RUNTIME]")

    val assembledJarPath = if (cl.hasOption(OPTION_ASSEMBLED_JAR_FILE)) {
      val existingJar = new File(cl.getOptionValue(OPTION_ASSEMBLED_JAR_FILE))
      require(existingJar.exists(), s"Assembled JAR file that was supplied does not exist. " +
        s"Supplied ${existingJar.getAbsolutePath}")
      existingJar
    } else {
      // Make sure to assemble jar first
      headerJob.thenRun(
        SparkWrapper.assembleJarAtRuntime(LOCAL_JAR_PATH).doNotWriteErrorStream())
      new File(LOCAL_JAR_PATH)
    }

    val repeatRange = Range(1, cl.getOptionValue(OPTION_NUMBER_OF_REPEATS).toInt + 1)

    val substrateListFile = new File(cl.getOptionValue(OPTION_SUBSTRATES_LIST))
    require(substrateListFile.exists(),
      s"Substrate list file supplied does not exist. Supplied ${substrateListFile.getAbsolutePath}")

    val chemaxonLicenseFile = new File(cl.getOptionValue(OPTION_LICENSE_FILE))
    require(chemaxonLicenseFile.exists(),
      s"Chemaxon license file supplied does not exist. Supplied ${chemaxonLicenseFile.getAbsolutePath}")

    val workingDirectory = new File(cl.getOptionValue(OPTION_OUTPUT_DIRECTORY))
    if (!workingDirectory.exists()) workingDirectory.mkdirs()

    // Drop right to remove $
    val singleSubstrateRoProjectorClassPath = SparkSingleSubstrateROProjector.getClass.getName.dropRight(1)
    val sparkMaster = cl.getOptionValue(OPTION_SPARK_MASTER, DEFAULT_SPARK_MASTER)

    // Tries to assemble JAR for spark export.  Step 1 towards Skynet is self-assembly of jar files.
    val outputInchiIdentifier = "uniqueInchisIteration"
    // Copy file so iteration 0 uses the same naming scheme as later iterations.
    FileUtils.copyFile(substrateListFile, new File(workingDirectory, s"$outputInchiIdentifier.0.txt"))

    repeatRange.foreach(iteration => {
      val iterationOutputDirectory = new File(workingDirectory, s"$iteration.ExpansionOf.${substrateListFile.getName}")
      if (!iterationOutputDirectory.exists()) iterationOutputDirectory.mkdirs()

      val outputUniqueInchiFile = new File(workingDirectory, s"$outputInchiIdentifier.$iteration.txt")
      // Input of this round is the unique-InChI file produced by the previous round.
      val substrateList = new File(workingDirectory, s"$outputInchiIdentifier.${iteration - 1}.txt")

      val roProjectionArgs = List(
        SparkSingleSubstrateROProjector.OPTION_SUBSTRATES_LIST, substrateList.getAbsolutePath,
        SparkSingleSubstrateROProjector.OPTION_OUTPUT_DIRECTORY, iterationOutputDirectory.getAbsolutePath,
        SparkSingleSubstrateROProjector.OPTION_LICENSE_FILE, chemaxonLicenseFile.getAbsolutePath,
        SparkSingleSubstrateROProjector.OPTION_FILTER_FOR_SPECTROMETERY
      )

      // Scales memory up as the size gets larger in hopes of avoiding memory issues.
      // Expansion size grows at about 50x, so memory requirements get large as size increases.
      val expansion =
        SparkWrapper.runClassPath(
          assembledJarPath.getAbsolutePath, sparkMaster)(singleSubstrateRoProjectorClassPath,
          roProjectionArgs)(memory = s"${iteration * iteration}G")

      // Condenses every per-RO prediction file of this round into one set of unique InChIs.
      val processing: () => Unit = () => {
        // Each RO has its own file.
        val allFilesInOutputDir: List[File] = iterationOutputDirectory.listFiles().toList
        logger.info(s"Found ${allFilesInOutputDir.length} projection files.")

        val allInchis: Set[String] = allFilesInOutputDir.flatMap(inputFile => {
          val predictionCorpus = L2PredictionCorpus.readPredictionsFromJsonFile(inputFile)

          val uniqueSubstrates = predictionCorpus.getUniqueSubstrateInchis.asScala.toSet
          val uniqueProducts = predictionCorpus.getUniqueProductInchis.asScala.toSet
          logger.info(s"Found ${uniqueSubstrates.size} unique substrates and ${uniqueProducts.size} unique products. " +
            s"Combining and writing them to a file.")

          uniqueProducts ++ uniqueSubstrates
        }).toSet

        val inchis = new L2InchiCorpus(allInchis.asJava)
        inchis.writeToFile(outputUniqueInchiFile)
      }
      val convertPredictionToUniqueInchis = ScalaJobWrapper.wrapScalaFunction(s"Condense $iteration into unique molecules.", processing)

      // Skip if already created
      if (!outputUniqueInchiFile.exists) {
        headerJob.thenRun(expansion.doNotWriteOutputStream())
        headerJob.thenRun(convertPredictionToUniqueInchis)
      } else {
        logger.info(s"Skipping trying to create ${outputUniqueInchiFile.getAbsolutePath} as it already exists.")
      }
    })

    headerJob
  }
}
| 20n/act | reachables/src/main/scala/com/act/biointerpretation/l2expansion/SparkSubstrateExpansionDriverWorkflow.scala | Scala | gpl-3.0 | 9,297 |
package org.jetbrains.plugins.scala.lang.refactoring.rename
import java.awt.BorderLayout
import java.util
import javax.swing.{JCheckBox, JComponent, JPanel}
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.project.Project
import com.intellij.psi.{PsiElement, PsiReference}
import com.intellij.refactoring.listeners.RefactoringElementListener
import com.intellij.refactoring.rename.{RenameDialog, RenameJavaClassProcessor}
import com.intellij.usageView.UsageInfo
import org.jetbrains.plugins.scala.ScalaBundle
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScTypeParam
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.psi.light.PsiClassWrapper
import org.jetbrains.plugins.scala.lang.scaladoc.parser.parsing.MyScaladocParsing
import org.jetbrains.plugins.scala.lang.scaladoc.psi.api.ScDocComment
import org.jetbrains.plugins.scala.settings.ScalaApplicationSettings
import scala.annotation.tailrec
/**
* User: Alexander Podkhalyuzin
* Date: 15.09.2009
*/
/**
 * Rename processor for Scala classes, objects, traits and type parameters.
 * Extends the Java class processor with Scala specifics: renaming the
 * companion module, the containing file, scaladoc @tparam tags, and the
 * synthetic ("light") classes generated for objects and traits.
 */
class RenameScalaClassProcessor extends RenameJavaClassProcessor with ScalaRenameProcessor {
  override def canProcessElement(element: PsiElement): Boolean = {
    element.isInstanceOf[ScTypeDefinition] || element.isInstanceOf[PsiClassWrapper] || element.isInstanceOf[ScTypeParam]
  }

  // For a wrapper, rename the underlying Scala definition instead.
  override def substituteElementToRename(element: PsiElement, editor: Editor): PsiElement = {
    element match {
      case wrapper: PsiClassWrapper => wrapper.definition
      case _ => element
    }
  }

  override def findReferences(element: PsiElement): util.Collection[PsiReference] = ScalaRenameUtil.replaceImportClassReferences(ScalaRenameUtil.findReferences(element))

  /**
   * Collects every additional element that must be renamed together with
   * `element` into `allRenames`: the companion module (if the setting is on),
   * the source file when it matches the type name, scaladoc @tparam values,
   * and synthetic companion classes.
   */
  override def prepareRenaming(element: PsiElement, newName: String, allRenames: util.Map[PsiElement, String]) {
    element match {
      case td: ScTypeDefinition =>
        ScalaPsiUtil.getCompanionModule(td) match {
          case Some(companion) if ScalaApplicationSettings.getInstance().RENAME_COMPANION_MODULE => allRenames.put(companion, newName)
          case _ =>
        }
        // True when `element` is not nested inside any template definition.
        @tailrec
        def isTop(element: PsiElement): Boolean = {
          element match {
            case null => true
            case _: ScTemplateDefinition => false
            case _ => isTop(element.getContext)
          }
        }
        // Rename the file too when it is named after this top-level type.
        val file = td.getContainingFile
        if (file != null && isTop(element.getContext) && file.name == td.name + ".scala") {
          allRenames.put(file, newName + ".scala")
        }
      case docTagParam: ScTypeParam =>
        // Keep scaladoc @tparam tags in sync with the renamed type parameter.
        docTagParam.owner match {
          case commentOwner: ScDocCommentOwner =>
            commentOwner.getDocComment match {
              case comment: ScDocComment =>
                comment.findTagsByName(MyScaladocParsing.TYPE_PARAM_TAG).foreach {
                  b => if (b.getValueElement != null && b.getValueElement.getText == docTagParam.name)
                    allRenames.put(b.getValueElement, newName)
                }
              case _ =>
            }
          case _ =>
        }
      case _ =>
    }

    //put rename for fake object companion class
    def addLightClasses(element: PsiElement) {
      element match {
        case o: ScObject =>
          o.fakeCompanionClass match {
            case Some(clazz) => allRenames.put(clazz, newName)
            case None =>
          }
        case t: ScTrait =>
          allRenames.put(t.fakeCompanionClass, newName + "$class")
        case _ =>
      }
    }

    import scala.collection.JavaConverters.asScalaSetConverter
    // Clone the key set: addLightClasses mutates allRenames while we iterate.
    val elems = allRenames.keySet().asScala.clone()
    elems.foreach(addLightClasses)
    ScalaElementToRenameContributor.getAll(element, newName, allRenames)
  }

  // Search strings/comments against the companion (or wrapped) class where appropriate.
  override def getElementToSearchInStringsAndComments(element: PsiElement): PsiElement = {
    element match {
      case o: ScObject => o.fakeCompanionClassOrCompanionClass
      case wrapper: PsiClassWrapper => wrapper.definition match {
        case _: ScObject => wrapper
        case definition => definition
      }
      case _ => element
    }
  }

  override def createRenameDialog(project: Project, element: PsiElement, nameSuggestionContext: PsiElement, editor: Editor): RenameDialog =
    new ScalaClassRenameDialog(project, element, nameSuggestionContext, editor)

  override def renameElement(element: PsiElement, newName: String, usages: Array[UsageInfo], listener: RefactoringElementListener) {
    ScalaRenameUtil.doRenameGenericNamedElement(element, newName, usages, listener)
  }
}
/**
 * Rename dialog that adds a "rename companion module" checkbox when the
 * renamed type definition has a companion object/trait/class.
 *
 * The early-initializer block is required so the checkbox exists before the
 * superclass constructor (which calls createCenterPanel) runs.
 */
class ScalaClassRenameDialog(project: Project, psiElement: PsiElement, nameSuggestionContext: PsiElement, editor: Editor)
  extends {
    private val chbRenameCompanion: JCheckBox = new JCheckBox("", true)
  }
  with RenameDialog(project: Project, psiElement: PsiElement, nameSuggestionContext: PsiElement, editor: Editor) {

  // Adds the companion checkbox to the standard panel; returns null when
  // there is no companion (keeping the default dialog layout).
  override def createCenterPanel(): JComponent = {
    val companion = Option(psiElement).collect {
      case definition: ScTypeDefinition => definition
    }.flatMap {
      _.baseCompanionModule
    }

    companion.collect {
      case _: ScObject => "object"
      case _: ScTrait => "trait"
      case _: ScClass => "class"
    }.foreach { text =>
      chbRenameCompanion.setText(ScalaBundle.message("rename.companion.module", text))
      chbRenameCompanion.setSelected(true)
    }

    companion.map { _ =>
      val panel = Option(super.createCenterPanel()).getOrElse {
        new JPanel(new BorderLayout())
      }
      panel.add(chbRenameCompanion, BorderLayout.WEST)
      panel
    }.orNull
  }

  // Temporarily applies the checkbox choice to the global setting for the
  // duration of the rename, then restores the default (true).
  override def performRename(newName: String) {
    ScalaApplicationSettings.getInstance().RENAME_COMPANION_MODULE = chbRenameCompanion.isSelected
    super.performRename(newName)
    ScalaApplicationSettings.getInstance().RENAME_COMPANION_MODULE = true
  }
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/refactoring/rename/RenameScalaClassProcessor.scala | Scala | apache-2.0 | 5,988 |
package controllers
/** Miscellaneous helpers for the BBS controllers: HTML escaping, name
  * defaulting, timestamp formatting and small parsing utilities. */
object Utility {

  /** Platform-specific line separator. */
  final val _BR_ = System.getProperty("line.separator")

  /** Escapes the five HTML-significant characters. `&` is replaced first so
    * that the entities produced by the later replacements are not themselves
    * re-escaped. Uses literal `replace` instead of regex `replaceAll`.
    */
  def htmlEscape(s: String): String =
    s.replace("&", "&amp;")
      .replace("<", "&lt;")
      .replace(">", "&gt;")
      .replace("\"", "&quot;")
      .replace("'", "&#39;")

  /** Substitutes the anonymous default handle for an empty poster name. */
  def nanasify(name: String): String = name match {
    case "" => "P2Pの名無しさん"
    case s  => s
  }

  /** Renders an optional Unix timestamp (seconds) as a date string, or "???"
    * when absent. */
  val Otimestamp2str: Option[Long] => String =
    (Otime: Option[Long]) => Otime.map(t => new java.util.Date(t * 1000).toString).getOrElse("???")

  /** Splits raw response bytes into fields on the literal "<>" delimiter. */
  val stream_spliting = (resData: Stream[Byte]) => new String(resData.toArray[Byte]).split("""<>""").toSeq.toList

  /** Keeps only the defined values, preserving order. */
  def filterNotEmpty[T](lis: List[Option[T]]): List[T] = lis.flatten

  // ASCII-art logo shown on the board index; runtime string, keep verbatim.
  val logo = """
|┏━━━┓┏━━━┓┏━━━┓┏━━━┓ ┃
|┃ ┃ ┃┃ ┃ ┃ ┃
|┣━━━┛┏━━━┛┣━━━┛┏━━━┛┏━━┣━━┓
|┃ ┃ ┃ ┃ ┃ ┃ ┃
|┃ ┗━━━┛┃ ┗━━━┛┗━━┃ ┃
| """.stripMargin('|')

  /** Parses a string as a Long, returning None on any parse failure
    * (replaces the previous catch-all over Throwable). */
  def string2LongOpt(s: String): Option[Long] =
    scala.util.Try(s.toLong).toOption
}
| windymelt/p2p2ch | app/controllers/Utility.scala | Scala | bsd-3-clause | 1,565 |
package se.lu.nateko.cp.meta.test.persistence.postgres
import se.lu.nateko.cp.meta.persistence.postgres.PostgresRdfLog
import org.eclipse.rdf4j.model.impl.SimpleValueFactory
import se.lu.nateko.cp.meta.persistence.postgres._
import se.lu.nateko.cp.meta.instanceserver.RdfUpdate
import se.lu.nateko.cp.meta.instanceserver.InstanceServer
import se.lu.nateko.cp.meta.instanceserver.LoggingInstanceServer
import se.lu.nateko.cp.meta.test.TestConfig
import se.lu.nateko.cp.meta.persistence.RdfUpdateLogIngester
import se.lu.nateko.cp.meta.instanceserver.Rdf4jInstanceServer
import se.lu.nateko.cp.meta.ConfigLoader
/** Ad-hoc console helpers for experimenting with the Postgres-backed RDF log. */
object Manual {

  val factory = SimpleValueFactory.getInstance()

  /** Opens a log backed by the "rdflog" table using the default app config. */
  def getLog: PostgresRdfLog =
    PostgresRdfLog("rdflog", ConfigLoader.default.rdfLog, factory)

  /** Wraps the test instance server so every change is mirrored into the log. */
  def getServer: InstanceServer = new LoggingInstanceServer(TestConfig.instServer, getLog)

  /** Re-initializes the log from the test server's statements, then appends
    * `ncycles` retract-everything/assert-everything round trips.
    */
  def fromInstFileToDb(ncycles: Int): Unit = {
    val log = getLog
    val statements = TestConfig.instServer.getStatements(None, None, None).toIndexedSeq
    val assertions = statements.map(RdfUpdate(_, true))
    val retractions = statements.map(RdfUpdate(_, false))
    log.dropLog()
    log.initLog()
    log.appendAll(assertions)
    (1 to ncycles).foreach { _ =>
      log.appendAll(retractions)
      log.appendAll(assertions)
    }
    log.close()
  }

  /** Replays the whole log into an in-memory repository and wraps it. */
  def serverFromLog: InstanceServer = {
    val log = getLog
    val context = factory.createIRI(TestConfig.instOntUri)
    val repo = RdfUpdateLogIngester.ingestIntoMemory(log.updates, context)
    log.close()
    new Rdf4jInstanceServer(repo, context)
  }
}
package com.malaska.spark.training.timeseries
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import scala.collection.mutable
object SessionWindowing {
  Logger.getLogger("org").setLevel(Level.OFF)
  Logger.getLogger("akka").setLevel(Level.OFF)

  /**
   * Sessionizes timestamped JSON events: events are grouped by their `group`
   * field, sorted by timestamp, and split into sessions wherever the gap
   * between consecutive events exceeds `timeGap`.
   *
   * args(0) = path to the JSON event file
   * args(1) = maximum intra-session gap (same units as the `ts` field)
   */
  def main(args: Array[String]): Unit = {
    val sessionJson = args(0)
    val timeGap = args(1).toInt
    val isLocal = true

    val sparkSession = if (isLocal) {
      SparkSession.builder
        .master("local")
        .appName("my-spark-app")
        .config("spark.some.config.option", "config-value")
        .config("spark.driver.host", "127.0.0.1")
        .config("spark.sql.parquet.compression.codec", "gzip")
        .enableHiveSupport()
        .getOrCreate()
    } else {
      SparkSession.builder
        .appName("my-spark-app")
        .config("spark.some.config.option", "config-value")
        .enableHiveSupport()
        .getOrCreate()
    }

    println("---")

    import sparkSession.implicits._
    val sessionDs = sparkSession.read.json(sessionJson).as[JsonLeadLag]
    sessionDs.createOrReplaceTempView("session_table")
    sparkSession.sql("select * from session_table").collect().foreach(println)

    val sessionDefinitionDf = sessionDs.rdd.map(r => {
      (r.group, r)
    }).groupByKey().flatMap { case (group, jsonObjIt) =>
      var sessionStart: Long = -1 // first ts of the session being built
      var lastTs: Long = -1       // most recent ts seen
      var eventsInASession = 0
      val sessionList = new mutable.MutableList[SessionDefinition]

      jsonObjIt.toSeq.sortBy(r => r.ts).foreach(record => {
        val ts = record.ts
        if (sessionStart == -1) {
          sessionStart = ts
        } else if (ts > lastTs + timeGap) {
          // Gap exceeded: close the previous session *before* counting the
          // current event, so the event that opens a new session is no longer
          // credited to the old one (and no longer lost from the new one).
          sessionList += SessionDefinition(group, sessionStart, lastTs, lastTs - sessionStart, eventsInASession)
          sessionStart = ts
          eventsInASession = 0
        }
        eventsInASession += 1
        lastTs = ts
      })

      // Emit the final, still-open session — the original loop dropped it.
      if (sessionStart != -1) {
        sessionList += SessionDefinition(group, sessionStart, lastTs, lastTs - sessionStart, eventsInASession)
      }
      sessionList
    }

    sessionDefinitionDf.collect().foreach(println)
  }
}
// One closed session for `group`: its [sessionStart, sessionEnd] bounds, the
// length (end - start, same units as the input `ts`), and the event count
// recorded for the session.
case class SessionDefinition(group:String, sessionStart:Long, sessionEnd:Long, sessionLength:Long, sessionEvents:Int)
| TedBear42/spark_training | src/main/scala/com/malaska/spark/training/timeseries/SessionWindowing.scala | Scala | apache-2.0 | 2,159 |
package org.randi3.simulation
import org.junit.runner.RunWith
import org.scalatest.matchers.MustMatchers
import org.scalatest.FunSpec
import org.scalatest.junit.JUnitRunner
import org.randi3.model.criterion.OrdinalCriterion
import org.randi3.simulation.distributions.OrdinalCriterionEqualDistributed
import org.randi3.simulation.distributions.OrdinalCriterionFixedRatio
import org.randi3.simulation.utility.TestingEnvironmentSimulation._
import org.randi3.simulation.service.SimulationUtil._
import org.randi3.randomization.CompleteRandomization
import org.apache.commons.math3.random.MersenneTwister
import org.randi3.simulation.distributions.CriterionDistribution
import org.apache.commons.math3.distribution.IntegerDistribution
import org.randi3.simulation.distributions.TrialSiteDistribution
import org.randi3.model.criterion.Criterion
import org.randi3.model.criterion.constraint.Constraint
import org.randi3.simulation.distributions.TrialSiteDistribution
import org.apache.commons.math3.distribution.PascalDistribution
import org.randi3.simulation.model._
// Smoke test for the simulation pipeline: builds a minimal single-stage
// scenario and checks only that `simulate` runs to completion.
@RunWith(classOf[JUnitRunner])
class SimulationUtilSpec extends FunSpec with MustMatchers {
  describe("The SimulationUtilSpec simulate method") {
    it("should be able to simulate a trial without stages") {
      // Single binary ordinal criterion ("m"/"w"), uniformly distributed,
      // seeded with the current time (so runs are not reproducible).
      val ordinalCriterion = OrdinalCriterion(id=1, name = "ordinalCrit", description = "criterion", values = Set("m", "w"), inclusionConstraint = None, strata = List()).toOption.get
      val ordinalCriterionEqualD = new OrdinalCriterionEqualDistributed(ordinalCriterion, System.currentTimeMillis)
      val method = randomMethod.toOption.get
      val plainTrial = createTrial.copy(criterions = List(ordinalCriterion), randomizationMethod = Some(method))
      // All simulated subjects come from the first participating site.
      val trialSiteDistribution = new TrialSiteDistribution(System.currentTimeMillis(), Map(plainTrial.participatingSites.head -> 1))
      val simulationScenario = new SimulationScenario {
        val trial = plainTrial
        val criterionProbability = List(ordinalCriterionEqualD).asInstanceOf[List[CriterionDistribution[Any]]]
        def stageProbabilities: List[(String, List[CriterionDistribution[Any]], PascalDistribution)] = Nil
        def siteRatio: TrialSiteDistribution = trialSiteDistribution
        def randomizationMethods = List(("complete", method))
      }
      // 100 simulation runs; no assertions — passing means "did not throw".
      val res = simulate(simulationScenario, 100, System.currentTimeMillis())
      // println(res.duration / 1000)
      // println(res.results.head.marginaBalances)
      // val aus = res.map(trial => (trial.treatmentArms.head.name + "-" + trial.treatmentArms.head.subjects.size, trial.treatmentArms.last.name + "-" + trial.treatmentArms.last.subjects.size))
      // aus.foreach(println(_))
    }
  }
} | dschrimpf/randi3-simulation | src/test/scala/org/randi3/simulation/SimulationUtilSpec.scala | Scala | gpl-3.0 | 2,716 |
// NOTE(review): appears to be a minimal scalac regression test (pos/t9542,
// judging by the path): an extension method on a value class whose parameter
// is a function type over a nested trait must compile. The exact shape *is*
// the test — do not "simplify" it.
object O {
  trait T
  class VC(val self: Any) extends AnyVal {
    def extMethod(f: F1[T, Any]) = ()
  }
}
trait F1[A, B]
| loskutov/intellij-scala | testdata/scalacTests/pos/t9542.scala | Scala | apache-2.0 | 124 |
package com.blinkbox.books.schemas.ingestion.file.pending.v2
import java.net.URI
import com.blinkbox.books.messaging.{EventBody, MediaType, JsonEventBody}
import org.joda.time.DateTime
/** Message schema (v2) announcing a newly delivered file awaiting ingestion. */
object FilePending {
  // Top-level message payload.
  case class Details(source: FileSource)
  // Where the file came from and who delivered it.
  case class FileSource(
    deliveredAt: DateTime,
    uri: URI,
    username: String,
    fileName: String,
    contentType: String,
    system: SystemDetails,
    role: String = "publisher_ftp"
  )
  // The component that produced the message.
  case class SystemDetails(
    name: String = "Marvin/watcher",
    version: String
  )
  // JSON (de)serialization support: binds Details to its versioned media type
  // and lets an EventBody be pattern-matched back into a FileSource.
  implicit object Details extends JsonEventBody[Details] {
    val jsonMediaType = MediaType("application/vnd.blinkbox.books.ingestion.file.pending.v2+json")
    def unapply(body: EventBody): Option[(FileSource)] = JsonEventBody.unapply[Details](body).flatMap(Details.unapply)
  }
} | blinkboxbooks/message-schemas.scala | src/main/scala/com/blinkbox/books/schemas/ingestion/file/pending/v2/Schema.scala | Scala | mit | 822 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io.NotSerializableException
import java.nio.ByteBuffer
import java.util.concurrent.ConcurrentLinkedQueue
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
import scala.math.max
import scala.util.control.NonFatal
import org.apache.spark._
import org.apache.spark.TaskState.TaskState
import org.apache.spark.internal.{config, Logging}
import org.apache.spark.scheduler.SchedulingMode._
import org.apache.spark.util.{AccumulatorV2, Clock, SystemClock, Utils}
import org.apache.spark.util.collection.MedianHeap
/**
* Schedules the tasks within a single TaskSet in the TaskSchedulerImpl. This class keeps track of
* each task, retries tasks if they fail (up to a limited number of times), and
* handles locality-aware scheduling for this TaskSet via delay scheduling. The main interfaces
* to it are resourceOffer, which asks the TaskSet whether it wants to run a task on one node,
* and statusUpdate, which tells it that one of its tasks changed state (e.g. finished).
*
* THREADING: This class is designed to only be called from code with a lock on the
* TaskScheduler (e.g. its event handlers). It should not be called from other threads.
*
* @param sched the TaskSchedulerImpl associated with the TaskSetManager
* @param taskSet the TaskSet to manage scheduling for
* @param maxTaskFailures if any particular task fails this number of times, the entire
* task set will be aborted
*/
private[spark] class TaskSetManager(
sched: TaskSchedulerImpl,
val taskSet: TaskSet,
val maxTaskFailures: Int,
blacklistTracker: Option[BlacklistTracker] = None,
clock: Clock = new SystemClock()) extends Schedulable with Logging {
private val conf = sched.sc.conf
// SPARK-21563 make a copy of the jars/files so they are consistent across the TaskSet
private val addedJars = HashMap[String, Long](sched.sc.addedJars.toSeq: _*)
private val addedFiles = HashMap[String, Long](sched.sc.addedFiles.toSeq: _*)
// Quantile of tasks at which to start speculation
val SPECULATION_QUANTILE = conf.getDouble("spark.speculation.quantile", 0.75)
val SPECULATION_MULTIPLIER = conf.getDouble("spark.speculation.multiplier", 1.5)
val maxResultSize = conf.get(config.MAX_RESULT_SIZE)
val speculationEnabled = conf.getBoolean("spark.speculation", false)
// Serializer for closures and tasks.
val env = SparkEnv.get
val ser = env.closureSerializer.newInstance()
val tasks = taskSet.tasks
private[scheduler] val partitionToIndex = tasks.zipWithIndex
.map { case (t, idx) => t.partitionId -> idx }.toMap
val numTasks = tasks.length
val copiesRunning = new Array[Int](numTasks)
// For each task, tracks whether a copy of the task has succeeded. A task will also be
// marked as "succeeded" if it failed with a fetch failure, in which case it should not
// be re-run because the missing map data needs to be regenerated first.
val successful = new Array[Boolean](numTasks)
private val numFailures = new Array[Int](numTasks)
// Add the tid of task into this HashSet when the task is killed by other attempt tasks.
// This happened while we set the `spark.speculation` to true. The task killed by others
// should not resubmit while executor lost.
private val killedByOtherAttempt = new HashSet[Long]
val taskAttempts = Array.fill[List[TaskInfo]](numTasks)(Nil)
private[scheduler] var tasksSuccessful = 0
val weight = 1
val minShare = 0
var priority = taskSet.priority
var stageId = taskSet.stageId
val name = "TaskSet_" + taskSet.id
var parent: Pool = null
private var totalResultSize = 0L
private var calculatedTasks = 0
private[scheduler] val taskSetBlacklistHelperOpt: Option[TaskSetBlacklist] = {
blacklistTracker.map { _ =>
new TaskSetBlacklist(sched.sc.listenerBus, conf, stageId, taskSet.stageAttemptId, clock)
}
}
private[scheduler] val runningTasksSet = new HashSet[Long]
override def runningTasks: Int = runningTasksSet.size
def someAttemptSucceeded(tid: Long): Boolean = {
successful(taskInfos(tid).index)
}
// True once no more tasks should be launched for this task set manager. TaskSetManagers enter
// the zombie state once at least one attempt of each task has completed successfully, or if the
// task set is aborted (for example, because it was killed). TaskSetManagers remain in the zombie
// state until all tasks have finished running; we keep TaskSetManagers that are in the zombie
// state in order to continue to track and account for the running tasks.
// TODO: We should kill any running task attempts when the task set manager becomes a zombie.
private[scheduler] var isZombie = false
// Set of pending tasks for each executor. These collections are actually
// treated as stacks, in which new tasks are added to the end of the
// ArrayBuffer and removed from the end. This makes it faster to detect
// tasks that repeatedly fail because whenever a task failed, it is put
// back at the head of the stack. These collections may contain duplicates
// for two reasons:
// (1): Tasks are only removed lazily; when a task is launched, it remains
// in all the pending lists except the one that it was launched from.
// (2): Tasks may be re-added to these lists multiple times as a result
// of failures.
// Duplicates are handled in dequeueTaskFromList, which ensures that a
// task hasn't already started running before launching it.
private val pendingTasksForExecutor = new HashMap[String, ArrayBuffer[Int]]
// Set of pending tasks for each host. Similar to pendingTasksForExecutor,
// but at host level.
private val pendingTasksForHost = new HashMap[String, ArrayBuffer[Int]]
// Set of pending tasks for each rack -- similar to the above.
private val pendingTasksForRack = new HashMap[String, ArrayBuffer[Int]]
// Set containing pending tasks with no locality preferences.
private[scheduler] var pendingTasksWithNoPrefs = new ArrayBuffer[Int]
// Set containing all pending tasks (also used as a stack, as above).
private val allPendingTasks = new ArrayBuffer[Int]
// Tasks that can be speculated. Since these will be a small fraction of total
// tasks, we'll just hold them in a HashSet.
private[scheduler] val speculatableTasks = new HashSet[Int]
// Task index, start and finish time for each task attempt (indexed by task ID)
private[scheduler] val taskInfos = new HashMap[Long, TaskInfo]
// Use a MedianHeap to record durations of successful tasks so we know when to launch
// speculative tasks. This is only used when speculation is enabled, to avoid the overhead
// of inserting into the heap when the heap won't be used.
val successfulTaskDurations = new MedianHeap()
// How frequently to reprint duplicate exceptions in full, in milliseconds
val EXCEPTION_PRINT_INTERVAL =
conf.getLong("spark.logging.exceptionPrintInterval", 10000)
// Map of recent exceptions (identified by string representation and top stack frame) to
// duplicate count (how many times the same exception has appeared) and time the full exception
// was printed. This should ideally be an LRU map that can drop old exceptions automatically.
private val recentExceptions = HashMap[String, (Int, Long)]()
// Figure out the current map output tracker epoch and set it on all tasks
val epoch = sched.mapOutputTracker.getEpoch
logDebug("Epoch for " + taskSet + ": " + epoch)
for (t <- tasks) {
t.epoch = epoch
}
// Add all our tasks to the pending lists. We do this in reverse order
// of task index so that tasks with low indices get launched first.
for (i <- (0 until numTasks).reverse) {
addPendingTask(i)
}
/**
* Track the set of locality levels which are valid given the tasks locality preferences and
* the set of currently available executors. This is updated as executors are added and removed.
* This allows a performance optimization, of skipping levels that aren't relevant (eg., skip
* PROCESS_LOCAL if no tasks could be run PROCESS_LOCAL for the current set of executors).
*/
private[scheduler] var myLocalityLevels = computeValidLocalityLevels()
// Time to wait at each level
private[scheduler] var localityWaits = myLocalityLevels.map(getLocalityWait)
// Delay scheduling variables: we keep track of our current locality level and the time we
// last launched a task at that level, and move up a level when localityWaits[curLevel] expires.
// We then move down if we manage to launch a "more local" task.
private var currentLocalityIndex = 0 // Index of our current locality level in validLocalityLevels
private var lastLaunchTime = clock.getTimeMillis() // Time we last launched a task at this level
override def schedulableQueue: ConcurrentLinkedQueue[Schedulable] = null
override def schedulingMode: SchedulingMode = SchedulingMode.NONE
private[scheduler] var emittedTaskSizeWarning = false
  /** Add a task to all the pending-task lists that it should be on. */
  private[spark] def addPendingTask(index: Int) {
    for (loc <- tasks(index).preferredLocations) {
      loc match {
        case e: ExecutorCacheTaskLocation =>
          pendingTasksForExecutor.getOrElseUpdate(e.executorId, new ArrayBuffer) += index
        case e: HDFSCacheTaskLocation =>
          // For HDFS-cached data the task is made pending on *every* executor
          // currently alive on the caching host.
          val exe = sched.getExecutorsAliveOnHost(loc.host)
          exe match {
            case Some(set) =>
              for (e <- set) {
                pendingTasksForExecutor.getOrElseUpdate(e, new ArrayBuffer) += index
              }
              // (`e` here is the outer HDFSCacheTaskLocation; the loop's `e`
              // is out of scope by this point.)
              logInfo(s"Pending task $index has a cached location at ${e.host} " +
                ", where there are executors " + set.mkString(","))
            case None => logDebug(s"Pending task $index has a cached location at ${e.host} " +
                ", but there are no executors alive there.")
          }
        case _ =>
      }
      // Regardless of the location subtype, also index by host and rack.
      pendingTasksForHost.getOrElseUpdate(loc.host, new ArrayBuffer) += index
      for (rack <- sched.getRackForHost(loc.host)) {
        pendingTasksForRack.getOrElseUpdate(rack, new ArrayBuffer) += index
      }
    }
    if (tasks(index).preferredLocations == Nil) {
      pendingTasksWithNoPrefs += index
    }
    allPendingTasks += index // No point scanning this whole list to find the old task there
  }
  /**
   * Return the pending tasks list for a given executor ID, or an empty list if
   * there is no map entry for that executor.
   *
   * Note: when an entry exists, the *live* internal buffer is returned, so
   * callers such as dequeueTaskFromList mutate the real pending list.
   */
  private def getPendingTasksForExecutor(executorId: String): ArrayBuffer[Int] = {
    pendingTasksForExecutor.getOrElse(executorId, ArrayBuffer())
  }
  /**
   * Return the pending tasks list for a given host, or an empty list if
   * there is no map entry for that host
   */
  private def getPendingTasksForHost(host: String): ArrayBuffer[Int] = {
    pendingTasksForHost.getOrElse(host, ArrayBuffer())
  }
  /**
   * Return the pending rack-local task list for a given rack, or an empty list if
   * there is no map entry for that rack
   */
  private def getPendingTasksForRack(rack: String): ArrayBuffer[Int] = {
    pendingTasksForRack.getOrElse(rack, ArrayBuffer())
  }
  /**
   * Dequeue a pending task from the given list and return its index.
   * Return None if the list is empty.
   * This method also cleans up any tasks in the list that have already
   * been launched, since we want that to happen lazily.
   */
  private def dequeueTaskFromList(
      execId: String,
      host: String,
      list: ArrayBuffer[Int]): Option[Int] = {
    // Scan from the tail: tasks are (re-)added at the end of the list, and
    // removal from the end of an ArrayBuffer is cheap.
    var indexOffset = list.size
    while (indexOffset > 0) {
      indexOffset -= 1
      val index = list(indexOffset)
      if (!isTaskBlacklistedOnExecOrNode(index, execId, host)) {
        // This should almost always be list.trimEnd(1) to remove tail
        list.remove(indexOffset)
        // Skip (and drop) entries already running or finished — lists may
        // contain duplicates and stale entries by design.
        if (copiesRunning(index) == 0 && !successful(index)) {
          return Some(index)
        }
      }
    }
    None
  }
  /** Check whether a task once ran an attempt on a given host (running or finished). */
  private def hasAttemptOnHost(taskIndex: Int, host: String): Boolean = {
    taskAttempts(taskIndex).exists(_.host == host)
  }
  /** True if either the node or the specific executor is blacklisted for this task. */
  private def isTaskBlacklistedOnExecOrNode(index: Int, execId: String, host: String): Boolean = {
    taskSetBlacklistHelperOpt.exists { blacklist =>
      blacklist.isNodeBlacklistedForTask(host, index) ||
        blacklist.isExecutorBlacklistedForTask(execId, index)
    }
  }
  /**
   * Return a speculative task for a given executor if any are available. The task should not have
   * an attempt running on this host, in case the host is slow. In addition, the task should meet
   * the given locality constraint.
   *
   * Candidates are tried in increasing locality distance: process-local,
   * node-local, no-preference, rack-local, then any.
   *
   * @return the chosen task's index and the locality level it satisfies,
   *         or None if no speculatable task fits this offer
   */
  // Labeled as protected to allow tests to override providing speculative tasks if necessary
  protected def dequeueSpeculativeTask(execId: String, host: String, locality: TaskLocality.Value)
    : Option[(Int, TaskLocality.Value)] =
  {
    speculatableTasks.retain(index => !successful(index)) // Remove finished tasks from set
    def canRunOnHost(index: Int): Boolean = {
      !hasAttemptOnHost(index, host) &&
        !isTaskBlacklistedOnExecOrNode(index, execId, host)
    }
    if (!speculatableTasks.isEmpty) {
      // Check for process-local tasks; note that tasks can be process-local
      // on multiple nodes when we replicate cached blocks, as in Spark Streaming
      for (index <- speculatableTasks if canRunOnHost(index)) {
        val prefs = tasks(index).preferredLocations
        val executors = prefs.flatMap(_ match {
          case e: ExecutorCacheTaskLocation => Some(e.executorId)
          case _ => None
        });
        if (executors.contains(execId)) {
          speculatableTasks -= index
          return Some((index, TaskLocality.PROCESS_LOCAL))
        }
      }
      // Check for node-local tasks
      if (TaskLocality.isAllowed(locality, TaskLocality.NODE_LOCAL)) {
        for (index <- speculatableTasks if canRunOnHost(index)) {
          val locations = tasks(index).preferredLocations.map(_.host)
          if (locations.contains(host)) {
            speculatableTasks -= index
            return Some((index, TaskLocality.NODE_LOCAL))
          }
        }
      }
      // Check for no-preference tasks (reported as PROCESS_LOCAL)
      if (TaskLocality.isAllowed(locality, TaskLocality.NO_PREF)) {
        for (index <- speculatableTasks if canRunOnHost(index)) {
          val locations = tasks(index).preferredLocations
          if (locations.size == 0) {
            speculatableTasks -= index
            return Some((index, TaskLocality.PROCESS_LOCAL))
          }
        }
      }
      // Check for rack-local tasks
      if (TaskLocality.isAllowed(locality, TaskLocality.RACK_LOCAL)) {
        for (rack <- sched.getRackForHost(host)) {
          for (index <- speculatableTasks if canRunOnHost(index)) {
            val racks = tasks(index).preferredLocations.map(_.host).flatMap(sched.getRackForHost)
            if (racks.contains(rack)) {
              speculatableTasks -= index
              return Some((index, TaskLocality.RACK_LOCAL))
            }
          }
        }
      }
      // Check for non-local tasks
      if (TaskLocality.isAllowed(locality, TaskLocality.ANY)) {
        for (index <- speculatableTasks if canRunOnHost(index)) {
          speculatableTasks -= index
          return Some((index, TaskLocality.ANY))
        }
      }
    }
    None
  }
  /**
   * Dequeue a pending task for a given node and return its index and locality level.
   * Only search for tasks matching the given locality constraint.
   *
   * Searches in increasing locality distance — executor, host, no-pref, rack,
   * any — and falls back to a speculative task last.
   *
   * @return An option containing (task index within the task set, locality, is speculative?)
   */
  private def dequeueTask(execId: String, host: String, maxLocality: TaskLocality.Value)
    : Option[(Int, TaskLocality.Value, Boolean)] =
  {
    for (index <- dequeueTaskFromList(execId, host, getPendingTasksForExecutor(execId))) {
      return Some((index, TaskLocality.PROCESS_LOCAL, false))
    }
    if (TaskLocality.isAllowed(maxLocality, TaskLocality.NODE_LOCAL)) {
      for (index <- dequeueTaskFromList(execId, host, getPendingTasksForHost(host))) {
        return Some((index, TaskLocality.NODE_LOCAL, false))
      }
    }
    if (TaskLocality.isAllowed(maxLocality, TaskLocality.NO_PREF)) {
      // Look for noPref tasks after NODE_LOCAL for minimize cross-rack traffic
      // (noPref tasks are reported with PROCESS_LOCAL locality)
      for (index <- dequeueTaskFromList(execId, host, pendingTasksWithNoPrefs)) {
        return Some((index, TaskLocality.PROCESS_LOCAL, false))
      }
    }
    if (TaskLocality.isAllowed(maxLocality, TaskLocality.RACK_LOCAL)) {
      for {
        rack <- sched.getRackForHost(host)
        index <- dequeueTaskFromList(execId, host, getPendingTasksForRack(rack))
      } {
        return Some((index, TaskLocality.RACK_LOCAL, false))
      }
    }
    if (TaskLocality.isAllowed(maxLocality, TaskLocality.ANY)) {
      for (index <- dequeueTaskFromList(execId, host, allPendingTasks)) {
        return Some((index, TaskLocality.ANY, false))
      }
    }
    // find a speculative task if all others tasks have been scheduled
    dequeueSpeculativeTask(execId, host, maxLocality).map {
      case (taskIndex, allowedLocality) => (taskIndex, allowedLocality, true)}
  }
  /**
   * Respond to an offer of a single executor from the scheduler by finding a task
   *
   * NOTE: this function is either called with a maxLocality which
   * would be adjusted by delay scheduling algorithm or it will be with a special
   * NO_PREF locality which will be not modified
   *
   * @param execId the executor Id of the offered resource
   * @param host  the host Id of the offered resource
   * @param maxLocality the maximum locality we want to schedule the tasks at
   * @return Some(TaskDescription) for the task chosen to launch on this
   *         executor, or None if nothing can be scheduled (offer blacklisted,
   *         manager is a zombie, or no pending task fits the locality)
   */
  @throws[TaskNotSerializableException]
  def resourceOffer(
      execId: String,
      host: String,
      maxLocality: TaskLocality.TaskLocality)
    : Option[TaskDescription] =
  {
    val offerBlacklisted = taskSetBlacklistHelperOpt.exists { blacklist =>
      blacklist.isNodeBlacklistedForTaskSet(host) ||
        blacklist.isExecutorBlacklistedForTaskSet(execId)
    }
    if (!isZombie && !offerBlacklisted) {
      val curTime = clock.getTimeMillis()
      var allowedLocality = maxLocality
      if (maxLocality != TaskLocality.NO_PREF) {
        allowedLocality = getAllowedLocalityLevel(curTime)
        if (allowedLocality > maxLocality) {
          // We're not allowed to search for farther-away tasks
          allowedLocality = maxLocality
        }
      }
      dequeueTask(execId, host, allowedLocality).map { case ((index, taskLocality, speculative)) =>
        // Found a task; do some bookkeeping and return a task description
        val task = tasks(index)
        val taskId = sched.newTaskId()
        // Do various bookkeeping (mutates per-task scheduling state)
        copiesRunning(index) += 1
        val attemptNum = taskAttempts(index).size
        val info = new TaskInfo(taskId, index, attemptNum, curTime,
          execId, host, taskLocality, speculative)
        taskInfos(taskId) = info
        taskAttempts(index) = info :: taskAttempts(index)
        // Update our locality level for delay scheduling
        // NO_PREF will not affect the variables related to delay scheduling
        if (maxLocality != TaskLocality.NO_PREF) {
          currentLocalityIndex = getLocalityIndex(taskLocality)
          lastLaunchTime = curTime
        }
        // Serialize and return the task
        val serializedTask: ByteBuffer = try {
          ser.serialize(task)
        } catch {
          // If the task cannot be serialized, then there's no point to re-attempt the task,
          // as it will always fail. So just abort the whole task-set.
          case NonFatal(e) =>
            val msg = s"Failed to serialize task $taskId, not attempting to retry it."
            logError(msg, e)
            abort(s"$msg Exception during serialization: $e")
            throw new TaskNotSerializableException(e)
        }
        if (serializedTask.limit() > TaskSetManager.TASK_SIZE_TO_WARN_KB * 1024 &&
          !emittedTaskSizeWarning) {
          emittedTaskSizeWarning = true
          logWarning(s"Stage ${task.stageId} contains a task of very large size " +
            s"(${serializedTask.limit() / 1024} KB). The maximum recommended task size is " +
            s"${TaskSetManager.TASK_SIZE_TO_WARN_KB} KB.")
        }
        addRunningTask(taskId)
        // We used to log the time it takes to serialize the task, but task size is already
        // a good proxy to task serialization time.
        // val timeTaken = clock.getTime() - startTime
        val taskName = s"task ${info.id} in stage ${taskSet.id}"
        logInfo(s"Starting $taskName (TID $taskId, $host, executor ${info.executorId}, " +
          s"partition ${task.partitionId}, $taskLocality, ${serializedTask.limit()} bytes)")
        sched.dagScheduler.taskStarted(task, info)
        new TaskDescription(
          taskId,
          attemptNum,
          execId,
          taskName,
          index,
          addedFiles,
          addedJars,
          task.localProperties,
          serializedTask)
      }
    } else {
      None
    }
  }
  /**
   * If this manager is a zombie and its last running task has finished,
   * deregister the TaskSet with the scheduler and, when every task succeeded,
   * feed the per-executor failure data into the application-level blacklist.
   */
  private def maybeFinishTaskSet() {
    if (isZombie && runningTasks == 0) {
      sched.taskSetFinished(this)
      if (tasksSuccessful == numTasks) {
        // The .get is safe: taskSetBlacklistHelperOpt is defined exactly when
        // blacklistTracker is (both derive from the same Option).
        blacklistTracker.foreach(_.updateBlacklistForSuccessfulTaskSet(
          taskSet.stageId,
          taskSet.stageAttemptId,
          taskSetBlacklistHelperOpt.get.execToFailures))
      }
    }
  }
  /**
   * Get the level we can launch tasks according to delay scheduling, based on current wait time.
   *
   * Side effects: may advance currentLocalityIndex and adjust lastLaunchTime
   * (when a level is exhausted or its wait has expired), and lazily prunes
   * already-scheduled tasks from the pending lists.
   */
  private def getAllowedLocalityLevel(curTime: Long): TaskLocality.TaskLocality = {
    // Remove the scheduled or finished tasks lazily
    def tasksNeedToBeScheduledFrom(pendingTaskIds: ArrayBuffer[Int]): Boolean = {
      var indexOffset = pendingTaskIds.size
      while (indexOffset > 0) {
        indexOffset -= 1
        val index = pendingTaskIds(indexOffset)
        if (copiesRunning(index) == 0 && !successful(index)) {
          return true
        } else {
          pendingTaskIds.remove(indexOffset)
        }
      }
      false
    }
    // Walk through the list of tasks that can be scheduled at each location and returns true
    // if there are any tasks that still need to be scheduled. Lazily cleans up tasks that have
    // already been scheduled.
    def moreTasksToRunIn(pendingTasks: HashMap[String, ArrayBuffer[Int]]): Boolean = {
      val emptyKeys = new ArrayBuffer[String]
      val hasTasks = pendingTasks.exists {
        case (id: String, tasks: ArrayBuffer[Int]) =>
          if (tasksNeedToBeScheduledFrom(tasks)) {
            true
          } else {
            emptyKeys += id
            false
          }
      }
      // The key could be executorId, host or rackId
      emptyKeys.foreach(id => pendingTasks.remove(id))
      hasTasks
    }
    while (currentLocalityIndex < myLocalityLevels.length - 1) {
      val moreTasks = myLocalityLevels(currentLocalityIndex) match {
        case TaskLocality.PROCESS_LOCAL => moreTasksToRunIn(pendingTasksForExecutor)
        case TaskLocality.NODE_LOCAL => moreTasksToRunIn(pendingTasksForHost)
        case TaskLocality.NO_PREF => pendingTasksWithNoPrefs.nonEmpty
        case TaskLocality.RACK_LOCAL => moreTasksToRunIn(pendingTasksForRack)
      }
      if (!moreTasks) {
        // This is a performance optimization: if there are no more tasks that can
        // be scheduled at a particular locality level, there is no point in waiting
        // for the locality wait timeout (SPARK-4939).
        lastLaunchTime = curTime
        logDebug(s"No tasks for locality level ${myLocalityLevels(currentLocalityIndex)}, " +
          s"so moving to locality level ${myLocalityLevels(currentLocalityIndex + 1)}")
        currentLocalityIndex += 1
      } else if (curTime - lastLaunchTime >= localityWaits(currentLocalityIndex)) {
        // Jump to the next locality level, and reset lastLaunchTime so that the next locality
        // wait timer doesn't immediately expire
        lastLaunchTime += localityWaits(currentLocalityIndex)
        logDebug(s"Moving to ${myLocalityLevels(currentLocalityIndex + 1)} after waiting for " +
          s"${localityWaits(currentLocalityIndex)}ms")
        currentLocalityIndex += 1
      } else {
        return myLocalityLevels(currentLocalityIndex)
      }
    }
    myLocalityLevels(currentLocalityIndex)
  }
/**
* Find the index in myLocalityLevels for a given locality. This is also designed to work with
* localities that are not in myLocalityLevels (in case we somehow get those) by returning the
* next-biggest level we have. Uses the fact that the last value in myLocalityLevels is ANY.
*/
def getLocalityIndex(locality: TaskLocality.TaskLocality): Int = {
var index = 0
while (locality > myLocalityLevels(index)) {
index += 1
}
index
}
  /**
   * Check whether the given task set has been blacklisted to the point that it can't run anywhere.
   *
   * It is possible that this taskset has become impossible to schedule *anywhere* due to the
   * blacklist. The most common scenario would be if there are fewer executors than
   * spark.task.maxFailures. We need to detect this so we can fail the task set, otherwise the job
   * will hang.
   *
   * There's a tradeoff here: we could make sure all tasks in the task set are schedulable, but that
   * would add extra time to each iteration of the scheduling loop. Here, we take the approach of
   * making sure at least one of the unscheduled tasks is schedulable. This means we may not detect
   * the hang as quickly as we could have, but we'll always detect the hang eventually, and the
   * method is faster in the typical case. In the worst case, this method can take
   * O(maxTaskFailures + numTasks) time, but it will be faster when there haven't been any task
   * failures (this is because the method picks one unscheduled task, and then iterates through each
   * executor until it finds one that the task isn't blacklisted on).
   *
   * @param hostToExecutors snapshot of currently registered executors, keyed by host
   */
  private[scheduler] def abortIfCompletelyBlacklisted(
      hostToExecutors: HashMap[String, HashSet[String]]): Unit = {
    taskSetBlacklistHelperOpt.foreach { taskSetBlacklist =>
      // NOTE(review): `.get` assumes the app-level blacklist tracker is always defined whenever
      // the task-set-level helper is -- confirm that invariant where both are constructed.
      val appBlacklist = blacklistTracker.get
      // Only look for unschedulable tasks when at least one executor has registered. Otherwise,
      // task sets will be (unnecessarily) aborted in cases when no executors have registered yet.
      if (hostToExecutors.nonEmpty) {
        // find any task that needs to be scheduled
        val pendingTask: Option[Int] = {
          // usually this will just take the last pending task, but because of the lazy removal
          // from each list, we may need to go deeper in the list. We poll from the end because
          // failed tasks are put back at the end of allPendingTasks, so we're more likely to find
          // an unschedulable task this way.
          val indexOffset = allPendingTasks.lastIndexWhere { indexInTaskSet =>
            copiesRunning(indexInTaskSet) == 0 && !successful(indexInTaskSet)
          }
          if (indexOffset == -1) {
            None
          } else {
            Some(allPendingTasks(indexOffset))
          }
        }
        pendingTask.foreach { indexInTaskSet =>
          // try to find some executor this task can run on. Its possible that some *other*
          // task isn't schedulable anywhere, but we will discover that in some later call,
          // when that unschedulable task is the last task remaining.
          val blacklistedEverywhere = hostToExecutors.forall { case (host, execsOnHost) =>
            // Check if the task can run on the node
            val nodeBlacklisted =
              appBlacklist.isNodeBlacklisted(host) ||
              taskSetBlacklist.isNodeBlacklistedForTaskSet(host) ||
              taskSetBlacklist.isNodeBlacklistedForTask(host, indexInTaskSet)
            if (nodeBlacklisted) {
              true
            } else {
              // Check if the task can run on any of the executors
              execsOnHost.forall { exec =>
                appBlacklist.isExecutorBlacklisted(exec) ||
                taskSetBlacklist.isExecutorBlacklistedForTaskSet(exec) ||
                taskSetBlacklist.isExecutorBlacklistedForTask(exec, indexInTaskSet)
              }
            }
          }
          if (blacklistedEverywhere) {
            // No host and no executor can accept this task: fail fast instead of hanging.
            val partition = tasks(indexInTaskSet).partitionId
            abort(s"""
              |Aborting $taskSet because task $indexInTaskSet (partition $partition)
              |cannot run anywhere due to node and executor blacklist.
              |Most recent failure:
              |${taskSetBlacklist.getLatestFailureReason}
              |
              |Blacklisting behavior can be configured via spark.blacklist.*.
              |""".stripMargin)
          }
        }
      }
    }
  }
/**
* Marks the task as getting result and notifies the DAG Scheduler
*/
def handleTaskGettingResult(tid: Long): Unit = {
val info = taskInfos(tid)
info.markGettingResult(clock.getTimeMillis())
sched.dagScheduler.taskGettingResult(info)
}
/**
* Check whether has enough quota to fetch the result with `size` bytes
*/
def canFetchMoreResults(size: Long): Boolean = sched.synchronized {
totalResultSize += size
calculatedTasks += 1
if (maxResultSize > 0 && totalResultSize > maxResultSize) {
val msg = s"Total size of serialized results of ${calculatedTasks} tasks " +
s"(${Utils.bytesToString(totalResultSize)}) is bigger than spark.driver.maxResultSize " +
s"(${Utils.bytesToString(maxResultSize)})"
logError(msg)
abort(msg)
false
} else {
true
}
}
  /**
   * Marks a task as successful and notifies the DAGScheduler that the task has ended.
   *
   * Also kills any other still-running attempts of the same task (speculative copies), and
   * flips this manager to zombie once every task index has succeeded.
   */
  def handleSuccessfulTask(tid: Long, result: DirectTaskResult[_]): Unit = {
    val info = taskInfos(tid)
    val index = info.index
    info.markFinished(TaskState.FINISHED, clock.getTimeMillis())
    if (speculationEnabled) {
      // Feed the duration into the tracker whose median drives checkSpeculatableTasks.
      successfulTaskDurations.insert(info.duration)
    }
    removeRunningTask(tid)
    // Kill any other attempts for the same task (since those are unnecessary now that one
    // attempt completed successfully).
    for (attemptInfo <- taskAttempts(index) if attemptInfo.running) {
      logInfo(s"Killing attempt ${attemptInfo.attemptNumber} for task ${attemptInfo.id} " +
        s"in stage ${taskSet.id} (TID ${attemptInfo.taskId}) on ${attemptInfo.host} " +
        s"as the attempt ${info.attemptNumber} succeeded on ${info.host}")
      // Record the kill so executorLost won't resubmit this attempt's work later.
      killedByOtherAttempt += attemptInfo.taskId
      sched.backend.killTask(
        attemptInfo.taskId,
        attemptInfo.executorId,
        interruptThread = true,
        reason = "another attempt succeeded")
    }
    if (!successful(index)) {
      tasksSuccessful += 1
      logInfo(s"Finished task ${info.id} in stage ${taskSet.id} (TID ${info.taskId}) in" +
        s" ${info.duration} ms on ${info.host} (executor ${info.executorId})" +
        s" ($tasksSuccessful/$numTasks)")
      // Mark successful and stop if all the tasks have succeeded.
      successful(index) = true
      if (tasksSuccessful == numTasks) {
        isZombie = true
      }
    } else {
      // Another attempt already completed this index; nothing to count, just log.
      logInfo("Ignoring task-finished event for " + info.id + " in stage " + taskSet.id +
        " because task " + index + " has already completed successfully")
    }
    // There may be multiple tasksets for this stage -- we let all of them know that the partition
    // was completed. This may result in some of the tasksets getting completed.
    sched.markPartitionCompletedInAllTaskSets(stageId, tasks(index).partitionId, info)
    // This method is called by "TaskSchedulerImpl.handleSuccessfulTask" which holds the
    // "TaskSchedulerImpl" lock until exiting. To avoid the SPARK-7655 issue, we should not
    // "deserialize" the value when holding a lock to avoid blocking other threads. So we call
    // "result.value()" in "TaskResultGetter.enqueueSuccessfulTask" before reaching here.
    // Note: "result.value()" only deserializes the value when it's called at the first time, so
    // here "result.value()" just returns the value and won't block other threads.
    sched.dagScheduler.taskEnded(tasks(index), Success, result.value(), result.accumUpdates, info)
    maybeFinishTaskSet()
  }
private[scheduler] def markPartitionCompleted(partitionId: Int, taskInfo: TaskInfo): Unit = {
partitionToIndex.get(partitionId).foreach { index =>
if (!successful(index)) {
if (speculationEnabled && !isZombie) {
successfulTaskDurations.insert(taskInfo.duration)
}
tasksSuccessful += 1
successful(index) = true
if (tasksSuccessful == numTasks) {
isZombie = true
}
maybeFinishTaskSet()
}
}
}
/**
* Marks the task as failed, re-adds it to the list of pending tasks, and notifies the
* DAG Scheduler.
*/
def handleFailedTask(tid: Long, state: TaskState, reason: TaskFailedReason) {
val info = taskInfos(tid)
if (info.failed || info.killed) {
return
}
removeRunningTask(tid)
info.markFinished(state, clock.getTimeMillis())
val index = info.index
copiesRunning(index) -= 1
var accumUpdates: Seq[AccumulatorV2[_, _]] = Seq.empty
val failureReason = s"Lost task ${info.id} in stage ${taskSet.id} (TID $tid, ${info.host}," +
s" executor ${info.executorId}): ${reason.toErrorString}"
val failureException: Option[Throwable] = reason match {
case fetchFailed: FetchFailed =>
logWarning(failureReason)
if (!successful(index)) {
successful(index) = true
tasksSuccessful += 1
}
isZombie = true
if (fetchFailed.bmAddress != null) {
blacklistTracker.foreach(_.updateBlacklistForFetchFailure(
fetchFailed.bmAddress.host, fetchFailed.bmAddress.executorId))
}
None
case ef: ExceptionFailure =>
// ExceptionFailure's might have accumulator updates
accumUpdates = ef.accums
if (ef.className == classOf[NotSerializableException].getName) {
// If the task result wasn't serializable, there's no point in trying to re-execute it.
logError("Task %s in stage %s (TID %d) had a not serializable result: %s; not retrying"
.format(info.id, taskSet.id, tid, ef.description))
abort("Task %s in stage %s (TID %d) had a not serializable result: %s".format(
info.id, taskSet.id, tid, ef.description))
return
}
val key = ef.description
val now = clock.getTimeMillis()
val (printFull, dupCount) = {
if (recentExceptions.contains(key)) {
val (dupCount, printTime) = recentExceptions(key)
if (now - printTime > EXCEPTION_PRINT_INTERVAL) {
recentExceptions(key) = (0, now)
(true, 0)
} else {
recentExceptions(key) = (dupCount + 1, printTime)
(false, dupCount + 1)
}
} else {
recentExceptions(key) = (0, now)
(true, 0)
}
}
if (printFull) {
logWarning(failureReason)
} else {
logInfo(
s"Lost task ${info.id} in stage ${taskSet.id} (TID $tid) on ${info.host}, executor" +
s" ${info.executorId}: ${ef.className} (${ef.description}) [duplicate $dupCount]")
}
ef.exception
case tk: TaskKilled =>
// TaskKilled might have accumulator updates
accumUpdates = tk.accums
logWarning(failureReason)
None
case e: ExecutorLostFailure if !e.exitCausedByApp =>
logInfo(s"Task $tid failed because while it was being computed, its executor " +
"exited for a reason unrelated to the task. Not counting this failure towards the " +
"maximum number of failures for the task.")
None
case e: TaskFailedReason => // TaskResultLost and others
logWarning(failureReason)
None
}
sched.dagScheduler.taskEnded(tasks(index), reason, null, accumUpdates, info)
if (!isZombie && reason.countTowardsTaskFailures) {
assert (null != failureReason)
taskSetBlacklistHelperOpt.foreach(_.updateBlacklistForFailedTask(
info.host, info.executorId, index, failureReason))
numFailures(index) += 1
if (numFailures(index) >= maxTaskFailures) {
logError("Task %d in stage %s failed %d times; aborting job".format(
index, taskSet.id, maxTaskFailures))
abort("Task %d in stage %s failed %d times, most recent failure: %s\\nDriver stacktrace:"
.format(index, taskSet.id, maxTaskFailures, failureReason), failureException)
return
}
}
if (successful(index)) {
logInfo(s"Task ${info.id} in stage ${taskSet.id} (TID $tid) failed, but the task will not" +
s" be re-executed (either because the task failed with a shuffle data fetch failure," +
s" so the previous stage needs to be re-run, or because a different copy of the task" +
s" has already succeeded).")
} else {
addPendingTask(index)
}
maybeFinishTaskSet()
}
  /**
   * Aborts the entire task set: notifies the DAGScheduler of the failure, flips this manager
   * into zombie state so it launches no further tasks, and tears it down if nothing is running.
   *
   * @param message human-readable reason forwarded to the DAGScheduler
   * @param exception optional root cause attached to the failure
   */
  def abort(message: String, exception: Option[Throwable] = None): Unit = sched.synchronized {
    // TODO: Kill running tasks if we were not terminated due to a Mesos error
    sched.dagScheduler.taskSetFailed(taskSet, message, exception)
    isZombie = true
    maybeFinishTaskSet()
  }
/** If the given task ID is not in the set of running tasks, adds it.
*
* Used to keep track of the number of running tasks, for enforcing scheduling policies.
*/
def addRunningTask(tid: Long) {
if (runningTasksSet.add(tid) && parent != null) {
parent.increaseRunningTasks(1)
}
}
/** If the given task ID is in the set of running tasks, removes it. */
def removeRunningTask(tid: Long) {
if (runningTasksSet.remove(tid) && parent != null) {
parent.decreaseRunningTasks(1)
}
}
override def getSchedulableByName(name: String): Schedulable = {
null
}
  // Leaf node: a TaskSetManager never holds child Schedulables, so these are no-ops.
  override def addSchedulable(schedulable: Schedulable) {}
  override def removeSchedulable(schedulable: Schedulable) {}
override def getSortedTaskSetQueue(): ArrayBuffer[TaskSetManager] = {
val sortedTaskSetQueue = new ArrayBuffer[TaskSetManager]()
sortedTaskSetQueue += this
sortedTaskSetQueue
}
  /** Called by TaskScheduler when an executor is lost so we can re-enqueue our tasks */
  override def executorLost(execId: String, host: String, reason: ExecutorLossReason) {
    // Re-enqueue any tasks that ran on the failed executor if this is a shuffle map stage,
    // and we are not using an external shuffle server which could serve the shuffle outputs.
    // The reason is the next stage wouldn't be able to fetch the data from this dead executor
    // so we would need to rerun these tasks on other executors.
    if (tasks(0).isInstanceOf[ShuffleMapTask] && !env.blockManager.externalShuffleServiceEnabled
        && !isZombie) {
      for ((tid, info) <- taskInfos if info.executorId == execId) {
        val index = taskInfos(tid).index
        // Only roll back tasks that had completed on the lost executor and whose output was not
        // produced by some other attempt (those were killed after another attempt succeeded).
        if (successful(index) && !killedByOtherAttempt.contains(tid)) {
          successful(index) = false
          copiesRunning(index) -= 1
          tasksSuccessful -= 1
          addPendingTask(index)
          // Tell the DAGScheduler that this task was resubmitted so that it doesn't think our
          // stage finishes when a total of tasks.size tasks finish.
          sched.dagScheduler.taskEnded(
            tasks(index), Resubmitted, null, Seq.empty, info)
        }
      }
    }
    // Fail every task still running on the lost executor. Whether the failure counts towards
    // the task's max-failure limit depends on why the executor went away (exitCausedByApp).
    for ((tid, info) <- taskInfos if info.running && info.executorId == execId) {
      val exitCausedByApp: Boolean = reason match {
        case exited: ExecutorExited => exited.exitCausedByApp
        case ExecutorKilled => false
        case _ => true
      }
      handleFailedTask(tid, TaskState.FAILED, ExecutorLostFailure(info.executorId, exitCausedByApp,
        Some(reason.toString)))
    }
    // recalculate valid locality levels and waits when executor is lost
    recomputeLocality()
  }
/**
* Check for tasks to be speculated and return true if there are any. This is called periodically
* by the TaskScheduler.
*
*/
override def checkSpeculatableTasks(minTimeToSpeculation: Int): Boolean = {
// Can't speculate if we only have one task, and no need to speculate if the task set is a
// zombie.
if (isZombie || numTasks == 1) {
return false
}
var foundTasks = false
val minFinishedForSpeculation = (SPECULATION_QUANTILE * numTasks).floor.toInt
logDebug("Checking for speculative tasks: minFinished = " + minFinishedForSpeculation)
if (tasksSuccessful >= minFinishedForSpeculation && tasksSuccessful > 0) {
val time = clock.getTimeMillis()
val medianDuration = successfulTaskDurations.median
val threshold = max(SPECULATION_MULTIPLIER * medianDuration, minTimeToSpeculation)
// TODO: Threshold should also look at standard deviation of task durations and have a lower
// bound based on that.
logDebug("Task length threshold for speculation: " + threshold)
for (tid <- runningTasksSet) {
val info = taskInfos(tid)
val index = info.index
if (!successful(index) && copiesRunning(index) == 1 && info.timeRunning(time) > threshold &&
!speculatableTasks.contains(index)) {
logInfo(
"Marking task %d in stage %s (on %s) as speculatable because it ran more than %.0f ms"
.format(index, taskSet.id, info.host, threshold))
speculatableTasks += index
sched.dagScheduler.speculativeTaskSubmitted(tasks(index))
foundTasks = true
}
}
}
foundTasks
}
private def getLocalityWait(level: TaskLocality.TaskLocality): Long = {
val defaultWait = conf.get(config.LOCALITY_WAIT)
val localityWaitKey = level match {
case TaskLocality.PROCESS_LOCAL => "spark.locality.wait.process"
case TaskLocality.NODE_LOCAL => "spark.locality.wait.node"
case TaskLocality.RACK_LOCAL => "spark.locality.wait.rack"
case _ => null
}
if (localityWaitKey != null) {
conf.getTimeAsMs(localityWaitKey, defaultWait.toString)
} else {
0L
}
}
/**
* Compute the locality levels used in this TaskSet. Assumes that all tasks have already been
* added to queues using addPendingTask.
*
*/
private def computeValidLocalityLevels(): Array[TaskLocality.TaskLocality] = {
import TaskLocality.{PROCESS_LOCAL, NODE_LOCAL, NO_PREF, RACK_LOCAL, ANY}
val levels = new ArrayBuffer[TaskLocality.TaskLocality]
if (!pendingTasksForExecutor.isEmpty &&
pendingTasksForExecutor.keySet.exists(sched.isExecutorAlive(_))) {
levels += PROCESS_LOCAL
}
if (!pendingTasksForHost.isEmpty &&
pendingTasksForHost.keySet.exists(sched.hasExecutorsAliveOnHost(_))) {
levels += NODE_LOCAL
}
if (!pendingTasksWithNoPrefs.isEmpty) {
levels += NO_PREF
}
if (!pendingTasksForRack.isEmpty &&
pendingTasksForRack.keySet.exists(sched.hasHostAliveOnRack(_))) {
levels += RACK_LOCAL
}
levels += ANY
logDebug("Valid locality levels for " + taskSet + ": " + levels.mkString(", "))
levels.toArray
}
  /**
   * Recomputes the valid locality levels and their wait times (called when executors are added
   * or lost), preserving the current position by locality *level* rather than by index.
   */
  def recomputeLocality() {
    // Capture the level before recomputing: the new array may differ in length/content, so the
    // old index could point at a different level after the refresh.
    val previousLocalityLevel = myLocalityLevels(currentLocalityIndex)
    myLocalityLevels = computeValidLocalityLevels()
    localityWaits = myLocalityLevels.map(getLocalityWait)
    currentLocalityIndex = getLocalityIndex(previousLocalityLevel)
  }
  /** Called when a new executor registers; locality levels may have improved, so recompute. */
  def executorAdded() {
    recomputeLocality()
  }
}
/** Companion holding constants shared by all TaskSetManager instances. */
private[spark] object TaskSetManager {
  // The user will be warned if any stages contain a task that has a serialized size greater than
  // this. The unit is kilobytes, as the name indicates.
  val TASK_SIZE_TO_WARN_KB = 100
}
| tejasapatil/spark | core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala | Scala | apache-2.0 | 44,957 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.