code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Copyright (c) 2015 Daniel Higuero.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.spark.examples.streaming
import java.util.Calendar
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Seconds, StreamingContext}
/**
 * Solution for exercise 2: counts authorization and web-request events arriving
 * on two socket streams, using Spark Streaming sliding windows.
 */
object ejercicio2 {
  /**
   * Field separator.
   * NOTE(review): Separator, ThresholdAuth and ThresholdWeb are currently unused
   * in main — confirm whether they belong to a later part of the exercise.
   */
  val Separator = ";";
  /**
   * Threshold that determines when a number of failed auth entries is considered an attack.
   */
  val ThresholdAuth = 1;
  /**
   * Threshold that determines when a number of failed web access entries is considered an attack.
   */
  val ThresholdWeb = 1;
  def main(args: Array[String]): Unit = {
    // Suppress Spark output
    Logger.getLogger("org").setLevel(Level.ERROR)
    Logger.getLogger("akka").setLevel(Level.ERROR)
    // Define the Spark configuration. In this case we are using the local mode
    val sparkConf = new SparkConf().setMaster("local[4]").setAppName("ReadingLogs_exercise2")
    // Define a SparkStreamingContext with a batch interval of 5 seconds
    val ssc = new StreamingContext(sparkConf, Seconds(5))
    // Using the auth event source, collect events over a sliding window of length
    // 10 seconds and a slide interval of 5 seconds.
    val autorizacion = ssc.socketTextStream("localhost", 10001, StorageLevel.MEMORY_AND_DISK_SER).window(Seconds(10),Seconds(5))
    // NOTE(review): foreachRDD returns Unit, so numberEvents_authRDD is Unit and the
    // cached RDDs are never read afterwards — confirm the intent of this caching.
    val numberEvents_authRDD = autorizacion.map(x => {
      val arr = x.split(';')
      new AuthEvent(arr(0), arr(1), arr(2), arr(3))
    }).foreachRDD(x=>x.cache())
    // Get the system date so it can be printed
    val today = Calendar.getInstance().getTime()
    println()
    println("Ejercicio 2: ")
    println("**********************************************")
    println()
    println(today)
    // Web-request event source, windowed the same way as the auth stream.
    val events = ssc.socketTextStream("localhost", 10002, StorageLevel.MEMORY_AND_DISK_SER).window(Seconds(10),Seconds(5))
    // Parse each line into a WebEvent and print the per-window count.
    val numberEventsRDD = events.map(x => {
      val arr = x.split(';')
      new WebEvent(arr(0), arr(1), arr(2), arr(3), arr(4))
    }).foreachRDD(x=>println("Number of web request in the last 10 seconds with slide: " + x.count()))
    // Start the streaming context
    ssc.start()
    ssc.awaitTermination()
  }
}
| anazamarron/spark-streaming-exercises | src/main/scala/org/spark/examples/streaming/ejercicio2.scala | Scala | apache-2.0 | 2,931 |
package edu.neu.coe.csye._7200.parse
import org.scalatest.{FlatSpec, Matchers}
/**
 * Tests for the Arith arithmetic-expression parser: successful evaluations,
 * parenthesized expressions, and expected parse failures.
 *
 * @author scalaprof
 */
class ArithSpec extends FlatSpec with Matchers {
  "1" should "be 1.0" in {
    val parser = new Arith
    val r = parser.parseAll(parser.expr, "1")
    r should matchPattern { case parser.Success(_, _) => }
    r.get.eval shouldBe 1.0
  }
  "(1)" should "be 1.0" in {
    val parser = new Arith
    // Fixed: this test previously parsed "1", duplicating the test above instead
    // of exercising a parenthesized expression as its name states.
    val r = parser.parseAll(parser.expr, "(1)")
    r should matchPattern { case parser.Success(_, _) => }
    r.get.eval shouldBe 1.0
  }
  "(1+1)" should "be 2.0" in {
    val parser = new Arith
    val r = parser.parseAll(parser.expr, "1+1")
    r should matchPattern { case parser.Success(_, _) => }
    r.get.eval shouldBe 2.0
  }
  "(1*2+1)" should "be 3.0" in {
    val parser = new Arith
    val r = parser.parseAll(parser.expr, "1*2+1")
    r should matchPattern { case parser.Success(_, _) => }
    r.get.eval shouldBe 3.0
  }
  "(1*2+1-1.5)" should "be 1.5" in {
    val parser = new Arith
    val r = parser.parseAll(parser.expr, "1*2+1-1.5")
    r should matchPattern { case parser.Success(_, _) => }
    r.get.eval shouldBe 1.5
  }
  "(1*2+1-3/2)" should "be 1.5" in {
    val parser = new Arith
    val r = parser.parseAll(parser.expr, "1*2+1-3/2")
    r should matchPattern { case parser.Success(_, _) => }
    r.get.eval shouldBe 1.5
  }
  // An unknown identifier is not a valid factor.
  "(1*2+1-pi/2)" should "fail" in {
    val parser = new Arith
    val r = parser.parseAll(parser.expr, "1*2+1-pi/2")
    r should matchPattern { case parser.Failure("factor", _) => }
  }
  // An unexpected operator inside parentheses fails with a close-paren error.
  "(1?2)" should "fail" in {
    val parser = new Arith
    val r = parser.parseAll(parser.expr, "(1?2)")
    r should matchPattern { case parser.Failure("`)' expected but `?' found", _) => }
  }
  "(" should "fail" in {
    val parser = new Arith
    val r = parser.parseAll(parser.expr, "(")
    r should matchPattern { case parser.Failure("factor", _) => }
  }
  // '=' is not part of the expression grammar, so trailing input remains.
  "1+2=2" should "fail" in {
    val parser = new Arith
    val r = parser.parseAll(parser.expr, "1+2=2")
    r should matchPattern { case parser.Failure("expr", _) => }
  }
}
| rchillyard/Scalaprof | FunctionalProgramming/src/test/scala/edu/neu/coe/csye/_7200/parse/ArithSpec.scala | Scala | gpl-2.0 | 2,087 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.data.format
import java.sql.Timestamp
import java.time._
import java.time.format.DateTimeFormatter
import java.util.UUID
import play.api.data._
import annotation.implicitNotFound
/**
 * Handles field binding and unbinding.
 */
@implicitNotFound(
  msg = "Cannot find Formatter type class for ${T}. Perhaps you will need to import play.api.data.format.Formats._ "
)
trait Formatter[T] {
  /**
   * The expected input format, as an optional (format key, arguments) pair
   * suitable for message lookup (e.g. ("format.date", Seq("yyyy-MM-dd"))).
   */
  val format: Option[(String, Seq[Any])] = None
  /**
   * Binds this field, i.e. constructs a concrete value from submitted data.
   *
   * @param key the field key
   * @param data the submitted data
   * @return Either a concrete value of type T or a set of error if the binding failed.
   */
  def bind(key: String, data: Map[String, String]): Either[Seq[FormError], T]
  /**
   * Unbinds this field, i.e. transforms a concrete value to plain data.
   *
   * @param key the field key
   * @param value the value to unbind
   * @return either the plain data or a set of errors if unbinding failed
   */
  def unbind(key: String, value: T): Map[String, String]
}
/** This object defines several default formatters. */
object Formats {
  /**
   * Formatter for ignored values.
   *
   * @param value As we ignore this parameter in binding/unbinding we have to provide a default value.
   */
  def ignoredFormat[A](value: A): Formatter[A] = new Formatter[A] {
    def bind(key: String, data: Map[String, String]) = Right(value)
    def unbind(key: String, value: A) = Map.empty
  }
  /**
   * Default formatter for the `String` type.
   */
  implicit def stringFormat: Formatter[String] = new Formatter[String] {
    def bind(key: String, data: Map[String, String]) = data.get(key).toRight(Seq(FormError(key, "error.required", Nil)))
    def unbind(key: String, value: String) = Map(key -> value)
  }
  /**
   * Default formatter for the `Char` type.
   */
  implicit def charFormat: Formatter[Char] = new Formatter[Char] {
    def bind(key: String, data: Map[String, String]) =
      data
        .get(key)
        // Accept only single, non-blank characters; a lone space counts as missing.
        .filter(s => s.length == 1 && s != " ")
        .map(s => Right(s.charAt(0)))
        .getOrElse(
          Left(Seq(FormError(key, "error.required", Nil)))
        )
    def unbind(key: String, value: Char) = Map(key -> value.toString)
  }
  /**
   * Helper for formatters binders
   * @param parse Function parsing a String value into a T value, throwing an exception in case of failure
   * @param errMsg Error message key to report in case of parsing failure
   * @param errArgs Error message arguments to report in case of parsing failure
   * @param key Key name of the field to parse
   * @param data Field data
   */
  def parsing[T](parse: String => T, errMsg: String, errArgs: Seq[Any])(
      key: String,
      data: Map[String, String]
  ): Either[Seq[FormError], T] = {
    stringFormat.bind(key, data).flatMap { s =>
      // Any exception thrown by `parse` is converted into a single FormError.
      scala.util.control.Exception
        .allCatch[T]
        .either(parse(s))
        .left
        .map(e => Seq(FormError(key, errMsg, errArgs)))
    }
  }
  // Shared builder for numeric formatters; `real` selects the real-number
  // format/error message keys instead of the integer ones.
  private def numberFormatter[T](convert: String => T, real: Boolean = false): Formatter[T] = {
    val (formatString, errorString) = if (real) ("format.real", "error.real") else ("format.numeric", "error.number")
    new Formatter[T] {
      override val format = Some(formatString -> Nil)
      def bind(key: String, data: Map[String, String]) =
        parsing(convert, errorString, Nil)(key, data)
      def unbind(key: String, value: T) = Map(key -> value.toString)
    }
  }
  /**
   * Default formatter for the `Long` type.
   */
  implicit def longFormat: Formatter[Long] = numberFormatter(_.toLong)
  /**
   * Default formatter for the `Int` type.
   */
  implicit def intFormat: Formatter[Int] = numberFormatter(_.toInt)
  /**
   * Default formatter for the `Short` type.
   */
  implicit def shortFormat: Formatter[Short] = numberFormatter(_.toShort)
  /**
   * Default formatter for the `Byte` type.
   */
  implicit def byteFormat: Formatter[Byte] = numberFormatter(_.toByte)
  /**
   * Default formatter for the `Float` type.
   */
  implicit def floatFormat: Formatter[Float] = numberFormatter(_.toFloat, real = true)
  /**
   * Default formatter for the `Double` type.
   */
  implicit def doubleFormat: Formatter[Double] = numberFormatter(_.toDouble, real = true)
  /**
   * Default formatter for the `BigDecimal` type.
   *
   * @param precision optional (precision, scale) constraint enforced during binding
   */
  def bigDecimalFormat(precision: Option[(Int, Int)]): Formatter[BigDecimal] = new Formatter[BigDecimal] {
    override val format = Some(("format.real", Nil))
    def bind(key: String, data: Map[String, String]) = {
      Formats.stringFormat.bind(key, data).flatMap { s =>
        scala.util.control.Exception
          .allCatch[BigDecimal]
          .either {
            val bd = BigDecimal(s)
            precision
              .map({
                case (p, s) =>
                  // Reject values whose integer part exceeds the allowed digits.
                  if (bd.precision - bd.scale > p - s) {
                    throw new java.lang.ArithmeticException("Invalid precision")
                  }
                  bd.setScale(s)
              })
              .getOrElse(bd)
          }
          .left
          .map { e =>
            Seq(
              precision match {
                case Some((p, s)) => FormError(key, "error.real.precision", Seq(p, s))
                case None         => FormError(key, "error.real", Nil)
              }
            )
          }
      }
    }
    def unbind(key: String, value: BigDecimal) =
      Map(
        key -> precision
          .map({ p =>
            value.setScale(p._2)
          })
          .getOrElse(value)
          .toString
      )
  }
  /**
   * Default formatter for the `BigDecimal` type with no precision
   */
  implicit val bigDecimalFormat: Formatter[BigDecimal] = bigDecimalFormat(None)
  /**
   * Default formatter for the `Boolean` type.
   */
  implicit def booleanFormat: Formatter[Boolean] = new Formatter[Boolean] {
    override val format = Some(("format.boolean", Nil))
    def bind(key: String, data: Map[String, String]) = {
      // A missing key binds to false (unchecked checkbox semantics).
      Right(data.getOrElse(key, "false")).flatMap {
        case "true"  => Right(true)
        case "false" => Right(false)
        case _       => Left(Seq(FormError(key, "error.boolean", Nil)))
      }
    }
    def unbind(key: String, value: Boolean) = Map(key -> value.toString)
  }
  import java.util.Date
  import java.util.TimeZone
  /**
   * Formatter for the `java.util.Date` type.
   *
   * @param pattern a date pattern, as specified in `java.time.format.DateTimeFormatter`.
   * @param timeZone the `java.util.TimeZone` to use for parsing and formatting
   */
  def dateFormat(pattern: String, timeZone: TimeZone = TimeZone.getDefault): Formatter[Date] = new Formatter[Date] {
    val javaTimeZone = timeZone.toZoneId
    val formatter = DateTimeFormatter.ofPattern(pattern)
    def dateParse(data: String) = {
      // Parse in UTC first, then reinterpret the local date-time in the target zone.
      val instant = PlayDate.parse(data, formatter).toZonedDateTime(ZoneOffset.UTC)
      Date.from(instant.withZoneSameLocal(javaTimeZone).toInstant)
    }
    override val format = Some(("format.date", Seq(pattern)))
    def bind(key: String, data: Map[String, String]) = parsing(dateParse, "error.date", Nil)(key, data)
    def unbind(key: String, value: Date) = Map(key -> formatter.format(value.toInstant.atZone(javaTimeZone)))
  }
  /**
   * Default formatter for the `java.util.Date` type with pattern `yyyy-MM-dd`.
   */
  implicit val dateFormat: Formatter[Date] = dateFormat("yyyy-MM-dd")
  /**
   * Formatter for the `java.sql.Date` type.
   *
   * @param pattern a date pattern as specified in `java.time.format.DateTimeFormatter`.
   */
  def sqlDateFormat(pattern: String): Formatter[java.sql.Date] = new Formatter[java.sql.Date] {
    // Delegates to the LocalDate formatter and converts to/from java.sql.Date.
    private val dateFormatter: Formatter[LocalDate] = localDateFormat(pattern)
    override val format = Some(("format.date", Seq(pattern)))
    def bind(key: String, data: Map[String, String]) = {
      dateFormatter.bind(key, data).map(d => java.sql.Date.valueOf(d))
    }
    def unbind(key: String, value: java.sql.Date) = dateFormatter.unbind(key, value.toLocalDate)
  }
  /**
   * Default formatter for `java.sql.Date` type with pattern `yyyy-MM-dd`.
   */
  implicit val sqlDateFormat: Formatter[java.sql.Date] = sqlDateFormat("yyyy-MM-dd")
  /**
   * Formatter for the `java.sql.Timestamp` type.
   *
   * @param pattern a date pattern as specified in `java.time.format.DateTimeFormatter`.
   * @param timeZone the `java.util.TimeZone` to use for parsing and formatting
   */
  def sqlTimestampFormat(pattern: String, timeZone: TimeZone = TimeZone.getDefault): Formatter[java.sql.Timestamp] =
    new Formatter[java.sql.Timestamp] {
      import java.time.LocalDateTime
      private val formatter = java.time.format.DateTimeFormatter.ofPattern(pattern).withZone(timeZone.toZoneId)
      private def timestampParse(data: String) = java.sql.Timestamp.valueOf(LocalDateTime.parse(data, formatter))
      override val format = Some(("format.timestamp", Seq(pattern)))
      override def bind(key: String, data: Map[String, String]): Either[Seq[FormError], Timestamp] =
        parsing(timestampParse, "error.timestamp", Nil)(key, data)
      override def unbind(key: String, value: java.sql.Timestamp) = Map(key -> value.toLocalDateTime.format(formatter))
    }
  /**
   * Default formatter for `java.sql.Timestamp` type with pattern `yyyy-MM-dd HH:mm:ss`.
   */
  implicit val sqlTimestampFormat: Formatter[java.sql.Timestamp] = sqlTimestampFormat("yyyy-MM-dd HH:mm:ss")
  /**
   * Formatter for the `java.time.LocalDate` type.
   *
   * @param pattern a date pattern as specified in `java.time.format.DateTimeFormatter`.
   */
  def localDateFormat(pattern: String): Formatter[java.time.LocalDate] = new Formatter[java.time.LocalDate] {
    import java.time.LocalDate
    val formatter = java.time.format.DateTimeFormatter.ofPattern(pattern)
    def localDateParse(data: String) = LocalDate.parse(data, formatter)
    override val format = Some(("format.date", Seq(pattern)))
    def bind(key: String, data: Map[String, String]) = parsing(localDateParse, "error.date", Nil)(key, data)
    def unbind(key: String, value: LocalDate) = Map(key -> value.format(formatter))
  }
  /**
   * Default formatter for `java.time.LocalDate` type with pattern `yyyy-MM-dd`.
   */
  implicit val localDateFormat: Formatter[java.time.LocalDate] = localDateFormat("yyyy-MM-dd")
  /**
   * Formatter for the `java.time.LocalDateTime` type.
   *
   * @param pattern a date pattern as specified in `java.time.format.DateTimeFormatter`.
   * @param zoneId the `java.time.ZoneId` to use for parsing and formatting
   */
  def localDateTimeFormat(
      pattern: String,
      zoneId: java.time.ZoneId = java.time.ZoneId.systemDefault()
  ): Formatter[java.time.LocalDateTime] = new Formatter[java.time.LocalDateTime] {
    import java.time.LocalDateTime
    val formatter = java.time.format.DateTimeFormatter.ofPattern(pattern).withZone(zoneId)
    def localDateTimeParse(data: String) = LocalDateTime.parse(data, formatter)
    override val format = Some(("format.localDateTime", Seq(pattern)))
    def bind(key: String, data: Map[String, String]) =
      parsing(localDateTimeParse, "error.localDateTime", Nil)(key, data)
    def unbind(key: String, value: LocalDateTime) = Map(key -> value.format(formatter))
  }
  /**
   * Default formatter for `java.time.LocalDateTime` type with pattern `yyyy-MM-dd HH:mm:ss`.
   */
  implicit val localDateTimeFormat: Formatter[java.time.LocalDateTime] = localDateTimeFormat("yyyy-MM-dd HH:mm:ss")
  /**
   * Formatter for the `java.time.LocalTime` type.
   *
   * @param pattern a date pattern as specified in `java.time.format.DateTimeFormatter`.
   */
  def localTimeFormat(pattern: String): Formatter[java.time.LocalTime] = new Formatter[java.time.LocalTime] {
    import java.time.LocalTime
    val formatter = java.time.format.DateTimeFormatter.ofPattern(pattern)
    def localTimeParse(data: String) = LocalTime.parse(data, formatter)
    override val format = Some(("format.localTime", Seq(pattern)))
    def bind(key: String, data: Map[String, String]) = parsing(localTimeParse, "error.localTime", Nil)(key, data)
    def unbind(key: String, value: LocalTime) = Map(key -> value.format(formatter))
  }
  /**
   * Default formatter for `java.time.LocalTime` type with pattern `HH:mm:ss`.
   */
  implicit val localTimeFormat: Formatter[java.time.LocalTime] = localTimeFormat("HH:mm:ss")
  /**
   * Default formatter for the `java.util.UUID` type.
   */
  implicit def uuidFormat: Formatter[UUID] = new Formatter[UUID] {
    override val format = Some(("format.uuid", Nil))
    override def bind(key: String, data: Map[String, String]) = parsing(UUID.fromString, "error.uuid", Nil)(key, data)
    override def unbind(key: String, value: UUID) = Map(key -> value.toString)
  }
}
| mkurz/playframework | core/play/src/main/scala/play/api/data/format/Format.scala | Scala | apache-2.0 | 12,948 |
package com.dys.chatwork4s
import com.dys.chatwork4s.beans._
import com.dys.chatwork4s.beans.rooms.RoomInfo
import com.dys.chatwork4s.beans.users.Member
import com.dys.chatwork4s.http.HttpMethod
import com.dys.chatwork4s.http.parameters._
import com.dys.chatwork4s.utils.Sync._
import scala.concurrent.ExecutionContext.global
import scala.concurrent.duration.Duration
/**
 * Provides synchronous access to the messages, tasks, files, room details and
 * member information associated with a single chat room. Each call blocks
 * (without timeout) on the corresponding asynchronous operation.
 *
 * @param roomId roomId
 * @param httpMethod httpMethod
 */
class ChatWorkRoom(val roomId: Long, httpMethod: HttpMethod) {
  private val room = new ChatWorkRoomAsync(roomId, httpMethod)
  // Wait indefinitely for each asynchronous call to complete.
  implicit private val atMost: Duration = Duration.Inf
  implicit private val ec = global
  /**
   * Gets the chat's name, icon, and type (my/direct/group).
   *
   * @return
   */
  def roomInfo(): RoomInfo = await[RoomInfo](room.roomInfo)
  /**
   * Updates the chat's name and icon.
   *
   * @param updateRoomInfo parameters
   * @return
   */
  def updateRoomInfo(updateRoomInfo: UpdateRoomInfo): RoomId = await[RoomId](room.updateRoomInfo(updateRoomInfo))
  /**
   * Leaves or deletes a group chat.
   *
   * @param deleteRoom parameters
   * @return
   */
  def deleteRoom(deleteRoom: DeleteRoom): Unit = await[Unit](room.deleteRoom(deleteRoom))
  /**
   * Gets the list of members of the chat.
   *
   * @return
   */
  def members(): Seq[Member] = await[Seq[Member]](room.members())
  /**
   * Changes the chat's members in bulk.
   *
   * @param updateMembers parameters
   * @return
   */
  def updateMembers(updateMembers: UpdateMembers): RoomMembers = await[RoomMembers](room.updateMembers(updateMembers))
  /**
   * Gets the chat's message list. With no parameters, only the difference since
   * the previous fetch is returned (up to 100 messages).
   *
   * @param getMessage parameters
   * @return
   */
  def messages(getMessage: GetMessage = GetMessage.unreadOnly): Seq[Message] = await[Seq[Message]](room.messages(getMessage))
  /**
   * Posts a new message to the chat.
   *
   * @param postMessage parameters
   * @return
   */
  def postMessage(postMessage: PostMessage): MessageId = await[MessageId](room.postMessage(postMessage))
  /**
   * Gets information about a single message.
   *
   * @param messageId messageId
   * @return
   */
  def message(messageId: String): Message = await[Message](room.message(messageId))
  /**
   * Gets the chat's task list (up to 100 entries; a pagination mechanism for
   * retrieving more data is planned by the API).
   *
   * @param getTask parameters
   * @return
   */
  def tasks(getTask: GetTask = GetTask.empty): Seq[Task] = await[Seq[Task]](room.tasks(getTask))
  /**
   * Adds a new task to the chat.
   *
   * @param postTask parameters
   * @return
   */
  def postTask(postTask: PostTask): TaskIds = await[TaskIds](room.postTask(postTask))
  /**
   * Gets information about a single task.
   *
   * @param taskId taskId
   * @return
   */
  def task(taskId: Long): Task = await[Task](room.task(taskId))
  /**
   * Gets the chat's file list (up to 100 entries; a pagination mechanism for
   * retrieving more data is planned by the API).
   *
   * @param getFiles parameters
   * @return
   */
  def files(getFiles: GetFiles = GetFiles.empty): Seq[File] = await[Seq[File]](room.files(getFiles))
  /**
   * Gets information about a single file.
   *
   * @param fileId fileId
   * @param getFile parameters
   * @return
   */
  def file(fileId: Long, getFile: GetFile = GetFile.empty): File = await[File](room.file(fileId, getFile))
}
| kado-yasuyuki/chatwork4s | src/main/scala/com/dys/chatwork4s/ChatWorkRoom.scala | Scala | apache-2.0 | 3,913 |
package sbt.complete
import org.specs2.mutable.Specification
import org.specs2.specification.Scope
/**
 * Verifies that FixedSetExamples narrows its candidate completions as a prefix
 * is added, stripping the prefix from the surviving examples.
 */
class FixedSetExamplesTest extends Specification {
  "adding a prefix" should {
    "produce a smaller set of examples with the prefix removed" in new examples {
      fixedSetExamples.withAddedPrefix("f")() must containTheSameElementsAs(List("oo", "ool", "u"))
      fixedSetExamples.withAddedPrefix("fo")() must containTheSameElementsAs(List("o", "ol"))
      fixedSetExamples.withAddedPrefix("b")() must containTheSameElementsAs(List("ar"))
    }
  }
  "without a prefix" should {
    "produce the original set" in new examples {
      fixedSetExamples() mustEqual exampleSet
    }
  }
  // Shared fixture: a fixed completion set built from four sample strings.
  trait examples extends Scope {
    val exampleSet = List("foo", "bar", "fool", "fu")
    val fixedSetExamples = FixedSetExamples(exampleSet)
  }
}
| niktrop/sbt | util/complete/src/test/scala/sbt/complete/FixedSetExamplesTest.scala | Scala | bsd-3-clause | 842 |
package com.cleawing.finagle
import com.twitter.util.{Future => TFuture}
import scala.concurrent.{Promise, Future}
import scala.language.implicitConversions
import scala.util.{Try, Success, Failure}
/** Interop helpers for bridging Twitter futures with the Scala standard library. */
object Utils {
  object Implicits {
    /**
     * Implicitly converts a Twitter `Future` into a standard Scala `Future`
     * by completing a `Promise` from the Twitter future's callbacks.
     */
    implicit def twitterFuture2ScalaFuture[T](future: TFuture[T]) : Future[T] = {
      val bridge = Promise[T]()
      future.onSuccess(value => bridge.success(value))
      future.onFailure(error => bridge.failure(error))
      bridge.future
    }
  }
}
} | Cleawing/united | finagle-services/src/main/scala/com/cleawing/finagle/Utils.scala | Scala | apache-2.0 | 475 |
/*
* Copyright (c) 2013 Aviat Networks.
* This file is part of DocReg+Web. Please refer to the NOTICE.txt file for license details.
*/
package vvv.docreg.util
import java.util.Date
import vvv.docreg.util.StringUtil._
/** Renders how long ago `date` occurred, e.g. "5 mins" or "2 hours". */
class Ago(private val date: Date) {
  // Overridable clock source (handy for tests).
  def now = new Date
  override def toString = {
    val minutesElapsed = (now.getTime - date.getTime) / (1000 * 60)
    if (minutesElapsed < 60) pluralise(minutesElapsed, "min")
    else pluralise(minutesElapsed / 60, "hour")
  }
}
| scott-abernethy/docreg-web | src/main/scala/vvv/docreg/util/Ago.scala | Scala | gpl-3.0 | 449 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.avocado.preprocessing
import java.io.File
import org.apache.commons.configuration.SubnodeConfiguration
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.models.SnpTable
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.adam.rdd.read.AlignmentRecordContext._
import org.bdgenomics.formats.avro.AlignmentRecord
object RecalibrateBaseQualities extends PreprocessingStage {
  // Stage identifier used to look up this stage's configuration subtree.
  val stageName = "recalibrateBaseQualities"
  /**
   * Applies base quality score recalibration (BQSR) to the reads.
   *
   * If the stage configuration provides a "snpTable" path, the SNP table is
   * loaded from that file; otherwise an empty table is used. The table is
   * broadcast to the cluster before running recalibration.
   *
   * @param rdd the aligned reads to recalibrate
   * @param config configuration subtree for this preprocessing stage
   * @return the recalibrated reads
   */
  def apply(rdd: RDD[AlignmentRecord], config: SubnodeConfiguration): RDD[AlignmentRecord] = {
    val sc = rdd.sparkContext
    // check for snp table
    val snpTable = if (config.containsKey("snpTable")) {
      sc.broadcast(SnpTable(new File(config.getString("snpTable"))))
    } else {
      sc.broadcast(SnpTable())
    }
    // run bqsr with snp table loaded
    rdd.adamBQSR(snpTable)
  }
}
| tdanford/avocado | avocado-core/src/main/scala/org/bdgenomics/avocado/preprocessing/RecalibrateBaseQualities.scala | Scala | apache-2.0 | 1,664 |
/*
* Created on 2012/02/20
* Copyright (c) 2010-2012, Wei-ju Wu.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Wei-ju Wu nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package org.zmpp.glulx
import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.BeforeAndAfterEach
import org.zmpp.base._
/**
 * A test specification for the arithmetic and logical operations.
 *
 * Each test patches the opcode and operand bytes of the instruction under test
 * into the writable code area of a minimal story image, runs one VM turn, and
 * checks the value left on the stack.
 */
@RunWith(classOf[JUnitRunner])
class GlulxAluOpsSpec extends FlatSpec with ShouldMatchers with BeforeAndAfterEach {
  // Minimal Glulx story image: header fields are annotated inline. The area
  // from 0x28 onward is cleared before each test and patched per test case.
  val DummyMem = Array[Byte](0x47, 0x6c, 0x75, 0x6c,
                             0x00, 0x03, 0x01, 0x01, // Version
                             0x00, 0x00, 0x00, 0x24, // RAMSTART
                             0x00, 0x00, 0x00, 0x38, // EXTSTART
                             0x00, 0x00, 0x00, 0x38, // ENDMEM
                             0x00, 0x00, 0x00, 0xff.asInstanceOf[Byte], // STACKSIZE
                             0x00, 0x00, 0x00, 0x24, // STARTFUNC
                             0x04, 0x07, 0x01, 0x01, // Decoding table
                             0x01, 0x02, 0x03, 0x04, // Checksum
                             // 3 locals of size byte
                             0xc0.asInstanceOf[Byte], 0x01, 0x03, 0x00, // 0x24
                             0x00, 0x00, 0x00, 0x00, // 0x28
                             0x00, 0x00, 0x00, 0x00, // 0x2c
                             0x00, 0x00, 0x00, 0x00, // 0x30
                             0x00, 0x00, 0x00, 0x00  // 0x34
  )
  var vm = new GlulxVM()
  override def beforeEach {
    // clear the clearable area
    for (i <- 0x28 until 0x38) DummyMem(i) = 0
  }
  // 5 + (-1) == 4 (0xff is the signed byte constant -1)
  "GlulxVM" should "perform an add" in {
    vm.init(DummyMem, null)
    DummyMem(0x29) = 0x10 // add
    DummyMem(0x2a) = 0x11 // address mode 1 (const byte) * 2
    DummyMem(0x2b) = 0x08 // address mode 8 (stack)
    DummyMem(0x2c) = 0x05
    DummyMem(0x2d) = 0xff.asInstanceOf[Byte]
    DummyMem(0x2e) = 0x81.asInstanceOf[Byte] // quit instruction
    DummyMem(0x2f) = 0x20
    vm.executeTurn
    vm.popInt should be (4)
  }
  it should "perform an sub" in {
    vm.init(DummyMem, null)
    DummyMem(0x29) = 0x11 // sub
    DummyMem(0x2a) = 0x11 // address mode 1 (const byte) * 2
    DummyMem(0x2b) = 0x08 // address mode 8 (stack)
    DummyMem(0x2c) = 15
    DummyMem(0x2d) = 3
    DummyMem(0x2e) = 0x81.asInstanceOf[Byte] // quit instruction
    DummyMem(0x2f) = 0x20
    vm.executeTurn
    vm.popInt should be (12)
  }
  it should "perform mul" in {
    vm.init(DummyMem, null)
    DummyMem(0x29) = 0x12 // mul
    DummyMem(0x2a) = 0x11 // address mode 1 (const byte) * 2
    DummyMem(0x2b) = 0x08 // address mode 8 (stack)
    DummyMem(0x2c) = 15
    DummyMem(0x2d) = -3
    DummyMem(0x2e) = 0x81.asInstanceOf[Byte] // quit instruction
    DummyMem(0x2f) = 0x20
    vm.executeTurn
    vm.popInt should be (-45)
  }
  // 13 / 4 == 3 (integer division truncates)
  it should "perform div" in {
    vm.init(DummyMem, null)
    DummyMem(0x29) = 0x13 // div
    DummyMem(0x2a) = 0x11 // address mode 1 (const byte) * 2
    DummyMem(0x2b) = 0x08 // address mode 8 (stack)
    DummyMem(0x2c) = 13
    DummyMem(0x2d) = 4
    DummyMem(0x2e) = 0x81.asInstanceOf[Byte] // quit instruction
    DummyMem(0x2f) = 0x20
    vm.executeTurn
    vm.popInt should be (3)
  }
  it should "perform mod" in {
    vm.init(DummyMem, null)
    DummyMem(0x29) = 0x14 // mod
    DummyMem(0x2a) = 0x11 // address mode 1 (const byte) * 2
    DummyMem(0x2b) = 0x08 // address mode 8 (stack)
    DummyMem(0x2c) = 13
    DummyMem(0x2d) = 4
    DummyMem(0x2e) = 0x81.asInstanceOf[Byte] // quit instruction
    DummyMem(0x2f) = 0x20
    vm.executeTurn
    vm.popInt should be (1)
  }
  // neg is unary: single operand, result pushed on the stack.
  it should "perform neg" in {
    vm.init(DummyMem, null)
    DummyMem(0x29) = 0x15 // neg
    DummyMem(0x2a) = 0x81.asInstanceOf[Byte] // address mode 1 + address mode 8
    DummyMem(0x2b) = 4
    DummyMem(0x2c) = 0x81.asInstanceOf[Byte] // quit instruction
    DummyMem(0x2d) = 0x20
    vm.executeTurn
    vm.popInt should be (-4)
  }
  it should "perform bitand" in {
    vm.init(DummyMem, null)
    DummyMem(0x29) = 0x18 // bitand
    DummyMem(0x2a) = 0x11 // address mode 1 (const byte) * 2
    DummyMem(0x2b) = 0x08 // address mode 8 (stack)
    DummyMem(0x2c) = 0xff.asInstanceOf[Byte]
    DummyMem(0x2d) = 0x23
    DummyMem(0x2e) = 0x81.asInstanceOf[Byte] // quit instruction
    DummyMem(0x2f) = 0x20
    vm.executeTurn
    vm.popInt should be (0xffffffff & 0x23)
  }
  it should "perform bitor" in {
    vm.init(DummyMem, null)
    DummyMem(0x29) = 0x19 // bitor
    DummyMem(0x2a) = 0x11 // address mode 1 (const byte) * 2
    DummyMem(0x2b) = 0x08 // address mode 8 (stack)
    DummyMem(0x2c) = 0xff.asInstanceOf[Byte]
    DummyMem(0x2d) = 0x23
    DummyMem(0x2e) = 0x81.asInstanceOf[Byte] // quit instruction
    DummyMem(0x2f) = 0x20
    vm.executeTurn
    vm.popInt should be (0xffffffff | 0x23)
  }
  it should "perform bitxor" in {
    vm.init(DummyMem, null)
    DummyMem(0x29) = 0x1a // bitxor
    DummyMem(0x2a) = 0x11 // address mode 1 (const byte) * 2
    DummyMem(0x2b) = 0x08 // address mode 8 (stack)
    DummyMem(0x2c) = 0x03
    DummyMem(0x2d) = 0x01
    DummyMem(0x2e) = 0x81.asInstanceOf[Byte] // quit instruction
    DummyMem(0x2f) = 0x20
    vm.executeTurn
    vm.popInt should be (0x03 ^ 0x01)
  }
  it should "perform bitnot" in {
    vm.init(DummyMem, null)
    DummyMem(0x29) = 0x1b // bitnot
    DummyMem(0x2a) = 0x81.asInstanceOf[Byte] // address mode 1 + address mode 8
    DummyMem(0x2b) = 0x0f.asInstanceOf[Byte]
    DummyMem(0x2c) = 0x81.asInstanceOf[Byte] // quit instruction
    DummyMem(0x2d) = 0x20
    vm.executeTurn
    vm.popInt should be (~0x0f)
  }
}
| logicmoo/zmpp2 | zmpp-glulx/src/test/scala/org/zmpp/glulx/GlulxAluOpsTest.scala | Scala | bsd-3-clause | 7,153 |
package org.novetta.zoo.types
import org.json4s._
import org.json4s.jackson.JsonMethods._
/**
 * A unit of work to be dispatched: where to download the sample from, the file
 * name, the analysis tasks to run (task type -> argument list), and how many
 * attempts have been made so far.
 */
case class ZooWork(primaryURI: String, secondaryURI: String, filename: String, tasks: Map[String, List[String]], attempts: Int) {
  /**
   * Returns a copy of this work item with the given failure's task merged back
   * into the task map: the failure's work type is (re)added with its arguments,
   * replacing any existing entry of the same type. All other fields are
   * carried over unchanged.
   *
   * NOTE(review): `attempts` is copied without incrementing — confirm whether a
   * re-queued failure should bump the attempt counter.
   *
   * @param that the failed task to merge back into this work item
   * @return a new ZooWork including the failed task
   */
  def +(that: WorkFailure): ZooWork = {
    val newtasks = this.tasks + (that.WorkType -> that.Arguments)
    new ZooWork(
      primaryURI = this.primaryURI,
      secondaryURI = this.secondaryURI,
      filename = this.filename,
      tasks = newtasks,
      attempts = this.attempts
    )
  }
}
object Parsers {
  // A Parser turns a raw byte payload into a value of type T.
  type Parser[T] = (Array[Byte] => T)
  implicit val formats = DefaultFormats
  /**
  def jsonToChild:Parser[Child] = {
    json =>
      val result: Child = parse(new String(json)).extract[Child]
      result
  }
  **/
  /**
   * Deserializes a JSON byte array into an instance of T using json4s with
   * the default formats.
   *
   * NOTE(review): `new String(data)` decodes with the platform default charset —
   * confirm payloads are UTF-8 and consider an explicit charset.
   *
   * @param data the raw JSON bytes
   * @tparam T the target type to extract (Manifest required by json4s)
   * @return the extracted value of type T
   */
  def parseJ[T: Manifest](data: Array[Byte]): T = {
    val result: T = parse(new String(data)).extract[T]
    result
  }
}
| Novetta/totem | src/main/scala/org/novetta/zoo/types/RMQM.scala | Scala | bsd-3-clause | 2,138 |
/*
* Copyright 2011-2019 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.compiler
package operator
package core
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner
import java.io.{ DataInput, DataOutput }
import scala.collection.JavaConversions._
import org.apache.hadoop.io.Writable
import org.apache.spark.broadcast.Broadcast
import com.asakusafw.lang.compiler.model.description.ClassDescription
import com.asakusafw.lang.compiler.model.graph.CoreOperator
import com.asakusafw.lang.compiler.model.graph.CoreOperator.CoreOperatorKind
import com.asakusafw.runtime.model.DataModel
import com.asakusafw.runtime.value.{ DoubleOption, IntOption, LongOption }
import com.asakusafw.spark.compiler.spi.{ OperatorCompiler, OperatorType }
import com.asakusafw.spark.runtime.fragment.{ Fragment, GenericOutputFragment }
import com.asakusafw.spark.runtime.graph.BroadcastId
import com.asakusafw.spark.tools.asm._
// JUnit adapter: lets the ScalaTest spec below run under a JUnit runner
// (build tools / IDE JUnit integration).
@RunWith(classOf[JUnitRunner])
class ProjectionOperatorsCompilerSpecTest extends ProjectionOperatorsCompilerSpec
/**
 * Compiles the three projection-style core operators (PROJECT / EXTEND /
 * RESTRUCTURE) into fragment classes and verifies their runtime behavior by
 * pushing ten records through each compiled fragment.
 */
class ProjectionOperatorsCompilerSpec extends FlatSpec with UsingCompilerContext {
  import ProjectionOperatorsCompilerSpec._
  behavior of classOf[ProjectionOperatorsCompiler].getSimpleName
  // PROJECT: Output drops Input's `l` field, keeping only `i`.
  it should "compile Project operator" in {
    import Project._
    val operator = CoreOperator.builder(CoreOperatorKind.PROJECT)
      .input("input", ClassDescription.of(classOf[Input]))
      .output("output", ClassDescription.of(classOf[Output]))
      .build()
    implicit val context = newOperatorCompilerContext("flowId")
    val thisType = OperatorCompiler.compile(operator, OperatorType.ExtractType)
    val cls = context.loadClass[Fragment[Input]](thisType.getClassName)
    val out = new GenericOutputFragment[Output]()
    // The compiled fragment is constructed reflectively: (broadcasts, downstream).
    val fragment = cls
      .getConstructor(classOf[Map[BroadcastId, Broadcast[_]]], classOf[Fragment[_]])
      .newInstance(Map.empty, out)
    // A single mutable record is reused; the fragment must copy on add.
    val input = new Project.Input()
    for (i <- 0 until 10) {
      input.i.modify(i)
      input.l.modify(i)
      fragment.add(input)
    }
    out.iterator.zipWithIndex.foreach {
      case (output, i) =>
        assert(output.i.get === i)
    }
    fragment.reset()
  }
  // EXTEND: Output adds an `l` field that Input lacks; it must come out null.
  it should "compile Extend operator" in {
    import Extend._
    val operator = CoreOperator.builder(CoreOperatorKind.EXTEND)
      .input("input", ClassDescription.of(classOf[Input]))
      .output("output", ClassDescription.of(classOf[Output]))
      .build()
    implicit val context = newOperatorCompilerContext("flowId")
    val thisType = OperatorCompiler.compile(operator, OperatorType.ExtractType)
    val cls = context.loadClass[Fragment[Input]](thisType.getClassName)
    val out = new GenericOutputFragment[Output]()
    val fragment = cls
      .getConstructor(classOf[Map[BroadcastId, Broadcast[_]]], classOf[Fragment[_]])
      .newInstance(Map.empty, out)
    val input = new Input()
    for (i <- 0 until 10) {
      input.i.modify(i)
      fragment.add(input)
    }
    out.iterator.zipWithIndex.foreach {
      case (output, i) =>
        assert(output.i.get === i)
        // The extended field was never populated, so it must stay null.
        assert(output.l.isNull)
    }
    fragment.reset()
  }
  // RESTRUCTURE: Output drops `l` and adds `d`; `d` must come out null.
  it should "compile Restructure operator" in {
    import Restructure._
    val operator = CoreOperator.builder(CoreOperatorKind.RESTRUCTURE)
      .input("input", ClassDescription.of(classOf[Input]))
      .output("output", ClassDescription.of(classOf[Output]))
      .build()
    implicit val context = newOperatorCompilerContext("flowId")
    val thisType = OperatorCompiler.compile(operator, OperatorType.ExtractType)
    val cls = context.loadClass[Fragment[Input]](thisType.getClassName)
    val out = new GenericOutputFragment[Output]()
    val fragment = cls
      .getConstructor(classOf[Map[BroadcastId, Broadcast[_]]], classOf[Fragment[_]])
      .newInstance(Map.empty, out)
    // Also checks that reset() on a fresh fragment is harmless.
    fragment.reset()
    val input = new Input()
    for (i <- 0 until 10) {
      input.i.modify(i)
      fragment.add(input)
    }
    out.iterator.zipWithIndex.foreach {
      case (output, i) =>
        assert(output.i.get === i)
        assert(output.d.isNull)
    }
    fragment.reset()
  }
}
/**
 * Data-model fixtures for the spec above. Each Input/Output pair differs only
 * in its field set, which is what the projection operators manipulate.
 *
 * NOTE: `readFields` and `write` must visit fields in the same order — the
 * Writable wire format is positional.
 */
object ProjectionOperatorsCompilerSpec {
  object Project {
    // Source record with two fields (i, l); Project's output keeps only `i`.
    class Input extends DataModel[Input] with Writable {
      val i: IntOption = new IntOption()
      val l: LongOption = new LongOption()
      override def reset: Unit = {
        i.setNull()
        l.setNull()
      }
      override def copyFrom(other: Input): Unit = {
        i.copyFrom(other.i)
        l.copyFrom(other.l)
      }
      override def readFields(in: DataInput): Unit = {
        i.readFields(in)
        l.readFields(in)
      }
      override def write(out: DataOutput): Unit = {
        i.write(out)
        l.write(out)
      }
      // Bean-style accessors are required by the compiler's property discovery.
      def getIOption: IntOption = i
      def getLOption: LongOption = l
    }
    // Projected record: `l` dropped.
    class Output extends DataModel[Output] with Writable {
      val i: IntOption = new IntOption()
      override def reset: Unit = {
        i.setNull()
      }
      override def copyFrom(other: Output): Unit = {
        i.copyFrom(other.i)
      }
      override def readFields(in: DataInput): Unit = {
        i.readFields(in)
      }
      override def write(out: DataOutput): Unit = {
        i.write(out)
      }
      def getIOption: IntOption = i
    }
  }
  object Extend {
    // Source record with a single field `i`.
    class Input extends DataModel[Input] with Writable {
      val i: IntOption = new IntOption()
      override def reset: Unit = {
        i.setNull()
      }
      override def copyFrom(other: Input): Unit = {
        i.copyFrom(other.i)
      }
      override def readFields(in: DataInput): Unit = {
        i.readFields(in)
      }
      override def write(out: DataOutput): Unit = {
        i.write(out)
      }
      def getIOption: IntOption = i
    }
    // Extended record: adds `l`, which stays null after an Extend.
    class Output extends DataModel[Output] with Writable {
      val i: IntOption = new IntOption()
      val l: LongOption = new LongOption()
      override def reset: Unit = {
        i.setNull()
        l.setNull()
      }
      override def copyFrom(other: Output): Unit = {
        i.copyFrom(other.i)
        l.copyFrom(other.l)
      }
      override def readFields(in: DataInput): Unit = {
        i.readFields(in)
        l.readFields(in)
      }
      override def write(out: DataOutput): Unit = {
        i.write(out)
        l.write(out)
      }
      def getIOption: IntOption = i
      def getLOption: LongOption = l
    }
  }
  object Restructure {
    // Source record with fields (i, l).
    class Input extends DataModel[Input] with Writable {
      val i: IntOption = new IntOption()
      val l: LongOption = new LongOption()
      override def reset: Unit = {
        i.setNull()
        l.setNull()
      }
      override def copyFrom(other: Input): Unit = {
        i.copyFrom(other.i)
        l.copyFrom(other.l)
      }
      override def readFields(in: DataInput): Unit = {
        i.readFields(in)
        l.readFields(in)
      }
      override def write(out: DataOutput): Unit = {
        i.write(out)
        l.write(out)
      }
      def getIOption: IntOption = i
      def getLOption: LongOption = l
    }
    // Restructured record: drops `l`, adds `d` (stays null after Restructure).
    class Output extends DataModel[Output] with Writable {
      val i: IntOption = new IntOption()
      val d: DoubleOption = new DoubleOption()
      override def reset: Unit = {
        i.setNull()
        d.setNull()
      }
      override def copyFrom(other: Output): Unit = {
        i.copyFrom(other.i)
        d.copyFrom(other.d)
      }
      override def readFields(in: DataInput): Unit = {
        i.readFields(in)
        d.readFields(in)
      }
      override def write(out: DataOutput): Unit = {
        i.write(out)
        d.write(out)
      }
      def getIOption: IntOption = i
      def getDOption: DoubleOption = d
    }
  }
}
| ueshin/asakusafw-spark | compiler/src/test/scala/com/asakusafw/spark/compiler/operator/core/ProjectionOperatorsCompilerSpec.scala | Scala | apache-2.0 | 8,355 |
package org.scalaide.extensions.autoedits
import org.junit.runner.RunWith
import org.junit.runners.Suite
// Aggregates every auto-edit test into one JUnit suite so they can be run
// as a single unit. New auto-edit tests must be registered here.
@RunWith(classOf[Suite])
@Suite.SuiteClasses(Array(
  classOf[ConvertToUnicodeTest],
  classOf[SmartSemicolonInsertionTest],
  classOf[CloseCurlyBraceTest],
  classOf[JumpOverClosingCurlyBraceTest],
  classOf[RemoveCurlyBracePairTest],
  classOf[CloseParenthesisTest],
  classOf[CloseBracketTest],
  classOf[CloseAngleBracketTest],
  classOf[RemoveParenthesisPairTest],
  classOf[CreateMultiplePackageDeclarationsTest],
  classOf[ApplyTemplateTest],
  classOf[RemoveBracketPairTest],
  classOf[RemoveAngleBracketPairTest],
  classOf[JumpOverClosingParenthesisTest],
  classOf[JumpOverClosingBracketTest],
  classOf[JumpOverClosingAngleBracketTest],
  classOf[CloseStringTest],
  classOf[CloseCharTest],
  classOf[SurroundBlockTest]
))
class AutoEditTestSuite
| Kwestor/scala-ide | org.scala-ide.sdt.core.tests/src/org/scalaide/extensions/autoedits/AutoEditTestSuite.scala | Scala | bsd-3-clause | 866 |
package io.findify.sqsmock
import com.amazonaws.services.sqs.AmazonSQSClient
import com.amazonaws.services.sqs.model.CreateQueueRequest
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
import scala.collection.JavaConversions._
/**
* Created by shutty on 3/30/16.
*/
/** Verifies receive/delete semantics of the SQS mock, including that a deleted
  * message does not reappear after its visibility timeout expires. */
class ReceiveDeleteTest extends FlatSpec with Matchers with SQSStartStop {
  "sqs mock" should "receive & delete message" in {
    val queue = "http://localhost:8001/123/foo"
    val cr = new CreateQueueRequest("foo")
    // 1-second visibility timeout keeps the re-delivery window short for the test.
    // (Scala Map converts to the java.util.Map the SDK wants via JavaConversions.)
    cr.setAttributes(Map("VisibilityTimeout" -> "1"))
    client.createQueue(cr)
    client.sendMessage(queue, "hello_world")
    val received = client.receiveMessage(queue)
    assert(received.getMessages.nonEmpty)
    // While the message is in flight (invisible), a second receive sees nothing.
    assert(client.receiveMessage(queue).getMessages.isEmpty)
    client.deleteMessage(queue, received.getMessages.head.getReceiptHandle)
    // Wait past the 1s visibility timeout: a deleted message must NOT reappear.
    Thread.sleep(2000)
    assert(client.receiveMessage(queue).getMessages.isEmpty)
  }
}
| findify/sqsmock | src/test/scala/io/findify/sqsmock/ReceiveDeleteTest.scala | Scala | mit | 945 |
package typeclass.instances
import typeclass.Monoid
/** [[Monoid]] instance for `String`. */
object string {

  /**
   * Strings form a monoid under concatenation, with the empty string
   * as the identity element.
   */
  implicit val stringMonoid: Monoid[String] =
    new Monoid[String] {
      def empty: String = ""
      def combine(x: String, y: String): String = s"$x$y"
    }
}
package com.mogproject.mogami.core.io
/**
*
*/
/** Shared helpers for record I/O factories: splitting raw input into lines
  * and validating non-emptiness. */
trait IOFactoryLike {

  /**
   * Splits `s` on newlines and runs the resulting sequence through the
   * supplied normalization function.
   *
   * @param s               raw input text
   * @param normalizeString normalization applied to the split lines
   */
  final protected[io] def toLines(s: String, normalizeString: Seq[String] => Lines): Lines = {
    val rawLines = s.split("\\n").toIndexedSeq
    normalizeString(rawLines)
  }

  /**
   * Wraps `lines` as [[NonEmptyLines]].
   *
   * @throws RecordFormatException (at line 0) when `lines` is empty
   */
  final protected[io] def toNonEmptyLines(lines: Lines): NonEmptyLines = {
    if (lines.nonEmpty) NonEmptyLines(lines)
    else throw new RecordFormatException(0, "Empty input")
  }
}
| mogproject/mog-core-scala | shared/src/main/scala/com/mogproject/mogami/core/io/IOFactoryLike.scala | Scala | apache-2.0 | 386 |
package org.joda.time.chrono
import org.joda.time.DateTimeFieldType
import org.joda.time.DurationField
import org.joda.time.ReadablePartial
import org.joda.time.field.PreciseDurationDateTimeField
/**
 * Day-of-month field backed by a [[BasicChronology]]. Values are 1-based;
 * the maximum depends on the month (and, for February, the year).
 */
@SerialVersionUID(-4677223814028011723L)
class BasicDayOfMonthDateTimeField(private val iChronology: BasicChronology,
                                   days: DurationField)
    extends PreciseDurationDateTimeField(DateTimeFieldType.dayOfMonth(), days) {

  def get(instant: Long): Int = iChronology.getDayOfMonth(instant)

  def getRangeDurationField(): DurationField = iChronology.months()

  // Day-of-month numbering starts at 1, never 0.
  override def getMinimumValue(): Int = 1

  def getMaximumValue(): Int = iChronology.getDaysInMonthMax

  override def getMaximumValue(instant: Long): Int =
    iChronology.getDaysInMonthMax(instant)

  /**
   * Maximum day for a partial: uses year+month when both are present,
   * the month-only maximum when only the month is present, and the
   * overall maximum otherwise.
   */
  override def getMaximumValue(partial: ReadablePartial): Int =
    if (!partial.isSupported(DateTimeFieldType.monthOfYear())) {
      getMaximumValue
    } else {
      val month = partial.get(DateTimeFieldType.monthOfYear())
      if (partial.isSupported(DateTimeFieldType.year()))
        iChronology.getDaysInYearMonth(partial.get(DateTimeFieldType.year()), month)
      else
        iChronology.getDaysInMonthMax(month)
    }

  /**
   * Same lookup as above, but field values come from the parallel `values`
   * array rather than from the partial itself. The first month/year fields
   * found (in declaration order) win.
   */
  override def getMaximumValue(partial: ReadablePartial,
                               values: Array[Int]): Int = {
    val size = partial.size
    val monthIdx =
      (0 until size).indexWhere(i => partial.getFieldType(i) == DateTimeFieldType.monthOfYear())
    if (monthIdx < 0) {
      getMaximumValue
    } else {
      val month = values(monthIdx)
      val yearIdx =
        (0 until size).indexWhere(j => partial.getFieldType(j) == DateTimeFieldType.year())
      if (yearIdx < 0) iChronology.getDaysInMonthMax(month)
      else iChronology.getDaysInYearMonth(values(yearIdx), month)
    }
  }

  override protected def getMaximumValueForSet(instant: Long, value: Int): Int =
    iChronology.getDaysInMonthMaxForSet(instant, value)

  override def isLeap(instant: Long): Boolean = iChronology.isLeapDay(instant)

  // Serialization proxy: resolve to the chronology's canonical field instance.
  private def readResolve(): AnyRef = iChronology.dayOfMonth()
}
| mdedetrich/soda-time | shared/src/main/scala/org/joda/time/chrono/BasicDayOfMonthDateTimeField.scala | Scala | bsd-2-clause | 2,135 |
package kafka.metrics
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.junit.Test
import org.scalatest.junit.JUnit3Suite
import java.util.concurrent.TimeUnit
import junit.framework.Assert._
import com.yammer.metrics.core.{MetricsRegistry, Clock}
/** Tests that KafkaTimer records exactly the simulated wall-clock time of the
  * block it wraps, using a manually-advanced clock for determinism. */
class KafkaTimerTest extends JUnit3Suite {

  @Test
  def testKafkaTimer(): Unit = {
    val manualClock = new ManualClock
    val registry = new MetricsRegistry(manualClock)
    val underlying = registry.newTimer(this.getClass, "TestTimer")
    val kafkaTimer = new KafkaTimer(underlying)

    // Simulate exactly one second elapsing inside the timed block.
    kafkaTimer.time {
      manualClock.addMillis(1000)
    }

    assertEquals(1, underlying.getCount())
    assertTrue((underlying.getMax() - 1000).abs <= Double.Epsilon)
    assertTrue((underlying.getMin() - 1000).abs <= Double.Epsilon)
  }

  /** A Clock whose tick only advances when explicitly told to. */
  private class ManualClock extends Clock {

    private var ticksInNanos = 0L

    override def getTick(): Long = ticksInNanos

    override def getTime(): Long = TimeUnit.NANOSECONDS.toMillis(ticksInNanos)

    def addMillis(millis: Long): Unit = {
      ticksInNanos += TimeUnit.MILLISECONDS.toNanos(millis)
    }
  }
}
| akosiaris/kafka | core/src/test/scala/unit/kafka/metrics/KafkaTimerTest.scala | Scala | apache-2.0 | 1,831 |
package org.jetbrains.plugins.scala.codeInsight.intentions.caseClauses
import junit.framework.ComparisonFailure
import org.jetbrains.plugins.scala.codeInsight.intention.matcher.CreateCaseClausesIntention
import org.jetbrains.plugins.scala.codeInsight.intentions.ScalaIntentionTestBase
/**
* Nikolay.Tropin
* 22-May-17
*/
/** Tests the "create case clauses" intention: given a match on a sealed trait,
  * Java enum, or stdlib ADT, the intention should generate an exhaustive set
  * of case clauses. `<caret>` marks the cursor position in the fixtures. */
class CreateCaseClausesIntentionTest extends ScalaIntentionTestBase {
  override def familyName: String = new CreateCaseClausesIntention().getFamilyName
  // Sealed trait: non-case class A gets a type pattern, case class B an extractor.
  def testSealedTrait(): Unit = {
    val text =
      """sealed trait X
        |
        |class A(s: String) extends X
        |
        |case class B(s: String) extends X
        |
        |val x: X = ???
        |x match {<caret>}""".stripMargin
    val result =
      """sealed trait X
        |
        |class A(s: String) extends X
        |
        |case class B(s: String) extends X
        |
        |val x: X = ???
        |x match {<caret>
        |  case _: A =>
        |  case B(s) =>
        |}
      """.stripMargin
    doTest(text, result)
  }
  // Java enum: one case per constant, referenced as EnumType.CONSTANT.
  def testJavaEnum(): Unit = {
    val text =
      """
        |import java.nio.file.FileVisitResult
        |
        |val x: FileVisitResult = ???
        |x match {<caret>}
      """.stripMargin
    val result =
      """
        |import java.nio.file.FileVisitResult
        |
        |val x: FileVisitResult = ???
        |x match {<caret>
        |  case FileVisitResult.CONTINUE =>
        |  case FileVisitResult.TERMINATE =>
        |  case FileVisitResult.SKIP_SUBTREE =>
        |  case FileVisitResult.SKIP_SIBLINGS =>
        |}
      """.stripMargin
    doTest(text, result)
  }
  // List from the scala package: Nil plus a cons extractor. The cons parameter
  // name differs across Scala versions, hence the fallback on mismatch.
  def testFromScalaPackage(): Unit = {
    val text =
      """
        |val list: List[String] = ???
        |list match {<caret>}
      """.stripMargin
    val result =
      """
        |val list: List[String] = ???
        |list match {<caret>
        |  case Nil =>
        |  case ::(head, tl) =>
        |}
      """.stripMargin
    try {
      doTest(text, result)
    } catch {
      case c: ComparisonFailure =>
        doTest(text, result.replace("head", "hd")) //parameter name depends on scala version
    }
  }
}
| loskutov/intellij-scala | test/org/jetbrains/plugins/scala/codeInsight/intentions/caseClauses/CreateCaseClausesIntentionTest.scala | Scala | apache-2.0 | 2,176 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution
import monix.execution.annotations.{UnsafeBecauseImpure, UnsafeProtocol}
import monix.execution.atomic.PaddingStrategy
import monix.execution.atomic.PaddingStrategy.NoPadding
import monix.execution.internal.GenericSemaphore.Listener
import monix.execution.internal.GenericSemaphore
import monix.execution.schedulers.TrampolineExecutionContext.immediate
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.control.NonFatal
/** The `AsyncSemaphore` is an asynchronous semaphore implementation that
* limits the parallelism on `Future` execution.
*
* The following example instantiates a semaphore with a
* maximum parallelism of 10:
*
* {{{
* val semaphore = AsyncSemaphore(maxParallelism = 10)
*
* def makeRequest(r: HttpRequest): Future[HttpResponse] = ???
*
* // For such a task no more than 10 requests
* // are allowed to be executed in parallel.
* val future = semaphore.greenLight(() => makeRequest(???))
* }}}
*/
final class AsyncSemaphore private (provisioned: Long, ps: PaddingStrategy)
  extends GenericSemaphore[Cancelable](provisioned, ps) {

  require(provisioned >= 0, "provisioned >= 0")
  import AsyncSemaphore.executionContext

  // Token returned when an acquire is satisfied immediately — there is no
  // queued listener, so there is nothing to cancel.
  protected def emptyCancelable: Cancelable =
    Cancelable.empty
  // Wraps a queued listener `p` so that cancelling removes it from the wait
  // queue via `f`, preventing listener leaks on cancelled acquires.
  protected def makeCancelable(f: (Listener[Unit]) => Unit, p: Listener[Unit]): Cancelable =
    new Cancelable { def cancel() = f(p) }

  /** Returns the number of permits currently available. Always non-negative.
    *
    * The protocol is unsafe, the semaphore is used in concurrent settings
    * and thus the value returned isn't stable or reliable. Use with care.
    */
  @UnsafeProtocol
  @UnsafeBecauseImpure
  def available(): Long = unsafeAvailable()

  /** Obtains a snapshot of the current count. Can be negative.
    *
    * Like [[available]] when permits are available but returns the
    * number of permits callers are waiting for when there are no permits
    * available.
    */
  @UnsafeProtocol
  @UnsafeBecauseImpure
  def count(): Long = unsafeCount()

  /** Returns a new future, ensuring that the given source
    * acquires an available permit from the semaphore before
    * it is executed.
    *
    * The returned future also takes care of resource handling,
    * releasing its permit after being complete.
    *
    * @param f is a function returning the `Future` instance we
    *        want to evaluate after we get the permit from the
    *        semaphore
    */
  @UnsafeBecauseImpure
  def withPermit[A](f: () => Future[A]): CancelableFuture[A] =
    withPermitN(1)(f)

  /** Returns a new future, ensuring that the given source
    * acquires `n` available permits from the semaphore before
    * it is executed.
    *
    * The returned future also takes care of resource handling,
    * releasing its permits after being complete.
    *
    * @param n is the number of permits required for the given
    *        function to be executed
    *
    * @param f is a function returning the `Future` instance we
    *        want to evaluate after we get the permit from the
    *        semaphore
    */
  @UnsafeBecauseImpure
  def withPermitN[A](n: Long)(f: () => Future[A]): CancelableFuture[A] =
    acquireN(n).flatMap { _ =>
      // NonFatal guard: a function that throws synchronously is converted
      // into a failed future so the permits below are still released.
      val result =
        try f()
        catch { case NonFatal(e) => Future.failed(e) }
      // releaseN runs on completion regardless of success or failure.
      FutureUtils.transform[A, A](result, r => { releaseN(n); r })
    }

  /** Acquires a single permit. Alias for `[[acquireN]](1)`.
    *
    * @see [[withPermit]], the preferred way to acquire and release
    * @see [[acquireN]] for a version that can acquire multiple permits
    */
  @UnsafeBecauseImpure
  def acquire(): CancelableFuture[Unit] = acquireN(1)

  /** Acquires `n` permits.
    *
    * The returned effect semantically blocks until all requested permits are
    * available. Note that acquires are satisfied in strict FIFO order, so given
    * an `AsyncSemaphore` with 2 permits available, an `acquireN(3)` will
    * always be satisfied before a later call to `acquireN(1)`.
    *
    * @see [[withPermit]], the preferred way to acquire and release
    * @see [[acquire]] for a version acquires a single permit
    *
    * @param n number of permits to acquire - must be >= 0
    *
    * @return a future that will complete when the acquisition has succeeded
    *         or that can be cancelled, removing the listener from the queue
    *         (to prevent memory leaks in race conditions)
    */
  @UnsafeBecauseImpure
  def acquireN(n: Long): CancelableFuture[Unit] = {
    // Fast path: grab the permits without allocating a Promise.
    if (unsafeTryAcquireN(n)) {
      CancelableFuture.unit
    } else {
      val p = Promise[Unit]()
      unsafeAcquireN(n, Callback.fromPromise(p)) match {
        // Cancelable.empty signals the acquire completed synchronously
        // (no listener was queued), so there is nothing to wait on.
        case Cancelable.empty => CancelableFuture.unit
        case c => CancelableFuture(p.future, c)
      }
    }
  }

  /** Alias for `[[tryAcquireN]](1)`.
    *
    * The protocol is unsafe, because with the "try*" methods the user needs a
    * firm grasp of what race conditions are and how they manifest and usage of
    * such methods can lead to very fragile logic.
    *
    * @see [[tryAcquireN]] for the version that can acquire multiple permits
    * @see [[acquire]] for the version that can wait for acquisition
    * @see [[withPermit]] the preferred way to acquire and release
    */
  @UnsafeProtocol
  @UnsafeBecauseImpure
  def tryAcquire(): Boolean = tryAcquireN(1)

  /** Acquires `n` permits now and returns `true`, or returns `false`
    * immediately. Error if `n < 0`.
    *
    * The protocol is unsafe, because with the "try*" methods the user needs a
    * firm grasp of what race conditions are and how they manifest and usage of
    * such methods can lead to very fragile logic.
    *
    * @see [[tryAcquire]] for the alias that acquires a single permit
    * @see [[acquireN]] for the version that can wait for acquisition
    * @see [[withPermit]], the preferred way to acquire and release
    *
    * @param n number of permits to acquire - must be >= 0
    */
  @UnsafeProtocol
  @UnsafeBecauseImpure
  def tryAcquireN(n: Long): Boolean = unsafeTryAcquireN(n)

  /** Releases a permit, returning it to the pool.
    *
    * If there are consumers waiting on permits being available,
    * then the first in the queue will be selected and given
    * a permit immediately.
    *
    * @see [[withPermit]], the preferred way to acquire and release
    */
  @UnsafeBecauseImpure
  def release(): Unit = releaseN(1)

  /** Releases `n` permits, potentially unblocking up to `n`
    * outstanding acquires.
    *
    * @see [[withPermit]], the preferred way to acquire and release
    *
    * @param n number of permits to release - must be >= 0
    */
  @UnsafeBecauseImpure
  def releaseN(n: Long): Unit = unsafeReleaseN(n)

  /** Returns a future that will be complete when the specified
    * number of permits are available.
    *
    * The protocol is unsafe because by the time the returned
    * future completes, some other process might have already
    * acquired the available permits and thus usage of `awaitAvailable`
    * can lead to fragile concurrent logic. Use with care.
    *
    * Can be useful for termination logic, for example to execute
    * a piece of logic once all available permits have been released.
    *
    * @param n is the number of permits waited on
    */
  @UnsafeProtocol
  @UnsafeBecauseImpure
  def awaitAvailable(n: Long): CancelableFuture[Unit] = {
    val p = Promise[Unit]()
    unsafeAwaitAvailable(n, Callback.fromPromise(p)) match {
      // Already satisfied synchronously — no listener queued.
      case Cancelable.empty => CancelableFuture.unit
      case c => CancelableFuture(p.future, c)
    }
  }
}
object AsyncSemaphore {
  /** Builder for [[AsyncSemaphore]].
    *
    * @param provisioned is the number of permits initially available
    *
    * @param ps is an optional padding strategy for avoiding the
    *        "false sharing problem", a common JVM effect when multiple threads
    *        read and write in shared variables
    */
  def apply(provisioned: Long, ps: PaddingStrategy = NoPadding): AsyncSemaphore =
    new AsyncSemaphore(provisioned, ps)

  /** Used internally for flatMapping futures.
    *
    * The trampolined `immediate` context runs continuations on the calling
    * thread, avoiding thread hops for the semaphore's internal transforms.
    */
  private implicit def executionContext: ExecutionContext =
    immediate
}
| alexandru/monifu | monix-execution/shared/src/main/scala/monix/execution/AsyncSemaphore.scala | Scala | apache-2.0 | 8,921 |
/*
* Copyright 2009 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.json
import extensions._
import org.specs._
import scala.collection.immutable
class JsonSpec extends Specification {
"Json" should {
"quote strings" in {
"unicode within latin-1" in {
Json.quote("hello\\n\\u009f") mustEqual "\\"hello\\\\n\\\\u009f\\""
}
"unicode outside of latin-1 (the word Tokyo)" in {
Json.quote("\\u6771\\u4eac") mustEqual "\\"\\\\u6771\\\\u4eac\\""
}
"string containing unicode outside of the BMP (using UTF-16 surrogate pairs)" in {
// NOTE: The json.org spec is unclear on how to handle supplementary characters.
val ridiculous = new java.lang.StringBuilder()
ridiculous.appendCodePoint(0xfe03e)
Json.quote(ridiculous.toString) mustEqual "\\"\\\\udbb8\\\\udc3e\\""
}
"xml" in {
Json.quote("<xml>sucks</xml>") mustEqual "\\"<xml>sucks<\\\\/xml>\\""
}
"nested objects" in {
Json.build(Json.build(List(1, 2))).toString mustEqual "[1,2]"
// If this triggers, it means you are excessively escaping, sucker.
Json.build(Json.build(List(1, 2))).toString must_!= "\\"[1,2]\\""
}
}
"parse strings" in {
"double slashes like one finds in URLs" in {
Json.parse("""["hey! http:\\/\\/www.lollerskates.com"]""") mustEqual
List("hey! http://www.lollerskates.com")
}
"quoted newline" in {
Json.parse("""["hi\\njerk"]""") mustEqual
List("hi\\njerk")
}
"empty string" in {
Json.parse("""[""]""") mustEqual List("")
}
"quoted quote" in {
Json.parse("""["x\\"x"]""") mustEqual
List("x\\"x")
}
"accept unquoted DEL char, as isn't considered control char in Json spec" in {
//Json.parse("""["A^?B"]""") mustEqual List("A^?B")
Json.parse("[\\"A\\u007fB\\"]") mustEqual List("A\\u007fB")
}
"parse escaped string thing followed by whitespace" in {
Json.parse("[\\"\\\\u2603 q\\"]") mustEqual List("\\u2603 q")
Json.parse("[\\"\\\\t q\\"]") mustEqual List("\\t q")
}
"parse unicode outside of the BMP" in {
Json.parse("[\\"\\\\udbb8\\\\udc3e\\"]") mustEqual List(new String(Character.toChars(0x0FE03E)))
}
"does not strip leading whitespace" in {
Json.parse("""[" f"]""") mustEqual List(" f")
}
"parse escaped backspace at end of string" in {
Json.parse("""["\\\\", "\\\\"]""") mustEqual List("""\\""", """\\""")
}
"parse long string" in {
Json.parse("{ \\"long string\\":\\"" + (1 to 1000).map(x=>"That will be a long string").mkString + "\\" } ") must
not throwA(new Exception)
}
}
"parse numbers" in {
"floating point numbers" in {
Json.parse("[1.42]") mustEqual List(BigDecimal("1.42"))
}
"floating point with exponent" in {
Json.parse("[1.42e10]") mustEqual List(BigDecimal("1.42e10"))
}
"integer with exponent" in {
Json.parse("[42e10]") mustEqual List(BigDecimal("42e10"))
}
"integer numbers" in {
Json.parse("[42]") mustEqual List(42)
}
}
"parse maps" in {
"empty map" in {
Json.parse("{}") mustEqual Map()
}
"empty list" in {
Json.parse("{\\"nil\\":[]}") mustEqual Map("nil" -> Nil)
}
"empty map as value" in {
Json.parse("{\\"empty\\":{}}") mustEqual Map("empty" -> Map())
}
"simple map" in {
Json.parse("{\\"user_id\\": 1554, \\"message\\": \\"your phone is being turned off.\\"}") mustEqual
Map("user_id" -> 1554, "message" -> "your phone is being turned off.")
}
"simple map with long" in {
Json.parse("{\\"user_id\\": 1554, \\"status_id\\": 9015551486 }") mustEqual
Map("user_id" -> 1554, "status_id" -> 9015551486L)
}
"map with map" in {
Json.parse("{\\"name\\":\\"nathaniel\\",\\"status\\":{\\"text\\":\\"i like to dance!\\"," +
"\\"created_at\\":666},\\"zipcode\\":94103}") mustEqual
Map("name" -> "nathaniel",
"status" -> Map("text" -> "i like to dance!",
"created_at" -> 666),
"zipcode" -> 94103)
}
"map with list" in {
Json.parse("{\\"names\\":[\\"nathaniel\\",\\"brittney\\"]}") mustEqual
Map("names" -> List("nathaniel", "brittney"))
}
"map with two lists" in {
Json.parse("{\\"names\\":[\\"nathaniel\\",\\"brittney\\"],\\"ages\\":[4,7]}") mustEqual
Map("names" -> List("nathaniel", "brittney"),
"ages" -> List(4, 7))
}
"map with list, boolean and map" in {
Json.parse("{\\"names\\":[\\"nathaniel\\",\\"brittney\\"],\\"adults\\":false," +
"\\"ages\\":{\\"nathaniel\\":4,\\"brittney\\":7}}") mustEqual
Map("names" -> List("nathaniel", "brittney"),
"adults" -> false,
"ages" -> Map("nathaniel" -> 4,
"brittney" -> 7))
}
}
// Serialisation specs: Json.build renders Maps as JSON objects with keys in
// alphabetical order (see expected strings), and Lists/Arrays as JSON arrays.
// NOTE(review): the expected-string literals below contain doubled
// backslash-quote sequences exactly as found in this source dump; they are
// runtime strings and are left byte-identical.
"build maps" in {
  "empty map" in {
    Json.build(Map()).toString mustEqual "{}"
  }
  "empty list" in {
    Json.build(Map("nil" -> Nil)).toString mustEqual "{\\"nil\\":[]}"
  }
  "empty map as value" in {
    Json.build(Map("empty" -> Map())).toString mustEqual "{\\"empty\\":{}}"
  }
  "simple map" in {
    // Keys come out sorted: age, likes, name.
    Json.build(Map("name" -> "nathaniel",
                   "likes" -> "to dance",
                   "age" -> 4)).toString mustEqual
      "{\\"age\\":4,\\"likes\\":\\"to dance\\",\\"name\\":\\"nathaniel\\"}"
    Json.build(List(1, 2, 3)).toString mustEqual "[1,2,3]"
  }
  "simple map with long" in {
    // Long values must not be truncated or rendered in scientific notation.
    Json.build(Map("user_id" -> 1554, "status_id" -> 9015551486L)).toString mustEqual
      "{\\"status_id\\":9015551486,\\"user_id\\":1554}"
  }
  "Map with nested Map" in {
    Json.build(Map("name" -> "nathaniel",
                   "status" -> Map("text" -> "i like to dance!",
                                   "created_at" -> 666),
                   "zipcode" -> 94103)).toString mustEqual
      "{\\"name\\":\\"nathaniel\\",\\"status\\":{\\"created_at\\":666,\\"text\\":\\"i like to dance!\\"}," +
      "\\"zipcode\\":94103}"
  }
  "immutable maps" in {
    import scala.collection.immutable.Map
    "nested" in {
      Json.build(Map("name" -> "nathaniel",
                     "status" -> Map("created_at" -> 666, "text" -> "i like to dance!"),
                     "zipcode" -> 94103)).toString mustEqual
        "{\\"name\\":\\"nathaniel\\",\\"status\\":{\\"created_at\\":666,\\"text\\":\\"i like to dance!\\"}," +
        "\\"zipcode\\":94103}"
    }
    "appended" in {
      // Building the map incrementally with ++ must serialise the same as a literal.
      val statusMap = Map("status" -> Map("text" -> "i like to dance!",
                                          "created_at" -> 666))
      Json.build(Map.empty ++
                 Map("name" -> "nathaniel") ++
                 statusMap ++
                 Map("zipcode" -> 94103)).toString mustEqual
        "{\\"name\\":\\"nathaniel\\",\\"status\\":{\\"created_at\\":666,\\"text\\":\\"i like to dance!\\"}," +
        "\\"zipcode\\":94103}"
    }
  }
  "mutable maps" in {
    "nested" in {
      import scala.collection.mutable.Map
      "literal map" in {
        // Mutable maps are checked via a build/parse round trip rather than a
        // fixed string, since parse returns (immutable) collections that must
        // still compare equal to the original.
        val map = Map("name" -> "nathaniel",
                      "status" -> Map("text" -> "i like to dance!",
                                      "created_at" -> 666),
                      "zipcode" -> 94103)
        val output = Json.build(map).toString
        val rehydrated = Json.parse(output)
        rehydrated mustEqual map
      }
      "appended" in {
        val statusMap = Map("status" -> Map("text" -> "i like to dance!",
                                            "created_at" -> 666))
        val nestedMap = Map[String,Any]() ++
                        Map("name" -> "nathaniel") ++
                        statusMap ++
                        Map("zipcode" -> 94103)
        val output = Json.build(nestedMap).toString
        val rehydrated = Json.parse(output)
        rehydrated mustEqual nestedMap
      }
    }
  }
  "map with list" in {
    Json.build(Map("names" -> List("nathaniel", "brittney"))).toString mustEqual
      "{\\"names\\":[\\"nathaniel\\",\\"brittney\\"]}"
  }
  "map with two lists" in {
    Json.build(Map("names" -> List("nathaniel", "brittney"),
                   "ages" -> List(4, 7))).toString mustEqual
      "{\\"ages\\":[4,7],\\"names\\":[\\"nathaniel\\",\\"brittney\\"]}"
  }
  "map with list, boolean and map" in {
    Json.build(Map("names" -> List("nathaniel", "brittney"),
                   "adults" -> false,
                   "ages" -> Map("nathaniel" -> 4,
                                 "brittney" -> 7))).toString mustEqual
      "{\\"adults\\":false," +
      "\\"ages\\":{\\"brittney\\":7,\\"nathaniel\\":4}," +
      "\\"names\\":[\\"nathaniel\\",\\"brittney\\"]}"
  }
}
// Deserialisation specs for JSON arrays, including nesting and mixed content.
"parse lists" in {
  "empty list" in {
    Json.parse("[]") mustEqual Nil
  }
  "empty empty list" in {
    Json.parse("[[]]") mustEqual List(Nil)
  }
  "list with empty Map" in {
    Json.parse("[{}]") mustEqual List(Map())
  }
  "simple list" in {
    Json.parse("[\\"id\\", 1]") mustEqual List("id", 1)
  }
  "nested list" in {
    Json.parse("[\\"more lists!\\",[1,2,\\"three\\"]]") mustEqual
      List("more lists!", List(1, 2, "three"))
  }
  "list with map" in {
    Json.parse("[\\"maptastic!\\",{\\"1\\":2}]") mustEqual
      List("maptastic!", Map("1" -> 2))
  }
  "list with two maps" in {
    Json.parse("[{\\"1\\":2},{\\"3\\":4}]") mustEqual
      List(Map("1" -> 2), Map("3" -> 4))
  }
  "list with list, boolean, map" in {
    // NOTE(review): despite the test name, the input here is a JSON object,
    // not a list — looks copy-pasted from the map specs; confirm intent.
    Json.parse("{\\"names\\":[\\"nathaniel\\",\\"brittney\\"],\\"adults\\":false," +
               "\\"ages\\":{\\"nathaniel\\":4,\\"brittney\\":7}}") mustEqual
      Map("names" -> List("nathaniel", "brittney"),
          "adults" -> false,
          "ages" -> Map("nathaniel" -> 4,
                        "brittney" -> 7))
  }
  "list with map containing list" in {
    Json.parse("[{\\"1\\":[2,3]}]") mustEqual
      List(Map("1" -> List(2, 3)))
  }
  "list with map containing map" in {
    Json.parse("[{\\"1\\":{\\"2\\":\\"3\\"}}]") mustEqual
      List(Map("1" -> Map("2" -> "3")))
  }
  "list in the middle" in {
    Json.parse("""{"JobWithTasks":{"tasks":[{"Add":{"updated_at":12,"position":13}}],"error_count":1}}""") mustEqual
      Map("JobWithTasks" -> Map("tasks" -> List(Map("Add" -> Map("updated_at" -> 12, "position" -> 13))), "error_count" -> 1))
  }
}
"build lists" in {
  "empty empty list" in {
    Json.build(List(Nil)).toString mustEqual "[[]]"
  }
  "list with empty Map" in {
    Json.build(List(Map())).toString mustEqual "[{}]"
  }
  "simple list" in {
    Json.build(List("id", 1)).toString mustEqual "[\\"id\\",1]"
  }
  "nested list" in {
    Json.build(List("more lists!", List(1, 2, "three"))).toString mustEqual
      "[\\"more lists!\\",[1,2,\\"three\\"]]"
  }
  "list with map" in {
    Json.build(List("maptastic!", Map("1" -> 2))).toString mustEqual
      "[\\"maptastic!\\",{\\"1\\":2}]"
  }
  "list with two maps" in {
    Json.build(List(Map("1" -> 2), Map("3" -> 4))).toString mustEqual
      "[{\\"1\\":2},{\\"3\\":4}]"
  }
  "list with map containing list" in {
    Json.build(List(Map("1" -> List(2, 3)))).toString mustEqual
      "[{\\"1\\":[2,3]}]"
  }
  "list with map containing map" in {
    Json.build(List(Map("1" -> Map("2" -> "3")))).toString mustEqual
      "[{\\"1\\":{\\"2\\":\\"3\\"}}]"
  }
}
"build numbers" in {
  // Int, Long, Double and BigDecimal all serialise without loss of precision.
  Json.build(List(42, 23L, 1.67, BigDecimal("1.67456352431287348917591342E+50"))).toString mustEqual "[42,23,1.67,1.67456352431287348917591342E+50]";
  Json.build(List(0.0, 5.25)).toString mustEqual "[0.0,5.25]"
}
"arrays" in {
  "simple arrays can be encoded" in {
    Json.build(Array(0, 1)).toString mustEqual "[0,1]"
  }
  "nested" in {
    "inside of arrays" in {
      // Mixed element types force the scalar to be boxed as AnyRef.
      Json.build(Array(Array(0, 1), 2.asInstanceOf[AnyRef])).toString mustEqual "[[0,1],2]"
      Json.build(Array(Array(0, 1), Array(2, 3))).toString mustEqual
        "[[0,1],[2,3]]"
    }
    "inside of Lists" in {
      Json.build(List(Array(0, 1))).toString mustEqual "[[0,1]]"
      Json.build(List(Array(0, 1), Array(2, 3))).toString mustEqual "[[0,1],[2,3]]"
    }
  }
  "maps" in {
    "can contain arrays" in {
      Json.build(List(Map("1" -> Array(0, 2)))).toString mustEqual
        "[{\\"1\\":[0,2]}]"
    }
    "can be contained in arrays" in {
      Json.build(Array(Map("1" -> 2))).toString mustEqual "[{\\"1\\":2}]"
    }
  }
}
"build JsonSerializable objects" in {
  // Objects implementing JsonSerializable emit their toJson() output verbatim.
  val obj = new JsonSerializable {
    def toJson() = "\\"abracadabra\\""
  }
  Json.build(List(obj, 23)).toString mustEqual "[\\"abracadabra\\",23]"
}
}
}
| stevej/scala-json | src/test/scala/com/twitter/json/JsonSpec.scala | Scala | apache-2.0 | 13,917 |
package org.jetbrains.jps.incremental.scala
import _root_.java.io._
import java.net.InetAddress
import com.intellij.openapi.application.PathManager
import com.intellij.openapi.diagnostic.{Logger => JpsLogger}
import org.jetbrains.jps.ModuleChunk
import org.jetbrains.jps.builders.java.JavaBuilderUtil
import org.jetbrains.jps.incremental._
import org.jetbrains.jps.incremental.messages.ProgressMessage
import org.jetbrains.jps.incremental.scala.data.{CompilationData, CompilerData, SbtData}
import org.jetbrains.jps.incremental.scala.local.LocalServer
import org.jetbrains.jps.incremental.scala.model.{GlobalSettings, ProjectSettings}
import org.jetbrains.jps.incremental.scala.remote.RemoteServer
import org.jetbrains.jps.model.module.JpsModule
import _root_.scala.collection.JavaConverters._
/**
* Nikolay.Tropin
* 11/19/13
*/
/**
 * Entry point used by the JPS build process to compile Scala module chunks,
 * either in-process (local server) or through an external compile server.
 */
object ScalaBuilder {

  /**
   * Compiles the given chunk of modules.
   *
   * Resolves sbt interface data and compiler/compilation settings first; any
   * of those steps may fail with a human-readable message (the `Left` case),
   * in which case nothing is compiled.
   */
  def compile(context: CompileContext,
              chunk: ModuleChunk,
              sources: Seq[File],
              allSources: Seq[File],
              modules: Set[JpsModule],
              client: Client): Either[String, ModuleLevelBuilder.ExitCode] = {
    context.processMessage(new ProgressMessage("Reading compilation settings..."))
    for {
      sbtData <- sbtData
      compilerData <- CompilerData.from(context, chunk)
      compilationData <- CompilationData.from(sources, allSources, context, chunk)
    } yield {
      scalaLibraryWarning(modules, compilationData, client)
      val server = getServer(context)
      server.compile(sbtData, compilerData, compilationData, client)
    }
  }

  /** True if the chunk contains sbt build-definition modules (named "*-build"). */
  def hasBuildModules(chunk: ModuleChunk): Boolean = {
    // gen-idea doesn't use the sbt module type, so detect by naming convention.
    chunk.getModules.asScala.exists(_.getName.endsWith("-build"))
  }

  def projectSettings(context: CompileContext): ProjectSettings =
    SettingsManager.getProjectSettings(context.getProjectDescriptor.getProject)

  val Log: JpsLogger = JpsLogger.getInstance(ScalaBuilder.getClass.getName)

  // Cached in-process compile server; cleared whenever the remote server is chosen.
  private var cachedServer: Option[Server] = None

  private val lock = new Object()

  /** Returns the cached local server, creating it on first use (thread-safe). */
  def localServer: Server = {
    lock.synchronized {
      val server = cachedServer.getOrElse(new LocalServer())
      cachedServer = Some(server)
      server
    }
  }

  // Fix: replaced deprecated procedure syntax `def f() { ... }` with an
  // explicit `: Unit =` result type.
  private def cleanLocalServerCache(): Unit = {
    lock.synchronized {
      cachedServer = None
    }
  }

  // Lazily resolved sbt interface data; resolution may fail with a message,
  // which `compile` surfaces through its Either result.
  private lazy val sbtData = {
    val classLoader = getClass.getClassLoader
    val pluginRoot = new File(PathManager.getJarPathForClass(getClass)).getParentFile
    val javaClassVersion = System.getProperty("java.class.version")
    SbtData.from(classLoader, pluginRoot, javaClassVersion)
  }

  /** Warns when Scala sources are compiled without scala-library on the classpath. */
  private def scalaLibraryWarning(modules: Set[JpsModule], compilationData: CompilationData, client: Client): Unit = {
    val hasScalaFacet = modules.exists(SettingsManager.hasScalaSdk)
    val hasScalaLibrary = compilationData.classpath.exists(_.getName.startsWith("scala-library"))
    val hasScalaSources = compilationData.sources.exists(_.getName.endsWith(".scala"))
    if (hasScalaFacet && !hasScalaLibrary && hasScalaSources) {
      val names = modules.map(_.getName).mkString(", ")
      client.warning("No 'scala-library*.jar' in module dependencies [%s]".format(names))
    }
  }

  /** Remote compile server when enabled, otherwise the cached local server. */
  private def getServer(context: CompileContext): Server = {
    if (isCompileServerEnabled(context)) {
      cleanLocalServerCache()
      new RemoteServer(InetAddress.getByName(null), globalSettings(context).getCompileServerPort)
    } else {
      localServer
    }
  }

  def isCompileServerEnabled(context: CompileContext): Boolean =
    globalSettings(context).isCompileServerEnabled && isCompilationFromIDEA(context)

  // Hack to not run the compile server on TeamCity: the constant-search service
  // is only registered when the build was started from IDEA. Is there a better way?
  private def isCompilationFromIDEA(context: CompileContext): Boolean =
    JavaBuilderUtil.CONSTANT_SEARCH_SERVICE.get(context) != null

  private def globalSettings(context: CompileContext): GlobalSettings =
    SettingsManager.getGlobalSettings(context.getProjectDescriptor.getModel.getGlobal)
}
| jastice/intellij-scala | scala/compiler-jps/src/org/jetbrains/jps/incremental/scala/ScalaBuilder.scala | Scala | apache-2.0 | 4,039 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.datastream
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall, RelTraitSet}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.convert.ConverterRule
import org.apache.flink.table.functions.FunctionLanguage
import org.apache.flink.table.plan.nodes.FlinkConventions
import org.apache.flink.table.plan.nodes.datastream.DataStreamCalc
import org.apache.flink.table.plan.nodes.logical.FlinkLogicalCalc
import org.apache.flink.table.plan.schema.RowSchema
import org.apache.flink.table.plan.util.PythonUtil.containsFunctionOf
import scala.collection.JavaConverters._
/**
 * Converter rule that translates a [[FlinkLogicalCalc]] from the LOGICAL
 * convention into a [[DataStreamCalc]] in the DATASTREAM convention.
 */
class DataStreamCalcRule
  extends ConverterRule(
    classOf[FlinkLogicalCalc],
    FlinkConventions.LOGICAL,
    FlinkConventions.DATASTREAM,
    "DataStreamCalcRule") {

  override def matches(call: RelOptRuleCall): Boolean = {
    // Only applicable when the calc program contains no Python function calls;
    // those are handled by a dedicated Python calc rule.
    val logicalCalc = call.rel(0).asInstanceOf[FlinkLogicalCalc]
    logicalCalc.getProgram.getExprList.asScala.forall { expr =>
      !containsFunctionOf(expr, FunctionLanguage.PYTHON)
    }
  }

  def convert(rel: RelNode): RelNode = {
    val logicalCalc = rel.asInstanceOf[FlinkLogicalCalc]
    val dataStreamTraits: RelTraitSet = rel.getTraitSet.replace(FlinkConventions.DATASTREAM)
    val convertedInput: RelNode = RelOptRule.convert(logicalCalc.getInput, FlinkConventions.DATASTREAM)

    new DataStreamCalc(
      rel.getCluster,
      dataStreamTraits,
      convertedInput,
      new RowSchema(convertedInput.getRowType),
      new RowSchema(rel.getRowType),
      logicalCalc.getProgram,
      "DataStreamCalcRule")
  }
}

object DataStreamCalcRule {
  /** Singleton instance used when registering the rule set. */
  val INSTANCE: RelOptRule = new DataStreamCalcRule
}
| mbode/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/rules/datastream/DataStreamCalcRule.scala | Scala | apache-2.0 | 2,457 |
package scala.meta.cli
import scala.meta.internal.cli.Args
import scala.meta.internal.metai.Main
import scala.meta.io.Classpath
import scala.meta.metai.Result
import scala.meta.metai.Settings
/** Command-line entry point for the `metai` tool. */
object Metai {

  /** JVM entry point: exits with 0 on success, 1 on failure. */
  def main(args: Array[String]): Unit =
    sys.exit(process(args, Reporter()))

  /**
   * Expands argument files, parses settings and runs the tool.
   * Returns 1 when parsing or processing fails, 0 otherwise.
   */
  def process(args: Array[String], reporter: Reporter): Int =
    Settings.parse(Args.expand(args), reporter).fold(1) { settings =>
      if (process(settings, reporter).isSuccess) 0 else 1
    }

  /** Runs the tool with already-parsed settings. */
  def process(settings: Settings, reporter: Reporter): Result =
    new Main(settings, reporter).process()
}
| olafurpg/scalameta | semanticdb/metai/src/main/scala/scala/meta/cli/Metai.scala | Scala | bsd-3-clause | 752 |
package com.basrikahveci
package cardgame.messaging.request
import cardgame.messaging.Request
import cardgame.core.Session
import cardgame.domain.User
/**
 * Client request asking the server to start a game for the requesting user.
 * Carries no payload: handling delegates straight to the domain object.
 */
class StartGameRequest extends Request {
  def handle(session: Session, user: User) = user.startGame
}
| metanet/cardgame-server-scala | src/main/scala/com/basrikahveci/cardgame/messaging/request/StartGameRequest.scala | Scala | mit | 258 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.io.File
import java.util.Collections
import java.util.concurrent.TimeUnit
import kafka.common.Topic
import kafka.coordinator.OffsetConfig
import kafka.utils.{CoreUtils, TestUtils}
import kafka.zk.ZooKeeperTestHarness
import org.apache.kafka.clients.consumer.{ConsumerRecord, KafkaConsumer}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.config.SslConfigs
import org.apache.kafka.common.network.{ListenerName, Mode}
import org.apache.kafka.common.protocol.SecurityProtocol
import org.junit.Assert.assertEquals
import org.junit.{After, Before, Test}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.collection.JavaConverters._
/**
 * Integration test verifying that a broker can expose multiple listeners that
 * share a security protocol (two PLAINTEXT and two SSL listeners per broker),
 * and that clients can produce/consume through every one of them.
 */
class MultipleListenersWithSameSecurityProtocolTest extends ZooKeeperTestHarness {

  private val trustStoreFile = File.createTempFile("truststore", ".jks")
  // Per-test broker/client state, populated in setUp and torn down in tearDown.
  private val servers = new ArrayBuffer[KafkaServer]
  private val producers = mutable.Map[ListenerName, KafkaProducer[Array[Byte], Array[Byte]]]()
  private val consumers = mutable.Map[ListenerName, KafkaConsumer[Array[Byte], Array[Byte]]]()

  @Before
  override def setUp(): Unit = {
    super.setUp()
    // 2 brokers so that we can test that the data propagates correctly via UpdateMetadadaRequest
    val numServers = 2
    (0 until numServers).foreach { brokerId =>
      val props = TestUtils.createBrokerConfig(brokerId, zkConnect, trustStoreFile = Some(trustStoreFile))
      // Ensure that we can support multiple listeners per security protocol and multiple security protocols
      props.put(KafkaConfig.ListenersProp, "SECURE_INTERNAL://localhost:0, INTERNAL://localhost:0, " +
        "SECURE_EXTERNAL://localhost:0, EXTERNAL://localhost:0")
      props.put(KafkaConfig.ListenerSecurityProtocolMapProp, "INTERNAL:PLAINTEXT, SECURE_INTERNAL:SSL," +
        "EXTERNAL:PLAINTEXT, SECURE_EXTERNAL:SSL")
      props.put(KafkaConfig.InterBrokerListenerNameProp, "INTERNAL")
      props.putAll(TestUtils.sslConfigs(Mode.SERVER, false, Some(trustStoreFile), s"server$brokerId"))
      // set listener-specific configs and set an invalid path for the global config to verify that the overrides work
      Seq("SECURE_INTERNAL", "SECURE_EXTERNAL").foreach { listenerName =>
        props.put(new ListenerName(listenerName).configPrefix + SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG,
          props.get(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG))
      }
      props.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, "invalid/file/path")
      servers += TestUtils.createServer(KafkaConfig.fromProps(props))
    }

    val serverConfig = servers.head.config
    assertEquals(4, serverConfig.listeners.size)
    // Pre-create the consumer-offsets topic so group coordination works below.
    TestUtils.createTopic(zkUtils, Topic.GroupMetadataTopicName, OffsetConfig.DefaultOffsetsTopicNumPartitions,
      replicationFactor = 2, servers, servers.head.groupCoordinator.offsetsTopicConfigs)

    // One topic plus one producer/consumer pair per listener; topic name doubles
    // as the listener name so testProduceConsume can pair them up.
    serverConfig.listeners.foreach { endPoint =>
      val listenerName = endPoint.listenerName
      TestUtils.createTopic(zkUtils, listenerName.value, 2, 2, servers)
      val trustStoreFile =
        if (endPoint.securityProtocol == SecurityProtocol.SSL) Some(this.trustStoreFile)
        else None
      val bootstrapServers = TestUtils.bootstrapServers(servers, listenerName)
      producers(listenerName) = TestUtils.createNewProducer(bootstrapServers, acks = -1,
        securityProtocol = endPoint.securityProtocol, trustStoreFile = trustStoreFile)
      consumers(listenerName) = TestUtils.createNewConsumer(bootstrapServers, groupId = listenerName.value,
        securityProtocol = endPoint.securityProtocol, trustStoreFile = trustStoreFile)
    }
  }

  @After
  override def tearDown() {
    // Close clients before shutting brokers down, then delete the log dirs.
    producers.values.foreach(_.close())
    consumers.values.foreach(_.close())
    servers.foreach { s =>
      s.shutdown()
      CoreUtils.delete(s.config.logDirs)
    }
    super.tearDown()
  }

  /**
   * Tests that we can produce and consume to/from all broker-defined listeners and security protocols. We produce
   * with acks=-1 to ensure that replication is also working.
   */
  @Test
  def testProduceConsume(): Unit = {
    producers.foreach { case (listenerName, producer) =>
      val producerRecords = (1 to 10).map(i => new ProducerRecord(listenerName.value, s"key$i".getBytes,
        s"value$i".getBytes))
      // Block on every send to surface replication failures as test errors.
      producerRecords.map(producer.send).map(_.get(10, TimeUnit.SECONDS))

      val consumer = consumers(listenerName)
      consumer.subscribe(Collections.singleton(listenerName.value))
      val records = new ArrayBuffer[ConsumerRecord[Array[Byte], Array[Byte]]]
      TestUtils.waitUntilTrue(() => {
        records ++= consumer.poll(50).asScala
        records.size == producerRecords.size
      }, s"Consumed ${records.size} records until timeout instead of the expected ${producerRecords.size} records")
    }
  }
}
| ijuma/kafka | core/src/test/scala/integration/kafka/server/MultipleListenersWithSameSecurityProtocolTest.scala | Scala | apache-2.0 | 5,701 |
// Databricks notebook source
// Configuration for the Azure Cosmos DB Spark connector (cosmos.oltp).
// Replace the endpoint/key placeholders before running.
val cosmosEndpoint = "https://REPLACEME.documents.azure.com:443/"
val cosmosMasterKey = "REPLACEME"
val cosmosDatabaseName = "sampleDB"
val cosmosContainerName = "sampleContainer"

// Base connector configuration: no schema inference, so reads expose the raw
// document columns only.
val cfg = Map("spark.cosmos.accountEndpoint" -> cosmosEndpoint,
  "spark.cosmos.accountKey" -> cosmosMasterKey,
  "spark.cosmos.database" -> cosmosDatabaseName,
  "spark.cosmos.container" -> cosmosContainerName
)

// Same configuration plus automatic schema inference from sampled documents.
val cfgWithAutoSchemaInference = Map("spark.cosmos.accountEndpoint" -> cosmosEndpoint,
  "spark.cosmos.accountKey" -> cosmosMasterKey,
  "spark.cosmos.database" -> cosmosDatabaseName,
  "spark.cosmos.container" -> cosmosContainerName,
  "spark.cosmos.read.inferSchema.enabled" -> "true"
)

// COMMAND ----------

// create Cosmos Database and Cosmos Container using Catalog APIs
spark.conf.set(s"spark.sql.catalog.cosmosCatalog", "com.azure.cosmos.spark.CosmosCatalog")
spark.conf.set(s"spark.sql.catalog.cosmosCatalog.spark.cosmos.accountEndpoint", cosmosEndpoint)
spark.conf.set(s"spark.sql.catalog.cosmosCatalog.spark.cosmos.accountKey", cosmosMasterKey)

// create a cosmos database
spark.sql(s"CREATE DATABASE IF NOT EXISTS cosmosCatalog.${cosmosDatabaseName};")

// create a cosmos container
spark.sql(s"CREATE TABLE IF NOT EXISTS cosmosCatalog.${cosmosDatabaseName}.${cosmosContainerName} using cosmos.oltp " +
  s"TBLPROPERTIES(partitionKeyPath = '/id', manualThroughput = '1100')")

// COMMAND ----------

// ingestion
spark.createDataFrame(Seq(("cat-alive", "Schrodinger cat", 2, true), ("cat-dead", "Schrodinger cat", 2, false)))
  .toDF("id","name","age","isAlive")
  .write
  .format("cosmos.oltp")
  .options(cfg)
  .mode("APPEND")
  .save()

// COMMAND ----------

// Fix: the next two cells both declared `val df`, which only compiles because
// notebook cells are compiled separately; as a single Scala file it is a
// duplicate definition, and the shadowing silently decided which frame the
// final query ran against. Use distinct names instead.

// Show the schema of the table and data without auto schema inference
val dfRaw = spark.read.format("cosmos.oltp").options(cfg).load()
dfRaw.printSchema()
dfRaw.show()

// COMMAND ----------

// Show the schema of the table and data with auto schema inference
val dfInferred = spark.read.format("cosmos.oltp").options(cfgWithAutoSchemaInference).load()
dfInferred.printSchema()
dfInferred.show()

// COMMAND ----------

import org.apache.spark.sql.functions.col

// Query to find the live cat and increment age of the alive cat.
// Runs on the schema-inferred frame, matching the original notebook where the
// most recent `df` (the inferred one) was queried.
dfInferred.filter(col("isAlive") === true)
  .withColumn("age", col("age") + 1)
  .show()
| Azure/azure-sdk-for-java | sdk/cosmos/azure-cosmos-spark_3_2-12/Samples/Scala-Sample.scala | Scala | mit | 2,335 |
/*
Copyright 2014 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import com.twitter.scalding.typed.CoGrouped.distinctBy
import org.scalacheck.Properties
import org.scalacheck.Prop.forAll
/** Property-based checks for CoGrouped.distinctBy (keep first element per fn-image). */
object DistinctByProps extends Properties("CoGrouped.DistinctBy") {

  // Removing duplicates can only shrink the list, never grow it.
  property("distinctBy never increases size") = forAll { (l: List[Int], fn: Int => Byte) =>
    distinctBy(l)(fn).size <= l.size
  }

  // Exactly one representative survives per distinct fn-image.
  property("distinctBy.size == map(fn).toSet.size") = forAll { (l: List[Int], fn: Int => Byte) =>
    distinctBy(l)(fn).size == l.map(fn).toSet.size
  }

  // A constant (Unit-valued) fn collapses everything to at most one element.
  property("distinctBy to unit gives size 0 or 1") = forAll { (l: List[Int], fn: Int => Unit) =>
    val dsize = distinctBy(l)(fn).size
    ((dsize == 0) && l.isEmpty) || dsize == 1
  }

  // An injective fn (here: a fresh counter per call) keeps the list intact.
  // NOTE: relies on distinctBy invoking fn once per element, in order.
  property("distinctBy to different values never changes the list") = forAll { (l: List[Int]) =>
    var idx = 0
    val fn = { (i: Int) => idx += 1; idx }
    distinctBy(l)(fn) == l
  }

  // Set-equality with the groupBy formulation (order not compared here).
  property("distinctBy works like groupBy(fn).map(_._2.head).toSet") = forAll {
    (l: List[Int], fn: Int => Byte) =>
      distinctBy(l)(fn).toSet == l.groupBy(fn).map(_._2.head).toSet
  }

  // Order-sensitive reference implementation: keep the first occurrence of
  // each fn-image while scanning left to right.
  property("distinctBy matches a mutable implementation") = forAll { (l: List[Int], fn: Int => Byte) =>
    val dlist = distinctBy(l)(fn)
    var seen = Set[Byte]()
    l.flatMap { it =>
      if (seen(fn(it))) Nil
      else {
        seen += fn(it)
        List(it)
      }
    } == dlist
  }
}
| twitter/scalding | scalding-core/src/test/scala/com/twitter/scalding/DistinctByTest.scala | Scala | apache-2.0 | 1,927 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.eval
import monix.catnap.CancelableF
import monix.catnap.cancelables.BooleanCancelableF
import monix.execution.cancelables.BooleanCancelable
import monix.eval.internal.TaskConnectionRef
import monix.execution.ExecutionModel.SynchronousExecution
/**
 * Tests for TaskConnectionRef: a single-assignment cancelable reference.
 * Covers assignment-then-cancel for the three assignable shapes (Cancelable,
 * CancelableF, CancelToken[Task]) and the late-assignment path where the ref
 * is already canceled when a value is assigned. Assertions are interleaved
 * with s.tick() calls, so statement order is load-bearing throughout.
 */
object TaskConnectionRefSuite extends BaseTestSuite {
  test("assign and cancel a Cancelable") { implicit s =>
    var effect = 0
    val cr = TaskConnectionRef()
    val b = BooleanCancelable { () =>
      effect += 1
    }
    cr := b
    assert(!b.isCanceled, "!b.isCanceled")
    cr.cancel.runAsyncAndForget; s.tick()
    assert(b.isCanceled, "b.isCanceled")
    assert(effect == 1)
    // Second cancel is a no-op: the side effect must not run again.
    cr.cancel.runAsyncAndForget; s.tick()
    assert(effect == 1)
  }

  test("assign and cancel a CancelableF") { implicit s =>
    var effect = 0
    val cr = TaskConnectionRef()
    val b = CancelableF.wrap(Task { effect += 1 })
    cr := b
    // The wrapped Task is lazy: nothing runs until cancel is triggered.
    assertEquals(effect, 0)
    cr.cancel.runAsyncAndForget; s.tick()
    assert(effect == 1)
  }

  test("assign and cancel a CancelToken[Task]") { implicit s =>
    var effect = 0
    val cr = TaskConnectionRef()
    val b = Task { effect += 1 }
    cr := b
    assertEquals(effect, 0)
    cr.cancel.runAsyncAndForget; s.tick()
    assertEquals(effect, 1)
    cr.cancel.runAsyncAndForget; s.tick()
    assertEquals(effect, 1)
  }

  test("cancel a Cancelable on single assignment") { implicit s =>
    // Cancel first: a later assignment must be canceled eagerly.
    val cr = TaskConnectionRef()
    cr.cancel.runAsyncAndForget; s.tick()

    var effect = 0
    val b = BooleanCancelable { () =>
      effect += 1
    }
    cr := b
    assert(b.isCanceled)
    assertEquals(effect, 1)

    cr.cancel.runAsyncAndForget; s.tick()
    assertEquals(effect, 1)

    // Single-assignment contract: a second assignment throws, but the new
    // cancelable is still canceled (effect increments).
    val b2 = BooleanCancelable { () =>
      effect += 1
    }
    intercept[IllegalStateException] { cr := b2; () }
    assertEquals(effect, 2)
  }

  test("cancel a CancelableF on single assignment") { scheduler =>
    // SynchronousExecution so runToFuture completes immediately on this thread.
    implicit val s = scheduler.withExecutionModel(SynchronousExecution)

    val cr = TaskConnectionRef()
    cr.cancel.runAsyncAndForget; s.tick()

    var effect = 0
    val b = BooleanCancelableF(Task { effect += 1 }).runToFuture.value.get.get
    cr := b
    assert(b.isCanceled.runToFuture.value.get.get)
    assertEquals(effect, 1)

    cr.cancel.runAsyncAndForget; s.tick()
    assertEquals(effect, 1)

    val b2 = BooleanCancelableF(Task { effect += 1 }).runToFuture.value.get.get
    intercept[IllegalStateException] { cr := b2; () }
    assertEquals(effect, 2)
  }

  test("cancel a Task on single assignment") { implicit s =>
    val cr = TaskConnectionRef()
    cr.cancel.runAsyncAndForget; s.tick()

    var effect = 0
    val b = Task { effect += 1 }
    cr := b; s.tick()
    assertEquals(effect, 1)

    cr.cancel.runAsyncAndForget; s.tick()
    assertEquals(effect, 1)

    intercept[IllegalStateException] {
      cr := b
      ()
    }
    assertEquals(effect, 2)
  }
}
| alexandru/monifu | monix-eval/shared/src/test/scala/monix/eval/TaskConnectionRefSuite.scala | Scala | apache-2.0 | 3,579 |
package xml.claim
import app.PaymentTypes
import models.{DayMonthYear}
import models.domain.{Claim, _}
import models.view.{CachedClaim}
import org.specs2.mutable._
import utils.WithApplication
class IncomesSpec extends Specification {
section("unit")
"Incomes section xml generation" should {
// All income types selected: every header element should answer "Yes".
"Generate correct header xml items for Emp, SelfEmp, SickPay, PatMat, Fost, DP, Other" in new WithApplication {
  var claim = new Claim(CachedClaim.key, uuid = "1234")
  val claimDate = ClaimDate(DayMonthYear(20, 3, 2016))
  // SE, EMP, SICK, PATMATADOP, FOST, DP, OTHER
  val incomeHeader = new YourIncomes("Yes", "Yes", Some("true"), Some("true"), Some("true"), Some("true"), Some("true"), Some("true"))
  val xml = Incomes.xml(claim + claimDate + incomeHeader)
  (xml \\ "Incomes" \\ "Employed" \\ "QuestionLabel").text must contain("been an employee")
  (xml \\ "Incomes" \\ "Employed" \\ "Answer").text shouldEqual "Yes"
  (xml \\ "Incomes" \\ "SelfEmployed" \\ "QuestionLabel").text must contain("been self-employed")
  (xml \\ "Incomes" \\ "SelfEmployed" \\ "Answer").text shouldEqual "Yes"
  (xml \\ "Incomes" \\ "OtherPaymentQuestion" \\ "QuestionLabel").text shouldEqual "What other income have you had since 20 March 2016?"
  (xml \\ "Incomes" \\ "OtherPaymentQuestion" \\ "Answer").text shouldEqual "Some"
  (xml \\ "Incomes" \\ "SickPayment" \\ "QuestionLabel").text must contain("Sick Pay")
  (xml \\ "Incomes" \\ "SickPayment" \\ "Answer").text shouldEqual "Yes"
  (xml \\ "Incomes" \\ "PatMatAdopPayment" \\ "QuestionLabel").text must contain("Paternity")
  (xml \\ "Incomes" \\ "PatMatAdopPayment" \\ "QuestionLabel").text must contain("Maternity")
  (xml \\ "Incomes" \\ "PatMatAdopPayment" \\ "QuestionLabel").text must contain("Adoption")
  (xml \\ "Incomes" \\ "PatMatAdopPayment" \\ "Answer").text shouldEqual "Yes"
  (xml \\ "Incomes" \\ "FosteringPayment" \\ "QuestionLabel").text must contain("Fostering")
  (xml \\ "Incomes" \\ "FosteringPayment" \\ "Answer").text shouldEqual "Yes"
  (xml \\ "Incomes" \\ "DirectPayment" \\ "QuestionLabel").text must contain("Direct payment")
  (xml \\ "Incomes" \\ "DirectPayment" \\ "Answer").text shouldEqual "Yes"
  (xml \\ "Incomes" \\ "RentalIncome" \\ "QuestionLabel").text must contain("Rental income")
  (xml \\ "Incomes" \\ "RentalIncome" \\ "Answer").text shouldEqual "Yes"
  (xml \\ "Incomes" \\ "AnyOtherPayment" \\ "QuestionLabel").text must contain("other income")
  (xml \\ "Incomes" \\ "AnyOtherPayment" \\ "Answer").text shouldEqual "Yes"
}

// "None" ticked: other-income answer is "None" and NoOtherPayment is "Yes".
"Generate correct header xml items for None Selected" in new WithApplication {
  var claim = new Claim(CachedClaim.key, uuid = "1234")
  val claimDate = ClaimDate(DayMonthYear(20, 3, 2016))
  // SE, EMP, SICK, PATMATADOP, FOST, DP, OTHER, NONE
  val incomeHeader = new YourIncomes("No", "No", None, None, None, None, None, None, Some("true"))
  val xml = Incomes.xml(claim + claimDate + incomeHeader)
  (xml \\ "Incomes" \\ "Employed" \\ "QuestionLabel").text must contain("been an employee")
  (xml \\ "Incomes" \\ "Employed" \\ "Answer").text shouldEqual "No"
  (xml \\ "Incomes" \\ "SelfEmployed" \\ "QuestionLabel").text must contain("been self-employed")
  (xml \\ "Incomes" \\ "SelfEmployed" \\ "Answer").text shouldEqual "No"
  (xml \\ "Incomes" \\ "OtherPaymentQuestion" \\ "QuestionLabel").text shouldEqual "What other income have you had since 20 March 2016?"
  (xml \\ "Incomes" \\ "OtherPaymentQuestion" \\ "Answer").text shouldEqual "None"
  (xml \\ "Incomes" \\ "NoOtherPayment" \\ "QuestionLabel").text shouldEqual "None"
  (xml \\ "Incomes" \\ "NoOtherPayment" \\ "Answer").text shouldEqual "Yes"
}

// Sick Pay details: payment no longer in progress, so last-paid date is emitted.
"Generate correct xml for Sick Pay Section" in new WithApplication {
  var claim = new Claim(CachedClaim.key, uuid = "1234")
  val incomeHeader = new YourIncomes("No", "No", Some("true"), Some("false"), Some("false"), Some("false"), Some("false"), Some("false"))
  val sickPay = new StatutorySickPay("No", Some(DayMonthYear(31, 1, 2016)), "Asda", "10.00", "Weekly", None)
  val xml = Incomes.xml(claim + incomeHeader + sickPay)
  (xml \\ "Incomes" \\ "SickPayment" \\ "Answer").text shouldEqual "Yes"
  (xml \\ "Incomes" \\ "SickPay" \\ "StillBeingPaidThisPay" \\ "QuestionLabel").text should contain("still being paid Statutory Sick Pay")
  (xml \\ "Incomes" \\ "SickPay" \\ "StillBeingPaidThisPay" \\ "Answer").text shouldEqual "No"
  (xml \\ "Incomes" \\ "SickPay" \\ "WhenDidYouLastGetPaid" \\ "QuestionLabel").text should contain("last paid")
  (xml \\ "Incomes" \\ "SickPay" \\ "WhenDidYouLastGetPaid" \\ "Answer").text shouldEqual "31-01-2016"
  (xml \\ "Incomes" \\ "SickPay" \\ "AmountOfThisPay" \\ "QuestionLabel").text should contain("Amount")
  (xml \\ "Incomes" \\ "SickPay" \\ "AmountOfThisPay" \\ "Answer").text shouldEqual "10.00"
  (xml \\ "Incomes" \\ "SickPay" \\ "WhoPaidYouThisPay" \\ "QuestionLabel").text should contain("Who paid you Statutory Sick Pay")
  (xml \\ "Incomes" \\ "SickPay" \\ "WhoPaidYouThisPay" \\ "Answer").text shouldEqual "Asda"
  (xml \\ "Incomes" \\ "SickPay" \\ "HowOftenPaidThisPay" \\ "QuestionLabel").text should contain("How often")
  (xml \\ "Incomes" \\ "SickPay" \\ "HowOftenPaidThisPay" \\ "Answer").text shouldEqual "Weekly"
  // "Other" frequency was None, so the optional element must be absent.
  (xml \\ "Incomes" \\ "SickPay" \\ "HowOftenPaidThisPayOther").length shouldEqual 0
}

// Maternity/Paternity pay details.
"Generate correct xml for PatMatAdopt Pay Section" in new WithApplication {
  var claim = new Claim(CachedClaim.key, uuid = "1234")
  val incomeHeader = new YourIncomes("No", "No", Some("false"), Some("true"), Some("false"), Some("false"), Some("false"), Some("false"))
  val patMatAdoptPay = new StatutoryMaternityPaternityAdoptionPay(PaymentTypes.MaternityPaternity, "No", Some(DayMonthYear(31, 3, 2016)), "Tesco", "50.01", "Weekly", None)
  val xml = Incomes.xml(claim + incomeHeader + patMatAdoptPay)
  (xml \\ "Incomes" \\ "PatMatAdopPayment" \\ "Answer").text shouldEqual "Yes"
  (xml \\ "Incomes" \\ "StatutoryMaternityPaternityAdopt" \\ "PaymentTypesForThisPay" \\ "QuestionLabel").text should contain("Which are you paid")
  (xml \\ "Incomes" \\ "StatutoryMaternityPaternityAdopt" \\ "PaymentTypesForThisPay" \\ "Answer").text should contain("Maternity or Paternity Pay")
  (xml \\ "Incomes" \\ "StatutoryMaternityPaternityAdopt" \\ "StillBeingPaidThisPay" \\ "QuestionLabel").text should contain("still being paid this")
  (xml \\ "Incomes" \\ "StatutoryMaternityPaternityAdopt" \\ "StillBeingPaidThisPay" \\ "Answer").text shouldEqual "No"
  (xml \\ "Incomes" \\ "StatutoryMaternityPaternityAdopt" \\ "WhenDidYouLastGetPaid" \\ "QuestionLabel").text should contain("last paid")
  (xml \\ "Incomes" \\ "StatutoryMaternityPaternityAdopt" \\ "WhenDidYouLastGetPaid" \\ "Answer").text shouldEqual "31-03-2016"
  (xml \\ "Incomes" \\ "StatutoryMaternityPaternityAdopt" \\ "AmountOfThisPay" \\ "QuestionLabel").text should contain("Amount")
  (xml \\ "Incomes" \\ "StatutoryMaternityPaternityAdopt" \\ "AmountOfThisPay" \\ "Answer").text shouldEqual "50.01"
  (xml \\ "Incomes" \\ "StatutoryMaternityPaternityAdopt" \\ "WhoPaidYouThisPay" \\ "QuestionLabel").text should contain("Who paid you this")
  (xml \\ "Incomes" \\ "StatutoryMaternityPaternityAdopt" \\ "WhoPaidYouThisPay" \\ "Answer").text shouldEqual "Tesco"
  (xml \\ "Incomes" \\ "StatutoryMaternityPaternityAdopt" \\ "HowOftenPaidThisPay" \\ "QuestionLabel").text should contain("How often")
  (xml \\ "Incomes" \\ "StatutoryMaternityPaternityAdopt" \\ "HowOftenPaidThisPay" \\ "Answer").text shouldEqual "Weekly"
  (xml \\ "Incomes" \\ "StatutoryMaternityPaternityAdopt" \\ "HowOftenPaidThisPayOther").length shouldEqual 0
}

// Same section with the Adoption payment type selected.
"Generate correct xml for PatMatAdopt Pay Section with Adoption Selected" in new WithApplication {
  var claim = new Claim(CachedClaim.key, uuid = "1234")
  val incomeHeader = new YourIncomes("No", "No", Some("false"), Some("true"), Some("false"), Some("false"), Some("false"), Some("false"))
  val patMatAdoptPay = new StatutoryMaternityPaternityAdoptionPay(PaymentTypes.Adoption, "No", Some(DayMonthYear(31, 3, 2016)), "Tesco", "50.01", "Weekly", None)
  val xml = Incomes.xml(claim + incomeHeader + patMatAdoptPay)
  (xml \\ "Incomes" \\ "PatMatAdopPayment" \\ "Answer").text shouldEqual "Yes"
  (xml \\ "Incomes" \\ "StatutoryMaternityPaternityAdopt" \\ "PaymentTypesForThisPay" \\ "QuestionLabel").text should contain("Which are you paid")
  (xml \\ "Incomes" \\ "StatutoryMaternityPaternityAdopt" \\ "PaymentTypesForThisPay" \\ "Answer").text should contain("Adoption Pay")
}
// Exercises every element of the FosteringAllowance XML section when the payer
// is a Local Authority. With a concrete payment type selected, the free-text
// "PaymentTypesForThisPayOther" element must be absent (length 0).
"Generate correct xml for Foster Pay Section for Paid By Local Authority" in new WithApplication {
  var claim = new Claim(CachedClaim.key, uuid = "1234")
  // Income header: fostering allowance is the only income flag set to "true".
  val incomeHeader = new YourIncomes("No", "No", Some("false"), Some("false"), Some("true"), Some("false"), Some("false"), Some("false"))
  val foster = new FosteringAllowance(PaymentTypes.LocalAuthority, None, "No", Some(DayMonthYear(31, 3, 2016)), "LCC", "50.01", "Weekly", None)
  val xml = Incomes.xml(claim + incomeHeader + foster)
  (xml \\ "Incomes" \\ "FosteringPayment" \\ "Answer").text shouldEqual "Yes"
  (xml \\ "Incomes" \\ "FosteringAllowance" \\ "PaymentTypesForThisPay" \\ "QuestionLabel").text should contain("What type of organisation pays you for Fostering")
  (xml \\ "Incomes" \\ "FosteringAllowance" \\ "PaymentTypesForThisPay" \\ "Answer").text should contain("Local Authority")
  (xml \\ "Incomes" \\ "FosteringAllowance" \\ "PaymentTypesForThisPayOther").length shouldEqual 0
  (xml \\ "Incomes" \\ "FosteringAllowance" \\ "StillBeingPaidThisPay" \\ "QuestionLabel").text should contain("still being paid this")
  (xml \\ "Incomes" \\ "FosteringAllowance" \\ "StillBeingPaidThisPay" \\ "Answer").text shouldEqual "No"
  (xml \\ "Incomes" \\ "FosteringAllowance" \\ "WhenDidYouLastGetPaid" \\ "QuestionLabel").text should contain("last paid")
  // Dates are rendered in dd-MM-yyyy form in the generated XML.
  (xml \\ "Incomes" \\ "FosteringAllowance" \\ "WhenDidYouLastGetPaid" \\ "Answer").text shouldEqual "31-03-2016"
  (xml \\ "Incomes" \\ "FosteringAllowance" \\ "AmountOfThisPay" \\ "QuestionLabel").text should contain("Amount")
  (xml \\ "Incomes" \\ "FosteringAllowance" \\ "AmountOfThisPay" \\ "Answer").text shouldEqual "50.01"
  (xml \\ "Incomes" \\ "FosteringAllowance" \\ "WhoPaidYouThisPay" \\ "QuestionLabel").text should contain("Who paid you this")
  (xml \\ "Incomes" \\ "FosteringAllowance" \\ "WhoPaidYouThisPay" \\ "Answer").text shouldEqual "LCC"
  (xml \\ "Incomes" \\ "FosteringAllowance" \\ "HowOftenPaidThisPay" \\ "QuestionLabel").text should contain("How often")
  (xml \\ "Incomes" \\ "FosteringAllowance" \\ "HowOftenPaidThisPay" \\ "Answer").text shouldEqual "Weekly"
  (xml \\ "Incomes" \\ "FosteringAllowance" \\ "HowOftenPaidThisPayOther").length shouldEqual 0
}
"Generate correct xml for Foster Pay Section for Paid By Foster Agency" in new WithApplication {
  var claim = new Claim(CachedClaim.key, uuid = "1234")
  val incomeHeader = new YourIncomes("No", "No", Some("false"), Some("false"), Some("true"), Some("false"), Some("false"), Some("false"))
  val foster = new FosteringAllowance(PaymentTypes.FosteringAllowance, None, "No", Some(DayMonthYear(31, 3, 2016)), "LCC", "50.01", "Weekly", None)
  val xml = Incomes.xml(claim + incomeHeader + foster)
  // Shorthand for child elements of the FosteringAllowance section.
  def fosterNode(label: String) = xml \\ "Incomes" \\ "FosteringAllowance" \\ label
  (xml \\ "Incomes" \\ "FosteringPayment" \\ "Answer").text shouldEqual "Yes"
  (fosterNode("PaymentTypesForThisPay") \\ "QuestionLabel").text should contain("What type of organisation pays you for Fostering")
  (fosterNode("PaymentTypesForThisPay") \\ "Answer").text should contain("Fostering Agency")
  // A concrete payment type means no free-text "other" element is emitted.
  fosterNode("PaymentTypesForThisPayOther").length shouldEqual 0
}
"Generate correct xml for Foster Pay Section for Paid By Other" in new WithApplication {
  var claim = new Claim(CachedClaim.key, uuid = "1234")
  val incomeHeader = new YourIncomes("No", "No", Some("false"), Some("false"), Some("true"), Some("false"), Some("false"), Some("false"))
  // PaymentTypes.Other carries the free-text payer description in the Option.
  val foster = new FosteringAllowance(PaymentTypes.Other, Some("Foster charity paid"), "No", Some(DayMonthYear(31, 3, 2016)), "LCC", "50.01", "Weekly", None)
  val xml = Incomes.xml(claim + incomeHeader + foster)
  def fosterNode(label: String) = xml \\ "Incomes" \\ "FosteringAllowance" \\ label
  (xml \\ "Incomes" \\ "FosteringPayment" \\ "Answer").text shouldEqual "Yes"
  (fosterNode("PaymentTypesForThisPay") \\ "QuestionLabel").text should contain("What type of organisation pays you for Fostering")
  (fosterNode("PaymentTypesForThisPay") \\ "Answer").text should contain("Other")
  (fosterNode("PaymentTypesForThisPayOther") \\ "QuestionLabel").text should contain("Who paid you Fostering Allowance")
  (fosterNode("PaymentTypesForThisPayOther") \\ "Answer").text should contain("Foster charity paid")
}
// Exercises the full DirectPay XML section: yes/no marker element
// ("DirectPayment") plus all detail elements under "DirectPay".
"Generate correct xml for DirectPay Section" in new WithApplication {
  var claim = new Claim(CachedClaim.key, uuid = "1234")
  val claimDate = ClaimDate(DayMonthYear(10, 2, 2016))
  // Income header: direct payment is the only income flag set to "true".
  val incomeHeader = new YourIncomes("No", "No", Some("false"), Some("false"), Some("false"), Some("true"), Some("false"), Some("false"))
  val directPayment = new DirectPayment("No", Some(DayMonthYear(29, 2, 2016)), "Disabled person", "25.00", "Weekly", None)
  val xml = Incomes.xml(claim + claimDate + incomeHeader + directPayment)
  (xml \\ "Incomes" \\ "DirectPayment" \\ "Answer").text shouldEqual "Yes"
  (xml \\ "Incomes" \\ "DirectPay" \\ "StillBeingPaidThisPay" \\ "QuestionLabel").text should contain("still being paid this")
  (xml \\ "Incomes" \\ "DirectPay" \\ "StillBeingPaidThisPay" \\ "Answer").text shouldEqual "No"
  (xml \\ "Incomes" \\ "DirectPay" \\ "WhenDidYouLastGetPaid" \\ "QuestionLabel").text should contain("last paid")
  // Dates are rendered in dd-MM-yyyy form in the generated XML.
  (xml \\ "Incomes" \\ "DirectPay" \\ "WhenDidYouLastGetPaid" \\ "Answer").text shouldEqual "29-02-2016"
  (xml \\ "Incomes" \\ "DirectPay" \\ "AmountOfThisPay" \\ "QuestionLabel").text should contain("Amount")
  (xml \\ "Incomes" \\ "DirectPay" \\ "AmountOfThisPay" \\ "Answer").text shouldEqual "25.00"
  (xml \\ "Incomes" \\ "DirectPay" \\ "WhoPaidYouThisPay" \\ "QuestionLabel").text should contain("Who paid you this")
  (xml \\ "Incomes" \\ "DirectPay" \\ "WhoPaidYouThisPay" \\ "Answer").text shouldEqual "Disabled person"
  (xml \\ "Incomes" \\ "DirectPay" \\ "HowOftenPaidThisPay" \\ "QuestionLabel").text should contain("How often")
  (xml \\ "Incomes" \\ "DirectPay" \\ "HowOftenPaidThisPay" \\ "Answer").text shouldEqual "Weekly"
  // "Weekly" is a standard frequency, so no free-text frequency element.
  (xml \\ "Incomes" \\ "DirectPay" \\ "HowOftenPaidThisPayOther").length shouldEqual 0
}
"Generate correct xml for RentalIncome Section" in new WithApplication {
  var claim = new Claim(CachedClaim.key, uuid = "1234")
  val claimDate = ClaimDate(DayMonthYear(10, 2, 2016))
  val incomeHeader = new YourIncomes("No", "No", Some("false"), Some("false"), Some("false"), Some("false"), Some("true"), Some("false"))
  val rentalIncome = new RentalIncome("Some rent money paid by tenant")
  val xml = Incomes.xml(claim + claimDate + incomeHeader + rentalIncome)
  // The question label embeds the claim date ("since 10 February 2016").
  val rentalInfo = xml \\ "Incomes" \\ "RentalIncomeInfo"
  (xml \\ "Incomes" \\ "RentalIncome" \\ "Answer").text shouldEqual "Yes"
  (rentalInfo \\ "QuestionLabel").text should contain("What rental income have you had since 10 February 2016?")
  (rentalInfo \\ "Answer").text shouldEqual "Some rent money paid by tenant"
}
"Generate correct xml for OtherPay Section" in new WithApplication {
  var claim = new Claim(CachedClaim.key, uuid = "1234")
  val claimDate = ClaimDate(DayMonthYear(10, 2, 2016))
  val incomeHeader = new YourIncomes("No", "No", Some("false"), Some("false"), Some("false"), Some("false"), Some("false"), Some("true"))
  val otherPayment = new OtherPayments("Was paid some money by carees brother")
  val xml = Incomes.xml(claim + claimDate + incomeHeader + otherPayment)
  // The question label embeds the claim date ("since 10 February 2016").
  val otherInfo = xml \\ "Incomes" \\ "OtherPaymentsInfo"
  (xml \\ "Incomes" \\ "AnyOtherPayment" \\ "Answer").text shouldEqual "Yes"
  (otherInfo \\ "QuestionLabel").text should contain("What other income have you had since 10 February 2016?")
  (otherInfo \\ "Answer").text shouldEqual "Was paid some money by carees brother"
}
}
section("unit")
}
| Department-for-Work-and-Pensions/ClaimCapture | c3/test/xml/claim/IncomesSpec.scala | Scala | mit | 16,886 |
/* libnp
* Copyright (c) 2013, Lloyd T. Elliott and Yee Whye Teh
*/
package libnp.random
import libnp.statistics.Generator
/** A random variable holding a value of type T.
  *
  * Implementations expose the current value (get/set), the log density of the
  * current value, and mutate, which returns a variable for the supplied value
  * (presumably without changing this one -- contract defined by implementations).
  */
trait variable[T] extends Serializable {
  // Concrete subtype, so implementations can refer to their own precise type.
  type Self <: variable[T]
  def logDensity(): Double
  def get(): T
  def set(x: T): Unit
  def mutate(x: T): variable[T]
}
/** A variable whose value can be reset; what "reset" restores is defined by
  * each implementation (not visible here). */
trait resetable[T] extends variable[T] {
  def reset(): variable[T]
}
/** A variable that can draw a new value using the supplied random Generator. */
trait sampleable[T] extends variable[T] with Serializable {
  def sample(gen: Generator): variable[T]
}
/** A degenerate (point-mass / Dirac) variable that always holds the value x.
  *
  * @param x the atom of the point mass; mutable via set.
  */
class dirac[T](var x: T) extends variable[T] with Serializable {
  type Self = dirac[T]
  // The density of a point mass at its atom is unbounded, so the log density
  // is +infinity. (Was the deprecated postfix form `Double PositiveInfinity`,
  // which requires scala.language.postfixOps on modern compilers.)
  def logDensity(): Double = Double.PositiveInfinity
  def get(): T = x
  def set(X: T): Unit = { x = X }
  // Mutation is not supported for a constant; callers receive null
  // (behavior preserved from the original -- callers must not rely on it).
  def mutate(x: T): variable[T] = null
}
object variable {
  /** Implicitly unwraps a variable to its current value. */
  implicit def variable2T[T](X: variable[T]): T = X.get()
  /** Implicitly lifts a raw value into a point-mass (dirac) variable.
    *
    * NOTE: the original type parameter was named `Double`, shadowing
    * scala.Double -- the conversion was always fully generic, not restricted
    * to doubles. Renamed to T for clarity; applicability is unchanged.
    */
  implicit def T2dirac[T](x: T): variable[T] = new dirac(x)
}
| lell/libnp | src/libnp/random/variables.scala | Scala | bsd-2-clause | 843 |
package com.xah.chat.framework
import android.app.{Service, Activity}
import android.content.Intent
/**
* Project: xaHChat
* Created on 2015-03-11 by
* lemonxah -
* https://github.com/lemonxah
* http://stackoverflow.com/users/2919672/lemon-xah
*/
/** Factory for an android Intent targeting class c, using the in-scope
  * Activity as the Intent's context. */
object XIntent {
  def apply[T](c: Class[T])(implicit context: Activity) = new Intent(context, c)
}
/** Wraps the launch of Activity class c from the given source Activity. */
class XActivity[T <: Activity](c: Class[T])(context: Activity) {
  // Intent is only built on first use.
  lazy val intent = XIntent(c)(context)
  def start() = context.startActivity(intent)
}
/** Convenience constructor: builds an XActivity using the in-scope Activity. */
object XActivity {
  def apply[T <: Activity](c: Class[T])(implicit context: Activity) = new XActivity[T](c)(context)
}
/** Constructs and immediately starts an Activity of type T. */
object XActivityStart {
  def apply[T <: Activity](c: Class[T])(implicit context: Activity) = new XActivity[T](c)(context).start()
}
/** Starts a Service of type T, using the in-scope Activity as the context
  * for both the Intent and startService. */
object XServiceStart {
  def apply[T <: Service](c: Class[T])(implicit context: Activity) = context.startService(XIntent[T](c)(context))
}
// NOTE: removed dataset-extraction residue that was fused onto the closing
// brace line (pipe-delimited repo metadata), which made the line invalid Scala.
package im.actor.botkit
import akka.actor.{ Status, ActorRef, ActorLogging, Actor }
import akka.pattern.ask
import akka.util.Timeout
import im.actor.bots.BotMessages
import im.actor.bots.macros.BotInterface
import scala.collection.concurrent.TrieMap
import scala.concurrent.Future
import scala.concurrent.forkjoin.ThreadLocalRandom
// Base actor for bots. The BotInterface macro annotation presumably generates
// the typed service-call API on top of `request` -- TODO confirm against the
// macro definition in im.actor.bots.macros.
@BotInterface
private[botkit] abstract class BotBaseBase extends Actor with ActorLogging
/** Skeleton actor for a bot: forwards typed requests to the bot server
  * (rqSource), correlates responses back to callers by request id, and caches
  * users/groups carried in fat sequence updates.
  */
abstract class BotBase extends BotBaseBase {
  import BotMessages._
  import context.dispatcher

  protected implicit val timeout: Timeout

  // Monotonically increasing id used to correlate requests with responses.
  private var requestCounter = 0L
  // Outstanding requests: id -> (original sender, request body).
  // NOTE(review): entries are never removed after a response is delivered,
  // so this map grows without bound over the actor's lifetime.
  private var requests = Map.empty[Long, (ActorRef, RequestBody)]
  // Caches populated from BotFatSeqUpdate payloads.
  private val users = TrieMap.empty[Int, User]
  private val groups = TrieMap.empty[Int, Group]
  // Destination for outgoing BotRequests; dead letters until setRqSource is called.
  private var rqSource = context.system.deadLetters

  protected def setRqSource(ref: ActorRef): Unit = {
    this.rqSource = ref
  }

  /** Routes the request through self (so workingBehavior assigns it an id and
    * tracks the sender) and narrows the reply to the body's response type. */
  override final def request[T <: RequestBody](body: T): Future[body.Response] = {
    (self ? body) map (_.asInstanceOf[body.Response])
  }

  /** Called when the underlying message stream fails. */
  protected def onStreamFailure(cause: Throwable): Unit

  protected final def workingBehavior: Receive = {
    case Status.Failure(cause) ⇒
      onStreamFailure(cause)
    case rq: RequestBody ⇒
      // Assign the next id, remember who asked, and forward to the server.
      requestCounter += 1
      val request = BotRequest(requestCounter, rq.service, rq)
      requests += (requestCounter → (sender() → rq))
      rqSource ! request
    case upd: BotUpdate ⇒
      log.info("Update: {}", upd)
      upd match {
        case BotFatSeqUpdate(_, _, newUsers, newGroups) ⇒
          // Refresh local caches from the "fat" update payload.
          newUsers foreach {
            case (id, user) ⇒ this.users.put(id, user)
          }
          newGroups foreach {
            case (id, group) ⇒ this.groups.put(id, group)
          }
        case _ ⇒
      }
      onUpdate(upd.body)
    case rsp: BotResponse ⇒
      log.info("Response #{}: {}", rsp.id, rsp.body)
      // Look up the original caller; errors become Status.Failure so the
      // Future returned by `request` fails, successes are decoded by the
      // request body's own reader.
      requests.get(rsp.id) foreach {
        case (replyTo, rq) ⇒
          val reply = rsp.body match {
            case err: BotError ⇒ Status.Failure(err)
            case BotSuccess(obj) ⇒ rq.readResponse(obj)
          }
          replyTo ! reply
      }
  }

  // Log the failing message (if any) before the default restart handling.
  override def preRestart(reason: Throwable, message: Option[Any]): Unit = {
    val prefix = "Actor will restart."
    message match {
      case Some(msg) ⇒
        log.error(reason, prefix + " Last message received: {}", msg)
      case None ⇒
        log.error(reason, prefix)
    }
    super.preRestart(reason, message)
  }

  protected def nextRandomId() = ThreadLocalRandom.current().nextLong()

  // Cache lookups; throw if the id has not been seen in any fat update yet.
  protected def getUser(id: Int) = this.users.getOrElse(id, throw new RuntimeException(s"User $id not found"))

  protected def getGroup(id: Int) = this.groups.getOrElse(id, throw new RuntimeException(s"Group $id not found"))
}
| EaglesoftZJ/actor-platform | actor-server/actor-botkit/src/main/scala/im/actor/botkit/BotBase.scala | Scala | agpl-3.0 | 2,799 |
/*
Copyright 2013 Tomas Tauber
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.mathematics
import org.scalacheck.Arbitrary
import org.scalacheck.Arbitrary.arbitrary
import org.scalacheck.Properties
import org.scalacheck.Prop.forAll
import org.scalacheck._
import org.scalacheck.Gen._
import org.scalatest.{ Matchers, WordSpec }
import com.twitter.scalding._
import Matrix2._
import cascading.flow.FlowDef
import com.twitter.algebird.Ring
import com.twitter.scalding.IterableSource
/**
* Unit tests used in development
* (stronger properties are tested in ScalaCheck tests at the end)
*/
class Matrix2OptimizationSpec extends WordSpec with Matchers {
  import Dsl._
  import com.twitter.scalding.Test

  implicit val mode = Test(Map())
  implicit val fd = new FlowDef
  // Tiny concrete pipe; the tests below exercise only plan/cost optimization,
  // so the actual cell values never matter -- only the size hints do.
  val globM = TypedPipe.from(IterableSource(List((1, 2, 3.0), (2, 2, 4.0))))

  implicit val ring: Ring[Double] = Ring.doubleRing
  implicit val ord1: Ordering[Int] = Ordering.Int
  implicit val ord2: Ordering[(Int, Int)] = Ordering.Tuple2[Int, Int]

  // Convenience constructors that erase the row/column types to Any so plans
  // of differing dimensions can be combined freely in fixtures.
  def literal(tpipe: TypedPipe[(Int, Int, Double)], sizeHint: SizeHint): MatrixLiteral[Any, Any, Double] = MatrixLiteral(tpipe, sizeHint).asInstanceOf[MatrixLiteral[Any, Any, Double]]
  def product(left: Matrix2[Any, Any, Double], right: Matrix2[Any, Any, Double], optimal: Boolean = false): Product[Any, Any, Any, Double] = Product(left, right, ring)
  def sum(left: Matrix2[Any, Any, Double], right: Matrix2[Any, Any, Double]): Sum[Any, Any, Double] = Sum(left, right, ring)

  /**
   * Values used in tests
   */
  // ((A1(A2 A3))((A4 A5) A6)
  val optimizedPlan = product(
    product(literal(globM, FiniteHint(30, 35)),
      product(literal(globM, FiniteHint(35, 15)),
        literal(globM, FiniteHint(15, 5)), true), true),
    product(
      product(literal(globM, FiniteHint(5, 10)),
        literal(globM, FiniteHint(10, 20)), true),
      literal(globM, FiniteHint(20, 25)), true), true)

  val optimizedPlanCost = 1850 // originally 15125.0

  // A1(A2(A3(A4(A5 A6))))
  val unoptimizedPlan = product(literal(globM, FiniteHint(30, 35)),
    product(literal(globM, FiniteHint(35, 15)),
      product(literal(globM, FiniteHint(15, 5)),
        product(literal(globM, FiniteHint(5, 10)),
          product(literal(globM, FiniteHint(10, 20)), literal(globM, FiniteHint(20, 25)))))))

  val simplePlan = product(literal(globM, FiniteHint(30, 35)), literal(globM, FiniteHint(35, 25)), true)

  val simplePlanCost = 750 //originally 26250

  // Sums combining the product chains above, used to check that optimization
  // recurses through Sum nodes without disturbing them.
  val combinedUnoptimizedPlan = sum(unoptimizedPlan, simplePlan)

  val combinedOptimizedPlan = sum(optimizedPlan, simplePlan)

  val combinedOptimizedPlanCost = optimizedPlanCost + simplePlanCost

  // A1 * (A2 * (A3 * ( A4 + A4 ) * (A5 * (A6))))
  val unoptimizedGlobalPlan = product(literal(globM, FiniteHint(30, 35)),
    product(literal(globM, FiniteHint(35, 15)),
      product(literal(globM, FiniteHint(15, 5)),
        product(sum(literal(globM, FiniteHint(5, 10)), literal(globM, FiniteHint(5, 10))),
          product(literal(globM, FiniteHint(10, 20)), literal(globM, FiniteHint(20, 25)))))))

  // ((A1(A2 A3))(((A4 + A4) A5) A6)
  val optimizedGlobalPlan = product(
    product(literal(globM, FiniteHint(30, 35)),
      product(literal(globM, FiniteHint(35, 15)),
        literal(globM, FiniteHint(15, 5)), true), true),
    product(
      product(sum(literal(globM, FiniteHint(5, 10)), literal(globM, FiniteHint(5, 10))),
        literal(globM, FiniteHint(10, 20)), true),
      literal(globM, FiniteHint(20, 25)), true), true)

  // The classic 6-matrix chain (30x35, 35x15, 15x5, 5x10, 10x20, 20x25) fed
  // directly to optimizeProductChain.
  val productSequence = IndexedSeq(literal(globM, FiniteHint(30, 35)), literal(globM, FiniteHint(35, 15)),
    literal(globM, FiniteHint(15, 5)), literal(globM, FiniteHint(5, 10)), literal(globM, FiniteHint(10, 20)),
    literal(globM, FiniteHint(20, 25)))

  val combinedSequence = List(IndexedSeq(literal(globM, FiniteHint(30, 35)), literal(globM, FiniteHint(35, 15)),
    literal(globM, FiniteHint(15, 5)), literal(globM, FiniteHint(5, 10)), literal(globM, FiniteHint(10, 20)),
    literal(globM, FiniteHint(20, 25))), IndexedSeq(literal(globM, FiniteHint(30, 35)), literal(globM, FiniteHint(35, 25))))

  val planWithSum = product(literal(globM, FiniteHint(30, 35)), sum(literal(globM, FiniteHint(35, 25)), literal(globM, FiniteHint(35, 25))), true)

  // Square-matrix powers: G^2, G^4, G^8 built by repeated squaring.
  val g = literal(globM, FiniteHint(30, 30))
  val g2 = product(g, g, true)
  val g4 = product(g2, g2, true)
  val optimizedGraph8 = product(g4, g4, true)

  // G^5 times a column vector -- the optimizer should right-associate so the
  // (huge-row-hint) vector is multiplied innermost.
  val unoptimizedGraphVectorPlan = (g ^ (5)) * literal(globM, FiniteHint(Long.MaxValue, 1))

  val optimizedGraphVectorPlan = product(
    product(
      literal(globM, FiniteHint(30, 30)),
      literal(globM, FiniteHint(30, 30))),
    product(
      literal(globM, FiniteHint(30, 30)),
      product(
        literal(globM, FiniteHint(30, 30)),
        product(
          literal(globM, FiniteHint(30, 30)),
          literal(globM, FiniteHint(Long.MaxValue, 1))))))

  "Matrix multiplication chain optimization" should {
    "handle a single matrix" in {
      val p = IndexedSeq(literal(globM, FiniteHint(30, 35)))
      val result = optimizeProductChain(p, Some(ring, MatrixJoiner2.default))
      // A singleton chain has zero multiplication cost.
      result shouldBe (0, literal(globM, FiniteHint(30, 35)))
    }
    "handle two matrices" in {
      val p = IndexedSeq(literal(globM, FiniteHint(30, 35)), literal(globM, FiniteHint(35, 25)))
      val result = optimizeProductChain(p, Some(ring, MatrixJoiner2.default))
      (simplePlanCost, simplePlan) shouldBe result
    }
    "handle an example with 6 matrices" in {
      val result = optimizeProductChain(productSequence, Some(ring, MatrixJoiner2.default))
      (optimizedPlanCost, optimizedPlan) shouldBe result
    }
    // Idempotence: optimizing an already-optimal plan must be a no-op.
    "not change an optimized plan" in {
      (optimizedPlanCost, optimizedPlan) shouldBe optimize(optimizedPlan)
    }
    "change an unoptimized plan" in {
      (optimizedPlanCost, optimizedPlan) shouldBe optimize(unoptimizedPlan)
    }
    "handle an optimized plan with sum" in {
      (combinedOptimizedPlanCost, combinedOptimizedPlan) shouldBe optimize(combinedOptimizedPlan)
    }
    "handle an unoptimized plan with sum" in {
      (combinedOptimizedPlanCost, combinedOptimizedPlan) shouldBe (optimize(combinedUnoptimizedPlan))
    }
    // Distribution must not be applied: A*(B+C) stays as written.
    "not break A*(B+C)" in {
      planWithSum shouldBe (optimize(planWithSum)._2)
    }
    "handle an unoptimized global plan" in {
      optimizedGlobalPlan shouldBe (optimize(unoptimizedGlobalPlan)._2)
    }
    "handle an optimized global plan" in {
      optimizedGlobalPlan shouldBe (optimize(optimizedGlobalPlan)._2)
    }
    "handle a G^5 V plan" in {
      optimizedGraphVectorPlan shouldBe (optimize(unoptimizedGraphVectorPlan)._2)
    }
    "handle an optimized G^5 V plan" in {
      optimizedGraphVectorPlan shouldBe (optimize(optimizedGraphVectorPlan)._2)
    }
    "handle a G^8 plan" in {
      optimizedGraph8 shouldBe (optimize(g ^ 8)._2)
    }
  }
}
object Matrix2Props extends Properties("Matrix2") {
  import com.twitter.scalding.Test

  implicit val mode = Test(Map())
  implicit val fd = new FlowDef
  // Dummy pipe: only size hints drive the cost model being property-tested.
  val globM = TypedPipe.from(IterableSource(List((1, 2, 3.0), (2, 2, 4.0))))

  implicit val ring: Ring[Double] = Ring.doubleRing
  implicit val ord1: Ordering[Int] = Ordering.Int

  def literal(tpipe: TypedPipe[(Int, Int, Double)], sizeHint: SizeHint): MatrixLiteral[Any, Any, Double] = MatrixLiteral(tpipe, sizeHint).asInstanceOf[MatrixLiteral[Any, Any, Double]]
  def product(left: Matrix2[Any, Any, Double], right: Matrix2[Any, Any, Double], optimal: Boolean = false): Product[Any, Any, Any, Double] = Product(left, right, ring)
  def sum(left: Matrix2[Any, Any, Double], right: Matrix2[Any, Any, Double]): Sum[Any, Any, Double] = Sum(left, right, ring)

  /**
   * Helper methods used in tests for randomized generations
   */
  // Generates a random sparse matrix literal. A non-positive rows/cols entry
  // in dims means "choose a random dimension in [1, 1000]"; the chosen column
  // count is returned so chains can be made dimension-compatible.
  def genLeaf(dims: (Long, Long)): (MatrixLiteral[Any, Any, Double], Long) = {
    val (rows, cols) = dims
    val sparGen = Gen.choose(0.0f, 1.0f)
    val sparsity = sparGen.sample.get
    val rowGen = Gen.choose(1, 1000)
    val nextRows = if (rows <= 0) rowGen.sample.get else rows
    if (cols <= 0) {
      val colGen = Gen.choose(1, 1000)
      val nextCols = colGen.sample.get
      (literal(globM, SparseHint(sparsity, nextRows, nextCols)), nextCols)
    } else {
      (literal(globM, SparseHint(sparsity, nextRows, cols)), cols)
    }
  }

  // Builds a chain of `target` random literals where each matrix's row count
  // equals the previous matrix's column count (so the chain is multipliable).
  def productChainGen(current: Int, target: Int, prevCol: Long, result: List[MatrixLiteral[Any, Any, Double]]): List[MatrixLiteral[Any, Any, Double]] = {
    if (current == target) result
    else {
      val (randomMatrix, cols) = genLeaf((prevCol, 0))
      productChainGen(current + 1, target, cols, result ++ List(randomMatrix))
    }
  }

  // A random (arbitrarily parenthesized) product of p compatible matrices.
  def randomProduct(p: Int): Matrix2[Any, Any, Double] = {
    if (p == 1) genLeaf((0, 0))._1
    else {
      val full = productChainGen(0, p, 0, Nil).toIndexedSeq
      generateRandomPlan(0, full.size - 1, full)
    }
  }

  // Random formula tree of products and sums; depth is capped at 5 to keep
  // generated cases small.
  def genNode(depth: Int): Gen[Matrix2[Any, Any, Double]] = for {
    v <- arbitrary[Int]
    p <- Gen.choose(1, 10)
    left <- genFormula(depth + 1)
    right <- genFormula(depth + 1)
  } yield if (depth > 5) randomProduct(p) else (if (v > 0) randomProduct(p) else Sum(left, right, ring))

  def genFormula(depth: Int): Gen[Matrix2[Any, Any, Double]] =
    if (depth > 5)
      genLeaf((0, 0))._1
    else
      (oneOf(genNode(depth + 1), Gen.const(genLeaf((0, 0))._1)))

  implicit def arbT: Arbitrary[Matrix2[Any, Any, Double]] = Arbitrary(genFormula(0))

  val genProdSeq = for {
    v <- Gen.choose(1, 10)
  } yield productChainGen(0, v, 0, Nil).toIndexedSeq

  implicit def arbSeq: Arbitrary[IndexedSeq[MatrixLiteral[Any, Any, Double]]] = Arbitrary(genProdSeq)

  // Random parenthesization of p(i..j): picks a split point k uniformly and
  // recurses on both sides.
  def generateRandomPlan(i: Int, j: Int, p: IndexedSeq[MatrixLiteral[Any, Any, Double]]): Matrix2[Any, Any, Double] = {
    if (i == j) p(i)
    else {
      val genK = Gen.choose(i, j - 1)
      val k = genK.sample.getOrElse(i)
      val X = generateRandomPlan(i, k, p)
      val Y = generateRandomPlan(k + 1, j, p)
      Product(X, Y, ring)
    }
  }

  /**
   * Function that recursively estimates a cost of a given MatrixFormula / plan.
   * This is the used in the tests for checking whether an optimized plan has
   * a cost <= a randomized plan.
   * The cost estimation of this evaluation should return the same values as the one
   * used in building optimized plans -- this is checked in the tests below.
   * @return resulting cost
   */
  def evaluate(mf: Matrix2[Any, Any, Double]): BigInt = {

    /**
     * This function strips off the formula into a list of independent product chains
     * (i.e. same as matrixFormulaToChains in Prototype, but has Products
     * instead of IndexedSeq[Literal])
     */
    def toProducts(mf: Matrix2[Any, Any, Double]): (Option[Product[Any, Any, Any, Double]], List[Product[Any, Any, Any, Double]]) = {
      mf match {
        case element @ MatrixLiteral(_, _) => (None, Nil)
        case Sum(left, right, _) => {
          // A Sum terminates the chains on both sides; collect them all.
          val (lastLP, leftR) = toProducts(left)
          val (lastRP, rightR) = toProducts(right)
          val total = leftR ++ rightR ++ (if (lastLP.isDefined) List(lastLP.get) else Nil) ++
            (if (lastRP.isDefined) List(lastRP.get) else Nil)
          (None, total)
        }
        case Product(leftp @ MatrixLiteral(_, _), rightp @ MatrixLiteral(_, _), _, _) => {
          (Some(Product(leftp, rightp, ring)), Nil)
        }
        case Product(left @ Product(_, _, _, _), right @ MatrixLiteral(_, _), _, _) => {
          val (lastLP, leftR) = toProducts(left)
          if (lastLP.isDefined) (Some(Product(lastLP.get, right, ring)), leftR)
          else (None, leftR)
        }
        case Product(left @ MatrixLiteral(_, _), right @ Product(_, _, _, _), _, _) => {
          val (lastRP, rightR) = toProducts(right)
          if (lastRP.isDefined) (Some(Product(left, lastRP.get, ring)), rightR)
          else (None, rightR)
        }
        case Product(left, right, _, _) => {
          val (lastLP, leftR) = toProducts(left)
          val (lastRP, rightR) = toProducts(right)
          if (lastLP.isDefined && lastRP.isDefined) {
            (Some(Product(lastLP.get, lastRP.get, ring)), leftR ++ rightR)
          } else {
            val newP = if (lastLP.isDefined) List(lastLP.get) else if (lastRP.isDefined) List(lastRP.get) else Nil
            (None, newP ++ leftR ++ rightR)
          }
        }
      }
    }

    /**
     * To create a companion tree which has respective ranges of each product
     */
    class LabeledTree(val range: (Int, Int), val left: Option[LabeledTree], val right: Option[LabeledTree]) {
      // Width of the range covered by this subtree; used as the multiplier
      // that mirrors the (k - i) / (j - k - 1) factors in computeCosts.
      def diff: Int = range._2 - range._1
    }

    def labelTree(p: Matrix2[Any, Any, Double], start: Int): Option[LabeledTree] = {
      p match {
        case Product(left @ MatrixLiteral(_, _), right @ MatrixLiteral(_, _), _, _) => {
          Some(new LabeledTree((start, start + 1), None, None))
        }
        case Product(left @ MatrixLiteral(_, _), right @ Product(_, _, _, _), _, _) => {
          val labelRight = labelTree(right, start + 1)
          Some(new LabeledTree((start, labelRight.get.range._2), None, labelRight))
        }
        case Product(left @ Product(_, _, _, _), right @ MatrixLiteral(_, _), _, _) => {
          val labelLeft = labelTree(left, start)
          Some(new LabeledTree((labelLeft.get.range._1, labelLeft.get.range._2 + 1), labelLeft, None))
        }
        case Product(left, right, _, _) => {
          val labelLeft = labelTree(left, start)
          val labelRight = labelTree(right, labelLeft.get.range._2 + 1)
          Some(new LabeledTree((labelLeft.get.range._1, labelRight.get.range._2), labelLeft, labelRight))
        }
        case _ => None
      }
    }

    /**
     * This function evaluates a product chain in the same way
     * as the dynamic programming procedure computes cost
     * (optimizeProductChain - computeCosts in Prototype)
     */
    def evaluateProduct(p: Matrix2[Any, Any, Double], labels: LabeledTree): Option[(BigInt, Matrix2[Any, Any, Double], Matrix2[Any, Any, Double])] = {
      p match {
        case Product(left @ MatrixLiteral(_, _), right @ MatrixLiteral(_, _), _, _) => {
          // reflects optimize when k==i: p(i).sizeHint * (p(k).sizeHint * p(j).sizeHint)
          Some((left.sizeHint * (left.sizeHint * right.sizeHint)).total.get,
            left, right)
        }
        case Product(left @ MatrixLiteral(_, _), right @ Product(_, _, _, _), _, _) => {
          val (cost, pLeft, pRight) = evaluateProduct(right, labels.right.get).get
          // reflects optimize when k==i: p(i).sizeHint * (p(k).sizeHint * p(j).sizeHint)
          // diff is computed in the labeled tree - it measures "spread" of the tree
          // diff corresponds to (k - i) or (j - k - 1) in optimize: (k - i) * computeCosts(p, i, k) + (j - k - 1) * computeCosts(p, k + 1, j)
          Some(labels.right.get.diff * cost + (left.sizeHint * (left.sizeHint * pRight.sizeHint)).total.get,
            left, pRight)
        }
        case Product(left @ Product(_, _, _, _), right @ MatrixLiteral(_, _), _, _) => {
          val (cost, pLeft, pRight) = evaluateProduct(left, labels.left.get).get
          Some(labels.left.get.diff * cost + (pLeft.sizeHint * (pRight.sizeHint * right.sizeHint)).total.get,
            pLeft, right)
        }
        case Product(left, right, _, _) => {
          val (cost1, p1Left, p1Right) = evaluateProduct(left, labels.left.get).get
          val (cost2, p2Left, p2Right) = evaluateProduct(right, labels.right.get).get
          Some(labels.left.get.diff * cost1 + labels.right.get.diff * cost2 + (p1Left.sizeHint * (p1Right.sizeHint * p2Right.sizeHint)).total.get,
            p1Left, p2Right)
        }
        case _ => None
      }
    }

    // Total cost = sum of per-chain costs over all independent product chains.
    val (last, productList) = toProducts(mf)
    val products = if (last.isDefined) last.get :: productList else productList
    products.map(p => evaluateProduct(p, labelTree(p, 0).get).get._1).sum
  }

  // ScalaCheck properties

  /**
   * Verifying "evaluate" function - that it does return
   * the same overall costs as what is estimated in the optimization procedure
   */
  property("evaluate function returns the same cost as optimize") = forAll { (a: Matrix2[Any, Any, Double]) =>
    optimize(a)._1 == evaluate(optimize(a)._2)
  }

  /**
   * "Proof": the goal property that estimated costs of optimized plans or product chains
   * are less than or equal to costs of randomized equivalent plans or product chains
   */
  property("a cost of an optimized chain of matrix products is <= a random one") = forAll { (a: IndexedSeq[MatrixLiteral[Any, Any, Double]]) =>
    optimizeProductChain(a, Some(ring, MatrixJoiner2.default))._1 <=
      evaluate(generateRandomPlan(0, a.length - 1, a))
  }

  property("cost of a random plan is <= a random one") = forAll { (a: Matrix2[Any, Any, Double]) =>
    optimize(a)._1 <= evaluate(a)
  }

  /**
   * Sanity check
   */
  property("optimizing an optimized plan does not change it") = forAll { (a: Matrix2[Any, Any, Double]) =>
    optimize(a) == optimize(optimize(a)._2)
  }
}
| bendridi/scalding | scalding-core/src/test/scala/com/twitter/scalding/mathematics/Matrix2OptimizationTest.scala | Scala | apache-2.0 | 17,578 |
package adtoyou.spark.analysis.azkaban
import java.text.SimpleDateFormat
import java.util.{Calendar, Date}
import adtoyou.spark.analysis.MyRDDFunctions._
import adtoyou.spark.analysis.util.MD5Util
import com.redislabs.provider.redis._
import org.apache.spark.{SparkConf, SparkContext}
import redis.clients.util.JedisClusterCRC16
/**
* Created by wangqy on 2017/5/19.
*/
/** Spark job that rebuilds the "UML|<md5-16>" -> raw-user-id mapping in a
  * Redis cluster from the last three complete months of user data files:
  * it first deletes all existing UML| keys, then re-inserts fresh ones.
  */
object UMLRefresh {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .set("spark.shuffle.file.buffer", "128k")
      .set("spark.reducer.maxSizeInFlight", "96m")
      .set("redis.host", "192.168.3.156")
      .set("redis.port", "9801")
      .set("redis.timeout", "30000")
    val sc = new SparkContext(conf)
    // Clear out the previous UML| key space before reloading.
    rmFromRedis(sc)
    val dtFormat = new SimpleDateFormat("yyyyMM")
    val cal: Calendar = Calendar.getInstance
    val now = new Date
    cal.setTime(now)
    // Calendar.add accumulates: the three vals below are 2, 3 and 4 months
    // before now respectively (the names reflect that offset).
    cal.add(Calendar.MONTH, -2)
    val prev2Month = dtFormat.format(cal.getTime)
    cal.add(Calendar.MONTH, -1)
    val prev3Month = dtFormat.format(cal.getTime)
    cal.add(Calendar.MONTH, -1)
    val prev4Month = dtFormat.format(cal.getTime)
    // Glob over the daily files of those three months.
    val userFile: String = s"/drp/tyv2/data/user_data/{$prev2Month*,$prev3Month*,$prev4Month*}"
    val userRDD = sc.textFile(userFile)
      // Column 1 holds the user identifier -- presumably a raw (unhashed)
      // id; TODO confirm against the producer of user_data.
      .map(_.split('\t')(1).trim)
      // Skip empties and values that are already MD5 (32) or SHA-1 (40) hex.
      .filter(u => u.length != 0 && u.length != 32 && u.length != 40)
      .map(u => (MD5Util.toMD516(u), u))
      .filter(_._1 != null)
      .map(x => ("UML|" + x._1, x._2))
      // Keep one raw value per hashed key.
      .reduceByKey((v1, _) => v1)
    sc.toRedisKV(userRDD, 10, 250)
    sc.stop
  }

  /** Deletes every "UML|*" key from the Redis cluster, batching deletions per
    * partition through a single pipelined connection. */
  def rmFromRedis(sc: SparkContext)
                 (implicit redisConfig: RedisConfig = new RedisConfig(new RedisEndpoint(sc.getConf))) {
    //    val hosts = testScaleHosts(redisConfig, 250)
    //    println(hosts.size)
    //    hosts.foreach(x => println(x.productIterator.mkString(",")))
    sc.fromRedisKeyPattern("UML|*", 250)
      .foreachPartition { keys =>
        if (keys.hasNext) {
          // Tag each key with its cluster slot so deletes can be grouped by
          // slot (cross-slot multi-key DEL is rejected by Redis Cluster).
          val keyArr = keys.map(k => (JedisClusterCRC16.getSlot(k), k)).toArray
          val k = keyArr(0)._2
          // NOTE(review): one connection is chosen from the first key; this
          // presumably relies on all keys of a partition living on the same
          // node -- TODO confirm the partitioning contract of
          // fromRedisKeyPattern.
          val conn = redisConfig.connectionForKey(k)
          val pipeline = conn.pipelined()
          keyArr.groupBy(_._1)
            .foreach(x =>
              pipeline.del(x._2.map(_._2): _*)
            )
          conn.close()
        }
      }
    //    redisConfig.hosts.foreach { host =>
    //      println("clear host=" + host.endpoint.host + ":" + host.endpoint.port)
    //      val jedis = host.connect()
    //      try {
    //        val pipeline = jedis.pipelined
    //        for (i <- '0' to 'f') {
    //          val response = pipeline.keys(s"UML|$i*")
    //          pipeline.sync
    //          val keySet = response.get
    //          val len = keySet.size
    //          val strArr = new Array[String](len)
    //          val keyArr = keySet.toArray(strArr)
    //            .map(k => (JedisClusterCRC16.getSlot(k), k))
    //          keyArr.groupBy(_._1)
    //            .foreach(x =>
    //              pipeline.del(x._2.map(_._2): _*)
    //            )
    //          pipeline.sync
    //        }
    //      } catch {
    //        case e: Throwable =>
    //          System.out.println(ExceptionUtils.getFullStackTrace(e))
    //      } finally {
    //        if (jedis != null) jedis.close()
    //      }
    //    }
  }

  // for test only
  /** Maps a desired partition count onto the cluster's slot ranges, splitting
    * or merging host slot ranges so exactly partitionNum (host, port,
    * startSlot, endSlot) tuples are produced. */
  def testScaleHosts(redisConfig: RedisConfig, partitionNum: Int): Seq[(String, Int, Int, Int)] = {
    // Split one host's slot range into cnt contiguous sub-ranges.
    def split(host: RedisNode, cnt: Int) = {
      val endpoint = host.endpoint
      val start = host.startSlot
      val end = host.endSlot
      val range = (end - start) / cnt
      println(endpoint.host + ":" + endpoint.port)
      println(start + "~" + end)
      println("cnt=" + cnt)
      (0 until cnt).map(i => {
        (endpoint.host,
          endpoint.port,
          if (i == 0) start else (start + range * i + 1),
          if (i != cnt - 1) (start + range * (i + 1)) else end)
      })
    }
    val hosts = redisConfig.hosts.sortBy(_.startSlot)
    println("hosts size=" + hosts.size)
    if (hosts.size == partitionNum) {
      // One partition per host: use each host's own range unchanged.
      hosts.map(x => (x.endpoint.host, x.endpoint.port, x.startSlot, x.endSlot))
    } else if (hosts.size < partitionNum) {
      // More partitions than hosts: split each host's range; the last host
      // absorbs the remainder.
      val presExtCnt = partitionNum / hosts.size
      val lastExtCnt = if (presExtCnt * hosts.size < partitionNum)
        (partitionNum - presExtCnt * (hosts.size - 1)) else presExtCnt
      println("presExtCnt=" + presExtCnt)
      println("lastExtCnt=" + lastExtCnt)
      hosts.zipWithIndex.flatMap {
        case (host, idx) => {
          split(host, if (idx == hosts.size - 1) lastExtCnt else presExtCnt)
        }
      }
    } else {
      // Fewer partitions than hosts: merge presExtCnt consecutive hosts'
      // ranges into each partition; the last partition runs to the final host.
      val presExtCnt = hosts.size / partitionNum
      (0 until partitionNum).map {
        idx => {
          val ip = hosts(idx * presExtCnt).endpoint.host
          val port = hosts(idx * presExtCnt).endpoint.port
          val start = hosts(idx * presExtCnt).startSlot
          val end = hosts(if (idx == partitionNum - 1) {
            (hosts.size - 1)
          } else {
            ((idx + 1) * presExtCnt - 1)
          }).endSlot
          (ip, port, start, end)
        }
      }
    }
  }
}
| 7u/spark-learning | azkaban/azprojects/analysis-spark/src/main/scala/adtoyou/spark/analysis/azkaban/UMLRefresh.scala | Scala | apache-2.0 | 5,380 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.2
* @date Sun Sep 16 14:09:25 EDT 2012
* @see LICENSE (MIT style license file).
*/
package scalation.linalgebra
import collection.Traversable
import util.Sorting.quickSort
import math.{abs => ABS, max => MAX, sqrt}
import scalation.math.long_exp
import scalation.util.Error
import scalation.util.SortingL.{iqsort, qsort2}
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `VectorL` class stores and operates on Numeric Vectors of base type `Long`.
* It follows the framework of `gen.VectorN [T]` and is provided for performance.
* @param dim the dimension/size of the vector
* @param v the 1D array used to store vector elements
*/
class VectorL (val dim: Int,
protected var v: Array [Long] = null)
extends Traversable [Long] with PartiallyOrdered [VectorL] with Vec with Error with Serializable
{
if (v == null) {
v = Array.ofDim [Long] (dim)
} else if (dim != v.length) {
flaw ("constructor", "dimension is wrong")
} // if
/** Number of elements in the vector as a Double
*/
val nd = dim.toDouble
/** Range for the storage array
*/
private val range = 0 until dim
/** Format String used for printing vector values (change using setFormat)
* Ex: "%d,\\t", "%.6g,\\t" or "%12.6g,\\t"
*/
private var fString = "%d,\\t"
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Construct a vector from an array of values.
* @param u the array of values
*/
def this (u: Array [Long]) { this (u.length, u) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Construct a vector and assign values from vector 'u'.
* @param u the other vector
*/
def this (u: VectorL)
{
this (u.dim) // invoke primary constructor
for (i <- range) v(i) = u(i)
} // constructor
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the size (number of elements) of 'this' vector.
*/
override def size: Int = dim
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Produce the range of all indices (0 to one less than dim).
*/
def indices: Range = 0 until dim
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Expand the size (dim) of 'this' vector by 'more' elements.
* @param more the number of new elements to add
*/
def expand (more: Int = dim): VectorL =
{
if (more < 1) this // no change
else new VectorL (dim + more, Array.concat (v, new Array [Long] (more)))
} // expand
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a vector of the form (0, ... 1, ... 0) where the 1 is at position j.
* @param j the position to place the 1
* @param size the size of the vector (upper bound = size - 1)
*/
def oneAt (j: Int, size: Int = dim): VectorL =
{
val c = new VectorL (size)
c.v(j) = 1l
c
} // oneAt
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a vector of the form (0, ... -1, ... 0) where the -1 is at position j.
* @param j the position to place the -1
* @param size the size of the vector (upper bound = size - 1)
*/
def _oneAt (j: Int, size: Int = dim): VectorL =
{
val c = new VectorL (size)
c.v(j) = -1l
c
} // _oneAt
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Convert 'this' `VectorL` into a `VectorI`.
*/
def toInt: VectorI =
{
val c = new VectorI (dim)
for (i <- range) c(i) = v(i).toInt
c
} // toInt
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Convert 'this' `VectorL` into a `VectorD`.
*/
def toDouble: VectorD =
{
val c = new VectorD (dim)
for (i <- range) c(i) = v(i).toDouble
c
} // toDouble
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get 'this' vector's element at the 'i'-th index position.
* @param i the given index
*/
def apply (i: Int): Long = v(i)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get 'this' vector's elements within the given range (vector slicing).
* @param r the given range
*/
def apply (r: Range): VectorL = slice (r.start, r.end)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get 'this' vector's entire array.
*/
def apply (): Array [Long] = v
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set 'this' vector's element at the 'i'-th index position.
* @param i the given index
* @param x the value to assign
*/
def update (i: Int, x: Long) { v(i) = x }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set 'this' vector's elements over the given range (vector slicing).
* @param r the given range
* @param x the value to assign
*/
def update (r: Range, x: Long) { for (i <- r) v(i) = x }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set 'this' vector's elements over the given range (vector slicing).
* @param r the given range
* @param u the vector to assign
*/
def update (r: Range, u: VectorL) { for (i <- r) v(i) = u(i - r.start) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set each value in 'this' vector to 'x'.
* @param x the value to be assigned
*/
def set (x: Long) { for (i <- range) v(i) = x }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set the values in 'this' vector to the values in array 'u'.
* @param u the array of values to be assigned
*/
def setAll (u: Array [Long]) { for (i <- range) v(i) = u(i) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Iterate over 'this' vector element by element.
* @param f the function to apply
*/
def foreach [U] (f: Long => U)
{
var i = 0
while (i < dim) { f (v(i)); i += 1 }
} // foreach
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Filter the elements of 'this' vector based on the predicate 'p', returning
* a new vector.
* @param p the predicate (Boolean function) to apply
*/
override def filter (p: Long => Boolean): VectorL = VectorL (v.filter (p))
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Filter the elements of 'this' vector based on the predicate 'p', returning
* the index positions.
* @param p the predicate (Boolean function) to apply
*/
def filterPos (p: Long => Boolean): Array [Int] =
{
(for (i <- range if p (v(i))) yield i).toArray
} // filterPos
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Map the elements of 'this' vector by applying the mapping function 'f'.
* @param f the function to apply
*/
def map (f: Long => Long): VectorL = new VectorL (this ().map (f))
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Slice 'this' vector 'from' to 'end'.
* @param from the start of the slice (included)
* @param till the end of the slice (excluded)
*/
override def slice (from: Int, till: Int): VectorL = new VectorL (till - from, v.slice (from, till))
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Select a subset of elements of 'this' vector corresponding to a 'basis'.
* @param basis the set of index positions (e.g., 0, 2, 5)
*/
def select (basis: Array [Int]): VectorL =
{
val c = new VectorL (basis.length)
for (i <- c.range) c.v(i) = v(basis(i))
c
} // select
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate 'this' vector and vector 'b'.
* @param b the vector to be concatenated
*/
def ++ (b: VectorL): VectorL =
{
val c = new VectorL (dim + b.dim)
for (i <- c.range) c.v(i) = if (i < dim) v(i) else b.v(i - dim)
c
} // ++
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate 'this' vector and scalar 's'.
* @param s the scalar to be concatenated
*/
def ++ (s: Long): VectorL =
{
val c = new VectorL (dim + 1)
for (i <- c.range) c.v(i) = if (i < dim) v(i) else s
c
} // ++
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add 'this' vector and vector 'b'.
* @param b the vector to add
*/
def + (b: VectorL): VectorL =
{
val c = new VectorL (dim)
for (i <- range) c.v(i) = v(i) + b.v(i)
c
} // +
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add 'this' vector and scalar 's'.
* @param s the scalar to add
*/
def + (s: Long): VectorL =
{
val c = new VectorL (dim)
for (i <- range) c.v(i) = v(i) + s
c
} // +
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add 'this' vector and scalar 's._1' only at position 's._2'.
* @param s the (scalar, position) to add
*/
def + (s: Tuple2 [Long, Int]): VectorL =
{
val c = new VectorL (dim)
for (i <- range) c.v(i) = if (i == s._2) v(i) + s._1 else v(i)
c
} // +
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add in-place 'this' vector and vector 'b'.
* @param b the vector to add
*/
def += (b: VectorL): VectorL = { for (i <- range) v(i) += b.v(i); this }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add in-place 'this' vector and scalar 's'.
* @param s the scalar to add
*/
def += (s: Long): VectorL = { for (i <- range) v(i) += s; this }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the negative of 'this' vector (unary minus).
*/
def unary_- (): VectorL =
{
val c = new VectorL (dim)
for (i <- range) c.v(i) = -v(i)
c
} // unary_-
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' vector subtract vector 'b'.
* @param b the vector to subtract
*/
def - (b: VectorL): VectorL =
{
val c = new VectorL (dim)
for (i <- range) c.v(i) = v(i) - b.v(i)
c
} // -
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' vector subtract scalar 's'.
* @param s the scalar to subtract
*/
def - (s: Long): VectorL =
{
val c = new VectorL (dim)
for (i <- range) c.v(i) = v(i) - s
c
} // -
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' vector subtract scalar 's._1' only at position 's._2'.
* @param s the (scalar, position) to subtract
*/
def - (s: Tuple2 [Long, Int]): VectorL =
{
val c = new VectorL (dim)
for (i <- range) c.v(i) = if (i == s._2) v(i) - s._1 else v(i)
c
} // -
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' vector subtract in-place vector 'b'.
* @param b the vector to subtract
*/
def -= (b: VectorL): VectorL = { for (i <- range) v(i) -= b.v(i); this }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' vector subtract in-place scalar 's'.
* @param s the scalar to subtract
*/
def -= (s: Long): VectorL = { for (i <- range) v(i) -= s; this }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' vector by vector 'b'.
* @param b the vector to multiply by
*/
def * (b: VectorL): VectorL =
{
val c = new VectorL (dim)
for (i <- range) c.v(i) = v(i) * b.v(i)
c
} // *
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' vector by scalar 's'.
* @param s the scalar to multiply by
*/
def * (s: Long): VectorL =
{
val c = new VectorL (dim)
for (i <- range) c.v(i) = v(i) * s
c
} // *
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' (row) vector by matrix 'm'.
* @param m the matrix to multiply by
*/
def * (m: MatriL): VectorL = m.t * this
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply in-place 'this' vector and vector 'b'.
* @param b the vector to multiply by
*/
def *= (b: VectorL): VectorL = { for (i <- range) v(i) *= b.v(i); this }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply in-place 'this' vector and scalar 's'.
* @param s the scalar to multiply by
*/
def *= (s: Long): VectorL = { for (i <- range) v(i) *= s; this }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Divide 'this' vector by vector 'b' (element-by-element).
* @param b the vector to divide by
*/
def / (b: VectorL): VectorL =
{
val c = new VectorL (dim)
for (i <- range) c.v(i) = v(i) / b.v(i)
c
} // /
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Divide 'this' vector by scalar 's'.
* @param s the scalar to divide by
*/
def / (s: Long): VectorL =
{
val c = new VectorL (dim)
for (i <- range) c.v(i) = v(i) / s
c
} // /
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Divide in-place 'this' vector by vector 'b'.
* @param b the vector to divide by
*/
def /= (b: VectorL): VectorL = { for (i <- range) v(i) /= b.v(i); this }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Divide in-place 'this' vector by scalar 's'.
* @param s the scalar to divide by
*/
def /= (s: Long): VectorL = { for (i <- range) v(i) /= s; this }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the vector containing each element of 'this' vector raised to the
* s-th power.
* @param s the scalar exponent
*/
def ~^ (s: Long): VectorL =
{
val c = new VectorL (dim)
for (i <- range) c.v(i) = v(i) ~^ s
c
} // ~^
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compare 'this' vector with that vector 'b' for inequality.
* @param b that vector
*/
def ≠ (b: VectorL) = this != b
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compare 'this' vector with that vector 'b' for less than or equal to.
* @param b that vector
*/
def ≤ (b: VectorL) = this <= b
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compare 'this' vector with that vector 'b' for greater than or equal to.
* @param b that vector
*/
def ≥ (b: VectorL) = this >= b
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Raise in-place each element of 'this' vector to the 's'-th power.
* @param s the scalar exponent
*/
def ~^= (s: Long) { for (i <- range) v(i) = v(i) ~^ s }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the vector containing the square of each element of 'this' vector.
*/
def sq: VectorL = this * this
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the vector containing the reciprocal of each element of 'this' vector.
* NOTE(review): integer division — 1l / v(i) is 0 for any |v(i)| > 1, so this
* is only meaningful for elements in {-1, 1} (and throws for 0).
*/
def recip: VectorL =
{
val c = new VectorL (dim)
for (i <- range) c.v(i) = 1l / v(i)
c
} // recip
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the vector that is the element-wise absolute value of 'this' vector.
*/
def abs: VectorL =
{
val c = new VectorL (dim)
for (i <- range) c.v(i) = ABS (v(i))
c
} // abs
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Sum the elements of 'this' vector.
*/
def sum: Long = v.foldLeft (0l)((s, x) => s + x)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Sum the absolute value of the elements of 'this' vector.
*/
def sumAbs: Long = v.foldLeft (0l)((s, x) => s + ABS (x))
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Sum the elements of 'this' vector skipping the 'i'-th element (Not Equal 'i').
* @param i the index of the element to skip
*/
def sumNE (i: Int): Long = sum - v(i)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Sum the positive (> 0) elements of 'this' vector.
*/
def sumPos: Long = v.foldLeft (0l)((s, x) => s + MAX (x, 0l))
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the mean of the elements of 'this' vector (as a Double).
*/
def mean = sum / nd
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the (unbiased) sample variance of the elements of 'this' vector.
*/
def variance = (normSq - sum * sum / nd) / (nd-1.0)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the population variance of the elements of 'this' vector.
* This is also the (biased) MLE estimator for sample variance.
*/
def pvariance = (normSq - sum * sum / nd) / nd
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Establish the rank order of the elements in 'self' vector, e.g.,
* (8.0, 2.0, 4.0, 6.0) is (3, 0, 1, 2).
*/
def rank: VectorI = new VectorI (iqsort (v))
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Cumulate the values of 'this' vector from left to right (e.g., create a
* CDF from a pmf). Example: (4, 2, 3, 1) --> (4, 6, 9, 10)
*/
def cumulate: VectorL =
{
val c = new VectorL (dim)
var sum: Long = 0l
for (i <- range) { sum += v(i); c.v(i) = sum }
c
} // cumulate
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Normalize 'this' vector so that it sums to one (like a probability vector).
* NOTE(review): integer division — 1l / sum is 0 whenever sum > 1, so this
* yields the zero vector for most Long inputs; presumably carried over from
* the Double template — confirm intended semantics for `VectorL`.
*/
def normalize: VectorL = this * (1l / sum)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Normalize 'this' vector so its length is one (unit vector).
* NOTE(review): same integer-division caveat as 'normalize'.
*/
def normalizeU: VectorL = this * (1l / norm)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Normalize 'this' vector to have a maximum of one.
* NOTE(review): same integer-division caveat as 'normalize'.
*/
def normalize1: VectorL = this * (1l / max ())
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the dot product (or inner product) of 'this' vector with vector 'b'.
* @param b the other vector
*/
def dot (b: VectorL): Long =
{
var sum: Long = 0l
for (i <- range) sum += v(i) * b.v(i)
sum
} // dot
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the dot product (or inner product) of 'this' vector with vector 'b'.
* @param b the other vector
*/
def ∙ (b: VectorL): Long =
{
var sum: Long = 0l
for (i <- range) sum += v(i) * b.v(i)
sum
} // ∙
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the Euclidean norm (2-norm) squared of 'this' vector.
*/
def normSq: Long = this dot this
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the Euclidean norm (2-norm) of 'this' vector.
* NOTE(review): truncates toward zero via '.toLong', so the result is the
* floor of the true (real-valued) norm.
*/
def norm: Long = sqrt (normSq).toLong
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the Manhattan norm (1-norm) of 'this' vector.
*/
def norm1: Long =
{
var sum: Long = 0l
for (i <- range) sum += ABS (v(i))
sum
} // norm1
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Find the maximum element in 'this' vector.
* @param e the ending index (exclusive) for the search
*/
def max (e: Int = dim): Long =
{
var x = v(0)
for (i <- 1 until e if v(i) > x) x = v(i)
x
} // max
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Take the maximum of 'this' vector with vector 'b' (element-by element).
* @param b the other vector
*/
def max (b: VectorL): VectorL =
{
val c = new VectorL (dim)
for (i <- range) c.v(i) = if (b.v(i) > v(i)) b.v(i) else v(i)
c
} // max
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Find the minimum element in 'this' vector.
* @param e the ending index (exclusive) for the search
*/
def min (e: Int = dim): Long =
{
var x = v(0)
for (i <- 1 until e if v(i) < x) x = v(i)
x
} // min
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Take the minimum of 'this' vector with vector 'b' (element-by element).
* @param b the other vector
*/
def min (b: VectorL): VectorL =
{
val c = new VectorL (dim)
for (i <- range) c.v(i) = if (b.v(i) < v(i)) b.v(i) else v(i)
c
} // min
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Find the element with the greatest magnitude in 'this' vector.
*/
def mag: Long = ABS (max ()) max ABS (min ())
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Find the argument maximum of 'this' vector (index of maximum element).
* @param e the ending index (exclusive) for the search
*/
def argmax (e: Int = dim): Int =
{
var j = 0
for (i <- 1 until e if v(i) > v(j)) j = i
j
} // argmax
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Find the argument minimum of 'this' vector (index of minimum element).
* @param e the ending index (exclusive) for the search
*/
def argmin (e: Int = dim): Int =
{
var j = 0
for (i <- 1 until e if v(i) < v(j)) j = i
j
} // argmin
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the argument minimum of 'this' vector (-1 if its not negative).
* @param e the ending index (exclusive) for the search
*/
def argminNeg (e: Int = dim): Int =
{
val j = argmin (e); if (v(j) < 0l) j else -1
} // argminNeg
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the argument maximum of 'this' vector (-1 if its not positive).
* @param e the ending index (exclusive) for the search
*/
def argmaxPos (e: Int = dim): Int =
{
val j = argmax (e); if (v(j) > 0l) j else -1
} // argmaxPos
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the index of the first negative element in 'this' vector (-1 otherwise).
* @param e the ending index (exclusive) for the search
*/
def firstNeg (e: Int = dim): Int =
{
for (i <- 0 until e if v(i) < 0l) return i; -1
} // firstNeg
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the index of the first positive element in 'this' vector (-1 otherwise).
* @param e the ending index (exclusive) for the search
*/
def firstPos (e: Int = dim): Int =
{
for (i <- 0 until e if v(i) > 0l) return i; -1
} // firstPos
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the index of the first occurrence of element 'x' in 'this' vector,
* or -1 if not found.
* NOTE(review): parameter is typed Int while elements are Long — Long == Int
* comparison still works numerically, but callers cannot search for values
* outside the Int range; confirm whether the parameter should be Long.
* @param x the given element
* @param e the ending index (exclusive) for the search
*/
def indexOf (x: Int, e: Int = dim): Int =
{
for (i <- 0 until e if v(i) == x) return i; -1
} // indexOf
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Count the number of strictly negative elements in 'this' vector.
*/
def countNeg: Int =
{
var count = 0
for (i <- 0 until dim if v(i) < 0l) count += 1
count
} // countNeg
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Count the number of strictly positive elements in 'this' vector.
*/
def countPos: Int =
{
var count = 0
for (i <- 0 until dim if v(i) > 0l) count += 1
count
} // countPos
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Count the number of distinct elements in 'this' vector.
*/
def distinct: Int =
{
var count = 1
val us = new VectorL (this); us.sort () // sorted vector
for (i <- 1 until dim if us(i) != us(i-1)) count += 1
count
} // distinct
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Determine whether the predicate 'pred' holds for some element in 'this' vector.
* @param pred the predicate to test (e.g., "_ == 5.")
*/
// def exists (pred: (Long) => Boolean): Boolean = v.exists (pred)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Determine whether 'x' is contained in 'this' vector.
* @param x the element to be checked
*/
def contains (x: Long): Boolean = v contains x
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Sort 'this' vector in-place in ascending (non-decreasing) order.
*/
def sort () { quickSort (v) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Sort 'this' vector in-place in descending (non-increasing) order.
*/
def sort2 () { qsort2 (v) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Swap elements 'i' and 'j' in 'this' vector.
* @param i the first element in the swap
* @param j the second element in the swap
*/
def swap (i: Int, j: Int)
{
val t = v(j); v(j) = v(i); v(i) = t
} // swap
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Check whether the other vector 'b' is at least as long as 'this' vector.
* NOTE(review): despite the name, this checks dim <= b.dim, not equality.
* @param b the other vector
*/
def sameDimensions (b: VectorL): Boolean = dim <= b.dim
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Check whether 'this' vector is nonnegative (has no negative elements).
*/
def isNonnegative: Boolean =
{
for (i <- range if v(i) < 0l) return false
true
} // isNonnegative
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compare 'this' vector with vector 'b' (partial order: Some(-1/0/1) or None
* when the vectors are incomparable element-wise).
* @param b the other vector
*/
def tryCompareTo [B >: VectorL] (b: B)
(implicit view_1: (B) => PartiallyOrdered [B]): Option [Int] =
{
var le = true
var ge = true
for (i <- range) {
val b_i = b.asInstanceOf [VectorL] (i)
if (ge && (v(i) compare b_i) < 0) ge = false
else if (le && (v(i) compare b_i) > 0) le = false
} // for
if (ge && le) Some (0) else if (le) Some (-1) else if (ge) Some (1) else None
} // tryCompareTo
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Override equals to determine whether 'this' vector equals vector 'b'.
* @param b the vector to compare with this
*/
override def equals (b: Any): Boolean =
{
b.isInstanceOf [VectorL] && (v.deep equals b.asInstanceOf [VectorL].v.deep)
} // equals
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Must also override hashCode for 'this' vector to be compatible with equals.
*/
override def hashCode: Int = v.deep.hashCode
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set the format to the 'newFormat' (e.g., "%.6g,\\t" or "%12.6g,\\t").
* @param newFormat the new format String
*/
def setFormat (newFormat: String) { fString = newFormat }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Convert 'this' vector to a String.
*/
override def toString: String =
{
var sb = new StringBuilder ("VectorL(")
if (dim == 0) return sb.append (")").mkString
for (i <- range) {
sb.append (fString.format (v(i)))
if (i == dim-1) sb = sb.dropRight (1)
} // for
sb.replace (sb.length-1, sb.length, ")").mkString
} // toString
} // VectorL class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `VectorL` object is the companion object for the `VectorL` class.
*/
object VectorL
{
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Build a `VectorL` from one or more Long values (repeated values Long*).
* @param x the first Long number
* @param xs the rest of the Long numbers
*/
def apply (x: Long, xs: Long*): VectorL =
{
val vec = new VectorL (1 + xs.length)
vec(0) = x
for (i <- xs.indices) vec.v(i + 1) = xs(i)
vec
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Build a `VectorL` from a sequence of Longs.
* @param xs the sequence of the Long numbers
*/
def apply (xs: Seq [Long]): VectorL =
{
val vec = new VectorL (xs.length)
for (i <- xs.indices) vec.v(i) = xs(i)
vec
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Build a `VectorL` by parsing one or more Strings (repeated values String*).
* @param x the first String
* @param xs the rest of the Strings
*/
def apply (x: String, xs: String*): VectorL =
{
val vec = new VectorL (1 + xs.length)
vec(0) = x.toLong
for (i <- xs.indices) vec.v(i + 1) = xs(i).toLong
vec
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Build a `VectorL` by parsing an array of Strings.
* @param xs the array of the Strings
*/
def apply (xs: Array [String]): VectorL =
{
val vec = new VectorL (xs.length)
for (i <- xs.indices) vec.v(i) = xs(i).toLong
vec
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Build a `VectorL` by parsing an array of Strings, skipping the first 'skip'
* elements. A non-numeric element falls back to its hashcode.
* FIX: Might be better to map non-numeric Strings to ordinal values.
* @param xs the array of the Strings
* @param skip the number of elements at the beginning to skip (e.g., id column)
*/
def apply (xs: Array [String], skip: Int): VectorL =
{
val vec = new VectorL (xs.length - skip)
for (i <- skip until xs.length) {
val s = xs(i)
vec.v(i - skip) = if (s matches "[\\\\-\\\\+]?\\\\d+") s.toLong else s.hashCode ()
} // for
vec
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Build a one vector (all elements are one) of length 'size'.
* @param size the size of the vector
*/
def one (size: Int): VectorL =
{
val vec = new VectorL (size)
vec.set (1l)
vec
} // one
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate scalar 'b' and vector 'u'.
* @param b the scalar to be concatenated - first part
* @param u the vector to be concatenated - second part
*/
def ++ (b: Long, u: VectorL): VectorL =
{
val vec = new VectorL (u.dim + 1)
vec(0) = b
for (i <- 0 until u.dim) vec.v(i + 1) = u.v(i)
vec
} // ++
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Build a `VectorL` holding the increasing integers in [start, end).
* @param start the start value of the vector, inclusive
* @param end the end value of the vector, exclusive (i.e., the first value not returned)
*/
def range (start: Int, end: Int): VectorL =
{
val vec = new VectorL (end - start)
for (i <- 0 until vec.dim) vec.v(i) = (start + i).toLong
vec
} // range
} // VectorL object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `VectorLTest` object tests the operations provided by `VectorL`.
* > run-main scalation.linalgebra.VectorLTest
*/
// Smoke test: exercises scalar ops, vector ops, reductions, norms, comparison,
// hashing and the String-based factory across dimensions 1 to 4, printing results.
// NOTE(review): uses the App trait (delayed-init); fine for a demo, but a plain
// main method is preferable for non-trivial entry points.
object VectorLTest extends App
{
var x: VectorL = null
var y: VectorL = null
for (l <- 1 to 4) {
println ("\\n\\tTest VectorL on vectors of dim " + l)
x = new VectorL (l)
y = new VectorL (l)
x.set (2)
y.set (3)
// test vector op scalar
println ("x + 4 = " + (x + 4))
println ("x - 4 = " + (x - 4))
println ("x * 4 = " + (x * 4))
println ("x / 4 = " + (x / 4))
println ("x ~^ 4 = " + (x ~^ 4))
// test vector op vector
println ("x + y = " + (x + y))
println ("x - y = " + (x - y))
println ("x * y = " + (x * y))
println ("x / y = " + (x / y))
// reductions, norms and ordering
println ("x.min = " + x.min ())
println ("x.max = " + x.max ())
println ("x.sum = " + x.sum)
println ("x.sumNE = " + x.sumNE (0))
println ("x dot y = " + (x dot y))
println ("x ∙ y = " + (x ∙ y))
println ("x.normSq = " + x.normSq)
println ("x.norm = " + x.norm)
println ("x < y = " + (x < y))
} // for
println ("hashCode (" + x + ") = " + x.hashCode ())
println ("hashCode (" + y + ") = " + y.hashCode ())
// String-based factory plus map/filter combinators
val z = VectorL ("1", "2", "3", "4")
println ("z = " + z)
println ("z.map (_ * 2) = " + z.map ((e: Long) => e * 2))
println ("z.filter (_ > 2) = " + z.filter (_ > 2))
} // VectorLTest
| mvnural/scalation | src/main/scala/scalation/linalgebra/VectorL.scala | Scala | mit | 35,907 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.services.nodes
import org.junit.runner.RunWith
import org.specs2.mutable._
import org.specs2.matcher._
import org.specs2.runner.JUnitRunner
import net.liftweb.common.Loggable
import com.normation.rudder.domain.queries.DitQueryData
import com.normation.rudder.domain.NodeDit
import com.normation.ldap.ldif.DefaultLDIFFileLogger
import com.normation.inventory.ldap.core.{InventoryDit,LDAPConstants}
import com.normation.rudder.repository.ldap.LDAPEntityMapper
import com.normation.ldap.listener.InMemoryDsConnectionProvider
import com.unboundid.ldap.sdk.DN
import org.specs2.specification.Fragments
import org.specs2.specification.Step
import net.liftweb.common._
import com.normation.ldap.sdk.RoLDAPConnection
@RunWith(classOf[JUnitRunner])
class TestQuickSearchService extends QuickSearchServiceSpec {

  //example test: a lookup on an exact node id must return exactly that node
  "test1: the search for 'node' in id" should {
    "yield one result for 'node1'" in {
      quickSearch.lookup("node1", 100) match {
        case eb:EmptyBox =>
          val e = eb ?~! "test1 failed"
          e.exceptionChain.foreach( t => logger.error(t) )
          failure(e.messageChain)
        case Full(res) => res must have size(1)
      }
    }

    /*
     * TODO: this test does not pass because in our test data we have nodes
     * in the inventory branch which do not have a corresponding node in the
     * node branch. That is really a business error, because all accepted
     * inventory nodes in Rudder should have been registered in the "node"
     * branch, but it is not a reason to completely fail a quicksearch (all
     * other results should be returned).
     * So, quicksearch should be changed to either:
     * - change the last sequence with a fold which ignores erroneous nodes
     * - change the second search to also filter with result node IDs
     *   from the first search
     */
    "ignore superfluous server entries" in {
      quickSearch.lookup("node", 100) match {
        case eb:EmptyBox =>
          val e = eb ?~! "test1 failed"
          e.exceptionChain.foreach( t => logger.error(t) )
          failure(e.messageChain)
        case Full(res) => res must have size(8)
      }
    }

    //NOTE(review): the example name below is missing a space ("matchsuperfluous")
    //and "QuichSearch" is a typo, but both are runtime strings so they are left as-is.
    "not matchsuperfluous server entries" in {
      quickSearch.lookup("node0_0", 100) match {
        case Full(res) => res must have size(0)
        case eb:EmptyBox =>
          val e = eb ?~"QuichSearch lookup failed"
          failure(e.messageChain)
      }
    }
  }

  //NOTE(review): purpose of this empty example group is unclear — presumably a
  //specs2 fragment-structure placeholder; confirm before removing.
  "" should { "succeed" in success }

  //an empty search string must yield an empty (but successful) result
  "when entry is invalid" should {
    "return an empty sequence" in {
      quickSearch.lookup("", 100) must beEqualTo(Full(Seq()))
    }
  }
}
//a trait which handles all service initialization shared by the specification
//above: in-memory LDAP directory, DIT layout, entity mapper, and the
//QuickSearchService under test
trait QuickSearchServiceSpec extends Specification with Loggable {

  /**
   * Stop the directory after all tests.
   * A slower alternative would be to fully init each services before
   * each fragment, using scoped variable:
   * http://etorreborre.github.com/specs2/guide/org.specs2.guide.SpecStructure.html#Variables+isolation
   */
  override def map(fs: => Fragments) = fs ^ Step(stopLDAPServer)

  def stopLDAPServer = ldap.server.shutDown(true)

  //set-up the LDAP servers and required services
  private[this] val ldap = {
    val ldifLogger = new DefaultLDIFFileLogger("TestQueryProcessor","/tmp/normation/rudder/ldif")

    //init of in memory LDAP directory: schema files are loaded in order
    //(core first, then inventory/rudder extensions)
    val schemaLDIFs = (
        "00-core" ::
        "01-pwpolicy" ::
        "04-rfc2307bis" ::
        "05-rfc4876" ::
        "099-0-inventory" ::
        "099-1-rudder" ::
        Nil
      ) map { name =>
        this.getClass.getClassLoader.getResource("ldap-data/schema/" + name + ".ldif").getPath
      }

    //base tree plus sample inventory entries used by the tests above
    val bootstrapLDIFs = ("ldap/bootstrap.ldif" :: "ldap-data/inventory-sample-data.ldif" :: Nil) map { name =>
      this.getClass.getClassLoader.getResource(name).getPath
    }

    val ldap = InMemoryDsConnectionProvider.apply[RoLDAPConnection](
        baseDNs = "cn=rudder-configuration" :: Nil
      , schemaLDIFPaths = schemaLDIFs
      , bootstrapLDIFPaths = bootstrapLDIFs
      , ldifLogger
    )
    ldap
  }
  //end inMemory ds

  private[this] val inventoryDit = new InventoryDit(new DN("ou=Accepted Inventories,ou=Inventories,cn=rudder-configuration"),new DN("ou=Inventories,cn=rudder-configuration"),"test")

  final val nodeDit = new NodeDit(new DN("cn=rudder-configuration"))

  //mapper is only given the DITs needed by quicksearch; rudderDit and
  //cmdbQueryParser are unused here, hence null
  final val ldapMapper = new LDAPEntityMapper(
      rudderDit = null
    , nodeDit = nodeDit
    , inventoryDit = inventoryDit
    , cmdbQueryParser = null
  )

  //the actual service to test
  final val quickSearch = new QuickSearchServiceImpl(ldap, nodeDit, inventoryDit, ldapMapper
    //nodeAttributes
    , Seq(LDAPConstants.A_NAME, LDAPConstants.A_NODE_UUID)
    //serverAttributes
    , Seq(
          LDAPConstants.A_HOSTNAME
        , LDAPConstants.A_LIST_OF_IP
        , LDAPConstants.A_OS_NAME
        , LDAPConstants.A_OS_FULL_NAME
        , LDAPConstants.A_OS_VERSION
        , LDAPConstants.A_OS_SERVICE_PACK
        , LDAPConstants.A_OS_KERNEL_VERSION
      ))
}
package com.arcusys.valamis.web.portlet
import javax.portlet._
import com.arcusys.valamis.web.portlet.base.PortletBase
class ParticipantReportView extends GenericPortlet with PortletBase {

  /** Renders the participant report mustache template for the portlet's view mode. */
  override def doView(request: RenderRequest, response: RenderResponse) {
    implicit val out = response.getWriter
    val templateData = Map("contextPath" -> getContextPath(request))
    sendMustacheFile(templateData, "participant_report.html")
  }
}
| igor-borisov/JSCORM | valamis-portlets/src/main/scala/com/arcusys/valamis/web/portlet/ParticipantReportView.scala | Scala | gpl-3.0 | 436 |
package parsing.combinator.lexer
import scala.util.parsing.input.Positional
/**
* Created by hongdi.ren.
*/
/** Base type of every token produced by the workflow lexer; extends
 *  Positional so each token carries its source position for error reporting.
 */
sealed trait WorkflowToken extends Positional

// Tokens carrying a payload.
case class IDENTIFIER(str: String) extends WorkflowToken
case class LITERAL(str: String) extends WorkflowToken
case class INDENTATION(spaces: Int) extends WorkflowToken

// Keyword tokens.
case class EXIT() extends WorkflowToken
case class READINPUT() extends WorkflowToken
case class CALLSERVICE() extends WorkflowToken
case class SWITCH() extends WorkflowToken
case class OTHERWISE() extends WorkflowToken

// Punctuation / operator tokens.
case class COLON() extends WorkflowToken
case class ARROW() extends WorkflowToken
case class EQUALS() extends WorkflowToken
case class COMMA() extends WorkflowToken

// Block-structure token (paired with DEDENT below); presumably synthesized
// from INDENTATION changes — confirm in the lexer.
case class INDENT() extends WorkflowToken
case class DEDENT() extends WorkflowToken
package com.wavesplatform.lang.contract
import com.wavesplatform.lang.v1.compiler.Types.{BOOLEAN, BYTESTR, LONG, STRING}
package object meta {
  // The primitive value types that contract metadata can describe.
  private val definedTypes =
    List(LONG, BYTESTR, BOOLEAN, STRING)

  // Mapper stack: each layer wraps the previous one.
  private[meta] val singleTypeMapper =
    SingleTypeMapper(definedTypes)

  private[meta] val unionTypeMapper =
    UnionTypeMapper(singleTypeMapper)

  private[meta] val listTypeMapper =
    ListTypeMapper(unionTypeMapper)

  // V1 strategy is built on the union mapper; V2 additionally goes through the
  // list mapper layer.
  object MetaMapperStrategyV1
    extends DataMetaMapper(unionTypeMapper, V1)
      with MetaMapperStrategy[V1.Self]

  object MetaMapperStrategyV2
    extends DataMetaMapper(listTypeMapper, V2)
      with MetaMapperStrategy[V2.Self]
}
| wavesplatform/Waves | lang/shared/src/main/scala/com/wavesplatform/lang/contract/meta/package.scala | Scala | mit | 674 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.rdd.read
import htsjdk.samtools.{ TextCigarCodec, ValidationStringency }
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.models.{ MdTag, ReferenceRegion }
import org.bdgenomics.adam.util.ReferenceFile
import org.bdgenomics.formats.avro.AlignmentRecord
import org.bdgenomics.utils.misc.Logging
/**
 * Computes and attaches SAM "MD" (mismatching positions) tags to aligned
 * reads, validating any tags already present against the reference sequence.
 *
 * @param reads the reads to process
 * @param referenceFile source of reference bases; broadcast to the executors
 * @param overwriteExistingTags when true, an extant tag that disagrees with
 *        the recomputed one is silently replaced
 * @param validationStringency on a disagreeing extant tag: STRICT throws
 *        IncorrectMDTagException, LENIENT logs a warning, otherwise the
 *        disagreement is only counted
 */
private[read] case class MDTagging(
  reads: RDD[AlignmentRecord],
  @transient referenceFile: ReferenceFile,
  overwriteExistingTags: Boolean = false,
  validationStringency: ValidationStringency = ValidationStringency.STRICT) extends Logging {
  @transient val sc = reads.sparkContext

  // Accumulators tracking tagging statistics, updated on the executors.
  val mdTagsAdded = sc.accumulator(0L, "MDTags Added")
  val mdTagsExtant = sc.accumulator(0L, "MDTags Extant")
  val numUnmappedReads = sc.accumulator(0L, "Unmapped Reads")
  val incorrectMDTags = sc.accumulator(0L, "Incorrect Extant MDTags")

  // Result RDD, computed eagerly at construction and cached.
  val taggedReads = addMDTagsBroadcast.cache

  /**
   * Recompute the MD tag for one read from its reference slice, then either
   * install it (none present), validate it (one present and matching), or —
   * on mismatch — replace it / throw / warn per the settings above.
   * Mutates and returns the same AlignmentRecord instance.
   */
  def maybeMDTagRead(read: AlignmentRecord, refSeq: String): AlignmentRecord = {
    val cigar = TextCigarCodec.decode(read.getCigar)
    val mdTag = MdTag(read.getSequence, refSeq, cigar, read.getStart)
    if (read.getMismatchingPositions != null) {
      mdTagsExtant += 1
      if (mdTag.toString != read.getMismatchingPositions) {
        incorrectMDTags += 1
        if (overwriteExistingTags) {
          read.setMismatchingPositions(mdTag.toString)
        } else {
          val exception = IncorrectMDTagException(read, mdTag.toString)
          if (validationStringency == ValidationStringency.STRICT) {
            throw exception
          } else if (validationStringency == ValidationStringency.LENIENT) {
            log.warn(exception.getMessage)
          }
        }
      }
    } else {
      read.setMismatchingPositions(mdTag.toString)
      mdTagsAdded += 1
    }
    read
  }

  /**
   * Tag every mapped read, extracting its reference slice from the broadcast
   * reference file.  Unmapped reads (and, under non-STRICT stringency, reads
   * whose tagging throws) pass through unchanged.
   */
  def addMDTagsBroadcast(): RDD[AlignmentRecord] = {
    val referenceFileB = sc.broadcast(referenceFile)
    reads.map(read => {
      // yield only for mapped reads with a contig name; others hit getOrElse
      (for {
        contig <- Option(read.getContigName)
        if read.getReadMapped
      } yield {
        try {
          maybeMDTagRead(read, referenceFileB.value
            .extract(ReferenceRegion.unstranded(read)))
        } catch {
          case t: Throwable => {
            if (validationStringency == ValidationStringency.STRICT) {
              throw t
            } else if (validationStringency == ValidationStringency.LENIENT) {
              log.warn("Caught exception when processing read %s: %s".format(
                read.getContigName, t))
            }
            read
          }
        }
      }).getOrElse({
        numUnmappedReads += 1
        read
      })
    })
  }
}
/**
 * Signals that a read's recomputed MD tag disagrees with the MD tag already
 * attached to the read.
 *
 * @param read the read carrying the original (incorrect) MD tag
 * @param mdTag the freshly recomputed MD tag
 */
case class IncorrectMDTagException(read: AlignmentRecord, mdTag: String) extends Exception {
  override def getMessage: String = {
    val position = s"${read.getContigName}:${read.getStart}"
    s"Read: ${read.getReadName}, pos: $position, cigar: ${read.getCigar}, existing MD tag: ${read.getMismatchingPositions}, correct MD tag: $mdTag"
  }
}
| massie/adam | adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/MDTagging.scala | Scala | apache-2.0 | 4,023 |
package net.premereur.binaro
object SampleBinaroSolving extends App {
  import BinaroSolver.solve
  import Binaro.Board

  // Sample puzzle layouts: rows separated by '|', '.' marks an empty cell.
  private val layouts = Seq(
    ".1....|0...11|00..1.|..1..0|...1..|1...0.",
    "11....|...0.0|......|...1..|..0..0|.0.00.",
    "111...|...0.0|......|...1..|..0..0|.0.00.",
    "00........|.0.......1|..00...0..|.0.0.0.0..|....00....|..1.....0.|...0...0.0|..1.1.....|...1..00.0|11......1.",
    "0010101101|.0.......1|..00...0..|00.0.0.0..|0...00....|1.1.....0.|...0.010.0|..1.1.....|...1..00.0|11....1.1."
  )

  // Solve each puzzle, printing a blank line between consecutive solutions.
  for ((layout, i) <- layouts.zipWithIndex) {
    if (i > 0) println()
    println(solve(Board(layout)))
  }
}
| gpremer/binaro | src/main/scala/net/premereur/binaro/SampleBinaroSolving.scala | Scala | gpl-2.0 | 653 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.nio.ByteBuffer
import org.apache.kafka.common.KafkaException
/**
* Helper functions specific to parsing or serializing requests and responses
*/
/**
 * Helper functions for reading and writing the size-prefixed strings used in
 * request/response serialization.
 */
object ApiUtils {

  val ProtocolEncoding = "UTF-8"

  /**
   * Read a size-prefixed string where the size is stored as a 2 byte short.
   * A negative size prefix denotes a null string.
   * @param buffer The buffer to read from
   */
  def readShortString(buffer: ByteBuffer): String = {
    val size: Int = buffer.getShort()
    if (size < 0) {
      null
    } else {
      val bytes = new Array[Byte](size)
      buffer.get(bytes)
      new String(bytes, ProtocolEncoding)
    }
  }

  /**
   * Write a size-prefixed string where the size is stored as a 2 byte short;
   * null is encoded as a size of -1 with no payload.
   * @param buffer The buffer to write to
   * @param string The string to write
   * @throws KafkaException if the encoded string exceeds Short.MaxValue bytes
   */
  def writeShortString(buffer: ByteBuffer, string: String): Unit = {
    if (string == null) {
      buffer.putShort(-1)
    } else {
      val encoded = string.getBytes(ProtocolEncoding)
      if (encoded.length > Short.MaxValue)
        throw new KafkaException("String exceeds the maximum size of " + Short.MaxValue + ".")
      buffer.putShort(encoded.length.toShort)
      buffer.put(encoded)
    }
  }

  /**
   * Return the on-wire size (2 byte prefix plus payload) of a size-prefixed string.
   * @param string The string to measure
   * @throws KafkaException if the encoded string exceeds Short.MaxValue bytes
   */
  def shortStringLength(string: String): Int =
    if (string == null) {
      2
    } else {
      val encoded = string.getBytes(ProtocolEncoding)
      if (encoded.length > Short.MaxValue)
        throw new KafkaException("String exceeds the maximum size of " + Short.MaxValue + ".")
      2 + encoded.length
    }
}
| noslowerdna/kafka | core/src/main/scala/kafka/api/ApiUtils.scala | Scala | apache-2.0 | 2,556 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spark.storage
import spark.{Utils, SparkContext}
import BlockManagerMasterActor.BlockStatus
/**
 * Snapshot of one block manager's storage: its id, maximum memory, and the
 * status of every block it holds.
 */
private[spark]
case class StorageStatus(blockManagerId: BlockManagerId, maxMem: Long,
  blocks: Map[String, BlockStatus]) {

  /** Memory consumed by blocks whose id starts with `blockPrefix` (all blocks by default). */
  def memUsed(blockPrefix: String = ""): Long = {
    // sum yields 0 for an empty selection, replacing reduceOption(_+_).getOrElse(0l)
    blocks.filterKeys(_.startsWith(blockPrefix)).values.map(_.memSize).sum
  }

  /** Disk space consumed by blocks whose id starts with `blockPrefix`. */
  def diskUsed(blockPrefix: String = ""): Long = {
    blocks.filterKeys(_.startsWith(blockPrefix)).values.map(_.diskSize).sum
  }

  /** Memory still available on this block manager. */
  def memRemaining : Long = maxMem - memUsed()
}
/**
 * Storage summary for one RDD: cached partition count, total partitions, and
 * aggregate memory/disk footprint.  Ordered ascending by RDD id.
 */
case class RDDInfo(id: Int, name: String, storageLevel: StorageLevel,
  numCachedPartitions: Int, numPartitions: Int, memSize: Long, diskSize: Long)
  extends Ordered[RDDInfo] {
  override def toString = {
    import Utils.memoryBytesToString
    "RDD \"%s\" (%d) Storage: %s; CachedPartitions: %d; TotalPartitions: %d; MemorySize: %s; DiskSize: %s".format(name, id,
      storageLevel.toString, numCachedPartitions, numPartitions, memoryBytesToString(memSize), memoryBytesToString(diskSize))
  }
  override def compare(that: RDDInfo) = {
    // Use Int.compare rather than subtraction: `this.id - that.id` can
    // overflow and report the wrong ordering for extreme id values.
    this.id compare that.id
  }
}
/* Helper methods for storage-related objects */
private[spark]
object StorageUtils {

  /* Returns RDD-level information, compiled from a list of StorageStatus objects */
  def rddInfoFromStorageStatus(storageStatusList: Seq[StorageStatus],
    sc: SparkContext) : Array[RDDInfo] = {
    rddInfoFromBlockStatusList(storageStatusList.flatMap(_.blocks).toMap, sc)
  }

  /* Returns a map of blocks to their locations, compiled from a list of StorageStatus objects */
  def blockLocationsFromStorageStatus(storageStatusList: Seq[StorageStatus]) = {
    // Pair every block id with the host:port of the block manager holding it,
    // then group so each block id maps to the list of all its locations.
    val blockLocationPairs = storageStatusList
      .flatMap(s => s.blocks.map(b => (b._1, s.blockManagerId.hostPort)))
    blockLocationPairs.groupBy(_._1).map{case (k, v) => (k, v.unzip._2)}.toMap
  }

  /* Given a list of BlockStatus objects, returns information for each RDD */
  def rddInfoFromBlockStatusList(infos: Map[String, BlockStatus],
    sc: SparkContext) : Array[RDDInfo] = {

    // Group by rddId, ignore the partition name: "rdd_<id>_<split>" -> "rdd_<id>"
    val groupedRddBlocks = infos.filterKeys(_.startsWith("rdd_")).groupBy { case(k, v) =>
      k.substring(0,k.lastIndexOf('_'))
    }.mapValues(_.values.toArray)

    // For each RDD, generate an RDDInfo object
    val rddInfos = groupedRddBlocks.map { case (rddKey, rddBlocks) =>
      // Add up memory and disk sizes (rddBlocks is non-empty by construction of groupBy)
      val memSize = rddBlocks.map(_.memSize).reduce(_ + _)
      val diskSize = rddBlocks.map(_.diskSize).reduce(_ + _)

      // Find the id of the RDD, e.g. rdd_1 => 1
      val rddId = rddKey.split("_").last.toInt

      // Get the friendly name and storage level for the RDD, if available;
      // RDDs no longer tracked in sc.persistentRdds are dropped by the flatten.
      sc.persistentRdds.get(rddId).map { r =>
        val rddName = Option(r.name).getOrElse(rddKey)
        val rddStorageLevel = r.getStorageLevel
        RDDInfo(rddId, rddName, rddStorageLevel, rddBlocks.length, r.partitions.size, memSize, diskSize)
      }
    }.flatten.toArray

    // In-place sort by RDDInfo's Ordered instance (ascending id)
    scala.util.Sorting.quickSort(rddInfos)
    rddInfos
  }

  /* Removes all BlockStatus objects that are not part of a block prefix */
  def filterStorageStatusByPrefix(storageStatusList: Array[StorageStatus],
    prefix: String) : Array[StorageStatus] = {
    storageStatusList.map { status =>
      val newBlocks = status.blocks.filterKeys(_.startsWith(prefix))
      //val newRemainingMem = status.maxMem - newBlocks.values.map(_.memSize).reduce(_ + _)
      StorageStatus(status.blockManagerId, status.maxMem, newBlocks)
    }
  }
}
| wgpshashank/spark | core/src/main/scala/spark/storage/StorageUtils.scala | Scala | apache-2.0 | 4,398 |
package no.netcompany.testdatagen.generators.sample
// Copyright (C) 2014 Lars Reed -- GNU GPL 2.0 -- see LICENSE.txt
import no.netcompany.testdatagen.generators.misc.Markov
import scala.language.postfixOps
/** Demo entry point: prints a run of Markov-chain-generated English text. */
object MarkovSample extends App {
  // NOTE(review): `mkString 1000` is infix for .mkString(1000); the standard
  // collections' mkString takes a separator String, so this presumably relies
  // on whatever type Markov.english() returns accepting an Int here — confirm
  // against the Markov generator's API.
  println(Markov.english() mkString 1000)
}
| lre-mesan/testdata | src/test/scala/no/netcompany/testdatagen/generators/sample/MarkovSample.scala | Scala | gpl-2.0 | 288 |
// Minimal type-class hierarchy: every Tc2 instance is also a Tc1.
trait Tc1[A]
trait Tc2[A] extends Tc1[A]

// Carrier that pins a higher-kinded type parameter to a concrete constructor;
// the only instance provided fixes it to Tc2.
class PinTypeTo[K[_]]
object PinTypeTo {
  implicit val pinType: PinTypeTo[Tc2] = new PinTypeTo[Tc2]
}

class X
object X {
  // Polymorphic implicit: for any F[x] that is a supertype of Tc2[x] (with a
  // PinTypeTo[F] in scope) an F[X] can be materialized as a Tc2[X] — this
  // makes both Tc2[X] and (via the lower bound) Tc1[X] summonable below.
  implicit def Tc2Instance[F[x] >: Tc2[x]: PinTypeTo]: F[X] = new Tc2[X] {}
}

// NOTE(review): reads as a compiler regression test for implicit resolution
// with bounded higher-kinded implicits — both summons must simply compile.
object app extends App {
  implicitly[Tc2[X]]
  implicitly[Tc1[X]]
implicitly[Tc1[X]]
} | som-snytt/dotty | tests/pos/i6385.scala | Scala | apache-2.0 | 312 |
package knot.msgpack.codec
import knot.msgpack.{MsgPackEncoder, MsgPackInput}
import org.scalatest.FunSpec
import org.scalatest.Matchers._
/** Round-trip tests for DoubleCodec: every sample value must encode to the
 *  expected byte count and decode back to itself (NaN compared via isNaN,
 *  since NaN != NaN).
 */
class DoubleCodecSpec extends FunSpec {
  val t = new DoubleCodec {}

  // (value to encode, expected encoded size in bytes)
  val data: Array[(Double, Int)] = Array(
    (Double.MinValue, 9),
    (0.0D, 9),
    (12345.6789D, 9),
    (-12345.6789D, 9),
    (Double.NaN, 9),
    (Double.PositiveInfinity, 9),
    (Double.NegativeInfinity, 9),
    (Double.MinPositiveValue, 9),
    (Double.MaxValue, 9)
  )

  describe("DoubleCodec") {
    it("values") {
      for (d <- data) {
        // encode into a fresh buffer and check the serialized size
        val enc = MsgPackEncoder()
        val o = enc.out
        t.encode(o, d._1)
        o.flush()
        o.size should be(d._2)

        // decode the bytes back and compare with the original value
        val i = MsgPackInput.fromArray(enc.toArray())
        val e = t.decode(i)
        if (d._1.isNaN) {
          d._1.isNaN && e.isNaN should be(true)
        } else {
          e should be(d._1)
        }
      }
    }
  }
}
| defvar/knot | knot-msgpack/src/test/scala/knot/msgpack/codec/DoubleCodecSpec.scala | Scala | mit | 927 |
package mesosphere.marathon.core.task.bus
/**
 * Publisher of task status updates.  Consumers of the published updates are
 * not visible in this excerpt.
 */
trait TaskStatusEmitter {
  def publish(status: TaskStatusObservables.TaskStatusUpdate)
}
| EasonYi/marathon | src/main/scala/mesosphere/marathon/core/task/bus/TaskStatusEmitter.scala | Scala | apache-2.0 | 133 |
package fi.pelam.csv.table
import fi.pelam.csv.table.TableTest._
import fi.pelam.csv.table.TableUtil._
import org.junit.Assert._
import org.junit.Test
/** Tests for TableUtil.renumberedAsRows, verified via the string rendering of
 *  the renumbered cells.
 */
class TableUtilTest {

  // A single column of cells renumbered into the target region keeps column B
  // and fills rows 2..3.
  @Test
  def testRenumberDown: Unit = {
    assertEquals("List(StringCell with value '4b' at Row 2, Column B (1), " +
      "StringCell with value '3c' at Row 3, Column B (1))",
      renumberedAsRows(List(cell4b, cell3c), (cell2b.cellKey, cell3c.cellKey)).toList.toString())
  }

  // With a two-column-wide target region the cells fill row-major: B then C on
  // row 2, wrapping to B on row 3.
  @Test
  def testRenumberDown2Cols: Unit = {
    assertEquals("List(StringCell with value '4b' at Row 2, Column B (1), " +
      "StringCell with value '3c' at Row 2, Column C (2), " +
      "StringCell with value '4c' at Row 3, Column B (1)" +
      ")",
      renumberedAsRows(List(cell4b, cell3c, cell4c), (cell2b.cellKey, cell3d.cellKey)).toList.toString())
  }
}
/*
* Copyright 2018 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package config
/**
 * Abstract calculation settings.  AppSettings below supplies
 * identically-signed implementations backed by PaacConfiguration.
 */
trait Settings {
  // Configured current year (see PaacConfiguration.year()).
  def THIS_YEAR(): Int
  // Whether pence amounts are supported (see PaacConfiguration.supportPence()).
  def POUNDS_AND_PENCE(): Boolean
  // Number of years configured (see PaacConfiguration.numberOfYears()).
  def NUMBER_OF_YEARS(): Int
}
/**
 * Settings implementation backed by PaacConfiguration.
 *
 * The original declaration read `trait AppSettings extends {` — a dangling
 * early-initializer with no parent type.  Since the three members exactly
 * match the Settings contract above, this declares the evident intent:
 * AppSettings implements Settings.
 */
trait AppSettings extends Settings {
  def THIS_YEAR(): Int = config.PaacConfiguration.year()
  def POUNDS_AND_PENCE(): Boolean = config.PaacConfiguration.supportPence()
  def NUMBER_OF_YEARS(): Int = config.PaacConfiguration.numberOfYears()
}
| hmrc/paac-frontend | app/config/Settings.scala | Scala | apache-2.0 | 962 |
/**
* Copyright 2009 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.appjet.oui;
import net.appjet.bodylock.{BodyLock, Executable};
import java.io.File;
import java.util.{Properties, Date};
import java.lang.annotation.Annotation;
import java.text.SimpleDateFormat;
import scala.collection.mutable.{HashMap, SynchronizedMap, HashSet};
import scala.collection.jcl.{IterableWrapper, Conversions};
import org.mortbay.thread.QueuedThreadPool;
import org.mortbay.jetty.servlet.{Context, HashSessionIdManager, FilterHolder, ServletHolder};
import org.mortbay.jetty.handler.{HandlerCollection, RequestLogHandler, HandlerList};
import org.mortbay.jetty.{Server, NCSARequestLog, Request, Response};
import org.mortbay.servlet.GzipFilter;
// removed due to license restrictions; REMOVED_COS_OF_COS
// import com.oreilly.servlet.MultipartFilter;
import net.appjet.common.util.{BetterFile, HttpServletRequestFactory};
import net.appjet.common.cli._;
import net.appjet.bodylock.JSCompileException;
import Util.enumerationToRichEnumeration;
object main {
// Wall-clock time captured when this object is initialized (server start).
val startTime = new java.util.Date();

/** Terminate the JVM immediately with the given status; Runtime.halt does not
 *  run shutdown hooks. */
def quit(status: Int) {
  java.lang.Runtime.getRuntime().halt(status);
}
/** Ensure the backend access-log directory exists; halt the process with
 *  status 1 if it is absent and cannot be created. */
def setupFilesystem() {
  val accessLogDir = new File(config.logDir+"/backend/access");
  if (! accessLogDir.isDirectory() && ! accessLogDir.mkdirs()) {
    quit(1);
  }
}
// One CLI option per config property annotated with @ConfigParam: the option
// name comes from the property, description and optional argument name from
// the annotation.
val options =
  for (m <- config.allProperties if (m.getAnnotation(classOf[ConfigParam]) != null)) yield {
    val cp = m.getAnnotation(classOf[ConfigParam])
    new CliOption(m.getName(), cp.value(), if (cp.argName().length > 0) Some(cp.argName()) else None);
  }

/** Print command-line usage (derived from `options`) to stdout. */
def printUsage() {
  println("\\n--------------------------------------------------------------------------------");
  println("usage:");
  println((new CliParser(options)).usage);
  println("--------------------------------------------------------------------------------\\n");
}
/**
 * Parse command-line arguments into the global config.  A parse failure
 * prints the error and usage, then exits with status 1.  If a "configFile"
 * option is present, that properties file is loaded first and the remaining
 * command-line values are applied afterwards (command line wins).
 */
def extractOptions(args: Array[String]) {
  val parser = new CliParser(options);
  val opts =
    try {
      parser.parseOptions(args)._1;
    } catch {
      case e: ParseException => {
        println("error: "+e.getMessage());
        printUsage();
        System.exit(1);
        null;   // unreachable: System.exit does not return
      }
    }
  if (opts.contains("configFile")) {
    val p = new Properties();
    p.load(new java.io.FileInputStream(opts("configFile")));
    extractOptions(p);
  }
  for ((k, v) <- opts) {
    config.values(k) = v;
  }
}
/** Copy every property in `props` into the global config value map. */
def extractOptions(props: Properties) {
  val keys = for (o <- props.propertyNames()) yield o.asInstanceOf[String];
  for (k <- keys) {
    config.values(k) = props.getProperty(k);
  }
}
// Executable built from ajstdlib's onstartup.js.
val startupExecutable = (new FixedDiskLibrary(new SpecialJarOrNotFile(config.ajstdlibHome, "onstartup.js"))).executable;

/**
 * Run the startup script out-of-band.  Any reported error — compile failure,
 * runtime exception, or (statusCode, message) pair — causes the process to
 * exit with status 1 from the error callback.
 */
def runOnStartup() {
  execution.runOutOfBand(startupExecutable, "Startup", None, { error =>
    error match {
      case e: JSCompileException => { }              // compile errors: exit without extra output
      case e: Throwable => { e.printStackTrace(); }
      case (sc: Int, msg: String) => { println(msg); }
      case x => println(x);
    }
    System.exit(1);
  });
}
// Executable built from ajstdlib's onshutdown.js; lazy so it is only compiled
// if a shutdown is actually run.
lazy val shutdownExecutable = (new FixedDiskLibrary(new SpecialJarOrNotFile(config.ajstdlibHome, "onshutdown.js"))).executable;

/** Run the shutdown script out-of-band; unlike startup, errors are at most
 *  printed and do not terminate the process. */
def runOnShutdown() {
  execution.runOutOfBand(shutdownExecutable, "Shutdown", None, { error =>
    error match {
      case e: JSCompileException => { }
      case e: Throwable => { }
      case (sc: Int, msg: String) => { println(msg); }
      case x => println(x);
    }
  });
}
/**
 * Handle one SARS request: run the SARS executable with the request string
 * bound as "sarsRequest" and return the "sarsResponse" attribute set by the
 * script, if any.  All errors are rethrown (after logging/printing) so the
 * caller sees the failure.
 */
def runOnSars(q: String) = {
  val ec = execution.runOutOfBand(execution.sarsExecutable, "SARS", Some(Map("sarsRequest" -> q)), { error =>
    error match {
      case e: JSCompileException => { throw e; }
      case e: Throwable => { exceptionlog(e); throw e; }
      case (sc: Int, msg: String) => { println(msg); throw new RuntimeException(""+sc+": "+msg) }
      case x => { println(x); throw new RuntimeException(x.toString()) }
    }
  });
  ec.attributes.get("sarsResponse").map(_.toString());
}
/** Silence third-party logging (org.mortbay Jetty and com.mchange) by
 *  installing the corresponding system properties. */
def stfu() {
  val quietLogging = Seq(
    "org.mortbay.log.class" -> "net.appjet.oui.STFULogger",
    "com.mchange.v2.log.MLog" -> "com.mchange.v2.log.FallbackMLog",
    "com.mchange.v2.log.FallbackMLog.DEFAULT_CUTOFF_LEVEL" -> "OFF");
  for ((key, value) <- quietLogging) System.setProperty(key, value);
}
// Mutable handles populated by main() below: the HTTP server and the SARS server.
var server: Server = null;
var sarsServer: net.appjet.common.sars.SarsServer = null;
// Set of registered loggers; population/consumption happens outside this excerpt.
var loggers = new HashSet[GenericLogger];
def main(args: Array[String]) {
val etherpadProperties = getClass.getResource("/etherpad.properties");
if (etherpadProperties != null) {
val p = new Properties();
p.load(etherpadProperties.openStream);
extractOptions(p);
}
extractOptions(args);
if (! config.verbose)
stfu();
setupFilesystem();
if (config.devMode)
config.print;
if (config.profile)
profiler.start();
if (config.listenMonitoring != "0:0")
monitoring.startMonitoringServer();
// this needs a better place.
if (config.devMode)
BodyLock.map = Some(new HashMap[String, String] with SynchronizedMap[String, String]);
server = new Server();
if (config.maxThreads > 0)
server.setThreadPool(new QueuedThreadPool(config.maxThreads));
else
server.setThreadPool(new QueuedThreadPool());
// set up socket connectors
val nioconnector = new CometSelectChannelConnector;
var sslconnector: CometSslSelectChannelConnector = null;
nioconnector.setPort(config.listenPort);
if (config.listenHost.length > 0)
nioconnector.setHost(config.listenHost);
if (config.listenSecurePort == 0) {
server.setConnectors(Array(nioconnector));
} else {
sslconnector = new CometSslSelectChannelConnector;
sslconnector.setPort(config.listenSecurePort);
if (config.listenSecureHost.length > 0)
sslconnector.setHost(config.listenSecureHost);
if (! config.sslKeyStore_isSet) {
val url = getClass.getResource("/mirror/snakeoil-ssl-cert");
if (url != null)
sslconnector.setKeystore(url.toString());
else
sslconnector.setKeystore(config.sslKeyStore);
} else {
sslconnector.setKeystore(config.sslKeyStore);
}
sslconnector.setPassword(config.sslStorePassword);
sslconnector.setKeyPassword(config.sslKeyPassword);
sslconnector.setTrustPassword(config.sslStorePassword);
sslconnector.setExcludeCipherSuites(Array[String](
"SSL_RSA_WITH_3DES_EDE_CBC_SHA",
"SSL_DHE_RSA_WITH_DES_CBC_SHA",
"SSL_DHE_DSS_WITH_DES_CBC_SHA",
"SSL_DHE_RSA_WITH_3DES_EDE_CBC_SHA",
"SSL_DHE_DSS_WITH_3DES_EDE_CBC_SHA",
"SSL_RSA_WITH_DES_CBC_SHA",
"SSL_RSA_EXPORT_WITH_RC4_40_MD5",
"SSL_RSA_EXPORT_WITH_DES40_CBC_SHA",
"SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA",
"SSL_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA",
"SSL_RSA_WITH_NULL_MD5",
"SSL_RSA_WITH_NULL_SHA",
"SSL_DH_anon_WITH_3DES_EDE_CBC_SHA",
"SSL_DH_anon_WITH_DES_CBC_SHA",
"SSL_DH_anon_EXPORT_WITH_RC4_40_MD5",
"SSL_DH_anon_EXPORT_WITH_DES40_CBC_SHA"));
server.setConnectors(Array(nioconnector, sslconnector));
}
// set up Context and Servlet
val handler = new Context(server, "/", Context.NO_SESSIONS | Context.NO_SECURITY);
handler.addServlet(new ServletHolder(new OuiServlet), "/");
// removed due to license restrictions; REMOVED_COS_OF_COS
// val filterHolder = new FilterHolder(new MultipartFilter());
// filterHolder.setInitParameter("uploadDir", System.getProperty("java.io.tmpdir"));
// handler.addFilter(filterHolder, "/*", 1);
global.context = handler;
// set up apache-style logging
val requestLogHandler = new RequestLogHandler();
val requestLog = new NCSARequestLog(config.logDir+"/backend/access/access-yyyy_mm_dd.request.log") {
override def log(req: Request, res: Response) {
try {
if (config.devMode || config.specialDebug)
super.log(req, res);
else if (res.getStatus() != 200 || config.transportPrefix == null || ! req.getRequestURI().startsWith(config.transportPrefix))
super.log(req, res);
val d = new Date();
appstats.stati.foreach(_(if (res.getStatus() < 0) 404 else res.getStatus()).hit(d));
} catch {
case e => { exceptionlog("Error writing to log?"); exceptionlog(e); }
}
}
};
requestLog.setRetainDays(365);
requestLog.setAppend(true);
requestLog.setExtended(true);
requestLog.setLogServer(true);
requestLog.setLogLatency(true);
requestLog.setLogTimeZone("PST");
requestLogHandler.setRequestLog(requestLog);
// set handlers with server
val businessHandlers = new HandlerList();
businessHandlers.setHandlers(Array(handler));
val allHandlers = new HandlerCollection();
allHandlers.setHandlers(Array(businessHandlers, requestLogHandler));
server.setHandler(allHandlers);
// fix slow startup bug
server.setSessionIdManager(new HashSessionIdManager(new java.util.Random()));
// run the onStartup script.
runOnStartup();
// preload some runners, if necessary.
if (config.preloadRunners > 0) {
val b = new java.util.concurrent.CountDownLatch(config.preloadRunners);
for (i <- 0 until config.preloadRunners)
(new Thread {
ScopeReuseManager.freeRunner(ScopeReuseManager.newRunner);
b.countDown();
}).start();
while (b.getCount() > 0) {
b.await();
}
println("Preloaded "+config.preloadRunners+" runners.");
}
// start SARS server.
if (config.listenSarsPort > 0) {
try {
import net.appjet.common.sars._;
sarsServer = new SarsServer(config.sarsAuthKey,
new SarsMessageHandler { override def handle(q: String) = runOnSars(q) },
if (config.listenSarsHost.length > 0) Some(config.listenSarsHost) else None,
config.listenSarsPort);
sarsServer.daemon = true;
sarsServer.start();
} catch {
case e: java.net.SocketException => {
println("SARS: A socket exception occurred: "+e.getMessage()+" on SARS server at "+config.listenSarsHost+":"+config.listenSarsPort);
java.lang.Runtime.getRuntime().halt(1);
}
}
}
// start server
java.lang.Runtime.getRuntime().addShutdownHook(new Thread() {
override def run() {
val df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSSZ");
def printts(str: String) {
println("["+df.format(new Date())+"]: "+str);
}
printts("Shutting down...");
handler.setShutdown(true);
Thread.sleep(if (config.devMode) 500 else 3000);
printts("...done, running onshutdown.");
runOnShutdown();
printts("...done, stopping server.");
server.stop();
server.join();
printts("...done, flushing logs.");
for (l <- loggers) { l.flush(); l.close(); }
printts("...done.");
}
});
    /**
     * Prints a human-readable report for a socket failure on the given
     * connector: the exception message, the failing host:port (with
     * fallbacks "(unknown socket)" when the connector is null and
     * "localhost" when its host is null), plus a hint for the two most
     * common causes (port already bound; insufficient privileges).
     */
    def socketError(c: org.mortbay.jetty.Connector, e: java.net.SocketException) {
      var msg = e.getMessage();
      // Describe which endpoint failed; c may be null when the failing
      // connector is not known at the call site.
      println("SOCKET ERROR: "+msg+" - "+(c match {
        case null => "(unknown socket)";
        case x => {
          (x.getHost() match {
            case null => "localhost";
            case y => y;
          })+":"+x.getPort();
        }
      }));
      if (msg.contains("Address already in use")) {
        println("Did you make sure that ports "+config.listenPort+" and "+config.listenSecurePort+" are not in use?");
      }
      if (msg.contains("Permission denied")) {
        println("Perhaps you need to run as the root user or as an Administrator?");
      }
    }
var c: org.mortbay.jetty.Connector = null;
try {
c = nioconnector;
c.open();
if (sslconnector != null) {
c = sslconnector;
c.open();
}
c = null;
allHandlers.start();
server.start();
} catch {
case e: java.net.SocketException => {
socketError(c, e);
java.lang.Runtime.getRuntime().halt(1);
}
case e: org.mortbay.util.MultiException => {
println("SERVER ERROR: Couldn't start server; multiple errors.");
for (i <- new IterableWrapper[Throwable] { override val underlying = e.getThrowables.asInstanceOf[java.util.List[Throwable]] }) {
i match {
case se: java.net.SocketException => {
socketError(c, se);
}
case e =>
println("SERVER ERROR::: Couldn't start server: "+i.getMessage() + c);
}
}
java.lang.Runtime.getRuntime().halt(1);
}
case e => {
println("SERVER ERROR:: Couldn't start server: "+e.getMessage() +c);
java.lang.Runtime.getRuntime().halt(1);
}
}
println("HTTP server listening on http://"+
(if (config.listenHost.length > 0) config.listenHost else "localhost")+
":"+config.listenPort+"/");
if (config.listenSecurePort > 0)
println("HTTPS server listening on https://"+
(if (config.listenSecureHost.length > 0) config.listenSecureHost else "localhost")+
":"+config.listenSecurePort+"/");
if (config.listenSarsPort > 0)
println("SARS server listening on "+
(if (config.listenSarsHost.length > 0) config.listenSarsHost else "localhost")+
":"+config.listenSarsPort);
}
}
| railscook/etherpad | infrastructure/net.appjet.oui/main.scala | Scala | apache-2.0 | 14,184 |
package io.youi.path
/** A drawable path element whose rendering always goes through the scaled draw variant. */
trait PathAction extends Drawable {
  /** Unscaled draw delegates to the scaled form with both scale factors fixed at 1.0. */
  override final def draw(context: Context, x: Double, y: Double): Unit = {
    draw(context, x, y, scaleX = 1.0, scaleY = 1.0)
  }

  /** Renders this action at (x, y) with independent horizontal and vertical scaling. */
  def draw(context: Context, x: Double, y: Double, scaleX: Double, scaleY: Double): Unit
} | outr/youi | ui/js/src/main/scala/io/youi/path/PathAction.scala | Scala | mit | 253 |
package org.jetbrains.plugins.scala
package codeInspection.collections
import org.jetbrains.plugins.scala.codeInspection.InspectionBundle
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScExpression
/**
* Nikolay.Tropin
* 2014-05-05
*/
/** Inspection offering to collapse `.filter(p).headOption` into the equivalent `.find(p)`. */
class FilterHeadOptionInspection extends OperationOnCollectionInspection {
  // This inspection knows exactly one rewrite rule.
  override def possibleSimplificationTypes: Array[SimplificationType] = {
    Array[SimplificationType](FilterHeadOption)
  }
}
/**
 * Simplification rule: rewrites `qual.filter(cond).headOption` into `qual.find(cond)`.
 *
 * The backticked names in the match (`.filter`, `.headOption`) are custom
 * extractor objects that recognise the corresponding method invocations on
 * the expression tree.
 */
object FilterHeadOption extends SimplificationType {
  def hint: String = InspectionBundle.message("filter.headOption.hint")

  override def getSimplification(expr: ScExpression): Option[Simplification] = {
    expr match {
      // The rewrite is only offered when the predicate has no side effects
      // (guard below); the replacement highlights from the qualifier onward.
      case qual`.filter`(cond)`.headOption`() if !hasSideEffects(cond) =>
        Some(replace(expr).withText(invocationText(qual, "find", cond)).highlightFrom(qual))
      case _ => None
    }
  }
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/codeInspection/collections/FilterHeadOptionInspection.scala | Scala | apache-2.0 | 849 |
package autolift
/**
 * Typeclass supporting lifting the discovery of the greatest element over an arbitrary nesting
 * of type constructors, assuming one can be produced.
 *
 * @author Owein Reese
 *
 * @tparam Obj The type which will be lifted and searched
 * @tparam Function The function used to produce the keys by which a maximum will be chosen
 */
trait LiftMaximumBy[Obj, Function] extends DFunction2[Obj, Function]
trait LiftMaximumBySyntax{

  ///Syntax extension providing for a `liftMaxBy` method.
  implicit class LiftMaximumByOps[F[_], A](fa: F[A]){

    /**
     * Automatic lifting of a max dictated by the signature of a function, searching over an
     * arbitrary nesting of type constructors.
     *
     * NOTE(review): the original doc claimed the mapped-to type "has a Monoid"; for a maximum
     * the instances presumably require an ordering on `C` instead — confirm against the
     * `LiftMaximumBy` instance definitions.
     *
     * @tparam B the argument type of the function used to produce comparison keys
     * @tparam C the key type by which a maximum is chosen
     */
    def liftMaxBy[B, C](f: B => C)(implicit lift: LiftMaximumBy[F[A], B => C]): lift.Out = lift(fa, f)
  }
}
//See individual instances for liftMaximumBy Context. | wheaties/AutoLifts | autolift-core/src/main/scala/autolift/LiftMaximumBy.scala | Scala | apache-2.0 | 989 |
package com.technophobia.substeps.services
import com.technophobia.substeps.domain.events.{ExecutionCompleted, ExecutionStarted, SubstepsDomainEvent, DomainEventSubscriber}
import java.lang.reflect.Method
import org.reflections.ReflectionUtils
import com.technophobia.substeps.runner.setupteardown.Annotations
import java.lang.annotation.Annotation
import collection.JavaConversions._
import com.technophobia.substeps.domain.{BasicScenario, Feature}
import scala.collection.mutable.ListBuffer
/**
* @author rbarefield
*/
/**
 * Subscribes to substeps execution events and invokes setup/teardown callbacks
 * discovered by reflection on annotated methods.
 *
 * Each class passed to [[addInitializationClass]] is instantiated once; its
 * methods carrying one of the [[Annotations]] lifecycle annotations are
 * registered in the matching callback bucket. Feature/scenario start and
 * completion events arrive through [[handle]], while the all-features hooks
 * are fired explicitly via [[featuresStarting]] and [[featuresComplete]].
 *
 * @author rbarefield
 */
class SetupAndTeardownSubscriber extends DomainEventSubscriber {

  // Classes whose annotated methods have already been registered; guards
  // against registering (and therefore invoking) the same class twice.
  var initializationClasses: List[Class[_]] = Nil

  // One callback bucket per lifecycle phase, populated by addInitializationClass.
  val beforeAllFeatures = collection.mutable.ListBuffer[() => Any]()
  val beforeEveryFeature = collection.mutable.ListBuffer[() => Any]()
  val beforeEveryScenario = collection.mutable.ListBuffer[() => Any]()
  val afterEveryScenario = collection.mutable.ListBuffer[() => Any]()
  val afterEveryFeature = collection.mutable.ListBuffer[() => Any]()
  val afterAllFeatures = collection.mutable.ListBuffer[() => Any]()

  /** Runs every BeforeAllFeatures hook; call once before any feature executes. */
  def featuresStarting() = beforeAllFeatures.foreach(_())

  /** Runs every AfterAllFeatures hook; call once after the last feature completes. */
  def featuresComplete() = afterAllFeatures.foreach(_())

  /**
   * Instantiates the given class and registers each of its annotated methods in
   * the bucket matching its lifecycle annotation. Registering the same class a
   * second time is a no-op.
   */
  def addInitializationClass(initializationClass: Class[_]) {
    if (!initializationClasses.contains(initializationClass)) {
      val instance = initializationClass.newInstance()

      val annotationToFunctionHolders = List[(Class[_ <: Annotation], ListBuffer[() => Any])](
        (classOf[Annotations.BeforeAllFeatures], beforeAllFeatures),
        (classOf[Annotations.BeforeEveryFeature], beforeEveryFeature),
        (classOf[Annotations.BeforeEveryScenario], beforeEveryScenario),
        (classOf[Annotations.AfterEveryFeature], afterEveryFeature),
        (classOf[Annotations.AfterEveryScenario], afterEveryScenario),
        (classOf[Annotations.AfterAllFeatures], afterAllFeatures))

      for ((annotation, functionHolder) <- annotationToFunctionHolders;
           method <- methodsForAnnotation(initializationClass, annotation)) {
        functionHolder += (() => method.invoke(instance))
      }

      initializationClasses ::= initializationClass
    }
  }

  /** All methods of the class (including inherited ones) carrying the given annotation. */
  private def methodsForAnnotation(initializationClass: Class[_], annotation: Class[_ <: Annotation]): Set[Method] = {
    ReflectionUtils.getAllMethods(initializationClass, ReflectionUtils.withAnnotation(annotation)).toSet
  }

  /** Dispatches feature/scenario lifecycle events to the matching callback bucket. */
  def handle(event: SubstepsDomainEvent) = {
    event match {
      case ExecutionStarted(Feature(_, _, _, _), _) => beforeEveryFeature.foreach(_())
      case ExecutionStarted(BasicScenario(_, _, _), _) => beforeEveryScenario.foreach(_())
      case ExecutionCompleted(BasicScenario(_, _, _), _, _) => afterEveryScenario.foreach(_())
      case ExecutionCompleted(Feature(_, _, _, _), _, _) => afterEveryFeature.foreach(_())
      case _ => {}
    }
  }
}
| G2G3Digital/substeps-scala-core | src/main/scala/com/technophobia/substeps/services/SetupAndTeardownSubscriber.scala | Scala | lgpl-3.0 | 2,809 |
/*
* Copyright 2017 Michael Stringer
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package software.purpledragon.poi4s.kml
import enumeratum.EnumEntry
import software.purpledragon.poi4s.FileVersion
/** Representation of a supported version of KML files. See the companion object for possible values.
  *
  * The enumeratum `entryName` carries the version string (e.g. "2.2").
  */
sealed abstract class KmlVersion(override val entryName: String) extends EnumEntry with FileVersion
/** Supported versions of KML files.
  */
object KmlVersion {

  /** KML Version 2.2 ''(default version)''.
    *
    * This is the default version used for KML files.
    */
  case object Version22 extends KmlVersion("2.2")
}
| stringbean/poi4s | poi4s-kml/src/main/scala/software/purpledragon/poi4s/kml/KmlVersion.scala | Scala | apache-2.0 | 1,153 |
/*
* Copyright (c) 2014-2019 Israel Herraiz <isra@herraiz.org>
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
// ---------------------
// Test for example 3.18
// ---------------------
package chap03
import org.specs2.mutable._
/** Specs2 suite: Ex18.map must agree with the standard library's List.map. */
object Ex18Spec extends Specification {
  "The map function" should {
    "return Nil with an identity function" in {
      Ex18.map(Nil: List[Double])(x => x) mustEqual Nil
    }

    "behave like Scala map with Nil" in {
      Ex18.map(Nil: List[Double])(x => x) mustEqual Nil.map(x => x)
    }

    "behave like Scala map with single value list" in {
      Ex18.map(List(7))(_.toString) mustEqual List(7).map(_.toString)
    }

    "behave like Scala map with a list" in {
      Ex18.map(List(7,8,9,10))(_*3) mustEqual List(7,8,9,10).map(_*3)
    }
  }
}
| iht/fpinscala | src/test/scala/chap03/ex18Spec.scala | Scala | mit | 1,831 |
import org.springframework.context.support.ClassPathXmlApplicationContext
import play.api.{Application, GlobalSettings}
/**
 * Play global hooks that tie a Spring application context to the Play
 * application lifecycle. The active Spring profile is taken from the
 * `application.environment` configuration key, restricted to the values
 * listed in `possibleEnvironments`.
 */
object Global extends GlobalSettings {
  // Valid values for the application.environment configuration key.
  val possibleEnvironments = Set("production", "test", "development")
  val ctx = new ClassPathXmlApplicationContext

  override def onStart(app: Application) {
    super.onStart(app)
    ctx.setConfigLocation("applicationContext.xml")
    // Activate the Spring profile matching the configured environment, if set.
    app.configuration.getString("application.environment", Some(possibleEnvironments)).foreach(
      env => ctx.getEnvironment.setActiveProfiles(env)
    )
    // Config location and active profiles are set before the context is refreshed.
    ctx.refresh()
    ctx.start()
  }

  override def onStop(app: Application) {
    // Stop the Spring context before Play finishes shutting down.
    ctx.stop()
    super.onStop(app)
  }
}
| oswaldo/play-cxf | samples/play-cxf-multienv/app/Global.scala | Scala | apache-2.0 | 679 |
/*
* Copyright (C) 2020 MapRoulette contributors (see CONTRIBUTORS.md).
* Licensed under the Apache License, Version 2.0 (see LICENSE).
*/
package org.maproulette.framework.repository
import java.util.UUID
import org.maproulette.framework.model.{User, UserMetrics, UserSettings, CustomBasemap}
import org.maproulette.framework.psql.Query
import org.maproulette.framework.psql.filter.{BaseParameter, Operator}
import org.maproulette.framework.service.UserService
import org.maproulette.framework.util.{FrameworkHelper, UserRepoTag, UserTag}
import play.api.Application
import play.api.libs.oauth.RequestToken
/**
* @author mcuthbert
*/
/**
 * Integration tests for [[UserRepository]]: upsert/update/delete, custom
 * basemap persistence, API-key handling, score updates and achievements.
 * Tests run against the database provided by [[FrameworkHelper]]; OSM ids
 * used across tests are distinct so the tests do not interfere.
 */
class UserRepositorySpec(implicit val application: Application) extends FrameworkHelper {
  val userRepository: UserRepository = this.application.injector.instanceOf(classOf[UserRepository])
  val userService: UserService = this.serviceManager.user

  "UserRepository" should {
    "upsert user" taggedAs UserRepoTag in {
      val insertedUser = this.insertBaseUser(1, "name1")
      val retrievedUser = this.repositoryGet(insertedUser.id)
      retrievedUser.get mustEqual insertedUser
    }

    "update user" taggedAs UserRepoTag in {
      val insertedUser = this.insertBaseUser(2, "name2")
      val updatedApiKey = UUID.randomUUID().toString
      val updateUser = insertedUser.copy(
        osmProfile = insertedUser.osmProfile.copy(
          displayName = "name3",
          avatarURL = "UPDATE_avatarURL",
          requestToken = RequestToken("UPDATED_TOKEN", "UPDATED_SECRET")
        ),
        apiKey = Some(updatedApiKey),
        settings = UserSettings(
          Some(1),
          Some(2),
          Some("id"),
          Some("en-US"),
          Some("email_address"),
          Some(true),
          Some(false),
          Some(5),
          Some(true),
          None,
          None,
          None
        )
      )
      this.userRepository.update(updateUser, "POINT (14.0 22.0)")
      val updatedUser = this.repositoryGet(insertedUser.id).get
      updatedUser.osmProfile.displayName mustEqual updateUser.osmProfile.displayName
      updatedUser.osmProfile.avatarURL mustEqual updateUser.osmProfile.avatarURL
      updatedUser.osmProfile.requestToken mustEqual updateUser.osmProfile.requestToken
      // API Key should not be allowed to updated here
      updatedUser.apiKey mustEqual insertedUser.apiKey
      updatedUser.settings mustEqual updateUser.settings
    }

    "update user's customBasemaps" taggedAs UserRepoTag in {
      val insertedUser = this.insertBaseUser(30, "name30")
      val updatedApiKey = UUID.randomUUID().toString
      val updateUser = insertedUser.copy(
        osmProfile = insertedUser.osmProfile.copy(
          displayName = "name31",
          avatarURL = "UPDATE_avatarURL",
          requestToken = RequestToken("UPDATED_TOKEN", "UPDATED_SECRET")
        ),
        apiKey = Some(updatedApiKey),
        settings = UserSettings(
          Some(1),
          Some(2),
          Some("id"),
          Some("en-US"),
          Some("email_address2"),
          Some(true),
          Some(false),
          Some(5),
          Some(true),
          None,
          None,
          Some(
            List(
              CustomBasemap(
                name = "my_custom_basemap",
                url = "http://maproulette.org/this/is/a/url"
              )
            )
          )
        )
      )
      this.userRepository.update(updateUser, "POINT (14.0 22.0)")
      val updatedUser = this.repositoryGet(insertedUser.id).get
      updatedUser.osmProfile.displayName mustEqual updateUser.osmProfile.displayName
      updatedUser.settings.customBasemaps.get.length mustEqual 1
      updatedUser.settings.customBasemaps.get.head.name mustEqual "my_custom_basemap"
      updatedUser.settings.customBasemaps.get.head.url mustEqual "http://maproulette.org/this/is/a/url"

      // Change basemaps
      val basemaps = List(
        CustomBasemap(
          id = updatedUser.settings.customBasemaps.get.head.id,
          name = "updated_custom_basemap",
          url = "http://updated/url",
          overlay = true
        ),
        CustomBasemap(name = "new_basemap", url = "new_url")
      )
      val user2 = updatedUser.copy(
        settings = updatedUser.settings.copy(customBasemaps = Some(basemaps))
      )
      this.userRepository.update(user2, "POINT (14.0 22.0)")
      val updatedUser2 = this.repositoryGet(user2.id).get
      updatedUser2.settings.customBasemaps.get.length mustEqual 2

      val updatedBasemaps = updatedUser2.settings.customBasemaps.getOrElse(List())
      val first = updatedBasemaps.head
      first.id mustEqual basemaps.head.id
      first.name mustEqual "updated_custom_basemap"
      first.url mustEqual "http://updated/url"
      first.overlay mustEqual true

      val second = updatedBasemaps(1)
      // A real id must have been assigned on insert.
      second.id must not be -1
      second.name mustEqual "new_basemap"
      second.url mustEqual "new_url"

      // Remove basemap by not including in list
      val LessBasemaps = List(
        CustomBasemap(
          id = first.id,
          name = "updated_custom_basemap",
          url = "http://updated/url",
          overlay = true
        )
      )
      val user3 = updatedUser2.copy(
        settings = updatedUser2.settings.copy(customBasemaps = Some(LessBasemaps))
      )
      this.userRepository.update(user3, "POINT (14.0 22.0)")
      val updatedUser3 = this.repositoryGet(user3.id).get
      updatedUser3.settings.customBasemaps.get.length mustEqual 1

      // Not passing customBasemaps preserves existing ones.
      val user4 = updatedUser3.copy(
        settings = updatedUser.settings.copy(customBasemaps = None)
      )
      this.userRepository.update(user4, "POINT (14.0 22.0)")
      val updatedUser4 = this.repositoryGet(user4.id).get
      updatedUser4.settings.customBasemaps.get.length mustEqual 1

      //Passing an empty list of customBasemaps deletes all
      val user5 = updatedUser4.copy(
        settings = updatedUser4.settings.copy(customBasemaps = Some(List()))
      )
      this.userRepository.update(user5, "POINT (14.0 22.0)")
      val updatedUser5 = this.repositoryGet(user5.id).get
      updatedUser5.settings.customBasemaps mustEqual None
    }

    "update API key" taggedAs UserRepoTag in {
      val insertedUser =
        this.userRepository.upsert(this.getTestUser(3, "APITest"), "TestAPIKey", "POINT (20 40)")
      this.userRepository.updateAPIKey(insertedUser.id, "NEW_updated_key")
      val retrievedUser = this.repositoryGet(insertedUser.id)
      retrievedUser.get.apiKey.get mustEqual "NEW_updated_key"
    }

    "delete user" taggedAs UserRepoTag in {
      val insertedUser = this.userRepository
        .upsert(this.getTestUser(4, "DeleteTest"), "TestAPIKey", "POINT (20 40)")
      this.userRepository.delete(insertedUser.id)
      val retrievedUser = this.repositoryGet(insertedUser.id)
      retrievedUser.isEmpty mustEqual true
    }

    "delete user by OSMID" taggedAs UserRepoTag in {
      val insertedUser = this.userRepository
        .upsert(this.getTestUser(5, "DeleteByOidTest"), "TestAPIKey", "POINT (20 40)")
      this.userRepository.deleteByOSMID(5)
      val retrievedUser = this.repositoryGet(insertedUser.id)
      retrievedUser.isEmpty mustEqual true
    }

    "update user score" taggedAs UserRepoTag in {
      val insertedUser = this.userRepository
        .upsert(this.getTestUser(61, "UpdateUserO"), "TestAPIKey", "POINT (20 40)")
      // NOTE(review): this test only checks that updateUserScore executes
      // without error; the returned value is never asserted on.
      val updatedUser = this.userRepository.updateUserScore(
        insertedUser.id,
        List(
          BaseParameter(
            UserMetrics.FIELD_SCORE,
            s"=(${UserMetrics.FIELD_SCORE}+1000)",
            Operator.CUSTOM
          ),
          BaseParameter(
            UserMetrics.FIELD_TOTAL_REJECTED,
            s"=(${UserMetrics.FIELD_TOTAL_REJECTED}+1)",
            Operator.CUSTOM
          )
        )
      )
      this.userRepository.deleteByOSMID(61)
    }

    "add user achievements" taggedAs UserRepoTag in {
      val insertedUser = this.userRepository
        .upsert(this.getTestUser(62, "AddAchievementsTest"), "TestAPIKey", "POINT (20 40)")

      // Brand-new users have no achievements
      insertedUser.achievements.getOrElse(List.empty).length mustEqual 0

      this.userRepository.addAchievements(insertedUser.id, List(1, 2, 3))
      this.repositoryGet(insertedUser.id).get.achievements.getOrElse(List.empty).length mustEqual 3

      // Make sure dup achievements don't get added
      this.userRepository.addAchievements(insertedUser.id, List(3, 4, 5))
      this.repositoryGet(insertedUser.id).get.achievements.getOrElse(List.empty).length mustEqual 5

      this.userRepository.deleteByOSMID(62)
    }
  }

  override implicit val projectTestName: String = "UserRepositorySpecProject"

  // Fetches a user by internal id directly through the repository query API.
  private def repositoryGet(id: Long): Option[User] = {
    this.userRepository
      .query(
        Query.simple(
          List(BaseParameter(User.FIELD_ID, id))
        )
      )
      .headOption
  }

  // Creates a user through the service layer with the given OSM id/name.
  private def insertBaseUser(osmId: Long, osmName: String): User =
    this.userService.create(this.getTestUser(osmId, osmName), User.superUser)
}
| mgcuthbert/maproulette2 | test/org/maproulette/framework/repository/UserRepositorySpec.scala | Scala | apache-2.0 | 9,190 |
package com.nibado.projects.adventscala
/** Advent of Code 2015 day 17 (container combinations), with simple timing output. */
class Day17 extends Day {
  override def run(): Unit = {
    // One container size per input line.
    val containers = getResource("/day17.txt").map(s => s.toInt)
    val start = System.currentTimeMillis()
    println("Start: " + start)
    // NOTE(review): permutations of n elements is n!, so for a typical
    // 20-line puzzle input this will effectively never finish — confirm the
    // input size and whether subsets (combinations) were intended instead.
    println(containers.permutations.size)
    println("Duration: " + (System.currentTimeMillis() - start))
  }
}
object Test17 {
  /** Ad-hoc entry point for running Day17 from the command line. */
  def main(args: Array[String]): Unit = {
    new Day17().run()
  }
} | nielsutrecht/adventofcode | src/main/scala/com/nibado/projects/adventscala/Day17.scala | Scala | mit | 431 |
package lore.compiler.target
/**
* An intermediate representation of the target language the compiler generates code for. The main purpose of this
* representation is to remove the burden of working with strings during the transpilation phase.
*
* In the long-term, such an intermediate representation may be used to support multiple target languages. The
* representation is chosen in such a way as to keep it as decoupled from JS as possible.
*/
object Target {
  // Root of the statement hierarchy; every expression is also a statement.
  sealed trait TargetStatement
  sealed trait TargetExpression extends TargetStatement

  /**
   * A target name wraps a Lore identifier so that it can later be generated to a name that's legal in the generated
   * code. For example, the Javascript target doesn't allow question marks, so we have to convert them to a different
   * character first.
   */
  class TargetName(val name: String) extends AnyVal {
    def asVariable: Variable = Variable(this)
    def asParameter: Parameter = Parameter(this)
    override def toString: String = name
  }

  // A statement with no semantic content (see isEmpty below).
  case object Empty extends TargetStatement
  case object Divider extends TargetStatement

  // Control Structures.
  case class Block(statements: Vector[TargetStatement]) extends TargetStatement
  case class IfElse(condition: TargetExpression, thenStatement: TargetStatement, elseStatement: TargetStatement) extends TargetStatement
  case class While(condition: TargetExpression, body: TargetStatement) extends TargetStatement
  case class For(init: TargetStatement, condition: TargetExpression, post: TargetStatement, body: TargetStatement) extends TargetStatement
  case class Iteration(collection: TargetExpression, elementName: TargetName, body: TargetStatement) extends TargetStatement
  case class Return(value: TargetExpression) extends TargetStatement

  /** Convenience constructor: builds a Block from varargs statements. */
  def block(statements: TargetStatement*): Block = Block(statements.toVector)

  // Variables.
  case class VariableDeclaration(name: TargetName, value: TargetExpression, isMutable: Boolean = false, shouldExport: Boolean = false) extends TargetStatement
  case class Assignment(left: TargetExpression, right: TargetExpression) extends TargetStatement
  case class Variable(name: TargetName) extends TargetExpression {
    lazy val asParameter: Parameter = Parameter(name)
    override val toString: String = name.toString
  }

  // Functions.
  case class Function(name: TargetName, parameters: Vector[Parameter], body: Block, shouldExport: Boolean = false) extends TargetStatement
  case class Lambda(parameters: Vector[Parameter], body: TargetStatement) extends TargetExpression
  case class Parameter(name: TargetName, default: Option[TargetExpression] = None, isRestParameter: Boolean = false) {
    lazy val asVariable: Variable = Variable(name)
  }

  /**
   * @param isRestCall The last argument is generated as a rest parameter spread.
   */
  case class Call(function: TargetExpression, arguments: Vector[TargetExpression], isRestCall: Boolean = false) extends TargetExpression
  case class New(constructor: TargetExpression, arguments: Vector[TargetExpression]) extends TargetExpression

  // Values.
  case class NumberLiteral(value: Double) extends TargetExpression
  case class BooleanLiteral(value: Boolean) extends TargetExpression
  case class StringLiteral(value: String) extends TargetExpression
  case object Undefined extends TargetExpression
  case object Null extends TargetExpression
  case class Dictionary(properties: Vector[Property]) extends TargetExpression
  case class Property(name: String, value: TargetExpression)
  case class List(elements: Vector[TargetExpression]) extends TargetExpression

  // Operations.
  case class Operation(operator: TargetOperator, operands: Vector[TargetExpression]) extends TargetExpression
  case class PropertyAccess(instance: TargetExpression, name: TargetName) extends TargetExpression
  case class ListAccess(list: TargetExpression, key: TargetExpression) extends TargetExpression

  /**
   * Whether the given statement has no semantic significance at all.
   */
  def isEmpty(statement: TargetStatement): Boolean = statement match {
    case Target.Empty => true
    case Target.Block(statements) => statements.isEmpty
    case _ => false
  }
}
| marcopennekamp/lore | compiler/src/lore/compiler/target/Target.scala | Scala | mit | 4,218 |
package doodlebot
import cats.data.ValidatedNel
import cats.std.list._
import cats.syntax.cartesian._
import java.util.UUID
/** Domain model for doodlebot: wire messages and validated value wrappers. */
object model {
  import doodlebot.validation._
  import doodlebot.syntax.validation._
  import doodlebot.validation.Predicate._

  // Messages to the client
  final case class Authenticated(name: String, session: String)
  final case class Log(offset: Int, messages: List[Message])
  final case class Message(author: String, message: String)
  final case class FormErrors(errors: InputError) extends Exception

  // Messages from the client
  final case class Login(name: Name, password: Password)

  // Wrappers
  // Each wrapper pairs an AnyVal newtype with a companion `validate` that
  // accumulates failures in a ValidatedNel.
  final case class Name(get: String) extends AnyVal
  object Name {
    // Names must be at least 6 characters and alphanumeric only.
    def validate(name: String): ValidatedNel[String,Name] = {
      name.validate(lengthAtLeast(6) and onlyLettersOrDigits).map(n => Name(n))
    }
  }

  final case class Email(get: String) extends AnyVal
  object Email {
    // Minimal check: the address must contain both '@' and '.'.
    def validate(email: String): ValidatedNel[String,Email] = {
      email.validate(containsAllChars("@.")).map(e => Email(e))
    }
  }

  final case class Password(get: String) extends AnyVal
  object Password {
    // Passwords must be at least 8 characters.
    def validate(password: String): ValidatedNel[String,Password] = {
      password.validate(lengthAtLeast(8)).map(p => Password(p))
    }
  }

  final case class Session(get: UUID = UUID.randomUUID()) extends AnyVal

  // State
  final case class User(name: Name, email: Email, password: Password)
  object User {
    // Validates all three fields, accumulating every failure.
    def validate(name: String, email: String, password: String): ValidatedNel[String,User] = {
      (Name.validate(name) |@| Email.validate(email) |@| Password.validate(password)).map { User.apply _ }
    }
  }
}
| underscoreio/doodlebot | server/src/main/scala/doodlebot/model.scala | Scala | apache-2.0 | 1,672 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.scaladsl.persistence.jdbc
import java.sql.Connection
import akka.persistence.query.Offset
import akka.stream.scaladsl.Flow
import akka.Done
import akka.NotUsed
import com.lightbend.lagom.internal.persistence.jdbc.SlickOffsetDao
import com.lightbend.lagom.internal.persistence.jdbc.SlickOffsetStore
import com.lightbend.lagom.internal.persistence.jdbc.SlickProvider
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor.ReadSideHandler
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcReadSide
import com.lightbend.lagom.scaladsl.persistence.AggregateEvent
import com.lightbend.lagom.scaladsl.persistence.AggregateEventTag
import com.lightbend.lagom.scaladsl.persistence.EventStreamElement
import org.slf4j.LoggerFactory
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.reflect.ClassTag
/**
* INTERNAL API
*/
/**
 * INTERNAL API
 *
 * JDBC implementation of the read-side support: builds ReadSideHandlers whose
 * event handlers and offset updates run in a single Slick transaction.
 */
private[lagom] class JdbcReadSideImpl(slick: SlickProvider, offsetStore: SlickOffsetStore)(
    implicit val ec: ExecutionContext
) extends JdbcReadSide {

  private val log = LoggerFactory.getLogger(this.getClass)

  // Mutable builder that collects the global-prepare callback, the per-tag
  // prepare callback, and one handler per concrete event class.
  override def builder[Event <: AggregateEvent[Event]](readSideId: String): ReadSideHandlerBuilder[Event] =
    new ReadSideHandlerBuilder[Event] {
      var globalPrepare: Connection => Unit = { _ =>
        ()
      }
      var prepare: (Connection, AggregateEventTag[Event]) => Unit = (_, _) => ()
      var eventHandlers = Map.empty[Class[_ <: Event], (Connection, EventStreamElement[_ <: Event]) => Unit]

      override def setGlobalPrepare(callback: Connection => Unit): ReadSideHandlerBuilder[Event] = {
        globalPrepare = callback
        this
      }

      override def setPrepare(
          callback: (Connection, AggregateEventTag[Event]) => Unit
      ): ReadSideHandlerBuilder[Event] = {
        prepare = callback
        this
      }

      override def setEventHandler[E <: Event: ClassTag](
          handler: (Connection, EventStreamElement[E]) => Unit
      ): ReadSideHandlerBuilder[Event] = {
        // Handlers are keyed by the runtime class of the event.
        val eventClass = implicitly[ClassTag[E]].runtimeClass.asInstanceOf[Class[Event]]
        eventHandlers += (eventClass -> handler.asInstanceOf[(Connection, EventStreamElement[_ <: Event]) => Unit])
        this
      }

      override def build(): ReadSideHandler[Event] =
        new JdbcReadSideHandler[Event](readSideId, globalPrepare, prepare, eventHandlers)
    }

  private class JdbcReadSideHandler[Event <: AggregateEvent[Event]](
      readSideId: String,
      globalPrepareCallback: Connection => Any,
      prepareCallback: (Connection, AggregateEventTag[Event]) => Any,
      eventHandlers: Map[Class[_ <: Event], (Connection, EventStreamElement[_ <: Event]) => Any]
  ) extends ReadSideHandler[Event] {

    import slick.profile.api._

    // Written once in prepare(), read by the stream in handle().
    @volatile
    private var offsetDao: SlickOffsetDao = _

    // Ensures tables exist, then runs the user-supplied global prepare on a
    // raw JDBC connection.
    override def globalPrepare(): Future[Done] =
      slick.ensureTablesCreated().flatMap { _ =>
        slick.db.run {
          SimpleDBIO { ctx =>
            globalPrepareCallback(ctx.connection)
            Done.getInstance()
          }
        }
      }

    // Runs the per-tag prepare callback, then loads the stored offset for
    // this (readSideId, tag) pair; the resulting DAO is kept for handle().
    override def prepare(tag: AggregateEventTag[Event]): Future[Offset] =
      for {
        _ <- slick.db.run {
          SimpleDBIO { ctx =>
            prepareCallback(ctx.connection, tag)
          }
        }
        dao <- offsetStore.prepare(readSideId, tag.tag)
      } yield {
        offsetDao = dao
        dao.loadedOffset
      }

    // Processes one element at a time (parallelism = 1); the event handler
    // and the offset update are combined into a single transactional action.
    override def handle(): Flow[EventStreamElement[Event], Done, NotUsed] =
      Flow[EventStreamElement[Event]]
        .mapAsync(parallelism = 1) { element =>

          val dbAction = eventHandlers
            .get(element.event.getClass)
            .map { handler =>
              val castedHandler = handler.asInstanceOf[(Connection, EventStreamElement[Event]) => Unit]
              SimpleDBIO { ctx =>
                castedHandler(ctx.connection, element)
              }
            }
            .getOrElse {
              // fallback to empty action if no handler is found
              if (log.isDebugEnabled) log.debug("Unhandled event [{}]", element.event.getClass.getName)
              DBIO.successful(())
            }
            .flatMap { _ =>
              offsetDao.updateOffsetQuery(element.offset)
            }
            .map(_ => Done)

          slick.db.run(dbAction.transactionally)
        }
  }
}
| ignasi35/lagom | persistence-jdbc/scaladsl/src/main/scala/com/lightbend/lagom/internal/scaladsl/persistence/jdbc/JdbcReadSideImpl.scala | Scala | apache-2.0 | 4,519 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.util.concurrent.TimeUnit
import scala.collection.mutable
import org.mockito.ArgumentMatchers.any
import org.mockito.Mockito._
import org.scalatest.PrivateMethodTester
import org.apache.spark.executor.ExecutorMetrics
import org.apache.spark.internal.config
import org.apache.spark.internal.config.DECOMMISSION_ENABLED
import org.apache.spark.internal.config.Tests.TEST_DYNAMIC_ALLOCATION_SCHEDULE_ENABLED
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.resource._
import org.apache.spark.resource.ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.ExecutorInfo
import org.apache.spark.util.{Clock, ManualClock, SystemClock}
/**
* Test add and remove behavior of ExecutorAllocationManager.
*/
class ExecutorAllocationManagerSuite extends SparkFunSuite {
import ExecutorAllocationManager._
import ExecutorAllocationManagerSuite._
// Managers created during a test; stopped in afterEach to release their
// scheduled executor-allocation threads.
private val managers = new mutable.ListBuffer[ExecutorAllocationManager]()
// Listener bus the manager subscribes to; recreated per test in beforeEach.
private var listenerBus: LiveListenerBus = _
// Mocked allocation client backing the manager's executor requests.
private var client: ExecutorAllocationClient = _
// Real wall clock; tests that need deterministic timing use a ManualClock.
private val clock = new SystemClock()
// Resource-profile manager used to register non-default profiles in tests.
// NOTE(review): presumably assigned inside createManager — not visible here.
private var rpManager: ResourceProfileManager = _
override def beforeEach(): Unit = {
  super.beforeEach()
  // Start each test from a clean slate: no leftover managers, a fresh
  // listener bus, and a mock client that reports every executor as active.
  managers.clear()
  listenerBus = new LiveListenerBus(new SparkConf())
  listenerBus.start(null, mock(classOf[MetricsSystem]))
  client = mock(classOf[ExecutorAllocationClient])
  when(client.isExecutorActive(any())).thenReturn(true)
}
override def afterEach(): Unit = {
  try {
    // Stop the bus first so no more events are delivered, then stop every
    // manager the test created to cancel its scheduled tasks.
    listenerBus.stop()
    managers.foreach(_.stop())
  } finally {
    // Clear the bus reference even if stopping failed, so the next test
    // cannot accidentally reuse a half-stopped bus.
    listenerBus = null
    super.afterEach()
  }
}
/**
 * Posts an event to the listener bus and blocks until the bus has drained,
 * so assertions that follow observe the event's effects on the manager.
 */
private def post(event: SparkListenerEvent): Unit = {
  listenerBus.post(event)
  listenerBus.waitUntilEmpty()
}
test("initialize dynamic allocation in SparkContext") {
  // With dynamic allocation enabled in the conf (via createConf),
  // SparkContext should create an ExecutorAllocationManager on startup.
  val conf = createConf(0, 1, 0)
    .setMaster("local-cluster[1,1,1024]")
    .setAppName(getClass().getName())
  val sc0 = new SparkContext(conf)
  try {
    assert(sc0.executorAllocationManager.isDefined)
  } finally {
    // Always stop the context so the local cluster is torn down.
    sc0.stop()
  }
}
test("verify min/max executors") {
  // Invalid bound combinations must be rejected at construction time.
  val invalidConfs = Seq(
    createConf().set(config.DYN_ALLOCATION_MIN_EXECUTORS, -1), // Min < 0
    createConf().set(config.DYN_ALLOCATION_MAX_EXECUTORS, -1), // Max < 0
    createConf(2, 1)                                           // min > max
  )
  invalidConfs.foreach { badConf =>
    intercept[SparkException] {
      createManager(badConf)
    }
  }
  // Valid combinations: min == max, and min < max.
  createManager(createConf(1, 1))
  createManager(createConf(1, 2))
}
test("starting state") {
  // A freshly created manager has the initial target of one executor,
  // nothing pending removal, and no add time scheduled yet.
  val freshManager = createManager(createConf())
  assert(addTime(freshManager) === ExecutorAllocationManager.NOT_SET)
  assert(executorsPendingToRemove(freshManager).isEmpty)
  assert(numExecutorsTargetForDefaultProfileId(freshManager) === 1)
}
test("add executors default profile") {
  // Exponential ramp-up for the default resource profile: the number of
  // executors requested doubles each round (1, 2, 4, 8, ...) until the
  // max of 10 caps it; after that, further add attempts are no-ops even
  // as previously requested executors register.
  val manager = createManager(createConf(1, 10, 1))
  post(SparkListenerStageSubmitted(createStageInfo(0, 1000)))
  val updatesNeeded =
    new mutable.HashMap[ResourceProfile, ExecutorAllocationManager.TargetNumUpdates]

  // Keep adding until the limit is reached
  assert(numExecutorsTargetForDefaultProfileId(manager) === 1)
  assert(numExecutorsToAddForDefaultProfile(manager) === 1)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 1)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 2)
  assert(numExecutorsToAddForDefaultProfile(manager) === 2)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 2)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 4)
  assert(numExecutorsToAddForDefaultProfile(manager) === 4)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 4)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 8)
  assert(numExecutorsToAddForDefaultProfile(manager) === 8)
  // reached the limit of 10: only 2 more can be added, and the add batch
  // size resets to 1 once the cap is hit
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 2)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 10)
  assert(numExecutorsToAddForDefaultProfile(manager) === 1)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 0)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 10)
  assert(numExecutorsToAddForDefaultProfile(manager) === 1)

  // Register previously requested executors; the target must not change
  onExecutorAddedDefaultProfile(manager, "first")
  assert(numExecutorsTargetForDefaultProfileId(manager) === 10)
  onExecutorAddedDefaultProfile(manager, "second")
  onExecutorAddedDefaultProfile(manager, "third")
  onExecutorAddedDefaultProfile(manager, "fourth")
  assert(numExecutorsTargetForDefaultProfileId(manager) === 10)
  onExecutorAddedDefaultProfile(manager, "first") // duplicates should not count
  onExecutorAddedDefaultProfile(manager, "second")
  assert(numExecutorsTargetForDefaultProfileId(manager) === 10)

  // Try adding again
  // This should still fail because the number pending + running is still at the limit
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 0)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 10)
  assert(numExecutorsToAddForDefaultProfile(manager) === 1)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 0)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 10)
  assert(numExecutorsToAddForDefaultProfile(manager) === 1)
}
test("add executors multiple profiles") {
  // Same exponential ramp-up as the default-profile test, but with a second
  // custom resource profile (rprof1). Each profile keeps its own independent
  // target and add-batch counters, both capped at the max of 10.
  val manager = createManager(createConf(1, 10, 1))
  post(SparkListenerStageSubmitted(createStageInfo(0, 1000, rp = defaultProfile)))
  // Build a GPU-backed profile and submit a second stage that uses it.
  val rp1 = new ResourceProfileBuilder()
  val execReqs = new ExecutorResourceRequests().cores(4).resource("gpu", 4)
  val taskReqs = new TaskResourceRequests().cpus(1).resource("gpu", 1)
  rp1.require(execReqs).require(taskReqs)
  val rprof1 = rp1.build
  rpManager.addResourceProfile(rprof1)
  post(SparkListenerStageSubmitted(createStageInfo(1, 1000, rp = rprof1)))
  val updatesNeeded =
    new mutable.HashMap[ResourceProfile, ExecutorAllocationManager.TargetNumUpdates]

  // Keep adding until the limit is reached; both profiles ramp in lockstep
  assert(numExecutorsTargetForDefaultProfileId(manager) === 1)
  assert(numExecutorsToAddForDefaultProfile(manager) === 1)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 1)
  assert(numExecutorsToAdd(manager, rprof1) === 1)
  assert(numExecutorsTarget(manager, rprof1.id) === 1)
  assert(addExecutorsToTarget(manager, updatesNeeded, rprof1) === 1)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 2)
  assert(numExecutorsToAddForDefaultProfile(manager) === 2)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 2)
  assert(numExecutorsToAdd(manager, rprof1) === 2)
  assert(numExecutorsTarget(manager, rprof1.id) === 2)
  assert(addExecutorsToTarget(manager, updatesNeeded, rprof1) === 2)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 4)
  assert(numExecutorsToAddForDefaultProfile(manager) === 4)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 4)
  assert(numExecutorsToAdd(manager, rprof1) === 4)
  assert(numExecutorsTarget(manager, rprof1.id) === 4)
  assert(addExecutorsToTarget(manager, updatesNeeded, rprof1) === 4)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 8)
  assert(numExecutorsToAddForDefaultProfile(manager) === 8)
  // reached the limit of 10 for both profiles
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 2)
  assert(numExecutorsToAdd(manager, rprof1) === 8)
  assert(numExecutorsTarget(manager, rprof1.id) === 8)
  assert(addExecutorsToTarget(manager, updatesNeeded, rprof1) === 2)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 10)
  assert(numExecutorsToAddForDefaultProfile(manager) === 1)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 0)
  assert(numExecutorsToAdd(manager, rprof1) === 1)
  assert(numExecutorsTarget(manager, rprof1.id) === 10)
  assert(addExecutorsToTarget(manager, updatesNeeded, rprof1) === 0)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 10)
  assert(numExecutorsToAddForDefaultProfile(manager) === 1)
  assert(numExecutorsToAdd(manager, rprof1) === 1)
  assert(numExecutorsTarget(manager, rprof1.id) === 10)

  // Register previously requested executors; per-profile targets must not change
  onExecutorAddedDefaultProfile(manager, "first")
  onExecutorAdded(manager, "firstrp1", rprof1)
  assert(numExecutorsTargetForDefaultProfileId(manager) === 10)
  assert(numExecutorsTarget(manager, rprof1.id) === 10)
  onExecutorAddedDefaultProfile(manager, "second")
  onExecutorAddedDefaultProfile(manager, "third")
  onExecutorAddedDefaultProfile(manager, "fourth")
  onExecutorAdded(manager, "secondrp1", rprof1)
  onExecutorAdded(manager, "thirdrp1", rprof1)
  onExecutorAdded(manager, "fourthrp1", rprof1)
  assert(numExecutorsTargetForDefaultProfileId(manager) === 10)
  assert(numExecutorsTarget(manager, rprof1.id) === 10)
  onExecutorAddedDefaultProfile(manager, "first") // duplicates should not count
  onExecutorAddedDefaultProfile(manager, "second")
  onExecutorAdded(manager, "firstrp1", rprof1)
  onExecutorAdded(manager, "secondrp1", rprof1)
  assert(numExecutorsTargetForDefaultProfileId(manager) === 10)
  assert(numExecutorsTarget(manager, rprof1.id) === 10)

  // Try adding again
  // This should still fail because the number pending + running is still at the limit
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 0)
  assert(addExecutorsToTarget(manager, updatesNeeded, rprof1) === 0)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 10)
  assert(numExecutorsToAddForDefaultProfile(manager) === 1)
  assert(numExecutorsToAdd(manager, rprof1) === 1)
  assert(numExecutorsTarget(manager, rprof1.id) === 10)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 0)
  assert(addExecutorsToTarget(manager, updatesNeeded, rprof1) === 0)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 10)
  assert(numExecutorsToAddForDefaultProfile(manager) === 1)
  assert(numExecutorsToAdd(manager, rprof1) === 1)
  assert(numExecutorsTarget(manager, rprof1.id) === 10)
}
test("add executors multiple profiles initial num same as needed") {
  // test when the initial number of executors equals the number needed for the first
  // stage using a non default profile to make sure we request the initial number
  // properly. Here initial is 2, each executor in ResourceProfile 1 can have 2 tasks
  // per executor, and start a stage with 4 tasks, which would need 2 executors.
  val clock = new ManualClock(8888L)
  val manager = createManager(createConf(0, 10, 2), clock)
  val rp1 = new ResourceProfileBuilder()
  val execReqs = new ExecutorResourceRequests().cores(2).resource("gpu", 2)
  val taskReqs = new TaskResourceRequests().cpus(1).resource("gpu", 1)
  rp1.require(execReqs).require(taskReqs)
  val rprof1 = rp1.build
  rpManager.addResourceProfile(rprof1)
  when(client.requestTotalExecutors(any(), any(), any())).thenReturn(true)
  post(SparkListenerStageSubmitted(createStageInfo(1, 4, rp = rprof1)))
  // called once on start and a second time on stage submit with initial number
  verify(client, times(2)).requestTotalExecutors(any(), any(), any())
  assert(numExecutorsTarget(manager, rprof1.id) === 2)
}
test("remove executors multiple profiles") {
  // With min executors = 5, each resource profile may shed executors only
  // down to 5 running-plus-pending; removals beyond that are refused until
  // earlier removals complete. Executors 1-10 belong to rprof1, 11-20 to
  // rprof2, and 21-30 to the default profile.
  val manager = createManager(createConf(5, 10, 5))
  val rp1 = new ResourceProfileBuilder()
  val execReqs = new ExecutorResourceRequests().cores(4).resource("gpu", 4)
  val taskReqs = new TaskResourceRequests().cpus(1).resource("gpu", 1)
  rp1.require(execReqs).require(taskReqs)
  val rprof1 = rp1.build
  val rp2 = new ResourceProfileBuilder()
  val execReqs2 = new ExecutorResourceRequests().cores(1)
  val taskReqs2 = new TaskResourceRequests().cpus(1)
  rp2.require(execReqs2).require(taskReqs2)
  val rprof2 = rp2.build
  rpManager.addResourceProfile(rprof1)
  rpManager.addResourceProfile(rprof2)
  post(SparkListenerStageSubmitted(createStageInfo(1, 10, rp = rprof1)))
  post(SparkListenerStageSubmitted(createStageInfo(2, 10, rp = rprof2)))
  (1 to 10).map(_.toString).foreach { id => onExecutorAdded(manager, id, rprof1) }
  (11 to 20).map(_.toString).foreach { id => onExecutorAdded(manager, id, rprof2) }
  (21 to 30).map(_.toString).foreach { id => onExecutorAdded(manager, id, defaultProfile) }

  // Keep removing until the limit is reached
  assert(executorsPendingToRemove(manager).isEmpty)
  assert(removeExecutor(manager, "1", rprof1.id))
  assert(executorsPendingToRemove(manager).size === 1)
  assert(executorsPendingToRemove(manager).contains("1"))
  assert(removeExecutor(manager, "11", rprof2.id))
  assert(removeExecutor(manager, "2", rprof1.id))
  assert(executorsPendingToRemove(manager).size === 3)
  assert(executorsPendingToRemove(manager).contains("2"))
  assert(executorsPendingToRemove(manager).contains("11"))
  assert(removeExecutor(manager, "21", defaultProfile.id))
  assert(removeExecutor(manager, "3", rprof1.id))
  assert(removeExecutor(manager, "4", rprof1.id))
  assert(executorsPendingToRemove(manager).size === 6)
  assert(executorsPendingToRemove(manager).contains("21"))
  assert(executorsPendingToRemove(manager).contains("3"))
  assert(executorsPendingToRemove(manager).contains("4"))
  assert(removeExecutor(manager, "5", rprof1.id))
  assert(!removeExecutor(manager, "6", rprof1.id)) // reached the limit of 5
  assert(executorsPendingToRemove(manager).size === 7)
  assert(executorsPendingToRemove(manager).contains("5"))
  assert(!executorsPendingToRemove(manager).contains("6"))

  // Kill executors previously requested to remove
  onExecutorRemoved(manager, "1")
  assert(executorsPendingToRemove(manager).size === 6)
  assert(!executorsPendingToRemove(manager).contains("1"))
  onExecutorRemoved(manager, "2")
  onExecutorRemoved(manager, "3")
  assert(executorsPendingToRemove(manager).size === 4)
  assert(!executorsPendingToRemove(manager).contains("2"))
  assert(!executorsPendingToRemove(manager).contains("3"))
  onExecutorRemoved(manager, "2") // duplicates should not count
  onExecutorRemoved(manager, "3")
  assert(executorsPendingToRemove(manager).size === 4)
  onExecutorRemoved(manager, "4")
  onExecutorRemoved(manager, "5")
  assert(executorsPendingToRemove(manager).size === 2)
  assert(executorsPendingToRemove(manager).contains("11"))
  assert(executorsPendingToRemove(manager).contains("21"))

  // Try removing again
  // This should still fail because the number pending + running is still at the limit
  assert(!removeExecutor(manager, "7", rprof1.id))
  assert(executorsPendingToRemove(manager).size === 2)
  assert(!removeExecutor(manager, "8", rprof1.id))
  assert(executorsPendingToRemove(manager).size === 2)

  // make sure rprof2 has the same min limit or 5
  assert(removeExecutor(manager, "12", rprof2.id))
  assert(removeExecutor(manager, "13", rprof2.id))
  assert(removeExecutor(manager, "14", rprof2.id))
  assert(removeExecutor(manager, "15", rprof2.id))
  assert(!removeExecutor(manager, "16", rprof2.id)) // reached the limit of 5
  assert(executorsPendingToRemove(manager).size === 6)
  assert(!executorsPendingToRemove(manager).contains("16"))
  onExecutorRemoved(manager, "11")
  onExecutorRemoved(manager, "12")
  onExecutorRemoved(manager, "13")
  onExecutorRemoved(manager, "14")
  onExecutorRemoved(manager, "15")
  assert(executorsPendingToRemove(manager).size === 1)
}
/**
 * Drives a manager configured with the given executor cores and allocation
 * ratio through several allocation rounds for a 20-task stage, then checks
 * that the default-profile executor target converges to `expected`.
 */
def testAllocationRatio(cores: Int, divisor: Double, expected: Int): Unit = {
  val pendingUpdates =
    new mutable.HashMap[ResourceProfile, ExecutorAllocationManager.TargetNumUpdates]
  val ratioConf = createConf(3, 15)
    .set(config.DYN_ALLOCATION_EXECUTOR_ALLOCATION_RATIO, divisor)
    .set(config.EXECUTOR_CORES, cores)
  val manager = createManager(ratioConf)
  post(SparkListenerStageSubmitted(createStageInfo(0, 20)))
  // Six rounds are more than enough to reach the steady-state target.
  (0 to 5).foreach { _ =>
    addExecutorsToTargetForDefaultProfile(manager, pendingUpdates)
    doUpdateRequest(manager, pendingUpdates.toMap, clock.getTimeMillis())
  }
  assert(numExecutorsTargetForDefaultProfileId(manager) === expected)
}
test("executionAllocationRatio is correctly handled") {
  // Each row: (cores per executor, allocation ratio, expected target).
  val ratioCases = Seq(
    (1, 0.5, 10),
    (1, 1.0 / 3.0, 7),
    (2, 1.0 / 3.0, 4),
    (1, 0.385, 8),
    // max/min executors capping
    (1, 1.0, 15),      // would be 20 but capped by max
    (4, 1.0 / 3.0, 3)  // would be 2 but elevated by min
  )
  for ((cores, ratio, expected) <- ratioCases) {
    testAllocationRatio(cores, ratio, expected)
  }
}
test("add executors capped by num pending tasks") {
  // The target must never exceed the number of pending tasks, even while
  // the add batch size is trying to double; starting or re-running tasks
  // must not inflate the target past that cap.
  val manager = createManager(createConf(0, 10, 0))
  post(SparkListenerStageSubmitted(createStageInfo(0, 5)))
  val updatesNeeded =
    new mutable.HashMap[ResourceProfile, ExecutorAllocationManager.TargetNumUpdates]

  // Verify that we're capped at number of tasks in the stage
  assert(numExecutorsTargetForDefaultProfileId(manager) === 0)
  assert(numExecutorsToAddForDefaultProfile(manager) === 1)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 1)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 1)
  assert(numExecutorsToAddForDefaultProfile(manager) === 2)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 2)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 3)
  assert(numExecutorsToAddForDefaultProfile(manager) === 4)
  // only 2 of the 4 requested fit under the 5-pending-task cap
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 2)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 5)
  assert(numExecutorsToAddForDefaultProfile(manager) === 1)

  // Verify that running a task doesn't affect the target
  post(SparkListenerStageSubmitted(createStageInfo(1, 3)))
  post(SparkListenerExecutorAdded(
    0L, "executor-1", new ExecutorInfo("host1", 1, Map.empty, Map.empty)))
  post(SparkListenerTaskStart(1, 0, createTaskInfo(0, 0, "executor-1")))
  assert(numExecutorsTargetForDefaultProfileId(manager) === 5)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 1)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 6)
  assert(numExecutorsToAddForDefaultProfile(manager) === 2)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 2)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 8)
  assert(numExecutorsToAddForDefaultProfile(manager) === 4)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 0)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 8)
  assert(numExecutorsToAddForDefaultProfile(manager) === 1)

  // Verify that re-running a task doesn't blow things up
  post(SparkListenerStageSubmitted(createStageInfo(2, 3)))
  post(SparkListenerTaskStart(2, 0, createTaskInfo(0, 0, "executor-1")))
  post(SparkListenerTaskStart(2, 0, createTaskInfo(1, 0, "executor-1")))
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 1)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 9)
  assert(numExecutorsToAddForDefaultProfile(manager) === 2)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 1)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 10)
  assert(numExecutorsToAddForDefaultProfile(manager) === 1)

  // Verify that running a task once we're at our limit doesn't blow things up
  post(SparkListenerTaskStart(2, 0, createTaskInfo(0, 1, "executor-1")))
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 0)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 10)
}
test("add executors when speculative tasks added") {
  // Speculative task submissions count toward the pending-task cap just
  // like regular tasks; starting either kind of task leaves the target
  // unchanged.
  val manager = createManager(createConf(0, 10, 0))
  val updatesNeeded =
    new mutable.HashMap[ResourceProfile, ExecutorAllocationManager.TargetNumUpdates]
  post(SparkListenerStageSubmitted(createStageInfo(1, 2)))

  // Verify that we're capped at number of tasks including the speculative ones in the stage
  post(SparkListenerSpeculativeTaskSubmitted(1))
  assert(numExecutorsTargetForDefaultProfileId(manager) === 0)
  assert(numExecutorsToAddForDefaultProfile(manager) === 1)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 1)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  post(SparkListenerSpeculativeTaskSubmitted(1))
  post(SparkListenerSpeculativeTaskSubmitted(1))
  assert(numExecutorsTargetForDefaultProfileId(manager) === 1)
  assert(numExecutorsToAddForDefaultProfile(manager) === 2)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 2)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 3)
  assert(numExecutorsToAddForDefaultProfile(manager) === 4)
  // capped: 2 regular + 3 speculative = 5 total tasks
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 2)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 5)
  assert(numExecutorsToAddForDefaultProfile(manager) === 1)

  // Verify that running a task doesn't affect the target
  post(SparkListenerTaskStart(1, 0, createTaskInfo(0, 0, "executor-1")))
  assert(numExecutorsTargetForDefaultProfileId(manager) === 5)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 0)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsToAddForDefaultProfile(manager) === 1)

  // Verify that running a speculative task doesn't affect the target
  post(SparkListenerTaskStart(1, 0, createTaskInfo(1, 0, "executor-2", true)))
  assert(numExecutorsTargetForDefaultProfileId(manager) === 5)
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 0)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(numExecutorsToAddForDefaultProfile(manager) === 1)
}
test("SPARK-31418: one stage being unschedulable") {
  // When a task set becomes unschedulable (all its tasks excluded from the
  // only live executor), the manager should request one extra executor, and
  // drop back once the task set is schedulable again.
  val clock = new ManualClock()
  val conf = createConf(0, 5, 0).set(config.EXECUTOR_CORES, 2)
  val manager = createManager(conf, clock = clock)
  val updatesNeeded =
    new mutable.HashMap[ResourceProfile, ExecutorAllocationManager.TargetNumUpdates]

  // One 2-task stage fits on a single 2-core executor.
  post(SparkListenerStageSubmitted(createStageInfo(0, 2)))
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 1)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 0)

  onExecutorAddedDefaultProfile(manager, "0")
  val t1 = createTaskInfo(0, 0, executorId = s"0")
  val t2 = createTaskInfo(1, 1, executorId = s"0")
  post(SparkListenerTaskStart(0, 0, t1))
  post(SparkListenerTaskStart(0, 0, t2))

  assert(numExecutorsTarget(manager, defaultProfile.id) === 1)
  assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 1)

  // Stage 0 becomes unschedulable due to excludeOnFailure
  post(SparkListenerUnschedulableTaskSetAdded(0, 0))
  clock.advance(1000)
  manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
  // Assert that we are getting additional executor to schedule unschedulable tasks
  assert(numExecutorsTarget(manager, defaultProfile.id) === 2)
  assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 2)

  // Add a new executor
  onExecutorAddedDefaultProfile(manager, "1")
  // Now once the task becomes schedulable, clear the unschedulableTaskSets
  post(SparkListenerUnschedulableTaskSetRemoved(0, 0))
  clock.advance(1000)
  manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
  assert(numExecutorsTarget(manager, defaultProfile.id) === 1)
  assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 1)
}
test("SPARK-31418: multiple stages being unschedulable") {
  // Like the single-stage case, but with two concurrently unschedulable
  // task sets: resolving only one of them still leaves extra capacity
  // needed for the other.
  val clock = new ManualClock()
  val conf = createConf(0, 10, 0).set(config.EXECUTOR_CORES, 2)
  val manager = createManager(conf, clock = clock)
  val updatesNeeded =
    new mutable.HashMap[ResourceProfile, ExecutorAllocationManager.TargetNumUpdates]

  // Three 2-task stages need three 2-core executors in total.
  post(SparkListenerStageSubmitted(createStageInfo(0, 2)))
  post(SparkListenerStageSubmitted(createStageInfo(1, 2)))
  post(SparkListenerStageSubmitted(createStageInfo(2, 2)))

  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 1)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 2)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 0)

  // Add necessary executors
  (0 to 2).foreach(execId => onExecutorAddedDefaultProfile(manager, execId.toString))

  // Start all the tasks
  (0 to 2).foreach {
    i =>
      val t1Info = createTaskInfo(0, (i * 2) + 1, executorId = s"${i / 2}")
      val t2Info = createTaskInfo(1, (i * 2) + 2, executorId = s"${i / 2}")
      post(SparkListenerTaskStart(i, 0, t1Info))
      post(SparkListenerTaskStart(i, 0, t2Info))
  }
  assert(numExecutorsTarget(manager, defaultProfile.id) === 3)
  assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 3)

  // Complete the stage 0 tasks.
  val t1Info = createTaskInfo(0, 0, executorId = s"0")
  val t2Info = createTaskInfo(1, 1, executorId = s"0")
  post(SparkListenerTaskEnd(0, 0, null, Success, t1Info, new ExecutorMetrics, null))
  post(SparkListenerTaskEnd(0, 0, null, Success, t2Info, new ExecutorMetrics, null))
  post(SparkListenerStageCompleted(createStageInfo(0, 2)))

  // Stage 1 and 2 becomes unschedulable now due to excludeOnFailure
  post(SparkListenerUnschedulableTaskSetAdded(1, 0))
  post(SparkListenerUnschedulableTaskSetAdded(2, 0))
  clock.advance(1000)
  manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
  // Assert that we are getting additional executor to schedule unschedulable tasks
  assert(numExecutorsTarget(manager, defaultProfile.id) === 4)
  assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 4)

  // Add a new executor
  onExecutorAddedDefaultProfile(manager, "3")

  // Now once the task becomes schedulable, clear the unschedulableTaskSets
  post(SparkListenerUnschedulableTaskSetRemoved(1, 0))
  clock.advance(1000)
  manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
  // Stage 2 is still unschedulable, so the needed count stays elevated.
  assert(numExecutorsTarget(manager, defaultProfile.id) === 4)
  assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 5)
}
test("SPARK-31418: remove executors after unschedulable tasks end") {
  // End-to-end: scale up for a 10-task stage, scale down as tasks finish,
  // scale up again when the remaining task set becomes unschedulable, and
  // finally scale to zero once the stage completes.
  val clock = new ManualClock()
  val stage = createStageInfo(0, 10)
  val conf = createConf(0, 6, 0).set(config.EXECUTOR_CORES, 2)
  val manager = createManager(conf, clock = clock)
  val updatesNeeded =
    new mutable.HashMap[ResourceProfile, ExecutorAllocationManager.TargetNumUpdates]

  post(SparkListenerStageSubmitted(stage))
  // Ramp up: 1 + 2 + 2 executors, then no more needed (5 cover 10 tasks).
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 1)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 2)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 2)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 0)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())

  (0 to 4).foreach(execId => onExecutorAddedDefaultProfile(manager, execId.toString))
  (0 to 9).map { i => createTaskInfo(i, i, executorId = s"${i / 2}") }.foreach {
    info => post(SparkListenerTaskStart(0, 0, info))
  }
  assert(numExecutorsTarget(manager, defaultProfile.id) === 5)
  assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 5)

  // 8 tasks (0 - 7) finished
  (0 to 7).map { i => createTaskInfo(i, i, executorId = s"${i / 2}") }.foreach {
    info => post(SparkListenerTaskEnd(0, 0, null, Success, info, new ExecutorMetrics, null))
  }
  clock.advance(1000)
  manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
  assert(numExecutorsTarget(manager, defaultProfile.id) === 1)
  assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 1)
  (0 to 3).foreach { i => assert(removeExecutorDefaultProfile(manager, i.toString)) }
  (0 to 3).foreach { i => onExecutorRemoved(manager, i.toString) }

  // Now due to executor being excluded, the task becomes unschedulable
  post(SparkListenerUnschedulableTaskSetAdded(0, 0))
  clock.advance(1000)
  manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
  assert(numExecutorsTarget(manager, defaultProfile.id) === 2)
  assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 2)

  // New executor got added
  onExecutorAddedDefaultProfile(manager, "5")

  // Now once the task becomes schedulable, clear the unschedulableTaskSets
  post(SparkListenerUnschedulableTaskSetRemoved(0, 0))
  clock.advance(1000)
  manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
  assert(numExecutorsTarget(manager, defaultProfile.id) === 1)
  assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 1)
  post(SparkListenerTaskEnd(0, 0, null, Success,
    createTaskInfo(9, 9, "4"), new ExecutorMetrics, null))
  // Unschedulable task successfully ran on the new executor provisioned
  post(SparkListenerTaskEnd(0, 0, null, Success,
    createTaskInfo(8, 8, "5"), new ExecutorMetrics, null))
  clock.advance(1000)
  manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
  post(SparkListenerStageCompleted(stage))
  clock.advance(1000)
  manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
  // Nothing left to run: target drops to zero and the remaining executors
  // can be removed.
  assert(numExecutorsTarget(manager, defaultProfile.id) === 0)
  assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 0)
  assert(removeExecutorDefaultProfile(manager, "4"))
  onExecutorRemoved(manager, "4")
  assert(removeExecutorDefaultProfile(manager, "5"))
  onExecutorRemoved(manager, "5")
}
test("SPARK-30511 remove executors when speculative tasks end") {
// Regression test for SPARK-30511: the allocation target must track both
// original and speculative tasks as they start, finish, fail or get killed,
// and executors freed along the way must become removable.
val clock = new ManualClock()
val stage = createStageInfo(0, 40)
// 4 cores per executor, so the 40-task stage needs at most 10 executors.
val conf = createConf(0, 10, 0).set(config.EXECUTOR_CORES, 4)
val manager = createManager(conf, clock = clock)
val updatesNeeded =
new mutable.HashMap[ResourceProfile, ExecutorAllocationManager.TargetNumUpdates]
post(SparkListenerStageSubmitted(stage))
// Exponential ramp-up (+1, +2, +4) capped by the 10-executor maximum (+3).
assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 1)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 2)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 4)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 3)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
// Task i runs on executor i / 4 (4 tasks per executor).
(0 to 9).foreach(execId => onExecutorAddedDefaultProfile(manager, execId.toString))
(0 to 39).map { i => createTaskInfo(i, i, executorId = s"${i / 4}")}.foreach {
info => post(SparkListenerTaskStart(0, 0, info))
}
assert(numExecutorsTarget(manager, defaultProfile.id) === 10)
assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 10)
// 30 tasks (0 - 29) finished
(0 to 29).map { i => createTaskInfo(i, i, executorId = s"${i / 4}")}.foreach {
info => post(SparkListenerTaskEnd(0, 0, null, Success, info, new ExecutorMetrics, null)) }
clock.advance(1000)
manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
// 10 tasks remain, so ceil(10 / 4) = 3 executors are needed now.
assert(numExecutorsTarget(manager, defaultProfile.id) === 3)
assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 3)
(0 to 6).foreach { i => assert(removeExecutorDefaultProfile(manager, i.toString))}
(0 to 6).foreach { i => onExecutorRemoved(manager, i.toString)}
// 10 speculative tasks (30 - 39) launch for the remaining tasks
(30 to 39).foreach { _ => post(SparkListenerSpeculativeTaskSubmitted(0))}
assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 1)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 1)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
assert(numExecutorsTarget(manager, defaultProfile.id) == 5)
assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 5)
(10 to 12).foreach(execId => onExecutorAddedDefaultProfile(manager, execId.toString))
// Speculative copies reuse the original task index (i - 10).
(40 to 49).map { i =>
createTaskInfo(taskId = i, taskIndex = i - 10, executorId = s"${i / 4}", speculative = true)}
.foreach { info => post(SparkListenerTaskStart(0, 0, info))}
clock.advance(1000)
manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
// At this point, we still have 6 executors running
assert(numExecutorsTarget(manager, defaultProfile.id) == 5)
assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 5)
// 6 speculative tasks (40 - 45) finish before the original tasks, with 4 speculative remaining
(40 to 45).map { i =>
createTaskInfo(taskId = i, taskIndex = i - 10, executorId = s"${i / 4}", speculative = true)}
.foreach {
info => post(SparkListenerTaskEnd(0, 0, null, Success, info, new ExecutorMetrics, null))}
clock.advance(1000)
manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
assert(numExecutorsTarget(manager, defaultProfile.id) === 4)
assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 4)
assert(removeExecutorDefaultProfile(manager, "10"))
onExecutorRemoved(manager, "10")
// At this point, we still have 5 executors running: ["7", "8", "9", "11", "12"]
// 6 original tasks (30 - 35) are intentionally killed
(30 to 35).map { i =>
createTaskInfo(i, i, executorId = s"${i / 4}")}
.foreach { info => post(
SparkListenerTaskEnd(0, 0, null, TaskKilled("test"), info, new ExecutorMetrics, null))}
clock.advance(1000)
manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
assert(numExecutorsTarget(manager, defaultProfile.id) === 2)
assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 2)
(7 to 8).foreach { i => assert(removeExecutorDefaultProfile(manager, i.toString))}
(7 to 8).foreach { i => onExecutorRemoved(manager, i.toString)}
// At this point, we still have 3 executors running: ["9", "11", "12"]
// Task 36 finishes before the speculative task 46, task 46 killed
post(SparkListenerTaskEnd(0, 0, null, Success,
createTaskInfo(36, 36, executorId = "9"), new ExecutorMetrics, null))
post(SparkListenerTaskEnd(0, 0, null, TaskKilled("test"),
createTaskInfo(46, 36, executorId = "11", speculative = true), new ExecutorMetrics, null))
// We should have 3 original tasks (index 37, 38, 39) running, with corresponding 3 speculative
// tasks running. Target lowers to 2, but still hold 3 executors ["9", "11", "12"]
clock.advance(1000)
manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
assert(numExecutorsTarget(manager, defaultProfile.id) === 2)
assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 2)
// At this point, we still have 3 executors running: ["9", "11", "12"]
// Task 37 and 47 succeed at the same time
post(SparkListenerTaskEnd(0, 0, null, Success,
createTaskInfo(37, 37, executorId = "9"), new ExecutorMetrics, null))
post(SparkListenerTaskEnd(0, 0, null, Success,
createTaskInfo(47, 37, executorId = "11", speculative = true), new ExecutorMetrics, null))
// We should have 2 original tasks (index 38, 39) running, with corresponding 2 speculative
// tasks running
clock.advance(1000)
manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
assert(numExecutorsTarget(manager, defaultProfile.id) === 1)
assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 1)
assert(removeExecutorDefaultProfile(manager, "11"))
onExecutorRemoved(manager, "11")
// At this point, we still have 2 executors running: ["9", "12"]
// Task 38 fails and task 49 fails, new speculative task 50 is submitted to speculate on task 39
post(SparkListenerTaskEnd(0, 0, null, UnknownReason,
createTaskInfo(38, 38, executorId = "9"), new ExecutorMetrics, null))
post(SparkListenerTaskEnd(0, 0, null, UnknownReason,
createTaskInfo(49, 39, executorId = "12", speculative = true), new ExecutorMetrics, null))
post(SparkListenerSpeculativeTaskSubmitted(0))
clock.advance(1000)
manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
// maxNeeded = 1, allocate one more to satisfy speculation locality requirement
assert(numExecutorsTarget(manager, defaultProfile.id) === 2)
assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 2)
post(SparkListenerTaskStart(0, 0,
createTaskInfo(50, 39, executorId = "12", speculative = true)))
clock.advance(1000)
manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
assert(numExecutorsTarget(manager, defaultProfile.id) === 1)
assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 1)
// Task 39 and 48 succeed, task 50 killed
post(SparkListenerTaskEnd(0, 0, null, Success,
createTaskInfo(39, 39, executorId = "9"), new ExecutorMetrics, null))
post(SparkListenerTaskEnd(0, 0, null, Success,
createTaskInfo(48, 38, executorId = "12", speculative = true), new ExecutorMetrics, null))
post(SparkListenerTaskEnd(0, 0, null, TaskKilled("test"),
createTaskInfo(50, 39, executorId = "12", speculative = true), new ExecutorMetrics, null))
post(SparkListenerStageCompleted(stage))
clock.advance(1000)
manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
// Stage complete: target drops to the configured minimum of 0, and the
// last executors become removable.
assert(numExecutorsTarget(manager, defaultProfile.id) === 0)
assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 0)
assert(removeExecutorDefaultProfile(manager, "9"))
onExecutorRemoved(manager, "9")
assert(removeExecutorDefaultProfile(manager, "12"))
onExecutorRemoved(manager, "12")
}
test("properly handle task end events from completed stages") {
  val manager = createManager(createConf(0, 10, 0))

  // Simulate a stage failing while some of its tasks are still running; another
  // attempt for the stage is then submitted, and task completions arrive from
  // the first (zombie) attempt. `totalTasksRunning` must stay consistent as
  // tasks finish from both the zombie and non-zombie attempts.
  val stage = createStageInfo(0, 5)
  post(SparkListenerStageSubmitted(stage))
  val zombieTask1 = createTaskInfo(0, 0, "executor-1")
  val zombieTask2 = createTaskInfo(1, 1, "executor-1")
  Seq(zombieTask1, zombieTask2).foreach { info =>
    post(SparkListenerTaskStart(0, 0, info))
  }

  // Completing the stage does not stop us counting the zombie attempt's tasks.
  post(SparkListenerStageCompleted(stage))
  assert(totalRunningTasksPerResourceProfile(manager) === 2)

  // Submit a second attempt; completions from the zombie attempt still count down.
  val retriedStage = createStageInfo(stage.stageId, 5, attemptId = 1)
  post(SparkListenerStageSubmitted(retriedStage))
  post(SparkListenerTaskEnd(0, 0, null, Success, zombieTask1, new ExecutorMetrics, null))
  assert(totalRunningTasksPerResourceProfile(manager) === 1)

  val retryTask1 = createTaskInfo(3, 0, "executor-1")
  val retryTask2 = createTaskInfo(4, 1, "executor-1")
  Seq(retryTask1, retryTask2).foreach { info =>
    post(SparkListenerTaskStart(0, 1, info))
  }
  assert(totalRunningTasksPerResourceProfile(manager) === 3)

  // Interleave completions across the two attempts; the count must step down
  // by one each time regardless of which attempt the task belonged to.
  post(SparkListenerTaskEnd(0, 1, null, Success, retryTask1, new ExecutorMetrics, null))
  assert(totalRunningTasksPerResourceProfile(manager) === 2)
  post(SparkListenerTaskEnd(0, 0, null, Success, zombieTask2, new ExecutorMetrics, null))
  assert(totalRunningTasksPerResourceProfile(manager) === 1)
  post(SparkListenerTaskEnd(0, 1, null, Success, retryTask2, new ExecutorMetrics, null))
  assert(totalRunningTasksPerResourceProfile(manager) === 0)
}
testRetry("cancel pending executors when no longer needed") {
  val manager = createManager(createConf(0, 10, 0))
  post(SparkListenerStageSubmitted(createStageInfo(2, 5)))
  val pendingUpdates =
    new mutable.HashMap[ResourceProfile, ExecutorAllocationManager.TargetNumUpdates]

  // Exponential ramp-up: the target grows 0 -> 1 -> 3 while the stage is pending.
  assert(numExecutorsTargetForDefaultProfileId(manager) === 0)
  assert(numExecutorsToAddForDefaultProfile(manager) === 1)
  assert(addExecutorsToTargetForDefaultProfile(manager, pendingUpdates) === 1)
  doUpdateRequest(manager, pendingUpdates.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 1)
  assert(numExecutorsToAddForDefaultProfile(manager) === 2)
  assert(addExecutorsToTargetForDefaultProfile(manager, pendingUpdates) === 2)
  doUpdateRequest(manager, pendingUpdates.toMap, clock.getTimeMillis())
  assert(numExecutorsTargetForDefaultProfileId(manager) === 3)

  // One task starts; only 2 more executors are needed to cover the remaining tasks.
  val firstTask = createTaskInfo(0, 0, "executor-1")
  post(SparkListenerTaskStart(2, 0, firstTask))
  assert(numExecutorsToAddForDefaultProfile(manager) === 4)
  assert(addExecutorsToTargetForDefaultProfile(manager, pendingUpdates) === 2)
  doUpdateRequest(manager, pendingUpdates.toMap, clock.getTimeMillis())

  val secondTask = createTaskInfo(1, 0, "executor-1")
  post(SparkListenerTaskStart(2, 0, secondTask))

  // With both tasks done, the outstanding request should be trimmed by one.
  for (info <- Seq(firstTask, secondTask)) {
    info.markFinished(TaskState.FINISHED, System.currentTimeMillis())
    post(SparkListenerTaskEnd(2, 0, null, Success, info, new ExecutorMetrics, null))
  }
  assert(adjustRequestedExecutors(manager) === -1)
}
test("remove executors") {
  val manager = createManager(createConf(5, 10, 5))
  for (id <- 1 to 10) onExecutorAddedDefaultProfile(manager, id.toString)

  // Request removals one at a time until we hit the lower bound of 5 executors.
  assert(executorsPendingToRemove(manager).isEmpty)
  assert(removeExecutorDefaultProfile(manager, "1"))
  assert(executorsPendingToRemove(manager).size === 1)
  assert(executorsPendingToRemove(manager).contains("1"))
  assert(removeExecutorDefaultProfile(manager, "2"))
  assert(removeExecutorDefaultProfile(manager, "3"))
  assert(executorsPendingToRemove(manager).size === 3)
  assert(Seq("2", "3").forall(executorsPendingToRemove(manager).contains))
  assert(removeExecutorDefaultProfile(manager, "4"))
  assert(removeExecutorDefaultProfile(manager, "5"))
  assert(!removeExecutorDefaultProfile(manager, "6")) // reached the limit of 5
  assert(executorsPendingToRemove(manager).size === 5)
  assert(Seq("4", "5").forall(executorsPendingToRemove(manager).contains))
  assert(!executorsPendingToRemove(manager).contains("6"))

  // Actually kill the executors previously requested for removal.
  onExecutorRemoved(manager, "1")
  assert(executorsPendingToRemove(manager).size === 4)
  assert(!executorsPendingToRemove(manager).contains("1"))
  onExecutorRemoved(manager, "2")
  onExecutorRemoved(manager, "3")
  assert(executorsPendingToRemove(manager).size === 2)
  assert(!executorsPendingToRemove(manager).contains("2"))
  assert(!executorsPendingToRemove(manager).contains("3"))
  // Duplicate removal notifications must not be double-counted.
  onExecutorRemoved(manager, "2")
  onExecutorRemoved(manager, "3")
  assert(executorsPendingToRemove(manager).size === 2)
  onExecutorRemoved(manager, "4")
  onExecutorRemoved(manager, "5")
  assert(executorsPendingToRemove(manager).isEmpty)

  // Further removals still fail: pending + running is back at the lower limit.
  assert(!removeExecutorDefaultProfile(manager, "7"))
  assert(executorsPendingToRemove(manager).isEmpty)
  assert(!removeExecutorDefaultProfile(manager, "8"))
  assert(executorsPendingToRemove(manager).isEmpty)
}
test("SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)") {
  val manager = createManager(createConf(3, 5, 3))
  val source = manager.executorAllocationManagerSource
  for (id <- 1 to 5) onExecutorAddedDefaultProfile(manager, id.toString)
  assert(executorsPendingToRemove(manager).isEmpty)
  assert(removeExecutorsDefaultProfile(manager, Seq("1", "2")) === Seq("1", "2"))
  assert(executorsPendingToRemove(manager).contains("1"))
  assert(executorsPendingToRemove(manager).contains("2"))

  // Losses of executors we asked to remove count as driver kills.
  onExecutorRemoved(manager, "1", "driver requested exit")
  assert(source.driverKilled.getCount() === 1)
  assert(source.exitedUnexpectedly.getCount() === 0)
  onExecutorRemoved(manager, "2", "another driver requested exit")
  assert(source.driverKilled.getCount() === 2)
  assert(source.exitedUnexpectedly.getCount() === 0)

  // Losing an executor we never asked to remove is an unexpected exit.
  onExecutorRemoved(manager, "3", "this will be an unexpected exit")
  assert(source.driverKilled.getCount() === 2)
  assert(source.exitedUnexpectedly.getCount() === 1)
}
test("SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)") {
  val manager = createManager(createConf(3, 5, 3, decommissioningEnabled = true))
  val source = manager.executorAllocationManagerSource
  for (id <- 1 to 5) onExecutorAddedDefaultProfile(manager, id.toString)
  assert(executorsPendingToRemove(manager).isEmpty)
  assert(removeExecutorsDefaultProfile(manager, Seq("1", "2")) === Seq("1", "2"))
  assert(executorsDecommissioning(manager).contains("1"))
  assert(executorsDecommissioning(manager).contains("2"))

  // A decommission that runs to completion is counted as graceful.
  onExecutorRemoved(manager, "1", ExecutorLossMessage.decommissionFinished)
  assert(source.gracefullyDecommissioned.getCount() === 1)
  assert(source.decommissionUnfinished.getCount() === 0)
  assert(source.exitedUnexpectedly.getCount() === 0)

  // A decommissioning executor that dies early is counted as unfinished.
  onExecutorRemoved(manager, "2", "stopped before gracefully finished")
  assert(source.gracefullyDecommissioned.getCount() === 1)
  assert(source.decommissionUnfinished.getCount() === 1)
  assert(source.exitedUnexpectedly.getCount() === 0)

  // Losing an executor that was never decommissioning is an unexpected exit.
  onExecutorRemoved(manager, "3", "this will be an unexpected exit")
  assert(source.gracefullyDecommissioned.getCount() === 1)
  assert(source.decommissionUnfinished.getCount() === 1)
  assert(source.exitedUnexpectedly.getCount() === 1)
}
test("remove multiple executors") {
  val manager = createManager(createConf(5, 10, 5))
  for (id <- 1 to 10) onExecutorAddedDefaultProfile(manager, id.toString)

  // Request removals, singly and in batches, until the lower bound of 5 is hit.
  assert(executorsPendingToRemove(manager).isEmpty)
  assert(removeExecutorsDefaultProfile(manager, Seq("1")) === Seq("1"))
  assert(executorsPendingToRemove(manager).size === 1)
  assert(executorsPendingToRemove(manager).contains("1"))
  assert(removeExecutorsDefaultProfile(manager, Seq("2", "3")) === Seq("2", "3"))
  assert(executorsPendingToRemove(manager).size === 3)
  assert(Seq("2", "3").forall(executorsPendingToRemove(manager).contains))
  assert(executorsPendingToRemove(manager).size === 3)
  assert(removeExecutorDefaultProfile(manager, "4"))
  assert(removeExecutorsDefaultProfile(manager, Seq("5")) === Seq("5"))
  assert(!removeExecutorDefaultProfile(manager, "6")) // reached the limit of 5
  assert(executorsPendingToRemove(manager).size === 5)
  assert(Seq("4", "5").forall(executorsPendingToRemove(manager).contains))
  assert(!executorsPendingToRemove(manager).contains("6"))

  // Actually kill the executors previously requested for removal.
  onExecutorRemoved(manager, "1")
  assert(executorsPendingToRemove(manager).size === 4)
  assert(!executorsPendingToRemove(manager).contains("1"))
  onExecutorRemoved(manager, "2")
  onExecutorRemoved(manager, "3")
  assert(executorsPendingToRemove(manager).size === 2)
  assert(!executorsPendingToRemove(manager).contains("2"))
  assert(!executorsPendingToRemove(manager).contains("3"))
  // Duplicate removal notifications must not be double-counted.
  onExecutorRemoved(manager, "2")
  onExecutorRemoved(manager, "3")
  assert(executorsPendingToRemove(manager).size === 2)
  onExecutorRemoved(manager, "4")
  onExecutorRemoved(manager, "5")
  assert(executorsPendingToRemove(manager).isEmpty)

  // Further removals still fail: pending + running is back at the lower limit.
  assert(!removeExecutorDefaultProfile(manager, "7"))
  assert(executorsPendingToRemove(manager).isEmpty)
  assert(removeExecutorsDefaultProfile(manager, Seq("8")) !== Seq("8"))
  assert(executorsPendingToRemove(manager).isEmpty)
}
test("Removing with various numExecutorsTargetForDefaultProfileId condition") {
  val manager = createManager(createConf(5, 12, 5))
  post(SparkListenerStageSubmitted(createStageInfo(0, 8)))
  val pendingUpdates =
    new mutable.HashMap[ResourceProfile, ExecutorAllocationManager.TargetNumUpdates]

  // Ramp the target up to 8, matching the current number of executors.
  assert(addExecutorsToTargetForDefaultProfile(manager, pendingUpdates) === 1)
  doUpdateRequest(manager, pendingUpdates.toMap, clock.getTimeMillis())
  assert(addExecutorsToTargetForDefaultProfile(manager, pendingUpdates) === 2)
  doUpdateRequest(manager, pendingUpdates.toMap, clock.getTimeMillis())
  for (execId <- 1 to 8) onExecutorAddedDefaultProfile(manager, execId.toString)
  for (i <- 1 to 8) post(SparkListenerTaskStart(0, 0, createTaskInfo(i, i, s"$i")))
  assert(manager.executorMonitor.executorCount === 8)
  assert(numExecutorsTargetForDefaultProfileId(manager) === 8)
  assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 8)
  // Removal is refused while the target equals the number of executors.
  assert(!removeExecutorDefaultProfile(manager, "1"))

  // Finish some tasks so the target drops below the executor count.
  for (i <- 1 to 3) {
    post(SparkListenerTaskEnd(0, 0, null, Success, createTaskInfo(i, i, s"$i"),
      new ExecutorMetrics, null))
  }
  adjustRequestedExecutors(manager)
  assert(manager.executorMonitor.executorCount === 8)
  assert(numExecutorsTargetForDefaultProfileId(manager) === 5)
  assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 5)
  assert(removeExecutorDefaultProfile(manager, "1"))
  assert(removeExecutorsDefaultProfile(manager, Seq("2", "3")) === Seq("2", "3"))
  for (id <- Seq("1", "2", "3")) onExecutorRemoved(manager, id)

  // The target never drops below minNumExecutors, so removal fails again.
  post(SparkListenerTaskEnd(0, 0, null, Success, createTaskInfo(4, 4, "4"),
    new ExecutorMetrics, null))
  assert(manager.executorMonitor.executorCount === 5)
  assert(numExecutorsTargetForDefaultProfileId(manager) === 5)
  assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 4)
  assert(!removeExecutorDefaultProfile(manager, "4")) // lower limit
  assert(addExecutorsToTargetForDefaultProfile(manager, pendingUpdates) === 0) // upper limit
}
test("interleaving add and remove") {
  // Use ManualClock to disable ExecutorAllocationManager.schedule()
  // in order to avoid unexpected update of target executors.
  val clock = new ManualClock()
  val manager = createManager(createConf(5, 12, 5), clock)
  post(SparkListenerStageSubmitted(createStageInfo(0, 1000)))
  val updatesNeeded =
    new mutable.HashMap[ResourceProfile, ExecutorAllocationManager.TargetNumUpdates]

  // Add a few executors (exponential ramp-up: +1 then +2).
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 1)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 2)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  onExecutorAddedDefaultProfile(manager, "1")
  onExecutorAddedDefaultProfile(manager, "2")
  onExecutorAddedDefaultProfile(manager, "3")
  onExecutorAddedDefaultProfile(manager, "4")
  onExecutorAddedDefaultProfile(manager, "5")
  onExecutorAddedDefaultProfile(manager, "6")
  onExecutorAddedDefaultProfile(manager, "7")
  onExecutorAddedDefaultProfile(manager, "8")
  assert(manager.executorMonitor.executorCount === 8)
  assert(numExecutorsTargetForDefaultProfileId(manager) === 8)

  // Remove when numTargetExecutors is equal to the current number of executors
  assert(!removeExecutorDefaultProfile(manager, "1"))
  assert(removeExecutorsDefaultProfile(manager, Seq("2", "3")) !== Seq("2", "3"))

  // Remove until limit
  onExecutorAddedDefaultProfile(manager, "9")
  onExecutorAddedDefaultProfile(manager, "10")
  onExecutorAddedDefaultProfile(manager, "11")
  onExecutorAddedDefaultProfile(manager, "12")
  assert(manager.executorMonitor.executorCount === 12)
  assert(numExecutorsTargetForDefaultProfileId(manager) === 8)
  assert(removeExecutorDefaultProfile(manager, "1"))
  assert(removeExecutorsDefaultProfile(manager, Seq("2", "3", "4")) === Seq("2", "3", "4"))
  assert(!removeExecutorDefaultProfile(manager, "5")) // lower limit reached
  assert(!removeExecutorDefaultProfile(manager, "6"))
  onExecutorRemoved(manager, "1")
  onExecutorRemoved(manager, "2")
  onExecutorRemoved(manager, "3")
  onExecutorRemoved(manager, "4")
  assert(manager.executorMonitor.executorCount === 8)

  // Add until limit
  assert(!removeExecutorDefaultProfile(manager, "7")) // still at lower limit
  // Fixed: the original line asserted `(manager, Seq("8")) !== Seq("8")`, a
  // tuple-vs-seq comparison that is trivially true and exercised nothing.
  // The intent (mirroring the check above) is that a batch removal request at
  // the lower limit must be refused.
  assert(removeExecutorsDefaultProfile(manager, Seq("8")) !== Seq("8"))
  onExecutorAddedDefaultProfile(manager, "13")
  onExecutorAddedDefaultProfile(manager, "14")
  onExecutorAddedDefaultProfile(manager, "15")
  onExecutorAddedDefaultProfile(manager, "16")
  assert(manager.executorMonitor.executorCount === 12)

  // Remove succeeds again, now that we are no longer at the lower limit
  assert(removeExecutorsDefaultProfile(manager, Seq("5", "6", "7")) === Seq("5", "6", "7"))
  assert(removeExecutorDefaultProfile(manager, "8"))
  assert(manager.executorMonitor.executorCount === 12)
  onExecutorRemoved(manager, "5")
  onExecutorRemoved(manager, "6")
  assert(manager.executorMonitor.executorCount === 10)
  assert(numExecutorsToAddForDefaultProfile(manager) === 4)
  onExecutorRemoved(manager, "9")
  onExecutorRemoved(manager, "10")
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 4) // at upper limit
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  onExecutorAddedDefaultProfile(manager, "17")
  onExecutorAddedDefaultProfile(manager, "18")
  assert(manager.executorMonitor.executorCount === 10)
  // still at upper limit
  assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 0)
  doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
  onExecutorAddedDefaultProfile(manager, "19")
  onExecutorAddedDefaultProfile(manager, "20")
  assert(manager.executorMonitor.executorCount === 12)
  assert(numExecutorsTargetForDefaultProfileId(manager) === 12)
}
test("starting/canceling add timer") {
  val clock = new ManualClock(8888L)
  val manager = createManager(createConf(2, 10, 2), clock = clock)

  // Deadline the add timer should carry when (re)armed at the current time.
  def expectedDeadline: Long =
    clock.nanoTime() + TimeUnit.SECONDS.toNanos(schedulerBacklogTimeout)

  // The timer arms on the first backlog event and is idempotent afterwards.
  assert(addTime(manager) === NOT_SET)
  onSchedulerBacklogged(manager)
  val firstDeadline = addTime(manager)
  assert(firstDeadline === expectedDeadline)
  clock.advance(100L)
  onSchedulerBacklogged(manager)
  assert(addTime(manager) === firstDeadline) // timer is already started
  clock.advance(200L)
  onSchedulerBacklogged(manager)
  assert(addTime(manager) === firstDeadline)
  onSchedulerQueueEmpty(manager)

  // Draining the queue cancels the timer; the next backlog event restarts it
  // with a fresh deadline.
  clock.advance(1000L)
  assert(addTime(manager) === NOT_SET)
  onSchedulerBacklogged(manager)
  val secondDeadline = addTime(manager)
  assert(secondDeadline === expectedDeadline)
  clock.advance(100L)
  onSchedulerBacklogged(manager)
  assert(addTime(manager) === secondDeadline) // timer is already started
  assert(addTime(manager) !== firstDeadline)
  assert(firstDeadline !== secondDeadline)
}
test("mock polling loop with no events") {
  val clock = new ManualClock(2020L)
  val manager = createManager(createConf(0, 20, 0), clock = clock)

  // With no scheduler events, polling must never add or remove executors,
  // no matter how much time passes.
  def assertNothingScheduled(): Unit = {
    assert(numExecutorsTargetForDefaultProfileId(manager) === 0)
    assert(executorsPendingToRemove(manager).isEmpty)
  }

  assertNothingScheduled()
  schedule(manager)
  assertNothingScheduled()
  for (delta <- Seq(100L, 1000L, 10000L)) {
    clock.advance(delta)
    schedule(manager)
    assertNothingScheduled()
  }
}
test("mock polling loop add behavior") {
// Drives the polling loop with a manual clock and verifies the exponential
// add policy: while the backlog persists the target grows by 1, 2, 4, ...,
// it resets when the queue drains, and it is capped at the maximum (20).
// Timeouts are configured in seconds, hence the `* 1000` when advancing the
// millisecond clock.
val clock = new ManualClock(2020L)
val manager = createManager(createConf(0, 20, 0), clock = clock)
post(SparkListenerStageSubmitted(createStageInfo(0, 1000)))
// Scheduler queue backlogged
onSchedulerBacklogged(manager)
clock.advance(schedulerBacklogTimeout * 1000 / 2)
schedule(manager)
assert(numExecutorsTargetForDefaultProfileId(manager) === 0) // timer not exceeded yet
clock.advance(schedulerBacklogTimeout * 1000)
schedule(manager)
assert(numExecutorsTargetForDefaultProfileId(manager) === 1) // first timer exceeded
clock.advance(sustainedSchedulerBacklogTimeout * 1000 / 2)
schedule(manager)
assert(numExecutorsTargetForDefaultProfileId(manager) === 1) // second timer not exceeded yet
clock.advance(sustainedSchedulerBacklogTimeout * 1000)
schedule(manager)
assert(numExecutorsTargetForDefaultProfileId(manager) === 1 + 2) // second timer exceeded
clock.advance(sustainedSchedulerBacklogTimeout * 1000)
schedule(manager)
assert(numExecutorsTargetForDefaultProfileId(manager) === 1 + 2 + 4) // third timer exceeded
// Scheduler queue drained: the backlog timer is canceled and the target
// stops growing even as time passes.
onSchedulerQueueEmpty(manager)
clock.advance(sustainedSchedulerBacklogTimeout * 1000)
schedule(manager)
assert(numExecutorsTargetForDefaultProfileId(manager) === 7) // timer is canceled
clock.advance(sustainedSchedulerBacklogTimeout * 1000)
schedule(manager)
assert(numExecutorsTargetForDefaultProfileId(manager) === 7)
// Scheduler queue backlogged again: the ramp-up restarts from +1.
onSchedulerBacklogged(manager)
clock.advance(schedulerBacklogTimeout * 1000)
schedule(manager)
assert(numExecutorsTargetForDefaultProfileId(manager) === 7 + 1) // timer restarted
clock.advance(sustainedSchedulerBacklogTimeout * 1000)
schedule(manager)
assert(numExecutorsTargetForDefaultProfileId(manager) === 7 + 1 + 2)
clock.advance(sustainedSchedulerBacklogTimeout * 1000)
schedule(manager)
assert(numExecutorsTargetForDefaultProfileId(manager) === 7 + 1 + 2 + 4)
clock.advance(sustainedSchedulerBacklogTimeout * 1000)
schedule(manager)
assert(numExecutorsTargetForDefaultProfileId(manager) === 20) // limit reached
}
test("mock polling loop remove behavior") {
// Drives the polling loop with a manual clock and verifies that only idle
// executors that have exceeded the idle timeout are queued for removal, and
// that the configured minimum of 1 executor is always retained.
val clock = new ManualClock(2020L)
val manager = createManager(createConf(1, 20, 1), clock = clock)
// Remove idle executors on timeout
onExecutorAddedDefaultProfile(manager, "executor-1")
onExecutorAddedDefaultProfile(manager, "executor-2")
onExecutorAddedDefaultProfile(manager, "executor-3")
assert(executorsPendingToRemove(manager).isEmpty)
// idle threshold not reached yet
clock.advance(executorIdleTimeout * 1000 / 2)
schedule(manager)
assert(manager.executorMonitor.timedOutExecutors().isEmpty)
assert(executorsPendingToRemove(manager).isEmpty)
// idle threshold exceeded
clock.advance(executorIdleTimeout * 1000)
assert(manager.executorMonitor.timedOutExecutors().size === 3)
schedule(manager)
assert(executorsPendingToRemove(manager).size === 2) // limit reached (1 executor remaining)
// Mark a subset as busy - only idle executors should be removed
onExecutorAddedDefaultProfile(manager, "executor-4")
onExecutorAddedDefaultProfile(manager, "executor-5")
onExecutorAddedDefaultProfile(manager, "executor-6")
onExecutorAddedDefaultProfile(manager, "executor-7")
assert(manager.executorMonitor.executorCount === 7)
assert(executorsPendingToRemove(manager).size === 2) // 2 pending to be removed
onExecutorBusy(manager, "executor-4")
onExecutorBusy(manager, "executor-5")
onExecutorBusy(manager, "executor-6") // 3 busy and 2 idle (of the 5 active ones)
// after scheduling, the previously timed out executor should be removed, since
// there are new active ones.
schedule(manager)
assert(executorsPendingToRemove(manager).size === 3)
// advance the clock so that idle executors should time out and move to the pending list
clock.advance(executorIdleTimeout * 1000)
schedule(manager)
// Busy executors must never be queued for removal even after the timeout.
assert(executorsPendingToRemove(manager).size === 4)
assert(!executorsPendingToRemove(manager).contains("executor-4"))
assert(!executorsPendingToRemove(manager).contains("executor-5"))
assert(!executorsPendingToRemove(manager).contains("executor-6"))
// Busy executors are now idle and should be removed
onExecutorIdle(manager, "executor-4")
onExecutorIdle(manager, "executor-5")
onExecutorIdle(manager, "executor-6")
// Becoming idle restarts the idle timer, so nothing is removed immediately.
schedule(manager)
assert(executorsPendingToRemove(manager).size === 4)
clock.advance(executorIdleTimeout * 1000)
schedule(manager)
assert(executorsPendingToRemove(manager).size === 6) // limit reached (1 executor remaining)
}
test("mock polling loop remove with decommissioning") {
// Same scenario as "mock polling loop remove behavior", but with
// decommissioning enabled: timed-out idle executors go to the
// decommissioning set instead of the pending-to-remove set, which must
// therefore stay empty throughout.
val clock = new ManualClock(2020L)
val manager = createManager(createConf(1, 20, 1, true), clock = clock)
// Remove idle executors on timeout
onExecutorAddedDefaultProfile(manager, "executor-1")
onExecutorAddedDefaultProfile(manager, "executor-2")
onExecutorAddedDefaultProfile(manager, "executor-3")
assert(executorsDecommissioning(manager).isEmpty)
assert(executorsPendingToRemove(manager).isEmpty)
// idle threshold not reached yet
clock.advance(executorIdleTimeout * 1000 / 2)
schedule(manager)
assert(manager.executorMonitor.timedOutExecutors().isEmpty)
assert(executorsPendingToRemove(manager).isEmpty)
assert(executorsDecommissioning(manager).isEmpty)
// idle threshold exceeded
clock.advance(executorIdleTimeout * 1000)
assert(manager.executorMonitor.timedOutExecutors().size === 3)
schedule(manager)
assert(executorsPendingToRemove(manager).isEmpty) // limit reached (1 executor remaining)
assert(executorsDecommissioning(manager).size === 2) // limit reached (1 executor remaining)
// Mark a subset as busy - only idle executors should be removed
onExecutorAddedDefaultProfile(manager, "executor-4")
onExecutorAddedDefaultProfile(manager, "executor-5")
onExecutorAddedDefaultProfile(manager, "executor-6")
onExecutorAddedDefaultProfile(manager, "executor-7")
assert(manager.executorMonitor.executorCount === 7)
assert(executorsPendingToRemove(manager).isEmpty) // no pending to be removed
assert(executorsDecommissioning(manager).size === 2) // 2 decommissioning
onExecutorBusy(manager, "executor-4")
onExecutorBusy(manager, "executor-5")
onExecutorBusy(manager, "executor-6") // 3 busy and 2 idle (of the 5 active ones)
// after scheduling, the previously timed out executor should be removed, since
// there are new active ones.
schedule(manager)
assert(executorsDecommissioning(manager).size === 3)
// advance the clock so that idle executors should time out and move to the pending list
clock.advance(executorIdleTimeout * 1000)
schedule(manager)
// Busy executors must never be decommissioned even after the timeout.
assert(executorsPendingToRemove(manager).size === 0)
assert(executorsDecommissioning(manager).size === 4)
assert(!executorsDecommissioning(manager).contains("executor-4"))
assert(!executorsDecommissioning(manager).contains("executor-5"))
assert(!executorsDecommissioning(manager).contains("executor-6"))
// Busy executors are now idle and should be removed
onExecutorIdle(manager, "executor-4")
onExecutorIdle(manager, "executor-5")
onExecutorIdle(manager, "executor-6")
// Becoming idle restarts the idle timer, so nothing happens immediately.
schedule(manager)
assert(executorsDecommissioning(manager).size === 4)
clock.advance(executorIdleTimeout * 1000)
schedule(manager)
assert(executorsDecommissioning(manager).size === 6) // limit reached (1 executor remaining)
}
test("listeners trigger add executors correctly") {
  val manager = createManager(createConf(1, 20, 1))
  assert(addTime(manager) === NOT_SET)

  // Submitting a stage arms the add timer.
  val numTasks = 10
  post(SparkListenerStageSubmitted(createStageInfo(0, numTasks)))
  assert(addTime(manager) !== NOT_SET)

  // Starting only some of the tasks leaves the timer armed.
  val taskInfos = (0 until numTasks).map { i => createTaskInfo(i, i, "executor-1") }
  taskInfos.tail.foreach { info => post(SparkListenerTaskStart(0, 0, info)) }
  assert(addTime(manager) !== NOT_SET)

  // Once every task is running, the timer is canceled.
  post(SparkListenerTaskStart(0, 0, taskInfos.head))
  assert(addTime(manager) === NOT_SET)

  // With two stages outstanding, the timer stays armed until all tasks in
  // BOTH stages have started.
  post(SparkListenerStageSubmitted(createStageInfo(1, numTasks)))
  post(SparkListenerStageSubmitted(createStageInfo(2, numTasks)))
  assert(addTime(manager) !== NOT_SET)
  taskInfos.foreach { info => post(SparkListenerTaskStart(1, 0, info)) }
  assert(addTime(manager) !== NOT_SET)
  taskInfos.foreach { info => post(SparkListenerTaskStart(2, 0, info)) }
  assert(addTime(manager) === NOT_SET)
}
// After a stage completes and the target drops to 0, resubmitting work must
// ramp the target up from the number of *running* executors (15), not restart
// the exponential 1/2/4/8 ramp from scratch.
test("avoid ramp up when target < running executors") {
val manager = createManager(createConf(0, 100000, 0))
val stage1 = createStageInfo(0, 1000)
post(SparkListenerStageSubmitted(stage1))
val updatesNeeded =
new mutable.HashMap[ResourceProfile, ExecutorAllocationManager.TargetNumUpdates]
// Exponential ramp-up: +1, +2, +4, +8 => target 15.
assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 1)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 2)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 4)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 8)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
assert(numExecutorsTargetForDefaultProfileId(manager) === 15)
(0 until 15).foreach { i =>
onExecutorAddedDefaultProfile(manager, s"executor-$i")
}
assert(manager.executorMonitor.executorCount === 15)
post(SparkListenerStageCompleted(stage1))
adjustRequestedExecutors(manager)
assert(numExecutorsTargetForDefaultProfileId(manager) === 0)
post(SparkListenerStageSubmitted(createStageInfo(1, 1000)))
// One add step on top of the 15 live executors yields 16, not 1.
addExecutorsToTargetForDefaultProfile(manager, updatesNeeded)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
assert(numExecutorsTargetForDefaultProfileId(manager) === 16)
}
// The configured initial executor count (3) must be kept until the first job
// arrives; only then may the target shrink to what the job actually needs (2).
test("avoid ramp down initial executors until first job is submitted") {
val clock = new ManualClock(10000L)
val manager = createManager(createConf(2, 5, 3), clock = clock)
// Verify the initial number of executors
assert(numExecutorsTargetForDefaultProfileId(manager) === 3)
schedule(manager)
// Verify whether the initial number of executors is kept with no pending tasks
assert(numExecutorsTargetForDefaultProfileId(manager) === 3)
post(SparkListenerStageSubmitted(createStageInfo(1, 2)))
clock.advance(100L)
assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) === 2)
schedule(manager)
// Verify that current number of executors should be ramp down when first job is submitted
assert(numExecutorsTargetForDefaultProfileId(manager) === 2)
}
// With no job submitted, the initial executor count is kept until the idle
// timeout elapses; the target then falls to the configured minimum of 2.
test("avoid ramp down initial executors until idle executor is timeout") {
val clock = new ManualClock(10000L)
val manager = createManager(createConf(2, 5, 3), clock = clock)
// Verify the initial number of executors
assert(numExecutorsTargetForDefaultProfileId(manager) === 3)
schedule(manager)
// Verify the initial number of executors is kept when no pending tasks
assert(numExecutorsTargetForDefaultProfileId(manager) === 3)
(0 until 3).foreach { i =>
onExecutorAddedDefaultProfile(manager, s"executor-$i")
}
clock.advance(executorIdleTimeout * 1000)
assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) === 0)
schedule(manager)
// Once the executors time out as idle, the target is recalculated down to the
// configured minimum.
assert(numExecutorsTargetForDefaultProfileId(manager) === 2)
}
// Locality-aware task counts and per-host task tallies must aggregate across
// concurrently submitted stages and shrink again when a stage completes.
test("get pending task number and related locality preference") {
val manager = createManager(createConf(2, 5, 3))
val localityPreferences1 = Seq(
Seq(TaskLocation("host1"), TaskLocation("host2"), TaskLocation("host3")),
Seq(TaskLocation("host1"), TaskLocation("host2"), TaskLocation("host4")),
Seq(TaskLocation("host2"), TaskLocation("host3"), TaskLocation("host4")),
Seq.empty,
Seq.empty
)
val stageInfo1 = createStageInfo(1, 5, localityPreferences1)
post(SparkListenerStageSubmitted(stageInfo1))
// 3 of the 5 tasks carry locality preferences.
assert(localityAwareTasksForDefaultProfile(manager) === 3)
assert(hostToLocalTaskCount(manager) ===
Map("host1" -> 2, "host2" -> 3, "host3" -> 2, "host4" -> 2))
val localityPreferences2 = Seq(
Seq(TaskLocation("host2"), TaskLocation("host3"), TaskLocation("host5")),
Seq(TaskLocation("host3"), TaskLocation("host4"), TaskLocation("host5")),
Seq.empty
)
val stageInfo2 = createStageInfo(2, 3, localityPreferences2)
post(SparkListenerStageSubmitted(stageInfo2))
// Counts from both pending stages are merged.
assert(localityAwareTasksForDefaultProfile(manager) === 5)
assert(hostToLocalTaskCount(manager) ===
Map("host1" -> 2, "host2" -> 4, "host3" -> 4, "host4" -> 3, "host5" -> 2))
post(SparkListenerStageCompleted(stageInfo1))
// Stage 1's contribution is dropped once it completes.
assert(localityAwareTasksForDefaultProfile(manager) === 2)
assert(hostToLocalTaskCount(manager) ===
Map("host2" -> 1, "host3" -> 2, "host4" -> 1, "host5" -> 2))
}
// A failed task will be resubmitted, so the executors-needed estimate must not
// drop when a task ends with a failure (SPARK-8366).
test("SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks") {
val manager = createManager(createConf())
assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) === 0)
post(SparkListenerStageSubmitted(createStageInfo(0, 1)))
assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) === 1)
val taskInfo = createTaskInfo(1, 1, "executor-1")
post(SparkListenerTaskStart(0, 0, taskInfo))
assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) === 1)
// If the task is failed, we expect it to be resubmitted later.
val taskEndReason = ExceptionFailure(null, null, null, null, None)
post(SparkListenerTaskEnd(0, 0, null, taskEndReason, taskInfo, new ExecutorMetrics, null))
assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) === 1)
}
// reset() must return the manager to its initial state (target = min, add step
// = 1, no tracked executors, no pending removals) in three scenarios: after
// un-acknowledged add requests, after executors registered, and while
// executors are pending removal.
test("reset the state of allocation manager") {
val manager = createManager(createConf())
assert(numExecutorsTargetForDefaultProfileId(manager) === 1)
assert(numExecutorsToAddForDefaultProfile(manager) === 1)
val updatesNeeded =
new mutable.HashMap[ResourceProfile, ExecutorAllocationManager.TargetNumUpdates]
// Allocation manager is reset when adding executor requests are sent without reporting back
// executor added.
post(SparkListenerStageSubmitted(createStageInfo(0, 10)))
assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 1)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
assert(numExecutorsTargetForDefaultProfileId(manager) === 2)
assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 2)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
assert(numExecutorsTargetForDefaultProfileId(manager) === 4)
assert(addExecutorsToTargetForDefaultProfile(manager, updatesNeeded) === 1)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
assert(numExecutorsTargetForDefaultProfileId(manager) === 5)
manager.reset()
assert(numExecutorsTargetForDefaultProfileId(manager) === 1)
assert(numExecutorsToAddForDefaultProfile(manager) === 1)
assert(manager.executorMonitor.executorCount === 0)
// Allocation manager is reset when executors are added.
post(SparkListenerStageSubmitted(createStageInfo(0, 10)))
addExecutorsToTargetForDefaultProfile(manager, updatesNeeded)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
addExecutorsToTargetForDefaultProfile(manager, updatesNeeded)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
addExecutorsToTargetForDefaultProfile(manager, updatesNeeded)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
assert(numExecutorsTargetForDefaultProfileId(manager) === 5)
onExecutorAddedDefaultProfile(manager, "first")
onExecutorAddedDefaultProfile(manager, "second")
onExecutorAddedDefaultProfile(manager, "third")
onExecutorAddedDefaultProfile(manager, "fourth")
onExecutorAddedDefaultProfile(manager, "fifth")
assert(manager.executorMonitor.executorCount === 5)
// Cluster manager lost will make all the live executors lost, so here simulate this behavior
onExecutorRemoved(manager, "first")
onExecutorRemoved(manager, "second")
onExecutorRemoved(manager, "third")
onExecutorRemoved(manager, "fourth")
onExecutorRemoved(manager, "fifth")
manager.reset()
assert(numExecutorsTargetForDefaultProfileId(manager) === 1)
assert(numExecutorsToAddForDefaultProfile(manager) === 1)
assert(manager.executorMonitor.executorCount === 0)
// Allocation manager is reset when executors are pending to remove
addExecutorsToTargetForDefaultProfile(manager, updatesNeeded)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
addExecutorsToTargetForDefaultProfile(manager, updatesNeeded)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
addExecutorsToTargetForDefaultProfile(manager, updatesNeeded)
doUpdateRequest(manager, updatesNeeded.toMap, clock.getTimeMillis())
assert(numExecutorsTargetForDefaultProfileId(manager) === 5)
onExecutorAddedDefaultProfile(manager, "first")
onExecutorAddedDefaultProfile(manager, "second")
onExecutorAddedDefaultProfile(manager, "third")
onExecutorAddedDefaultProfile(manager, "fourth")
onExecutorAddedDefaultProfile(manager, "fifth")
onExecutorAddedDefaultProfile(manager, "sixth")
onExecutorAddedDefaultProfile(manager, "seventh")
onExecutorAddedDefaultProfile(manager, "eighth")
assert(manager.executorMonitor.executorCount === 8)
removeExecutorDefaultProfile(manager, "first")
removeExecutorsDefaultProfile(manager, Seq("second", "third"))
assert(executorsPendingToRemove(manager) === Set("first", "second", "third"))
assert(manager.executorMonitor.executorCount === 8)
// Cluster manager lost will make all the live executors lost, so here simulate this behavior
onExecutorRemoved(manager, "first")
onExecutorRemoved(manager, "second")
onExecutorRemoved(manager, "third")
onExecutorRemoved(manager, "fourth")
onExecutorRemoved(manager, "fifth")
manager.reset()
assert(numExecutorsTargetForDefaultProfileId(manager) === 1)
assert(numExecutorsToAddForDefaultProfile(manager) === 1)
assert(executorsPendingToRemove(manager) === Set.empty)
assert(manager.executorMonitor.executorCount === 0)
}
// Killing an idle executor must not also lower the requested target: the two
// decisions are independent (SPARK-23365).
test("SPARK-23365 Don't update target num executors when killing idle executors") {
val clock = new ManualClock()
val manager = createManager(
createConf(1, 2, 1),
clock = clock)
when(client.requestTotalExecutors(any(), any(), any())).thenReturn(true)
// test setup -- job with 2 tasks, scale up to two executors
assert(numExecutorsTargetForDefaultProfileId(manager) === 1)
post(SparkListenerExecutorAdded(
clock.getTimeMillis(), "executor-1", new ExecutorInfo("host1", 1, Map.empty, Map.empty)))
post(SparkListenerStageSubmitted(createStageInfo(0, 2)))
clock.advance(1000)
manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
assert(numExecutorsTargetForDefaultProfileId(manager) === 2)
val taskInfo0 = createTaskInfo(0, 0, "executor-1")
post(SparkListenerTaskStart(0, 0, taskInfo0))
post(SparkListenerExecutorAdded(
clock.getTimeMillis(), "executor-2", new ExecutorInfo("host1", 1, Map.empty, Map.empty)))
val taskInfo1 = createTaskInfo(1, 1, "executor-2")
post(SparkListenerTaskStart(0, 0, taskInfo1))
assert(numExecutorsTargetForDefaultProfileId(manager) === 2)
// have one task finish -- we should adjust the target number of executors down
// but we should *not* kill any executors yet
post(SparkListenerTaskEnd(0, 0, null, Success, taskInfo0, new ExecutorMetrics, null))
assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) === 1)
assert(numExecutorsTargetForDefaultProfileId(manager) === 2)
clock.advance(1000)
manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime())
assert(numExecutorsTargetForDefaultProfileId(manager) === 1)
assert(manager.executorMonitor.executorsPendingToRemove().isEmpty)
// now we cross the idle timeout for executor-1, so we kill it. the really important
// thing here is that we do *not* ask the executor allocation client to adjust the target
// number of executors down
clock.advance(3000)
schedule(manager)
assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) === 1)
assert(numExecutorsTargetForDefaultProfileId(manager) === 1)
// here's the important verify -- we did kill the executors, but did not adjust the target count
assert(manager.executorMonitor.executorsPendingToRemove() === Set("executor-1"))
}
// After all executors idle past the timeout, the target must fall back to the
// configured minimum of 1 (SPARK-26758).
test("SPARK-26758 check executor target number after idle time out ") {
val clock = new ManualClock(10000L)
val manager = createManager(createConf(1, 5, 3), clock = clock)
assert(numExecutorsTargetForDefaultProfileId(manager) === 3)
post(SparkListenerExecutorAdded(
clock.getTimeMillis(), "executor-1", new ExecutorInfo("host1", 1, Map.empty)))
post(SparkListenerExecutorAdded(
clock.getTimeMillis(), "executor-2", new ExecutorInfo("host1", 2, Map.empty)))
post(SparkListenerExecutorAdded(
clock.getTimeMillis(), "executor-3", new ExecutorInfo("host1", 3, Map.empty)))
// make all the executors as idle, so that it will be killed
clock.advance(executorIdleTimeout * 1000)
schedule(manager)
// once the schedule is run target executor number should be 1
assert(numExecutorsTargetForDefaultProfileId(manager) === 1)
}
/**
 * Builds a SparkConf with dynamic allocation enabled and the small timeouts
 * this suite relies on, plus an effectively-disabled allocation schedule so
 * the test thread fully controls when scheduling happens.
 */
private def createConf(
    minExecutors: Int = 1,
    maxExecutors: Int = 5,
    initialExecutors: Int = 1,
    decommissioningEnabled: Boolean = false): SparkConf = {
  val conf = new SparkConf()
  conf.set(config.DYN_ALLOCATION_ENABLED, true)
  conf.set(config.DYN_ALLOCATION_MIN_EXECUTORS, minExecutors)
  conf.set(config.DYN_ALLOCATION_MAX_EXECUTORS, maxExecutors)
  conf.set(config.DYN_ALLOCATION_INITIAL_EXECUTORS, initialExecutors)
  conf.set(config.DYN_ALLOCATION_SCHEDULER_BACKLOG_TIMEOUT.key,
    s"${schedulerBacklogTimeout.toString}s")
  conf.set(config.DYN_ALLOCATION_SUSTAINED_SCHEDULER_BACKLOG_TIMEOUT.key,
    s"${sustainedSchedulerBacklogTimeout.toString}s")
  conf.set(config.DYN_ALLOCATION_EXECUTOR_IDLE_TIMEOUT.key, s"${executorIdleTimeout.toString}s")
  conf.set(config.SHUFFLE_SERVICE_ENABLED, true)
  conf.set(config.DYN_ALLOCATION_TESTING, true)
  // SPARK-22864/SPARK-32287: effectively disable the allocation schedule for the tests so that
  // we won't result in the race condition between thread "spark-dynamic-executor-allocation"
  // and thread "pool-1-thread-1-ScalaTest-running".
  conf.set(TEST_DYNAMIC_ALLOCATION_SCHEDULE_ENABLED, false)
  conf.set(DECOMMISSION_ENABLED, decommissioningEnabled)
  conf
}
/**
 * Creates, registers and starts an ExecutorAllocationManager for `conf`.
 * The default resource profile must be re-initialised from the conf before
 * the ResourceProfileManager and allocation manager are constructed.
 */
private def createManager(
    conf: SparkConf,
    clock: Clock = new SystemClock()): ExecutorAllocationManager = {
  ResourceProfile.reInitDefaultProfile(conf)
  rpManager = new ResourceProfileManager(conf, listenerBus)
  val allocationManager = new ExecutorAllocationManager(
    client, listenerBus, conf, clock = clock, resourceProfileManager = rpManager)
  // Track it so the suite can shut it down after each test.
  managers += allocationManager
  allocationManager.start()
  allocationManager
}
// Shared ExecutorInfo used when simulating executor registration under the
// default resource profile: one core on "host1", no custom resources.
private val execInfo = new ExecutorInfo("host1", 1, Map.empty,
Map.empty, Map.empty, DEFAULT_RESOURCE_PROFILE_ID)
/** Simulates registration of executor `id` under the default resource profile. */
private def onExecutorAddedDefaultProfile(
    manager: ExecutorAllocationManager,
    id: String): Unit =
  post(SparkListenerExecutorAdded(0L, id, execInfo))
/** Simulates registration of executor `id` under resource profile `rp`. */
private def onExecutorAdded(
    manager: ExecutorAllocationManager,
    id: String,
    rp: ResourceProfile): Unit = {
  val coreCount = rp.getExecutorCores.getOrElse(1)
  val info = new ExecutorInfo("host1", coreCount, Map.empty, Map.empty, Map.empty, rp.id)
  post(SparkListenerExecutorAdded(0L, id, info))
}
/** Simulates removal of executor `id`; `reason` may be left null. */
private def onExecutorRemoved(
    manager: ExecutorAllocationManager,
    id: String,
    reason: String = null): Unit =
  post(SparkListenerExecutorRemoved(0L, id, reason))
/** Marks executor `id` as busy by starting a task on it. */
private def onExecutorBusy(manager: ExecutorAllocationManager, id: String): Unit = {
  val taskInfo =
    new TaskInfo(1, 1, 1, 0, id, "foo.example.com", TaskLocality.PROCESS_LOCAL, false)
  post(SparkListenerTaskStart(1, 1, taskInfo))
}
/** Marks executor `id` as idle again by finishing the task onExecutorBusy started. */
private def onExecutorIdle(manager: ExecutorAllocationManager, id: String): Unit = {
  val taskInfo =
    new TaskInfo(1, 1, 1, 0, id, "foo.example.com", TaskLocality.PROCESS_LOCAL, false)
  taskInfo.markFinished(TaskState.FINISHED, 1)
  post(SparkListenerTaskEnd(1, 1, "foo", Success, taskInfo, new ExecutorMetrics, null))
}
/**
 * Requests removal of a single executor under the default profile.
 * @return true iff the manager reported this executor as the one removed.
 */
private def removeExecutorDefaultProfile(
    manager: ExecutorAllocationManager,
    executorId: String): Boolean = {
  val executorsRemoved = removeExecutorsDefaultProfile(manager, Seq(executorId))
  // headOption.contains avoids the unsafe apply(0) on a possibly-empty Seq.
  executorsRemoved.headOption.contains(executorId)
}
/**
 * Requests removal of a single executor under resource profile `rpId`.
 * @return true iff the manager reported this executor as the one removed.
 */
private def removeExecutor(
    manager: ExecutorAllocationManager,
    executorId: String,
    rpId: Int): Boolean = {
  val executorsRemoved = removeExecutors(manager, Seq((executorId, rpId)))
  // headOption.contains avoids the unsafe apply(0) on a possibly-empty Seq.
  executorsRemoved.headOption.contains(executorId)
}
/** Executor ids the monitor currently has queued for removal. */
private def executorsPendingToRemove(manager: ExecutorAllocationManager): Set[String] =
  manager.executorMonitor.executorsPendingToRemove()
/** Executor ids the monitor currently considers to be decommissioning. */
private def executorsDecommissioning(manager: ExecutorAllocationManager): Set[String] =
  manager.executorMonitor.executorsDecommissioning()
}
/**
* Helper methods for testing ExecutorAllocationManager.
* This includes methods to access private methods and fields in ExecutorAllocationManager.
*/
// Companion helper object: exposes the private state and methods of
// ExecutorAllocationManager to the suite via ScalaTest's PrivateMethodTester.
// The Symbol(...) names below must match the manager's private member names
// exactly, so the code is deliberately left untouched by refactorings.
private object ExecutorAllocationManagerSuite extends PrivateMethodTester {
// Timeouts (in seconds) wired into createConf; kept tiny so tests can advance
// a ManualClock past them cheaply.
private val schedulerBacklogTimeout = 1L
private val sustainedSchedulerBacklogTimeout = 2L
private val executorIdleTimeout = 3L
// Builds a StageInfo with the given id and task count; locality preferences
// and resource profile default to empty / the default profile.
private def createStageInfo(
stageId: Int,
numTasks: Int,
taskLocalityPreferences: Seq[Seq[TaskLocation]] = Seq.empty,
attemptId: Int = 0,
rp: ResourceProfile = defaultProfile
): StageInfo = {
new StageInfo(stageId, attemptId, "name", numTasks, Seq.empty, Seq.empty, "no details",
taskLocalityPreferences = taskLocalityPreferences, resourceProfileId = rp.id)
}
// Builds a TaskInfo placed on `executorId` with ANY locality.
private def createTaskInfo(
taskId: Int,
taskIndex: Int,
executorId: String,
speculative: Boolean = false): TaskInfo = {
new TaskInfo(taskId, taskIndex, 0, 0, executorId, "", TaskLocality.ANY, speculative)
}
/* ------------------------------------------------------- *
| Helper methods for accessing private methods and fields |
* ------------------------------------------------------- */
// Reflection handles; each Symbol string names a private member of
// ExecutorAllocationManager.
private val _numExecutorsToAddPerResourceProfileId =
PrivateMethod[mutable.HashMap[Int, Int]](
Symbol("numExecutorsToAddPerResourceProfileId"))
private val _numExecutorsTargetPerResourceProfileId =
PrivateMethod[mutable.HashMap[Int, Int]](
Symbol("numExecutorsTargetPerResourceProfileId"))
private val _maxNumExecutorsNeededPerResourceProfile =
PrivateMethod[Int](Symbol("maxNumExecutorsNeededPerResourceProfile"))
private val _addTime = PrivateMethod[Long](Symbol("addTime"))
private val _schedule = PrivateMethod[Unit](Symbol("schedule"))
private val _doUpdateRequest = PrivateMethod[Unit](Symbol("doUpdateRequest"))
private val _updateAndSyncNumExecutorsTarget =
PrivateMethod[Int](Symbol("updateAndSyncNumExecutorsTarget"))
private val _addExecutorsToTarget = PrivateMethod[Int](Symbol("addExecutorsToTarget"))
private val _removeExecutors = PrivateMethod[Seq[String]](Symbol("removeExecutors"))
private val _onSchedulerBacklogged = PrivateMethod[Unit](Symbol("onSchedulerBacklogged"))
private val _onSchedulerQueueEmpty = PrivateMethod[Unit](Symbol("onSchedulerQueueEmpty"))
private val _localityAwareTasksPerResourceProfileId =
PrivateMethod[mutable.HashMap[Int, Int]](Symbol("numLocalityAwareTasksPerResourceProfileId"))
private val _rpIdToHostToLocalTaskCount =
PrivateMethod[Map[Int, Map[String, Int]]](Symbol("rpIdToHostToLocalTaskCount"))
private val _onSpeculativeTaskSubmitted =
PrivateMethod[Unit](Symbol("onSpeculativeTaskSubmitted"))
private val _totalRunningTasksPerResourceProfile =
PrivateMethod[Int](Symbol("totalRunningTasksPerResourceProfile"))
// Default profile built from an empty conf; tests targeting "the default
// profile" all go through this instance's id.
private val defaultProfile = ResourceProfile.getOrCreateDefaultProfile(new SparkConf)
// Current exponential add step for the default profile.
private def numExecutorsToAddForDefaultProfile(manager: ExecutorAllocationManager): Int = {
numExecutorsToAdd(manager, defaultProfile)
}
private def numExecutorsToAdd(
manager: ExecutorAllocationManager,
rp: ResourceProfile): Int = {
val nmap = manager invokePrivate _numExecutorsToAddPerResourceProfileId()
nmap(rp.id)
}
// Drives one target-sync cycle at timestamp `now`.
private def updateAndSyncNumExecutorsTarget(
manager: ExecutorAllocationManager,
now: Long): Unit = {
manager invokePrivate _updateAndSyncNumExecutorsTarget(now)
}
// Current requested executor target for the default profile.
private def numExecutorsTargetForDefaultProfileId(manager: ExecutorAllocationManager): Int = {
numExecutorsTarget(manager, defaultProfile.id)
}
private def numExecutorsTarget(
manager: ExecutorAllocationManager,
rpId: Int): Int = {
val numMap = manager invokePrivate _numExecutorsTargetPerResourceProfileId()
numMap(rpId)
}
private def addExecutorsToTargetForDefaultProfile(
manager: ExecutorAllocationManager,
updatesNeeded: mutable.HashMap[ResourceProfile,
ExecutorAllocationManager.TargetNumUpdates]
): Int = {
addExecutorsToTarget(manager, updatesNeeded, defaultProfile)
}
// Asks the manager to grow the target for `rp`, accumulating the pending
// update into `updatesNeeded`; returns the delta added.
private def addExecutorsToTarget(
manager: ExecutorAllocationManager,
updatesNeeded: mutable.HashMap[ResourceProfile,
ExecutorAllocationManager.TargetNumUpdates],
rp: ResourceProfile
): Int = {
val maxNumExecutorsNeeded =
manager invokePrivate _maxNumExecutorsNeededPerResourceProfile(rp.id)
manager invokePrivate
_addExecutorsToTarget(maxNumExecutorsNeeded, rp.id, updatesNeeded)
}
// Timestamp at which the manager will next add executors (NOT_SET if idle).
private def addTime(manager: ExecutorAllocationManager): Long = {
manager invokePrivate _addTime()
}
// Sends the accumulated target updates to the cluster manager.
private def doUpdateRequest(
manager: ExecutorAllocationManager,
updates: Map[ResourceProfile, ExecutorAllocationManager.TargetNumUpdates],
now: Long): Unit = {
manager invokePrivate _doUpdateRequest(updates, now)
}
// Runs one scheduling pass (add/remove decisions) synchronously.
private def schedule(manager: ExecutorAllocationManager): Unit = {
manager invokePrivate _schedule()
}
private def maxNumExecutorsNeededPerResourceProfile(
manager: ExecutorAllocationManager,
rp: ResourceProfile): Int = {
manager invokePrivate _maxNumExecutorsNeededPerResourceProfile(rp.id)
}
// Re-syncs the target with current demand at time 0.
private def adjustRequestedExecutors(manager: ExecutorAllocationManager): Int = {
manager invokePrivate _updateAndSyncNumExecutorsTarget(0L)
}
private def removeExecutorsDefaultProfile(
manager: ExecutorAllocationManager,
ids: Seq[String]): Seq[String] = {
val idsAndProfileIds = ids.map((_, defaultProfile.id))
manager invokePrivate _removeExecutors(idsAndProfileIds)
}
private def removeExecutors(
manager: ExecutorAllocationManager,
ids: Seq[(String, Int)]): Seq[String] = {
manager invokePrivate _removeExecutors(ids)
}
private def onSchedulerBacklogged(manager: ExecutorAllocationManager): Unit = {
manager invokePrivate _onSchedulerBacklogged()
}
private def onSchedulerQueueEmpty(manager: ExecutorAllocationManager): Unit = {
manager invokePrivate _onSchedulerQueueEmpty()
}
private def onSpeculativeTaskSubmitted(manager: ExecutorAllocationManager, id: String) : Unit = {
manager invokePrivate _onSpeculativeTaskSubmitted(id)
}
// Number of locality-aware pending tasks tracked for the default profile.
private def localityAwareTasksForDefaultProfile(manager: ExecutorAllocationManager): Int = {
val localMap = manager invokePrivate _localityAwareTasksPerResourceProfileId()
localMap(defaultProfile.id)
}
private def totalRunningTasksPerResourceProfile(manager: ExecutorAllocationManager): Int = {
manager invokePrivate _totalRunningTasksPerResourceProfile(defaultProfile.id)
}
// Per-host pending task counts for the default profile.
private def hostToLocalTaskCount(
manager: ExecutorAllocationManager): Map[String, Int] = {
val rpIdToHostLocal = manager invokePrivate _rpIdToHostToLocalTaskCount()
rpIdToHostLocal(defaultProfile.id)
}
// NOTE(review): the `manager` parameter is unused; this always returns the
// default profile id.
private def getResourceProfileIdOfExecutor(manager: ExecutorAllocationManager): Int = {
defaultProfile.id
}
}
| maropu/spark | core/src/test/scala/org/apache/spark/ExecutorAllocationManagerSuite.scala | Scala | apache-2.0 | 95,815 |
package com.aergonaut.lifeaquatic.block.vat
import java.util
import cofh.api.block.IBlockDebug
import cofh.lib.util.UtilLiquidMover
import com.aergonaut.lib.block.BlockSide
import com.aergonaut.lifeaquatic.LifeAquatic
import com.aergonaut.lifeaquatic.block.BlockBase
import com.aergonaut.lifeaquatic.constants.{Guis, Textures}
import com.aergonaut.lifeaquatic.tileentity.TileEntityVat
import cpw.mods.fml.relauncher.{Side, SideOnly}
import net.minecraft.block.ITileEntityProvider
import net.minecraft.client.renderer.texture.IIconRegister
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.tileentity.TileEntity
import net.minecraft.util.{IChatComponent, IIcon}
import net.minecraft.world.{IBlockAccess, World}
import net.minecraftforge.common.util.ForgeDirection
/**
 * Base block for the multiblock vat. Subclasses only supply the side texture;
 * top/bottom icons, tile-entity wiring, activation handling and debug output
 * are shared here.
 */
abstract class VatBlockBase(name: String) extends BlockBase(name: String) with ITileEntityProvider with IBlockDebug {

  // Resolved during registerBlockIcons.
  var iconSide: IIcon = _
  var iconTop: IIcon = _
  var iconBottom: IIcon = _

  override def registerBlockIcons(iIconRegister: IIconRegister): Unit = {
    iconBottom = iIconRegister.registerIcon(s"${Textures.ResourcePrefix}machine/vat.0")
    iconTop = iIconRegister.registerIcon(s"${Textures.ResourcePrefix}machine/vat.1")
    iconSide = registerIconSide(iIconRegister)
  }

  /** Subclasses provide the texture used for the four vertical faces. */
  protected def registerIconSide(iIconRegister: IIconRegister): IIcon

  @SideOnly(Side.CLIENT)
  override def getIcon(par1: Int, par2: Int): IIcon =
    if (par1 == BlockSide.Top) iconTop
    else if (par1 == BlockSide.Bottom) iconBottom
    else iconSide

  /** Every vat block carries a TileEntityVat. */
  override def createNewTileEntity(world: World, meta: Int): TileEntity = new TileEntityVat

  /**
   * Right-click handling on a formed vat: a held item fills the tank if it is
   * a valid bucket; an empty hand opens the GUI on the master tile. Returns
   * true when the click was consumed.
   */
  override def onBlockActivated(world: World, x: Int, y: Int, z: Int, player: EntityPlayer, side: Int, hitX: Float, hitY: Float, hitZ: Float): Boolean =
    world.getTileEntity(x, y, z) match {
      case tev: TileEntityVat if !player.isSneaking && tev.formed =>
        Option(player.getHeldItem) match {
          case Some(item) =>
            // Only a fillable item consumes the click.
            val canFill = tev.allowBucketFill(item)
            if (canFill) UtilLiquidMover.manuallyFillTank(tev, player)
            canFill
          case None =>
            tev.master match {
              case Some(master) =>
                // GUI is opened server-side only; the click is consumed either way.
                if (!world.isRemote) {
                  player.openGui(LifeAquatic, Guis.Vat, world, master.xCoord, master.yCoord, master.zCoord)
                }
                true
              case None => false
            }
        }
      case _ => false
    }

  /** Dumps the vat tile's debug info into the player's chat. */
  override def debugBlock(world: IBlockAccess, x: Int, y: Int, z: Int, side: ForgeDirection, player: EntityPlayer): Unit =
    world.getTileEntity(x, y, z) match {
      case te: TileEntityVat =>
        val messages = new util.ArrayList[IChatComponent]()
        te.getTileInfo(messages, side, player, true)
        var i = 0
        while (i < messages.size()) {
          player.addChatMessage(messages.get(i))
          i += 1
        }
      case _ => ()
    }
}
| aergonaut/LifeAquatic | src/main/scala/com/aergonaut/lifeaquatic/block/vat/VatBlockBase.scala | Scala | mit | 3,030 |
package com.twitter.finagle.memcached.util
import com.twitter.io.Buf
import scala.collection.mutable.ArrayBuffer
/** Byte-level helpers for parsing memcached protocol tokens out of a [[Buf]]. */
object ParserUtils {

  // Maximum number of characters in the decimal representation of a
  // non-negative Int ("2147483647" has 10 digits). Used by bufToInt as a
  // fast-path length bound.
  private[this] val MaxLengthOfIntString = Int.MaxValue.toString.length

  // Processor that keeps scanning (returns true) until the space delimiter.
  private[this] object isWhitespaceProcessor extends Buf.Processor {
    private[this] val TokenDelimiter: Byte = ' '
    def apply(byte: Byte): Boolean = byte != TokenDelimiter
  }

  // Processor that keeps scanning (returns true) while bytes are ASCII digits.
  private[this] object isDigitProcessor extends Buf.Processor {
    def apply(byte: Byte): Boolean = byte >= '0' && byte <= '9'
  }

  /**
   * @return true if the Buf is non empty and every byte in the Buf is a digit.
   */
  def isDigits(buf: Buf): Boolean =
    if (buf.isEmpty) false
    else -1 == buf.process(isDigitProcessor)

  /**
   * Splits `bytes` on single space delimiters.
   * NOTE(review): only a delimiter at index 0 is suppressed; consecutive
   * delimiters later in the buffer yield empty segments — confirm callers
   * rely on single-space-separated protocol lines.
   */
  private[memcached] def splitOnWhitespace(bytes: Buf): IndexedSeq[Buf] = {
    val len = bytes.length
    val split = new ArrayBuffer[Buf](6)
    var segmentStart = 0
    while (segmentStart < len) {
      val segmentEnd = bytes.process(segmentStart, len, isWhitespaceProcessor)
      if (segmentEnd == -1) {
        // At the end
        split += bytes.slice(segmentStart, len)
        segmentStart = len // terminate loop
      } else {
        // We don't add an empty Buf instance at the front
        if (segmentEnd != 0) {
          split += bytes.slice(segmentStart, segmentEnd)
        }
        segmentStart = segmentEnd + 1
      }
    }
    split
  }

  /**
   * Converts the `Buf`, representing a non-negative integer in chars,
   * to a base 10 Int.
   * Returns -1 if any of the bytes are not digits, if the length is invalid,
   * or if the value would overflow a non-negative Int.
   */
  private[memcached] def bufToInt(buf: Buf): Int = {
    val length = buf.length
    if (length > MaxLengthOfIntString) -1
    else {
      // Accumulate in a Long so 10-char inputs just above Int.MaxValue
      // (e.g. "9999999999") are reported as -1 instead of silently wrapping.
      var num = 0L
      var i = 0
      while (i < length) {
        val b = buf.get(i)
        if (b >= '0' && b <= '9')
          num = (num * 10) + (b - '0')
        else
          return -1
        i += 1
      }
      if (num > Int.MaxValue) -1 else num.toInt
    }
  }
}
| luciferous/finagle | finagle-memcached/src/main/scala/com/twitter/finagle/memcached/util/ParserUtils.scala | Scala | apache-2.0 | 2,035 |
package org.phasanix.svggraph
import java.awt.geom.Point2D
/**
* Convenience interface for constructing SVG path strings,
* absolute positions only
* TODO: split into absolute and relative path implementations
*/
/**
 * Convenience interface for constructing SVG path strings,
 * absolute positions only
 * TODO: split into absolute and relative path implementations
 */
class PathBuilder(start: Point2D.Float) {
  import Helper._

  // Accumulated path entries: Left = absolute point, Right = raw op string.
  private val segments = collection.mutable.ArrayBuffer.empty[Either[Point2D.Float, String]]
  // Last absolute position, used by moveRel.
  private var cursor = start

  segments += Left(start)

  /** Move to absolute position */
  def moveAbs(point: Point2D.Float): PathBuilder = {
    cursor = point
    segments += Left(point)
    this
  }

  /** Insert path op string before current point */
  def op(s: String): PathBuilder = {
    segments += Right(s)
    this
  }

  /** Move relative to current position */
  def moveRel(dx: Float, dy: Float): PathBuilder =
    moveAbs(new Point2D.Float(cursor.x + dx, cursor.y + dy))

  /** Return to start */
  def toStart: PathBuilder = moveAbs(start)

  /** path string */
  def path: String = {
    val sb = new StringBuilder
    sb.fmt('M')
    segments.foreach { entry =>
      sb.fmt(' ')
      entry match {
        case Left(pt) => sb.fmt(pt)
        case Right(s) => sb.fmt(s)
      }
    }
    sb.toString()
  }
}
| richardclose/svggraph | src/main/scala/org/phasanix/svggraph/PathBuilder.scala | Scala | mit | 1,255 |
/** Hello fellow compiler developer.
 if you are wondering why the test suite hangs on this test,
 then it's likely that the lambda inside map has been compiled into a static method.
 Unfortunately, as it is executed inside a static object initializer,
 it is executed inside the class-loader, in a synchronized block that is not source defined.
 If the lambda is compiled to the static method Test$#foo, calling it from a different thread requires grabbing the
 lock inside the class-loader. By contrast, if it is not static and is called through This(Test).foo, no lock is grabbed.
 @DarkDimius
 */
object Test extends App {
val foos = (1 to 1000).toSeq
// Parallel map must propagate the RuntimeException raised for multiples of 37;
// the test passes when "Runtime exception" is printed. Do not restructure the
// lambda: per the header comment, the test's behavior depends on how it is
// compiled (static vs. instance method).
try
foos.par.map(i => if (i % 37 == 0) sys.error("i div 37") else i)
catch {
case ex: RuntimeException => println("Runtime exception")
}
}
| som-snytt/dotty | tests/disabled/run/t5375.scala | Scala | apache-2.0 | 812 |
package org.jetbrains.plugins.scala.lang.psi.impl.search
import com.intellij.openapi.application.QueryExecutorBase
import com.intellij.openapi.progress.ProgressManager
import com.intellij.psi._
import com.intellij.psi.search.searches.ClassInheritorsSearch
import com.intellij.psi.search.searches.ClassInheritorsSearch.SearchParameters
import com.intellij.psi.search.{LocalSearchScope, PsiSearchScopeUtil, SearchScope}
import com.intellij.util.Processor
import org.jetbrains.plugins.scala.extensions.{PsiElementExt, inReadAction}
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScNewTemplateDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScTemplateDefinition, ScTypeDefinition}
/**
* @author Nikolay.Tropin
*/
/**
 * Finds inheritors of a Scala class inside a [[LocalSearchScope]] by walking
 * every template definition in each file of the scope.
 */
class ScalaLocalInheritorsSearcher extends QueryExecutorBase[PsiClass, ClassInheritorsSearch.SearchParameters] {

  override def processQuery(params: SearchParameters, consumer: Processor[PsiClass]): Unit = {
    val clazz = params.getClassToProcess

    // Only handle Scala classes searched within a local scope; anything else
    // is left to the platform's default searchers.
    val virtualFiles = params.getScope match {
      case local: LocalSearchScope if clazz.isInstanceOf[ScalaPsiElement] => local.getVirtualFiles
      case _ => return
    }

    val project = clazz.getProject

    // Per the Processor contract, a `false` from consumer.process means "stop
    // the whole search" — so `continue` must span all files, not be reset for
    // each one (the previous per-file var made the `if (continue)` guard dead).
    var continue = true
    for (virtualFile <- virtualFiles if continue) {
      ProgressManager.checkCanceled()
      inReadAction {
        if (continue) {
          val psiFile: PsiFile = PsiManager.getInstance(project).findFile(virtualFile)
          if (psiFile != null) {
            psiFile.depthFirst().foreach {
              case td: ScTemplateDefinition if continue =>
                if (td.isInheritor(clazz, deep = true) && checkCandidate(td, params))
                  continue = consumer.process(td)
              case _ =>
            }
          }
        }
      }
    }
  }

  /** Applies the query's scope and name filters to a matched definition. */
  private def checkCandidate(candidate: PsiClass, parameters: ClassInheritorsSearch.SearchParameters): Boolean = {
    val searchScope: SearchScope = parameters.getScope
    ProgressManager.checkCanceled()
    if (!PsiSearchScopeUtil.isInScope(searchScope, candidate)) false
    else candidate match {
      // Anonymous classes always pass; named definitions must satisfy the
      // query's name condition.
      case _: ScNewTemplateDefinition => true
      case td: ScTypeDefinition => parameters.getNameCondition.value(td.name)
    }
  }
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/impl/search/ScalaLocalInheritorsSearcher.scala | Scala | apache-2.0 | 2,311 |
/*
Facsimile: A Discrete-Event Simulation Library
Copyright © 2004-2020, Michael J Allen.
This file is part of Facsimile.
Facsimile is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later
version.
Facsimile is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with Facsimile. If not, see
http://www.gnu.org/licenses/lgpl.
The developers welcome all comments, suggestions and offers of assistance. For further information, please visit the
project home page at:
http://facsim.org/
Thank you for your interest in the Facsimile project!
IMPORTANT NOTE: All patches (modifications to existing files and/or the addition of new files) submitted for inclusion
as part of the official Facsimile code base, must comply with the published Facsimile Coding Standards. If your code
fails to comply with the standard, then your patches will be rejected. For further information, please visit the coding
standards at:
http://facsim.org/Documentation/CodingStandards/
========================================================================================================================
Scala source file from the org.facsim.anim.cell package.
*/
package org.facsim.anim.cell
import org.facsim.LibResource
/**
Line style enumeration.

Encodes ''[[http://www.automod.com/ AutoMod®]]'' line style codes and maps them
to the corresponding line styles.

@see [[http://facsim.org/Documentation/Resources/AutoModCellFile/LineStyle.html
Line Styles]]
*/
private[cell] object LineStyle
extends Enumeration {

  /** Solid line, ''cell'' line style code 0. */
  private[cell] val Solid = Value

  /** Dashed line, ''cell'' line style code 1. */
  private[cell] val Dashed = Value

  /** Dotted line, ''cell'' line style code 2. */
  private[cell] val Dotted = Value

  /** Halftone line, ''cell'' line style code 3. */
  private[cell] val Halftone = Value

  /** Style assumed when no explicit style is present. */
  private[cell] val Default = Solid

  /** Smallest valid line style code. */
  private[cell] val minValue = 0

  /** Largest valid line style code (one less than the number of styles). */
  private[cell] val maxValue = maxId - 1

  /**
  Verify a line style code.

  @param lineStyleCode Code for the line style to be verified.

  @return `true` if the code maps to a valid line style, `false` otherwise.
  */
  private[cell] def verify(lineStyleCode: Int) =
  (minValue to maxValue).contains(lineStyleCode)

  /**
  Read line style from ''cell'' data stream.

  @param scene Scene from which the line style is to be read.

  @return Line style read, if valid.

  @throws org.facsim.anim.cell.IncorrectFormatException if the file supplied is
  not an ''AutoMod® cell'' file.

  @throws org.facsim.anim.cell.ParsingErrorException if errors are encountered
  during parsing of the file.

  @see [[http://facsim.org/Documentation/Resources/AutoModCellFile/LineStyle.html
  Line Styles]]
  */
  private[cell] def read(scene: CellScene) = {
    // Read and validate the raw style code, then map it onto the enumeration.
    val styleCode = scene.readInt(verify(_), LibResource
    ("anim.cell.LineStyle.read", minValue, maxValue))
    apply(styleCode)
  }
}
package scalaoauth2.provider
import java.util.Date
import org.scalatest._
import org.scalatest.Matchers._
import org.scalatest.concurrent.ScalaFutures
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
class ProtectedResourceSpec extends FlatSpec with ScalaFutures {

  /** Handler whose lookups always succeed with a fresh (non-expired) token and
    * an associated AuthInfo for a mock user. */
  def successfulProtectedResourceHandler() = new ProtectedResourceHandler[User] {
    override def findAccessToken(token: String): Future[Option[AccessToken]] = Future.successful(Some(AccessToken("token1", Some("refreshToken1"), Some("all"), Some(3600), new Date())))

    override def findAuthInfoByAccessToken(accessToken: AccessToken): Future[Option[AuthInfo[User]]] = Future.successful(Some(
      AuthInfo(user = MockUser(10000, "username"), clientId = Some("clientId1"), scope = Some("all"), redirectUri = None)
    ))
  }

  it should "be handled request with token into header" in {
    val request = ProtectedResourceRequest(
      Map("Authorization" -> Seq("OAuth token1")),
      Map("username" -> Seq("user"), "password" -> Seq("pass"), "scope" -> Seq("all"))
    )

    val dataHandler = successfulProtectedResourceHandler()
    // Bug fix: the previous `.map(_ should be ('right))` discarded the mapped
    // future, so the assertion could run after the test body returned (or its
    // failure be silently dropped). Await the result explicitly instead.
    whenReady(ProtectedResource.handleRequest(request, dataHandler)) { result =>
      result should be ('right)
    }
  }

  it should "be handled request with token into body" in {
    val request = ProtectedResourceRequest(
      Map(),
      Map("access_token" -> Seq("token1"), "username" -> Seq("user"), "password" -> Seq("pass"), "scope" -> Seq("all"))
    )

    val dataHandler = successfulProtectedResourceHandler()
    // Same fix as above: await the future so the assertion is actually checked.
    whenReady(ProtectedResource.handleRequest(request, dataHandler)) { result =>
      result should be ('right)
    }
  }

  it should "be lost expired" in {
    val request = ProtectedResourceRequest(
      Map("Authorization" -> Seq("OAuth token1")),
      Map("username" -> Seq("user"), "password" -> Seq("pass"), "scope" -> Seq("all"))
    )

    // Token issued 4000 seconds ago with a 3600-second lifetime: expired.
    val dataHandler = new ProtectedResourceHandler[User] {
      override def findAccessToken(token: String): Future[Option[AccessToken]] = Future.successful(Some(AccessToken("token1", Some("refreshToken1"), Some("all"), Some(3600), new Date(new Date().getTime() - 4000 * 1000))))

      override def findAuthInfoByAccessToken(accessToken: AccessToken): Future[Option[AuthInfo[MockUser]]] = Future.successful(Some(
        AuthInfo(user = MockUser(10000, "username"), clientId = Some("clientId1"), scope = Some("all"), redirectUri = None)
      ))
    }

    val f = ProtectedResource.handleRequest(request, dataHandler)
    whenReady(f) { result =>
      intercept[ExpiredToken] {
        result match {
          case Left(e) => throw e
          case _ =>
        }
      }
    }
  }

  it should "be invalid request without token" in {
    val request = ProtectedResourceRequest(
      Map(),
      Map("username" -> Seq("user"), "password" -> Seq("pass"), "scope" -> Seq("all"))
    )

    val dataHandler = successfulProtectedResourceHandler()
    val f = ProtectedResource.handleRequest(request, dataHandler)
    whenReady(f) { result =>
      val e = intercept[InvalidRequest] {
        result match {
          case Left(e) => throw e
          case _ =>
        }
      }
      e.description should be ("Access token is not found")
    }
  }

  it should "be invalid request when not find an access token" in {
    val request = ProtectedResourceRequest(
      Map("Authorization" -> Seq("OAuth token1")),
      Map("username" -> Seq("user"), "password" -> Seq("pass"), "scope" -> Seq("all"))
    )

    val dataHandler = new ProtectedResourceHandler[User] {
      override def findAccessToken(token: String): Future[Option[AccessToken]] = Future.successful(None)

      override def findAuthInfoByAccessToken(accessToken: AccessToken): Future[Option[AuthInfo[MockUser]]] = Future.successful(None)
    }

    val f = ProtectedResource.handleRequest(request, dataHandler)
    whenReady(f) { result =>
      val e = intercept[InvalidToken] {
        result match {
          case Left(e) => throw e
          case _ =>
        }
      }
      e.description should be ("The access token is not found")
    }
  }

  it should "be invalid request when not find AuthInfo by token" in {
    val request = ProtectedResourceRequest(
      Map("Authorization" -> Seq("OAuth token1")),
      Map("username" -> Seq("user"), "password" -> Seq("pass"), "scope" -> Seq("all"))
    )

    // Token resolves, but no AuthInfo is associated with it.
    val dataHandler = new ProtectedResourceHandler[User] {
      override def findAccessToken(token: String): Future[Option[AccessToken]] = Future.successful(Some(AccessToken("token1", Some("refreshToken1"), Some("all"), Some(3600), new Date())))

      override def findAuthInfoByAccessToken(accessToken: AccessToken): Future[Option[AuthInfo[MockUser]]] = Future.successful(None)
    }

    val f = ProtectedResource.handleRequest(request, dataHandler)
    whenReady(f) { result =>
      val e = intercept[InvalidToken] {
        result match {
          case Left(e) => throw e
          case _ =>
        }
      }
      e.description should be ("The access token is invalid")
    }
  }
}
| beni55/scala-oauth2-provider | scala-oauth2-core/src/test/scala/scalaoauth2/provider/ProtectedResourceSpec.scala | Scala | mit | 5,036 |
object Solution {

  /** Returns all primes up to and including `n`, in ascending order, using the
    * sieve of Eratosthenes. Returns an empty array for `n < 2`. */
  def sievePrimeGenerator(n: Int): Array[Int] = {
    val nums = Array.fill(n + 1)(true)
    val primes = for (i <- (2 to n).toIterator if nums(i)) yield {
      // Mark all multiples of the newly found prime as composite. Starting at
      // j = i (i.e. i * i) is safe: smaller multiples were already marked by
      // smaller prime factors.
      var j = i
      while (i * j <= n) {
        nums(i * j) = false
        j += 1
      }
      i
    }
    primes.toArray
  }

  /** Reads a count `t`, then `t` queries; for each query `k` prints the k-th prime. */
  def main(args: Array[String]) {
    // Sieve bound: p_k < 2 k ln k comfortably covers the 10^4-th prime (104729).
    val upperBound = math.pow(10, 4).toInt
    val number = (2 * upperBound * math.log(upperBound)).toInt
    val primes = sievePrimeGenerator(number)
    // scala.io.StdIn.readLine replaces the deprecated Predef.readLine;
    // trim guards against stray whitespace around the numbers.
    val t = scala.io.StdIn.readLine().trim.toInt
    for (i <- 1 to t) {
      val nth = scala.io.StdIn.readLine().trim.toInt
      println(primes(nth - 1))
    }
  }
}
| advancedxy/hackerrank | project-euler/problem-7/10001stPrime.scala | Scala | mit | 730 |
/**
* Copyright (C) 2007-2008 Scala OTP Team
*/
package scala.actors.behavior
import scala.actors._
import scala.actors.Actor._
import scala.collection.Map
import scala.collection.mutable.HashMap
import org.testng.annotations.{Test, BeforeMethod}
import org.scalatest.testng.TestNGSuite
import org.scalatest._
/**
* @author <a href="http://jonasboner.com">Jonas Bonér</a>
*/
class SupervisorSuite extends TestNGSuite {
  // Accumulates one marker per processed message ("ping") or supervisor restart
  // ("oneforone"/"allforone"), so tests can assert on the exact event order.
  var messageLog: String = ""
  val pingpong1 = new GenericServerContainer("pingpong1", () => new PingPong1Actor)
  val pingpong2 = new GenericServerContainer("pingpong2", () => new PingPong2Actor)
  val pingpong3 = new GenericServerContainer("pingpong3", () => new PingPong3Actor)
  pingpong1.setTimeout(100)
  pingpong2.setTimeout(100)
  pingpong3.setTimeout(100)
  // Resets the shared log before delegating, so each test observes only its own events.
  override protected def runTest(testName: String, reporter: Reporter, stopper: Stopper, properties: scala.collection.immutable.Map[String, Any]) {
    setup
    super.runTest(testName, reporter, stopper, properties)
  }
  @BeforeMethod
  def setup = messageLog = ""
  // ===========================================
  @Test
  def testStartServer = {
    val sup = getSingleActorAllForOneSupervisor
    sup ! Start
    expect("pong") {
      (pingpong1 !!! Ping).getOrElse("nil")
    }
  }
  // ===========================================
  @Test
  def testGetServer = {
    val sup = getSingleActorAllForOneSupervisor
    sup ! Start
    val server = sup.getServerOrElse("pingpong1", throw new RuntimeException("server not found"))
    assert(server.isInstanceOf[GenericServerContainer])
    assert(server === pingpong1)
  }
  // ===========================================
  @Test
  def testGetServerOrFail = {
    val sup = getSingleActorAllForOneSupervisor
    sup ! Start
    intercept(classOf[RuntimeException]) {
      sup.getServerOrElse("wrong_name", throw new RuntimeException("server not found"))
    }
  }
  // ===========================================
  // Killing the only supervised actor triggers a one-for-one restart.
  @Test
  def testKillSingleActorOneForOne = {
    val sup = getSingleActorOneForOneSupervisor
    sup ! Start
    intercept(classOf[RuntimeException]) {
      pingpong1 !!! (Die, throw new RuntimeException("TIME OUT"))
    }
    Thread.sleep(100)
    expect("oneforone") {
      messageLog
    }
  }
  // ===========================================
  // The actor remains usable after being restarted by its supervisor.
  @Test
  def testCallKillCallSingleActorOneForOne = {
    val sup = getSingleActorOneForOneSupervisor
    sup ! Start
    expect("pong") {
      (pingpong1 !!! Ping).getOrElse("nil")
    }
    Thread.sleep(100)
    expect("ping") {
      messageLog
    }
    intercept(classOf[RuntimeException]) {
      pingpong1 !!! (Die, throw new RuntimeException("TIME OUT"))
    }
    Thread.sleep(100)
    expect("pingoneforone") {
      messageLog
    }
    expect("pong") {
      (pingpong1 !!! Ping).getOrElse("nil")
    }
    Thread.sleep(100)
    expect("pingoneforoneping") {
      messageLog
    }
  }
  // ===========================================
  @Test
  def testKillSingleActorAllForOne = {
    val sup = getSingleActorAllForOneSupervisor
    sup ! Start
    intercept(classOf[RuntimeException]) {
      pingpong1 !!! (Die, throw new RuntimeException("TIME OUT"))
    }
    Thread.sleep(100)
    expect("allforone") {
      messageLog
    }
  }
  // ===========================================
  @Test
  def testCallKillCallSingleActorAllForOne = {
    val sup = getSingleActorAllForOneSupervisor
    sup ! Start
    expect("pong") {
      (pingpong1 !!! Ping).getOrElse("nil")
    }
    Thread.sleep(100)
    expect("ping") {
      messageLog
    }
    intercept(classOf[RuntimeException]) {
      pingpong1 !!! (Die, throw new RuntimeException("TIME OUT"))
    }
    Thread.sleep(100)
    expect("pingallforone") {
      messageLog
    }
    expect("pong") {
      (pingpong1 !!! Ping).getOrElse("nil")
    }
    Thread.sleep(100)
    expect("pingallforoneping") {
      messageLog
    }
  }
  // ===========================================
  // One-for-one: only the killed actor restarts, so a single restart marker.
  @Test
  def testKillMultipleActorsOneForOne = {
    val sup = getMultipleActorsOneForOneConf
    sup ! Start
    intercept(classOf[RuntimeException]) {
      pingpong3 !!! (Die, throw new RuntimeException("TIME OUT"))
    }
    Thread.sleep(100)
    expect("oneforone") {
      messageLog
    }
  }
  // ===========================================
  @Test
  def tesCallKillCallMultipleActorsOneForOne = {
    val sup = getMultipleActorsOneForOneConf
    sup ! Start
    expect("pong") {
      (pingpong1 !!! Ping).getOrElse("nil")
    }
    Thread.sleep(100)
    expect("pong") {
      (pingpong2 !!! Ping).getOrElse("nil")
    }
    Thread.sleep(100)
    expect("pong") {
      (pingpong3 !!! Ping).getOrElse("nil")
    }
    Thread.sleep(100)
    expect("pingpingping") {
      messageLog
    }
    intercept(classOf[RuntimeException]) {
      pingpong2 !!! (Die, throw new RuntimeException("TIME OUT"))
    }
    Thread.sleep(100)
    expect("pingpingpingoneforone") {
      messageLog
    }
    expect("pong") {
      (pingpong1 !!! Ping).getOrElse("nil")
    }
    Thread.sleep(100)
    expect("pong") {
      (pingpong2 !!! Ping).getOrElse("nil")
    }
    Thread.sleep(100)
    expect("pong") {
      (pingpong3 !!! Ping).getOrElse("nil")
    }
    Thread.sleep(100)
    expect("pingpingpingoneforonepingpingping") {
      messageLog
    }
  }
  // ===========================================
  // All-for-one: killing one actor restarts all three, so three restart markers.
  @Test
  def testKillMultipleActorsAllForOne = {
    val sup = getMultipleActorsAllForOneConf
    sup ! Start
    intercept(classOf[RuntimeException]) {
      pingpong2 !!! (Die, throw new RuntimeException("TIME OUT"))
    }
    Thread.sleep(100)
    expect("allforoneallforoneallforone") {
      messageLog
    }
  }
  // ===========================================
  @Test
  def tesCallKillCallMultipleActorsAllForOne = {
    val sup = getMultipleActorsAllForOneConf
    sup ! Start
    expect("pong") {
      (pingpong1 !!! Ping).getOrElse("nil")
    }
    Thread.sleep(100)
    expect("pong") {
      (pingpong2 !!! Ping).getOrElse("nil")
    }
    Thread.sleep(100)
    expect("pong") {
      (pingpong3 !!! Ping).getOrElse("nil")
    }
    Thread.sleep(100)
    expect("pingpingping") {
      messageLog
    }
    intercept(classOf[RuntimeException]) {
      pingpong2 !!! (Die, throw new RuntimeException("TIME OUT"))
    }
    Thread.sleep(100)
    expect("pingpingpingallforoneallforoneallforone") {
      messageLog
    }
    expect("pong") {
      (pingpong1 !!! Ping).getOrElse("nil")
    }
    Thread.sleep(100)
    expect("pong") {
      (pingpong2 !!! Ping).getOrElse("nil")
    }
    Thread.sleep(100)
    expect("pong") {
      (pingpong3 !!! Ping).getOrElse("nil")
    }
    Thread.sleep(100)
    expect("pingpingpingallforoneallforoneallforonepingpingping") {
      messageLog
    }
  }
  // Killing a first-level actor restarts the nested supervisor's children too.
  @Test
  def testTerminateFirstLevelActorAllForOne = {
    val sup = getNestedSupervisorsAllForOneConf
    sup ! Start
    intercept(classOf[RuntimeException]) {
      pingpong1 !!! (Die, throw new RuntimeException("TIME OUT"))
    }
    Thread.sleep(100)
    expect("allforoneallforoneallforone") {
      messageLog
    }
  }
  // =============================================
  // Creat some supervisors with different configurations
  def getSingleActorAllForOneSupervisor: Supervisor = {
    // Create an abstract SupervisorContainer that works for all implementations
    // of the different Actors (Services).
    //
    // Then create a concrete container in which we mix in support for the specific
    // implementation of the Actors we want to use.
    object factory extends TestSupervisorFactory {
      override def getSupervisorConfig: SupervisorConfig = {
        SupervisorConfig(
          RestartStrategy(AllForOne, 3, 100),
          Worker(
            pingpong1,
            LifeCycle(Permanent, 100))
          :: Nil)
      }
    }
    factory.newSupervisor
  }
  def getSingleActorOneForOneSupervisor: Supervisor = {
    object factory extends TestSupervisorFactory {
      override def getSupervisorConfig: SupervisorConfig = {
        SupervisorConfig(
          RestartStrategy(OneForOne, 3, 100),
          Worker(
            pingpong1,
            LifeCycle(Permanent, 100))
          :: Nil)
      }
    }
    factory.newSupervisor
  }
  def getMultipleActorsAllForOneConf: Supervisor = {
    object factory extends TestSupervisorFactory {
      override def getSupervisorConfig: SupervisorConfig = {
        SupervisorConfig(
          RestartStrategy(AllForOne, 3, 100),
          Worker(
            pingpong1,
            LifeCycle(Permanent, 100))
          ::
          Worker(
            pingpong2,
            LifeCycle(Permanent, 100))
          ::
          Worker(
            pingpong3,
            LifeCycle(Permanent, 100))
          :: Nil)
      }
    }
    factory.newSupervisor
  }
  def getMultipleActorsOneForOneConf: Supervisor = {
    object factory extends TestSupervisorFactory {
      override def getSupervisorConfig: SupervisorConfig = {
        SupervisorConfig(
          RestartStrategy(OneForOne, 3, 100),
          Worker(
            pingpong1,
            LifeCycle(Permanent, 100))
          ::
          Worker(
            pingpong2,
            LifeCycle(Permanent, 100))
          ::
          Worker(
            pingpong3,
            LifeCycle(Permanent, 100))
          :: Nil)
      }
    }
    factory.newSupervisor
  }
  // Supervisor whose children are pingpong1 plus a nested supervisor of pingpong2/3.
  def getNestedSupervisorsAllForOneConf: Supervisor = {
    object factory extends TestSupervisorFactory {
      override def getSupervisorConfig: SupervisorConfig = {
        SupervisorConfig(
          RestartStrategy(AllForOne, 3, 100),
          Worker(
            pingpong1,
            LifeCycle(Permanent, 100))
          ::
          SupervisorConfig(
            RestartStrategy(AllForOne, 3, 100),
            Worker(
              pingpong2,
              LifeCycle(Permanent, 100))
            ::
            Worker(
              pingpong3,
              LifeCycle(Permanent, 100))
            :: Nil)
          :: Nil)
      }
    }
    factory.newSupervisor
  }
  // Test servers: log "ping" and reply "pong"; Die makes them throw so the
  // supervisor restarts them.
  class PingPong1Actor extends GenericServer {
    override def body: PartialFunction[Any, Unit] = {
      case Ping =>
        messageLog += "ping"
        reply("pong")
      case Die =>
        throw new RuntimeException("Recieved Die message")
    }
  }
  class PingPong2Actor extends GenericServer {
    override def body: PartialFunction[Any, Unit] = {
      case Ping =>
        messageLog += "ping"
        reply("pong")
      case Die =>
        throw new RuntimeException("Recieved Die message")
    }
  }
  class PingPong3Actor extends GenericServer {
    override def body: PartialFunction[Any, Unit] = {
      case Ping =>
        messageLog += "ping"
        reply("pong")
      case Die =>
        throw new RuntimeException("Recieved Die message")
    }
  }
  // =============================================
  // Restart strategies that record each restart in messageLog so tests can
  // observe how many children were restarted and in what order.
  class TestAllForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int) extends AllForOneStrategy(maxNrOfRetries, withinTimeRange) {
    override def postRestart(serverContainer: GenericServerContainer) = {
      messageLog += "allforone"
    }
  }
  class TestOneForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int) extends OneForOneStrategy(maxNrOfRetries, withinTimeRange) {
    override def postRestart(serverContainer: GenericServerContainer) = {
      messageLog += "oneforone"
    }
  }
  // Factory that plugs the logging strategies above into the supervisor it builds.
  abstract class TestSupervisorFactory extends SupervisorFactory {
    override def create(strategy: RestartStrategy): Supervisor = strategy match {
      case RestartStrategy(scheme, maxNrOfRetries, timeRange) =>
        scheme match {
          case AllForOne => new Supervisor(new TestAllForOneStrategy(maxNrOfRetries, timeRange))
          case OneForOne => new Supervisor(new TestOneForOneStrategy(maxNrOfRetries, timeRange))
        }
    }
  }
}
| bingoyang/scala-otp | behavior/src/test/scala/scala/actors/behavior/SupervisorSuite.scala | Scala | bsd-3-clause | 11,883 |
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package elements
import api.statements.{ScVariableDefinition, ScVariable, ScVariableDeclaration}
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.{StubElement, IndexSink, StubOutputStream, StubInputStream}
import com.intellij.util.io.StringRef
import impl.ScVariableStubImpl
import index.ScalaIndexKeys
import com.intellij.util.IncorrectOperationException
/**
 * Stub element type for `val`-less variables (`var` declarations/definitions).
 *
 * User: Alexander Podkhalyuzin
 * Date: 18.10.2008
 */
abstract class ScVariableElementType[Variable <: ScVariable](debugName: String)
        extends ScStubElementType[ScVariableStub, ScVariable](debugName) {

  /** Builds a stub capturing the variable's declared names, type text,
    * initializer text, bindings text and whether it is local. */
  def createStubImpl[ParentPsi <: PsiElement](psi: ScVariable, parentStub: StubElement[ParentPsi]): ScVariableStub = {
    val isDecl = psi.isInstanceOf[ScVariableDeclaration]
    val typeText = psi.typeElement.map(_.getText).getOrElse("")
    val bodyText =
      if (isDecl) ""
      else psi.asInstanceOf[ScVariableDefinition].expr.map(_.getText).getOrElse("")
    val containerText =
      if (isDecl) psi.asInstanceOf[ScVariableDeclaration].getIdList.getText
      else psi.asInstanceOf[ScVariableDefinition].pList.getText
    val names = psi.declaredElements.map(_.name).toArray
    new ScVariableStubImpl[ParentPsi](parentStub, this, names, isDecl,
      typeText, bodyText, containerText, psi.containingClass == null)
  }

  /** Writes the stub fields; `deserializeImpl` must read them in the same order. */
  def serialize(stub: ScVariableStub, dataStream: StubOutputStream) {
    dataStream.writeBoolean(stub.isDeclaration)
    val names = stub.getNames
    dataStream.writeInt(names.length)
    names.foreach(dataStream.writeName)
    dataStream.writeName(stub.getTypeText)
    dataStream.writeName(stub.getBodyText)
    dataStream.writeName(stub.getBindingsContainerText)
    dataStream.writeBoolean(stub.isLocal)
  }

  /** Reads the stub fields back, mirroring the order used by `serialize`. */
  def deserializeImpl(dataStream: StubInputStream, parentStub: Any): ScVariableStub = {
    val isDecl = dataStream.readBoolean
    val namesLength = dataStream.readInt
    // Array.fill evaluates its element expression once per slot, left to right,
    // preserving the sequential reads from the stream.
    val names = Array.fill(namesLength)(StringRef.toString(dataStream.readName))
    val parent = parentStub.asInstanceOf[StubElement[PsiElement]]
    val typeText = StringRef.toString(dataStream.readName)
    val bodyText = StringRef.toString(dataStream.readName)
    val bindingsText = StringRef.toString(dataStream.readName)
    val isLocal = dataStream.readBoolean()
    new ScVariableStubImpl(parent, this, names, isDecl, typeText, bodyText, bindingsText, isLocal)
  }

  /** Registers every non-null declared name in the variable-name index. */
  def indexStub(stub: ScVariableStub, sink: IndexSink) {
    for (name <- stub.getNames if name != null)
      sink.occurrence(ScalaIndexKeys.VARIABLE_NAME_KEY, name)
  }
}
} | consulo/consulo-scala | src/org/jetbrains/plugins/scala/lang/psi/stubs/elements/ScVariableElementType.scala | Scala | apache-2.0 | 2,743 |
package com.eevolution.context.dictionary.domain.api.service
import com.eevolution.context.dictionary._
import com.eevolution.context.dictionary.domain.model.ReferenceTrl
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: eduardo.moreno@e-evolution.com, http://www.e-evolution.com , http://github.com/e-Evolution
* Created by eduardo.moreno@e-evolution.com , www.e-evolution.com
*/
/**
  * Reference Trl Service
  *
  * Service contract for [[ReferenceTrl]] entities ("Trl" presumably denotes a
  * translation table, per the surrounding dictionary model — confirm), keyed by
  * an Int identifier. All operations are inherited from [[api.Service]]; this
  * trait only fixes the entity and key type parameters.
  */
trait ReferenceTrlService extends api.Service[ReferenceTrl, Int] {
  //Definition
}
package scalariform.lexer
import scalariform.lexer.Tokens._
/** Maps Scala's reserved words and reserved symbolic tokens to their lexer
  * [[TokenType]]s. */
object Keywords {

  /** Returns the token type reserved for `s`, or `None` if `s` is an ordinary
    * identifier/operator. */
  def apply(s: String): Option[TokenType] = keywords get s

  // Alphabetic keywords first, then the reserved symbolic tokens (from "_" on).
  private val keywords = Map(
    "abstract" -> ABSTRACT,
    "case" -> CASE,
    "catch" -> CATCH,
    "class" -> CLASS,
    "def" -> DEF,
    "do" -> DO,
    "else" -> ELSE,
    "extends" -> EXTENDS,
    "false" -> FALSE,
    "final" -> FINAL,
    "finally" -> FINALLY,
    "for" -> FOR,
    "forSome" -> FORSOME,
    "if" -> IF,
    "implicit" -> IMPLICIT,
    "import" -> IMPORT,
    "lazy" -> LAZY,
    "match" -> MATCH,
    "new" -> NEW,
    "null" -> NULL,
    "object" -> OBJECT,
    "override" -> OVERRIDE,
    "package" -> PACKAGE,
    "private" -> PRIVATE,
    "protected" -> PROTECTED,
    "return" -> RETURN,
    "sealed" -> SEALED,
    "super" -> SUPER,
    "this" -> THIS,
    "throw" -> THROW,
    "trait" -> TRAIT,
    "try" -> TRY,
    "true" -> TRUE,
    "type" -> TYPE,
    "val" -> VAL,
    "var" -> VAR,
    "while" -> WHILE,
    "with" -> WITH,
    "yield" -> YIELD,
    "_" -> USCORE,
    ":" -> COLON,
    "=" -> EQUALS,
    "=>" -> ARROW,
    "<-" -> LARROW,
    "->" -> RARROW,
    "<:" -> SUBTYPE,
    "<%" -> VIEWBOUND,
    ">:" -> SUPERTYPE,
    "#" -> HASH,
    "@" -> AT,
    "." -> DOT,
    "+" -> PLUS,
    "-" -> MINUS,
    "*" -> STAR,
    "|" -> PIPE,
    "~" -> TILDE,
    "!" -> EXCLAMATION
  )
}
| gawkermedia/scalariform | scalariform/src/main/scala/scalariform/lexer/Keywords.scala | Scala | mit | 1,378 |
/*
* ArtifactLocation.scala
* (LucreEvent)
*
* Copyright (c) 2011-2014 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Lesser General Public License v2.1+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.lucre.artifact
import java.io.File
import de.sciss.lucre.artifact.impl.{ArtifactImpl => Impl}
import de.sciss.lucre.data
import de.sciss.lucre.event.{Publisher, Sys}
import de.sciss.lucre.stm.Mutable
import de.sciss.model
import de.sciss.serial.{DataInput, Serializer}
object ArtifactLocation {
  final val typeID = 0x10003

  /** Creates a modifiable location rooted at a fresh temporary directory.
    *
    * The directory is scheduled for deletion on JVM exit; note `deleteOnExit`
    * only removes it if it is empty at that point.
    */
  def tmp[S <: Sys[S]]()(implicit tx: S#Tx): Modifiable[S] = {
    // Bug fix: the former createTempFile/delete/mkdir dance was racy (another
    // process could claim the path between delete and mkdir) and ignored
    // mkdir's result. createTempDirectory creates the directory atomically.
    val dir = java.nio.file.Files.createTempDirectory("artifacts").toFile
    dir.deleteOnExit()
    apply(dir)
  }

  /** Creates a modifiable location rooted at the given directory. */
  def apply[S <: Sys[S]](init: File)(implicit tx: S#Tx): Modifiable[S] =
    Impl.newLocation[S](init)

  object Modifiable {
    implicit def serializer[S <: Sys[S]]: Serializer[S#Tx, S#Acc, ArtifactLocation.Modifiable[S]] =
      Impl.modLocationSerializer

    def read[S <: Sys[S]](in: DataInput, access: S#Acc)(implicit tx: S#Tx): ArtifactLocation.Modifiable[S] =
      Impl.readModLocation[S](in, access)
  }
  trait Modifiable[S <: Sys[S]] extends ArtifactLocation[S] {
    /** Registers a significant artifact with the system. That is,
      * stores the artifact, which should have a real resource
      * association, as belonging to the system.
      *
      * @param file the file to turn into a registered artifact
      */
    def add(file: File)(implicit tx: S#Tx): Artifact.Modifiable[S]

    /** Unregisters the given artifact from this location. */
    def remove(artifact: Artifact[S])(implicit tx: S#Tx): Unit

    /** Moves the location to a new base directory. */
    def directory_=(value: File)(implicit tx: S#Tx): Unit
  }

  /** Change notifications emitted by a location. */
  sealed trait Update[S <: Sys[S]] {
    def location: ArtifactLocation[S]
  }

  final case class Added[S <: Sys[S]](location: ArtifactLocation[S], idx: Int, artifact: Artifact[S])
    extends Update[S]

  final case class Removed[S <: Sys[S]](location: ArtifactLocation[S], idx: Int, artifact: Artifact[S])
    extends Update[S]

  final case class Moved[S <: Sys[S]](location: ArtifactLocation[S], change: model.Change[File]) extends Update[S]

  implicit def serializer[S <: Sys[S]]: Serializer[S#Tx, S#Acc, ArtifactLocation[S]] = Impl.locationSerializer

  def read[S <: Sys[S]](in: DataInput, access: S#Acc)(implicit tx: S#Tx): ArtifactLocation[S] =
    Impl.readLocation[S](in, access)
}
/** An artifact location is a directory on an external storage.
  *
  * It exposes its current base `directory`, iteration over the artifacts
  * registered with it, and an optional modifiable view. Changes are published
  * as [[ArtifactLocation.Update]] events.
  */
trait ArtifactLocation[S <: Sys[S]] extends Mutable[S#ID, S#Tx] with Publisher[S, ArtifactLocation.Update[S]] {
  // Current base directory of this location.
  def directory(implicit tx: S#Tx): File
  // Iterates over all artifacts registered with this location.
  def iterator (implicit tx: S#Tx): data.Iterator[S#Tx, Artifact[S]]

  // Some(this as Modifiable) if the location supports mutation, else None.
  def modifiableOption: Option[ArtifactLocation.Modifiable[S]]
}
| Sciss/LucreEvent | artifact/src/main/scala/de/sciss/lucre/artifact/ArtifactLocation.scala | Scala | lgpl-2.1 | 2,821 |
package org.example.backpressure
import akka.actor.{ActorLogging, Actor}
/**
 * Actor that logs every received String and then sleeps briefly, acting as a
 * deliberately slow consumer for the back-pressure example.
 *
 * Created by kailianghe on 1/12/15.
 */
class SlowReceiver extends Actor with ActorLogging {

  override def postStop(): Unit = {
    log.info("SlowReceiver#postStop")
  }

  override def receive: Actor.Receive = {
    case msg: String =>
      log.info(s"Received: $msg")
      // simulate slow message processing
      Thread.sleep(100)
  }
}
| hekailiang/akka-play | actor-samples/src/main/scala/org/example/backpressure/SlowReceiver.scala | Scala | apache-2.0 | 403 |
package akka.s3
import akka.http.scaladsl.model.Uri.Query
import akka.http.scaladsl.model.{HttpRequest, Multipart}
import akka.stream.scaladsl.Source
import akka.util.ByteString
import scala.collection.mutable
import scala.concurrent.Await
import scala.concurrent.duration.Duration
trait HeaderList {
  // Returns the value for `name`, if present (lookup is case-insensitive in the
  // implementations provided in this file).
  def get(name: String): Option[String]
  // Returns the (key, value) pairs whose key satisfies `p`.
  def filter(p: String => Boolean): Seq[(String, String)]
}
// Ordered list of (key, value) pairs; key lookup ignores case.
object KVList {
  case class t(unwrap: Seq[(String, String)]) extends HeaderList {
    def get(key: String): Option[String] = {
      val wanted = key.toLowerCase
      unwrap.collectFirst { case (k, v) if k.toLowerCase == wanted => v }
    }
    def filter(p: String => Boolean) = unwrap.filter { case (k, _) => p(k) }
  }

  def builder: Builder = Builder()

  // Mutable accumulator for building a t; append skips undefined values.
  case class Builder() {
    val l = mutable.ListBuffer[(String, String)]()
    def append(k: String, v: Option[String]) = {
      v.foreach(value => l += k -> value)
      this
    }
    def build = t(l)
  }
}
object HeaderList {
  // Combines several header lists. `get` folds over all lists with the project's
  // `<+` combinator (presumably an Option "or"/priority merge — confirm its
  // semantics to know whether the first or last defined value wins).
  case class Aggregate(xs: Seq[HeaderList]) extends HeaderList {
    override def get(name: String) = {
      var ret: Option[String] = None
      xs.foreach { x =>
        ret = ret <+ x.get(name)
      }
      ret
    }
    override def filter(p: String => Boolean) = xs.map(_.filter(p)).fold(Seq())((a, b) => a ++ b)
  }
  // View over the HTTP request headers; HttpHeader.is expects a lowercase name.
  case class FromRequestHeaders(req: HttpRequest) extends HeaderList {
    override def get(name: String) = req.headers.find(_.is(name.toLowerCase)).map(_.value)
    override def filter(p: String => Boolean) = {
      val l = req.headers.map{a => (a.lowercaseName(), a.value())} |> KVList.t
      l.filter(p)
    }
  }
  // View over the URI query parameters.
  case class FromRequestQuery(q: Query) extends HeaderList {
    val headerList = KVList.t(q)
    override def get(name: String) = headerList.get(name)
    override def filter(p: String => Boolean) = headerList.filter(p)
  }
  // Eagerly drains a multipart form on construction: the "file" part's bytes go
  // into `bytes`, every other part becomes a (name, value) header entry.
  case class FromMultipart(mfd: Multipart.FormData) extends HeaderList {
    var bytes: ByteString = _
    val tmp = mutable.ListBuffer[(String, String)]()
    // NOTE(review): only the outer traversal future is awaited below; the inner
    // runForeach futures started per part are not, so `bytes`/`tmp` may still be
    // incomplete when Await returns — verify against the stream materializer used.
    val fut = mfd.parts.runForeach { part =>
      val name = part.name
      val entity = part.entity
      if (name == "file") {
        entity.dataBytes.runForeach { b =>
          bytes = b
        }
      } else {
        part.entity.dataBytes.runForeach { data =>
          val charset = entity.contentType.charset.value
          val str = data.decodeString(charset)
          tmp += Pair(name, str)
        }
      }
    }
    Await.ready(fut, Duration.Inf)
    val headerList = KVList.t(tmp)
    override def get(name: String) = headerList.get(name)
    override def filter(p: String => Boolean) = headerList.filter(p)
  }
}
| akiradeveloper/akka-s3 | src/main/scala/akka/s3/HeaderList.scala | Scala | apache-2.0 | 2,653 |
// scalac: -Ymacro-annotations
object Test extends App {
  // Regression test: a @deprecated-annotated class paired with a same-named
  // lazy val term member (compiled under -Ymacro-annotations, per the header).
  @deprecated
  class Inner() {
  }
  lazy val Inner = new Inner()
  // Same pairing, but with a strict val instead of a lazy one.
  @deprecated
  class Inner2() {
  }
  val Inner2 = new Inner2()
}
| scala/scala | test/files/pos/macro-annot/t12366.scala | Scala | apache-2.0 | 195 |
/*
* Copyright 2013 Carnegie Mellon University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.cmu.lti.suim.examples
import scala.collection.JavaConversions.collectionAsScalaIterable
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext.rddToPairRDDFunctions
import org.apache.uima.examples.cpe.FileSystemCollectionReader
import org.apache.uima.fit.factory.AnalysisEngineFactory.createEngineDescription
import org.apache.uima.fit.factory.CollectionReaderFactory.createReader
import org.apache.uima.fit.factory.TypeSystemDescriptionFactory
import org.apache.uima.fit.util.JCasUtil
import org.apache.uima.tutorial.RoomNumber
import org.apache.uima.tutorial.ex1.RoomNumberAnnotator
import edu.cmu.lti.suim.SparkUimaUtils.makeRDD
import edu.cmu.lti.suim.SparkUimaUtils.process
/** Example driver: reads documents from the "data" directory, runs the UIMA
  * RoomNumberAnnotator over them on Spark, and prints how many room numbers
  * were found per building.
  */
object App {
  def main(args: Array[String]) = {
    val context = new SparkContext(
      args(0),
      "App",
      System.getenv("SPARK_HOME"),
      System.getenv("SPARK_CLASSPATH").split(":"))
    // Materialize the type system description (side effect of UIMA setup).
    val typeSystem = TypeSystemDescriptionFactory.createTypeSystemDescription()
    val readerParams = Seq(FileSystemCollectionReader.PARAM_INPUTDIR, "data")
    val documents = makeRDD(createReader(classOf[FileSystemCollectionReader], readerParams: _*), context)
    val annotator = createEngineDescription(classOf[RoomNumberAnnotator])
    val roomNumbers = documents
      .map(process(_, annotator))
      .flatMap(cas => JCasUtil.select(cas.jcas, classOf[RoomNumber]))
    val buildingCounts = roomNumbers
      .map(_.getBuilding())
      .map(building => (building, 1))
      .reduceByKey(_ + _)
    buildingCounts.foreach(println(_))
  }
}
| oaqa/suim | suim-examples/src/main/scala/edu/cmu/lti/suim/examples/App.scala | Scala | apache-2.0 | 2,051 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.io._
import scala.collection.mutable.HashSet
import scala.reflect._
import org.scalatest.BeforeAndAfter
import com.google.common.base.Charsets.UTF_8
import com.google.common.io.Files
import org.apache.spark.{Logging, SparkConf, SparkFunSuite}
import org.apache.spark.util.logging.{RollingFileAppender, SizeBasedRollingPolicy, TimeBasedRollingPolicy, FileAppender}
class FileAppenderSuite extends SparkFunSuite with BeforeAndAfter with Logging {
val testFile = new File(Utils.createTempDir(), "FileAppenderSuite-test").getAbsoluteFile
// Start and end each test with a clean directory so rolled-over files
// from one test cannot leak into the next.
before {
  cleanup()
}

after {
  cleanup()
}
// Pipes a known string through a plain FileAppender and checks the file
// content matches exactly once the input stream is drained.
test("basic file appender") {
  val testString = (1 to 1000).mkString(", ")
  val inputStream = new ByteArrayInputStream(testString.getBytes(UTF_8))
  val appender = new FileAppender(inputStream, testFile)
  inputStream.close()
  // awaitTermination blocks until the appender has flushed everything.
  appender.awaitTermination()
  assert(Files.toString(testFile, UTF_8) === testString)
}
// Rolls the log every 100 ms for ~1 s and lets testRolling verify that the
// concatenation of all rolled-over files equals the appended text.
test("rolling file appender - time-based rolling") {
  // setup input stream and appender
  val testOutputStream = new PipedOutputStream()
  val testInputStream = new PipedInputStream(testOutputStream, 100 * 1000)
  val rolloverIntervalMillis = 100
  val durationMillis = 1000
  val numRollovers = durationMillis / rolloverIntervalMillis
  val textToAppend = (1 to numRollovers).map( _.toString * 10 )
  // Sub-second suffix pattern so consecutive rollovers get distinct names.
  val appender = new RollingFileAppender(testInputStream, testFile,
    new TimeBasedRollingPolicy(rolloverIntervalMillis, s"--HH-mm-ss-SSSS", false),
    new SparkConf(), 10)
  testRolling(appender, testOutputStream, textToAppend, rolloverIntervalMillis)
}
// Rolls the log once 1000 bytes are written, then checks both content
// integrity (via testRolling) and that no rolled file exceeds the limit.
test("rolling file appender - size-based rolling") {
  // setup input stream and appender
  val testOutputStream = new PipedOutputStream()
  val testInputStream = new PipedInputStream(testOutputStream, 100 * 1000)
  val rolloverSize = 1000
  val textToAppend = (1 to 3).map( _.toString * 1000 )
  val appender = new RollingFileAppender(testInputStream, testFile,
    new SizeBasedRollingPolicy(rolloverSize, false), new SparkConf(), 99)
  val files = testRolling(appender, testOutputStream, textToAppend, 0)
  files.foreach { file =>
    logInfo(file.toString + ": " + file.length + " bytes")
    // Each rolled-over file must respect the configured size cap.
    assert(file.length <= rolloverSize)
  }
}
// With RETAINED_FILES_PROPERTY=10, old rolled-over files must eventually be
// deleted; verify the earliest generated file is gone at the end.
test("rolling file appender - cleaning") {
  // setup input stream and appender
  val testOutputStream = new PipedOutputStream()
  val testInputStream = new PipedInputStream(testOutputStream, 100 * 1000)
  val conf = new SparkConf().set(RollingFileAppender.RETAINED_FILES_PROPERTY, "10")
  val appender = new RollingFileAppender(testInputStream, testFile,
    new SizeBasedRollingPolicy(1000, false), conf, 10)
  // send data to appender through the input stream, and wait for the data to be written
  val allGeneratedFiles = new HashSet[String]()
  val items = (1 to 10).map { _.toString * 10000 }
  for (i <- 0 until items.size) {
    testOutputStream.write(items(i).getBytes(UTF_8))
    testOutputStream.flush()
    // Record every file name seen so far, including ones later deleted.
    allGeneratedFiles ++= RollingFileAppender.getSortedRolledOverFiles(
      testFile.getParentFile.toString, testFile.getName).map(_.toString)
    Thread.sleep(10)
  }
  testOutputStream.close()
  appender.awaitTermination()
  logInfo("Appender closed")
  // verify whether the earliest file has been deleted
  val rolledOverFiles = allGeneratedFiles.filter { _ != testFile.toString }.toArray.sorted
  logInfo(s"All rolled over files generated:${rolledOverFiles.size}\\n" +
    rolledOverFiles.mkString("\\n"))
  assert(rolledOverFiles.size > 2)
  val earliestRolledOverFile = rolledOverFiles.head
  val existingRolledOverFiles = RollingFileAppender.getSortedRolledOverFiles(
    testFile.getParentFile.toString, testFile.getName).map(_.toString)
  logInfo("Existing rolled over files:\\n" + existingRolledOverFiles.mkString("\\n"))
  assert(!existingRolledOverFiles.toSet.contains(earliestRolledOverFile))
}
// Drives FileAppender.apply() with different SparkConf settings and checks
// the resulting appender type and rolling-policy parameter.
test("file appender selection") {
  // Test whether FileAppender.apply() returns the right type of the FileAppender based
  // on SparkConf settings.
  def testAppenderSelection[ExpectedAppender: ClassTag, ExpectedRollingPolicy](
      properties: Seq[(String, String)], expectedRollingPolicyParam: Long = -1): Unit = {
    // Set spark conf properties
    val conf = new SparkConf
    properties.foreach { p =>
      conf.set(p._1, p._2)
    }
    // Create and test file appender
    val testOutputStream = new PipedOutputStream()
    val testInputStream = new PipedInputStream(testOutputStream)
    val appender = FileAppender(testInputStream, testFile, conf)
    // assert(appender.getClass === classTag[ExpectedAppender].getClass)
    assert(appender.getClass.getSimpleName ===
      classTag[ExpectedAppender].runtimeClass.getSimpleName)
    if (appender.isInstanceOf[RollingFileAppender]) {
      // Check the policy's interval (time-based) or size (size-based).
      val rollingPolicy = appender.asInstanceOf[RollingFileAppender].rollingPolicy
      val policyParam = if (rollingPolicy.isInstanceOf[TimeBasedRollingPolicy]) {
        rollingPolicy.asInstanceOf[TimeBasedRollingPolicy].rolloverIntervalMillis
      } else {
        rollingPolicy.asInstanceOf[SizeBasedRollingPolicy].rolloverSizeBytes
      }
      assert(policyParam === expectedRollingPolicyParam)
    }
    testOutputStream.close()
    appender.awaitTermination()
  }

  import RollingFileAppender._

  def rollingStrategy(strategy: String): Seq[(String, String)] =
    Seq(STRATEGY_PROPERTY -> strategy)
  def rollingSize(size: String): Seq[(String, String)] = Seq(SIZE_PROPERTY -> size)
  def rollingInterval(interval: String): Seq[(String, String)] =
    Seq(INTERVAL_PROPERTY -> interval)

  val msInDay = 24 * 60 * 60 * 1000L
  val msInHour = 60 * 60 * 1000L
  val msInMinute = 60 * 1000L

  // test no strategy -> no rolling
  testAppenderSelection[FileAppender, Any](Seq.empty)

  // test time based rolling strategy
  testAppenderSelection[RollingFileAppender, Any](rollingStrategy("time"), msInDay)
  testAppenderSelection[RollingFileAppender, TimeBasedRollingPolicy](
    rollingStrategy("time") ++ rollingInterval("daily"), msInDay)
  testAppenderSelection[RollingFileAppender, TimeBasedRollingPolicy](
    rollingStrategy("time") ++ rollingInterval("hourly"), msInHour)
  testAppenderSelection[RollingFileAppender, TimeBasedRollingPolicy](
    rollingStrategy("time") ++ rollingInterval("minutely"), msInMinute)
  testAppenderSelection[RollingFileAppender, TimeBasedRollingPolicy](
    rollingStrategy("time") ++ rollingInterval("123456789"), 123456789 * 1000L)
  // invalid interval string falls back to a non-rolling appender
  testAppenderSelection[FileAppender, Any](
    rollingStrategy("time") ++ rollingInterval("xyz"))

  // test size based rolling strategy
  testAppenderSelection[RollingFileAppender, SizeBasedRollingPolicy](
    rollingStrategy("size") ++ rollingSize("123456789"), 123456789)
  testAppenderSelection[FileAppender, Any](rollingSize("xyz"))

  // test illegal strategy
  testAppenderSelection[FileAppender, Any](rollingStrategy("xyz"))
}
/**
 * Run the rolling file appender with data and see whether all the data was written correctly
 * across rolled over files.
 *
 * @param appender appender under test (already attached to the pipe's read end)
 * @param outputStream write end of the pipe feeding the appender
 * @param textToAppend chunks to write, one per iteration
 * @param sleepTimeBetweenTexts pause between chunks (drives time-based rolling)
 * @return the rolled-over files that were generated, in sorted order
 */
def testRolling(
    appender: FileAppender,
    outputStream: OutputStream,
    textToAppend: Seq[String],
    sleepTimeBetweenTexts: Long
  ): Seq[File] = {
  // send data to appender through the input stream, and wait for the data to be written
  val expectedText = textToAppend.mkString("")
  for (i <- 0 until textToAppend.size) {
    outputStream.write(textToAppend(i).getBytes(UTF_8))
    outputStream.flush()
    Thread.sleep(sleepTimeBetweenTexts)
  }
  logInfo("Data sent to appender")
  outputStream.close()
  appender.awaitTermination()
  logInfo("Appender closed")

  // verify whether all the data written to rolled over files is same as expected
  val generatedFiles = RollingFileAppender.getSortedRolledOverFiles(
    testFile.getParentFile.toString, testFile.getName)
  logInfo("Filtered files: \\n" + generatedFiles.mkString("\\n"))
  assert(generatedFiles.size > 1)
  // Concatenating the rolled files in order must reproduce the input.
  val allText = generatedFiles.map { file =>
    Files.toString(file, UTF_8)
  }.mkString("")
  assert(allText === expectedText)
  generatedFiles
}
/** Delete every file generated by the appender (rolled-over or not) so
  * each test starts from an empty directory.
  */
def cleanup() {
  for {
    file <- testFile.getParentFile.listFiles
    if file.getName.startsWith(testFile.getName)
  } file.delete()
}
}
| ArvinDevel/onlineAggregationOnSparkV2 | core/src/test/scala/org/apache/spark/util/FileAppenderSuite.scala | Scala | apache-2.0 | 9,398 |
/*
* Shadowsocks - A shadowsocks client for Android
* Copyright (C) 2014 <max.c.lv@gmail.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
* ___====-_ _-====___
* _--^^^#####// \\\\#####^^^--_
* _-^##########// ( ) \\\\##########^-_
* -############// |\\^^/| \\\\############-
* _/############// (@::@) \\\\############\\_
* /#############(( \\\\// ))#############\\
* -###############\\\\ (oo) //###############-
* -#################\\\\ / VV \\ //#################-
* -###################\\\\/ \\//###################-
* _#/|##########/\\######( /\\ )######/\\##########|\\#_
* |/ |#/\\#/\\#/\\/ \\#/\\##\\ | | /##/\\#/ \\/\\#/\\#/\\#| \\|
* ` |/ V V ` V \\#\\| | | |/#/ V ' V V \\| '
* ` ` ` ` / | | | | \\ ' ' ' '
* ( | | | | )
* __\\ | | | | /__
* (vvv(VVV)(VVV)vvv)
*
* HERE BE DRAGONS
*
*/
package com.github.shadowsocks
import java.io.File
import java.net.{Inet6Address, InetAddress}
import java.util.Locale
import java.lang.{Process, ProcessBuilder}
import android.app._
import android.content._
import android.content.pm.{PackageInfo, PackageManager}
import android.net.{ConnectivityManager, Network}
import android.os._
import android.support.v4.app.NotificationCompat
import android.support.v4.content.ContextCompat
import android.util.{Log, SparseArray}
import android.widget.Toast
import com.github.shadowsocks.aidl.Config
import com.github.shadowsocks.utils._
import scala.collection._
import scala.collection.JavaConversions._
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
class ShadowsocksNatService extends Service with BaseService {
val TAG = "ShadowsocksNatService"
val CMD_IPTABLES_RETURN = " -t nat -A OUTPUT -p tcp -d 0.0.0.0 -j RETURN"
val CMD_IPTABLES_DNAT_ADD_SOCKS = " -t nat -A OUTPUT -p tcp " +
"-j DNAT --to-destination 127.0.0.1:8123"
var lockReceiver: BroadcastReceiver = null
var closeReceiver: BroadcastReceiver = null
var connReceiver: BroadcastReceiver = null
var apps: Array[ProxiedApp] = null
val myUid = android.os.Process.myUid()
var sslocalProcess: Process = null
var sstunnelProcess: Process = null
var redsocksProcess: Process = null
var pdnsdProcess: Process = null
private val dnsAddressCache = new SparseArray[String]
// Read Android's hidden Network.netId field reflectively — there is no
// public accessor on the API levels this code targets.
// NOTE(review): assumes the field keeps the name "netId"; verify on new
// OS releases.
def getNetId(network: Network): Int = {
  network.getClass.getDeclaredField("netId").get(network).asInstanceOf[Int]
}
/** Restore the DNS servers saved in `dnsAddressCache` for every network,
  * then clear the cache. Commands are batched into one root invocation.
  */
def restoreDnsForAllNetwork() {
  val manager = getSystemService(Context.CONNECTIVITY_SERVICE).asInstanceOf[ConnectivityManager]
  val networks = manager.getAllNetworks
  val cmdBuf = new ArrayBuffer[String]()
  networks.foreach(network => {
    val netId = getNetId(network)
    val oldDns = dnsAddressCache.get(netId)
    // Only networks we previously overrode have a cache entry.
    if (oldDns != null) {
      cmdBuf.append("ndc resolver setnetdns %d \\"\\" %s".formatLocal(Locale.ENGLISH, netId, oldDns))
      dnsAddressCache.remove(netId)
    }
  })
  if (cmdBuf.nonEmpty) Console.runRootCommand(cmdBuf.toArray)
}
/** Point the resolver of every connected network at `dns`, remembering each
  * network's previous DNS servers in `dnsAddressCache` so they can be
  * restored later. All ndc commands are batched into one root invocation.
  */
def setDnsForAllNetwork(dns: String) {
  val manager = getSystemService(Context.CONNECTIVITY_SERVICE).asInstanceOf[ConnectivityManager]
  val networks = manager.getAllNetworks
  if (networks == null) return
  val cmdBuf = new ArrayBuffer[String]()
  networks.foreach(network => {
    val networkInfo = manager.getNetworkInfo(network)
    // Skip just this network when its info is unavailable. The original
    // used a non-local `return` here, which aborted the whole method on
    // the first null info and silently dropped all queued commands.
    if (networkInfo != null && networkInfo.isConnected) {
      val netId = getNetId(network)
      val curDnsList = manager.getLinkProperties(network).getDnsServers
      if (curDnsList != null) {
        import scala.collection.JavaConverters._
        val curDns = curDnsList.asScala.map(ip => ip.getHostAddress).mkString(" ")
        // Only override (and cache) when the network is not already on `dns`.
        if (curDns != dns) {
          dnsAddressCache.put(netId, curDns)
          cmdBuf.append("ndc resolver setnetdns %d \\"\\" %s".formatLocal(Locale.ENGLISH, netId, dns))
        }
      }
    }
  })
  if (cmdBuf.nonEmpty) Console.runRootCommand(cmdBuf.toArray)
}
/** Redirect every connected network's DNS to the local forwarder. */
def setupDns() {
  setDnsForAllNetwork("127.0.0.1")
}

/** Undo [[setupDns]] by restoring the cached per-network DNS servers. */
def resetDns() = {
  restoreDnsForAllNetwork()
}
/** Ask netd to flush the DNS cache: per-network on Lollipop and above,
  * interface-wide (`default` and `wlan0`) on older releases.
  */
def flushDns() {
  if (Utils.isLollipopOrAbove) {
    val manager = getSystemService(Context.CONNECTIVITY_SERVICE).asInstanceOf[ConnectivityManager]
    val networks = manager.getAllNetworks
    val cmdBuf = new ArrayBuffer[String]()
    networks.foreach(network => {
      val networkInfo = manager.getNetworkInfo(network)
      if (networkInfo.isAvailable) {
        // Reuse the shared reflective accessor instead of duplicating the
        // getDeclaredField("netId") reflection inline.
        val netId = getNetId(network)
        cmdBuf.append("ndc resolver flushnet %d".formatLocal(Locale.ENGLISH, netId))
      }
    })
    Console.runRootCommand(cmdBuf.toArray)
  } else {
    Console.runRootCommand(Array("ndc resolver flushdefaultif", "ndc resolver flushif wlan0"))
  }
}
/** Unregister the connectivity receiver (if any) and restore DNS. */
def destroyConnectionReceiver() {
  if (connReceiver != null) {
    unregisterReceiver(connReceiver)
    connReceiver = null
  }
  resetDns()
}

/** Re-apply the local DNS override whenever connectivity changes. */
def initConnectionReceiver() {
  val filter = new IntentFilter(ConnectivityManager.CONNECTIVITY_ACTION)
  connReceiver = (context: Context, intent: Intent) => setupDns()
  registerReceiver(connReceiver, filter)
}
/** Write the ss-local config (and optional ACL file) and launch the
  * ss-local daemon that provides the local SOCKS proxy.
  */
def startShadowsocksDaemon() {
  if (config.route != Route.ALL) {
    // NOTE(review): this match only covers BYPASS_LAN and BYPASS_CHN; any
    // other non-ALL route value would throw a MatchError — confirm Route
    // has no further variants.
    val acl: Array[String] = config.route match {
      case Route.BYPASS_LAN => getResources.getStringArray(R.array.private_route)
      case Route.BYPASS_CHN => getResources.getStringArray(R.array.chn_route_full)
    }
    ConfigUtils.printToFile(new File(Path.BASE + "acl.list"))(p => {
      acl.foreach(item => p.println(item))
    })
  }

  val conf = ConfigUtils
    .SHADOWSOCKS.formatLocal(Locale.ENGLISH, config.proxy, config.remotePort, config.localPort,
      config.sitekey, config.encMethod, 600)
  ConfigUtils.printToFile(new File(Path.BASE + "ss-local-nat.conf"))(p => {
    p.println(conf)
  })
  val cmd = new ArrayBuffer[String]
  cmd += (Path.BASE + "ss-local"
        , "-b" , "127.0.0.1"
        , "-t" , "600"
        , "-c" , Path.BASE + "ss-local-nat.conf")
  if (config.isAuth) cmd += "-A"
  if (config.route != Route.ALL) {
    cmd += "--acl"
    cmd += (Path.BASE + "acl.list")
  }
  if (BuildConfig.DEBUG) Log.d(TAG, cmd.mkString(" "))

  sslocalProcess = new ProcessBuilder()
    .command(cmd)
    .redirectErrorStream(false)
    .start()
}
/** Launch ss-tunnel to forward local DNS queries to 8.8.8.8:53 through the
  * proxy. Listens on 8153 in UDP-DNS mode and 8163 otherwise (where pdnsd
  * fronts it); the two original branches were identical apart from the port,
  * so they are collapsed here.
  */
def startTunnel() {
  // The only difference between the two DNS modes is the local port.
  val port = if (config.isUdpDns) 8153 else 8163
  val conf = ConfigUtils
    .SHADOWSOCKS.formatLocal(Locale.ENGLISH, config.proxy, config.remotePort, port,
      config.sitekey, config.encMethod, 10)
  ConfigUtils.printToFile(new File(Path.BASE + "ss-tunnel-nat.conf"))(p => {
    p.println(conf)
  })
  val cmd = new ArrayBuffer[String]
  cmd += (Path.BASE + "ss-tunnel"
        , "-u"
        , "-t" , "10"
        , "-b" , "127.0.0.1"
        , "-l" , port.toString
        , "-L" , "8.8.8.8:53"
        , "-c" , Path.BASE + "ss-tunnel-nat.conf")
  if (config.isAuth) cmd += "-A"
  if (BuildConfig.DEBUG) Log.d(TAG, cmd.mkString(" "))
  sstunnelProcess = new ProcessBuilder()
    .command(cmd)
    .redirectErrorStream(false)
    .start()
}
/** Write the pdnsd config and launch the pdnsd caching DNS daemon on 8153,
  * forwarding upstream queries to the ss-tunnel listener on 8163.
  */
def startDnsDaemon() {
  // BYPASS_CHN gets a direct-resolution config with reject/black lists;
  // all other routes resolve everything through the tunnel.
  val conf = if (config.route == Route.BYPASS_CHN) {
    val reject = ConfigUtils.getRejectList(getContext)
    val blackList = ConfigUtils.getBlackList(getContext)
    ConfigUtils.PDNSD_DIRECT.formatLocal(Locale.ENGLISH, "127.0.0.1", 8153,
      reject, blackList, 8163, "")
  } else {
    ConfigUtils.PDNSD_LOCAL.formatLocal(Locale.ENGLISH, "127.0.0.1", 8153,
      8163, "")
  }

  ConfigUtils.printToFile(new File(Path.BASE + "pdnsd-nat.conf"))(p => {
     p.println(conf)
  })
  val cmd = Path.BASE + "pdnsd -c " + Path.BASE + "pdnsd-nat.conf"

  if (BuildConfig.DEBUG) Log.d(TAG, cmd)

  pdnsdProcess = new ProcessBuilder()
    .command(cmd.split(" ").toSeq)
    .redirectErrorStream(false)
    .start()
}
/** Version name from this package's manifest, or a fallback string when the
  * package cannot be resolved.
  */
def getVersionName: String =
  try {
    getPackageManager.getPackageInfo(getPackageName, 0).versionName
  } catch {
    case _: PackageManager.NameNotFoundException => "Package name not found"
  }
/** Write redsocks-nat.conf and start the redsocks transparent-proxy daemon
  * pointed at the local SOCKS port.
  */
def startRedsocksDaemon() {
  val conf = ConfigUtils.REDSOCKS.formatLocal(Locale.ENGLISH, config.localPort)
  // The template has exactly one %s placeholder; the original passed
  // Path.BASE twice and the second argument was silently ignored by the
  // formatter.
  val cmd = Path.BASE + "redsocks -c %sredsocks-nat.conf"
    .formatLocal(Locale.ENGLISH, Path.BASE)
  ConfigUtils.printToFile(new File(Path.BASE + "redsocks-nat.conf"))(p => {
    p.println(conf)
  })
  if (BuildConfig.DEBUG) Log.d(TAG, cmd)
  redsocksProcess = new ProcessBuilder()
    .command(cmd.split(" ").toSeq)
    .redirectErrorStream(false)
    .start()
}
/** Start every helper daemon (tunnel, optional pdnsd, redsocks, ss-local)
  * and install the NAT iptables rules. Always returns true; failures in
  * the daemons themselves surface later.
  */
def handleConnection: Boolean = {
  startTunnel()
  if (!config.isUdpDns) startDnsDaemon()
  startRedsocksDaemon()
  startShadowsocksDaemon()
  setupIptables()
  true
}
/** Put the service in the foreground with a notification that opens the
  * main activity on tap and offers a "stop" action.
  *
  * @param title   ticker text shown when the notification first appears
  * @param info    content line (typically the running profile name)
  * @param visible if false the notification is demoted to PRIORITY_MIN
  *                (used while the screen is off)
  */
def notifyForegroundAlert(title: String, info: String, visible: Boolean) {
  val openIntent = new Intent(this, classOf[Shadowsocks])
  openIntent.setFlags(Intent.FLAG_ACTIVITY_REORDER_TO_FRONT)
  val contentIntent = PendingIntent.getActivity(this, 0, openIntent, 0)
  val closeIntent = new Intent(Action.CLOSE)
  val actionIntent = PendingIntent.getBroadcast(this, 0, closeIntent, 0)
  val builder = new NotificationCompat.Builder(this)

  builder
    .setWhen(0)
    .setColor(ContextCompat.getColor(this, R.color.material_accent_500))
    .setTicker(title)
    .setContentTitle(getString(R.string.app_name))
    .setContentText(info)
    .setContentIntent(contentIntent)
    .setSmallIcon(R.drawable.ic_stat_shadowsocks)
    .addAction(android.R.drawable.ic_menu_close_clear_cancel, getString(R.string.stop),
      actionIntent)

  if (visible)
    builder.setPriority(NotificationCompat.PRIORITY_DEFAULT)
  else
    builder.setPriority(NotificationCompat.PRIORITY_MIN)

  startForeground(1, builder.build)
}
/** Expose the service binder only for the dedicated SERVICE action. */
def onBind(intent: Intent): IBinder = {
  Log.d(TAG, "onBind")
  if (Action.SERVICE == intent.getAction) binder else null
}
// Reload configuration state before the service handles any command.
override def onCreate() {
  super.onCreate()
  ConfigUtils.refresh(this)
}
/** Destroy every helper daemon that is running, null out its handle, and
  * flush the NAT OUTPUT chain so no stale redirect rules remain.
  */
def killProcesses() {
  if (sslocalProcess != null) {
    sslocalProcess.destroy()
    sslocalProcess = null
  }
  if (sstunnelProcess != null) {
    sstunnelProcess.destroy()
    sstunnelProcess = null
  }
  if (redsocksProcess != null) {
    redsocksProcess.destroy()
    redsocksProcess = null
  }
  if (pdnsdProcess != null) {
    pdnsdProcess.destroy()
    pdnsdProcess = null
  }
  Console.runRootCommand(Utils.getIptables + " -t nat -F OUTPUT")
}
/** Install the NAT redirect rules: bypass traffic to the proxy server,
  * loopback and this app's own UID; redirect DNS (udp/53) to the local
  * resolver; and DNAT TCP either globally or per selected app UID.
  */
def setupIptables() = {
  val init_sb = new ArrayBuffer[String]
  val http_sb = new ArrayBuffer[String]

  init_sb.append("ulimit -n 4096")
  init_sb.append(Utils.getIptables + " -t nat -F OUTPUT")

  val cmd_bypass = Utils.getIptables + CMD_IPTABLES_RETURN
  // IPv6 proxy addresses cannot appear in an IPv4 nat rule, so only add
  // the server-bypass rule for IPv4 destinations.
  // NOTE(review): toUpperCase on the proxy host looks cosmetic — confirm
  // it is intentional before removing.
  if (!InetAddress.getByName(config.proxy.toUpperCase).isInstanceOf[Inet6Address]) {
    init_sb.append(cmd_bypass.replace("-p tcp -d 0.0.0.0", "-d " + config.proxy))
  }
  init_sb.append(cmd_bypass.replace("-p tcp -d 0.0.0.0", "-d 127.0.0.1"))
  init_sb.append(cmd_bypass.replace("-p tcp -d 0.0.0.0", "-m owner --uid-owner " + myUid))
  init_sb.append(cmd_bypass.replace("-d 0.0.0.0", "--dport 53"))

  // Redirect all UDP DNS traffic to the local resolver on 8153.
  init_sb.append(Utils.getIptables
    + " -t nat -A OUTPUT -p udp --dport 53 -j DNAT --to-destination 127.0.0.1:" + 8153)

  if (!config.isProxyApps || config.isBypassApps) {
    http_sb.append(Utils.getIptables + CMD_IPTABLES_DNAT_ADD_SOCKS)
  }
  if (config.isProxyApps) {
    // Per-app mode: collect the UIDs of the selected apps, then either
    // proxy exactly those UIDs or (bypass mode) exempt them.
    if (apps == null || apps.length <= 0) {
      apps = AppManager.getProxiedApps(this, config.proxiedAppString)
    }
    val uidSet: mutable.HashSet[Int] = new mutable.HashSet[Int]
    for (app <- apps) {
      if (app.proxied) {
        uidSet.add(app.uid)
      }
    }
    for (uid <- uidSet) {
      if (!config.isBypassApps) {
        http_sb.append((Utils.getIptables + CMD_IPTABLES_DNAT_ADD_SOCKS).replace("-t nat", "-t nat -m owner --uid-owner " + uid))
      } else {
        init_sb.append(cmd_bypass.replace("-d 0.0.0.0", "-m owner --uid-owner " + uid))
      }
    }
  }
  Console.runRootCommand(init_sb.toArray)
  Console.runRootCommand(http_sb.toArray)
}
/** Bring the NAT service up: register receivers, resolve the proxy host,
  * start the daemons on a background future, and publish state changes.
  */
override def startRunner(config: Config) {

  super.startRunner(config)

  // register close receiver
  val filter = new IntentFilter()
  filter.addAction(Intent.ACTION_SHUTDOWN)
  filter.addAction(Action.CLOSE)
  closeReceiver = (context: Context, intent: Intent) => {
    Toast.makeText(context, R.string.stopping, Toast.LENGTH_SHORT).show()
    stopRunner()
  }
  registerReceiver(closeReceiver, filter)

  // On Lollipop+, track screen/lock state so the foreground notification
  // can be demoted while the screen is off.
  if (Utils.isLollipopOrAbove) {
    val screenFilter = new IntentFilter()
    screenFilter.addAction(Intent.ACTION_SCREEN_ON)
    screenFilter.addAction(Intent.ACTION_SCREEN_OFF)
    screenFilter.addAction(Intent.ACTION_USER_PRESENT)
    lockReceiver = (context: Context, intent: Intent) => if (getState == State.CONNECTED) {
      val action = intent.getAction
      if (action == Intent.ACTION_SCREEN_OFF) {
        notifyForegroundAlert(getString(R.string.forward_success),
          getString(R.string.service_running).formatLocal(Locale.ENGLISH, config.profileName), false)
      } else if (action == Intent.ACTION_SCREEN_ON) {
        val keyGuard = getSystemService(Context.KEYGUARD_SERVICE).asInstanceOf[KeyguardManager]
        if (!keyGuard.inKeyguardRestrictedInputMode) {
          notifyForegroundAlert(getString(R.string.forward_success),
            getString(R.string.service_running).formatLocal(Locale.ENGLISH, config.profileName), true)
        }
      } else if (action == Intent.ACTION_USER_PRESENT) {
        notifyForegroundAlert(getString(R.string.forward_success),
          getString(R.string.service_running).formatLocal(Locale.ENGLISH, config.profileName), true)
      }
    }
    registerReceiver(lockReceiver, screenFilter)
  }

  ShadowsocksApplication.track(TAG, "start")

  changeState(State.CONNECTING)

  // The actual startup runs off the main thread.
  Future {
    // NOTE(review): this literal address appears to select a "public
    // config" path — the intent is not visible from this file; confirm.
    if (config.proxy == "198.199.101.152") {
      val holder = ShadowsocksApplication.containerHolder
      try {
        this.config = ConfigUtils.getPublicConfig(getBaseContext, holder.getContainer, config)
      } catch {
        case ex: Exception =>
          changeState(State.STOPPED, getString(R.string.service_failed))
          stopRunner()
          this.config = null
      }
    }

    if (this.config != null) {

      // Clean up
      killProcesses()

      // Resolve the proxy hostname to an address before starting daemons.
      var resolved: Boolean = false
      if (!Utils.isNumeric(config.proxy)) {
        Utils.resolve(config.proxy, enableIPv6 = true) match {
          case Some(a) =>
            config.proxy = a
            resolved = true
          case None => resolved = false
        }
      } else {
        resolved = true
      }

      if (resolved && handleConnection) {
        // Set DNS
        flushDns()

        notifyForegroundAlert(getString(R.string.forward_success),
          getString(R.string.service_running).formatLocal(Locale.ENGLISH, config.profileName), true)
        changeState(State.CONNECTED)
      } else {
        changeState(State.STOPPED, getString(R.string.service_failed))
        stopRunner()
      }
    }
  }
}
/** Tear the service down: unregister receivers, kill the daemons, drop the
  * foreground notification, and publish the STOPPED state.
  */
override def stopRunner() {

  super.stopRunner()

  // change the state
  changeState(State.STOPPING)

  // clean up receivers
  if (closeReceiver != null) {
    unregisterReceiver(closeReceiver)
    closeReceiver = null
  }
  if (Utils.isLollipopOrAbove) {
    if (lockReceiver != null) {
      unregisterReceiver(lockReceiver)
      lockReceiver = null
    }
  }

  ShadowsocksApplication.track(TAG, "stop")

  // reset NAT (also flushes the iptables OUTPUT chain)
  killProcesses()

  // stop the service if no callback registered
  if (getCallbackCount == 0) {
    stopSelf()
  }

  stopForeground(true)

  // change the state
  changeState(State.STOPPED)
}
/** No extra background work to keep — just terminate the service. */
override def stopBackgroundService() {
  stopSelf()
}

override def getTag = TAG
// This implementation proxies via NAT/iptables (vs. the VPN service).
override def getServiceMode = Mode.NAT
override def getContext = getBaseContext
}
| tenwx/shadowsocks-android | src/main/scala/com/github/shadowsocks/ShadowsocksNatService.scala | Scala | gpl-3.0 | 18,320 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.plans.logical.statsEstimation
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeMap}
import org.apache.spark.sql.catalyst.plans.logical.{ColumnStat, Statistics, Union}
import org.apache.spark.sql.types._
/**
* Estimate the number of output rows by doing the sum of output rows for each child of union,
* and estimate min and max stats for each column by finding the overall min and max of that
* column coming from its children.
*/
object UnionEstimation {
  import EstimationUtils._

  // Returns a less-than comparator for the given type, used to combine
  // per-child min/max column statistics.
  private def createStatComparator(dt: DataType): (Any, Any) => Boolean = dt match {
    case ByteType => (a: Any, b: Any) =>
      ByteType.ordering.lt(a.asInstanceOf[Byte], b.asInstanceOf[Byte])
    case ShortType => (a: Any, b: Any) =>
      ShortType.ordering.lt(a.asInstanceOf[Short], b.asInstanceOf[Short])
    case IntegerType => (a: Any, b: Any) =>
      IntegerType.ordering.lt(a.asInstanceOf[Int], b.asInstanceOf[Int])
    case LongType => (a: Any, b: Any) =>
      LongType.ordering.lt(a.asInstanceOf[Long], b.asInstanceOf[Long])
    case FloatType => (a: Any, b: Any) =>
      FloatType.ordering.lt(a.asInstanceOf[Float], b.asInstanceOf[Float])
    case DoubleType => (a: Any, b: Any) =>
      DoubleType.ordering.lt(a.asInstanceOf[Double], b.asInstanceOf[Double])
    case _: DecimalType => (a: Any, b: Any) =>
      dt.asInstanceOf[DecimalType].ordering.lt(a.asInstanceOf[Decimal], b.asInstanceOf[Decimal])
    case DateType => (a: Any, b: Any) =>
      DateType.ordering.lt(a.asInstanceOf[DateType.InternalType],
        b.asInstanceOf[DateType.InternalType])
    case TimestampType => (a: Any, b: Any) =>
      TimestampType.ordering.lt(a.asInstanceOf[TimestampType.InternalType],
        b.asInstanceOf[TimestampType.InternalType])
    case _ =>
      throw new IllegalStateException(s"Unsupported data type: ${dt.catalogString}")
  }

  // Must stay in sync with the cases handled by createStatComparator.
  private def isTypeSupported(dt: DataType): Boolean = dt match {
    case ByteType | IntegerType | ShortType | FloatType | LongType |
         DoubleType | DateType | _: DecimalType | TimestampType => true
    case _ => false
  }

  /** Estimate Union statistics: total size, summed row count (only when
    * every child has one), and per-column min/max folded over the children
    * (only for columns where every child has min/max stats of a supported
    * type).
    */
  def estimate(union: Union): Option[Statistics] = {
    val sizeInBytes = union.children.map(_.stats.sizeInBytes).sum
    val outputRows = if (rowCountsExist(union.children: _*)) {
      Some(union.children.map(_.stats.rowCount.get).sum)
    } else {
      None
    }

    // Transpose child outputs so index i gathers the i-th attribute of
    // every child; keep only columns eligible for min/max estimation.
    val unionOutput = union.output
    val attrToComputeMinMaxStats = union.children.map(_.output).transpose.zipWithIndex.filter {
      case (attrs, outputIndex) => isTypeSupported(unionOutput(outputIndex).dataType) &&
        // checks if all the children has min/max stats for an attribute
        attrs.zipWithIndex.forall {
          case (attr, childIndex) =>
            val attrStats = union.children(childIndex).stats.attributeStats
            attrStats.get(attr).isDefined && attrStats(attr).hasMinMaxStats
        }
    }

    val newAttrStats = if (attrToComputeMinMaxStats.nonEmpty) {
      val outputAttrStats = new ArrayBuffer[(Attribute, ColumnStat)]()
      attrToComputeMinMaxStats.foreach {
        case (attrs, outputIndex) =>
          val dataType = unionOutput(outputIndex).dataType
          val statComparator = createStatComparator(dataType)
          // Fold the children's (min, max) pairs into an overall pair.
          val minMaxValue = attrs.zipWithIndex.foldLeft[(Option[Any], Option[Any])]((None, None)) {
            case ((minVal, maxVal), (attr, childIndex)) =>
              val colStat = union.children(childIndex).stats.attributeStats(attr)
              val min = if (minVal.isEmpty || statComparator(colStat.min.get, minVal.get)) {
                colStat.min
              } else {
                minVal
              }
              val max = if (maxVal.isEmpty || statComparator(maxVal.get, colStat.max.get)) {
                colStat.max
              } else {
                maxVal
              }
              (min, max)
          }
          val newStat = ColumnStat(min = minMaxValue._1, max = minMaxValue._2)
          outputAttrStats += unionOutput(outputIndex) -> newStat
      }
      AttributeMap(outputAttrStats.toSeq)
    } else {
      AttributeMap.empty[ColumnStat]
    }

    Some(
      Statistics(
        sizeInBytes = sizeInBytes,
        rowCount = outputRows,
        attributeStats = newAttrStats))
  }
}
| BryanCutler/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statsEstimation/UnionEstimation.scala | Scala | apache-2.0 | 5,184 |
package spire.std
import spire.algebra.{Field, IsRational, NRoot, Order, Signed, Trig}
import spire.math.Rational
import java.lang.Math
import java.lang.Integer.{ numberOfTrailingZeros, numberOfLeadingZeros }
import java.lang.Float.{ intBitsToFloat, floatToIntBits }
import scala.annotation.tailrec
/** Field instance for Float. Note: gcd operates on the IEEE-754 bit
  * representation, so statement order and bit masks below are load-bearing.
  */
trait FloatIsField extends Field[Float] {
  override def minus(a:Float, b:Float): Float = a - b
  def negate(a:Float): Float = -a
  def one: Float = 1.0F
  def plus(a:Float, b:Float): Float = a + b
  override def pow(a:Float, b:Int): Float = Math.pow(a, b).toFloat
  override def times(a:Float, b:Float): Float = a * b
  def zero: Float = 0.0F

  override def fromInt(n: Int): Float = n

  // Truncating quotient: (a - a%b)/b, paired with mod below.
  def quot(a:Float, b:Float): Float = (a - (a % b)) / b
  def mod(a:Float, b:Float): Float = a % b

  final def gcd(a:Float, b:Float):Float = {
    // Mantissa with the implicit leading 1 restored (bit 23 set).
    def value(bits: Int): Int = bits & 0x007FFFFF | 0x00800000

    // Biased 8-bit exponent field.
    def exp(bits: Int): Int = ((bits >> 23) & 0xFF).toInt

    // Computes the GCD of 2 fp values. Here, we are guaranteed that exp0 < exp1.
    def gcd0(val0: Int, exp0: Int, val1: Int, exp1: Int): Float = {
      val tz0 = numberOfTrailingZeros(val0)
      val tz1 = numberOfTrailingZeros(val1)
      val tzShared = spire.math.min(tz0, tz1 + exp1 - exp0)
      // We trim of the power of 2s, then add back the shared portion.
      val n = spire.math.gcd(val0 >>> tz0, val1 >>> tz1).toInt << tzShared

      // Number of bits to move the leading 1 to bit position 23.
      val shift = numberOfLeadingZeros(n) - 8

      val exp = (exp0 - shift)
      // If exp is 0, then the value is actually just the mantissa * 2^−126,
      // so we need to adjust the *shift* accordingly.
      val shift0 = if (exp == 0) shift - 1 else shift
      val mantissa = (n << shift0) & 0x007FFFFF

      // If exp < 0, then we have underflowed; not much we can do but return 0.
      if (exp < 0) 0F
      else intBitsToFloat((exp << 23) | mantissa)
    }

    // gcd(0, x) == x by convention.
    if (a == 0F) b
    else if (b == 0F) a
    else {
      val aBits = floatToIntBits(a)
      val aVal = value(aBits)
      val aExp = exp(aBits)

      val bBits = floatToIntBits(b)
      val bVal = value(bBits)
      val bExp = exp(bBits)

      // Order arguments so gcd0 sees the smaller exponent first.
      if (aExp < bExp) gcd0(aVal, aExp, bVal, bExp)
      else gcd0(bVal, bExp, aVal, aExp)
    }
  }

  override def fromDouble(n: Double): Float = n.toFloat

  def div(a:Float, b:Float): Float = a / b
}
/** `NRoot[Float]`: roots and powers computed in double precision, then narrowed. */
trait FloatIsNRoot extends NRoot[Float] {
  // k-th root via pow(a, 1/k); note integer k is widened to Double before dividing.
  def nroot(a: Float, k: Int): Float = Math.pow(a, 1 / k.toDouble).toFloat
  override def sqrt(a: Float): Float = Math.sqrt(a).toFloat
  def fpow(a: Float, b: Float): Float = Math.pow(a, b).toFloat
}
/**
 * `Trig[Float]`: all functions delegate to `java.lang.Math` double-precision
 * implementations and narrow the result back to Float.
 */
trait FloatIsTrig extends Trig[Float] {
  def e: Float = Math.E.toFloat
  def pi: Float = Math.PI.toFloat

  def exp(a: Float): Float = Math.exp(a).toFloat
  def expm1(a: Float): Float = Math.expm1(a).toFloat
  def log(a: Float): Float = Math.log(a).toFloat
  def log1p(a: Float): Float = Math.log1p(a).toFloat

  def sin(a: Float): Float = Math.sin(a.toDouble).toFloat
  def cos(a: Float): Float = Math.cos(a.toDouble).toFloat
  def tan(a: Float): Float = Math.tan(a.toDouble).toFloat

  def asin(a: Float): Float = Math.asin(a.toDouble).toFloat
  def acos(a: Float): Float = Math.acos(a.toDouble).toFloat
  def atan(a: Float): Float = Math.atan(a.toDouble).toFloat
  def atan2(y: Float, x: Float): Float = Math.atan2(y.toDouble, x.toDouble).toFloat

  def sinh(x: Float): Float = Math.sinh(x.toDouble).toFloat
  def cosh(x: Float): Float = Math.cosh(x.toDouble).toFloat
  def tanh(x: Float): Float = Math.tanh(x.toDouble).toFloat

  // Conversions use this trait's Float approximation of pi, not Math.toRadians.
  def toRadians(a: Float): Float = (a * 2 * pi) / 360
  def toDegrees(a: Float): Float = (a * 360) / (2 * pi)
}
/** `Signed[Float]`: sign and absolute value. */
trait FloatIsSigned extends Signed[Float] {
  // Math.signum yields -1.0f, 0.0f or 1.0f; truncating to Int preserves the sign.
  def signum(a: Float): Int = Math.signum(a).toInt
  // Note: abs(-0.0f) returns -0.0f, because -0.0f < 0.0f is false.
  def abs(a: Float): Float = if (a < 0.0f) -a else a
}
/**
 * `Order[Float]` based on primitive comparisons.
 *
 * Note the asymmetry: `eqv`/comparison operators use primitive Float
 * comparisons (so NaN is not eqv to itself and all NaN comparisons are
 * false), while `compare` delegates to `java.lang.Float.compare`, which
 * imposes a total order in which NaN sorts above all other values.
 */
trait FloatOrder extends Order[Float] {
  override def eqv(x: Float, y: Float): Boolean = x == y
  override def neqv(x: Float, y: Float): Boolean = x != y
  override def gt(x: Float, y: Float): Boolean = x > y
  override def gteqv(x: Float, y: Float): Boolean = x >= y
  override def lt(x: Float, y: Float): Boolean = x < y
  override def lteqv(x: Float, y: Float): Boolean = x <= y
  override def min(x: Float, y: Float): Float = Math.min(x, y)
  override def max(x: Float, y: Float): Float = Math.max(x, y)
  def compare(x: Float, y: Float): Int = java.lang.Float.compare(x, y)
}
/**
 * `IsRational[Float]`: rounding, conversion and whole-number tests for Float.
 */
trait FloatIsReal extends IsRational[Float] with FloatOrder with FloatIsSigned {
  def toDouble(x: Float): Double = x.toDouble
  // Bug fix: previously delegated to Math.floor, so e.g. ceil(1.5f) returned 1.0f.
  def ceil(a: Float): Float = Math.ceil(a).toFloat
  def floor(a: Float): Float = Math.floor(a).toFloat
  def round(a: Float): Float = spire.math.round(a)
  // Whole iff the fractional part (computed in double precision) is zero.
  def isWhole(a: Float): Boolean = a % 1.0 == 0.0
  // Exact: every finite Float is a rational number.
  def toRational(a: Float): Rational = Rational(a)
}
/** Concrete, serializable bundle of all the Float algebra instances above. */
@SerialVersionUID(0L)
class FloatAlgebra extends FloatIsField with FloatIsNRoot with FloatIsTrig with FloatIsReal with Serializable
/** Implicit instances for Float, mixed into spire's implicit scope. */
trait FloatInstances {
  implicit final val FloatAlgebra = new FloatAlgebra

  import Float._
  import spire.math.NumberTag._

  // NumberTag exposing Float's built-in special values (zero, min/max, NaN, infinities).
  implicit final val FloatTag = new BuiltinFloatTag(0F, MinValue, MaxValue, NaN, PositiveInfinity, NegativeInfinity) {
    def isInfinite(a: Float): Boolean = java.lang.Float.isInfinite(a)
    def isNaN(a: Float): Boolean = java.lang.Float.isNaN(a)
  }
}
| woparry/spire | core/src/main/scala/spire/std/float.scala | Scala | mit | 5,305 |
import scala.quoted.*
// Negative compiler test: splicing the quoted type `tpe` directly as a value
// type in the first quote is expected to fail typechecking (see the inline
// marker); the second quote is the well-typed result.
object Macro {
  def impl[A: Type](using Quotes): Expr[A] = {
    import quotes.reflect.*
    val tpe = TypeRepr.of[A].asType
    '{ (a: ${tpe}) => ???} // error: tpe.Underlying cannot be used as a value type
    '{???}
  }
}
package jp.co.septeni_original.sbt.dao.generator
import sbt._
/**
 * Key definitions for sbt-dao-generator.
 */
trait SbtDaoGeneratorKeys {
  // Top-level generator task.
  val generator = taskKey[Unit]("generator")

  // JDBC connection settings used to read the database schema.
  val driverClassName = settingKey[String]("driver-class-name")
  val jdbcUrl = settingKey[String]("jdbc-url")
  val jdbcUser = settingKey[String]("jdbc-user")
  val jdbcPassword = settingKey[String]("jdbc-password")
  // Optional schema to restrict introspection to; None means the default schema.
  val schemaName = settingKey[Option[String]]("schema-name")

  // Generation tasks: all tables, a single table, or an explicit set of tables.
  val generateAll = taskKey[Seq[File]]("generate-all")
  val generateOne = inputKey[Seq[File]]("generate-one")
  val generateMany = inputKey[Seq[File]]("generate-many")

  // Directory containing the code templates.
  val templateDirectory = settingKey[File]("template-dir")
  // Maps a table name to the class name(s) to generate for it.
  val classNameMapper = settingKey[String => Seq[String]]("class-name-mapper")
  // Maps a class name to the template file name used to render it.
  val templateNameMapper = settingKey[String => String]("template-name-mapper")
  // Superseded by propertyTypeNameMapper below.
  @deprecated
  val typeNameMapper = settingKey[String => String]("type-mapper")
  // Maps a column's DB type name to the generated property's type name.
  val propertyTypeNameMapper = settingKey[String => String]("property-type-mapper")
  // Predicate selecting which tables participate in generation.
  val tableNameFilter = settingKey[String => Boolean]("table-name-filter")
  // Maps a column name to the generated property name.
  val propertyNameMapper = settingKey[String => String]("property-name-mapper")
  // Maps a class name to the directory its source file is written to.
  val outputDirectoryMapper = settingKey[String => File]("output-directory-mapper")
  val enableManagedClassPath = settingKey[Boolean]("enable-managed-class-path")
}

object SbtDaoGeneratorKeys extends SbtDaoGeneratorKeys
| septeni-original/sbt-dao-generator | src/main/scala/jp/co/septeni_original/sbt/dao/generator/SbtDaoGeneratorKeys.scala | Scala | mit | 1,412 |
package com.lynbrookrobotics.potassium.model.examples
import com.lynbrookrobotics.potassium.ClockMocking
import com.lynbrookrobotics.potassium.model.simulations.SimulatedMotor
import org.scalatest.FunSuite
import squants.Percent
import squants.time.{Milliseconds, Seconds}
import com.lynbrookrobotics.potassium.streams.Stream
/**
 * Behavior of [[SimulatedMotor]] driven by a mocked, manually-ticked clock:
 * the motor publishes its output on each clock tick.
 */
class SimulatedMotorTest extends FunSuite {
  val period = Milliseconds(5)

  test("Simulated motor updates when clock is triggered") {
    implicit val (clock, triggerClock) = ClockMocking.mockedClockTicker
    val motor = new SimulatedMotor(Stream.periodic(period)(()))

    // Sentinel value, overwritten by the first published output.
    var observed = Percent(-10)
    motor.outputStream.foreach(observed = _)

    // One tick: with no command set, the motor publishes its initial output.
    triggerClock(period)
    assert(observed == motor.initialOutput)
  }

  test("Simulated motor publishes value from set method") {
    implicit val (clock, triggerClock) = ClockMocking.mockedClockTicker
    val motor = new SimulatedMotor(Stream.periodic(period)(()))

    var observed = Percent(-10)
    motor.outputStream.foreach(observed = _)

    // Each set(...) becomes visible on the next tick.
    val firstCommand = Percent(50)
    motor.set(firstCommand)
    triggerClock(period)
    assert(observed == firstCommand)

    val secondCommand = Percent(100)
    motor.set(secondCommand)
    triggerClock(period)
    assert(observed == secondCommand)
  }
}
| Team846/potassium | model/src/test/scala/com/lynbrookrobotics/potassium/model/examples/SimulatedMotorTest.scala | Scala | mit | 1,337 |
package bundlepricing
import bundlepricing.NonEmptySetSpec.arbitraryNonEmptySet
import bundlepricing.util.NonEmptySet
import org.scalacheck.{Arbitrary, Gen}
import org.specs2.{ScalaCheck, Specification}
import scalaz.scalacheck.ScalazProperties
import scalaz.std.AllInstances._
import scalaz.syntax.std.list._
/**
 * Properties for [[NonEmptySet]]: scalaz type-class laws (Semigroup, Monad,
 * Foldable1) plus removal, union and conversion behavior.
 */
class NonEmptySetSpec extends Specification with ScalaCheck {
  // specs2 s2 interpolated spec: each line pairs a description with a property.
  def is = s2"""
Semigroup laws ${ ScalazProperties.semigroup.laws[NonEmptySet[Int]] }
Monad laws ${ ScalazProperties.monad.laws[NonEmptySet] }
Foldable1 laws ${ ScalazProperties.foldable1.laws[NonEmptySet] }
removing the only element leaves nothing ${ prop((i: Int) => (NonEmptySet(i) - i).isEmpty) }
removing 2nd elements leaves something ${
prop((i1: Int, i2: Int) =>
(NonEmptySet(i1, i2) - i1).isEmpty === (i1 == i2))
}
removing non-member element leaves something ${
prop((i1: Int, i2: Int) =>
(NonEmptySet(i1) - i2).isEmpty === (i1 == i2))
}
++(NonEmptySet) ${ prop((a: NonEmptySet[Int], b: NonEmptySet[Int]) => (a ++ b).toSet === (a.toSet ++ b.toSet)) }
++(Set) ${ prop((a: NonEmptySet[Int], b: Set[Int]) => (a ++ b).toSet === (a.toSet ++ b)) }
.toNel consistent with .toList.toNel ${ prop((a: NonEmptySet[Int]) => a.toNel === a.toList.toNel.get) }
"""
}
object NonEmptySetSpec {
  /** Generator: one mandatory element plus an arbitrary tail, so the set is never empty. */
  implicit def arbitraryNonEmptySet[A: Arbitrary]: Arbitrary[NonEmptySet[A]] = Arbitrary {
    val gen = Arbitrary.arbitrary[A]
    for {
      head <- gen
      tail <- Gen.listOf(gen)
    } yield tail.foldLeft(NonEmptySet(head))(_ + _)
  }
}
} | refried/bundle-pricing | src/test/scala/bundlepricing/NonEmptySetSpec.scala | Scala | mit | 1,592 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.h2o.sparkling.backend.utils
import ai.h2o.sparkling.H2OConf
import ai.h2o.sparkling.backend.SharedBackendConf
import org.apache.spark.SparkConf
import org.apache.spark.expose.Logging
/**
 * Helpers for running Sparkling Water on Azure Databricks: detects the
 * platform, adjusts H2O client settings to Databricks constraints, and builds
 * the driver-proxy URL for the Flow UI.
 */
object AzureDatabricksUtils extends Logging {
  // This port is exposed in Azure DBC, so Flow stays reachable via the driver proxy.
  private val externalFlowPort = 9009
  private val defaultIncreasedTimeout = 600000

  /** Forces the Flow port to the Databricks-exposed one when still at its default. */
  def setClientWebPort(conf: H2OConf): Int = {
    val usingDefaultPort = conf.clientWebPort == SharedBackendConf.PROP_CLIENT_WEB_PORT._2
    if (usingDefaultPort) {
      logInfo("Overriding client web port to " + externalFlowPort)
      conf.setClientWebPort(externalFlowPort)
    }
    conf.clientWebPort
  }

  /** Raises the client retry timeout when still at its default. */
  def setClientCheckRetryTimeout(conf: H2OConf): Int = {
    val usingDefaultTimeout =
      conf.clientCheckRetryTimeout == SharedBackendConf.PROP_EXTERNAL_CLIENT_RETRY_TIMEOUT._2
    if (usingDefaultTimeout) {
      logInfo("Overriding client check retry timeout to " + defaultIncreasedTimeout)
      conf.setClientCheckRetryTimeout(defaultIncreasedTimeout)
    }
    conf.clientCheckRetryTimeout
  }

  def isRunningOnAzureDatabricks(conf: H2OConf): Boolean =
    isRunningOnAzureDatabricks(conf.sparkConf)

  // Azure Databricks clusters advertise themselves through this Spark property.
  def isRunningOnAzureDatabricks(conf: SparkConf): Boolean =
    conf.getOption("spark.databricks.cloudProvider").contains("Azure")

  /** Driver-proxy-relative URL of the H2O Flow UI for this cluster. */
  def relativeFlowURL(conf: H2OConf): String = {
    val clusterId = conf.get("spark.databricks.clusterUsageTags.clusterId")
    val orgId = conf.get("spark.databricks.clusterUsageTags.clusterOwnerOrgId")
    s"/driver-proxy/o/$orgId/$clusterId/${conf.clientWebPort}/flow/index.html"
  }
}
| h2oai/sparkling-water | core/src/main/scala/ai/h2o/sparkling/backend/utils/AzureDatabricksUtils.scala | Scala | apache-2.0 | 2,316 |
// Copyright 2017 Foursquare Labs Inc. All Rights Reserved.
package io.fsq.rogue.connection.testlib.test
import io.fsq.rogue.connection.MongoIdentifier
import io.fsq.rogue.connection.testlib.RogueMongoTest
import java.util.concurrent.atomic.AtomicReference
import org.junit.{Assert, Before, Test}
object HarnessTest {
  // Logical db name; the RogueMongoTest harness mangles it with a per-test counter suffix.
  val dbName = "test"
  val mongoIdentifier = MongoIdentifier("test")
  // Shared across test methods to check that no two tests observe the same mangled db name.
  private[test] val dbNameCache = new AtomicReference[String]
}
/** Tests ensuring the thread safety of the MongoTest harness. */
class HarnessTest extends RogueMongoTest {

  // Register the same logical db with both the async and the blocking client managers.
  @Before
  override def initClientManagers(): Unit = {
    asyncClientManager.defineDb(
      HarnessTest.mongoIdentifier,
      asyncMongoClient,
      HarnessTest.dbName
    )
    blockingClientManager.defineDb(
      HarnessTest.mongoIdentifier,
      blockingMongoClient,
      HarnessTest.dbName
    )
  }

  /* Ensure async and blocking clients both use the same db. */
  @Test
  def clientConsistencyTest(): Unit = {
    val asyncDbName = asyncClientManager.use(HarnessTest.mongoIdentifier)(_.getName)
    val blockingDbName = blockingClientManager.use(HarnessTest.mongoIdentifier)(_.getName)
    Assert.assertEquals(asyncDbName, blockingDbName)
  }

  /* Ensure expected database name format, foo-<counter>, and that all databases for a
   * given test method share the same counter suffix. */
  @Test
  def databaseNameFormatTest(): Unit = {
    val firstDbName = "first"
    val firstMongoIdentifier = MongoIdentifier(firstDbName)
    val otherDbName = "other"
    val otherMongoIdentifier = MongoIdentifier(otherDbName)

    asyncClientManager.defineDb(
      firstMongoIdentifier,
      asyncMongoClient,
      firstDbName
    )
    asyncClientManager.defineDb(
      otherMongoIdentifier,
      asyncMongoClient,
      otherDbName
    )

    // Extract the counter suffix from the first db's mangled name...
    val mangledFirstDbName = asyncClientManager.use(firstMongoIdentifier)(_.getName)
    val dbId = mangledFirstDbName.split('-') match {
      case Array(`firstDbName`, dbIdString) => dbIdString.toInt
      case _ =>
        throw new IllegalStateException(
          s"Actual database name does not match expected '$firstDbName-<counter>' format: $mangledFirstDbName"
        )
    }

    // ...and verify the second db defined in this test shares the same counter.
    Assert.assertEquals(
      s"$otherDbName-$dbId",
      asyncClientManager.use(otherMongoIdentifier)(_.getName)
    )
  }

  /** getClient and getDatabase must return the same db name. */
  @Test
  def getClientAndGetDatabaseConsistencyTest(): Unit = {
    val (_, getClientDbName) = asyncClientManager.getClientOrThrow(HarnessTest.mongoIdentifier)
    val getDatabaseDbName = asyncClientManager.use(HarnessTest.mongoIdentifier)(_.getName)
    Assert.assertEquals(getClientDbName, getDatabaseDbName)
  }

  /* The way this works is a bit subtle. Essentially, we run two test methods which each
   * race to read and update an atomic reference with the name of their test db and
   * compare it with the previous value. Regardless of the order in which this happens,
   * each method will always find a different db name than the one it's using if the
   * MongoTest db name mangling is working correctly. If not, the second test method to
   * complete the atomic read/update will find the duplicate db name and fail.
   */
  private def unusedDbNameCheck(): Unit = {
    asyncClientManager.use(HarnessTest.mongoIdentifier)(db => {
      // getAndSet makes the read-compare-update race-free across the two test methods.
      Assert.assertNotEquals(HarnessTest.dbNameCache.getAndSet(db.getName), db.getName)
    })
  }

  @Test
  def unusedDbNameTest1(): Unit = unusedDbNameCheck()

  @Test
  def unusedDbNameTest2(): Unit = unusedDbNameCheck()
}
| foursquare/fsqio | test/jvm/io/fsq/rogue/connection/testlib/test/HarnessTest.scala | Scala | apache-2.0 | 3,569 |
package enigma
import org.scalacheck._
import Prop._
object MachineSpec extends Properties("machine") {
  import Rotors._, Reflectors._

  // Machine wired with the identity plugboard, rotors I/II/III all at
  // position 'A', and reflector B.
  val e1 = Machine(
    plugboard = Plugboard(Alphabet.ordered),
    left = I('A'),
    middle = II('A'),
    right = III('A'),
    reflector = Reflectors.B
  )

  property("full scramble") = secure {
    // println(">> "+ Machine.forward('K').gogogo)
    // println("{{{ " + Machine.rotorL.asGetter)
    // NOTE(review): this property only prints the scrambled character and then
    // returns true unconditionally — it asserts nothing. Consider comparing
    // e1.use('A') against a known expected output.
    println {
      e1.use('A')
    }
    true
  }
}
| timperrett/enigma | src/test/scala/MachineSpec.scala | Scala | apache-2.0 | 495 |
import scala.concurrent.duration._
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import io.gatling.jdbc.Predef._
/**
 * Gatling load test: 100 users start at once and each issue one GET against
 * three /first/{word} endpoints in sequence.
 */
class KataSimulation extends Simulation {

  // Base protocol; host "api" is presumably resolved inside the test network
  // (e.g. docker-compose service name) — confirm against the deployment setup.
  val httpProtocol = http
    .baseURL("http://api:9000")
    .inferHtmlResources()
    .acceptHeader("text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")

  val headers = Map("Upgrade-Insecure-Requests" -> "1")

  val scn = scenario("KataSimulation")
    .exec(http("request_toto")
      .get("/first/toto")
      .headers(headers))
    .exec(http("request_tata")
      .get("/first/tata")
      .headers(headers))
    .exec(http("request_azertyuiop")
      .get("/first/azertyuiop")
      .headers(headers))

  setUp(scn.inject(atOnceUsers(100))).protocols(httpProtocol)
}
/*
* Copyright 2015 Functional Objects, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.funobjects.akka.persistence.orientdb
import akka.actor.ActorLogging
import akka.persistence._
import akka.persistence.journal._
import akka.serialization._
import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx
import com.orientechnologies.orient.core.index.{OCompositeKey, OIndex}
import com.orientechnologies.orient.core.metadata.schema.{OClass, OType}
import com.orientechnologies.orient.core.record.impl.ODocument
import com.orientechnologies.orient.core.sql.OCommandSQL
import com.orientechnologies.orient.core.sql.query.OSQLSynchQuery
import com.typesafe.config.Config
import scala.collection.immutable.Seq
import scala.collection.JavaConversions._
import scala.concurrent.Future
import scala.util.Try
import scala.util.control.NonFatal
/**
 * OrientDB Journal support for Akka Persistence.
 *
 * Events are stored as documents of class `AkkaJournal`, addressed through a
 * unique composite index on (persistenceId, seq). A second document class,
 * `AkkaJournalSeq`, records the highest deleted sequence number per
 * persistenceId so that `asyncReadHighestSequenceNr` does not regress after
 * deletions.
 */
class OrientDbJournal extends AsyncWriteJournal with ActorLogging {

  val cfg: Config = context.system.settings.config
  // OrientDB connection URL, read from the plugin's config section.
  val dbUrl = cfg.getString("funobjects-akka-orientdb-journal.db.url")

  // OrientDB document class names.
  val journalClass = "AkkaJournal"
  val journalSeqClass = "AkkaJournalSeq"

  // property names
  val seq = "seq"
  val persistenceId = "persistenceId"
  val bytes = "bytes"

  // Name of the unique composite index over (persistenceId, seq).
  val seqIndex = s"$journalClass.$persistenceId.$seq"

  val serializer = SerializationExtension(context.system)

  /** Deserializes a stored payload back into a PersistentRepr (throws on failure). */
  def repr(bytes: Array[Byte]): PersistentRepr = serializer.deserialize(bytes, classOf[PersistentRepr]).get

  /** Serializes a PersistentRepr for storage (throws on failure). */
  def reprBytes(p: PersistentRepr): Array[Byte] = serializer.serialize(p).get

  // cached database state, initialized in preStart
  var db = checkDb()
  var index: OIndex[_] = _ // set by checkDb

  import context.dispatcher

  /**
   * Deletes all messages with sequence number <= toSequenceNr, then upserts
   * the highest deleted sequence number into `AkkaJournalSeq` so the
   * high-water mark survives the deletion.
   */
  override def asyncDeleteMessagesTo(persistenceId: String, toSequenceNr: Long): Future[Unit] = {
    Future {
      OrientDbHelper.setupThreadContext(db)
      val qmax = new OSQLSynchQuery[ODocument]("select seq from AkkaJournalSeq where persistenceId = ?")
      val resmax: java.util.List[ODocument] = db.command(qmax).execute(persistenceId)
      val max = resmax.headOption.map(_.field("seq").asInstanceOf[Long]).getOrElse(0L)
      var delmax = max
      // All index entries with key <= (persistenceId, toSequenceNr).
      index.iterateEntriesMinor(key(persistenceId, toSequenceNr), true, true).foreach { oid =>
        val delseq = oid.getRecord[ODocument].field(seq).asInstanceOf[Long]
        db.delete(oid.getIdentity)
        if (delseq > delmax) {
          delmax = delseq
        }
      }
      // Persist the new high-water mark only if it moved forward.
      if (delmax > max) {
        val cmd = new OCommandSQL("update AkkaJournalSeq set persistenceId = ?, seq = ? upsert where persistenceId = ?")
        val resup: java.lang.Integer = db.command(cmd).execute(persistenceId,
          new java.lang.Long(delmax),
          persistenceId)
      }
    }
  }

  /**
   * Writes each AtomicWrite's payload in its own database transaction.
   *
   * NOTE(review): an exception from `inTransaction` fails the returned Future
   * as a whole instead of yielding a per-write Failure in the result Seq —
   * confirm this is the intended journal error mode.
   */
  override def asyncWriteMessages(writes: Seq[AtomicWrite]): Future[Seq[Try[Unit]]] = {
    Future {
      OrientDbHelper.setupThreadContext(db)
      writes.map { write =>
        inTransaction {
          write.payload.foreach { msg =>
            db.save[ODocument](new ODocument(journalClass)
              .field(seq, msg.sequenceNr)
              .field(persistenceId, msg.persistenceId)
              .field(bytes, reprBytes(msg)))
          }
        }
        true
      }.map(b => Try(()))
    }
  }

  /**
   * Replays messages in the inclusive [fromSequenceNr, toSequenceNr] range,
   * invoking the callback for at most `max` of them.
   *
   * NOTE(review): unlike the other operations, this does not call
   * OrientDbHelper.setupThreadContext(db) first — verify that is safe.
   */
  override def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)
    (replayCallback: (PersistentRepr) => Unit): Future[Unit] = {
    Future {
      val lower = key(persistenceId, fromSequenceNr)
      val upper = key(persistenceId, toSequenceNr)
      index.iterateEntriesBetween(lower, true, upper, true, true)
        .map ( oid => repr(oid.getRecord[ODocument].field(bytes)))
        .zipWithIndex
        .foreach { case (repr, n) => if (n < max) replayCallback(repr) }
    }
  }

  /**
   * Highest sequence number for the persistenceId: the max stored in the
   * journal, falling back to the `AkkaJournalSeq` high-water mark when every
   * message has been deleted (max query yields nothing / 0).
   */
  override def asyncReadHighestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = Future {
    OrientDbHelper.setupThreadContext(db)
    val q = new OSQLSynchQuery[ODocument]("select max(seq) from AkkaJournal where persistenceId = ?")
    val res: java.util.List[ODocument] = db.command(q).execute(persistenceId)
    val id = res.headOption.map(_.field("max").asInstanceOf[Long]).getOrElse(0L)
    if (id == 0L) {
      val qmax = new OSQLSynchQuery[ODocument]("select seq from AkkaJournalSeq where persistenceId = ?")
      val resmax: java.util.List[ODocument] = db.command(qmax).execute(persistenceId)
      resmax.headOption.map(_.field("seq").asInstanceOf[Long]).getOrElse(0L)
    } else {
      id
    }
  }

  // Re-open / re-validate the database when the actor (re)starts.
  override def preStart(): Unit = {
    db = checkDb()
    super.preStart()
  }

  override def postStop(): Unit = {
    super.postStop()
    db.close()
  }

  /**
   * Opens (or creates) the database and ensures the schema exists: the
   * `AkkaJournal` class with its unique (persistenceId, seq) index, and the
   * `AkkaJournalSeq` class for per-persistenceId high-water marks.
   * Side effect: caches the index in the `index` field.
   */
  private[orientdb] def checkDb(): ODatabaseDocumentTx = {
    // retrieve the schema
    val db = OrientDbHelper.openOrCreate(dbUrl, "admin", "admin")
    val schema = db.getMetadata.getSchema

    // create the DB class and index, if necessary
    val cls = Option(schema.getClass(journalClass)) getOrElse schema.createClass(journalClass)

    // add indexed properties to the schema (required for index creation)
    Option(cls.getProperty(seq)) getOrElse cls.createProperty(seq, OType.LONG)
    Option(cls.getProperty(persistenceId)) getOrElse cls.createProperty(persistenceId, OType.STRING)

    // create a unique index on the composite key of (persistentId, seq)
    index = Option(cls.getClassIndex(seqIndex)) getOrElse cls.createIndex(seqIndex, OClass.INDEX_TYPE.UNIQUE, persistenceId, seq)

    // create max seq records
    val clsSeq = Option(schema.getClass(journalSeqClass)) getOrElse schema.createClass(journalSeqClass)
    Option(clsSeq.getProperty(seq)) getOrElse clsSeq.createProperty(seq, OType.LONG)
    Option(clsSeq.getProperty(persistenceId)) getOrElse clsSeq.createProperty(persistenceId, OType.STRING)

    // make sure that everything ends up with right type
    assert(cls.getProperty(seq).getType == OType.LONG)
    assert(cls.getProperty(persistenceId).getType == OType.STRING)
    assert(cls.getIndexes.map(_.getName).contains(seqIndex))
    db
  }

  // execute the given code within a database transaction (rollback + rethrow on failure)
  private[orientdb] def inTransaction(f: => Unit): Unit = {
    try {
      db.begin()
      f
      db.commit()
    } catch {
      case NonFatal(ex) => db.rollback(); throw ex
    }
  }

  // create a composite key
  private[orientdb] def key(persistenceId: String, seq: Long) = {
    val k = new OCompositeKey()
    k.addKey(persistenceId)
    k.addKey(seq)
    k
  }

  // create a partial key with persistenceId only
  private[orientdb] def partialKey(persistenceId: String) = {
    val k = new OCompositeKey()
    k.addKey(persistenceId)
    k
  }

  // transform a PersistentRepr (in serialized form)
  private[orientdb] def mapSerialized(bytes: Array[Byte])(transform: PersistentRepr => PersistentRepr): Array[Byte] =
    reprBytes(transform(repr(bytes)))

  private[orientdb] def find(keys: Seq[OCompositeKey]): Seq[ODocument] =
    index.iterateEntries(keys, true)
      .map { oid => oid.getRecord[ODocument] }
      .toList

  private[orientdb] def find(key: OCompositeKey): Option[ODocument] = find(Seq(key)).headOption
}
| funobjects/akka-persistence-orientdb | src/main/scala/org/funobjects/akka/persistence/orientdb/OrientDbJournal.scala | Scala | apache-2.0 | 7,740 |
package com.phaller.rasync
package lattice
import scala.annotation.implicitNotFound
/** A partial ordering that additionally designates a least element `bottom`. */
trait PartialOrderingWithBottom[V] extends PartialOrdering[V] {
  /**
   * Result of comparing x with operand y. Returns None if operands are not comparable. If operands are comparable, returns Some(r) where
   * r < 0 iff x < y
   * r == 0 iff x == y
   * r > 0 iff x > y
   */
  override def tryCompare(x: V, y: V): Option[Int] =
    if (lt(x, y)) Some(-1)
    else if (gt(x, y)) Some(1)
    else if (equiv(x, y)) Some(0)
    else None

  // Least element: expected to satisfy lteq(bottom, v) for every v.
  val bottom: V
}
object PartialOrderingWithBottom {
  /**
   * Flat ordering for nullable types: `null` is the bottom element and every
   * other value is comparable only to itself.
   */
  def trivial[T >: Null]: PartialOrderingWithBottom[T] =
    new PartialOrderingWithBottom[T] {
      override val bottom: T = null
      override def lteq(v1: T, v2: T): Boolean =
        if (v1 == bottom) true else v1 == v2
    }
}
/** A join-semilattice with bottom; the partial order is derived from `join`. */
@implicitNotFound("type ${V} does not have a Lattice instance")
trait Lattice[V] extends PartialOrderingWithBottom[V] {
  /**
   * Return the join of v1 and v2 wrt. the lattice.
   */
  def join(v1: V, v2: V): V

  // v1 <= v2 iff joining v1 into v2 changes nothing.
  override def lteq(v1: V, v2: V): Boolean = {
    join(v1, v2) == v2
  }

  override def gteq(v1: V, v2: V): Boolean = {
    join(v1, v2) == v1
  }
}
object Lattice {
  /** Product lattice on pairs: join, bottom and comparisons are component-wise. */
  implicit def pair[T](implicit lattice: Lattice[T]): Lattice[(T, T)] = {
    new Lattice[(T, T)] {
      val bottom: (T, T) = (lattice.bottom, lattice.bottom)

      def join(v1: (T, T), v2: (T, T)): (T, T) = {
        val (a1, b1) = v1
        val (a2, b2) = v2
        (lattice.join(a1, a2), lattice.join(b1, b2))
      }

      override def lteq(v1: (T, T), v2: (T, T)): Boolean =
        lattice.lteq(v1._1, v2._1) && lattice.lteq(v1._2, v2._2)

      override def gteq(v1: (T, T), v2: (T, T)): Boolean =
        lattice.gteq(v1._1, v2._1) && lattice.gteq(v1._2, v2._2)
    }
  }
}
| phaller/reactive-async | core/src/main/scala/com/phaller/rasync/lattice/Lattice.scala | Scala | bsd-2-clause | 1,736 |
package org.scalaide.ui.internal.preferences
import org.eclipse.core.runtime.Path
import org.eclipse.jface.preference.PreferencePage
import org.eclipse.jface.viewers.ISelectionChangedListener
import org.eclipse.jface.viewers.IStructuredSelection
import org.eclipse.jface.viewers.ListViewer
import org.eclipse.jface.viewers.SelectionChangedEvent
import org.eclipse.swt.SWT
import org.eclipse.swt.events.SelectionAdapter
import org.eclipse.swt.events.SelectionEvent
import org.eclipse.swt.layout.FillLayout
import org.eclipse.swt.layout.GridData
import org.eclipse.swt.layout.GridLayout
import org.eclipse.swt.widgets.Button
import org.eclipse.swt.widgets.Composite
import org.eclipse.swt.widgets.Control
import org.eclipse.swt.widgets.DirectoryDialog
import org.eclipse.ui.IWorkbench
import org.eclipse.ui.IWorkbenchPreferencePage
import org.scalaide.core.internal.project.LabeledDirectoryScalaInstallation
import org.scalaide.core.internal.project.DirectoryScalaInstallation.directoryScalaInstallationFactory
import org.scalaide.core.internal.project.ScalaInstallation
import org.scalaide.ui.internal.project.ScalaInstallationUIProviders
import scala.collection.JavaConverters.asScalaIteratorConverter
import org.eclipse.jface.dialogs.InputDialog
import org.eclipse.jface.dialogs.IInputValidator
import org.eclipse.jface.window.Window
import org.scalaide.core.internal.project.LabeledDirectoryScalaInstallation
import org.eclipse.core.runtime.IStatus
import org.eclipse.debug.core.DebugPlugin
import org.eclipse.core.runtime.Status
import org.scalaide.util.eclipse.FileUtils
import org.scalaide.core.IScalaPlugin
import org.scalaide.core.internal.project.ModifiedScalaInstallations
import scala.util.Try
import scala.util.Failure
import scala.util.Success
import scala.collection.mutable.Publisher
import org.scalaide.core.internal.project.CustomScalaInstallationLabel
import org.scalaide.core.internal.project.LabeledScalaInstallation
import scala.PartialFunction.cond
import org.scalaide.core.SdtConstants
class InstalledScalasPreferencePage extends PreferencePage with IWorkbenchPreferencePage with ScalaInstallationUIProviders with Publisher[ModifiedScalaInstallations] {
def itemTitle = "Scala"
var customInstallations = ScalaInstallation.customInstallations
// to save installations whenever they are edited
subscribe(ScalaInstallation.installationsTracker)
noDefaultAndApplyButton()
override def performOk(): Boolean = {
ScalaInstallation.customInstallations &~ customInstallations foreach {ScalaInstallation.customInstallations.remove(_)}
customInstallations &~ ScalaInstallation.customInstallations foreach {ScalaInstallation.customInstallations.add(_)}
publish(ModifiedScalaInstallations())
super.performOk()
}
def createContents(parent: Composite): Control = {
import org.scalaide.util.eclipse.SWTUtils.fnToSelectionAdapter
import org.scalaide.util.eclipse.SWTUtils.fnToSelectionChangedEvent
val composite = new Composite(parent, SWT.NONE)
composite.setLayout(new GridLayout(2, false))
val list = new ListViewer(composite)
list.getControl().setLayoutData(new GridData(SWT.FILL, SWT.FILL, true, true))
list.setContentProvider(new ContentProvider())
val installationLabels = new LabelProvider
list.setLabelProvider(installationLabels)
list.setInput(ScalaInstallation.availableInstallations)
val buttons = new Composite(composite, SWT.NONE)
buttons.setLayoutData(new GridData(SWT.BEGINNING, SWT.TOP, false, true))
buttons.setLayout(new FillLayout(SWT.VERTICAL))
val buttonAdd = new Button(composite, SWT.PUSH)
buttonAdd.setText("Add")
buttonAdd.setEnabled(true)
buttonAdd.addSelectionListener({ (e: SelectionEvent) =>
import org.scalaide.ui.internal.handlers.{ GenericExceptionStatusHandler => GS }
val shell = parent.getShell()
val dirDialog = new DirectoryDialog(shell)
dirDialog.setText("Select your scala directory")
val selectedDir = dirDialog.open()
if (selectedDir != null) {
def genericExceptionStatus(e: IllegalArgumentException) = new Status(IStatus.ERROR, SdtConstants.PluginId, GS.STATUS_CODE_EXCEPTION, "", e)
def manageStatus(status: IStatus) = {
val handler = DebugPlugin.getDefault().getStatusHandler(status)
handler.handleStatus(status, this)
}
val dir = new Path(selectedDir)
if (!dir.toFile().isDirectory()) {
val errorStatus = genericExceptionStatus(new IllegalArgumentException("This selection is not a valid directory !"))
manageStatus(errorStatus)
} else {
directoryScalaInstallationFactory(dir) match {
case Failure(thrown) => thrown match {
case e: IllegalArgumentException => manageStatus(genericExceptionStatus(e))
case _ => throw (thrown)
}
case Success(si) =>
// give a label to this DirectoryScalaInstallation
val dlg = new InputDialog(shell, "", "Enter a name for this Scala Installation", "", new IInputValidator() {
override def isValid(newText: String): String = {
if (labels contains newText) "This is a reserved name."
else if (customInstallations.flatMap(_.getName()) contains newText) "This name is already used by a custom Scala installation."
else null
}
})
if (dlg.open() == Window.OK) {
// User clicked OK; update the label with the input
val lsi = new LabeledDirectoryScalaInstallation(dlg.getValue(), si)
customInstallations += lsi
list.add(lsi)
}
}
}
}
})
val buttonRemove = new Button(composite, SWT.PUSH)
buttonRemove.setText("Remove")
buttonRemove.setEnabled(false)
buttonRemove.addSelectionListener({ (e: SelectionEvent) =>
val selection = list.getSelection().asInstanceOf[IStructuredSelection]
selection.iterator().asScala foreach { s =>
s match {
case d: LabeledScalaInstallation if cond(d.label) { case CustomScalaInstallationLabel(tag) => true } =>
customInstallations -= d
list.remove(d)
case _ => ()
}
}
})
list.addSelectionChangedListener({ (event: SelectionChangedEvent) =>
val selection = event.getSelection()
if (selection.isEmpty()) buttonRemove.setEnabled(false) else buttonRemove.setEnabled(true)
})
composite
}
def init(workbench: IWorkbench): Unit = {}
} | romanowski/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/ui/internal/preferences/InstalledScalasPreferencePage.scala | Scala | bsd-3-clause | 6,616 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io.{File, FileOutputStream, InputStream, IOException}
import java.net.URI
import scala.collection.mutable
import scala.io.Source
import org.apache.hadoop.fs.Path
import org.json4s.jackson.JsonMethods._
import org.scalatest.BeforeAndAfter
import org.apache.spark._
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.io._
import org.apache.spark.util.{JsonProtocol, Utils}
/**
* Test whether EventLoggingListener logs events properly.
*
* This tests whether EventLoggingListener actually log files with expected name patterns while
* logging events, whether the parsing of the file names is correct, and whether the logged events
* can be read and deserialized into actual SparkListenerEvents.
*/
class EventLoggingListenerSuite extends SparkFunSuite with LocalSparkContext with BeforeAndAfter
  with Logging {
  import EventLoggingListenerSuite._

  // Filesystem handle used to inspect the event log files written by the listener.
  private val fileSystem = Utils.getHadoopFileSystem("/",
    SparkHadoopUtil.get.newConfiguration(new SparkConf()))

  // Per-test scratch directory that receives the event logs; recreated before each test.
  private var testDir: File = _
  private var testDirPath: Path = _

  before {
    testDir = Utils.createTempDir()
    testDir.deleteOnExit()
    testDirPath = new Path(testDir.getAbsolutePath())
  }

  after {
    Utils.deleteRecursively(testDir)
  }

  test("Verify log file exist") {
    // Verify logging directory exists
    val conf = getLoggingConf(testDirPath)
    val eventLogger = new EventLoggingListener("test", None, testDirPath.toUri(), conf)
    eventLogger.start()

    // While logging is in progress the file carries the IN_PROGRESS suffix.
    val logPath = new Path(eventLogger.logPath + EventLoggingListener.IN_PROGRESS)
    assert(fileSystem.exists(logPath))
    val logStatus = fileSystem.getFileStatus(logPath)
    assert(!logStatus.isDir)

    // Verify log is renamed (IN_PROGRESS suffix dropped) after stop()
    eventLogger.stop()
    assert(!fileSystem.getFileStatus(new Path(eventLogger.logPath)).isDir)
  }

  test("Basic event logging") {
    testEventLogging()
  }

  test("Basic event logging with compression") {
    CompressionCodec.ALL_COMPRESSION_CODECS.foreach { codec =>
      testEventLogging(compressionCodec = Some(CompressionCodec.getShortName(codec)))
    }
  }

  test("End-to-end event logging") {
    testApplicationEventLogging()
  }

  test("End-to-end event logging with compression") {
    CompressionCodec.ALL_COMPRESSION_CODECS.foreach { codec =>
      testApplicationEventLogging(compressionCodec = Some(CompressionCodec.getShortName(codec)))
    }
  }

  test("Log overwriting") {
    val logUri = EventLoggingListener.getLogPath(testDir.toURI, "test", None)
    val logPath = new URI(logUri).getPath
    // Create file before writing the event log
    new FileOutputStream(new File(logPath)).close()
    // Expected IOException, since we haven't enabled log overwrite.
    intercept[IOException] { testEventLogging() }
    // Try again, but enable overwriting.
    testEventLogging(extraConf = Map("spark.eventLog.overwrite" -> "true"))
  }

  test("Event log name") {
    // without compression
    assert(s"file:/base-dir/app1" === EventLoggingListener.getLogPath(
      Utils.resolveURI("/base-dir"), "app1", None))
    // with compression
    assert(s"file:/base-dir/app1.lzf" ===
      EventLoggingListener.getLogPath(Utils.resolveURI("/base-dir"), "app1", None, Some("lzf")))
    // illegal characters in app ID
    assert(s"file:/base-dir/a-fine-mind_dollar_bills__1" ===
      EventLoggingListener.getLogPath(Utils.resolveURI("/base-dir"),
        "a fine:mind$dollar{bills}.1", None))
    // illegal characters in app ID with compression
    assert(s"file:/base-dir/a-fine-mind_dollar_bills__1.lz4" ===
      EventLoggingListener.getLogPath(Utils.resolveURI("/base-dir"),
        "a fine:mind$dollar{bills}.1", None, Some("lz4")))
  }

  /* ----------------- *
   * Actual test logic *
   * ----------------- */

  // NOTE: a redundant second `import EventLoggingListenerSuite._` used to live here;
  // the import at the top of the class already covers this scope, so it was removed.

  /**
   * Test basic event logging functionality.
   *
   * This creates two simple events, posts them to the EventLoggingListener, and verifies that
   * exactly these two events are logged in the expected file.
   *
   * @param compressionCodec short name of the codec to compress the log with, if any
   * @param extraConf additional Spark configuration entries layered on top of the
   *                  standard event-logging configuration
   */
  private def testEventLogging(
      compressionCodec: Option[String] = None,
      extraConf: Map[String, String] = Map()) {
    val conf = getLoggingConf(testDirPath, compressionCodec)
    extraConf.foreach { case (k, v) => conf.set(k, v) }
    // Encode the codec in the log name so compressed runs don't collide with plain ones.
    val logName = compressionCodec.map("test-" + _).getOrElse("test")
    val eventLogger = new EventLoggingListener(logName, None, testDirPath.toUri(), conf)
    val listenerBus = new LiveListenerBus
    val applicationStart = SparkListenerApplicationStart("Greatest App (N)ever", None,
      125L, "Mickey", None)
    val applicationEnd = SparkListenerApplicationEnd(1000L)

    // A comprehensive test on JSON de/serialization of all events is in JsonProtocolSuite
    eventLogger.start()
    listenerBus.start(sc)
    listenerBus.addListener(eventLogger)
    listenerBus.postToAll(applicationStart)
    listenerBus.postToAll(applicationEnd)
    eventLogger.stop()

    // Verify file contains exactly the two events logged
    // (plus the SparkListenerLogStart header the logger writes on start()).
    val logData = EventLoggingListener.openEventLog(new Path(eventLogger.logPath), fileSystem)
    try {
      val lines = readLines(logData)
      val logStart = SparkListenerLogStart(SPARK_VERSION)
      assert(lines.size === 3)
      assert(lines(0).contains("SparkListenerLogStart"))
      assert(lines(1).contains("SparkListenerApplicationStart"))
      assert(lines(2).contains("SparkListenerApplicationEnd"))
      assert(JsonProtocol.sparkEventFromJson(parse(lines(0))) === logStart)
      assert(JsonProtocol.sparkEventFromJson(parse(lines(1))) === applicationStart)
      assert(JsonProtocol.sparkEventFromJson(parse(lines(2))) === applicationEnd)
    } finally {
      logData.close()
    }
  }

  /**
   * Test end-to-end event logging functionality in an application.
   * This runs a simple Spark job and asserts that the expected events are logged when expected.
   *
   * @param compressionCodec short name of the codec to compress the log with, if any
   */
  private def testApplicationEventLogging(compressionCodec: Option[String] = None) {
    // Set defaultFS to something that would cause an exception, to make sure we don't run
    // into SPARK-6688.
    val conf = getLoggingConf(testDirPath, compressionCodec)
      .set("spark.hadoop.fs.defaultFS", "unsupported://example.com")
    // Deliberately shadows LocalSparkContext's `sc`; this context is stopped explicitly below.
    val sc = new SparkContext("local-cluster[2,2,1024]", "test", conf)
    assert(sc.eventLogger.isDefined)
    val eventLogger = sc.eventLogger.get
    val eventLogPath = eventLogger.logPath
    val expectedLogDir = testDir.toURI()
    assert(eventLogPath === EventLoggingListener.getLogPath(
      expectedLogDir, sc.applicationId, None, compressionCodec.map(CompressionCodec.getShortName)))

    // Begin listening for events that trigger asserts
    val eventExistenceListener = new EventExistenceListener(eventLogger)
    sc.addSparkListener(eventExistenceListener)

    // Trigger asserts for whether the expected events are actually logged
    sc.parallelize(1 to 10000).count()
    sc.stop()

    // Ensure all asserts have actually been triggered
    eventExistenceListener.assertAllCallbacksInvoked()

    // Make sure expected events exist in the log file.
    val logData = EventLoggingListener.openEventLog(new Path(eventLogger.logPath), fileSystem)
    val logStart = SparkListenerLogStart(SPARK_VERSION)
    val lines = readLines(logData)
    // Mutable set of event type names still to be seen; each is removed when found.
    val eventSet = mutable.Set(
      SparkListenerApplicationStart,
      SparkListenerBlockManagerAdded,
      SparkListenerExecutorAdded,
      SparkListenerEnvironmentUpdate,
      SparkListenerJobStart,
      SparkListenerJobEnd,
      SparkListenerStageSubmitted,
      SparkListenerStageCompleted,
      SparkListenerTaskStart,
      SparkListenerTaskEnd,
      SparkListenerApplicationEnd).map(Utils.getFormattedClassName)
    lines.foreach { line =>
      eventSet.foreach { event =>
        if (line.contains(event)) {
          // Substring match is only a pre-filter; confirm via a real JSON round-trip.
          val parsedEvent = JsonProtocol.sparkEventFromJson(parse(line))
          val eventType = Utils.getFormattedClassName(parsedEvent)
          if (eventType == event) {
            eventSet.remove(event)
          }
        }
      }
    }
    assert(JsonProtocol.sparkEventFromJson(parse(lines(0))) === logStart)
    assert(eventSet.isEmpty, "The following events are missing: " + eventSet.toSeq)
  }

  /** Reads all lines from an event log stream (which may be compressed). */
  private def readLines(in: InputStream): Seq[String] = {
    Source.fromInputStream(in).getLines().toSeq
  }

  /**
   * A listener that asserts certain events are logged by the given EventLoggingListener.
   * This is necessary because events are posted asynchronously in a different thread.
   *
   * (The eventLogger constructor parameter is currently unused by this listener.)
   */
  private class EventExistenceListener(eventLogger: EventLoggingListener) extends SparkListener {
    var jobStarted = false
    var jobEnded = false
    var appEnded = false

    override def onJobStart(jobStart: SparkListenerJobStart) {
      jobStarted = true
    }

    override def onJobEnd(jobEnd: SparkListenerJobEnd) {
      jobEnded = true
    }

    override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd) {
      appEnded = true
    }

    /** Fails the test if any of the expected callbacks was never invoked. */
    def assertAllCallbacksInvoked() {
      assert(jobStarted, "JobStart callback not invoked!")
      assert(jobEnded, "JobEnd callback not invoked!")
      assert(appEnded, "ApplicationEnd callback not invoked!")
    }
  }
}
object EventLoggingListenerSuite {

  /**
   * Builds a SparkConf with event logging enabled and pointed at the given directory.
   *
   * @param logDir directory to write event logs into
   * @param compressionCodec short codec name; when present, log compression is
   *                         switched on and the codec is configured
   */
  def getLoggingConf(logDir: Path, compressionCodec: Option[String] = None): SparkConf = {
    val conf = new SparkConf()
      .set("spark.eventLog.enabled", "true")
      .set("spark.eventLog.testing", "true")
      .set("spark.eventLog.dir", logDir.toString)
    compressionCodec match {
      case Some(codec) =>
        conf.set("spark.eventLog.compress", "true")
        conf.set("spark.io.compression.codec", codec)
      case None => // leave compression disabled
    }
    conf
  }

  /** Application id unique per invocation (timestamp-based). */
  def getUniqueApplicationId: String = "test-" + System.currentTimeMillis
}
| ArvinDevel/onlineAggregationOnSparkV2 | core/src/test/scala/org/apache/spark/scheduler/EventLoggingListenerSuite.scala | Scala | apache-2.0 | 10,643 |
package sp.areus
import akka.actor._
import akka.pattern.ask
import akka.util.Timeout
import sp.domain._
import sp.system.messages._
import scala.concurrent.duration._
/**
* Created by Kristofer on 2014-06-27.
*/
class ImportKUKAFileService(modelHandler: ActorRef) extends Actor {
  // Timeout for the ask (?) pattern; only referenced by the commented-out
  // modelHandler interaction below.
  implicit val timeout = Timeout(1 seconds)
  import context.dispatcher
  // Handles import requests. NOTE(review): all actual handling is commented
  // out, so a Request currently produces no reply and no side effects.
  def receive = {
    case Request(_, attr, _, _) => {
      // Capture the sender eagerly so it would be safe to use from async code.
      val reply = sender
//      extract(attr) match {
//        case Some((file, name, model)) => {
//          println(s"I got the file in importKUKA")
//
//          val trajectory = extractTrajectory(file)
//
//          val operationName = name.flatMap(_.asString).getOrElse("robotOP").replaceAll("\\\\.[^.]*$", "")
//
//          val energy = {
//            val joints = List(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
//            val sum = trajectory.foldLeft(joints)((a, b) => {
//              val current = b.asMap.flatMap(_.get("current")).flatMap(_.asList).map(_.flatMap(_.asDouble)).getOrElse(joints)
//              current.zip(a).foldLeft(List[Double]())((res, z)=> res :+ ((z._1^2) + z._2^2))
//            })
//          }
//          val attrib = Attr("trajectory"-> MapPrimitive(Map("samples" -> ListPrimitive(trajectory))))
//
//          val op = Operation(operationName, List(), attrib)
//          val res = modelHandler ? UpdateIDs(model, -1, List(op))
//
//          reply ! "yes"
//
//        }
//        case None => reply ! errorMessage(attr)
//      }
    }
  }
//  def extract(attr: SPAttributes) = {
//    for {
//      file <- attr.getAsString("file")
//      model <- attr.getAsID("model")
//    } yield (file, attr.get("name"), model)
//  }
//
//  def errorMessage(attr: SPAttributes) = {
//    SPError("The request is missing parameters: \\n" +
//      s"file: ${attr.getAsString("file")}" + "\\n" +
//      s"Request: ${attr}" )
//  }
//
//  def extractTrajectory(file: String): List[SPAttributeValue] = {
//    val lines = file.lines.toList.map(_.trim.split("""\\s+""").toList )
//    val numbs = lines.map(_.flatMap(parseDouble)).filterNot(_.isEmpty)
//    val joints = numbs.map(_.slice(6, 12))
//    val energy = numbs.map(_.slice(0, 6))
//
//    val zip = joints zip energy zip (1 to joints.size)
//
//    zip.map {
//      case ((position, energy), sample) => {
//        MapPrimitive(Map(
//          "sample"-> IntPrimitive(sample),
//          "position"-> ListPrimitive(position.map(DoublePrimitive.apply)),
//          "current"-> ListPrimitive(energy.map(DoublePrimitive.apply))
//        ))
//      }
//    }
//  }
  // Safe string-to-double parse: Some(value) on success, None on any failure.
  def parseDouble(s: String) = try { Some(s.toDouble) } catch { case _:Throwable => None }
  // Adds an exponent operator to Double; delegates to math.pow, so the result
  // is always a Double.
  implicit class ExtraDouble(d: Double) {
    def ^(n: Int) = scala.math.pow(d, n)
  }
}
object ImportKUKAFileService {
  /**
   * Props factory for [[ImportKUKAFileService]].
   *
   * Uses the classOf-based Props constructor so the actor is instantiated by the
   * actor system rather than capturing an enclosing scope.
   *
   * @param modelHandler actor that owns the model this service writes imported data to
   */
  def props(modelHandler: ActorRef): Props = Props(classOf[ImportKUKAFileService], modelHandler)
}
| kristoferB/SP | sp1/src/main/scala/sp/areus/ImportKUKAFileService.scala | Scala | mit | 2,875 |
// Closures: the returned function captures `more` from its defining scope.
def makeIncreaser(more: Int) = (x: Int) => x + more
// makeIncreaser returns a closure, so inc1 and inc9999 are both functions,
// each closing over its own value of `more`.
val inc1 = makeIncreaser(1)
val inc9999 = makeIncreaser(9999)
inc1(10)
inc9999(10)
// Repeated (variadic) parameters: `args` is seen as a Seq[String] inside the body.
def echo(args: String*) =
{
  for (arg <- args)
  {
    println(arg)
  }
}
echo()
echo("one")
// Passing an array where varargs are expected requires the `: _*` ascription.
val arr = Array("What's", "up", "doc?")
echo(arr:_*)
// Tail recursion: the recursive call is in tail position.
// NOTE(review): isGoodEnough and improve are not defined in this snippet;
// it will not compile standalone — presumably they come from surrounding
// example code.
def approximate(guess: Double):Double =
{
  if (isGoodEnough(guess)) guess
  else approximate(improve(guess))
}
def boom(x: Int) :Int=
{
  if (x == 0) throw new Exception("boom!")
  else boom(x - 1) + 1 // Drop the `+ 1` and this becomes tail-recursive: on crash the stack then holds only a single frame.
}
} | fangguanya/study | Java/scala_shell/closuremethod.scala | Scala | mit | 706 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.integration.spark.testsuite.primitiveTypes
import java.util.Random
import org.apache.spark.sql.test.util.QueryTest
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SaveMode}
import org.scalatest.BeforeAndAfterAll
/**
* Test Class for filter query on Double datatypes
*/
class DoubleDataTypeTestCase extends QueryTest with BeforeAndAfterAll {

  /** Ten-row test frame: int id, two-valued city column, and two double measures. */
  lazy val df: DataFrame = generateDataFrame

  /**
   * Builds the deterministic test DataFrame: ids 1..10 over two partitions,
   * city alternating between "London0"/"London1", m1 = id/13 and m2 = id/11.
   * (An unused `new Random()` local was removed; all values are id-derived.)
   */
  private def generateDataFrame(): DataFrame = {
    val rdd = sqlContext.sparkContext
      .parallelize(1 to 10, 2)
      .map { x =>
        Row(x, "London" + (x % 2), x.toDouble / 13, x.toDouble / 11)
      }
    val schema = StructType(
      Seq(
        StructField("id", IntegerType, nullable = false),
        StructField("city", StringType, nullable = false),
        StructField("m1", DoubleType, nullable = false),
        StructField("m2", DoubleType, nullable = false)
      )
    )
    sqlContext.createDataFrame(rdd, schema)
  }

  // Drop leftovers from earlier runs, then write the same data both as a
  // CarbonData table and a plain Hive table so queries can be cross-checked.
  override def beforeAll {
    sql("drop table if exists uniq_carbon")
    sql("drop table if exists uniq_hive")
    sql("drop table if exists doubleTypeCarbonTable")
    sql("drop table if exists doubleTypeHiveTable")
    df.write
      .format("carbondata")
      .option("tableName", "doubleTypeCarbonTable")
      .option("tempCSV", "false")
      .option("single_pass", "true")
      .option("dictionary_exclude", "city")
      .option("table_blocksize", "32")
      .mode(SaveMode.Overwrite)
      .save()
    df.write
      .mode(SaveMode.Overwrite)
      .saveAsTable("doubleTypeHiveTable")
  }

  test("detail query") {
    // Full-table scan must agree between the Carbon and Hive copies.
    checkAnswer(sql("select * from doubleTypeCarbonTable order by id"),
      sql("select * from doubleTypeHiveTable order by id"))
  }

  test("duplicate values") {
    // Load the same CSV fixture into Carbon and Hive tables and compare a
    // range filter on the double column.
    sql("create table uniq_carbon(name string, double_column double) stored by 'carbondata' TBLPROPERTIES ('DICTIONARY_INCLUDE'='double_column')")
    sql(s"load data inpath '$resourcesPath/uniq.csv' into table uniq_carbon")
    sql("create table uniq_hive(name string, double_column double) ROW FORMAT DELIMITED FIELDS TERMINATED BY ','")
    sql(s"load data local inpath '$resourcesPath/uniqwithoutheader.csv' into table uniq_hive")
    checkAnswer(sql("select * from uniq_carbon where double_column>=11"),
      sql("select * from uniq_hive where double_column>=11"))
  }

//  test("agg query") {
//    checkAnswer(sql("select city, sum(m1), avg(m1), count(m1), max(m1), min(m1) from doubleTypeCarbonTable group by city"),
//      sql("select city, sum(m1), avg(m1), count(m1), max(m1), min(m1) from doubleTypeHiveTable group by city"))
//
//    checkAnswer(sql("select city, sum(m2), avg(m2), count(m2), max(m2), min(m2) from doubleTypeCarbonTable group by city"),
//      sql("select city, sum(m2), avg(m2), count(m2), max(m2), min(m2) from doubleTypeHiveTable group by city"))
//  }

  // Clean up every table this suite created.
  override def afterAll {
    sql("drop table if exists uniq_carbon")
    sql("drop table if exists uniq_hive")
    sql("drop table if exists doubleTypeCarbonTable")
    sql("drop table if exists doubleTypeHiveTable")
  }
} | sgururajshetty/carbondata | integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/primitiveTypes/DoubleDataTypeTestCase.scala | Scala | apache-2.0 | 3,942 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.rdd.fragment
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.converters.AlignmentRecordConverter
import org.bdgenomics.adam.models.{
RecordGroupDictionary,
ReferenceRegion,
SequenceDictionary
}
import org.bdgenomics.adam.rdd.{ AvroReadGroupGenomicRDD, JavaSaveArgs }
import org.bdgenomics.adam.rdd.read.{
AlignedReadRDD,
AlignmentRecordRDD,
UnalignedReadRDD
}
import org.bdgenomics.formats.avro._
import org.bdgenomics.utils.misc.Logging
import scala.collection.JavaConversions._
/**
* Helper singleton object for building FragmentRDDs.
*/
private[rdd] object FragmentRDD {

  /**
   * Wraps an RDD of fragments with no attached metadata.
   *
   * @param rdd The fragments to wrap.
   * @return A FragmentRDD whose sequence dictionary and record group
   *   dictionary are both empty.
   */
  def fromRdd(rdd: RDD[Fragment]): FragmentRDD =
    FragmentRDD(rdd = rdd,
      sequences = SequenceDictionary.empty,
      recordGroups = RecordGroupDictionary.empty)
}
/**
* A genomic RDD that supports RDDs of Fragments.
*
* @param rdd The underlying RDD of Fragment data.
* @param sequences The genomic sequences this data was aligned to, if any.
* @param recordGroups The record groups these Fragments came from.
*/
case class FragmentRDD(rdd: RDD[Fragment],
                       sequences: SequenceDictionary,
                       recordGroups: RecordGroupDictionary) extends AvroReadGroupGenomicRDD[Fragment, FragmentRDD] {
  /**
   * Replaces the underlying RDD with a new RDD.
   *
   * @param newRdd The RDD to replace our underlying RDD with.
   * @return Returns a new FragmentRDD where the underlying RDD has been
   *   swapped out. Sequence and record group dictionaries are carried over.
   */
  protected def replaceRdd(newRdd: RDD[Fragment]): FragmentRDD = {
    copy(rdd = newRdd)
  }
  /**
   * Essentially, splits up the reads in a Fragment.
   *
   * @return Returns this RDD converted back to reads.
   */
  def toReads(): AlignmentRecordRDD = {
    val converter = new AlignmentRecordConverter
    // convert the fragments to reads
    val newRdd = rdd.flatMap(converter.convertFragment)
    // are we aligned? An empty sequence dictionary is treated as unaligned data.
    if (sequences.isEmpty) {
      UnalignedReadRDD(newRdd, recordGroups)
    } else {
      AlignedReadRDD(newRdd, sequences, recordGroups)
    }
  }
  /**
   * Saves Fragments to Parquet.
   *
   * @param filePath Path to save fragments at.
   */
  def save(filePath: java.lang.String) {
    // Delegates to the inherited Parquet writer with default save arguments.
    saveAsParquet(new JavaSaveArgs(filePath))
  }
  /**
   * Returns the regions that this fragment covers.
   *
   * Since a fragment may be chimeric or multi-mapped, we do not try to compute
   * the hull of the underlying element.
   *
   * @param elem The Fragment to get the region from.
   * @return Returns all regions covered by this fragment; alignments without a
   *   resolvable region are skipped (ReferenceRegion.opt yields None for them).
   */
  protected def getReferenceRegions(elem: Fragment): Seq[ReferenceRegion] = {
    // NOTE: relies on the file-level JavaConversions import to treat the
    // Java list returned by getAlignments as a Scala collection.
    elem.getAlignments
      .flatMap(r => ReferenceRegion.opt(r))
      .toSeq
  }
}
| tdanford/adam | adam-core/src/main/scala/org/bdgenomics/adam/rdd/fragment/FragmentRDD.scala | Scala | apache-2.0 | 3,723 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets matching specific criteria, giving a basic overview of the dataset's contents without surfacing deeper patterns.